max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
emcee3/pools/__init__.py | dfm/emcee3 | 23 | 6620651 | <filename>emcee3/pools/__init__.py<gh_stars>10-100
# -*- coding: utf-8 -*-
from .default import DefaultPool
from .interruptible import InterruptiblePool
from .jl import JoblibPool
__all__ = ["DefaultPool", "InterruptiblePool", "JoblibPool"]
| <filename>emcee3/pools/__init__.py<gh_stars>10-100
# -*- coding: utf-8 -*-
from .default import DefaultPool
from .interruptible import InterruptiblePool
from .jl import JoblibPool
__all__ = ["DefaultPool", "InterruptiblePool", "JoblibPool"]
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.085541 | 1 |
mailserver/setup.py | lp-programming/txmailserver | 0 | 6620652 | from setuptools import setup
setup(name="txmailserver",
version="imap4-0.1",
description="This project provides a starter mail server written using Twisted.",
keywords="twisted smtp imap pop email",
url="https://launchpad.net/txmailserver",
license="MIT / X / Expat License",
packages=["txmailserver"],
install_requires=[# -*- Extra requirements: -*-
"zope.interface",
"twisted"
],
test_suite="tests",
tests_require=["nose"]
)
| from setuptools import setup
setup(name="txmailserver",
version="imap4-0.1",
description="This project provides a starter mail server written using Twisted.",
keywords="twisted smtp imap pop email",
url="https://launchpad.net/txmailserver",
license="MIT / X / Expat License",
packages=["txmailserver"],
install_requires=[# -*- Extra requirements: -*-
"zope.interface",
"twisted"
],
test_suite="tests",
tests_require=["nose"]
)
| en | 0.662293 | # -*- Extra requirements: -*- | 1.223706 | 1 |
music_waterfall.py | TristanCacqueray/pyrender | 1 | 6620653 | <reponame>TristanCacqueray/pyrender
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0
# This is a basic Julia set explorer
from utils import *
#constants
FPS = 25
def main(argv):
if len(argv) < 2:
print "usage: %s audio.wav [start_frame]" % argv[0]
return
wav, audio_frame_size, audio_frames_path = load_wav(argv[1], fps = FPS)
screen = Screen(WINSIZE)
spectrogram = SpectroGram(audio_frame_size)
waterfall = Waterfall((WINSIZE[0] * 5 / 6., WINSIZE[1]), audio_frame_size)
wavgraph = WavGraph((WINSIZE[0] / 6, WINSIZE[1]), audio_frame_size)
screen.add(waterfall, (0, 0))
screen.add(wavgraph, (WINSIZE[0] * 5/6., 0))
frame = 0
if len(argv) == 3:
frame = int(argv[2])
sound = pygame.mixer.Sound(array = wav[audio_frames_path[frame]:])
sound.play()
pause = False
clock = pygame.time.Clock()
while True:
start_time = time.time()
if not pause:
audio_buf = wav[audio_frames_path[frame]:audio_frames_path[frame]+audio_frame_size]
spectrogram.transform(audio_buf)
# Waterfall
wavgraph.render(audio_buf)
waterfall.render(spectrogram)
screen.update()
pygame.display.update()
for e in pygame.event.get():
if e.type not in (KEYDOWN, MOUSEBUTTONDOWN):
continue
if e.type == MOUSEBUTTONDOWN:
print "Freq:", WINSIZE[1] - e.pos[1]
else:
if e.key == K_SPACE:
pause = not pause
if pause:
pygame.mixer.pause()
else:
pygame.mixer.unpause()
if e.key == K_ESCAPE:
exit(0)
end_time = time.time()
elapsed = end_time - start_time
if elapsed > 1 / (FPS * 1.2):
print "Getting slow... %s" % elapsed
clock.tick(FPS)
if not pause:
frame += 1
if __name__ == "__main__":
try:
main(sys.argv)
except KeyboardInterrupt:
pass
| #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0
# This is a basic Julia set explorer
from utils import *
#constants
FPS = 25
def main(argv):
if len(argv) < 2:
print "usage: %s audio.wav [start_frame]" % argv[0]
return
wav, audio_frame_size, audio_frames_path = load_wav(argv[1], fps = FPS)
screen = Screen(WINSIZE)
spectrogram = SpectroGram(audio_frame_size)
waterfall = Waterfall((WINSIZE[0] * 5 / 6., WINSIZE[1]), audio_frame_size)
wavgraph = WavGraph((WINSIZE[0] / 6, WINSIZE[1]), audio_frame_size)
screen.add(waterfall, (0, 0))
screen.add(wavgraph, (WINSIZE[0] * 5/6., 0))
frame = 0
if len(argv) == 3:
frame = int(argv[2])
sound = pygame.mixer.Sound(array = wav[audio_frames_path[frame]:])
sound.play()
pause = False
clock = pygame.time.Clock()
while True:
start_time = time.time()
if not pause:
audio_buf = wav[audio_frames_path[frame]:audio_frames_path[frame]+audio_frame_size]
spectrogram.transform(audio_buf)
# Waterfall
wavgraph.render(audio_buf)
waterfall.render(spectrogram)
screen.update()
pygame.display.update()
for e in pygame.event.get():
if e.type not in (KEYDOWN, MOUSEBUTTONDOWN):
continue
if e.type == MOUSEBUTTONDOWN:
print "Freq:", WINSIZE[1] - e.pos[1]
else:
if e.key == K_SPACE:
pause = not pause
if pause:
pygame.mixer.pause()
else:
pygame.mixer.unpause()
if e.key == K_ESCAPE:
exit(0)
end_time = time.time()
elapsed = end_time - start_time
if elapsed > 1 / (FPS * 1.2):
print "Getting slow... %s" % elapsed
clock.tick(FPS)
if not pause:
frame += 1
if __name__ == "__main__":
try:
main(sys.argv)
except KeyboardInterrupt:
pass | en | 0.627025 | #!/usr/bin/env python # Licensed under the Apache License, Version 2.0 # This is a basic Julia set explorer #constants # Waterfall | 2.577241 | 3 |
core/slack.py | uktrade/url-protection-checker | 0 | 6620654 | from checker.models import ApplicationsItem, NonPaasSites
from django.conf import settings
import requests
import json
def non_paas_alert():
open_site_list = ''
sites_to_check = NonPaasSites.objects.filter(reporting_enabled=True)
if sites_to_check.filter(is_protected=False):
open_site_list += 'The following non PaaS sites are open\n'
for site in sites_to_check:
if site.is_protected is False:
open_site_list += f'{site.site_url}\n'
print(open_site_list)
if settings.SLACK_ENABLED == 'True':
print("Sending results to slack")
url = f'{settings.SLACK_URL}/api/chat.postMessage'
data = {'channel': f'{settings.SLACK_CHANNEL}', 'text': open_site_list}
headers = {'Content-type': 'application/json; charset=utf-8',
'Authorization': f'Bearer {settings.SLACK_TOKEN}'}
r = requests.post(url, data=json.dumps(data), headers=headers)
def daily_alert(ip_filter_slack_report):
slack_message = 'This is the daily url protection report.\n'
urls_open = ''
slack_report = ''
count_routes_open = 0
space_name = ''
for app in ApplicationsItem.objects.filter(reporting_enabled=True):
if app.is_protected is False:
if space_name != app.applications.spaces.space_name:
urls_open += f'\nSPACE: *{app.applications.spaces.space_name}*\n'
urls_open += f'The Application: *{app.applications.app_name}* '
# urls_open += f'in Space: {app.spaces.space_name} '
urls_open += f'has the following route unprotected\n\t{app.app_route}\n'
count_routes_open += 1
space_name = app.applications.spaces.space_name
slack_message += f'Number of routes open = {count_routes_open}'
if count_routes_open != 0:
slack_report = '```\n'
slack_report += f'{urls_open}\n```'
slack_report += f'\n```{slack_message}```\n'
# breakpoint()
if ip_filter_slack_report:
slack_report += f'\n```{ip_filter_slack_report}```\n'
print(slack_report)
if settings.SLACK_ENABLED == 'True':
print("Sending results to slack")
url = f'{settings.SLACK_URL}/api/chat.postMessage'
data = {'channel': f'{settings.SLACK_CHANNEL}', 'text': slack_report}
headers = {'Content-type': 'application/json; charset=utf-8',
'Authorization': f'Bearer {settings.SLACK_TOKEN}'}
r = requests.post(url, data=json.dumps(data), headers=headers)
print(r.text)
print(slack_report)
non_paas_alert()
| from checker.models import ApplicationsItem, NonPaasSites
from django.conf import settings
import requests
import json
def non_paas_alert():
open_site_list = ''
sites_to_check = NonPaasSites.objects.filter(reporting_enabled=True)
if sites_to_check.filter(is_protected=False):
open_site_list += 'The following non PaaS sites are open\n'
for site in sites_to_check:
if site.is_protected is False:
open_site_list += f'{site.site_url}\n'
print(open_site_list)
if settings.SLACK_ENABLED == 'True':
print("Sending results to slack")
url = f'{settings.SLACK_URL}/api/chat.postMessage'
data = {'channel': f'{settings.SLACK_CHANNEL}', 'text': open_site_list}
headers = {'Content-type': 'application/json; charset=utf-8',
'Authorization': f'Bearer {settings.SLACK_TOKEN}'}
r = requests.post(url, data=json.dumps(data), headers=headers)
def daily_alert(ip_filter_slack_report):
slack_message = 'This is the daily url protection report.\n'
urls_open = ''
slack_report = ''
count_routes_open = 0
space_name = ''
for app in ApplicationsItem.objects.filter(reporting_enabled=True):
if app.is_protected is False:
if space_name != app.applications.spaces.space_name:
urls_open += f'\nSPACE: *{app.applications.spaces.space_name}*\n'
urls_open += f'The Application: *{app.applications.app_name}* '
# urls_open += f'in Space: {app.spaces.space_name} '
urls_open += f'has the following route unprotected\n\t{app.app_route}\n'
count_routes_open += 1
space_name = app.applications.spaces.space_name
slack_message += f'Number of routes open = {count_routes_open}'
if count_routes_open != 0:
slack_report = '```\n'
slack_report += f'{urls_open}\n```'
slack_report += f'\n```{slack_message}```\n'
# breakpoint()
if ip_filter_slack_report:
slack_report += f'\n```{ip_filter_slack_report}```\n'
print(slack_report)
if settings.SLACK_ENABLED == 'True':
print("Sending results to slack")
url = f'{settings.SLACK_URL}/api/chat.postMessage'
data = {'channel': f'{settings.SLACK_CHANNEL}', 'text': slack_report}
headers = {'Content-type': 'application/json; charset=utf-8',
'Authorization': f'Bearer {settings.SLACK_TOKEN}'}
r = requests.post(url, data=json.dumps(data), headers=headers)
print(r.text)
print(slack_report)
non_paas_alert()
| en | 0.391104 | # urls_open += f'in Space: {app.spaces.space_name} ' # breakpoint() | 2.15341 | 2 |
minecraft/update.py | AmyLucyRose/InfiniFuse | 1 | 6620655 | <reponame>AmyLucyRose/InfiniFuse<gh_stars>1-10
from minecraft.blockstate import BlockState
from minecraft.chunk import Chunk
import json
def blockStates():
"""Update blockState files based on debug map"""
def create_property(value):
"""Create a property dict fitting the given value"""
prop = {}
if value in ['false', 'true']:
prop['type'] = 'bool'
elif value.isdigit():
prop['type'] = 'int'
prop['min'] = value
prop['max'] = value
else:
prop['type'] = 'str'
prop['values'] = [value]
return prop
def convert_property(prop : dict):
"""Convert a property to str type
Used when a property appears to store multiple data types
"""
if prop['type'] == 'bool':
values = ['false', 'true']
elif prop['type'] == 'int':
values = [i for i in range(prop['min'], prop['max'])]
else:
raise TypeError('Can only convert bool and int properties to str')
return {'type':'str', 'values':values}
def update_property(prop: dict, value):
if prop['type'] == 'bool':
if value not in ['false', 'true']:
prop = convert_property(prop)
elif prop['type'] == 'int':
if not value.isdigit():
prop = convert_property(prop)
elif value < prop['min']:
prop['min'] = value
elif value > prop['max']:
prop['max'] = value
if prop['type'] == 'str':
if value not in prop['values']:
prop['values'].append(value)
return prop
def update_block(block : BlockState):
"""Update this block's file so that current properties are valid"""
try:
valid = block.validProperties
except FileNotFoundError:
valid = {}
try:
for key, value in block['Properties'].items():
value = str(value)
if key in valid:
valid[key] = update_property(valid[key], value)
else:
valid[key] = create_property(value)
except KeyError:
pass
with open(block.filePath, mode='w') as f:
json.dump(valid, f, indent = 4)
chunkX = 0
chunkZ = 0
emptyChunks = 0
while True:
testChunk = Chunk.from_world(chunkX, chunkZ, world = 'debug')
emptyBlocks = 0
print(f'Reading {testChunk}')
for x in range(1,16,2):
for z in range(1,16,2):
testBlock = testChunk.get_block(x,70,z)
if testBlock['Name'] == 'minecraft:air':
emptyBlocks += 1
update_block(testBlock)
if emptyBlocks >= 64:
emptyChunks += 1
if emptyChunks > 1:
break
else:
chunkX = 0
chunkZ += 1
else:
chunkX += 1
emptyChunks = 0
| from minecraft.blockstate import BlockState
from minecraft.chunk import Chunk
import json
def blockStates():
"""Update blockState files based on debug map"""
def create_property(value):
"""Create a property dict fitting the given value"""
prop = {}
if value in ['false', 'true']:
prop['type'] = 'bool'
elif value.isdigit():
prop['type'] = 'int'
prop['min'] = value
prop['max'] = value
else:
prop['type'] = 'str'
prop['values'] = [value]
return prop
def convert_property(prop : dict):
"""Convert a property to str type
Used when a property appears to store multiple data types
"""
if prop['type'] == 'bool':
values = ['false', 'true']
elif prop['type'] == 'int':
values = [i for i in range(prop['min'], prop['max'])]
else:
raise TypeError('Can only convert bool and int properties to str')
return {'type':'str', 'values':values}
def update_property(prop: dict, value):
if prop['type'] == 'bool':
if value not in ['false', 'true']:
prop = convert_property(prop)
elif prop['type'] == 'int':
if not value.isdigit():
prop = convert_property(prop)
elif value < prop['min']:
prop['min'] = value
elif value > prop['max']:
prop['max'] = value
if prop['type'] == 'str':
if value not in prop['values']:
prop['values'].append(value)
return prop
def update_block(block : BlockState):
"""Update this block's file so that current properties are valid"""
try:
valid = block.validProperties
except FileNotFoundError:
valid = {}
try:
for key, value in block['Properties'].items():
value = str(value)
if key in valid:
valid[key] = update_property(valid[key], value)
else:
valid[key] = create_property(value)
except KeyError:
pass
with open(block.filePath, mode='w') as f:
json.dump(valid, f, indent = 4)
chunkX = 0
chunkZ = 0
emptyChunks = 0
while True:
testChunk = Chunk.from_world(chunkX, chunkZ, world = 'debug')
emptyBlocks = 0
print(f'Reading {testChunk}')
for x in range(1,16,2):
for z in range(1,16,2):
testBlock = testChunk.get_block(x,70,z)
if testBlock['Name'] == 'minecraft:air':
emptyBlocks += 1
update_block(testBlock)
if emptyBlocks >= 64:
emptyChunks += 1
if emptyChunks > 1:
break
else:
chunkX = 0
chunkZ += 1
else:
chunkX += 1
emptyChunks = 0 | en | 0.80579 | Update blockState files based on debug map Create a property dict fitting the given value Convert a property to str type Used when a property appears to store multiple data types Update this block's file so that current properties are valid | 3.194305 | 3 |
env_init.py | JoeEmp/performanc_testing_field | 0 | 6620656 | <reponame>JoeEmp/performanc_testing_field
import unittest
import os
import logging
from dbmodules.base import BaseDBServer, CoreDBServer
from dbmodules.good import Good, GoodTable
from dbmodules.user import Users, UserTable
from dbmodules.order import Order, OrderTable
from faker import Faker
from com.pe_encrypt import md5_text
from faker.providers import BaseProvider
from random import choice, seed
import pymysql
from settings import *
from com.pe_database import metadata, ENGINE
root_dir = os.path.abspath(os.path.dirname(__file__))
log_file = os.path.join(root_dir, 'env_init.log')
user_data_file = os.path.join(root_dir, './tests/user.txt')
good_data_file = os.path.join(root_dir, './tests/good.txt')
db_file = os.path.join(root_dir, "petest.db")
seed(0)
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s %(levelname)s %(filename)s %(funcName)s %(message)s',
filename=log_file
)
class GoodProvider(BaseProvider):
tags = ['', '低过老罗', '工厂价', '全网最低', ]
names = ['肯尼亚AA水洗', '耶加雪菲水洗', '智能水壶', '小米手机', 'iPhone',
'星际争霸2数字典藏版', '飞鹤奶粉', 'MacbookAir M1', '蜜桃猫手机壳'
'星空', '蒙娜丽莎', '伏尔加河上的纤夫', '马拉之死', '这个需求做不了']
age = ['2020款', '2021款', '2022款', '']
def good_name(self):
good = choice(self.age)+" "+choice(self.names) + " "+choice(self.tags)
return good.strip(' ')
def create_schema():
try:
db = pymysql.connect("localhost", "root", "123456")
db.cursor().execute("CREATE SCHEMA `%s` DEFAULT CHARACTER SET utf8mb4 ;" % MYSQL_SCHEMA)
db.commit()
except Exception as e:
print(e)
db.rollback()
db.close()
def set_time_zone():
# 修正时区
try:
db = pymysql.connect("localhost", "root", "123456")
db.cursor().execute("set global time_zone = '+8:00';")
db.commit()
except Exception as e:
print(e)
db.rollback()
db.close()
class create_db_testcase(unittest.TestCase):
@classmethod
def setUpClass(cls):
create_schema()
set_time_zone()
def setUp(self):
suffix = self._testMethodName.split('_')[-2]
if 'user' == suffix.lower():
self.table = UserTable
elif 'good' == suffix.lower():
self.table = GoodTable
elif 'order' == suffix.lower():
self.table = OrderTable
self.table.drop(ENGINE, checkfirst=True)
self.table.create(ENGINE, checkfirst=True)
self.db = CoreDBServer(self.table)
# @unittest.skip('skip')
def test_create_user_table(self):
values = {"username": 'test_user', "password": '<PASSWORD>'}
self.db.insert(**values)
# @unittest.skip('skip')
def test_create_good_table(self):
values = {'name': 'test_good'}
self.db.insert(**values)
# @unittest.skip('skip')
def test_create_order_table(self):
values = {'username': 'lilei',
'order_no': '202001250159591234', 'good_ids': '[1,2]'}
self.db.insert(**values)
def tearDown(self):
self.db.clear()
return super().tearDown()
@classmethod
def tearDownClass(cls):
return super().tearDownClass()
class init_faker_data():
def __init__(self):
self.fake = Faker('zh-CN')
self.fake.add_provider(GoodProvider)
Faker.seed(0)
def add_user_data(self):
self.table = CoreDBServer(UserTable)
data_list = []
with open(user_data_file, 'w') as f:
name_set = set([])
for _ in range(10000):
while True:
cur_len = len(name_set)
username, password = self.fake.email(), self.fake.password()
name_set.add(username)
if len(name_set) > cur_len:
break
data_list.append(
{"username": username, "password": <PASSWORD>)
}
)
f.write(username+','+password+os.linesep)
self.table.many_insert(data_list=data_list)
def add_good_data(self):
self.table = CoreDBServer(GoodTable)
data_list = []
with open(good_data_file, 'w') as f:
for _ in range(1000):
godd_name, inventory, price = self.fake.good_name(
), self.fake.pyint(), self.fake.pyint()
data_list.append(
{"name": godd_name, "inventory": inventory, "price": price})
f.write(godd_name+","+str(inventory) +
","+str(price)+os.linesep)
self.table.many_insert(data_list=data_list)
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
unittest.main()
elif 'data' == sys.argv[1]:
data = init_faker_data()
data.add_user_data()
data.add_good_data()
| import unittest
import os
import logging
from dbmodules.base import BaseDBServer, CoreDBServer
from dbmodules.good import Good, GoodTable
from dbmodules.user import Users, UserTable
from dbmodules.order import Order, OrderTable
from faker import Faker
from com.pe_encrypt import md5_text
from faker.providers import BaseProvider
from random import choice, seed
import pymysql
from settings import *
from com.pe_database import metadata, ENGINE
root_dir = os.path.abspath(os.path.dirname(__file__))
log_file = os.path.join(root_dir, 'env_init.log')
user_data_file = os.path.join(root_dir, './tests/user.txt')
good_data_file = os.path.join(root_dir, './tests/good.txt')
db_file = os.path.join(root_dir, "petest.db")
seed(0)
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s %(levelname)s %(filename)s %(funcName)s %(message)s',
filename=log_file
)
class GoodProvider(BaseProvider):
tags = ['', '低过老罗', '工厂价', '全网最低', ]
names = ['肯尼亚AA水洗', '耶加雪菲水洗', '智能水壶', '小米手机', 'iPhone',
'星际争霸2数字典藏版', '飞鹤奶粉', 'MacbookAir M1', '蜜桃猫手机壳'
'星空', '蒙娜丽莎', '伏尔加河上的纤夫', '马拉之死', '这个需求做不了']
age = ['2020款', '2021款', '2022款', '']
def good_name(self):
good = choice(self.age)+" "+choice(self.names) + " "+choice(self.tags)
return good.strip(' ')
def create_schema():
try:
db = pymysql.connect("localhost", "root", "123456")
db.cursor().execute("CREATE SCHEMA `%s` DEFAULT CHARACTER SET utf8mb4 ;" % MYSQL_SCHEMA)
db.commit()
except Exception as e:
print(e)
db.rollback()
db.close()
def set_time_zone():
# 修正时区
try:
db = pymysql.connect("localhost", "root", "123456")
db.cursor().execute("set global time_zone = '+8:00';")
db.commit()
except Exception as e:
print(e)
db.rollback()
db.close()
class create_db_testcase(unittest.TestCase):
@classmethod
def setUpClass(cls):
create_schema()
set_time_zone()
def setUp(self):
suffix = self._testMethodName.split('_')[-2]
if 'user' == suffix.lower():
self.table = UserTable
elif 'good' == suffix.lower():
self.table = GoodTable
elif 'order' == suffix.lower():
self.table = OrderTable
self.table.drop(ENGINE, checkfirst=True)
self.table.create(ENGINE, checkfirst=True)
self.db = CoreDBServer(self.table)
# @unittest.skip('skip')
def test_create_user_table(self):
values = {"username": 'test_user', "password": '<PASSWORD>'}
self.db.insert(**values)
# @unittest.skip('skip')
def test_create_good_table(self):
values = {'name': 'test_good'}
self.db.insert(**values)
# @unittest.skip('skip')
def test_create_order_table(self):
values = {'username': 'lilei',
'order_no': '202001250159591234', 'good_ids': '[1,2]'}
self.db.insert(**values)
def tearDown(self):
self.db.clear()
return super().tearDown()
@classmethod
def tearDownClass(cls):
return super().tearDownClass()
class init_faker_data():
def __init__(self):
self.fake = Faker('zh-CN')
self.fake.add_provider(GoodProvider)
Faker.seed(0)
def add_user_data(self):
self.table = CoreDBServer(UserTable)
data_list = []
with open(user_data_file, 'w') as f:
name_set = set([])
for _ in range(10000):
while True:
cur_len = len(name_set)
username, password = self.fake.email(), self.fake.password()
name_set.add(username)
if len(name_set) > cur_len:
break
data_list.append(
{"username": username, "password": <PASSWORD>)
}
)
f.write(username+','+password+os.linesep)
self.table.many_insert(data_list=data_list)
def add_good_data(self):
self.table = CoreDBServer(GoodTable)
data_list = []
with open(good_data_file, 'w') as f:
for _ in range(1000):
godd_name, inventory, price = self.fake.good_name(
), self.fake.pyint(), self.fake.pyint()
data_list.append(
{"name": godd_name, "inventory": inventory, "price": price})
f.write(godd_name+","+str(inventory) +
","+str(price)+os.linesep)
self.table.many_insert(data_list=data_list)
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
unittest.main()
elif 'data' == sys.argv[1]:
data = init_faker_data()
data.add_user_data()
data.add_good_data() | en | 0.092292 | # 修正时区 # @unittest.skip('skip') # @unittest.skip('skip') # @unittest.skip('skip') | 1.967437 | 2 |
muse_origin/origin.py | musevlt/origin | 1 | 6620657 | <gh_stars>1-10
import datetime
import glob
import inspect
import logging
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from logging.handlers import RotatingFileHandler
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.utils import lazyproperty
from mpdaf.log import setup_logging
from mpdaf.MUSE import FieldsMap
from mpdaf.obj import Cube, Image
from . import steps
from .lib_origin import timeit
from .version import __version__
try:
# With PyYaml 5.1, load and safe have been renamed to unsafe_* and
# replaced by the safe_* functions. We need the full ones to
# be able to dump Python objects, yay!
from yaml import unsafe_load as load_yaml, dump as dump_yaml
except ImportError: # pragma: no cover
from yaml import load as load_yaml, dump as dump_yaml
CURDIR = os.path.dirname(os.path.abspath(__file__))
class ORIGIN(steps.LogMixin):
"""ORIGIN: detectiOn and extRactIon of Galaxy emIssion liNes
This is the main class to interact with all the steps. An Origin object is
mainly composed by:
- cube data (raw data and covariance)
- 1D dictionary of spectral profiles
- MUSE PSF
Attributes
----------
path : str
Path where the ORIGIN data will be stored.
name : str
Name of the session and basename for the sources.
param : dict
Parameters values.
cube_raw : array (Nz, Ny, Nx)
Raw data.
var : array (Nz, Ny, Nx)
Variance.
wcs : `mpdaf.obj.WCS`
RA-DEC coordinates.
wave : `mpdaf.obj.WaveCoord`
Spectral coordinates.
profiles : list of array
List of spectral profiles to test
FWHM_profiles : list
FWHM of the profiles in pixels.
wfields : None or list of arrays
List of weight maps (one per fields in the case of MUSE mosaic)
None: just one field
PSF : array (Nz, PSF_size, PSF_size) or list of arrays
MUSE PSF (one per field)
LBDA_FWHM_PSF: list of floats
Value of the FWMH of the PSF in pixel for each wavelength step (mean of
the fields).
FWHM_PSF : float or list of float
Mean of the fwhm of the PSF in pixel (one per field).
imawhite : `~mpdaf.obj.Image`
White image
segmap : `~mpdaf.obj.Image`
Segmentation map
cube_std : `~mpdaf.obj.Cube`
standardized data for PCA. Result of step01.
cont_dct : `~mpdaf.obj.Cube`
DCT continuum. Result of step01.
ima_std : `~mpdaf.obj.Image`
Mean of standardized data for PCA along the wavelength axis.
Result of step01.
ima_dct : `~mpdaf.obj.Image`
Mean of DCT continuum cube along the wavelength axis.
Result of step01.
nbAreas : int
Number of area (segmentation) for the PCA computation.
Result of step02.
areamap : `~mpdaf.obj.Image`
PCA area. Result of step02.
testO2 : list of arrays (one per PCA area)
Result of the O2 test (step03).
histO2 : list of arrays (one per PCA area)
PCA histogram (step03).
binO2 : list of arrays (one per PCA area)
Bins for the PCA histogram (step03).
thresO2 : list of float
For each area, threshold value (step03).
meaO2 : list of float
Location parameter of the Gaussian fit used to
estimate the threshold (step03).
stdO2 : list of float
Scale parameter of the Gaussian fit used to
estimate the threshold (step03).
cube_faint : `~mpdaf.obj.Cube`
Projection on the eigenvectors associated to the lower eigenvalues
of the data cube (representing the faint signal). Result of step04.
mapO2 : `~mpdaf.obj.Image`
The numbers of iterations used by testO2 for each spaxel.
Result of step04.
cube_correl : `~mpdaf.obj.Cube`
Cube of T_GLR values (step05).
cube_profile : `~mpdaf.obj.Cube` (type int)
PSF profile associated to the T_GLR (step05).
maxmap : `~mpdaf.obj.Image`
Map of maxima along the wavelength axis (step05).
cube_local_max : `~mpdaf.obj.Cube`
Local maxima from max correlation (step05).
cube_local_min : `~mpdaf.obj.Cube`
Local maxima from min correlation (step05).
threshold : float
Estimated threshold (step06).
Pval : `astropy.table.Table`
Table with the purity results for each threshold (step06):
- PVal_r : The purity function
- index_pval : index value to plot
- Det_m : Number of detections (-DATA)
- Det_M : Number of detections (+DATA)
Cat0 : `astropy.table.Table`
Catalog returned by step07
Pval_comp : `astropy.table.Table`
Table with the purity results for each threshold in compl (step08):
- PVal_r : The purity function
- index_pval : index value to plot
- Det_m : Number of detections (-DATA)
- Det_M : Number of detections (+DATA)
Cat1 : `astropy.table.Table`
Catalog returned by step08
spectra : list of `~mpdaf.obj.Spectrum`
Estimated lines. Result of step09.
Cat2 : `astropy.table.Table`
Catalog returned by step09.
"""
def __init__(
self,
filename,
name="origin",
path=".",
loglevel="DEBUG",
logcolor=False,
fieldmap=None,
profiles=None,
PSF=None,
LBDA_FWHM_PSF=None,
FWHM_PSF=None,
PSF_size=25,
param=None,
imawhite=None,
wfields=None,
):
self.path = path
self.name = name
self.outpath = os.path.join(path, name)
self.param = param or {}
self.file_handler = None
os.makedirs(self.outpath, exist_ok=True)
# stdout & file logger
setup_logging(
name="muse_origin",
level=loglevel,
color=logcolor,
fmt="%(levelname)-05s: %(message)s",
stream=sys.stdout,
)
self.logger = logging.getLogger("muse_origin")
self._setup_logfile(self.logger)
self.param["loglevel"] = loglevel
self.param["logcolor"] = logcolor
self._loginfo("Step 00 - Initialization (ORIGIN v%s)", __version__)
# dict of Step instances, indexed by step names
self.steps = OrderedDict()
# dict containing the data attributes of each step, to expose them on
# the ORIGIN object
self._dataobjs = {}
for i, cls in enumerate(steps.STEPS, start=1):
# Instantiate the step object, give it a step number
step = cls(self, i, self.param)
# force its signature to be the same as step.run (without the
# ORIGIN instance), which allows to see its arguments and their
# default value.
sig = inspect.signature(step.run)
step.__signature__ = sig.replace(
parameters=[p for p in sig.parameters.values() if p.name != "orig"]
)
self.steps[step.name] = step
# Insert the __call__ method of the step in the ORIGIN object. This
# allows to run a step with a method like "step01_preprocessing".
self.__dict__[step.method_name] = step
for name, _ in step._dataobjs:
self._dataobjs[name] = step
# MUSE data cube
self._loginfo("Read the Data Cube %s", filename)
self.param["cubename"] = filename
self.cube = Cube(filename)
self.Nz, self.Ny, self.Nx = self.shape = self.cube.shape
# RA-DEC coordinates
self.wcs = self.cube.wcs
# spectral coordinates
self.wave = self.cube.wave
# List of spectral profile
if profiles is None:
profiles = os.path.join(CURDIR, "Dico_3FWHM.fits")
self.param["profiles"] = profiles
# FSF
self.param["fieldmap"] = fieldmap
self.param["PSF_size"] = PSF_size
self._read_fsf(
self.cube,
fieldmap=fieldmap,
wfields=wfields,
PSF=PSF,
LBDA_FWHM_PSF=LBDA_FWHM_PSF,
FWHM_PSF=FWHM_PSF,
PSF_size=PSF_size,
)
# additional images
self.ima_white = imawhite if imawhite else self.cube.mean(axis=0)
self.testO2, self.histO2, self.binO2 = None, None, None
self._loginfo("00 Done")
def __getattr__(self, name):
# Use __getattr__ to provide access to the steps data attributes
# via the ORIGIN object. This will also trigger the loading of
# the objects if needed.
if name in self._dataobjs:
return getattr(self._dataobjs[name], name)
else:
raise AttributeError(f"unknown attribute {name}")
def __dir__(self):
return (
super().__dir__()
+ list(self._dataobjs.keys())
+ [o.method_name for o in self.steps.values()]
)
@lazyproperty
def cube_raw(self):
# Flux - set to 0 the Nan
return self.cube.data.filled(fill_value=0)
@lazyproperty
def mask(self):
return self.cube._mask
@lazyproperty
def var(self):
# variance - set to Inf the Nan
return self.cube.var.filled(np.inf)
@classmethod
def init(
cls,
cube,
fieldmap=None,
profiles=None,
PSF=None,
LBDA_FWHM_PSF=None,
FWHM_PSF=None,
PSF_size=25,
name="origin",
path=".",
loglevel="DEBUG",
logcolor=False,
):
"""Create a ORIGIN object.
An Origin object is composed by:
- cube data (raw data and covariance)
- 1D dictionary of spectral profiles
- MUSE PSF
- parameters used to segment the cube in different zones.
Parameters
----------
cube : str
Cube FITS file name
fieldmap : str
FITS file containing the field map (mosaic)
profiles : str
FITS of spectral profiles
If None, a default dictionary of 20 profiles is used.
PSF : str
Cube FITS filename containing a MUSE PSF per wavelength.
If None, PSF are computed with a Moffat function
(13x13 pixels, beta=2.6, fwhm1=0.76, fwhm2=0.66,
lambda1=4750, lambda2=7000)
LBDA_FWHM_PSF: list of float
Value of the FWMH of the PSF in pixel for each wavelength step
(mean of the fields).
FWHM_PSF : list of float
FWHM of the PSFs in pixels, one per field.
PSF_size : int
Spatial size of the PSF (when reconstructed from the cube header).
name : str
Name of this session and basename for the sources.
ORIGIN.write() method saves the session in a folder that
has this name. The ORIGIN.load() method will be used to
load a session, continue it or create a new from it.
loglevel : str
Level for the logger (defaults to DEBUG).
logcolor : bool
Use color for the logger levels.
"""
return cls(
cube,
path=path,
name=name,
fieldmap=fieldmap,
profiles=profiles,
PSF=PSF,
LBDA_FWHM_PSF=LBDA_FWHM_PSF,
FWHM_PSF=FWHM_PSF,
PSF_size=PSF_size,
loglevel=loglevel,
logcolor=logcolor,
)
@classmethod
@timeit
def load(cls, folder, newname=None, loglevel=None, logcolor=None):
"""Load a previous session of ORIGIN.
ORIGIN.write() method saves a session in a folder that has the name of
the ORIGIN object (self.name).
Parameters
----------
folder : str
Folder name (with the relative path) where the ORIGIN data
have been stored.
newname : str
New name for this session. This parameter lets the user to load a
previous session but continue in a new one. If None, the user will
continue the loaded session.
loglevel : str
Level for the logger (by default reuse the saved level).
logcolor : bool
Use color for the logger levels.
"""
path = os.path.dirname(os.path.abspath(folder))
name = os.path.basename(folder)
with open(f"{folder}/{name}.yaml", "r") as stream:
param = load_yaml(stream)
if "FWHM PSF" in param:
FWHM_PSF = np.asarray(param["FWHM PSF"])
else:
FWHM_PSF = None
if "LBDA_FWHM PSF" in param:
LBDA_FWHM_PSF = np.asarray(param["LBDA FWHM PSF"])
else:
LBDA_FWHM_PSF = None
if os.path.isfile(param["PSF"]):
PSF = param["PSF"]
else:
if os.path.isfile("%s/cube_psf.fits" % folder):
PSF = "%s/cube_psf.fits" % folder
else:
PSF_files = glob.glob("%s/cube_psf_*.fits" % folder)
if len(PSF_files) == 0:
PSF = None
elif len(PSF_files) == 1:
PSF = PSF_files[0]
else:
PSF = sorted(PSF_files)
wfield_files = glob.glob("%s/wfield_*.fits" % folder)
if len(wfield_files) == 0:
wfields = None
else:
wfields = sorted(wfield_files)
# step0
if os.path.isfile("%s/ima_white.fits" % folder):
ima_white = Image("%s/ima_white.fits" % folder)
else:
ima_white = None
if newname is not None:
# copy outpath to the new path
shutil.copytree(os.path.join(path, name), os.path.join(path, newname))
name = newname
loglevel = loglevel if loglevel is not None else param["loglevel"]
logcolor = logcolor if logcolor is not None else param["logcolor"]
obj = cls(
path=path,
name=name,
param=param,
imawhite=ima_white,
loglevel=loglevel,
logcolor=logcolor,
filename=param["cubename"],
fieldmap=param["fieldmap"],
wfields=wfields,
profiles=param["profiles"],
PSF=PSF,
FWHM_PSF=FWHM_PSF,
LBDA_FWHM_PSF=LBDA_FWHM_PSF,
)
for step in obj.steps.values():
step.load(obj.outpath)
# special case for step3
NbAreas = param.get("nbareas")
if NbAreas is not None:
if os.path.isfile("%s/testO2_1.txt" % folder):
obj.testO2 = [
np.loadtxt("%s/testO2_%d.txt" % (folder, area), ndmin=1)
for area in range(1, NbAreas + 1)
]
if os.path.isfile("%s/histO2_1.txt" % folder):
obj.histO2 = [
np.loadtxt("%s/histO2_%d.txt" % (folder, area), ndmin=1)
for area in range(1, NbAreas + 1)
]
if os.path.isfile("%s/binO2_1.txt" % folder):
obj.binO2 = [
np.loadtxt("%s/binO2_%d.txt" % (folder, area), ndmin=1)
for area in range(1, NbAreas + 1)
]
return obj
def info(self):
"""Prints the processing log."""
with open(self.logfile) as f:
for line in f:
if line.find("Done") == -1:
print(line, end="")
def status(self):
"""Prints the processing status."""
for name, step in self.steps.items():
print(f"- {step.idx:02d}, {name}: {step.status.name}")
    def _setup_logfile(self, logger):
        """Attach a rotating file handler writing to ``<outpath>/<name>.log``.

        Any previously installed file handler is closed and removed first,
        so the method can safely be called again after ``outpath`` changes
        (e.g. from write() when the session is moved).
        """
        if self.file_handler is not None:
            # Remove the handlers before adding a new one
            self.file_handler.close()
            logger.handlers.remove(self.file_handler)

        self.logfile = os.path.join(self.outpath, self.name + ".log")
        # rotate at ~1 MB, keep one backup file
        self.file_handler = RotatingFileHandler(self.logfile, "a", 1000000, 1)
        self.file_handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter("%(asctime)s %(message)s")
        self.file_handler.setFormatter(formatter)
        logger.addHandler(self.file_handler)
def set_loglevel(self, level):
"""Set the logging level for the console logger."""
handler = next(
h for h in self.logger.handlers if isinstance(h, logging.StreamHandler)
)
handler.setLevel(level)
self.param["loglevel"] = level
    @property
    def nbAreas(self):
        """Number of area (segmentation) for the PCA."""
        # Set by step 02 (area segmentation); None until that step has run.
        return self.param.get("nbareas")
    @property
    def threshold_correl(self):
        """Estimated threshold used to detect lines on local maxima of max
        correl."""
        # Computed by step 06; None until then.
        return self.param.get("threshold")

    @threshold_correl.setter
    def threshold_correl(self, value):
        # Stored in self.param so it is saved/restored with the session.
        self.param["threshold"] = value
    @property
    def threshold_std(self):
        """Estimated threshold used to detect complementary lines on local
        maxima of std cube."""
        # Computed by step 08; None until then.
        return self.param.get("threshold_std")

    @threshold_std.setter
    def threshold_std(self, value):
        # Stored in self.param so it is saved/restored with the session.
        self.param["threshold_std"] = value
@lazyproperty
def profiles(self):
"""Read the list of spectral profiles."""
profiles = self.param["profiles"]
self._loginfo("Load dictionary of spectral profile %s", profiles)
with fits.open(profiles) as hdul:
profiles = [hdu.data for hdu in hdul[1:]]
# check that the profiles have the same size
if len({p.shape[0] for p in profiles}) != 1:
raise ValueError("The profiles must have the same size")
return profiles
@lazyproperty
def FWHM_profiles(self):
"""Read the list of FWHM of the spectral profiles."""
with fits.open(self.param["profiles"]) as hdul:
return [hdu.header["FWHM"] for hdu in hdul[1:]]
def _read_fsf(
self,
cube,
fieldmap=None,
wfields=None,
PSF=None,
LBDA_FWHM_PSF=None,
FWHM_PSF=None,
PSF_size=25,
):
"""Read FSF cube(s), with fieldmap in the case of MUSE mosaic.
There are two ways to specify the PSF informations:
- with the ``PSF``, ``FWHM_PSF``, and ``LBDA_FWHM`` parameters.
- or read from the cube header and fieldmap.
If there are multiple fields, for a mosaic, we also need weight maps.
If the cube contains a FSF model and a fieldmap is given, these weight
maps are computed automatically.
Parameters
----------
cube : mpdaf.obj.Cube
The input datacube.
fieldmap : str
FITS file containing the field map (mosaic).
wfields : list of str
List of weight maps (one per fields in the case of MUSE mosaic).
PSF : str or list of str
Cube FITS filename containing a MUSE PSF per wavelength, or a list
of filenames for multiple fields (mosaic).
LBDA_FWHM_PSF: list of float
Value of the FWMH of the PSF in pixel for each wavelength step
(mean of the fields).
FWHM_PSF : list of float
FWHM of the PSFs in pixels, one per field.
PSF_size : int
Spatial size of the PSF (when reconstructed from the cube header).
"""
self.wfields = None
info = self.logger.info
if PSF is None or FWHM_PSF is None or LBDA_FWHM_PSF is None:
info("Compute FSFs from the datacube FITS header keywords")
if "FSFMODE" not in cube.primary_header:
raise ValueError("missing PSF keywords in the cube FITS header")
# FSF created from FSF*** keywords
try:
from mpdaf.MUSE import FSFModel
except ImportError:
sys.exit("you must upgrade MPDAF")
fsf = FSFModel.read(cube)
lbda = cube.wave.coord()
shape = (PSF_size, PSF_size)
if isinstance(fsf, FSFModel): # just one FSF
self.PSF = fsf.get_3darray(lbda, shape)
self.LBDA_FWHM_PSF = fsf.get_fwhm(lbda, unit="pix")
self.FWHM_PSF = np.mean(self.LBDA_FWHM_PSF)
# mean of the fwhm of the FSF in pixel
info("mean FWHM of the FSFs = %.2f pixels", self.FWHM_PSF)
else:
self.PSF = [f.get_3darray(lbda, shape) for f in fsf]
fwhm = np.array([f.get_fwhm(lbda, unit="pix") for f in fsf])
self.LBDA_FWHM_PSF = np.mean(fwhm, axis=0)
self.FWHM_PSF = np.mean(fwhm, axis=1)
for i, fwhm in enumerate(self.FWHM_PSF):
info("mean FWHM of the FSFs (field %d) = %.2f pixels", i, fwhm)
info("Compute weight maps from field map %s", fieldmap)
fmap = FieldsMap(fieldmap, nfields=len(fsf))
# weighted field map
self.wfields = fmap.compute_weights()
self.param["PSF"] = cube.primary_header["FSFMODE"]
else:
self.LBDA_FWHM_PSF = LBDA_FWHM_PSF
if isinstance(PSF, str):
info("Load FSFs from %s", PSF)
self.param["PSF"] = PSF
self.PSF = fits.getdata(PSF)
if self.PSF.shape[1] != self.PSF.shape[2]:
raise ValueError("PSF must be a square image.")
if not self.PSF.shape[1] % 2:
raise ValueError("The spatial size of the PSF must be odd.")
if self.PSF.shape[0] != self.shape[0]:
raise ValueError(
"PSF and data cube have not the same"
"dimensions along the spectral axis."
)
# mean of the fwhm of the FSF in pixel
self.FWHM_PSF = np.mean(FWHM_PSF)
self.param["FWHM PSF"] = FWHM_PSF.tolist()
info("mean FWHM of the FSFs = %.2f pixels", self.FWHM_PSF)
else:
nfields = len(PSF)
self.wfields = []
self.PSF = []
self.FWHM_PSF = list(FWHM_PSF)
for n in range(nfields):
info("Load FSF from %s", PSF[n])
self.PSF.append(fits.getdata(PSF[n]))
info("Load weight maps from %s", wfields[n])
self.wfields.append(fits.getdata(wfields[n]))
info(
"mean FWHM of the FSFs (field %d) = %.2f pixels", n, FWHM_PSF[n]
)
self.param["FWHM PSF"] = self.FWHM_PSF.tolist()
self.param["LBDA FWHM PSF"] = self.LBDA_FWHM_PSF.tolist()
    @timeit
    def write(self, path=None, erase=False):
        """Save the current session in a folder that will have the name of the
        ORIGIN object (self.name).

        The ORIGIN.load(folder, newname=None) method will be used to load a
        session. The parameter newname will let the user to load a session but
        continue in a new one.

        Parameters
        ----------
        path : str
            Path where the folder (self.name) will be stored.
        erase : bool
            Remove the folder if it exists.
        """
        self._loginfo("Writing...")

        # adapt session if path changes
        if path is not None and path != self.path:
            if not os.path.exists(path):
                raise ValueError(f"path does not exist: {path}")
            self.path = path
            outpath = os.path.join(path, self.name)
            # copy outpath to the new path
            shutil.copytree(self.outpath, outpath)
            self.outpath = outpath
            # re-open the log file in the new location
            self._setup_logfile(self.logger)

        if erase:
            shutil.rmtree(self.outpath)
        os.makedirs(self.outpath, exist_ok=True)

        # PSF: one cube per field for a mosaic, a single cube otherwise
        if isinstance(self.PSF, list):
            for i, psf in enumerate(self.PSF):
                cube = Cube(data=psf, mask=np.ma.nomask, copy=False)
                cube.write(os.path.join(self.outpath, "cube_psf_%02d.fits" % i))
        else:
            cube = Cube(data=self.PSF, mask=np.ma.nomask, copy=False)
            cube.write(os.path.join(self.outpath, "cube_psf.fits"))

        # weight maps are only present for mosaics
        if self.wfields is not None:
            for i, wfield in enumerate(self.wfields):
                im = Image(data=wfield, mask=np.ma.nomask)
                im.write(os.path.join(self.outpath, "wfield_%02d.fits" % i))

        if self.ima_white is not None:
            self.ima_white.write("%s/ima_white.fits" % self.outpath)

        for step in self.steps.values():
            step.dump(self.outpath)

        # parameters in .yaml
        with open(f"{self.outpath}/{self.name}.yaml", "w") as stream:
            dump_yaml(self.param, stream)

        # step3 - saving this manually for now (one text file per area;
        # load() reads these back with np.loadtxt)
        if self.nbAreas is not None:
            if self.testO2 is not None:
                for area in range(1, self.nbAreas + 1):
                    np.savetxt(
                        "%s/testO2_%d.txt" % (self.outpath, area), self.testO2[area - 1]
                    )
            if self.histO2 is not None:
                for area in range(1, self.nbAreas + 1):
                    np.savetxt(
                        "%s/histO2_%d.txt" % (self.outpath, area), self.histO2[area - 1]
                    )
            if self.binO2 is not None:
                for area in range(1, self.nbAreas + 1):
                    np.savetxt(
                        "%s/binO2_%d.txt" % (self.outpath, area), self.binO2[area - 1]
                    )

        self._loginfo("Current session saved in %s", self.outpath)
    def plot_areas(self, ax=None, **kwargs):
        """Plot the 2D segmentation for PCA from self.step02_areas()
        on the test used to perform this segmentation.

        Parameters
        ----------
        ax : matplotlib.Axes
            The Axes instance in which the image is drawn.
        kwargs : matplotlib.artist.Artist
            Optional extra keyword/value arguments to be passed to ``ax.imshow()``.
        """
        if ax is None:
            ax = plt.gca()

        kwargs.setdefault("cmap", "jet")
        kwargs.setdefault("alpha", 0.7)
        kwargs.setdefault("interpolation", "nearest")
        kwargs["origin"] = "lower"
        cax = ax.imshow(self.areamap._data, **kwargs)

        # range of area labels present in the map
        i0 = np.min(self.areamap._data)
        i1 = np.max(self.areamap._data)
        if i0 != i1:
            from matplotlib.colors import BoundaryNorm
            from mpl_toolkits.axes_grid1 import make_axes_locatable

            # discrete colorbar: one bin centered on each area label
            n = i1 - i0 + 1
            bounds = np.linspace(i0, i1 + 1, n + 1) - 0.5
            norm = BoundaryNorm(bounds, n + 1)

            divider = make_axes_locatable(ax)
            cax2 = divider.append_axes("right", size="5%", pad=1)
            plt.colorbar(
                cax,
                cax=cax2,
                cmap=kwargs["cmap"],
                norm=norm,
                spacing="proportional",
                ticks=bounds + 0.5,
                boundaries=bounds,
                format="%1i",
            )
    def plot_step03_PCA_threshold(
        self, log10=False, ncol=3, legend=True, xlim=None, fig=None, **fig_kw
    ):
        """Plot the histogram and the threshold for the starting point of the
        PCA, one subplot per area.

        Parameters
        ----------
        log10 : bool
            Draw histogram in logarithmic scale or not
        ncol : int
            Number of columns in the subplots
        legend : bool
            If true, write pfa and threshold values as legend
        xlim : (float, float)
            Set the data limits for the x-axes
        fig : matplotlib.Figure
            Figure instance in which the image is drawn
        **fig_kw : matplotlib.artist.Artist
            All additional keyword arguments are passed to the figure() call.
        """
        if self.nbAreas is None:
            raise ValueError("Run the step 02 to initialize self.nbAreas")

        if fig is None:
            fig = plt.figure()

        # grid layout: m columns, n rows, one subplot per area
        if self.nbAreas <= ncol:
            n = 1
            m = self.nbAreas
        else:
            n = self.nbAreas // ncol
            m = ncol
            if (n * m) < self.nbAreas:
                n = n + 1

        for area in range(1, self.nbAreas + 1):
            if area == 1:
                ax = fig.add_subplot(n, m, area, **fig_kw)
            else:
                # share the y axis with the first subplot
                ax = fig.add_subplot(n, m, area, sharey=fig.axes[0], **fig_kw)
            self.plot_PCA_threshold(area, "step03", log10, legend, xlim, ax)

        # Fine-tune figure: keep axis labels and ticks only on the outer
        # subplots and pack the panels together
        for a in fig.axes[:-1]:
            a.set_xlabel("")
        for a in fig.axes[1:]:
            a.set_ylabel("")
        plt.setp([a.get_yticklabels() for a in fig.axes], visible=False)
        plt.setp([a.get_yticklabels() for a in fig.axes[0::m]], visible=True)
        plt.setp([a.get_yticklines() for a in fig.axes], visible=False)
        plt.setp([a.get_yticklines() for a in fig.axes[0::m]], visible=True)
        fig.subplots_adjust(wspace=0)
        if xlim is not None:
            plt.setp([a.get_xticklabels() for a in fig.axes[:-m]], visible=False)
            plt.setp([a.get_xticklines() for a in fig.axes[:-m]], visible=False)
            fig.subplots_adjust(hspace=0)
def plot_step03_PCA_stat(self, cutoff=5, ax=None):
"""Plot the threshold value according to the area.
Median Absolute Deviation is used to find outliers.
Parameters
----------
cutoff : float
Median Absolute Deviation cutoff
ax : matplotlib.Axes
The Axes instance in which the image is drawn
"""
if self.nbAreas is None:
raise ValueError("Run the step 02 to initialize self.nbAreas")
if self.thresO2 is None:
raise ValueError("Run the step 03 to compute the threshold values")
if ax is None:
ax = plt.gca()
ax.plot(np.arange(1, self.nbAreas + 1), self.thresO2, "+")
med = np.median(self.thresO2)
diff = np.absolute(self.thresO2 - med)
mad = np.median(diff)
if mad != 0:
ksel = (diff / mad) > cutoff
if ksel.any():
ax.plot(
np.arange(1, self.nbAreas + 1)[ksel],
np.asarray(self.thresO2)[ksel],
"ro",
)
ax.set_xlabel("area")
ax.set_ylabel("Threshold")
ax.set_title(f"PCA threshold (med={med:.2f}, mad= {mad:.2f})")
def plot_PCA_threshold(
self, area, pfa_test="step03", log10=False, legend=True, xlim=None, ax=None
):
""" Plot the histogram and the threshold for the starting point of the PCA.
Parameters
----------
area : int in [1, nbAreas]
Area ID
pfa_test : float or str
PFA of the test (if 'step03', the value set during step03 is used)
log10 : bool
Draw histogram in logarithmic scale or not
legend : bool
If true, write pfa and threshold values as legend
xlim : (float, float)
Set the data limits for the x-axis
ax : matplotlib.Axes
Axes instance in which the image is drawn
"""
if self.nbAreas is None:
raise ValueError("Run the step 02 to initialize self.nbAreas")
if pfa_test == "step03":
param = self.param["compute_PCA_threshold"]["params"]
if "pfa_test" in param:
pfa_test = param["pfa_test"]
hist = self.histO2[area - 1]
bins = self.binO2[area - 1]
thre = self.thresO2[area - 1]
mea = self.meaO2[area - 1]
std = self.stdO2[area - 1]
else:
raise ValueError(
"pfa_test param is None: set a value or run the Step03"
)
else:
if self.cube_std is None:
raise ValueError("Run the step 01 to initialize self.cube_std")
# limits of each spatial zone
ksel = self.areamap._data == area
# Data in this spatio-spectral zone
cube_temp = self.cube_std._data[:, ksel]
# Compute_PCA_threshold
from .lib_origin import Compute_PCA_threshold
testO2, hist, bins, thre, mea, std = Compute_PCA_threshold(
cube_temp, pfa_test
)
if ax is None:
ax = plt.gca()
from scipy import stats
center = (bins[:-1] + bins[1:]) / 2
gauss = stats.norm.pdf(center, loc=mea, scale=std)
gauss *= hist.max() / gauss.max()
if log10:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
gauss = np.log10(gauss)
hist = np.log10(hist)
ax.plot(center, hist, "-k")
ax.plot(center, hist, ".r")
ax.plot(center, gauss, "-b", alpha=0.5)
ax.axvline(thre, color="b", lw=2, alpha=0.5)
ax.grid()
if xlim is None:
ax.set_xlim((center.min(), center.max()))
else:
ax.set_xlim(xlim)
ax.set_xlabel("frequency")
ax.set_ylabel("value")
kwargs = dict(transform=ax.transAxes, bbox=dict(facecolor="red", alpha=0.5))
if legend:
text = "zone %d\npfa %.2f\nthreshold %.2f" % (area, pfa_test, thre)
ax.text(0.1, 0.8, text, **kwargs)
else:
ax.text(0.9, 0.9, "%d" % area, **kwargs)
def plot_mapPCA(self, area=None, iteration=None, ax=None, **kwargs):
""" Plot at a given iteration (or at the end) the number of times
a spaxel got cleaned by the PCA.
Parameters
----------
area: int in [1, nbAreas]
if None draw the full map for all areas
iteration : int
Display the nuisance/bacground pixels at iteration k
ax : matplotlib.Axes
The Axes instance in which the image is drawn
kwargs : matplotlib.artist.Artist
Optional extra keyword/value arguments to be passed to ``ax.imshow()``.
"""
if self.mapO2 is None:
raise ValueError("Run the step 04 to initialize self.mapO2")
themap = self.mapO2.copy()
title = "Number of times the spaxel got cleaned by the PCA"
if iteration is not None:
title += "\n%d iterations" % iteration
if area is not None:
mask = np.ones_like(self.mapO2._data, dtype=np.bool)
mask[self.areamap._data == area] = False
themap._mask = mask
title += " (zone %d)" % area
if iteration is not None:
themap[themap._data < iteration] = np.ma.masked
if ax is None:
ax = plt.gca()
kwargs.setdefault("cmap", "jet")
themap.plot(title=title, colorbar="v", ax=ax, **kwargs)
    def plot_purity(self, comp=False, ax=None, log10=False, legend=True):
        """Draw number of sources per threshold computed in step06/step08.

        Parameters
        ----------
        comp : bool
            If True, plot purity curves for the complementary lines (step08).
        ax : matplotlib.Axes
            The Axes instance in which the image is drawn.
        log10 : bool
            To draw histogram in logarithmic scale or not.
        legend : bool
            To draw the legend.
        """
        if ax is None:
            ax = plt.gca()

        # choose between the correlation (step06) and std (step08) results
        if comp:
            threshold = self.threshold_std
            purity = self.param["purity_std"]
            Pval = self.Pval_comp
        else:
            threshold = self.threshold_correl
            purity = self.param["purity"]
            Pval = self.Pval

        if Pval is None:
            raise ValueError("Run the step 06")

        Tval_r = Pval["Tval_r"]
        # purity on a secondary y-axis, detection counts on the primary one
        ax2 = ax.twinx()
        ax2.plot(Tval_r, Pval["Pval_r"], "y.-", label="purity")
        ax.plot(Tval_r, Pval["Det_M"], "b.-", label="n detections (+DATA)")
        ax.plot(Tval_r, Pval["Det_m"], "g.-", label="n detections (-DATA)")
        ax2.plot(threshold, purity, "xr")

        if log10:
            ax.set_yscale("log")
            ax2.set_yscale("log")

        # vertical line at the automatically estimated threshold
        ym, yM = ax.get_ylim()
        ax.plot(
            [threshold, threshold],
            [ym, yM],
            "r",
            alpha=0.25,
            lw=2,
            label="automatic threshold",
        )
        ax.set_ylim((ym, yM))
        ax.set_xlabel("Threshold")
        ax2.set_ylabel("Purity")
        ax.set_ylabel("Number of detections")
        ax.set_title("threshold %f" % threshold)
        # merge the legends of both y-axes into a single box
        h1, l1 = ax.get_legend_handles_labels()
        h2, l2 = ax2.get_legend_handles_labels()
        if legend:
            ax.legend(h1 + h2, l1 + l2, loc=2)
    def plot_NB(self, src_ind, ax1=None, ax2=None, ax3=None):
        """Plot the narrow band images.

        Three images are shown: the narrow-band around the detected line,
        a control narrow-band taken a few profile-lengths away in
        wavelength, and their scaled difference.

        Parameters
        ----------
        src_ind : int
            Index of the object in self.Cat0.
        ax1 : matplotlib.Axes
            The Axes instance in which the NB image around the source is drawn.
        ax2 : matplotlib.Axes
            The Axes instance in which a other NB image for check is drawn.
        ax3 : matplotlib.Axes
            The Axes instance in which the difference is drawn.
        """
        if self.Cat0 is None:
            raise ValueError("Run the step 05 to initialize self.Cat0")

        if ax1 is None and ax2 is None and ax3 is None:
            fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4))

        # Coordinates of the source
        x0 = self.Cat0[src_ind]["x0"]
        y0 = self.Cat0[src_ind]["y0"]
        z0 = self.Cat0[src_ind]["z0"]
        # Larger spatial ranges for the plots (clipped to the cube edges)
        longxy0 = 20
        y01 = max(0, y0 - longxy0)
        y02 = min(self.shape[1], y0 + longxy0 + 1)
        x01 = max(0, x0 - longxy0)
        x02 = min(self.shape[2], x0 + longxy0 + 1)
        # Coordinates in this window
        y00 = y0 - y01
        x00 = x0 - x01
        # spectral profile
        num_prof = self.Cat0[src_ind]["profile"]
        profil0 = self.profiles[num_prof]
        # length of the spectral profile (non-negligible part only)
        profil1 = profil0[profil0 > 1e-13]
        long0 = profil1.shape[0]
        # half-length of the spectral profile
        longz = long0 // 2
        # spectral range
        intz1 = max(0, z0 - longz)
        intz2 = min(self.shape[0], z0 + longz + 1)
        # subcube for the plot
        cube_test_plot = self.cube_raw[intz1:intz2, y01:y02, x01:x02]
        wcs = self.wcs[y01:y02, x01:x02]
        # controle cube: same spatial window, shifted by nb_ranges profile
        # lengths in wavelength (shifted down instead if too close to the
        # red end of the cube)
        nb_ranges = 3
        if (z0 + longz + nb_ranges * long0) < self.shape[0]:
            intz1c = intz1 + nb_ranges * long0
            intz2c = intz2 + nb_ranges * long0
        else:
            intz1c = intz1 - nb_ranges * long0
            intz2c = intz2 - nb_ranges * long0
        cube_controle_plot = self.cube_raw[intz1c:intz2c, y01:y02, x01:x02]
        # (1/sqrt(2)) * difference of the 2 sububes
        diff_cube_plot = (1 / np.sqrt(2)) * (cube_test_plot - cube_controle_plot)

        if ax1 is not None:
            ax1.plot(x00, y00, "m+")
            ima_test_plot = Image(data=cube_test_plot.sum(axis=0), wcs=wcs)
            title = "cube test - (%d,%d)\n" % (x0, y0)
            title += "lambda=%d int=[%d,%d[" % (z0, intz1, intz2)
            ima_test_plot.plot(colorbar="v", title=title, ax=ax1)
            ax1.get_xaxis().set_visible(False)
            ax1.get_yaxis().set_visible(False)

        if ax2 is not None:
            ax2.plot(x00, y00, "m+")
            ima_controle_plot = Image(data=cube_controle_plot.sum(axis=0), wcs=wcs)
            title = "check - (%d,%d)\n" % (x0, y0) + "int=[%d,%d[" % (intz1c, intz2c)
            ima_controle_plot.plot(colorbar="v", title=title, ax=ax2)
            ax2.get_xaxis().set_visible(False)
            ax2.get_yaxis().set_visible(False)

        if ax3 is not None:
            ax3.plot(x00, y00, "m+")
            ima_diff_plot = Image(data=diff_cube_plot.sum(axis=0), wcs=wcs)
            title = "Difference narrow band - (%d,%d)\n" % (x0, y0) + "int=[%d,%d[" % (
                intz1c,
                intz2c,
            )
            ima_diff_plot.plot(colorbar="v", title=title, ax=ax3)
            ax3.get_xaxis().set_visible(False)
            ax3.get_yaxis().set_visible(False)
def plot_sources(
self, x, y, circle=False, vmin=0, vmax=30, title=None, ax=None, **kwargs
):
"""Plot detected emission lines on the 2D map of maximum of the T_GLR
values over the spectral channels.
Parameters
----------
x : array
Coordinates along the x-axis of the estimated lines in pixels.
y : array
Coordinates along the y-axis of the estimated lines in pixels.
circle : bool
If true, plot circles with a diameter equal to the
mean of the fwhm of the PSF.
vmin : float
Minimum pixel value to use for the scaling.
vmax : float
Maximum pixel value to use for the scaling.
title : str
An optional title for the figure (None by default).
ax : matplotlib.Axes
the Axes instance in which the image is drawn
kwargs : matplotlib.artist.Artist
Optional arguments passed to ``ax.imshow()``.
"""
if ax is None:
ax = plt.gca()
self.maxmap.plot(vmin=vmin, vmax=vmax, title=title, ax=ax, **kwargs)
if circle:
fwhm = (
self.FWHM_PSF
if self.wfields is None
else np.max(np.array(self.FWHM_PSF))
)
radius = np.round(fwhm / 2)
for pos in zip(x, y):
ax.add_artist(plt.Circle(pos, radius, color="k", fill=False))
else:
ax.plot(x, y, "k+")
def plot_segmaps(self, axes=None, figsize=(6, 6)):
"""Plot the segmentation maps:
- segmap_cont: segmentation map computed on the white-light image.
- segmap_merged: segmentation map merged with the cont one and another
one computed on the residual.
- segmap_purity: combines self.segmap and a segmentation on the maxmap.
- segmap_label: segmentation map used for the catalog, either the one
given as input, otherwise self.segmap_cont.
"""
segmaps = {}
ncolors = 0
for name in ("segmap_cont", "segmap_merged", "segmap_purity", "segmap_label"):
segm = getattr(self, name, None)
if segm:
segmaps[name] = segm
ncolors = max(ncolors, len(np.unique(segm._data)))
nseg = len(segmaps)
if nseg == 0:
self.logger.warning("nothing to plot")
return
try:
# TODO: this will be renamed to make_random_cmap in a future
# version of photutils
from photutils.utils.colormaps import random_cmap
except ImportError:
self.logger.error("photutils is needed for this")
cmap = "jet"
else:
cmap = random_cmap(ncolors=ncolors)
cmap.colors[0] = (0.0, 0.0, 0.0)
if axes is None:
figsize = (figsize[0] * nseg, figsize[1])
fig, axes = plt.subplots(1, nseg, sharex=True, sharey=True, figsize=figsize)
if nseg == 1:
axes = [axes]
for ax, (name, im) in zip(axes, segmaps.items()):
im.plot(ax=ax, cmap=cmap, title=name, colorbar="v")
    def plot_min_max_hist(self, ax=None, comp=False):
        """Plot the cumulative histograms of local maxima and minima.

        Parameters
        ----------
        ax : matplotlib.Axes
            The Axes instance in which the histograms are drawn.
        comp : bool
            If True, use the local extrema of the std cube (before PCA)
            instead of those of the correlation cube.
        """
        if comp:
            cube_local_max = self.cube_std_local_max._data
            cube_local_min = self.cube_std_local_min._data
        else:
            cube_local_max = self.cube_local_max._data
            cube_local_min = self.cube_local_min._data

        if ax is None:
            fig, ax = plt.subplots(1, 1, figsize=(12, 6))

        ax.set_yscale("log")
        ax.grid(which="major", linewidth=1)
        ax.grid(which="minor", linewidth=1, linestyle=":")
        # reversed cumulative histogram of the positive local maxima
        # (bin width of 0.5 up to the largest value)
        maxloc = cube_local_max[cube_local_max > 0]
        bins = np.arange((maxloc.max() + 1) * 2) / 2
        ax.hist(
            maxloc, bins=bins, histtype="step", label="max", linewidth=2, cumulative=-1
        )
        # reversed cumulative histogram of the positive local minima
        minloc = cube_local_min[cube_local_min > 0]
        bins = np.arange((minloc.max() + 1) * 2) / 2
        ax.hist(
            minloc, bins=bins, histtype="step", label="min", linewidth=2, cumulative=-1
        )
        # local minima restricted to the background (label 0 in segmap_purity)
        minloc2 = cube_local_min[:, self.segmap_purity._data == 0]
        minloc2 = minloc2[minloc2 > 0]
        ax.hist(
            minloc2,
            bins=bins,
            histtype="step",
            label="min filt",
            linewidth=2,
            cumulative=-1,
        )
        ax.legend()
        ax.set_title("Cumulative histogram of min/max loc")
def timestat(self, table=False):
"""Print CPU usage by steps.
If ``table`` is True, an astropy.table.Table is returned.
"""
if table:
name = []
exdate = []
extime = []
tot = 0
for s in self.steps.items():
if "execution_date" in s[1].meta.keys():
name.append(s[1].method_name)
exdate.append(s[1].meta["execution_date"])
t = s[1].meta["runtime"]
tot += t
extime.append(datetime.timedelta(seconds=t))
name.append("Total")
exdate.append("")
extime.append(str(datetime.timedelta(seconds=tot)))
return Table(
data=[name, exdate, extime],
names=["Step", "Exec Date", "Exec Time"],
masked=True,
)
else:
tot = 0
for s in self.steps.items():
name = s[1].method_name
if "execution_date" in s[1].meta.keys():
exdate = s[1].meta["execution_date"]
t = s[1].meta["runtime"]
tot += t
extime = datetime.timedelta(seconds=t)
self.logger.info(
"%s executed: %s run time: %s", name, exdate, str(extime)
)
self.logger.info(
"*** Total run time: %s", str(datetime.timedelta(seconds=tot))
)
def stat(self):
"""Print detection summary."""
d = self._get_stat()
self.logger.info(
"ORIGIN PCA pfa %.2f Back Purity: %.2f "
"Threshold: %.2f Bright Purity %.2f Threshold %.2f",
d["pca"],
d["back_purity"],
d["back_threshold"],
d["bright_purity"],
d["bright_threshold"],
)
self.logger.info("Nb of detected lines: %d", d["tot_nlines"])
self.logger.info(
"Nb of sources Total: %d Background: %d Cont: %d",
d["tot_nsources"],
d["back_nsources"],
d["cont_nsources"],
)
self.logger.info(
"Nb of sources detected in faint (after PCA): %d "
"in std (before PCA): %d",
d["faint_nsources"],
d["bright_nsources"],
)
def _get_stat(self):
p = self.param
cat = self.Cat3_sources
if cat:
back = cat[cat["seg_label"] == 0]
cont = cat[cat["seg_label"] > 0]
bright = cat[cat["comp"] == 1]
faint = cat[cat["comp"] == 0]
return dict(
pca=p["compute_PCA_threshold"]["params"]["pfa_test"],
back_purity=p["purity"],
back_threshold=p["threshold"],
bright_purity=p["purity_std"],
bright_threshold=p["threshold_std"],
tot_nlines=len(self.Cat3_lines),
tot_nsources=len(cat),
back_nsources=len(back),
cont_nsources=len(cont),
faint_nsources=len(faint),
bright_nsources=len(bright),
)
| import datetime
import glob
import inspect
import logging
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from logging.handlers import RotatingFileHandler
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.utils import lazyproperty
from mpdaf.log import setup_logging
from mpdaf.MUSE import FieldsMap
from mpdaf.obj import Cube, Image
from . import steps
from .lib_origin import timeit
from .version import __version__
try:
# With PyYaml 5.1, load and safe have been renamed to unsafe_* and
# replaced by the safe_* functions. We need the full ones to
# be able to dump Python objects, yay!
from yaml import unsafe_load as load_yaml, dump as dump_yaml
except ImportError: # pragma: no cover
from yaml import load as load_yaml, dump as dump_yaml
CURDIR = os.path.dirname(os.path.abspath(__file__))
class ORIGIN(steps.LogMixin):
"""ORIGIN: detectiOn and extRactIon of Galaxy emIssion liNes
This is the main class to interact with all the steps. An Origin object is
mainly composed by:
- cube data (raw data and covariance)
- 1D dictionary of spectral profiles
- MUSE PSF
Attributes
----------
path : str
Path where the ORIGIN data will be stored.
name : str
Name of the session and basename for the sources.
param : dict
Parameters values.
cube_raw : array (Nz, Ny, Nx)
Raw data.
var : array (Nz, Ny, Nx)
Variance.
wcs : `mpdaf.obj.WCS`
RA-DEC coordinates.
wave : `mpdaf.obj.WaveCoord`
Spectral coordinates.
profiles : list of array
List of spectral profiles to test
FWHM_profiles : list
FWHM of the profiles in pixels.
wfields : None or list of arrays
List of weight maps (one per fields in the case of MUSE mosaic)
None: just one field
PSF : array (Nz, PSF_size, PSF_size) or list of arrays
MUSE PSF (one per field)
LBDA_FWHM_PSF: list of floats
Value of the FWMH of the PSF in pixel for each wavelength step (mean of
the fields).
FWHM_PSF : float or list of float
Mean of the fwhm of the PSF in pixel (one per field).
imawhite : `~mpdaf.obj.Image`
White image
segmap : `~mpdaf.obj.Image`
Segmentation map
cube_std : `~mpdaf.obj.Cube`
standardized data for PCA. Result of step01.
cont_dct : `~mpdaf.obj.Cube`
DCT continuum. Result of step01.
ima_std : `~mpdaf.obj.Image`
Mean of standardized data for PCA along the wavelength axis.
Result of step01.
ima_dct : `~mpdaf.obj.Image`
Mean of DCT continuum cube along the wavelength axis.
Result of step01.
nbAreas : int
Number of area (segmentation) for the PCA computation.
Result of step02.
areamap : `~mpdaf.obj.Image`
PCA area. Result of step02.
testO2 : list of arrays (one per PCA area)
Result of the O2 test (step03).
histO2 : list of arrays (one per PCA area)
PCA histogram (step03).
binO2 : list of arrays (one per PCA area)
Bins for the PCA histogram (step03).
thresO2 : list of float
For each area, threshold value (step03).
meaO2 : list of float
Location parameter of the Gaussian fit used to
estimate the threshold (step03).
stdO2 : list of float
Scale parameter of the Gaussian fit used to
estimate the threshold (step03).
cube_faint : `~mpdaf.obj.Cube`
Projection on the eigenvectors associated to the lower eigenvalues
of the data cube (representing the faint signal). Result of step04.
mapO2 : `~mpdaf.obj.Image`
The numbers of iterations used by testO2 for each spaxel.
Result of step04.
cube_correl : `~mpdaf.obj.Cube`
Cube of T_GLR values (step05).
cube_profile : `~mpdaf.obj.Cube` (type int)
PSF profile associated to the T_GLR (step05).
maxmap : `~mpdaf.obj.Image`
Map of maxima along the wavelength axis (step05).
cube_local_max : `~mpdaf.obj.Cube`
Local maxima from max correlation (step05).
cube_local_min : `~mpdaf.obj.Cube`
Local maxima from min correlation (step05).
threshold : float
Estimated threshold (step06).
Pval : `astropy.table.Table`
Table with the purity results for each threshold (step06):
- PVal_r : The purity function
- index_pval : index value to plot
- Det_m : Number of detections (-DATA)
- Det_M : Number of detections (+DATA)
Cat0 : `astropy.table.Table`
Catalog returned by step07
Pval_comp : `astropy.table.Table`
Table with the purity results for each threshold in compl (step08):
- PVal_r : The purity function
- index_pval : index value to plot
- Det_m : Number of detections (-DATA)
- Det_M : Number of detections (+DATA)
Cat1 : `astropy.table.Table`
Catalog returned by step08
spectra : list of `~mpdaf.obj.Spectrum`
Estimated lines. Result of step09.
Cat2 : `astropy.table.Table`
Catalog returned by step09.
"""
    def __init__(
        self,
        filename,
        name="origin",
        path=".",
        loglevel="DEBUG",
        logcolor=False,
        fieldmap=None,
        profiles=None,
        PSF=None,
        LBDA_FWHM_PSF=None,
        FWHM_PSF=None,
        PSF_size=25,
        param=None,
        imawhite=None,
        wfields=None,
    ):
        """Create or restore an ORIGIN session.

        Normally not called directly: use `ORIGIN.init` for a new session
        or `ORIGIN.load` to restore one.  Sets up the output folder, the
        loggers, the processing steps, and loads the data cube and the FSF
        information.  Parameter descriptions live in `ORIGIN.init`.
        """
        self.path = path
        self.name = name
        self.outpath = os.path.join(path, name)
        self.param = param or {}
        self.file_handler = None
        os.makedirs(self.outpath, exist_ok=True)
        # stdout & file logger
        setup_logging(
            name="muse_origin",
            level=loglevel,
            color=logcolor,
            fmt="%(levelname)-05s: %(message)s",
            stream=sys.stdout,
        )
        self.logger = logging.getLogger("muse_origin")
        self._setup_logfile(self.logger)
        self.param["loglevel"] = loglevel
        self.param["logcolor"] = logcolor
        self._loginfo("Step 00 - Initialization (ORIGIN v%s)", __version__)
        # dict of Step instances, indexed by step names
        self.steps = OrderedDict()
        # dict containing the data attributes of each step, to expose them on
        # the ORIGIN object (see __getattr__)
        self._dataobjs = {}
        for i, cls in enumerate(steps.STEPS, start=1):
            # Instantiate the step object, give it a step number
            step = cls(self, i, self.param)
            # force its signature to be the same as step.run (without the
            # ORIGIN instance), which allows to see its arguments and their
            # default value.
            sig = inspect.signature(step.run)
            step.__signature__ = sig.replace(
                parameters=[p for p in sig.parameters.values() if p.name != "orig"]
            )
            self.steps[step.name] = step
            # Insert the __call__ method of the step in the ORIGIN object. This
            # allows to run a step with a method like "step01_preprocessing".
            self.__dict__[step.method_name] = step
            # NOTE(review): the loop variable ``name`` shadows the constructor
            # argument here; harmless since self.name is already stored.
            for name, _ in step._dataobjs:
                self._dataobjs[name] = step
        # MUSE data cube
        self._loginfo("Read the Data Cube %s", filename)
        self.param["cubename"] = filename
        self.cube = Cube(filename)
        self.Nz, self.Ny, self.Nx = self.shape = self.cube.shape
        # RA-DEC coordinates
        self.wcs = self.cube.wcs
        # spectral coordinates
        self.wave = self.cube.wave
        # List of spectral profile (default dictionary shipped with the code)
        if profiles is None:
            profiles = os.path.join(CURDIR, "Dico_3FWHM.fits")
        self.param["profiles"] = profiles
        # FSF: either given explicitly or reconstructed from the cube header
        self.param["fieldmap"] = fieldmap
        self.param["PSF_size"] = PSF_size
        self._read_fsf(
            self.cube,
            fieldmap=fieldmap,
            wfields=wfields,
            PSF=PSF,
            LBDA_FWHM_PSF=LBDA_FWHM_PSF,
            FWHM_PSF=FWHM_PSF,
            PSF_size=PSF_size,
        )
        # white-light image: reuse the restored one or compute it
        self.ima_white = imawhite if imawhite else self.cube.mean(axis=0)
        # PCA threshold diagnostics, filled by step03 (or restored by load)
        self.testO2, self.histO2, self.binO2 = None, None, None
        self._loginfo("00 Done")
def __getattr__(self, name):
# Use __getattr__ to provide access to the steps data attributes
# via the ORIGIN object. This will also trigger the loading of
# the objects if needed.
if name in self._dataobjs:
return getattr(self._dataobjs[name], name)
else:
raise AttributeError(f"unknown attribute {name}")
def __dir__(self):
return (
super().__dir__()
+ list(self._dataobjs.keys())
+ [o.method_name for o in self.steps.values()]
)
    @lazyproperty
    def cube_raw(self):
        # Flux cube as a plain ndarray; masked values (NaN) are replaced by
        # 0 so downstream numpy operations stay finite.
        return self.cube.data.filled(fill_value=0)
    @lazyproperty
    def mask(self):
        # Boolean bad-pixel mask, shared with the underlying MPDAF cube.
        return self.cube._mask
    @lazyproperty
    def var(self):
        # Variance cube as a plain ndarray; masked values (NaN) are replaced
        # by +Inf so they get zero weight in inverse-variance computations.
        return self.cube.var.filled(np.inf)
@classmethod
def init(
cls,
cube,
fieldmap=None,
profiles=None,
PSF=None,
LBDA_FWHM_PSF=None,
FWHM_PSF=None,
PSF_size=25,
name="origin",
path=".",
loglevel="DEBUG",
logcolor=False,
):
"""Create a ORIGIN object.
An Origin object is composed by:
- cube data (raw data and covariance)
- 1D dictionary of spectral profiles
- MUSE PSF
- parameters used to segment the cube in different zones.
Parameters
----------
cube : str
Cube FITS file name
fieldmap : str
FITS file containing the field map (mosaic)
profiles : str
FITS of spectral profiles
If None, a default dictionary of 20 profiles is used.
PSF : str
Cube FITS filename containing a MUSE PSF per wavelength.
If None, PSF are computed with a Moffat function
(13x13 pixels, beta=2.6, fwhm1=0.76, fwhm2=0.66,
lambda1=4750, lambda2=7000)
LBDA_FWHM_PSF: list of float
Value of the FWMH of the PSF in pixel for each wavelength step
(mean of the fields).
FWHM_PSF : list of float
FWHM of the PSFs in pixels, one per field.
PSF_size : int
Spatial size of the PSF (when reconstructed from the cube header).
name : str
Name of this session and basename for the sources.
ORIGIN.write() method saves the session in a folder that
has this name. The ORIGIN.load() method will be used to
load a session, continue it or create a new from it.
loglevel : str
Level for the logger (defaults to DEBUG).
logcolor : bool
Use color for the logger levels.
"""
return cls(
cube,
path=path,
name=name,
fieldmap=fieldmap,
profiles=profiles,
PSF=PSF,
LBDA_FWHM_PSF=LBDA_FWHM_PSF,
FWHM_PSF=FWHM_PSF,
PSF_size=PSF_size,
loglevel=loglevel,
logcolor=logcolor,
)
@classmethod
@timeit
def load(cls, folder, newname=None, loglevel=None, logcolor=None):
"""Load a previous session of ORIGIN.
ORIGIN.write() method saves a session in a folder that has the name of
the ORIGIN object (self.name).
Parameters
----------
folder : str
Folder name (with the relative path) where the ORIGIN data
have been stored.
newname : str
New name for this session. This parameter lets the user to load a
previous session but continue in a new one. If None, the user will
continue the loaded session.
loglevel : str
Level for the logger (by default reuse the saved level).
logcolor : bool
Use color for the logger levels.
"""
path = os.path.dirname(os.path.abspath(folder))
name = os.path.basename(folder)
with open(f"{folder}/{name}.yaml", "r") as stream:
param = load_yaml(stream)
if "FWHM PSF" in param:
FWHM_PSF = np.asarray(param["FWHM PSF"])
else:
FWHM_PSF = None
if "LBDA_FWHM PSF" in param:
LBDA_FWHM_PSF = np.asarray(param["LBDA FWHM PSF"])
else:
LBDA_FWHM_PSF = None
if os.path.isfile(param["PSF"]):
PSF = param["PSF"]
else:
if os.path.isfile("%s/cube_psf.fits" % folder):
PSF = "%s/cube_psf.fits" % folder
else:
PSF_files = glob.glob("%s/cube_psf_*.fits" % folder)
if len(PSF_files) == 0:
PSF = None
elif len(PSF_files) == 1:
PSF = PSF_files[0]
else:
PSF = sorted(PSF_files)
wfield_files = glob.glob("%s/wfield_*.fits" % folder)
if len(wfield_files) == 0:
wfields = None
else:
wfields = sorted(wfield_files)
# step0
if os.path.isfile("%s/ima_white.fits" % folder):
ima_white = Image("%s/ima_white.fits" % folder)
else:
ima_white = None
if newname is not None:
# copy outpath to the new path
shutil.copytree(os.path.join(path, name), os.path.join(path, newname))
name = newname
loglevel = loglevel if loglevel is not None else param["loglevel"]
logcolor = logcolor if logcolor is not None else param["logcolor"]
obj = cls(
path=path,
name=name,
param=param,
imawhite=ima_white,
loglevel=loglevel,
logcolor=logcolor,
filename=param["cubename"],
fieldmap=param["fieldmap"],
wfields=wfields,
profiles=param["profiles"],
PSF=PSF,
FWHM_PSF=FWHM_PSF,
LBDA_FWHM_PSF=LBDA_FWHM_PSF,
)
for step in obj.steps.values():
step.load(obj.outpath)
# special case for step3
NbAreas = param.get("nbareas")
if NbAreas is not None:
if os.path.isfile("%s/testO2_1.txt" % folder):
obj.testO2 = [
np.loadtxt("%s/testO2_%d.txt" % (folder, area), ndmin=1)
for area in range(1, NbAreas + 1)
]
if os.path.isfile("%s/histO2_1.txt" % folder):
obj.histO2 = [
np.loadtxt("%s/histO2_%d.txt" % (folder, area), ndmin=1)
for area in range(1, NbAreas + 1)
]
if os.path.isfile("%s/binO2_1.txt" % folder):
obj.binO2 = [
np.loadtxt("%s/binO2_%d.txt" % (folder, area), ndmin=1)
for area in range(1, NbAreas + 1)
]
return obj
def info(self):
"""Prints the processing log."""
with open(self.logfile) as f:
for line in f:
if line.find("Done") == -1:
print(line, end="")
def status(self):
"""Prints the processing status."""
for name, step in self.steps.items():
print(f"- {step.idx:02d}, {name}: {step.status.name}")
    def _setup_logfile(self, logger):
        """Attach a rotating file handler writing to ``<outpath>/<name>.log``.

        If a handler from a previous location is still attached (e.g. after
        ``write(path=...)`` moved the session folder), it is closed and
        removed first so log records are not duplicated.
        """
        if self.file_handler is not None:
            # Remove the handlers before adding a new one
            self.file_handler.close()
            logger.handlers.remove(self.file_handler)
        self.logfile = os.path.join(self.outpath, self.name + ".log")
        # rotate at ~1 MB, keep one backup file
        self.file_handler = RotatingFileHandler(self.logfile, "a", 1000000, 1)
        self.file_handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter("%(asctime)s %(message)s")
        self.file_handler.setFormatter(formatter)
        logger.addHandler(self.file_handler)
def set_loglevel(self, level):
"""Set the logging level for the console logger."""
handler = next(
h for h in self.logger.handlers if isinstance(h, logging.StreamHandler)
)
handler.setLevel(level)
self.param["loglevel"] = level
    @property
    def nbAreas(self):
        """Number of areas (segmentation) for the PCA; set by step02."""
        return self.param.get("nbareas")
    @property
    def threshold_correl(self):
        """Estimated threshold used to detect lines on local maxima of max
        correl; set by step06."""
        return self.param.get("threshold")

    @threshold_correl.setter
    def threshold_correl(self, value):
        # stored in param so it is persisted by write()/load()
        self.param["threshold"] = value
    @property
    def threshold_std(self):
        """Estimated threshold used to detect complementary lines on local
        maxima of std cube; set by step08."""
        return self.param.get("threshold_std")

    @threshold_std.setter
    def threshold_std(self, value):
        # stored in param so it is persisted by write()/load()
        self.param["threshold_std"] = value
    @lazyproperty
    def profiles(self):
        """Read the list of spectral profiles.

        Returns one 1D array per FITS extension (the primary HDU is
        skipped).  All profiles must have the same length.
        """
        profiles = self.param["profiles"]
        self._loginfo("Load dictionary of spectral profile %s", profiles)
        with fits.open(profiles) as hdul:
            profiles = [hdu.data for hdu in hdul[1:]]
        # check that the profiles have the same size
        if len({p.shape[0] for p in profiles}) != 1:
            raise ValueError("The profiles must have the same size")
        return profiles
    @lazyproperty
    def FWHM_profiles(self):
        """Read the list of FWHM of the spectral profiles.

        One value per profile extension, read from the FWHM keyword of
        each extension header.
        """
        with fits.open(self.param["profiles"]) as hdul:
            return [hdu.header["FWHM"] for hdu in hdul[1:]]
    def _read_fsf(
        self,
        cube,
        fieldmap=None,
        wfields=None,
        PSF=None,
        LBDA_FWHM_PSF=None,
        FWHM_PSF=None,
        PSF_size=25,
    ):
        """Read FSF cube(s), with fieldmap in the case of MUSE mosaic.

        There are two ways to specify the PSF informations:

        - with the ``PSF``, ``FWHM_PSF``, and ``LBDA_FWHM_PSF`` parameters.
        - or read from the cube header and fieldmap.

        If there are multiple fields, for a mosaic, we also need weight maps.
        If the cube contains a FSF model and a fieldmap is given, these weight
        maps are computed automatically.

        Parameters
        ----------
        cube : mpdaf.obj.Cube
            The input datacube.
        fieldmap : str
            FITS file containing the field map (mosaic).
        wfields : list of str
            List of weight maps (one per field in the case of MUSE mosaic).
        PSF : str or list of str
            Cube FITS filename containing a MUSE PSF per wavelength, or a list
            of filenames for multiple fields (mosaic).
        LBDA_FWHM_PSF : list of float
            Value of the FWHM of the PSF in pixel for each wavelength step
            (mean of the fields).
        FWHM_PSF : list of float
            FWHM of the PSFs in pixels, one per field.
        PSF_size : int
            Spatial size of the PSF (when reconstructed from the cube header).
        """
        self.wfields = None
        info = self.logger.info
        if PSF is None or FWHM_PSF is None or LBDA_FWHM_PSF is None:
            info("Compute FSFs from the datacube FITS header keywords")
            if "FSFMODE" not in cube.primary_header:
                raise ValueError("missing PSF keywords in the cube FITS header")
            # FSF created from FSF*** keywords
            try:
                from mpdaf.MUSE import FSFModel
            except ImportError:
                sys.exit("you must upgrade MPDAF")
            fsf = FSFModel.read(cube)
            lbda = cube.wave.coord()
            shape = (PSF_size, PSF_size)
            if isinstance(fsf, FSFModel):  # just one FSF
                self.PSF = fsf.get_3darray(lbda, shape)
                self.LBDA_FWHM_PSF = fsf.get_fwhm(lbda, unit="pix")
                self.FWHM_PSF = np.mean(self.LBDA_FWHM_PSF)
                # mean of the fwhm of the FSF in pixel
                info("mean FWHM of the FSFs = %.2f pixels", self.FWHM_PSF)
            else:
                # mosaic: one FSF model per field
                self.PSF = [f.get_3darray(lbda, shape) for f in fsf]
                fwhm = np.array([f.get_fwhm(lbda, unit="pix") for f in fsf])
                self.LBDA_FWHM_PSF = np.mean(fwhm, axis=0)
                self.FWHM_PSF = np.mean(fwhm, axis=1)
                for i, fwhm in enumerate(self.FWHM_PSF):
                    info("mean FWHM of the FSFs (field %d) = %.2f pixels", i, fwhm)
                info("Compute weight maps from field map %s", fieldmap)
                fmap = FieldsMap(fieldmap, nfields=len(fsf))
                # weighted field map
                self.wfields = fmap.compute_weights()
            self.param["PSF"] = cube.primary_header["FSFMODE"]
        else:
            self.LBDA_FWHM_PSF = LBDA_FWHM_PSF
            if isinstance(PSF, str):
                info("Load FSFs from %s", PSF)
                self.param["PSF"] = PSF
                self.PSF = fits.getdata(PSF)
                if self.PSF.shape[1] != self.PSF.shape[2]:
                    raise ValueError("PSF must be a square image.")
                if not self.PSF.shape[1] % 2:
                    raise ValueError("The spatial size of the PSF must be odd.")
                if self.PSF.shape[0] != self.shape[0]:
                    raise ValueError(
                        "PSF and data cube have not the same"
                        "dimensions along the spectral axis."
                    )
                # mean of the fwhm of the FSF in pixel
                # NOTE(review): assumes FWHM_PSF is a numpy array here
                # (.tolist() below) -- confirm callers never pass a plain
                # list in this single-PSF case.
                self.FWHM_PSF = np.mean(FWHM_PSF)
                self.param["FWHM PSF"] = FWHM_PSF.tolist()
                info("mean FWHM of the FSFs = %.2f pixels", self.FWHM_PSF)
            else:
                nfields = len(PSF)
                self.wfields = []
                self.PSF = []
                self.FWHM_PSF = list(FWHM_PSF)
                for n in range(nfields):
                    info("Load FSF from %s", PSF[n])
                    self.PSF.append(fits.getdata(PSF[n]))
                    info("Load weight maps from %s", wfields[n])
                    self.wfields.append(fits.getdata(wfields[n]))
                    info(
                        "mean FWHM of the FSFs (field %d) = %.2f pixels", n, FWHM_PSF[n]
                    )
                # NOTE(review): self.FWHM_PSF is a plain list here, so the
                # .tolist() call below looks like it would raise
                # AttributeError -- verify this branch is exercised.
                self.param["FWHM PSF"] = self.FWHM_PSF.tolist()
        # NOTE(review): assumes self.LBDA_FWHM_PSF is a numpy array
        # (.tolist()); a plain-list argument would fail here -- confirm.
        self.param["LBDA FWHM PSF"] = self.LBDA_FWHM_PSF.tolist()
    @timeit
    def write(self, path=None, erase=False):
        """Save the current session in a folder that will have the name of the
        ORIGIN object (self.name).

        The ORIGIN.load(folder, newname=None) method will be used to load a
        session.  The parameter newname will let the user load a session but
        continue in a new one.

        Parameters
        ----------
        path : str
            Path where the folder (self.name) will be stored.
        erase : bool
            Remove the folder if it exists.
        """
        self._loginfo("Writing...")
        # adapt session if path changes
        if path is not None and path != self.path:
            if not os.path.exists(path):
                raise ValueError(f"path does not exist: {path}")
            self.path = path
            outpath = os.path.join(path, self.name)
            # copy outpath to the new path
            shutil.copytree(self.outpath, outpath)
            self.outpath = outpath
            # reopen the log file at the new location
            self._setup_logfile(self.logger)
        if erase:
            shutil.rmtree(self.outpath)
        os.makedirs(self.outpath, exist_ok=True)
        # PSF cube(s): one file per field for a mosaic
        if isinstance(self.PSF, list):
            for i, psf in enumerate(self.PSF):
                cube = Cube(data=psf, mask=np.ma.nomask, copy=False)
                cube.write(os.path.join(self.outpath, "cube_psf_%02d.fits" % i))
        else:
            cube = Cube(data=self.PSF, mask=np.ma.nomask, copy=False)
            cube.write(os.path.join(self.outpath, "cube_psf.fits"))
        if self.wfields is not None:
            # weight maps of the mosaic fields
            for i, wfield in enumerate(self.wfields):
                im = Image(data=wfield, mask=np.ma.nomask)
                im.write(os.path.join(self.outpath, "wfield_%02d.fits" % i))
        if self.ima_white is not None:
            self.ima_white.write("%s/ima_white.fits" % self.outpath)
        # each step dumps its own data objects
        for step in self.steps.values():
            step.dump(self.outpath)
        # parameters in .yaml
        with open(f"{self.outpath}/{self.name}.yaml", "w") as stream:
            dump_yaml(self.param, stream)
        # step3 - saving this manually for now (plain text, one file per area)
        if self.nbAreas is not None:
            if self.testO2 is not None:
                for area in range(1, self.nbAreas + 1):
                    np.savetxt(
                        "%s/testO2_%d.txt" % (self.outpath, area), self.testO2[area - 1]
                    )
            if self.histO2 is not None:
                for area in range(1, self.nbAreas + 1):
                    np.savetxt(
                        "%s/histO2_%d.txt" % (self.outpath, area), self.histO2[area - 1]
                    )
            if self.binO2 is not None:
                for area in range(1, self.nbAreas + 1):
                    np.savetxt(
                        "%s/binO2_%d.txt" % (self.outpath, area), self.binO2[area - 1]
                    )
        self._loginfo("Current session saved in %s", self.outpath)
    def plot_areas(self, ax=None, **kwargs):
        """Plot the 2D segmentation for PCA from self.step02_areas()
        on the test used to perform this segmentation.

        Parameters
        ----------
        ax : matplotlib.Axes
            The Axes instance in which the image is drawn.
        kwargs : matplotlib.artist.Artist
            Optional extra keyword/value arguments to be passed to
            ``ax.imshow()``.
        """
        if ax is None:
            ax = plt.gca()
        kwargs.setdefault("cmap", "jet")
        kwargs.setdefault("alpha", 0.7)
        kwargs.setdefault("interpolation", "nearest")
        kwargs["origin"] = "lower"
        cax = ax.imshow(self.areamap._data, **kwargs)
        i0 = np.min(self.areamap._data)
        i1 = np.max(self.areamap._data)
        if i0 != i1:
            # Discrete colorbar: one tick per area label, with boundaries at
            # half-integers so each area maps to a single color band.
            from matplotlib.colors import BoundaryNorm
            from mpl_toolkits.axes_grid1 import make_axes_locatable

            n = i1 - i0 + 1
            bounds = np.linspace(i0, i1 + 1, n + 1) - 0.5
            norm = BoundaryNorm(bounds, n + 1)
            divider = make_axes_locatable(ax)
            cax2 = divider.append_axes("right", size="5%", pad=1)
            plt.colorbar(
                cax,
                cax=cax2,
                cmap=kwargs["cmap"],
                norm=norm,
                spacing="proportional",
                ticks=bounds + 0.5,
                boundaries=bounds,
                format="%1i",
            )
    def plot_step03_PCA_threshold(
        self, log10=False, ncol=3, legend=True, xlim=None, fig=None, **fig_kw
    ):
        """Plot the histogram and the threshold for the starting point of the
        PCA, one subplot per area.

        Parameters
        ----------
        log10 : bool
            Draw histogram in logarithmic scale or not.
        ncol : int
            Number of columns in the subplots.
        legend : bool
            If true, write pfa and threshold values as legend.
        xlim : (float, float)
            Set the data limits for the x-axes.
        fig : matplotlib.Figure
            Figure instance in which the image is drawn.
        **fig_kw : matplotlib.artist.Artist
            All additional keyword arguments are passed to the figure() call.
        """
        if self.nbAreas is None:
            raise ValueError("Run the step 02 to initialize self.nbAreas")
        if fig is None:
            fig = plt.figure()
        # grid layout: ncol columns, as many rows as needed
        if self.nbAreas <= ncol:
            n = 1
            m = self.nbAreas
        else:
            n = self.nbAreas // ncol
            m = ncol
            if (n * m) < self.nbAreas:
                n = n + 1
        for area in range(1, self.nbAreas + 1):
            if area == 1:
                ax = fig.add_subplot(n, m, area, **fig_kw)
            else:
                # share the y axis with the first subplot
                ax = fig.add_subplot(n, m, area, sharey=fig.axes[0], **fig_kw)
            self.plot_PCA_threshold(area, "step03", log10, legend, xlim, ax)
        # Fine-tune figure: keep labels and ticks only on the outer subplots
        for a in fig.axes[:-1]:
            a.set_xlabel("")
        for a in fig.axes[1:]:
            a.set_ylabel("")
        plt.setp([a.get_yticklabels() for a in fig.axes], visible=False)
        plt.setp([a.get_yticklabels() for a in fig.axes[0::m]], visible=True)
        plt.setp([a.get_yticklines() for a in fig.axes], visible=False)
        plt.setp([a.get_yticklines() for a in fig.axes[0::m]], visible=True)
        fig.subplots_adjust(wspace=0)
        if xlim is not None:
            plt.setp([a.get_xticklabels() for a in fig.axes[:-m]], visible=False)
            plt.setp([a.get_xticklines() for a in fig.axes[:-m]], visible=False)
            fig.subplots_adjust(hspace=0)
    def plot_step03_PCA_stat(self, cutoff=5, ax=None):
        """Plot the threshold value according to the area.

        Median Absolute Deviation is used to find outliers.

        Parameters
        ----------
        cutoff : float
            Median Absolute Deviation cutoff.
        ax : matplotlib.Axes
            The Axes instance in which the image is drawn.
        """
        if self.nbAreas is None:
            raise ValueError("Run the step 02 to initialize self.nbAreas")
        if self.thresO2 is None:
            raise ValueError("Run the step 03 to compute the threshold values")
        if ax is None:
            ax = plt.gca()
        ax.plot(np.arange(1, self.nbAreas + 1), self.thresO2, "+")
        # MAD-based outlier detection: mark in red the thresholds deviating
        # from the median by more than ``cutoff`` times the MAD.
        med = np.median(self.thresO2)
        diff = np.absolute(self.thresO2 - med)
        mad = np.median(diff)
        if mad != 0:
            ksel = (diff / mad) > cutoff
            if ksel.any():
                ax.plot(
                    np.arange(1, self.nbAreas + 1)[ksel],
                    np.asarray(self.thresO2)[ksel],
                    "ro",
                )
        ax.set_xlabel("area")
        ax.set_ylabel("Threshold")
        ax.set_title(f"PCA threshold (med={med:.2f}, mad= {mad:.2f})")
def plot_PCA_threshold(
self, area, pfa_test="step03", log10=False, legend=True, xlim=None, ax=None
):
""" Plot the histogram and the threshold for the starting point of the PCA.
Parameters
----------
area : int in [1, nbAreas]
Area ID
pfa_test : float or str
PFA of the test (if 'step03', the value set during step03 is used)
log10 : bool
Draw histogram in logarithmic scale or not
legend : bool
If true, write pfa and threshold values as legend
xlim : (float, float)
Set the data limits for the x-axis
ax : matplotlib.Axes
Axes instance in which the image is drawn
"""
if self.nbAreas is None:
raise ValueError("Run the step 02 to initialize self.nbAreas")
if pfa_test == "step03":
param = self.param["compute_PCA_threshold"]["params"]
if "pfa_test" in param:
pfa_test = param["pfa_test"]
hist = self.histO2[area - 1]
bins = self.binO2[area - 1]
thre = self.thresO2[area - 1]
mea = self.meaO2[area - 1]
std = self.stdO2[area - 1]
else:
raise ValueError(
"pfa_test param is None: set a value or run the Step03"
)
else:
if self.cube_std is None:
raise ValueError("Run the step 01 to initialize self.cube_std")
# limits of each spatial zone
ksel = self.areamap._data == area
# Data in this spatio-spectral zone
cube_temp = self.cube_std._data[:, ksel]
# Compute_PCA_threshold
from .lib_origin import Compute_PCA_threshold
testO2, hist, bins, thre, mea, std = Compute_PCA_threshold(
cube_temp, pfa_test
)
if ax is None:
ax = plt.gca()
from scipy import stats
center = (bins[:-1] + bins[1:]) / 2
gauss = stats.norm.pdf(center, loc=mea, scale=std)
gauss *= hist.max() / gauss.max()
if log10:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
gauss = np.log10(gauss)
hist = np.log10(hist)
ax.plot(center, hist, "-k")
ax.plot(center, hist, ".r")
ax.plot(center, gauss, "-b", alpha=0.5)
ax.axvline(thre, color="b", lw=2, alpha=0.5)
ax.grid()
if xlim is None:
ax.set_xlim((center.min(), center.max()))
else:
ax.set_xlim(xlim)
ax.set_xlabel("frequency")
ax.set_ylabel("value")
kwargs = dict(transform=ax.transAxes, bbox=dict(facecolor="red", alpha=0.5))
if legend:
text = "zone %d\npfa %.2f\nthreshold %.2f" % (area, pfa_test, thre)
ax.text(0.1, 0.8, text, **kwargs)
else:
ax.text(0.9, 0.9, "%d" % area, **kwargs)
def plot_mapPCA(self, area=None, iteration=None, ax=None, **kwargs):
""" Plot at a given iteration (or at the end) the number of times
a spaxel got cleaned by the PCA.
Parameters
----------
area: int in [1, nbAreas]
if None draw the full map for all areas
iteration : int
Display the nuisance/bacground pixels at iteration k
ax : matplotlib.Axes
The Axes instance in which the image is drawn
kwargs : matplotlib.artist.Artist
Optional extra keyword/value arguments to be passed to ``ax.imshow()``.
"""
if self.mapO2 is None:
raise ValueError("Run the step 04 to initialize self.mapO2")
themap = self.mapO2.copy()
title = "Number of times the spaxel got cleaned by the PCA"
if iteration is not None:
title += "\n%d iterations" % iteration
if area is not None:
mask = np.ones_like(self.mapO2._data, dtype=np.bool)
mask[self.areamap._data == area] = False
themap._mask = mask
title += " (zone %d)" % area
if iteration is not None:
themap[themap._data < iteration] = np.ma.masked
if ax is None:
ax = plt.gca()
kwargs.setdefault("cmap", "jet")
themap.plot(title=title, colorbar="v", ax=ax, **kwargs)
    def plot_purity(self, comp=False, ax=None, log10=False, legend=True):
        """Draw number of sources per threshold computed in step06/step08.

        Parameters
        ----------
        comp : bool
            If True, plot purity curves for the complementary lines (step08).
        ax : matplotlib.Axes
            The Axes instance in which the image is drawn.
        log10 : bool
            To draw histogram in logarithmic scale or not.
        legend : bool
            To draw the legend.
        """
        if ax is None:
            ax = plt.gca()
        # pick correlation (step06) or std/complementary (step08) results
        if comp:
            threshold = self.threshold_std
            purity = self.param["purity_std"]
            Pval = self.Pval_comp
        else:
            threshold = self.threshold_correl
            purity = self.param["purity"]
            Pval = self.Pval
        if Pval is None:
            raise ValueError("Run the step 06")
        Tval_r = Pval["Tval_r"]
        # detection counts on the left axis, purity on a twin right axis
        ax2 = ax.twinx()
        ax2.plot(Tval_r, Pval["Pval_r"], "y.-", label="purity")
        ax.plot(Tval_r, Pval["Det_M"], "b.-", label="n detections (+DATA)")
        ax.plot(Tval_r, Pval["Det_m"], "g.-", label="n detections (-DATA)")
        ax2.plot(threshold, purity, "xr")
        if log10:
            ax.set_yscale("log")
            ax2.set_yscale("log")
        # vertical line marking the automatically selected threshold
        ym, yM = ax.get_ylim()
        ax.plot(
            [threshold, threshold],
            [ym, yM],
            "r",
            alpha=0.25,
            lw=2,
            label="automatic threshold",
        )
        ax.set_ylim((ym, yM))
        ax.set_xlabel("Threshold")
        ax2.set_ylabel("Purity")
        ax.set_ylabel("Number of detections")
        ax.set_title("threshold %f" % threshold)
        h1, l1 = ax.get_legend_handles_labels()
        h2, l2 = ax2.get_legend_handles_labels()
        if legend:
            ax.legend(h1 + h2, l1 + l2, loc=2)
    def plot_NB(self, src_ind, ax1=None, ax2=None, ax3=None):
        """Plot the narrow band images.

        Parameters
        ----------
        src_ind : int
            Index of the object in self.Cat0.
        ax1 : matplotlib.Axes
            The Axes instance in which the NB image around the source is
            drawn.
        ax2 : matplotlib.Axes
            The Axes instance in which another (control) NB image is drawn.
        ax3 : matplotlib.Axes
            The Axes instance in which the difference is drawn.
        """
        if self.Cat0 is None:
            raise ValueError("Run the step 05 to initialize self.Cat0")
        if ax1 is None and ax2 is None and ax3 is None:
            fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4))
        # Coordinates of the source
        x0 = self.Cat0[src_ind]["x0"]
        y0 = self.Cat0[src_ind]["y0"]
        z0 = self.Cat0[src_ind]["z0"]
        # Larger spatial ranges for the plots, clipped to the cube edges
        longxy0 = 20
        y01 = max(0, y0 - longxy0)
        y02 = min(self.shape[1], y0 + longxy0 + 1)
        x01 = max(0, x0 - longxy0)
        x02 = min(self.shape[2], x0 + longxy0 + 1)
        # Coordinates in this window
        y00 = y0 - y01
        x00 = x0 - x01
        # spectral profile
        num_prof = self.Cat0[src_ind]["profile"]
        profil0 = self.profiles[num_prof]
        # length of the spectral profile (non-negligible part only)
        profil1 = profil0[profil0 > 1e-13]
        long0 = profil1.shape[0]
        # half-length of the spectral profile
        longz = long0 // 2
        # spectral range
        intz1 = max(0, z0 - longz)
        intz2 = min(self.shape[0], z0 + longz + 1)
        # subcube for the plot
        cube_test_plot = self.cube_raw[intz1:intz2, y01:y02, x01:x02]
        wcs = self.wcs[y01:y02, x01:x02]
        # control cube: same spatial window shifted by a few profile lengths
        # in wavelength (away from the line), used as an empty reference
        nb_ranges = 3
        if (z0 + longz + nb_ranges * long0) < self.shape[0]:
            intz1c = intz1 + nb_ranges * long0
            intz2c = intz2 + nb_ranges * long0
        else:
            intz1c = intz1 - nb_ranges * long0
            intz2c = intz2 - nb_ranges * long0
        cube_controle_plot = self.cube_raw[intz1c:intz2c, y01:y02, x01:x02]
        # (1/sqrt(2)) * difference of the 2 subcubes
        diff_cube_plot = (1 / np.sqrt(2)) * (cube_test_plot - cube_controle_plot)
        if ax1 is not None:
            ax1.plot(x00, y00, "m+")
            ima_test_plot = Image(data=cube_test_plot.sum(axis=0), wcs=wcs)
            title = "cube test - (%d,%d)\n" % (x0, y0)
            title += "lambda=%d int=[%d,%d[" % (z0, intz1, intz2)
            ima_test_plot.plot(colorbar="v", title=title, ax=ax1)
            ax1.get_xaxis().set_visible(False)
            ax1.get_yaxis().set_visible(False)
        if ax2 is not None:
            ax2.plot(x00, y00, "m+")
            ima_controle_plot = Image(data=cube_controle_plot.sum(axis=0), wcs=wcs)
            title = "check - (%d,%d)\n" % (x0, y0) + "int=[%d,%d[" % (intz1c, intz2c)
            ima_controle_plot.plot(colorbar="v", title=title, ax=ax2)
            ax2.get_xaxis().set_visible(False)
            ax2.get_yaxis().set_visible(False)
        if ax3 is not None:
            ax3.plot(x00, y00, "m+")
            ima_diff_plot = Image(data=diff_cube_plot.sum(axis=0), wcs=wcs)
            title = "Difference narrow band - (%d,%d)\n" % (x0, y0) + "int=[%d,%d[" % (
                intz1c,
                intz2c,
            )
            ima_diff_plot.plot(colorbar="v", title=title, ax=ax3)
            ax3.get_xaxis().set_visible(False)
            ax3.get_yaxis().set_visible(False)
    def plot_sources(
        self, x, y, circle=False, vmin=0, vmax=30, title=None, ax=None, **kwargs
    ):
        """Plot detected emission lines on the 2D map of maximum of the T_GLR
        values over the spectral channels.

        Parameters
        ----------
        x : array
            Coordinates along the x-axis of the estimated lines in pixels.
        y : array
            Coordinates along the y-axis of the estimated lines in pixels.
        circle : bool
            If true, plot circles with a diameter equal to the
            mean of the fwhm of the PSF.
        vmin : float
            Minimum pixel value to use for the scaling.
        vmax : float
            Maximum pixel value to use for the scaling.
        title : str
            An optional title for the figure (None by default).
        ax : matplotlib.Axes
            The Axes instance in which the image is drawn.
        kwargs : matplotlib.artist.Artist
            Optional arguments passed to ``ax.imshow()``.
        """
        if ax is None:
            ax = plt.gca()
        self.maxmap.plot(vmin=vmin, vmax=vmax, title=title, ax=ax, **kwargs)
        if circle:
            # one circle per detection; radius = half the FSF FWHM (largest
            # field for a mosaic)
            fwhm = (
                self.FWHM_PSF
                if self.wfields is None
                else np.max(np.array(self.FWHM_PSF))
            )
            radius = np.round(fwhm / 2)
            for pos in zip(x, y):
                ax.add_artist(plt.Circle(pos, radius, color="k", fill=False))
        else:
            ax.plot(x, y, "k+")
    def plot_segmaps(self, axes=None, figsize=(6, 6)):
        """Plot the segmentation maps:

        - segmap_cont: segmentation map computed on the white-light image.
        - segmap_merged: segmentation map merged with the cont one and another
          one computed on the residual.
        - segmap_purity: combines self.segmap and a segmentation on the
          maxmap.
        - segmap_label: segmentation map used for the catalog, either the one
          given as input, otherwise self.segmap_cont.
        """
        # collect only the maps that exist in this session
        segmaps = {}
        ncolors = 0
        for name in ("segmap_cont", "segmap_merged", "segmap_purity", "segmap_label"):
            segm = getattr(self, name, None)
            if segm:
                segmaps[name] = segm
                ncolors = max(ncolors, len(np.unique(segm._data)))
        nseg = len(segmaps)
        if nseg == 0:
            self.logger.warning("nothing to plot")
            return
        try:
            # TODO: this will be renamed to make_random_cmap in a future
            # version of photutils
            from photutils.utils.colormaps import random_cmap
        except ImportError:
            self.logger.error("photutils is needed for this")
            cmap = "jet"
        else:
            # random colors, with label 0 (background) forced to black
            cmap = random_cmap(ncolors=ncolors)
            cmap.colors[0] = (0.0, 0.0, 0.0)
        if axes is None:
            figsize = (figsize[0] * nseg, figsize[1])
            fig, axes = plt.subplots(1, nseg, sharex=True, sharey=True, figsize=figsize)
        if nseg == 1:
            axes = [axes]
        for ax, (name, im) in zip(axes, segmaps.items()):
            im.plot(ax=ax, cmap=cmap, title=name, colorbar="v")
    def plot_min_max_hist(self, ax=None, comp=False):
        """Plot the reversed-cumulative histograms of local maxima and minima.

        Parameters
        ----------
        ax : matplotlib.Axes
            The Axes instance in which the histograms are drawn.
        comp : bool
            If True, use the std-cube local extrema (complementary lines)
            instead of the correlation-cube ones.
        """
        if comp:
            cube_local_max = self.cube_std_local_max._data
            cube_local_min = self.cube_std_local_min._data
        else:
            cube_local_max = self.cube_local_max._data
            cube_local_min = self.cube_local_min._data
        if ax is None:
            fig, ax = plt.subplots(1, 1, figsize=(12, 6))
        ax.set_yscale("log")
        ax.grid(which="major", linewidth=1)
        ax.grid(which="minor", linewidth=1, linestyle=":")
        # reversed cumulative histogram of maxima (counts above threshold),
        # half-unit bins
        maxloc = cube_local_max[cube_local_max > 0]
        bins = np.arange((maxloc.max() + 1) * 2) / 2
        ax.hist(
            maxloc, bins=bins, histtype="step", label="max", linewidth=2, cumulative=-1
        )
        minloc = cube_local_min[cube_local_min > 0]
        bins = np.arange((minloc.max() + 1) * 2) / 2
        ax.hist(
            minloc, bins=bins, histtype="step", label="min", linewidth=2, cumulative=-1
        )
        # minima restricted to the background region of the purity segmap
        minloc2 = cube_local_min[:, self.segmap_purity._data == 0]
        minloc2 = minloc2[minloc2 > 0]
        ax.hist(
            minloc2,
            bins=bins,
            histtype="step",
            label="min filt",
            linewidth=2,
            cumulative=-1,
        )
        ax.legend()
        ax.set_title("Cumulative histogram of min/max loc")
def timestat(self, table=False):
"""Print CPU usage by steps.
If ``table`` is True, an astropy.table.Table is returned.
"""
if table:
name = []
exdate = []
extime = []
tot = 0
for s in self.steps.items():
if "execution_date" in s[1].meta.keys():
name.append(s[1].method_name)
exdate.append(s[1].meta["execution_date"])
t = s[1].meta["runtime"]
tot += t
extime.append(datetime.timedelta(seconds=t))
name.append("Total")
exdate.append("")
extime.append(str(datetime.timedelta(seconds=tot)))
return Table(
data=[name, exdate, extime],
names=["Step", "Exec Date", "Exec Time"],
masked=True,
)
else:
tot = 0
for s in self.steps.items():
name = s[1].method_name
if "execution_date" in s[1].meta.keys():
exdate = s[1].meta["execution_date"]
t = s[1].meta["runtime"]
tot += t
extime = datetime.timedelta(seconds=t)
self.logger.info(
"%s executed: %s run time: %s", name, exdate, str(extime)
)
self.logger.info(
"*** Total run time: %s", str(datetime.timedelta(seconds=tot))
)
def stat(self):
"""Print detection summary."""
d = self._get_stat()
self.logger.info(
"ORIGIN PCA pfa %.2f Back Purity: %.2f "
"Threshold: %.2f Bright Purity %.2f Threshold %.2f",
d["pca"],
d["back_purity"],
d["back_threshold"],
d["bright_purity"],
d["bright_threshold"],
)
self.logger.info("Nb of detected lines: %d", d["tot_nlines"])
self.logger.info(
"Nb of sources Total: %d Background: %d Cont: %d",
d["tot_nsources"],
d["back_nsources"],
d["cont_nsources"],
)
self.logger.info(
"Nb of sources detected in faint (after PCA): %d "
"in std (before PCA): %d",
d["faint_nsources"],
d["bright_nsources"],
)
    def _get_stat(self):
        """Collect the detection summary used by :meth:`stat`.

        Returns a dict with the main tuning parameters and the source/line
        counts split by background/continuum and bright/faint.
        """
        p = self.param
        cat = self.Cat3_sources
        if cat:
            # seg_label == 0 means the source sits on the background region
            # of the segmentation; comp == 1 flags complementary (std,
            # pre-PCA) detections.
            back = cat[cat["seg_label"] == 0]
            cont = cat[cat["seg_label"] > 0]
            bright = cat[cat["comp"] == 1]
            faint = cat[cat["comp"] == 0]
            return dict(
                pca=p["compute_PCA_threshold"]["params"]["pfa_test"],
                back_purity=p["purity"],
                back_threshold=p["threshold"],
                bright_purity=p["purity_std"],
                bright_threshold=p["threshold_std"],
                tot_nlines=len(self.Cat3_lines),
                tot_nsources=len(cat),
                back_nsources=len(back),
                cont_nsources=len(cont),
                faint_nsources=len(faint),
                bright_nsources=len(bright),
            )
histO2 : list of arrays (one per PCA area) PCA histogram (step03). binO2 : list of arrays (one per PCA area) Bins for the PCA histogram (step03). thresO2 : list of float For each area, threshold value (step03). meaO2 : list of float Location parameter of the Gaussian fit used to estimate the threshold (step03). stdO2 : list of float Scale parameter of the Gaussian fit used to estimate the threshold (step03). cube_faint : `~mpdaf.obj.Cube` Projection on the eigenvectors associated to the lower eigenvalues of the data cube (representing the faint signal). Result of step04. mapO2 : `~mpdaf.obj.Image` The numbers of iterations used by testO2 for each spaxel. Result of step04. cube_correl : `~mpdaf.obj.Cube` Cube of T_GLR values (step05). cube_profile : `~mpdaf.obj.Cube` (type int) PSF profile associated to the T_GLR (step05). maxmap : `~mpdaf.obj.Image` Map of maxima along the wavelength axis (step05). cube_local_max : `~mpdaf.obj.Cube` Local maxima from max correlation (step05). cube_local_min : `~mpdaf.obj.Cube` Local maxima from min correlation (step05). threshold : float Estimated threshold (step06). Pval : `astropy.table.Table` Table with the purity results for each threshold (step06): - PVal_r : The purity function - index_pval : index value to plot - Det_m : Number of detections (-DATA) - Det_M : Number of detections (+DATA) Cat0 : `astropy.table.Table` Catalog returned by step07 Pval_comp : `astropy.table.Table` Table with the purity results for each threshold in compl (step08): - PVal_r : The purity function - index_pval : index value to plot - Det_m : Number of detections (-DATA) - Det_M : Number of detections (+DATA) Cat1 : `astropy.table.Table` Catalog returned by step08 spectra : list of `~mpdaf.obj.Spectrum` Estimated lines. Result of step09. Cat2 : `astropy.table.Table` Catalog returned by step09. 
# stdout & file logger # dict of Step instances, indexed by step names # dict containing the data attributes of each step, to expose them on # the ORIGIN object # Instantiate the step object, give it a step number # force its signature to be the same as step.run (without the # ORIGIN instance), which allows to see its arguments and their # default value. # Insert the __call__ method of the step in the ORIGIN object. This # allows to run a step with a method like "step01_preprocessing". # MUSE data cube # RA-DEC coordinates # spectral coordinates # List of spectral profile # FSF # additional images # Use __getattr__ to provide access to the steps data attributes # via the ORIGIN object. This will also trigger the loading of # the objects if needed. # Flux - set to 0 the Nan # variance - set to Inf the Nan Create a ORIGIN object. An Origin object is composed by: - cube data (raw data and covariance) - 1D dictionary of spectral profiles - MUSE PSF - parameters used to segment the cube in different zones. Parameters ---------- cube : str Cube FITS file name fieldmap : str FITS file containing the field map (mosaic) profiles : str FITS of spectral profiles If None, a default dictionary of 20 profiles is used. PSF : str Cube FITS filename containing a MUSE PSF per wavelength. If None, PSF are computed with a Moffat function (13x13 pixels, beta=2.6, fwhm1=0.76, fwhm2=0.66, lambda1=4750, lambda2=7000) LBDA_FWHM_PSF: list of float Value of the FWMH of the PSF in pixel for each wavelength step (mean of the fields). FWHM_PSF : list of float FWHM of the PSFs in pixels, one per field. PSF_size : int Spatial size of the PSF (when reconstructed from the cube header). name : str Name of this session and basename for the sources. ORIGIN.write() method saves the session in a folder that has this name. The ORIGIN.load() method will be used to load a session, continue it or create a new from it. loglevel : str Level for the logger (defaults to DEBUG). 
logcolor : bool Use color for the logger levels. Load a previous session of ORIGIN. ORIGIN.write() method saves a session in a folder that has the name of the ORIGIN object (self.name). Parameters ---------- folder : str Folder name (with the relative path) where the ORIGIN data have been stored. newname : str New name for this session. This parameter lets the user to load a previous session but continue in a new one. If None, the user will continue the loaded session. loglevel : str Level for the logger (by default reuse the saved level). logcolor : bool Use color for the logger levels. # step0 # copy outpath to the new path # special case for step3 Prints the processing log. Prints the processing status. # Remove the handlers before adding a new one Set the logging level for the console logger. Number of area (segmentation) for the PCA. Estimated threshold used to detect lines on local maxima of max correl. Estimated threshold used to detect complementary lines on local maxima of std cube. Read the list of spectral profiles. # check that the profiles have the same size Read the list of FWHM of the spectral profiles. Read FSF cube(s), with fieldmap in the case of MUSE mosaic. There are two ways to specify the PSF informations: - with the ``PSF``, ``FWHM_PSF``, and ``LBDA_FWHM`` parameters. - or read from the cube header and fieldmap. If there are multiple fields, for a mosaic, we also need weight maps. If the cube contains a FSF model and a fieldmap is given, these weight maps are computed automatically. Parameters ---------- cube : mpdaf.obj.Cube The input datacube. fieldmap : str FITS file containing the field map (mosaic). wfields : list of str List of weight maps (one per fields in the case of MUSE mosaic). PSF : str or list of str Cube FITS filename containing a MUSE PSF per wavelength, or a list of filenames for multiple fields (mosaic). LBDA_FWHM_PSF: list of float Value of the FWMH of the PSF in pixel for each wavelength step (mean of the fields). 
FWHM_PSF : list of float FWHM of the PSFs in pixels, one per field. PSF_size : int Spatial size of the PSF (when reconstructed from the cube header). # FSF created from FSF*** keywords # just one FSF # mean of the fwhm of the FSF in pixel # weighted field map # mean of the fwhm of the FSF in pixel Save the current session in a folder that will have the name of the ORIGIN object (self.name). The ORIGIN.load(folder, newname=None) method will be used to load a session. The parameter newname will let the user to load a session but continue in a new one. Parameters ---------- path : str Path where the folder (self.name) will be stored. erase : bool Remove the folder if it exists. # adapt session if path changes # copy outpath to the new path # PSF # parameters in .yaml # step3 - saving this manually for now Plot the 2D segmentation for PCA from self.step02_areas() on the test used to perform this segmentation. Parameters ---------- ax : matplotlib.Axes The Axes instance in which the image is drawn. kwargs : matplotlib.artist.Artist Optional extra keyword/value arguments to be passed to ``ax.imshow()``. Plot the histogram and the threshold for the starting point of the PCA. Parameters ---------- log10 : bool Draw histogram in logarithmic scale or not ncol : int Number of colomns in the subplots legend : bool If true, write pfa and threshold values as legend xlim : (float, float) Set the data limits for the x-axes fig : matplotlib.Figure Figure instance in which the image is drawn **fig_kw : matplotlib.artist.Artist All additional keyword arguments are passed to the figure() call. # Fine-tune figure Plot the threshold value according to the area. Median Absolute Deviation is used to find outliers. Parameters ---------- cutoff : float Median Absolute Deviation cutoff ax : matplotlib.Axes The Axes instance in which the image is drawn Plot the histogram and the threshold for the starting point of the PCA. 
Parameters ---------- area : int in [1, nbAreas] Area ID pfa_test : float or str PFA of the test (if 'step03', the value set during step03 is used) log10 : bool Draw histogram in logarithmic scale or not legend : bool If true, write pfa and threshold values as legend xlim : (float, float) Set the data limits for the x-axis ax : matplotlib.Axes Axes instance in which the image is drawn # limits of each spatial zone # Data in this spatio-spectral zone # Compute_PCA_threshold Plot at a given iteration (or at the end) the number of times a spaxel got cleaned by the PCA. Parameters ---------- area: int in [1, nbAreas] if None draw the full map for all areas iteration : int Display the nuisance/bacground pixels at iteration k ax : matplotlib.Axes The Axes instance in which the image is drawn kwargs : matplotlib.artist.Artist Optional extra keyword/value arguments to be passed to ``ax.imshow()``. Draw number of sources per threshold computed in step06/step08. Parameters ---------- comp : bool If True, plot purity curves for the complementary lines (step08). ax : matplotlib.Axes The Axes instance in which the image is drawn. log10 : bool To draw histogram in logarithmic scale or not. legend : bool To draw the legend. Plot the narrow band images. Parameters ---------- src_ind : int Index of the object in self.Cat0. ax1 : matplotlib.Axes The Axes instance in which the NB image around the source is drawn. ax2 : matplotlib.Axes The Axes instance in which a other NB image for check is drawn. ax3 : matplotlib.Axes The Axes instance in which the difference is drawn. # Coordinates of the source # Larger spatial ranges for the plots # Coordinates in this window # spectral profile # length of the spectral profile # half-length of the spectral profile # spectral range # subcube for the plot # controle cube # (1/sqrt(2)) * difference of the 2 sububes Plot detected emission lines on the 2D map of maximum of the T_GLR values over the spectral channels. 
Parameters ---------- x : array Coordinates along the x-axis of the estimated lines in pixels. y : array Coordinates along the y-axis of the estimated lines in pixels. circle : bool If true, plot circles with a diameter equal to the mean of the fwhm of the PSF. vmin : float Minimum pixel value to use for the scaling. vmax : float Maximum pixel value to use for the scaling. title : str An optional title for the figure (None by default). ax : matplotlib.Axes the Axes instance in which the image is drawn kwargs : matplotlib.artist.Artist Optional arguments passed to ``ax.imshow()``. Plot the segmentation maps: - segmap_cont: segmentation map computed on the white-light image. - segmap_merged: segmentation map merged with the cont one and another one computed on the residual. - segmap_purity: combines self.segmap and a segmentation on the maxmap. - segmap_label: segmentation map used for the catalog, either the one given as input, otherwise self.segmap_cont. # TODO: this will be renamed to make_random_cmap in a future # version of photutils Plot the histograms of local maxima and minima. Print CPU usage by steps. If ``table`` is True, an astropy.table.Table is returned. Print detection summary. | 1.976744 | 2 |
cvutils/__init__.py | bikz05/cvutils | 0 | 6620658 | from .cvhighgui import imlist
from .cvhighgui import imreads
from .cvhighgui import imshow
from .cvos import imlist
from .cvimgproc import imresize
| from .cvhighgui import imlist
from .cvhighgui import imreads
from .cvhighgui import imshow
from .cvos import imlist
from .cvimgproc import imresize
| none | 1 | 1.060843 | 1 | |
CameraCalibration/src/CameraCalibrator.py | FRCTeam4500/Utilities | 0 | 6620659 | import numpy as np
import cv2
import sys
import os.path
import glob
import math
import json
from PyQt5.uic import loadUi
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QCheckBox, QVBoxLayout, QHBoxLayout, QPushButton, QFileDialog, QLineEdit, QWidget, QListWidget
class CameraCalibrator(QMainWindow):
def __init__(self):
super().__init__()
self.ui = None
self.load_ui()
self.resultWindow = ResultWindow()
def load_ui(self):
self.ui = loadUi('CameraCalibrator.ui', self)
self.editInputDir = self.ui.findChild(QLineEdit, 'editInputDir')
self.btnInputDir = self.ui.findChild(QPushButton, 'btnInputDir')
self.btnInputDir.clicked.connect(lambda: self.loadDir(self.editInputDir))
self.btnOutputDir = self.ui.findChild(QPushButton, 'btnOutputDir')
self.editOutputDir = self.ui.findChild(QLineEdit, 'editOutputDir')
self.btnOutputDir.clicked.connect(lambda: self.loadDir(self.editOutputDir))
self.editWidth = self.ui.findChild(QLineEdit, 'editWidth')
self.editHeight = self.ui.findChild(QLineEdit, 'editHeight')
self.btnStart = self.ui.findChild(QPushButton, 'btnStart')
self.btnStart.clicked.connect(self.startClicked)
self.checkAdaptive = self.ui.findChild(QCheckBox, 'checkAdaptive')
self.checkNormalize = self.ui.findChild(QCheckBox, 'checkNormalize')
self.checkFilter = self.ui.findChild(QCheckBox, 'checkFilter')
self.checkFast = self.ui.findChild(QCheckBox, 'checkFast')
self.flagDic = {
"CALIB_CB_ADAPTIVE_THRESH": cv2.CALIB_CB_ADAPTIVE_THRESH,
"CALIB_CB_NORMALIZE_IMAGE": cv2.CALIB_CB_NORMALIZE_IMAGE,
"CALIB_CB_FILTER_QUADS": cv2.CALIB_CB_FILTER_QUADS,
"CALIB_CB_FAST_CHECK": cv2.CALIB_CB_FAST_CHECK
}
self.selectedFlags = None
self.show()
def loadDir(self, editPath):
editPath.setText(QFileDialog.getExistingDirectory(self, "Select Directory"))
def startClicked(self):
try:
self.width = int(self.editWidth.text())
self.height = int(self.editHeight.text())
self.inputPath = self.editInputDir.text()
self.outputPath = self.editOutputDir.text()
self.selectedFlags = []
if self.checkAdaptive.isChecked():
self.selectedFlags.append(self.checkAdaptive.text())
if self.checkNormalize.isChecked():
self.selectedFlags.append(self.checkNormalize.text())
if self.checkFilter.isChecked():
self.selectedFlags.append(self.checkFilter.text())
if self.checkFast.isChecked():
self.selectedFlags.append(self.checkFast.text())
self.calibrate()
except ValueError:
print("ERROR: Please enter a valid number")
# Method source:
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
# https://github.com/ligerbots/VisionServer/blob/master/utils/camera_calibration.py
def calibrate(self):
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((self.width * self.height, 3), np.float32)
objp[:, :2] = np.mgrid[0:self.width, 0:self.height].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
total = 0
success = 0
failed = 0
failedList = []
images = glob.glob(os.path.join(self.inputPath, '*.jpg'))
for fname in images:
print('Processing file', fname)
img = cv2.imread(fname)
if img is None:
print('ERROR: Unable to read file', fname)
continue
self.shape = img.shape
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chess board corners
if len(self.selectedFlags) == 0:
ret, corners = cv2.findChessboardCorners(gray, (self.width, self.height), None)
elif len(self.selectedFlags) == 1:
ret, corners = cv2.findChessboardCorners(gray, (self.width, self.height), self.flagDic[self.selectedFlags[0]])
elif len(self.selectedFlags) == 2:
ret, corners = cv2.findChessboardCorners(gray, (self.width, self.height), self.flagDic[self.selectedFlags[0]] + self.flagDic[self.selectedFlags[1]])
elif len(self.selectedFlags) == 3:
ret, corners = cv2.findChessboardCorners(gray, (self.width, self.height), self.flagDic[self.selectedFlags[0]] + self.flagDic[self.selectedFlags[1]] + self.flagDic[self.selectedFlags[2]])
elif len(self.selectedFlags) == 4:
ret, corners = cv2.findChessboardCorners(gray, (self.width, self.height), self.flagDic[self.selectedFlags[0]] + self.flagDic[self.selectedFlags[1]] + self.flagDic[self.selectedFlags[2]] + self.flagDic[self.selectedFlags[3]])
# If found, add object points, image points (after refining them)
if ret == True:
objpoints.append(objp)
corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
imgpoints.append(corners2)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (self.width ,self.height), corners2,ret)
cv2.imshow('img',img)
if self.outputPath:
name = os.path.join(self.outputPath, os.path.basename(fname))
cv2.imwrite(name, img)
cv2.waitKey(500)
success += 1
else:
print(fname, 'failed')
failed += 1
failedList.append(fname)
total += 1
cv2.destroyAllWindows()
if not objpoints:
print("No useful images. Quitting...")
return None
print('Found {} useful images'.format(len(objpoints)))
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
print('reprojection error = ', ret)
print('image center = ({:.2f}, {:.2f})'.format(mtx[0][2], mtx[1][2]))
fov_x = math.degrees(2.0 * math.atan(self.shape[1] / 2.0 / mtx[0][0]))
fov_y = math.degrees(2.0 * math.atan(self.shape[0] / 2.0 / mtx[1][1]))
print('FOV = ({:.2f}, {:.2f}) degrees'.format(fov_x, fov_y))
self.resultWindow.setTotal(total)
self.resultWindow.setSuccess(success)
self.resultWindow.setFailed(failed)
self.resultWindow.setFOV(fov_x, fov_y)
self.resultWindow.setCenter(mtx[0][2], mtx[1][2])
self.resultWindow.setList(failedList)
self.resultWindow.show_ui()
print('mtx = ', mtx)
print('dist = ', dist)
if self.outputPath:
with open(os.path.join(self.outputPath, 'data.json'), 'w+') as f:
json.dump({"camera_matrix": mtx.tolist(), "distorsion": dist.tolist()}, f)
class ResultWindow(QMainWindow):
def __init__(self):
super().__init__()
self.ui = None
self.load_ui()
def load_ui(self):
self.ui = loadUi('Result.ui', self)
self.labelTotal = self.ui.findChild(QLabel, 'labelTotal')
self.labelSuccess = self.ui.findChild(QLabel, 'labelSuccess')
self.labelFailed = self.ui.findChild(QLabel, 'labelFailed')
self.listFailed = self.ui.findChild(QListWidget, 'listFailed')
self.labelFOV = self.ui.findChild(QLabel, 'labelFOV')
self.labelCenter = self.ui.findChild(QLabel, 'labelCenter')
def show_ui(self):
self.show()
def setTotal(self, total):
self.labelTotal.setText('Total: {}'.format(total))
def setSuccess(self, success):
self.labelSuccess.setText('Success: {}'.format(success))
def setFailed(self, failed):
self.labelFailed.setText('Failed: {}'.format(failed))
def setFOV(self, fovX, fovY):
self.labelFOV.setText('FOV: ({:.2f}, {:.2f})'.format(fovX, fovY))
def setCenter(self, centerX, centerY):
self.labelCenter.setText('Center: ({:.2f}, {:.2f})'.format(centerX, centerY))
def setList(self, list):
self.listFailed.addItems(list)
app = QApplication(sys.argv)
cameraCalibrator = CameraCalibrator()
sys.exit(app.exec_()) | import numpy as np
import cv2
import sys
import os.path
import glob
import math
import json
from PyQt5.uic import loadUi
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QCheckBox, QVBoxLayout, QHBoxLayout, QPushButton, QFileDialog, QLineEdit, QWidget, QListWidget
class CameraCalibrator(QMainWindow):
def __init__(self):
super().__init__()
self.ui = None
self.load_ui()
self.resultWindow = ResultWindow()
def load_ui(self):
self.ui = loadUi('CameraCalibrator.ui', self)
self.editInputDir = self.ui.findChild(QLineEdit, 'editInputDir')
self.btnInputDir = self.ui.findChild(QPushButton, 'btnInputDir')
self.btnInputDir.clicked.connect(lambda: self.loadDir(self.editInputDir))
self.btnOutputDir = self.ui.findChild(QPushButton, 'btnOutputDir')
self.editOutputDir = self.ui.findChild(QLineEdit, 'editOutputDir')
self.btnOutputDir.clicked.connect(lambda: self.loadDir(self.editOutputDir))
self.editWidth = self.ui.findChild(QLineEdit, 'editWidth')
self.editHeight = self.ui.findChild(QLineEdit, 'editHeight')
self.btnStart = self.ui.findChild(QPushButton, 'btnStart')
self.btnStart.clicked.connect(self.startClicked)
self.checkAdaptive = self.ui.findChild(QCheckBox, 'checkAdaptive')
self.checkNormalize = self.ui.findChild(QCheckBox, 'checkNormalize')
self.checkFilter = self.ui.findChild(QCheckBox, 'checkFilter')
self.checkFast = self.ui.findChild(QCheckBox, 'checkFast')
self.flagDic = {
"CALIB_CB_ADAPTIVE_THRESH": cv2.CALIB_CB_ADAPTIVE_THRESH,
"CALIB_CB_NORMALIZE_IMAGE": cv2.CALIB_CB_NORMALIZE_IMAGE,
"CALIB_CB_FILTER_QUADS": cv2.CALIB_CB_FILTER_QUADS,
"CALIB_CB_FAST_CHECK": cv2.CALIB_CB_FAST_CHECK
}
self.selectedFlags = None
self.show()
def loadDir(self, editPath):
editPath.setText(QFileDialog.getExistingDirectory(self, "Select Directory"))
def startClicked(self):
try:
self.width = int(self.editWidth.text())
self.height = int(self.editHeight.text())
self.inputPath = self.editInputDir.text()
self.outputPath = self.editOutputDir.text()
self.selectedFlags = []
if self.checkAdaptive.isChecked():
self.selectedFlags.append(self.checkAdaptive.text())
if self.checkNormalize.isChecked():
self.selectedFlags.append(self.checkNormalize.text())
if self.checkFilter.isChecked():
self.selectedFlags.append(self.checkFilter.text())
if self.checkFast.isChecked():
self.selectedFlags.append(self.checkFast.text())
self.calibrate()
except ValueError:
print("ERROR: Please enter a valid number")
# Method source:
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
# https://github.com/ligerbots/VisionServer/blob/master/utils/camera_calibration.py
def calibrate(self):
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((self.width * self.height, 3), np.float32)
objp[:, :2] = np.mgrid[0:self.width, 0:self.height].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
total = 0
success = 0
failed = 0
failedList = []
images = glob.glob(os.path.join(self.inputPath, '*.jpg'))
for fname in images:
print('Processing file', fname)
img = cv2.imread(fname)
if img is None:
print('ERROR: Unable to read file', fname)
continue
self.shape = img.shape
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chess board corners
if len(self.selectedFlags) == 0:
ret, corners = cv2.findChessboardCorners(gray, (self.width, self.height), None)
elif len(self.selectedFlags) == 1:
ret, corners = cv2.findChessboardCorners(gray, (self.width, self.height), self.flagDic[self.selectedFlags[0]])
elif len(self.selectedFlags) == 2:
ret, corners = cv2.findChessboardCorners(gray, (self.width, self.height), self.flagDic[self.selectedFlags[0]] + self.flagDic[self.selectedFlags[1]])
elif len(self.selectedFlags) == 3:
ret, corners = cv2.findChessboardCorners(gray, (self.width, self.height), self.flagDic[self.selectedFlags[0]] + self.flagDic[self.selectedFlags[1]] + self.flagDic[self.selectedFlags[2]])
elif len(self.selectedFlags) == 4:
ret, corners = cv2.findChessboardCorners(gray, (self.width, self.height), self.flagDic[self.selectedFlags[0]] + self.flagDic[self.selectedFlags[1]] + self.flagDic[self.selectedFlags[2]] + self.flagDic[self.selectedFlags[3]])
# If found, add object points, image points (after refining them)
if ret == True:
objpoints.append(objp)
corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
imgpoints.append(corners2)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (self.width ,self.height), corners2,ret)
cv2.imshow('img',img)
if self.outputPath:
name = os.path.join(self.outputPath, os.path.basename(fname))
cv2.imwrite(name, img)
cv2.waitKey(500)
success += 1
else:
print(fname, 'failed')
failed += 1
failedList.append(fname)
total += 1
cv2.destroyAllWindows()
if not objpoints:
print("No useful images. Quitting...")
return None
print('Found {} useful images'.format(len(objpoints)))
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
print('reprojection error = ', ret)
print('image center = ({:.2f}, {:.2f})'.format(mtx[0][2], mtx[1][2]))
fov_x = math.degrees(2.0 * math.atan(self.shape[1] / 2.0 / mtx[0][0]))
fov_y = math.degrees(2.0 * math.atan(self.shape[0] / 2.0 / mtx[1][1]))
print('FOV = ({:.2f}, {:.2f}) degrees'.format(fov_x, fov_y))
self.resultWindow.setTotal(total)
self.resultWindow.setSuccess(success)
self.resultWindow.setFailed(failed)
self.resultWindow.setFOV(fov_x, fov_y)
self.resultWindow.setCenter(mtx[0][2], mtx[1][2])
self.resultWindow.setList(failedList)
self.resultWindow.show_ui()
print('mtx = ', mtx)
print('dist = ', dist)
if self.outputPath:
with open(os.path.join(self.outputPath, 'data.json'), 'w+') as f:
json.dump({"camera_matrix": mtx.tolist(), "distorsion": dist.tolist()}, f)
class ResultWindow(QMainWindow):
def __init__(self):
super().__init__()
self.ui = None
self.load_ui()
def load_ui(self):
self.ui = loadUi('Result.ui', self)
self.labelTotal = self.ui.findChild(QLabel, 'labelTotal')
self.labelSuccess = self.ui.findChild(QLabel, 'labelSuccess')
self.labelFailed = self.ui.findChild(QLabel, 'labelFailed')
self.listFailed = self.ui.findChild(QListWidget, 'listFailed')
self.labelFOV = self.ui.findChild(QLabel, 'labelFOV')
self.labelCenter = self.ui.findChild(QLabel, 'labelCenter')
def show_ui(self):
self.show()
def setTotal(self, total):
self.labelTotal.setText('Total: {}'.format(total))
def setSuccess(self, success):
self.labelSuccess.setText('Success: {}'.format(success))
def setFailed(self, failed):
self.labelFailed.setText('Failed: {}'.format(failed))
def setFOV(self, fovX, fovY):
self.labelFOV.setText('FOV: ({:.2f}, {:.2f})'.format(fovX, fovY))
def setCenter(self, centerX, centerY):
self.labelCenter.setText('Center: ({:.2f}, {:.2f})'.format(centerX, centerY))
def setList(self, list):
self.listFailed.addItems(list)
app = QApplication(sys.argv)
cameraCalibrator = CameraCalibrator()
sys.exit(app.exec_()) | en | 0.696411 | # Method source: # https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html # https://github.com/ligerbots/VisionServer/blob/master/utils/camera_calibration.py # termination criteria # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) # Arrays to store object points and image points from all the images. # 3d point in real world space # 2d points in image plane. # Find the chess board corners # If found, add object points, image points (after refining them) # Draw and display the corners | 2.217673 | 2 |
demo/richardson_lucy_demo.py | mosaic-group/PyLibAPR | 7 | 6620660 | <reponame>mosaic-group/PyLibAPR
import pyapr
def main():
"""
Read a selected APR from file and apply Richardson-Lucy deconvolution
"""
# Get input APR file path from gui
io_int = pyapr.filegui.InteractiveIO()
fpath_apr = io_int.get_apr_file_name()
# Instantiate APR and particles objects
apr = pyapr.APR()
parts = pyapr.ShortParticles()
# parts = pyapr.FloatParticles()
# Read from APR file
pyapr.io.read(fpath_apr, apr, parts)
# Copy particles to float
fparts = pyapr.FloatParticles()
fparts.copy(parts)
# Add a small offset to the particle values to avoid division by 0
offset = 1e-5 * fparts.max()
fparts += offset
# Display the input image
pyapr.viewer.parts_viewer(apr, fparts)
# Specify the PSF and number of iterations
psf = pyapr.numerics.filter.get_gaussian_stencil(size=5, sigma=1, ndims=3, normalize=True)
niter = 20
# Perform richardson-lucy deconvolution
output = pyapr.FloatParticles()
pyapr.numerics.richardson_lucy(apr, fparts, output, psf, niter, use_stencil_downsample=True,
normalize_stencil=True, resume=False)
# Alternative using total variation regularization:
# reg_factor = 1e-2
# pyapr.numerics.richardson_lucy_tv(apr, fparts, output, psf, niter, reg_factor, use_stencil_downsample=True,
# normalize_stencil=True, resume=False)
# Alternatively, if PyLibAPR is built with CUDA enabled and psf is of size (3, 3, 3) or (5, 5, 5)
# pyapr.numerics.richardson_lucy_cuda(apr, fparts, output, psf, niter, use_stencil_downsample=True,
# normalize_stencil=True, resume=False)
# Display the result
pyapr.viewer.parts_viewer(apr, output)
if __name__ == '__main__':
main()
| import pyapr
def main():
"""
Read a selected APR from file and apply Richardson-Lucy deconvolution
"""
# Get input APR file path from gui
io_int = pyapr.filegui.InteractiveIO()
fpath_apr = io_int.get_apr_file_name()
# Instantiate APR and particles objects
apr = pyapr.APR()
parts = pyapr.ShortParticles()
# parts = pyapr.FloatParticles()
# Read from APR file
pyapr.io.read(fpath_apr, apr, parts)
# Copy particles to float
fparts = pyapr.FloatParticles()
fparts.copy(parts)
# Add a small offset to the particle values to avoid division by 0
offset = 1e-5 * fparts.max()
fparts += offset
# Display the input image
pyapr.viewer.parts_viewer(apr, fparts)
# Specify the PSF and number of iterations
psf = pyapr.numerics.filter.get_gaussian_stencil(size=5, sigma=1, ndims=3, normalize=True)
niter = 20
# Perform richardson-lucy deconvolution
output = pyapr.FloatParticles()
pyapr.numerics.richardson_lucy(apr, fparts, output, psf, niter, use_stencil_downsample=True,
normalize_stencil=True, resume=False)
# Alternative using total variation regularization:
# reg_factor = 1e-2
# pyapr.numerics.richardson_lucy_tv(apr, fparts, output, psf, niter, reg_factor, use_stencil_downsample=True,
# normalize_stencil=True, resume=False)
# Alternatively, if PyLibAPR is built with CUDA enabled and psf is of size (3, 3, 3) or (5, 5, 5)
# pyapr.numerics.richardson_lucy_cuda(apr, fparts, output, psf, niter, use_stencil_downsample=True,
# normalize_stencil=True, resume=False)
# Display the result
pyapr.viewer.parts_viewer(apr, output)
if __name__ == '__main__':
main() | en | 0.663151 | Read a selected APR from file and apply Richardson-Lucy deconvolution # Get input APR file path from gui # Instantiate APR and particles objects # parts = pyapr.FloatParticles() # Read from APR file # Copy particles to float # Add a small offset to the particle values to avoid division by 0 # Display the input image # Specify the PSF and number of iterations # Perform richardson-lucy deconvolution # Alternative using total variation regularization: # reg_factor = 1e-2 # pyapr.numerics.richardson_lucy_tv(apr, fparts, output, psf, niter, reg_factor, use_stencil_downsample=True, # normalize_stencil=True, resume=False) # Alternatively, if PyLibAPR is built with CUDA enabled and psf is of size (3, 3, 3) or (5, 5, 5) # pyapr.numerics.richardson_lucy_cuda(apr, fparts, output, psf, niter, use_stencil_downsample=True, # normalize_stencil=True, resume=False) # Display the result | 2.828785 | 3 |
1930.py | gabzin/uri | 3 | 6620661 | x=list(map(int,input().split()))
print(sum(x)-3)
| x=list(map(int,input().split()))
print(sum(x)-3)
| none | 1 | 2.858114 | 3 | |
Statistics/stats_app.py | symbor/CatsBreeder | 0 | 6620662 | <reponame>symbor/CatsBreeder
import json
from flask import Flask, jsonify, request
import sqlite3
from multiprocessing import Value
import requests
from translate import Translator
API_key = '<KEY>'
breeds = {'sphy':'Sphynx', 'birm':'Birman' , 'emau':'Egyptian Mau' , 'ragd':'Ragdoll', 'abys':'Abyssinian' ,
'siam':'Siamese' , 'mcoo':'Maine Coon' , 'beng':'Bengal', 'bsho':'British Shorthair' , 'bomb':'Bombay' , 'rblu':'Russian Blue' , 'pers':'Persian'}
# Myślę, że te dane wystarczą
properties = ['name', 'origin','life_span','weight','temperament','description']
app = Flask(__name__)
db_connection = sqlite3.connect('CatsBreeder.db', check_same_thread=False)
db_cursor = db_connection.cursor()
# Definicja tabeli bazodanowej
db_cursor.execute(""
"CREATE TABLE if not exists CATEGORIZATION"
"(ID INTEGER PRIMARY KEY AUTOINCREMENT,"
"OP_ID INTEGER,"
"BREED TEXT,"
"PROPABILITY REAL,"
"OP_DATE date)")
db_connection.commit()
OP_counter = Value('i', 0)
@app.route('/')
def index():
return 'Test'
#Metoda pobierająca dane z usługi rozp. obrazów i zapisująca je w bazie
@app.route('/records', methods = ['GET', 'POST'])
def get_data():
if request.method == 'GET':
db_cursor.execute("SELECT * FROM CATEGORIZATION")
records = db_cursor.fetchall()
json_records = jsonify(records)
return json_records
elif request.method == 'POST':
data_dict = json.loads(request.get_data())
kys = list(data_dict.keys())
val = list(data_dict.values())
with OP_counter.get_lock():
OP_counter.value += 1
OP_ID = OP_counter.value
# Insert rekordów z usługi rozpoznawania twarzy
for i in range(len(data_dict)):
breed = kys[i]
propability = val[i]
db_cursor.execute("INSERT INTO CATEGORIZATION VALUES (NULL, (:first), (:second), (:third), CURRENT_TIMESTAMP)",
{'first': OP_ID,
'second': breed,
'third': propability
})
db_connection.commit()
# Odnajdywanie rasy o największym prawdopodobieństwie
max_breed = max(data_dict.items(), key=lambda x : x[1])
for keys, vals in breeds.items():
if vals == max_breed[0]:
our_breed = keys
breed_info = get_breed_info(our_breed)
prop = float(max_breed[1]) * 100
rounded_prop = round(prop, 1)
cur_prop = {"current_propability": str(rounded_prop)}
other_prop = {
"second_breed": list(data_dict)[1],
"third_breed": list(data_dict)[2]
}
cur_prop.update(other_prop)
breed_info.update(cur_prop)
return json.dumps(breed_info)
#Wykorzystanie danych z APi i statystyk z bazy
# @app.route('/breeds/<breed>', methods = ['GET','POST'])
def get_breed_info(breed):
if breed in breeds.keys():
our_breed = breeds[breed]
#rekord zawierające informacje jeśli ich brak daje zero lub domyślne daty
db_cursor.execute("SELECT COUNT(*), ifnull(MAX(PROPABILITY),0), ifnull(AVG(PROPABILITY),0), ifnull(MIN(PROPABILITY),0), \
ifnull(MAX(OP_DATE), '09-09-9999 00:00:00'), ifnull(MIN(OP_DATE),'11-11-1111 00:00:00') FROM CATEGORIZATION WHERE BREED = (:breed)",
{'breed': our_breed })
db_stats = db_cursor.fetchall()
stats_dict = {"record_count": db_stats[0][0], "max_propability": db_stats[0][1], "avg_propability": db_stats[0][2],
"min_propability": db_stats[0][3], "last_date": db_stats[0][4], "first_date": db_stats[0][5]}
api_request = requests.get('https://api.thecatapi.com/v1/breeds/{}'.format(breed))
api_data = api_request.json()
#Wybranie poszczególnych cech można rozszerzyć za pomocą listy wyżej
print("API_DATA: ", api_data)
print("PROPERTIES ", properties)
# problem z alt_names, nie każdy kotek ma alternatywną nazwę rasy?
filtered_api_data = { i: api_data[i] for i in properties }
translator = Translator(to_lang="pl")
for key, val in filtered_api_data.items():
if key == 'weight':
filtered_api_data[key] = filtered_api_data[key]['metric']
else:
# translating na razie działa co 24 godziny i jest ograniczone, więc
# albo znajdziemy inne api albo ?
# filtered_api_data[key] = translator.translate(val)
print(val, " -> ", filtered_api_data[key] )
keys_values = stats_dict.items()
stats_dict = {str(key): str(value) for key, value in keys_values}
combined_date = {}
combined_date.update(filtered_api_data)
combined_date.update(stats_dict)
return combined_date
return
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=9000)
| import json
from flask import Flask, jsonify, request
import sqlite3
from multiprocessing import Value
import requests
from translate import Translator
API_key = '<KEY>'
breeds = {'sphy':'Sphynx', 'birm':'Birman' , 'emau':'Egyptian Mau' , 'ragd':'Ragdoll', 'abys':'Abyssinian' ,
'siam':'Siamese' , 'mcoo':'Maine Coon' , 'beng':'Bengal', 'bsho':'British Shorthair' , 'bomb':'Bombay' , 'rblu':'Russian Blue' , 'pers':'Persian'}
# Myślę, że te dane wystarczą
properties = ['name', 'origin','life_span','weight','temperament','description']
app = Flask(__name__)
db_connection = sqlite3.connect('CatsBreeder.db', check_same_thread=False)
db_cursor = db_connection.cursor()
# Definicja tabeli bazodanowej
db_cursor.execute(""
"CREATE TABLE if not exists CATEGORIZATION"
"(ID INTEGER PRIMARY KEY AUTOINCREMENT,"
"OP_ID INTEGER,"
"BREED TEXT,"
"PROPABILITY REAL,"
"OP_DATE date)")
db_connection.commit()
OP_counter = Value('i', 0)
@app.route('/')
def index():
return 'Test'
#Metoda pobierająca dane z usługi rozp. obrazów i zapisująca je w bazie
@app.route('/records', methods = ['GET', 'POST'])
def get_data():
if request.method == 'GET':
db_cursor.execute("SELECT * FROM CATEGORIZATION")
records = db_cursor.fetchall()
json_records = jsonify(records)
return json_records
elif request.method == 'POST':
data_dict = json.loads(request.get_data())
kys = list(data_dict.keys())
val = list(data_dict.values())
with OP_counter.get_lock():
OP_counter.value += 1
OP_ID = OP_counter.value
# Insert rekordów z usługi rozpoznawania twarzy
for i in range(len(data_dict)):
breed = kys[i]
propability = val[i]
db_cursor.execute("INSERT INTO CATEGORIZATION VALUES (NULL, (:first), (:second), (:third), CURRENT_TIMESTAMP)",
{'first': OP_ID,
'second': breed,
'third': propability
})
db_connection.commit()
# Odnajdywanie rasy o największym prawdopodobieństwie
max_breed = max(data_dict.items(), key=lambda x : x[1])
for keys, vals in breeds.items():
if vals == max_breed[0]:
our_breed = keys
breed_info = get_breed_info(our_breed)
prop = float(max_breed[1]) * 100
rounded_prop = round(prop, 1)
cur_prop = {"current_propability": str(rounded_prop)}
other_prop = {
"second_breed": list(data_dict)[1],
"third_breed": list(data_dict)[2]
}
cur_prop.update(other_prop)
breed_info.update(cur_prop)
return json.dumps(breed_info)
#Wykorzystanie danych z APi i statystyk z bazy
# @app.route('/breeds/<breed>', methods = ['GET','POST'])
def get_breed_info(breed):
if breed in breeds.keys():
our_breed = breeds[breed]
#rekord zawierające informacje jeśli ich brak daje zero lub domyślne daty
db_cursor.execute("SELECT COUNT(*), ifnull(MAX(PROPABILITY),0), ifnull(AVG(PROPABILITY),0), ifnull(MIN(PROPABILITY),0), \
ifnull(MAX(OP_DATE), '09-09-9999 00:00:00'), ifnull(MIN(OP_DATE),'11-11-1111 00:00:00') FROM CATEGORIZATION WHERE BREED = (:breed)",
{'breed': our_breed })
db_stats = db_cursor.fetchall()
stats_dict = {"record_count": db_stats[0][0], "max_propability": db_stats[0][1], "avg_propability": db_stats[0][2],
"min_propability": db_stats[0][3], "last_date": db_stats[0][4], "first_date": db_stats[0][5]}
api_request = requests.get('https://api.thecatapi.com/v1/breeds/{}'.format(breed))
api_data = api_request.json()
#Wybranie poszczególnych cech można rozszerzyć za pomocą listy wyżej
print("API_DATA: ", api_data)
print("PROPERTIES ", properties)
# problem z alt_names, nie każdy kotek ma alternatywną nazwę rasy?
filtered_api_data = { i: api_data[i] for i in properties }
translator = Translator(to_lang="pl")
for key, val in filtered_api_data.items():
if key == 'weight':
filtered_api_data[key] = filtered_api_data[key]['metric']
else:
# translating na razie działa co 24 godziny i jest ograniczone, więc
# albo znajdziemy inne api albo ?
# filtered_api_data[key] = translator.translate(val)
print(val, " -> ", filtered_api_data[key] )
keys_values = stats_dict.items()
stats_dict = {str(key): str(value) for key, value in keys_values}
combined_date = {}
combined_date.update(filtered_api_data)
combined_date.update(stats_dict)
return combined_date
return
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=9000) | pl | 0.990498 | # Myślę, że te dane wystarczą # Definicja tabeli bazodanowej #Metoda pobierająca dane z usługi rozp. obrazów i zapisująca je w bazie # Insert rekordów z usługi rozpoznawania twarzy # Odnajdywanie rasy o największym prawdopodobieństwie #Wykorzystanie danych z APi i statystyk z bazy # @app.route('/breeds/<breed>', methods = ['GET','POST']) #rekord zawierające informacje jeśli ich brak daje zero lub domyślne daty #Wybranie poszczególnych cech można rozszerzyć za pomocą listy wyżej # problem z alt_names, nie każdy kotek ma alternatywną nazwę rasy? # translating na razie działa co 24 godziny i jest ograniczone, więc # albo znajdziemy inne api albo ? # filtered_api_data[key] = translator.translate(val) | 2.680553 | 3 |
image_processing3_4.py | pavi-ninjaac/OpenCV | 0 | 6620663 | <reponame>pavi-ninjaac/OpenCV
#import the library
import cv2
import numpy as np
from skimage.util import random_noise
#adding nosie to the image
img=cv2.imread('red_car.jpg')
# random_noise() method will convert image in [0, 255] to [0, 1.0],
# inherently it use np.random.normal() to create normal distribution
# and adds the generated noised back to image
img_gaussian= random_noise(img, mode='gaussian', var=0.05**2)
img_gaussian= (255*img_gaussian)
#adding salt and peper noise
img_s_and_p=random_noise(img,mode='s&p')
cv2.imshow("The image with noise of gaussian ",img_gaussian)
cv2.imshow("The image with nice of salt and peper",img_s_and_p)
cv2.waitKey(0)
| #import the library
import cv2
import numpy as np
from skimage.util import random_noise
#adding nosie to the image
img=cv2.imread('red_car.jpg')
# random_noise() method will convert image in [0, 255] to [0, 1.0],
# inherently it use np.random.normal() to create normal distribution
# and adds the generated noised back to image
img_gaussian= random_noise(img, mode='gaussian', var=0.05**2)
img_gaussian= (255*img_gaussian)
#adding salt and peper noise
img_s_and_p=random_noise(img,mode='s&p')
cv2.imshow("The image with noise of gaussian ",img_gaussian)
cv2.imshow("The image with nice of salt and peper",img_s_and_p)
cv2.waitKey(0) | en | 0.686102 | #import the library #adding nosie to the image # random_noise() method will convert image in [0, 255] to [0, 1.0], # inherently it use np.random.normal() to create normal distribution # and adds the generated noised back to image #adding salt and peper noise | 3.548669 | 4 |
ion/services/sa/tcaa/test/test_remote_endpoint.py | ooici/coi-services | 3 | 6620664 | #!/usr/bin/env python
"""
@package ion.services.sa.tcaa.test.test_remote_endpoint
@file ion/services/sa/tcaa/test/test_remote_endpoint.py
@author <NAME>
@brief Test cases for 2CAA remote endpoint.
"""
__author__ = '<NAME>'
# Pyon log and config objects.
from pyon.public import log
from pyon.public import CFG
# Standard imports.
import time
import os
import signal
import time
import unittest
from datetime import datetime
import uuid
import socket
import re
import random
# Pyon exceptions.
from pyon.core.exception import IonException
from pyon.core.exception import BadRequest
from pyon.core.exception import ServerError
from pyon.core.exception import NotFound
# 3rd party imports.
import gevent
from gevent import spawn
from gevent.event import AsyncResult
from nose.plugins.attrib import attr
from mock import patch
# Pyon unittest support.
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.unit_test import PyonTestCase
from pyon.public import IonObject
from pyon.event.event import EventPublisher, EventSubscriber
from pyon.util.context import LocalContextMixin
from ion.services.sa.tcaa.remote_endpoint import RemoteEndpoint
from ion.services.sa.tcaa.remote_endpoint import RemoteEndpointClient
from interface.services.icontainer_agent import ContainerAgentClient
from ion.services.sa.tcaa.r3pc import R3PCServer
from ion.services.sa.tcaa.r3pc import R3PCClient
from interface.objects import TelemetryStatusType
from interface.objects import UserInfo
from pyon.agent.agent import ResourceAgentClient
from pyon.agent.agent import ResourceAgentState
from pyon.agent.agent import ResourceAgentEvent
from interface.objects import AgentCommand
from ion.agents.instrument.driver_int_test_support import DriverIntegrationTestSupport
DEV_ADDR = CFG.device.sbe37.host
DEV_PORT = CFG.device.sbe37.port
DATA_PORT = CFG.device.sbe37.port_agent_data_port
CMD_PORT = CFG.device.sbe37.port_agent_cmd_port
PA_BINARY = CFG.device.sbe37.port_agent_binary
DELIM = CFG.device.sbe37.delim
WORK_DIR = CFG.device.sbe37.workdir
DRV_URI = CFG.device.sbe37.dvr_egg
from ion.agents.instrument.test.agent_test_constants import IA_RESOURCE_ID
from ion.agents.instrument.test.agent_test_constants import IA_NAME
from ion.agents.instrument.test.agent_test_constants import IA_MOD
from ion.agents.instrument.test.agent_test_constants import IA_CLS
from ion.agents.instrument.test.load_test_driver_egg import load_egg
DVR_CONFIG = load_egg()
DRV_MOD = DVR_CONFIG['dvr_mod']
DRV_CLS = DVR_CONFIG['dvr_cls']
# This import will dynamically load the driver egg. It is needed for the MI includes below
from mi.instrument.seabird.sbe37smb.ooicore.driver import SBE37ProtocolEvent
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_process_queued
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_process_online
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_terrestrial_late
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_service_commands
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_resource_commands
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_bad_service_name_resource_id
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_bad_commands
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_resource_command_sequence
"""
Example code to dynamically create client to container service.
https://github.com/ooici/coi-services/blob/master/ion/services/coi/agent_management_service.py#L531
"""
class FakeProcess(LocalContextMixin):
"""
A fake process used because the test case is not an ion process.
"""
name = ''
id=''
process_type = ''
@attr('INT', group='sa')
@patch.dict(CFG, {'endpoint':{'receive':{'timeout': 60}}})
@unittest.skip('Skipping as this is now out of scope and takes a long time.')
class TestRemoteEndpoint(IonIntegrationTestCase):
"""
Test cases for 2CAA terrestrial endpoint.
"""
def setUp(self):
"""
Start fake terrestrial components and add cleanup.
Start terrestrial server and retrieve port.
Set internal variables.
Start container.
Start deployment.
Start container agent.
Spawn remote endpoint process.
Create remote endpoint client and retrieve remote server port.
Create event publisher.
"""
self._terrestrial_server = R3PCServer(self.consume_req, self.terrestrial_server_close)
self._terrestrial_client = R3PCClient(self.consume_ack, self.terrestrial_client_close)
self.addCleanup(self._terrestrial_server.stop)
self.addCleanup(self._terrestrial_client.stop)
self._other_port = self._terrestrial_server.start('*', 0)
log.debug('Terrestrial server binding to *:%i', self._other_port)
self._other_host = 'localhost'
self._platform_resource_id = 'abc123'
self._resource_id = 'fake_id'
self._no_requests = 10
self._requests_sent = {}
self._results_recv = {}
self._no_telem_events = 0
self._done_evt = AsyncResult()
self._done_telem_evts = AsyncResult()
self._cmd_tx_evt = AsyncResult()
# Start container.
log.debug('Staring capability container.')
self._start_container()
# Bring up services in a deploy file (no need to message).
log.info('Staring deploy services.')
self.container.start_rel_from_url('res/deploy/r2deploy.yml')
# Create a container client.
log.debug('Creating container client.')
container_client = ContainerAgentClient(node=self.container.node,
name=self.container.name)
# Create agent config.
endpoint_config = {
'other_host' : self._other_host,
'other_port' : self._other_port,
'this_port' : 0,
'platform_resource_id' : self._platform_resource_id
}
# Spawn the remote enpoint process.
log.debug('Spawning remote endpoint process.')
re_pid = container_client.spawn_process(
name='remote_endpoint_1',
module='ion.services.sa.tcaa.remote_endpoint',
cls='RemoteEndpoint',
config=endpoint_config)
log.debug('Endpoint pid=%s.', str(re_pid))
# Create an endpoint client.
self.re_client = RemoteEndpointClient(
process=FakeProcess(),
to_name=re_pid)
log.debug('Got re client %s.', str(self.re_client))
# Remember the remote port.
self._this_port = self.re_client.get_port()
log.debug('The remote port is: %i.', self._this_port)
# Start the event publisher.
self._event_publisher = EventPublisher()
######################################################################
# Helpers.
######################################################################
def on_link_up(self):
"""
Called by a test to simulate turning the link on.
"""
log.debug('Terrestrial client connecting to localhost:%i.',
self._this_port)
self._terrestrial_client.start('localhost', self._this_port)
# Publish a link up event to be caught by the endpoint.
log.debug('Publishing telemetry event.')
self._event_publisher.publish_event(
event_type='PlatformTelemetryEvent',
origin=self._platform_resource_id,
status = TelemetryStatusType.AVAILABLE)
def on_link_down(self):
"""
Called by a test to simulate turning the link off.
"""
self._terrestrial_client.stop()
# Publish a link down event to be caught by the endpoint.
log.debug('Publishing telemetry event.')
self._event_publisher.publish_event(
event_type='PlatformTelemetryEvent',
origin=self._platform_resource_id,
status = TelemetryStatusType.UNAVAILABLE)
def consume_req(self, res):
"""
Consume a terrestrial request setting async event when necessary.
"""
command_id = res['command_id']
self._results_recv[command_id] = res
if len(self._results_recv) == self._no_requests:
self._done_evt.set()
def consume_ack(self, cmd):
"""
Consume terrestrial ack setting async event when necessary.
"""
self._requests_sent[cmd.command_id] = cmd
if len(self._requests_sent) == self._no_requests:
self._cmd_tx_evt.set()
def terrestrial_server_close(self):
"""
Callback when terrestrial server closes.
"""
pass
def terrestrial_client_close(self):
"""
Callback when terrestrial client closes.
"""
pass
def make_fake_command(self, no):
"""
Build a fake command for use in tests.
"""
cmdstr = 'fake_cmd_%i' % no
cmd = IonObject('RemoteCommand',
resource_id=self._resource_id,
command=cmdstr,
args=['arg1', 23],
kwargs={'worktime':3},
command_id = str(uuid.uuid4()))
return cmd
def start_agent(self):
"""
Start an instrument agent and client.
"""
log.info('Creating driver integration test support:')
log.info('driver module: %s', DRV_MOD)
log.info('driver class: %s', DRV_CLS)
log.info('device address: %s', DEV_ADDR)
log.info('device port: %s', DEV_PORT)
log.info('log delimiter: %s', DELIM)
log.info('work dir: %s', WORK_DIR)
self._support = DriverIntegrationTestSupport(DRV_MOD,
DRV_CLS,
DEV_ADDR,
DEV_PORT,
DATA_PORT,
CMD_PORT,
PA_BINARY,
DELIM,
WORK_DIR)
# Start port agent, add stop to cleanup.
port = self._support.start_pagent()
log.info('Port agent started at port %i',port)
# Configure driver to use port agent port number.
DVR_CONFIG['comms_config'] = {
'addr' : 'localhost',
'port' : port,
'cmd_port' : CMD_PORT
}
self.addCleanup(self._support.stop_pagent)
# Create agent config.
agent_config = {
'driver_config' : DVR_CONFIG,
'stream_config' : {},
'agent' : {'resource_id': IA_RESOURCE_ID},
'test_mode' : True
}
# Start instrument agent.
log.debug("Starting IA.")
container_client = ContainerAgentClient(node=self.container.node,
name=self.container.name)
ia_pid = container_client.spawn_process(name=IA_NAME,
module=IA_MOD,
cls=IA_CLS,
config=agent_config)
log.info('Agent pid=%s.', str(ia_pid))
# Start a resource agent client to talk with the instrument agent.
self._ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
log.info('Got ia client %s.', str(self._ia_client))
######################################################################
# Tests.
######################################################################
def test_process_queued(self):
"""
test_process_queued
Test that queued commands are forwarded to and handled by
remote endpoint when link comes up.
"""
# Create and enqueue some requests.
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
self._terrestrial_client.enqueue(cmd)
# Publish a telemetry available event.
# This will cause the endpoint clients to wake up and connect.
self.on_link_up()
# Wait for all the enqueued commands to be acked.
# Wait for all the responses to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Publish a telemetry unavailable event.
# This will cause the endpoint clients to disconnect and go to sleep.
self.on_link_down()
# Confirm the results match the commands sent.
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
def test_process_online(self):
"""
test_process_online
Test commands are forwarded and handled while link is up.
"""
# Publish a telemetry available event.
# This will cause the endpoint clients to wake up and connect.
self.on_link_up()
# Wait for the link to be up.
# The remote side does not publish public telemetry events
# so we can't wait for that.
gevent.sleep(1)
# Create and enqueue some requests.
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
self._terrestrial_client.enqueue(cmd)
# Wait for all the enqueued commands to be acked.
# Wait for all the responses to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Publish a telemetry unavailable event.
# This will cause the endpoint clients to disconnect and go to sleep.
self.on_link_down()
# Confirm the results match the commands sent.
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
def test_terrestrial_late(self):
"""
test_terrestrial_late
Test queued commands are forwarded and handled by remote endpoint
when terrestrial side is late to come up.
"""
# Publish a telemetry available event.
# This will cause the endpoint clients to wake up and connect.
self.on_link_up()
# Wait for the link to be up.
# The remote side does not publish public telemetry events
# so we can't wait for that.
gevent.sleep(1)
# Manually stop the terrestrial endpoint.
# This will cause it to be unavailable when commands are queued
# to simulate stability during asynchronous wake ups.
self._terrestrial_server.stop()
self._terrestrial_client.stop()
# Create and enqueue some requests.
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
self._terrestrial_client.enqueue(cmd)
# Remote side awaits the terrestrial waking up.
gevent.sleep(3)
# Terrestrail endpoint eventually wakes up and starts transmitting.
self._terrestrial_client.start('localhost', self._this_port)
self._terrestrial_server.start('*', self._other_port)
# Wait for all the enqueued commands to be acked.
# Wait for all the responses to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Publish a telemetry unavailable event.
# This will cause the endpoint clients to disconnect and go to sleep.
self.on_link_down()
# Confirm the results match the commands sent.
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
def test_service_commands(self):
"""
test_service_commands
Test that real service commands are handled by the remote endpoint.
"""
# Publish a telemetry available event.
# This will cause the endpoint clients to wake up and connect.
self.on_link_up()
# Send commands one at a time.
# Reset queues and events.
self._no_requests = 1
self._done_evt = AsyncResult()
self._cmd_tx_evt = AsyncResult()
self._requests_sent = {}
self._results_recv = {}
# Create user object.
obj = IonObject("UserInfo", name="some_name")
cmd = IonObject('RemoteCommand',
resource_id='',
svc_name='resource_registry',
command='create',
args=[obj],
kwargs='',
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd)
# Wait for command request to be acked.
# Wait for response to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Returns obj_id, obj_rev.
obj_id, obj_rev = self._results_recv[cmd.command_id]['result']
# Confirm the results are valid.
"""
Result is a tuple of strings.
{'result': ['ad183ff26bae4f329ddd85fd69d160a9',
'1-00a308c45fff459c7cda1db9a7314de6'],
'command_id': 'cc2ae00d-40b0-47d2-af61-8ffb87f1aca2'}
"""
self.assertIsInstance(obj_id, str)
self.assertNotEqual(obj_id, '')
self.assertIsInstance(obj_rev, str)
self.assertNotEqual(obj_rev, '')
# Send commands one at a time.
# Reset queues and events.
self._no_requests = 1
self._done_evt = AsyncResult()
self._cmd_tx_evt = AsyncResult()
self._requests_sent = {}
self._results_recv = {}
# Read user object.
cmd = IonObject('RemoteCommand',
resource_id='',
svc_name='resource_registry',
command='read',
args=[obj_id],
kwargs='',
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd)
# Wait for command request to be acked.
# Wait for response to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Returns read_obj.
read_obj = self._results_recv[cmd.command_id]['result']
# Confirm the results are valid.
"""
Result is a user info object with the name set.
{'lcstate': 'DEPLOYED_AVAILABLE',
'_rev': '1-851f067bac3c34b2238c0188b3340d0f',
'description': '',
'ts_updated': '1349213207638',
'type_': 'UserInfo',
'contact': <interface.objects.ContactInformation object at 0x10d7df590>,
'_id': '27832d93f4cd4535a75ac75c06e00a7e',
'ts_created': '1349213207638',
'variables': [{'name': '', 'value': ''}],
'name': 'some_name'}
"""
self.assertIsInstance(read_obj, UserInfo)
self.assertEquals(read_obj.name, 'some_name')
# Send commands one at a time.
# Reset queues and events.
self._no_requests = 1
self._done_evt = AsyncResult()
self._cmd_tx_evt = AsyncResult()
self._requests_sent = {}
self._results_recv = {}
# Update user object.
read_obj.name = 'some_other_name'
cmd = IonObject('RemoteCommand',
resource_id='',
svc_name='resource_registry',
command='update',
args=[read_obj],
kwargs='',
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd)
# Wait for command request to be acked.
# Wait for response to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Returns nothing.
# Send commands one at a time.
# Reset queues and events.
self._no_requests = 1
self._done_evt = AsyncResult()
self._cmd_tx_evt = AsyncResult()
self._requests_sent = {}
self._results_recv = {}
# Read user object.
cmd = IonObject('RemoteCommand',
resource_id='',
svc_name='resource_registry',
command='read',
args=[obj_id],
kwargs='',
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd)
# Wait for command request to be acked.
# Wait for response to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Returns read_obj.
read_obj = self._results_recv[cmd.command_id]['result']
self.assertIsInstance(read_obj, UserInfo)
self.assertEquals(read_obj.name, 'some_other_name')
# Send commands one at a time.
# Reset queues and events.
self._no_requests = 1
self._done_evt = AsyncResult()
self._cmd_tx_evt = AsyncResult()
self._requests_sent = {}
self._results_recv = {}
# Delete user object.
cmd = IonObject('RemoteCommand',
resource_id='',
svc_name='resource_registry',
command='delete',
args=[obj_id],
kwargs='',
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd)
# Wait for command request to be acked.
# Wait for response to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Returns nothing.
# Publish a telemetry unavailable event.
# This will cause the endpoint clients to disconnect and go to sleep.
self.on_link_down()
gevent.sleep(1)
def test_resource_commands(self):
"""
test_resource_commands
Test that real resource commands are handled by the remote endpoint.
"""
# Start the IA and check it's out there and behaving.
self.start_agent()
state = self._ia_client.get_agent_state()
log.debug('Agent state is: %s', state)
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
retval = self._ia_client.ping_agent()
log.debug('Agent ping is: %s', str(retval))
self.assertIn('ping from InstrumentAgent', retval)
# Publish a telemetry available event.
# This will cause the endpoint clients to wake up and connect.
self.on_link_up()
# Wait for the link to be up.
# The remote side does not publish public telemetry events
# so we can't wait for that.
gevent.sleep(1)
# Send commands one at a time.
# Reset queues and events.
self._no_requests = 1
self._done_evt = AsyncResult()
self._cmd_tx_evt = AsyncResult()
self._requests_sent = {}
self._results_recv = {}
# Get agent state via remote endpoint.
cmd = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='get_agent_state',
args=[],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd)
# Wait for command request to be acked.
# Wait for response to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Returns agent state.
state = self._results_recv[cmd.command_id]['result']
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
# Send commands one at a time.
# Reset queues and events.
self._no_requests = 1
self._done_evt = AsyncResult()
self._cmd_tx_evt = AsyncResult()
self._requests_sent = {}
self._results_recv = {}
# Ping agent via remote endpoint.
cmd = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='ping_agent',
args=[],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd)
# Wait for command request to be acked.
# Wait for response to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Returns agent state.
ping = self._results_recv[cmd.command_id]['result']
self.assertIn('ping from InstrumentAgent', ping)
# Publish a telemetry unavailable event.
# This will cause the endpoint clients to disconnect and go to sleep.
self.on_link_down()
gevent.sleep(1)
def test_bad_service_name_resource_id(self):
    """
    test_bad_service_name_resource_id
    Test for proper exception behavior when a bad service name or
    resource id is used in a command forwarded to the remote endpoint.

    Both a command aimed at a nonexistent service and a command aimed
    at a nonexistent resource are expected to round-trip and come back
    with a NotFound exception object as the result payload.
    """
    # Publish a telemetry available event.
    # This will cause the endpoint clients to wake up and connect.
    self.on_link_up()
    # Wait for the link to be up.
    # The remote side does not publish public telemetry events
    # so we can't wait for that.
    gevent.sleep(1)
    # Send commands one at a time.
    # Reset queues and events.
    self._no_requests = 1
    self._done_evt = AsyncResult()
    self._cmd_tx_evt = AsyncResult()
    self._requests_sent = {}
    self._results_recv = {}
    # Create user object.
    obj = IonObject("UserInfo", name="some_name")
    # Target a service name that does not exist.
    cmd = IonObject('RemoteCommand',
                    resource_id='',
                    svc_name='bogus_service',
                    command='create',
                    args=[obj],
                    kwargs='',
                    command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd)
    # Wait for command request to be acked.
    # Wait for response to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Returns NotFound.
    result = self._results_recv[cmd.command_id]['result']
    self.assertIsInstance(result, NotFound)
    # Send commands one at a time.
    # Reset queues and events.
    self._no_requests = 1
    self._done_evt = AsyncResult()
    self._cmd_tx_evt = AsyncResult()
    self._requests_sent = {}
    self._results_recv = {}
    # Get agent state via remote endpoint, targeting a resource id
    # that does not exist.
    cmd = IonObject('RemoteCommand',
                    resource_id='bogus_resource_id',
                    svc_name='',
                    command='get_agent_state',
                    args=[],
                    kwargs={},
                    command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd)
    # Wait for command request to be acked.
    # Wait for response to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Returns NotFound.
    result = self._results_recv[cmd.command_id]['result']
    self.assertIsInstance(result, NotFound)
    # Publish a telemetry unavailable event.
    # This will cause the endpoint clients to disconnect and go to sleep.
    self.on_link_down()
    gevent.sleep(1)
def test_bad_commands(self):
    """
    test_bad_commands
    Test for correct exception behavior if a bad command name is forwarded
    to a remote service or resource.

    An unknown command name sent to either a valid service or a valid
    resource agent should come back with a BadRequest exception object
    as the result payload.
    """
    # Start the IA and check it's out there and behaving.
    self.start_agent()
    state = self._ia_client.get_agent_state()
    log.debug('Agent state is: %s', state)
    self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
    retval = self._ia_client.ping_agent()
    log.debug('Agent ping is: %s', str(retval))
    self.assertIn('ping from InstrumentAgent', retval)
    # Publish a telemetry available event.
    # This will cause the endpoint clients to wake up and connect.
    self.on_link_up()
    # Wait for the link to be up.
    # The remote side does not publish public telemetry events
    # so we can't wait for that.
    gevent.sleep(1)
    # Send commands one at a time.
    # Reset queues and events.
    self._no_requests = 1
    self._done_evt = AsyncResult()
    self._cmd_tx_evt = AsyncResult()
    self._requests_sent = {}
    self._results_recv = {}
    # Create user object.
    obj = IonObject("UserInfo", name="some_name")
    # Valid service, bogus command name.
    cmd = IonObject('RemoteCommand',
                    resource_id='',
                    svc_name='resource_registry',
                    command='what_the_flunk',
                    args=[obj],
                    kwargs='',
                    command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd)
    # Wait for command request to be acked.
    # Wait for response to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Returns BadRequest.
    result = self._results_recv[cmd.command_id]['result']
    self.assertIsInstance(result, BadRequest)
    # Send commands one at a time.
    # Reset queues and events.
    self._no_requests = 1
    self._done_evt = AsyncResult()
    self._cmd_tx_evt = AsyncResult()
    self._requests_sent = {}
    self._results_recv = {}
    # Valid resource agent, bogus command name.
    cmd = IonObject('RemoteCommand',
                    resource_id=IA_RESOURCE_ID,
                    svc_name='',
                    command='what_the_flunk',
                    args=[],
                    kwargs={},
                    command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd)
    # Wait for command request to be acked.
    # Wait for response to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Returns BadRequest.
    result = self._results_recv[cmd.command_id]['result']
    self.assertIsInstance(result, BadRequest)
    # Publish a telemetry unavailable event.
    # This will cause the endpoint clients to disconnect and go to sleep.
    self.on_link_down()
    gevent.sleep(1)
def test_resource_command_sequence(self):
    """
    test_resource_command_sequence
    Test for successful completion of a properly ordered sequence of
    resource commands queued for forwarding to the remote endpoint.

    Twelve commands are queued while the link is down, the link is then
    raised and all twelve results are verified in order:
    get_agent_state, INITIALIZE, get_agent_state, GO_ACTIVE,
    get_agent_state, RUN, get_agent_state, ACQUIRE_SAMPLE (x3),
    RESET, get_agent_state.
    """
    # Start the IA and check it's out there and behaving.
    self.start_agent()
    state = self._ia_client.get_agent_state()
    log.debug('Agent state is: %s', state)
    self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
    retval = self._ia_client.ping_agent()
    log.debug('Agent ping is: %s', str(retval))
    self.assertIn('ping from InstrumentAgent', retval)
    # We execute a sequence of twelve consecutive events.
    self._no_requests = 12
    # Get agent state.
    cmd1 = IonObject('RemoteCommand',
                     resource_id=IA_RESOURCE_ID,
                     svc_name='',
                     command='get_agent_state',
                     args=[],
                     kwargs={},
                     command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd1)
    # Initialize agent.
    cmd2 = IonObject('RemoteCommand',
                     resource_id=IA_RESOURCE_ID,
                     svc_name='',
                     command='execute_agent',
                     args=[AgentCommand(command=ResourceAgentEvent.INITIALIZE)],
                     kwargs={},
                     command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd2)
    # Get agent state.
    cmd3 = IonObject('RemoteCommand',
                     resource_id=IA_RESOURCE_ID,
                     svc_name='',
                     command='get_agent_state',
                     args=[],
                     kwargs={},
                     command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd3)
    # Go active.
    cmd4 = IonObject('RemoteCommand',
                     resource_id=IA_RESOURCE_ID,
                     svc_name='',
                     command='execute_agent',
                     args=[AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)],
                     kwargs={},
                     command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd4)
    # Get agent state.
    cmd5 = IonObject('RemoteCommand',
                     resource_id=IA_RESOURCE_ID,
                     svc_name='',
                     command='get_agent_state',
                     args=[],
                     kwargs={},
                     command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd5)
    # Run.
    cmd6 = IonObject('RemoteCommand',
                     resource_id=IA_RESOURCE_ID,
                     svc_name='',
                     command='execute_agent',
                     args=[AgentCommand(command=ResourceAgentEvent.RUN)],
                     kwargs={},
                     command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd6)
    # Get agent state.
    cmd7 = IonObject('RemoteCommand',
                     resource_id=IA_RESOURCE_ID,
                     svc_name='',
                     command='get_agent_state',
                     args=[],
                     kwargs={},
                     command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd7)
    # Acquire sample.
    cmd8 = IonObject('RemoteCommand',
                     resource_id=IA_RESOURCE_ID,
                     svc_name='',
                     command='execute_resource',
                     args=[AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)],
                     kwargs={},
                     command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd8)
    # Acquire sample.
    cmd9 = IonObject('RemoteCommand',
                     resource_id=IA_RESOURCE_ID,
                     svc_name='',
                     command='execute_resource',
                     args=[AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)],
                     kwargs={},
                     command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd9)
    # Acquire sample.
    cmd10 = IonObject('RemoteCommand',
                      resource_id=IA_RESOURCE_ID,
                      svc_name='',
                      command='execute_resource',
                      args=[AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)],
                      kwargs={},
                      command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd10)
    # Reset.
    cmd11 = IonObject('RemoteCommand',
                      resource_id=IA_RESOURCE_ID,
                      svc_name='',
                      command='execute_agent',
                      args=[AgentCommand(command=ResourceAgentEvent.RESET)],
                      kwargs={},
                      command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd11)
    # Get agent state.
    cmd12 = IonObject('RemoteCommand',
                      resource_id=IA_RESOURCE_ID,
                      svc_name='',
                      command='get_agent_state',
                      args=[],
                      kwargs={},
                      command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd12)
    # Publish a telemetry available event.
    # This will cause the endpoint clients to wake up and connect.
    self.on_link_up()
    # Wait for command request to be acked.
    # Wait for response to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Check results of command sequence.
    """
    0ccf1e10-eeca-400d-aefe-f9d6888ec963 {'result': 'RESOURCE_AGENT_STATE_INACTIVE', 'command_id': '0ccf1e10-eeca-400d-aefe-f9d6888ec963'}
    92531bdf-c2c8-4aa8-817d-5107c7311b37 {'result': <interface.objects.AgentCommandResult object at 0x10d7f11d0>, 'command_id': '92531bdf-c2c8-4aa8-817d-5107c7311b37'}
    509934a1-5038-40d8-8014-591e2d8042b6 {'result': 'RESOURCE_AGENT_STATE_COMMAND', 'command_id': '509934a1-5038-40d8-8014-591e2d8042b6'}
    88bacbb7-5366-4d27-9ecf-fff2bec34b2c {'result': <interface.objects.AgentCommandResult object at 0x10d389190>, 'command_id': '88bacbb7-5366-4d27-9ecf-fff2bec34b2c'}
    f8b4d3fa-a249-439b-8bd4-ac212b6100aa {'result': <interface.objects.AgentCommandResult object at 0x10d3893d0>, 'command_id': 'f8b4d3fa-a249-439b-8bd4-ac212b6100aa'}
    8ae98e39-fdb3-4218-ad8f-584620397d9f {'result': <interface.objects.AgentCommandResult object at 0x10d739990>, 'command_id': '8ae98e39-fdb3-4218-ad8f-584620397d9f'}
    746364a1-c4c7-400f-96d4-ee36df5dc1a4 {'result': BadRequest('Execute argument "command" not set.',), 'command_id': '746364a1-c4c7-400f-96d4-ee36df5dc1a4'}
    d516d3d9-e4f9-4ea5-80e0-34639a6377b5 {'result': <interface.objects.AgentCommandResult object at 0x10d3b2350>, 'command_id': 'd516d3d9-e4f9-4ea5-80e0-34639a6377b5'}
    c7da03f5-59bc-420a-9e10-0a7794266599 {'result': 'RESOURCE_AGENT_STATE_IDLE', 'command_id': 'c7da03f5-59bc-420a-9e10-0a7794266599'}
    678d870a-bf18-424a-afb0-f80ecf3277e2 {'result': <interface.objects.AgentCommandResult object at 0x10d739590>, 'command_id': '678d870a-bf18-424a-afb0-f80ecf3277e2'}
    750c6a30-56eb-4535-99c2-a81fefab1b1f {'result': 'RESOURCE_AGENT_STATE_COMMAND', 'command_id': '750c6a30-56eb-4535-99c2-a81fefab1b1f'}
    c17bd658-3775-4aa3-8844-02df70a0e3c0 {'result': 'RESOURCE_AGENT_STATE_UNINITIALIZED', 'command_id': 'c17bd658-3775-4aa3-8844-02df70a0e3c0'}
    """
    # First result is a state string.
    result1 = self._results_recv[cmd1.command_id]['result']
    self.assertEqual(result1, ResourceAgentState.UNINITIALIZED)
    # Second result is an empty AgentCommandResult.
    result2 = self._results_recv[cmd2.command_id]['result']
    # Third result is a state string.
    result3 = self._results_recv[cmd3.command_id]['result']
    self.assertEqual(result3, ResourceAgentState.INACTIVE)
    # Fourth result is an empty AgentCommandResult.
    result4 = self._results_recv[cmd4.command_id]['result']
    # Fifth result is a state string.
    result5 = self._results_recv[cmd5.command_id]['result']
    self.assertEqual(result5, ResourceAgentState.IDLE)
    # Sixth result is an empty AgentCommandResult.
    result6 = self._results_recv[cmd6.command_id]['result']
    # Seventh result is a state string.
    result7 = self._results_recv[cmd7.command_id]['result']
    self.assertEqual(result7, ResourceAgentState.COMMAND)
    """
    {'raw': {'quality_flag': 'ok', 'preferred_timestamp': 'driver_timestamp',
    'stream_name': 'raw', 'pkt_format_id': 'JSON_Data',
    'pkt_version': 1, '
    values': [{'binary': True, 'value_id': 'raw',
    'value': 'NzkuNDM3MywxNy4yMDU2NCwgNzYxLjg4NSwgICA2LjIxOTgsIDE1MDYuMzk3LCAwMSBGZWIgMjAwMSwgMDE6MDE6MDA='}],
    'driver_timestamp': 3558286748.8039923},
    'parsed': {'quality_flag': 'ok', 'preferred_timestamp': 'driver_timestamp',
    'stream_name': 'parsed', 'pkt_format_id': 'JSON_Data', 'pkt_version': 1,
    'values': [{'value_id': 'temp', 'value': 79.4373},
    {'value_id': 'conductivity', 'value': 17.20564},
    {'value_id': 'pressure', 'value': 761.885}],
    'driver_timestamp': 3558286748.8039923}}
    """
    # Eighth result is an AgentCommandResult containing a sample.
    # BUGFIX: the original used assertTrue('parsed', result8.result),
    # which always passes because the second argument of assertTrue is
    # the failure message, not a container.  assertIn performs the
    # intended membership check on the sample dict.
    result8 = self._results_recv[cmd8.command_id]['result']
    self.assertIn('parsed', result8.result)
    # Ninth result is an AgentCommandResult containing a sample.
    result9 = self._results_recv[cmd9.command_id]['result']
    self.assertIn('parsed', result9.result)
    # Tenth result is an AgentCommandResult containing a sample.
    result10 = self._results_recv[cmd10.command_id]['result']
    self.assertIn('parsed', result10.result)
    # Eleventh result is an empty AgentCommandResult.
    result11 = self._results_recv[cmd11.command_id]['result']
    # Twelfth result is a state string.
    # BUGFIX: the original re-asserted result1 here; verify the final
    # state query (post-RESET) instead.
    result12 = self._results_recv[cmd12.command_id]['result']
    self.assertEqual(result12, ResourceAgentState.UNINITIALIZED)
    # Publish a telemetry unavailable event.
    # This will cause the endpoint clients to disconnect and go to sleep.
    self.on_link_down()
    gevent.sleep(1)
| #!/usr/bin/env python
"""
@package ion.services.sa.tcaa.test.test_remote_endpoint
@file ion/services/sa/tcaa/test/test_remote_endpoint.py
@author <NAME>
@brief Test cases for 2CAA remote endpoint.
"""
__author__ = '<NAME>'
# Pyon log and config objects.
from pyon.public import log
from pyon.public import CFG
# Standard imports.
import time
import os
import signal
import time
import unittest
from datetime import datetime
import uuid
import socket
import re
import random
# Pyon exceptions.
from pyon.core.exception import IonException
from pyon.core.exception import BadRequest
from pyon.core.exception import ServerError
from pyon.core.exception import NotFound
# 3rd party imports.
import gevent
from gevent import spawn
from gevent.event import AsyncResult
from nose.plugins.attrib import attr
from mock import patch
# Pyon unittest support.
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.unit_test import PyonTestCase
from pyon.public import IonObject
from pyon.event.event import EventPublisher, EventSubscriber
from pyon.util.context import LocalContextMixin
from ion.services.sa.tcaa.remote_endpoint import RemoteEndpoint
from ion.services.sa.tcaa.remote_endpoint import RemoteEndpointClient
from interface.services.icontainer_agent import ContainerAgentClient
from ion.services.sa.tcaa.r3pc import R3PCServer
from ion.services.sa.tcaa.r3pc import R3PCClient
from interface.objects import TelemetryStatusType
from interface.objects import UserInfo
from pyon.agent.agent import ResourceAgentClient
from pyon.agent.agent import ResourceAgentState
from pyon.agent.agent import ResourceAgentEvent
from interface.objects import AgentCommand
from ion.agents.instrument.driver_int_test_support import DriverIntegrationTestSupport
DEV_ADDR = CFG.device.sbe37.host
DEV_PORT = CFG.device.sbe37.port
DATA_PORT = CFG.device.sbe37.port_agent_data_port
CMD_PORT = CFG.device.sbe37.port_agent_cmd_port
PA_BINARY = CFG.device.sbe37.port_agent_binary
DELIM = CFG.device.sbe37.delim
WORK_DIR = CFG.device.sbe37.workdir
DRV_URI = CFG.device.sbe37.dvr_egg
from ion.agents.instrument.test.agent_test_constants import IA_RESOURCE_ID
from ion.agents.instrument.test.agent_test_constants import IA_NAME
from ion.agents.instrument.test.agent_test_constants import IA_MOD
from ion.agents.instrument.test.agent_test_constants import IA_CLS
from ion.agents.instrument.test.load_test_driver_egg import load_egg
DVR_CONFIG = load_egg()
DRV_MOD = DVR_CONFIG['dvr_mod']
DRV_CLS = DVR_CONFIG['dvr_cls']
# This import will dynamically load the driver egg. It is needed for the MI includes below
from mi.instrument.seabird.sbe37smb.ooicore.driver import SBE37ProtocolEvent
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_process_queued
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_process_online
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_terrestrial_late
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_service_commands
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_resource_commands
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_bad_service_name_resource_id
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_bad_commands
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_resource_command_sequence
"""
Example code to dynamically create client to container service.
https://github.com/ooici/coi-services/blob/master/ion/services/coi/agent_management_service.py#L531
"""
class FakeProcess(LocalContextMixin):
    """
    Minimal stand-in process context.

    The test case itself is not an ion process, so service/agent
    clients constructed by the tests are handed this empty context.
    """
    # All identifying fields are deliberately blank.
    name = ''
    id = ''
    process_type = ''
@attr('INT', group='sa')
@patch.dict(CFG, {'endpoint':{'receive':{'timeout': 60}}})
@unittest.skip('Skipping as this is now out of scope and takes a long time.')
class TestRemoteEndpoint(IonIntegrationTestCase):
"""
Test cases for 2CAA terrestrial endpoint.
"""
def setUp(self):
    """
    Start fake terrestrial components and add cleanup.
    Start terrestrial server and retrieve port.
    Set internal variables.
    Start container.
    Start deployment.
    Start container agent.
    Spawn remote endpoint process.
    Create remote endpoint client and retrieve remote server port.
    Create event publisher.
    """
    # Fake terrestrial side: server receives results (consume_req),
    # client sends commands and receives acks (consume_ack).
    self._terrestrial_server = R3PCServer(self.consume_req, self.terrestrial_server_close)
    self._terrestrial_client = R3PCClient(self.consume_ack, self.terrestrial_client_close)
    self.addCleanup(self._terrestrial_server.stop)
    self.addCleanup(self._terrestrial_client.stop)
    # Port 0 requests an ephemeral port; the chosen one is returned.
    self._other_port = self._terrestrial_server.start('*', 0)
    log.debug('Terrestrial server binding to *:%i', self._other_port)
    # Bookkeeping used by the consume_* callbacks and the tests.
    self._other_host = 'localhost'
    self._platform_resource_id = 'abc123'
    self._resource_id = 'fake_id'
    self._no_requests = 10
    self._requests_sent = {}
    self._results_recv = {}
    self._no_telem_events = 0
    self._done_evt = AsyncResult()
    self._done_telem_evts = AsyncResult()
    self._cmd_tx_evt = AsyncResult()
    # Start container.
    log.debug('Staring capability container.')
    self._start_container()
    # Bring up services in a deploy file (no need to message).
    log.info('Staring deploy services.')
    self.container.start_rel_from_url('res/deploy/r2deploy.yml')
    # Create a container client.
    log.debug('Creating container client.')
    container_client = ContainerAgentClient(node=self.container.node,
        name=self.container.name)
    # Create agent config.  this_port=0 lets the remote endpoint pick
    # its own ephemeral port, retrieved below via get_port().
    endpoint_config = {
        'other_host' : self._other_host,
        'other_port' : self._other_port,
        'this_port' : 0,
        'platform_resource_id' : self._platform_resource_id
    }
    # Spawn the remote endpoint process.
    log.debug('Spawning remote endpoint process.')
    re_pid = container_client.spawn_process(
        name='remote_endpoint_1',
        module='ion.services.sa.tcaa.remote_endpoint',
        cls='RemoteEndpoint',
        config=endpoint_config)
    log.debug('Endpoint pid=%s.', str(re_pid))
    # Create an endpoint client.
    self.re_client = RemoteEndpointClient(
        process=FakeProcess(),
        to_name=re_pid)
    log.debug('Got re client %s.', str(self.re_client))
    # Remember the remote port so tests can connect the fake
    # terrestrial client to it when simulating link-up.
    self._this_port = self.re_client.get_port()
    log.debug('The remote port is: %i.', self._this_port)
    # Start the event publisher used to fake telemetry events.
    self._event_publisher = EventPublisher()
######################################################################
# Helpers.
######################################################################
def on_link_up(self):
    """
    Called by a test to simulate turning the link on.

    Connects the fake terrestrial client to the remote endpoint's
    port, then publishes a PlatformTelemetryEvent with AVAILABLE
    status so the endpoint wakes up and connects back.
    """
    log.debug('Terrestrial client connecting to localhost:%i.',
             self._this_port)
    self._terrestrial_client.start('localhost', self._this_port)
    # Publish a link up event to be caught by the endpoint.
    log.debug('Publishing telemetry event.')
    self._event_publisher.publish_event(
                        event_type='PlatformTelemetryEvent',
                        origin=self._platform_resource_id,
                        status = TelemetryStatusType.AVAILABLE)
def on_link_down(self):
    """
    Called by a test to simulate turning the link off.

    Stops the fake terrestrial client, then publishes a
    PlatformTelemetryEvent with UNAVAILABLE status so the endpoint
    disconnects and goes back to sleep.
    """
    self._terrestrial_client.stop()
    # Publish a link down event to be caught by the endpoint.
    log.debug('Publishing telemetry event.')
    self._event_publisher.publish_event(
                        event_type='PlatformTelemetryEvent',
                        origin=self._platform_resource_id,
                        status = TelemetryStatusType.UNAVAILABLE)
def consume_req(self, res):
    """
    Record a command result received by the fake terrestrial server.

    Stores *res* under the id of the command that produced it and
    fires the done event once results for all expected requests
    have arrived.
    """
    self._results_recv[res['command_id']] = res
    all_results_in = len(self._results_recv) == self._no_requests
    if all_results_in:
        self._done_evt.set()
def consume_ack(self, cmd):
    """
    Record a command acknowledged by the fake terrestrial client.

    Tracks *cmd* by its command id and fires the transmit event once
    every expected request has been acked.
    """
    self._requests_sent[cmd.command_id] = cmd
    all_acked = len(self._requests_sent) == self._no_requests
    if all_acked:
        self._cmd_tx_evt.set()
def terrestrial_server_close(self):
    """
    Callback when terrestrial server closes.

    No-op: the tests do not need to react to server shutdown.
    """
    pass
def terrestrial_client_close(self):
    """
    Callback when terrestrial client closes.

    No-op: the tests do not need to react to client shutdown.
    """
    pass
def make_fake_command(self, no):
    """
    Build a fake RemoteCommand fixture for use in tests.

    The command name embeds *no* so each fixture is distinguishable,
    and each carries a fresh UUID as its command id.
    """
    return IonObject('RemoteCommand',
                     resource_id=self._resource_id,
                     command='fake_cmd_%i' % no,
                     args=['arg1', 23],
                     kwargs={'worktime':3},
                     command_id = str(uuid.uuid4()))
def start_agent(self):
    """
    Start an instrument agent and client.

    Brings up the port agent for the SBE37 simulator, spawns the
    instrument agent process in the container, and creates
    self._ia_client for direct communication with the agent.
    """
    log.info('Creating driver integration test support:')
    log.info('driver module: %s', DRV_MOD)
    log.info('driver class: %s', DRV_CLS)
    log.info('device address: %s', DEV_ADDR)
    log.info('device port: %s', DEV_PORT)
    log.info('log delimiter: %s', DELIM)
    log.info('work dir: %s', WORK_DIR)
    self._support = DriverIntegrationTestSupport(DRV_MOD,
                                                 DRV_CLS,
                                                 DEV_ADDR,
                                                 DEV_PORT,
                                                 DATA_PORT,
                                                 CMD_PORT,
                                                 PA_BINARY,
                                                 DELIM,
                                                 WORK_DIR)
    # Start port agent, add stop to cleanup.
    port = self._support.start_pagent()
    log.info('Port agent started at port %i',port)
    # Configure driver to use port agent port number.
    DVR_CONFIG['comms_config'] = {
        'addr' : 'localhost',
        'port' : port,
        'cmd_port' : CMD_PORT
    }
    self.addCleanup(self._support.stop_pagent)
    # Create agent config.  test_mode allows the agent to run
    # outside of a full deployment.
    agent_config = {
        'driver_config' : DVR_CONFIG,
        'stream_config' : {},
        'agent' : {'resource_id': IA_RESOURCE_ID},
        'test_mode' : True
    }
    # Start instrument agent.
    log.debug("Starting IA.")
    container_client = ContainerAgentClient(node=self.container.node,
        name=self.container.name)
    ia_pid = container_client.spawn_process(name=IA_NAME,
        module=IA_MOD,
        cls=IA_CLS,
        config=agent_config)
    log.info('Agent pid=%s.', str(ia_pid))
    # Start a resource agent client to talk with the instrument agent.
    self._ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
    log.info('Got ia client %s.', str(self._ia_client))
######################################################################
# Tests.
######################################################################
def test_process_queued(self):
    """
    test_process_queued
    Test that queued commands are forwarded to and handled by
    remote endpoint when link comes up.

    Commands are enqueued while the link is down; raising the link
    must flush the queue and return a result for every command.
    """
    # Create and enqueue some requests.
    for i in range(self._no_requests):
        cmd = self.make_fake_command(i)
        self._terrestrial_client.enqueue(cmd)
    # Publish a telemetry available event.
    # This will cause the endpoint clients to wake up and connect.
    self.on_link_up()
    # Wait for all the enqueued commands to be acked.
    # Wait for all the responses to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Publish a telemetry unavailable event.
    # This will cause the endpoint clients to disconnect and go to sleep.
    self.on_link_down()
    # Confirm the results match the commands sent.
    self.assertItemsEqual(self._requests_sent.keys(),
                              self._results_recv.keys())
def test_process_online(self):
    """
    test_process_online
    Test commands are forwarded and handled while link is up.

    Unlike test_process_queued, the link is raised first and commands
    are enqueued while the connection is live.
    """
    # Publish a telemetry available event.
    # This will cause the endpoint clients to wake up and connect.
    self.on_link_up()
    # Wait for the link to be up.
    # The remote side does not publish public telemetry events
    # so we can't wait for that.
    gevent.sleep(1)
    # Create and enqueue some requests.
    for i in range(self._no_requests):
        cmd = self.make_fake_command(i)
        self._terrestrial_client.enqueue(cmd)
    # Wait for all the enqueued commands to be acked.
    # Wait for all the responses to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Publish a telemetry unavailable event.
    # This will cause the endpoint clients to disconnect and go to sleep.
    self.on_link_down()
    # Confirm the results match the commands sent.
    self.assertItemsEqual(self._requests_sent.keys(),
                              self._results_recv.keys())
def test_terrestrial_late(self):
    """
    test_terrestrial_late
    Test queued commands are forwarded and handled by remote endpoint
    when terrestrial side is late to come up.

    Simulates asynchronous wake-ups: the remote side comes up first,
    the terrestrial side stops, queues commands while offline, and
    only later restarts and transmits.
    """
    # Publish a telemetry available event.
    # This will cause the endpoint clients to wake up and connect.
    self.on_link_up()
    # Wait for the link to be up.
    # The remote side does not publish public telemetry events
    # so we can't wait for that.
    gevent.sleep(1)
    # Manually stop the terrestrial endpoint.
    # This will cause it to be unavailable when commands are queued
    # to simulate stability during asynchronous wake ups.
    self._terrestrial_server.stop()
    self._terrestrial_client.stop()
    # Create and enqueue some requests.
    # Enqueueing while stopped is deliberate: commands sit in the
    # client queue until it restarts below.
    for i in range(self._no_requests):
        cmd = self.make_fake_command(i)
        self._terrestrial_client.enqueue(cmd)
    # Remote side awaits the terrestrial waking up.
    gevent.sleep(3)
    # Terrestrial endpoint eventually wakes up and starts transmitting.
    self._terrestrial_client.start('localhost', self._this_port)
    self._terrestrial_server.start('*', self._other_port)
    # Wait for all the enqueued commands to be acked.
    # Wait for all the responses to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Publish a telemetry unavailable event.
    # This will cause the endpoint clients to disconnect and go to sleep.
    self.on_link_down()
    # Confirm the results match the commands sent.
    self.assertItemsEqual(self._requests_sent.keys(),
                              self._results_recv.keys())
def test_service_commands(self):
    """
    test_service_commands
    Test that real service commands are handled by the remote endpoint.

    Drives a full CRUD cycle against the resource_registry service
    through the remote endpoint: create a UserInfo object, read it
    back, update it, read again, and delete it.
    """
    # Publish a telemetry available event.
    # This will cause the endpoint clients to wake up and connect.
    self.on_link_up()
    # Send commands one at a time.
    # Reset queues and events.
    self._no_requests = 1
    self._done_evt = AsyncResult()
    self._cmd_tx_evt = AsyncResult()
    self._requests_sent = {}
    self._results_recv = {}
    # Create user object.
    obj = IonObject("UserInfo", name="some_name")
    cmd = IonObject('RemoteCommand',
                    resource_id='',
                    svc_name='resource_registry',
                    command='create',
                    args=[obj],
                    kwargs='',
                    command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd)
    # Wait for command request to be acked.
    # Wait for response to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Returns obj_id, obj_rev.
    obj_id, obj_rev = self._results_recv[cmd.command_id]['result']
    # Confirm the results are valid.
    """
    Result is a tuple of strings.
    {'result': ['ad183ff26bae4f329ddd85fd69d160a9',
    '1-00a308c45fff459c7cda1db9a7314de6'],
    'command_id': 'cc2ae00d-40b0-47d2-af61-8ffb87f1aca2'}
    """
    self.assertIsInstance(obj_id, str)
    self.assertNotEqual(obj_id, '')
    self.assertIsInstance(obj_rev, str)
    self.assertNotEqual(obj_rev, '')
    # Send commands one at a time.
    # Reset queues and events.
    self._no_requests = 1
    self._done_evt = AsyncResult()
    self._cmd_tx_evt = AsyncResult()
    self._requests_sent = {}
    self._results_recv = {}
    # Read user object.
    cmd = IonObject('RemoteCommand',
                    resource_id='',
                    svc_name='resource_registry',
                    command='read',
                    args=[obj_id],
                    kwargs='',
                    command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd)
    # Wait for command request to be acked.
    # Wait for response to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Returns read_obj.
    read_obj = self._results_recv[cmd.command_id]['result']
    # Confirm the results are valid.
    """
    Result is a user info object with the name set.
    {'lcstate': 'DEPLOYED_AVAILABLE',
    '_rev': '1-851f067bac3c34b2238c0188b3340d0f',
    'description': '',
    'ts_updated': '1349213207638',
    'type_': 'UserInfo',
    'contact': <interface.objects.ContactInformation object at 0x10d7df590>,
    '_id': '27832d93f4cd4535a75ac75c06e00a7e',
    'ts_created': '1349213207638',
    'variables': [{'name': '', 'value': ''}],
    'name': 'some_name'}
    """
    self.assertIsInstance(read_obj, UserInfo)
    self.assertEquals(read_obj.name, 'some_name')
    # Send commands one at a time.
    # Reset queues and events.
    self._no_requests = 1
    self._done_evt = AsyncResult()
    self._cmd_tx_evt = AsyncResult()
    self._requests_sent = {}
    self._results_recv = {}
    # Update user object with a changed name.
    read_obj.name = 'some_other_name'
    cmd = IonObject('RemoteCommand',
                    resource_id='',
                    svc_name='resource_registry',
                    command='update',
                    args=[read_obj],
                    kwargs='',
                    command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd)
    # Wait for command request to be acked.
    # Wait for response to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Returns nothing.
    # Send commands one at a time.
    # Reset queues and events.
    self._no_requests = 1
    self._done_evt = AsyncResult()
    self._cmd_tx_evt = AsyncResult()
    self._requests_sent = {}
    self._results_recv = {}
    # Read user object again to verify the update took effect.
    cmd = IonObject('RemoteCommand',
                    resource_id='',
                    svc_name='resource_registry',
                    command='read',
                    args=[obj_id],
                    kwargs='',
                    command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd)
    # Wait for command request to be acked.
    # Wait for response to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Returns read_obj.
    read_obj = self._results_recv[cmd.command_id]['result']
    self.assertIsInstance(read_obj, UserInfo)
    self.assertEquals(read_obj.name, 'some_other_name')
    # Send commands one at a time.
    # Reset queues and events.
    self._no_requests = 1
    self._done_evt = AsyncResult()
    self._cmd_tx_evt = AsyncResult()
    self._requests_sent = {}
    self._results_recv = {}
    # Delete user object.
    cmd = IonObject('RemoteCommand',
                    resource_id='',
                    svc_name='resource_registry',
                    command='delete',
                    args=[obj_id],
                    kwargs='',
                    command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd)
    # Wait for command request to be acked.
    # Wait for response to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Returns nothing.
    # Publish a telemetry unavailable event.
    # This will cause the endpoint clients to disconnect and go to sleep.
    self.on_link_down()
    gevent.sleep(1)
def test_resource_commands(self):
    """
    test_resource_commands
    Test that real resource commands are handled by the remote endpoint.

    Forwards get_agent_state and ping_agent to a live instrument
    agent through the remote endpoint and checks the returned values.
    """
    # Start the IA and check it's out there and behaving.
    self.start_agent()
    state = self._ia_client.get_agent_state()
    log.debug('Agent state is: %s', state)
    self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
    retval = self._ia_client.ping_agent()
    log.debug('Agent ping is: %s', str(retval))
    self.assertIn('ping from InstrumentAgent', retval)
    # Publish a telemetry available event.
    # This will cause the endpoint clients to wake up and connect.
    self.on_link_up()
    # Wait for the link to be up.
    # The remote side does not publish public telemetry events
    # so we can't wait for that.
    gevent.sleep(1)
    # Send commands one at a time.
    # Reset queues and events.
    self._no_requests = 1
    self._done_evt = AsyncResult()
    self._cmd_tx_evt = AsyncResult()
    self._requests_sent = {}
    self._results_recv = {}
    # Get agent state via remote endpoint.
    cmd = IonObject('RemoteCommand',
                    resource_id=IA_RESOURCE_ID,
                    svc_name='',
                    command='get_agent_state',
                    args=[],
                    kwargs={},
                    command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd)
    # Wait for command request to be acked.
    # Wait for response to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Returns agent state.
    state = self._results_recv[cmd.command_id]['result']
    self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
    # Send commands one at a time.
    # Reset queues and events.
    self._no_requests = 1
    self._done_evt = AsyncResult()
    self._cmd_tx_evt = AsyncResult()
    self._requests_sent = {}
    self._results_recv = {}
    # Ping agent via remote endpoint.
    cmd = IonObject('RemoteCommand',
                    resource_id=IA_RESOURCE_ID,
                    svc_name='',
                    command='ping_agent',
                    args=[],
                    kwargs={},
                    command_id = str(uuid.uuid4()))
    self._terrestrial_client.enqueue(cmd)
    # Wait for command request to be acked.
    # Wait for response to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
    # Returns ping reply string.
    ping = self._results_recv[cmd.command_id]['result']
    self.assertIn('ping from InstrumentAgent', ping)
    # Publish a telemetry unavailable event.
    # This will cause the endpoint clients to disconnect and go to sleep.
    self.on_link_down()
    gevent.sleep(1)
def test_bad_service_name_resource_id(self):
"""
test_bad_service_name_resource_id
Test for proper exception behavior when a bad service name or
resource id is used in a command forwarded to the remote endpoint.
"""
# Publish a telemetry available event.
# This will cause the endpoint clients to wake up and connect.
self.on_link_up()
# Wait for the link to be up.
# The remote side does not publish public telemetry events
# so we can't wait for that.
gevent.sleep(1)
# Send commands one at a time.
# Reset queues and events.
self._no_requests = 1
self._done_evt = AsyncResult()
self._cmd_tx_evt = AsyncResult()
self._requests_sent = {}
self._results_recv = {}
# Create user object.
obj = IonObject("UserInfo", name="some_name")
cmd = IonObject('RemoteCommand',
resource_id='',
svc_name='bogus_service',
command='create',
args=[obj],
kwargs='',
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd)
# Wait for command request to be acked.
# Wait for response to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Returns NotFound.
result = self._results_recv[cmd.command_id]['result']
self.assertIsInstance(result, NotFound)
# Send commands one at a time.
# Reset queues and events.
self._no_requests = 1
self._done_evt = AsyncResult()
self._cmd_tx_evt = AsyncResult()
self._requests_sent = {}
self._results_recv = {}
# Get agent state via remote endpoint.
cmd = IonObject('RemoteCommand',
resource_id='bogus_resource_id',
svc_name='',
command='get_agent_state',
args=[],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd)
# Wait for command request to be acked.
# Wait for response to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Returns NotFound.
result = self._results_recv[cmd.command_id]['result']
self.assertIsInstance(result, NotFound)
# Publish a telemetry unavailable event.
# This will cause the endpoint clients to disconnect and go to sleep.
self.on_link_down()
gevent.sleep(1)
def test_bad_commands(self):
"""
test_bad_commands
Test for correct exception behavior if a bad command name is forwarded
to a remote service or resource.
"""
# Start the IA and check it's out there and behaving.
self.start_agent()
state = self._ia_client.get_agent_state()
log.debug('Agent state is: %s', state)
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
retval = self._ia_client.ping_agent()
log.debug('Agent ping is: %s', str(retval))
self.assertIn('ping from InstrumentAgent', retval)
# Publish a telemetry available event.
# This will cause the endpoint clients to wake up and connect.
self.on_link_up()
# Wait for the link to be up.
# The remote side does not publish public telemetry events
# so we can't wait for that.
gevent.sleep(1)
# Send commands one at a time.
# Reset queues and events.
self._no_requests = 1
self._done_evt = AsyncResult()
self._cmd_tx_evt = AsyncResult()
self._requests_sent = {}
self._results_recv = {}
# Create user object.
obj = IonObject("UserInfo", name="some_name")
cmd = IonObject('RemoteCommand',
resource_id='',
svc_name='resource_registry',
command='what_the_flunk',
args=[obj],
kwargs='',
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd)
# Wait for command request to be acked.
# Wait for response to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Returns BadRequest.
result = self._results_recv[cmd.command_id]['result']
self.assertIsInstance(result, BadRequest)
# Send commands one at a time.
# Reset queues and events.
self._no_requests = 1
self._done_evt = AsyncResult()
self._cmd_tx_evt = AsyncResult()
self._requests_sent = {}
self._results_recv = {}
# Get agent state via remote endpoint.
cmd = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='what_the_flunk',
args=[],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd)
# Wait for command request to be acked.
# Wait for response to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Returns NotFound.
result = self._results_recv[cmd.command_id]['result']
self.assertIsInstance(result, BadRequest)
# Publish a telemetry unavailable event.
# This will cause the endpoint clients to disconnect and go to sleep.
self.on_link_down()
gevent.sleep(1)
def test_resource_command_sequence(self):
"""
test_resource_command_sequence
Test for successful completion of a properly ordered sequence of
resource commands queued for forwarding to the remote endpoint.
"""
# Start the IA and check it's out there and behaving.
self.start_agent()
state = self._ia_client.get_agent_state()
log.debug('Agent state is: %s', state)
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
retval = self._ia_client.ping_agent()
log.debug('Agent ping is: %s', str(retval))
self.assertIn('ping from InstrumentAgent', retval)
# We execute a sequence of twelve consecutive events.
self._no_requests = 12
# Get agent state.
cmd1 = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='get_agent_state',
args=[],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd1)
# Initialize agent.
cmd2 = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='execute_agent',
args=[AgentCommand(command=ResourceAgentEvent.INITIALIZE)],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd2)
# Get agent state.
cmd3 = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='get_agent_state',
args=[],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd3)
# Go active.
cmd4 = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='execute_agent',
args=[AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd4)
# Get agent state.
cmd5 = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='get_agent_state',
args=[],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd5)
# Run.
cmd6 = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='execute_agent',
args=[AgentCommand(command=ResourceAgentEvent.RUN)],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd6)
# Get agent state.
cmd7 = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='get_agent_state',
args=[],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd7)
# Acquire sample.
cmd8 = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='execute_resource',
args=[AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd8)
# Acquire sample
cmd9 = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='execute_resource',
args=[AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd9)
# Acquire sample.
cmd10 = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='execute_resource',
args=[AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd10)
# Reset.
cmd11 = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='execute_agent',
args=[AgentCommand(command=ResourceAgentEvent.RESET)],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd11)
# Get agent state.
cmd12 = IonObject('RemoteCommand',
resource_id=IA_RESOURCE_ID,
svc_name='',
command='get_agent_state',
args=[],
kwargs={},
command_id = str(uuid.uuid4()))
self._terrestrial_client.enqueue(cmd12)
# Publish a telemetry available event.
# This will cause the endpoint clients to wake up and connect.
self.on_link_up()
# Wait for command request to be acked.
# Wait for response to arrive.
self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
# Check results of command sequence.
"""
0ccf1e10-eeca-400d-aefe-f9d6888ec963 {'result': 'RESOURCE_AGENT_STATE_INACTIVE', 'command_id': '0ccf1e10-eeca-400d-aefe-f9d6888ec963'}
92531bdf-c2c8-4aa8-817d-5107c7311b37 {'result': <interface.objects.AgentCommandResult object at 0x10d7f11d0>, 'command_id': '92531bdf-c2c8-4aa8-817d-5107c7311b37'}
509934a1-5038-40d8-8014-591e2d8042b6 {'result': 'RESOURCE_AGENT_STATE_COMMAND', 'command_id': '509934a1-5038-40d8-8014-591e2d8042b6'}
88bacbb7-5366-4d27-9ecf-fff2bec34b2c {'result': <interface.objects.AgentCommandResult object at 0x10d389190>, 'command_id': '88bacbb7-5366-4d27-9ecf-fff2bec34b2c'}
f8b4d3fa-a249-439b-8bd4-ac212b6100aa {'result': <interface.objects.AgentCommandResult object at 0x10d3893d0>, 'command_id': 'f8b4d3fa-a249-439b-8bd4-ac212b6100aa'}
8ae98e39-fdb3-4218-ad8f-584620397d9f {'result': <interface.objects.AgentCommandResult object at 0x10d739990>, 'command_id': '8ae98e39-fdb3-4218-ad8f-584620397d9f'}
746364a1-c4c7-400f-96d4-ee36df5dc1a4 {'result': BadRequest('Execute argument "command" not set.',), 'command_id': '746364a1-c4c7-400f-96d4-ee36df5dc1a4'}
d516d3d9-e4f9-4ea5-80e0-34639a6377b5 {'result': <interface.objects.AgentCommandResult object at 0x10d3b2350>, 'command_id': 'd516d3d9-e4f9-4ea5-80e0-34639a6377b5'}
c7da03f5-59bc-420a-9e10-0a7794266599 {'result': 'RESOURCE_AGENT_STATE_IDLE', 'command_id': 'c7da03f5-59bc-420a-9e10-0a7794266599'}
678d870a-bf18-424a-afb0-f80ecf3277e2 {'result': <interface.objects.AgentCommandResult object at 0x10d739590>, 'command_id': '678d870a-bf18-424a-afb0-f80ecf3277e2'}
750c6a30-56eb-4535-99c2-a81fefab1b1f {'result': 'RESOURCE_AGENT_STATE_COMMAND', 'command_id': '750c6a30-56eb-4535-99c2-a81fefab1b1f'}
c17bd658-3775-4aa3-8844-02df70a0e3c0 {'result': 'RESOURCE_AGENT_STATE_UNINITIALIZED', 'command_id': 'c17bd658-3775-4aa3-8844-02df70a0e3c0'}
"""
# First result is a state string.
result1 = self._results_recv[cmd1.command_id]['result']
self.assertEqual(result1, ResourceAgentState.UNINITIALIZED)
# Second result is an empty AgentCommandResult.
result2 = self._results_recv[cmd2.command_id]['result']
# Third result is a state string.
result3 = self._results_recv[cmd3.command_id]['result']
self.assertEqual(result3, ResourceAgentState.INACTIVE)
# Fourth result is an empty AgentCommandResult.
result4 = self._results_recv[cmd4.command_id]['result']
# Fifth result is a state string.
result5 = self._results_recv[cmd5.command_id]['result']
self.assertEqual(result5, ResourceAgentState.IDLE)
# Sixth result is an empty AgentCommandResult.
result6 = self._results_recv[cmd6.command_id]['result']
# Seventh result is a state string.
result7 = self._results_recv[cmd7.command_id]['result']
self.assertEqual(result7, ResourceAgentState.COMMAND)
"""
{'raw': {'quality_flag': 'ok', 'preferred_timestamp': 'driver_timestamp',
'stream_name': 'raw', 'pkt_format_id': 'JSON_Data',
'pkt_version': 1, '
values': [{'binary': True, 'value_id': 'raw',
'value': 'NzkuNDM3MywxNy4yMDU2NCwgNzYxLjg4NSwgICA2LjIxOTgsIDE1MDYuMzk3LCAwMSBGZWIgMjAwMSwgMDE6MDE6MDA='}],
'driver_timestamp': 3558286748.8039923},
'parsed': {'quality_flag': 'ok', 'preferred_timestamp': 'driver_timestamp',
'stream_name': 'parsed', 'pkt_format_id': 'JSON_Data', 'pkt_version': 1,
'values': [{'value_id': 'temp', 'value': 79.4373},
{'value_id': 'conductivity', 'value': 17.20564},
{'value_id': 'pressure', 'value': 761.885}],
'driver_timestamp': 3558286748.8039923}}
"""
# Eigth result is an AgentCommandResult containing a sample.
result8 = self._results_recv[cmd8.command_id]['result']
self.assertTrue('parsed',result8.result )
# Ninth result is an AgentCommandResult containing a sample.
result9 = self._results_recv[cmd9.command_id]['result']
self.assertTrue('parsed',result9.result )
# Tenth result is an AgentCommandResult containing a sample.
result10 = self._results_recv[cmd10.command_id]['result']
self.assertTrue('parsed',result10.result )
# Eleventh result is an empty AgentCommandResult.
result11 = self._results_recv[cmd11.command_id]['result']
# Twelth result is a state string.
result12 = self._results_recv[cmd12.command_id]['result']
self.assertEqual(result1, ResourceAgentState.UNINITIALIZED)
# Publish a telemetry unavailable event.
# This will cause the endpoint clients to disconnect and go to sleep.
self.on_link_down()
gevent.sleep(1)
| en | 0.706352 | #!/usr/bin/env python @package ion.services.sa.tcaa.test.test_remote_endpoint @file ion/services/sa/tcaa/test/test_remote_endpoint.py @author <NAME> @brief Test cases for 2CAA remote endpoint. # Pyon log and config objects. # Standard imports. # Pyon exceptions. # 3rd party imports. # Pyon unittest support. # This import will dynamically load the driver egg. It is needed for the MI includes below # bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint # bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_process_queued # bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_process_online # bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_terrestrial_late # bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_service_commands # bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_resource_commands # bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_bad_service_name_resource_id # bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_bad_commands # bin/nosetests -s -v ion/services/sa/tcaa/test/test_remote_endpoint.py:TestRemoteEndpoint.test_resource_command_sequence Example code to dynamically create client to container service. https://github.com/ooici/coi-services/blob/master/ion/services/coi/agent_management_service.py#L531 A fake process used because the test case is not an ion process. Test cases for 2CAA terrestrial endpoint. Start fake terrestrial components and add cleanup. Start terrestrial server and retrieve port. Set internal variables. Start container. Start deployment. Start container agent. Spawn remote endpoint process. Create remote endpoint client and retrieve remote server port. Create event publisher. 
# Start container. # Bring up services in a deploy file (no need to message). # Create a container client. # Create agent config. # Spawn the remote enpoint process. # Create an endpoint client. # Remember the remote port. # Start the event publisher. ###################################################################### # Helpers. ###################################################################### Called by a test to simulate turning the link on. # Publish a link up event to be caught by the endpoint. Called by a test to simulate turning the link off. # Publish a link down event to be caught by the endpoint. Consume a terrestrial request setting async event when necessary. Consume terrestrial ack setting async event when necessary. Callback when terrestrial server closes. Callback when terrestrial client closes. Build a fake command for use in tests. Start an instrument agent and client. # Start port agent, add stop to cleanup. # Configure driver to use port agent port number. # Create agent config. # Start instrument agent. # Start a resource agent client to talk with the instrument agent. ###################################################################### # Tests. ###################################################################### test_process_queued Test that queued commands are forwarded to and handled by remote endpoint when link comes up. # Create and enqueue some requests. # Publish a telemetry available event. # This will cause the endpoint clients to wake up and connect. # Wait for all the enqueued commands to be acked. # Wait for all the responses to arrive. # Publish a telemetry unavailable event. # This will cause the endpoint clients to disconnect and go to sleep. # Confirm the results match the commands sent. test_process_online Test commands are forwarded and handled while link is up. # Publish a telemetry available event. # This will cause the endpoint clients to wake up and connect. # Wait for the link to be up. 
# The remote side does not publish public telemetry events # so we can't wait for that. # Create and enqueue some requests. # Wait for all the enqueued commands to be acked. # Wait for all the responses to arrive. # Publish a telemetry unavailable event. # This will cause the endpoint clients to disconnect and go to sleep. # Confirm the results match the commands sent. test_terrestrial_late Test queued commands are forwarded and handled by remote endpoint when terrestrial side is late to come up. # Publish a telemetry available event. # This will cause the endpoint clients to wake up and connect. # Wait for the link to be up. # The remote side does not publish public telemetry events # so we can't wait for that. # Manually stop the terrestrial endpoint. # This will cause it to be unavailable when commands are queued # to simulate stability during asynchronous wake ups. # Create and enqueue some requests. # Remote side awaits the terrestrial waking up. # Terrestrail endpoint eventually wakes up and starts transmitting. # Wait for all the enqueued commands to be acked. # Wait for all the responses to arrive. # Publish a telemetry unavailable event. # This will cause the endpoint clients to disconnect and go to sleep. # Confirm the results match the commands sent. test_service_commands Test that real service commands are handled by the remote endpoint. # Publish a telemetry available event. # This will cause the endpoint clients to wake up and connect. # Send commands one at a time. # Reset queues and events. # Create user object. # Wait for command request to be acked. # Wait for response to arrive. # Returns obj_id, obj_rev. # Confirm the results are valid. Result is a tuple of strings. {'result': ['ad183ff26bae4f329ddd85fd69d160a9', '1-00a308c45fff459c7cda1db9a7314de6'], 'command_id': 'cc2ae00d-40b0-47d2-af61-8ffb87f1aca2'} # Send commands one at a time. # Reset queues and events. # Read user object. # Wait for command request to be acked. 
# Wait for response to arrive. # Returns read_obj. # Confirm the results are valid. Result is a user info object with the name set. {'lcstate': 'DEPLOYED_AVAILABLE', '_rev': '1-851f067bac3c34b2238c0188b3340d0f', 'description': '', 'ts_updated': '1349213207638', 'type_': 'UserInfo', 'contact': <interface.objects.ContactInformation object at 0x10d7df590>, '_id': '27832d93f4cd4535a75ac75c06e00a7e', 'ts_created': '1349213207638', 'variables': [{'name': '', 'value': ''}], 'name': 'some_name'} # Send commands one at a time. # Reset queues and events. # Update user object. # Wait for command request to be acked. # Wait for response to arrive. # Returns nothing. # Send commands one at a time. # Reset queues and events. # Read user object. # Wait for command request to be acked. # Wait for response to arrive. # Returns read_obj. # Send commands one at a time. # Reset queues and events. # Delete user object. # Wait for command request to be acked. # Wait for response to arrive. # Returns nothing. # Publish a telemetry unavailable event. # This will cause the endpoint clients to disconnect and go to sleep. test_resource_commands Test that real resource commands are handled by the remote endpoint. # Start the IA and check it's out there and behaving. # Publish a telemetry available event. # This will cause the endpoint clients to wake up and connect. # Wait for the link to be up. # The remote side does not publish public telemetry events # so we can't wait for that. # Send commands one at a time. # Reset queues and events. # Get agent state via remote endpoint. # Wait for command request to be acked. # Wait for response to arrive. # Returns agent state. # Send commands one at a time. # Reset queues and events. # Ping agent via remote endpoint. # Wait for command request to be acked. # Wait for response to arrive. # Returns agent state. # Publish a telemetry unavailable event. # This will cause the endpoint clients to disconnect and go to sleep. 
test_bad_service_name_resource_id Test for proper exception behavior when a bad service name or resource id is used in a command forwarded to the remote endpoint. # Publish a telemetry available event. # This will cause the endpoint clients to wake up and connect. # Wait for the link to be up. # The remote side does not publish public telemetry events # so we can't wait for that. # Send commands one at a time. # Reset queues and events. # Create user object. # Wait for command request to be acked. # Wait for response to arrive. # Returns NotFound. # Send commands one at a time. # Reset queues and events. # Get agent state via remote endpoint. # Wait for command request to be acked. # Wait for response to arrive. # Returns NotFound. # Publish a telemetry unavailable event. # This will cause the endpoint clients to disconnect and go to sleep. test_bad_commands Test for correct exception behavior if a bad command name is forwarded to a remote service or resource. # Start the IA and check it's out there and behaving. # Publish a telemetry available event. # This will cause the endpoint clients to wake up and connect. # Wait for the link to be up. # The remote side does not publish public telemetry events # so we can't wait for that. # Send commands one at a time. # Reset queues and events. # Create user object. # Wait for command request to be acked. # Wait for response to arrive. # Returns BadRequest. # Send commands one at a time. # Reset queues and events. # Get agent state via remote endpoint. # Wait for command request to be acked. # Wait for response to arrive. # Returns NotFound. # Publish a telemetry unavailable event. # This will cause the endpoint clients to disconnect and go to sleep. test_resource_command_sequence Test for successful completion of a properly ordered sequence of resource commands queued for forwarding to the remote endpoint. # Start the IA and check it's out there and behaving. # We execute a sequence of twelve consecutive events. 
# Get agent state. # Initialize agent. # Get agent state. # Go active. # Get agent state. # Run. # Get agent state. # Acquire sample. # Acquire sample # Acquire sample. # Reset. # Get agent state. # Publish a telemetry available event. # This will cause the endpoint clients to wake up and connect. # Wait for command request to be acked. # Wait for response to arrive. # Check results of command sequence. 0ccf1e10-eeca-400d-aefe-f9d6888ec963 {'result': 'RESOURCE_AGENT_STATE_INACTIVE', 'command_id': '0ccf1e10-eeca-400d-aefe-f9d6888ec963'} 92531bdf-c2c8-4aa8-817d-5107c7311b37 {'result': <interface.objects.AgentCommandResult object at 0x10d7f11d0>, 'command_id': '92531bdf-c2c8-4aa8-817d-5107c7311b37'} 509934a1-5038-40d8-8014-591e2d8042b6 {'result': 'RESOURCE_AGENT_STATE_COMMAND', 'command_id': '509934a1-5038-40d8-8014-591e2d8042b6'} 88bacbb7-5366-4d27-9ecf-fff2bec34b2c {'result': <interface.objects.AgentCommandResult object at 0x10d389190>, 'command_id': '88bacbb7-5366-4d27-9ecf-fff2bec34b2c'} f8b4d3fa-a249-439b-8bd4-ac212b6100aa {'result': <interface.objects.AgentCommandResult object at 0x10d3893d0>, 'command_id': 'f8b4d3fa-a249-439b-8bd4-ac212b6100aa'} 8ae98e39-fdb3-4218-ad8f-584620397d9f {'result': <interface.objects.AgentCommandResult object at 0x10d739990>, 'command_id': '8ae98e39-fdb3-4218-ad8f-584620397d9f'} 746364a1-c4c7-400f-96d4-ee36df5dc1a4 {'result': BadRequest('Execute argument "command" not set.',), 'command_id': '746364a1-c4c7-400f-96d4-ee36df5dc1a4'} d516d3d9-e4f9-4ea5-80e0-34639a6377b5 {'result': <interface.objects.AgentCommandResult object at 0x10d3b2350>, 'command_id': 'd516d3d9-e4f9-4ea5-80e0-34639a6377b5'} c7da03f5-59bc-420a-9e10-0a7794266599 {'result': 'RESOURCE_AGENT_STATE_IDLE', 'command_id': 'c7da03f5-59bc-420a-9e10-0a7794266599'} 678d870a-bf18-424a-afb0-f80ecf3277e2 {'result': <interface.objects.AgentCommandResult object at 0x10d739590>, 'command_id': '678d870a-bf18-424a-afb0-f80ecf3277e2'} 750c6a30-56eb-4535-99c2-a81fefab1b1f {'result': 
'RESOURCE_AGENT_STATE_COMMAND', 'command_id': '750c6a30-56eb-4535-99c2-a81fefab1b1f'} c17bd658-3775-4aa3-8844-02df70a0e3c0 {'result': 'RESOURCE_AGENT_STATE_UNINITIALIZED', 'command_id': 'c17bd658-3775-4aa3-8844-02df70a0e3c0'} # First result is a state string. # Second result is an empty AgentCommandResult. # Third result is a state string. # Fourth result is an empty AgentCommandResult. # Fifth result is a state string. # Sixth result is an empty AgentCommandResult. # Seventh result is a state string. {'raw': {'quality_flag': 'ok', 'preferred_timestamp': 'driver_timestamp', 'stream_name': 'raw', 'pkt_format_id': 'JSON_Data', 'pkt_version': 1, ' values': [{'binary': True, 'value_id': 'raw', 'value': 'NzkuNDM3MywxNy4yMDU2NCwgNzYxLjg4NSwgICA2LjIxOTgsIDE1MDYuMzk3LCAwMSBGZWIgMjAwMSwgMDE6MDE6MDA='}], 'driver_timestamp': 3558286748.8039923}, 'parsed': {'quality_flag': 'ok', 'preferred_timestamp': 'driver_timestamp', 'stream_name': 'parsed', 'pkt_format_id': 'JSON_Data', 'pkt_version': 1, 'values': [{'value_id': 'temp', 'value': 79.4373}, {'value_id': 'conductivity', 'value': 17.20564}, {'value_id': 'pressure', 'value': 761.885}], 'driver_timestamp': 3558286748.8039923}} # Eigth result is an AgentCommandResult containing a sample. # Ninth result is an AgentCommandResult containing a sample. # Tenth result is an AgentCommandResult containing a sample. # Eleventh result is an empty AgentCommandResult. # Twelth result is a state string. # Publish a telemetry unavailable event. # This will cause the endpoint clients to disconnect and go to sleep. | 1.573915 | 2 |
rockyraccoon/utils/plot.py | therooler/pennylane-qllh | 8 | 6620665 | import matplotlib.pyplot as plt
import numpy as np
from rockyraccoon.model.core import RaccoonWrapper
from typing import List
def plot_qml_landscape_binary(
X: np.ndarray, y: np.ndarray, wrapper: RaccoonWrapper, cmap="viridis", title=""
):
"""
Plot the separation boundaries in the 2D input space.
Args:
X: N x d matrix of N samples and d features.
y: Length N vector with labels.
wrapper: The RaccoonWrapper we used for learning
cmap: String with name of matplotlib colormap, see MPL docs
title: String with title of the figure
"""
if wrapper.model.bias:
X = wrapper.add_bias(X)
class_0, class_1 = np.unique(y)
plt.rc("font", size=15)
cmap = plt.cm.get_cmap(cmap)
blue = cmap(0.0)
red = cmap(1.0)
h = 25
max_grid = 2
x_min, x_max = X[:, 0].min() - max_grid, X[:, 0].max() + max_grid
y_min, y_max = X[:, 1].min() - max_grid, X[:, 1].max() + max_grid
xx, yy = np.meshgrid(np.linspace(x_min, x_max, h), np.linspace(y_min, y_max, h))
if wrapper.bias:
z = wrapper.predict(np.c_[xx.ravel(), yy.ravel(), np.ones_like(yy).ravel()])
else:
z = wrapper.predict(np.c_[xx.ravel(), yy.ravel()])
z = z[:, 1] - z[:, 0]
z = z.reshape(xx.shape)
fig, ax = plt.subplots()
ax.contour(xx, yy, z, cmap=cmap)
# Plot also the training points
y = y.flatten()
np.random.seed(123)
spread = 0.3
ax.scatter(
X[(y == class_0), 0]
+ np.random.uniform(-spread, spread, np.sum((y == class_0))),
X[(y == class_0), 1]
+ np.random.uniform(-spread, spread, np.sum((y == class_0))),
marker=".",
c=np.array([blue]),
label="-1",
s=25,
)
ax.scatter(
X[(y == class_1), 0]
+ np.random.uniform(-spread, spread, np.sum((y == class_1))),
X[(y == class_1), 1]
+ np.random.uniform(-spread, spread, np.sum((y == class_1))),
marker="x",
c=np.array([red]),
label="+1",
s=25,
)
ax.set_xlabel("$x_0$")
ax.set_ylabel("$x_1$")
ax.set_title(title)
ax.legend()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.03, 0.7])
m = plt.cm.ScalarMappable(cmap=cmap)
m.set_array(np.linspace(-1, 1, 11))
plt.colorbar(m, cax=cbar_ax, boundaries=np.linspace(-1, 1, 11))
plt.show()
def plot_lh(wrapper: RaccoonWrapper, cmap="viridis", title=""):
"""
Args:
wrapper: The RaccoonWrapper we used for learning
"""
cmap = plt.cm.get_cmap(cmap)
fig, ax = plt.subplots(1, 1)
ax.plot(wrapper.lh, c=cmap(0.2))
ax.set_xlabel("number of iterations")
ax.set_ylabel("Likelihood $\mathcal{L}$")
ax.set_title(title)
plt.show()
def plot_qml_landscape_multiclass(
X: np.ndarray,
y: np.ndarray,
wrapper: RaccoonWrapper,
subplot_grid: List[int],
cmap="viridis",
title="",
):
"""
Plot the separation boundaries of a multiclass qml model in 2D space.
Args:
X: N x d matrix of N samples and d features.
y: Length N vector with labels.
wrapper: The RaccoonWrapper we used for learning
subplot_grid: List that specifies the grid of the subplots
cmap: Name of MPL colormap
title: Title of the figure
"""
if wrapper.model.bias:
X = wrapper.add_bias(X)
assert (
len(subplot_grid) == 2
), "Expected subplot_grid to have length 2, but got iterable with length {}".format(
len(subplot_grid)
)
labels = np.unique(y)
num_classes = len(np.unique(y))
if num_classes == 2:
print("Only {} classes found, calling binary plotter instead")
plot_qml_landscape_binary(X, y, wrapper, cmap=cmap)
return
assert (
np.product(subplot_grid) == num_classes
), "wrong grid size {} for {} classes".format(subplot_grid, num_classes)
plt.rc("font", size=15)
cmap = plt.cm.get_cmap(cmap)
clrs = [cmap(0.0), cmap(0.5), cmap(1.0)]
h = 25
max_grid = 2
spread = 0.2
y = y.flatten()
x_min, x_max = X[:, 0].min() - max_grid, X[:, 0].max() + max_grid
y_min, y_max = X[:, 1].min() - max_grid, X[:, 1].max() + max_grid
xx, yy = np.meshgrid(np.linspace(x_min, x_max, h), np.linspace(y_min, y_max, h))
if wrapper.bias:
z = wrapper.predict(np.c_[xx.ravel(), yy.ravel(), np.ones_like(yy).ravel()])
else:
z = wrapper.predict(np.c_[xx.ravel(), yy.ravel()])
sections = np.zeros_like(z)
idx = np.argmax(z, axis=1)
sections[np.arange(len(idx)), np.argmax(z, axis=1)] = 1
idx = idx.reshape(xx.shape)
markers = [".", "*", "x", "v", "s", "1"]
z = [el.reshape(xx.shape) for el in z.T]
fig, axs = plt.subplots(*subplot_grid)
if subplot_grid[0] == 1:
axs = axs.reshape(1, -1)
if subplot_grid[1] == 1:
axs = axs.reshape(-1, 1)
for i, ax in enumerate(axs.flatten()):
for j, label in enumerate(labels):
np.random.seed(2342)
if j != i:
ax.scatter(
X[(y == label), 0]
+ np.random.uniform(-spread, spread, np.sum((y == label))),
X[(y == label), 1]
+ np.random.uniform(-spread, spread, np.sum((y == label))),
c="gray",
label=label,
marker=markers[label],
s=50,
)
else:
ax.scatter(
X[(y == label), 0]
+ np.random.uniform(-spread, spread, np.sum((y == label))),
X[(y == label), 1]
+ np.random.uniform(-spread, spread, np.sum((y == label))),
c=np.array([clrs[2]]),
marker=markers[label],
label=label,
s=50,
)
polygon = np.zeros_like(idx)
polygon[idx == i] = 1
ax.contourf(
xx,
yy,
polygon,
alpha=0.1,
cmap=cmap,
levels=[0.5, 1],
vmin=0,
vmax=1,
)
ax.legend(prop={"size": 6})
cs = ax.contour(xx, yy, z[i], cmap=cmap, vmin=0, vmax=1)
ax.clabel(cs, inline=True, inline_spacing=2, fontsize=10)
ylabels = list((x, 0) for x in range(subplot_grid[0]))
xlabels = list((subplot_grid[0] - 1, x) for x in range(subplot_grid[1]))
for ax_id in ylabels:
axs[ax_id].set_ylabel(r"$x_1$")
for ax_id in xlabels:
axs[ax_id].set_xlabel(r"$x_0$")
for ix, iy in np.ndindex(axs.shape):
if (ix, iy) not in xlabels:
axs[ix, iy].set_xticks([])
if (ix, iy) not in ylabels:
axs[ix, iy].set_yticks([])
fig.suptitle(title)
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.03, 0.7])
m = plt.cm.ScalarMappable(cmap=cmap)
m.set_array(np.linspace(0, 1, 11))
plt.colorbar(m, cax=cbar_ax, boundaries=np.linspace(0, 1, 11))
plt.show()
| import matplotlib.pyplot as plt
import numpy as np
from rockyraccoon.model.core import RaccoonWrapper
from typing import List
def plot_qml_landscape_binary(
X: np.ndarray, y: np.ndarray, wrapper: RaccoonWrapper, cmap="viridis", title=""
):
"""
Plot the separation boundaries in the 2D input space.
Args:
X: N x d matrix of N samples and d features.
y: Length N vector with labels.
wrapper: The RaccoonWrapper we used for learning
cmap: String with name of matplotlib colormap, see MPL docs
title: String with title of the figure
"""
if wrapper.model.bias:
X = wrapper.add_bias(X)
class_0, class_1 = np.unique(y)
plt.rc("font", size=15)
cmap = plt.cm.get_cmap(cmap)
blue = cmap(0.0)
red = cmap(1.0)
h = 25
max_grid = 2
x_min, x_max = X[:, 0].min() - max_grid, X[:, 0].max() + max_grid
y_min, y_max = X[:, 1].min() - max_grid, X[:, 1].max() + max_grid
xx, yy = np.meshgrid(np.linspace(x_min, x_max, h), np.linspace(y_min, y_max, h))
if wrapper.bias:
z = wrapper.predict(np.c_[xx.ravel(), yy.ravel(), np.ones_like(yy).ravel()])
else:
z = wrapper.predict(np.c_[xx.ravel(), yy.ravel()])
z = z[:, 1] - z[:, 0]
z = z.reshape(xx.shape)
fig, ax = plt.subplots()
ax.contour(xx, yy, z, cmap=cmap)
# Plot also the training points
y = y.flatten()
np.random.seed(123)
spread = 0.3
ax.scatter(
X[(y == class_0), 0]
+ np.random.uniform(-spread, spread, np.sum((y == class_0))),
X[(y == class_0), 1]
+ np.random.uniform(-spread, spread, np.sum((y == class_0))),
marker=".",
c=np.array([blue]),
label="-1",
s=25,
)
ax.scatter(
X[(y == class_1), 0]
+ np.random.uniform(-spread, spread, np.sum((y == class_1))),
X[(y == class_1), 1]
+ np.random.uniform(-spread, spread, np.sum((y == class_1))),
marker="x",
c=np.array([red]),
label="+1",
s=25,
)
ax.set_xlabel("$x_0$")
ax.set_ylabel("$x_1$")
ax.set_title(title)
ax.legend()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.03, 0.7])
m = plt.cm.ScalarMappable(cmap=cmap)
m.set_array(np.linspace(-1, 1, 11))
plt.colorbar(m, cax=cbar_ax, boundaries=np.linspace(-1, 1, 11))
plt.show()
def plot_lh(wrapper: RaccoonWrapper, cmap="viridis", title=""):
    """
    Plot the likelihood curve recorded during training.

    Args:
        wrapper: The RaccoonWrapper we used for learning; its ``lh``
            attribute is assumed to hold one likelihood value per
            training iteration — TODO confirm against the trainer.
        cmap: Name of a matplotlib colormap, see MPL docs.
        title: Title of the figure.
    """
    cmap = plt.cm.get_cmap(cmap)  # resolve the colormap name to a Colormap object
    fig, ax = plt.subplots(1, 1)
    ax.plot(wrapper.lh, c=cmap(0.2))  # single fixed colour sampled from the map
    ax.set_xlabel("number of iterations")
    ax.set_ylabel("Likelihood $\mathcal{L}$")
    ax.set_title(title)
    plt.show()  # side effect only: opens the figure window
def plot_qml_landscape_multiclass(
X: np.ndarray,
y: np.ndarray,
wrapper: RaccoonWrapper,
subplot_grid: List[int],
cmap="viridis",
title="",
):
"""
Plot the separation boundaries of a multiclass qml model in 2D space.
Args:
X: N x d matrix of N samples and d features.
y: Length N vector with labels.
wrapper: The RaccoonWrapper we used for learning
subplot_grid: List that specifies the grid of the subplots
cmap: Name of MPL colormap
title: Title of the figure
"""
if wrapper.model.bias:
X = wrapper.add_bias(X)
assert (
len(subplot_grid) == 2
), "Expected subplot_grid to have length 2, but got iterable with length {}".format(
len(subplot_grid)
)
labels = np.unique(y)
num_classes = len(np.unique(y))
if num_classes == 2:
print("Only {} classes found, calling binary plotter instead")
plot_qml_landscape_binary(X, y, wrapper, cmap=cmap)
return
assert (
np.product(subplot_grid) == num_classes
), "wrong grid size {} for {} classes".format(subplot_grid, num_classes)
plt.rc("font", size=15)
cmap = plt.cm.get_cmap(cmap)
clrs = [cmap(0.0), cmap(0.5), cmap(1.0)]
h = 25
max_grid = 2
spread = 0.2
y = y.flatten()
x_min, x_max = X[:, 0].min() - max_grid, X[:, 0].max() + max_grid
y_min, y_max = X[:, 1].min() - max_grid, X[:, 1].max() + max_grid
xx, yy = np.meshgrid(np.linspace(x_min, x_max, h), np.linspace(y_min, y_max, h))
if wrapper.bias:
z = wrapper.predict(np.c_[xx.ravel(), yy.ravel(), np.ones_like(yy).ravel()])
else:
z = wrapper.predict(np.c_[xx.ravel(), yy.ravel()])
sections = np.zeros_like(z)
idx = np.argmax(z, axis=1)
sections[np.arange(len(idx)), np.argmax(z, axis=1)] = 1
idx = idx.reshape(xx.shape)
markers = [".", "*", "x", "v", "s", "1"]
z = [el.reshape(xx.shape) for el in z.T]
fig, axs = plt.subplots(*subplot_grid)
if subplot_grid[0] == 1:
axs = axs.reshape(1, -1)
if subplot_grid[1] == 1:
axs = axs.reshape(-1, 1)
for i, ax in enumerate(axs.flatten()):
for j, label in enumerate(labels):
np.random.seed(2342)
if j != i:
ax.scatter(
X[(y == label), 0]
+ np.random.uniform(-spread, spread, np.sum((y == label))),
X[(y == label), 1]
+ np.random.uniform(-spread, spread, np.sum((y == label))),
c="gray",
label=label,
marker=markers[label],
s=50,
)
else:
ax.scatter(
X[(y == label), 0]
+ np.random.uniform(-spread, spread, np.sum((y == label))),
X[(y == label), 1]
+ np.random.uniform(-spread, spread, np.sum((y == label))),
c=np.array([clrs[2]]),
marker=markers[label],
label=label,
s=50,
)
polygon = np.zeros_like(idx)
polygon[idx == i] = 1
ax.contourf(
xx,
yy,
polygon,
alpha=0.1,
cmap=cmap,
levels=[0.5, 1],
vmin=0,
vmax=1,
)
ax.legend(prop={"size": 6})
cs = ax.contour(xx, yy, z[i], cmap=cmap, vmin=0, vmax=1)
ax.clabel(cs, inline=True, inline_spacing=2, fontsize=10)
ylabels = list((x, 0) for x in range(subplot_grid[0]))
xlabels = list((subplot_grid[0] - 1, x) for x in range(subplot_grid[1]))
for ax_id in ylabels:
axs[ax_id].set_ylabel(r"$x_1$")
for ax_id in xlabels:
axs[ax_id].set_xlabel(r"$x_0$")
for ix, iy in np.ndindex(axs.shape):
if (ix, iy) not in xlabels:
axs[ix, iy].set_xticks([])
if (ix, iy) not in ylabels:
axs[ix, iy].set_yticks([])
fig.suptitle(title)
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.03, 0.7])
m = plt.cm.ScalarMappable(cmap=cmap)
m.set_array(np.linspace(0, 1, 11))
plt.colorbar(m, cax=cbar_ax, boundaries=np.linspace(0, 1, 11))
plt.show()
| en | 0.775255 | Plot the separation boundaries in the 2D input space. Args: X: N x d matrix of N samples and d features. y: Length N vector with labels. wrapper: The RaccoonWrapper we used for learning cmap: String with name of matplotlib colormap, see MPL docs title: String with title of the figure # Plot also the training points Args: wrapper: The RaccoonWrapper we used for learning Plot the separation boundaries of a multiclass qml model in 2D space. Args: X: N x d matrix of N samples and d features. y: Length N vector with labels. wrapper: The RaccoonWrapper we used for learning subplot_grid: List that specifies the grid of the subplots cmap: Name of MPL colormap title: Title of the figure | 2.807143 | 3 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Life's pathetic, have fun ("▔□▔)/hi~♡ Nasy.
Excited without bugs::
| * *
| . .
| .
| * ,
| .
|
| *
| |\___/|
| ) -( . '
| =\ - /=
| )===( *
| / - \
| |- |
| / - \ 0.|.0
| NASY___\__( (__/_____(\=/)__+1s____________
| ______|____) )______|______|______|______|_
| ___|______( (____|______|______|______|____
| ______|____\_|______|______|______|______|_
| ___|______|______|______|______|______|____
| ______|______|______|______|______|______|_
| ___|______|______|______|______|______|____
* author: Nasy https://nasy.moe <Nasy>
* date: Apr 12, 2018
* email: echo bmFzeXh4QGdtYWlsLmNvbQo= | base64 -D
* filename: dict_list.py
* Last modified time: Apr 12, 2018
* license: MIT
There are more things in heaven and earth, Horatio, than are dreamt.
-- From "Hamlet"
"""
from operator import itemgetter
from nasymoe.types import BL, BST
def blog2ldict(blog: BL) -> BST:
    """Turn an id-keyed blog dict into a list of entries sorted by blog path."""
    return sorted(blog.values(), key=lambda entry: entry["blog_path"])
def ldict2blog(lblog: BST) -> BL:
    """Turn a list of blog entries back into a dict keyed by stringified id.

    Falsy entries (e.g. empty dicts) are skipped.
    """
    result = {}
    for entry in lblog:
        if entry:
            result[str(entry["id"])] = entry
    return result
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Life's pathetic, have fun ("▔□▔)/hi~♡ Nasy.
Excited without bugs::
| * *
| . .
| .
| * ,
| .
|
| *
| |\___/|
| ) -( . '
| =\ - /=
| )===( *
| / - \
| |- |
| / - \ 0.|.0
| NASY___\__( (__/_____(\=/)__+1s____________
| ______|____) )______|______|______|______|_
| ___|______( (____|______|______|______|____
| ______|____\_|______|______|______|______|_
| ___|______|______|______|______|______|____
| ______|______|______|______|______|______|_
| ___|______|______|______|______|______|____
* author: Nasy https://nasy.moe <Nasy>
* date: Apr 12, 2018
* email: echo bmFzeXh4QGdtYWlsLmNvbQo= | base64 -D
* filename: dict_list.py
* Last modified time: Apr 12, 2018
* license: MIT
There are more things in heaven and earth, Horatio, than are dreamt.
-- From "Hamlet"
"""
from operator import itemgetter
from nasymoe.types import BL, BST
def blog2ldict(blog: BL) -> BST:
"""Transform blog dict to list of dict."""
return sorted(blog.values(), key = itemgetter("blog_path"))
def ldict2blog(lblog: BST) -> BL:
"""Transform list of dict to blog dict."""
return {str(blog["id"]): blog for blog in lblog if blog}
| en | 0.643819 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Life's pathetic, have fun ("▔□▔)/hi~♡ Nasy. Excited without bugs:: | * * | . . | . | * , | . | | * | |\___/| | ) -( . ' | =\ - /= | )===( * | / - \ | |- | | / - \ 0.|.0 | NASY___\__( (__/_____(\=/)__+1s____________ | ______|____) )______|______|______|______|_ | ___|______( (____|______|______|______|____ | ______|____\_|______|______|______|______|_ | ___|______|______|______|______|______|____ | ______|______|______|______|______|______|_ | ___|______|______|______|______|______|____ * author: Nasy https://nasy.moe <Nasy> * date: Apr 12, 2018 * email: echo bmFzeXh4QGdtYWlsLmNvbQo= | base64 -D * filename: dict_list.py * Last modified time: Apr 12, 2018 * license: MIT There are more things in heaven and earth, Horatio, than are dreamt. -- From "Hamlet" Transform blog dict to list of dict. Transform list of dict to blog dict. | 1.925786 | 2 |
client.py | mcgillij/owncast_chat_webhook | 1 | 6620667 | import requests
import json
webhook_url = 'http://localhost:5000/webhook'
data = {'eventData': {'body': 'test message without TTS',
'id': 'MHLpo7Hng',
'rawBody': 'f',
'timestamp': '2021-09-20T23:02:54.980066719Z',
'user': {'createdAt': '2021-09-20T22:34:33.139297191Z',
'displayColor': 22,
'displayName': 'mcgillij',
'id': 'avyQt7N7R',
'previousNames': ['mcgillij']},
'visible': True},
'type': 'CHAT'}
data_tts = {'eventData': {'body': 'TTS: test message',
'id': 'MHLpo7Hng',
'rawBody': 'f',
'timestamp': '2021-09-20T23:02:54.980066719Z',
'user': {'createdAt': '2021-09-20T22:34:33.139297191Z',
'displayColor': 22,
'displayName': 'mcgillij',
'id': 'avyQt7N7R',
'previousNames': ['mcgillij']},
'visible': True},
'type': 'CHAT'}
r = requests.post(webhook_url, data=json.dumps(data), headers={'Content-Type': 'application/json'})
r = requests.post(webhook_url, data=json.dumps(data_tts), headers={'Content-Type': 'application/json'})
| import requests
import json
webhook_url = 'http://localhost:5000/webhook'
data = {'eventData': {'body': 'test message without TTS',
'id': 'MHLpo7Hng',
'rawBody': 'f',
'timestamp': '2021-09-20T23:02:54.980066719Z',
'user': {'createdAt': '2021-09-20T22:34:33.139297191Z',
'displayColor': 22,
'displayName': 'mcgillij',
'id': 'avyQt7N7R',
'previousNames': ['mcgillij']},
'visible': True},
'type': 'CHAT'}
data_tts = {'eventData': {'body': 'TTS: test message',
'id': 'MHLpo7Hng',
'rawBody': 'f',
'timestamp': '2021-09-20T23:02:54.980066719Z',
'user': {'createdAt': '2021-09-20T22:34:33.139297191Z',
'displayColor': 22,
'displayName': 'mcgillij',
'id': 'avyQt7N7R',
'previousNames': ['mcgillij']},
'visible': True},
'type': 'CHAT'}
r = requests.post(webhook_url, data=json.dumps(data), headers={'Content-Type': 'application/json'})
r = requests.post(webhook_url, data=json.dumps(data_tts), headers={'Content-Type': 'application/json'})
| none | 1 | 2.405683 | 2 | |
# Exercise 6.5
import ackermann # for nonnegative integer check
def gcd(a, b):
    """Return the greatest common divisor of two nonnegative integers.

    Uses the recursive Euclidean algorithm: gcd(a, b) == gcd(b, a % b).

    :param a: nonnegative integer.
    :param b: nonnegative integer.
    :raises ValueError: if either argument is not a nonnegative integer.
    """
    # Bug fixes vs. the original:
    #  * the validation used `or` instead of `and`, so it only complained
    #    when BOTH arguments were invalid;
    #  * the second call misspelled the module name (`ackerman`), raising
    #    NameError whenever `a` was actually invalid;
    #  * invalid input was only printed, then the computation proceeded
    #    anyway.  Validate inline and fail loudly instead.
    if not (isinstance(a, int) and a >= 0 and isinstance(b, int) and b >= 0):
        raise ValueError("Please enter nonnegative integers.")
    if b == 0:
        return a
    return gcd(b, a % b)  # % is the modulo operator: a % b is the remainder of a / b
#print(gcd(27,18))
| # Exercise 6.5
import ackermann # for nonnegative integer check
def gcd(a,b):
if not(ackermann.is_nonnegative_integer(a) or ackerman.is_nonnegative_integer(b)):
print("Please enter nonnegative integers.")
if b==0:
return a
return gcd(b,a%b) # % is modulo operator, a%b gives remainder of a/b
#print(gcd(27,18))
| en | 0.629328 | # Exercise 6.5 # for nonnegative integer check # % is modulo operator, a%b gives remainder of a/b #print(gcd(27,18)) | 3.915239 | 4 |
Bet_Goals.py | sureshr14/BetGoals-Beyond-Insight | 0 | 6620669 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import streamlit as st
# To make things easier later, we're also importing numpy and pandas for
# working with sample data.
import numpy as np
import pandas as pd
import altair as alt
st.title('Bet Goals')
st.write('Bet goals is an app designed to help you make informed bets on soccer matches')
st.subheader('Suggested Betting Strategy:')
st.write('We suggest betting on games predicted by our model to end in a draw')
st.subheader('Rationale:')
st.write('In the past 4 years, the bookmakers odds have always favoured either the home team or the away team. Not a single game has been backed by the bookmaker to end in a draw in this time frame. This systematic underestimation of the chances of a game ending in a draw lets the bookmaker overestimate the chances of a home win. To exploit this inefficiency, we built a model that can identify draws with 36% precision. Even though this means we will be wrong 2 out of 3 times, the odds on draws are have historically been high enough to give us around 20% return on investment.')
st.subheader('Matchday 28:')
st.subheader('Bookmaker Odds:')
df=pd.read_csv('Betdata27.csv')
st.write(df[['Fixture:','Home win odds','Draw odds','Away win odds','Predicted Result']])
#option = st.selectbox(
# 'Which match would you like to bet on?',
# df['Fixture:'])
#df2
options_multi = st.multiselect('What fixtures would you like to bet on? (We suggest betting on games predicted to end in draws)', df['Fixture:'])
#st.write('You selected:', options_multi)
option_team=pd.DataFrame(columns=['Teamselected'])
option_amount=pd.DataFrame(columns=['Moneybet'])
option_poss_win=pd.DataFrame(columns=['Moneywon'])
option_prob_win=pd.DataFrame(columns=['Probwin'])
for i in range(len(options_multi)):
df2=df[['Home Team','Away Team','Draw option']].loc[df['Fixture:']==options_multi[i]]
option_temp = st.selectbox(
'Which team would you like to bet on in '+options_multi[i]+'?',
(df2.iloc[0,0],df2.iloc[0,1],df2.iloc[0,2]))
option_team=option_team.append({'Teamselected':option_temp}, ignore_index=True)
# option_team[['Teamselected']].iloc[i]=option_temp
d = {'Money': [10, 20, 50, 100]}
Betopt= pd.DataFrame(data=d)
widkey='slider'+str(i)
option_mtemp = st.slider('How much would you like to bet?', 0, 200, 0, key=widkey)
option_amount=option_amount.append({'Moneybet':option_mtemp}, ignore_index=True)
if df2.iloc[0,0]==option_temp:
a1=df[['Home win odds']].loc[df['Fixture:']==options_multi[i]]
t1=a1.iloc[0,0]
b1=df[['Home win']].loc[df['Fixture:']==options_multi[i]]*100
t2=b1.iloc[0,0]
money=(t1-1)*option_mtemp
option_poss_win=option_poss_win.append({'Moneywon': round(money,2)}, ignore_index=True)
option_prob_win=option_prob_win.append({'Probwin': round(t2,2)/100}, ignore_index=True)
if option_mtemp != 0:
'You have a '+str(round(t2,2))+'% chance of winning '+str(round(money,2))+' dollars by betting on '+option_temp+' in '+options_multi[i]+'.'
elif df2.iloc[0,1]==option_temp:
a1=df[['Away win odds']].loc[df['Fixture:']==options_multi[i]]
t1=a1.iloc[0,0]
b1=df[['Away Win']].loc[df['Fixture:']==options_multi[i]]*100
t2=b1.iloc[0,0]
money=(t1-1)*option_mtemp
option_poss_win=option_poss_win.append({'Moneywon': round(money,2)}, ignore_index=True)
option_prob_win=option_prob_win.append({'Probwin': round(t2,2)/100}, ignore_index=True)
if option_mtemp != 0:
'You have a '+str(round(t2,2))+'% chance of winning '+str(round(money,2))+' dollars by betting on '+option_temp+' in '+options_multi[i]+'.'
else:
a1=df[['Draw odds']].loc[df['Fixture:']==options_multi[i]]
t1=a1.iloc[0,0]
b1=df[['Draw']].loc[df['Fixture:']==options_multi[i]]*100
t2=b1.iloc[0,0]
money=(t1-1)*option_mtemp
option_poss_win=option_poss_win.append({'Moneywon': round(money,2)}, ignore_index=True)
option_prob_win=option_prob_win.append({'Probwin': round(t2,2)/100}, ignore_index=True)
if option_mtemp != 0:
'You have a '+str(round(t2,2))+'% chance of winning '+str(round(money,2))+' dollars by betting on a draw in '+options_multi[i]+'.'
combinations=np.zeros((2**len(options_multi),len(options_multi)))
for i in range(2**len(options_multi)):
temp=i
for j in range(len(options_multi)):
q=temp//2
mod=temp%2
combinations[i,j]=mod
temp=q
prob_dist=pd.DataFrame(columns=['Winning','Probability'])
for i in range(2**len(options_multi)):
probability=1
winning=0
for j in range(len(options_multi)):
if combinations[i,j]==1:
probability=probability*option_prob_win['Probwin'].iloc[j]
winning=winning+option_poss_win['Moneywon'].iloc[j]
else:
probability=probability*(1-option_prob_win['Probwin'].iloc[j])
winning=winning-option_amount['Moneybet'].iloc[j]
prob_dist=prob_dist.append({'Winning':winning,'Probability':probability}, ignore_index=True)
prob_dist=prob_dist.sort_values(by='Winning',ascending=True)
#prob_dist
if prob_dist.shape[0]>1:
d=alt.Chart(prob_dist).mark_bar().encode(
x='Winning',
y='Probability'
)
st.altair_chart(d)
expecval=0
for i in range(prob_dist.shape[0]):
expecval=expecval+prob_dist.iloc[i,0]*prob_dist.iloc[i,1]
'The expected value of your bets is '+str(round(expecval,2))+' dollars.'
#option_poss_win
#option_prob_win | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import streamlit as st
# To make things easier later, we're also importing numpy and pandas for
# working with sample data.
import numpy as np
import pandas as pd
import altair as alt
st.title('Bet Goals')
st.write('Bet goals is an app designed to help you make informed bets on soccer matches')
st.subheader('Suggested Betting Strategy:')
st.write('We suggest betting on games predicted by our model to end in a draw')
st.subheader('Rationale:')
st.write('In the past 4 years, the bookmakers odds have always favoured either the home team or the away team. Not a single game has been backed by the bookmaker to end in a draw in this time frame. This systematic underestimation of the chances of a game ending in a draw lets the bookmaker overestimate the chances of a home win. To exploit this inefficiency, we built a model that can identify draws with 36% precision. Even though this means we will be wrong 2 out of 3 times, the odds on draws are have historically been high enough to give us around 20% return on investment.')
st.subheader('Matchday 28:')
st.subheader('Bookmaker Odds:')
df=pd.read_csv('Betdata27.csv')
st.write(df[['Fixture:','Home win odds','Draw odds','Away win odds','Predicted Result']])
#option = st.selectbox(
# 'Which match would you like to bet on?',
# df['Fixture:'])
#df2
options_multi = st.multiselect('What fixtures would you like to bet on? (We suggest betting on games predicted to end in draws)', df['Fixture:'])
#st.write('You selected:', options_multi)
option_team=pd.DataFrame(columns=['Teamselected'])
option_amount=pd.DataFrame(columns=['Moneybet'])
option_poss_win=pd.DataFrame(columns=['Moneywon'])
option_prob_win=pd.DataFrame(columns=['Probwin'])
for i in range(len(options_multi)):
df2=df[['Home Team','Away Team','Draw option']].loc[df['Fixture:']==options_multi[i]]
option_temp = st.selectbox(
'Which team would you like to bet on in '+options_multi[i]+'?',
(df2.iloc[0,0],df2.iloc[0,1],df2.iloc[0,2]))
option_team=option_team.append({'Teamselected':option_temp}, ignore_index=True)
# option_team[['Teamselected']].iloc[i]=option_temp
d = {'Money': [10, 20, 50, 100]}
Betopt= pd.DataFrame(data=d)
widkey='slider'+str(i)
option_mtemp = st.slider('How much would you like to bet?', 0, 200, 0, key=widkey)
option_amount=option_amount.append({'Moneybet':option_mtemp}, ignore_index=True)
if df2.iloc[0,0]==option_temp:
a1=df[['Home win odds']].loc[df['Fixture:']==options_multi[i]]
t1=a1.iloc[0,0]
b1=df[['Home win']].loc[df['Fixture:']==options_multi[i]]*100
t2=b1.iloc[0,0]
money=(t1-1)*option_mtemp
option_poss_win=option_poss_win.append({'Moneywon': round(money,2)}, ignore_index=True)
option_prob_win=option_prob_win.append({'Probwin': round(t2,2)/100}, ignore_index=True)
if option_mtemp != 0:
'You have a '+str(round(t2,2))+'% chance of winning '+str(round(money,2))+' dollars by betting on '+option_temp+' in '+options_multi[i]+'.'
elif df2.iloc[0,1]==option_temp:
a1=df[['Away win odds']].loc[df['Fixture:']==options_multi[i]]
t1=a1.iloc[0,0]
b1=df[['Away Win']].loc[df['Fixture:']==options_multi[i]]*100
t2=b1.iloc[0,0]
money=(t1-1)*option_mtemp
option_poss_win=option_poss_win.append({'Moneywon': round(money,2)}, ignore_index=True)
option_prob_win=option_prob_win.append({'Probwin': round(t2,2)/100}, ignore_index=True)
if option_mtemp != 0:
'You have a '+str(round(t2,2))+'% chance of winning '+str(round(money,2))+' dollars by betting on '+option_temp+' in '+options_multi[i]+'.'
else:
a1=df[['Draw odds']].loc[df['Fixture:']==options_multi[i]]
t1=a1.iloc[0,0]
b1=df[['Draw']].loc[df['Fixture:']==options_multi[i]]*100
t2=b1.iloc[0,0]
money=(t1-1)*option_mtemp
option_poss_win=option_poss_win.append({'Moneywon': round(money,2)}, ignore_index=True)
option_prob_win=option_prob_win.append({'Probwin': round(t2,2)/100}, ignore_index=True)
if option_mtemp != 0:
'You have a '+str(round(t2,2))+'% chance of winning '+str(round(money,2))+' dollars by betting on a draw in '+options_multi[i]+'.'
combinations=np.zeros((2**len(options_multi),len(options_multi)))
for i in range(2**len(options_multi)):
temp=i
for j in range(len(options_multi)):
q=temp//2
mod=temp%2
combinations[i,j]=mod
temp=q
prob_dist=pd.DataFrame(columns=['Winning','Probability'])
for i in range(2**len(options_multi)):
probability=1
winning=0
for j in range(len(options_multi)):
if combinations[i,j]==1:
probability=probability*option_prob_win['Probwin'].iloc[j]
winning=winning+option_poss_win['Moneywon'].iloc[j]
else:
probability=probability*(1-option_prob_win['Probwin'].iloc[j])
winning=winning-option_amount['Moneybet'].iloc[j]
prob_dist=prob_dist.append({'Winning':winning,'Probability':probability}, ignore_index=True)
prob_dist=prob_dist.sort_values(by='Winning',ascending=True)
#prob_dist
if prob_dist.shape[0]>1:
d=alt.Chart(prob_dist).mark_bar().encode(
x='Winning',
y='Probability'
)
st.altair_chart(d)
expecval=0
for i in range(prob_dist.shape[0]):
expecval=expecval+prob_dist.iloc[i,0]*prob_dist.iloc[i,1]
'The expected value of your bets is '+str(round(expecval,2))+' dollars.'
#option_poss_win
#option_prob_win | en | 0.685401 | # -*- coding: utf-8 -*- Spyder Editor This is a temporary script file. # To make things easier later, we're also importing numpy and pandas for # working with sample data. #option = st.selectbox( # 'Which match would you like to bet on?', # df['Fixture:']) #df2 #st.write('You selected:', options_multi) # option_team[['Teamselected']].iloc[i]=option_temp #prob_dist #option_poss_win #option_prob_win | 3.397159 | 3 |
src/vtra/preprocess/cvts_speeds.py | GFDRR/vietnam-transport | 3 | 6620670 | import geopandas as gpd
import pandas as pd
import os
import numpy as np
import sys
import itertools
import ast
import math
from scipy import stats
def main():
'''
Traffic speed assignment script
vehicle_id, edge_path, time_stamp
'''
data_path,calc_path,output_path = load_config()['paths']['data'],load_config()['paths']['calc'],load_config()['paths']['output']
edges_in = os.path.join(output_path, 'transport cvts analysis', 'results', 'traffic_count','road_network.shp')
routes_in = os.path.join(output_path, 'transport cvts analysis', 'results', 'routes_collected','routes.csv')
edges = gpd.read_file(edges_in)
edges.columns = map(str.lower, edges.columns)
# get the right linelength
edges['length'] = edges.geometry.apply(line_length)
length_attr = list(zip(edges['g_id'].values.tolist(),edges['length'].values.tolist()))
routes_df = pd.read_csv(routes_in)
edge_speeds = []
for iter_,vals in routes_df.iterrows():
edge_path = ast.literal_eval(vals['edge_path'])
time_stamp = ast.literal_eval(vals['time_stamp'])
if len(edge_path) > 1:
for e in range(len(edge_path)-1):
time_diff = 1.0*(time_stamp[e+1] - time_stamp[e])
if time_diff > 0:
distance = sum([l[1] for l in length_attr if l[0] in (edge_path[e],edge_path[e+1])])
edge_l = [l[1] for l in length_attr if l[0] == edge_path[e]] + [l[1] for l in length_attr if l[0] == edge_path[e+1]]
speed = 3600.0*distance/time_diff
if speed >= 20 and speed <= 120:
edge_speeds.append((edge_path[e],speed))
edge_speeds.append((edge_path[e+1],speed))
print ('Done with iteration',iter_)
del routes_df
edge_speeds_df = pd.DataFrame(edge_speeds,columns = ['g_id','speed'])
edge_speeds_df_min = edge_speeds_df.groupby(['g_id'])['speed'].min().reset_index()
edge_speeds_df_min.rename(columns={'speed': 'min_speed'}, inplace=True)
edges = pd.merge(edges,edge_speeds_df_min,how='left', on=['g_id']).fillna(0)
del edge_speeds_df_min
edge_speeds_df_max = edge_speeds_df.groupby(['g_id'])['speed'].max().reset_index()
edge_speeds_df_max.rename(columns={'speed': 'max_speed'}, inplace=True)
edges = pd.merge(edges,edge_speeds_df_max,how='left', on=['g_id']).fillna(0)
del edge_speeds_df_max
edge_speeds_df_median = edge_speeds_df.groupby(['g_id'])['speed'].median().reset_index()
edge_speeds_df_median.rename(columns={'speed': 'md_speed'}, inplace=True)
edges = pd.merge(edges,edge_speeds_df_median,how='left', on=['g_id']).fillna(0)
del edge_speeds_df_median
edge_speeds_df_mean = edge_speeds_df.groupby(['g_id'])['speed'].mean().reset_index()
edge_speeds_df_mean.rename(columns={'speed': 'mean_speed'}, inplace=True)
edges = pd.merge(edges,edge_speeds_df_mean,how='left', on=['g_id']).fillna(0)
del edge_speeds_df_mean
edge_speeds_df_std = edge_speeds_df.groupby(['g_id'])['speed'].std().reset_index()
edge_speeds_df_std.rename(columns={'speed': 'std_speed'}, inplace=True)
edges = pd.merge(edges,edge_speeds_df_std,how='left', on=['g_id']).fillna(0)
del edge_speeds_df_std
del edge_speeds_df
edges.loc[edges['est_speed'] > 120.0,'est_speed'] = 120.0
edges.to_file(edges_in)
if __name__ == '__main__':
main() | import geopandas as gpd
import pandas as pd
import os
import numpy as np
import sys
import itertools
import ast
import math
from scipy import stats
def main():
'''
Traffic speed assignment script
vehicle_id, edge_path, time_stamp
'''
data_path,calc_path,output_path = load_config()['paths']['data'],load_config()['paths']['calc'],load_config()['paths']['output']
edges_in = os.path.join(output_path, 'transport cvts analysis', 'results', 'traffic_count','road_network.shp')
routes_in = os.path.join(output_path, 'transport cvts analysis', 'results', 'routes_collected','routes.csv')
edges = gpd.read_file(edges_in)
edges.columns = map(str.lower, edges.columns)
# get the right linelength
edges['length'] = edges.geometry.apply(line_length)
length_attr = list(zip(edges['g_id'].values.tolist(),edges['length'].values.tolist()))
routes_df = pd.read_csv(routes_in)
edge_speeds = []
for iter_,vals in routes_df.iterrows():
edge_path = ast.literal_eval(vals['edge_path'])
time_stamp = ast.literal_eval(vals['time_stamp'])
if len(edge_path) > 1:
for e in range(len(edge_path)-1):
time_diff = 1.0*(time_stamp[e+1] - time_stamp[e])
if time_diff > 0:
distance = sum([l[1] for l in length_attr if l[0] in (edge_path[e],edge_path[e+1])])
edge_l = [l[1] for l in length_attr if l[0] == edge_path[e]] + [l[1] for l in length_attr if l[0] == edge_path[e+1]]
speed = 3600.0*distance/time_diff
if speed >= 20 and speed <= 120:
edge_speeds.append((edge_path[e],speed))
edge_speeds.append((edge_path[e+1],speed))
print ('Done with iteration',iter_)
del routes_df
edge_speeds_df = pd.DataFrame(edge_speeds,columns = ['g_id','speed'])
edge_speeds_df_min = edge_speeds_df.groupby(['g_id'])['speed'].min().reset_index()
edge_speeds_df_min.rename(columns={'speed': 'min_speed'}, inplace=True)
edges = pd.merge(edges,edge_speeds_df_min,how='left', on=['g_id']).fillna(0)
del edge_speeds_df_min
edge_speeds_df_max = edge_speeds_df.groupby(['g_id'])['speed'].max().reset_index()
edge_speeds_df_max.rename(columns={'speed': 'max_speed'}, inplace=True)
edges = pd.merge(edges,edge_speeds_df_max,how='left', on=['g_id']).fillna(0)
del edge_speeds_df_max
edge_speeds_df_median = edge_speeds_df.groupby(['g_id'])['speed'].median().reset_index()
edge_speeds_df_median.rename(columns={'speed': 'md_speed'}, inplace=True)
edges = pd.merge(edges,edge_speeds_df_median,how='left', on=['g_id']).fillna(0)
del edge_speeds_df_median
edge_speeds_df_mean = edge_speeds_df.groupby(['g_id'])['speed'].mean().reset_index()
edge_speeds_df_mean.rename(columns={'speed': 'mean_speed'}, inplace=True)
edges = pd.merge(edges,edge_speeds_df_mean,how='left', on=['g_id']).fillna(0)
del edge_speeds_df_mean
edge_speeds_df_std = edge_speeds_df.groupby(['g_id'])['speed'].std().reset_index()
edge_speeds_df_std.rename(columns={'speed': 'std_speed'}, inplace=True)
edges = pd.merge(edges,edge_speeds_df_std,how='left', on=['g_id']).fillna(0)
del edge_speeds_df_std
del edge_speeds_df
edges.loc[edges['est_speed'] > 120.0,'est_speed'] = 120.0
edges.to_file(edges_in)
if __name__ == '__main__':
main() | en | 0.802021 | Traffic speed assignment script vehicle_id, edge_path, time_stamp # get the right linelength | 2.658957 | 3 |
# https://www.hackerrank.com/challenges/validate-list-of-email-address-with-filter/problem
import re
""" Not Using Regex """
def fun(email):
    """Return True if *email* is a valid address, else False.

    A valid address has the ``username@websitename.extension`` format:
      * the username contains only letters, digits, dashes and underscores;
      * the website name contains only letters and digits;
      * the extension contains only letters and is 1-3 characters long.
    """
    try:
        username, url = email.split("@")
        website, extension = url.split(".")
    except ValueError:
        # Wrong number of '@' or '.' separators.
        return False
    if not username.replace("-", "").replace("_", "").isalnum():
        return False
    if not website.isalnum():
        return False
    # Bug fix: the original accepted an empty extension (len 0 <= 3) and
    # digits in the extension, which the reference regex in fun2
    # ([a-zA-Z]{1,3}) rejects.  Keep the two validators consistent.
    if not (extension.isalpha() and len(extension) <= 3):
        return False
    return True
""" Using Regex """
def fun2(email):
    """Validate *email* against the assignment's regex.

    Returns the (truthy) match object for a valid
    ``username@websitename.extension`` address, or None otherwise.
    """
    # Username: word chars plus dashes; website: alphanumeric;
    # extension: 1-3 letters, anchored to the end of the string by $.
    pattern = r"[a-zA-Z0-9_-]+@[a-zA-Z0-9]+\.[a-zA-Z]{1,3}$"
    return re.match(pattern, email)
def filter_mail(emails):
    """Return the sublist of *emails* that pass the fun() validator."""
    return [address for address in emails if fun(address)]
if __name__ == "__main__":
    # Read the number of candidate addresses, then the addresses
    # themselves, one per line (HackerRank input format).
    n = int(input())
    # 3  (sample input: count of addresses)
    emails = []
    for _ in range(n):
        emails.append(input())
    # <EMAIL>
    # <EMAIL>
    # <EMAIL>
    # Keep only the valid addresses and print them sorted.
    filtered_emails = filter_mail(emails)
    filtered_emails.sort()
    print(filtered_emails)
    # ['<EMAIL>', '<EMAIL>', '<EMAIL>']  (sample output)
| # https://www.hackerrank.com/challenges/validate-list-of-email-address-with-filter/problem
import re
""" Not Using Regex """
def fun(email):
# return True if s is a valid email, else return False
# It must have the username@websitename.extension format type.
# The username can only contain letters, digits, dashes and underscores.
# The website name can only have letters and digits.
# The maximum length of the extension is .
try:
username, url = email.split("@")
website, extension = url.split(".")
except ValueError:
return False
if not username.replace("-", "").replace("_", "").isalnum():
return False
if not website.isalnum():
return False
if len(extension) > 3:
return False
return True
""" Using Regex """
def fun2(email):
a = re.match(r"[a-zA-Z0-9_-]+@[a-zA-Z0-9]+\.[a-zA-Z]{1,3}$", email)
# {1,3}: The last part of the regex[a-zA-Z] must have a length between 1~3
# $: Returns None if no extension exists
return a
def filter_mail(emails):
return list(filter(fun, emails))
if __name__ == "__main__":
n = int(input())
# 3
emails = []
for _ in range(n):
emails.append(input())
# <EMAIL>
# <EMAIL>
# <EMAIL>
filtered_emails = filter_mail(emails)
filtered_emails.sort()
print(filtered_emails)
# ['<EMAIL>', '<EMAIL>', '<EMAIL>']
| en | 0.710956 | # https://www.hackerrank.com/challenges/validate-list-of-email-address-with-filter/problem Not Using Regex # return True if s is a valid email, else return False # It must have the username@websitename.extension format type. # The username can only contain letters, digits, dashes and underscores. # The website name can only have letters and digits. # The maximum length of the extension is . Using Regex # {1,3}: The last part of the regex[a-zA-Z] must have a length between 1~3 # $: Returns None if no extension exists # 3 # <EMAIL> # <EMAIL> # <EMAIL> # ['<EMAIL>', '<EMAIL>', '<EMAIL>'] | 4.080815 | 4 |
# src/structure/BlockChain.py
import hashlib
import json
import re
from time import time
from colorama import Fore, Style
styleCommunication = Fore.MAGENTA + Style.BRIGHT
styleClient = Fore.GREEN + Style.BRIGHT
max_transactions=2
class BlockChain(object):
'''
Classe pai da blockchain.
Implementa os metodos essenciais para a blockchain.
'''
def __init__(self):
'''
Construtor da classe BlockChain.
Inicia a lista chain da classe e a regra da prova de trabalho.
'''
self.chain=[]#chain da blockchain
self.chain.append({
'index':len(self.chain)+1,
'timestamp':0,
'transactions':[],
'proof':0,
'previous_hash':0,
'hash_proof':0,
})
self.rule='0000'#a regra inicialmente comeca com quatro zeros
<<<<<<< HEAD
=======
>>>>>>> f44cb60... break loop if self.start_miner == false
@property
def rule(self):
'''
Metodo getter para a regra da prova de trabalho.
:returns: int -- regra da prova de trabalho.
'''
return self._rule
@rule.setter
def rule(self, rule):
'''
Metodo setter para a regra da prova de trabalho.
:param rule: regra da prova de trabalho.
'''
self._rule=rule
@staticmethod
def hash(block):
'''
Metodo estatico que gera a hash do bloco.
:param block: bloco da cadeia de blocos.
:returns: str -- hash do bloco.
'''
block_string=json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
@staticmethod
def valid_proof(last_proof, proof, rule):
'''
Metodo estatico que valida a proof gerada.
:param last_proof: ultima prova.
:param proof: prova atual.
:param rule: regra da prova de trabalho.
:returns: bool -- flag da prova de trabalho gerada.
'''
guess='{0}{1}'.format(last_proof, proof).encode()
guess_hash=hashlib.sha256(guess).hexdigest()
return (guess_hash[:len(rule)]==rule, guess_hash)
@property
def last_proof(self):
'''
Metodo getter para a ultima prova de trabalho adicionada na chain.
:returns: int -- ultima prova de trabalho.
'''
return self.last_block['proof']
@property
def chain(self):
'''
Metodo getter para a cadeia de blocos.
:returns: list -- cadeia de blocos.
'''
return self._chain
@chain.setter
def chain(self, chain):
'''
Metodo setter para a cadeia de blocos.
:param chain: cadeia de blocos.
'''
self._chain=chain
@property
def last_block(self):
'''
Metodo getter para o ultimo bloco da chain.
:returns: list -- ultimo bloco da cadeia de blocos.
'''
return self.chain[-1]
@last_block.setter
def last_block(self, block):
'''
Metodo setter para adicionar um novo block para a chain como se fosse uma atribuicao qualquer.
:param block: bloco que sera inserido na cadeia de blocos.
'''
self._chain.append(block)
def valid_chain(self, chain):
'''
Confere se a chain eh valida atraves das hashs da chain.
:param chain: cadeia de blocos.
:returns: bool -- flag da validade do ultimo bloco da cadeia de blocos.
'''
for index in range(1, len(chain)):
if chain[index]['previous_hash']!=self.hash(chain[index-1]):
return False
if not self.valid_proof(chain[index-1]['proof'], chain[index]['proof'], self.rule)[0]:
return False
return True
class MinerChain(BlockChain):
'''
Classe que extende a classe BlockChain
Classe que implementa os metodos da chain utilizada pelos mineradores da blockchain
'''
def __init__(self):
'''
Construtor da classe
Cria a lista de transacoes
Inicia a flag que diz quando um block pode comecar a ser minerado
'''
super().__init__()
self.transactions=[[]]
self.start_miner=False
self.block=None
@property
def block(self):
'''
Metodo getter do block.
:returns: list -- bloco da cadeia de blocos.
'''
return self._block
@block.setter
def block(self, block):
'''
Metodo setter do block.
:param block: novo bloco da cadeia de blocos.
'''
self._block=block
@property
def transactions(self):
'''
Metodo getter para as transacoes.
:returns: list -- lista de transacoes.
'''
return self._transactions
@transactions.setter
def transactions(self, transactions):
'''
Metodo setter das transacoes.
Vai servir para que sejam passadas as transacoes entre o antigo e o novo dono da carteira.
:param transactions: lista de transacoes.
'''
self._transactions=transactions
@property
def current_transactions(self):
'''
Metodo getter para as transacoes atuais.
:returns: dict -- transacao atual.
'''
return self.transactions[-1]
@property
def finish_transactions(self):
'''
Metodo getter para as transacoes fechadas.
:returns: dict -- transacao.
'''
return self.transactions[0] if len(self.transactions[0])==max_transactions else []
@finish_transactions.setter
def finish_transactions(self, f_t):
'''
Metodo setter para adicionar carteiras que ja foram terminadas e estao prontas para serem mineradas.
Vai servir para passar as carteiras para os mineradores comecarem a minerar.
:param f_t: carteira pronta para ser mineirada.
'''
self._transactions.insert(0, f_t)
def new_transaction(self, transaction):
'''
Metodo que recebe uma nova transacao e adiciona nas transacoes atuais.
Se o numero maximo de transacoes da carteira for atingido.
Uma nova carteira e adicionada na lista de transacoes.
:param transaction: nova transacao.
:returns: int -- indice do proximo bloco.
'''
if len(self.current_transactions) >=max_transactions:
self.transactions.append([])
self.current_transactions.append(transaction)
return self.last_block['index']+1
@property
def start_miner(self):
'''
Metodo getter para retornar o valor da flag _start_miner.
Responsavel por dizer (return True) quando uma carteira esta pronta para ser minerada.
:returns: bool -- flag de inicio da mineracao.
'''
return self._start_miner
@start_miner.setter
def start_miner(self, value):
'''
Metodo setter para mudar o valor da flag start_miner.
:param value: novo valor da flag de inicio da mineracao.
'''
self._start_miner=value
def new_block(self, proof, previous_hash=None):
'''
Cria um novo bloco com as informacoes.
:param proof: prova de trabalho.
:param previous_hash: hash do bloco anterior.
:returns: dict -- novo bloco.
'''
block={
'index':len(self.chain)+1,
'timestamp':time(),
'transactions':self.finish_transactions.copy(),
'proof':proof,
'previous_hash':previous_hash or self.hash(self.last_block),
}
if len(self.transactions) == 1:
self.finish_transactions.clear()
else:
self._transactions.pop(0)
return block
def proof_of_work(self, last_proof):
'''
Metodo de prova de trabalho.
Determina a dificuldade de minerar um block.
:param last_proof: ultima prova de trabalho.
:returns: int -- prova de trabalho.
'''
proof=0
while self.valid_proof(last_proof, proof, self.rule)[0] is False and self.start_miner==True:
proof+=1
return (proof, self.valid_proof(last_proof, proof, self.rule)[1])
def mine(self):
'''
Minera a carteira se ja estiver pronto para minerar.
Muda a flag para false e retorna o block minerado.
'''
if self.start_miner:
proof, hash_proof=self.proof_of_work(self.last_proof)
previous_hash=self.hash(self.last_block)
block=self.new_block(proof, previous_hash)
block['hash_proof']=hash_proof
if self.start_miner:
self.block=block
else:
self.block=None
self.start_miner=False
else:
self.block=None
class TraderChain(BlockChain):
'''
Classe que implementa a chain dos traders
'''
def __init__(self):
'''
Construtor da classe
'''
super().__init__()
def new_transaction(self, myIp):
'''
Cria uma nova transacao que sera enviada para a carteira ativa
:param myIp: ip da maquina.
:returns: dict -- lista de transacoes.
'''
global styleClient
transaction = {}
userInput = input(styleClient + 'Enter your message => ' + Fore.RED)
if re.search('exit', userInput):
print(styleClient + 'Ending the execution of program... ')
exit()
transaction['userInput'] = userInput
transaction['address'] = myIp
return transaction
def main():
pass
if __name__=='__main__':
main()
| <filename>src/structure/BlockChain.py
import hashlib
import json
import re
from time import time
from colorama import Fore, Style
styleCommunication = Fore.MAGENTA + Style.BRIGHT
styleClient = Fore.GREEN + Style.BRIGHT
max_transactions=2
class BlockChain(object):
'''
Classe pai da blockchain.
Implementa os metodos essenciais para a blockchain.
'''
def __init__(self):
'''
Construtor da classe BlockChain.
Inicia a lista chain da classe e a regra da prova de trabalho.
'''
self.chain=[]#chain da blockchain
self.chain.append({
'index':len(self.chain)+1,
'timestamp':0,
'transactions':[],
'proof':0,
'previous_hash':0,
'hash_proof':0,
})
self.rule='0000'#a regra inicialmente comeca com quatro zeros
<<<<<<< HEAD
=======
>>>>>>> f44cb60... break loop if self.start_miner == false
@property
def rule(self):
'''
Metodo getter para a regra da prova de trabalho.
:returns: int -- regra da prova de trabalho.
'''
return self._rule
@rule.setter
def rule(self, rule):
'''
Metodo setter para a regra da prova de trabalho.
:param rule: regra da prova de trabalho.
'''
self._rule=rule
@staticmethod
def hash(block):
'''
Metodo estatico que gera a hash do bloco.
:param block: bloco da cadeia de blocos.
:returns: str -- hash do bloco.
'''
block_string=json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
@staticmethod
def valid_proof(last_proof, proof, rule):
'''
Metodo estatico que valida a proof gerada.
:param last_proof: ultima prova.
:param proof: prova atual.
:param rule: regra da prova de trabalho.
:returns: bool -- flag da prova de trabalho gerada.
'''
guess='{0}{1}'.format(last_proof, proof).encode()
guess_hash=hashlib.sha256(guess).hexdigest()
return (guess_hash[:len(rule)]==rule, guess_hash)
@property
def last_proof(self):
'''
Metodo getter para a ultima prova de trabalho adicionada na chain.
:returns: int -- ultima prova de trabalho.
'''
return self.last_block['proof']
@property
def chain(self):
'''
Metodo getter para a cadeia de blocos.
:returns: list -- cadeia de blocos.
'''
return self._chain
@chain.setter
def chain(self, chain):
'''
Metodo setter para a cadeia de blocos.
:param chain: cadeia de blocos.
'''
self._chain=chain
@property
def last_block(self):
'''
Metodo getter para o ultimo bloco da chain.
:returns: list -- ultimo bloco da cadeia de blocos.
'''
return self.chain[-1]
@last_block.setter
def last_block(self, block):
'''
Metodo setter para adicionar um novo block para a chain como se fosse uma atribuicao qualquer.
:param block: bloco que sera inserido na cadeia de blocos.
'''
self._chain.append(block)
def valid_chain(self, chain):
'''
Confere se a chain eh valida atraves das hashs da chain.
:param chain: cadeia de blocos.
:returns: bool -- flag da validade do ultimo bloco da cadeia de blocos.
'''
for index in range(1, len(chain)):
if chain[index]['previous_hash']!=self.hash(chain[index-1]):
return False
if not self.valid_proof(chain[index-1]['proof'], chain[index]['proof'], self.rule)[0]:
return False
return True
class MinerChain(BlockChain):
'''
Classe que extende a classe BlockChain
Classe que implementa os metodos da chain utilizada pelos mineradores da blockchain
'''
def __init__(self):
'''
Construtor da classe
Cria a lista de transacoes
Inicia a flag que diz quando um block pode comecar a ser minerado
'''
super().__init__()
self.transactions=[[]]
self.start_miner=False
self.block=None
@property
def block(self):
'''
Metodo getter do block.
:returns: list -- bloco da cadeia de blocos.
'''
return self._block
@block.setter
def block(self, block):
'''
Metodo setter do block.
:param block: novo bloco da cadeia de blocos.
'''
self._block=block
@property
def transactions(self):
'''
Metodo getter para as transacoes.
:returns: list -- lista de transacoes.
'''
return self._transactions
@transactions.setter
def transactions(self, transactions):
'''
Metodo setter das transacoes.
Vai servir para que sejam passadas as transacoes entre o antigo e o novo dono da carteira.
:param transactions: lista de transacoes.
'''
self._transactions=transactions
@property
def current_transactions(self):
'''
Metodo getter para as transacoes atuais.
:returns: dict -- transacao atual.
'''
return self.transactions[-1]
@property
def finish_transactions(self):
'''
Metodo getter para as transacoes fechadas.
:returns: dict -- transacao.
'''
return self.transactions[0] if len(self.transactions[0])==max_transactions else []
@finish_transactions.setter
def finish_transactions(self, f_t):
'''
Metodo setter para adicionar carteiras que ja foram terminadas e estao prontas para serem mineradas.
Vai servir para passar as carteiras para os mineradores comecarem a minerar.
:param f_t: carteira pronta para ser mineirada.
'''
self._transactions.insert(0, f_t)
def new_transaction(self, transaction):
'''
Metodo que recebe uma nova transacao e adiciona nas transacoes atuais.
Se o numero maximo de transacoes da carteira for atingido.
Uma nova carteira e adicionada na lista de transacoes.
:param transaction: nova transacao.
:returns: int -- indice do proximo bloco.
'''
if len(self.current_transactions) >=max_transactions:
self.transactions.append([])
self.current_transactions.append(transaction)
return self.last_block['index']+1
@property
def start_miner(self):
'''
Metodo getter para retornar o valor da flag _start_miner.
Responsavel por dizer (return True) quando uma carteira esta pronta para ser minerada.
:returns: bool -- flag de inicio da mineracao.
'''
return self._start_miner
@start_miner.setter
def start_miner(self, value):
'''
Metodo setter para mudar o valor da flag start_miner.
:param value: novo valor da flag de inicio da mineracao.
'''
self._start_miner=value
def new_block(self, proof, previous_hash=None):
'''
Cria um novo bloco com as informacoes.
:param proof: prova de trabalho.
:param previous_hash: hash do bloco anterior.
:returns: dict -- novo bloco.
'''
block={
'index':len(self.chain)+1,
'timestamp':time(),
'transactions':self.finish_transactions.copy(),
'proof':proof,
'previous_hash':previous_hash or self.hash(self.last_block),
}
if len(self.transactions) == 1:
self.finish_transactions.clear()
else:
self._transactions.pop(0)
return block
def proof_of_work(self, last_proof):
'''
Metodo de prova de trabalho.
Determina a dificuldade de minerar um block.
:param last_proof: ultima prova de trabalho.
:returns: int -- prova de trabalho.
'''
proof=0
while self.valid_proof(last_proof, proof, self.rule)[0] is False and self.start_miner==True:
proof+=1
return (proof, self.valid_proof(last_proof, proof, self.rule)[1])
def mine(self):
'''
Minera a carteira se ja estiver pronto para minerar.
Muda a flag para false e retorna o block minerado.
'''
if self.start_miner:
proof, hash_proof=self.proof_of_work(self.last_proof)
previous_hash=self.hash(self.last_block)
block=self.new_block(proof, previous_hash)
block['hash_proof']=hash_proof
if self.start_miner:
self.block=block
else:
self.block=None
self.start_miner=False
else:
self.block=None
class TraderChain(BlockChain):
'''
Classe que implementa a chain dos traders
'''
def __init__(self):
'''
Construtor da classe
'''
super().__init__()
def new_transaction(self, myIp):
'''
Cria uma nova transacao que sera enviada para a carteira ativa
:param myIp: ip da maquina.
:returns: dict -- lista de transacoes.
'''
global styleClient
transaction = {}
userInput = input(styleClient + 'Enter your message => ' + Fore.RED)
if re.search('exit', userInput):
print(styleClient + 'Ending the execution of program... ')
exit()
transaction['userInput'] = userInput
transaction['address'] = myIp
return transaction
def main():
pass
if __name__=='__main__':
main()
| pt | 0.791051 | Classe pai da blockchain. Implementa os metodos essenciais para a blockchain. Construtor da classe BlockChain. Inicia a lista chain da classe e a regra da prova de trabalho. #chain da blockchain Metodo getter para a regra da prova de trabalho. :returns: int -- regra da prova de trabalho. Metodo setter para a regra da prova de trabalho. :param rule: regra da prova de trabalho. Metodo estatico que gera a hash do bloco. :param block: bloco da cadeia de blocos. :returns: str -- hash do bloco. Metodo estatico que valida a proof gerada. :param last_proof: ultima prova. :param proof: prova atual. :param rule: regra da prova de trabalho. :returns: bool -- flag da prova de trabalho gerada. Metodo getter para a ultima prova de trabalho adicionada na chain. :returns: int -- ultima prova de trabalho. Metodo getter para a cadeia de blocos. :returns: list -- cadeia de blocos. Metodo setter para a cadeia de blocos. :param chain: cadeia de blocos. Metodo getter para o ultimo bloco da chain. :returns: list -- ultimo bloco da cadeia de blocos. Metodo setter para adicionar um novo block para a chain como se fosse uma atribuicao qualquer. :param block: bloco que sera inserido na cadeia de blocos. Confere se a chain eh valida atraves das hashs da chain. :param chain: cadeia de blocos. :returns: bool -- flag da validade do ultimo bloco da cadeia de blocos. Classe que extende a classe BlockChain Classe que implementa os metodos da chain utilizada pelos mineradores da blockchain Construtor da classe Cria a lista de transacoes Inicia a flag que diz quando um block pode comecar a ser minerado Metodo getter do block. :returns: list -- bloco da cadeia de blocos. Metodo setter do block. :param block: novo bloco da cadeia de blocos. Metodo getter para as transacoes. :returns: list -- lista de transacoes. Metodo setter das transacoes. Vai servir para que sejam passadas as transacoes entre o antigo e o novo dono da carteira. :param transactions: lista de transacoes. 
Metodo getter para as transacoes atuais. :returns: dict -- transacao atual. Metodo getter para as transacoes fechadas. :returns: dict -- transacao. Metodo setter para adicionar carteiras que ja foram terminadas e estao prontas para serem mineradas. Vai servir para passar as carteiras para os mineradores comecarem a minerar. :param f_t: carteira pronta para ser mineirada. Metodo que recebe uma nova transacao e adiciona nas transacoes atuais. Se o numero maximo de transacoes da carteira for atingido. Uma nova carteira e adicionada na lista de transacoes. :param transaction: nova transacao. :returns: int -- indice do proximo bloco. Metodo getter para retornar o valor da flag _start_miner. Responsavel por dizer (return True) quando uma carteira esta pronta para ser minerada. :returns: bool -- flag de inicio da mineracao. Metodo setter para mudar o valor da flag start_miner. :param value: novo valor da flag de inicio da mineracao. Cria um novo bloco com as informacoes. :param proof: prova de trabalho. :param previous_hash: hash do bloco anterior. :returns: dict -- novo bloco. Metodo de prova de trabalho. Determina a dificuldade de minerar um block. :param last_proof: ultima prova de trabalho. :returns: int -- prova de trabalho. Minera a carteira se ja estiver pronto para minerar. Muda a flag para false e retorna o block minerado. Classe que implementa a chain dos traders Construtor da classe Cria uma nova transacao que sera enviada para a carteira ativa :param myIp: ip da maquina. :returns: dict -- lista de transacoes. | 2.814598 | 3 |
translator.py | catskillsresearch/openasr20 | 0 | 6620673 | import os, pickle, logging
from multiprocessing import Pool
from Cfg import Cfg
from load_pretrained_model import load_pretrained_model
from RecordingCorpus import RecordingCorpus
from listen_and_transcribe import listen_and_transcribe
from tqdm.auto import tqdm
logging.getLogger('nemo_logger').setLevel(logging.ERROR)
def translator(language, phase, release, max_duration):
C = Cfg('NIST', 16000, language, phase, release)
model = load_pretrained_model(C, 0)
with Pool(16) as pool:
recordings = RecordingCorpus(C, pool)
tdir= f'pred/{language}/{phase}/{release}'
os.system(f'mkdir -p {tdir}')
translations = []
for artifact in recordings.artifacts:
key = artifact.key
(lng,tfn)=key
print("key", key)
save_fn=f'{tdir}/transcription_{lng}_{tfn}.pkl'
if os.path.exists(save_fn):
print("finished", key)
continue
audio = artifact.source.value
gold=artifact.gold()
transcript=listen_and_transcribe(C, model, max_duration, gold, audio)
translations.append((key, transcript))
with open(save_fn, 'wb') as f:
pickle.dump(transcript, f)
print('saved', save_fn)
print()
if __name__=="__main__":
import sys
language, phase, release, max_duration = sys.argv[1:]
max_duration=float(max_duration)
translator(language, phase, release, max_duration)
| import os, pickle, logging
from multiprocessing import Pool
from Cfg import Cfg
from load_pretrained_model import load_pretrained_model
from RecordingCorpus import RecordingCorpus
from listen_and_transcribe import listen_and_transcribe
from tqdm.auto import tqdm
logging.getLogger('nemo_logger').setLevel(logging.ERROR)
def translator(language, phase, release, max_duration):
C = Cfg('NIST', 16000, language, phase, release)
model = load_pretrained_model(C, 0)
with Pool(16) as pool:
recordings = RecordingCorpus(C, pool)
tdir= f'pred/{language}/{phase}/{release}'
os.system(f'mkdir -p {tdir}')
translations = []
for artifact in recordings.artifacts:
key = artifact.key
(lng,tfn)=key
print("key", key)
save_fn=f'{tdir}/transcription_{lng}_{tfn}.pkl'
if os.path.exists(save_fn):
print("finished", key)
continue
audio = artifact.source.value
gold=artifact.gold()
transcript=listen_and_transcribe(C, model, max_duration, gold, audio)
translations.append((key, transcript))
with open(save_fn, 'wb') as f:
pickle.dump(transcript, f)
print('saved', save_fn)
print()
if __name__=="__main__":
import sys
language, phase, release, max_duration = sys.argv[1:]
max_duration=float(max_duration)
translator(language, phase, release, max_duration)
| none | 1 | 2.133396 | 2 | |
tests/test_tuning.py | cristianmatache/HOLA | 10 | 6620674 | # Copyright 2021 BlackRock, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Sequence
import numpy as np
import numpy.typing as npt
import pytest
from hola.algorithm import HOLA
from hola.objective import FuncName, ObjectiveConfig, ObjectivesSpec
from hola.params import ParamConfig, ParamsSpec
from hola.tune import Tuner, tune
PARAMS_CONFIG: list[ParamsSpec] = [
{"x_1": ParamConfig(min=-2.0, max=2.0), "x_2": ParamConfig(min=-1.0, max=3.0)},
{"x_1": {"min": -2.0, "max": 2.0}, "x_2": {"min": -1.0, "max": 3.0}},
]
OBJECTIVE_CONFIG: list[ObjectivesSpec] = [
{"f": ObjectiveConfig(target=10, limit=10)},
{"f": {"target": 10.0, "limit": 10.0}},
]
SEED_0_SCORE = {"f": -332.84684342589117}
def rosenbrock(xs: Sequence[float] | npt.NDArray[np.floating]) -> float:
x, y = xs
a = 1.0 - x
b = y - x * x
return a * a + b * b * 100.0
def hyper_rose(x_1: float, x_2: float) -> dict[FuncName, float]:
return {"f": -rosenbrock([x_1, x_2])}
@pytest.mark.parametrize("n_jobs", [1, 2])
@pytest.mark.parametrize("param_config", PARAMS_CONFIG)
@pytest.mark.parametrize("objective_config", OBJECTIVE_CONFIG)
def test_tuner(n_jobs: int, param_config: ParamsSpec, objective_config: ObjectivesSpec) -> None:
np.random.seed(0)
tuner = Tuner(
HOLA(
params_config=param_config,
objectives_config=objective_config,
min_samples=30,
gmm_sampler="uniform",
explore_sampler="uniform",
top_frac=0.25,
)
)
tuner.tune(hyper_rose, num_runs=100, n_jobs=n_jobs)
assert tuner.get_best_scores() == SEED_0_SCORE
@pytest.mark.parametrize("param_config", PARAMS_CONFIG)
@pytest.mark.parametrize("objective_config", OBJECTIVE_CONFIG)
def test_hola(param_config: ParamsSpec, objective_config: ObjectivesSpec) -> None:
np.random.seed(0)
exp = HOLA(
param_config, objective_config, min_samples=30, top_frac=0.25, gmm_sampler="uniform", explore_sampler="uniform"
)
assert exp.params_config == PARAMS_CONFIG[0]
assert exp.objectives_config == OBJECTIVE_CONFIG[0]
for _ in range(200):
params = exp.sample()
value = hyper_rose(**params)
exp.add_run(value, params)
# print(_, params, value)
assert exp.get_best_scores() == SEED_0_SCORE
@pytest.mark.parametrize("param_config", PARAMS_CONFIG)
@pytest.mark.parametrize("objective_config", OBJECTIVE_CONFIG)
def test_tune(param_config: ParamsSpec, objective_config: ObjectivesSpec) -> None:
tune(hyper_rose, param_config, objective_config)
| # Copyright 2021 BlackRock, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Sequence
import numpy as np
import numpy.typing as npt
import pytest
from hola.algorithm import HOLA
from hola.objective import FuncName, ObjectiveConfig, ObjectivesSpec
from hola.params import ParamConfig, ParamsSpec
from hola.tune import Tuner, tune
PARAMS_CONFIG: list[ParamsSpec] = [
{"x_1": ParamConfig(min=-2.0, max=2.0), "x_2": ParamConfig(min=-1.0, max=3.0)},
{"x_1": {"min": -2.0, "max": 2.0}, "x_2": {"min": -1.0, "max": 3.0}},
]
OBJECTIVE_CONFIG: list[ObjectivesSpec] = [
{"f": ObjectiveConfig(target=10, limit=10)},
{"f": {"target": 10.0, "limit": 10.0}},
]
SEED_0_SCORE = {"f": -332.84684342589117}
def rosenbrock(xs: Sequence[float] | npt.NDArray[np.floating]) -> float:
x, y = xs
a = 1.0 - x
b = y - x * x
return a * a + b * b * 100.0
def hyper_rose(x_1: float, x_2: float) -> dict[FuncName, float]:
return {"f": -rosenbrock([x_1, x_2])}
@pytest.mark.parametrize("n_jobs", [1, 2])
@pytest.mark.parametrize("param_config", PARAMS_CONFIG)
@pytest.mark.parametrize("objective_config", OBJECTIVE_CONFIG)
def test_tuner(n_jobs: int, param_config: ParamsSpec, objective_config: ObjectivesSpec) -> None:
np.random.seed(0)
tuner = Tuner(
HOLA(
params_config=param_config,
objectives_config=objective_config,
min_samples=30,
gmm_sampler="uniform",
explore_sampler="uniform",
top_frac=0.25,
)
)
tuner.tune(hyper_rose, num_runs=100, n_jobs=n_jobs)
assert tuner.get_best_scores() == SEED_0_SCORE
@pytest.mark.parametrize("param_config", PARAMS_CONFIG)
@pytest.mark.parametrize("objective_config", OBJECTIVE_CONFIG)
def test_hola(param_config: ParamsSpec, objective_config: ObjectivesSpec) -> None:
np.random.seed(0)
exp = HOLA(
param_config, objective_config, min_samples=30, top_frac=0.25, gmm_sampler="uniform", explore_sampler="uniform"
)
assert exp.params_config == PARAMS_CONFIG[0]
assert exp.objectives_config == OBJECTIVE_CONFIG[0]
for _ in range(200):
params = exp.sample()
value = hyper_rose(**params)
exp.add_run(value, params)
# print(_, params, value)
assert exp.get_best_scores() == SEED_0_SCORE
@pytest.mark.parametrize("param_config", PARAMS_CONFIG)
@pytest.mark.parametrize("objective_config", OBJECTIVE_CONFIG)
def test_tune(param_config: ParamsSpec, objective_config: ObjectivesSpec) -> None:
tune(hyper_rose, param_config, objective_config)
| en | 0.834587 | # Copyright 2021 BlackRock, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # print(_, params, value) | 2.018127 | 2 |
main.py | ayust/pluss | 8 | 6620675 | """pluss, a feed proxy for G+"""
import logging
from pluss.app import app
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
@app.before_first_request
def setup_logging():
if not app.debug:
# In production mode, add log handler to sys.stderr.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
"%(asctime)s [%(process)d] [%(levelname)s] %(pathname)s:%(lineno)d %(message)s",
"%Y-%m-%d %H:%M:%S",
))
app.logger.addHandler(handler)
app.logger.setLevel(logging.WARNING)
if __name__ == '__main__':
app.run(host='pluss.aiiane.com', port=54321, debug=True)
# vim: set ts=4 sts=4 sw=4 et:
| """pluss, a feed proxy for G+"""
import logging
from pluss.app import app
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
@app.before_first_request
def setup_logging():
if not app.debug:
# In production mode, add log handler to sys.stderr.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
"%(asctime)s [%(process)d] [%(levelname)s] %(pathname)s:%(lineno)d %(message)s",
"%Y-%m-%d %H:%M:%S",
))
app.logger.addHandler(handler)
app.logger.setLevel(logging.WARNING)
if __name__ == '__main__':
app.run(host='pluss.aiiane.com', port=54321, debug=True)
# vim: set ts=4 sts=4 sw=4 et:
| en | 0.302691 | pluss, a feed proxy for G+ # In production mode, add log handler to sys.stderr. # vim: set ts=4 sts=4 sw=4 et: | 1.942349 | 2 |
2019/day01-rocket-equation/fuel.py | rajitbanerjee/advent-of-code | 0 | 6620676 | #!/usr/bin/env python
def getTotalFuel(masses: list) -> int:
return sum([m//3 - 2 for m in masses])
def getMoreFuel(masses: list) -> int:
return sum([getSum(m) for m in masses])
def getSum(mass: int) -> int:
total = 0
while True:
mass = mass//3 - 2
if mass <= 0:
break
total += mass
return total
if __name__ == '__main__':
with open('day01.in') as f:
masses = [int(m.strip()) for m in f.readlines()]
print(f"Part 1 = {getTotalFuel(masses)}")
print(f"Part 2 = {getMoreFuel(masses)}")
| #!/usr/bin/env python
def getTotalFuel(masses: list) -> int:
return sum([m//3 - 2 for m in masses])
def getMoreFuel(masses: list) -> int:
return sum([getSum(m) for m in masses])
def getSum(mass: int) -> int:
total = 0
while True:
mass = mass//3 - 2
if mass <= 0:
break
total += mass
return total
if __name__ == '__main__':
with open('day01.in') as f:
masses = [int(m.strip()) for m in f.readlines()]
print(f"Part 1 = {getTotalFuel(masses)}")
print(f"Part 2 = {getMoreFuel(masses)}")
| ru | 0.26433 | #!/usr/bin/env python | 3.835754 | 4 |
source/memory.py | samkovaly/quoridor-game-reinforcement-learning | 2 | 6620677 |
import random
class MemoryInstance:
""" remember a specific state -> action -> reward, next_state training example """
def __init__(self, state, action, reward, next_state):
self.state = state
self.action = action
self.reward = reward
self.next_state = next_state
def asTuple(self):
""" Returns memory instance as a length 4 tuple """
return (self.state, self.action, self.reward, self.next_state)
class Memory:
""" Memory of recent memory_instances (training examples) that the agent has encountered """
def __init__(self, max_memory):
self.max_memory = max_memory
self.samples = []
def add_sample(self, memory_instance):
""" Adds a memory_instance sample in queue fashion (deletes old ones) """
self.samples.append(memory_instance.asTuple())
if len(self.samples) > self.max_memory:
del self.samples[0]
def sample(self, no_samples):
""" Randomly samples no_samples from recent memory, or all of the samples if there aren't enough"""
if no_samples > len(self.samples):
return random.sample(self.samples, len(self.samples))
else:
return random.sample(self.samples, no_samples) |
import random
class MemoryInstance:
""" remember a specific state -> action -> reward, next_state training example """
def __init__(self, state, action, reward, next_state):
self.state = state
self.action = action
self.reward = reward
self.next_state = next_state
def asTuple(self):
""" Returns memory instance as a length 4 tuple """
return (self.state, self.action, self.reward, self.next_state)
class Memory:
""" Memory of recent memory_instances (training examples) that the agent has encountered """
def __init__(self, max_memory):
self.max_memory = max_memory
self.samples = []
def add_sample(self, memory_instance):
""" Adds a memory_instance sample in queue fashion (deletes old ones) """
self.samples.append(memory_instance.asTuple())
if len(self.samples) > self.max_memory:
del self.samples[0]
def sample(self, no_samples):
""" Randomly samples no_samples from recent memory, or all of the samples if there aren't enough"""
if no_samples > len(self.samples):
return random.sample(self.samples, len(self.samples))
else:
return random.sample(self.samples, no_samples) | en | 0.851843 | remember a specific state -> action -> reward, next_state training example Returns memory instance as a length 4 tuple Memory of recent memory_instances (training examples) that the agent has encountered Adds a memory_instance sample in queue fashion (deletes old ones) Randomly samples no_samples from recent memory, or all of the samples if there aren't enough | 3.822925 | 4 |
build/lib/tnetwork/DCD/analytics/dynamic_community.py | Yquetzal/tnetwork | 4 | 6620678 | import statistics
from tnetwork.utils.community_utils import jaccard
import networkx as nx
def community_duration(a_dyn_com):
"""
Community duration
:param a_dyn_com: community as sortedDict of snapshots
:return:
"""
return len(a_dyn_com)
def community_avg_size(a_dyn_com):
"""
Community average size
:param a_dyn_com: community as sortedDict of snapshots
:return:
"""
return statistics.mean([len(x) for x in a_dyn_com.values()])
def community_avg_stability(a_dyn_com):
"""
Community average jaccard change
:param a_dyn_com: community as sortedDict of snapshots
:return:
"""
if community_duration(a_dyn_com) == 1:
return None
changes = []
ts = list(a_dyn_com.keys())
for i in range(len(ts) - 1):
changes.append(jaccard(a_dyn_com[ts[i]], a_dyn_com[ts[i + 1]]))
return statistics.mean(changes)
def community_avg_score(a_dyn_com,dyn_graph,score=nx.conductance):
scores = []
try:
for t,nodes in a_dyn_com.items():
scores.append(score(dyn_graph.snapshots(t),nodes))
except:
return None
return statistics.mean(scores)
def community_avg_subgraph_property(a_dyn_com,dyn_graph,property=nx.transitivity):
scores = []
for t,nodes in a_dyn_com.items():
#print(t, nodes)
#print(dyn_graph.snapshots(t).nodes)
try:
subgraph = dyn_graph.snapshots(t).subgraph(nodes)
#print(subgraph.degree)
scores.append(property(subgraph))
except:
pass
if len(scores)==0:
return None
return statistics.mean(scores)
| import statistics
from tnetwork.utils.community_utils import jaccard
import networkx as nx
def community_duration(a_dyn_com):
"""
Community duration
:param a_dyn_com: community as sortedDict of snapshots
:return:
"""
return len(a_dyn_com)
def community_avg_size(a_dyn_com):
"""
Community average size
:param a_dyn_com: community as sortedDict of snapshots
:return:
"""
return statistics.mean([len(x) for x in a_dyn_com.values()])
def community_avg_stability(a_dyn_com):
"""
Community average jaccard change
:param a_dyn_com: community as sortedDict of snapshots
:return:
"""
if community_duration(a_dyn_com) == 1:
return None
changes = []
ts = list(a_dyn_com.keys())
for i in range(len(ts) - 1):
changes.append(jaccard(a_dyn_com[ts[i]], a_dyn_com[ts[i + 1]]))
return statistics.mean(changes)
def community_avg_score(a_dyn_com,dyn_graph,score=nx.conductance):
scores = []
try:
for t,nodes in a_dyn_com.items():
scores.append(score(dyn_graph.snapshots(t),nodes))
except:
return None
return statistics.mean(scores)
def community_avg_subgraph_property(a_dyn_com,dyn_graph,property=nx.transitivity):
scores = []
for t,nodes in a_dyn_com.items():
#print(t, nodes)
#print(dyn_graph.snapshots(t).nodes)
try:
subgraph = dyn_graph.snapshots(t).subgraph(nodes)
#print(subgraph.degree)
scores.append(property(subgraph))
except:
pass
if len(scores)==0:
return None
return statistics.mean(scores)
| en | 0.642892 | Community duration :param a_dyn_com: community as sortedDict of snapshots :return: Community average size :param a_dyn_com: community as sortedDict of snapshots :return: Community average jaccard change :param a_dyn_com: community as sortedDict of snapshots :return: #print(t, nodes) #print(dyn_graph.snapshots(t).nodes) #print(subgraph.degree) | 2.742327 | 3 |
yahoostats/evaluator.py | webclinic017/yahoostats | 14 | 6620679 | from yahoostats.selenium_stats import Webscraper
from yahoostats.requests_stats import yahoo_api_financials, morningstar_stats, seeking_alpha
from yahoostats.requests_stats import zacks_stats, filter_reuters, reuters_stats
from yahoostats.requests_stats import tipranks_price, tipranks_analysis, tipranks_dividends
import configparser
from pprint import pprint as pp
from yahoostats.logger import logger
import time
import pandas as pd
import logging
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('selenium').setLevel(logging.WARNING)
ticker = 'GOOGL'
stock_list = ['GOOGL', 'MU']
def combine_stats(stock_list, browser="Chrome"):
"""
Merge the data from requests and selenium into pandas df.
Inputs:
------------------
stocklist - list with stock symbols
browser = "Chrome" / "Firefox" for default selenium webscraping browser
Outputs:
------------------
Pandas DataFrame with the webscraped data from:
-yahoo finance webscraped with selenium - "PEG Ratio"
-yahoo finance API - all rows with yf prefix
-tipranks - all rows with prefix tr
-reuters - all rows with prefix r
-morningstar -the row with "ms" label represents the star rating 0-5
-zacks - racks label - represents buy/sell rating
"""
logger.info(f'Getting data for {stock_list}')
stock_data = {}
tr = Webscraper(browser)
tr.start()
tr.accept_yf_cockies()
for stock in stock_list:
logger.info(f'Evaluator for {stock}')
stock_data.update({stock: {}})
yf_rate = yahoo_api_financials(stock)
ms_rate = morningstar_stats(stock)
zs_rate = zacks_stats(stock)
re_rate = filter_reuters(reuters_stats(stock))
tr_analys = tipranks_analysis(stock)
tr_rate = tipranks_price(stock)
tr_divid = tipranks_dividends(stock)
sa_rate = seeking_alpha(stock)
yf_pegr = tr.get_yahoo_statistics(stock)
wallst_eps = tr.estimize_eps(stock)
stock_data[stock].update(tr_analys)
stock_data[stock].update(tr_rate)
stock_data[stock].update(tr_divid)
stock_data[stock].update(yf_pegr)
stock_data[stock].update(sa_rate)
stock_data[stock].update(yf_rate)
stock_data[stock].update(ms_rate)
stock_data[stock].update(zs_rate)
stock_data[stock].update(re_rate)
stock_data[stock].update(wallst_eps)
time.sleep(0.5)
tr.stop()
logger.info(f'Merging data for {stock_list}')
pd_df = pd.DataFrame(stock_data)
return pd_df
def future_dividends(df):
"""
Inputs:
------------------
df - webscraped data
Outputs:
------------------
Pandas DataFrame with future dividends calendar
"""
logger.info(f'Cleaning webscraped data for for future dividends')
df = df.T
div_data = df[['tr_price','tr_next_ex_dividend_date', 'tr_dividend_amount','r_div_yield5','r_div_yield']]
div_data['tr_next_ex_dividend_date'] = pd.to_datetime(div_data['tr_next_ex_dividend_date'])
div_data = div_data.sort_values(by=['tr_next_ex_dividend_date'])
div_data['tr_dividend_amount'] = div_data['tr_dividend_amount'].str.replace('Currency in US Dollar','$')
# div_data[div_data['tr_next_ex_dividend_date'] > pd.to_datetime('today')]
return div_data
# if __name__ == "__main__":
# webscraped_data = combine_stats(stock_list)
# pp(webscraped_data)
| from yahoostats.selenium_stats import Webscraper
from yahoostats.requests_stats import yahoo_api_financials, morningstar_stats, seeking_alpha
from yahoostats.requests_stats import zacks_stats, filter_reuters, reuters_stats
from yahoostats.requests_stats import tipranks_price, tipranks_analysis, tipranks_dividends
import configparser
from pprint import pprint as pp
from yahoostats.logger import logger
import time
import pandas as pd
import logging
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('selenium').setLevel(logging.WARNING)
ticker = 'GOOGL'
stock_list = ['GOOGL', 'MU']
def combine_stats(stock_list, browser="Chrome"):
"""
Merge the data from requests and selenium into pandas df.
Inputs:
------------------
stocklist - list with stock symbols
browser = "Chrome" / "Firefox" for default selenium webscraping browser
Outputs:
------------------
Pandas DataFrame with the webscraped data from:
-yahoo finance webscraped with selenium - "PEG Ratio"
-yahoo finance API - all rows with yf prefix
-tipranks - all rows with prefix tr
-reuters - all rows with prefix r
-morningstar -the row with "ms" label represents the star rating 0-5
-zacks - racks label - represents buy/sell rating
"""
logger.info(f'Getting data for {stock_list}')
stock_data = {}
tr = Webscraper(browser)
tr.start()
tr.accept_yf_cockies()
for stock in stock_list:
logger.info(f'Evaluator for {stock}')
stock_data.update({stock: {}})
yf_rate = yahoo_api_financials(stock)
ms_rate = morningstar_stats(stock)
zs_rate = zacks_stats(stock)
re_rate = filter_reuters(reuters_stats(stock))
tr_analys = tipranks_analysis(stock)
tr_rate = tipranks_price(stock)
tr_divid = tipranks_dividends(stock)
sa_rate = seeking_alpha(stock)
yf_pegr = tr.get_yahoo_statistics(stock)
wallst_eps = tr.estimize_eps(stock)
stock_data[stock].update(tr_analys)
stock_data[stock].update(tr_rate)
stock_data[stock].update(tr_divid)
stock_data[stock].update(yf_pegr)
stock_data[stock].update(sa_rate)
stock_data[stock].update(yf_rate)
stock_data[stock].update(ms_rate)
stock_data[stock].update(zs_rate)
stock_data[stock].update(re_rate)
stock_data[stock].update(wallst_eps)
time.sleep(0.5)
tr.stop()
logger.info(f'Merging data for {stock_list}')
pd_df = pd.DataFrame(stock_data)
return pd_df
def future_dividends(df):
"""
Inputs:
------------------
df - webscraped data
Outputs:
------------------
Pandas DataFrame with future dividends calendar
"""
logger.info(f'Cleaning webscraped data for for future dividends')
df = df.T
div_data = df[['tr_price','tr_next_ex_dividend_date', 'tr_dividend_amount','r_div_yield5','r_div_yield']]
div_data['tr_next_ex_dividend_date'] = pd.to_datetime(div_data['tr_next_ex_dividend_date'])
div_data = div_data.sort_values(by=['tr_next_ex_dividend_date'])
div_data['tr_dividend_amount'] = div_data['tr_dividend_amount'].str.replace('Currency in US Dollar','$')
# div_data[div_data['tr_next_ex_dividend_date'] > pd.to_datetime('today')]
return div_data
# if __name__ == "__main__":
# webscraped_data = combine_stats(stock_list)
# pp(webscraped_data)
| en | 0.5042 | Merge the data from requests and selenium into pandas df. Inputs: ------------------ stocklist - list with stock symbols browser = "Chrome" / "Firefox" for default selenium webscraping browser Outputs: ------------------ Pandas DataFrame with the webscraped data from: -yahoo finance webscraped with selenium - "PEG Ratio" -yahoo finance API - all rows with yf prefix -tipranks - all rows with prefix tr -reuters - all rows with prefix r -morningstar -the row with "ms" label represents the star rating 0-5 -zacks - racks label - represents buy/sell rating Inputs: ------------------ df - webscraped data Outputs: ------------------ Pandas DataFrame with future dividends calendar # div_data[div_data['tr_next_ex_dividend_date'] > pd.to_datetime('today')] # if __name__ == "__main__": # webscraped_data = combine_stats(stock_list) # pp(webscraped_data) | 3.006835 | 3 |
hello/apps/hello/urls.py | tobyt99/nick-test | 1 | 6620680 | from django.conf.urls import url
from hello.apps.hello import views
urlpatterns = [
url(r'^$', views.index, name='index'),
]
| from django.conf.urls import url
from hello.apps.hello import views
urlpatterns = [
url(r'^$', views.index, name='index'),
]
| none | 1 | 1.510112 | 2 | |
app/tests/integration/ExerciseMongoQueryRepositoryIntegrationTest.py | GPortas/Playgroundb | 1 | 6620681 | from app.api.data.query.ExerciseMongoQueryRepository import ExerciseMongoQueryRepository
from app.api.domain.services.data.query.errors.ResourceNotFoundQueryError import ResourceNotFoundQueryError
from tests.integration.PdbMongoIntegrationTestBase import PdbMongoIntegrationTestBase
class ExerciseMongoQueryRepositoryIntegrationTest(PdbMongoIntegrationTestBase):
def setUp(self):
self.fixtures = ['Exercise']
super(ExerciseMongoQueryRepositoryIntegrationTest, self).setUp()
self.sut = ExerciseMongoQueryRepository()
def tearDown(self):
self.db.exercises.delete_many({})
def test_getExerciseById_calledWithValidId_returnCorrectResult(self):
exercise = self.sut.get_exercise_by_id(exercise_id='4d128b6ea794fc13a8000003')
self.assertEqual('fakequestion_4', exercise.get_question())
def test_getExerciseById_calledWithUnexistentId_throwResourceNotFoundQueryError(self):
self.assertRaises(ResourceNotFoundQueryError, self.sut.get_exercise_by_id,
exercise_id='4d128b6ea794fc13a8000009')
def test_getExercisesList_called_returnCorrectResult(self):
actual = self.sut.get_exercises_list(limit=3)
actual_map = list(map(lambda x: x.get_question(), actual))
expected = ['fakequestion_1', 'fakequestion_2', 'fakequestion_3']
self.assertEqual(actual_map, expected)
| from app.api.data.query.ExerciseMongoQueryRepository import ExerciseMongoQueryRepository
from app.api.domain.services.data.query.errors.ResourceNotFoundQueryError import ResourceNotFoundQueryError
from tests.integration.PdbMongoIntegrationTestBase import PdbMongoIntegrationTestBase
class ExerciseMongoQueryRepositoryIntegrationTest(PdbMongoIntegrationTestBase):
def setUp(self):
self.fixtures = ['Exercise']
super(ExerciseMongoQueryRepositoryIntegrationTest, self).setUp()
self.sut = ExerciseMongoQueryRepository()
def tearDown(self):
self.db.exercises.delete_many({})
def test_getExerciseById_calledWithValidId_returnCorrectResult(self):
exercise = self.sut.get_exercise_by_id(exercise_id='4d128b6ea794fc13a8000003')
self.assertEqual('fakequestion_4', exercise.get_question())
def test_getExerciseById_calledWithUnexistentId_throwResourceNotFoundQueryError(self):
self.assertRaises(ResourceNotFoundQueryError, self.sut.get_exercise_by_id,
exercise_id='4d128b6ea794fc13a8000009')
def test_getExercisesList_called_returnCorrectResult(self):
actual = self.sut.get_exercises_list(limit=3)
actual_map = list(map(lambda x: x.get_question(), actual))
expected = ['fakequestion_1', 'fakequestion_2', 'fakequestion_3']
self.assertEqual(actual_map, expected)
| none | 1 | 2.286042 | 2 | |
02_code/prep.py | franperic/vbz_delay | 0 | 6620682 |
class WindowGenerator():
def __init__(self, input_width, label_width, shift,
train_df=train_df, val_df=val_df, test_df=test_df,
label_columns=None):
# Store the raw data.
self.train_df = train_df
self.val_df = val_df
self.test_df = test_df
# Work out the label column indices.
self.label_columns = label_columns
if label_columns is not None:
self.label_columns_indices = {name: i for i, name in
enumerate(label_columns)}
self.column_indices = {name: i for i, name in
enumerate(train_df.columns)}
# Work out the window parameters.
self.input_width = input_width
self.label_width = label_width
self.shift = shift
self.total_window_size = input_width + shift
self.input_slice = slice(0, input_width)
self.input_indices = np.arange(self.total_window_size)[self.input_slice]
self.label_start = self.total_window_size - self.label_width
self.labels_slice = slice(self.label_start, None)
self.label_indices = np.arange(self.total_window_size)[self.labels_slice]
def __repr__(self):
return '\n'.join([
f'Total window size: {self.total_window_size}',
f'Input indices: {self.input_indices}',
f'Label indices: {self.label_indices}',
f'Label column name(s): {self.label_columns}'])
a = np.random.normal(size=6)
a = a.reshape((2, 3))
df = pd.DataFrame(data=a)
df.rename(columns={0: "a", 1: "b", 2: "c"}, inplace=True)
df.loc[0, "b":"c"] |
class WindowGenerator():
def __init__(self, input_width, label_width, shift,
train_df=train_df, val_df=val_df, test_df=test_df,
label_columns=None):
# Store the raw data.
self.train_df = train_df
self.val_df = val_df
self.test_df = test_df
# Work out the label column indices.
self.label_columns = label_columns
if label_columns is not None:
self.label_columns_indices = {name: i for i, name in
enumerate(label_columns)}
self.column_indices = {name: i for i, name in
enumerate(train_df.columns)}
# Work out the window parameters.
self.input_width = input_width
self.label_width = label_width
self.shift = shift
self.total_window_size = input_width + shift
self.input_slice = slice(0, input_width)
self.input_indices = np.arange(self.total_window_size)[self.input_slice]
self.label_start = self.total_window_size - self.label_width
self.labels_slice = slice(self.label_start, None)
self.label_indices = np.arange(self.total_window_size)[self.labels_slice]
def __repr__(self):
return '\n'.join([
f'Total window size: {self.total_window_size}',
f'Input indices: {self.input_indices}',
f'Label indices: {self.label_indices}',
f'Label column name(s): {self.label_columns}'])
a = np.random.normal(size=6)
a = a.reshape((2, 3))
df = pd.DataFrame(data=a)
df.rename(columns={0: "a", 1: "b", 2: "c"}, inplace=True)
df.loc[0, "b":"c"] | en | 0.33997 | # Store the raw data. # Work out the label column indices. # Work out the window parameters. | 2.780465 | 3 |
markupfield/tests/settings.py | revsys/django-markupfield | 0 | 6620683 | <reponame>revsys/django-markupfield<filename>markupfield/tests/settings.py
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'markuptest.db'
}
}
import markdown
from django.utils.html import escape, linebreaks, urlize
from docutils.core import publish_parts
def render_rest(markup):
parts = publish_parts(source=markup, writer_name="html4css1")
return parts["fragment"]
MARKUP_FIELD_TYPES = [
('markdown', markdown.markdown),
('ReST', render_rest),
('plain', lambda markup: urlize(linebreaks(escape(markup)))),
]
INSTALLED_APPS = (
'markupfield.tests',
)
SECRET_KEY = 'sekrit'
| DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'markuptest.db'
}
}
import markdown
from django.utils.html import escape, linebreaks, urlize
from docutils.core import publish_parts
def render_rest(markup):
parts = publish_parts(source=markup, writer_name="html4css1")
return parts["fragment"]
MARKUP_FIELD_TYPES = [
('markdown', markdown.markdown),
('ReST', render_rest),
('plain', lambda markup: urlize(linebreaks(escape(markup)))),
]
INSTALLED_APPS = (
'markupfield.tests',
)
SECRET_KEY = 'sekrit' | none | 1 | 1.539273 | 2 | |
api.py | Frichetten/aws_api_shapeshifter | 16 | 6620684 | import api_signer
import protocol_formatter
import operation_obj
SAFE_REGIONS = [
"af-south-1",
"ap-east-1",
"ap-northeast-1",
"ap-northeast-2",
"ap-northeast-3",
"ap-south-1",
"ap-southeast-1",
"ap-southeast-2",
"ca-central-1",
"eu-central-1",
"eu-north-1",
"eu-south-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"me-south-1",
"sa-east-1",
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2",
]
class API:
def __init__(self, key, service_definition):
self._definition = service_definition
self._latest_version = self.latest_version()
self.protocol = self.latest()['metadata']['protocol']
self.endpoints = self.latest()['endpoints']
self.operations = self._make_operations_list(
self.latest()['metadata'],
self.latest()['endpoints'],
self.latest()['shapes'],
self.latest()['operations']
)
# Returns the definiton of the lastest api version
def latest(self):
# Find the most recent api version
# We can do this by sorting keys
options = list(self._definition.keys())
options.sort()
return self._definition[list(reversed(options))[0]]
# Returns the latest api version
def latest_version(self):
return list(self._definition.keys())[0]
# Returns a list of the available API versions
def api_versions(self):
return list(self._definition.keys())
# Returns a list of all operations
def list_operations(self):
return list(self.operations.keys())
# Returns the protocol
def get_protocol(self):
return self.protocol
# Returns a dictionary with all the available operations
def _make_operations_list(self, metadata, endpoints, shapes, operations):
to_return = {}
for operation_name, operation in operations.items():
to_return[operation_name] = operation_obj.Operation(metadata, endpoints, shapes, operation)
return to_return
| import api_signer
import protocol_formatter
import operation_obj
SAFE_REGIONS = [
"af-south-1",
"ap-east-1",
"ap-northeast-1",
"ap-northeast-2",
"ap-northeast-3",
"ap-south-1",
"ap-southeast-1",
"ap-southeast-2",
"ca-central-1",
"eu-central-1",
"eu-north-1",
"eu-south-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"me-south-1",
"sa-east-1",
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2",
]
class API:
def __init__(self, key, service_definition):
self._definition = service_definition
self._latest_version = self.latest_version()
self.protocol = self.latest()['metadata']['protocol']
self.endpoints = self.latest()['endpoints']
self.operations = self._make_operations_list(
self.latest()['metadata'],
self.latest()['endpoints'],
self.latest()['shapes'],
self.latest()['operations']
)
# Returns the definiton of the lastest api version
def latest(self):
# Find the most recent api version
# We can do this by sorting keys
options = list(self._definition.keys())
options.sort()
return self._definition[list(reversed(options))[0]]
# Returns the latest api version
def latest_version(self):
return list(self._definition.keys())[0]
# Returns a list of the available API versions
def api_versions(self):
return list(self._definition.keys())
# Returns a list of all operations
def list_operations(self):
return list(self.operations.keys())
# Returns the protocol
def get_protocol(self):
return self.protocol
# Returns a dictionary with all the available operations
def _make_operations_list(self, metadata, endpoints, shapes, operations):
to_return = {}
for operation_name, operation in operations.items():
to_return[operation_name] = operation_obj.Operation(metadata, endpoints, shapes, operation)
return to_return
| en | 0.673248 | # Returns the definiton of the lastest api version # Find the most recent api version # We can do this by sorting keys # Returns the latest api version # Returns a list of the available API versions # Returns a list of all operations # Returns the protocol # Returns a dictionary with all the available operations | 2.44642 | 2 |
infra_scraper/input/reclass.py | fpytloun/infra-scraper | 0 | 6620685 | <gh_stars>0
# -*- coding: utf-8 -*-
from .saltstack import SaltStackInput
import logging
logger = logging.getLogger(__name__)
class SaltReclassInput(SaltStackInput):
RESOURCE_MAP = {
'salt_high_state': {
'resource': 'High State',
'icon': 'fa:cube',
},
'salt_job': {
'resource': 'Job',
'icon': 'fa:clock-o',
},
'salt_node': {
'resource': 'Node',
'icon': 'fa:server',
},
'salt_service': {
'resource': 'Service',
'icon': 'fa:podcast',
},
'salt_user': {
'resource': 'User',
'icon': 'fa:user',
},
}
def __init__(self, **kwargs):
super(SaltReclassInput, self).__init__(**kwargs)
self.kind = 'reclass'
def _create_relations(self):
for resource_id, resource in self.resources.get('salt_job', {}).items():
self._scrape_relation(
'salt_user-salt_job',
resource['metadata']['User'],
resource_id)
for minion_id, result in resource['metadata'].get('Result', {}).items():
self._scrape_relation(
'salt_job-salt_minion',
resource_id,
minion_id)
if type(result) is list:
logger.error(result[0])
else:
for state_id, state in result.items():
if '__id__' in state:
result_id = '{}|{}'.format(minion_id, state['__id__'])
self._scrape_relation(
'salt_job-salt_high_state',
resource_id,
result_id)
def scrape_all_resources(self):
self.scrape_nodes()
self.scrape_resources()
self.scrape_jobs()
# self.scrape_services()
def scrape_nodes(self):
response = self.api.low([{
'client': 'local',
'expr_form': 'compound',
'tgt': 'I@salt:master',
'fun': 'reclass.inventory'
}]).get('return')[0]
for minion_id, minion in response.items():
for node_id, node in minion.items():
self._scrape_resource(node_id,
node_id,
'salt_node', None,
metadata=node)
def scrape_resources(self):
response = self.api.low([{
'client': 'local',
'expr_form': 'compound',
'tgt': 'I@salt:master',
'fun': 'reclass.graph_data'
}]).get('return')[0]
for minion_id, minion in response.items():
for service in minion['graph']:
service_id = '{}|{}'.format(minion_id,
service['service'])
self._scrape_resource(service_id,
service['service'],
'salt_service', None,
metadata=service)
self._scrape_relation(
'salt_service-salt_host',
service_id,
minion_id)
for rel in service['relations']:
if rel['host'] not in self.resources['salt_node']:
self._scrape_resource(rel['host'],
rel['host'],
'salt_node', None,
metadata={})
rel_service_id = '{}|{}'.format(rel['host'],
rel['service'])
if rel_service_id not in self.resources['salt_service']:
self._scrape_resource(rel_service_id,
rel['service'],
'salt_service', None,
metadata={})
self._scrape_relation(
'salt_service-salt_host',
rel_service_id,
rel['host'])
self._scrape_relation(
'salt_service-salt_service',
service_id,
rel_service_id)
def scrape_jobs(self):
response = self.api.low([{
'client': 'runner',
'fun': 'jobs.list_jobs',
'arg': "search_function='[\"state.apply\", \"state.sls\"]'"
}]).get('return')[0]
for job_id, job in response.items():
if job['Function'] in ['state.apply', 'state.sls']:
result = self.api.lookup_jid(job_id).get('return')[0]
job['Result'] = result
self._scrape_resource(job_id,
job['Function'],
'salt_job', None, metadata=job)
| # -*- coding: utf-8 -*-
from .saltstack import SaltStackInput
import logging
logger = logging.getLogger(__name__)
class SaltReclassInput(SaltStackInput):
RESOURCE_MAP = {
'salt_high_state': {
'resource': 'High State',
'icon': 'fa:cube',
},
'salt_job': {
'resource': 'Job',
'icon': 'fa:clock-o',
},
'salt_node': {
'resource': 'Node',
'icon': 'fa:server',
},
'salt_service': {
'resource': 'Service',
'icon': 'fa:podcast',
},
'salt_user': {
'resource': 'User',
'icon': 'fa:user',
},
}
def __init__(self, **kwargs):
super(SaltReclassInput, self).__init__(**kwargs)
self.kind = 'reclass'
def _create_relations(self):
for resource_id, resource in self.resources.get('salt_job', {}).items():
self._scrape_relation(
'salt_user-salt_job',
resource['metadata']['User'],
resource_id)
for minion_id, result in resource['metadata'].get('Result', {}).items():
self._scrape_relation(
'salt_job-salt_minion',
resource_id,
minion_id)
if type(result) is list:
logger.error(result[0])
else:
for state_id, state in result.items():
if '__id__' in state:
result_id = '{}|{}'.format(minion_id, state['__id__'])
self._scrape_relation(
'salt_job-salt_high_state',
resource_id,
result_id)
def scrape_all_resources(self):
self.scrape_nodes()
self.scrape_resources()
self.scrape_jobs()
# self.scrape_services()
def scrape_nodes(self):
response = self.api.low([{
'client': 'local',
'expr_form': 'compound',
'tgt': 'I@salt:master',
'fun': 'reclass.inventory'
}]).get('return')[0]
for minion_id, minion in response.items():
for node_id, node in minion.items():
self._scrape_resource(node_id,
node_id,
'salt_node', None,
metadata=node)
def scrape_resources(self):
response = self.api.low([{
'client': 'local',
'expr_form': 'compound',
'tgt': 'I@salt:master',
'fun': 'reclass.graph_data'
}]).get('return')[0]
for minion_id, minion in response.items():
for service in minion['graph']:
service_id = '{}|{}'.format(minion_id,
service['service'])
self._scrape_resource(service_id,
service['service'],
'salt_service', None,
metadata=service)
self._scrape_relation(
'salt_service-salt_host',
service_id,
minion_id)
for rel in service['relations']:
if rel['host'] not in self.resources['salt_node']:
self._scrape_resource(rel['host'],
rel['host'],
'salt_node', None,
metadata={})
rel_service_id = '{}|{}'.format(rel['host'],
rel['service'])
if rel_service_id not in self.resources['salt_service']:
self._scrape_resource(rel_service_id,
rel['service'],
'salt_service', None,
metadata={})
self._scrape_relation(
'salt_service-salt_host',
rel_service_id,
rel['host'])
self._scrape_relation(
'salt_service-salt_service',
service_id,
rel_service_id)
def scrape_jobs(self):
response = self.api.low([{
'client': 'runner',
'fun': 'jobs.list_jobs',
'arg': "search_function='[\"state.apply\", \"state.sls\"]'"
}]).get('return')[0]
for job_id, job in response.items():
if job['Function'] in ['state.apply', 'state.sls']:
result = self.api.lookup_jid(job_id).get('return')[0]
job['Result'] = result
self._scrape_resource(job_id,
job['Function'],
'salt_job', None, metadata=job) | en | 0.598107 | # -*- coding: utf-8 -*- # self.scrape_services() | 2.173757 | 2 |
dssim/components/uart.py | majvan/DSSim | 1 | 6620686 | <gh_stars>1-10
# Copyright 2020 NXP Semiconductors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
UART components- physical layer nad link layer.
Link layer components can (but not necessarily has to) use physical layer
'''
import random as _rand
from dssim.simulation import DSSchedulable, DSComponent
from dssim.pubsub import DSConsumer, DSProducer, DSSingleProducer, DSProcessConsumer
from dssim.utils import ParityComputer, ByteAssembler
def get_bits_per_byte(bits, stopbit, startbit, parity):
''' Return number of bits per frame depending on the link layer setting '''
retval = bits + stopbit + startbit
if parity in ('E', 'O'):
retval += 1
return retval
class UARTNoisyLine(DSComponent):
''' This class represents one direction serial line with a noise.
The noise is represented by the probability of incorrect bit.
'''
# A resolution is 1 / resolution
resolution = 1e12
def __init__(self, bit_error_probability=1e-9, **kwargs):
super().__init__(**kwargs)
self.probability_level = int(bit_error_probability * self.resolution)
# The random module is required when using this component
self._rand = _rand
self.rx = DSConsumer(
self,
UARTNoisyLine._on_rx_event,
name=self.name + '.rx',
sim=self.sim,
)
self.tx = DSProducer(name=self.name + '.tx', sim=self.sim)
self.stat = {}
self.stat['bit_counter'] = 0 # counter of bits received
self.stat['bit_err_counter'] = 0 # counter of bits getting flipped
self.stat['byte_counter'] = 0 # counter of bytes received
self.stat['byte_err_counter'] = 0 # counter of bytes which were modified
self.stat['byte_link_counter'] = 0 # counter of bytes which were dropped
def _on_rx_event(self, **data):
''' Compute for every bit some random value (noise amplitude) with normal probability
if the probability is above a threshold, inject error.
'''
bits_per_byte = get_bits_per_byte(
data['bits'],
data['stopbit'],
data['startbit'],
data['parity']
)
self.stat['byte_counter'] += 1
self.stat['bit_counter'] += bits_per_byte
errors = []
for _ in range(int(bits_per_byte)):
errors.append(self.probability_level > self._rand.randint(0, self.resolution))
if errors[0] or errors[-1]:
self.stat['byte_link_counter'] += 1
# an error in start bit or stop bit means that the byte is not properly
# sent in link layer and it is skipped
return
errors = errors[1:-1]
# Compute resulting parity
if 'parity_bit' in data and errors[-1]:
self.stat['bit_err_counter'] += 1
data['parity_bit'] = data['parity_bit'] ^ 1
errors = errors[:-1]
if any(errors):
flip_bits = 0
self.stat['byte_err_counter'] += 1
for err in errors:
flip_bits = flip_bits << 1
if err:
self.stat['bit_err_counter'] += 1
flip_bits += 1
# Compute resulting output bytes
if isinstance(data['byte'], str):
data['byte'] = chr(ord(data['byte']) ^ flip_bits)
else:
data['byte'] ^= flip_bits
self.tx.signal(**data)
class UARTPhysBase(DSComponent):
    ''' Physical layer of an UART component.

    Holds the common link settings (baudrate, frame format), the derived
    bit/byte timing and the small TX/RX byte buffers shared by the
    concrete physical-layer implementations.
    '''

    def __init__(self,
                 baudrate=115200,
                 bits=8,
                 stopbit=1.5,
                 startbit=1,
                 parity='N',
                 tx_buffer_size=1,
                 rx_buffer_size=1,
                 **kwargs
                 ):
        super().__init__(**kwargs)
        self.baudrate = baudrate
        self.bits = bits
        self.stopbit = stopbit
        self.startbit = startbit
        self.parity = parity
        self.bittime = 1 / baudrate  # duration of one bit [s]
        self.bits_per_byte = get_bits_per_byte(bits, stopbit, startbit, parity)
        self.bytetime = self.bits_per_byte * self.bittime  # duration of a whole frame [s]
        self._add_tx_pubsub()
        self._add_rx_pubsub()
        self.tx_buffer = []
        self.tx_buffer_size = tx_buffer_size + 1  # we keep transmitting byte in the buffer
        self.rx_buffer = []
        self.rx_buffer_size = rx_buffer_size

    def _add_tx_pubsub(self):
        # sim=self.sim added for consistency: every other producer in this
        # module is created with an explicit simulation instance.
        self.tx = DSProducer(name=self.name + '.tx', sim=self.sim)
        # self.tx_irq = DSProducer(name=self.name + '.tx_irq') # Not supported yet, low value
        self.tx_link = DSSingleProducer(name=self.name + '.tx bridge to linklayer', sim=self.sim)

    def _add_rx_pubsub(self):
        # self.rx_irq = DSProducer(name=self.name + '.rx_irq') # Not supported now, low value
        self.rx_link = DSSingleProducer(name=self.name + '.rx bridge to linklayer', sim=self.sim)

    def send(self, byte, parity):
        ''' Send a byte over UART with a parity bit.

        Returns 1 when the byte was accepted, None when the TX buffer is
        full.  The first queued byte starts transmitting immediately.
        '''
        if len(self.tx_buffer) == 0:
            self.tx_buffer.append({'byte': byte, 'parity': parity})
            self._send_now(byte, parity)
            return 1
        if len(self.tx_buffer) < self.tx_buffer_size:
            self.tx_buffer.append({'byte': byte, 'parity': parity})
            return 1
        return None

    def recv(self):
        ''' Receive byte from UART peripheral; None when the buffer is empty. '''
        if len(self.rx_buffer) > 0:
            return self.rx_buffer.pop(0)
        return None

    def _send_now(self, byte, parity, *other):
        # To be implemented by the concrete physical layer.
        pass

    def _send_next(self):
        ''' Send next buffered byte over UART. '''
        free_slot = False
        # Free slot in the physical layer
        if len(self.tx_buffer) > 0:
            self.tx_buffer.pop(0)  # this one was already transferred
            free_slot = True
        # Check if we have anything to send; first in the physical buffer
        if len(self.tx_buffer) > 0:
            data = self.tx_buffer[0]
            self._send_now(**data)
        # And if possible, inform link layer to push data again
        if free_slot:
            # Inform top layer about free slot in tx_buffer
            self.tx_link.signal(producer=self.tx_link, flag='byte')
class UARTPhys(UARTPhysBase):
    ''' UART peripheral emulated at bit level on a simplified physical layer.

    TX serializes every byte into start / data / parity / stop line
    transitions; RX samples the line driven by a background process
    (``_scan_bits``) and reassembles bytes from those transitions.
    '''

    def __init__(self,
                 baudrate=115200,
                 bits=8,
                 stopbit=1.5,
                 startbit=1,
                 parity='N',
                 **kwargs
                 ):
        super().__init__(baudrate, bits, stopbit, startbit, parity, 1, 1, **kwargs)
        self.tx_line_state = 0  # last driven TX line level
        self.rx_line_state = 1  # last sampled RX line level (idle is high)
        self.stat = {}
        self.stat['noise_counter'] = 0  # frames rejected as line noise
        self.stat['tx_counter'] = 0  # TX bytes counter
        self.stat['rx_counter'] = 0  # RX bytes counter
        self.stat['err_underrun'] = 0  # TX underrun counter (app too fast to send)
        self.stat['err_overrun'] = 0  # RX overruns counter (app to slow to recv)
        self.__add_tx_pubsub()
        self.__add_rx_pubsub()
        self._tx_idle_now()

    def send(self, *args, **kwargs):
        ''' Queue a byte for transmission; count an underrun on rejection. '''
        retval = super().send(*args, **kwargs)
        if retval is None:
            self.stat['err_underrun'] += 1
        return retval

    def __add_tx_pubsub(self):
        # TX producers are already created by the base class.
        pass

    def __add_rx_pubsub(self):
        super()._add_rx_pubsub()
        # Create new interface for RX
        self.rx = DSConsumer(
            self,
            UARTPhys._on_rx_line_state,
            name=self.name + '.rx',
            sim=self.sim,
        )
        # Background process sampling the RX line and decoding frames.
        self._rx_sampler = DSProcessConsumer(
            self._scan_bits(),
            start=True,
            name=self.name + '.rx_sampler',
            sim=self.sim,
        )

    def _send_now(self, byte, parity, *other):
        ''' Working byte, we will shift this byte right to get the bits.
        The initial shift by one bit left here is to prepare for the state machine.
        The state machine first shifts and then gets the bit.
        It is done this way to have the LSb information available during sending byte
        without requiring to save it to other variable.
        '''
        # TODO: add if the byte should be sent with LSb or MSb first. Currently it is LSb first.
        bits = []
        for _ in range(self.bits):
            bits.append(byte & 0x01)
            byte >>= 1
        self.sim.schedule(0, self._send_bits(bits, parity))

    def _tx_idle_now(self):
        # Idle UART line level is logical 1.
        self._set_tx_line_state(1)

    def _set_tx_line_state(self, state):
        # Emit a TX event only on an actual level change.
        if self.tx_line_state != state:
            self.tx_line_state = state
            self.tx.signal(line=state)

    def _on_rx_line_state(self, line, **others):
        self.rx_line_state = line
        # the sampler also requires in some cases async information about line change
        # notify only the _rx_sampler; possible only when this method is run
        # within other process than _rx_sampler
        self._rx_sampler.notify(line=line)

    def _send_bits(self, bits, parity_bit=None):
        ''' Schedule line transitions for one frame, then close it out.

        Only level *changes* are scheduled; the line keeps its level
        between events.
        '''
        # schedule set of next events
        time = 0
        line = 0
        self.tx.schedule(time, line=0)  # start bit (line low)
        time += self.startbit * self.bittime
        for bit in bits:
            if bit != line:
                line = bit
                self.tx.schedule(time, line=line)
            time += self.bittime
        if parity_bit is not None:
            if parity_bit != line:
                line = parity_bit
                self.tx.schedule(time, line=line)
            time += self.bittime
        if line != 1:
            self.tx.schedule(time, line=1)  # stop bit / idle level
        yield from self.sim.wait(self.bytetime)
        self.stat['tx_counter'] += 1  # TX bytes counter
        self._send_next()

    def _scan_bits(self):
        ''' RX decoding process.

        Waits for a start bit, samples each data bit mid bit-time, checks
        the optional parity bit and the stop bit, then forwards the
        reassembled byte to the link layer.
        '''
        # Assuming flag == 'line', then flag_details == line state
        while True:
            # wait for start bit
            yield from self.sim.wait(cond=lambda e: e['line'] == 0)
            # wait till end of start bit
            evt = yield from self.sim.wait(
                (self.startbit - 0.5) * self.bittime,
                lambda e: e['line'] == 1
            )
            if evt is not None:
                # we received logical 1 during startbit, go to the start
                self.stat['noise_counter'] += 1
                continue
            rx_bits = []
            for _ in range(self.bits):
                yield from self.sim.wait(self.bittime)
                rx_bits.append(self.rx_line_state)
            if self.parity in ('E', 'O'):
                yield from self.sim.wait(self.bittime)
                rx_parity_bit = self.rx_line_state
            else:
                rx_parity_bit = None
            # wait for stop bit
            yield from self.sim.wait(self.bittime)
            if self.rx_line_state != 1:
                self.stat['noise_counter'] += 1
                continue
            # wait till end of stop bit
            evt = yield from self.sim.wait(
                (self.stopbit - 0.5 - 0.1) * self.bittime,
                lambda e: e['line'] == 0,
            )
            if evt is not None:
                # we received logical 0 during startbit, go to the start
                continue
            self.stat['rx_counter'] += 1  # RX bytes counter
            rx_byte = ByteAssembler(rx_bits).get(lsb=True)
            self.rx_link.signal(producer=self.rx,  # fake the producer
                                byte=rx_byte,
                                parity=rx_parity_bit,
                                )
class UARTPhysBasic(UARTPhysBase):
    ''' UART physical layer emulated at byte granularity.

    Instead of individual line transitions, one whole frame is modelled
    as a single delayed event carrying the byte and its parity bit.
    '''

    def __init__(self,
                 baudrate=115200,
                 bits=8,
                 stopbit=1.5,
                 startbit=1,
                 parity='N',
                 **kwargs
                 ):
        super().__init__(baudrate, bits, stopbit, startbit, parity, 1, 1, **kwargs)
        self.tx_buffer = []
        self.tx_buffer_size = 1  # on this level it should not be more
        self.__add_tx_pubsub()
        self.__add_rx_pubsub()
        self.stat = {}
        self.stat['noise_counter'] = 0
        self.stat['tx_counter'] = 0  # TX bytes counter
        self.stat['rx_counter'] = 0  # RX bytes counter
        self.stat['err_underrun'] = 0  # TX underrun counter (app too fast to send)
        self.stat['err_overrun'] = 0  # RX overruns counter (app to slow to recv)

    def __add_tx_pubsub(self):
        # Loop back our own TX events so we learn when a byte finished.
        consumer = DSConsumer(self,
                              UARTPhysBasic._on_tx_byte_event,
                              name=self.name + '.(internal) tx fb',
                              sim=self.sim
                              )
        self.tx.add_consumer(consumer)

    def __add_rx_pubsub(self):
        # Create new interface for RX
        self.rx = DSConsumer(self,
                             UARTPhysBasic._on_rx_byte_event,
                             name=self.name + '.rx',
                             sim=self.sim
                             )

    def _send_now(self, byte, parity, *other):
        # The frame "arrives" one byte-time after transmission starts.
        self.tx.schedule(self.bytetime, byte=byte, parity=parity)

    def _on_tx_byte_event(self, producer, byte, parity, **event_data):
        # Our own TX event came back: the byte finished transmitting.
        self.stat['tx_counter'] += 1
        self._send_next()

    def _on_rx_byte_event(self, producer, byte, parity, **event_data):
        # Forward a received frame up to the link layer.
        self.stat['rx_counter'] += 1
        self.rx_link.signal(producer=self.rx_link, byte=byte, parity=parity)
class UARTLink(DSComponent):
    ''' This class represents an UART peripheral in a controller.
    It is simplified peripheral working on basic link layer
    (a typical UART peripheral because UART is defined mostly up to
    link layer only). The peripheral is capable of data transmission
    and receive. It has a buffer of characters to send and buffer
    for receive.
    The TX and RX interfaces are not separated. The reason is that
    typically an UART peripheral has the single settings for both
    RX and TX, i.e. they share the same baudrate for instance.
    Note that the underlying physical peripheral can be selected.
    '''

    def __init__(self,
                 baudrate=115200,
                 bits=8,
                 stopbit=1.5,
                 startbit=1,
                 parity='N',
                 tx_buffer_size=1,
                 rx_buffer_size=1,
                 phys_component=None,  # None (no component) or True (auto create) or component
                 **kwargs
                 ):
        super().__init__(**kwargs)
        self.bits_per_byte = get_bits_per_byte(bits, stopbit, startbit, parity)
        self.parity = parity
        self.bits = bits
        self.stopbit = stopbit
        self.startbit = startbit
        self.parity = parity
        if phys_component is None:
            self.phys = None
            # Link layer does not have timing (it defines only how the bits are created into bytes)
            # But we initialized with baudrate anyway- in the case physical layer is missing
            self.bytetime = self.bits_per_byte / baudrate
            self.transmitting = False  # Flag if we are transmitting now
        elif phys_component is True:
            # 'is True' (not truthiness): a provided component object is also
            # truthy and previously fell into this branch, silently replacing
            # the caller's physical layer with an auto-created one.
            self.phys = UARTPhysBasic(
                baudrate,
                bits,
                stopbit,
                startbit,
                parity,
                name=self.name + '.phys',
                sim=self.sim,
            )
        else:
            self.phys = phys_component
        self.tx_buffer_size = tx_buffer_size
        self.tx_buffer = []
        self.tx_buffer_watermark = 1  # invoke irq when there is 1 byte in the buffer
        self.rx_buffer_size = rx_buffer_size
        self.rx_buffer = []
        self.rx_buffer_watermark = rx_buffer_size  # invoke irq when the buffer is full
        self.__add_tx_pubsub()
        self.__add_rx_pubsub()
        self.stat = {}
        self.stat['tx_counter'] = 0  # TX bytes counter
        self.stat['rx_counter'] = 0  # RX bytes counter
        self.stat['err_underrun'] = 0  # TX underrun counter (app too fast to send)
        self.stat['err_overrun'] = 0  # RX overruns counter (app to slow to recv)
        self.stat['err_parity'] = 0  # RX parity error counter
        self.stat['err_other'] = 0  # RX other error counter

    def __add_tx_pubsub(self):
        self.tx = DSProducer(name=self.name + '.tx', sim=self.sim)
        self.tx_irq = DSProducer(name=self.name + '.tx_irq', sim=self.sim)
        consumer = DSConsumer(self,
                              UARTLink._on_tx_event,
                              name=self.name + '.(internal) tx_fb',
                              sim=self.sim
                              )
        if self.phys:
            self.phys.tx_link.add_consumer(consumer)
        else:
            self.tx.add_consumer(consumer)

    def __add_rx_pubsub(self):
        self.rx = DSConsumer(self, UARTLink._on_rx_event, name=self.name + '.rx', sim=self.sim)
        self.rx_irq = DSProducer(name=self.name + '.rx_irq', sim=self.sim)
        if self.phys:
            self.phys.rx_link.add_consumer(self.rx)

    def set_tx_watermark(self, watermark):
        ''' Set watermark for the TX buffer to produce IRQ '''
        self.tx_buffer_watermark = watermark

    def send(self, *data):
        ''' Send data to line or to buffer.

        Returns the number of bytes accepted; bytes rejected because the
        TX buffer is full are counted as underruns.
        '''
        total = 0
        for byte in data:
            num = 0
            if len(self.tx_buffer) >= self.tx_buffer_size:
                self.stat['err_underrun'] += 1
                continue
            if len(self.tx_buffer) == 0:
                # send the char immediately
                num = self._send_now(byte)
            if not num:
                # line busy or buffer non-empty: queue for later
                self.tx_buffer.append(byte)
                num = 1
            total += num
        return total

    @DSSchedulable
    def send_late(self, *data):
        ''' Send data in the future '''
        return self.send(*data)

    def recv(self):
        ''' Get data from buffer; None when the RX buffer is empty. '''
        if len(self.rx_buffer) == 0:
            return None
        return self.rx_buffer.pop(0)

    @DSSchedulable
    def recv_late(self):
        ''' Get data from buffer in the future '''
        return self.recv()

    def set_rx_watermark(self, watermark):
        ''' Set watermark for the RX buffer to produce IRQ '''
        self.rx_buffer_watermark = watermark

    def _compute_parity(self, byte):
        ''' Compute a parity bit for the byte (None when parity disabled). '''
        if self.parity == 'E':
            parity_bit = ParityComputer.compute_byte_parity(byte, even=True)
        elif self.parity == 'O':
            parity_bit = ParityComputer.compute_byte_parity(byte, even=False)
        else:
            parity_bit = None
        return parity_bit

    def _send_now(self, byte):
        ''' This is a handler for the link layer emulation of the UART peripheral.
        This handler works with read bytes and parity bits.
        Though the handler works on link layer, an easy physical layer is also
        implemented by considering timing of physical layer.
        '''
        parity = self._compute_parity(byte)
        if self.phys is None:
            if self.transmitting:
                num = 0
            else:
                self.tx.schedule(self.bytetime, byte=byte, parity=parity)
                self.transmitting = True
                num = 1
        else:
            num = self.phys.send(byte, parity)
        return num

    def _on_tx_event(self, producer, **other):
        ''' Internal feedback after byte was sent '''
        if not self.phys:
            self.transmitting = False
        # increase statistics
        self.stat['tx_counter'] += 1
        send_watermark = False
        while len(self.tx_buffer) > 0:
            # push chars in buffer if possible
            num = self._send_now(byte=self.tx_buffer[0])
            if not num:
                break
            self.tx_buffer.pop(0)
            if len(self.tx_buffer) == self.tx_buffer_watermark:
                send_watermark = True
        if send_watermark:
            self.tx_irq.signal(producer=producer, flag='byte', flag_detail='sent')

    def _on_rx_event(self, producer, byte, parity, **other):
        ''' Internal feedback after byte was received '''
        # Remove previous flag and possible missing parity
        parity_bit = self._compute_parity(byte)
        if parity != parity_bit:
            self.stat['err_parity'] += 1
            self.rx_irq.signal(
                producer=producer,
                flag='err',
                flag_detail='parity',
                byte=byte,
                **other
            )
        elif len(self.rx_buffer) >= self.rx_buffer_size:
            self.stat['err_overrun'] += 1
            self.rx_irq.signal(producer=producer, flag='err', flag_detail='overrun')
        else:
            self.rx_buffer.append(byte)
            self.stat['rx_counter'] += 1
            if len(self.rx_buffer) == self.rx_buffer_watermark:
                self.rx_irq.signal(producer=producer, flag='byte', flag_detail='received')
| # Copyright 2020 NXP Semiconductors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
UART components- physical layer nad link layer.
Link layer components can (but not necessarily has to) use physical layer
'''
import random as _rand
from dssim.simulation import DSSchedulable, DSComponent
from dssim.pubsub import DSConsumer, DSProducer, DSSingleProducer, DSProcessConsumer
from dssim.utils import ParityComputer, ByteAssembler
def get_bits_per_byte(bits, stopbit, startbit, parity):
    ''' Return the number of bits of one UART frame for the given link settings.

    The frame is start bit(s) + data bits + optional parity bit ('E'ven or
    'O'dd) + stop bit(s); fractional stop bits (e.g. 1.5) yield a float.
    '''
    parity_bits = 1 if parity in ('E', 'O') else 0
    return bits + stopbit + startbit + parity_bits
class UARTNoisyLine(DSComponent):
    ''' This class represents one direction serial line with a noise.
    The noise is represented by the probability of incorrect bit.

    Errors in the start/stop bit drop the whole frame; an error in the
    parity position flips the forwarded parity; data-bit errors are
    XOR-ed into the forwarded byte before re-emitting on ``self.tx``.
    '''

    # Granularity of the random draw: probabilities are resolved in steps
    # of 1 / resolution.  Kept as an int because random.randint requires
    # integer bounds (float arguments are rejected since Python 3.12).
    resolution = 10 ** 12

    def __init__(self, bit_error_probability=1e-9, **kwargs):
        super().__init__(**kwargs)
        # Threshold on the 0..resolution scale below which a draw counts as a bit error.
        self.probability_level = int(bit_error_probability * self.resolution)
        # The random module is required when using this component
        self._rand = _rand
        self.rx = DSConsumer(
            self,
            UARTNoisyLine._on_rx_event,
            name=self.name + '.rx',
            sim=self.sim,
        )
        self.tx = DSProducer(name=self.name + '.tx', sim=self.sim)
        self.stat = {}
        self.stat['bit_counter'] = 0  # counter of bits received
        self.stat['bit_err_counter'] = 0  # counter of bits getting flipped
        self.stat['byte_counter'] = 0  # counter of bytes received
        self.stat['byte_err_counter'] = 0  # counter of bytes which were modified
        self.stat['byte_link_counter'] = 0  # counter of bytes which were dropped

    def _on_rx_event(self, **data):
        ''' Compute for every bit some random value (noise amplitude) with normal probability;
        if the probability is above a threshold, inject error.
        '''
        bits_per_byte = get_bits_per_byte(
            data['bits'],
            data['stopbit'],
            data['startbit'],
            data['parity']
        )
        self.stat['byte_counter'] += 1
        self.stat['bit_counter'] += bits_per_byte
        errors = []
        for _ in range(int(bits_per_byte)):
            # int() guards the upper bound against a float resolution value.
            errors.append(self.probability_level > self._rand.randint(0, int(self.resolution)))
        if errors[0] or errors[-1]:
            self.stat['byte_link_counter'] += 1
            # an error in start bit or stop bit means that the byte is not properly
            # sent in link layer and it is skipped
            return
        errors = errors[1:-1]
        # Compute resulting parity
        if 'parity_bit' in data and errors[-1]:
            self.stat['bit_err_counter'] += 1
            data['parity_bit'] = data['parity_bit'] ^ 1
            errors = errors[:-1]
            # NOTE(review): when a parity bit is present but NOT flipped, the
            # list is not trimmed, so the parity position is treated as a
            # (zero) data bit below -- confirm intended behaviour.
        if any(errors):
            flip_bits = 0
            self.stat['byte_err_counter'] += 1
            for err in errors:
                flip_bits = flip_bits << 1
                if err:
                    self.stat['bit_err_counter'] += 1
                    flip_bits += 1
            # Compute resulting output bytes
            if isinstance(data['byte'], str):
                data['byte'] = chr(ord(data['byte']) ^ flip_bits)
            else:
                data['byte'] ^= flip_bits
        self.tx.signal(**data)
class UARTPhysBase(DSComponent):
    ''' Physical layer of an UART component.

    Holds the common link settings (baudrate, frame format), the derived
    bit/byte timing and the small TX/RX byte buffers shared by the
    concrete physical-layer implementations.
    '''

    def __init__(self,
                 baudrate=115200,
                 bits=8,
                 stopbit=1.5,
                 startbit=1,
                 parity='N',
                 tx_buffer_size=1,
                 rx_buffer_size=1,
                 **kwargs
                 ):
        super().__init__(**kwargs)
        self.baudrate = baudrate
        self.bits = bits
        self.stopbit = stopbit
        self.startbit = startbit
        self.parity = parity
        self.bittime = 1 / baudrate  # duration of one bit [s]
        self.bits_per_byte = get_bits_per_byte(bits, stopbit, startbit, parity)
        self.bytetime = self.bits_per_byte * self.bittime  # duration of a whole frame [s]
        self._add_tx_pubsub()
        self._add_rx_pubsub()
        self.tx_buffer = []
        self.tx_buffer_size = tx_buffer_size + 1  # we keep transmitting byte in the buffer
        self.rx_buffer = []
        self.rx_buffer_size = rx_buffer_size

    def _add_tx_pubsub(self):
        # sim=self.sim added for consistency: every other producer in this
        # module is created with an explicit simulation instance.
        self.tx = DSProducer(name=self.name + '.tx', sim=self.sim)
        # self.tx_irq = DSProducer(name=self.name + '.tx_irq') # Not supported yet, low value
        self.tx_link = DSSingleProducer(name=self.name + '.tx bridge to linklayer', sim=self.sim)

    def _add_rx_pubsub(self):
        # self.rx_irq = DSProducer(name=self.name + '.rx_irq') # Not supported now, low value
        self.rx_link = DSSingleProducer(name=self.name + '.rx bridge to linklayer', sim=self.sim)

    def send(self, byte, parity):
        ''' Send a byte over UART with a parity bit.

        Returns 1 when the byte was accepted, None when the TX buffer is
        full.  The first queued byte starts transmitting immediately.
        '''
        if len(self.tx_buffer) == 0:
            self.tx_buffer.append({'byte': byte, 'parity': parity})
            self._send_now(byte, parity)
            return 1
        if len(self.tx_buffer) < self.tx_buffer_size:
            self.tx_buffer.append({'byte': byte, 'parity': parity})
            return 1
        return None

    def recv(self):
        ''' Receive byte from UART peripheral; None when the buffer is empty. '''
        if len(self.rx_buffer) > 0:
            return self.rx_buffer.pop(0)
        return None

    def _send_now(self, byte, parity, *other):
        # To be implemented by the concrete physical layer.
        pass

    def _send_next(self):
        ''' Send next buffered byte over UART. '''
        free_slot = False
        # Free slot in the physical layer
        if len(self.tx_buffer) > 0:
            self.tx_buffer.pop(0)  # this one was already transferred
            free_slot = True
        # Check if we have anything to send; first in the physical buffer
        if len(self.tx_buffer) > 0:
            data = self.tx_buffer[0]
            self._send_now(**data)
        # And if possible, inform link layer to push data again
        if free_slot:
            # Inform top layer about free slot in tx_buffer
            self.tx_link.signal(producer=self.tx_link, flag='byte')
class UARTPhys(UARTPhysBase):
    ''' UART peripheral emulated at bit level on a simplified physical layer.

    TX serializes every byte into start / data / parity / stop line
    transitions; RX samples the line driven by a background process
    (``_scan_bits``) and reassembles bytes from those transitions.
    '''

    def __init__(self,
                 baudrate=115200,
                 bits=8,
                 stopbit=1.5,
                 startbit=1,
                 parity='N',
                 **kwargs
                 ):
        super().__init__(baudrate, bits, stopbit, startbit, parity, 1, 1, **kwargs)
        self.tx_line_state = 0  # last driven TX line level
        self.rx_line_state = 1  # last sampled RX line level (idle is high)
        self.stat = {}
        self.stat['noise_counter'] = 0  # frames rejected as line noise
        self.stat['tx_counter'] = 0  # TX bytes counter
        self.stat['rx_counter'] = 0  # RX bytes counter
        self.stat['err_underrun'] = 0  # TX underrun counter (app too fast to send)
        self.stat['err_overrun'] = 0  # RX overruns counter (app to slow to recv)
        self.__add_tx_pubsub()
        self.__add_rx_pubsub()
        self._tx_idle_now()

    def send(self, *args, **kwargs):
        ''' Queue a byte for transmission; count an underrun on rejection. '''
        retval = super().send(*args, **kwargs)
        if retval is None:
            self.stat['err_underrun'] += 1
        return retval

    def __add_tx_pubsub(self):
        # TX producers are already created by the base class.
        pass

    def __add_rx_pubsub(self):
        super()._add_rx_pubsub()
        # Create new interface for RX
        self.rx = DSConsumer(
            self,
            UARTPhys._on_rx_line_state,
            name=self.name + '.rx',
            sim=self.sim,
        )
        # Background process sampling the RX line and decoding frames.
        self._rx_sampler = DSProcessConsumer(
            self._scan_bits(),
            start=True,
            name=self.name + '.rx_sampler',
            sim=self.sim,
        )

    def _send_now(self, byte, parity, *other):
        ''' Working byte, we will shift this byte right to get the bits.
        The initial shift by one bit left here is to prepare for the state machine.
        The state machine first shifts and then gets the bit.
        It is done this way to have the LSb information available during sending byte
        without requiring to save it to other variable.
        '''
        # TODO: add if the byte should be sent with LSb or MSb first. Currently it is LSb first.
        bits = []
        for _ in range(self.bits):
            bits.append(byte & 0x01)
            byte >>= 1
        self.sim.schedule(0, self._send_bits(bits, parity))

    def _tx_idle_now(self):
        # Idle UART line level is logical 1.
        self._set_tx_line_state(1)

    def _set_tx_line_state(self, state):
        # Emit a TX event only on an actual level change.
        if self.tx_line_state != state:
            self.tx_line_state = state
            self.tx.signal(line=state)

    def _on_rx_line_state(self, line, **others):
        self.rx_line_state = line
        # the sampler also requires in some cases async information about line change
        # notify only the _rx_sampler; possible only when this method is run
        # within other process than _rx_sampler
        self._rx_sampler.notify(line=line)

    def _send_bits(self, bits, parity_bit=None):
        ''' Schedule line transitions for one frame, then close it out.

        Only level *changes* are scheduled; the line keeps its level
        between events.
        '''
        # schedule set of next events
        time = 0
        line = 0
        self.tx.schedule(time, line=0)  # start bit (line low)
        time += self.startbit * self.bittime
        for bit in bits:
            if bit != line:
                line = bit
                self.tx.schedule(time, line=line)
            time += self.bittime
        if parity_bit is not None:
            if parity_bit != line:
                line = parity_bit
                self.tx.schedule(time, line=line)
            time += self.bittime
        if line != 1:
            self.tx.schedule(time, line=1)  # stop bit / idle level
        yield from self.sim.wait(self.bytetime)
        self.stat['tx_counter'] += 1  # TX bytes counter
        self._send_next()

    def _scan_bits(self):
        ''' RX decoding process.

        Waits for a start bit, samples each data bit mid bit-time, checks
        the optional parity bit and the stop bit, then forwards the
        reassembled byte to the link layer.
        '''
        # Assuming flag == 'line', then flag_details == line state
        while True:
            # wait for start bit
            yield from self.sim.wait(cond=lambda e: e['line'] == 0)
            # wait till end of start bit
            evt = yield from self.sim.wait(
                (self.startbit - 0.5) * self.bittime,
                lambda e: e['line'] == 1
            )
            if evt is not None:
                # we received logical 1 during startbit, go to the start
                self.stat['noise_counter'] += 1
                continue
            rx_bits = []
            for _ in range(self.bits):
                yield from self.sim.wait(self.bittime)
                rx_bits.append(self.rx_line_state)
            if self.parity in ('E', 'O'):
                yield from self.sim.wait(self.bittime)
                rx_parity_bit = self.rx_line_state
            else:
                rx_parity_bit = None
            # wait for stop bit
            yield from self.sim.wait(self.bittime)
            if self.rx_line_state != 1:
                self.stat['noise_counter'] += 1
                continue
            # wait till end of stop bit
            evt = yield from self.sim.wait(
                (self.stopbit - 0.5 - 0.1) * self.bittime,
                lambda e: e['line'] == 0,
            )
            if evt is not None:
                # we received logical 0 during startbit, go to the start
                continue
            self.stat['rx_counter'] += 1  # RX bytes counter
            rx_byte = ByteAssembler(rx_bits).get(lsb=True)
            self.rx_link.signal(producer=self.rx,  # fake the producer
                                byte=rx_byte,
                                parity=rx_parity_bit,
                                )
class UARTPhysBasic(UARTPhysBase):
    ''' UART physical layer emulated at byte granularity.

    Instead of individual line transitions, one whole frame is modelled
    as a single delayed event carrying the byte and its parity bit.
    '''

    def __init__(self,
                 baudrate=115200,
                 bits=8,
                 stopbit=1.5,
                 startbit=1,
                 parity='N',
                 **kwargs
                 ):
        super().__init__(baudrate, bits, stopbit, startbit, parity, 1, 1, **kwargs)
        self.tx_buffer = []
        self.tx_buffer_size = 1  # on this level it should not be more
        self.__add_tx_pubsub()
        self.__add_rx_pubsub()
        self.stat = {}
        self.stat['noise_counter'] = 0
        self.stat['tx_counter'] = 0  # TX bytes counter
        self.stat['rx_counter'] = 0  # RX bytes counter
        self.stat['err_underrun'] = 0  # TX underrun counter (app too fast to send)
        self.stat['err_overrun'] = 0  # RX overruns counter (app to slow to recv)

    def __add_tx_pubsub(self):
        # Loop back our own TX events so we learn when a byte finished.
        consumer = DSConsumer(self,
                              UARTPhysBasic._on_tx_byte_event,
                              name=self.name + '.(internal) tx fb',
                              sim=self.sim
                              )
        self.tx.add_consumer(consumer)

    def __add_rx_pubsub(self):
        # Create new interface for RX
        self.rx = DSConsumer(self,
                             UARTPhysBasic._on_rx_byte_event,
                             name=self.name + '.rx',
                             sim=self.sim
                             )

    def _send_now(self, byte, parity, *other):
        # The frame "arrives" one byte-time after transmission starts.
        self.tx.schedule(self.bytetime, byte=byte, parity=parity)

    def _on_tx_byte_event(self, producer, byte, parity, **event_data):
        # Our own TX event came back: the byte finished transmitting.
        self.stat['tx_counter'] += 1
        self._send_next()

    def _on_rx_byte_event(self, producer, byte, parity, **event_data):
        # Forward a received frame up to the link layer.
        self.stat['rx_counter'] += 1
        self.rx_link.signal(producer=self.rx_link, byte=byte, parity=parity)
class UARTLink(DSComponent):
    ''' This class represents an UART peripheral in a controller.
    It is simplified peripheral working on basic link layer
    (a typical UART peripheral because UART is defined mostly up to
    link layer only). The peripheral is capable of data transmission
    and receive. It has a buffer of characters to send and buffer
    for receive.
    The TX and RX interfaces are not separated. The reason is that
    typically an UART peripheral has the single settings for both
    RX and TX, i.e. they share the same baudrate for instance.
    Note that the underlying physical peripheral can be selected.
    '''

    def __init__(self,
                 baudrate=115200,
                 bits=8,
                 stopbit=1.5,
                 startbit=1,
                 parity='N',
                 tx_buffer_size=1,
                 rx_buffer_size=1,
                 phys_component=None,  # None (no component) or True (auto create) or component
                 **kwargs
                 ):
        super().__init__(**kwargs)
        self.bits_per_byte = get_bits_per_byte(bits, stopbit, startbit, parity)
        self.parity = parity
        self.bits = bits
        self.stopbit = stopbit
        self.startbit = startbit
        self.parity = parity
        if phys_component is None:
            self.phys = None
            # Link layer does not have timing (it defines only how the bits are created into bytes)
            # But we initialized with baudrate anyway- in the case physical layer is missing
            self.bytetime = self.bits_per_byte / baudrate
            self.transmitting = False  # Flag if we are transmitting now
        elif phys_component is True:
            # 'is True' (not truthiness): a provided component object is also
            # truthy and previously fell into this branch, silently replacing
            # the caller's physical layer with an auto-created one.
            self.phys = UARTPhysBasic(
                baudrate,
                bits,
                stopbit,
                startbit,
                parity,
                name=self.name + '.phys',
                sim=self.sim,
            )
        else:
            self.phys = phys_component
        self.tx_buffer_size = tx_buffer_size
        self.tx_buffer = []
        self.tx_buffer_watermark = 1  # invoke irq when there is 1 byte in the buffer
        self.rx_buffer_size = rx_buffer_size
        self.rx_buffer = []
        self.rx_buffer_watermark = rx_buffer_size  # invoke irq when the buffer is full
        self.__add_tx_pubsub()
        self.__add_rx_pubsub()
        self.stat = {}
        self.stat['tx_counter'] = 0  # TX bytes counter
        self.stat['rx_counter'] = 0  # RX bytes counter
        self.stat['err_underrun'] = 0  # TX underrun counter (app too fast to send)
        self.stat['err_overrun'] = 0  # RX overruns counter (app to slow to recv)
        self.stat['err_parity'] = 0  # RX parity error counter
        self.stat['err_other'] = 0  # RX other error counter

    def __add_tx_pubsub(self):
        self.tx = DSProducer(name=self.name + '.tx', sim=self.sim)
        self.tx_irq = DSProducer(name=self.name + '.tx_irq', sim=self.sim)
        consumer = DSConsumer(self,
                              UARTLink._on_tx_event,
                              name=self.name + '.(internal) tx_fb',
                              sim=self.sim
                              )
        if self.phys:
            self.phys.tx_link.add_consumer(consumer)
        else:
            self.tx.add_consumer(consumer)

    def __add_rx_pubsub(self):
        self.rx = DSConsumer(self, UARTLink._on_rx_event, name=self.name + '.rx', sim=self.sim)
        self.rx_irq = DSProducer(name=self.name + '.rx_irq', sim=self.sim)
        if self.phys:
            self.phys.rx_link.add_consumer(self.rx)

    def set_tx_watermark(self, watermark):
        ''' Set watermark for the TX buffer to produce IRQ '''
        self.tx_buffer_watermark = watermark

    def send(self, *data):
        ''' Send data to line or to buffer.

        Returns the number of bytes accepted; bytes rejected because the
        TX buffer is full are counted as underruns.
        '''
        total = 0
        for byte in data:
            num = 0
            if len(self.tx_buffer) >= self.tx_buffer_size:
                self.stat['err_underrun'] += 1
                continue
            if len(self.tx_buffer) == 0:
                # send the char immediately
                num = self._send_now(byte)
            if not num:
                # line busy or buffer non-empty: queue for later
                self.tx_buffer.append(byte)
                num = 1
            total += num
        return total

    @DSSchedulable
    def send_late(self, *data):
        ''' Send data in the future '''
        return self.send(*data)

    def recv(self):
        ''' Get data from buffer; None when the RX buffer is empty. '''
        if len(self.rx_buffer) == 0:
            return None
        return self.rx_buffer.pop(0)

    @DSSchedulable
    def recv_late(self):
        ''' Get data from buffer in the future '''
        return self.recv()

    def set_rx_watermark(self, watermark):
        ''' Set watermark for the RX buffer to produce IRQ '''
        self.rx_buffer_watermark = watermark

    def _compute_parity(self, byte):
        ''' Compute a parity bit for the byte (None when parity disabled). '''
        if self.parity == 'E':
            parity_bit = ParityComputer.compute_byte_parity(byte, even=True)
        elif self.parity == 'O':
            parity_bit = ParityComputer.compute_byte_parity(byte, even=False)
        else:
            parity_bit = None
        return parity_bit

    def _send_now(self, byte):
        ''' This is a handler for the link layer emulation of the UART peripheral.
        This handler works with read bytes and parity bits.
        Though the handler works on link layer, an easy physical layer is also
        implemented by considering timing of physical layer.
        '''
        parity = self._compute_parity(byte)
        if self.phys is None:
            if self.transmitting:
                num = 0
            else:
                self.tx.schedule(self.bytetime, byte=byte, parity=parity)
                self.transmitting = True
                num = 1
        else:
            num = self.phys.send(byte, parity)
        return num

    def _on_tx_event(self, producer, **other):
        ''' Internal feedback after byte was sent '''
        if not self.phys:
            self.transmitting = False
        # increase statistics
        self.stat['tx_counter'] += 1
        send_watermark = False
        while len(self.tx_buffer) > 0:
            # push chars in buffer if possible
            num = self._send_now(byte=self.tx_buffer[0])
            if not num:
                break
            self.tx_buffer.pop(0)
            if len(self.tx_buffer) == self.tx_buffer_watermark:
                send_watermark = True
        if send_watermark:
            self.tx_irq.signal(producer=producer, flag='byte', flag_detail='sent')

    def _on_rx_event(self, producer, byte, parity, **other):
        ''' Internal feedback after byte was received '''
        # Remove previous flag and possible missing parity
        parity_bit = self._compute_parity(byte)
        if parity != parity_bit:
            self.stat['err_parity'] += 1
            self.rx_irq.signal(
                producer=producer,
                flag='err',
                flag_detail='parity',
                byte=byte,
                **other
            )
        elif len(self.rx_buffer) >= self.rx_buffer_size:
            self.stat['err_overrun'] += 1
            self.rx_irq.signal(producer=producer, flag='err', flag_detail='overrun')
        else:
            self.rx_buffer.append(byte)
            self.stat['rx_counter'] += 1
            if len(self.rx_buffer) == self.rx_buffer_watermark:
                self.rx_irq.signal(producer=producer, flag='byte', flag_detail='received')
# an error in start bit or stop bit means that the byte is not properly # sent in link layer and it is skipped # Compute resulting parity # Compute resulting output bytes Physical layer of an UART component # we keep transmitting byte in the buffer # self.tx_irq = DSProducer(name=self.name + '.tx_irq') # Not supported yet, low value # self.rx_irq = DSProducer(name=self.name + '.rx_irq') # Not supported now, low value Send a byte over UART with and a parity bit Receive byte from UART peripheral Send next buffered byte over UART # Free slot in the physical layer # this one was already transferred # Check if we have anything to send; first in the physical buffer # And if possible, inform link layer to push data again # Inform top layer about free slot in tx_buffer This class represents an UART peripheral in a controller working on simplified physical layer. The machine follows the state of bits to be sent, together with # TX bytes counter # RX bytes counter # TX underrun counter (app too fast to send) # RX overruns counter (app to slow to recv) # Create new interface for RX Working byte, we will shift this byte right to get the bits. The initial shift by one bit left here is to prepare for the state machine. The state machine first shifts and then gets the bit. It is done this way to have the LSb information available during sending byte without requiring to save it to other variable. # TODO: add if the byte should be sent with LSb or MSb first. Currently it is LSb first. 
# the sampler also requires in some cases async information about line change # notify only the _rx_sampler; possible only when this method is run # within other process than _rx_sampler # schedule set of next events # TX bytes counter # Assuming flag == 'line', then flag_details == line state # wait for start bit # wait till end of start bit # we received logical 1 during startbit, go to the start # wait for stop bit # wait till end of stop bit # we received logical 0 during startbit, go to the start # RX bytes counter # fake the producer This class represents an UART peripheral in a controller working on simplified physical layer. The machine follows the state of bits to be sent, together with # on this level it should not be more # TX bytes counter # RX bytes counter # TX underrun counter (app too fast to send) # RX overruns counter (app to slow to recv) # Create new interface for RX This class represents an UART peripheral in a controller. It is simplified peripheral working on basic link layer (a typical UART peripheral because UART is defined mostly up to link layer only). The peripheral is capable of data transmission and receive. It has a buffer of characters to send and buffer for receive. The TX and RX interfaces are not separated. The reason is that typically an UART peripheral has the single settings for both RX and TX, i.e. they share the same baudrate for instance. Note that the underlying physical peripheral can be selected. 
# None (no component) or True (auto create) or component # Link layer does not have timing (it defines only how the bits are created into bytes) # But we initialized with baudrate anyway- in the case physical layer is missing # Flag if we are transmitting now # invoke irq when there is 1 byte in the buffer # invoke irq when the buffer is full # TX bytes counter # RX bytes counter # TX underrun counter (app too fast to send) # RX overruns counter (app to slow to recv) # RX parity error counter # RX other error counter Set watermark for the TX buffer to produce IRQ Send data to line or to buffer # send the char immediately Send data in the future Get data from buffer Get data from buffer in the future Set watermark for the RX buffer to produce IRQ Compute a parity bit for the byte This is a handler for the link layer emulation of the UART peripheral. This handler works with read bytes and parity bits. Though the handler works on link layer, an easy physical layer is also implemented by considering timing of physical layer. Internal feedback after byte was sent # increase statistics # push chars in buffer if possible Internal feedback after byte was received # Remove previous flag and possible missing parity | 2.378798 | 2 |
tests/integration_tests/subtensor_client_tests/test_subscribe.py | aidangomez/bittensor | 0 | 6620687 | from bittensor.subtensor.client import WSClient
from bittensor.subtensor.interface import Keypair
from loguru import logger
import pytest
import asyncio
logger.remove() # Shut up loguru
# Endpoint of a locally running subtensor node used by these integration tests.
# NOTE(review): the name `socket` shadows the stdlib `socket` module.
socket = "localhost:9944"
# Well-known //Alice development keypair.
keypair = Keypair.create_from_uri('//Alice')
# Shared websocket client reused by every test in this module.
client = WSClient(socket, keypair)
@pytest.mark.asyncio
async def test_subscribe():
    """Subscribing a public key should register it and yield a uid on chain."""
    client.connect()
    await client.is_connected()
    # Advertise an axon endpoint at 127.0.0.1:666 (modality 0) for this hotkey.
    await client.subscribe("127.0.0.1", 666, 0, keypair.public_key)
    # Fixed wait for the subscribe extrinsic to be included in a block.
    # NOTE(review): time-based waits are flaky; polling for the uid would be
    # more robust.
    await asyncio.sleep(10)
    uid = await client.get_uid_for_pubkey(keypair.public_key)
    assert uid is not None
@pytest.mark.asyncio
async def test_get_uid_for_pubkey__does_not_exist():
    """An unregistered keypair must have no uid on chain.

    Fix: the function was named ``get_uid_for_pubkey__does_not_exist``, which
    does not match pytest's default ``test_`` discovery prefix, so it was
    silently never collected or run.  Also renamed the local variable
    ``random``, which shadowed the stdlib module of the same name.
    """
    client.connect()
    await client.is_connected()
    unregistered = Keypair.create_from_mnemonic(Keypair.generate_mnemonic())
    uid = await client.get_uid_for_pubkey(unregistered.public_key)
    assert uid is None
| from bittensor.subtensor.client import WSClient
from bittensor.subtensor.interface import Keypair
from loguru import logger
import pytest
import asyncio
logger.remove() # Shut up loguru
# Endpoint of a locally running subtensor node used by these integration tests.
# NOTE(review): the name `socket` shadows the stdlib `socket` module.
socket = "localhost:9944"
# Well-known //Alice development keypair.
keypair = Keypair.create_from_uri('//Alice')
# Shared websocket client reused by every test in this module.
client = WSClient(socket, keypair)
@pytest.mark.asyncio
async def test_subscribe():
    """Subscribing a public key should register it and yield a uid on chain."""
    client.connect()
    await client.is_connected()
    # Advertise an axon endpoint at 127.0.0.1:666 (modality 0) for this hotkey.
    await client.subscribe("127.0.0.1", 666, 0, keypair.public_key)
    # Fixed wait for the subscribe extrinsic to be included in a block.
    # NOTE(review): time-based waits are flaky; polling for the uid would be
    # more robust.
    await asyncio.sleep(10)
    uid = await client.get_uid_for_pubkey(keypair.public_key)
    assert uid is not None
@pytest.mark.asyncio
async def test_get_uid_for_pubkey__does_not_exist():
    """An unregistered keypair must have no uid on chain.

    Fix: the function was named ``get_uid_for_pubkey__does_not_exist``, which
    does not match pytest's default ``test_`` discovery prefix, so it was
    silently never collected or run.  Also renamed the local variable
    ``random``, which shadowed the stdlib module of the same name.
    """
    client.connect()
    await client.is_connected()
    unregistered = Keypair.create_from_mnemonic(Keypair.generate_mnemonic())
    uid = await client.get_uid_for_pubkey(unregistered.public_key)
    assert uid is None
| en | 0.412107 | # Shut up loguru | 1.903484 | 2 |
Code/pre_processing.py | SalimFares4/Hate-Speech-Detection | 0 | 6620688 | <reponame>SalimFares4/Hate-Speech-Detection
import pandas as pd
import re
from nltk import word_tokenize
from nltk.stem.isri import ISRIStemmer
from nltk.corpus import stopwords
import string
import tashaphyne.arabic_const as arabconst
def normalizeArabic(text):
    """Normalize Arabic text: strip diacritics, collapse character runs,
    and unify common letter variants to canonical forms."""
    # Remove Tashkeel (diacritic marks) using tashaphyne's precompiled pattern
    text = arabconst.HARAKAT_PAT.sub('', text)
    # Remove Repeated Characters
    # NOTE(review): collapses ANY run of a repeated character to a single
    # occurrence, which also merges legitimately doubled letters -- confirm
    # this is intended for the downstream model.
    text = re.sub(r'(.)\1+', r'\1', text)
    # Unify alef variants to bare alef, and map other variant letters.
    text = re.sub("[إأٱآا]", "ا", text)
    text = re.sub("ى", "ي", text)
    text = re.sub("ؤ", "ء", text)
    text = re.sub("ئ", "ء", text)
    text = re.sub("ة", "ه", text)
    return text
# Arabic stop-word list, passed through the same normalization as the input
# text so the membership test in clean() compares like with like.
stop_words = [normalizeArabic(x) for x in stopwords.words('arabic')]
# Shared ISRI (root-based) Arabic stemmer instance used by stem().
st = ISRIStemmer()
def remove_single_chars(text):
    """Drop length-1 tokens, rejoining the remainder with single spaces."""
    tokens = text.split(" ")
    kept = (tok for tok in tokens if len(tok) > 1)
    return " ".join(kept)
def clean(text):
    """Clean a raw Arabic string for modeling: normalize, then strip
    punctuation, URLs/mentions, digits, Latin letters, single-character
    tokens and stop words."""
    text = normalizeArabic(text)
    # Remove Punctuations (Arabic-specific plus all ASCII punctuation)
    arabic_punctuations = '''`÷×؛<>_()*&^%][ـ،/:"؟.,'{}~¦+|!”…“–ـ'''
    english_punctuations = string.punctuation
    punctuations_list = arabic_punctuations + english_punctuations
    text = text.translate(str.maketrans('', '', punctuations_list))
    # Remove Hashtag Signs
    # NOTE(review): '#' is in string.punctuation, so it was already removed
    # by the translate() call above -- this substitution is a no-op.
    text = re.sub(r"#", " ", text)
    # Remove URLs, Mentions, Trailing Non-Whitespaces Characters
    # NOTE(review): '@', ':' and '/' were stripped by translate() above, so
    # this pattern can no longer match mentions/URLs here -- confirm the
    # intended ordering of these steps.
    text = re.sub(r"(?:\@|https?\://)\S+", " ", text)
    # Remove Numbers
    text = re.sub(r"\d+", " ", text)
    # Remove English Characters
    text = re.sub(r"[A-Z|a-z]+", " ", text)
    # Remove Single Characters
    text = remove_single_chars(text)
    # Remove Stop Words
    text = " ".join([word for word in word_tokenize(text) if not word in stop_words])
    return str(text)
def stem(text):
    """Reduce each token to its ISRI root and rejoin with single spaces."""
    # Stemming
    text = " ".join([st.stem(word)for word in word_tokenize(text)])
return text | import pandas as pd
import re
from nltk import word_tokenize
from nltk.stem.isri import ISRIStemmer
from nltk.corpus import stopwords
import string
import tashaphyne.arabic_const as arabconst
def normalizeArabic(text):
    """Normalize Arabic text: strip diacritics, collapse character runs,
    and unify common letter variants to canonical forms."""
    # Remove Tashkeel (diacritic marks) using tashaphyne's precompiled pattern
    text = arabconst.HARAKAT_PAT.sub('', text)
    # Remove Repeated Characters
    # NOTE(review): collapses ANY run of a repeated character to a single
    # occurrence, which also merges legitimately doubled letters -- confirm
    # this is intended for the downstream model.
    text = re.sub(r'(.)\1+', r'\1', text)
    # Unify alef variants to bare alef, and map other variant letters.
    text = re.sub("[إأٱآا]", "ا", text)
    text = re.sub("ى", "ي", text)
    text = re.sub("ؤ", "ء", text)
    text = re.sub("ئ", "ء", text)
    text = re.sub("ة", "ه", text)
    return text
# Arabic stop-word list, passed through the same normalization as the input
# text so the membership test in clean() compares like with like.
stop_words = [normalizeArabic(x) for x in stopwords.words('arabic')]
# Shared ISRI (root-based) Arabic stemmer instance used by stem().
st = ISRIStemmer()
def remove_single_chars(text):
    """Drop length-1 tokens, rejoining the remainder with single spaces."""
    tokens = text.split(" ")
    kept = (tok for tok in tokens if len(tok) > 1)
    return " ".join(kept)
def clean(text):
    """Clean a raw Arabic string for modeling: normalize, then strip
    punctuation, URLs/mentions, digits, Latin letters, single-character
    tokens and stop words."""
    text = normalizeArabic(text)
    # Remove Punctuations (Arabic-specific plus all ASCII punctuation)
    arabic_punctuations = '''`÷×؛<>_()*&^%][ـ،/:"؟.,'{}~¦+|!”…“–ـ'''
    english_punctuations = string.punctuation
    punctuations_list = arabic_punctuations + english_punctuations
    text = text.translate(str.maketrans('', '', punctuations_list))
    # Remove Hashtag Signs
    # NOTE(review): '#' is in string.punctuation, so it was already removed
    # by the translate() call above -- this substitution is a no-op.
    text = re.sub(r"#", " ", text)
    # Remove URLs, Mentions, Trailing Non-Whitespaces Characters
    # NOTE(review): '@', ':' and '/' were stripped by translate() above, so
    # this pattern can no longer match mentions/URLs here -- confirm the
    # intended ordering of these steps.
    text = re.sub(r"(?:\@|https?\://)\S+", " ", text)
    # Remove Numbers
    text = re.sub(r"\d+", " ", text)
    # Remove English Characters
    text = re.sub(r"[A-Z|a-z]+", " ", text)
    # Remove Single Characters
    text = remove_single_chars(text)
    # Remove Stop Words
    text = " ".join([word for word in word_tokenize(text) if not word in stop_words])
    return str(text)
def stem(text):
    """Reduce each token to its ISRI root and rejoin with single spaces."""
    # Stemming
    text = " ".join([st.stem(word)for word in word_tokenize(text)])
return text | en | 0.607006 | # Remove Tashkeel # Remove Repeated Characters # Remove Punctuations `÷×؛<>_()*&^%][ـ،/:"؟.,'{}~¦+|!”…“–ـ # Remove Hashtag Signs # Remove URLs, Mentions, Trailing Non-Whitespaces Characters # Remove Numbers # Remove English Characters # Remove Single Characters # Remove Stop Words # Stemming | 3.227314 | 3 |
tests/unit/test_pathmagic.py | matthewgdv/pathmagic | 0 | 6620689 | import os
from pathlib import Path
from pathmagic import Dir, File
from tests.conftest import abstract
class TestPathMagic:
    """Unit tests for pathmagic's File and Dir path wrappers.
    Fixture expectations (from tests.conftest): `temp_root` is a temporary
    Dir; `temp_dir` is a Dir inside it (named 'testing' -- see test_parent);
    `temp_file` is a File inside it named 'testing.txt' whose content is
    "testing...".  The '# synced' markers tie each test to a method of the
    implementation and are presumably maintained by an external sync tool --
    keep them in place.
    """
    class TestEnums:
        # Placeholder grouping mirroring the implementation's enum namespace.
        class TestIfExists:
            pass
    def test___str__(self, temp_file: File): # synced
        assert str(temp_file) == os.fspath(temp_file)
    def test___fspath__(self, temp_file: File): # synced
        assert os.fspath(temp_file) == os.fspath(temp_file.path)
    def test___hash__(self, temp_file: File): # synced
        # Hashing is identity-based, not path-based.
        assert hash(temp_file) == id(temp_file)
    def test___eq__(self, temp_file: File): # synced
        # Equality holds against both the pathlib.Path and str forms.
        assert temp_file == temp_file.path and temp_file == str(temp_file)
    def test___ne__(self, temp_dir: Dir, temp_file: File): # synced
        assert temp_dir != temp_file.path and temp_dir != str(temp_file)
    def test___lt__(self, temp_root: Dir, temp_file: File): # synced
        # Ordering appears to be containment (child < parent) and is
        # irreflexive -- TODO confirm against the implementation.
        assert temp_file < temp_root and not temp_root < temp_root
    def test___le__(self, temp_root: Dir, temp_file: File): # synced
        assert temp_file <= temp_root <= temp_root
    def test___gt__(self, temp_root: Dir, temp_file: File): # synced
        assert temp_root > temp_file and not temp_root > temp_root
    def test___ge__(self, temp_root: Dir, temp_file: File): # synced
        assert temp_root >= temp_root >= temp_file
    def test_path(self, temp_root: Dir, temp_dir: Dir, temp_file: File): # synced
        """Assigning .path moves the underlying dir/file on disk."""
        assert isinstance(temp_dir.path, Path) and isinstance(temp_file.path, Path)
        (temp := (temp_root.path / 'temp')).mkdir()
        old_dir_path = temp_dir.path
        temp_dir.path = temp / 'test'
        new_dir_path = temp_dir.path
        assert (
            not old_dir_path.exists()
            and new_dir_path.exists()
            and old_dir_path.parent == new_dir_path.parent.parent
            and new_dir_path.name == 'test'
        )
        old_file_path = temp_file.path
        temp_file.path = temp / 'renamed.json'
        new_file_path = temp_file.path
        assert (
            not old_file_path.exists()
            and new_file_path.exists()
            and old_file_path.parent == new_file_path.parent.parent
            and new_file_path.read_text() == "testing..."
            and new_file_path.name == 'renamed.json'
        )
    def test_parent(self, temp_root: Dir, temp_dir: Dir, temp_file: File): # synced
        """Assigning .parent relocates the object while keeping its own name."""
        assert temp_root is temp_dir.parent and temp_root is temp_file.parent
        (temp := (temp_root.path / 'temp')).mkdir()
        old_dir_path = temp_dir.path
        temp_dir.parent = temp
        new_dir_path = temp_dir.path
        assert (
            not old_dir_path.exists()
            and new_dir_path.exists()
            and old_dir_path.parent == new_dir_path.parent.parent
            and new_dir_path.name == 'testing'
        )
        old_file_path = temp_file.path
        temp_file.parent = temp
        new_file_path = temp_file.path
        assert (
            not old_file_path.exists()
            and new_file_path.exists()
            and old_file_path.parent == new_file_path.parent.parent
            and new_file_path.read_text() == "testing..."
            and new_file_path.name == 'testing.txt'
        )
    def test_name(self, temp_root: Dir, temp_dir: Dir, temp_file: File): # synced
        """Assigning .name renames in place (same parent directory)."""
        assert temp_dir.name == temp_dir.path.name and temp_file.name == temp_file.path.name
        old_dir_path = temp_dir.path
        temp_dir.name = 'renamed'
        new_dir_path = temp_dir.path
        assert (
            not old_dir_path.exists()
            and new_dir_path.exists()
            and old_dir_path.parent == new_dir_path.parent
            and new_dir_path.name == 'renamed'
        )
        old_file_path = temp_file.path
        temp_file.name = 'renamed.json'
        new_file_path = temp_file.path
        assert (
            not old_file_path.exists()
            and new_file_path.exists()
            and old_file_path.parent == new_file_path.parent
            and new_file_path.read_text() == "testing..."
            and new_file_path.name == 'renamed.json'
        )
    def test_stat(self, temp_file: File): # synced
        assert isinstance(temp_file.stat, os.stat_result)
    @abstract
    def test_create(self): # synced
        assert True
    @abstract
    def test_rename(self): # synced
        assert True
    @abstract
    def test_move(self): # synced
        assert True
    def test_trash(self, temp_file: File): # synced
        # trash() must remove the file from its original location.
        temp_file.trash()
        assert not temp_file.path.exists()
    @abstract
    def test_delete(self, temp_file: File): # synced
        assert True
    def test_from_pathlike(self, temp_file: File): # synced
        # Passing an existing File through is a no-op; a str round-trips.
        assert File.from_pathlike(temp_file) is temp_file and temp_file == File.from_pathlike(str(temp_file))
    def test__validate(self): # synced
        assert True
    def test__prepare_dir_if_not_exists(self, temp_dir: Dir): # synced
        new_path = temp_dir.path / 'temp'
        temp_dir._prepare_dir_if_not_exists(new_path)
        temp_dir._prepare_dir_if_not_exists(new_path) # test idempotency
        assert new_path.exists()
    def test__prepare_file_if_not_exists(self, temp_dir: Dir): # synced
        new_path = temp_dir.path / 'temp.txt'
        temp_dir._prepare_file_if_not_exists(new_path)
        temp_dir._prepare_file_if_not_exists(new_path) # test idempotency
        assert new_path.exists()
    def test__parse_filename_args(self, temp_dir: Dir): # synced
        assert temp_dir._parse_filename_args('hi', 'txt').name == 'hi.txt'
        assert temp_dir._parse_filename_args('hi.txt').name == 'hi.txt'
| import os
from pathlib import Path
from pathmagic import Dir, File
from tests.conftest import abstract
class TestPathMagic:
class TestEnums:
class TestIfExists:
pass
def test___str__(self, temp_file: File): # synced
assert str(temp_file) == os.fspath(temp_file)
def test___fspath__(self, temp_file: File): # synced
assert os.fspath(temp_file) == os.fspath(temp_file.path)
def test___hash__(self, temp_file: File): # synced
assert hash(temp_file) == id(temp_file)
def test___eq__(self, temp_file: File): # synced
assert temp_file == temp_file.path and temp_file == str(temp_file)
def test___ne__(self, temp_dir: Dir, temp_file: File): # synced
assert temp_dir != temp_file.path and temp_dir != str(temp_file)
def test___lt__(self, temp_root: Dir, temp_file: File): # synced
assert temp_file < temp_root and not temp_root < temp_root
def test___le__(self, temp_root: Dir, temp_file: File): # synced
assert temp_file <= temp_root <= temp_root
def test___gt__(self, temp_root: Dir, temp_file: File): # synced
assert temp_root > temp_file and not temp_root > temp_root
def test___ge__(self, temp_root: Dir, temp_file: File): # synced
assert temp_root >= temp_root >= temp_file
def test_path(self, temp_root: Dir, temp_dir: Dir, temp_file: File): # synced
assert isinstance(temp_dir.path, Path) and isinstance(temp_file.path, Path)
(temp := (temp_root.path / 'temp')).mkdir()
old_dir_path = temp_dir.path
temp_dir.path = temp / 'test'
new_dir_path = temp_dir.path
assert (
not old_dir_path.exists()
and new_dir_path.exists()
and old_dir_path.parent == new_dir_path.parent.parent
and new_dir_path.name == 'test'
)
old_file_path = temp_file.path
temp_file.path = temp / 'renamed.json'
new_file_path = temp_file.path
assert (
not old_file_path.exists()
and new_file_path.exists()
and old_file_path.parent == new_file_path.parent.parent
and new_file_path.read_text() == "testing..."
and new_file_path.name == 'renamed.json'
)
def test_parent(self, temp_root: Dir, temp_dir: Dir, temp_file: File): # synced
assert temp_root is temp_dir.parent and temp_root is temp_file.parent
(temp := (temp_root.path / 'temp')).mkdir()
old_dir_path = temp_dir.path
temp_dir.parent = temp
new_dir_path = temp_dir.path
assert (
not old_dir_path.exists()
and new_dir_path.exists()
and old_dir_path.parent == new_dir_path.parent.parent
and new_dir_path.name == 'testing'
)
old_file_path = temp_file.path
temp_file.parent = temp
new_file_path = temp_file.path
assert (
not old_file_path.exists()
and new_file_path.exists()
and old_file_path.parent == new_file_path.parent.parent
and new_file_path.read_text() == "testing..."
and new_file_path.name == 'testing.txt'
)
def test_name(self, temp_root: Dir, temp_dir: Dir, temp_file: File): # synced
assert temp_dir.name == temp_dir.path.name and temp_file.name == temp_file.path.name
old_dir_path = temp_dir.path
temp_dir.name = 'renamed'
new_dir_path = temp_dir.path
assert (
not old_dir_path.exists()
and new_dir_path.exists()
and old_dir_path.parent == new_dir_path.parent
and new_dir_path.name == 'renamed'
)
old_file_path = temp_file.path
temp_file.name = 'renamed.json'
new_file_path = temp_file.path
assert (
not old_file_path.exists()
and new_file_path.exists()
and old_file_path.parent == new_file_path.parent
and new_file_path.read_text() == "testing..."
and new_file_path.name == 'renamed.json'
)
def test_stat(self, temp_file: File): # synced
assert isinstance(temp_file.stat, os.stat_result)
@abstract
def test_create(self): # synced
assert True
@abstract
def test_rename(self): # synced
assert True
@abstract
def test_move(self): # synced
assert True
def test_trash(self, temp_file: File): # synced
temp_file.trash()
assert not temp_file.path.exists()
@abstract
def test_delete(self, temp_file: File): # synced
assert True
def test_from_pathlike(self, temp_file: File): # synced
assert File.from_pathlike(temp_file) is temp_file and temp_file == File.from_pathlike(str(temp_file))
def test__validate(self): # synced
assert True
def test__prepare_dir_if_not_exists(self, temp_dir: Dir): # synced
new_path = temp_dir.path / 'temp'
temp_dir._prepare_dir_if_not_exists(new_path)
temp_dir._prepare_dir_if_not_exists(new_path) # test idempotency
assert new_path.exists()
def test__prepare_file_if_not_exists(self, temp_dir: Dir): # synced
new_path = temp_dir.path / 'temp.txt'
temp_dir._prepare_file_if_not_exists(new_path)
temp_dir._prepare_file_if_not_exists(new_path) # test idempotency
assert new_path.exists()
def test__parse_filename_args(self, temp_dir: Dir): # synced
assert temp_dir._parse_filename_args('hi', 'txt').name == 'hi.txt'
assert temp_dir._parse_filename_args('hi.txt').name == 'hi.txt'
| en | 0.845305 | # synced # synced # synced # synced # synced # synced # synced # synced # synced # synced # synced # synced # synced # synced # synced # synced # synced # synced # synced # synced # synced # test idempotency # synced # test idempotency # synced | 2.545039 | 3 |
methods.py | tobyvg/Jos-Stam-Fluid | 0 | 6620690 | <reponame>tobyvg/Jos-Stam-Fluid
import numpy as np
import matplotlib.pyplot as plt
from project_utilities import *
import time
init_mpl(150,mat_settings = True) # from project_utilities (star import): configure matplotlib defaults -- presumably dpi=150; TODO confirm
from IPython.display import clear_output
import pygame
from numba import prange
@numba.njit()
def set_bnd(N,b,x):
    """Enforce boundary conditions on the (N+2)x(N+2) grid `x`.

    `b` selects which walls flip sign (so the wall-normal velocity is zero):
      b == 0: pure copy on all walls (scalar fields: density, pressure).
      b == 1: negate across the first-axis walls (used for u in vel_step).
      b == 2: negate across the second-axis walls (used for v in vel_step).
    """
    if b == 0:
        for i in prange(N+2):
            x[0,i] = x[1,i]
            x[i,0] = x[i,1]
            x[N+1,i] = x[N,i]
            x[i,N+1] = x[i,N]
    elif b == 1:
        for i in prange(N+2):
            # mirror with sign flip across the i=0 and i=N+1 walls
            x[0,i] = -x[1,i]
            x[N+1,i] = -x[N,i]
            x[i,0] = x[i,1]
            x[i,N+1] = x[i,N]
    elif b == 2:
        for i in prange(N+2):
            x[0,i] = x[1,i]
            x[N+1,i] = x[N,i]
            # mirror with sign flip across the j=0 and j=N+1 walls
            x[i,0] = -x[i,1]
            x[i,N+1] = -x[i,N]
    # corner cells: average of the two adjacent boundary cells
    x[0,0] = 1/2*(x[1,0]+x[0,1])
    x[0,N+1] = 1/2*(x[1,N+1]+x[0,N])
    x[N+1,0] = 1/2*(x[N,0]+x[N+1,1])
    x[N+1,N+1] = 1/2*(x[N,N+1]+x[N+1,N])
@numba.njit()
def add_source(x, s, dt):
    """Explicit Euler source step: accumulate s scaled by dt into x, in place."""
    x += s * dt
@numba.njit()
def diffuse(N,b,x,x0,diff,dt):
    """Diffuse field x0 into x with rate `diff` via 20 Gauss-Seidel sweeps.

    Solves the implicit system (I - a*Laplacian) x = x0, which is
    unconditionally stable for any dt (Stam).  The sweeps are order
    dependent, so the inner loops deliberately use range, not prange.
    """
    a = dt*diff*N**2
    for k in range(20):
        for i in range(1,N+1):
            for j in range(1,N+1):
                x[i,j] = (x0[i,j] + a*(x[i-1,j]+x[i+1,j]+x[i,j-1]+x[i,j+1]))/(1+4*a)
        set_bnd(N,b,x)
@numba.njit()
def advect(N,b,d,d0,u,v,dt):
    """Semi-Lagrangian advection: transport d0 along velocity (u, v) into d.

    For each cell, trace the velocity field backwards by dt to find where
    the quantity came from, clamp that point to the grid interior, and
    bilinearly interpolate d0 there.
    """
    dt0 = N*dt
    for i in prange(1,N+1):
        for j in prange(1,N+1):
            # backtrace the particle that ends up at cell (i, j)
            x = i-dt0*u[i,j]
            y = j-dt0*v[i,j]
            # clamp to [0.5, N+0.5] so interpolation stays inside the grid
            if x < 0.5:
                x = 0.5
            elif x > N + 0.5:
                x = N + 0.5
            if y < 0.5:
                y = 0.5
            elif y > N + 0.5:
                y = N + 0.5
            # indices and weights for bilinear interpolation
            i0 = int(np.floor(x))
            i1 = i0+1
            j0 = int(np.floor(y))
            j1 = j0+1
            s1 = x -i0
            s0 = 1 - s1
            t1 = y-j0
            t0= 1-t1
            d[i,j] = s0*(t0*d0[i0,j0]+t1*d0[i0,j1]) + s1*(t0*d0[i1,j0] + t1*d0[i1,j1])
    set_bnd(N,b,d)
@numba.njit()
def dens_step(N,x,x0,u,v,diff,dt,s):
    """One density step: add source s, diffuse, then advect along (u, v).

    x0 is a scratch buffer: diffuse writes the diffused field into x0 from
    x, and advect then writes the transported result back into x.
    """
    add_source(x,s,dt)
    diffuse(N,0,x0,x,diff,dt)
    advect(N,0,x,x0,u,v,dt)
@numba.njit()
def project(N,u,v,p,div):
    """Make (u, v) divergence-free (Helmholtz-Hodge projection).

    Computes the divergence, solves a Poisson equation for the pressure p
    with 20 Gauss-Seidel sweeps, then subtracts the pressure gradient.
    p and div are scratch arrays and are overwritten.
    """
    h = 1/N
    for i in prange(1,N+1):
        for j in prange(1,N+1):
            # central-difference divergence, and clear the pressure guess
            div[i,j] = -0.5*h*(u[i+1,j]-u[i-1,j] + v[i,j+1]-v[i,j-1])
            p[i,j] = 0
    set_bnd(N,0,div)
    set_bnd(N,0,p)
    for k in range(20):
        for i in range(1,N+1):
            for j in range(1,N+1):
                # sequential Gauss-Seidel sweep (range, not prange)
                p[i,j] = (div[i,j]+p[i-1,j]+p[i+1,j]+p[i,j-1]+p[i,j+1])/4
        set_bnd(N,0,p)
    for i in prange(1,N+1):
        for j in prange(1,N+1):
            # subtract the pressure gradient to remove divergence
            u[i,j] -= 0.5*(p[i+1,j]-p[i-1,j])/h
            v[i,j] -= 0.5*(p[i,j+1]-p[i,j-1])/h
    set_bnd(N,1,u)
    set_bnd(N,2,v)
@numba.njit()
def vel_step(N,u,v,u0,v0,visc,dt,su,sv):
    """One velocity step: add forces, diffuse, project, self-advect, project.

    u0/v0 receive the diffused field and then act as the source for the
    self-advection; su/sv are the force fields.
    """
    add_source(u,su,dt)
    add_source(v,sv,dt)
    diffuse(N,1,u0,u,visc,dt)
    # Fix: the vertical component must use boundary code b=2 (sign flip on
    # the top/bottom walls), matching the advect call below; the original
    # passed b=1, applying the horizontal-velocity boundary to v.
    diffuse(N,2,v0,v,visc,dt)
    project(N,u0,v0,u,v)
    advect(N,1,u,u0,u0,v0,dt)
    advect(N,2,v,v0,u0,v0,dt)
    project(N,u,v,u0,v0)
import matplotlib as mpl
from matplotlib import cm
class MplColorHelper:
    """Map scalars in [start_val, stop_val] onto RGBA colors of a named colormap."""
    def __init__(self, cmap_name, start_val, stop_val):
        self.cmap_name = cmap_name
        self.cmap = plt.get_cmap(cmap_name)
        # normalizer mapping [start_val, stop_val] -> [0, 1] for the colormap
        self.norm = mpl.colors.Normalize(vmin=start_val, vmax=stop_val)
        self.scalarMap = cm.ScalarMappable(norm=self.norm, cmap=self.cmap)
    def get_rgb(self, val):
        """Return the color for scalar `val` (an RGBA tuple, alpha included)."""
        return self.scalarMap.to_rgba(val)
| import numpy as np
import matplotlib.pyplot as plt
from project_utilities import *
import time
init_mpl(150,mat_settings = True)
from IPython.display import clear_output
import pygame
from numba import prange
@numba.njit()
def set_bnd(N,b,x):
if b == 0:
for i in prange(N+2):
x[0,i] = x[1,i]
x[i,0] = x[i,1]
x[N+1,i] = x[N,i]
x[i,N+1] = x[i,N]
elif b == 1:
for i in prange(N+2):
x[0,i] = -x[1,i]
x[N+1,i] = -x[N,i]
x[i,0] = x[i,1]
x[i,N+1] = x[i,N]
elif b == 2:
for i in prange(N+2):
x[0,i] = x[1,i]
x[N+1,i] = x[N,i]
x[i,0] = -x[i,1]
x[i,N+1] = -x[i,N]
x[0,0] = 1/2*(x[1,0]+x[0,1])
x[0,N+1] = 1/2*(x[1,N+1]+x[0,N])
x[N+1,0] = 1/2*(x[N,0]+x[N+1,1])
x[N+1,N+1] = 1/2*(x[N,N+1]+x[N+1,N])
@numba.njit()
def add_source(x,s,dt):
x += dt*s
@numba.njit()
def diffuse(N,b,x,x0,diff,dt):
a = dt*diff*N**2
for k in range(20):
for i in range(1,N+1):
for j in range(1,N+1):
x[i,j] = (x0[i,j] + a*(x[i-1,j]+x[i+1,j]+x[i,j-1]+x[i,j+1]))/(1+4*a)
set_bnd(N,b,x)
@numba.njit()
def advect(N,b,d,d0,u,v,dt):
dt0 = N*dt
for i in prange(1,N+1):
for j in prange(1,N+1):
x = i-dt0*u[i,j]
y = j-dt0*v[i,j]
if x < 0.5:
x = 0.5
elif x > N + 0.5:
x = N + 0.5
if y < 0.5:
y = 0.5
elif y > N + 0.5:
y = N + 0.5
i0 = int(np.floor(x))
i1 = i0+1
j0 = int(np.floor(y))
j1 = j0+1
s1 = x -i0
s0 = 1 - s1
t1 = y-j0
t0= 1-t1
d[i,j] = s0*(t0*d0[i0,j0]+t1*d0[i0,j1]) + s1*(t0*d0[i1,j0] + t1*d0[i1,j1])
set_bnd(N,b,d)
@numba.njit()
def dens_step(N,x,x0,u,v,diff,dt,s):
add_source(x,s,dt)
diffuse(N,0,x0,x,diff,dt)
advect(N,0,x,x0,u,v,dt)
@numba.njit()
def project(N,u,v,p,div):
h = 1/N
for i in prange(1,N+1):
for j in prange(1,N+1):
div[i,j] = -0.5*h*(u[i+1,j]-u[i-1,j] + v[i,j+1]-v[i,j-1])
p[i,j] = 0
set_bnd(N,0,div)
set_bnd(N,0,p)
for k in range(20):
for i in range(1,N+1):
for j in range(1,N+1):
p[i,j] = (div[i,j]+p[i-1,j]+p[i+1,j]+p[i,j-1]+p[i,j+1])/4
set_bnd(N,0,p)
for i in prange(1,N+1):
for j in prange(1,N+1):
u[i,j] -= 0.5*(p[i+1,j]-p[i-1,j])/h
v[i,j] -= 0.5*(p[i,j+1]-p[i,j-1])/h
set_bnd(N,1,u)
set_bnd(N,2,v)
@numba.njit()
def vel_step(N,u,v,u0,v0,visc,dt,su,sv):
    """One velocity step: add forces, diffuse, project, self-advect, project.

    u0/v0 receive the diffused field and then act as the source for the
    self-advection; su/sv are the force fields.
    """
    add_source(u,su,dt)
    add_source(v,sv,dt)
    diffuse(N,1,u0,u,visc,dt)
    # Fix: the vertical component must use boundary code b=2 (sign flip on
    # the top/bottom walls), matching the advect call below; the original
    # passed b=1, applying the horizontal-velocity boundary to v.
    diffuse(N,2,v0,v,visc,dt)
    project(N,u0,v0,u,v)
    advect(N,1,u,u0,u0,v0,dt)
    advect(N,2,v,v0,u0,v0,dt)
    project(N,u,v,u0,v0)
import matplotlib as mpl
from matplotlib import cm
class MplColorHelper:
def __init__(self, cmap_name, start_val, stop_val):
self.cmap_name = cmap_name
self.cmap = plt.get_cmap(cmap_name)
self.norm = mpl.colors.Normalize(vmin=start_val, vmax=stop_val)
self.scalarMap = cm.ScalarMappable(norm=self.norm, cmap=self.cmap)
def get_rgb(self, val):
return self.scalarMap.to_rgba(val) | none | 1 | 2.643672 | 3 | |
src/metaproj/common/utils.py | KGerring/metaproj | 2 | 6620691 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename = utils
# author=KGerring
# date = 6/12/21
# project poetryproj
# docs root
"""
poetryproj
"""
from __future__ import annotations

import codecs
import importlib
import inspect
import io
import json
import os
import pkgutil
import textwrap
import tokenize
from pathlib import Path
from types import ModuleType
from typing import AnyStr, Dict, Iterable, List, Optional, Set, Type, Union

from attr import dataclass
from fixit.common.base import CstLintRule, LintConfig

from . import config # LintConfig
# NOTE(review): `exceptions` is referenced below (DuplicateLintRuleNameError,
# LintRuleNotFoundError) but was never imported -- confirm this sibling
# module exists alongside `config`.
from . import exceptions

import os # isort:skip
import re # isort:skip
# Collection of rule implementations: concrete CstLintRule subclasses or
# PseudoLintRule providers ('PseudoLintRule' is a forward reference; the
# class itself is not imported in this module).
LintRuleCollectionT = Set[Union[Type[CstLintRule], Type['PseudoLintRule']]]
# Placeholder path reported when linting source that has no real file.
DEFAULT_FILENAME: str = "not/a/real/file/path.py"
# Default LintConfig rooted two levels up (the package root) for tests.
DEFAULT_CONFIG: LintConfig = LintConfig(
    repo_root=str(Path(__file__).parent.parent), # Set base config repo_root to `fixit` directory for testing.
)
def _dedent(src: str) -> str:
src = re.sub(r"\A\n", "", src)
return textwrap.dedent(src)
def _detect_encoding(source: bytes) -> str:
"""
:param bytes source:
:type source:
:return:
:rtype:
"""
return tokenize.detect_encoding(io.BytesIO(source).readline)[0]
def descendents(class_: type):
    """
    Return the class hierarchy below (and including) *class_*, ordered
    from least- to most-specific (breadth-first, base class first).
    Useful for printing the contents of an entire class hierarchy.
    """
    assert isinstance(class_, type)
    ordered = []
    frontier = [class_]
    while frontier:
        node = frontier.pop(0)
        ordered.append(node)
        for sub in node.__subclasses__():
            if sub not in frontier and sub not in ordered:
                frontier.append(sub)
    return ordered
def maybe_pathlib(path: Union[Path, str, None]):
    """Coerce *path* to a Path when possible.

    - Path instances are returned unchanged.
    - Strings naming an existing filesystem entry become ``Path(path)``;
      strings that do not exist fall back to the current working directory.
    - Anything else (e.g. None) is returned unchanged.

    Fix: the original annotation ``Optional[Path, str]`` is invalid typing
    (``Optional`` takes exactly one parameter); runtime behavior unchanged.
    """
    if isinstance(path, Path):
        return path
    elif isinstance(path, str):
        if os.path.exists(path):
            return Path(path)
        return Path().cwd()
    return path
def auto_encode(string: AnyStr, encoding: str = "utf-8", errors: str = "strict") -> bytes:
    """Lookup an encoder and encode the string if it is str, else return it
    untouched if it's already bytes. If it's an int, etc, it is wrapped with
    str() first and then encoded.

    :param string: The text to encode
    :param encoding: The encoding-type to use; default is `utf-8`
    :param errors: optional; pass `replace` or `namereplace` if you don't want
        the default `strict` for how to process errors
    :return: The encoded text
    """
    if isinstance(string, bytes):
        return string
    # Fix: `errors` was accepted but never forwarded to the encoder, so
    # non-strict error handlers had no effect.
    encoder = codecs.getencoder(encoding)
    if isinstance(string, str):
        return encoder(string, errors)[0]
    return encoder(str(string), errors)[0]
def auto_decode(string: AnyStr, encoding: str = "utf-8", errors: str = "strict") -> str:
    """Lookup a decoder and decode the bytestring if it is bytes, else return
    it untouched if it's already str. If it's an int, etc, it is wrapped with
    str() for you.

    :param string: The bytestring to decode
    :param encoding: the encoding to use; default=`utf-8`
    :param errors: optional; use `replace` or `namereplace`, etc if you don't
        want `strict`, the default
    :return: a decoded string of type `str`
    """
    if isinstance(string, str):
        return string
    # Fix: `errors` was accepted but never forwarded to the decoder, so
    # non-strict error handlers had no effect.
    decoder = codecs.getdecoder(encoding)
    if isinstance(string, bytes):
        return decoder(string, errors)[0]
    return str(string)
def commonpath(path1: Path, path2: Path) -> Optional[Path]:
    """Return the path prefix shared by both paths, or None when they have
    no common part (mixing a relative and an absolute path yields None)."""
    try:
        shared = os.path.commonpath((str(path1), str(path2)))
    except ValueError:
        # os.path.commonpath raises for mixed absolute/relative inputs
        return None
    return Path(shared)
def get_common_ancestor(paths: Iterable[Path]) -> Path:
    """
    Return the deepest path that contains every *existing* input path.

    Paths that do not exist on disk are skipped.  If nothing exists, the
    current working directory is returned; if the computed ancestor turns
    out to be a file, its parent directory is returned instead.
    :param paths: path-likes; non-Path entries are coerced with Path()
    :return: the common ancestor
    :rtype: Path
    """
    if not all(isinstance(p, Path) for p in paths):
        paths = list(map(Path, paths))
    common_ancestor: Optional[Path] = None
    for path in paths:
        if not path.exists():
            continue
        if common_ancestor is None:
            common_ancestor = path
        else:
            if common_ancestor in path.parents or path == common_ancestor:
                # current ancestor already contains this path -- keep it
                continue
            elif path in common_ancestor.parents:
                # this path is a strict ancestor of the current candidate
                common_ancestor = path
            else:
                # diverging paths: fall back to the longest shared prefix
                shared = commonpath(path, common_ancestor)
                if shared is not None:
                    common_ancestor = shared
    if common_ancestor is None:
        common_ancestor = Path.cwd()
    elif common_ancestor.is_file():
        common_ancestor = common_ancestor.parent
    return common_ancestor
def is_rule(obj: object) -> bool:
    """Return True when *obj* is a concrete (non-abstract) lint-rule class:
    a CstLintRule subclass, or a PseudoLintRule subclass when that class is
    available in this module's namespace."""
    if not inspect.isclass(obj) or inspect.isabstract(obj):
        return False
    if obj is CstLintRule:
        return False
    if issubclass(obj, CstLintRule):
        return True
    # Fix: `PseudoLintRule` is not imported in this module; the original
    # referenced it directly and raised NameError for every class that was
    # not a CstLintRule subclass.  Resolve it defensively instead.
    pseudo = globals().get("PseudoLintRule")
    return pseudo is not None and issubclass(obj, pseudo)
class TestCase:
    """Base record for lint-rule test cases; subclasses are attr dataclasses
    that redeclare these fields."""
    code: str  # the source snippet to lint
    filename: str = DEFAULT_FILENAME  # pretend path reported for the snippet
    config: LintConfig = DEFAULT_CONFIG  # lint configuration applied to it
@dataclass
class ValidTestCase(TestCase):
    """
    A snippet the rule under test must accept without reporting.
    (Adapted from Fixit's fixit/common/utils.py.)
    """
    code: str
    filename: str = DEFAULT_FILENAME
    config: LintConfig = DEFAULT_CONFIG
@dataclass
class InvalidTestCase(TestCase):
    """A snippet the rule under test MUST report, plus the expected report."""
    code: str
    kind: Optional[str] = None                  # expected report code
    line: Optional[int] = None                  # expected line (None = any)
    column: Optional[int] = None                # expected column (None = any)
    expected_replacement: Optional[str] = None  # autofix output, if any
    filename: str = DEFAULT_FILENAME
    config: LintConfig = DEFAULT_CONFIG
    expected_message: Optional[str] = None
    @property
    def expected_str(self) -> str:
        """Human-readable ``<line>:<column>: <kind> ...`` expectation string.

        Fix: inlined the ``_str_or_any`` helper the original called, which
        was never defined or imported in this module and raised NameError.
        """
        line = "<any>" if self.line is None else str(self.line)
        column = "<any>" if self.column is None else str(self.column)
        return f"{line}:{column}: {self.kind} ..."
def import_submodules(package: str, recursive: bool = True) -> Dict[str, ModuleType]:
    """Import every submodule of *package*, recursing into subpackages.

    Submodules that fail with ModuleNotFoundError are silently skipped.
    """
    root: ModuleType = importlib.import_module(package)
    discovered: Dict[str, ModuleType] = {}
    for _finder, short_name, is_pkg in pkgutil.walk_packages(root.__path__):
        qualified = f"{root.__name__}.{short_name}"
        try:
            discovered[qualified] = importlib.import_module(qualified)
        except ModuleNotFoundError:
            pass
        if is_pkg and recursive:
            discovered.update(import_submodules(qualified))
    return discovered
def import_distinct_rules_from_package(
    package: str,
    block_list_rules: List[str] = [],
    seen_names: Optional[Set[str]] = None,
    allow_list_rules: Optional[List[str]] = None,
) -> LintRuleCollectionT:
    """Import all rules from the specified package, omitting rules that appear in the block list.

    Raises error on repeated rule names.

    :param package: dotted package name to scan recursively
    :param block_list_rules: rule class names to exclude (read-only; the
        shared mutable default is never mutated)
    :param set seen_names: names that should not occur in this package
    :param list allow_list_rules: if given, only these names are eligible
    :return: the distinct rule classes found
    :rtype: LintRuleCollectionT
    :raises: exceptions.DuplicateLintRuleNameError
    """
    rules: LintRuleCollectionT = set()
    if seen_names is None:
        seen_names = set()
    for _module_name, module in import_submodules(package).items():
        for name in dir(module):
            try:
                obj = getattr(module, name)
            except Exception:
                # Module attributes can be broken lazy imports or faulty
                # descriptors; skip anything that cannot even be fetched.
                continue
            if not (inspect.isclass(obj) and hasattr(obj, "_is_rule")):
                continue
            if name in seen_names:
                # Fix: the original raised this inside a broad try/except
                # that immediately swallowed it, so duplicate names were
                # never actually reported; it now propagates to the caller.
                raise exceptions.DuplicateLintRuleNameError(
                    f"Lint rule name {name!r} is duplicated."
                )
            seen_names.add(name)
            # For backwards compatibility if `allow_list_rules` is missing fall back to all allowed
            if not allow_list_rules or name in allow_list_rules:
                if name not in block_list_rules:
                    rules.add(obj)
    return rules
def import_rule_from_package(
    package_name: str,
    rule_class_name: str,
) -> Optional[LintRuleT]:
    """Return the first class named *rule_class_name* found anywhere under
    *package_name* (searching submodules depth-first), or None.

    :param str package_name: e.g. ``fixit.rules``
    :param str rule_class_name: e.g. ``UseTypesFromTypingRule``
    """
    found: Optional[LintRuleT] = None
    root = importlib.import_module(package_name)
    for _finder, short_name, is_pkg in pkgutil.walk_packages(
        getattr(root, "__path__", None)
    ):
        qualified = f"{root.__name__}.{short_name}"
        try:
            found = getattr(importlib.import_module(qualified), rule_class_name, None)
        except ModuleNotFoundError:
            pass
        if is_pkg:
            # Recurse into subpackages before moving on at this level.
            found = import_rule_from_package(qualified, rule_class_name)
        if found is not None:
            # Stop early once a match is located.
            return found
    return found
def find_and_import_rule(
    rule_class_name: str,
    packages: List[str]
) -> LintRuleT:
    """Return the first rule class named *rule_class_name* found by scanning
    *packages* in order.

    :raises: exceptions.LintRuleNotFoundError when no package provides it
        (NOTE(review): `exceptions` must be importable in this module --
        confirm the local exceptions module exists).
    """
    for package in packages:
        imported_rule = import_rule_from_package(package, rule_class_name)
        if imported_rule is not None:
            return imported_rule
    # If we get here, the rule was not found.
    raise exceptions.LintRuleNotFoundError(
        f"Could not find lint rule {rule_class_name} in the following packages: \n"
        + "\n".join(packages)
    )
if __name__ == '__main__':
    # Smoke check when executed directly: prove the module imports and
    # report its location.
    print(__file__)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename = utils
# author=KGerring
# date = 6/12/21
# project poetryproj
# docs root
"""
poetryproj
"""
from __future__ import annotations
import importlib
import inspect
import io
import json
import os
import pkgutil
import textwrap
import tokenize
from pathlib import Path
from types import ModuleType
from typing import AnyStr, Dict, Iterable, List, Optional, Set, Type, Union
import codecs
from attr import dataclass
from fixit.common.base import CstLintRule, LintConfig
from . import config # LintConfig
import os # isort:skip
import re # isort:skip
LintRuleCollectionT = Set[Union[Type[CstLintRule], Type['PseudoLintRule']]]
DEFAULT_FILENAME: str = "not/a/real/file/path.py"
DEFAULT_CONFIG: LintConfig = LintConfig(
repo_root=str(Path(__file__).parent.parent), # Set base config repo_root to `fixit` directory for testing.
)
def _dedent(src: str) -> str:
src = re.sub(r"\A\n", "", src)
return textwrap.dedent(src)
def _detect_encoding(source: bytes) -> str:
"""
:param bytes source:
:type source:
:return:
:rtype:
"""
return tokenize.detect_encoding(io.BytesIO(source).readline)[0]
def descendents(class_: type):
"""
Return a list of the class hierarchy below (and including) the given class.
The list is ordered from least- to most-specific. Can be useful for
printing the contents of an entire class hierarchy.
"""
assert isinstance(class_, type)
q = [class_]
out = []
while len(q):
x = q.pop(0)
out.insert(0, x)
for b in x.__subclasses__():
if b not in q and b not in out:
q.append(b)
return out[::-1]
def maybe_pathlib(path: Union[Path, str, None]):
    """Coerce *path* to a Path when possible.

    - Path instances are returned unchanged.
    - Strings naming an existing filesystem entry become ``Path(path)``;
      strings that do not exist fall back to the current working directory.
    - Anything else (e.g. None) is returned unchanged.

    Fix: the original annotation ``Optional[Path, str]`` is invalid typing
    (``Optional`` takes exactly one parameter); runtime behavior unchanged.
    """
    if isinstance(path, Path):
        return path
    elif isinstance(path, str):
        if os.path.exists(path):
            return Path(path)
        return Path().cwd()
    return path
def auto_encode(string: AnyStr, encoding: str = "utf-8", errors: str = "strict") -> bytes:
    """Lookup an encoder and encode the string if it is str, else return it
    untouched if it's already bytes. If it's an int, etc, it is wrapped with
    str() first and then encoded.

    :param string: The text to encode
    :param encoding: The encoding-type to use; default is `utf-8`
    :param errors: optional; pass `replace` or `namereplace` if you don't want
        the default `strict` for how to process errors
    :return: The encoded text
    """
    if isinstance(string, bytes):
        return string
    # Fix: `errors` was accepted but never forwarded to the encoder, so
    # non-strict error handlers had no effect.
    encoder = codecs.getencoder(encoding)
    if isinstance(string, str):
        return encoder(string, errors)[0]
    return encoder(str(string), errors)[0]
def auto_decode(string: AnyStr, encoding: str = "utf-8", errors: str = "strict") -> str:
    """Lookup a decoder and decode the bytestring if it is bytes, else return
    it untouched if it's already str. If it's an int, etc, it is wrapped with
    str() for you.

    :param string: The bytestring to decode
    :param encoding: the encoding to use; default=`utf-8`
    :param errors: optional; use `replace` or `namereplace`, etc if you don't
        want `strict`, the default
    :return: a decoded string of type `str`
    """
    if isinstance(string, str):
        return string
    # Fix: `errors` was accepted but never forwarded to the decoder, so
    # non-strict error handlers had no effect.
    decoder = codecs.getdecoder(encoding)
    if isinstance(string, bytes):
        return decoder(string, errors)[0]
    return str(string)
def commonpath(path1: Path, path2: Path) -> Optional[Path]:
    """Return the common part shared with the other path, or None if there is
    no common part.
    If one path is relative and one is absolute, returns None.
    """
    try:
        shared = os.path.commonpath((str(path1), str(path2)))
    except ValueError:
        # Mixed absolute/relative (or different drives) has no common prefix.
        return None
    return Path(shared)
def get_common_ancestor(paths: Iterable[Path]) -> Path:
    """
    Get the common ancestor of the paths if it exists
    :param paths: paths to inspect (str entries are coerced to Path below)
    :return: the deepest path containing every *existing* input path; falls
        back to the current working directory when nothing qualifies
    :rtype: Path
    """
    # NOTE(review): a generator input would be consumed by the all() check
    # below, leaving the for-loop empty — callers should pass a list/tuple.
    if not all(isinstance(p, Path) for p in paths):
        paths = list(map(Path, paths))
    common_ancestor: Optional[Path] = None
    for path in paths:
        # Non-existent paths are ignored entirely.
        if not path.exists():
            continue
        if common_ancestor is None:
            common_ancestor = path
        else:
            if common_ancestor in path.parents or path == common_ancestor:
                # `path` lives under the current ancestor; nothing changes.
                continue
            elif path in common_ancestor.parents:
                # `path` is above the current ancestor; it takes over.
                common_ancestor = path
            else:
                # Diverging paths: fall back to their longest shared prefix.
                shared = commonpath(path, common_ancestor)
                if shared is not None:
                    common_ancestor = shared
    if common_ancestor is None:
        common_ancestor = Path.cwd()
    elif common_ancestor.is_file():
        # The ancestor should be a directory, so step up from a file.
        common_ancestor = common_ancestor.parent
    return common_ancestor
def is_rule(obj: object) -> bool:
    """Return True when *obj* is a concrete (non-abstract) lint rule class.

    A rule is any class derived from CstLintRule or PseudoLintRule, excluding
    the CstLintRule base class itself.
    """
    if inspect.isabstract(obj):
        return False
    if inspect.isclass(obj):
        # The original body had a first check (proper subclass of CstLintRule)
        # that was fully subsumed by this condition; collapsed into one test.
        return obj is not CstLintRule and (
            issubclass(obj, CstLintRule) or issubclass(obj, PseudoLintRule)
        )
    return False
class TestCase:
    # Base container for lint-rule test cases.
    # NOTE(review): unlike its subclasses this base is not decorated with
    # @dataclass, so it has no generated __init__ — instantiate the subclasses
    # instead; confirm the missing decorator is intentional.
    code: str  # source snippet fed to the rule under test
    filename: str = DEFAULT_FILENAME  # filename reported for the snippet
    config: LintConfig = DEFAULT_CONFIG  # lint configuration used for the run
@dataclass
class ValidTestCase(TestCase):
    """A test case whose ``code`` is expected to produce no lint report.

    (Inferred from the class name — the original docstring was a stray
    absolute file path; confirm against the rule-testing framework.)
    """
    code: str
    filename: str = DEFAULT_FILENAME
    config: LintConfig = DEFAULT_CONFIG
@dataclass
class InvalidTestCase(TestCase):
    """A test case whose ``code`` is expected to trigger a lint report.

    Optional fields constrain the expected report; any left as None acts as
    a wildcard (see ``expected_str``).
    """
    code: str
    kind: Optional[str] = None  # expected report kind/code
    line: Optional[int] = None  # expected report line (presumably 1-based)
    column: Optional[int] = None  # expected report column
    expected_replacement: Optional[str] = None  # expected autofix output, if any
    filename: str = DEFAULT_FILENAME
    config: LintConfig = DEFAULT_CONFIG
    expected_message: Optional[str] = None  # expected report message text
    @property
    def expected_str(self) -> str:
        """Human-readable ``line:column: kind ...`` pattern for this case."""
        return f"{_str_or_any(self.line)}:{_str_or_any(self.column)}: {self.kind} ..."
def import_submodules(package: str, recursive: bool = True) -> Dict[str, ModuleType]:
    """Import every submodule of *package*, recursing into subpackages by
    default, and return a mapping of fully-qualified name -> module.

    Modules that fail with ModuleNotFoundError are silently skipped.
    """
    root: ModuleType = importlib.import_module(package)
    imported: Dict[str, ModuleType] = {}
    for _finder, short_name, is_pkg in pkgutil.walk_packages(root.__path__):
        qualified = root.__name__ + "." + short_name
        try:
            imported[qualified] = importlib.import_module(qualified)
        except ModuleNotFoundError:
            pass
        if recursive and is_pkg:
            imported.update(import_submodules(qualified))
    return imported
def import_distinct_rules_from_package(
    package: str,
    block_list_rules: List[str] = [],
    seen_names: Optional[Set[str]] = None,
    allow_list_rules: Optional[List[str]] = None,
) -> LintRuleCollectionT:
    """Import all rules from the specified package, omitting rules that appear in the block list.
    Raises error on repeated rule names.
    Optional parameter `seen_names` accepts set of names that should not occur in this package.
    :param package:
    :param block_list_rules:
    :param set seen_names: a set of names that should not occur in this package.
    :param list allow_list_rules:
    :return:
    :rtype:
    :raises: exceptions.DuplicateLintRuleNameError
    """
    # NOTE: `block_list_rules` keeps its (mutable) default for interface
    # compatibility; it is only read here, never mutated.
    rules: LintRuleCollectionT = set()
    if seen_names is None:
        seen_names = set()
    for _module_name, module in import_submodules(package).items():
        for name in dir(module):
            try:
                obj = getattr(module, name)
            except Exception:
                # Best effort: some modules raise from module-level __getattr__.
                continue
            if not (inspect.isclass(obj) and hasattr(obj, "_is_rule")):
                continue
            # Bug fix: the original wrapped this whole body in
            # `except (TypeError, Exception)`, which swallowed the
            # DuplicateLintRuleNameError raised just below (and left a debug
            # print behind). Only the attribute access is best-effort now.
            if name in seen_names:
                raise exceptions.DuplicateLintRuleNameError(
                    f"Lint rule name {name!r} is duplicated."
                )
            seen_names.add(name)
            # For backwards compatibility if `allow_list_rules` is missing fall back to all allowed
            if not allow_list_rules or name in allow_list_rules:
                if name not in block_list_rules:
                    rules.add(obj)
    return rules
def import_rule_from_package(
    package_name: str,
    rule_class_name: str,
) -> Optional[LintRuleT]:
    """Imports the first rule with matching class name found in specified package.
    Walks the package tree; returns None when no module in the package defines
    an attribute named *rule_class_name*.
    :param str package_name: fixit.rules
    :param str rule_class_name: UseTypesFromTypingRule
    :return: the rule class, or None when not found
    :rtype: Optional[LintRuleT]
    """
    # Imports the first rule with matching class name found in specified package.
    rule: Optional[LintRuleT] = None
    package = importlib.import_module(package_name)
    for _loader, name, is_pkg in pkgutil.walk_packages(
        getattr(package, "__path__", None)  # None for plain modules -> no iteration
    ):
        full_package_or_module_name = package.__name__ + "." + name
        try:
            module = importlib.import_module(full_package_or_module_name)
            rule = getattr(module, rule_class_name, None)
        except ModuleNotFoundError:
            pass
        if is_pkg:
            # NOTE(review): for a subpackage this recursion overwrites any match
            # found on the package module itself; confirm rules are never
            # defined directly in a subpackage's __init__.
            rule = import_rule_from_package(
                full_package_or_module_name, rule_class_name
            )
        if rule is not None:
            # Stop early if we have found the rule.
            return rule
    return rule
def find_and_import_rule(
    rule_class_name: str,
    packages: List[str]
) -> LintRuleT:
    """Search *packages* in order and return the first matching rule class.

    Raises LintRuleNotFoundError when no package contains the rule.
    """
    for candidate_package in packages:
        found = import_rule_from_package(candidate_package, rule_class_name)
        if found is not None:
            return found
    # Exhausted every package without a match.
    raise exceptions.LintRuleNotFoundError(
        f"Could not find lint rule {rule_class_name} in the following packages: \n"
        + "\n".join(packages)
    )
if __name__ == '__main__':
print(__file__)
| en | 0.756194 | #!/usr/bin/env python # -*- coding: utf-8 -*- # filename = utils # author=KGerring # date = 6/12/21 # project poetryproj # docs root poetryproj # LintConfig # isort:skip # isort:skip # Set base config repo_root to `fixit` directory for testing. :param bytes source: :type source: :return: :rtype: Return a list of the class hierarchy below (and including) the given class. The list is ordered from least- to most-specific. Can be useful for printing the contents of an entire class hierarchy. Lookup a encoder and encode the string if it is bytes, else return it untouched if it's already in bytes (for utf). If its an int, etc, it'll try to wrap it in bytes for you. :param string: The text to encode :param encoding: The encoding-type to use; default is `utf-8` :param errors: optional; pass `replace` or `namereplace` if you don't want the default `strict` for how to process errors :return: The encoded text Lookup a decoder and decode the bytestring if it is str, else return it untouched if it's already in bytes (for utf). If its an int, etc, it'll try to wrap it in str for you. :param string: The bytestring to decode :param encoding: the encoding to use; default=`utf-8` :param errors: optional; use `replace` or `namereplace`, etc if you don't want `strict`, the default :return: a decoded string of type `str` Return the common part shared with the other path, or None if there is no common part. If one path is relative and one is absolute, returns None. Get the common ancestor of the paths if it exists :param paths: :return: :rtype: # elif getattr(obj, "_is_rule", False): # logger.debug(f"is rule {obj}") # return True /Users/kristen/repos/Fixit/fixit/common/utils.py Import all submodules of a module, recursively, including subpackages. Import all rules from the specified package, omitting rules that appear in the block list. Raises error on repeated rule names. Optional parameter `seen_names` accepts set of names that should not occur in this package. 
:param package: :param block_list_rules: :param set seen_names: a set of names that should not occur in this package. :param list allow_list_rules: :return: :rtype: :raises: exceptions.DuplicateLintRuleNameError # Import all rules from the specified package, omitting rules that appear in the block list. # Raises error on repeated rule names. # Optional parameter `seen_names` accepts set of names that should not occur in this package. #print(obj) # For backwards compatibility if `allow_list_rules` is missing fall back to all allowed Imports the first rule with matching class name found in specified package. :param str package_name: fixit.rules :param str rule_class_name: UseTypesFromTypingRule :return: :rtype: # Imports the first rule with matching class name found in specified package. # Stop early if we have found the rule. # If we get here, the rule was not found. | 2.473084 | 2 |
tests/test_nicfit.py | nicfit/nicfit.py | 4 | 6620692 | # -*- coding: utf-8 -*-
import pytest
import nicfit
"""
test_nicfit
----------------------------------
Tests for `nicfit` module.
"""
def test_metadata():
    """Smoke-check that every package metadata attribute is present and truthy."""
    about = nicfit.__about__
    assert nicfit.version
    assert about.__license__
    assert about.__project_name__
    assert about.__author__
    assert about.__author_email__
    assert about.__version__
    assert about.__version_info__
    assert about.__release__
    assert about.__version_txt__
def test_parse_version():
    """Check __parse_version on valid version strings and that invalid ones raise."""
    from nicfit.__about__ import __parse_version
    valid_cases = {
        "0": ("0", "final", (0, 0, 0, "final")),
        "1a1": ("1", "a1", (1, 0, 0, "a1")),
        "1.2b1": ("1.2", "b1", (1, 2, 0, "b1")),
        "1.2.3c1": ("1.2.3", "c1", (1, 2, 3, "c1")),
        "2.0": ("2.0", "final", (2, 0, 0, "final")),
    }
    for version, expected in valid_cases.items():
        assert __parse_version(version) == expected
    for invalid in ("3.0.3d1", "a.b.c"):
        with pytest.raises(ValueError):
            __parse_version(invalid)
| # -*- coding: utf-8 -*-
import pytest
import nicfit
"""
test_nicfit
----------------------------------
Tests for `nicfit` module.
"""
def test_metadata():
assert nicfit.version
assert nicfit.__about__.__license__
assert nicfit.__about__.__project_name__
assert nicfit.__about__.__author__
assert nicfit.__about__.__author_email__
assert nicfit.__about__.__version__
assert nicfit.__about__.__version_info__
assert nicfit.__about__.__release__
assert nicfit.__about__.__version_txt__
def test_parse_version():
from nicfit.__about__ import __parse_version
assert __parse_version("0") == ("0", "final", (0, 0, 0, "final"))
assert __parse_version("1a1") == ("1", "a1", (1, 0, 0, "a1"))
assert __parse_version("1.2b1") == ("1.2", "b1", (1, 2, 0, "b1"))
assert __parse_version("1.2.3c1") == ("1.2.3", "c1", (1, 2, 3, "c1"))
assert __parse_version("2.0") == ("2.0", "final", (2, 0, 0, "final"))
for invalid in ["3.0.3d1", "a.b.c"]:
with pytest.raises(ValueError):
__parse_version(invalid)
| en | 0.204213 | # -*- coding: utf-8 -*- test_nicfit ---------------------------------- Tests for `nicfit` module. | 2.272775 | 2 |
network.py | jnicolasthouvenin/Deep_Learning_to_play_Connect4 | 0 | 6620693 | <filename>network.py<gh_stars>0
import numpy as np
import math
from encoder import *
class NeuralNetwork:
    """A small fully-connected feed-forward network with sigmoid activations,
    trained by backpropagation with (mini-batch) gradient descent on the
    squared-error cost.

    Layout: weights[l] has shape (shape[l+1], shape[l]) and biases[l] has
    shape (shape[l+1],), for l in range(len(shape) - 1).
    """
    def __init__(self, *args):
        """Three call forms:
        (shape,)                                      -> random init, l_r = 0.05
        (shape, learning_rate)                        -> random init
        (size, shape, l_r, biases, weights)           -> restore saved state
        """
        if len(args) == 1:
            shape, learning_rate = args[0], 0.05
            self.size = len(shape)
            self.shape = shape
            self.l_r = learning_rate
            self.biases = []
            self.weights = []
            for prev_layer, layer in zip(self.shape[:-1], self.shape[1:]):
                b = np.squeeze(np.random.randn(layer, 1))
                self.biases.append(b)
                w = np.random.randn(layer, prev_layer)
                self.weights.append(w)
        elif len(args) == 2:
            # NOTE(review): this branch duplicates the one above except for the
            # learning-rate source; consider consolidating the two.
            shape, learning_rate = args[0], args[1]
            self.size = len(shape)
            self.shape = shape
            self.l_r = learning_rate
            self.biases = []
            self.weights = []
            for prev_layer, layer in zip(self.shape[:-1], self.shape[1:]):
                b = np.squeeze(np.random.randn(layer, 1))
                self.biases.append(b)
                w = np.random.randn(layer, prev_layer)
                self.weights.append(w)
        else:
            # Restore path used by readNeuralNetwork().
            self.size = args[0]
            self.shape = args[1]
            self.l_r = args[2]
            self.biases = args[3]
            self.weights = args[4]
    def train(self, x, y):
        """One full SGD step on a single sample; returns the pre-update prediction."""
        y_pred = self.forward(x)
        nabla_b, nabla_w = self.backprop(x, y)
        self.update(nabla_b, nabla_w)
        return y_pred
    def forward(self, a):
        """Forward pass; caches per-layer z values and activations for backprop."""
        self.zs = []
        self.activations = [np.array(a)]
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, a) + b
            self.zs.append(z)
            a = sigmoid(z)
            self.activations.append(np.array(a))
        return a
    def backprop(self, x, y):
        """Return (gradient_bias, gradient_weights) for one (x, y) sample."""
        self.forward(x)
        gradient_bias = [np.zeros(b.shape) for b in self.biases]
        gradient_weights = [np.zeros(w.shape) for w in self.weights]
        # last layer
        delta = cost_derivative(self.activations[-1], y) * sigmoid_derivative(self.zs[-1])
        gradient_bias[-1] = delta
        gradient_weights[-1] = computeGradientW(self.activations[-2],delta,len(self.zs[-1]))
        # from before last layer to first layer
        # last layer is self.size-2
        # before last layer is self.size-3
        for l in range(self.size - 3, -1, -1):
            delta = np.dot(self.weights[l + 1].T, delta) * sigmoid_derivative(self.zs[l])
            gradient_bias[l] = delta
            # len(activation) == len(weights)+1
            # activation[i] is the previous activations to the layer weights[i]
            #delta_w = np.dot(delta, self.activations[l].T)
            gradient_weights[l] = computeGradientW(self.activations[l],delta,len(self.zs[l]))
        return gradient_bias, gradient_weights
    def update(self, nabla_b, nabla_w):
        """Apply one gradient-descent step with the configured learning rate."""
        self.biases = [b - self.l_r * nb for b, nb in zip(self.biases, nabla_b)]
        self.weights = [w - self.l_r * nw for w, nw in zip(self.weights, nabla_w)]
    def train_sgd(self, x_train, y_train, batch_size=20):
        """Mini-batch gradient descent over the whole training set (one epoch)."""
        x_batches = [ x_train[i : i + batch_size] for i in range(0, len(x_train), batch_size) ]
        y_batches = [ y_train[i : i + batch_size] for i in range(0, len(y_train), batch_size) ]
        for x_batch, y_batch in zip(x_batches,y_batches):
            gradient_bias = [np.zeros(b.shape) for b in self.biases]
            gradient_weights = [np.zeros(w.shape) for w in self.weights]
            for x, y in zip(x_batch, y_batch):
                delta_grad_b, delta_grad_w = self.backprop(x, y)
                gradient_bias = [ nb + dnb for nb, dnb in zip(gradient_bias, delta_grad_b) ]
                gradient_weights = [ nw + dnw for nw, dnw in zip(gradient_weights, delta_grad_w) ]
            # NOTE(review): the final (possibly shorter) batch is still divided
            # by batch_size, slightly under-weighting its gradient — confirm
            # this is acceptable.
            gradient_weights = [nw / batch_size for nw in gradient_weights]
            gradient_bias = [nb / batch_size for nb in gradient_bias]
            self.weights = [ w - self.l_r * nw for w, nw in zip(self.weights, gradient_weights) ]
            self.biases = [ b - self.l_r * nb for b, nb in zip(self.biases, gradient_bias) ]
    def supervised_learning(self,x_train,y_train,x_test,y_test,lenTest,it,EPOCH=100,batch_size=1000,dataset="classic",file="networks/",write=False):
        """Train for EPOCH+1 epochs, printing the test classification rate each
        epoch and (optionally) saving a checkpoint every 10 epochs.
        """
        print("[INIT] - classification rate =",self.evaluate(x_test,y_test))
        for j in range(EPOCH+1):
            # train
            shuffler = np.random.permutation(x_train.shape[0])
            x_train = x_train[shuffler]
            y_train = y_train[shuffler]
            self.train_sgd(x_train,y_train,batch_size=batch_size)
            # test
            # NOTE(review): goodPred/preds are computed but never used — the
            # printed rate comes from evaluate(); this loop looks like dead code.
            goodPred = 0
            preds = 0
            for i in range(lenTest):
                label_pred = ENCODER.encode_prediction(self.forward(x_test[i]))
                if (label_pred == y_test[i]).all():
                    goodPred += 1
                preds += 1
            print(j," - classification rate =",self.evaluate(x_test,y_test))
            if j%10 == 0:
                if write:
                    self.save((file+dataset+"_"+str(it)+"_"+str(j)))
    def evaluate(self, x_test, y_test):
        """Fraction (rounded to 3 decimals) of test samples whose encoded
        prediction equals the label.

        NOTE(review): uses int(_y_pred == _y), whereas supervised_learning
        compares with .all() — if labels are arrays this comparison is
        ambiguous; confirm the label type.
        """
        test_results = [ (ENCODER.encode_prediction(self.forward(_x)), (_y)) for _x, _y in zip(x_test, y_test) ]
        result = sum(int(_y_pred == _y) for (_y_pred, _y) in test_results)
        result /= len(y_test)
        return round(result, 3)
    def save(self, fileName):
        """Serialize size, shape, learning rate, weights and biases, one value
        per line, in the order readNeuralNetwork() expects.
        """
        file = open(fileName, "w")
        file.write(str(self.size)+"\n")
        for i in range(self.size):
            file.write(str(self.shape[i])+"\n")
        file.write(str(self.l_r)+"\n")
        for i in range(1, self.size):
            for j in range(self.shape[i]):
                for k in range(self.shape[i-1]):
                    file.write(str(self.weights[i-1][j][k])+"\n")
        for i in range(self.size-1):
            for j in range(self.shape[i+1]):
                file.write(str(self.biases[i][j])+"\n")
        file.close()
def readNeuralNetwork(fileName):
    """Deserialize a NeuralNetwork previously written by NeuralNetwork.save().

    The file layout (one value per line) is: size, the `size` shape entries,
    the learning rate, all weights (layer by layer, row by row), then all
    biases (layer by layer).

    Bug fix: the original opened the file without a context manager, leaking
    the handle if any int()/float() parse raised; `with` guarantees closure.
    """
    with open(fileName, "r") as file:
        size = int(file.readline())
        shape = [int(file.readline()) for _ in range(size)]
        l_r = float(file.readline())
        weights = []
        for i in range(1, size):
            layer_rows = []
            for _j in range(shape[i]):
                # One row of the (shape[i], shape[i-1]) weight matrix.
                layer_rows.append([float(file.readline()) for _k in range(shape[i - 1])])
            weights.append(layer_rows)
        biases = []
        for i in range(size - 1):
            biases.append([float(file.readline()) for _j in range(shape[i + 1])])
    return NeuralNetwork(size, shape, l_r, [np.array(obj) for obj in biases], [np.array(obj) for obj in weights])
def cost(a, y):
    """Element-wise squared error between prediction `a` and target `y`."""
    diff = a - y
    return diff * diff
def cost_derivative(a, y):
    """Derivative of the squared-error cost with respect to the activation `a`."""
    residual = a - y
    return residual + residual
def sigmoid(x):
    """Logistic sigmoid, applied element-wise."""
    return np.reciprocal(1.0 + np.exp(-x))
def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed through the sigmoid value itself."""
    s = sigmoid(x)
    return s * (1 - s)
def computeGradientW(a, delta, repeat):
    """Outer-product weight gradient: returns g_w with g_w[j, i] = delta[j] * a[i].

    `a` are the previous layer's activations, `delta` the current layer's
    error; `repeat` must equal len(delta) (kept for interface compatibility).

    Bug fix / simplification: the original first line (`aBuffer =
    a.transpose()`) was dead code — immediately overwritten — and the
    tile/reshape/multiply/transpose dance is exactly the outer product.
    """
    return np.outer(delta, a)
import numpy as np
import math
from encoder import *
class NeuralNetwork:
def __init__(self, *args):
if len(args) == 1:
shape, learning_rate = args[0], 0.05
self.size = len(shape)
self.shape = shape
self.l_r = learning_rate
self.biases = []
self.weights = []
for prev_layer, layer in zip(self.shape[:-1], self.shape[1:]):
b = np.squeeze(np.random.randn(layer, 1))
self.biases.append(b)
w = np.random.randn(layer, prev_layer)
self.weights.append(w)
elif len(args) == 2:
shape, learning_rate = args[0], args[1]
self.size = len(shape)
self.shape = shape
self.l_r = learning_rate
self.biases = []
self.weights = []
for prev_layer, layer in zip(self.shape[:-1], self.shape[1:]):
b = np.squeeze(np.random.randn(layer, 1))
self.biases.append(b)
w = np.random.randn(layer, prev_layer)
self.weights.append(w)
else:
self.size = args[0]
self.shape = args[1]
self.l_r = args[2]
self.biases = args[3]
self.weights = args[4]
def train(self, x, y):
y_pred = self.forward(x)
nabla_b, nabla_w = self.backprop(x, y)
self.update(nabla_b, nabla_w)
return y_pred
def forward(self, a):
self.zs = []
self.activations = [np.array(a)]
for b, w in zip(self.biases, self.weights):
z = np.dot(w, a) + b
self.zs.append(z)
a = sigmoid(z)
self.activations.append(np.array(a))
return a
def backprop(self, x, y):
self.forward(x)
gradient_bias = [np.zeros(b.shape) for b in self.biases]
gradient_weights = [np.zeros(w.shape) for w in self.weights]
# last layer
delta = cost_derivative(self.activations[-1], y) * sigmoid_derivative(self.zs[-1])
gradient_bias[-1] = delta
gradient_weights[-1] = computeGradientW(self.activations[-2],delta,len(self.zs[-1]))
# from before last layer to first layer
# last layer is self.size-2
# before last layer is self.size-3
for l in range(self.size - 3, -1, -1):
delta = np.dot(self.weights[l + 1].T, delta) * sigmoid_derivative(self.zs[l])
gradient_bias[l] = delta
# len(activation) == len(weights)+1
# activation[i] is the previous activations to the layer weights[i]
#delta_w = np.dot(delta, self.activations[l].T)
gradient_weights[l] = computeGradientW(self.activations[l],delta,len(self.zs[l]))
return gradient_bias, gradient_weights
def update(self, nabla_b, nabla_w):
self.biases = [b - self.l_r * nb for b, nb in zip(self.biases, nabla_b)]
self.weights = [w - self.l_r * nw for w, nw in zip(self.weights, nabla_w)]
def train_sgd(self, x_train, y_train, batch_size=20):
x_batches = [ x_train[i : i + batch_size] for i in range(0, len(x_train), batch_size) ]
y_batches = [ y_train[i : i + batch_size] for i in range(0, len(y_train), batch_size) ]
for x_batch, y_batch in zip(x_batches,y_batches):
gradient_bias = [np.zeros(b.shape) for b in self.biases]
gradient_weights = [np.zeros(w.shape) for w in self.weights]
for x, y in zip(x_batch, y_batch):
delta_grad_b, delta_grad_w = self.backprop(x, y)
gradient_bias = [ nb + dnb for nb, dnb in zip(gradient_bias, delta_grad_b) ]
gradient_weights = [ nw + dnw for nw, dnw in zip(gradient_weights, delta_grad_w) ]
gradient_weights = [nw / batch_size for nw in gradient_weights]
gradient_bias = [nb / batch_size for nb in gradient_bias]
self.weights = [ w - self.l_r * nw for w, nw in zip(self.weights, gradient_weights) ]
self.biases = [ b - self.l_r * nb for b, nb in zip(self.biases, gradient_bias) ]
def supervised_learning(self,x_train,y_train,x_test,y_test,lenTest,it,EPOCH=100,batch_size=1000,dataset="classic",file="networks/",write=False):
print("[INIT] - classification rate =",self.evaluate(x_test,y_test))
for j in range(EPOCH+1):
# train
shuffler = np.random.permutation(x_train.shape[0])
x_train = x_train[shuffler]
y_train = y_train[shuffler]
self.train_sgd(x_train,y_train,batch_size=batch_size)
# test
goodPred = 0
preds = 0
for i in range(lenTest):
label_pred = ENCODER.encode_prediction(self.forward(x_test[i]))
if (label_pred == y_test[i]).all():
goodPred += 1
preds += 1
print(j," - classification rate =",self.evaluate(x_test,y_test))
if j%10 == 0:
if write:
self.save((file+dataset+"_"+str(it)+"_"+str(j)))
def evaluate(self, x_test, y_test):
test_results = [ (ENCODER.encode_prediction(self.forward(_x)), (_y)) for _x, _y in zip(x_test, y_test) ]
result = sum(int(_y_pred == _y) for (_y_pred, _y) in test_results)
result /= len(y_test)
return round(result, 3)
def save(self, fileName):
file = open(fileName, "w")
file.write(str(self.size)+"\n")
for i in range(self.size):
file.write(str(self.shape[i])+"\n")
file.write(str(self.l_r)+"\n")
for i in range(1, self.size):
for j in range(self.shape[i]):
for k in range(self.shape[i-1]):
file.write(str(self.weights[i-1][j][k])+"\n")
for i in range(self.size-1):
for j in range(self.shape[i+1]):
file.write(str(self.biases[i][j])+"\n")
file.close()
def readNeuralNetwork(fileName):
file = open(fileName, "r")
size = int(file.readline())
shape = []
for i in range(size):
shape.append(int(file.readline()))
l_r = float(file.readline())
weights = []
for i in range(1, size):
weights.append([])
for j in range(shape[i]):
weights[i-1].append([])
for k in range(shape[i-1]):
weights[i-1][j].append(float(file.readline()))
biases = []
for i in range(size-1):
biases.append([])
for j in range(shape[i+1]):
biases[i].append(float(file.readline()))
file.close()
return NeuralNetwork(size, shape, l_r, [np.array(obj) for obj in biases], [np.array(obj) for obj in weights])
def cost(a, y):
return (a - y) ** 2
def cost_derivative(a, y):
return 2*(a - y)
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def sigmoid_derivative(x):
return sigmoid(x) * (1 - sigmoid(x))
def computeGradientW(a,delta,repeat):
aBuffer = a.transpose()
aBuffer = np.tile(a,repeat).reshape(repeat,len(a)).transpose()
g_w = np.multiply(aBuffer,delta).transpose()
return g_w | en | 0.648432 | # last layer # from before last layer to first layer # last layer is self.size-2 # before last layer is self.size-3 # len(activation) == len(weights)+1 # activation[i] is the previous activations to the layer weights[i] #delta_w = np.dot(delta, self.activations[l].T) # train # test | 2.664225 | 3 |
lib/Android/ClipboardCheck.py | Ba-hub/R3verseBug | 4 | 6620694 | from ..Base import Base
from ..info import Info
from ..apk import register
from ..tools import *
TITLE = 'Clipboard sensitive information leakage detection'
LEVEL = 2
INFO = 'Detect whether the app has the risk of sensitive data leakage on the clipboard'
class ClipboardCheck(Base):
    """Scans decompiled smali sources for ClipboardManager writes and reports
    each occurrence as "filename : line-from-end" via Info().
    """
    def scan(self):
        # NOTE(review): self.appPath is interpolated into a shell command run
        # through os.popen — paths containing spaces or shell metacharacters
        # break this (and could inject commands); consider subprocess with a
        # list argument.
        strline = cmdString('grep -r "ClipboardManager;->setPrimaryClip\|ClipboardManager;->setText" ' + self.appPath)
        paths = getSmalis(os.popen(strline).readlines())
        results = []
        for path in paths:
            with open(path, 'r') as f:
                lines = f.readlines()
                # Reversed so that `count - i` below yields the 1-based line
                # number of the match counted from the start of the file.
                lines.reverse()
                count = len(lines)
                name = getFileName(path)
                for i in range(0, count):
                    line = lines[i]
                    if 'ClipboardManager;->setPrimaryClip' in line or 'ClipboardManager;->setText' in line:
                        result = name + ' : ' + str(count - i)
                        if result not in results:
                            results.append(result)
        # Emit one report entry per unique (file, line) hit.
        Info(key=self.__class__, title=TITLE, level=LEVEL, info=INFO, result='\n'.join(results)).description()
register(ClipboardCheck) | from ..Base import Base
from ..info import Info
from ..apk import register
from ..tools import *
TITLE = 'Clipboard sensitive information leakage detection'
LEVEL = 2
INFO = 'Detect whether the app has the risk of sensitive data leakage on the clipboard'
class ClipboardCheck(Base):
def scan(self):
strline = cmdString('grep -r "ClipboardManager;->setPrimaryClip\|ClipboardManager;->setText" ' + self.appPath)
paths = getSmalis(os.popen(strline).readlines())
results = []
for path in paths:
with open(path, 'r') as f:
lines = f.readlines()
lines.reverse()
count = len(lines)
name = getFileName(path)
for i in range(0, count):
line = lines[i]
if 'ClipboardManager;->setPrimaryClip' in line or 'ClipboardManager;->setText' in line:
result = name + ' : ' + str(count - i)
if result not in results:
results.append(result)
Info(key=self.__class__, title=TITLE, level=LEVEL, info=INFO, result='\n'.join(results)).description()
register(ClipboardCheck) | none | 1 | 2.405941 | 2 | |
website/migrations/0001_initial.py | p-koskey/awwards | 0 | 6620695 | # Generated by Django 3.1.2 on 2020-10-26 18:43
import cloudinary.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Profile (one-to-one with
    # the user model) and Post (many posts per user) tables. Auto-generated
    # migrations should not be hand-edited once applied — add a new migration
    # instead.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bio', models.TextField(blank=True, default='My Bio', max_length=500)),
                ('fullname', models.CharField(blank=True, max_length=120)),
                ('location', models.CharField(blank=True, max_length=60)),
                ('profile_picture', cloudinary.models.CloudinaryField(max_length=255, verbose_name='image')),
                ('email', models.EmailField(blank=True, max_length=100)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('projectname', models.CharField(max_length=155)),
                ('link', models.CharField(max_length=255)),
                ('projectinfo', models.CharField(max_length=255)),
                ('languages', models.CharField(max_length=200)),
                ('picture', cloudinary.models.CloudinaryField(max_length=255, verbose_name='image')),
                ('posted', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| # Generated by Django 3.1.2 on 2020-10-26 18:43
import cloudinary.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.TextField(blank=True, default='My Bio', max_length=500)),
('fullname', models.CharField(blank=True, max_length=120)),
('location', models.CharField(blank=True, max_length=60)),
('profile_picture', cloudinary.models.CloudinaryField(max_length=255, verbose_name='image')),
('email', models.EmailField(blank=True, max_length=100)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('projectname', models.CharField(max_length=155)),
('link', models.CharField(max_length=255)),
('projectinfo', models.CharField(max_length=255)),
('languages', models.CharField(max_length=200)),
('picture', cloudinary.models.CloudinaryField(max_length=255, verbose_name='image')),
('posted', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
],
),
]
| en | 0.811268 | # Generated by Django 3.1.2 on 2020-10-26 18:43 | 1.907086 | 2 |
GenerateMaze.py | AkshayGuptaK/maze-solvers | 0 | 6620696 | <reponame>AkshayGuptaK/maze-solvers
"""Creates mazes in the form of adjacency matrixes"""
#Maze Generator
import numpy as np
import random
def add_path(node1, node2, maze, p):
    """Symmetrically mark an edge between node1 and node2 in the adjacency
    matrix and return the incremented path count."""
    for row, col in ((node1, node2), (node2, node1)):
        maze[row, col] = 1
    return p + 1
def check_full(maze, node, nodes): #checks if more paths can be added for a given node
return np.count_nonzero(maze, 1)[node] == nodes-1
def fill_paths(random, maze, nodes, paths, maxpaths):
    """Add random extra edges to `maze` until the path count reaches `maxpaths`.

    `random` is an RNG object exposing choice() (the parameter shadows the
    module-level `random` import). NOTE(review): there is no guard for
    `maxpaths` exceeding what the graph can hold — if every eligible node
    fills up, random.choice on an empty tuple raises; confirm callers keep
    maxpaths within capacity.
    """
    #adds additional paths up to specified maximum to the maze at random
    # Candidate source nodes: every non-full node except the exit (nodes-1).
    empty = {node for node in range(nodes-1) if not check_full(maze, node, nodes)}
    while paths < maxpaths:
        i = random.choice(tuple(empty))
        # Columns still 0 in row i are nodes not yet connected to i.
        choices = np.where(maze[i,:]==0)[0].tolist()
        choices.remove(i)  # no self-loops
        j = random.choice(choices)
        paths = add_path(i, j, maze, paths)
        if check_full(maze, i, nodes):
            empty.discard(i)
        if check_full(maze, j, nodes):
            # discard() is safe even when j is the exit node (never in `empty`).
            empty.discard(j)
def generate_simple_maze(random, nodes, paths):
    """Build a random connected maze over `nodes` nodes, then top up to `paths`
    edges via fill_paths. Node 0 is the entry, node nodes-1 the exit.
    """
    #random maze which biases degree in favor of nodes closer to entry
    maze = np.zeros((nodes, nodes), np.int8)
    i = 0
    traversed = {0}
    # All nodes except the exit; the exit is connected last.
    all_nodes = {node for node in range(nodes-1)}
    p = 0
    while all_nodes - traversed:
        # Attach a fresh node to a random already-reached node; re-picking the
        # source from `traversed` biases degree toward early (entry-side) nodes.
        j = random.choice(tuple(all_nodes-traversed))
        p = add_path(i, j, maze, p)
        traversed.add(j)
        i = random.choice(tuple(traversed))
    # Finally connect the exit to some reachable node.
    p = add_path(i, nodes-1, maze, p)
    fill_paths(random, maze, nodes, p, paths)
    return maze
def generate_tree_plus(random, degree_function, nodes, paths):
    """Build a tree-shaped maze level by level, each node's child count drawn
    from `degree_function(random)`, then attach the exit and add extra edges
    up to `paths` (defaulting to nodes-1 when `paths` is falsy).
    """
    #builds a tree like maze with specified degree for each node, plus additional random paths if desired
    tree = np.zeros((nodes, nodes), np.int8)
    paths = paths if paths else nodes-1
    i = 0
    built = {0}
    current_level = {0}
    all_nodes = {node for node in range(nodes-1)}
    p = 0
    link = 0
    while all_nodes - built:
        new_level = set()
        for i in current_level:
            for degree in range(degree_function(random)):
                if all_nodes - built:
                    j = random.choice(tuple(all_nodes - built))
                    p = add_path(i, j, tree, p)
                    built.add(j)
                    new_level.add(j)
                else:
                    # Tree exhausted mid-level: remember this node as the
                    # candidate to link the exit to.
                    link = i
                    break
        current_level = new_level
    # NOTE(review): `link` starts at 0, so a legitimately chosen link of node 0
    # is indistinguishable from "not set" and gets overwritten here — confirm
    # this falsy check is intended.
    link = link if link else random.choice(tuple(new_level))
    p = add_path(link, nodes-1, tree, p)
    fill_paths(random, tree, nodes, p, paths)
    return tree
def generate_helix_maze(random, nodes=6, forks=2, junctions=1, min_segment=0, max_segment=1):
    """UNFINISHED stub: intended to build a helix-shaped maze; currently only
    allocates the matrix and picks junction rows, then implicitly returns None.
    """
    # nodes > (junctions+1)((forks-1)*max_segment + min_segment)+junctions+2
    helix = np.zeros((nodes, nodes), np.int8)
    junction_rows = [random.randint(1, nodes-2) for x in range(junctions)]
    #complete this
def generate_mazes(random, maze_function, num_mazes=1, *args):
    """Build `num_mazes` mazes via maze_function(random, *args) and stack them
    along a new trailing axis."""
    generated = [maze_function(random, *args) for _ in range(num_mazes)]
    return np.stack(generated, axis=-1)
def make_constant(constant):
    """Return a degree function that ignores its RNG argument and always
    yields `constant`."""
    def degree(_random):
        return constant
    return degree
def make_randomizer(maxdegree):
    """Return a degree function drawing uniformly from 1..maxdegree inclusive."""
    def degree(random):
        return random.randint(1, maxdegree)
    return degree
#Maze Generator
import numpy as np
import random
def add_path(node1, node2, maze, p): #adds a path between two nodes
maze[node1, node2] = 1
maze[node2, node1] = 1
return p + 1
def check_full(maze, node, nodes): #checks if more paths can be added for a given node
return np.count_nonzero(maze, 1)[node] == nodes-1
def fill_paths(random, maze, nodes, paths, maxpaths):
#adds additional paths up to specified maximum to the maze at random
empty = {node for node in range(nodes-1) if not check_full(maze, node, nodes)}
while paths < maxpaths:
i = random.choice(tuple(empty))
choices = np.where(maze[i,:]==0)[0].tolist()
choices.remove(i)
j = random.choice(choices)
paths = add_path(i, j, maze, paths)
if check_full(maze, i, nodes):
empty.discard(i)
if check_full(maze, j, nodes):
empty.discard(j)
def generate_simple_maze(random, nodes, paths):
#random maze which biases degree in favor of nodes closer to entry
maze = np.zeros((nodes, nodes), np.int8)
i = 0
traversed = {0}
all_nodes = {node for node in range(nodes-1)}
p = 0
while all_nodes - traversed:
j = random.choice(tuple(all_nodes-traversed))
p = add_path(i, j, maze, p)
traversed.add(j)
i = random.choice(tuple(traversed))
p = add_path(i, nodes-1, maze, p)
fill_paths(random, maze, nodes, p, paths)
return maze
def generate_tree_plus(random, degree_function, nodes, paths):
#builds a tree like maze with specified degree for each node, plus additional random paths if desired
tree = np.zeros((nodes, nodes), np.int8)
paths = paths if paths else nodes-1
i = 0
built = {0}
current_level = {0}
all_nodes = {node for node in range(nodes-1)}
p = 0
link = 0
while all_nodes - built:
new_level = set()
for i in current_level:
for degree in range(degree_function(random)):
if all_nodes - built:
j = random.choice(tuple(all_nodes - built))
p = add_path(i, j, tree, p)
built.add(j)
new_level.add(j)
else:
link = i
break
current_level = new_level
link = link if link else random.choice(tuple(new_level))
p = add_path(link, nodes-1, tree, p)
fill_paths(random, tree, nodes, p, paths)
return tree
def generate_helix_maze(random, nodes=6, forks=2, junctions=1, min_segment=0, max_segment=1):
# nodes > (junctions+1)((forks-1)*max_segment + min_segment)+junctions+2
helix = np.zeros((nodes, nodes), np.int8)
junction_rows = [random.randint(1, nodes-2) for x in range(junctions)]
#complete this
def generate_mazes(random, maze_function, num_mazes=1, *args):
mazes = []
for i in range(num_mazes):
mazes.append(maze_function(random, *args))
return np.stack(mazes, axis=-1)
def make_constant(constant):
return lambda random: constant
def make_randomizer(maxdegree):
return lambda random: random.randint(1, maxdegree) | en | 0.794412 | Creates mazes in the form of adjacency matrixes #Maze Generator #adds a path between two nodes #checks if more paths can be added for a given node #adds additional paths up to specified maximum to the maze at random #random maze which biases degree in favor of nodes closer to entry #builds a tree like maze with specified degree for each node, plus additional random paths if desired # nodes > (junctions+1)((forks-1)*max_segment + min_segment)+junctions+2 #complete this | 3.995481 | 4 |
mvtec.py | taikiinoue45/docker-mvtec | 0 | 6620697 | <filename>mvtec.py
import os
from pathlib import Path
from random import Random
from typing import Dict, List
import cv2
import numpy as np
import pandas as pd
from pandas import DataFrame
from tqdm import tqdm
from typing_extensions import Literal
def create_info_csv() -> DataFrame:
train_df = create_mode_df(mode="train")
test_df = create_mode_df(mode="test")
for category in train_df["category"].unique():
category_df = train_df.loc[train_df["category"] == category]
category_index = category_df.index.tolist()
Random(5).shuffle(category_index)
for i, val_index in enumerate(np.array_split(category_index, 5)):
train_df.loc[val_index, f"cv{i}"] = "val"
df = pd.concat([train_df, test_df])
df = df.reset_index()
df.to_csv("/data/info.csv", index=False)
return df
def create_mode_df(mode: Literal["train", "test"]) -> DataFrame:
di: Dict[str, List[str]] = {
"old_img_path": [],
"old_stem": [],
"defect": [],
"mode": [],
"category": [],
}
for p in Path("/data/MVTec").glob(f"*/{mode}/*/*.png"):
di["old_img_path"].append(str(p))
di["old_stem"].append(p.stem)
di["defect"].append(p.parents[0].name)
di["mode"].append(p.parents[1].name)
di["category"].append(p.parents[2].name)
df = pd.DataFrame(di)
df["cv0"] = mode
df["cv1"] = mode
df["cv2"] = mode
df["cv3"] = mode
df["cv4"] = mode
df["stem"] = ""
df["old_mask_path"] = ""
for i in df.index:
old_stem, defect, mode, category = df.loc[i, ["old_stem", "defect", "mode", "category"]]
stem = f"{category}_{mode}_{defect}_{old_stem}"
old_mask_path = f"/data/MVTec/{category}/ground_truth/{defect}/{old_stem}_mask.png"
df.loc[i, "stem"] = stem
df.loc[i, "old_mask_path"] = old_mask_path
return df
def move_images_and_masks(df: pd.DataFrame) -> None:
os.mkdir("/data/images")
os.mkdir("/data/masks")
for i in tqdm(df.index):
old_img_path, old_mask_path, stem = df.loc[i, ["old_img_path", "old_mask_path", "stem"]]
if os.path.exists(old_mask_path):
os.rename(old_mask_path, f"/data/masks/{stem}.png")
else:
img = cv2.imread(old_img_path)
mask = np.zeros(img.shape)
cv2.imwrite(f"/data/masks/{stem}.png", mask)
os.rename(old_img_path, f"/data/images/{stem}.png")
if __name__ == "__main__":
df = create_info_csv()
move_images_and_masks(df)
| <filename>mvtec.py
import os
from pathlib import Path
from random import Random
from typing import Dict, List
import cv2
import numpy as np
import pandas as pd
from pandas import DataFrame
from tqdm import tqdm
from typing_extensions import Literal
def create_info_csv() -> DataFrame:
train_df = create_mode_df(mode="train")
test_df = create_mode_df(mode="test")
for category in train_df["category"].unique():
category_df = train_df.loc[train_df["category"] == category]
category_index = category_df.index.tolist()
Random(5).shuffle(category_index)
for i, val_index in enumerate(np.array_split(category_index, 5)):
train_df.loc[val_index, f"cv{i}"] = "val"
df = pd.concat([train_df, test_df])
df = df.reset_index()
df.to_csv("/data/info.csv", index=False)
return df
def create_mode_df(mode: Literal["train", "test"]) -> DataFrame:
di: Dict[str, List[str]] = {
"old_img_path": [],
"old_stem": [],
"defect": [],
"mode": [],
"category": [],
}
for p in Path("/data/MVTec").glob(f"*/{mode}/*/*.png"):
di["old_img_path"].append(str(p))
di["old_stem"].append(p.stem)
di["defect"].append(p.parents[0].name)
di["mode"].append(p.parents[1].name)
di["category"].append(p.parents[2].name)
df = pd.DataFrame(di)
df["cv0"] = mode
df["cv1"] = mode
df["cv2"] = mode
df["cv3"] = mode
df["cv4"] = mode
df["stem"] = ""
df["old_mask_path"] = ""
for i in df.index:
old_stem, defect, mode, category = df.loc[i, ["old_stem", "defect", "mode", "category"]]
stem = f"{category}_{mode}_{defect}_{old_stem}"
old_mask_path = f"/data/MVTec/{category}/ground_truth/{defect}/{old_stem}_mask.png"
df.loc[i, "stem"] = stem
df.loc[i, "old_mask_path"] = old_mask_path
return df
def move_images_and_masks(df: pd.DataFrame) -> None:
os.mkdir("/data/images")
os.mkdir("/data/masks")
for i in tqdm(df.index):
old_img_path, old_mask_path, stem = df.loc[i, ["old_img_path", "old_mask_path", "stem"]]
if os.path.exists(old_mask_path):
os.rename(old_mask_path, f"/data/masks/{stem}.png")
else:
img = cv2.imread(old_img_path)
mask = np.zeros(img.shape)
cv2.imwrite(f"/data/masks/{stem}.png", mask)
os.rename(old_img_path, f"/data/images/{stem}.png")
if __name__ == "__main__":
df = create_info_csv()
move_images_and_masks(df)
| none | 1 | 2.461667 | 2 | |
scripts/deploy_ropsten.py | SBfin/StrategyImplementation | 2 | 6620698 | from brownie import (
accounts,
project,
OrbitVault,
DynamicRangesStrategy,
TestRouter,
Contract,
ZERO_ADDRESS,
interface
)
from brownie.network.gas.strategies import ExponentialScalingStrategy
from math import floor, sqrt
import time
from math import floor, sqrt
from brownie.network.gas.strategies import GasNowScalingStrategy, ExponentialScalingStrategy
from brownie.network import gas_price, gas_limit
from brownie.network import priority_fee
import os
KEEPER = "0x1bcfFbd9151Da963F874a77B60397d4cD8215a0D"
# Uniswap v3 factory on Ropsten
FACTORY="0x1F98431c8aD98523631AE4a59f267346ea31F984"
PROTOCOL_FEE = 10000
MAX_TOTAL_SUPPLY = 1e32
BASE_THRESHOLD = 3600 #1.43
LIMIT_THRESHOLD = 1200 #1.12
PERIOD = 43200 # 12 hours
MIN_TICK_MOVE = 0
MAX_TWAP_DEVIATION = 100 # 1%
TWAP_DURATION = 60 # 60 seconds
# Set this to make the first deposit, in this example we deposit 1 token0 = 4K token1
DEPOSIT_TOKEN_1 = 0.01e18
DEPOSIT_TOKEN_2 = 40e6
def main():
deployer = accounts.load("deployer", "none")
print("deployer balance: ", deployer.balance())
UniswapV3Core = project.load("Uniswap/v3-core@1.0.0")
priority_fee("auto")
weth = interface.IERC20("0xc778417e063141139fce010982780140aa0cd5ab")
usdc = interface.IERC20("0x07865c6E87B9F70255377e024ace6630C1Eaa37F")
factory = UniswapV3Core.interface.IUniswapV3Factory(FACTORY)
pool = UniswapV3Core.interface.IUniswapV3Pool(factory.getPool(weth, usdc, 500))
print("pool address: ", pool)
vault = OrbitVault.deploy(
pool,
PROTOCOL_FEE,
MAX_TOTAL_SUPPLY,
weth,
{"from": deployer},
publish_source = True
)
strategy = deployer.deploy(
DynamicRangesStrategy,
vault,
BASE_THRESHOLD,
LIMIT_THRESHOLD,
MAX_TWAP_DEVIATION,
TWAP_DURATION,
deployer,
publish_source = True
)
vault.setStrategy(strategy, {"from": deployer})
strategy.setKeeper(KEEPER, {"from": deployer})
print("Doing the first deposit to set the price ratio..")
weth.approve(vault, 1<<255, {"from": deployer})
usdc.approve(vault, 1<<255, {"from": deployer})
tx = vault.deposit(DEPOSIT_TOKEN_1, DEPOSIT_TOKEN_2, 0, 0, deployer, {"from": deployer})
print(f"Vault address: {vault.address}")
print(f"Strategy address: {strategy.address}")
print(f"Deposited transaction: {tx}")
| from brownie import (
accounts,
project,
OrbitVault,
DynamicRangesStrategy,
TestRouter,
Contract,
ZERO_ADDRESS,
interface
)
from brownie.network.gas.strategies import ExponentialScalingStrategy
from math import floor, sqrt
import time
from math import floor, sqrt
from brownie.network.gas.strategies import GasNowScalingStrategy, ExponentialScalingStrategy
from brownie.network import gas_price, gas_limit
from brownie.network import priority_fee
import os
KEEPER = "0x1bcfFbd9151Da963F874a77B60397d4cD8215a0D"
# Uniswap v3 factory on Ropsten
FACTORY="0x1F98431c8aD98523631AE4a59f267346ea31F984"
PROTOCOL_FEE = 10000
MAX_TOTAL_SUPPLY = 1e32
BASE_THRESHOLD = 3600 #1.43
LIMIT_THRESHOLD = 1200 #1.12
PERIOD = 43200 # 12 hours
MIN_TICK_MOVE = 0
MAX_TWAP_DEVIATION = 100 # 1%
TWAP_DURATION = 60 # 60 seconds
# Set this to make the first deposit, in this example we deposit 1 token0 = 4K token1
DEPOSIT_TOKEN_1 = 0.01e18
DEPOSIT_TOKEN_2 = 40e6
def main():
deployer = accounts.load("deployer", "none")
print("deployer balance: ", deployer.balance())
UniswapV3Core = project.load("Uniswap/v3-core@1.0.0")
priority_fee("auto")
weth = interface.IERC20("0xc778417e063141139fce010982780140aa0cd5ab")
usdc = interface.IERC20("0x07865c6E87B9F70255377e024ace6630C1Eaa37F")
factory = UniswapV3Core.interface.IUniswapV3Factory(FACTORY)
pool = UniswapV3Core.interface.IUniswapV3Pool(factory.getPool(weth, usdc, 500))
print("pool address: ", pool)
vault = OrbitVault.deploy(
pool,
PROTOCOL_FEE,
MAX_TOTAL_SUPPLY,
weth,
{"from": deployer},
publish_source = True
)
strategy = deployer.deploy(
DynamicRangesStrategy,
vault,
BASE_THRESHOLD,
LIMIT_THRESHOLD,
MAX_TWAP_DEVIATION,
TWAP_DURATION,
deployer,
publish_source = True
)
vault.setStrategy(strategy, {"from": deployer})
strategy.setKeeper(KEEPER, {"from": deployer})
print("Doing the first deposit to set the price ratio..")
weth.approve(vault, 1<<255, {"from": deployer})
usdc.approve(vault, 1<<255, {"from": deployer})
tx = vault.deposit(DEPOSIT_TOKEN_1, DEPOSIT_TOKEN_2, 0, 0, deployer, {"from": deployer})
print(f"Vault address: {vault.address}")
print(f"Strategy address: {strategy.address}")
print(f"Deposited transaction: {tx}")
| en | 0.671601 | # Uniswap v3 factory on Ropsten #1.43 #1.12 # 12 hours # 1% # 60 seconds # Set this to make the first deposit, in this example we deposit 1 token0 = 4K token1 | 1.896281 | 2 |
packages/pegasus-api/test/api/test_properties.py | hariharan-devarajan/pegasus | 0 | 6620699 | <reponame>hariharan-devarajan/pegasus<filename>packages/pegasus-api/test/api/test_properties.py
import os
from configparser import DEFAULTSECT
from tempfile import TemporaryFile
import pytest
from Pegasus.api.properties import Properties
@pytest.fixture(scope="function")
def props():
return Properties()
class TestProperties:
def test_ls(self, capsys, props):
try:
Properties.ls("pegasus.pmc")
captured = capsys.readouterr().out
assert (
captured
== "pegasus.pmc_priority\npegasus.pmc_request_cpus\npegasus.pmc_request_memory\npegasus.pmc_task_arguments\n"
)
Properties.ls()
Properties.ls("nothing")
props.ls()
except:
pytest.raises("should not have failed")
def test_get_item(self, props, mocker):
props["a"] = "b"
assert props["a"] == "b"
def test_del_item(self, props):
props["a"] = "b"
del props["a"]
assert "a" not in props._conf[DEFAULTSECT]
def test_write_str_filename(self, props):
filename = "props"
props["a"] = "b"
props["c"] = "d"
props.write(filename)
with open(filename) as f:
assert f.read() == "a = b\nc = d\n\n"
os.remove(filename)
def test_write_str_filename_ensure_key_case_preserved(self, props):
filename = "props"
props["a"] = "b"
props["C"] = "d"
props.write(filename)
with open(filename) as f:
assert f.read() == "a = b\nC = d\n\n"
os.remove(filename)
def test_write_file(self, props):
with TemporaryFile(mode="w+") as f:
props["a"] = "b"
props.write(f)
f.seek(0)
assert f.read() == "a = b\n\n"
def test_write_invalid_file(self, props):
with pytest.raises(TypeError) as e:
props.write(123)
assert "invalid file: 123" in str(e)
def test_write_default_file(self, props):
props["a"] = "b"
props.write()
EXPECTED_DEFAULT_FILE = "pegasus.properties"
with open(EXPECTED_DEFAULT_FILE) as f:
assert f.read() == "a = b\n\n"
os.remove(EXPECTED_DEFAULT_FILE)
| import os
from configparser import DEFAULTSECT
from tempfile import TemporaryFile
import pytest
from Pegasus.api.properties import Properties
@pytest.fixture(scope="function")
def props():
return Properties()
class TestProperties:
def test_ls(self, capsys, props):
try:
Properties.ls("pegasus.pmc")
captured = capsys.readouterr().out
assert (
captured
== "pegasus.pmc_priority\npegasus.pmc_request_cpus\npegasus.pmc_request_memory\npegasus.pmc_task_arguments\n"
)
Properties.ls()
Properties.ls("nothing")
props.ls()
except:
pytest.raises("should not have failed")
def test_get_item(self, props, mocker):
props["a"] = "b"
assert props["a"] == "b"
def test_del_item(self, props):
props["a"] = "b"
del props["a"]
assert "a" not in props._conf[DEFAULTSECT]
def test_write_str_filename(self, props):
filename = "props"
props["a"] = "b"
props["c"] = "d"
props.write(filename)
with open(filename) as f:
assert f.read() == "a = b\nc = d\n\n"
os.remove(filename)
def test_write_str_filename_ensure_key_case_preserved(self, props):
filename = "props"
props["a"] = "b"
props["C"] = "d"
props.write(filename)
with open(filename) as f:
assert f.read() == "a = b\nC = d\n\n"
os.remove(filename)
def test_write_file(self, props):
with TemporaryFile(mode="w+") as f:
props["a"] = "b"
props.write(f)
f.seek(0)
assert f.read() == "a = b\n\n"
def test_write_invalid_file(self, props):
with pytest.raises(TypeError) as e:
props.write(123)
assert "invalid file: 123" in str(e)
def test_write_default_file(self, props):
props["a"] = "b"
props.write()
EXPECTED_DEFAULT_FILE = "pegasus.properties"
with open(EXPECTED_DEFAULT_FILE) as f:
assert f.read() == "a = b\n\n"
os.remove(EXPECTED_DEFAULT_FILE) | none | 1 | 2.085178 | 2 | |
words.py | salberico/fastfingers | 1 | 6620700 | english_words = [
"America",
"Indian",
"about",
"above",
"add",
"after",
"again",
"air",
"all",
"almost",
"along",
"also",
"always",
"an",
"and",
"animal",
"another",
"answer",
"any",
"are",
"around",
"as",
"ask",
"at",
"away",
"back",
"be",
"because",
"been",
"before",
"began",
"begin",
"being",
"below",
"between",
"big",
"book",
"both",
"boy",
"but",
"by",
"call",
"came",
"can",
"car",
"carry",
"change",
"children",
"city",
"close",
"come",
"could",
"country",
"cut",
"day",
"did",
"different",
"do",
"does",
"don't",
"down",
"each",
"earth",
"eat",
"end",
"enough",
"even",
"every",
"example",
"eye",
"face",
"family",
"far",
"father",
"feet",
"few",
"find",
"first",
"follow",
"food",
"for",
"form",
"found",
"four",
"from",
"get",
"girl",
"give",
"go",
"good",
"got",
"great",
"group",
"grow",
"had",
"hand",
"hard",
"has",
"have",
"he",
"head",
"hear",
"help",
"her",
"here",
"high",
"him",
"his",
"home",
"house",
"how",
"idea",
"if",
"important",
"in",
"into",
"is",
"it",
"it's",
"its",
"just",
"keep",
"kind",
"know",
"land",
"large",
"last",
"later",
"learn",
"leave",
"left",
"let",
"letter",
"life",
"light",
"like",
"line",
"list",
"little",
"live",
"long",
"look",
"made",
"make",
"man",
"many",
"may",
"me",
"mean",
"men",
"might",
"mile",
"miss",
"more",
"most",
"mother",
"mountain",
"move",
"much",
"must",
"my",
"name",
"near",
"need",
"never",
"new",
"next",
"night",
"no",
"not",
"now",
"number",
"of",
"off",
"often",
"oil",
"old",
"on",
"once",
"one",
"only",
"open",
"or",
"other",
"our",
"out",
"over",
"own",
"page",
"paper",
"part",
"people",
"picture",
"place",
"plant",
"play",
"point",
"put",
"question",
"quick",
"quickly",
"quite",
"read",
"really",
"right",
"river",
"run",
"said",
"same",
"saw",
"say",
"school",
"sea",
"second",
"see",
"seem",
"sentence",
"set",
"she",
"should",
"show",
"side",
"small",
"so",
"some",
"something",
"sometimes",
"song",
"soon",
"sound",
"spell",
"start",
"state",
"still",
"stop",
"story",
"study",
"such",
"take",
"talk",
"tell",
"than",
"that",
"the",
"them",
"then",
"there",
"these",
"they",
"thing",
"think",
"this",
"those",
"thought",
"three",
"through",
"time",
"to",
"together",
"too",
"took",
"tree",
"try",
"turn",
"two",
"under",
"until",
"up",
"us",
"use",
"very",
"walk",
"want",
"was",
"watch",
"water",
"way",
"we",
"well",
"went",
"were",
"what",
"when",
"where",
"which",
"while",
"white",
"who",
"why",
"will",
"with",
"without",
"word",
"work",
"world",
"would",
"write",
"year",
"you",
"young",
"your",
] | english_words = [
"America",
"Indian",
"about",
"above",
"add",
"after",
"again",
"air",
"all",
"almost",
"along",
"also",
"always",
"an",
"and",
"animal",
"another",
"answer",
"any",
"are",
"around",
"as",
"ask",
"at",
"away",
"back",
"be",
"because",
"been",
"before",
"began",
"begin",
"being",
"below",
"between",
"big",
"book",
"both",
"boy",
"but",
"by",
"call",
"came",
"can",
"car",
"carry",
"change",
"children",
"city",
"close",
"come",
"could",
"country",
"cut",
"day",
"did",
"different",
"do",
"does",
"don't",
"down",
"each",
"earth",
"eat",
"end",
"enough",
"even",
"every",
"example",
"eye",
"face",
"family",
"far",
"father",
"feet",
"few",
"find",
"first",
"follow",
"food",
"for",
"form",
"found",
"four",
"from",
"get",
"girl",
"give",
"go",
"good",
"got",
"great",
"group",
"grow",
"had",
"hand",
"hard",
"has",
"have",
"he",
"head",
"hear",
"help",
"her",
"here",
"high",
"him",
"his",
"home",
"house",
"how",
"idea",
"if",
"important",
"in",
"into",
"is",
"it",
"it's",
"its",
"just",
"keep",
"kind",
"know",
"land",
"large",
"last",
"later",
"learn",
"leave",
"left",
"let",
"letter",
"life",
"light",
"like",
"line",
"list",
"little",
"live",
"long",
"look",
"made",
"make",
"man",
"many",
"may",
"me",
"mean",
"men",
"might",
"mile",
"miss",
"more",
"most",
"mother",
"mountain",
"move",
"much",
"must",
"my",
"name",
"near",
"need",
"never",
"new",
"next",
"night",
"no",
"not",
"now",
"number",
"of",
"off",
"often",
"oil",
"old",
"on",
"once",
"one",
"only",
"open",
"or",
"other",
"our",
"out",
"over",
"own",
"page",
"paper",
"part",
"people",
"picture",
"place",
"plant",
"play",
"point",
"put",
"question",
"quick",
"quickly",
"quite",
"read",
"really",
"right",
"river",
"run",
"said",
"same",
"saw",
"say",
"school",
"sea",
"second",
"see",
"seem",
"sentence",
"set",
"she",
"should",
"show",
"side",
"small",
"so",
"some",
"something",
"sometimes",
"song",
"soon",
"sound",
"spell",
"start",
"state",
"still",
"stop",
"story",
"study",
"such",
"take",
"talk",
"tell",
"than",
"that",
"the",
"them",
"then",
"there",
"these",
"they",
"thing",
"think",
"this",
"those",
"thought",
"three",
"through",
"time",
"to",
"together",
"too",
"took",
"tree",
"try",
"turn",
"two",
"under",
"until",
"up",
"us",
"use",
"very",
"walk",
"want",
"was",
"watch",
"water",
"way",
"we",
"well",
"went",
"were",
"what",
"when",
"where",
"which",
"while",
"white",
"who",
"why",
"will",
"with",
"without",
"word",
"work",
"world",
"would",
"write",
"year",
"you",
"young",
"your",
] | none | 1 | 2.717788 | 3 | |
tests/featureDetectionTest.py | yusufsarikaya/Image_Comparison | 19 | 6620701 | <reponame>yusufsarikaya/Image_Comparison<filename>tests/featureDetectionTest.py
###############################
#
# (c) <NAME> 2017
# Student No: C14714071
# Course: DT228
# Date: 04-10-2017
#
# Title: Testing Feature Detection Algorithms
import numpy as np
import cv2
import easygui
# help(cv2.drawKeypoints)
# help(cv2.drawMatches)
imagesPath = 'images/'
outputPath = 'output/'
fileExtension = '.jpg'
I1 = cv2.imread(imagesPath + 'pcb1.jpg')
I2 = cv2.imread(imagesPath + 'pcb2.jpg')
G1 = cv2.cvtColor(I1, cv2.COLOR_BGR2GRAY)
G2 = cv2.cvtColor(I2, cv2.COLOR_BGR2GRAY)
def displayFAST(window, image, nms=1):
# FAST - Features from Accelerated Segment Test
# Reference - http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_fast/py_fast.html
fast = cv2.FastFeatureDetector_create()
# Set maximum suppression - nms ignores unimportant pixels in corners that are not the the local maxima
# Reference - http://users.ecs.soton.ac.uk/msn/book/new_demo/nonmax/
fast.setNonmaxSuppression(nms)
# find corners
keyPoints = fast.detect(image, None)
corners = image.copy()
corners = cv2.drawKeypoints(image = image, keypoints = keyPoints, outImage = corners, color = (0, 0, 255))
cv2.imwrite(outputPath + window + fileExtension, corners)
cv2.imshow(window, corners)
def displayORB(window, image):
# ORB - Oriented FAST and Rotated BRIEF
# Reference - http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_orb/py_orb.html#orb
orb = cv2.ORB_create()
keyPoints = orb.detect(image, None)
keyPoints, descriptor = orb.compute(image, keyPoints)
newImage = image.copy()
newImage = cv2.drawKeypoints(image = image, keypoints = keyPoints, outImage = newImage, color = (0, 0, 255), flags = 0)
cv2.imwrite(outputPath + window + fileExtension, newImage)
cv2.imshow(window, newImage)
def displayAKAZE(window, image):
# AKAZE
# Reference - http://docs.opencv.org/3.0-beta/doc/tutorials/features2d/akaze_matching/akaze_matching.html
akaze = cv2.AKAZE_create()
keyPoints = akaze.detect(image, None)
newImage = image.copy()
newImage = cv2.drawKeypoints(image = image, keypoints = keyPoints, outImage = newImage, color = (0, 0, 255), flags = 0)
cv2.imwrite(outputPath + window + fileExtension, newImage)
cv2.imshow(window, newImage)
def showDifs(window, image1, keyPoints1, desc1, image2, keyPoints2, desc2):
# Feature Matching
# Reference - http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
# BFMatcher
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = True)
matches = bf.match(desc1, desc2)
matches = sorted(matches, key = lambda x:x.distance)
matchesImage = image1.copy()
matchesImage = cv2.drawMatches(img1 = image1, keypoints1 = keyPoints1, img2 = image2, keypoints2 = keyPoints2, matches1to2 = matches[:250], outImg = matchesImage, flags=2)
cv2.imwrite(outputPath + window + fileExtension, matchesImage)
cv2.imshow(window, matchesImage)
# displayFAST('fast1', I1)
# displayFAST('fast2', I2)
#
# displayORB('orb1', I1)
# displayORB('orb2', I2)
#
# displayAKAZE('akaze1', I1)
# displayAKAZE('akaze2', I2)
# Get edges from images with Canny Edge Detection - TODO: test with multiple feature detection algorithms
# Reference http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_canny/py_canny.html
# edges1 = cv2.Canny(I1, 150, 200)
# edges2 = cv2.Canny(I2, 150, 200)
# cv2.imshow('canny1', edges1)
# cv2.imshow('canny2', edges2)
# Noise Reduction using 5x5 Gaussian filter ?
# ORB
# orb = cv2.ORB_create()
# keyPoints1, desc1 = orb.detectAndCompute(I1, None)
# keyPoints2, desc2 = orb.detectAndCompute(I2, None)
# AKAZE
akaze = cv2.AKAZE_create()
keyPoints1, desc1 = akaze.detectAndCompute(I1, None)
keyPoints2, desc2 = akaze.detectAndCompute(I2, None)
showDifs('akazeDif', I1, keyPoints1, desc1, I2, keyPoints2, desc2)
key = cv2.waitKey(0)
| ###############################
#
# (c) <NAME> 2017
# Student No: C14714071
# Course: DT228
# Date: 04-10-2017
#
# Title: Testing Feature Detection Algorithms
import numpy as np
import cv2
import easygui
# help(cv2.drawKeypoints)
# help(cv2.drawMatches)
imagesPath = 'images/'
outputPath = 'output/'
fileExtension = '.jpg'
I1 = cv2.imread(imagesPath + 'pcb1.jpg')
I2 = cv2.imread(imagesPath + 'pcb2.jpg')
G1 = cv2.cvtColor(I1, cv2.COLOR_BGR2GRAY)
G2 = cv2.cvtColor(I2, cv2.COLOR_BGR2GRAY)
def displayFAST(window, image, nms=1):
# FAST - Features from Accelerated Segment Test
# Reference - http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_fast/py_fast.html
fast = cv2.FastFeatureDetector_create()
# Set maximum suppression - nms ignores unimportant pixels in corners that are not the the local maxima
# Reference - http://users.ecs.soton.ac.uk/msn/book/new_demo/nonmax/
fast.setNonmaxSuppression(nms)
# find corners
keyPoints = fast.detect(image, None)
corners = image.copy()
corners = cv2.drawKeypoints(image = image, keypoints = keyPoints, outImage = corners, color = (0, 0, 255))
cv2.imwrite(outputPath + window + fileExtension, corners)
cv2.imshow(window, corners)
def displayORB(window, image):
# ORB - Oriented FAST and Rotated BRIEF
# Reference - http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_orb/py_orb.html#orb
orb = cv2.ORB_create()
keyPoints = orb.detect(image, None)
keyPoints, descriptor = orb.compute(image, keyPoints)
newImage = image.copy()
newImage = cv2.drawKeypoints(image = image, keypoints = keyPoints, outImage = newImage, color = (0, 0, 255), flags = 0)
cv2.imwrite(outputPath + window + fileExtension, newImage)
cv2.imshow(window, newImage)
def displayAKAZE(window, image):
# AKAZE
# Reference - http://docs.opencv.org/3.0-beta/doc/tutorials/features2d/akaze_matching/akaze_matching.html
akaze = cv2.AKAZE_create()
keyPoints = akaze.detect(image, None)
newImage = image.copy()
newImage = cv2.drawKeypoints(image = image, keypoints = keyPoints, outImage = newImage, color = (0, 0, 255), flags = 0)
cv2.imwrite(outputPath + window + fileExtension, newImage)
cv2.imshow(window, newImage)
def showDifs(window, image1, keyPoints1, desc1, image2, keyPoints2, desc2):
# Feature Matching
# Reference - http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
# BFMatcher
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = True)
matches = bf.match(desc1, desc2)
matches = sorted(matches, key = lambda x:x.distance)
matchesImage = image1.copy()
matchesImage = cv2.drawMatches(img1 = image1, keypoints1 = keyPoints1, img2 = image2, keypoints2 = keyPoints2, matches1to2 = matches[:250], outImg = matchesImage, flags=2)
cv2.imwrite(outputPath + window + fileExtension, matchesImage)
cv2.imshow(window, matchesImage)
# displayFAST('fast1', I1)
# displayFAST('fast2', I2)
#
# displayORB('orb1', I1)
# displayORB('orb2', I2)
#
# displayAKAZE('akaze1', I1)
# displayAKAZE('akaze2', I2)
# Get edges from images with Canny Edge Detection - TODO: test with multiple feature detection algorithms
# Reference http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_canny/py_canny.html
# edges1 = cv2.Canny(I1, 150, 200)
# edges2 = cv2.Canny(I2, 150, 200)
# cv2.imshow('canny1', edges1)
# cv2.imshow('canny2', edges2)
# Noise Reduction using 5x5 Gaussian filter ?
# ORB
# orb = cv2.ORB_create()
# keyPoints1, desc1 = orb.detectAndCompute(I1, None)
# keyPoints2, desc2 = orb.detectAndCompute(I2, None)
# AKAZE
akaze = cv2.AKAZE_create()
keyPoints1, desc1 = akaze.detectAndCompute(I1, None)
keyPoints2, desc2 = akaze.detectAndCompute(I2, None)
showDifs('akazeDif', I1, keyPoints1, desc1, I2, keyPoints2, desc2)
key = cv2.waitKey(0) | en | 0.43787 | ############################### # # (c) <NAME> 2017 # Student No: C14714071 # Course: DT228 # Date: 04-10-2017 # # Title: Testing Feature Detection Algorithms # help(cv2.drawKeypoints) # help(cv2.drawMatches) # FAST - Features from Accelerated Segment Test # Reference - http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_fast/py_fast.html # Set maximum suppression - nms ignores unimportant pixels in corners that are not the the local maxima # Reference - http://users.ecs.soton.ac.uk/msn/book/new_demo/nonmax/ # find corners # ORB - Oriented FAST and Rotated BRIEF # Reference - http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_orb/py_orb.html#orb # AKAZE # Reference - http://docs.opencv.org/3.0-beta/doc/tutorials/features2d/akaze_matching/akaze_matching.html # Feature Matching # Reference - http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html # BFMatcher # displayFAST('fast1', I1) # displayFAST('fast2', I2) # # displayORB('orb1', I1) # displayORB('orb2', I2) # # displayAKAZE('akaze1', I1) # displayAKAZE('akaze2', I2) # Get edges from images with Canny Edge Detection - TODO: test with multiple feature detection algorithms # Reference http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_canny/py_canny.html # edges1 = cv2.Canny(I1, 150, 200) # edges2 = cv2.Canny(I2, 150, 200) # cv2.imshow('canny1', edges1) # cv2.imshow('canny2', edges2) # Noise Reduction using 5x5 Gaussian filter ? # ORB # orb = cv2.ORB_create() # keyPoints1, desc1 = orb.detectAndCompute(I1, None) # keyPoints2, desc2 = orb.detectAndCompute(I2, None) # AKAZE | 2.997087 | 3 |
doge_server/main.py | lugiavn/doge-detector | 0 | 6620702 | # Run web server:
# FLASK_APP=main.py python3 -m flask run --host=0.0.0.0 --port=80
import flask
import numpy as np
import cv2
import sys
import torch
import torchvision
DEVICE = 'cpu'
####################################
# Load model
###################################
print('Loading doge model...')
model = torchvision.models.detection.retinanet_resnet50_fpn(
pretrained=False, pretrained_backbone=False)
model = model.to(DEVICE)
model.load_state_dict(torch.load('model.pth', map_location=DEVICE))
model.eval()
print('Done loading doge model')
def run_model(img):
img = img.transpose([2, 0, 1])
img = torch.from_numpy(img).float()
img = img.to(DEVICE)
detections = model([img])[0]
print(detections)
return detections
####################################
# Flask
###################################
app = flask.Flask(__name__)
@app.route("/")
def index():
print('ah something is coming')
return flask.render_template(
'base.html',
image_url='static/intro.jpeg',
message='Try upload new image!',
detections=[{
'x1': 100,
'y1': 10,
'x2': 200,
'y2': 100,
'score': 0.9
}, {
'x1': 190,
'y1': 100,
'x2': 300,
'y2': 250,
'score': 0.1
}]
)
@app.route("/process_image", methods=['GET', 'POST'])
def process_image():
result = {'message': ''}
# try to parse as image and run model.
detections = None
if 'fileToUpload' not in flask.request.files:
return flask.redirect("/", code=302)
try:
x = flask.request.files['fileToUpload']
imgbuffer = x.stream.read()
img = np.frombuffer(imgbuffer, dtype=np.uint8)
img = cv2.imdecode(img, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
detections = run_model(img)
except Exception as e:
result['message'] = 'error: ' + str(e)
# parse result.
if detections is not None:
result['image_url'] = 'tmp/' + x.filename
with open(result['image_url'], 'wb') as f:
f.write(imgbuffer)
result['message'] = 'No doge very sad'
result['detections'] = []
for box, score, label in zip(detections['boxes'],
detections['scores'],
detections['labels']):
if score > 0.2 and int(label) == 69:
x1, y1, x2, y2 = box
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
result['detections'].append({
'x1': x1,
'x2': x2,
'y1': y1,
'y2': y2,
'score': float(score)
})
if score > 0.8:
result['message'] = 'very doge, wow amazing'
return flask.render_template(
'base.html',
message=result['message'],
image_url=result.get('image_url', ''),
detections=result.get('detections', []),
)
@app.route('/tmp/<path:filename>')
def custom_static(filename):
return flask.send_from_directory(
'tmp', filename)
| # Run web server:
# FLASK_APP=main.py python3 -m flask run --host=0.0.0.0 --port=80
import flask
import numpy as np
import cv2
import sys
import torch
import torchvision
DEVICE = 'cpu'
####################################
# Load model
###################################
print('Loading doge model...')
model = torchvision.models.detection.retinanet_resnet50_fpn(
pretrained=False, pretrained_backbone=False)
model = model.to(DEVICE)
model.load_state_dict(torch.load('model.pth', map_location=DEVICE))
model.eval()
print('Done loading doge model')
def run_model(img):
    """Run the detector on one image.

    Args:
        img: HxWx3 float RGB array scaled to [0, 1] — TODO confirm callers
            always pass this layout.

    Returns:
        The torchvision detection dict with 'boxes', 'scores' and 'labels'.
    """
    # HWC -> CHW, as torchvision detection models expect.
    img = img.transpose([2, 0, 1])
    img = torch.from_numpy(img).float().to(DEVICE)
    # no_grad avoids building the autograd graph during inference, cutting
    # per-request memory; also dropped the leftover debug print of the
    # full detection dict on every request.
    with torch.no_grad():
        detections = model([img])[0]
    return detections
####################################
# Flask
####################################
# Single application object used by the route decorators below.
app = flask.Flask(__name__)
@app.route("/")
def index():
    """Render the landing page with a demo image and two sample boxes."""
    # Dropped the stray debug print that ran on every page load.
    return flask.render_template(
        'base.html',
        image_url='static/intro.jpeg',
        message='Try upload new image!',
        # Hard-coded example detections so the template's overlay code can
        # be exercised without running the model.
        detections=[{
            'x1': 100,
            'y1': 10,
            'x2': 200,
            'y2': 100,
            'score': 0.9
        }, {
            'x1': 190,
            'y1': 100,
            'x2': 300,
            'y2': 250,
            'score': 0.1
        }]
    )
@app.route("/process_image", methods=['GET', 'POST'])
def process_image():
    """Accept an uploaded image, run the detector, and render the results.

    Returns the rendered results page, or a redirect to "/" when no file
    was uploaded.
    """
    import os  # local import so this handler is self-contained

    result = {'message': ''}
    # Try to parse the upload as an image and run the model.
    detections = None
    if 'fileToUpload' not in flask.request.files:
        return flask.redirect("/", code=302)
    try:
        upload = flask.request.files['fileToUpload']
        imgbuffer = upload.stream.read()
        img = np.frombuffer(imgbuffer, dtype=np.uint8)
        img = cv2.imdecode(img, cv2.IMREAD_COLOR)
        # Model expects RGB floats in [0, 1]; OpenCV decodes BGR uint8.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
        detections = run_model(img)
    except Exception as e:  # boundary handler: report decode/model failure to the user
        result['message'] = 'error: ' + str(e)
    # Parse the result.
    if detections is not None:
        # basename() strips directory components from the client-supplied
        # filename, preventing path traversal outside of tmp/.
        safe_name = os.path.basename(upload.filename)
        result['image_url'] = 'tmp/' + safe_name
        with open(result['image_url'], 'wb') as f:
            f.write(imgbuffer)
        result['message'] = 'No doge very sad'
        result['detections'] = []
        for box, score, label in zip(detections['boxes'],
                                     detections['scores'],
                                     detections['labels']):
            # Label 69 is the dog class; 0.2 is the display threshold.
            if score > 0.2 and int(label) == 69:
                x1, y1, x2, y2 = box
                x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
                result['detections'].append({
                    'x1': x1,
                    'x2': x2,
                    'y1': y1,
                    'y2': y2,
                    'score': float(score)
                })
                if score > 0.8:
                    result['message'] = 'very doge, wow amazing'
    return flask.render_template(
        'base.html',
        message=result['message'],
        image_url=result.get('image_url', ''),
        detections=result.get('detections', []),
    )
@app.route('/tmp/<path:filename>')
def custom_static(filename):
    """Serve a previously uploaded image back out of the tmp/ directory."""
    return flask.send_from_directory('tmp', filename)
| de | 0.530654 | # Run web server: # FLASK_APP=main.py python3 -m flask run --host=0.0.0.0 --port=80 #################################### # Load model ################################### #################################### # Flask ################################### # try to parse as image and run model. # parse result. | 2.72817 | 3 |
body/messages.py | dylanjboyd/bodytastic | 0 | 6620703 | <filename>body/messages.py<gh_stars>0
def fui_msg_text(heading, text):
    """Build Fomantic-UI message markup: a header div followed by a paragraph."""
    header_div = '<div class="header">{}</div>'.format(heading)
    paragraph = '<p>{}</p>'.format(text)
    return header_div + paragraph
| <filename>body/messages.py<gh_stars>0
def fui_msg_text(heading, text):
    """Build Fomantic-UI message markup: a header div followed by a paragraph."""
    header_div = '<div class="header">{}</div>'.format(heading)
    paragraph = '<p>{}</p>'.format(text)
    return header_div + paragraph
| none | 1 | 1.762879 | 2 | |
asylum/jail.py | cieplak/asylum | 0 | 6620704 | <reponame>cieplak/asylum<filename>asylum/jail.py<gh_stars>0
from enum import Enum, auto
import os
from asylum import config
from asylum import sqlite
from asylum.console import Console
from asylum.hosts import Hosts
from asylum.zfs import Zfs
class Jail(Console):
    """A FreeBSD jail cloned from the base ZFS snapshot."""

    class State(Enum):
        stopped = auto()
        running = auto()
        enabled = auto()

    def __init__(self):
        self.name = None
        self.version = None
        self.path = None        # filesystem path of the jail root
        self.state = None       # a Jail.State value once known
        self.user = None        # primary User of the jail, if any
        self.domain = None
        self.address = None     # IPv4 address derived from the configured CIDR
        self.interface = None   # network interface the address is bound to

    @classmethod
    def new(cls, name):
        """Clone the base snapshot into a new jail and record it in the DB.

        The jail's address is derived from the configured CIDR by
        substituting the database row id for the '*' placeholder.
        """
        jail = cls()
        zpool = config.jails['zpath']
        path = os.path.join(config.jails['path'], name)
        base_snapshot = '{}@{}'.format(config.base['zpath'], config.base['version'])
        interface = config.network['interface']
        Zfs.clone(base_snapshot, '{}/{}'.format(zpool, name))
        record = sqlite.Jail.save(
            name=name,
            path=path,
            base=base_snapshot,
            interface=interface,
        )
        cidr = config.network['cidr']
        record.address = cidr.replace('*', str(record.id))
        record.Session.commit()
        # Populate the in-memory object; previously it was returned empty.
        jail.name = name
        jail.path = path
        jail.interface = interface
        jail.address = record.address
        jail.state = cls.State.stopped
        return jail

    def enable(self):
        """Enable the jail service so it starts at boot."""
        return self.run(['service', 'jail', 'enable', self.name])

    def start(self):
        return self.run(['service', 'jail', 'start', self.name])

    def stop(self):
        return self.run(['service', 'jail', 'stop', self.name])

    def install_file(self, src, dst=None):
        """Copy *src* into the jail; defaults to the jail user's home.

        Fixed: *dst* was previously accepted but ignored.
        """
        target = dst if dst is not None else self.user.home
        return self.run(['cp', src, target])

    def install_package(self, *pkgs):
        pass  # TODO: not implemented

    def install_service(self, service):
        pass  # TODO: not implemented

    def register_host(self):
        """Publish the jail's domain/address mapping in the hosts registry."""
        Hosts.register(self.domain, self.address)
class User(object):
    """A user account inside a jail."""

    def __init__(self):
        self.name = None
        self.shell = None
        self.home = None

    def create(self, name, shell='/bin/csh'):
        """Create the user with a home directory owned by them.

        NOTE(review): this calls self.run(), which User does not define —
        presumably it should inherit from Console like Jail does; confirm.
        """
        home = '/home/{}'.format(name)
        self.run(['mkdir', home])  # fixed typo: was 'mdkir'
        cmd = ['pw', 'useradd', '-n', name, '-s', shell, '-w', 'no', '-d', home]
        self.run(cmd)
        self.run(['chown', '-R', '{user}:{user}'.format(user=name), home])
class File(object):
    """Placeholder for a file to be installed into a jail (not implemented)."""
    pass
class JailConfig(object):
    """Renders /etc/jail.conf text from a list of Jail objects."""

    TEMPLATE = '''
# /etc/jail.conf

exec.start = "/bin/sh /etc/rc";
exec.stop = "/bin/sh /etc/rc.shutdown";
exec.clean;
mount.devfs;
path = "/usr/local/jails/$name";

{$JAILS}
'''

    JAIL = '''
{$NAME} {
    host.hostname = "{$NAME}.local";
    interface = "{$IFACE}";
    ip4.addr = {$ADDRESS};
}
'''

    @classmethod
    def render(cls, jails):
        """Return the full jail.conf text for *jails*."""
        jails_cfg = ''.join(cls.render_jail_config(j) for j in jails)
        return cls.TEMPLATE.replace('{$JAILS}', jails_cfg)

    @classmethod
    def render_jail_config(cls, jail):
        """Render the per-jail stanza for one Jail."""
        # Fixed: Jail stores the interface as .interface, not .iface.
        return (cls.JAIL
                .replace('{$NAME}', jail.name)
                .replace('{$IFACE}', jail.interface)
                .replace('{$ADDRESS}', jail.address))
| from enum import Enum, auto
import os
from asylum import config
from asylum import sqlite
from asylum.console import Console
from asylum.hosts import Hosts
from asylum.zfs import Zfs
class Jail(Console):
    """A FreeBSD jail cloned from the base ZFS snapshot."""

    class State(Enum):
        stopped = auto()
        running = auto()
        enabled = auto()

    def __init__(self):
        self.name = None
        self.version = None
        self.path = None        # filesystem path of the jail root
        self.state = None       # a Jail.State value once known
        self.user = None        # primary User of the jail, if any
        self.domain = None
        self.address = None     # IPv4 address derived from the configured CIDR
        self.interface = None   # network interface the address is bound to

    @classmethod
    def new(cls, name):
        """Clone the base snapshot into a new jail and record it in the DB.

        The jail's address is derived from the configured CIDR by
        substituting the database row id for the '*' placeholder.
        """
        jail = cls()
        zpool = config.jails['zpath']
        path = os.path.join(config.jails['path'], name)
        base_snapshot = '{}@{}'.format(config.base['zpath'], config.base['version'])
        interface = config.network['interface']
        Zfs.clone(base_snapshot, '{}/{}'.format(zpool, name))
        record = sqlite.Jail.save(
            name=name,
            path=path,
            base=base_snapshot,
            interface=interface,
        )
        cidr = config.network['cidr']
        record.address = cidr.replace('*', str(record.id))
        record.Session.commit()
        # Populate the in-memory object; previously it was returned empty.
        jail.name = name
        jail.path = path
        jail.interface = interface
        jail.address = record.address
        jail.state = cls.State.stopped
        return jail

    def enable(self):
        """Enable the jail service so it starts at boot."""
        return self.run(['service', 'jail', 'enable', self.name])

    def start(self):
        return self.run(['service', 'jail', 'start', self.name])

    def stop(self):
        return self.run(['service', 'jail', 'stop', self.name])

    def install_file(self, src, dst=None):
        """Copy *src* into the jail; defaults to the jail user's home.

        Fixed: *dst* was previously accepted but ignored.
        """
        target = dst if dst is not None else self.user.home
        return self.run(['cp', src, target])

    def install_package(self, *pkgs):
        pass  # TODO: not implemented

    def install_service(self, service):
        pass  # TODO: not implemented

    def register_host(self):
        """Publish the jail's domain/address mapping in the hosts registry."""
        Hosts.register(self.domain, self.address)
class User(object):
    """A user account inside a jail."""

    def __init__(self):
        self.name = None
        self.shell = None
        self.home = None

    def create(self, name, shell='/bin/csh'):
        """Create the user with a home directory owned by them.

        NOTE(review): this calls self.run(), which User does not define —
        presumably it should inherit from Console like Jail does; confirm.
        """
        home = '/home/{}'.format(name)
        self.run(['mkdir', home])  # fixed typo: was 'mdkir'
        cmd = ['pw', 'useradd', '-n', name, '-s', shell, '-w', 'no', '-d', home]
        self.run(cmd)
        self.run(['chown', '-R', '{user}:{user}'.format(user=name), home])
class File(object):
    """Placeholder for a file to be installed into a jail (not implemented)."""
    pass
class JailConfig(object):
    """Renders /etc/jail.conf text from a list of Jail objects."""

    TEMPLATE = '''
# /etc/jail.conf

exec.start = "/bin/sh /etc/rc";
exec.stop = "/bin/sh /etc/rc.shutdown";
exec.clean;
mount.devfs;
path = "/usr/local/jails/$name";

{$JAILS}
'''

    JAIL = '''
{$NAME} {
    host.hostname = "{$NAME}.local";
    interface = "{$IFACE}";
    ip4.addr = {$ADDRESS};
}
'''

    @classmethod
    def render(cls, jails):
        """Return the full jail.conf text for *jails*."""
        jails_cfg = ''.join(cls.render_jail_config(j) for j in jails)
        return cls.TEMPLATE.replace('{$JAILS}', jails_cfg)

    @classmethod
    def render_jail_config(cls, jail):
        """Render the per-jail stanza for one Jail."""
        # Fixed: Jail stores the interface as .interface, not .iface.
        return (cls.JAIL
                .replace('{$NAME}', jail.name)
                .replace('{$IFACE}', jail.interface)
                .replace('{$ADDRESS}', jail.address))
src/python_module/src/mivp_agent/log/metadata.py | Joe-Doyle/moos-ivp-agent | 1 | 6620705 | <filename>src/python_module/src/mivp_agent/log/metadata.py
import os
import sys
from pathlib import Path
from mivp_agent.util.file_system import find_unique
from mivp_agent.log.const import CORE_DIRS
from mivp_agent.proto.mivp_agent_pb2 import Transition
from mivp_agent.proto.proto_logger import ProtoLogger
class RegistryDatum:
    '''
    This class is for managing a "registry" of MissionManager session ids to assure that they are unique with respect to a certain logging directory.

    **NOTE:** The following is **NOT** thread safe. It is unlikely to fail silently, but still need to be cautious.
    '''

    def __init__(self, path):
        '''
        Args:
            path (str): The registry directory.
        '''
        self.path = path

        if not os.path.isdir(self.path):
            try:
                os.makedirs(self.path)
            except FileExistsError:
                # Fixed: this previously *returned* the exception instead of
                # raising it, silently leaving the registry unusable.
                raise FileExistsError('There is a file in the place of the specified registry directory')

    def validate(self):
        '''Warn about directories inside the registry (a corruption signal).'''
        for p in os.listdir(self.path):
            # Fixed: join with the registry path; a bare listdir() name was
            # previously tested relative to the process CWD.
            if not os.path.isfile(os.path.join(self.path, p)):
                print('WARNING: There is a directory in the metadata registry. This indicates a corrupted registry.', file=sys.stderr)

    def has_session(self, id):
        return os.path.isfile(os.path.join(self.path, f'{id}.session'))

    def list_sessions(self):
        '''Yield the file names of all registered sessions.'''
        for p in os.listdir(self.path):
            if os.path.isfile(os.path.join(self.path, p)):
                yield p

    def session_count(self):
        return len(list(self.list_sessions()))

    def register(self, name):
        '''Register *name* and return the de-duplicated session id.'''
        # Find a unique name
        id = find_unique(self.path, name, ext='.session')
        # Register it; touch(exist_ok=False) fails loudly on a race.
        entry = os.path.join(self.path, f'{id}.session')
        Path(entry).touch(exist_ok=False)
        return id
class LogMetadata:
    '''
    The following is for managing metadata associated with a perticular logging directory.
    '''

    def __init__(self, path):
        '''
        Args:
            path (str): The logging directory.
        '''
        self._data_dir = path
        self._path = os.path.join(path, '.meta')

        # We don't init here because .meta is a signal that the directory is
        # a valid logging directory. Raise explicitly (same exception type as
        # the old assert) so the check survives `python -O`.
        if not os.path.isdir(self._path):
            raise AssertionError("Metadata directory not found, is this a valid log directory?")

        self.registry = RegistryDatum(os.path.join(self._path, 'registry'))

    def get_logs(self, id):
        '''
        Return ProtoLogger readers for every log of session *id*, or None
        when the session is not registered.
        '''
        # Check if the session id is valid in this context
        if not self.registry.has_session(id):
            return None

        logs = []
        for subdir in os.listdir(self._data_dir):
            if subdir not in CORE_DIRS:
                # We have a task folder
                session_path = os.path.join(
                    self._data_dir,
                    subdir,
                    id
                )
                if os.path.isdir(session_path):
                    for log_dir in os.listdir(session_path):
                        path = os.path.join(session_path, log_dir)
                        logs.append(ProtoLogger(path, Transition, mode='r'))
        return logs
| <filename>src/python_module/src/mivp_agent/log/metadata.py
import os
import sys
from pathlib import Path
from mivp_agent.util.file_system import find_unique
from mivp_agent.log.const import CORE_DIRS
from mivp_agent.proto.mivp_agent_pb2 import Transition
from mivp_agent.proto.proto_logger import ProtoLogger
class RegistryDatum:
    '''
    This class is for managing a "registry" of MissionManager session ids to assure that they are unique with respect to a certain logging directory.

    **NOTE:** The following is **NOT** thread safe. It is unlikely to fail silently, but still need to be cautious.
    '''

    def __init__(self, path):
        '''
        Args:
            path (str): The registry directory.
        '''
        self.path = path

        if not os.path.isdir(self.path):
            try:
                os.makedirs(self.path)
            except FileExistsError:
                # Fixed: this previously *returned* the exception instead of
                # raising it, silently leaving the registry unusable.
                raise FileExistsError('There is a file in the place of the specified registry directory')

    def validate(self):
        '''Warn about directories inside the registry (a corruption signal).'''
        for p in os.listdir(self.path):
            # Fixed: join with the registry path; a bare listdir() name was
            # previously tested relative to the process CWD.
            if not os.path.isfile(os.path.join(self.path, p)):
                print('WARNING: There is a directory in the metadata registry. This indicates a corrupted registry.', file=sys.stderr)

    def has_session(self, id):
        return os.path.isfile(os.path.join(self.path, f'{id}.session'))

    def list_sessions(self):
        '''Yield the file names of all registered sessions.'''
        for p in os.listdir(self.path):
            if os.path.isfile(os.path.join(self.path, p)):
                yield p

    def session_count(self):
        return len(list(self.list_sessions()))

    def register(self, name):
        '''Register *name* and return the de-duplicated session id.'''
        # Find a unique name
        id = find_unique(self.path, name, ext='.session')
        # Register it; touch(exist_ok=False) fails loudly on a race.
        entry = os.path.join(self.path, f'{id}.session')
        Path(entry).touch(exist_ok=False)
        return id
class LogMetadata:
    '''
    The following is for managing metadata associated with a perticular logging directory.
    '''

    def __init__(self, path):
        '''
        Args:
            path (str): The logging directory.
        '''
        self._data_dir = path
        self._path = os.path.join(path, '.meta')

        # We don't init here because .meta is a signal that the directory is
        # a valid logging directory. Raise explicitly (same exception type as
        # the old assert) so the check survives `python -O`.
        if not os.path.isdir(self._path):
            raise AssertionError("Metadata directory not found, is this a valid log directory?")

        self.registry = RegistryDatum(os.path.join(self._path, 'registry'))

    def get_logs(self, id):
        '''
        Return ProtoLogger readers for every log of session *id*, or None
        when the session is not registered.
        '''
        # Check if the session id is valid in this context
        if not self.registry.has_session(id):
            return None

        logs = []
        for subdir in os.listdir(self._data_dir):
            if subdir not in CORE_DIRS:
                # We have a task folder
                session_path = os.path.join(
                    self._data_dir,
                    subdir,
                    id
                )
                if os.path.isdir(session_path):
                    for log_dir in os.listdir(session_path):
                        path = os.path.join(session_path, log_dir)
                        logs.append(ProtoLogger(path, Transition, mode='r'))
        return logs
| en | 0.916466 | This class is for managing a "registry" of MissionManager session ids to assure that they are unique with respect to a certain logging directory. **NOTE:** The following is **NOT** thread safe. It is unlikely to fail silently, but still need to be cautious. Args: path (str): The registry directory. # Find a unique name # Register it The following is for managing metadata associated with a perticular logging directory. Args: path (str): The logging directory. # We don't init here because .meta is a signal that the directory is a valid logging directory This function is used to get the logs associated with a specific session id # Check if the session id is valid in this context # We have a task folder | 2.512625 | 3 |
simplesite/migrations/0001_initial.py | marsxn/simple-site | 1 | 6620706 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-04-16 18:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import simplesite.models
class Migration(migrations.Migration):
    # Auto-generated initial schema for the simplesite app (Page, PageImage,
    # SocialNetwork); avoid hand-editing beyond what makemigrations produces.

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='Title')),
                ('slug', models.SlugField(max_length=255, unique=True, verbose_name='Slug')),
                ('creation_date', models.DateTimeField(auto_now_add=True, verbose_name='Creation Date')),
                ('last_modification', models.DateTimeField(auto_now=True, verbose_name='Last Modification')),
                ('content', models.TextField(blank=True, null=True, verbose_name='Main Content')),
                ('sort_order', models.IntegerField(blank=True, default=1, null=True, verbose_name='Sort Order')),
                ('is_public', models.BooleanField(default=True, verbose_name='Public')),
                ('is_header', models.BooleanField(default=False, verbose_name='Belongs to Header')),
                ('is_footer', models.BooleanField(default=False, verbose_name='Belongs to Footer')),
                ('_related_model', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='_related_model', to='contenttypes.ContentType', verbose_name='Related Content')),
            ],
            options={
                'ordering': ['sort_order', 'creation_date', 'slug'],
                'verbose_name': 'Page',
                'verbose_name_plural': 'Pages',
            },
        ),
        migrations.CreateModel(
            name='PageImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='Title')),
                ('img_type', models.CharField(blank=True, choices=[('thumbnail', 'Thumbnail Image'), ('detail', 'Detail Image'), ('gallery', 'Gallery Image')], max_length=255, null=True, verbose_name='Image Type')),
                ('image', models.ImageField(max_length=255, upload_to=simplesite.models.get_page_image_path)),
                ('page', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='image_set', to='simplesite.Page')),
            ],
            options={
                'verbose_name': 'Image',
                'verbose_name_plural': 'Images',
            },
        ),
        migrations.CreateModel(
            name='SocialNetwork',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='Title')),
                ('slug', models.SlugField(max_length=255, unique=True, verbose_name='Slug')),
                ('url', models.URLField(max_length=255, verbose_name='URL')),
                ('creation_date', models.DateTimeField(auto_now_add=True, verbose_name='Creation Date')),
                ('sort_order', models.IntegerField(blank=True, default=1, null=True, verbose_name='Sort Order')),
                ('is_active', models.BooleanField(default=True, verbose_name='Active')),
                ('image', models.ImageField(max_length=255, upload_to=simplesite.models.get_socialnetwork_image_path, verbose_name='Image')),
            ],
            options={
                'ordering': ['sort_order', 'creation_date'],
                'verbose_name': 'Social Network',
                'verbose_name_plural': 'Social Networks',
            },
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-04-16 18:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import simplesite.models
class Migration(migrations.Migration):
    # Auto-generated initial schema for the simplesite app (Page, PageImage,
    # SocialNetwork); avoid hand-editing beyond what makemigrations produces.

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='Title')),
                ('slug', models.SlugField(max_length=255, unique=True, verbose_name='Slug')),
                ('creation_date', models.DateTimeField(auto_now_add=True, verbose_name='Creation Date')),
                ('last_modification', models.DateTimeField(auto_now=True, verbose_name='Last Modification')),
                ('content', models.TextField(blank=True, null=True, verbose_name='Main Content')),
                ('sort_order', models.IntegerField(blank=True, default=1, null=True, verbose_name='Sort Order')),
                ('is_public', models.BooleanField(default=True, verbose_name='Public')),
                ('is_header', models.BooleanField(default=False, verbose_name='Belongs to Header')),
                ('is_footer', models.BooleanField(default=False, verbose_name='Belongs to Footer')),
                ('_related_model', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='_related_model', to='contenttypes.ContentType', verbose_name='Related Content')),
            ],
            options={
                'ordering': ['sort_order', 'creation_date', 'slug'],
                'verbose_name': 'Page',
                'verbose_name_plural': 'Pages',
            },
        ),
        migrations.CreateModel(
            name='PageImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='Title')),
                ('img_type', models.CharField(blank=True, choices=[('thumbnail', 'Thumbnail Image'), ('detail', 'Detail Image'), ('gallery', 'Gallery Image')], max_length=255, null=True, verbose_name='Image Type')),
                ('image', models.ImageField(max_length=255, upload_to=simplesite.models.get_page_image_path)),
                ('page', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='image_set', to='simplesite.Page')),
            ],
            options={
                'verbose_name': 'Image',
                'verbose_name_plural': 'Images',
            },
        ),
        migrations.CreateModel(
            name='SocialNetwork',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='Title')),
                ('slug', models.SlugField(max_length=255, unique=True, verbose_name='Slug')),
                ('url', models.URLField(max_length=255, verbose_name='URL')),
                ('creation_date', models.DateTimeField(auto_now_add=True, verbose_name='Creation Date')),
                ('sort_order', models.IntegerField(blank=True, default=1, null=True, verbose_name='Sort Order')),
                ('is_active', models.BooleanField(default=True, verbose_name='Active')),
                ('image', models.ImageField(max_length=255, upload_to=simplesite.models.get_socialnetwork_image_path, verbose_name='Image')),
            ],
            options={
                'ordering': ['sort_order', 'creation_date'],
                'verbose_name': 'Social Network',
                'verbose_name_plural': 'Social Networks',
            },
        ),
    ]
scripts/lb.py | PREDICT-DPACC/logbook | 0 | 6620707 | <reponame>PREDICT-DPACC/logbook
#!/usr/bin/env python
import os
import sys
import pandas as pd
import logging
import argparse as ap
from importlib import import_module
from datetime import datetime
from logbook import tools
logger = logging.getLogger(os.path.basename(__file__))


def main():
    """Walk the consent/PHOENIX trees and run the logbook pipeline module
    once per (study, subject, data type) combination."""
    argparser = ap.ArgumentParser('PHOENIX Metadata LogBook Pipeline')

    # Input and output parameters
    argparser.add_argument('--phoenix-dir',
                           help='Phoenix directory')
    argparser.add_argument('--consent-dir',
                           help='Consent directory')
    argparser.add_argument('--debug', action='store_true', help='Enable debug messages')
    argparser.add_argument('--data-type',
                           help='Data type name (ex. "phone actigraphy" or "onsite_interview")',
                           nargs='+')
    argparser.add_argument('--phone-stream',
                           help='(ex. "survey_answers" or "accelerometer")',
                           nargs='+')
    argparser.add_argument('--output-dir',
                           help='Path to the output directory')
    argparser.add_argument('--study',
                           nargs='+', help='Study name')
    argparser.add_argument('--subject',
                           nargs='+', help='Subject ID')

    # Basic targeting parameters
    argparser.add_argument('--log-dir',
                           help='Directory where the log is written')
    argparser.add_argument('--input-tz',
                           help='Timezone info for the input. (Default: UTC)',
                           default='UTC')
    argparser.add_argument('--output-tz',
                           help='Timezone info for the output. (Default: America/New_York)',
                           default='America/New_York')
    argparser.add_argument('--day-from',
                           help='Output day from. (optional)',
                           type=int)
    argparser.add_argument('--day-to',
                           help='Output day to. (optional; By default, process data for all days)',
                           type=int)
    args = argparser.parse_args()

    # Log file initialization: one file per day, named YYYYMMDDlogbook.log.
    log_date = datetime.today().strftime('%Y%m%d')
    DEFAULT_LOGFILE_LOCATION = os.path.join(str(args.log_dir), str(log_date) + 'logbook.log')
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO,
                        format='%(asctime)s - %(levelname)s - %(message)s',
                        filename=str(DEFAULT_LOGFILE_LOCATION))

    # Gets all studies under each subdirectory
    studies = args.study if args.study else tools.scan_dir(args.consent_dir)
    for study in studies:
        study_path = os.path.join(args.consent_dir, study)
        consent_path = os.path.join(args.consent_dir, study, study + '_metadata.csv')
        consents = get_consents(consent_path)
        if consents is None: continue
        # Gets all subjects under the study directory
        subjects = args.subject if args.subject else tools.scan_dir(study_path)
        for subject in subjects:
            subject_path = os.path.join(study_path, subject)
            verified = verify_subject(subject, subject_path, consents)
            if not verified:
                continue
            logger.info('Processing {S} in {ST}'.format(S=subject, ST=study))
            # First consent value for this subject's column (see get_consents).
            date_from = consents[subject][0]
            # Loops through PHOENIX's subdirectories.
            directories = tools.scan_dir(args.phoenix_dir)
            for directory in sorted(directories):
                subject_path = os.path.join(args.phoenix_dir, directory, study, subject)
                # Scan each subject's directory to find available data types
                data_types = args.data_type if args.data_type else tools.scan_dir(subject_path)
                for data_type in data_types:
                    data_path = os.path.join(subject_path, data_type, 'raw')
                    output_path = args.output_dir if args.output_dir else os.path.join(subject_path,
                                                                                      data_type,
                                                                                      'processed')
                    mod = get_module()
                    # NOTE(review): mod.parse_args() appears to *build* the
                    # module's ArgumentParser rather than parse it — confirm.
                    mod_parser = mod.parse_args()
                    # NOTE(review): args.phone_stream is a list (nargs='+'),
                    # so str() passes its repr; likewise str(None) is passed
                    # for unset day-from/day-to — confirm the module parser
                    # tolerates both.
                    new_args, unknown = mod_parser.parse_known_args([
                        '--date-from', str(date_from),
                        '--read-dir', str(data_path),
                        '--phone-stream', str(args.phone_stream),
                        '--output-dir', str(output_path),
                        '--day-from', str(args.day_from),
                        '--day-to', str(args.day_to),
                        '--input-tz', str(args.input_tz),
                        '--output-tz', str(args.output_tz),
                        '--study', str(study),
                        '--subject', str(subject),
                        '--data-type', str(data_type)
                    ])
                    mod.main(new_args)
    return
# Import module based on user input
def get_module():
    """Import and return the logbook pipeline module, exiting on failure."""
    try:
        module = import_module('logbook', __name__)
    except Exception as exc:
        logger.error(exc)
        logger.error('Could not import the pipeline module. Exiting')
        sys.exit(1)
    else:
        return module
# Ensures data can be processed for the subject
def verify_subject(subject, path, consents):
    """Return True when *subject* has a directory and a consent entry."""
    # The consent CSV itself lives next to the subject directories.
    if subject.endswith('.csv'):
        logger.debug('Subject {S} is not a valid subject.'.format(S=subject))
        return False
    if not os.path.isdir(path):
        logger.debug('Path {P} does not exist.'.format(P=path))
        return False
    if subject not in consents:
        logger.debug('Consent date does not exist for {S}.'.format(S=subject))
        return False
    return True
# Get consents for the study
def get_consents(path):
    """Load the study metadata CSV and return a one-row DataFrame of consent
    values keyed by subject id, or None on any failure."""
    try:
        df = pd.read_csv(path, keep_default_na=False, engine='c', skipinitialspace=True, index_col=False)
        # Pivot to one column per subject; bfill + iloc[[0], :] collapses the
        # per-subject 'Consent' values into a single first row so callers can
        # read consents[subject][0].
        df = df.pivot(
            index=None,
            columns='Subject ID',
            values='Consent'
        ).bfill().iloc[[0], :]
        return df
    except Exception as e:
        # Broad on purpose: any read/pivot problem means "no consents".
        logger.error(e)
        logger.error('Unable to retrieve consents from {0}.'.format(path))
        return None


if __name__ == '__main__':
    main()
| #!/usr/bin/env python
import os
import sys
import pandas as pd
import logging
import argparse as ap
from importlib import import_module
from datetime import datetime
from logbook import tools
logger = logging.getLogger(os.path.basename(__file__))


def main():
    """Walk the consent/PHOENIX trees and run the logbook pipeline module
    once per (study, subject, data type) combination."""
    argparser = ap.ArgumentParser('PHOENIX Metadata LogBook Pipeline')

    # Input and output parameters
    argparser.add_argument('--phoenix-dir',
                           help='Phoenix directory')
    argparser.add_argument('--consent-dir',
                           help='Consent directory')
    argparser.add_argument('--debug', action='store_true', help='Enable debug messages')
    argparser.add_argument('--data-type',
                           help='Data type name (ex. "phone actigraphy" or "onsite_interview")',
                           nargs='+')
    argparser.add_argument('--phone-stream',
                           help='(ex. "survey_answers" or "accelerometer")',
                           nargs='+')
    argparser.add_argument('--output-dir',
                           help='Path to the output directory')
    argparser.add_argument('--study',
                           nargs='+', help='Study name')
    argparser.add_argument('--subject',
                           nargs='+', help='Subject ID')

    # Basic targeting parameters
    argparser.add_argument('--log-dir',
                           help='Directory where the log is written')
    argparser.add_argument('--input-tz',
                           help='Timezone info for the input. (Default: UTC)',
                           default='UTC')
    argparser.add_argument('--output-tz',
                           help='Timezone info for the output. (Default: America/New_York)',
                           default='America/New_York')
    argparser.add_argument('--day-from',
                           help='Output day from. (optional)',
                           type=int)
    argparser.add_argument('--day-to',
                           help='Output day to. (optional; By default, process data for all days)',
                           type=int)
    args = argparser.parse_args()

    # Log file initialization: one file per day, named YYYYMMDDlogbook.log.
    log_date = datetime.today().strftime('%Y%m%d')
    DEFAULT_LOGFILE_LOCATION = os.path.join(str(args.log_dir), str(log_date) + 'logbook.log')
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO,
                        format='%(asctime)s - %(levelname)s - %(message)s',
                        filename=str(DEFAULT_LOGFILE_LOCATION))

    # Gets all studies under each subdirectory
    studies = args.study if args.study else tools.scan_dir(args.consent_dir)
    for study in studies:
        study_path = os.path.join(args.consent_dir, study)
        consent_path = os.path.join(args.consent_dir, study, study + '_metadata.csv')
        consents = get_consents(consent_path)
        if consents is None: continue
        # Gets all subjects under the study directory
        subjects = args.subject if args.subject else tools.scan_dir(study_path)
        for subject in subjects:
            subject_path = os.path.join(study_path, subject)
            verified = verify_subject(subject, subject_path, consents)
            if not verified:
                continue
            logger.info('Processing {S} in {ST}'.format(S=subject, ST=study))
            # First consent value for this subject's column (see get_consents).
            date_from = consents[subject][0]
            # Loops through PHOENIX's subdirectories.
            directories = tools.scan_dir(args.phoenix_dir)
            for directory in sorted(directories):
                subject_path = os.path.join(args.phoenix_dir, directory, study, subject)
                # Scan each subject's directory to find available data types
                data_types = args.data_type if args.data_type else tools.scan_dir(subject_path)
                for data_type in data_types:
                    data_path = os.path.join(subject_path, data_type, 'raw')
                    output_path = args.output_dir if args.output_dir else os.path.join(subject_path,
                                                                                      data_type,
                                                                                      'processed')
                    mod = get_module()
                    # NOTE(review): mod.parse_args() appears to *build* the
                    # module's ArgumentParser rather than parse it — confirm.
                    mod_parser = mod.parse_args()
                    # NOTE(review): args.phone_stream is a list (nargs='+'),
                    # so str() passes its repr; likewise str(None) is passed
                    # for unset day-from/day-to — confirm the module parser
                    # tolerates both.
                    new_args, unknown = mod_parser.parse_known_args([
                        '--date-from', str(date_from),
                        '--read-dir', str(data_path),
                        '--phone-stream', str(args.phone_stream),
                        '--output-dir', str(output_path),
                        '--day-from', str(args.day_from),
                        '--day-to', str(args.day_to),
                        '--input-tz', str(args.input_tz),
                        '--output-tz', str(args.output_tz),
                        '--study', str(study),
                        '--subject', str(subject),
                        '--data-type', str(data_type)
                    ])
                    mod.main(new_args)
    return
# Import module based on user input
def get_module():
    """Import and return the logbook pipeline module, exiting on failure."""
    try:
        module = import_module('logbook', __name__)
    except Exception as exc:
        logger.error(exc)
        logger.error('Could not import the pipeline module. Exiting')
        sys.exit(1)
    else:
        return module
# Ensures data can be processed for the subject
def verify_subject(subject, path, consents):
    """Return True when *subject* has a directory and a consent entry."""
    # The consent CSV itself lives next to the subject directories.
    if subject.endswith('.csv'):
        logger.debug('Subject {S} is not a valid subject.'.format(S=subject))
        return False
    if not os.path.isdir(path):
        logger.debug('Path {P} does not exist.'.format(P=path))
        return False
    if subject not in consents:
        logger.debug('Consent date does not exist for {S}.'.format(S=subject))
        return False
    return True
# Get consents for the study
def get_consents(path):
    """Load the study metadata CSV and return a one-row DataFrame of consent
    values keyed by subject id, or None on any failure."""
    try:
        df = pd.read_csv(path, keep_default_na=False, engine='c', skipinitialspace=True, index_col=False)
        # Pivot to one column per subject; bfill + iloc[[0], :] collapses the
        # per-subject 'Consent' values into a single first row so callers can
        # read consents[subject][0].
        df = df.pivot(
            index=None,
            columns='Subject ID',
            values='Consent'
        ).bfill().iloc[[0], :]
        return df
    except Exception as e:
        # Broad on purpose: any read/pivot problem means "no consents".
        logger.error(e)
        logger.error('Unable to retrieve consents from {0}.'.format(path))
        return None


if __name__ == '__main__':
    main()
src/sima/riflex/blueprints/strouhaluserdefinedproperty.py | SINTEF/simapy | 0 | 6620708 | #
# Generated with StrouhalUserDefinedPropertyBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from .strouhalspecificationproperty import StrouhalSpecificationPropertyBlueprint
class StrouhalUserDefinedPropertyBlueprint(StrouhalSpecificationPropertyBlueprint):
    """Generated DMT blueprint describing a user-defined Strouhal property
    (name/description/_id attributes plus per-Reynolds Strouhal entries)."""

    def __init__(self, name="StrouhalUserDefinedProperty", package_path="sima/riflex", description=""):
        super().__init__(name, package_path, description)
        self.attributes.append(Attribute("name", "string", "", default=""))
        self.attributes.append(Attribute("description", "string", "", default=""))
        self.attributes.append(Attribute("_id", "string", "", default=""))
        self.attributes.append(BlueprintAttribute("scriptableValues", "sima/sima/ScriptableValue", "", True, Dimension("*")))
        self.attributes.append(BlueprintAttribute("reynoldStrouhalProperties", "sima/riflex/ReynoldStrouhalNumberItem", "", True, Dimension("*")))
# Generated with StrouhalUserDefinedPropertyBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from .strouhalspecificationproperty import StrouhalSpecificationPropertyBlueprint
class StrouhalUserDefinedPropertyBlueprint(StrouhalSpecificationPropertyBlueprint):
    """Blueprint for a user-defined Strouhal specification property.

    Auto-generated (see file header); extends StrouhalSpecificationPropertyBlueprint
    with a user-supplied table of Reynolds-number-dependent Strouhal items.
    """
    def __init__(self, name="StrouhalUserDefinedProperty", package_path="sima/riflex", description=""):
        # Register base identification attributes and the two variable-length
        # ("*" dimension) blueprint attribute tables.
        super().__init__(name,package_path,description)
        self.attributes.append(Attribute("name","string","",default=""))
        self.attributes.append(Attribute("description","string","",default=""))
        self.attributes.append(Attribute("_id","string","",default=""))
        self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
        self.attributes.append(BlueprintAttribute("reynoldStrouhalProperties","sima/riflex/ReynoldStrouhalNumberItem","",True,Dimension("*"))) | en | 0.560528 | # # Generated with StrouhalUserDefinedPropertyBlueprint | 1.920644 | 2
tests/test_game.py | jkatzer/usolitaire | 52 | 6620709 | import unittest
from usolitaire import game
class GameTest(unittest.TestCase):
    """Tests for the initial layout and stock dealing of usolitaire's Game."""

    def setUp(self):
        # Fresh game per test so no state leaks between test cases.
        self.game = game.Game()

    def test_game_init(self):
        """A new game has empty waste/foundations, a 1..7 tableau and a 24-card stock."""
        self.assertEqual(len(self.game.waste), 0)
        self.assertEqual([len(pile) for pile in self.game.foundations], [0, 0, 0, 0])
        self.assertEqual([len(pile) for pile in self.game.tableau], [1, 2, 3, 4, 5, 6, 7])
        self.assertEqual(len(self.game.stock), 24)
        # Every stock card starts face down.
        self.assertTrue(all(not c.face_up for c in self.game.stock))
        for pile in self.game.tableau:
            # Only the top card of each tableau pile is face up.
            self.assertTrue(all(not c.face_up for c in pile[:-1]))
            self.assertTrue(pile[-1].face_up)

    def test_game_from_stock(self):
        """Dealing moves exactly one card from the stock onto the waste."""
        prev_waste_len = len(self.game.waste)
        prev_stock_len = len(self.game.stock)
        self.game.deal_from_stock()
        self.assertEqual(len(self.game.waste), prev_waste_len + 1)
        self.assertEqual(len(self.game.stock), prev_stock_len - 1)
| import unittest
from usolitaire import game
class GameTest(unittest.TestCase):
    """Tests for the initial layout and stock dealing of usolitaire's Game."""

    def setUp(self):
        # Fresh game per test so no state leaks between test cases.
        self.game = game.Game()

    def test_game_init(self):
        """A new game has empty waste/foundations, a 1..7 tableau and a 24-card stock."""
        self.assertEqual(len(self.game.waste), 0)
        self.assertEqual([len(pile) for pile in self.game.foundations], [0, 0, 0, 0])
        self.assertEqual([len(pile) for pile in self.game.tableau], [1, 2, 3, 4, 5, 6, 7])
        self.assertEqual(len(self.game.stock), 24)
        # Every stock card starts face down.
        self.assertTrue(all(not c.face_up for c in self.game.stock))
        for pile in self.game.tableau:
            # Only the top card of each tableau pile is face up.
            self.assertTrue(all(not c.face_up for c in pile[:-1]))
            self.assertTrue(pile[-1].face_up)

    def test_game_from_stock(self):
        """Dealing moves exactly one card from the stock onto the waste."""
        prev_waste_len = len(self.game.waste)
        prev_stock_len = len(self.game.stock)
        self.game.deal_from_stock()
        self.assertEqual(len(self.game.waste), prev_waste_len + 1)
        self.assertEqual(len(self.game.stock), prev_stock_len - 1)
| none | 1 | 3.246895 | 3 | |
es_list_indexes.py | aebm/tools | 1 | 6620710 | <filename>es_list_indexes.py
#!/usr/bin/env python
# List all th elasticsearch indexes
from argparse import ArgumentParser
from elasticsearch import Elasticsearch
def main():
    """Print the name of every index on the given Elasticsearch server."""
    arg_parser = ArgumentParser(description='List all the elasticsearch indexes')
    arg_parser.add_argument('es_server', help='Connection string host:port')
    cli = arg_parser.parse_args()
    client = Elasticsearch(cli.es_server)
    # One line per index name on stdout.
    indices_info = client.indices.status().get('indices')
    print('\n'.join(indices_info.keys()))


if __name__ == '__main__':
    main()
| <filename>es_list_indexes.py
#!/usr/bin/env python
# List all th elasticsearch indexes
from argparse import ArgumentParser
from elasticsearch import Elasticsearch
def main():
    """Print the name of every index on the given Elasticsearch server."""
    arg_parser = ArgumentParser(description='List all the elasticsearch indexes')
    arg_parser.add_argument('es_server', help='Connection string host:port')
    cli = arg_parser.parse_args()
    client = Elasticsearch(cli.es_server)
    # One line per index name on stdout.
    indices_info = client.indices.status().get('indices')
    print('\n'.join(indices_info.keys()))


if __name__ == '__main__':
    main()
| en | 0.289152 | #!/usr/bin/env python # List all th elasticsearch indexes | 2.711658 | 3 |
biobb_ml/resampling/resampling.py | bioexcel/biobb_ml | 0 | 6620711 | <gh_stars>0
#!/usr/bin/env python3
"""Module containing the Resampling class and the command line interface."""
import argparse
import pandas as pd
import numpy as np
from collections import Counter
from biobb_common.generic.biobb_object import BiobbObject
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from biobb_ml.resampling.reg_resampler import resampler
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_ml.resampling.common import *
class Resampling(BiobbObject):
"""
| biobb_ml Resampling
| Wrapper of the imblearn.combine methods.
| Combine over- and under-sampling methods to remove samples and supplement the dataset. If regression is specified as type, the data will be resampled to classes in order to apply the resampling model. Visit the imbalanced-learn official website for the different methods accepted in this wrapper: `SMOTETomek <https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.combine.SMOTETomek.html>`_, `SMOTEENN <https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.combine.SMOTEENN.html>`_.
Args:
input_dataset_path (str): Path to the input dataset. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/resampling/dataset_resampling.csv>`_. Accepted formats: csv (edam:format_3752).
output_dataset_path (str): Path to the output dataset. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/resampling/ref_output_resampling.csv>`_. Accepted formats: csv (edam:format_3752).
properties (dic - Python dictionary object containing the tool parameters, not input/output files):
* **method** (*str*) - (None) Resampling method. It's a mandatory property. Values: smotetomek (`SMOTETomek <https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.combine.SMOTETomek.html>`_: Class to perform over-sampling using SMOTE and cleaning using Tomek links), smotenn (`SMOTEENN <https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.combine.SMOTEENN.html>`_: Class to perform over-sampling using SMOTE and cleaning using ENN).
* **type** (*str*) - (None) Type of oversampling. It's a mandatory property. Values: regression (the oversampling will be applied on a continuous dataset), classification (the oversampling will be applied on a classified dataset).
* **target** (*dict*) - ({}) Dependent variable you want to predict from your dataset. You can specify either a column name or a column index. Formats: { "column": "column3" } or { "index": 21 }. In case of multiple formats, the first one will be picked.
* **evaluate** (*bool*) - (False) Whether or not to evaluate the dataset before and after applying the resampling.
* **evaluate_splits** (*int*) - (3) [2~100|1] Number of folds to be applied by the Repeated Stratified K-Fold evaluation method. Must be at least 2.
* **evaluate_repeats** (*int*) - (3) [2~100|1] Number of times Repeated Stratified K-Fold cross validator needs to be repeated.
* **n_bins** (*int*) - (5) [1~100|1] Only for regression resampling. The number of classes that the user wants to generate with the target data.
* **balanced_binning** (*bool*) - (False) Only for regression resampling. Decides whether samples are to be distributed roughly equally across all classes.
* **sampling_strategy_over** (*dict*) - ({ "target": "auto" }) Sampling information applied in the dataset oversampling process. Formats: { "target": "auto" }, { "ratio": 0.3 } or { "dict": { 0: 300, 1: 200, 2: 100 } }. When "target", specify the class targeted by the resampling; the number of samples in the different classes will be equalized; possible choices are: minority (resample only the minority class), not minority (resample all classes but the minority class), not majority (resample all classes but the majority class), all (resample all classes), auto (equivalent to 'not majority'). When "ratio", it corresponds to the desired ratio of the number of samples in the minority class over the number of samples in the majority class after resampling (ONLY IN CASE OF BINARY CLASSIFICATION). When "dict", the keys correspond to the targeted classes and the values correspond to the desired number of samples for each targeted class.
* **sampling_strategy_under** (*dict*) - ({ "target": "auto" }) Sampling information applied in the dataset cleaning process. Formats: { "target": "auto" } or { "list": [0, 2, 3] }. When "target", specify the class targeted by the resampling; the number of samples in the different classes will be equalized; possible choices are: majority (resample only the majority class), not minority (resample all classes but the minority class), not majority (resample all classes but the majority class), all (resample all classes), auto (equivalent to 'not minority'). When "list", the list contains the classes targeted by the resampling.
* **random_state_method** (*int*) - (5) [1~1000|1] Controls the randomization of the algorithm.
* **random_state_evaluate** (*int*) - (5) [1~1000|1] Controls the shuffling applied to the Repeated Stratified K-Fold evaluation method.
* **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
* **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
Examples:
This is a use example of how to use the building block from Python::
from biobb_ml.resampling.resampling import resampling
prop = {
'method': 'smotenn',
'type': 'regression',
'target': {
'column': 'target'
},
'evaluate': true,
'n_bins': 10,
'sampling_strategy_over': {
'dict': { '4': 1000, '5': 1000, '6': 1000, '7': 1000 }
},
'sampling_strategy_under': {
'list': [0,1]
}
}
resampling(input_dataset_path='/path/to/myDataset.csv',
output_dataset_path='/path/to/newDataset.csv',
properties=prop)
Info:
* wrapped_software:
* name: imbalanced-learn combine
* version: >0.7.0
* license: MIT
* ontology:
* name: EDAM
* schema: http://edamontology.org/EDAM.owl
"""
def __init__(self, input_dataset_path, output_dataset_path,
             properties=None, **kwargs) -> None:
    """Store the dataset paths and resolve all resampling properties.

    Args:
        input_dataset_path (str): Path to the input CSV dataset.
        output_dataset_path (str): Path where the resampled CSV is written.
        properties (dict, optional): Tool configuration; see class docstring
            for the meaning and defaults of each key.
    """
    properties = properties or {}
    # Call parent class constructor
    super().__init__(properties)
    # Input/Output files
    self.io_dict = {
        "in": { "input_dataset_path": input_dataset_path },
        "out": { "output_dataset_path": output_dataset_path }
    }
    # Properties specific for BB; defaults mirror the class docstring.
    self.method = properties.get('method', None)  # mandatory: smotetomek | smotenn
    self.type = properties.get('type', None)  # mandatory: regression | classification
    self.target = properties.get('target', {})
    self.evaluate = properties.get('evaluate', False)
    self.evaluate_splits = properties.get('evaluate_splits', 3)
    self.evaluate_repeats = properties.get('evaluate_repeats', 3)
    self.n_bins = properties.get('n_bins', 5)  # regression resampling only
    self.balanced_binning = properties.get('balanced_binning', False)  # regression only
    self.sampling_strategy_over = properties.get('sampling_strategy_over', { 'target': 'auto' })
    self.sampling_strategy_under = properties.get('sampling_strategy_under', { 'target': 'auto' })
    self.random_state_method = properties.get('random_state_method', 5)
    self.random_state_evaluate = properties.get('random_state_evaluate', 5)
    self.properties = properties
    # Check the properties
    self.check_properties(properties)
def check_data_params(self, out_log, err_log):
    """ Checks all the input/output paths and parameters """
    # Paths are normalized/validated by the helpers from resampling.common;
    # the returned (possibly adjusted) paths replace the originals in io_dict.
    self.io_dict["in"]["input_dataset_path"] = check_input_path(self.io_dict["in"]["input_dataset_path"], "input_dataset_path", out_log, self.__class__.__name__)
    self.io_dict["out"]["output_dataset_path"] = check_output_path(self.io_dict["out"]["output_dataset_path"],"output_dataset_path", False, out_log, self.__class__.__name__)
@launchlogger
def launch(self) -> int:
    """Execute the :class:`Resampling <resampling.resampling.Resampling>` resampling.resampling.Resampling object.

    Loads the input CSV, label-encodes non-numeric columns, applies the
    configured combined over/under-sampling method (binning the continuous
    target first when ``type`` is 'regression'), optionally evaluates a
    RandomForestClassifier before and after resampling, logs the class
    distributions, and writes the resampled dataset to the output path.

    Returns:
        int: 0 on success.
    """
    # check input/output paths and parameters
    self.check_data_params(self.out_log, self.err_log)
    # Setup Biobb
    if self.check_restart(): return 0
    self.stage_files()
    # check mandatory properties
    method, over, under = getCombinedMethod(self.method, self.out_log, self.__class__.__name__)
    checkResamplingType(self.type, self.out_log, self.__class__.__name__)
    sampling_strategy_over = getSamplingStrategy(self.sampling_strategy_over, self.out_log, self.__class__.__name__)
    sampling_strategy_under = getSamplingStrategy(self.sampling_strategy_under, self.out_log, self.__class__.__name__)
    # load dataset
    fu.log('Getting dataset from %s' % self.io_dict["in"]["input_dataset_path"], self.out_log, self.global_log)
    if 'column' in self.target:
        # A column name was given, so the CSV is read with a header row.
        labels = getHeader(self.io_dict["in"]["input_dataset_path"])
        skiprows = 1
        header = 0
    else:
        labels = None
        skiprows = None
        header = None
    data = pd.read_csv(self.io_dict["in"]["input_dataset_path"], header = None, sep="\s+|;|:|,|\t", engine="python", skiprows=skiprows, names=labels)
    train_df = data
    ranges = None
    # Label-encode non-numeric columns; remember them so they can be decoded
    # back after resampling.
    le = preprocessing.LabelEncoder()
    cols_encoded = []
    for column in train_df:
        # if type object, LabelEncoder.fit_transform
        if train_df[column].dtypes == 'object':
            cols_encoded.append(column)
            train_df[column] = le.fit_transform(train_df[column])
    # defining X (all columns except the target)
    X = train_df.loc[:, train_df.columns != getTargetValue(self.target, self.out_log, self.__class__.__name__)]
    # instantiate the combined resampling method with the over/under strategies
    if self.method == 'smotetomek':
        method = method(smote = over(sampling_strategy=sampling_strategy_over), tomek = under(sampling_strategy=sampling_strategy_under), random_state=self.random_state_method)
    elif self.method == 'smotenn':
        method = method(smote = over(sampling_strategy=sampling_strategy_over), enn = under(sampling_strategy=sampling_strategy_under), random_state=self.random_state_method)
    fu.log('Target: %s' % (getTargetValue(self.target, self.out_log, self.__class__.__name__)), self.out_log, self.global_log)
    # resampling
    if self.type == 'regression':
        fu.log('Resampling regression dataset, continuous data will be classified', self.out_log, self.global_log)
        # call resampler class for Regression ReSampling
        rs = resampler()
        # Create n_bins classes for the dataset
        ranges, y, target_pos = rs.fit(train_df, target=getTargetValue(self.target, self.out_log, self.__class__.__name__), bins=self.n_bins, balanced_binning=self.balanced_binning, verbose=0)
        # Get the re-sampled data
        final_X, final_y = rs.resample(method, train_df, y)
    elif self.type == 'classification':
        # get X and y
        y = getTarget(self.target, train_df, self.out_log, self.__class__.__name__)
        # fit and resample
        final_X, final_y = method.fit_resample(X, y)
        target_pos = None
    # evaluate dataset BEFORE resampling (optional)
    if self.evaluate:
        fu.log('Evaluating data before resampling with RandomForestClassifier', self.out_log, self.global_log)
        cv = RepeatedStratifiedKFold(n_splits=self.evaluate_splits, n_repeats=self.evaluate_repeats, random_state=self.random_state_evaluate)
        # evaluate model
        scores = cross_val_score(RandomForestClassifier(class_weight='balanced'), X, y, scoring='accuracy', cv=cv, n_jobs=-1)
        if not np.isnan(np.mean(scores)):
            fu.log('Mean Accuracy before resampling: %.3f' % (np.mean(scores)), self.out_log, self.global_log)
        else:
            fu.log('Unable to calculate cross validation score, NaN was returned.', self.out_log, self.global_log)
    # log distribution before resampling
    dist = ''
    for k,v in Counter(y).items():
        per = v / len(y) * 100
        rng = ''
        if ranges: rng = str(ranges[k])
        dist = dist + 'Class=%d, n=%d (%.3f%%) %s\n' % (k, v, per, rng)
    fu.log('Classes distribution before resampling:\n\n%s' % dist, self.out_log, self.global_log)
    # join final_X and final_y in the output dataframe
    if header is None:
        # numpy
        out_df = np.column_stack((final_X, final_y))
    else:
        # pandas
        out_df = final_X.join(final_y)
    # if no header, convert np to pd
    if header is None: out_df = pd.DataFrame(data=out_df)
    # if cols encoded, decode them
    if cols_encoded:
        for column in cols_encoded:
            if header is None:
                out_df = out_df.astype({column: int } )
            out_df[column] = le.inverse_transform(out_df[column].values.ravel())
    # if no header, target is in a different column
    if target_pos: t = target_pos
    else: t = getTargetValue(self.target, self.out_log, self.__class__.__name__)
    # log distribution after resampling
    if self.type == 'regression':
        ranges, y_out, _ = rs.fit(out_df, target=t, bins=self.n_bins, balanced_binning=self.balanced_binning, verbose=0)
    elif self.type == 'classification':
        y_out = getTarget(self.target, out_df, self.out_log, self.__class__.__name__)
    dist = ''
    for k,v in Counter(y_out).items():
        per = v / len(y_out) * 100
        rng = ''
        if ranges: rng = str(ranges[k])
        dist = dist + 'Class=%d, n=%d (%.3f%%) %s\n' % (k, v, per, rng)
    fu.log('Classes distribution after resampling:\n\n%s' % dist, self.out_log, self.global_log)
    # evaluate dataset AFTER resampling (optional)
    if self.evaluate:
        fu.log('Evaluating data after resampling with RandomForestClassifier', self.out_log, self.global_log)
        # FIX: use the user-configured evaluation parameters here as well.
        # Previously this was hard-coded to n_splits=3, n_repeats=3,
        # random_state=42, silently ignoring evaluate_splits /
        # evaluate_repeats / random_state_evaluate and making the
        # before/after accuracy figures incomparable.
        cv = RepeatedStratifiedKFold(n_splits=self.evaluate_splits, n_repeats=self.evaluate_repeats, random_state=self.random_state_evaluate)
        # evaluate model
        scores = cross_val_score(RandomForestClassifier(class_weight='balanced'), final_X, y_out, scoring='accuracy', cv=cv, n_jobs=-1)
        if not np.isnan(np.mean(scores)):
            fu.log('Mean Accuracy after resampling a %s dataset with %s method: %.3f' % (self.type, resampling_methods[self.method]['method'], np.mean(scores)), self.out_log, self.global_log)
        else:
            fu.log('Unable to calculate cross validation score, NaN was returned.', self.out_log, self.global_log)
    # save output
    hdr = False
    if header == 0: hdr = True
    fu.log('Saving resampled dataset to %s' % self.io_dict["out"]["output_dataset_path"], self.out_log, self.global_log)
    out_df.to_csv(self.io_dict["out"]["output_dataset_path"], index = False, header=hdr)
    return 0
def resampling(input_dataset_path: str, output_dataset_path: str, properties: dict = None, **kwargs) -> int:
    """Create a :class:`Resampling <resampling.resampling.Resampling>` instance and
    run its :meth:`launch() <resampling.resampling.Resampling.launch>` method."""
    block = Resampling(input_dataset_path=input_dataset_path,
                       output_dataset_path=output_dataset_path,
                       properties=properties, **kwargs)
    return block.launch()
def main():
    """Command line execution of this building block. Please check the command line documentation."""
    cli_parser = argparse.ArgumentParser(
        description="Wrapper of the imblearn.combine methods.",
        formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    cli_parser.add_argument('--config', required=False, help='Configuration file')
    # Arguments specific to this building block.
    mandatory = cli_parser.add_argument_group('required arguments')
    mandatory.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')
    mandatory.add_argument('--output_dataset_path', required=True, help='Path to the output dataset. Accepted formats: csv.')
    parsed = cli_parser.parse_args()
    parsed.config = parsed.config or "{}"
    prop_dic = settings.ConfReader(config=parsed.config).get_prop_dic()
    # Delegate to the module-level wrapper function.
    resampling(input_dataset_path=parsed.input_dataset_path,
               output_dataset_path=parsed.output_dataset_path,
               properties=prop_dic)


if __name__ == '__main__':
    main()
| #!/usr/bin/env python3
"""Module containing the Resampling class and the command line interface."""
import argparse
import pandas as pd
import numpy as np
from collections import Counter
from biobb_common.generic.biobb_object import BiobbObject
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from biobb_ml.resampling.reg_resampler import resampler
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_ml.resampling.common import *
class Resampling(BiobbObject):
"""
| biobb_ml Resampling
| Wrapper of the imblearn.combine methods.
| Combine over- and under-sampling methods to remove samples and supplement the dataset. If regression is specified as type, the data will be resampled to classes in order to apply the resampling model. Visit the imbalanced-learn official website for the different methods accepted in this wrapper: `SMOTETomek <https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.combine.SMOTETomek.html>`_, `SMOTEENN <https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.combine.SMOTEENN.html>`_.
Args:
input_dataset_path (str): Path to the input dataset. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/resampling/dataset_resampling.csv>`_. Accepted formats: csv (edam:format_3752).
output_dataset_path (str): Path to the output dataset. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/resampling/ref_output_resampling.csv>`_. Accepted formats: csv (edam:format_3752).
properties (dic - Python dictionary object containing the tool parameters, not input/output files):
* **method** (*str*) - (None) Resampling method. It's a mandatory property. Values: smotetomek (`SMOTETomek <https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.combine.SMOTETomek.html>`_: Class to perform over-sampling using SMOTE and cleaning using Tomek links), smotenn (`SMOTEENN <https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.combine.SMOTEENN.html>`_: Class to perform over-sampling using SMOTE and cleaning using ENN).
* **type** (*str*) - (None) Type of oversampling. It's a mandatory property. Values: regression (the oversampling will be applied on a continuous dataset), classification (the oversampling will be applied on a classified dataset).
* **target** (*dict*) - ({}) Dependent variable you want to predict from your dataset. You can specify either a column name or a column index. Formats: { "column": "column3" } or { "index": 21 }. In case of multiple formats, the first one will be picked.
* **evaluate** (*bool*) - (False) Whether or not to evaluate the dataset before and after applying the resampling.
* **evaluate_splits** (*int*) - (3) [2~100|1] Number of folds to be applied by the Repeated Stratified K-Fold evaluation method. Must be at least 2.
* **evaluate_repeats** (*int*) - (3) [2~100|1] Number of times Repeated Stratified K-Fold cross validator needs to be repeated.
* **n_bins** (*int*) - (5) [1~100|1] Only for regression resampling. The number of classes that the user wants to generate with the target data.
* **balanced_binning** (*bool*) - (False) Only for regression resampling. Decides whether samples are to be distributed roughly equally across all classes.
* **sampling_strategy_over** (*dict*) - ({ "target": "auto" }) Sampling information applied in the dataset oversampling process. Formats: { "target": "auto" }, { "ratio": 0.3 } or { "dict": { 0: 300, 1: 200, 2: 100 } }. When "target", specify the class targeted by the resampling; the number of samples in the different classes will be equalized; possible choices are: minority (resample only the minority class), not minority (resample all classes but the minority class), not majority (resample all classes but the majority class), all (resample all classes), auto (equivalent to 'not majority'). When "ratio", it corresponds to the desired ratio of the number of samples in the minority class over the number of samples in the majority class after resampling (ONLY IN CASE OF BINARY CLASSIFICATION). When "dict", the keys correspond to the targeted classes and the values correspond to the desired number of samples for each targeted class.
* **sampling_strategy_under** (*dict*) - ({ "target": "auto" }) Sampling information applied in the dataset cleaning process. Formats: { "target": "auto" } or { "list": [0, 2, 3] }. When "target", specify the class targeted by the resampling; the number of samples in the different classes will be equalized; possible choices are: majority (resample only the majority class), not minority (resample all classes but the minority class), not majority (resample all classes but the majority class), all (resample all classes), auto (equivalent to 'not minority'). When "list", the list contains the classes targeted by the resampling.
* **random_state_method** (*int*) - (5) [1~1000|1] Controls the randomization of the algorithm.
* **random_state_evaluate** (*int*) - (5) [1~1000|1] Controls the shuffling applied to the Repeated Stratified K-Fold evaluation method.
* **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
* **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
Examples:
This is a use example of how to use the building block from Python::
from biobb_ml.resampling.resampling import resampling
prop = {
'method': 'smotenn',
'type': 'regression',
'target': {
'column': 'target'
},
'evaluate': true,
'n_bins': 10,
'sampling_strategy_over': {
'dict': { '4': 1000, '5': 1000, '6': 1000, '7': 1000 }
},
'sampling_strategy_under': {
'list': [0,1]
}
}
resampling(input_dataset_path='/path/to/myDataset.csv',
output_dataset_path='/path/to/newDataset.csv',
properties=prop)
Info:
* wrapped_software:
* name: imbalanced-learn combine
* version: >0.7.0
* license: MIT
* ontology:
* name: EDAM
* schema: http://edamontology.org/EDAM.owl
"""
def __init__(self, input_dataset_path, output_dataset_path,
             properties=None, **kwargs) -> None:
    """Store the dataset paths and resolve all resampling properties.

    Args:
        input_dataset_path (str): Path to the input CSV dataset.
        output_dataset_path (str): Path where the resampled CSV is written.
        properties (dict, optional): Tool configuration; see class docstring
            for the meaning and defaults of each key.
    """
    properties = properties or {}
    # Call parent class constructor
    super().__init__(properties)
    # Input/Output files
    self.io_dict = {
        "in": { "input_dataset_path": input_dataset_path },
        "out": { "output_dataset_path": output_dataset_path }
    }
    # Properties specific for BB; defaults mirror the class docstring.
    self.method = properties.get('method', None)  # mandatory: smotetomek | smotenn
    self.type = properties.get('type', None)  # mandatory: regression | classification
    self.target = properties.get('target', {})
    self.evaluate = properties.get('evaluate', False)
    self.evaluate_splits = properties.get('evaluate_splits', 3)
    self.evaluate_repeats = properties.get('evaluate_repeats', 3)
    self.n_bins = properties.get('n_bins', 5)  # regression resampling only
    self.balanced_binning = properties.get('balanced_binning', False)  # regression only
    self.sampling_strategy_over = properties.get('sampling_strategy_over', { 'target': 'auto' })
    self.sampling_strategy_under = properties.get('sampling_strategy_under', { 'target': 'auto' })
    self.random_state_method = properties.get('random_state_method', 5)
    self.random_state_evaluate = properties.get('random_state_evaluate', 5)
    self.properties = properties
    # Check the properties
    self.check_properties(properties)
def check_data_params(self, out_log, err_log):
    """ Checks all the input/output paths and parameters """
    # Paths are normalized/validated by the helpers from resampling.common;
    # the returned (possibly adjusted) paths replace the originals in io_dict.
    self.io_dict["in"]["input_dataset_path"] = check_input_path(self.io_dict["in"]["input_dataset_path"], "input_dataset_path", out_log, self.__class__.__name__)
    self.io_dict["out"]["output_dataset_path"] = check_output_path(self.io_dict["out"]["output_dataset_path"],"output_dataset_path", False, out_log, self.__class__.__name__)
@launchlogger
def launch(self) -> int:
    """Execute the :class:`Resampling <resampling.resampling.Resampling>` resampling.resampling.Resampling object.

    Loads the input CSV, label-encodes non-numeric columns, applies the
    configured combined over/under-sampling method (binning the continuous
    target first when ``type`` is 'regression'), optionally evaluates a
    RandomForestClassifier before and after resampling, logs the class
    distributions, and writes the resampled dataset to the output path.

    Returns:
        int: 0 on success.
    """
    # check input/output paths and parameters
    self.check_data_params(self.out_log, self.err_log)
    # Setup Biobb
    if self.check_restart(): return 0
    self.stage_files()
    # check mandatory properties
    method, over, under = getCombinedMethod(self.method, self.out_log, self.__class__.__name__)
    checkResamplingType(self.type, self.out_log, self.__class__.__name__)
    sampling_strategy_over = getSamplingStrategy(self.sampling_strategy_over, self.out_log, self.__class__.__name__)
    sampling_strategy_under = getSamplingStrategy(self.sampling_strategy_under, self.out_log, self.__class__.__name__)
    # load dataset
    fu.log('Getting dataset from %s' % self.io_dict["in"]["input_dataset_path"], self.out_log, self.global_log)
    if 'column' in self.target:
        # A column name was given, so the CSV is read with a header row.
        labels = getHeader(self.io_dict["in"]["input_dataset_path"])
        skiprows = 1
        header = 0
    else:
        labels = None
        skiprows = None
        header = None
    data = pd.read_csv(self.io_dict["in"]["input_dataset_path"], header = None, sep="\s+|;|:|,|\t", engine="python", skiprows=skiprows, names=labels)
    train_df = data
    ranges = None
    # Label-encode non-numeric columns; remember them so they can be decoded
    # back after resampling.
    le = preprocessing.LabelEncoder()
    cols_encoded = []
    for column in train_df:
        # if type object, LabelEncoder.fit_transform
        if train_df[column].dtypes == 'object':
            cols_encoded.append(column)
            train_df[column] = le.fit_transform(train_df[column])
    # defining X (all columns except the target)
    X = train_df.loc[:, train_df.columns != getTargetValue(self.target, self.out_log, self.__class__.__name__)]
    # instantiate the combined resampling method with the over/under strategies
    if self.method == 'smotetomek':
        method = method(smote = over(sampling_strategy=sampling_strategy_over), tomek = under(sampling_strategy=sampling_strategy_under), random_state=self.random_state_method)
    elif self.method == 'smotenn':
        method = method(smote = over(sampling_strategy=sampling_strategy_over), enn = under(sampling_strategy=sampling_strategy_under), random_state=self.random_state_method)
    fu.log('Target: %s' % (getTargetValue(self.target, self.out_log, self.__class__.__name__)), self.out_log, self.global_log)
    # resampling
    if self.type == 'regression':
        fu.log('Resampling regression dataset, continuous data will be classified', self.out_log, self.global_log)
        # call resampler class for Regression ReSampling
        rs = resampler()
        # Create n_bins classes for the dataset
        ranges, y, target_pos = rs.fit(train_df, target=getTargetValue(self.target, self.out_log, self.__class__.__name__), bins=self.n_bins, balanced_binning=self.balanced_binning, verbose=0)
        # Get the re-sampled data
        final_X, final_y = rs.resample(method, train_df, y)
    elif self.type == 'classification':
        # get X and y
        y = getTarget(self.target, train_df, self.out_log, self.__class__.__name__)
        # fit and resample
        final_X, final_y = method.fit_resample(X, y)
        target_pos = None
    # evaluate dataset BEFORE resampling (optional)
    if self.evaluate:
        fu.log('Evaluating data before resampling with RandomForestClassifier', self.out_log, self.global_log)
        cv = RepeatedStratifiedKFold(n_splits=self.evaluate_splits, n_repeats=self.evaluate_repeats, random_state=self.random_state_evaluate)
        # evaluate model
        scores = cross_val_score(RandomForestClassifier(class_weight='balanced'), X, y, scoring='accuracy', cv=cv, n_jobs=-1)
        if not np.isnan(np.mean(scores)):
            fu.log('Mean Accuracy before resampling: %.3f' % (np.mean(scores)), self.out_log, self.global_log)
        else:
            fu.log('Unable to calculate cross validation score, NaN was returned.', self.out_log, self.global_log)
    # log distribution before resampling
    dist = ''
    for k,v in Counter(y).items():
        per = v / len(y) * 100
        rng = ''
        if ranges: rng = str(ranges[k])
        dist = dist + 'Class=%d, n=%d (%.3f%%) %s\n' % (k, v, per, rng)
    fu.log('Classes distribution before resampling:\n\n%s' % dist, self.out_log, self.global_log)
    # join final_X and final_y in the output dataframe
    if header is None:
        # numpy
        out_df = np.column_stack((final_X, final_y))
    else:
        # pandas
        out_df = final_X.join(final_y)
    # if no header, convert np to pd
    if header is None: out_df = pd.DataFrame(data=out_df)
    # if cols encoded, decode them
    if cols_encoded:
        for column in cols_encoded:
            if header is None:
                out_df = out_df.astype({column: int } )
            out_df[column] = le.inverse_transform(out_df[column].values.ravel())
    # if no header, target is in a different column
    if target_pos: t = target_pos
    else: t = getTargetValue(self.target, self.out_log, self.__class__.__name__)
    # log distribution after resampling
    if self.type == 'regression':
        ranges, y_out, _ = rs.fit(out_df, target=t, bins=self.n_bins, balanced_binning=self.balanced_binning, verbose=0)
    elif self.type == 'classification':
        y_out = getTarget(self.target, out_df, self.out_log, self.__class__.__name__)
    dist = ''
    for k,v in Counter(y_out).items():
        per = v / len(y_out) * 100
        rng = ''
        if ranges: rng = str(ranges[k])
        dist = dist + 'Class=%d, n=%d (%.3f%%) %s\n' % (k, v, per, rng)
    fu.log('Classes distribution after resampling:\n\n%s' % dist, self.out_log, self.global_log)
    # evaluate dataset AFTER resampling (optional)
    if self.evaluate:
        fu.log('Evaluating data after resampling with RandomForestClassifier', self.out_log, self.global_log)
        # FIX: use the user-configured evaluation parameters here as well.
        # Previously this was hard-coded to n_splits=3, n_repeats=3,
        # random_state=42, silently ignoring evaluate_splits /
        # evaluate_repeats / random_state_evaluate and making the
        # before/after accuracy figures incomparable.
        cv = RepeatedStratifiedKFold(n_splits=self.evaluate_splits, n_repeats=self.evaluate_repeats, random_state=self.random_state_evaluate)
        # evaluate model
        scores = cross_val_score(RandomForestClassifier(class_weight='balanced'), final_X, y_out, scoring='accuracy', cv=cv, n_jobs=-1)
        if not np.isnan(np.mean(scores)):
            fu.log('Mean Accuracy after resampling a %s dataset with %s method: %.3f' % (self.type, resampling_methods[self.method]['method'], np.mean(scores)), self.out_log, self.global_log)
        else:
            fu.log('Unable to calculate cross validation score, NaN was returned.', self.out_log, self.global_log)
    # save output
    hdr = False
    if header == 0: hdr = True
    fu.log('Saving resampled dataset to %s' % self.io_dict["out"]["output_dataset_path"], self.out_log, self.global_log)
    out_df.to_csv(self.io_dict["out"]["output_dataset_path"], index = False, header=hdr)
    return 0
def resampling(input_dataset_path: str, output_dataset_path: str, properties: dict = None, **kwargs) -> int:
"""Execute the :class:`Resampling <resampling.resampling.Resampling>` class and
execute the :meth:`launch() <resampling.resampling.Resampling.launch>` method."""
return Resampling(input_dataset_path=input_dataset_path,
output_dataset_path=output_dataset_path,
properties=properties, **kwargs).launch()
def main():
"""Command line execution of this building block. Please check the command line documentation."""
parser = argparse.ArgumentParser(description="Wrapper of the imblearn.combine methods.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
parser.add_argument('--config', required=False, help='Configuration file')
# Specific args of each building block
required_args = parser.add_argument_group('required arguments')
required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')
required_args.add_argument('--output_dataset_path', required=True, help='Path to the output dataset. Accepted formats: csv.')
args = parser.parse_args()
args.config = args.config or "{}"
properties = settings.ConfReader(config=args.config).get_prop_dic()
# Specific call of each building block
resampling(input_dataset_path=args.input_dataset_path,
output_dataset_path=args.output_dataset_path,
properties=properties)
if __name__ == '__main__':
main() | en | 0.617063 | #!/usr/bin/env python3 Module containing the Resampling class and the command line interface. | biobb_ml Resampling | Wrapper of the imblearn.combine methods. | Combine over- and under-sampling methods to remove samples and supplement the dataset. If regression is specified as type, the data will be resampled to classes in order to apply the resampling model. Visit the imbalanced-learn official website for the different methods accepted in this wrapper: `SMOTETomek <https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.combine.SMOTETomek.html>`_, `SMOTEENN <https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.combine.SMOTEENN.html>`_. Args: input_dataset_path (str): Path to the input dataset. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/resampling/dataset_resampling.csv>`_. Accepted formats: csv (edam:format_3752). output_dataset_path (str): Path to the output dataset. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/resampling/ref_output_resampling.csv>`_. Accepted formats: csv (edam:format_3752). properties (dic - Python dictionary object containing the tool parameters, not input/output files): * **method** (*str*) - (None) Resampling method. It's a mandatory property. Values: smotetomek (`SMOTETomek <https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.combine.SMOTETomek.html>`_: Class to perform over-sampling using SMOTE and cleaning using Tomek links), smotenn (`SMOTEENN <https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.combine.SMOTEENN.html>`_: Class to perform over-sampling using SMOTE and cleaning using ENN). * **type** (*str*) - (None) Type of oversampling. It's a mandatory property. Values: regression (the oversampling will be applied on a continuous dataset), classification (the oversampling will be applied on a classified dataset). 
* **target** (*dict*) - ({}) Dependent variable you want to predict from your dataset. You can specify either a column name or a column index. Formats: { "column": "column3" } or { "index": 21 }. In case of mulitple formats, the first one will be picked. * **evaluate** (*bool*) - (False) Whether or not to evaluate the dataset before and after applying the resampling. * **evaluate_splits** (*int*) - (3) [2~100|1] Number of folds to be applied by the Repeated Stratified K-Fold evaluation method. Must be at least 2. * **evaluate_repeats** (*int*) - (3) [2~100|1] Number of times Repeated Stratified K-Fold cross validator needs to be repeated. * **n_bins** (*int*) - (5) [1~100|1] Only for regression resampling. The number of classes that the user wants to generate with the target data. * **balanced_binning** (*bool*) - (False) Only for regression resampling. Decides whether samples are to be distributed roughly equally across all classes. * **sampling_strategy_over** (*dict*) - ({ "target": "auto" }) Sampling information applied in the dataset oversampling process. Formats: { "target": "auto" }, { "ratio": 0.3 } or { "dict": { 0: 300, 1: 200, 2: 100 } }. When "target", specify the class targeted by the resampling; the number of samples in the different classes will be equalized; possible choices are: minority (resample only the minority class), not minority (resample all classes but the minority class), not majority (resample all classes but the majority class), all (resample all classes), auto (equivalent to 'not majority'). When "ratio", it corresponds to the desired ratio of the number of samples in the minority class over the number of samples in the majority class after resampling (ONLY IN CASE OF BINARY CLASSIFICATION). When "dict", the keys correspond to the targeted classes and the values correspond to the desired number of samples for each targeted class. 
* **sampling_strategy_under** (*dict*) - ({ "target": "auto" }) Sampling information applied in the dataset cleaning process. Formats: { "target": "auto" } or { "list": [0, 2, 3] }. When "target", specify the class targeted by the resampling; the number of samples in the different classes will be equalized; possible choices are: majority (resample only the majority class), not minority (resample all classes but the minority class), not majority (resample all classes but the majority class), all (resample all classes), auto (equivalent to 'not minority'). When "list", the list contains the classes targeted by the resampling. * **random_state_method** (*int*) - (5) [1~1000|1] Controls the randomization of the algorithm. * **random_state_evaluate** (*int*) - (5) [1~1000|1] Controls the shuffling applied to the Repeated Stratified K-Fold evaluation method. * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files. * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist. Examples: This is a use example of how to use the building block from Python:: from biobb_ml.resampling.resampling import resampling prop = { 'method': 'smotenn', 'type': 'regression', 'target': { 'column': 'target' }, 'evaluate': true, 'n_bins': 10, 'sampling_strategy_over': { 'dict': { '4': 1000, '5': 1000, '6': 1000, '7': 1000 } }, 'sampling_strategy_under': { 'list': [0,1] } } resampling(input_dataset_path='/path/to/myDataset.csv', output_dataset_path='/path/to/newDataset.csv', properties=prop) Info: * wrapped_software: * name: imbalanced-learn combine * version: >0.7.0 * license: MIT * ontology: * name: EDAM * schema: http://edamontology.org/EDAM.owl # Call parent class constructor # Input/Output files # Properties specific for BB # Check the properties Checks all the input/output paths and parameters Execute the :class:`Resampling <resampling.resampling.Resampling>` resampling.resampling.Resampling object. 
# check input/output paths and parameters # Setup Biobb # check mandatory properties # load dataset # if type object, LabelEncoder.fit_transform # defining X # calling resample method # resampling # call resampler class for Regression ReSampling # Create n_bins classes for the dataset # Get the re-sampled data # get X and y # fit and resample # evaluate resampling # evaluate model # log distribution before resampling # join final_X and final_y in the output dataframe # numpy # pandas # if no header, convert np to pd # if cols encoded, decode them # if no header, target is in a different column # log distribution after resampling # evaluate resampling # evaluate model # save output Execute the :class:`Resampling <resampling.resampling.Resampling>` class and execute the :meth:`launch() <resampling.resampling.Resampling.launch>` method. Command line execution of this building block. Please check the command line documentation. # Specific args of each building block # Specific call of each building block | 2.602093 | 3 |
control/Control/src/ai/astar.py | jacksonicson/paper.IS2015 | 1 | 6620712 | <filename>control/Control/src/ai/astar.py
from heapq import heapify, heappop
from sets import Set
import sys
class pq(object):
def __init__(self, init=None):
self.inner, self.item_f = [], {}
if not None is init:
self.inner = [[priority, item] for item, priority in enumerate(init)]
heapify(self.inner)
self.item_f = {pi[1]: pi for pi in self.inner}
def __contains__(self, item):
return item in self.item_f
def put(self, item, priority, heap=True):
entry = [priority, item]
self.inner.append(entry)
if heap:
heapify(self.inner)
self.item_f[item] = entry
def top_one(self):
if not len(self.inner): return None
priority, item = heappop(self.inner)
del self.item_f[item]
return item
def re_prioritize(self, items, prioritizer): # =lambda x: x + 1
for item in items:
if not item in self.item_f: continue
entry = self.item_f[item]
entry[0] = prioritizer(entry[0])
def heap(self):
heapify(self.inner)
class ANode(object):
def __init__(self, nodeload, domains, domainload):
self.predecessor = None
self.g = 99999
self.nodeload = nodeload
self.domainload = domainload
self.domains = domains
self.successors = None
self.costs = None
self.hash = None
self.tmp = None
def dump(self):
print self.nodes
print self.domains
def get_successors(self, mesh, target):
# generates _all_ successors
# The heuristic will not go into all of them
# for all migration possibilities create a new state
# state heuristics is determined by f_heuristics function
if not self.successors is None:
print 'REAL'
return self.successors, self.costs
successors = []
costs = []
# each domain to each node except its own
for d in xrange(len(self.domains)):
for node in xrange(len(self.nodeload)):
nodes = list(self.nodeload)
domains = list(self.domains)
if domains[d] == node:
continue
# remove from source node
currnode = domains[d]
nodes[currnode] -= self.domainload[d]
# add to target node
nodes[node] += self.domainload[d]
# Change node of domain
domains[d] = node
# Check overload constraint
if nodes[node] > 100:
continue
cost = 1
# cost += 0.5 * (nodes[node] / 100.0) # increases expansions
if target.domains[d] == self.domains[d]: # decreases expansions
cost += 3
elif target.domains[d] == domains[d]:
cost -= 1
elif nodes[node] == 0:
cost -= 1
elif nodes[node] > 80:
cost += 1
new = ANode(nodes, domains, self.domainload)
# Check constraints
# check if node is in mesh already
test = mesh.find(new)
if test:
new = test
else:
new.predecessor = self
mesh.put(new)
successors.append(new)
costs.append(cost)
self.successors = successors
self.costs = costs
return (successors, costs)
def __eq__(self, another):
if another == None:
return False
return self.domains == another.domains
def __hash__(self):
if self.hash:
return self.hash
h = 0
for i, v in enumerate(self.domains):
h += (i + 1) * v
self.hash = h
return h
def f_heuristics(self, target):
counter = 0
for i in xrange(len(self.domains)):
if target.domains[i] != self.domains[i]:
counter += 1
return counter
class AStar(object):
def __init__(self):
self.openlist = pq()
self.closelist = Set()
def search(self, mesh, start, end):
self.mesh = mesh
self.end = end
# Add start node to the open list
start.g = 0
self.openlist.put(start, 0)
expansions = 0
try:
while True:
# sys.stdout.write('.')
value = self.openlist.top_one()
if value is None:
print 'nothing found again'
print 'expansions %i' % expansions
return
if value == end:
print 'END FOUND.... run backtracking now'
print 'expansions %i' % expansions
return end
if (expansions % 50) == 0:
print 'expansions %i' % expansions
expansions += 1
self.expand(value)
self.closelist.add(value)
except KeyError:
pass
print 'nothing found'
def expand(self, current):
successors, costs = current.get_successors(self.mesh, self.end)
for i in xrange(len(successors)):
successor = successors[i]
if successor in self.closelist:
continue
# Relaxion on costs
new_g = current.g + costs[i]
if successor in self.openlist and new_g >= successor.g:
continue
# Update predecessor for backtracking
successor.predecessor = current
successor.g = new_g
# Update open list
f = new_g + current.f_heuristics(self.end)
if successor in self.openlist:
self.openlist.re_prioritize((successor,), lambda x: f)
else:
self.openlist.put(successor, f, False)
self.openlist.heap()
def main():
# Create initial state
# nodes = 50
# domainc = 50
#
# domains = []
# domainl = []
# nodel = [0 for _ in xrange(nodes)]
#
# for i in xrange(domainc):
# while True:
# import random
# node = random.randint(0, nodes-1)
# load = abs(random.randint(0, 50-1))
# if (nodel[node] + load) > 100:
# continue
# domains.append(node)
# domainl.append(load)
# nodel[node] += load
# break
#
# nodel.append(0)
nodes = 6
domains = [1,2,2,3,4,5]
target = [2,1,2,3,5,4]
domainl = [90,20,00,0,90,20]
nodel = [0 for _ in xrange(nodes)]
for i in xrange(len(domains)):
nodel[domains[i]] += domainl[i]
print domains
print domainl
print nodel
print 'A*'
# Create target state
# target = list(domains)
# target[3] = 1
# Validate overload
targetl = [0 for _ in xrange(nodes)]
for i, v in enumerate(target):
targetl[v] += domainl[i]
for i in targetl:
if i > 100:
print 'INVALID TARGET STATE'
return
# Create target and end node
print list(nodel)
start = ANode(nodel, domains, domainl)
target = ANode(nodel, target, domainl)
# Create a new mesh
mesh = Mesh()
mesh.put(start)
mesh.put(target)
# Run search
s = AStar()
end = s.search(mesh, start, target)
while True:
print end.domains
if end.predecessor is None:
break
end = end.predecessor
def plan(node_count, start, target, domain_load):
print start
print target
# Calulate the nodes load for the start allocation
nodes_load = [0 for _ in xrange(node_count)]
for i in xrange(len(start)):
nodes_load[start[i]] += domain_load[i]
# Create A* nodes for the search
start = ANode(nodes_load, start, domain_load)
target = ANode(nodes_load, target, domain_load)
# Create a new mesh with start and target nodes
mesh = Mesh()
mesh.put(start)
mesh.put(target)
# Run A*
s = AStar()
end = s.search(mesh, start, target)
# Extract migrations from A*
migrations = []
while True:
predecessor = end.predecessor
if end.predecessor == None:
break
for domain, server in enumerate(end.domains):
if predecessor.domains[domain] != server:
migration = (domain, predecessor.domains[domain], server)
migrations.append(migration)
break
end = predecessor
print migrations
return reversed(migrations)
class Mesh(object):
def __init__(self):
self.dict = {}
def dump(self):
print self.dict
def find(self, node):
if self.dict.has_key(node):
return self.dict[node]
return None
def put(self, node):
if self.find(node) is None:
self.dict[node] = node
def testNode():
# Create initial state
nodes = [0 for _ in xrange(3)] # the load of a node
domainload = [0 for _ in xrange(10)]
domains = [0, 1, 2, 1, 2, 1]
domainl = [10, 20, 30, 10, 40, 20]
for i, domain in enumerate(domains):
nodes[domain] += domainl[i]
print nodes
# Calc initial node load
# Create target and end node
start = ANode(nodes, domains)
nodes = list(nodes)
domains = list(domains)
domains[0] = 4
end = ANode(nodes, domains)
print start == end
print 'hahses'
print end.__hash__()
print start.__hash__()
test = {}
test[end] = 'asdf'
test[start] = 'super'
print test[end]
print 'in test'
print start in test
print 'test pq'
q = pq()
q.put(start, 1)
q.put(end, 0)
print q.top_one() == end
q.put(end, 0)
print q.top_one() == end
print q.top_one() == start
print 'test successors'
mesh = Mesh()
s, c = start.get_successors(mesh)
print len(s)
# for i in s:
# print i.domains
if __name__ == '__main__':
# testNode()
for i in xrange(1):
main()
| <filename>control/Control/src/ai/astar.py
from heapq import heapify, heappop
from sets import Set
import sys
class pq(object):
def __init__(self, init=None):
self.inner, self.item_f = [], {}
if not None is init:
self.inner = [[priority, item] for item, priority in enumerate(init)]
heapify(self.inner)
self.item_f = {pi[1]: pi for pi in self.inner}
def __contains__(self, item):
return item in self.item_f
def put(self, item, priority, heap=True):
entry = [priority, item]
self.inner.append(entry)
if heap:
heapify(self.inner)
self.item_f[item] = entry
def top_one(self):
if not len(self.inner): return None
priority, item = heappop(self.inner)
del self.item_f[item]
return item
def re_prioritize(self, items, prioritizer): # =lambda x: x + 1
for item in items:
if not item in self.item_f: continue
entry = self.item_f[item]
entry[0] = prioritizer(entry[0])
def heap(self):
heapify(self.inner)
class ANode(object):
def __init__(self, nodeload, domains, domainload):
self.predecessor = None
self.g = 99999
self.nodeload = nodeload
self.domainload = domainload
self.domains = domains
self.successors = None
self.costs = None
self.hash = None
self.tmp = None
def dump(self):
print self.nodes
print self.domains
def get_successors(self, mesh, target):
# generates _all_ successors
# The heuristic will not go into all of them
# for all migration possibilities create a new state
# state heuristics is determined by f_heuristics function
if not self.successors is None:
print 'REAL'
return self.successors, self.costs
successors = []
costs = []
# each domain to each node except its own
for d in xrange(len(self.domains)):
for node in xrange(len(self.nodeload)):
nodes = list(self.nodeload)
domains = list(self.domains)
if domains[d] == node:
continue
# remove from source node
currnode = domains[d]
nodes[currnode] -= self.domainload[d]
# add to target node
nodes[node] += self.domainload[d]
# Change node of domain
domains[d] = node
# Check overload constraint
if nodes[node] > 100:
continue
cost = 1
# cost += 0.5 * (nodes[node] / 100.0) # increases expansions
if target.domains[d] == self.domains[d]: # decreases expansions
cost += 3
elif target.domains[d] == domains[d]:
cost -= 1
elif nodes[node] == 0:
cost -= 1
elif nodes[node] > 80:
cost += 1
new = ANode(nodes, domains, self.domainload)
# Check constraints
# check if node is in mesh already
test = mesh.find(new)
if test:
new = test
else:
new.predecessor = self
mesh.put(new)
successors.append(new)
costs.append(cost)
self.successors = successors
self.costs = costs
return (successors, costs)
def __eq__(self, another):
if another == None:
return False
return self.domains == another.domains
def __hash__(self):
if self.hash:
return self.hash
h = 0
for i, v in enumerate(self.domains):
h += (i + 1) * v
self.hash = h
return h
def f_heuristics(self, target):
counter = 0
for i in xrange(len(self.domains)):
if target.domains[i] != self.domains[i]:
counter += 1
return counter
class AStar(object):
def __init__(self):
self.openlist = pq()
self.closelist = Set()
def search(self, mesh, start, end):
self.mesh = mesh
self.end = end
# Add start node to the open list
start.g = 0
self.openlist.put(start, 0)
expansions = 0
try:
while True:
# sys.stdout.write('.')
value = self.openlist.top_one()
if value is None:
print 'nothing found again'
print 'expansions %i' % expansions
return
if value == end:
print 'END FOUND.... run backtracking now'
print 'expansions %i' % expansions
return end
if (expansions % 50) == 0:
print 'expansions %i' % expansions
expansions += 1
self.expand(value)
self.closelist.add(value)
except KeyError:
pass
print 'nothing found'
def expand(self, current):
successors, costs = current.get_successors(self.mesh, self.end)
for i in xrange(len(successors)):
successor = successors[i]
if successor in self.closelist:
continue
# Relaxion on costs
new_g = current.g + costs[i]
if successor in self.openlist and new_g >= successor.g:
continue
# Update predecessor for backtracking
successor.predecessor = current
successor.g = new_g
# Update open list
f = new_g + current.f_heuristics(self.end)
if successor in self.openlist:
self.openlist.re_prioritize((successor,), lambda x: f)
else:
self.openlist.put(successor, f, False)
self.openlist.heap()
def main():
# Create initial state
# nodes = 50
# domainc = 50
#
# domains = []
# domainl = []
# nodel = [0 for _ in xrange(nodes)]
#
# for i in xrange(domainc):
# while True:
# import random
# node = random.randint(0, nodes-1)
# load = abs(random.randint(0, 50-1))
# if (nodel[node] + load) > 100:
# continue
# domains.append(node)
# domainl.append(load)
# nodel[node] += load
# break
#
# nodel.append(0)
nodes = 6
domains = [1,2,2,3,4,5]
target = [2,1,2,3,5,4]
domainl = [90,20,00,0,90,20]
nodel = [0 for _ in xrange(nodes)]
for i in xrange(len(domains)):
nodel[domains[i]] += domainl[i]
print domains
print domainl
print nodel
print 'A*'
# Create target state
# target = list(domains)
# target[3] = 1
# Validate overload
targetl = [0 for _ in xrange(nodes)]
for i, v in enumerate(target):
targetl[v] += domainl[i]
for i in targetl:
if i > 100:
print 'INVALID TARGET STATE'
return
# Create target and end node
print list(nodel)
start = ANode(nodel, domains, domainl)
target = ANode(nodel, target, domainl)
# Create a new mesh
mesh = Mesh()
mesh.put(start)
mesh.put(target)
# Run search
s = AStar()
end = s.search(mesh, start, target)
while True:
print end.domains
if end.predecessor is None:
break
end = end.predecessor
def plan(node_count, start, target, domain_load):
print start
print target
# Calulate the nodes load for the start allocation
nodes_load = [0 for _ in xrange(node_count)]
for i in xrange(len(start)):
nodes_load[start[i]] += domain_load[i]
# Create A* nodes for the search
start = ANode(nodes_load, start, domain_load)
target = ANode(nodes_load, target, domain_load)
# Create a new mesh with start and target nodes
mesh = Mesh()
mesh.put(start)
mesh.put(target)
# Run A*
s = AStar()
end = s.search(mesh, start, target)
# Extract migrations from A*
migrations = []
while True:
predecessor = end.predecessor
if end.predecessor == None:
break
for domain, server in enumerate(end.domains):
if predecessor.domains[domain] != server:
migration = (domain, predecessor.domains[domain], server)
migrations.append(migration)
break
end = predecessor
print migrations
return reversed(migrations)
class Mesh(object):
def __init__(self):
self.dict = {}
def dump(self):
print self.dict
def find(self, node):
if self.dict.has_key(node):
return self.dict[node]
return None
def put(self, node):
if self.find(node) is None:
self.dict[node] = node
def testNode():
# Create initial state
nodes = [0 for _ in xrange(3)] # the load of a node
domainload = [0 for _ in xrange(10)]
domains = [0, 1, 2, 1, 2, 1]
domainl = [10, 20, 30, 10, 40, 20]
for i, domain in enumerate(domains):
nodes[domain] += domainl[i]
print nodes
# Calc initial node load
# Create target and end node
start = ANode(nodes, domains)
nodes = list(nodes)
domains = list(domains)
domains[0] = 4
end = ANode(nodes, domains)
print start == end
print 'hahses'
print end.__hash__()
print start.__hash__()
test = {}
test[end] = 'asdf'
test[start] = 'super'
print test[end]
print 'in test'
print start in test
print 'test pq'
q = pq()
q.put(start, 1)
q.put(end, 0)
print q.top_one() == end
q.put(end, 0)
print q.top_one() == end
print q.top_one() == start
print 'test successors'
mesh = Mesh()
s, c = start.get_successors(mesh)
print len(s)
# for i in s:
# print i.domains
if __name__ == '__main__':
# testNode()
for i in xrange(1):
main()
| en | 0.643289 | # =lambda x: x + 1 # generates _all_ successors # The heuristic will not go into all of them # for all migration possibilities create a new state # state heuristics is determined by f_heuristics function # each domain to each node except its own # remove from source node # add to target node # Change node of domain # Check overload constraint # cost += 0.5 * (nodes[node] / 100.0) # increases expansions # decreases expansions # Check constraints # check if node is in mesh already # Add start node to the open list # sys.stdout.write('.') # Relaxion on costs # Update predecessor for backtracking # Update open list # Create initial state # nodes = 50 # domainc = 50 # # domains = [] # domainl = [] # nodel = [0 for _ in xrange(nodes)] # # for i in xrange(domainc): # while True: # import random # node = random.randint(0, nodes-1) # load = abs(random.randint(0, 50-1)) # if (nodel[node] + load) > 100: # continue # domains.append(node) # domainl.append(load) # nodel[node] += load # break # # nodel.append(0) # Create target state # target = list(domains) # target[3] = 1 # Validate overload # Create target and end node # Create a new mesh # Run search # Calulate the nodes load for the start allocation # Create A* nodes for the search # Create a new mesh with start and target nodes # Run A* # Extract migrations from A* # Create initial state # the load of a node # Calc initial node load # Create target and end node # for i in s: # print i.domains # testNode() | 2.724319 | 3 |
BOJ/dp_boj/pascal.py | mrbartrns/swacademy_structure | 0 | 6620713 | # BOJ 15489 파스칼의 삼각형
import sys
sys.stdin = open("../input.txt", "r")
si = sys.stdin.readline
R, C, W = map(int, si().split(" "))
dp = [[0 for _ in range(30)] for _ in range(30)]
dp[0][0] = 1
for i in range(1, 30):
dp[i][0] = 1
for j in range(30):
dp[i][j] = dp[i - 1][j] + dp[i - 1][j - 1]
s = 0
for i in range(W):
for j in range(W):
if j > i:
continue
s += dp[R + i - 1][C + j - 1]
print(s) | # BOJ 15489 파스칼의 삼각형
import sys
sys.stdin = open("../input.txt", "r")
si = sys.stdin.readline
R, C, W = map(int, si().split(" "))
dp = [[0 for _ in range(30)] for _ in range(30)]
dp[0][0] = 1
for i in range(1, 30):
dp[i][0] = 1
for j in range(30):
dp[i][j] = dp[i - 1][j] + dp[i - 1][j - 1]
s = 0
for i in range(W):
for j in range(W):
if j > i:
continue
s += dp[R + i - 1][C + j - 1]
print(s) | ko | 0.999476 | # BOJ 15489 파스칼의 삼각형 | 2.432595 | 2 |
Code-Sleep-Python/Snake/snakegame.py | shardul08/Code-Sleep-Python | 420 | 6620714 | <gh_stars>100-1000
import pygame
import random
import sys
from pygame.locals import *
def collide(x1, x2, y1, y2, wh):
w1 = 20
w2 = wh
h2 = wh
h1 = 20
if x1+w1 > x2 and x1 < x2+w2 and y1+h1 > y2 and y1 < y2+h2:
return True
else:
return False
def die(screen, score):
f = pygame.font.SysFont('Monospace', 30)
t = f.render('YOUR SCORE IS : '+str(score), True, (0, 0, 0))
screen.blit(t, (10, 270))
pygame.display.update()
pygame.time.wait(2000)
sys.exit(0)
xs = [290, 290, 290, 290, 290]
ys = [290, 270, 250, 230, 210]
dirs = 0
score = 0
applepos = (random.randint(0, 590), random.randint(0, 590))
pygame.init()
s = pygame.display.set_mode((600, 600))
pygame.display.set_caption('SNAKE')
appleimage = pygame.Surface((10, 10))
appleimage.fill((0, 255, 0))
img = pygame.Surface((20, 20))
img.fill((255, 0, 0))
f = pygame.font.SysFont('Monospace', 20)
clock = pygame.time.Clock()
while True:
clock.tick(10)
for e in pygame.event.get():
if e.type == QUIT:
sys.exit(0)
elif e.type == KEYDOWN:
if e.key == K_UP and dirs != 0:
dirs = 2
elif e.key == K_DOWN and dirs != 2:
dirs = 0
elif e.key == K_LEFT and dirs != 1:
dirs = 3
elif e.key == K_RIGHT and dirs != 3:
dirs = 1
i = len(xs)-1
while i >= 2:
if collide(xs[0], xs[i], ys[0], ys[i], 20):
die(s, score)
i -= 1
if collide(xs[0], applepos[0], ys[0], applepos[1], 10):
score += 1
xs.append(700)
ys.append(700)
applepos = (random.randint(0, 590), random.randint(0, 590))
if xs[0] < 0 or xs[0] > 580 or ys[0] < 0 or ys[0] > 580:
die(s, score)
i = len(xs)-1
while i >= 1:
xs[i] = xs[i-1]
ys[i] = ys[i-1]
i -= 1
if dirs == 0:
ys[0] += 20
elif dirs == 1:
xs[0] += 20
elif dirs == 2:
ys[0] -= 20
elif dirs == 3:
xs[0] -= 20
s.fill((255, 255, 255))
for i in range(0, len(xs)):
s.blit(img, (xs[i], ys[i]))
s.blit(appleimage, applepos)
t = f.render(str(score), True, (0, 0, 0))
s.blit(t, (10, 10))
pygame.display.update()
| import pygame
import random
import sys
from pygame.locals import *
def collide(x1, x2, y1, y2, wh):
w1 = 20
w2 = wh
h2 = wh
h1 = 20
if x1+w1 > x2 and x1 < x2+w2 and y1+h1 > y2 and y1 < y2+h2:
return True
else:
return False
def die(screen, score):
f = pygame.font.SysFont('Monospace', 30)
t = f.render('YOUR SCORE IS : '+str(score), True, (0, 0, 0))
screen.blit(t, (10, 270))
pygame.display.update()
pygame.time.wait(2000)
sys.exit(0)
xs = [290, 290, 290, 290, 290]
ys = [290, 270, 250, 230, 210]
dirs = 0
score = 0
applepos = (random.randint(0, 590), random.randint(0, 590))
pygame.init()
s = pygame.display.set_mode((600, 600))
pygame.display.set_caption('SNAKE')
appleimage = pygame.Surface((10, 10))
appleimage.fill((0, 255, 0))
img = pygame.Surface((20, 20))
img.fill((255, 0, 0))
f = pygame.font.SysFont('Monospace', 20)
clock = pygame.time.Clock()
while True:
clock.tick(10)
for e in pygame.event.get():
if e.type == QUIT:
sys.exit(0)
elif e.type == KEYDOWN:
if e.key == K_UP and dirs != 0:
dirs = 2
elif e.key == K_DOWN and dirs != 2:
dirs = 0
elif e.key == K_LEFT and dirs != 1:
dirs = 3
elif e.key == K_RIGHT and dirs != 3:
dirs = 1
i = len(xs)-1
while i >= 2:
if collide(xs[0], xs[i], ys[0], ys[i], 20):
die(s, score)
i -= 1
if collide(xs[0], applepos[0], ys[0], applepos[1], 10):
score += 1
xs.append(700)
ys.append(700)
applepos = (random.randint(0, 590), random.randint(0, 590))
if xs[0] < 0 or xs[0] > 580 or ys[0] < 0 or ys[0] > 580:
die(s, score)
i = len(xs)-1
while i >= 1:
xs[i] = xs[i-1]
ys[i] = ys[i-1]
i -= 1
if dirs == 0:
ys[0] += 20
elif dirs == 1:
xs[0] += 20
elif dirs == 2:
ys[0] -= 20
elif dirs == 3:
xs[0] -= 20
s.fill((255, 255, 255))
for i in range(0, len(xs)):
s.blit(img, (xs[i], ys[i]))
s.blit(appleimage, applepos)
t = f.render(str(score), True, (0, 0, 0))
s.blit(t, (10, 10))
pygame.display.update() | none | 1 | 3.282722 | 3 | |
util/db/kv/__init__.py | discord-math/bot | 3 | 6620715 | """
A simple key-value store that associates to each module name and a string key a
piece of JSON. If a module needs more efficient or structured storage it should
probably have its own DB handling code.
"""
from __future__ import annotations
import asyncio
import asyncpg
import contextlib
import json
import weakref
from typing import Optional, Dict, Iterator, AsyncIterator, Tuple, Set, Sequence, Union, Any, cast
import util.asyncio
import util.db as util_db
import util.frozen_list
import util.frozen_dict
schema_initialized = False
async def init_schema() -> None:
global schema_initialized
if not schema_initialized:
await util_db.init_for(__name__, """
CREATE TABLE kv
( namespace TEXT NOT NULL
, key TEXT ARRAY NOT NULL
, value TEXT NOT NULL
, PRIMARY KEY(namespace, key) );
CREATE INDEX kv_namespace_index
ON kv USING BTREE(namespace);
""")
schema_initialized = True
def json_freeze(value: Optional[Any]) -> Optional[Any]:
if isinstance(value, list):
return util.frozen_list.FrozenList(
json_freeze(v) for v in value)
elif isinstance(value, dict):
return util.frozen_dict.FrozenDict(
(k, json_freeze(v)) for k, v in value.items())
else:
return value
class ThawingJSONEncoder(json.JSONEncoder):
__slots__ = ()
def default(self, obj: Any) -> Any:
if isinstance(obj, util.frozen_list.FrozenList):
return obj.copy()
elif isinstance(obj, util.frozen_dict.FrozenDict):
return obj.copy()
else:
return super().default(obj)
def json_encode(value: Any) -> Optional[str]:
return json.dumps(value, cls=ThawingJSONEncoder) if value is not None else None
def json_decode(text: Optional[str]) -> Any:
return json_freeze(json.loads(text)) if text is not None else None
@contextlib.asynccontextmanager
async def connect() -> AsyncIterator[asyncpg.Connection]:
await init_schema()
async with util_db.connection() as conn:
yield conn
async def get_raw_value(namespace: Sequence[str], key: Sequence[str]) -> Optional[str]:
async with connect() as conn:
val = await conn.fetchval("""
SELECT value FROM kv WHERE namespace = $1 AND key = $2
""", namespace, tuple(key))
return cast(Optional[str], val)
async def get_raw_key_values(namespace: str) -> Dict[Tuple[str, ...], str]:
async with connect() as conn:
rows = await conn.fetch("""
SELECT key, value FROM kv WHERE namespace = $1
""", namespace)
return {tuple(row["key"]): row["value"] for row in rows}
async def get_raw_glob(namespace: str, length: int, parts: Dict[int, str]) -> Dict[Tuple[str, ...], str]:
    """Return all pairs in *namespace* whose key has exactly *length*
    components and matches the fixed components in *parts*.

    *parts* maps 1-based key positions to required values; unlisted
    positions are wildcards. Positions are interpolated directly into
    the SQL text — callers must pass int keys (as the annotation says),
    since a non-int position would break the query.
    """
    async with connect() as conn:
        # Placeholders start at $3: $1 is the namespace, $2 the length.
        arg = 2
        clauses = []
        for k in parts:
            arg += 1
            clauses.append("key[{}] = ${}".format(k, arg))
        clause = " AND ".join(clauses) if clauses else "TRUE"
        # parts keys and parts.values() iterate in the same (insertion)
        # order, so each clause lines up with its positional argument.
        rows = await conn.fetch("""
            SELECT key, value FROM kv
            WHERE namespace = $1 AND ARRAY_LENGTH(key, 1) = $2 AND ({})
            """.format(clause), namespace, length, *parts.values())
        return {tuple(row["key"]): row["value"] for row in rows}
async def get_namespaces() -> Sequence[str]:
    """Return the distinct namespaces that currently have any rows."""
    async with connect() as conn:
        rows = await conn.fetch("""
            SELECT DISTINCT namespace FROM kv
            """)
        return [row["namespace"] for row in rows]
async def set_raw_value(namespace: str, key: Sequence[str], value: Optional[str], log_value: bool = True) -> None:
    """Upsert raw JSON text under (namespace, key); value=None deletes.

    Note: log_value defaults to True here but to False in
    set_raw_values — presumably intentional for single writes, TODO
    confirm.
    """
    async with connect() as conn:
        if value is None:
            await conn.execute("""
                DELETE FROM kv
                WHERE namespace = $1 AND key = $2
                """, namespace, tuple(key))
        else:
            # log_data appears to be an extension of the project's
            # connection wrapper; {1, 2} presumably names the positional
            # args that are safe to log when values must be redacted —
            # TODO confirm against util.db.
            await conn.execute("""
                INSERT INTO kv (namespace, key, value)
                VALUES ($1, $2, $3)
                ON CONFLICT (namespace, key) DO UPDATE SET value = EXCLUDED.value
                """, namespace, tuple(key), value, log_data=True if log_value else {1, 2})
async def set_raw_values(namespace: str, dict: Dict[Sequence[str], Optional[str]], log_value: bool = False) -> None:
    """Apply a batch of upserts/deletes atomically in one transaction.

    Entries with value None are deleted; the rest are upserted.
    NOTE(review): the parameter name `dict` shadows the builtin; renaming
    it would break keyword callers, so it is only flagged here.
    """
    removals = [(namespace, tuple(key)) for key, value in dict.items() if value is None]
    updates = [(namespace, tuple(key), value) for key, value in dict.items() if value is not None]
    async with connect() as conn:
        async with conn.transaction():
            if removals:
                await conn.executemany("""
                    DELETE FROM kv
                    WHERE namespace = $1 AND key = $2
                    """, removals)
            if updates:
                # log_data semantics as in set_raw_value: {1, 2} presumably
                # redacts the value argument from logs — TODO confirm.
                await conn.executemany("""
                    INSERT INTO kv (namespace, key, value)
                    VALUES ($1, $2, $3)
                    ON CONFLICT (namespace, key) DO UPDATE SET value = EXCLUDED.value
                    """, updates, log_data=True if log_value else {1, 2})
class ConfigStore(Dict[Tuple[str, ...], str]):
    """Shared in-memory mirror of one namespace's rows.

    The ``ready`` event is set once the initial DB load has populated
    the dict; later loaders wait on it instead of re-reading the DB.
    ``__weakref__`` is kept in __slots__ so instances can live in a
    WeakValueDictionary.
    """
    __slots__ = ("__weakref__", "ready")
    ready: asyncio.Event

    def __init__(self, *args: Any, **kwargs: Any):
        self.ready = asyncio.Event()
        super().__init__(*args, **kwargs)
config_stores: weakref.WeakValueDictionary[str, ConfigStore]
config_stores = weakref.WeakValueDictionary()
KeyType = Union[str, int, Sequence[Union[str, int]]]
def encode_key(key: KeyType) -> Tuple[str, ...]:
    """Normalise *key* into the canonical tuple-of-strings form.

    A bare str or int becomes a one-element tuple; any sequence has each
    element stringified in order.
    """
    parts = (key,) if isinstance(key, (str, int)) else key
    return tuple(map(str, parts))
class Config:
    """
    Encapsulates access to the key-value store for a fixed module
    (namespace). The in-memory ConfigStore is shared across Config
    objects for the same namespace.

    __iter__ and __getitem__/__getattr__ read from the in-memory copy.
    __setitem__/__setattr__ update the in-memory copy immediately and
    mark the key dirty. Awaiting the Config commits the keys that were
    modified *by this object* to the DB (their values may have since
    been overwritten by other Config objects sharing the store).
    """
    __slots__ = "_namespace", "_log_value", "_store", "_dirty"
    _namespace: str  # namespace all keys live under
    _log_value: bool  # whether committed values may appear in logs
    _store: ConfigStore  # shared in-memory mirror for the namespace
    _dirty: Set[Tuple[str, ...]]  # keys modified via this object, not yet committed
    def __init__(self, namespace: str, log_value: bool, store: ConfigStore):
        self._namespace = namespace
        self._log_value = log_value
        self._store = store
        self._dirty = set()
    def __iter__(self) -> Iterator[Tuple[str, ...]]:
        """Iterate over all encoded keys currently in the namespace."""
        return self._store.__iter__()
    def __getitem__(self, key: KeyType) -> Any:
        """Return the decoded value for *key*, or None if absent."""
        return json_decode(self._store.get(encode_key(key)))
    def __setitem__(self, key: KeyType, value: Any) -> None:
        """Set (or, for value None, delete) *key* in memory and mark it dirty."""
        ek = encode_key(key)
        ev = json_encode(value)
        if ev is None:
            self._store.pop(ek, None)
        else:
            self._store[ek] = ev
        self._dirty.add(ek)
    @util.asyncio.__await__
    async def __await__(self) -> None:
        """Commit this object's dirty keys to the DB.

        On failure the keys are re-marked dirty so a later await retries.
        """
        dirty = self._dirty
        self._dirty = set()
        try:
            # Bug fix: forward the log_value flag given at construction;
            # it was previously stored but never passed on, so
            # set_raw_values always used its False default.
            await set_raw_values(self._namespace,
                                 {key: self._store.get(key) for key in dirty},
                                 log_value=self._log_value)
        except BaseException:
            self._dirty.update(dirty)
            raise
    def __getattr__(self, key: str) -> Any:
        # Underscore names are slots/internal; returning None rather than
        # raising keeps the slot-miss path quiet.
        if key.startswith("_"):
            return None
        return self[key]
    def __setattr__(self, key: str, value: Any) -> None:
        if key.startswith("_"):
            return super().__setattr__(key, value)
        self[key] = value
async def load(namespace: str, log_value: bool = False) -> Config:
    """Return a Config for *namespace*, loading its rows on first use.

    The first caller for a namespace creates the shared ConfigStore,
    fills it from the DB and sets its ready event; concurrent callers
    wait on that event instead of issuing a second load.
    """
    store = config_stores.get(namespace)
    if store is None:
        store = ConfigStore()
        config_stores[namespace] = store
        try:
            store.update(await get_raw_key_values(namespace))
        except BaseException:
            # Bug fix: drop the half-initialised store so a later call can
            # retry; otherwise `ready` is never set and every future load
            # of this namespace would block forever.
            config_stores.pop(namespace, None)
            raise
        store.ready.set()
    await store.ready.wait()
    return Config(namespace, log_value, store)
| """
A simple key-value store that associates to each module name and a string key a
piece of JSON. If a module needs more efficient or structured storage it should
probably have its own DB handling code.
"""
from __future__ import annotations
import asyncio
import asyncpg
import contextlib
import json
import weakref
from typing import Optional, Dict, Iterator, AsyncIterator, Tuple, Set, Sequence, Union, Any, cast
import util.asyncio
import util.db as util_db
import util.frozen_list
import util.frozen_dict
schema_initialized = False
async def init_schema() -> None:
global schema_initialized
if not schema_initialized:
await util_db.init_for(__name__, """
CREATE TABLE kv
( namespace TEXT NOT NULL
, key TEXT ARRAY NOT NULL
, value TEXT NOT NULL
, PRIMARY KEY(namespace, key) );
CREATE INDEX kv_namespace_index
ON kv USING BTREE(namespace);
""")
schema_initialized = True
def json_freeze(value: Optional[Any]) -> Optional[Any]:
if isinstance(value, list):
return util.frozen_list.FrozenList(
json_freeze(v) for v in value)
elif isinstance(value, dict):
return util.frozen_dict.FrozenDict(
(k, json_freeze(v)) for k, v in value.items())
else:
return value
class ThawingJSONEncoder(json.JSONEncoder):
__slots__ = ()
def default(self, obj: Any) -> Any:
if isinstance(obj, util.frozen_list.FrozenList):
return obj.copy()
elif isinstance(obj, util.frozen_dict.FrozenDict):
return obj.copy()
else:
return super().default(obj)
def json_encode(value: Any) -> Optional[str]:
return json.dumps(value, cls=ThawingJSONEncoder) if value is not None else None
def json_decode(text: Optional[str]) -> Any:
return json_freeze(json.loads(text)) if text is not None else None
@contextlib.asynccontextmanager
async def connect() -> AsyncIterator[asyncpg.Connection]:
await init_schema()
async with util_db.connection() as conn:
yield conn
async def get_raw_value(namespace: Sequence[str], key: Sequence[str]) -> Optional[str]:
async with connect() as conn:
val = await conn.fetchval("""
SELECT value FROM kv WHERE namespace = $1 AND key = $2
""", namespace, tuple(key))
return cast(Optional[str], val)
async def get_raw_key_values(namespace: str) -> Dict[Tuple[str, ...], str]:
async with connect() as conn:
rows = await conn.fetch("""
SELECT key, value FROM kv WHERE namespace = $1
""", namespace)
return {tuple(row["key"]): row["value"] for row in rows}
async def get_raw_glob(namespace: str, length: int, parts: Dict[int, str]) -> Dict[Tuple[str, ...], str]:
async with connect() as conn:
arg = 2
clauses = []
for k in parts:
arg += 1
clauses.append("key[{}] = ${}".format(k, arg))
clause = " AND ".join(clauses) if clauses else "TRUE"
rows = await conn.fetch("""
SELECT key, value FROM kv
WHERE namespace = $1 AND ARRAY_LENGTH(key, 1) = $2 AND ({})
""".format(clause), namespace, length, *parts.values())
return {tuple(row["key"]): row["value"] for row in rows}
async def get_namespaces() -> Sequence[str]:
async with connect() as conn:
rows = await conn.fetch("""
SELECT DISTINCT namespace FROM kv
""")
return [row["namespace"] for row in rows]
async def set_raw_value(namespace: str, key: Sequence[str], value: Optional[str], log_value: bool = True) -> None:
async with connect() as conn:
if value is None:
await conn.execute("""
DELETE FROM kv
WHERE namespace = $1 AND key = $2
""", namespace, tuple(key))
else:
await conn.execute("""
INSERT INTO kv (namespace, key, value)
VALUES ($1, $2, $3)
ON CONFLICT (namespace, key) DO UPDATE SET value = EXCLUDED.value
""", namespace, tuple(key), value, log_data=True if log_value else {1, 2})
async def set_raw_values(namespace: str, dict: Dict[Sequence[str], Optional[str]], log_value: bool = False) -> None:
removals = [(namespace, tuple(key)) for key, value in dict.items() if value is None]
updates = [(namespace, tuple(key), value) for key, value in dict.items() if value is not None]
async with connect() as conn:
async with conn.transaction():
if removals:
await conn.executemany("""
DELETE FROM kv
WHERE namespace = $1 AND key = $2
""", removals)
if updates:
await conn.executemany("""
INSERT INTO kv (namespace, key, value)
VALUES ($1, $2, $3)
ON CONFLICT (namespace, key) DO UPDATE SET value = EXCLUDED.value
""", updates, log_data=True if log_value else {1, 2})
class ConfigStore(Dict[Tuple[str, ...], str]):
__slots__ = ("__weakref__", "ready")
ready: asyncio.Event
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self.ready = asyncio.Event()
config_stores: weakref.WeakValueDictionary[str, ConfigStore]
config_stores = weakref.WeakValueDictionary()
KeyType = Union[str, int, Sequence[Union[str, int]]]
def encode_key(key: KeyType) -> Tuple[str, ...]:
if isinstance(key, (str, int)):
key = (key,)
return tuple(str(k) for k in key)
class Config:
"""
This object encapsulates access to the key-value store for a fixed module. Upon construction we load all the pairs
from the DB into memory. The in-memory copy is shared across Config objects for the same module.
__iter__ and __getitem__/__getattr__ will read from this in-memory copy.
__setitem__/__setattr__ will update the in-memory copy. awaiting will commit the keys that were modified by this
Config object to the DB (the values may have since been overwritten by other Config objects)
"""
__slots__ = "_namespace", "_log_value", "_store", "_dirty"
_namespace: str
_log_value: bool
_store: ConfigStore
_dirty: Set[Tuple[str, ...]]
def __init__(self, namespace: str, log_value: bool, store: ConfigStore):
self._namespace = namespace
self._log_value = log_value
self._store = store
self._dirty = set()
def __iter__(self) -> Iterator[Tuple[str, ...]]:
return self._store.__iter__()
def __getitem__(self, key: KeyType) -> Any:
return json_decode(self._store.get(encode_key(key)))
def __setitem__(self, key: KeyType, value: Any) -> None:
ek = encode_key(key)
ev = json_encode(value)
if ev is None:
self._store.pop(ek, None)
else:
self._store[ek] = ev
self._dirty.add(ek)
@util.asyncio.__await__
async def __await__(self) -> None:
dirty = self._dirty
self._dirty = set()
try:
await set_raw_values(self._namespace, {key: self._store.get(key) for key in dirty})
except:
self._dirty.update(dirty)
raise
def __getattr__(self, key: str) -> Any:
if key.startswith("_"):
return None
return self[key]
def __setattr__(self, key: str, value: Any) -> None:
if key.startswith("_"):
return super().__setattr__(key, value)
self[key] = value
async def load(namespace: str, log_value: bool = False) -> Config:
store = config_stores.get(namespace)
if store is None:
store = ConfigStore()
config_stores[namespace] = store
store.update(await get_raw_key_values(namespace))
store.ready.set()
await store.ready.wait()
return Config(namespace, log_value, store)
| en | 0.579715 | A simple key-value store that associates to each module name and a string key a piece of JSON. If a module needs more efficient or structured storage it should probably have its own DB handling code. CREATE TABLE kv ( namespace TEXT NOT NULL , key TEXT ARRAY NOT NULL , value TEXT NOT NULL , PRIMARY KEY(namespace, key) ); CREATE INDEX kv_namespace_index ON kv USING BTREE(namespace); SELECT value FROM kv WHERE namespace = $1 AND key = $2 SELECT key, value FROM kv WHERE namespace = $1 SELECT key, value FROM kv WHERE namespace = $1 AND ARRAY_LENGTH(key, 1) = $2 AND ({}) SELECT DISTINCT namespace FROM kv DELETE FROM kv WHERE namespace = $1 AND key = $2 INSERT INTO kv (namespace, key, value) VALUES ($1, $2, $3) ON CONFLICT (namespace, key) DO UPDATE SET value = EXCLUDED.value DELETE FROM kv WHERE namespace = $1 AND key = $2 INSERT INTO kv (namespace, key, value) VALUES ($1, $2, $3) ON CONFLICT (namespace, key) DO UPDATE SET value = EXCLUDED.value This object encapsulates access to the key-value store for a fixed module. Upon construction we load all the pairs from the DB into memory. The in-memory copy is shared across Config objects for the same module. __iter__ and __getitem__/__getattr__ will read from this in-memory copy. __setitem__/__setattr__ will update the in-memory copy. awaiting will commit the keys that were modified by this Config object to the DB (the values may have since been overwritten by other Config objects) | 2.546118 | 3 |
ex16.py | wellingtonn96/Revisao_LP2 | 0 | 6620716 | valores = {1: 'Pedro',
2: 'Jose',
3: 'Pedro',
4: 'Maria',
5: 'André'}
maior = -1
menor = 10000
for i in valores:
if i > maior:
maior = i
if i < menor:
menor = i
print('maior valor: %d' % (maior))
print('menor valor: %d' % (menor))
| valores = {1: 'Pedro',
2: 'Jose',
3: 'Pedro',
4: 'Maria',
5: 'André'}
maior = -1
menor = 10000
for i in valores:
if i > maior:
maior = i
if i < menor:
menor = i
print('maior valor: %d' % (maior))
print('menor valor: %d' % (menor))
| none | 1 | 3.832866 | 4 | |
wack/cli.py | jackwardell/Wack | 0 | 6620717 | import click
import setuptools
from .app import Application
SETUP_PY = "setup.py"
PRE_COMMIT = ".pre-commit-config.yaml"
LICENSE = "LICENSE"
TRAVIS = ".travis.yml"
GITIGNORE = ".gitignore"
UPLOAD = "upload.sh"
@click.group()
@click.pass_context
def cli(context):
    # Root command group. An ad-hoc empty class instance serves as the
    # click context object; the shared Application lives on it so every
    # subcommand can reach it via context.obj.app.
    context.ensure_object(type("Context", (), {}))
    context.obj.app = Application()
@cli.command()
@click.pass_context
def install(context):
    """install all of my most used packages"""
    # Delegates entirely to Application.install_packages(); presumably
    # idempotent — confirm before re-running on an existing env.
    context.obj.app.install_packages()
    click.echo("Packages installed")
@cli.group()
def make():
    """make files from templates"""
    # Container group only; the actual generators are registered below
    # via @make.command().
    pass
@make.command("setup.py")
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def setup_py(context, force, file=SETUP_PY):
    """make a `setup.py` file to allow for `pip install -e .`"""
    click.echo(f"Making: {file}")
    author = context.obj.app.get_author()
    packages = setuptools.find_packages()
    # Bug fix: fail cleanly instead of an IndexError when no packages
    # are discovered in the working directory.
    if not packages:
        raise click.ClickException("No packages found; cannot generate setup.py")
    # Bug fix: the original popped the package *before* echoing the list,
    # so "Found:" silently omitted the chosen package. Pick the same
    # element pop() would have returned (the last one) without mutating.
    package_name = packages[-1]
    click.echo(f"Found: {packages}. Assuming: {package_name}")
    click.echo(f"Assuming github is: {author.github}")
    done = context.obj.app.make_template(
        file,
        package_name=package_name,
        author=author.name,
        author_email=author.email,
        author_github=author.github,
        force=force,
    )
    click.echo(
        f"Made: {file}" if done else f"{file} already exists, use --force to overwrite"
    )
@make.command()
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def pre_commit(context, force, file=PRE_COMMIT):
    """make a `.pre-commit-config.yaml` file to allow for pre-commit"""
    # make_template returns falsy when the file already exists and
    # --force was not given.
    click.echo(f"Making: {file}")
    done = context.obj.app.make_template(file, force=force)
    click.echo(
        f"Made: {file}" if done else f"{file} already exists, use --force to overwrite"
    )
@make.command()
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def license(context, force, file=LICENSE):
    """make a `LICENSE` file with MIT license"""
    # NOTE(review): the function name shadows the builtin `license`, but
    # renaming it would change the generated CLI command name.
    click.echo(f"Making: {file}")
    author = context.obj.app.get_author()
    # The author's name is substituted into the MIT copyright line.
    done = context.obj.app.make_template(file, author=author.name, force=force)
    click.echo(
        f"Made: {file}" if done else f"{file} already exists, use --force to overwrite"
    )
@make.command()
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def travis(context, force, file=TRAVIS):
    """make a `.travis.yml` file for pypi auto publishing packages"""
    click.echo(f"Making: {file}")
    done = context.obj.app.make_template(file, force=force)
    click.echo(
        f"Made: {file}" if done else f"{file} already exists, use --force to overwrite"
    )
@make.command()
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def gitignore(context, force, file=GITIGNORE):
    """make a `.gitignore` file with pycharm basics"""
    click.echo(f"Making: {file}")
    done = context.obj.app.make_template(file, force=force)
    click.echo(
        f"Made: {file}" if done else f"{file} already exists, use --force to overwrite"
    )
@make.command()
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def upload(context, force, file=UPLOAD):
    """make an `upload.sh` script from the template"""
    # Bug fix: the help docstring was copy-pasted from the gitignore
    # command ("make a `.gitignore` file with pycharm basics"), which is
    # what users saw in `--help` for this command.
    click.echo(f"Making: {file}")
    done = context.obj.app.make_template(file, force=force)
    click.echo(
        f"Made: {file}" if done else f"{file} already exists, use --force to overwrite"
    )
@make.command()
@click.argument("name")
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def package(context, name, force):
    """make a `__init__.py` file in a package"""
    # Presumably creates the package directory *name* with an
    # __init__.py inside — confirm in Application.make_package.
    click.echo(f"Making: {name}")
    done = context.obj.app.make_package(name, force=force)
    click.echo(
        f"Made: {name}" if done else f"{name} already exists, use --force to overwrite"
    )
if __name__ == "__main__":
cli()
| import click
import setuptools
from .app import Application
SETUP_PY = "setup.py"
PRE_COMMIT = ".pre-commit-config.yaml"
LICENSE = "LICENSE"
TRAVIS = ".travis.yml"
GITIGNORE = ".gitignore"
UPLOAD = "upload.sh"
@click.group()
@click.pass_context
def cli(context):
context.ensure_object(type("Context", (), {}))
context.obj.app = Application()
@cli.command()
@click.pass_context
def install(context):
"""install all of my most used packages"""
context.obj.app.install_packages()
click.echo("Packages installed")
@cli.group()
def make():
"""make files from templates"""
pass
@make.command("setup.py")
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def setup_py(context, force, file=SETUP_PY):
"""make a `setup.py` file to allow for `pip install -e .`"""
click.echo(f"Making: {file}")
author = context.obj.app.get_author()
packages = setuptools.find_packages()
package_name = packages.pop()
click.echo(f"Found: {packages}. Assuming: {package_name}")
click.echo(f"Assuming github is: {author.github}")
done = context.obj.app.make_template(
file,
package_name=package_name,
author=author.name,
author_email=author.email,
author_github=author.github,
force=force,
)
click.echo(
f"Made: {file}" if done else f"{file} already exists, use --force to overwrite"
)
@make.command()
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def pre_commit(context, force, file=PRE_COMMIT):
"""make a `.pre-commit-config.yaml` file to allow for pre-commit"""
click.echo(f"Making: {file}")
done = context.obj.app.make_template(file, force=force)
click.echo(
f"Made: {file}" if done else f"{file} already exists, use --force to overwrite"
)
@make.command()
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def license(context, force, file=LICENSE):
"""make a `LICENSE` file with MIT license"""
click.echo(f"Making: {file}")
author = context.obj.app.get_author()
done = context.obj.app.make_template(file, author=author.name, force=force)
click.echo(
f"Made: {file}" if done else f"{file} already exists, use --force to overwrite"
)
@make.command()
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def travis(context, force, file=TRAVIS):
"""make a `.travis.yml` file for pypi auto publishing packages"""
click.echo(f"Making: {file}")
done = context.obj.app.make_template(file, force=force)
click.echo(
f"Made: {file}" if done else f"{file} already exists, use --force to overwrite"
)
@make.command()
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def gitignore(context, force, file=GITIGNORE):
"""make a `.gitignore` file with pycharm basics"""
click.echo(f"Making: {file}")
done = context.obj.app.make_template(file, force=force)
click.echo(
f"Made: {file}" if done else f"{file} already exists, use --force to overwrite"
)
@make.command()
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def upload(context, force, file=UPLOAD):
"""make a `.gitignore` file with pycharm basics"""
click.echo(f"Making: {file}")
done = context.obj.app.make_template(file, force=force)
click.echo(
f"Made: {file}" if done else f"{file} already exists, use --force to overwrite"
)
@make.command()
@click.argument("name")
@click.pass_context
@click.option("--force", "-f", required=False, default=False, is_flag=True)
def package(context, name, force):
"""make a `__init__.py` file in a package"""
click.echo(f"Making: {name}")
done = context.obj.app.make_package(name, force=force)
click.echo(
f"Made: {name}" if done else f"{name} already exists, use --force to overwrite"
)
if __name__ == "__main__":
cli()
| en | 0.71736 | install all of my most used packages make files from templates make a `setup.py` file to allow for `pip install -e .` make a `.pre-commit-config.yaml` file to allow for pre-commit make a `LICENSE` file with MIT license make a `.travis.yml` file for pypi auto publishing packages make a `.gitignore` file with pycharm basics make a `.gitignore` file with pycharm basics make a `__init__.py` file in a package | 2.19051 | 2 |
blog_blog/admin.py | hossshakiba/Django-Personal-Website-Blog | 2 | 6620718 | from django.contrib import admin
from .models import Post
from tinymce.widgets import TinyMCE
from django.db import models
from blog_blog.models import Comment
from ckeditor.widgets import CKEditorWidget
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for blog posts: list columns, filters, search,
    ordering, and CKEditor for all text fields."""
    model = Post
    list_display = ('title', 'publish', 'status')
    list_filter = ('created', 'publish', 'status', 'author')
    search_fields = ('title', 'body')
    ordering = ('status', 'publish')
    # Render every TextField with CKEditor instead of a plain textarea.
    # NOTE(review): TinyMCE is imported at module level but unused here.
    formfield_overrides = {
        models.TextField: {'widget': CKEditorWidget()},
    }
admin.site.register(Post, PostAdmin)
admin.site.register(Comment)
| from django.contrib import admin
from .models import Post
from tinymce.widgets import TinyMCE
from django.db import models
from blog_blog.models import Comment
from ckeditor.widgets import CKEditorWidget
class PostAdmin(admin.ModelAdmin):
model = Post
list_display = ('title', 'publish', 'status')
list_filter = ('created', 'publish', 'status', 'author')
search_fields = ('title', 'body')
ordering = ('status', 'publish')
formfield_overrides = {
models.TextField: {'widget': CKEditorWidget()},
}
admin.site.register(Post, PostAdmin)
admin.site.register(Comment)
| none | 1 | 1.723994 | 2 | |
app/eduquate/settings/third_party/debug_toolbar.py | sashis/eduquate | 0 | 6620719 | <gh_stars>0
from eduquate.settings.helpers import insert_after, insert_before
from eduquate.settings.django import INSTALLED_APPS, MIDDLEWARE, DEBUG
if DEBUG:
    # Enable django-debug-toolbar only for development builds: app after
    # staticfiles, middleware before GZip (per the toolbar's install docs).
    insert_after(INSTALLED_APPS, 'django.contrib.staticfiles',
            'debug_toolbar')
    # NOTE(review): STATIC_URL is only defined when DEBUG is true in this
    # module — confirm production settings define it elsewhere.
    STATIC_URL = '/static/'
    insert_before(MIDDLEWARE, 'django.middleware.gzip.GZipMiddleware',
            'debug_toolbar.middleware.DebugToolbarMiddleware')
DEBUG_TOOLBAR_CONFIG = {
    # builtins.bool of any request object is truthy, so the toolbar is
    # shown for every request (the whole block is DEBUG-gated above).
    'SHOW_TOOLBAR_CALLBACK': 'builtins.bool' # always true
}
from eduquate.settings.django import INSTALLED_APPS, MIDDLEWARE, DEBUG
if DEBUG:
insert_after(INSTALLED_APPS, 'django.contrib.staticfiles',
'debug_toolbar')
STATIC_URL = '/static/'
insert_before(MIDDLEWARE, 'django.middleware.gzip.GZipMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware')
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': 'builtins.bool' # always true
} | en | 0.596808 | # always true | 1.877892 | 2 |
uberlearner/main/management/commands/insert_fake_data.py | Uberlearner/uberlearner | 1 | 6620720 | from allauth.account.models import EmailAddress
from django.core.management.base import BaseCommand
from optparse import make_option
from django.template.loader import render_to_string
from courses.models import *
from django.contrib.auth.models import User
class Command(BaseCommand):
    """Seed the DB with fake instructors, courses and pages for development.

    Python 2 / old-Django style: uses optparse option_list and xrange.
    Not idempotent — re-running fails on duplicate usernames.
    """
    option_list = BaseCommand.option_list + (
        make_option('--courses',
            action='store',
            dest='courses',
            default=10,
            help='Adds the specified amount of fake data to the database'
        ),
    )
    args = '<number of entries to be created>'
    help = 'Creates fake data for the Uberlearner project'
    def _add_fake_data_to_courses(self, num):
        """Create five instructor accounts and *num* courses assigned
        round-robin among them; every other course is public."""
        # create a pool of instructors
        instructor_data = [
            {'first_name': 'first', 'last_name': 'user', 'username': 'first-user', 'email': '<EMAIL>'},
            {'first_name': 'second', 'last_name': 'user', 'username': 'second-user', 'email': '<EMAIL>'},
            {'first_name': 'third', 'last_name': 'user', 'username': 'third-user', 'email': '<EMAIL>'},
            {'first_name': 'fourth', 'last_name': 'user', 'username': 'fourth-user', 'email': '<EMAIL>'},
            {'first_name': 'fifth', 'last_name': 'user', 'username': 'fifth-user', 'email': '<EMAIL>'},
        ]
        instructors = []
        for instructor_datum in instructor_data:
            user = User.objects.create_user(instructor_datum['username'], email=instructor_datum['email'], password="<PASSWORD>")
            # Mark the address verified/primary so allauth won't require
            # email confirmation for these dev accounts.
            email_address = EmailAddress(user=user, email=instructor_datum['email'], verified=True, primary=True)
            user.first_name = instructor_datum['first_name']
            user.last_name = instructor_datum['last_name']
            user.save()
            email_address.save()
            instructors.append(user)
        for idx in xrange(num):
            course = Course(
                instructor=instructors[idx % len(instructor_data)],
                title='sample title ' + str(idx),
                slug='sample-title-' + str(idx),
                description=('sample description ' + str(idx) + '\t') * 25,
                is_public = True if idx % 2 == 0 else False,
                popularity=0
            )
            course.save()
            self._generate_course_pages(course)
    def _generate_course_pages(self, course):
        """Attach seven templated pages to *course*."""
        for idx in xrange(7):
            Page(
                course=course,
                title='sample page ' + str(idx),
                html=render_to_string('insert_fake_data/page.html', {'number': idx}),
                estimated_effort='123',
                summary="In this section, we will take an in-depth look into foo and bar"
            ).save()
    def handle(self, *args, **options):
        """Command entry point: generate --courses worth of fake data."""
        courses = int(options['courses'])
        if courses > 0:
            self._add_fake_data_to_courses(courses)
from django.core.management.base import BaseCommand
from optparse import make_option
from django.template.loader import render_to_string
from courses.models import *
from django.contrib.auth.models import User
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--courses',
action='store',
dest='courses',
default=10,
help='Adds the specified amount of fake data to the database'
),
)
args = '<number of entries to be created>'
help = 'Creates fake data for the Uberlearner project'
def _add_fake_data_to_courses(self, num):
# create a pool of instructors
instructor_data = [
{'first_name': 'first', 'last_name': 'user', 'username': 'first-user', 'email': '<EMAIL>'},
{'first_name': 'second', 'last_name': 'user', 'username': 'second-user', 'email': '<EMAIL>'},
{'first_name': 'third', 'last_name': 'user', 'username': 'third-user', 'email': '<EMAIL>'},
{'first_name': 'fourth', 'last_name': 'user', 'username': 'fourth-user', 'email': '<EMAIL>'},
{'first_name': 'fifth', 'last_name': 'user', 'username': 'fifth-user', 'email': '<EMAIL>'},
]
instructors = []
for instructor_datum in instructor_data:
user = User.objects.create_user(instructor_datum['username'], email=instructor_datum['email'], password="<PASSWORD>")
email_address = EmailAddress(user=user, email=instructor_datum['email'], verified=True, primary=True)
user.first_name = instructor_datum['first_name']
user.last_name = instructor_datum['last_name']
user.save()
email_address.save()
instructors.append(user)
for idx in xrange(num):
course = Course(
instructor=instructors[idx % len(instructor_data)],
title='sample title ' + str(idx),
slug='sample-title-' + str(idx),
description=('sample description ' + str(idx) + '\t') * 25,
is_public = True if idx % 2 == 0 else False,
popularity=0
)
course.save()
self._generate_course_pages(course)
def _generate_course_pages(self, course):
for idx in xrange(7):
Page(
course=course,
title='sample page ' + str(idx),
html=render_to_string('insert_fake_data/page.html', {'number': idx}),
estimated_effort='123',
summary="In this section, we will take an in-depth look into foo and bar"
).save()
def handle(self, *args, **options):
courses = int(options['courses'])
if courses > 0:
self._add_fake_data_to_courses(courses) | en | 0.515067 | # create a pool of instructors | 2.390593 | 2 |
topsis_Chirag_101903257/__init__.py | chiragmanchanda9/topsis_Chirag_101903257 | 0 | 6620721 | from topsis_Chirag_101903257.chirag import chiragtopsis | from topsis_Chirag_101903257.chirag import chiragtopsis | none | 1 | 1.076734 | 1 | |
apps/electric_mail.py | HackingMadison/smsmybus-dev | 3 | 6620722 | import os
import wsgiref.handlers
import logging
from google.appengine.api import mail
from google.appengine.api.taskqueue import Task
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from apps import api_bridge
import config
class EmailRequestHandler(webapp.RequestHandler):
    """GAE inbound-mail handler: the email *subject* carries the bus-stop
    query; arrival estimates are emailed back to the sender and the
    exchange is logged via a task queue."""
    def post(self):
        inbound_message = mail.InboundEmailMessage(self.request.body)
        logging.info("Email request! Sent from %s with message subject %s" % (inbound_message.sender,inbound_message.subject))
        # The query (stop ID, optionally preceded by a route) lives in
        # the subject line, not the message body.
        body = inbound_message.subject
        logging.debug("email body arguments %s" % body)
        ## magic ##
        response = api_bridge.getarrivals(body,10)
        # to make it a little easier to read, add newlines before each route report line
        response = response.replace('Route','\nRoute')
        # send back the reply with the results
        header = "Thanks for your request! Here are your results...\n\n"
        footer = "\n\nThank you for using SMSMyBus!\nhttp://www.smsmybus.com"
        # setup the response email
        message = mail.EmailMessage()
        message.sender = config.EMAIL_SENDER_ADDRESS
        message.bcc = config.EMAIL_BCC_ADDRESS
        message.to = inbound_message.sender
        message.subject = 'Your Metro schedule estimates for stop %s' % getStopID(body)
        message.body = header + response + footer
        logging.debug('sending results to %s' % message.to)
        message.send()
        # create an event to log the event
        task = Task(url='/loggingtask', params={'phone':inbound_message.sender,
                                                'inboundBody':body,
                                                'sid':'email',
                                                'outboundBody':response,})
        task.add('eventlogger')
        self.response.set_status(200)
        return
## end EmailRequestHandler
def getStopID(msg):
    """Pull the bus-stop ID out of a whitespace-separated request string.

    A single-word request is assumed to be just the stop ID; otherwise
    the second word is the stop (the first being the route).
    """
    tokens = msg.split()
    return tokens[0] if len(tokens) == 1 else tokens[1]
## end getStopID
def main():
    """CGI entry point: route all App Engine inbound-mail POSTs
    (delivered to /_ah/mail/<address>) to EmailRequestHandler."""
    logging.getLogger().setLevel(logging.DEBUG)
    application = webapp.WSGIApplication([('/_ah/mail/.+', EmailRequestHandler),
                                          ],
                                         debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
| import os
import wsgiref.handlers
import logging
from google.appengine.api import mail
from google.appengine.api.taskqueue import Task
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from apps import api_bridge
import config
class EmailRequestHandler(webapp.RequestHandler):
def post(self):
inbound_message = mail.InboundEmailMessage(self.request.body)
logging.info("Email request! Sent from %s with message subject %s" % (inbound_message.sender,inbound_message.subject))
body = inbound_message.subject
logging.debug("email body arguments %s" % body)
## magic ##
response = api_bridge.getarrivals(body,10)
# to make it a little easier to read, add newlines before each route report line
response = response.replace('Route','\nRoute')
# send back the reply with the results
header = "Thanks for your request! Here are your results...\n\n"
footer = "\n\nThank you for using SMSMyBus!\nhttp://www.smsmybus.com"
# setup the response email
message = mail.EmailMessage()
message.sender = config.EMAIL_SENDER_ADDRESS
message.bcc = config.EMAIL_BCC_ADDRESS
message.to = inbound_message.sender
message.subject = 'Your Metro schedule estimates for stop %s' % getStopID(body)
message.body = header + response + footer
logging.debug('sending results to %s' % message.to)
message.send()
# create an event to log the event
task = Task(url='/loggingtask', params={'phone':inbound_message.sender,
'inboundBody':body,
'sid':'email',
'outboundBody':response,})
task.add('eventlogger')
self.response.set_status(200)
return
## end EmailRequestHandler
def getStopID(msg):
request = msg.split()
if len(request) == 1:
# assume single argument requests are for a bus stop
stopID = request[0]
else:
stopID = request[1]
return stopID
## end getStopID
def main():
logging.getLogger().setLevel(logging.DEBUG)
application = webapp.WSGIApplication([('/_ah/mail/.+', EmailRequestHandler),
],
debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
| en | 0.780038 | ## magic ## # to make it a little easier to read, add newlines before each route report line # send back the reply with the results # setup the response email # create an event to log the event ## end EmailRequestHandler # assume single argument requests are for a bus stop ## end getStopID | 2.204496 | 2 |
playversion.py | nachtmaar/androlyze | 17 | 6620723 | <reponame>nachtmaar/androlyze
#!/usr/bin/python
# coding=utf-8
# encoding: utf-8
__author__ = "<NAME> "
__email__ = "<EMAIL>"
import httplib, urllib, simplejson, sys, getopt, string
from sgmllib import SGMLParser
class SwVersionLister(SGMLParser):
    """SGML parser that extracts an app's version from a Google Play page.

    The version lives in <div itemprop="softwareVersion">...</div>; after
    feed(), the last value seen is available in self.swversion (or None).
    """
    inside_div_element = 0   # 1 while inside a <div>
    softwareVersion = 0      # 1 while inside the softwareVersion <div>
    swversion = None         # last version string found, or None

    def reset(self):
        SGMLParser.reset(self)
        self.versions = []

    def start_div(self, attrs):
        self.inside_div_element = 1
        for k, v in attrs:
            # Flag the version-carrying div so handle_data captures its text.
            if k == 'itemprop' and v == 'softwareVersion':
                self.softwareVersion = 1

    def handle_data(self, data):
        if self.softwareVersion == 1:
            # str method instead of the long-deprecated string.strip() function.
            self.swversion = data.strip()
            self.softwareVersion = 0

    def end_div(self):
        self.inside_div_element = 0
class MyOpener(urllib.FancyURLopener):
    # `version` is the User-Agent string FancyURLopener sends; spoof a
    # desktop browser so the Play store serves its regular HTML page.
    version = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:27.0) Gecko/20100101 Firefox/27.0"
def get_apk_version(package_name):
    ''' Returns the version of the `package_name` in the play store.

    Fetches the app's Play-store detail page with a browser User-Agent
    and scrapes the softwareVersion div; returns None if none was found.
    '''
    # Install the UA-spoofing opener globally (Python 2 urllib API).
    urllib._urlopener = MyOpener()
    response = urllib.urlopen("https://play.google.com/store/apps/details?id=%s" % package_name)
    data = response.read()
    parser = SwVersionLister()
    parser.feed(data)
    version = parser.swversion
    return version
#!/usr/bin/python
# coding=utf-8
# encoding: utf-8
__author__ = "<NAME> "
__email__ = "<EMAIL>"
import httplib, urllib, simplejson, sys, getopt, string
from sgmllib import SGMLParser
class SwVersionLister(SGMLParser):
inside_div_element = 0
softwareVersion = 0
swversion = None
def reset(self):
SGMLParser.reset(self)
self.versions = []
def start_div(self, attrs):
self.inside_div_element = 1
for k,v in attrs:
if k == 'itemprop':
if v == 'softwareVersion':
self.softwareVersion = 1
def handle_data(self, data):
if self.softwareVersion == 1:
self.swversion = string.strip(data)
self.softwareVersion = 0
def end_div(self):
self.inside_div_element = 0
class MyOpener(urllib.FancyURLopener):
version = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:27.0) Gecko/20100101 Firefox/27.0"
def get_apk_version(package_name):
''' Returns the version of the `package_name` in the play store '''
urllib._urlopener = MyOpener()
response = urllib.urlopen("https://play.google.com/store/apps/details?id=%s" % package_name)
data = response.read()
parser = SwVersionLister()
parser.feed(data)
version = parser.swversion
    return version
test/test_blob_interface.py | georgeAccnt-GH/Azure2019 | 2 | 6620724 | # Test blob IO functions
import numpy as np
from AzureUtilities import *
import matplotlib.pyplot as plt
# Random test array and model parameters
shape = (100, 120)
spacing = (6.25, 7.35)
origin = (13.34, 12.87)
X = np.random.randn(shape[0], shape[1]).astype('float32')

# Put/Get array: round-trip X through blob storage and check the residual.
container = 'slim-bucket-common'
array_name = 'pwitte/models/test_array'
array_put(X, container, array_name)
X_rec = array_get(container, array_name)
print("Residual array: ", np.linalg.norm(X - X_rec))

# Put/Get model structure (array plus origin/spacing metadata).
container = 'slim-bucket-common'
model_name = 'pwitte/models/test_model'
# Fixed: the model round-trip previously reused array_name, overwriting the
# array blob and leaving model_name unused.
model_put(X, origin, spacing, container, model_name)
X_rec, o_rec, s_rec = model_get(container, model_name)
print("Residual model: ", np.linalg.norm(X - X_rec))

# Get segy file
data_path = 'pwitte/data/'
filename = 'bp_observed_data_1005.segy'
data, sourceX, sourceZ, groupX, groupZ, tmax, dt, nt = segy_get(container, data_path, filename)

# Put segy file
segy_put(data, sourceX, sourceZ, groupX, groupZ, dt, container, data_path, filename, sourceY=None, groupY=None, elevScalar=-1000, coordScalar=-1000, keepFile=False)
# Test blob IO functions
import numpy as np
from AzureUtilities import *
import matplotlib.pyplot as plt
# Random test array and model parameters
shape = (100, 120)
spacing = (6.25, 7.35)
origin = (13.34, 12.87)
X = np.random.randn(shape[0], shape[1]).astype('float32')
# Put/Get array
container = 'slim-bucket-common'
array_name = 'pwitte/models/test_array'
array_put(X, container, array_name)
X_rec = array_get(container, array_name)
print("Residual array: ", np.linalg.norm(X - X_rec))
# Put/Get model structure
container = 'slim-bucket-common'
model_name = 'pwitte/models/test_model'
model_put(X, origin, spacing, container, array_name)
X_rec, o_rec, s_rec = model_get(container, array_name)
print("Residual model: ", np.linalg.norm(X - X_rec))
# Get segy file
data_path = 'pwitte/data/'
filename = 'bp_observed_data_1005.segy'
data, sourceX, sourceZ, groupX, groupZ, tmax, dt, nt = segy_get(container, data_path, filename)
# Put segy file
segy_put(data, sourceX, sourceZ, groupX, groupZ, dt, container, data_path, filename, sourceY=None, groupY=None, elevScalar=-1000, coordScalar=-1000, keepFile=False)
| en | 0.382118 | # Test blob IO functions # Random test array and model parameters # Put/Get array # Put/Get model structure # Get segy file # Put segy file | 2.177213 | 2 |
20_Raspberry_Pi_project/DStarLiteMain.py | robodhhb/Interactive-D-Star-Lite | 5 | 6620725 | #!/usr/bin/python3
############################################################
#
# This is the main application file for the D*Lite App.
# It implements the D*Lite algorithm with an interactive
# grid including obstacles, a planner and execution
# modules for screen simulation and Lego EV3 control.
#
# File: DStarLiteMain.py
# Author: <NAME>
# Version: 1.0 Date: 22.07.2020
###########################################################
from tkinter import *
from DStarLiteView import *
# Create the Tk root window, attach the D*Lite view, and start the GUI loop.
print("\nStarting D*Lite Application 1.0")
root = Tk()
main_window = DStarLiteView(root)
root.mainloop()
#!/usr/bin/python3
############################################################
#
# This is the main application file for the D*Lite App.
# It implements the D*Lite algorithm with an interactive
# grid including obstacles, a planner and execution
# modules for screen simulation and Lego EV3 control.
#
# File: DStarLiteMain.py
# Author: <NAME>
# Version: 1.0 Date: 22.07.2020
###########################################################
from tkinter import *
from DStarLiteView import *
#Create an start main application window
print("\nStarting D*Lite Application 1.0")
root = Tk()
mainWin= DStarLiteView(root)
root.mainloop()
| en | 0.384102 | #!/usr/bin/python3 ############################################################ # # This is the main application file for the D*Lite App. # It implements the D*Lite algorithm with an interactive # grid including obstacles, a planner and execution # modules for screen simulation and Lego EV3 control. # # File: DStarLiteMain.py # Author: <NAME> # Version: 1.0 Date: 22.07.2020 ########################################################### #Create an start main application window | 3.157062 | 3 |
import json
import discord
import random
import time
from MafiaClass import Player
from MafiaCommands import StartRules, Start
# Number of players in the lobby (not yet wired into the game flow).
NumPlayers = 0
# Roles that can be dealt to players.
RoleList = ['Citizen', 'Mafia', 'Doctor', 'Sheriff']
def RoleClaim():
    # Placeholder for the role-claim phase.
    print ('hey peeps')
def sleep():
    # Fixed one-second pause used to pace the prompts.
    time.sleep(1)
def Night(TestList):
    """Run one night phase: prompt the doctor and scan for the save target."""
    print("It is now night time, everyone mute their mics")
    sleep()
    Saved = input ("Doctor, please @ who you would like to save: ")
    x = 0
    for players in TestList:
        # NOTE(review): TestList holds Player objects while Saved is a string,
        # so this equality likely never matches — confirm whether Player
        # defines __eq__ against names/mentions.
        if TestList[x] == Saved:
            print('I just came to say hello')
        x += 1
    sleep()
p1 = Player()
p2 = Player()
p3 = Player()
p4 = Player()
p5 = Player()
p6 = Player()
p7 = Player()
p8 = Player()
p9 = Player()
p10 = Player()
TestList = [p1,p2,p3,p4,p5,p6]
'''
x=0
for players in TestList:
print(TestList[x].role)
'''
Night(TestList)
import json
import discord
import random
import time
from MafiaClass import Player
from MafiaCommands import StartRules, Start
NumPlayers = 0
RoleList = ['Citizen', 'Mafia', 'Doctor', 'Sheriff']
def RoleClaim():
print ('hey peeps')
def sleep():
time.sleep(1)
def Night(TestList):
print("It is now night time, everyone mute their mics")
sleep()
Saved = input ("Doctor, please @ who you would like to save: ")
x = 0
for players in TestList:
if TestList[x] == Saved:
print('I just came to say hello')
x += 1
sleep()
p1 = Player()
p2 = Player()
p3 = Player()
p4 = Player()
p5 = Player()
p6 = Player()
p7 = Player()
p8 = Player()
p9 = Player()
p10 = Player()
TestList = [p1,p2,p3,p4,p5,p6]
'''
x=0
for players in TestList:
print(TestList[x].role)
'''
Night(TestList)
online_recommend/action_similar_recall/similar_recall_tohive.py | hfhfn/db_recommend | 0 | 6620727 | <reponame>hfhfn/db_recommend
from action_similar_recall.similar_recall_ret import UserSimilarRecall
from utils.default import u_spark, user_recall_db, u_topK, user_portrait_db, online_db
from utils.save_tohive import RetToHive
class SaveUserSimilarRecall(object):
    """Persist user-similarity recall results for one category to Hive,
    plus a slimmed copy of the top-K results to the online database.
    """
    spark_app = u_spark
    recall_db = user_recall_db
    portrait_db = user_portrait_db
    online_db = online_db
    topK = u_topK

    def __init__(self, cate_id):
        self.cate_id = cate_id
        self.user_recall = UserSimilarRecall(self.portrait_db, self.recall_db, self.spark_app, cate_id)

    def save_user_similar_recall(self):
        # Save the user-behaviour movie-similarity recall results.
        recall_ret = self.user_recall.get_user_similar_recall()
        recall_table = 'user_similar_recall_{}'.format(self.cate_id)
        RetToHive(self.spark_app, recall_ret, self.recall_db, recall_table)
        # Drop the (potentially large) DataFrame reference eagerly.
        import gc
        del recall_ret
        gc.collect()

    def save_filter_same_recall(self):
        # Save the recall results with duplicate entries merged.
        recall_ret = self.user_recall.get_filter_same_recall()
        recall_table = 'user_similar_filter_same_recall_{}'.format(self.cate_id)
        RetToHive(self.spark_app, recall_ret, self.recall_db, recall_table)
        import gc
        del recall_ret
        gc.collect()

    def save_filter_history_recall(self):
        # Save the recall results after filtering out the user's history.
        recall_ret = self.user_recall.get_filter_history_recall()
        recall_table = 'user_similar_filter_history_recall_{}'.format(self.cate_id)
        RetToHive(self.spark_app, recall_ret, self.recall_db, recall_table)
        import gc
        del recall_ret
        gc.collect()

    def save_user_similar_latest_recall(self):
        # Save the top-K recall results.
        recall_ret = self.user_recall.get_user_similar_latest_recall()
        recall_table = 'user_similar_recall_{}_{}'.format(self.cate_id, self.topK)
        RetToHive(self.spark_app, recall_ret, self.recall_db, recall_table)
        # Slim the rows pushed online down to the serving columns.
        # recall_ret = self.spark_app.sql("select * from update_user.user_similar_recall_1969_100")  # temporary debug override
        recall_ret = recall_ret.select('user_id', 'movie_id', 'title', 'sort_num', 'timestamp')
        try:
            recall_ret.write.insertInto("{}.{}".format(self.online_db, recall_table), overwrite=True)
        except Exception:
            # insertInto fails if the online table does not exist yet, so fall
            # back to creating it. (Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            RetToHive(self.spark_app, recall_ret, self.online_db, recall_table)
        import gc
        del recall_ret
        gc.collect()
if __name__ == '__main__':
    # Ad-hoc run for category 1969 only; persists the top-K recall table.
    usr = SaveUserSimilarRecall(1969)
    usr.save_user_similar_latest_recall()
    pass
from action_similar_recall.similar_recall_ret import UserSimilarRecall
from utils.default import u_spark, user_recall_db, u_topK, user_portrait_db, online_db
from utils.save_tohive import RetToHive
class SaveUserSimilarRecall(object):
spark_app = u_spark
recall_db = user_recall_db
portrait_db = user_portrait_db
online_db = online_db
topK = u_topK
def __init__(self, cate_id):
self.cate_id = cate_id
self.user_recall = UserSimilarRecall(self.portrait_db, self.recall_db, self.spark_app, cate_id)
def save_user_similar_recall(self):
# 保存用户行为电影相似度召回结果
recall_ret = self.user_recall.get_user_similar_recall()
recall_table = 'user_similar_recall_{}'.format(self.cate_id)
RetToHive(self.spark_app, recall_ret, self.recall_db, recall_table)
import gc
del recall_ret
gc.collect()
def save_filter_same_recall(self):
# 综合用户行为电影相似度召回相同的结果
recall_ret = self.user_recall.get_filter_same_recall()
recall_table = 'user_similar_filter_same_recall_{}'.format(self.cate_id)
RetToHive(self.spark_app, recall_ret, self.recall_db, recall_table)
import gc
del recall_ret
gc.collect()
def save_filter_history_recall(self):
# 过滤用户历史数据
recall_ret = self.user_recall.get_filter_history_recall()
recall_table = 'user_similar_filter_history_recall_{}'.format(self.cate_id)
RetToHive(self.spark_app, recall_ret, self.recall_db, recall_table)
import gc
del recall_ret
gc.collect()
def save_user_similar_latest_recall(self):
# 保存topK个召回结果
recall_ret = self.user_recall.get_user_similar_latest_recall()
recall_table = 'user_similar_recall_{}_{}'.format(self.cate_id, self.topK)
RetToHive(self.spark_app, recall_ret, self.recall_db, recall_table)
# 精简上线数据
# recall_ret = self.spark_app.sql("select * from update_user.user_similar_recall_1969_100") # 零时用
recall_ret = recall_ret.select('user_id', 'movie_id', 'title', 'sort_num', 'timestamp')
try:
recall_ret.write.insertInto("{}.{}".format(self.online_db, recall_table), overwrite=True)
except:
RetToHive(self.spark_app, recall_ret, self.online_db, recall_table)
import gc
del recall_ret
gc.collect()
if __name__ == '__main__':
usr = SaveUserSimilarRecall(1969)
usr.save_user_similar_latest_recall()
    pass
src/shell_command_logger/cli/main.py | six-two/shell-command-logger | 0 | 6620728 | <gh_stars>0
import argparse
import sys
from typing import Callable
# import the code from this package
import shell_command_logger
from shell_command_logger import print_color
from shell_command_logger.backports import TimeParseException
from shell_command_logger.config import InvalidConfigException
from shell_command_logger.cli import alias, check, config, log, replay, search, symlink
from shell_command_logger.main_file import set_python_main_file
from shell_command_logger.debug import init_debugging
# local files
from ..backports import Dict
class SubcommandHandlerException(Exception):
    """Raised when subcommand registration/dispatch is inconsistent
    (duplicate subcommand name, or dispatch to an unregistered name)."""
    pass
class SubcommandHandler:
    """Registers CLI submodules on an argparse parser and dispatches to them.

    Each module must expose SUBCOMMAND_NAMES, ARG_PARSER_OPTIONS,
    populate_agrument_parser(parser) and subcommand_main(args).
    """
    def __init__(self, argument_parser, subcommand_variable_name: str = "subcommand", subcommand_required: bool = False) -> None:
        self.ap = argument_parser
        self.subcommand_variable_name = subcommand_variable_name
        self.ap_subparsers = self.ap.add_subparsers(metavar="SUBCOMMAND", required=subcommand_required, dest=subcommand_variable_name)
        # Maps each registered subcommand name to its module's main function.
        self.main_function_map = {}

    def register_module(self, module) -> None:
        """Add all of *module*'s subcommands to the parser."""
        for subcommand in module.SUBCOMMAND_NAMES:
            sub_parser = self.ap_subparsers.add_parser(subcommand, **module.ARG_PARSER_OPTIONS)
            module.populate_agrument_parser(sub_parser)
            if subcommand in self.main_function_map:
                raise SubcommandHandlerException(f"The subcommand '{subcommand}' is specified twice")
            self.main_function_map[subcommand] = module.subcommand_main

    def subcommand_main(self, args) -> int:
        """Dispatch to the selected subcommand's main; show help if none given."""
        name = getattr(args, self.subcommand_variable_name)
        if not name:
            # No subcommand on the command line -> print usage and fail.
            self.ap.print_help()
            return 1
        fn_main = self.main_function_map.get(name)
        if fn_main is None:
            raise SubcommandHandlerException(f"No subcommand with name '{name}' registered")
        return fn_main(args)
def main(main_python_file: str) -> None:
    """CLI entry point: dispatch to a subcommand or record a symlinked command.

    main_python_file is the path of the executable that invoked us; it is
    registered so symlink-based invocation can find the real entry point.
    """
    # Register the calling binaries path
    set_python_main_file(main_python_file)
    # When invoked through an scl-managed symlink, record that command and
    # exit instead of parsing scl's own CLI.
    if symlink_name := shell_command_logger.cli.log.get_name_when_called_by_symlink():
        exit_code = shell_command_logger.cli.log.record_command_when_called_by_symlink(symlink_name, sys.argv[1:])
        sys.exit(exit_code)
    # Setting up argument parser
    ap = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="The shell-command-logger (scl) allows you to record commands. Afterwards the recorded commands can be replay and searched.",
        epilog=f"Installed version: {shell_command_logger.get_version_string()}\nDocumentation: https://shell-command-logger.six-two.dev/"
    )
    ap.add_argument("-V", "--version", action="version", version=shell_command_logger.get_version_string())
    ap.add_argument("-d", "--debug", action="store_true", help="print debugging information")
    handler = SubcommandHandler(ap)
    for module in [alias, check, config, log, replay, search, symlink]:
        handler.register_module(module)
    # Run the selected submodule
    args = ap.parse_args()
    if args.debug:
        init_debugging(True)
    try:
        exit_code = handler.subcommand_main(args)
    except InvalidConfigException as ex:
        # Configuration problems get a readable hint instead of a traceback.
        print_color("Your configuration is not valid:", "red", bold=True)
        print_color(str(ex), "red", bold=True)
        print_color("Hint: You can use 'scl config --defaults' to reset your configuration to the defaults", "yellow")
        exit_code = 1
    except TimeParseException as ex:
        print_color(str(ex), "red", bold=True)
        exit_code = 1
    sys.exit(exit_code)
import argparse
import sys
from typing import Callable
# import the code from this package
import shell_command_logger
from shell_command_logger import print_color
from shell_command_logger.backports import TimeParseException
from shell_command_logger.config import InvalidConfigException
from shell_command_logger.cli import alias, check, config, log, replay, search, symlink
from shell_command_logger.main_file import set_python_main_file
from shell_command_logger.debug import init_debugging
# local files
from ..backports import Dict
class SubcommandHandlerException(Exception):
pass
class SubcommandHandler:
def __init__(self, argument_parser, subcommand_variable_name: str = "subcommand", subcommand_required: bool = False) -> None:
self.ap = argument_parser
self.subcommand_variable_name = subcommand_variable_name
self.ap_subparsers = self.ap.add_subparsers(metavar="SUBCOMMAND", required=subcommand_required, dest=subcommand_variable_name)
# Maps from subcommand names to the coresponding main functions
self.main_function_map: Dict[str, Callable] = {}
def register_module(self, module) -> None:
for name in module.SUBCOMMAND_NAMES:
ap_module = self.ap_subparsers.add_parser(name, **module.ARG_PARSER_OPTIONS)
module.populate_agrument_parser(ap_module)
if name in self.main_function_map:
raise SubcommandHandlerException(f"The subcommand '{name}' is specified twice")
self.main_function_map[name] = module.subcommand_main
def subcommand_main(self, args) -> int:
subcommand_name = getattr(args, self.subcommand_variable_name)
if not subcommand_name:
# If no subcommand is specified, we show the help
self.ap.print_help()
return 1
fn_main = self.main_function_map.get(subcommand_name)
if fn_main:
return fn_main(args)
else:
raise SubcommandHandlerException(f"No subcommand with name '{subcommand_name}' registered")
def main(main_python_file: str) -> None:
# Register the calling binaries path
set_python_main_file(main_python_file)
if symlink_name := shell_command_logger.cli.log.get_name_when_called_by_symlink():
exit_code = shell_command_logger.cli.log.record_command_when_called_by_symlink(symlink_name, sys.argv[1:])
sys.exit(exit_code)
# Setting up argument parser
ap = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="The shell-command-logger (scl) allows you to record commands. Afterwards the recorded commands can be replay and searched.",
epilog=f"Installed version: {shell_command_logger.get_version_string()}\nDocumentation: https://shell-command-logger.six-two.dev/"
)
ap.add_argument("-V", "--version", action="version", version=shell_command_logger.get_version_string())
ap.add_argument("-d", "--debug", action="store_true", help="print debugging information")
handler = SubcommandHandler(ap)
for module in [alias, check, config, log, replay, search, symlink]:
handler.register_module(module)
# Run the selected submodule
args = ap.parse_args()
if args.debug:
init_debugging(True)
try:
exit_code = handler.subcommand_main(args)
except InvalidConfigException as ex:
print_color("Your configuration is not valid:", "red", bold=True)
print_color(str(ex), "red", bold=True)
print_color("Hint: You can use 'scl config --defaults' to reset your configuration to the defaults", "yellow")
exit_code = 1
except TimeParseException as ex:
print_color(str(ex), "red", bold=True)
exit_code = 1
    sys.exit(exit_code)
functions/Demos/import_example2.py | WebucatorTraining/classfiles-actionable-python | 2 | 6620729 | <reponame>WebucatorTraining/classfiles-actionable-python
from add_nums_with_return import add_nums
total = add_nums(1, 2, 3, 4, 5)
print(total)
from add_nums_with_return import add_nums
total = add_nums(1, 2, 3, 4, 5)
print(total)
smserver/listener/app.py | CorySanin/stepmania-server | 17 | 6620730 | <reponame>CorySanin/stepmania-server
""" Listener module.
This module is responsible of listening for incomming event
"""
from threading import Thread
from smserver import event
from smserver import messaging
from smserver.listener.workers import chat
class Listener(Thread):
    """Secondary thread, held by the main server.

    Consumes events from the messaging bus and dispatches each one to the
    worker registered for its kind.
    """
    def __init__(self, server):
        Thread.__init__(self)
        self.server = server
        # One worker instance per event kind this listener understands.
        self.dispatch = {
            event.EventKind.chat_message: chat.ChatWorker(server),
        }
    def run(self):
        """Start listening for incoming events (blocks on the message bus)."""
        self.server.log.debug("Start listener")
        for evt in messaging.listen():
            # Skip events for rooms this server has no connection on.
            if evt.room_id and not self.server.has_room(evt.room_id):
                self.server.log.debug(
                    "Ignore event %s: no connection on the room %s",
                    evt,
                    evt.room_id
                )
                continue
            if evt.kind not in self.dispatch:
                self.server.log.error("Unknown event kind %s", evt.kind)
                continue
            worker = self.dispatch[evt.kind]
            try:
                # Workers that touch the database get a scoped session.
                if worker.need_session:
                    with self.server.db.session_scope() as session:
                        worker.handle(evt.data, token=evt.token, session=session)
                else:
                    worker.handle(evt.data, token=evt.token)
            except Exception: #pylint: disable=broad-except
                # One bad event must not kill the listener thread.
                self.server.log.exception('Error while handling %s', evt)
    def stop(self):
        """Stop the thread by closing the messaging stream."""
        self.server.log.debug("Closing thread: %s", self)
        messaging.stop()
| """ Listener module.
This module is responsible of listening for incomming event
"""
from threading import Thread
from smserver import event
from smserver import messaging
from smserver.listener.workers import chat
class Listener(Thread):
""" Secondary thread, hold by the main server. """
def __init__(self, server):
Thread.__init__(self)
self.server = server
self.dispatch = {
event.EventKind.chat_message: chat.ChatWorker(server),
}
def run(self):
""" Start to listen for incomming event """
self.server.log.debug("Start listener")
for evt in messaging.listen():
if evt.room_id and not self.server.has_room(evt.room_id):
self.server.log.debug(
"Ignore event %s: no connection on the room %s",
evt,
evt.room_id
)
continue
if evt.kind not in self.dispatch:
self.server.log.error("Unknown event kind %s", evt.kind)
continue
worker = self.dispatch[evt.kind]
try:
if worker.need_session:
with self.server.db.session_scope() as session:
worker.handle(evt.data, token=evt.token, session=session)
else:
worker.handle(evt.data, token=evt.token)
except Exception: #pylint: disable=broad-except
self.server.log.exception('Error while handling %s', evt)
def stop(self):
""" Stop the thread """
self.server.log.debug("Closing thread: %s", self)
        messaging.stop()
recurrent/scripts/configuration.py | canders1/ProSPer | 58 | 6620731 | <gh_stars>10-100
#! /usr/bin/env python
def boolify(s):
    '''Convert the strings 'True'/'False' to booleans.

    Raises ValueError for anything else so autoconvert() can fall through.
    Copied from http://stackoverflow.com/questions/7019283/automatically-type-cast-parameters-in-python
    '''
    if s == 'True':
        return True
    if s == 'False':
        return False
    # Informative message instead of the original "huh?" (the type is what
    # matters to callers; autoconvert catches ValueError).
    raise ValueError("not a boolean string: %r" % (s,))
def autoconvert(s):
    '''Best-effort cast of *s* to bool, int or float; returns *s* unchanged on failure.

    Copied from http://stackoverflow.com/questions/7019283/automatically-type-cast-parameters-in-python
    '''
    for fn in (boolify, int, float):
        try:
            return fn(s)
        except ValueError:
            pass
    return s
def get_config(f):
    '''Parse whitespace-separated config file *f* into a dict.

    Each line with at least two tokens maps its first token to the
    auto-converted value of its *last* token; other lines are ignored.
    '''
    config = {}
    # Context manager so the file handle is closed deterministically
    # (the original left it to the garbage collector).
    with open(f, 'r') as fh:
        for line in fh:
            tokens = line.split()
            if len(tokens) > 1:
                config[tokens[0]] = autoconvert(tokens[-1])
    return config
#! /usr/bin/env python
def boolify(s):
'''Copied from http://stackoverflow.com/questions/7019283/automatically-type-cast-parameters-in-python'''
if s == 'True':
return True
if s == 'False':
return False
raise ValueError("huh?")
def autoconvert(s):
'''Copied from http://stackoverflow.com/questions/7019283/automatically-type-cast-parameters-in-python'''
for fn in (boolify, int, float):
try:
return fn(s)
except ValueError:
pass
return s
def get_config(f):
config = {}
for line in open(f,'r'):
l = line.split()
if len(l) > 1:
config[l[0]] = autoconvert(l[-1])
    return config
code/panstarrs/read_ps1.py | aceilers/TheCannon | 35 | 6620732 | <gh_stars>10-100
import pyfits
import numpy as np
import matplotlib.pyplot as plt
# Load the APOGEE DR12 x PS1 x AllWISE cross-match table.
ob = pyfits.getdata('apodr12_ps1_allwise.fits')
print("done reading file")
ids = ob['apogee_id']
# Fluxes -> magnitudes; clip to avoid log10 of non-positive fluxes.
mag = -2.5 * np.log10(np.clip(ob['median'], 1e-30, np.inf))
err = 2.5/np.log(10)*ob['err']/np.clip(ob['median'], 1e-30, np.inf)
# Inflate the formal errors by 1.3x and add a 0.01 mag floor in quadrature.
err = np.sqrt((1.3*err)**2.+0.01**2.)
# PS1 saturation limits per band (g, r, i, z, y).
saturation_ps1 = np.asarray([14.0, 14.4, 14.4, 13.8, 13.])
# Keep stars that are unsaturated and detected (mag < 30) in every band.
# Thresholds come from saturation_ps1 instead of duplicated literals.
good = np.all((mag >= saturation_ps1) & (mag < 30), axis=1)
ids = ids[good]
g, r, i, z, y = (mag[good, band] for band in range(5))
err_g, err_r, err_i, err_z, err_y = (err[good, band] for band in range(5))
# Colors relative to the i band and their propagated uncertainties.
gi = g-i
ri = r-i
zi = z-i
yi = y-i
err_gi = np.sqrt(err_g**2 + err_i**2)
err_ri = np.sqrt(err_r**2 + err_i**2)
err_zi = np.sqrt(err_z**2 + err_i**2)
err_yi = np.sqrt(err_y**2 + err_i**2)
colors = np.vstack((ids, gi, err_gi, ri, err_ri, zi, err_zi, yi, err_yi)).T
np.savetxt("ps_colors.txt", colors, delimiter=',', header='ids,gi,gi_err,ri,ri_err,zi,zi_err,yi,yi_err', fmt="%s")
import pyfits
import numpy as np
import matplotlib.pyplot as plt
ob = pyfits.getdata('apodr12_ps1_allwise.fits')
print("done reading file")
ids = ob['apogee_id']
mag = -2.5 * np.log10(np.clip(ob['median'], 1e-30, np.inf))
err = 2.5/np.log(10)*ob['err']/np.clip(ob['median'], 1e-30, np.inf)
err = np.sqrt((1.3*err)**2.+0.01**2.)
saturation_ps1 = np.asarray([14.0, 14.4, 14.4, 13.8, 13.])
#bad = mag > 50
#bad = mag < saturation_ps1
#starflag = bad.sum(axis=1) > 0
#ids_good = ids[~starflag]
g = mag[:,0]
r = mag[:,1]
i = mag[:,2]
z = mag[:,3]
y = mag[:,4]
good_g = np.logical_and(g>=14.0, g<30)
good_r = np.logical_and(r>=14.4, r<30)
good_i = np.logical_and(i>=14.4, i<30)
good_z = np.logical_and(z>=13.8, z<30)
good_y = np.logical_and(y>=13.0, y<30)
good_gr = np.logical_and(good_g, good_r)
good_iz = np.logical_and(good_i, good_z)
good_griz = np.logical_and(good_gr, good_iz)
good = np.logical_and(good_griz, good_y)
ids = ids[good]
g = g[good]
r = r[good]
i = i[good]
z = z[good]
y = y[good]
err_g = err[:,0][good]
err_r = err[:,1][good]
err_i = err[:,2][good]
err_z = err[:,3][good]
err_y = err[:,4][good]
gi = g-i
ri = r-i
zi = z-i
yi = y-i
err_gi = np.sqrt(err_g**2 + err_i**2)
err_ri = np.sqrt(err_r**2 + err_i**2)
err_zi = np.sqrt(err_z**2 + err_i**2)
err_yi = np.sqrt(err_y**2 + err_i**2)
colors = np.vstack((ids, gi, err_gi, ri, err_ri, zi, err_zi, yi, err_yi)).T
np.savetxt("ps_colors.txt", colors, delimiter=',', header='ids,gi,gi_err,ri,ri_err,zi,zi_err,yi,yi_err', fmt="%s")
evaluate.py | lsr12345/image2latex_transformer_tensorflow2.x | 2 | 6620733 | <reponame>lsr12345/image2latex_transformer_tensorflow2.x
'''
# Author: <NAME>
# Date: 2021/08/11
# Email: <EMAIL>
# Description: demo
'''
import tensorflow as tf
import cv2
from model import Image2toLatex_Transformer
from datasetpipline import process_resize
from cfg import *
from tools import create_masks
# Trained checkpoint and vocabulary locations.
checkpoint_dir = './ckpt/ckpt-74'
voc_file = './dataset/vocab.txt'
# Vocabulary: special tokens get the first four ids, file entries follow.
voc2id = {}
voc2id['START_TOKEN'] = 0
voc2id['PAD_TOKEN'] = 1
voc2id['END_TOKEN'] = 2
voc2id['UNK_TOKEN'] = 3
with open(voc_file, mode='r') as f:
    ff = f.readlines()
    # Offset by 4 so file tokens come after the special tokens.
    for i, voc in enumerate(ff):
        voc = voc.strip()
        voc2id[voc] = i + 4
voc_size = len(voc2id)
# Reverse map id -> token; relies on dict insertion order matching the ids
# assigned above (true here because ids are assigned in insertion order).
id2voc = {i: j for i, j in enumerate(voc2id)}
transformer = Image2toLatex_Transformer(enc_units=UNITS, decoder_num_layers=decoder_num_layers, voc_size=voc_size,
                                        max_length=MAX_LENGTH, d_model=DECODING_UNITS, num_heads=8, dff=1024, rate=0.2, use2dpe=False)
checkpoint = tf.train.Checkpoint(transformer=transformer)
checkpoint.restore(checkpoint_dir)
def evaluate(inp_img, max_length=160):
    """Greedy-decode a LaTeX token-id sequence for one preprocessed image.

    Returns (token_ids, attention_weights); token_ids starts with
    START_TOKEN and stops before END_TOKEN (or at max_length).
    """
    # Add a batch dimension to the image and seed the decoder with START.
    encoder_input = tf.expand_dims(inp_img, 0)
    decoder_input = tf.expand_dims([voc2id['START_TOKEN']], 0)
    for i in range(max_length):
        # NOTE(review): training=True only on the first step looks deliberate
        # (the returned encoder_input is reused on later steps) — confirm
        # against the model's call() contract.
        training = True if i == 0 else False
        decoder_mask = create_masks(decoder_input)
        encoder_decoder_padding_mask = None
        predictions, attention_weights, encoder_input = transformer(encoder_input, decoder_input, decoder_mask,
                                                                    training, encoder_decoder_padding_mask)
        # Only the logits at the last position matter for greedy decoding.
        predictions = predictions[:, -1, :]
        predicted_id = tf.cast(tf.argmax(predictions, axis=-1),
                               tf.int32)
        if tf.equal(predicted_id, voc2id['END_TOKEN']):
            return tf.squeeze(decoder_input, axis=0), attention_weights
        # Append the predicted token and keep decoding.
        decoder_input = tf.concat([decoder_input, [predicted_id]],
                                  axis=-1)
    return tf.squeeze(decoder_input, axis=0), attention_weights
# Demo: run one image through the model and print the decoded LaTeX.
demo_path = './demo.png'
img = cv2.imread(demo_path)
img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
image, flag = process_resize(img, target_size=IMAGE_SIZE)
pred, _ = evaluate(image, max_length=160)
# Drop the leading START_TOKEN (index 0) before joining tokens.
res = ' '.join([id2voc[i] for i in pred.numpy()][1:])
print(res)
'''
# Author: <NAME>
# Date: 2021/08/11
# Email: <EMAIL>
# Description: demo
'''
import tensorflow as tf
import cv2
from model import Image2toLatex_Transformer
from datasetpipline import process_resize
from cfg import *
from tools import create_masks
checkpoint_dir = './ckpt/ckpt-74'
voc_file = './dataset/vocab.txt'
voc2id = {}
voc2id['START_TOKEN'] = 0
voc2id['PAD_TOKEN'] = 1
voc2id['END_TOKEN'] = 2
voc2id['UNK_TOKEN'] = 3
with open(voc_file, mode='r') as f:
ff = f.readlines()
for i, voc in enumerate(ff):
voc = voc.strip()
voc2id[voc] = i + 4
voc_size = len(voc2id)
id2voc = {i: j for i, j in enumerate(voc2id)}
transformer = Image2toLatex_Transformer(enc_units=UNITS, decoder_num_layers=decoder_num_layers, voc_size=voc_size,
max_length=MAX_LENGTH, d_model=DECODING_UNITS, num_heads=8, dff=1024, rate=0.2, use2dpe=False)
checkpoint = tf.train.Checkpoint(transformer=transformer)
checkpoint.restore(checkpoint_dir)
def evaluate(inp_img, max_length=160):
    """Greedily decode a LaTeX token sequence for one preprocessed image.

    Args:
        inp_img: a single grayscale image tensor/array (no batch axis).
        max_length: maximum number of decoding steps.

    Returns:
        (token_ids, attention_weights) -- token_ids is a 1-D int tensor
        starting with START_TOKEN; END_TOKEN is never appended.
    """
    encoder_input = tf.expand_dims(inp_img, 0)  # add batch dimension
    decoder_input = tf.expand_dims([voc2id['START_TOKEN']], 0)
    for i in range(max_length):
        # NOTE(review): training=True on the first step looks suspicious for
        # inference (it enables dropout) -- presumably intentional, confirm.
        training = True if i == 0 else False
        decoder_mask = create_masks(decoder_input)
        encoder_decoder_padding_mask = None
        predictions, attention_weights, encoder_input = transformer(encoder_input, decoder_input, decoder_mask,
                                                                    training, encoder_decoder_padding_mask)
        predictions = predictions[:, -1, :]  # keep logits of the last position only
        predicted_id = tf.cast(tf.argmax(predictions, axis=-1),
                               tf.int32)
        if tf.equal(predicted_id, voc2id['END_TOKEN']):
            # Stop as soon as the model emits the end-of-sequence token.
            return tf.squeeze(decoder_input, axis=0), attention_weights
        decoder_input = tf.concat([decoder_input, [predicted_id]],
                                  axis=-1)
    return tf.squeeze(decoder_input, axis=0), attention_weights
# Demo: recognise a single image and print the decoded LaTeX string.
demo_path = './demo.png'
img = cv2.imread(demo_path)
img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
image, flag = process_resize(img, target_size=IMAGE_SIZE)
pred, _ = evaluate(image, max_length=160)
res = ' '.join([id2voc[i] for i in pred.numpy()][1:])  # [1:] drops START_TOKEN
print(res)
groot-backend/django/code/groot/serializers/room.py | ayushmantripathy9/We-Are-Groot | 0 | 6620734 | <reponame>ayushmantripathy9/We-Are-Groot
from rest_framework.serializers import ModelSerializer
from groot.serializers import UserGetSerializer
from groot.models import Room
class RoomGetSerializer(ModelSerializer):
    """
    Serializes a Room (where calls are held) for GET responses.
    Participants are expanded into full nested user objects.
    """
    # Nested, read-only list of the room's current participants.
    participants = UserGetSerializer(read_only=True, many=True)

    class Meta:
        model = Room
        # Expand foreign-key relations one level deep.
        depth = 1
        fields = [
            'id',
            'room_name',
            'room_code',
            'participants',
            'start_time',
            'end_time'
        ]
        # Every field is read-only: this serializer never accepts writes.
        read_only_fields = [
            'id',
            'room_name',
            'room_code',
            'participants',
            'start_time',
            'end_time'
        ]
class RoomPostSerializer(ModelSerializer):
    """
    Serializes a Room (where calls are held) for POST requests.
    Only 'room_name' and 'end_time' are writable; identifiers and the
    start time are generated server-side and therefore read-only.
    """
    # Nested, read-only list of the room's current participants.
    participants = UserGetSerializer(read_only=True, many=True)

    class Meta:
        model = Room
        fields = [
            'id',
            'room_name',
            'room_code',
            'participants',
            'start_time',
            'end_time'
        ]
        read_only_fields = [
            'id',
            'room_code',
            'start_time'
        ]
class RoomHistorySerializer(ModelSerializer):
    """
    Serializes a Room for call-history retrieval (GET).
    Exposes everyone who ever joined via 'participants_history'.
    """
    # Nested, read-only list of every user that ever joined the room.
    participants_history = UserGetSerializer(read_only=True, many=True)

    class Meta:
        model = Room
        # Expand foreign-key relations one level deep.
        depth = 1
        fields = [
            'id',
            'room_name',
            'room_code',
            'participants_history',
            'start_time',
            'end_time'
        ]
        # Every field is read-only: history is never modified through here.
        read_only_fields = [
            'id',
            'room_name',
            'room_code',
            'participants_history',
            'start_time',
            'end_time'
        ]
| from rest_framework.serializers import ModelSerializer
from groot.serializers import UserGetSerializer
from groot.models import Room
class RoomGetSerializer(ModelSerializer):
    """
    Serializes a Room (where calls are held) for GET responses.
    Participants are expanded into full nested user objects.
    """
    # Nested, read-only list of the room's current participants.
    participants = UserGetSerializer(read_only=True, many=True)

    class Meta:
        model = Room
        # Expand foreign-key relations one level deep.
        depth = 1
        fields = [
            'id',
            'room_name',
            'room_code',
            'participants',
            'start_time',
            'end_time'
        ]
        # Every field is read-only: this serializer never accepts writes.
        read_only_fields = [
            'id',
            'room_name',
            'room_code',
            'participants',
            'start_time',
            'end_time'
        ]
class RoomPostSerializer(ModelSerializer):
    """
    Serializes a Room (where calls are held) for POST requests.
    Only 'room_name' and 'end_time' are writable; identifiers and the
    start time are generated server-side and therefore read-only.
    """
    # Nested, read-only list of the room's current participants.
    participants = UserGetSerializer(read_only=True, many=True)

    class Meta:
        model = Room
        fields = [
            'id',
            'room_name',
            'room_code',
            'participants',
            'start_time',
            'end_time'
        ]
        read_only_fields = [
            'id',
            'room_code',
            'start_time'
        ]
class RoomHistorySerializer(ModelSerializer):
    """
    Serializes a Room for call-history retrieval (GET).
    Exposes everyone who ever joined via 'participants_history'.
    """
    # Nested, read-only list of every user that ever joined the room.
    participants_history = UserGetSerializer(read_only=True, many=True)

    class Meta:
        model = Room
        # Expand foreign-key relations one level deep.
        depth = 1
        fields = [
            'id',
            'room_name',
            'room_code',
            'participants_history',
            'start_time',
            'end_time'
        ]
        # Every field is read-only: history is never modified through here.
        read_only_fields = [
            'id',
            'room_name',
            'room_code',
            'participants_history',
            'start_time',
            'end_time'
        ]
ScrapingDBLP/scraper.py | niallmartinryan/ScrapingArticlesPython | 0 | 6620735 | import sys
import os.path
import time
import res
from bs4 import BeautifulSoup
import urllib
import urllib.parse
import urllib.request
import requests
import random
import string
def getRandomShortDelay():
    """Return a short random delay, in seconds, between 2 and 10 inclusive."""
    lo, hi = 2, 10
    return random.randint(lo, hi)
def getRandomMidDelay():
    """Return a medium random delay, in seconds, between 2 and 12 inclusive."""
    lo, hi = 2, 12
    return random.randint(lo, hi)
def getRandomUserAgent():
    """Return a user-agent string picked uniformly from res.USER_AGENT_STRING.

    random.choice is the idiomatic (and equivalent) form of indexing with
    randint(0, len(seq) - 1), which the original did by hand.
    """
    return random.choice(res.USER_AGENT_STRING)
def getNames(url, params, headers, file):
    """Scrape author names from dblp's paginated person index.

    Pages are fetched 300 authors at a time (the site's page size); each
    name is appended to `file` and collected into a list.

    Args:
        url: unused; the entries URL is read from res.URL_ENTRIES_STRING.
        params: query-parameter dict; its 'pos' key is overwritten per page.
        headers: HTTP header dict; 'user-agent' is rotated between pages.
        file: open, writable file object receiving one name per line.

    Returns:
        The list of author names collected so far.  On any error the
        partial list is returned instead of None (the original implicitly
        returned None on failure, which crashed callers iterating it).
    """
    personCounter = 1
    # Position of the last page, observed manually on dblp at scrape time.
    magicEndNumber = 2057018
    # The site shows 300 authors per page, so 'pos' advances in steps of 300.
    increment = 300
    authors = []
    try:
        while personCounter < magicEndNumber:
            print(str(personCounter) + "\n")
            testURL = res.URL_ENTRIES_STRING
            params['pos'] = personCounter
            req = requests.get(testURL, params=params, headers=headers)
            soup = BeautifulSoup(req.text, "html.parser")
            myDivs = soup.findAll("div", {"class": "column min20"})
            for div in myDivs:
                for li in div.findAll('li'):
                    file.write(li.text + "\n")
                    authors.append(li.text)
            # Rotate the user agent and pause briefly to stay polite.
            headers['user-agent'] = getRandomUserAgent()
            personCounter = personCounter + increment
            time.sleep(getRandomShortDelay())
            print(personCounter)
    except Exception as e:
        print(personCounter)
        print(e)
    return authors
def getBibtex(authors, params, headers, file):
    """Download the BibTeX page of every author and append it to data.txt.

    Args:
        authors: iterable of "Last, First" author names.
        params, headers: forwarded to every HTTP request; the user-agent
            header is rotated between requests.
        file: unused.  NOTE(review): results go to the hard-coded
            'data.txt' rather than this handle -- presumably an upstream
            oversight, left unchanged to preserve behaviour.
    """
    # One session for all requests (connection reuse).  The original
    # created a fresh Session per author, defeating the point of sessions.
    session = requests.Session()
    author = None  # keeps the error path below from hitting an unbound name
    try:
        for author in authors:
            print(author)
            url = urlBuilder(author)
            req = session.get(url, params=params, headers=headers)
            with open("data.txt", "a") as save:
                save.write(req.text + "\n")
            headers['user-agent'] = getRandomUserAgent()
            time.sleep(getRandomMidDelay())
    except Exception as e:
        print(author)
        print(e)
def urlBuilder(author):
    """Build the dblp BibTeX URL for an author given as "Last, First"."""
    def _encode(part):
        # dblp person URLs encode '.' and '-' as '=' and spaces as '_'.
        return part.replace('.', '=').replace('-', '=').replace(' ', '_')

    pieces = author.split(',')
    surname = _encode(pieces[0])
    forename = _encode(pieces[1]) if len(pieces) > 1 else ''
    # Drop the leading character of the forename part (the space after ',').
    tail = forename[1:]
    return (res.URL_AUTHOR_BIBTEX + str(author[0].lower()) + "/"
            + surname + ":" + str(tail) + ".bib")
def Main():
    """Entry point: collect all author names, then fetch each one's BibTeX.

    Output files (res.FILE_NAME / res.FILE_AUTHORS) are opened in append
    mode next to this script, not in the current working directory.
    """
    name_file = res.FILE_NAME
    author_file = res.FILE_AUTHORS
    # Resolve output paths relative to this script's directory.
    complete_path_data = os.path.join(os.path.dirname(os.path.abspath(__file__)), name_file)
    complete_path_authors = os.path.join(os.path.dirname(os.path.abspath(__file__)), author_file)
    file = open(complete_path_data, "a")
    authors_file = open(complete_path_authors, "a")
    userAgent = getRandomUserAgent()
    url = res.URL_ENTRIES_STRING
    params = {
        'pos' : '1'
    }
    # Browser-like headers to reduce the chance of being blocked.
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.8',
        'upgrade-insecure-requests': '1',
        'user-agent': userAgent
    }
    names = getNames(url, params, headers, authors_file)
    print("finished collecting names")
    # Manual checkpoint before the (very long) BibTeX download phase.
    input("Press Enter to Continue..")
    getBibtex(names, params, headers, file)
    print("finished")
    return None
Main()
| import sys
import os.path
import time
import res
from bs4 import BeautifulSoup
import urllib
import urllib.parse
import urllib.request
import requests
import random
import string
def getRandomShortDelay():
    """Return a short random delay, in seconds, between 2 and 10 inclusive."""
    lo, hi = 2, 10
    return random.randint(lo, hi)
def getRandomMidDelay():
    """Return a medium random delay, in seconds, between 2 and 12 inclusive."""
    lo, hi = 2, 12
    return random.randint(lo, hi)
def getRandomUserAgent():
    """Return a user-agent string picked uniformly from res.USER_AGENT_STRING.

    random.choice is the idiomatic (and equivalent) form of indexing with
    randint(0, len(seq) - 1), which the original did by hand.
    """
    return random.choice(res.USER_AGENT_STRING)
def getNames(url, params, headers, file):
    """Scrape author names from dblp's paginated person index.

    Pages are fetched 300 authors at a time (the site's page size); each
    name is appended to `file` and collected into a list.

    Args:
        url: unused; the entries URL is read from res.URL_ENTRIES_STRING.
        params: query-parameter dict; its 'pos' key is overwritten per page.
        headers: HTTP header dict; 'user-agent' is rotated between pages.
        file: open, writable file object receiving one name per line.

    Returns:
        The list of author names collected so far.  On any error the
        partial list is returned instead of None (the original implicitly
        returned None on failure, which crashed callers iterating it).
    """
    personCounter = 1
    # Position of the last page, observed manually on dblp at scrape time.
    magicEndNumber = 2057018
    # The site shows 300 authors per page, so 'pos' advances in steps of 300.
    increment = 300
    authors = []
    try:
        while personCounter < magicEndNumber:
            print(str(personCounter) + "\n")
            testURL = res.URL_ENTRIES_STRING
            params['pos'] = personCounter
            req = requests.get(testURL, params=params, headers=headers)
            soup = BeautifulSoup(req.text, "html.parser")
            myDivs = soup.findAll("div", {"class": "column min20"})
            for div in myDivs:
                for li in div.findAll('li'):
                    file.write(li.text + "\n")
                    authors.append(li.text)
            # Rotate the user agent and pause briefly to stay polite.
            headers['user-agent'] = getRandomUserAgent()
            personCounter = personCounter + increment
            time.sleep(getRandomShortDelay())
            print(personCounter)
    except Exception as e:
        print(personCounter)
        print(e)
    return authors
def getBibtex(authors, params, headers, file):
    """Download the BibTeX page of every author and append it to data.txt.

    Args:
        authors: iterable of "Last, First" author names.
        params, headers: forwarded to every HTTP request; the user-agent
            header is rotated between requests.
        file: unused.  NOTE(review): results go to the hard-coded
            'data.txt' rather than this handle -- presumably an upstream
            oversight, left unchanged to preserve behaviour.
    """
    # One session for all requests (connection reuse).  The original
    # created a fresh Session per author, defeating the point of sessions.
    session = requests.Session()
    author = None  # keeps the error path below from hitting an unbound name
    try:
        for author in authors:
            print(author)
            url = urlBuilder(author)
            req = session.get(url, params=params, headers=headers)
            with open("data.txt", "a") as save:
                save.write(req.text + "\n")
            headers['user-agent'] = getRandomUserAgent()
            time.sleep(getRandomMidDelay())
    except Exception as e:
        print(author)
        print(e)
def urlBuilder(author):
    """Build the dblp BibTeX URL for an author given as "Last, First"."""
    def _encode(part):
        # dblp person URLs encode '.' and '-' as '=' and spaces as '_'.
        return part.replace('.', '=').replace('-', '=').replace(' ', '_')

    pieces = author.split(',')
    surname = _encode(pieces[0])
    forename = _encode(pieces[1]) if len(pieces) > 1 else ''
    # Drop the leading character of the forename part (the space after ',').
    tail = forename[1:]
    return (res.URL_AUTHOR_BIBTEX + str(author[0].lower()) + "/"
            + surname + ":" + str(tail) + ".bib")
def Main():
    """Entry point: collect all author names, then fetch each one's BibTeX.

    Output files (res.FILE_NAME / res.FILE_AUTHORS) are opened in append
    mode next to this script, not in the current working directory.
    """
    name_file = res.FILE_NAME
    author_file = res.FILE_AUTHORS
    # Resolve output paths relative to this script's directory.
    complete_path_data = os.path.join(os.path.dirname(os.path.abspath(__file__)), name_file)
    complete_path_authors = os.path.join(os.path.dirname(os.path.abspath(__file__)), author_file)
    file = open(complete_path_data, "a")
    authors_file = open(complete_path_authors, "a")
    userAgent = getRandomUserAgent()
    url = res.URL_ENTRIES_STRING
    params = {
        'pos' : '1'
    }
    # Browser-like headers to reduce the chance of being blocked.
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.8',
        'upgrade-insecure-requests': '1',
        'user-agent': userAgent
    }
    names = getNames(url, params, headers, authors_file)
    print("finished collecting names")
    # Manual checkpoint before the (very long) BibTeX download phase.
    input("Press Enter to Continue..")
    getBibtex(names, params, headers, file)
    print("finished")
    return None
Main()
Main()
| en | 0.54326 | # Dont need these anymore as pags are indexed. Upper case alphabet string = "ABCD..YZ" # alpha = string.ascii_uppercase # alphaCounter = 0 # page are incremented by 300 which is the number of authors that are shown per page #print req.text #print(myDivs[0]) #print(myDivs[1]) #return authors #while personCounter < magicEndNumber: #request the page #req = requests.get(url, params=params, headers=headers) #if "." in splitAuthor[0]: # splitFirstName = splitAuthor[0].split(".") # baseURL = baseURL + str(splitAuthor[0]) + "=" # if "" is splitFirstName: # baseURL = baseURL + ":" # else: # baseURL = baseURL + str(splitFirstName[1]) + ":" #else: # baseURL = baseURL + str(splitAuthor[0]) + ":" # #baseURL = baseURL #testString = "O.-Larnnithipong, Nonnarit" #urlBuilder(testString) | 2.893153 | 3 |
Python/examples/bucket_cors.py | zxp19960123/cos-snippets | 21 | 6620736 | # -*- coding=utf-8
from qcloud_cos import CosConfig
from qcloud_cos import CosS3Client
from qcloud_cos import CosServiceError
from qcloud_cos import CosClientError
secret_id = 'COS_SECRETID'      # replace with the user's secret_id
secret_key = 'COS_SECRETKEY'    # replace with the user's secret_key
region = 'COS_REGION'           # replace with the user's region
token = None                    # Token for temporary keys; defaults to None and may be omitted
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token)  # build the client configuration
client = CosS3Client(config)
# Set the bucket's CORS (cross-origin) rules.
def put_bucket_cors():
    """Attach a CORS configuration to the example bucket."""
    #.cssg-snippet-body-start:[put-bucket-cors]
    response = client.put_bucket_cors(
        Bucket='examplebucket-1250000000',
        CORSConfiguration={
            'CORSRule': [
                {
                    'ID': 'string',
                    'MaxAgeSeconds': 100,
                    'AllowedOrigin': [
                        'string',
                    ],
                    'AllowedMethod': [
                        'string',
                    ],
                    'AllowedHeader': [
                        'string',
                    ],
                    'ExposeHeader': [
                        'string',
                    ]
                }
            ]
        },
    )
    #.cssg-snippet-body-end
# Get the bucket's CORS rules.
def get_bucket_cors():
    """Fetch the example bucket's current CORS configuration."""
    #.cssg-snippet-body-start:[get-bucket-cors]
    response = client.get_bucket_cors(
        Bucket='examplebucket-1250000000',
    )
    #.cssg-snippet-body-end
# Delete the bucket's CORS rules.
def delete_bucket_cors():
    """Remove the example bucket's CORS configuration."""
    #.cssg-snippet-body-start:[delete-bucket-cors]
    response = client.delete_bucket_cors(
        Bucket='examplebucket-1250000000',
    )
    #.cssg-snippet-body-end
#.cssg-methods-pragma
# Set the bucket's CORS rules.
put_bucket_cors()
# Get the bucket's CORS rules.
get_bucket_cors()
# Delete the bucket's CORS rules.
delete_bucket_cors()
#.cssg-methods-pragma
from qcloud_cos import CosConfig
from qcloud_cos import CosS3Client
from qcloud_cos import CosServiceError
from qcloud_cos import CosClientError
secret_id = 'COS_SECRETID'      # replace with the user's secret_id
secret_key = 'COS_SECRETKEY'    # replace with the user's secret_key
region = 'COS_REGION'           # replace with the user's region
token = None                    # Token for temporary keys; defaults to None and may be omitted
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token)  # build the client configuration
client = CosS3Client(config)
# Set the bucket's CORS (cross-origin) rules.
def put_bucket_cors():
    """Attach a CORS configuration to the example bucket."""
    #.cssg-snippet-body-start:[put-bucket-cors]
    response = client.put_bucket_cors(
        Bucket='examplebucket-1250000000',
        CORSConfiguration={
            'CORSRule': [
                {
                    'ID': 'string',
                    'MaxAgeSeconds': 100,
                    'AllowedOrigin': [
                        'string',
                    ],
                    'AllowedMethod': [
                        'string',
                    ],
                    'AllowedHeader': [
                        'string',
                    ],
                    'ExposeHeader': [
                        'string',
                    ]
                }
            ]
        },
    )
    #.cssg-snippet-body-end
# Get the bucket's CORS rules.
def get_bucket_cors():
    """Fetch the example bucket's current CORS configuration."""
    #.cssg-snippet-body-start:[get-bucket-cors]
    response = client.get_bucket_cors(
        Bucket='examplebucket-1250000000',
    )
    #.cssg-snippet-body-end
# Delete the bucket's CORS rules.
def delete_bucket_cors():
    """Remove the example bucket's CORS configuration."""
    #.cssg-snippet-body-start:[delete-bucket-cors]
    response = client.delete_bucket_cors(
        Bucket='examplebucket-1250000000',
    )
    #.cssg-snippet-body-end
#.cssg-methods-pragma
# Set the bucket's CORS rules.
put_bucket_cors()
# Get the bucket's CORS rules.
get_bucket_cors()
# Delete the bucket's CORS rules.
delete_bucket_cors()
#.cssg-methods-pragma
translator.py | ovvladimir/Translator | 0 | 6620737 | <filename>translator.py
# https://www.deepl.com/translator
import translators as ts
from tkinter import Tk, Frame, Label, Text, Scrollbar, Button, PhotoImage, END, W, E
import os
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
from textblob import TextBlob
# import pycld2 as cld2
# Resolve the window-icon path relative to this script's location.
icon = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(icon, 'icon.png')
def close(_):
    """Destroy the main window; bound to <Escape> (event argument unused)."""
    root.destroy()
def clipboard():
    """Copy the clipboard contents into the input pane and translate them."""
    try:
        clip_text = root.clipboard_get()
    except BaseException:
        # NOTE(review): BaseException is very broad; tkinter.TclError is the
        # expected failure for an empty clipboard -- confirm and narrow.
        clip_text = "Buffer empty"
    if len(clip_text) < 4:
        clip_text = "Enter text more than two characters"
    text1.delete(1.0, END)
    text1.insert(END, clip_text)
    translate(clip_text)
def window():
    """Translate the text currently typed/shown in the input pane."""
    win_text = text1.get(1.0, END)
    # The pane text always ends with '\n', so < 4 means fewer than 3 chars.
    if len(win_text) < 4:
        win_text = "Enter text more than two characters"
    text1.delete(1.0, END)
    text1.insert(END, win_text)
    translate(win_text)
def translate(get_text):
    """Detect the language of `get_text`, translate it and show the result.

    Russian input is translated to English; everything else to Russian.
    Texts longer than 20 characters go through the `translators` Google
    backend; short ones through TextBlob.
    """
    languages_text = TextBlob(get_text)
    indetect = languages_text.detect_language()
    if indetect != 'ru':
        langout = 'ru'
    else:
        langout = 'en'
    # BUG FIX: the header label claimed "bing" although ts.google is used
    # below for the actual translation.
    lab0['text'] = f'Translate google {indetect}\u2194{langout}'
    lab1['text'] = indetect
    lab2['text'] = langout
    if len(get_text) > 20:
        output = ts.google(get_text, to_language=langout, if_use_cn_host=True)
    else:
        try:
            output = languages_text.translate(from_lang=indetect, to=langout)
        except BaseException:
            # TextBlob raises when source and target languages coincide;
            # fall back to echoing the original text.
            output = languages_text
    text2.delete(1.0, END)
    text2.insert(END, output)
# ---- main window -------------------------------------------------------
root = Tk()
root.title('Translate')
root.iconphoto(True, PhotoImage(file=path))
# Header label spanning both panes; its text is filled in by translate().
lab0 = Label(root, font='"times new roman" 14 bold', fg='white', bg='blue')
lab0.grid(row=0, column=0, columnspan=2, sticky=W + E, pady=2)
# Left pane: input text with scrollbar.
f1 = Frame(root)
f1.grid(row=1, column=0)
text1 = Text(
    f1, font='arial 12', wrap="word", width=50, height=12, padx=10, pady=10)
text1.pack(side='left')
scroll1 = Scrollbar(f1, command=text1.yview)
scroll1.pack(side='right', fill='y')
text1['yscroll'] = scroll1.set
# Force geometry computation so the pane size is known for label placement.
root.update()
x, y = text1.winfo_width(), text1.winfo_height()
# Small language badge in the bottom-right corner of the input pane.
lab1 = Label(text1, fg='blue', bg='white')
lab1.place(x=x - 35, y=y - 35)
# Right pane: output text with scrollbar.
f2 = Frame(root)
f2.grid(row=1, column=1)
text2 = Text(
    f2, font='arial 12', wrap="word", width=50, height=12, bg='gray95', padx=10, pady=10)
text2.pack(side='left')
scroll2 = Scrollbar(f2)
scroll2.pack(side='right', fill='y')
scroll2.config(command=text2.yview)
text2.config(yscrollcommand=scroll2.set)
# Small language badge in the bottom-right corner of the output pane.
lab2 = Label(text2, fg='blue', bg='gray95')
lab2.place(x=x - 35, y=y - 35)
# Action buttons.
bt1 = Button(root, text='Translate window', font='arial 12', fg='blue', command=window)
bt1.grid(row=2, column=0, sticky=W + E)
bt2 = Button(root, text='Translate clipboard', font='arial 12', fg='blue', command=clipboard)
bt2.grid(row=2, column=1, sticky=W + E)
root.bind("<Escape>", close)
root.mainloop()
| <filename>translator.py
# https://www.deepl.com/translator
import translators as ts
from tkinter import Tk, Frame, Label, Text, Scrollbar, Button, PhotoImage, END, W, E
import os
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
from textblob import TextBlob
# import pycld2 as cld2
# Resolve the window-icon path relative to this script's location.
icon = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(icon, 'icon.png')
def close(_):
    """Destroy the main window; bound to <Escape> (event argument unused)."""
    root.destroy()
def clipboard():
    """Copy the clipboard contents into the input pane and translate them."""
    try:
        clip_text = root.clipboard_get()
    except BaseException:
        # NOTE(review): BaseException is very broad; tkinter.TclError is the
        # expected failure for an empty clipboard -- confirm and narrow.
        clip_text = "Buffer empty"
    if len(clip_text) < 4:
        clip_text = "Enter text more than two characters"
    text1.delete(1.0, END)
    text1.insert(END, clip_text)
    translate(clip_text)
def window():
    """Translate the text currently typed/shown in the input pane."""
    win_text = text1.get(1.0, END)
    # The pane text always ends with '\n', so < 4 means fewer than 3 chars.
    if len(win_text) < 4:
        win_text = "Enter text more than two characters"
    text1.delete(1.0, END)
    text1.insert(END, win_text)
    translate(win_text)
def translate(get_text):
    """Detect the language of `get_text`, translate it and show the result.

    Russian input is translated to English; everything else to Russian.
    Texts longer than 20 characters go through the `translators` Google
    backend; short ones through TextBlob.
    """
    languages_text = TextBlob(get_text)
    indetect = languages_text.detect_language()
    if indetect != 'ru':
        langout = 'ru'
    else:
        langout = 'en'
    # BUG FIX: the header label claimed "bing" although ts.google is used
    # below for the actual translation.
    lab0['text'] = f'Translate google {indetect}\u2194{langout}'
    lab1['text'] = indetect
    lab2['text'] = langout
    if len(get_text) > 20:
        output = ts.google(get_text, to_language=langout, if_use_cn_host=True)
    else:
        try:
            output = languages_text.translate(from_lang=indetect, to=langout)
        except BaseException:
            # TextBlob raises when source and target languages coincide;
            # fall back to echoing the original text.
            output = languages_text
    text2.delete(1.0, END)
    text2.insert(END, output)
# ---- main window -------------------------------------------------------
root = Tk()
root.title('Translate')
root.iconphoto(True, PhotoImage(file=path))
# Header label spanning both panes; its text is filled in by translate().
lab0 = Label(root, font='"times new roman" 14 bold', fg='white', bg='blue')
lab0.grid(row=0, column=0, columnspan=2, sticky=W + E, pady=2)
# Left pane: input text with scrollbar.
f1 = Frame(root)
f1.grid(row=1, column=0)
text1 = Text(
    f1, font='arial 12', wrap="word", width=50, height=12, padx=10, pady=10)
text1.pack(side='left')
scroll1 = Scrollbar(f1, command=text1.yview)
scroll1.pack(side='right', fill='y')
text1['yscroll'] = scroll1.set
# Force geometry computation so the pane size is known for label placement.
root.update()
x, y = text1.winfo_width(), text1.winfo_height()
# Small language badge in the bottom-right corner of the input pane.
lab1 = Label(text1, fg='blue', bg='white')
lab1.place(x=x - 35, y=y - 35)
# Right pane: output text with scrollbar.
f2 = Frame(root)
f2.grid(row=1, column=1)
text2 = Text(
    f2, font='arial 12', wrap="word", width=50, height=12, bg='gray95', padx=10, pady=10)
text2.pack(side='left')
scroll2 = Scrollbar(f2)
scroll2.pack(side='right', fill='y')
scroll2.config(command=text2.yview)
text2.config(yscrollcommand=scroll2.set)
# Small language badge in the bottom-right corner of the output pane.
lab2 = Label(text2, fg='blue', bg='gray95')
lab2.place(x=x - 35, y=y - 35)
# Action buttons.
bt1 = Button(root, text='Translate window', font='arial 12', fg='blue', command=window)
bt1.grid(row=2, column=0, sticky=W + E)
bt2 = Button(root, text='Translate clipboard', font='arial 12', fg='blue', command=clipboard)
bt2.grid(row=2, column=1, sticky=W + E)
root.bind("<Escape>", close)
root.mainloop()
| en | 0.281097 | # https://www.deepl.com/translator # import pycld2 as cld2 # _, _, details = cld2.detect(get_text) # indetect = details[0][1] # translators # output = ts.bing(get_text, to_language=langout, if_use_cn_host=False) # textblob # text1.pack_propagate(False) # lab1.pack(side='right', anchor='s') # text2.pack_propagate(False) # lab2.pack(side='right', anchor='s') | 3.03298 | 3 |
scripts/maf2fasta.py | powerpak/pathogendb-comparison | 7 | 6620738 | <reponame>powerpak/pathogendb-comparison<filename>scripts/maf2fasta.py<gh_stars>1-10
#!/usr/bin/env python
"""Convert a .maf alignment into a concatenated FASTA of blocks shared by all genomes."""
import sys

try:
    infile = open(sys.argv[1])
    outfile = open(sys.argv[2], 'w')
    num_genomes = sys.argv[3]
except IndexError:
    sys.stderr.write('''maf2fasta.py
Takes a .maf alignment file and converts it into a concatonated FASTA
of alignments containing all genomes.
USAGE: maf2fasta.py input.maf output.fa number_of_genomes
''')
    # BUG FIX: the original fell through after printing usage and then
    # crashed with a NameError; exit cleanly instead.
    sys.exit(1)

getit = False   # True while inside an 'a' block that covers all genomes
seqDict = {}    # genome name -> concatenated aligned sequence
for line in infile:
    if line.startswith('a '):
        # Only keep alignment blocks that include every genome
        # (consistency fix: use num_genomes instead of re-reading sys.argv).
        getit = line.split()[3] == 'mult=' + num_genomes
    if line.startswith('s ') and getit:
        s, name, score, start, strand, size, seq = line.split()
        name = name.split('.')[0]
        if name not in seqDict:
            seqDict[name] = ''
        seqDict[name] += seq
if len(seqDict) != int(num_genomes):
    sys.stderr.write('No alignments found containing all genomes.\n')
    sys.exit()
for i in seqDict:
    outfile.write('>' + i + '\n')
    # Wrap FASTA sequence lines at 80 characters.
    for j in range(0, len(seqDict[i]), 80):
        outfile.write(seqDict[i][j:j+80] + '\n')
| #!/usr/bin/env python
"""Convert a .maf alignment into a concatenated FASTA of blocks shared by all genomes."""
import sys

try:
    infile = open(sys.argv[1])
    outfile = open(sys.argv[2], 'w')
    num_genomes = sys.argv[3]
except IndexError:
    sys.stderr.write('''maf2fasta.py
Takes a .maf alignment file and converts it into a concatonated FASTA
of alignments containing all genomes.
USAGE: maf2fasta.py input.maf output.fa number_of_genomes
''')
    # BUG FIX: the original fell through after printing usage and then
    # crashed with a NameError; exit cleanly instead.
    sys.exit(1)

getit = False   # True while inside an 'a' block that covers all genomes
seqDict = {}    # genome name -> concatenated aligned sequence
for line in infile:
    if line.startswith('a '):
        # Only keep alignment blocks that include every genome
        # (consistency fix: use num_genomes instead of re-reading sys.argv).
        getit = line.split()[3] == 'mult=' + num_genomes
    if line.startswith('s ') and getit:
        s, name, score, start, strand, size, seq = line.split()
        name = name.split('.')[0]
        if name not in seqDict:
            seqDict[name] = ''
        seqDict[name] += seq
if len(seqDict) != int(num_genomes):
    sys.stderr.write('No alignments found containing all genomes.\n')
    sys.exit()
for i in seqDict:
    outfile.write('>' + i + '\n')
    # Wrap FASTA sequence lines at 80 characters.
    for j in range(0, len(seqDict[i]), 80):
        outfile.write(seqDict[i][j:j+80] + '\n')
examples/hello_world1.py | snewell4/simpleth | 0 | 6620739 | from simpleth import Blockchain, Contract
# Deploy the HelloWorld1 contract from the first local account and print
# the value of its public 'greeting' variable.
sender = Blockchain().address(0)
c = Contract('HelloWorld1')
c.deploy(sender)
greeting = c.get_var('greeting')
print(greeting)
| from simpleth import Blockchain, Contract
# Deploy the HelloWorld1 contract from the first local account and print
# the value of its public 'greeting' variable.
sender = Blockchain().address(0)
c = Contract('HelloWorld1')
c.deploy(sender)
greeting = c.get_var('greeting')
print(greeting)
| none | 1 | 1.78889 | 2 | |
tests/unit/accounts/test_views.py | TroJan/EvalAI | 0 | 6620740 | from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.models import User
from allauth.account.models import EmailAddress
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
class BaseAPITestClass(APITestCase):
    """Shared setup: one verified user, authenticated on a CSRF-checking client."""

    def setUp(self):
        self.client = APIClient(enforce_csrf_checks=True)
        self.user = User.objects.create(
            username='someuser',
            email="<EMAIL>",
            password='<PASSWORD>')
        # Mark the address as primary and verified so allauth treats the
        # account as fully registered.
        EmailAddress.objects.create(
            user=self.user,
            email='<EMAIL>',
            primary=True,
            verified=True)
        self.client.force_authenticate(user=self.user)
class DisableUserTest(BaseAPITestClass):
    """POSTing to accounts:disable_user succeeds for an authenticated user."""

    url = reverse_lazy('accounts:disable_user')

    def test_disable_user(self):
        response = self.client.post(self.url, {})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.models import User
from allauth.account.models import EmailAddress
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
class BaseAPITestClass(APITestCase):
    """Shared setup: one verified user, authenticated on a CSRF-checking client."""

    def setUp(self):
        self.client = APIClient(enforce_csrf_checks=True)
        self.user = User.objects.create(
            username='someuser',
            email="<EMAIL>",
            password='<PASSWORD>')
        # Mark the address as primary and verified so allauth treats the
        # account as fully registered.
        EmailAddress.objects.create(
            user=self.user,
            email='<EMAIL>',
            primary=True,
            verified=True)
        self.client.force_authenticate(user=self.user)
class DisableUserTest(BaseAPITestClass):
    """POSTing to accounts:disable_user succeeds for an authenticated user."""

    url = reverse_lazy('accounts:disable_user')

    def test_disable_user(self):
        response = self.client.post(self.url, {})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| none | 1 | 2.19824 | 2 | |
aldi_evaluation_metrics.py | buds-lab/aldiplusplus | 0 | 6620741 | <filename>aldi_evaluation_metrics.py
from functools import reduce
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
class AldiEvaluationMetrics():
"""
Provides various metrics for the evaluation of discord detectors
"""
def get_roc_auc(self, df_true, df_pred):
"""
Calculates column-wise the accuracy of two datasframes
(same shape) with the same column names.
Keyword arguments:
df_true -- dataframe with the true values/labels
df_pred -- dataframe with the predicted values/labels
labels --
Returns:
df_accuracies -- dataframe with the column-wise accuracies
- index are the column names of the
input dataframe
- column is 'accuracy'
"""
assert df_true.shape == df_pred.shape, (
"the dataframes must have the same shape")
df_roc_auc = pd.DataFrame(columns=['roc_auc'])
# in order to avoid buildings where all `y_true` are either 0 or 1,
# the entire site is evaluated as a whole
try:
df_roc_auc = roc_auc_score(
df_true.values.ravel(), df_pred.values.ravel())
except ValueError as v_error:
print(f'ValueError w/ msg {v_error.message}')
df_roc_auc = 0
return df_roc_auc
def get_accuracy(self, df_true, df_pred):
"""
Calculates column-wise the accuracy of two datasframes
(same shape) with the same column names.
Keyword arguments:
df_true -- dataframe with the true values/labels
df_pred -- dataframe with the predicted values/labels
Returns:
df_accuracies -- dataframe with the column-wise accuracies
- index are the column names of the
input dataframe
- column is 'accuracy'
"""
assert df_true.shape == df_pred.shape, (
"the dataframes must have the same shape")
df_accuracies = pd.DataFrame(index=df_true.columns,
columns=['accuracy'])
for entry in df_true.columns:
single_accuracy = accuracy_score(df_true[entry], df_pred[entry])
df_accuracies.at[entry, 'accuracy'] = single_accuracy
return df_accuracies
def get_heatmap(
self,
list_metric,
list_sites,
aldi_impl,
metric='roc_auc',
meter_type=0,
p_value=0.01
):
"""
Calculates a site-level accuracy heatmap
Keyword arguments:
list_metric -- list with all the performance metric values (e.g., roc_auc, accuracy)
list_sites -- list with all sites
aldi_impl -- string with the algorithm name
metric -- string of chosen metric (e.g., 'roc_auc', 'accuracy')
meter_type -- int of chosen meter
p_value -- float of chosen p-value used for K-S test
"""
df_all_metrics = pd.DataFrame(
{'site_id': list_sites}).set_index('site_id')
# `roc_auc` doesn't analyze each building individually, it stores the
# value for the entire site
if metric == 'roc_auc':
df_all_metrics[aldi_impl] = [list_metric[site_id]
for site_id in list_sites]
else:
df_all_metrics[aldi_impl] = [list_metric[site_id]
[metric].mean() for site_id in list_sites]
df_all_metrics.to_csv(
f'data/results/{metric}_ai-{aldi_impl}_p{p_value}_m{meter_type}.csv')
plt.title(f'{metric} of the different discord detectors', fontsize=18)
fig = sns.heatmap(df_all_metrics, vmin=0, vmax=1,
cmap='YlGnBu').get_figure()
fig.savefig(
f'img/{metric}_heatmap_ai-{aldi_impl}_p{p_value}_m{meter_type}.png', format='PNG')
plt.show()
def get_heatmap_comparison(
self,
list_aldi_impl,
list_sites,
dict_meter_type,
dict_p_value,
metric='roc_auc',
plot_name='baselines',
fontsize=20
):
"""
Compares the accuracy of different ALDI implementations in a heatmap.
Dictionary arguments have their respective 'aldi_impl' as key.
Keyword arguments:
list_aldi_impl -- list with strings of algorithms names
list_sites -- list with all sites common for
dict_meter_type -- list with int of chosen meter (values)
dict_p_value -- list with float of chosen p-value used for K-S test (values)
"""
list_metric = []
for aldi_impl in list_aldi_impl:
p_value = dict_p_value[aldi_impl]
meter_type = dict_meter_type[aldi_impl]
list_metric.append(pd.read_csv(f'data/results/{metric}_ai-{aldi_impl}_p{p_value}_m{meter_type}.csv',
index_col=0))
df_metric = pd.concat(list_metric, axis=1)
fig, ax = plt.subplots(figsize=(16, 16))
sns.heatmap(df_acc[list_aldi_impl],
cmap='YlGnBu', vmin=0, vmax=1, ax=ax)
if metric == 'roc_auc':
metric_str = 'ROC-AUC'
else:
metric_str = metric
ax.set_title(f"{metric_str} on Electricity meters",
fontsize=fontsize * 2)
ax.set_xlabel("Discord detectors", fontsize=fontsize * 2)
ax.set_ylabel("Site ID", fontsize=fontsize * 2)
ax.tick_params(labelsize=fontsize)
cax = plt.gcf().axes[-1]
cax.tick_params(labelsize=fontsize * 2)
plt.xticks(rotation=90)
plt.tight_layout()
fig.savefig(f'img/{metric}_heatmap_{plot_name}.png', format='PNG')
    def get_class_report(
        self,
        df_true,
        df_pred,
        aldi_impl,
        level_name,
        meter_type=0,
        figsize=(10, 10),
        fontsize=40,
        path=''
    ):
        """
        Calculates the classification report and confusion matrix based on
        two dataframes and saves the confusion matrix as a PNG heatmap.

        Keyword arguments:
        df_true -- dataframe with the true values/labels
        df_pred -- dataframe with the predicted values/labels
        aldi_impl -- string with the algorithm name
        level_name -- string with the level of comparison (e.g., all, site_id, building_id)
        meter_type -- int of chosen meter
        figsize -- tuple with the matplotlib figure size
        fontsize -- int font size used for axis labels and cell annotations
        path -- string with relative path; when empty, a default img/ path is used

        Returns:
        cf_report -- classification report generated through scikit-learn
        """
        # Flatten both frames so every cell counts as one observation.
        vector_true = df_true.values.ravel()
        vector_pred = df_pred.values.ravel()
        cm = confusion_matrix(vector_true, vector_pred,
                              labels=np.unique(vector_true))
        cf_report = classification_report(vector_true, vector_pred)
        # Row totals, used to express each confusion-matrix cell as a
        # percentage of its true class.
        cm_sum = np.sum(cm, axis=1, keepdims=True)
        cm_perc = cm / cm_sum.astype(float) * 100
        annot = np.empty_like(cm).astype(str)
        nrows, ncols = cm.shape
        for i in range(nrows):
            for j in range(ncols):
                c = cm[i, j]
                p = cm_perc[i, j]
                if i == j:
                    # `s` is only used by the commented-out annotation
                    # variant kept below for reference.
                    s = cm_sum[i]
                    #annot[i, j] = '%.1f%%\n%d/%d' % (p, c, s)
                    annot[i, j] = '%.1f%%' % (p)
                elif c == 0:
                    # Off-diagonal zero cells stay blank to reduce clutter.
                    annot[i, j] = ''
                else:
                    #annot[i, j] = '%.1f%%\n%d' % (p, c)
                    annot[i, j] = '%.1f%%' % (p)
        cm_perc = pd.DataFrame(cm_perc, index=np.unique(
            vector_true), columns=np.unique(vector_true))
        cm_perc.index.name = 'Actual'
        cm_perc.columns.name = 'Predicted'
        fig, ax = plt.subplots(figsize=figsize)
        sns.heatmap(cm_perc,
                    cmap="YlGnBu",
                    annot=annot,
                    vmin=0,
                    vmax=100,
                    fmt='',
                    ax=ax,
                    annot_kws={"fontsize": fontsize})
        # ax.set_title(f'Confusion matrix aldi implementation\n{aldi_impl} site {level_name}',
        #              fontsize=fontsize+4)
        ax.set_xlabel("Predicted", fontsize=fontsize)
        ax.set_ylabel("Actual", fontsize=fontsize)
        ax.tick_params(labelsize=fontsize)
        # The colorbar is the last axes seaborn/matplotlib added to the figure.
        cax = plt.gcf().axes[-1]
        cax.tick_params(labelsize=fontsize)
        if path == '':
            fig.savefig(f'img/classification_report_ai-{aldi_impl}_{level_name}_m{meter_type}.png',
                        format='PNG')
        else:
            fig.savefig(f'{path}/confusion_matrix_{aldi_impl}_{level_name}.png',
                        format='PNG')
        # Clear the current figure so repeated calls do not accumulate axes.
        plt.clf()
        return cf_report
    def accuracy_barplot(  # TODO: finish
        self,
        list_aldi_impl,
        list_sites,
        dict_meter_type,
        dict_p_value,
        plot_name='baselines',
        fontsize=20
    ):
        """Plot accuracies of different models.

        Not implemented yet: the body is currently only this docstring,
        so calling the method is a no-op that returns None.
        """
| <filename>aldi_evaluation_metrics.py
from functools import reduce
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
class AldiEvaluationMetrics():
    """
    Provides various metrics for the evaluation of discord detectors.

    The plotting helpers write figures to ``img/`` and read/write
    intermediate CSV results under ``data/results/`` (both relative to
    the current working directory).
    """

    def get_roc_auc(self, df_true, df_pred):
        """
        Calculates the ROC-AUC score of two dataframes (same shape)
        with the same column names.

        Keyword arguments:
        df_true -- dataframe with the true values/labels
        df_pred -- dataframe with the predicted values/labels

        Returns:
        df_roc_auc -- float ROC-AUC computed over the flattened frames,
                      or 0 when the score is undefined (e.g., only one
                      class present in df_true)
        """
        assert df_true.shape == df_pred.shape, (
            "the dataframes must have the same shape")
        # in order to avoid buildings where all `y_true` are either 0 or 1,
        # the entire site is evaluated as a whole
        try:
            df_roc_auc = roc_auc_score(
                df_true.values.ravel(), df_pred.values.ravel())
        except ValueError as v_error:
            # BUG FIX: Python 3 exceptions have no `.message` attribute, so
            # the original `v_error.message` raised AttributeError inside
            # this handler; formatting the exception itself prints its text.
            print(f'ValueError w/ msg {v_error}')
            df_roc_auc = 0
        return df_roc_auc

    def get_accuracy(self, df_true, df_pred):
        """
        Calculates column-wise the accuracy of two dataframes
        (same shape) with the same column names.

        Keyword arguments:
        df_true -- dataframe with the true values/labels
        df_pred -- dataframe with the predicted values/labels

        Returns:
        df_accuracies -- dataframe with the column-wise accuracies
                         - index are the column names of the
                           input dataframe
                         - column is 'accuracy'
        """
        assert df_true.shape == df_pred.shape, (
            "the dataframes must have the same shape")
        df_accuracies = pd.DataFrame(index=df_true.columns,
                                     columns=['accuracy'])
        for entry in df_true.columns:
            single_accuracy = accuracy_score(df_true[entry], df_pred[entry])
            df_accuracies.at[entry, 'accuracy'] = single_accuracy
        return df_accuracies

    def get_heatmap(
        self,
        list_metric,
        list_sites,
        aldi_impl,
        metric='roc_auc',
        meter_type=0,
        p_value=0.01
    ):
        """
        Calculates a site-level metric heatmap, persists the per-site
        values as CSV and saves the plot as PNG.

        Keyword arguments:
        list_metric -- list with all the performance metric values (e.g., roc_auc, accuracy)
        list_sites -- list with all sites
        aldi_impl -- string with the algorithm name
        metric -- string of chosen metric (e.g., 'roc_auc', 'accuracy')
        meter_type -- int of chosen meter
        p_value -- float of chosen p-value used for K-S test
        """
        df_all_metrics = pd.DataFrame(
            {'site_id': list_sites}).set_index('site_id')
        # `roc_auc` doesn't analyze each building individually, it stores the
        # value for the entire site
        if metric == 'roc_auc':
            df_all_metrics[aldi_impl] = [list_metric[site_id]
                                         for site_id in list_sites]
        else:
            df_all_metrics[aldi_impl] = [list_metric[site_id]
                                         [metric].mean() for site_id in list_sites]
        # Persisted so get_heatmap_comparison() can re-load these results.
        df_all_metrics.to_csv(
            f'data/results/{metric}_ai-{aldi_impl}_p{p_value}_m{meter_type}.csv')
        plt.title(f'{metric} of the different discord detectors', fontsize=18)
        fig = sns.heatmap(df_all_metrics, vmin=0, vmax=1,
                          cmap='YlGnBu').get_figure()
        fig.savefig(
            f'img/{metric}_heatmap_ai-{aldi_impl}_p{p_value}_m{meter_type}.png', format='PNG')
        plt.show()

    def get_heatmap_comparison(
        self,
        list_aldi_impl,
        list_sites,
        dict_meter_type,
        dict_p_value,
        metric='roc_auc',
        plot_name='baselines',
        fontsize=20
    ):
        """
        Compares the chosen metric of different ALDI implementations in a
        heatmap built from the CSV files written by get_heatmap().
        Dictionary arguments have their respective 'aldi_impl' as key.

        Keyword arguments:
        list_aldi_impl -- list with strings of algorithms names
        list_sites -- list with all sites common for the implementations
        dict_meter_type -- dict with int of chosen meter (values)
        dict_p_value -- dict with float of chosen p-value used for K-S test (values)
        metric -- string of chosen metric (e.g., 'roc_auc', 'accuracy')
        plot_name -- string suffix used in the output PNG file name
        fontsize -- int base font size for labels and ticks
        """
        list_metric = []
        for aldi_impl in list_aldi_impl:
            p_value = dict_p_value[aldi_impl]
            meter_type = dict_meter_type[aldi_impl]
            list_metric.append(pd.read_csv(f'data/results/{metric}_ai-{aldi_impl}_p{p_value}_m{meter_type}.csv',
                                           index_col=0))
        df_metric = pd.concat(list_metric, axis=1)
        fig, ax = plt.subplots(figsize=(16, 16))
        # BUG FIX: the original plotted `df_acc`, a name never defined in
        # this method (NameError); the frame built above is `df_metric`.
        sns.heatmap(df_metric[list_aldi_impl],
                    cmap='YlGnBu', vmin=0, vmax=1, ax=ax)
        if metric == 'roc_auc':
            metric_str = 'ROC-AUC'
        else:
            metric_str = metric
        ax.set_title(f"{metric_str} on Electricity meters",
                     fontsize=fontsize * 2)
        ax.set_xlabel("Discord detectors", fontsize=fontsize * 2)
        ax.set_ylabel("Site ID", fontsize=fontsize * 2)
        ax.tick_params(labelsize=fontsize)
        # The colorbar is the last axes added to the current figure.
        cax = plt.gcf().axes[-1]
        cax.tick_params(labelsize=fontsize * 2)
        plt.xticks(rotation=90)
        plt.tight_layout()
        fig.savefig(f'img/{metric}_heatmap_{plot_name}.png', format='PNG')

    def get_class_report(
        self,
        df_true,
        df_pred,
        aldi_impl,
        level_name,
        meter_type=0,
        figsize=(10, 10),
        fontsize=40,
        path=''
    ):
        """
        Calculates the classification report and confusion matrix based on
        two dataframes and saves the matrix as a PNG heatmap.

        Keyword arguments:
        df_true -- dataframe with the true values/labels
        df_pred -- dataframe with the predicted values/labels
        aldi_impl -- string with the algorithm name
        level_name -- string with the level of comparison (e.g., all, site_id, building_id)
        meter_type -- int of chosen meter
        figsize -- tuple with the matplotlib figure size
        fontsize -- int font size used for labels and annotations
        path -- string with relative path; when empty, a default img/ path is used

        Returns:
        cf_report -- classification report generated through scikit-learn
        """
        # Flatten both frames so every cell counts as one observation.
        vector_true = df_true.values.ravel()
        vector_pred = df_pred.values.ravel()
        cm = confusion_matrix(vector_true, vector_pred,
                              labels=np.unique(vector_true))
        cf_report = classification_report(vector_true, vector_pred)
        # Row totals, used to express each cell as a percentage per class.
        cm_sum = np.sum(cm, axis=1, keepdims=True)
        cm_perc = cm / cm_sum.astype(float) * 100
        annot = np.empty_like(cm).astype(str)
        nrows, ncols = cm.shape
        for i in range(nrows):
            for j in range(ncols):
                c = cm[i, j]
                p = cm_perc[i, j]
                # Off-diagonal zero cells stay blank; everything else shows
                # the percentage. (Removed the dead `s = cm_sum[i]` local and
                # the commented-out count-based annotation variants.)
                if c == 0 and i != j:
                    annot[i, j] = ''
                else:
                    annot[i, j] = '%.1f%%' % (p)
        cm_perc = pd.DataFrame(cm_perc, index=np.unique(
            vector_true), columns=np.unique(vector_true))
        cm_perc.index.name = 'Actual'
        cm_perc.columns.name = 'Predicted'
        fig, ax = plt.subplots(figsize=figsize)
        sns.heatmap(cm_perc,
                    cmap="YlGnBu",
                    annot=annot,
                    vmin=0,
                    vmax=100,
                    fmt='',
                    ax=ax,
                    annot_kws={"fontsize": fontsize})
        ax.set_xlabel("Predicted", fontsize=fontsize)
        ax.set_ylabel("Actual", fontsize=fontsize)
        ax.tick_params(labelsize=fontsize)
        # The colorbar is the last axes added to the current figure.
        cax = plt.gcf().axes[-1]
        cax.tick_params(labelsize=fontsize)
        if path == '':
            fig.savefig(f'img/classification_report_ai-{aldi_impl}_{level_name}_m{meter_type}.png',
                        format='PNG')
        else:
            fig.savefig(f'{path}/confusion_matrix_{aldi_impl}_{level_name}.png',
                        format='PNG')
        # Clear the current figure so repeated calls do not accumulate axes.
        plt.clf()
        return cf_report

    def accuracy_barplot(  # TODO: finish
        self,
        list_aldi_impl,
        list_sites,
        dict_meter_type,
        dict_p_value,
        plot_name='baselines',
        fontsize=20
    ):
        """Plot accuracies of different models (not implemented yet)."""
| en | 0.588454 | Provides various metrics for the evaluation of discord detectors Calculates column-wise the accuracy of two datasframes (same shape) with the same column names. Keyword arguments: df_true -- dataframe with the true values/labels df_pred -- dataframe with the predicted values/labels labels -- Returns: df_accuracies -- dataframe with the column-wise accuracies - index are the column names of the input dataframe - column is 'accuracy' # in order to avoid buildings where all `y_true` are either 0 or 1, # the entire site is evaluated as a whole Calculates column-wise the accuracy of two datasframes (same shape) with the same column names. Keyword arguments: df_true -- dataframe with the true values/labels df_pred -- dataframe with the predicted values/labels Returns: df_accuracies -- dataframe with the column-wise accuracies - index are the column names of the input dataframe - column is 'accuracy' Calculates a site-level accuracy heatmap Keyword arguments: list_metric -- list with all the performance metric values (e.g., roc_auc, accuracy) list_sites -- list with all sites aldi_impl -- string with the algorithm name metric -- string of chosen metric (e.g., 'roc_auc', 'accuracy') meter_type -- int of chosen meter p_value -- float of chosen p-value used for K-S test # `roc_auc` doesn't analyze each building individually, it stores the # value for the entire site Compares the accuracy of different ALDI implementations in a heatmap. Dictionary arguments have their respective 'aldi_impl' as key. 
Keyword arguments: list_aldi_impl -- list with strings of algorithms names list_sites -- list with all sites common for dict_meter_type -- list with int of chosen meter (values) dict_p_value -- list with float of chosen p-value used for K-S test (values) Calculates the classification report and matrix based on two dataframes Keyword arguments: df_true -- dataframe with the true values/labels df_pred -- dataframe with the predicted values/labels aldi_impl -- string with the algorithm name level_name -- string with the level of comparison (e.g., all, site_id, building_id) meter_type -- int of chosen meter path -- string with relative path Returns: cf_report -- classification report generated through scitkit-learn #annot[i, j] = '%.1f%%\n%d/%d' % (p, c, s) #annot[i, j] = '%.1f%%\n%d' % (p, c) # ax.set_title(f'Confusion matrix aldi implementation\n{aldi_impl} site {level_name}', # fontsize=fontsize+4) # TODO: finish Plot accuracies of different models | 3.14199 | 3 |
day2/oddoreven.py | nikhilsamninan/python-files | 0 | 6620742 | <filename>day2/oddoreven.py
# Read an integer from the user and report whether it is even or odd.
n = int(input("Enter the Number"))
# FIX: the original abused a while/break/else construct as a one-shot
# conditional (the loop body always broke after one pass, and the loop's
# else clause fired for odd numbers). A plain if/else expresses the same
# behaviour directly and prints the identical messages.
if n % 2 == 0:
    print("The number is even", n)
else:
    print("The number is odd", n)
# <filename>day2/oddoreven.py  (dataset separator preserved from original)
# Read an integer from the user and report whether it is even or odd.
n = int(input("Enter the Number"))
# FIX: the original abused a while/break/else construct as a one-shot
# conditional (the loop body always broke after one pass, and the loop's
# else clause fired for odd numbers). A plain if/else expresses the same
# behaviour directly and prints the identical messages.
if n % 2 == 0:
    print("The number is even", n)
else:
    print("The number is odd", n)
# | none | 1 | 3.89305 | 4 | |  (dataset metadata preserved from original)
python/PaxHeaders.47482/setup.py | xiaobinglu/openvswitch | 0 | 6620743 | 30 mtime=1439325497.987312464
30 atime=1440176559.437245607
30 ctime=1440177385.033308369
| 30 mtime=1439325497.987312464
30 atime=1440176559.437245607
30 ctime=1440177385.033308369
| none | 1 | 0.943963 | 1 | |
trafficserver_exporter/collector.py | gdvalle/trafficserver_exporter | 29 | 6620744 | """Prometheus collector for Apache Traffic Server's stats_over_http plugin."""
import logging
import re
import time
import requests
import yaml
from prometheus_client import Metric
# Extracts the numeric volume id (group 1) from cache stat keys such as
# "proxy.process.cache.volume_3.bytes_used".
# NOTE(review): the dots are unescaped and therefore match any character;
# harmless for these well-formed keys, but
# r"^proxy\.process\.cache\.volume_([0-9]+)" would be stricter.
CACHE_VOLUMES = re.compile("^proxy.process.cache.volume_([0-9]+)")
# Module-level logger shared by all collector instances (see __init__).
LOG = logging.getLogger(__name__)
def _get_float_value(data, keys):
"""Fetch a value using a list of keys. First present key wins.
Used for backwards compatibility with older ATS versions.
"""
for key in keys:
try:
value = float(data[key])
except KeyError:
pass
else:
return value
raise KeyError("Keys not found in data: {}".format(",".join(keys)))
class StatsPluginCollector(object):
    """Collector for metrics from the stats_over_http plugin."""

    def __init__(self, endpoint, metrics_config_file, max_retries=0, ssl_verify=True):
        """Instantiate a new Collector for ATS stats.

        Keyword arguments:
        endpoint -- URL of the stats_over_http JSON endpoint
        metrics_config_file -- path to the YAML file describing configured
            metrics (name -> documentation/type/values)
        max_retries -- retry count for the underlying HTTP adapter
        ssl_verify -- whether TLS certificates are verified
        """
        self._endpoint = endpoint
        self._ssl_verify = ssl_verify
        self.log = LOG
        self.session = requests.Session()
        # One adapter mounted for both schemes so each honours max_retries.
        http_adapter = requests.adapters.HTTPAdapter(max_retries=max_retries)
        for prefix in ("http://", "https://"):
            self.session.mount(prefix, http_adapter)
        with open(metrics_config_file, "rb") as metrics_file:
            self._metrics = yaml.safe_load(metrics_file.read())

    def get_json(self):
        """Query the ATS stats endpoint, return parsed JSON."""
        r = self.session.get(self._endpoint, verify=self._ssl_verify)
        return r.json()["global"]

    def collect(self):
        """Generator used to gather and return all metrics."""
        start_time = time.time()
        self.log.debug("Beginning collection")
        # Lazy %-style args avoid string formatting when DEBUG is disabled.
        self.log.debug("Fetching JSON: %s", self._endpoint)
        data = self.get_json()
        self.log.debug("Gathering metrics")
        for metric in self.parse_metrics(data):
            yield metric
        self.log.debug("Collection complete")
        yield self._get_scrape_duration_metric(start_time)

    def _get_scrape_duration_metric(self, start_time):
        """Build a gauge holding the duration of the scrape begun at *start_time*."""
        metric = Metric(
            "trafficserver_scrape_duration_seconds",
            "Time the Traffic Server scrape took, in seconds.",
            "gauge",
        )
        metric.add_sample(
            "trafficserver_scrape_duration_seconds",
            value=time.time() - start_time,
            labels={},
        )
        return metric

    def parse_metrics(self, data):
        """Generator for trafficserver metrics."""
        # 1) Metrics declared in the YAML configuration file.
        for metric_name, metric_cfg in self._metrics.items():
            metric = Metric(
                metric_name, metric_cfg["documentation"], metric_cfg["type"]
            )
            for metric_value in metric_cfg["values"]:
                if isinstance(metric_value["value"], float):
                    # Literal constant configured directly in the YAML.
                    value = metric_value["value"]
                else:
                    try:
                        value = float(data[metric_value["value"]])
                    except ValueError:
                        self.log.warning(
                            "Unable to convert metric %s value %s to float",
                            metric_name,
                            metric_value["value"],
                        )
                        # BUG FIX: skip this sample. The original fell through
                        # to add_sample() below with `value` unbound
                        # (NameError) or stale from a previous iteration.
                        continue
                    except KeyError:
                        self.log.debug(
                            "Metric %s value %s not found",
                            metric_name,
                            metric_value["value"],
                        )
                        continue
                metric.add_sample(
                    metric_name, value=value, labels=metric_value["labels"]
                )
            yield metric

        # 2) Request/response byte totals, labelled by traffic source.
        for rt in ("request", "response"):
            metric_name = "trafficserver_{}_size_bytes_total".format(rt)
            metric = Metric(
                metric_name, "{} size in bytes.".format(rt.capitalize()), "counter"
            )
            try:
                user_bytes = _get_float_value(
                    data,
                    [
                        "proxy.process.http.user_agent_total_{}_bytes".format(rt),
                        "proxy.node.http.user_agent_total_{}_bytes".format(rt),
                    ],
                )
            except KeyError:
                # TS v8 with missing total: derive it from header + document.
                header_total = float(
                    data[
                        "proxy.process.http.user_agent_{}_header_total_size".format(rt)
                    ]
                )
                doc_total = float(
                    data[
                        "proxy.process.http.user_agent_{}_document_total_size".format(
                            rt
                        )
                    ]
                )
                user_bytes = header_total + doc_total
            metric.add_sample(
                metric_name,
                value=user_bytes,
                labels={"source": "user_agent", "protocol": "http"},
            )
            try:
                origin_bytes = _get_float_value(
                    data,
                    [
                        "proxy.process.http.origin_server_total_{}_bytes".format(rt),
                        "proxy.node.http.origin_server_total_{}_bytes".format(rt),
                    ],
                )
            except KeyError:
                # TS v8 with missing total: derive it from header + document.
                header_total = float(
                    data[
                        "proxy.process.http.origin_server_{}_header_total_size".format(
                            rt
                        )
                    ]
                )
                doc_total = float(
                    data[
                        "proxy.process.http.origin_server_{}_document_total_size".format(
                            rt
                        )
                    ]
                )
                origin_bytes = header_total + doc_total
            metric.add_sample(
                metric_name,
                value=origin_bytes,
                labels={"source": "origin_server", "protocol": "http"},
            )
            metric.add_sample(
                metric_name,
                value=_get_float_value(
                    data,
                    [
                        "proxy.process.http.parent_proxy_{}_total_bytes".format(rt),
                        "proxy.node.http.parent_proxy_total_{}_bytes".format(rt),
                    ],
                ),
                labels={"source": "parent_proxy", "protocol": "http"},
            )
            yield metric

        #
        # Cache
        #
        # Gather all cache volumes for cache statistics
        volumes = set()
        for key in data:
            if key.startswith("proxy.process.cache.volume_"):
                m = CACHE_VOLUMES.match(key)
                volumes.add(int(m.group(1)))

        # Create all cache volume metrics
        for volume in volumes:
            for metric in self._parse_volume_metrics(data, volume):
                yield metric

    def _parse_volume_metrics(self, data, volume):
        """Yield the per-volume cache metrics for cache volume *volume*."""
        metric = Metric(
            "trafficserver_ram_cache_hits_total", "RAM cache hit count.", "counter"
        )
        # NOTE(review): ram_cache.hits is read from a non-volume-specific key
        # yet labelled with each volume id -- confirm this is intended.
        metric.add_sample(
            "trafficserver_ram_cache_hits_total",
            value=float(data["proxy.process.cache.ram_cache.hits"]),
            labels={"volume": str(volume)},
        )
        yield metric

        metric = Metric(
            "trafficserver_cache_avail_size_bytes_total",
            "Total cache available.",
            "gauge",
        )
        metric.add_sample(
            "trafficserver_cache_avail_size_bytes_total",
            value=float(
                data["proxy.process.cache.volume_{0}.bytes_total".format(volume)]
            ),
            labels={"volume": str(volume)},
        )
        yield metric

        metric = Metric(
            "trafficserver_cache_used_bytes_total",
            "Total cache used in bytes.",
            "gauge",
        )
        metric.add_sample(
            "trafficserver_cache_used_bytes_total",
            value=float(
                data["proxy.process.cache.volume_{0}.bytes_used".format(volume)]
            ),
            labels={"volume": str(volume)},
        )
        yield metric

        metric = Metric(
            "trafficserver_cache_direntries", "Total cache direntries.", "gauge"
        )
        metric.add_sample(
            "trafficserver_cache_direntries",
            value=float(
                data["proxy.process.cache.volume_{0}.direntries.total".format(volume)]
            ),
            labels={"volume": str(volume)},
        )
        yield metric

        metric = Metric(
            "trafficserver_cache_used_direntries", "Cache direntries used.", "gauge"
        )
        metric.add_sample(
            "trafficserver_cache_used_direntries",
            value=float(
                data["proxy.process.cache.volume_{0}.direntries.used".format(volume)]
            ),
            labels={"volume": str(volume)},
        )
        yield metric

        metric = Metric(
            "trafficserver_cache_operations_total", "Cache operation count.", "counter"
        )
        for op in (
            "lookup",
            "read",
            "write",
            "update",
            "remove",
            "evacuate",
            "scan",
            "read_busy",
        ):
            for result in ("success", "failure"):
                k = "proxy.process.cache.volume_{volume}.{op}.{result}".format(
                    volume=volume, op=op, result=result
                )
                metric.add_sample(
                    "trafficserver_cache_operations_total",
                    value=float(data[k]),
                    labels={"volume": str(volume), "operation": op, "result": result},
                )
        yield metric
| """Prometheus collector for Apache Traffic Server's stats_over_http plugin."""
import logging
import re
import time
import requests
import yaml
from prometheus_client import Metric
# Extracts the numeric volume id (group 1) from cache stat keys such as
# "proxy.process.cache.volume_3.bytes_used".
# NOTE(review): the dots are unescaped and therefore match any character;
# harmless for these well-formed keys, but
# r"^proxy\.process\.cache\.volume_([0-9]+)" would be stricter.
CACHE_VOLUMES = re.compile("^proxy.process.cache.volume_([0-9]+)")
# Module-level logger shared by all collector instances (see __init__).
LOG = logging.getLogger(__name__)
def _get_float_value(data, keys):
"""Fetch a value using a list of keys. First present key wins.
Used for backwards compatibility with older ATS versions.
"""
for key in keys:
try:
value = float(data[key])
except KeyError:
pass
else:
return value
raise KeyError("Keys not found in data: {}".format(",".join(keys)))
class StatsPluginCollector(object):
    """Collector for metrics from the stats_over_http plugin."""

    def __init__(self, endpoint, metrics_config_file, max_retries=0, ssl_verify=True):
        """Instantiate a new Collector for ATS stats.

        Keyword arguments:
        endpoint -- URL of the stats_over_http JSON endpoint
        metrics_config_file -- path to the YAML file describing configured
            metrics (name -> documentation/type/values)
        max_retries -- retry count for the underlying HTTP adapter
        ssl_verify -- whether TLS certificates are verified
        """
        self._endpoint = endpoint
        self._ssl_verify = ssl_verify
        self.log = LOG
        self.session = requests.Session()
        # One adapter mounted for both schemes so each honours max_retries.
        http_adapter = requests.adapters.HTTPAdapter(max_retries=max_retries)
        for prefix in ("http://", "https://"):
            self.session.mount(prefix, http_adapter)
        with open(metrics_config_file, "rb") as metrics_file:
            self._metrics = yaml.safe_load(metrics_file.read())

    def get_json(self):
        """Query the ATS stats endpoint, return parsed JSON."""
        r = self.session.get(self._endpoint, verify=self._ssl_verify)
        return r.json()["global"]

    def collect(self):
        """Generator used to gather and return all metrics."""
        start_time = time.time()
        self.log.debug("Beginning collection")
        # Lazy %-style args avoid string formatting when DEBUG is disabled.
        self.log.debug("Fetching JSON: %s", self._endpoint)
        data = self.get_json()
        self.log.debug("Gathering metrics")
        for metric in self.parse_metrics(data):
            yield metric
        self.log.debug("Collection complete")
        yield self._get_scrape_duration_metric(start_time)

    def _get_scrape_duration_metric(self, start_time):
        """Build a gauge holding the duration of the scrape begun at *start_time*."""
        metric = Metric(
            "trafficserver_scrape_duration_seconds",
            "Time the Traffic Server scrape took, in seconds.",
            "gauge",
        )
        metric.add_sample(
            "trafficserver_scrape_duration_seconds",
            value=time.time() - start_time,
            labels={},
        )
        return metric

    def parse_metrics(self, data):
        """Generator for trafficserver metrics."""
        # 1) Metrics declared in the YAML configuration file.
        for metric_name, metric_cfg in self._metrics.items():
            metric = Metric(
                metric_name, metric_cfg["documentation"], metric_cfg["type"]
            )
            for metric_value in metric_cfg["values"]:
                if isinstance(metric_value["value"], float):
                    # Literal constant configured directly in the YAML.
                    value = metric_value["value"]
                else:
                    try:
                        value = float(data[metric_value["value"]])
                    except ValueError:
                        self.log.warning(
                            "Unable to convert metric %s value %s to float",
                            metric_name,
                            metric_value["value"],
                        )
                        # BUG FIX: skip this sample. The original fell through
                        # to add_sample() below with `value` unbound
                        # (NameError) or stale from a previous iteration.
                        continue
                    except KeyError:
                        self.log.debug(
                            "Metric %s value %s not found",
                            metric_name,
                            metric_value["value"],
                        )
                        continue
                metric.add_sample(
                    metric_name, value=value, labels=metric_value["labels"]
                )
            yield metric

        # 2) Request/response byte totals, labelled by traffic source.
        for rt in ("request", "response"):
            metric_name = "trafficserver_{}_size_bytes_total".format(rt)
            metric = Metric(
                metric_name, "{} size in bytes.".format(rt.capitalize()), "counter"
            )
            try:
                user_bytes = _get_float_value(
                    data,
                    [
                        "proxy.process.http.user_agent_total_{}_bytes".format(rt),
                        "proxy.node.http.user_agent_total_{}_bytes".format(rt),
                    ],
                )
            except KeyError:
                # TS v8 with missing total: derive it from header + document.
                header_total = float(
                    data[
                        "proxy.process.http.user_agent_{}_header_total_size".format(rt)
                    ]
                )
                doc_total = float(
                    data[
                        "proxy.process.http.user_agent_{}_document_total_size".format(
                            rt
                        )
                    ]
                )
                user_bytes = header_total + doc_total
            metric.add_sample(
                metric_name,
                value=user_bytes,
                labels={"source": "user_agent", "protocol": "http"},
            )
            try:
                origin_bytes = _get_float_value(
                    data,
                    [
                        "proxy.process.http.origin_server_total_{}_bytes".format(rt),
                        "proxy.node.http.origin_server_total_{}_bytes".format(rt),
                    ],
                )
            except KeyError:
                # TS v8 with missing total: derive it from header + document.
                header_total = float(
                    data[
                        "proxy.process.http.origin_server_{}_header_total_size".format(
                            rt
                        )
                    ]
                )
                doc_total = float(
                    data[
                        "proxy.process.http.origin_server_{}_document_total_size".format(
                            rt
                        )
                    ]
                )
                origin_bytes = header_total + doc_total
            metric.add_sample(
                metric_name,
                value=origin_bytes,
                labels={"source": "origin_server", "protocol": "http"},
            )
            metric.add_sample(
                metric_name,
                value=_get_float_value(
                    data,
                    [
                        "proxy.process.http.parent_proxy_{}_total_bytes".format(rt),
                        "proxy.node.http.parent_proxy_total_{}_bytes".format(rt),
                    ],
                ),
                labels={"source": "parent_proxy", "protocol": "http"},
            )
            yield metric

        #
        # Cache
        #
        # Gather all cache volumes for cache statistics
        volumes = set()
        for key in data:
            if key.startswith("proxy.process.cache.volume_"):
                m = CACHE_VOLUMES.match(key)
                volumes.add(int(m.group(1)))

        # Create all cache volume metrics
        for volume in volumes:
            for metric in self._parse_volume_metrics(data, volume):
                yield metric

    def _parse_volume_metrics(self, data, volume):
        """Yield the per-volume cache metrics for cache volume *volume*."""
        metric = Metric(
            "trafficserver_ram_cache_hits_total", "RAM cache hit count.", "counter"
        )
        # NOTE(review): ram_cache.hits is read from a non-volume-specific key
        # yet labelled with each volume id -- confirm this is intended.
        metric.add_sample(
            "trafficserver_ram_cache_hits_total",
            value=float(data["proxy.process.cache.ram_cache.hits"]),
            labels={"volume": str(volume)},
        )
        yield metric

        metric = Metric(
            "trafficserver_cache_avail_size_bytes_total",
            "Total cache available.",
            "gauge",
        )
        metric.add_sample(
            "trafficserver_cache_avail_size_bytes_total",
            value=float(
                data["proxy.process.cache.volume_{0}.bytes_total".format(volume)]
            ),
            labels={"volume": str(volume)},
        )
        yield metric

        metric = Metric(
            "trafficserver_cache_used_bytes_total",
            "Total cache used in bytes.",
            "gauge",
        )
        metric.add_sample(
            "trafficserver_cache_used_bytes_total",
            value=float(
                data["proxy.process.cache.volume_{0}.bytes_used".format(volume)]
            ),
            labels={"volume": str(volume)},
        )
        yield metric

        metric = Metric(
            "trafficserver_cache_direntries", "Total cache direntries.", "gauge"
        )
        metric.add_sample(
            "trafficserver_cache_direntries",
            value=float(
                data["proxy.process.cache.volume_{0}.direntries.total".format(volume)]
            ),
            labels={"volume": str(volume)},
        )
        yield metric

        metric = Metric(
            "trafficserver_cache_used_direntries", "Cache direntries used.", "gauge"
        )
        metric.add_sample(
            "trafficserver_cache_used_direntries",
            value=float(
                data["proxy.process.cache.volume_{0}.direntries.used".format(volume)]
            ),
            labels={"volume": str(volume)},
        )
        yield metric

        metric = Metric(
            "trafficserver_cache_operations_total", "Cache operation count.", "counter"
        )
        for op in (
            "lookup",
            "read",
            "write",
            "update",
            "remove",
            "evacuate",
            "scan",
            "read_busy",
        ):
            for result in ("success", "failure"):
                k = "proxy.process.cache.volume_{volume}.{op}.{result}".format(
                    volume=volume, op=op, result=result
                )
                metric.add_sample(
                    "trafficserver_cache_operations_total",
                    value=float(data[k]),
                    labels={"volume": str(volume), "operation": op, "result": result},
                )
        yield metric
| en | 0.690024 | Prometheus collector for Apache Traffic Server's stats_over_http plugin. Fetch a value using a list of keys. First present key wins. Used for backwards compatibility with older ATS versions. Collector for metrics from the stats_over_http plugin. Instantiate a new Collector for ATS stats. Query the ATS stats endpoint, return parsed JSON. Generator used to gather and return all metrics. Generator for trafficserver metrics. # TS v8 with missing total. # TS v8 with missing total. # # Cache # # Gather all cache volumes for cache statistics # Create all cache volume metrics | 2.62534 | 3 |
setup.py | buxxi/trac-ticketprinter | 0 | 6620745 | from setuptools import setup
# Distribution metadata for the Trac ticket-printer plugin.
PACKAGE = 'TracTicketPrinter'
VERSION = '0.1'
# `setup` comes from the `from setuptools import setup` import at the top
# of this file.
setup(name=PACKAGE,
      version=VERSION,
      description="Generate a HTML with tickets ready for printing that can be cut out to use on a Scrum board",
      author="<NAME>",
      author_email="<EMAIL>",
      # Single package containing the plugin code.
      packages=['ticketprinter'],
      # Registers the package as a Trac plugin entry point.
      entry_points={'trac.plugins': '%s = ticketprinter' % PACKAGE},
      # Ship the HTML templates together with the package.
      package_data={'ticketprinter' : ['templates/*.html']}
      )
| from setuptools import setup
# Distribution metadata for the Trac ticket-printer plugin.
PACKAGE = 'TracTicketPrinter'
VERSION = '0.1'
# `setup` comes from the `from setuptools import setup` import at the top
# of this file.
setup(name=PACKAGE,
      version=VERSION,
      description="Generate a HTML with tickets ready for printing that can be cut out to use on a Scrum board",
      author="<NAME>",
      author_email="<EMAIL>",
      # Single package containing the plugin code.
      packages=['ticketprinter'],
      # Registers the package as a Trac plugin entry point.
      entry_points={'trac.plugins': '%s = ticketprinter' % PACKAGE},
      # Ship the HTML templates together with the package.
      package_data={'ticketprinter' : ['templates/*.html']}
      )
| none | 1 | 1.473148 | 1 | |
project/forms/event_suggestion.py | DanielGrams/gsevp | 1 | 6620746 | <reponame>DanielGrams/gsevp
from flask_babelex import lazy_gettext
from flask_wtf import FlaskForm
from wtforms import (
BooleanField,
SelectField,
SelectMultipleField,
StringField,
SubmitField,
)
from wtforms.fields.html5 import EmailField, TelField
from wtforms.validators import DataRequired, Optional
from project.forms.common import get_accept_tos_markup
from project.forms.event import EventDateDefinitionFormMixin, SharedEventForm
from project.forms.widgets import TagSelectField
from project.models import (
EventAttendanceMode,
EventRejectionReason,
EventTargetGroupOrigin,
Image,
)
class CreateEventSuggestionForm(SharedEventForm, EventDateDefinitionFormMixin):
    """Public form used to suggest a new event for moderator review.

    NOTE: WTForms renders fields in declaration order, so the field
    definitions below must not be reordered.
    """
    # Submitter's name, shown to the reviewers (required).
    contact_name = StringField(
        lazy_gettext("Name"),
        validators=[DataRequired()],
        description=lazy_gettext("Please enter your name for the review."),
    )
    # Optional phone contact; either phone or email is expected.
    contact_phone = TelField(
        lazy_gettext("Phone"),
        validators=[Optional()],
        description=lazy_gettext(
            "Please enter your phone number or email address for the review."
        ),
    )
    # Optional email contact; either email or phone is expected.
    contact_email = EmailField(
        lazy_gettext("Email"),
        validators=[Optional()],
        description=lazy_gettext(
            "Please enter your email address or phone number for the review."
        ),
    )
    # Opt-in for an email notification once the review is finished.
    contact_email_notice = BooleanField(
        lazy_gettext("I would like to be notified by email after the review"),
        validators=[Optional()],
    )
    # Venue: an existing place id or free text (see populate_obj below).
    event_place_id = TagSelectField(
        lazy_gettext("Place"),
        validators=[DataRequired()],
        description=lazy_gettext(
            "Choose where the event takes place. If the venue is not yet in the list, just enter it."
        ),
    )
    # Extra free text appended to a free-text place value.
    event_place_id_suffix = StringField(
        validators=[Optional()],
    )
    # Organizer: an existing organizer id or free text (see populate_obj).
    organizer_id = TagSelectField(
        lazy_gettext("Organizer"),
        validators=[DataRequired()],
        description=lazy_gettext(
            "Select the organizer. If the organizer is not yet on the list, just enter it."
        ),
    )
    # Extra free text appended to a free-text organizer value.
    organizer_id_suffix = StringField(
        validators=[Optional()],
    )
    # Multi-select of category ids, coerced to int.
    category_ids = SelectMultipleField(
        lazy_gettext("Categories"),
        validators=[Optional()],
        coerce=int,
        description=lazy_gettext("Choose categories that fit the event."),
    )
    # Terms-of-service checkbox; its label is replaced with markup in __init__.
    accept_tos = BooleanField(validators=[DataRequired()])
    submit = SubmitField(lazy_gettext("Create event suggestion"))
    def __init__(self, **kwargs):
        """Build the form and inject the linked terms-of-service label."""
        super(CreateEventSuggestionForm, self).__init__(**kwargs)
        self._fields["accept_tos"].label.text = get_accept_tos_markup()
    def populate_obj(self, obj):
        """Copy form data onto *obj*, with special cases.

        Free-text place/organizer values are stored in the *_text attribute
        (with the *_id foreign key cleared), enum-backed fields are coerced
        to their enum types, and everything else uses the default WTForms
        per-field population.
        """
        for name, field in self._fields.items():
            # Ensure an Image container exists before the photo field is
            # populated (presumably the field writes into it -- not visible
            # here; confirm against the photo field implementation).
            if name == "photo" and not obj.photo:
                obj.photo = Image()
            if name == "event_place_id" and self.event_place_id.is_free_text():
                # Free-text venue: keep the raw text, clear the foreign key.
                obj.event_place_text = self.event_place_id.data
                obj.event_place_id = None
                if self.event_place_id_suffix.data:
                    obj.event_place_text = (
                        obj.event_place_text + ", " + self.event_place_id_suffix.data
                    )
            elif name == "organizer_id" and self.organizer_id.is_free_text():
                # Free-text organizer: same pattern as the venue above.
                obj.organizer_text = self.organizer_id.data
                obj.organizer_id = None
                if self.organizer_id_suffix.data:
                    obj.organizer_text = (
                        obj.organizer_text + ", " + self.organizer_id_suffix.data
                    )
            elif name == "target_group_origin":
                # Field declared on a base class; coerce its value to the enum.
                obj.target_group_origin = EventTargetGroupOrigin(
                    self.target_group_origin.data
                )
            elif name == "attendance_mode":
                # Field declared on a base class; coerce its value to the enum.
                obj.attendance_mode = EventAttendanceMode(self.attendance_mode.data)
            else:
                # Default WTForms behaviour for all remaining fields.
                field.populate_obj(obj, name)
    def validate(self):
        """Run the standard validators plus the date-definition check."""
        result = super().validate()
        if not self.validate_date_definition():
            result = False
        return result
class RejectEventSuggestionForm(FlaskForm):
    """Moderator form for rejecting an event suggestion with a reason."""
    # NOTE(review): "rejection_resaon" is a typo for "rejection_reason", but
    # renaming the field would break templates/handlers that reference it
    # by name, so it is only flagged here.
    rejection_resaon = SelectField(
        lazy_gettext("Rejection reason"),
        coerce=int,
        # Value 0 stands for "no reason given"; the remaining choices map to
        # the EventRejectionReason enum values.
        choices=[
            (
                0,
                lazy_gettext("EventRejectionReason.noreason"),
            ),
            (
                int(EventRejectionReason.duplicate),
                lazy_gettext("EventRejectionReason.duplicate"),
            ),
            (
                int(EventRejectionReason.untrustworthy),
                lazy_gettext("EventRejectionReason.untrustworthy"),
            ),
            (
                int(EventRejectionReason.illegal),
                lazy_gettext("EventRejectionReason.illegal"),
            ),
        ],
    )
    submit = SubmitField(lazy_gettext("Reject event suggestion"))
| from flask_babelex import lazy_gettext
from flask_wtf import FlaskForm
from wtforms import (
BooleanField,
SelectField,
SelectMultipleField,
StringField,
SubmitField,
)
from wtforms.fields.html5 import EmailField, TelField
from wtforms.validators import DataRequired, Optional
from project.forms.common import get_accept_tos_markup
from project.forms.event import EventDateDefinitionFormMixin, SharedEventForm
from project.forms.widgets import TagSelectField
from project.models import (
EventAttendanceMode,
EventRejectionReason,
EventTargetGroupOrigin,
Image,
)
class CreateEventSuggestionForm(SharedEventForm, EventDateDefinitionFormMixin):
    """Public form used to suggest a new event for moderator review.

    NOTE: WTForms renders fields in declaration order, so the field
    definitions below must not be reordered.
    """
    # Submitter's name, shown to the reviewers (required).
    contact_name = StringField(
        lazy_gettext("Name"),
        validators=[DataRequired()],
        description=lazy_gettext("Please enter your name for the review."),
    )
    # Optional phone contact; either phone or email is expected.
    contact_phone = TelField(
        lazy_gettext("Phone"),
        validators=[Optional()],
        description=lazy_gettext(
            "Please enter your phone number or email address for the review."
        ),
    )
    # Optional email contact; either email or phone is expected.
    contact_email = EmailField(
        lazy_gettext("Email"),
        validators=[Optional()],
        description=lazy_gettext(
            "Please enter your email address or phone number for the review."
        ),
    )
    # Opt-in for an email notification once the review is finished.
    contact_email_notice = BooleanField(
        lazy_gettext("I would like to be notified by email after the review"),
        validators=[Optional()],
    )
    # Venue: an existing place id or free text (see populate_obj below).
    event_place_id = TagSelectField(
        lazy_gettext("Place"),
        validators=[DataRequired()],
        description=lazy_gettext(
            "Choose where the event takes place. If the venue is not yet in the list, just enter it."
        ),
    )
    # Extra free text appended to a free-text place value.
    event_place_id_suffix = StringField(
        validators=[Optional()],
    )
    # Organizer: an existing organizer id or free text (see populate_obj).
    organizer_id = TagSelectField(
        lazy_gettext("Organizer"),
        validators=[DataRequired()],
        description=lazy_gettext(
            "Select the organizer. If the organizer is not yet on the list, just enter it."
        ),
    )
    # Extra free text appended to a free-text organizer value.
    organizer_id_suffix = StringField(
        validators=[Optional()],
    )
    # Multi-select of category ids, coerced to int.
    category_ids = SelectMultipleField(
        lazy_gettext("Categories"),
        validators=[Optional()],
        coerce=int,
        description=lazy_gettext("Choose categories that fit the event."),
    )
    # Terms-of-service checkbox; its label is replaced with markup in __init__.
    accept_tos = BooleanField(validators=[DataRequired()])
    submit = SubmitField(lazy_gettext("Create event suggestion"))
    def __init__(self, **kwargs):
        """Build the form and inject the linked terms-of-service label."""
        super(CreateEventSuggestionForm, self).__init__(**kwargs)
        self._fields["accept_tos"].label.text = get_accept_tos_markup()
    def populate_obj(self, obj):
        """Copy form data onto *obj*, with special cases.

        Free-text place/organizer values are stored in the *_text attribute
        (with the *_id foreign key cleared), enum-backed fields are coerced
        to their enum types, and everything else uses the default WTForms
        per-field population.
        """
        for name, field in self._fields.items():
            # Ensure an Image container exists before the photo field is
            # populated (presumably the field writes into it -- not visible
            # here; confirm against the photo field implementation).
            if name == "photo" and not obj.photo:
                obj.photo = Image()
            if name == "event_place_id" and self.event_place_id.is_free_text():
                # Free-text venue: keep the raw text, clear the foreign key.
                obj.event_place_text = self.event_place_id.data
                obj.event_place_id = None
                if self.event_place_id_suffix.data:
                    obj.event_place_text = (
                        obj.event_place_text + ", " + self.event_place_id_suffix.data
                    )
            elif name == "organizer_id" and self.organizer_id.is_free_text():
                # Free-text organizer: same pattern as the venue above.
                obj.organizer_text = self.organizer_id.data
                obj.organizer_id = None
                if self.organizer_id_suffix.data:
                    obj.organizer_text = (
                        obj.organizer_text + ", " + self.organizer_id_suffix.data
                    )
            elif name == "target_group_origin":
                # Field declared on a base class; coerce its value to the enum.
                obj.target_group_origin = EventTargetGroupOrigin(
                    self.target_group_origin.data
                )
            elif name == "attendance_mode":
                # Field declared on a base class; coerce its value to the enum.
                obj.attendance_mode = EventAttendanceMode(self.attendance_mode.data)
            else:
                # Default WTForms behaviour for all remaining fields.
                field.populate_obj(obj, name)
    def validate(self):
        """Run the standard validators plus the date-definition check."""
        result = super().validate()
        if not self.validate_date_definition():
            result = False
        return result
class RejectEventSuggestionForm(FlaskForm):
rejection_resaon = SelectField(
lazy_gettext("Rejection reason"),
coerce=int,
choices=[
(
0,
lazy_gettext("EventRejectionReason.noreason"),
),
(
int(EventRejectionReason.duplicate),
lazy_gettext("EventRejectionReason.duplicate"),
),
(
int(EventRejectionReason.untrustworthy),
lazy_gettext("EventRejectionReason.untrustworthy"),
),
(
int(EventRejectionReason.illegal),
lazy_gettext("EventRejectionReason.illegal"),
),
],
)
submit = SubmitField(lazy_gettext("Reject event suggestion")) | none | 1 | 2.476353 | 2 | |
video_process.py | rand0wn/Enhanced-Video-Search-CNN-RNN- | 4 | 6620747 | <filename>video_process.py<gh_stars>1-10
"""Class for training video for search and performing other operations."""
import nltk
import numpy as np
import cv2
import pandas as pd
import run_inference
from nltk.corpus import stopwords
from sklearn.metrics import mean_squared_error as mse
import random
import Config
from nltk import word_tokenize, pos_tag
from nltk.corpus import wordnet as wn
# Video Data
vd_df = pd.read_csv(Config.vd_data+'/vd_data.csv')
len_vd_df = vd_df.shape[0]
class Video(object):
def __init__(self,
filenames,
frame_frequency,
audio_or_sub):
# Class Parameters
self.filenames = filenames.split(',') # Video Files
self.frame_frequency = frame_frequency # Number of frames to consider
self.audio_or_sub = audio_or_sub # With audio or subs
# Train Videos and Add Data to DataFrame
def train_videos(self):
global vd_df
global len_vd_df
for filename in self.filenames:
# init for video
vd_cap = cv2.VideoCapture(filename)
frame_count = 0
video_name = filename.split('/')[len(filename.split('/'))-1]
while vd_cap.isOpened():
ret, frame = vd_cap.read()
# Check for video
if ret:
frame_count = frame_count + 1
# For every n secs, 24 fps
curr_frame = random.randint(1, self.frame_frequency * 24) # Take a random frame between interval
if frame_count % curr_frame == 0:
frame_img_name = video_name + '_' + str(frame_count/24) + '.jpg'
frame_img_loc = Config.vd_data + '/frames/' + frame_img_name
cv2.imwrite(frame_img_loc, frame)
vd_df = vd_df.append({'frame': frame_img_name, 'video': video_name, 'time': frame_count/24, 'prob': 0, 'caps': 0, 'words': 0, 'tags': 0, 'subs': 0}, ignore_index=True)
else:
# List of All Video Frames
frame_list = [Config.vd_data+'/frames/' + x for x in list(vd_df['frame'][len_vd_df:,])]
# Store Show and Tell Model Captions and Prob
file_input = [Config.model_checkpoint, Config.model_vocab, ",".join(frame_list)]
prob, cap = run_inference.img_captions(file_input)
vd_df.iloc[len_vd_df:, vd_df.columns.get_loc("prob")] = prob
vd_df.iloc[len_vd_df:, vd_df.columns.get_loc("caps")] = cap
for i in range(0, len(cap)):
words = nltk.re.sub("[^a-zA-Z]", " ", str(cap[i]))
words = list(set(words.split(' ')))
stop = set(stopwords.words("english"))
rem_words = [w for w in words if not w in stop and len(w) > 2]
vd_df.iloc[len_vd_df+i, vd_df.columns.get_loc("words")] = str(rem_words)
# Update Final Changes to CSV
vd_df.to_csv(Config.vd_data + '/vd_data.csv', index=False)
len_vd_df = vd_df.shape[0] # Update Length
break
return "Training Completed"
# Image Frame Prob
def _map_frame_prob(str_frame_prob):
return map(float, str_frame_prob[1:len(str_frame_prob) - 1].split(', '))
# Frame Prob Indexing
def _frame_indexing(image_idx, prob, name):
image_matches = {}
image_match = None
if image_idx == -1:
frame_prob = prob
image_match = image_matches[name] = {}
else:
frame_prob = _map_frame_prob(vd_df['prob'][image_idx])
image_match = image_matches[vd_df['frame'][image_idx]] = {}
for i in range(0, len(vd_df) - 1):
mse_prob = mse(frame_prob, _map_frame_prob(vd_df['prob'][i + 1]))
image_match[mse_prob] = {}
image_match[mse_prob]['img'] = vd_df['frame'][i + 1]
return image_matches
# External Image Indexing
def _ext_img_idx(path):
file_input = [Config.model_checkpoint, Config.model_vocab, path]
prob, cap = run_inference.img_captions(file_input)
img_prob = _map_frame_prob(prob[0])
print cap
return _frame_indexing(-1, img_prob, path.split('/')[len(path.split('/'))-1])
# Sentence and Word Similarity
def penn_to_wn(tag):
""" Convert between a Penn Treebank tag to a simplified Wordnet tag """
if tag.startswith('N'):
return 'n'
if tag.startswith('V'):
return 'v'
if tag.startswith('J'):
return 'a'
if tag.startswith('R'):
return 'r'
return None
def tagged_to_synset(word, tag):
wn_tag = penn_to_wn(tag)
if wn_tag is None:
return None
try:
return wn.synsets(word, wn_tag)[0]
except:
return None
def sentence_similarity(sentence1, sentence2):
""" compute the sentence similarity using Wordnet """
# Tokenize and tag
sentence1 = pos_tag(word_tokenize(sentence1))
sentence2 = pos_tag(word_tokenize(sentence2))
# Get the synsets for the tagged words
synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]
synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]
# Filter out the Nones
synsets1 = [ss for ss in synsets1 if ss]
synsets2 = [ss for ss in synsets2 if ss]
score, count = 0.0, 0
# For each word in the first sentence
for synset in synsets1:
# Get the similarity value of the most similar word in the other sentence
best_score = max([synset.path_similarity(ss) for ss in synsets2])
# Check that the similarity could have been computed
if best_score is not None:
score += best_score
count += 1
# Average the values
try:
score /= count
except:
pass
return score
# Text list to sent
def _map_sent(str_sent):
if '[' in str_sent:
return nltk.re.sub("[^a-zA-Z]", " ", str_sent[1:len(str_sent) - 1])
else:
return nltk.re.sub("[^a-zA-Z]", " ", str_sent)
# Text Indexing
def _text_idx(text):
sent_match = {}
for i in range(0, len(vd_df) - 1):
sent_prob = sentence_similarity(_map_sent(text), _map_sent(vd_df['words'][i]))
sent_match[1 - sent_prob] = {}
sent_match[1 - sent_prob]['img'] = vd_df['frame'][i]
return sent_match
| <filename>video_process.py<gh_stars>1-10
"""Class for training video for search and performing other operations."""
import nltk
import numpy as np
import cv2
import pandas as pd
import run_inference
from nltk.corpus import stopwords
from sklearn.metrics import mean_squared_error as mse
import random
import Config
from nltk import word_tokenize, pos_tag
from nltk.corpus import wordnet as wn
# Video Data
vd_df = pd.read_csv(Config.vd_data+'/vd_data.csv')
len_vd_df = vd_df.shape[0]
class Video(object):
def __init__(self,
filenames,
frame_frequency,
audio_or_sub):
# Class Parameters
self.filenames = filenames.split(',') # Video Files
self.frame_frequency = frame_frequency # Number of frames to consider
self.audio_or_sub = audio_or_sub # With audio or subs
# Train Videos and Add Data to DataFrame
def train_videos(self):
global vd_df
global len_vd_df
for filename in self.filenames:
# init for video
vd_cap = cv2.VideoCapture(filename)
frame_count = 0
video_name = filename.split('/')[len(filename.split('/'))-1]
while vd_cap.isOpened():
ret, frame = vd_cap.read()
# Check for video
if ret:
frame_count = frame_count + 1
# For every n secs, 24 fps
curr_frame = random.randint(1, self.frame_frequency * 24) # Take a random frame between interval
if frame_count % curr_frame == 0:
frame_img_name = video_name + '_' + str(frame_count/24) + '.jpg'
frame_img_loc = Config.vd_data + '/frames/' + frame_img_name
cv2.imwrite(frame_img_loc, frame)
vd_df = vd_df.append({'frame': frame_img_name, 'video': video_name, 'time': frame_count/24, 'prob': 0, 'caps': 0, 'words': 0, 'tags': 0, 'subs': 0}, ignore_index=True)
else:
# List of All Video Frames
frame_list = [Config.vd_data+'/frames/' + x for x in list(vd_df['frame'][len_vd_df:,])]
# Store Show and Tell Model Captions and Prob
file_input = [Config.model_checkpoint, Config.model_vocab, ",".join(frame_list)]
prob, cap = run_inference.img_captions(file_input)
vd_df.iloc[len_vd_df:, vd_df.columns.get_loc("prob")] = prob
vd_df.iloc[len_vd_df:, vd_df.columns.get_loc("caps")] = cap
for i in range(0, len(cap)):
words = nltk.re.sub("[^a-zA-Z]", " ", str(cap[i]))
words = list(set(words.split(' ')))
stop = set(stopwords.words("english"))
rem_words = [w for w in words if not w in stop and len(w) > 2]
vd_df.iloc[len_vd_df+i, vd_df.columns.get_loc("words")] = str(rem_words)
# Update Final Changes to CSV
vd_df.to_csv(Config.vd_data + '/vd_data.csv', index=False)
len_vd_df = vd_df.shape[0] # Update Length
break
return "Training Completed"
# Image Frame Prob
def _map_frame_prob(str_frame_prob):
return map(float, str_frame_prob[1:len(str_frame_prob) - 1].split(', '))
# Frame Prob Indexing
def _frame_indexing(image_idx, prob, name):
image_matches = {}
image_match = None
if image_idx == -1:
frame_prob = prob
image_match = image_matches[name] = {}
else:
frame_prob = _map_frame_prob(vd_df['prob'][image_idx])
image_match = image_matches[vd_df['frame'][image_idx]] = {}
for i in range(0, len(vd_df) - 1):
mse_prob = mse(frame_prob, _map_frame_prob(vd_df['prob'][i + 1]))
image_match[mse_prob] = {}
image_match[mse_prob]['img'] = vd_df['frame'][i + 1]
return image_matches
# External Image Indexing
def _ext_img_idx(path):
file_input = [Config.model_checkpoint, Config.model_vocab, path]
prob, cap = run_inference.img_captions(file_input)
img_prob = _map_frame_prob(prob[0])
print cap
return _frame_indexing(-1, img_prob, path.split('/')[len(path.split('/'))-1])
# Sentence and Word Similarity
def penn_to_wn(tag):
""" Convert between a Penn Treebank tag to a simplified Wordnet tag """
if tag.startswith('N'):
return 'n'
if tag.startswith('V'):
return 'v'
if tag.startswith('J'):
return 'a'
if tag.startswith('R'):
return 'r'
return None
def tagged_to_synset(word, tag):
wn_tag = penn_to_wn(tag)
if wn_tag is None:
return None
try:
return wn.synsets(word, wn_tag)[0]
except:
return None
def sentence_similarity(sentence1, sentence2):
""" compute the sentence similarity using Wordnet """
# Tokenize and tag
sentence1 = pos_tag(word_tokenize(sentence1))
sentence2 = pos_tag(word_tokenize(sentence2))
# Get the synsets for the tagged words
synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]
synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]
# Filter out the Nones
synsets1 = [ss for ss in synsets1 if ss]
synsets2 = [ss for ss in synsets2 if ss]
score, count = 0.0, 0
# For each word in the first sentence
for synset in synsets1:
# Get the similarity value of the most similar word in the other sentence
best_score = max([synset.path_similarity(ss) for ss in synsets2])
# Check that the similarity could have been computed
if best_score is not None:
score += best_score
count += 1
# Average the values
try:
score /= count
except:
pass
return score
# Text list to sent
def _map_sent(str_sent):
if '[' in str_sent:
return nltk.re.sub("[^a-zA-Z]", " ", str_sent[1:len(str_sent) - 1])
else:
return nltk.re.sub("[^a-zA-Z]", " ", str_sent)
# Text Indexing
def _text_idx(text):
sent_match = {}
for i in range(0, len(vd_df) - 1):
sent_prob = sentence_similarity(_map_sent(text), _map_sent(vd_df['words'][i]))
sent_match[1 - sent_prob] = {}
sent_match[1 - sent_prob]['img'] = vd_df['frame'][i]
return sent_match
| en | 0.79028 | Class for training video for search and performing other operations. # Video Data # Class Parameters # Video Files # Number of frames to consider # With audio or subs # Train Videos and Add Data to DataFrame # init for video # Check for video # For every n secs, 24 fps # Take a random frame between interval # List of All Video Frames # Store Show and Tell Model Captions and Prob # Update Final Changes to CSV # Update Length # Image Frame Prob # Frame Prob Indexing # External Image Indexing # Sentence and Word Similarity Convert between a Penn Treebank tag to a simplified Wordnet tag compute the sentence similarity using Wordnet # Tokenize and tag # Get the synsets for the tagged words # Filter out the Nones # For each word in the first sentence # Get the similarity value of the most similar word in the other sentence # Check that the similarity could have been computed # Average the values # Text list to sent # Text Indexing | 2.73436 | 3 |
Sem3/Python/assignment4/5_palindrome.py | nsudhanva/mca-code | 0 | 6620748 | something = input('Enter a string: ')
def palindrome(something):
if something.isalpha():
something = str(something)
reverse_something = something[::-1]
if reverse_something == something:
return True
else:
return False
if palindrome(something):
print('Palindrome')
else:
print('Not a palindrome') | something = input('Enter a string: ')
def palindrome(something):
if something.isalpha():
something = str(something)
reverse_something = something[::-1]
if reverse_something == something:
return True
else:
return False
if palindrome(something):
print('Palindrome')
else:
print('Not a palindrome') | none | 1 | 4.289253 | 4 | |
package_generator/src/package_generator/generate_package.py | 5730289021-NN/ros_pkg_gen | 33 | 6620749 | #!/usr/bin/env python
"""
@package package_generator
@file package_generator.py
@author <NAME>
@brief given a template ros package structure,
generates the package according to the xml definition
Copyright (C) 2017 Tecnalia Research and Innovation
Distributed under the Apache 2.0 license.
"""
import os
import datetime
import shutil
import sys
import rospkg
from package_generator.code_generator import CodeGenerator
from package_generator.jinja_generator import JinjaGenerator
from package_generator.package_xml_parser import PackageXMLParser
from package_generator.file_update_management import GeneratedFileAnalysis
from package_generator.enhanced_object import EnhancedObject
from package_generator.template_spec import TemplateSpec
from termcolor import colored
class PackageGenerator(EnhancedObject):
"""Handle the genration of a whole package
Attributes:
file_generator_ (CodeGenerator): custom generator
jinja_generator_ (JinjaGenerator): generator based on jinja
package_path_ (str): base location of the package to create
path_pkg_backup_ (str): if the package already existed, location of the package backup
spec_ (TemplateSpec): configuration of the template model
template_path_ (str): path to the template to use
xml_parser_ (PackageXMLParser): parser of the package description
"""
def __init__(self, name="PackageGenerator"):
"""Intialisation of the object
Args:
name (str, optional): Name of the component, for printing aspect
"""
# call super class constructor
super(PackageGenerator, self).__init__(name)
# path to the template to use
self.template_path_ = None
# base location of the package to create
self.package_path_ = None
# parser of the package description
self.xml_parser_ = None
# config parameter provide with the template
self.spec_ = None
# generic file generator
self.file_generator_ = None
# jinja-based generator
self.jinja_generator_ = None
# if the package already existed, location of the package backup
self.path_pkg_backup_ = None
def check_template_structure(self, template_path):
"""Check a provided path refers to a valid template structure
Args:
template_path (str): path to the package template
Returns:
Bool: True if basic sanity checks are successful
"""
if not os.path.exists(template_path):
msg = "Template path ({}) is incorrect ".format(template_path)
self.log_error(msg)
return False
if not os.path.isdir(template_path):
msg = "Template path ({}) is not a directory ".format(template_path)
self.log_error(msg)
return False
# check if minimum information is present.
details = """A template should contain:
* config/dictionary.yaml : the dictionary to be used
* config/functions.py [optional] : additional functions used in the generation
* config/generator.py [optional] : generator list (custom, jinja) default is custom
* template/* set of elements to be generated
Revise the template, and compare to examples
"""
is_ok = True
# check for directories
required_folders = ["config", "template"]
for item in required_folders:
req_folder = template_path + "/" + item
if not os.path.isdir(req_folder):
msg_err = "Error \n Expecting to have folder " + item
msg_err += " in " + template_path
self.log_error(msg_err)
is_ok = False
# check for files
required_files = ["config/dictionary.yaml"]
for item in required_files:
req_file = template_path + "/" + item
if not os.path.isfile(req_file):
msg_err = "Error.\n Expecting to have file " + item
msg_err += " in " + template_path
self.log_error(msg_err)
is_ok = False
if not is_ok:
self.log_error("\n{}".format(details))
return False
return True
def get_template_info(self):
"""Get information about the available package templates
Returns:
list: tuple with [absolute package path, list of package names]
"""
rospack = rospkg.RosPack()
path_template = rospack.get_path('package_generator_templates')
path_template += "/templates/"
template_names = os.listdir(path_template)
return [path_template, template_names]
def generate_package(self, package_desc, output_path):
"""launches the package generation
Args:
package_desc (str): xml file containing the package description
output_path (str): directory into which the package is created
Returns:
Bool: True if the operation succeeded
"""
if not os.path.exists(output_path):
msg_err = "Incorrect desired package path ({})".format(output_path)
self.log_error(msg_err)
return False
if not os.path.isdir(output_path):
msg_err = "Desired package path ({}) not a directory ".format(output_path)
self.log_error(msg_err)
return False
# Initialising needed components
# todo bring it to the constructor?
self.spec_ = TemplateSpec()
self.xml_parser_ = PackageXMLParser()
self.file_generator_ = CodeGenerator()
self.jinja_generator_ = JinjaGenerator()
# Start finding the template
template = self.xml_parser_.get_template(package_desc)
if template is None:
return False
# Locate template location
try:
[all_template_path, template_names] = self.get_template_info()
except rospkg.common.ResourceNotFound as error:
msg = "Package package_generator_templates not found in rospack"
self.log_error(msg)
self.log_error(error)
return False
except OSError as error:
msg = "No template dounf in package_generator_templates"
self.log_error(msg)
self.log_error(error)
return False
if template not in template_names:
msg = "Template requested: {} unknown".format(template)
self.log_error(msg)
msg = "Available templates: {}".format(template_names)
self.log_error(msg)
return False
template_path = all_template_path + "/" + template
# confirm this is a template...
if not self.check_template_structure(template_path):
msg = "Please revise template structure"
self.log_error(msg)
return False
# template localized, ready to work!
self.template_path_ = template_path
self.path_pkg_backup_ = None
dir_template_spec = self.template_path_ + "/config/"
if not self.spec_.load_spec(dir_template_spec):
self.log_error("Could not load the template spec")
return False
if not self.xml_parser_.set_template_spec(self.spec_):
msg_err = "Package spec not compatible with xml parser expectations"
self.log_error(msg_err)
return False
if not self.xml_parser_.load(package_desc):
msg_err = "Prb while parsing xml file {}".format(package_desc)
self.log_error(msg_err)
return False
# todo why only the custom generator is configured?
if not self.file_generator_.configure(self.xml_parser_, self.spec_):
return False
package_name = self.xml_parser_.get_package_spec()["name"]
self.package_path_ = output_path + "/" + package_name
if os.path.exists(self.package_path_):
self.log_warn("Package {} already exists".format(self.package_path_))
# moving preexisting code.
# generating dir name using date
now = datetime.datetime.now()
str_date = now.strftime("%Y_%m_%d_%H_%M_%S")
self.path_pkg_backup_ = "/tmp/{}_{}".format(os.path.basename(self.package_path_), str_date)
self.log_warn("Original package temporally stored in {}".format(self.path_pkg_backup_))
# TODO check if the move succeeded
shutil.move(self.package_path_, self.path_pkg_backup_)
else:
self.log("Package to be created in {}".format(self.package_path_))
os.makedirs(self.package_path_)
nb_comp = self.xml_parser_.get_number_comps()
self.log("Number of components defined: {}".format(nb_comp))
if not self.generate_content():
return False
# we store the model into the directory model
path = self.package_path_ + "/model"
if not os.path.exists(path):
os.makedirs(path)
path += "/" + package_name + ".ros_package"
if self.xml_parser_.is_dependency_complete_:
try:
if os.path.abspath(package_desc) == os.path.abspath(path):
# self.log_warn("Using generated model...")
stored_desc = self.path_pkg_backup_ + "/model/" + package_name + ".ros_package"
# self.log("check {} is absolute: {}".format(package_desc, os.path.abspath(package_desc)))
shutil.copyfile(stored_desc, path)
else:
shutil.copyfile(package_desc, path)
self.log("Package model saved in: {}".format(path))
except IOError as error:
self.log_error("Could not store model file: {}".format(error))
else:
# some dependencies were automatically added
# model needs to be rewritten
try:
self.xml_parser_.write_xml(path)
self.log("Package model updated & saved in: {}".format(path))
except IOError as error:
self.log_error("Could not store model file: {}".format(error))
is_ok = self.handle_maintained_files()
return is_ok
def generate_one_file(self, template_file, result_file, force_write):
"""Generate a template file, depending on the generators to be used
Args:
template_file (str): template filename
result_file (str): filename to store the result (unless is None)
force_write (str): force the writting of empty files (if not, files is not written)
Returns:
Bool: True on success
"""
generator = dict()
generator["custom"] = self.file_generator_
generator["jinja"] = self.jinja_generator_
if len(self.spec_.generators_) == 1:
return generator[self.spec_.generators_[0]].generate_disk_file(template_file,
result_file,
force_write)
# two generators are to be used
gen_one = generator[self.spec_.generators_[0]]
gen_two = generator[self.spec_.generators_[1]]
is_ok = gen_one.generate_disk_file(template_file)
if not is_ok:
return False
return gen_two.generate_open_file(gen_one.rendered_,
result_file,
force_write)
def check_template_file(self, template_file):
"""Generate a template file, depending on the generators to be used
Args:
template_file (str): template filename
result_file (str): filename to store the result (unless is None)
force_write (str): force the writting of empty files (if not, files is not written)
Returns:
Bool: True on success
"""
generator = dict()
generator["custom"] = self.file_generator_
generator["jinja"] = self.jinja_generator_
if len(self.spec_.generators_) == 1:
# self.log("Check with Generator {}".format(self.spec_.generators_[0]))
return generator[self.spec_.generators_[0]].check_template_file(template_file)
# two generators are to be used
gen_one = generator[self.spec_.generators_[0]]
gen_two = generator[self.spec_.generators_[1]]
# self.log("Check with Generator {}".format(self.spec_.generators_[0]))
is_ok = gen_one.check_template_file(template_file)
if not is_ok:
return False
# self.log("Check with Generator {}".format(self.spec_.generators_[1]))
if self.spec_.generators_[1] == "jinja":
return gen_two.check_template_file(gen_one.rendered_, is_filename=False)
return gen_two.check_template_file(template_file)
def write_generated_file(self, result_file):
"""Write a generated file
Args:
result_file (str): filename to store the file.
Returns:
Bool: True on success
"""
generator = dict()
generator["custom"] = self.file_generator_
generator["jinja"] = self.jinja_generator_
return generator[self.spec_.generators_[-1]].write_rendered_file(result_file)
def get_generated_file(self):
"""Get the generated files
Returns:
list: list of of each line of the generated file
"""
generator = dict()
generator["custom"] = self.file_generator_
generator["jinja"] = self.jinja_generator_
return generator[self.spec_.generators_[-1]].rendered_
def set_generated_file(self, l_file):
"""set the generated file
Args:
l_file (list): list of of each line of the generated file
"""
generator = dict()
generator["custom"] = self.file_generator_
generator["jinja"] = self.jinja_generator_
generator[self.spec_.generators_[-1]].rendered_ = l_file
def handle_maintained_files(self):
"""Restore file Developer requests to maintain
Assuming these patterns are defined in file .gen_maintain
Returns:
Bool: True on sucess
"""
# check for files to be maintained
if self.path_pkg_backup_ is None:
# package just created, no maintained file
return True
filename_rel = ".gen_maintain"
filename_abs = self.path_pkg_backup_ + "/" + filename_rel
if os.path.exists(filename_abs) and os.path.isfile(filename_abs):
self.log("Checking content to maintain after update")
else:
self.log("no maintained file defined in previous package version")
return True
with open(filename_abs) as open_file:
for line in open_file:
line = line.rstrip('\n')
if not line:
continue
path_abs = self.path_pkg_backup_ + "/" + line
if not os.path.exists(path_abs):
msg = "Content {} not found. Revise {} content"
self.log_error(msg.format(line, filename_abs))
continue
new_path = self.package_path_ + "/" + line
if os.path.isfile(path_abs):
try:
self.log("Restoring file {}".format(line))
# check if directories needs to be created
dirname = os.path.dirname(line)
# self.log("dirname is : {}".format(dirname))
if dirname:
path_abs_dir = self.package_path_ + "/" + dirname
if not os.path.isdir(path_abs_dir):
os.makedirs(path_abs_dir)
shutil.copyfile(path_abs, new_path)
except IOError as error:
msg = "Could not restore a file: {}"
self.log_error(msg.format(error))
continue
if os.path.isdir(path_abs):
try:
self.log("Restoring folder {}".format(line))
shutil.copytree(path_abs, new_path)
except IOError as error:
msg = "Could not restore folder: {}"
self.log_error(msg.format(error))
continue
self.log_error("Unkown statement {}".format(line))
# restoring the maintained content file
try:
self.log("Restoring file {}".format(filename_rel))
new_path = self.package_path_ + "/" + filename_rel
shutil.copyfile(filename_abs, new_path)
except IOError as error:
msg = "Could not restore file: {}"
self.log_error(msg.format(error))
return True
def handle_status_and_advise(self, input_file, output_file, gen_flag):
"""Depending on the file generation process outcome,
Adjust file status and inform user
Args:
input_file (str): path of the template file used
output_file (str): path of the generated file
gen_flag (Bool): Success of the generation process
Returns:
Bool: True on success of the file generation
"""
if not gen_flag:
msg = "Prb while generating file {}".format(output_file)
self.log_error(msg)
return False
# so the file generation went well
if self.file_generator_.get_len_gen_file() == 0:
# Only file __init__.py is kept empty
if os.path.basename(output_file) != '__init__.py':
msg = "File {} not written since empty".format(output_file)
self.log_warn(msg)
self.log_warn("Check: {}".format(os.path.basename(output_file)))
return True
# file has content
file_status = os.stat(input_file)
os.chmod(output_file, file_status.st_mode)
# self.log("File {} handled".format(input_file))
self.log("File handled")
self.log("*********************************")
return True
def generate_content(self):
"""Generation and storage of all content
Returns:
Bool -- True on success
"""
# Extracting all components from the template
file_list = list()
dir_list = list()
path_root_template = self.template_path_ + "/template"
for (root, dirs, files) in os.walk(path_root_template):
# print "check {}: dir {}, files: {}".format(root, dirs, files)
if os.path.samefile(root, path_root_template):
for item in files:
file_list.append(item)
for item in dirs:
dir_list.append(item)
else:
rel_path = os.path.relpath(root, path_root_template)
for item in files:
file_list.append(rel_path + "/" + item)
for item in dirs:
dir_list.append(rel_path + "/" + item)
# Looking at final directory and filenames
package_name = self.xml_parser_.get_package_spec()["name"]
nb_comp = self.xml_parser_.get_number_comps()
comps_name = [self.xml_parser_.data_comp_[id_comp]["attributes"]["name"] for id_comp in range(nb_comp)]
self.log("Generating all folders")
tmp = list()
for item in dir_list:
item = item.replace('package_name', package_name)
if 'component' in item:
for one_name in comps_name:
tmp.append(item.replace('component', one_name))
else:
tmp.append(item)
dir_list = tmp
for item in dir_list:
path_folder = self.package_path_ + "/" + item
if not os.path.exists(path_folder):
os.makedirs(path_folder)
generation_list = list()
# File preparation: storing [template filename, new filename, comp id]
for item in file_list:
new_item = item.replace('package_name', package_name)
if 'component' in item:
for num, one_name in enumerate(comps_name):
generation_list.append([item,
new_item.replace('component',
one_name),
num])
else:
# todo if no component active I should not set one
generation_list.append([item, new_item, 0])
is_ok = True
# self.log("\nFiles generation plan: ")
for item in generation_list:
[template_file, result_file, comp_id] = item
self.log("{} --> {}".format(template_file, result_file))
if not self.xml_parser_.set_active_comp(comp_id):
return False
# reconfiguring the generator to adjust to the new active component
# todo configure already called in generate_package function. Check why
if not self.file_generator_.configure(self.xml_parser_, self.spec_):
return False
if not self.jinja_generator_.configure(self.xml_parser_, self.spec_):
return False
# Normally an empty file should not be written
# The exception is currently only for the special python file __init__.py
is_write_forced = (os.path.basename(result_file) == '__init__.py')
result_file = self.package_path_ + "/" + result_file
template_file = self.template_path_ + '/template/' + template_file
if self.path_pkg_backup_ is None:
self.log("Generating file {}".format(result_file))
is_ok = self.generate_one_file(template_file,
result_file,
is_write_forced)
if self.handle_status_and_advise(template_file,
result_file,
is_ok):
continue
else:
return False
# A previous version of the package exists
# Checking if an update is necessary
rel_path = os.path.relpath(result_file, package_name)
previous_filename = os.path.join(self.path_pkg_backup_, rel_path)
# Check 1: does this file exist?
if not os.path.isfile(previous_filename):
msg = "File {} not previously existing. Just write it"
self.log_warn(msg.format(rel_path))
is_ok = self.generate_one_file(template_file,
result_file,
is_write_forced)
if self.handle_status_and_advise(template_file,
result_file,
is_ok):
continue
else:
return False
# File already existing. Processing previous version
is_update_needed = False
file_analyzor = GeneratedFileAnalysis()
is_ok = file_analyzor.extract_protected_region(previous_filename)
if is_ok:
# Check if Developer inserted any contribution
if file_analyzor.extracted_areas_:
# contribution found, merge needed
is_update_needed = True
else:
self.log("No Developer contribution found")
else:
msg = "prb while extracting protected area in {}"
self.log_error(msg.format(previous_filename))
self.log_error("Previous file to be manually merged, sorry")
# now we know if an update is needed
if is_ok and is_update_needed:
# self.log("Updating file {} in {}".format(rel_path, output_item))
self.log("Updating file {}".format(rel_path))
is_ok = self.generate_one_file(template_file, None, None)
if not is_ok:
return False
# todo handle this in case jinja is involved.
l_gen = self.get_generated_file()
if not l_gen:
msg = "New generated file empty. No code maintained from previous version"
self.log_warn(msg)
# we write it if forced
if is_write_forced:
is_ok = self.write_generated_file(result_file)
else:
self.log("Merging with previous version")
l_gen = file_analyzor.update_file(l_gen)
self.set_generated_file(l_gen)
is_ok = self.write_generated_file(result_file)
if self.handle_status_and_advise(template_file,
result_file,
is_ok):
continue
else:
return False
# Although the file existed before, we do not have to maintain it
is_ok = self.generate_one_file(template_file, result_file, is_write_forced)
if self.handle_status_and_advise(template_file, result_file, is_ok):
continue
else:
return False
return True
    def template_sanity_check(self, template):
        """Check that a template is complete and all its files can be generated.

        The template is looked up (in order): among the templates shipped in
        package package_generator_templates, as an absolute path, then as a
        path relative to the current working directory.

        Args:
            template (str): template name or path to a template folder

        Returns:
            Bool: True on success
        """
        # Locate template location
        try:
            [all_template_path, template_names] = self.get_template_info()
        except rospkg.common.ResourceNotFound as error:
            msg = "Package package_generator_templates not found in rospack"
            self.log_error(msg)
            self.log_error(error)
            return False
        except OSError as error:
            msg = "No template found in package_generator_templates"
            self.log_error(msg)
            self.log_error(error)
            return False
        is_template_found = False
        template_path = None
        if template in template_names:
            # template shipped with package_generator_templates
            is_template_found = True
            template_path = all_template_path + "/" + template
        else:
            self.log("Could not find template {} in {}".format(template, all_template_path))
            # check if the template provided is a relative path, and not a package in the repo
            if os.path.isabs(template):
                self.log("Loading template from absolute path {}".format(template))
                is_template_found = True
                template_path = template
            else:
                # relative path ?
                template_path = os.getcwd() + "/" + template
                if os.path.isdir(template_path):
                    self.log("Loading template from path {}".format(template_path))
                    is_template_found = True
        if not is_template_found:
            msg = "Template requested: {} unknown".format(template)
            self.log_error(msg)
            msg = "Available templates: {}".format(template_names)
            self.log_error(msg)
            return False
        # confirm this is a template...
        if not self.check_template_structure(template_path):
            msg = "Please revise template structure"
            self.log_error(msg)
            return False
        # TODO list number of files in template
        # Extracting all components from the template
        file_list = list()
        dir_list = list()
        path_root_template = template_path + "/template"
        # walk the template tree, collecting files/dirs relative to its root
        for (root, dirs, files) in os.walk(path_root_template):
            # print "check {}: dir {}, files: {}".format(root, dirs, files)
            if os.path.samefile(root, path_root_template):
                for item in files:
                    file_list.append(item)
                for item in dirs:
                    dir_list.append(item)
            else:
                rel_path = os.path.relpath(root, path_root_template)
                for item in files:
                    file_list.append(rel_path + "/" + item)
                for item in dirs:
                    dir_list.append(rel_path + "/" + item)
        # print ("Dirs: ")
        # print("\n".join(dir_list))
        # print("Files: ")
        # print("\n".join(file_list))
        # setting the needed component.
        # NOTE: an empty (dummy) package spec is used, since we only check
        # that each template file is well-formed, not generate real content.
        self.spec_ = TemplateSpec()
        self.xml_parser_ = PackageXMLParser()
        self.file_generator_ = CodeGenerator()
        self.jinja_generator_ = JinjaGenerator()
        dir_template_spec = template_path + "/config/"
        if not self.spec_.load_spec(dir_template_spec):
            self.log_error("Could not load the template spec")
            return False
        if not self.xml_parser_.set_template_spec(self.spec_):
            msg_err = "Package spec not compatible with xml parser expectations"
            self.log_error(msg_err)
            return False
        if not self.xml_parser_.set_empty_spec():
            msg_err = "Failed generating empty spec"
            self.log_error(msg_err)
            return False
        if not self.file_generator_.configure(self.xml_parser_, self.spec_):
            return False
        if not self.jinja_generator_.configure(self.xml_parser_, self.spec_):
            return False
        is_ok = True
        # check every template file; stop at the first faulty one
        for item in file_list:
            self.log("Checking file: {}".format(item))
            item_abs = path_root_template + "/" + item
            is_ok = self.check_template_file(item_abs)
            if not is_ok:
                break
        if is_ok:
            self.log("No error detected")
        else:
            self.log_error("Revise the template")
        return is_ok
# todo complete the usage description with available templates
# and with existing commands
USAGE_GEN = """ usage: generate_package [package_spec]
package_spec: xml description of the component(s) interface
"""
# Command-line entry point for package generation.
def main():
    """Entry point of the package: generate a package from an xml spec.

    Expects exactly one command-line argument (the package xml
    description); otherwise prints the usage string and the list of
    available templates.

    Returns:
        int: 0 on success, negative value on error
    """
    gen = PackageGenerator()
    if len(sys.argv) != 2:
        print colored("Wrong input parameters !", "red")
        print colored(USAGE_GEN, "yellow")
        try:
            [_, template_names] = gen.get_template_info()
        except rospkg.common.ResourceNotFound as error:
            msg = "Package package_generator_templates not found in rospack"
            print colored(msg, 'red')
            print colored(error, 'red')
            return -1
        except OSError as error:
            msg = "No template found in package_generator_templates"
            print colored(msg, 'red')
            print colored(error, 'red')
            return -1
        msg = "Available templates are: {}"
        print colored(msg.format(template_names), 'yellow')
        print "Bye bye"
        return -1
    package_spec = sys.argv[1]
    # the package is generated in the current working directory
    path_current = os.getcwd()
    if not gen.generate_package(package_spec, path_current):
        print colored("Prb while generating the package", "red")
        return -1
    else:
        print colored("Package generated", "green")
    print "Bye bye"
    return 0
USAGE_CHECK = """ usage: check_template package_template
package_template: name of the template to check
Packages template: either one defined in package `package_generator_templates`,
either a path to a local one.
"""
# Command-line entry point for template sanity checking.
def main_check():
    """Entry point of the package: sanity-check a package template.

    Expects exactly one command-line argument (template name or path);
    otherwise prints the usage string and the list of available templates.

    Returns:
        int: 0 on success, negative value on error
    """
    gen = PackageGenerator()
    if len(sys.argv) != 2:
        print colored("Wrong input parameters !", "red")
        print colored(USAGE_CHECK, "yellow")
        try:
            [_, template_names] = gen.get_template_info()
        except rospkg.common.ResourceNotFound as error:
            msg = "Package package_generator_templates not found in rospack"
            print colored(msg, 'red')
            print colored(error, 'red')
            return -1
        except OSError as error:
            msg = "No template found in package_generator_templates"
            print colored(msg, 'red')
            print colored(error, 'red')
            return -1
        msg = "Available templates are: {}"
        print colored(msg.format(template_names), 'yellow')
        print "Bye bye"
        return -1
    template_name = sys.argv[1]
    if not gen.template_sanity_check(template_name):
        print colored("Issue detected in template", "red")
        return -1
    else:
        print colored("No issue detected", "green")
    print "Bye bye"
    return 0
| #!/usr/bin/env python
"""
@package package_generator
@file package_generator.py
@author <NAME>
@brief given a template ros package structure,
generates the package according to the xml definition
Copyright (C) 2017 Tecnalia Research and Innovation
Distributed under the Apache 2.0 license.
"""
import os
import datetime
import shutil
import sys
import rospkg
from package_generator.code_generator import CodeGenerator
from package_generator.jinja_generator import JinjaGenerator
from package_generator.package_xml_parser import PackageXMLParser
from package_generator.file_update_management import GeneratedFileAnalysis
from package_generator.enhanced_object import EnhancedObject
from package_generator.template_spec import TemplateSpec
from termcolor import colored
class PackageGenerator(EnhancedObject):
    """Handle the generation of a whole package

    Attributes:
        file_generator_ (CodeGenerator): custom generator
        jinja_generator_ (JinjaGenerator): generator based on jinja
        package_path_ (str): base location of the package to create
        path_pkg_backup_ (str): if the package already existed, location of the package backup
        spec_ (TemplateSpec): configuration of the template model
        template_path_ (str): path to the template to use
        xml_parser_ (PackageXMLParser): parser of the package description
    """
    def __init__(self, name="PackageGenerator"):
        """Initialisation of the object

        Args:
            name (str, optional): Name of the component, for printing aspect
        """
        # call super class constructor
        super(PackageGenerator, self).__init__(name)
        # path to the template to use
        self.template_path_ = None
        # base location of the package to create
        self.package_path_ = None
        # parser of the package description
        self.xml_parser_ = None
        # config parameter provided with the template
        self.spec_ = None
        # generic file generator
        self.file_generator_ = None
        # jinja-based generator
        self.jinja_generator_ = None
        # if the package already existed, location of the package backup
        self.path_pkg_backup_ = None
    def check_template_structure(self, template_path):
        """Check a provided path refers to a valid template structure

        Args:
            template_path (str): path to the package template

        Returns:
            Bool: True if basic sanity checks are successful
        """
        if not os.path.exists(template_path):
            msg = "Template path ({}) is incorrect ".format(template_path)
            self.log_error(msg)
            return False
        if not os.path.isdir(template_path):
            msg = "Template path ({}) is not a directory ".format(template_path)
            self.log_error(msg)
            return False
        # check if minimum information is present.
        details = """A template should contain:
* config/dictionary.yaml : the dictionary to be used
* config/functions.py [optional] : additional functions used in the generation
* config/generator.py [optional] : generator list (custom, jinja) default is custom
* template/* set of elements to be generated
Revise the template, and compare to examples
"""
        is_ok = True
        # check for directories
        required_folders = ["config", "template"]
        for item in required_folders:
            req_folder = template_path + "/" + item
            if not os.path.isdir(req_folder):
                msg_err = "Error \n Expecting to have folder " + item
                msg_err += " in " + template_path
                self.log_error(msg_err)
                is_ok = False
        # check for files
        required_files = ["config/dictionary.yaml"]
        for item in required_files:
            req_file = template_path + "/" + item
            if not os.path.isfile(req_file):
                msg_err = "Error.\n Expecting to have file " + item
                msg_err += " in " + template_path
                self.log_error(msg_err)
                is_ok = False
        if not is_ok:
            self.log_error("\n{}".format(details))
            return False
        return True
    def get_template_info(self):
        """Get information about the available package templates

        Raises:
            rospkg.common.ResourceNotFound: package_generator_templates missing
            OSError: templates folder cannot be listed

        Returns:
            list: tuple with [absolute package path, list of package names]
        """
        rospack = rospkg.RosPack()
        path_template = rospack.get_path('package_generator_templates')
        path_template += "/templates/"
        template_names = os.listdir(path_template)
        return [path_template, template_names]
    def generate_package(self, package_desc, output_path):
        """launches the package generation

        Args:
            package_desc (str): xml file containing the package description
            output_path (str): directory into which the package is created

        Returns:
            Bool: True if the operation succeeded
        """
        if not os.path.exists(output_path):
            msg_err = "Incorrect desired package path ({})".format(output_path)
            self.log_error(msg_err)
            return False
        if not os.path.isdir(output_path):
            msg_err = "Desired package path ({}) not a directory ".format(output_path)
            self.log_error(msg_err)
            return False
        # Initialising needed components
        # todo bring it to the constructor?
        self.spec_ = TemplateSpec()
        self.xml_parser_ = PackageXMLParser()
        self.file_generator_ = CodeGenerator()
        self.jinja_generator_ = JinjaGenerator()
        # Start finding the template
        template = self.xml_parser_.get_template(package_desc)
        if template is None:
            return False
        # Locate template location
        try:
            [all_template_path, template_names] = self.get_template_info()
        except rospkg.common.ResourceNotFound as error:
            msg = "Package package_generator_templates not found in rospack"
            self.log_error(msg)
            self.log_error(error)
            return False
        except OSError as error:
            # fixed message typo ("dounf" -> "found")
            msg = "No template found in package_generator_templates"
            self.log_error(msg)
            self.log_error(error)
            return False
        if template not in template_names:
            msg = "Template requested: {} unknown".format(template)
            self.log_error(msg)
            msg = "Available templates: {}".format(template_names)
            self.log_error(msg)
            return False
        template_path = all_template_path + "/" + template
        # confirm this is a template...
        if not self.check_template_structure(template_path):
            msg = "Please revise template structure"
            self.log_error(msg)
            return False
        # template localized, ready to work!
        self.template_path_ = template_path
        self.path_pkg_backup_ = None
        dir_template_spec = self.template_path_ + "/config/"
        if not self.spec_.load_spec(dir_template_spec):
            self.log_error("Could not load the template spec")
            return False
        if not self.xml_parser_.set_template_spec(self.spec_):
            msg_err = "Package spec not compatible with xml parser expectations"
            self.log_error(msg_err)
            return False
        if not self.xml_parser_.load(package_desc):
            msg_err = "Prb while parsing xml file {}".format(package_desc)
            self.log_error(msg_err)
            return False
        # todo why only the custom generator is configured?
        if not self.file_generator_.configure(self.xml_parser_, self.spec_):
            return False
        package_name = self.xml_parser_.get_package_spec()["name"]
        self.package_path_ = output_path + "/" + package_name
        if os.path.exists(self.package_path_):
            self.log_warn("Package {} already exists".format(self.package_path_))
            # moving preexisting code.
            # generating dir name using date
            now = datetime.datetime.now()
            str_date = now.strftime("%Y_%m_%d_%H_%M_%S")
            self.path_pkg_backup_ = "/tmp/{}_{}".format(os.path.basename(self.package_path_), str_date)
            self.log_warn("Original package temporally stored in {}".format(self.path_pkg_backup_))
            # TODO check if the move succeeded
            shutil.move(self.package_path_, self.path_pkg_backup_)
        else:
            self.log("Package to be created in {}".format(self.package_path_))
        os.makedirs(self.package_path_)
        nb_comp = self.xml_parser_.get_number_comps()
        self.log("Number of components defined: {}".format(nb_comp))
        if not self.generate_content():
            return False
        # we store the model into the directory model
        path = self.package_path_ + "/model"
        if not os.path.exists(path):
            os.makedirs(path)
        path += "/" + package_name + ".ros_package"
        if self.xml_parser_.is_dependency_complete_:
            try:
                if os.path.abspath(package_desc) == os.path.abspath(path):
                    # the provided spec is the one stored in the package being
                    # regenerated: recover it from the backup copy instead
                    # self.log_warn("Using generated model...")
                    stored_desc = self.path_pkg_backup_ + "/model/" + package_name + ".ros_package"
                    # self.log("check {} is absolute: {}".format(package_desc, os.path.abspath(package_desc)))
                    shutil.copyfile(stored_desc, path)
                else:
                    shutil.copyfile(package_desc, path)
                self.log("Package model saved in: {}".format(path))
            except IOError as error:
                self.log_error("Could not store model file: {}".format(error))
        else:
            # some dependencies were automatically added
            # model needs to be rewritten
            try:
                self.xml_parser_.write_xml(path)
                self.log("Package model updated & saved in: {}".format(path))
            except IOError as error:
                self.log_error("Could not store model file: {}".format(error))
        is_ok = self.handle_maintained_files()
        return is_ok
    def generate_one_file(self, template_file, result_file, force_write):
        """Generate a template file, depending on the generators to be used

        Args:
            template_file (str): template filename
            result_file (str): filename to store the result (unless is None)
            force_write (Bool): force the writing of empty files (if not, file is not written)

        Returns:
            Bool: True on success
        """
        generator = dict()
        generator["custom"] = self.file_generator_
        generator["jinja"] = self.jinja_generator_
        if len(self.spec_.generators_) == 1:
            return generator[self.spec_.generators_[0]].generate_disk_file(template_file,
                                                                           result_file,
                                                                           force_write)
        # two generators are to be used: the first renders in memory,
        # the second consumes that rendering and writes the result
        gen_one = generator[self.spec_.generators_[0]]
        gen_two = generator[self.spec_.generators_[1]]
        is_ok = gen_one.generate_disk_file(template_file)
        if not is_ok:
            return False
        return gen_two.generate_open_file(gen_one.rendered_,
                                          result_file,
                                          force_write)
    def check_template_file(self, template_file):
        """Check a single template file with the configured generator chain.

        No file is written; the template is only verified to be processable
        by each configured generator (custom and/or jinja).

        Args:
            template_file (str): template filename

        Returns:
            Bool: True on success
        """
        generator = dict()
        generator["custom"] = self.file_generator_
        generator["jinja"] = self.jinja_generator_
        if len(self.spec_.generators_) == 1:
            # self.log("Check with Generator {}".format(self.spec_.generators_[0]))
            return generator[self.spec_.generators_[0]].check_template_file(template_file)
        # two generators are to be used
        gen_one = generator[self.spec_.generators_[0]]
        gen_two = generator[self.spec_.generators_[1]]
        # self.log("Check with Generator {}".format(self.spec_.generators_[0]))
        is_ok = gen_one.check_template_file(template_file)
        if not is_ok:
            return False
        # self.log("Check with Generator {}".format(self.spec_.generators_[1]))
        if self.spec_.generators_[1] == "jinja":
            # jinja checks the in-memory rendering produced by the first generator
            return gen_two.check_template_file(gen_one.rendered_, is_filename=False)
        return gen_two.check_template_file(template_file)
    def write_generated_file(self, result_file):
        """Write a generated file

        The last generator in the chain holds the final rendering.

        Args:
            result_file (str): filename to store the file.

        Returns:
            Bool: True on success
        """
        generator = dict()
        generator["custom"] = self.file_generator_
        generator["jinja"] = self.jinja_generator_
        return generator[self.spec_.generators_[-1]].write_rendered_file(result_file)
    def get_generated_file(self):
        """Get the generated file content

        Returns:
            list: list of each line of the generated file
        """
        generator = dict()
        generator["custom"] = self.file_generator_
        generator["jinja"] = self.jinja_generator_
        return generator[self.spec_.generators_[-1]].rendered_
    def set_generated_file(self, l_file):
        """set the generated file

        Args:
            l_file (list): list of each line of the generated file
        """
        generator = dict()
        generator["custom"] = self.file_generator_
        generator["jinja"] = self.jinja_generator_
        generator[self.spec_.generators_[-1]].rendered_ = l_file
    def handle_maintained_files(self):
        """Restore file Developer requests to maintain
        Assuming these patterns are defined in file .gen_maintain

        Returns:
            Bool: True on success
        """
        # check for files to be maintained
        if self.path_pkg_backup_ is None:
            # package just created, no maintained file
            return True
        filename_rel = ".gen_maintain"
        filename_abs = self.path_pkg_backup_ + "/" + filename_rel
        if os.path.exists(filename_abs) and os.path.isfile(filename_abs):
            self.log("Checking content to maintain after update")
        else:
            self.log("no maintained file defined in previous package version")
            return True
        with open(filename_abs) as open_file:
            for line in open_file:
                line = line.rstrip('\n')
                if not line:
                    continue
                path_abs = self.path_pkg_backup_ + "/" + line
                if not os.path.exists(path_abs):
                    msg = "Content {} not found. Revise {} content"
                    self.log_error(msg.format(line, filename_abs))
                    continue
                new_path = self.package_path_ + "/" + line
                if os.path.isfile(path_abs):
                    try:
                        self.log("Restoring file {}".format(line))
                        # check if directories needs to be created
                        dirname = os.path.dirname(line)
                        # self.log("dirname is : {}".format(dirname))
                        if dirname:
                            path_abs_dir = self.package_path_ + "/" + dirname
                            if not os.path.isdir(path_abs_dir):
                                os.makedirs(path_abs_dir)
                        shutil.copyfile(path_abs, new_path)
                    except IOError as error:
                        msg = "Could not restore a file: {}"
                        self.log_error(msg.format(error))
                    continue
                if os.path.isdir(path_abs):
                    try:
                        self.log("Restoring folder {}".format(line))
                        shutil.copytree(path_abs, new_path)
                    except IOError as error:
                        msg = "Could not restore folder: {}"
                        self.log_error(msg.format(error))
                    continue
                # fixed message typo ("Unkown" -> "Unknown")
                self.log_error("Unknown statement {}".format(line))
        # restoring the maintained content file
        try:
            self.log("Restoring file {}".format(filename_rel))
            new_path = self.package_path_ + "/" + filename_rel
            shutil.copyfile(filename_abs, new_path)
        except IOError as error:
            msg = "Could not restore file: {}"
            self.log_error(msg.format(error))
        return True
    def handle_status_and_advise(self, input_file, output_file, gen_flag):
        """Depending on the file generation process outcome,
        Adjust file status and inform user

        Args:
            input_file (str): path of the template file used
            output_file (str): path of the generated file
            gen_flag (Bool): Success of the generation process

        Returns:
            Bool: True on success of the file generation
        """
        if not gen_flag:
            msg = "Prb while generating file {}".format(output_file)
            self.log_error(msg)
            return False
        # so the file generation went well
        if self.file_generator_.get_len_gen_file() == 0:
            # Only file __init__.py is kept empty
            if os.path.basename(output_file) != '__init__.py':
                msg = "File {} not written since empty".format(output_file)
                self.log_warn(msg)
                self.log_warn("Check: {}".format(os.path.basename(output_file)))
            return True
        # file has content: replicate the template file permissions
        file_status = os.stat(input_file)
        os.chmod(output_file, file_status.st_mode)
        # self.log("File {} handled".format(input_file))
        self.log("File handled")
        self.log("*********************************")
        return True
    def generate_content(self):
        """Generation and storage of all content

        Walks the template tree, expands the `package_name` and `component`
        placeholders in paths, then generates every file (merging with the
        backed-up previous version when one exists).

        Returns:
            Bool -- True on success
        """
        # Extracting all components from the template
        file_list = list()
        dir_list = list()
        path_root_template = self.template_path_ + "/template"
        for (root, dirs, files) in os.walk(path_root_template):
            # print "check {}: dir {}, files: {}".format(root, dirs, files)
            if os.path.samefile(root, path_root_template):
                for item in files:
                    file_list.append(item)
                for item in dirs:
                    dir_list.append(item)
            else:
                rel_path = os.path.relpath(root, path_root_template)
                for item in files:
                    file_list.append(rel_path + "/" + item)
                for item in dirs:
                    dir_list.append(rel_path + "/" + item)
        # Looking at final directory and filenames
        package_name = self.xml_parser_.get_package_spec()["name"]
        nb_comp = self.xml_parser_.get_number_comps()
        comps_name = [self.xml_parser_.data_comp_[id_comp]["attributes"]["name"] for id_comp in range(nb_comp)]
        self.log("Generating all folders")
        tmp = list()
        for item in dir_list:
            item = item.replace('package_name', package_name)
            if 'component' in item:
                # one folder instance per component
                for one_name in comps_name:
                    tmp.append(item.replace('component', one_name))
            else:
                tmp.append(item)
        dir_list = tmp
        for item in dir_list:
            path_folder = self.package_path_ + "/" + item
            if not os.path.exists(path_folder):
                os.makedirs(path_folder)
        generation_list = list()
        # File preparation: storing [template filename, new filename, comp id]
        for item in file_list:
            new_item = item.replace('package_name', package_name)
            if 'component' in item:
                for num, one_name in enumerate(comps_name):
                    generation_list.append([item,
                                            new_item.replace('component',
                                                             one_name),
                                            num])
            else:
                # todo if no component active I should not set one
                generation_list.append([item, new_item, 0])
        is_ok = True
        # self.log("\nFiles generation plan: ")
        for item in generation_list:
            [template_file, result_file, comp_id] = item
            self.log("{} --> {}".format(template_file, result_file))
            if not self.xml_parser_.set_active_comp(comp_id):
                return False
            # reconfiguring the generator to adjust to the new active component
            # todo configure already called in generate_package function. Check why
            if not self.file_generator_.configure(self.xml_parser_, self.spec_):
                return False
            if not self.jinja_generator_.configure(self.xml_parser_, self.spec_):
                return False
            # Normally an empty file should not be written
            # The exception is currently only for the special python file __init__.py
            is_write_forced = (os.path.basename(result_file) == '__init__.py')
            result_file = self.package_path_ + "/" + result_file
            template_file = self.template_path_ + '/template/' + template_file
            if self.path_pkg_backup_ is None:
                # fresh package: no previous version to merge with
                self.log("Generating file {}".format(result_file))
                is_ok = self.generate_one_file(template_file,
                                               result_file,
                                               is_write_forced)
                if self.handle_status_and_advise(template_file,
                                                 result_file,
                                                 is_ok):
                    continue
                else:
                    return False
            # A previous version of the package exists
            # Checking if an update is necessary
            rel_path = os.path.relpath(result_file, package_name)
            previous_filename = os.path.join(self.path_pkg_backup_, rel_path)
            # Check 1: does this file exist?
            if not os.path.isfile(previous_filename):
                msg = "File {} not previously existing. Just write it"
                self.log_warn(msg.format(rel_path))
                is_ok = self.generate_one_file(template_file,
                                               result_file,
                                               is_write_forced)
                if self.handle_status_and_advise(template_file,
                                                 result_file,
                                                 is_ok):
                    continue
                else:
                    return False
            # File already existing. Processing previous version
            is_update_needed = False
            file_analyzor = GeneratedFileAnalysis()
            is_ok = file_analyzor.extract_protected_region(previous_filename)
            if is_ok:
                # Check if Developer inserted any contribution
                if file_analyzor.extracted_areas_:
                    # contribution found, merge needed
                    is_update_needed = True
                else:
                    self.log("No Developer contribution found")
            else:
                msg = "prb while extracting protected area in {}"
                self.log_error(msg.format(previous_filename))
                self.log_error("Previous file to be manually merged, sorry")
            # now we know if an update is needed
            if is_ok and is_update_needed:
                # self.log("Updating file {} in {}".format(rel_path, output_item))
                self.log("Updating file {}".format(rel_path))
                is_ok = self.generate_one_file(template_file, None, None)
                if not is_ok:
                    return False
                # todo handle this in case jinja is involved.
                l_gen = self.get_generated_file()
                if not l_gen:
                    msg = "New generated file empty. No code maintained from previous version"
                    self.log_warn(msg)
                    # we write it if forced
                    if is_write_forced:
                        is_ok = self.write_generated_file(result_file)
                else:
                    self.log("Merging with previous version")
                    l_gen = file_analyzor.update_file(l_gen)
                    self.set_generated_file(l_gen)
                    is_ok = self.write_generated_file(result_file)
                if self.handle_status_and_advise(template_file,
                                                 result_file,
                                                 is_ok):
                    continue
                else:
                    return False
            # Although the file existed before, we do not have to maintain it
            is_ok = self.generate_one_file(template_file, result_file, is_write_forced)
            if self.handle_status_and_advise(template_file, result_file, is_ok):
                continue
            else:
                return False
        return True
    def template_sanity_check(self, template):
        """Check that a template is complete and all its files can be generated.

        The template is looked up (in order): among the templates shipped in
        package package_generator_templates, as an absolute path, then as a
        path relative to the current working directory.

        Args:
            template (str): template name or path to a template folder

        Returns:
            Bool: True on success
        """
        # Locate template location
        try:
            [all_template_path, template_names] = self.get_template_info()
        except rospkg.common.ResourceNotFound as error:
            msg = "Package package_generator_templates not found in rospack"
            self.log_error(msg)
            self.log_error(error)
            return False
        except OSError as error:
            msg = "No template found in package_generator_templates"
            self.log_error(msg)
            self.log_error(error)
            return False
        is_template_found = False
        template_path = None
        if template in template_names:
            is_template_found = True
            template_path = all_template_path + "/" + template
        else:
            self.log("Could not find template {} in {}".format(template, all_template_path))
            # check if the template provided is a relative path, and not a package in the repo
            if os.path.isabs(template):
                self.log("Loading template from absolute path {}".format(template))
                is_template_found = True
                template_path = template
            else:
                # relative path ?
                template_path = os.getcwd() + "/" + template
                if os.path.isdir(template_path):
                    self.log("Loading template from path {}".format(template_path))
                    is_template_found = True
        if not is_template_found:
            msg = "Template requested: {} unknown".format(template)
            self.log_error(msg)
            msg = "Available templates: {}".format(template_names)
            self.log_error(msg)
            return False
        # confirm this is a template...
        if not self.check_template_structure(template_path):
            msg = "Please revise template structure"
            self.log_error(msg)
            return False
        # TODO list number of files in template
        # Extracting all components from the template
        file_list = list()
        dir_list = list()
        path_root_template = template_path + "/template"
        for (root, dirs, files) in os.walk(path_root_template):
            # print "check {}: dir {}, files: {}".format(root, dirs, files)
            if os.path.samefile(root, path_root_template):
                for item in files:
                    file_list.append(item)
                for item in dirs:
                    dir_list.append(item)
            else:
                rel_path = os.path.relpath(root, path_root_template)
                for item in files:
                    file_list.append(rel_path + "/" + item)
                for item in dirs:
                    dir_list.append(rel_path + "/" + item)
        # print ("Dirs: ")
        # print("\n".join(dir_list))
        # print("Files: ")
        # print("\n".join(file_list))
        # setting the needed component.
        # NOTE: an empty (dummy) package spec is used, since we only check
        # that each template file is well-formed, not generate real content.
        self.spec_ = TemplateSpec()
        self.xml_parser_ = PackageXMLParser()
        self.file_generator_ = CodeGenerator()
        self.jinja_generator_ = JinjaGenerator()
        dir_template_spec = template_path + "/config/"
        if not self.spec_.load_spec(dir_template_spec):
            self.log_error("Could not load the template spec")
            return False
        if not self.xml_parser_.set_template_spec(self.spec_):
            msg_err = "Package spec not compatible with xml parser expectations"
            self.log_error(msg_err)
            return False
        if not self.xml_parser_.set_empty_spec():
            msg_err = "Failed generating empty spec"
            self.log_error(msg_err)
            return False
        if not self.file_generator_.configure(self.xml_parser_, self.spec_):
            return False
        if not self.jinja_generator_.configure(self.xml_parser_, self.spec_):
            return False
        is_ok = True
        for item in file_list:
            self.log("Checking file: {}".format(item))
            item_abs = path_root_template + "/" + item
            is_ok = self.check_template_file(item_abs)
            if not is_ok:
                break
        if is_ok:
            self.log("No error detected")
        else:
            self.log_error("Revise the template")
        return is_ok
# todo complete the usage description with available templates
# and with existing commands
USAGE_GEN = """ usage: generate_package [package_spec]
package_spec: xml description of the component(s) interface
"""
# Command-line entry point for package generation.
def main():
    """Entry point of the package: generate a package from an xml spec.

    Expects exactly one command-line argument (the package xml
    description); otherwise prints the usage string and the list of
    available templates.

    Returns:
        int: 0 on success, negative value on error
    """
    gen = PackageGenerator()
    if len(sys.argv) != 2:
        print colored("Wrong input parameters !", "red")
        print colored(USAGE_GEN, "yellow")
        try:
            [_, template_names] = gen.get_template_info()
        except rospkg.common.ResourceNotFound as error:
            msg = "Package package_generator_templates not found in rospack"
            print colored(msg, 'red')
            print colored(error, 'red')
            return -1
        except OSError as error:
            msg = "No template found in package_generator_templates"
            print colored(msg, 'red')
            print colored(error, 'red')
            return -1
        msg = "Available templates are: {}"
        print colored(msg.format(template_names), 'yellow')
        print "Bye bye"
        return -1
    package_spec = sys.argv[1]
    # the package is generated in the current working directory
    path_current = os.getcwd()
    if not gen.generate_package(package_spec, path_current):
        print colored("Prb while generating the package", "red")
        return -1
    else:
        print colored("Package generated", "green")
    print "Bye bye"
    return 0
USAGE_CHECK = """ usage: check_template package_template
package_template: name of the template to check
Packages template: either one defined in package `package_generator_templates`,
either a path to a local one.
"""
# Command-line entry point for template sanity checking.
def main_check():
    """Entry point of the package: sanity-check a package template.

    Expects exactly one command-line argument (template name or path);
    otherwise prints the usage string and the list of available templates.

    Returns:
        int: 0 on success, negative value on error
    """
    gen = PackageGenerator()
    if len(sys.argv) != 2:
        print colored("Wrong input parameters !", "red")
        print colored(USAGE_CHECK, "yellow")
        try:
            [_, template_names] = gen.get_template_info()
        except rospkg.common.ResourceNotFound as error:
            msg = "Package package_generator_templates not found in rospack"
            print colored(msg, 'red')
            print colored(error, 'red')
            return -1
        except OSError as error:
            msg = "No template found in package_generator_templates"
            print colored(msg, 'red')
            print colored(error, 'red')
            return -1
        msg = "Available templates are: {}"
        print colored(msg.format(template_names), 'yellow')
        print "Bye bye"
        return -1
    template_name = sys.argv[1]
    if not gen.template_sanity_check(template_name):
        print colored("Issue detected in template", "red")
        return -1
    else:
        print colored("No issue detected", "green")
    print "Bye bye"
    return 0
| en | 0.671226 | #!/usr/bin/env python @package package_generator @file package_generator.py @author <NAME> @brief given a template ros package structure, generates the package according to the xml definition Copyright (C) 2017 Tecnalia Research and Innovation Distributed under the Apache 2.0 license. Handle the genration of a whole package Attributes: file_generator_ (CodeGenerator): custom generator jinja_generator_ (JinjaGenerator): generator based on jinja package_path_ (str): base location of the package to create path_pkg_backup_ (str): if the package already existed, location of the package backup spec_ (TemplateSpec): configuration of the template model template_path_ (str): path to the template to use xml_parser_ (PackageXMLParser): parser of the package description Intialisation of the object Args: name (str, optional): Name of the component, for printing aspect # call super class constructor # path to the template to use # base location of the package to create # parser of the package description # config parameter provide with the template # generic file generator # jinja-based generator # if the package already existed, location of the package backup Check a provided path refers to a valid template structure Args: template_path (str): path to the package template Returns: Bool: True if basic sanity checks are successful # check if minimum information is present. 
A template should contain: * config/dictionary.yaml : the dictionary to be used * config/functions.py [optional] : additional functions used in the generation * config/generator.py [optional] : generator list (custom, jinja) default is custom * template/* set of elements to be generated Revise the template, and compare to examples # check for directories # check for files Get information about the available package templates Returns: list: tuple with [absolute package path, list of package names] launches the package generation Args: package_desc (str): xml file containing the package description output_path (str): directory into which the package is created Returns: Bool: True if the operation succeeded # Initialising needed components # todo bring it to the constructor? # Start finding the template # Locate template location # confirm this is a template... # template localized, ready to work! # todo why only the custom generator is configured? # moving preexisting code. # generating dir name using date # TODO check if the move succeeded # we store the model into the directory model # self.log_warn("Using generated model...") # self.log("check {} is absolute: {}".format(package_desc, os.path.abspath(package_desc))) # some dependencies were automatically added # model needs to be rewritten Generate a template file, depending on the generators to be used Args: template_file (str): template filename result_file (str): filename to store the result (unless is None) force_write (str): force the writting of empty files (if not, files is not written) Returns: Bool: True on success # two generators are to be used Generate a template file, depending on the generators to be used Args: template_file (str): template filename result_file (str): filename to store the result (unless is None) force_write (str): force the writting of empty files (if not, files is not written) Returns: Bool: True on success # self.log("Check with Generator {}".format(self.spec_.generators_[0])) # 
two generators are to be used # self.log("Check with Generator {}".format(self.spec_.generators_[0])) # self.log("Check with Generator {}".format(self.spec_.generators_[1])) Write a generated file Args: result_file (str): filename to store the file. Returns: Bool: True on success Get the generated files Returns: list: list of of each line of the generated file set the generated file Args: l_file (list): list of of each line of the generated file Restore file Developer requests to maintain Assuming these patterns are defined in file .gen_maintain Returns: Bool: True on sucess # check for files to be maintained # package just created, no maintained file # check if directories needs to be created # self.log("dirname is : {}".format(dirname)) # restoring the maintained content file Depending on the file generation process outcome, Adjust file status and inform user Args: input_file (str): path of the template file used output_file (str): path of the generated file gen_flag (Bool): Success of the generation process Returns: Bool: True on success of the file generation # so the file generation went well # Only file __init__.py is kept empty # file has content # self.log("File {} handled".format(input_file)) Generation and storage of all content Returns: Bool -- True on success # Extracting all components from the template # print "check {}: dir {}, files: {}".format(root, dirs, files) # Looking at final directory and filenames # File preparation: storing [template filename, new filename, comp id] # todo if no component active I should not set one # self.log("\nFiles generation plan: ") # reconfiguring the generator to adjust to the new active component # todo configure already called in generate_package function. Check why # Normally an empty file should not be written # The exception is currently only for the special python file __init__.py # A previous version of the package exists # Checking if an update is necessary # Check 1: does this file exist? 
# File already existing. Processing previous version # Check if Developer inserted any contribution # contribution found, merge needed # now we know if an update is needed # self.log("Updating file {} in {}".format(rel_path, output_item)) # todo handle this in case jinja is involved. # we write it if forced # Although the file existed before, we do not have to maintain it Perform the package sanity check Returns: Bool: True on success # Locate template location # check if the template provided is a relative path, and not a package in the repo # relative path ? # confirm this is a template... # TODO list number of files in template # Extracting all components from the template # print "check {}: dir {}, files: {}".format(root, dirs, files) # print ("Dirs: ") # print("\n".join(dir_list)) # print("Files: ") # print("\n".join(file_list)) # setting the needed component. # todo complete the usage description with available templates # and with existing commands usage: generate_package [package_spec] package_spec: xml description of the component(s) interface @brief Entry point of the package. Generates a package, given a specified structure @return nothing Returns: int: negative value on error usage: check_template package_template package_template: name of the template to check Packages template: either one defined in package `package_generator_templates`, either a path to a local one. @brief Entry point of the package. Check a template structure, as provided Returns: int: negative value on error | 2.599793 | 3 |
setup.py | nanvel/yuriko | 0 | 6620750 | <reponame>nanvel/yuriko
"""
For testing:
python setup.py install
Upload to PyPI:
python setup.py bdist_wheel --universal
python setup.py sdist
twine upload dist/*
"""
import os
from setuptools import find_packages, setup
def read(file_name):
try:
return open(os.path.join(os.path.dirname(__file__), file_name)).read()
except IOError:
return ''
setup(
name="yuriko",
version='0.0.4',
description="Encrypted notes",
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='encrypted, notes',
author='<NAME>',
author_email='<EMAIL>',
url="https://github.com/nanvel/yuriko",
packages=find_packages(),
install_requires=[
'PyCryptodome'
],
entry_points={
'console_scripts': [
'yuriko = yuriko.main:main'
]
}
)
| """
For testing:
python setup.py install
Upload to PyPI:
python setup.py bdist_wheel --universal
python setup.py sdist
twine upload dist/*
"""
import os
from setuptools import find_packages, setup
def read(file_name):
try:
return open(os.path.join(os.path.dirname(__file__), file_name)).read()
except IOError:
return ''
setup(
name="yuriko",
version='0.0.4',
description="Encrypted notes",
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='encrypted, notes',
author='<NAME>',
author_email='<EMAIL>',
url="https://github.com/nanvel/yuriko",
packages=find_packages(),
install_requires=[
'PyCryptodome'
],
entry_points={
'console_scripts': [
'yuriko = yuriko.main:main'
]
}
) | en | 0.638718 | For testing: python setup.py install Upload to PyPI: python setup.py bdist_wheel --universal python setup.py sdist twine upload dist/* | 1.734911 | 2 |