text stringlengths 957 885k |
|---|
#
# MazeGame v 1.0.0
# {Server side version}
# Python version
#
# SmartFoxServer PRO example file
#
# (c) 2005 - 2006 gotoAndPlay()
#
# --- Module-level room state ---
# SmartFoxServer instantiates one extension per room, so these globals hold
# the state of a single two-player match.
# Number of players currently in the room.
numPlayers = 0
# Users currently in the room, keyed by userId.
users = {}
# True while a match is in progress.
gameStarted = False
# Id of the room this extension is attached to (-1 until the first join).
currentRoomId = -1
# userId of Player 1 (-1 until assigned).
p1id = -1
# userId of Player 2 (-1 until assigned).
p2id = -1
# Extension lifecycle hook called by SmartFoxServer on load.
def init():
    """No setup is required for this game."""
    pass
# Extension lifecycle hook called by SmartFoxServer on unload.
def destroy():
    """No teardown is required for this game."""
    pass
# Entry point for client requests.
def handleRequest(cmd, params, user, fromRoom, protocol):
    """Dispatch a client request.

    Only the "mv" (move) command sent over the string protocol is handled;
    everything else is ignored.
    """
    if protocol != "str":
        return
    if cmd == "mv":
        handleMove(params, user)
# Entry point for server-side events.
def handleInternalEvent(evt):
    """Track users joining/leaving the room and start/stop the match."""
    global currentRoomId, numPlayers, p1id, p2id, gameStarted
    event_name = evt.getEventName()
    if event_name == "userJoin":
        # Remember the room id the first time anyone joins.
        if currentRoomId == -1:
            currentRoomId = evt.getObject("room").getId()
        joiner = evt.getObject("user")
        # Register the user under their userId and assign a player slot.
        users[joiner.getUserId()] = joiner
        numPlayers += 1
        if joiner.getPlayerIndex() == 1:
            p1id = joiner.getUserId()
        else:
            p2id = joiner.getUserId()
        # With both players present, kick the match off exactly once.
        if numPlayers == 2 and not gameStarted:
            startGame()
    elif event_name in ("userExit", "userLost"):
        uId = int(evt.getParam("uid"))
        # Resolve the former player index of the departing user
        # (the two event types expose it differently).
        if event_name == "userExit":
            oldPid = int(evt.getParam("oldPlayerIndex"))
        else:
            oldPid = evt.getObject("playerIndexes")[0]
        # Drop the player from the local registry; the match cannot continue.
        leaver = users.pop(uId)
        numPlayers -= 1
        gameStarted = False
        # If someone is still in the room, tell them the game stopped.
        if numPlayers > 0:
            notice = {"_cmd": "stop", "n": leaver.getName()}
            _server.sendResponse(notice, currentRoomId, None, users.values())
# Match start: notify both players with their starting positions.
def startGame():
    """Mark the game as running and broadcast the "start" message."""
    global gameStarted
    gameStarted = True
    start_msg = {
        "_cmd": "start",
        "p1": {"id": p1id, "name": users[p1id].getName(), "x": 1, "y": 1},
        "p2": {"id": p2id, "name": users[p2id].getName(), "x": 22, "y": 10},
    }
    _server.sendResponse(start_msg, currentRoomId, None, users.values())
# Relay a player's move to the other player.
def handleMove(params, user):
    """Broadcast a move (x, y taken from params) to the opponent only."""
    if not gameStarted:
        return
    payload = ["mv", params[0], params[1]]
    # The recipient is whichever player did NOT send the move.
    opponent = users[p2id] if user.getUserId() == p1id else users[p1id]
    _server.sendResponse(payload, currentRoomId, user, [opponent],
                         _server.PROTOCOL_STR)
|
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Controls tests."""
# pylint: disable=redefined-outer-name
import copy
import pytest
from lib import base, browsers, factory, url
from lib.constants import element, objects
from lib.entities import entity
from lib.service import webui_service, webui_facade
@pytest.fixture()
def controls_service(selenium):
  """Return a ControlsService bound to the current browser session."""
  return webui_service.ControlsService(selenium)
class TestControls(base.Test):
  """Tests for Controls functionality.

  Controls are read-only (disabled) objects in GGRC, so most tests verify
  that edit/delete/update affordances are absent or redirect to a new tab.
  """
  # pylint: disable=no-self-use
  # pylint: disable=invalid-name
  # pylint: disable=unused-argument

  def test_user_cannot_edit_or_del_control_from_info_page(self, control,
                                                          controls_service):
    """Confirm that user cannot edit or delete Control from info page."""
    three_bbs = controls_service.open_info_page_of_obj(control).three_bbs
    expected_options = {"can_edit": False,
                        "can_delete": False}
    actual_options = {"can_edit": three_bbs.edit_option.exists,
                      "can_delete": three_bbs.delete_option.exists}
    assert actual_options == expected_options

  def test_user_cannot_edit_control_from_tree_view(self, control,
                                                   dashboard_controls_tab):
    """Confirm that user cannot edit Control from tree view."""
    assert not dashboard_controls_tab.get_control(control).is_editable, (
        "Edit option should not be available for Control in tree view")

  def test_user_cannot_edit_or_del_control_from_gl_search(self, control,
                                                          header_dashboard):
    """Confirm that user cannot edit or delete Control from global search."""
    three_bbs = (header_dashboard.open_global_search().search_obj(control).
                 get_three_bbs(control.type))
    actual_options = {"can_edit": three_bbs.edit_option.exists,
                      "can_delete": three_bbs.delete_option.exists}
    expected_options = {"can_edit": False,
                        "can_delete": False}
    assert expected_options == actual_options

  def test_cannot_make_and_view_proposals_for_control(self, control,
                                                      soft_assert, selenium):
    """Confirm that user cannot make and view Proposals for Control."""
    info_page = webui_service.ControlsService(
        selenium).open_info_page_of_obj(control)
    webui_facade.soft_assert_cannot_make_proposal(info_page, soft_assert)
    webui_facade.soft_assert_cannot_view_proposals(info_page, soft_assert)
    soft_assert.assert_expectations()

  def test_cannot_restore_disabled_object_version(self, control, soft_assert,
                                                  selenium):
    """Confirm that user cannot restore disabled object's version."""
    webui_facade.soft_assert_cannot_view_version_history(
        control, soft_assert, selenium)
    soft_assert.assert_expectations()

  def test_object_export(self, control, create_tmp_dir, selenium):
    """Confirm that object can be exported and exported data is correct."""
    actual_objects = webui_facade.export_objects(
        path_to_export_dir=create_tmp_dir,
        obj_type=control.type)
    self.general_contain_assert(
        control.repr_ui(), actual_objects,
        *entity.Representation.tree_view_attrs_to_exclude)

  def test_user_cannot_add_person_to_custom_role(self, control,
                                                 controls_service):
    """Tests that user cannot add a person to custom Role."""
    expected_conditions = {"add_person_text_field_exists": False,
                           "same_url_for_new_tab": True}
    actual_conditions = copy.deepcopy(expected_conditions)
    widget = controls_service.open_info_page_of_obj(control)
    widget.control_owners.inline_edit.open()
    actual_conditions["add_person_text_field_exists"] = (
        widget.control_owners.add_person_text_field.exists)
    old_tab, new_tab = browsers.get_browser().windows()
    actual_conditions["same_url_for_new_tab"] = (old_tab.url == new_tab.url)
    assert expected_conditions == actual_conditions

  def test_user_cannot_update_custom_attribute(
      self, gcads_for_control, control, controls_service
  ):
    """Tests that user cannot update custom attribute."""
    expected_conditions = {"same_url_for_new_tab": True,
                           "controls_ca_editable": False}
    actual_conditions = copy.deepcopy(expected_conditions)
    actual_conditions["controls_ca_editable"] = (
        controls_service.has_gca_inline_edit(
            control, ca_type=element.AdminWidgetCustomAttributes.RICH_TEXT))
    old_tab, new_tab = browsers.get_browser().windows()
    actual_conditions["same_url_for_new_tab"] = (old_tab.url == new_tab.url)
    assert expected_conditions == actual_conditions

  def test_user_cannot_update_predefined_field(self, control, selenium):
    """Tests that user cannot update predefined field."""
    expected_conditions = {"predefined_field_updatable": False,
                           "same_url_for_new_tab": True}
    actual_conditions = copy.deepcopy(expected_conditions)
    info_widget = webui_service.ControlsService(
        selenium).open_info_page_of_obj(control)
    info_widget.assertions.open_inline_edit()
    actual_conditions[
        "predefined_field_updatable"] = info_widget.assertions.input.exists
    old_tab, new_tab = browsers.get_browser().windows()
    actual_conditions["same_url_for_new_tab"] = (old_tab.url == new_tab.url)
    assert expected_conditions == actual_conditions

  @pytest.mark.parametrize(
      'obj', ["product_mapped_to_control", "standard_mapped_to_control"],
      indirect=True)
  def test_cannot_unmap_control(self, control, obj, selenium):
    """Checks that user cannot unmap Control from Scope Objects/Directives and
    new tab opens."""
    webui_service.ControlsService(selenium).open_info_panel_of_mapped_obj(
        obj, control).three_bbs.select_unmap_in_new_frontend()
    old_tab, new_tab = browsers.get_browser().windows()
    expected_url = old_tab.url.replace(url.Widget.CONTROLS, url.Widget.INFO)
    assert new_tab.url == expected_url

  def test_review_details_for_disabled_obj(self, control, controls_service):
    """Check that new browser tab is displayed after clicking Review
    Details button for objects disabled in GGRC."""
    controls_service.open_info_page_of_obj(
        control).click_ctrl_review_details_btn()
    old_tab, new_tab = browsers.get_browser().windows()
    assert old_tab.url == new_tab.url

  def test_deprecated_obj_review_buttons(self, control, soft_assert, selenium):
    """Check that buttons 'Mark Reviewed' and 'Request Review' are not
    displayed at Control Info page."""
    info_page = factory.get_cls_webui_service(objects.get_plural(
        control.type))().open_info_page_of_obj(control)
    soft_assert.expect(not info_page.mark_reviewed_btn.exists,
                       "There should be no 'Mark Reviewed' button.")
    # Fix: the failure message previously had an unbalanced quote
    # ("'Request Review button.").
    soft_assert.expect(not info_page.request_review_btn.exists,
                       "There should be no 'Request Review' button.")
    soft_assert.assert_expectations()
|
from ..kernel import core
from ..kernel.core import VSkillModifier as V
from ..character import characterKernel as ck
from functools import partial
from ..status.ability import Ability_tool
from . import globalSkill
from .jobbranch import warriors
from .jobclass import demon
from math import ceil
### Demon Avenger job code (work in progress)
# TODO: add per-skill delays, apply 5th-job enhancement values, build the damage cycle
###### Passive Skill ######
class JobGenerator(ck.JobGenerator):
    """Skill/damage-cycle generator for the Demon Avenger job (work in progress).

    NOTE(review): generate() references several names that are never defined
    in this file (Exceed, Execution, BasicAttackWrapper, Booster, ... — see
    inline notes), so the damage cycle cannot run as-is.
    """

    def __init__(self):
        super(JobGenerator, self).__init__()
        self.jobtype = "str"
        self.jobname = "데몬어벤져"
        self.vEnhanceNum = 12
        # Pre-emptive party skills: Useful Sharp Eyes / Useful Hyper Body /
        # Useful Combat Orders (not implemented yet).
        self.preEmptiveSkills = 3
        self.ability_list = Ability_tool.get_ability_set('reuse', 'crit', 'boss_pdamage')

    def get_modifier_optimization_hint(self):
        # Optimizer hint: assume an extra 20% armor ignore is available.
        return core.CharacterModifier(armor_ignore = 20)

    def get_passive_skill_list(self, vEhc, chtr : ck.AbstractCharacter):
        passive_level = chtr.get_base_modifier().passive_level + self.combat
        # TODO: Blood Contract, Conversion Star Force.
        # Wild Rage (+10% damage) is reflected through the link skill, so it
        # is not considered here. Main stat not reflected; contributions welcome.
        AbyssalRage = core.InformedCharacterModifier("어비셜 레이지", att=40)
        AdvancedDesperadoMastery = core.InformedCharacterModifier("어드밴스드 데스페라도 마스터리",att = 50 + passive_level, crit_damage = 8)
        OverwhelmingPower = core.InformedCharacterModifier("오버휄밍 파워", pdamage=30 + passive_level)
        DefenseExpertise = core.InformedCharacterModifier("디펜스 엑스퍼타이즈", armor_ignore = 30 + passive_level)
        DemonicSharpness = core.InformedCharacterModifier("데모닉 샤프니스", crit=20)
        # Maple Warrior (Demon Avenger variant): +15% max HP instead of main stat.
        MapleHeroesDemon = core.InformedCharacterModifier("메이플 용사(데몬어벤져)", pstat_main = 15 + self.combat / 2)
        # Final-damage related passives (Release Overload, Demon Frenzy).
        InnerStrength = core.InformedCharacterModifier("이너 스트렝스", stat_main = 600)
        DiabolicRecovery = core.InformedCharacterModifier("디아볼릭 리커버리", pstat_main=25)
        # Remaining-HP ratio assumed for Demon Frenzy's passive part.
        HP_RATE = 100
        # +1% final damage per 3% of max HP consumed (4% until skill level 24).
        # NOTE(review): with HP_RATE fixed at 100 this term is always 0.
        FrenzyPassive = core.InformedCharacterModifier("데몬 프렌지 (최종 데미지)", pdamage_indep = (100 - HP_RATE) // (4 - (vEhc.getV(0, 0) // 25)))
        RUIN_USE = False
        if RUIN_USE:
            # Max-HP build = 800 (600+200); Ruin HP = 560.
            RuinForceShield = core.InformedCharacterModifier("루인 포스실드", stat_main = -240, stat_sub = -2, pdamage_indep = 10)
        Reboot = core.InformedCharacterModifier("리부트", att = 5, pdamage = 127)
        # NOTE(review): RuinForceShield is never returned, even when RUIN_USE is True.
        return [AbyssalRage, AdvancedDesperadoMastery, OverwhelmingPower, DefenseExpertise, DemonicSharpness, MapleHeroesDemon, InnerStrength, DiabolicRecovery, FrenzyPassive, Reboot]

    def get_not_implied_skill_list(self, vEhc, chtr : ck.AbstractCharacter):
        passive_level = chtr.get_base_modifier().passive_level + self.combat
        # Weapon constant and mastery, applied as independent damage terms.
        WeaponConstant = core.InformedCharacterModifier("무기상수", pdamage_indep = 30)
        Mastery = core.InformedCharacterModifier("숙련도",pdamage_indep = -5 + 0.5*ceil(passive_level/2))
        return [WeaponConstant, Mastery]

    def generate(self, vEhc, chtr : ck.AbstractCharacter):
        '''
        Core-enhancement order: Exceed Execution, Shield Chasing -> Moonlight Slash (unused).
        '''
        '''
        Hyper skills: the three Exceed hypers, Shield Chasing Reinforce, Extra Target.
        TODO:
        Aura Weapon - needs writing
        Demon Frenzy - how should the DPM baseline account for it?
        Blood Fist - needs writing
        Dimension Sword - needs writing
        '''
        # V-core values need a complete rewrite.
        # TODO: should be converted to OptionalElement.
        passive_level = chtr.get_base_modifier().passive_level + self.combat
        # Exceed stacks 0-3. Original note lists delays 900/900/840/780, but the
        # code below uses 660/660/630/570 — TODO confirm which is correct.
        Execution_0 = core.DamageSkill("익시드: 엑스큐션 (0스택)", 660, 540+8*self.combat, 4, modifier = core.CharacterModifier(armor_ignore = 30 + self.combat, pdamage = 20 + 20)).setV(vEhc, 0, 2, False).wrap(core.DamageSkillWrapper)
        Execution_1 = core.DamageSkill("익시드: 엑스큐션 (1스택)", 660, 540+8*self.combat, 4, modifier = core.CharacterModifier(armor_ignore = 30 + self.combat, pdamage = 20 + 20)).setV(vEhc, 0, 2, False).wrap(core.DamageSkillWrapper)
        Execution_2 = core.DamageSkill("익시드: 엑스큐션 (2스택)", 630, 540+8*self.combat, 4, modifier = core.CharacterModifier(armor_ignore = 30 + self.combat, pdamage = 20 + 20)).setV(vEhc, 0, 2, False).wrap(core.DamageSkillWrapper)
        Execution_3 = core.DamageSkill("익시드: 엑스큐션 (3스택)", 570, 540+8*self.combat, 4, modifier = core.CharacterModifier(armor_ignore = 30 + self.combat, pdamage = 20 + 20)).setV(vEhc, 0, 2, False).wrap(core.DamageSkillWrapper)
        # Exceed at 5+ stacks.
        ExecutionExceed = core.DamageSkill("익시드: 엑스큐션 (강화)", 540, 540+8*self.combat, 6, modifier = core.CharacterModifier(armor_ignore = 30 + self.combat, pdamage = 20 + 20)).setV(vEhc, 0, 2, False).wrap(core.DamageSkillWrapper)
        # Up to 10 hits.
        # NOTE(review): a DamageSkill is wrapped with SummonSkillWrapper here —
        # looks inconsistent, TODO confirm whether DamageSkillWrapper was intended.
        ShieldChasing = core.DamageSkill("실드 체이싱", 540, 500 + 10 * self.combat, 2*2*(8+2), cooltime = 6000, modifier = core.CharacterModifier(armor_ignore = 30, pdamage=20+20), red = True).isV(vEhc,1,1).wrap(core.SummonSkillWrapper)
        ArmorBreak = core.DamageSkill("아머 브레이크", 0, 350 + 5 * self.combat, 4, cooltime = (30+self.combat)*1000).setV(vEhc, 1, 2, True).wrap(core.DamageSkillWrapper)
        ArmorBreakBuff = core.BuffSkill("아머 브레이크(디버프)", 720, (30+self.combat)*1000, armor_ignore = 30 + self.combat).wrap(core.BuffSkillWrapper)
        #ThousandSword = core.Damageskill("사우전드 소드", 0, 500, 8, cooltime = 8*1000).setV(vEhc, 0, 0, False).wrap(core.DamageSkillWrapper)
        # Bonus chance 70% -> 80%.
        EnhancedExceed = core.DamageSkill("인핸스드 익시드", 0, 200+4*passive_level, 2*(0.8+0.04*passive_level), cooltime = -1).setV(vEhc, 1, 2, True).wrap(core.DamageSkillWrapper)
        # Demon blood is sprinkled on the ground periodically, hitting up to 10
        # enemies twice at (300 + 8n)% for 5 seconds.
        # Assumed 22 hits per second.
        FrenzyStack = 1
        FrenzyPerSecond = 11
        FrenzyDOT = core.SummonSkill("프렌지 장판", 0, 1000/FrenzyPerSecond, 300 + 8 * vEhc.getV(0, 0), FrenzyStack, 999999).wrap(core.SummonSkillWrapper)
        # Blood Fist (instant cast).
        # NOTE(review): `CharacterModifier` below is missing the `core.` prefix —
        # this raises NameError when generate() runs.
        DemonicBlast = core.DamageSkill("블러드 피스트", 0, 500 + 20*vEhc.getV(0,0), 7, cooltime = 10000, modifier = CharacterModifier(crit = 100, armor_ignore = 100)).wrap(core.DamageSkillWrapper)
        # Sustained vs burst usage... server lag is not modeled in this source.
        # Reference: https://blog.naver.com/oe135/221372243858
        #DimensionSword = core.SummonSkill("디멘션 소드(평딜)", 480, 3000, 1250+14*vEhc.getV(0,0), 8, 40*1000, cooltime = 120*1000, modifier=core.CharacterModifier(armor_ignore=100)).wrap(core.SummonSkillWrapper)
        DimensionSwordReuse = core.SummonSkill("디멘션 소드 (극딜)", 480, 210, 300+vEhc.getV(0,0)*12, 6, 8*1000, cooltime=120*1000, modifier=core.CharacterModifier(armor_ignore=100)).wrap(core.SummonSkillWrapper)
        #BatSwarm = core.SummonSkill("배츠 스웜", 0, 0, 200, 1, 0)
        #BloodImprison = core.DamageSkill("블러디 임프리즌", 0, 800, 3, cooltime = 120*1000)
        #Buff skills
        ForbiddenContract = core.BuffSkill("포비든 컨트랙트", 0, 30*1000, cooltime = 75*1000, pdamage = 10).wrap(core.BuffSkillWrapper)
        DemonicFortitude = core.BuffSkill("데모닉 포티튜드", 0, 60*1000, cooltime=120*1000, pdamage=10).wrap(core.BuffSkillWrapper)
        ReleaseOverload = core.BuffSkill("릴리즈 오버로드", 0, 60*1000, pdamage_indep= 25).wrap(core.BuffSkillWrapper)
        # Demon 5th-job shared skills.
        CallMastema, MastemaClaw = demon.CallMastemaWrapper(vEhc, 0, 0)
        AnotherGoddessBuff, AnotherVoid = demon.AnotherWorldWrapper(vEhc, 0, 0)
        ###### Skill Wrapper ######
        '''Damage-cycle summary
        https://blog.naver.com/oe135/221538210455
        Burst with the buff skills every 3 minutes.
        '''
        ArmorBreakBuff.onAfter(ArmorBreak)
        ExecutionExceed.onAfter(EnhancedExceed)
        ReleaseOverload.onAfter(Execution_0)
        # NOTE(review): `Exceed` and `Execution` are never defined — NameError here.
        ExceedOpt = core.OptionalElement(Exceed.judge(5, 1), ExecutionExceed, Execution)
        BasicAttack = core.OptionalElement(ReleaseOverload.is_active, ExecutionExceed, ReleaseOverload)
        # Aura Weapon
        auraweapon_builder = warriors.AuraWeaponBuilder(vEhc, 3, 2)
        for sk in [Execution_0, Execution_1, Execution_2, Execution_3, ExecutionExceed, ShieldChasing, ArmorBreak, DemonicBlast]:
            auraweapon_builder.add_aura_weapon(sk)
        AuraWeaponBuff, AuraWeapon = auraweapon_builder.get_buff()
        # NOTE(review): most names in the return below are undefined in this
        # method (BasicAttackWrapper, Booster, DevilCryBuff, InfinityForce,
        # Metamorphosis, BlueBlood, DemonFortitude, DemonAwakning, Execution,
        # Cerberus, DevilCry, SpiritOfRageEnd, MetamorphosisSummon,
        # DemonAwakningSummon, SpiritOfRage, Orthros, Orthros_) — NameError if run.
        return(BasicAttackWrapper,
                [globalSkill.useful_sharp_eyes(), globalSkill.useful_combat_orders(),
                Booster, DevilCryBuff, InfinityForce, Metamorphosis, BlueBlood, DemonFortitude, AuraWeaponBuff, AuraWeapon, DemonAwakning,
                globalSkill.soul_contract()] +\
                [Execution, Cerberus, DevilCry, SpiritOfRageEnd] +\
                [MetamorphosisSummon, CallMastema, DemonAwakningSummon, SpiritOfRage, Orthros, Orthros_] +\
                [BasicAttackWrapper])
<reponame>kaibabbob/capirca
"""Tests for google3.third_party.py.capirca.lib.gcp_hf.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import unittest
from absl.testing import parameterized
from capirca.lib import gcp
from capirca.lib import gcp_hf
from capirca.lib import nacaddr
from capirca.lib import naming
from capirca.lib import policy
import mock
HEADER_NO_OPTIONS = """
header {
comment:: "The general policy comment."
target:: gcp_hf displayname
}
"""
HEADER_OPTION_MAX = """
header {
comment:: "The general policy comment."
target:: gcp_hf displayname 20
}
"""
HEADER_OPTION_EGRESS = """
header {
comment:: "The general policy comment."
target:: gcp_hf displayname EGRESS
}
"""
HEADER_OPTION_EGRESS_2 = """
header {
comment:: "The general policy comment."
target:: gcp_hf displayname2 EGRESS
}
"""
HEADER_OPTION_AF = """
header {
comment:: "The general policy comment."
target:: gcp_hf displayname inet
}
"""
HEADER_OPTION_EGRESS_AND_MAX = """
header {
comment:: "The general policy comment."
target:: gcp_hf displayname EGRESS 20
}
"""
HEADER_OPTION_EGRESS_AND_AF = """
header {
comment:: "The general policy comment."
target:: gcp_hf displayname EGRESS inet
}
"""
HEADER_OPTION_MAX_AND_AF = """
header {
comment:: "The general policy comment."
target:: gcp_hf displayname 20 inet
}
"""
HEADER_VERY_LOW_DEFAULT_MAX = """
header {
comment:: "The general policy comment."
target:: gcp_hf displayname 1
}
"""
BAD_HEADER_NO_DISPLAYNAME = """
header {
comment:: "Header without a policy name."
target:: gcp_hf
}
"""
BAD_HEADER_LONG_DISPLAYNAME = """
header {
comment:: "Using a display name with 64 characters."
target:: gcp_hf this-is-a-very-very-long-policy-name-which-is-over-63-characters
}
"""
BAD_HEADER_INVALID_DISPLAYNAME_1 = """
header {
comment:: "Using a display name with an upper case letter."
target:: gcp_hf Displayname
}
"""
BAD_HEADER_INVALID_DISPLAYNAME_2 = """
header {
comment:: "Using a display name with an underscore character."
target:: gcp_hf display_name
}
"""
BAD_HEADER_INVALID_DISPLAYNAME_3 = """
header {
comment:: "Using a display name that ends in a dash."
target:: gcp_hf displayname-
}
"""
BAD_HEADER_UNKNOWN_OPTION = """
header {
comment:: "The general policy comment."
target:: gcp_hf displayname INGRESS randomOption
}
"""
BAD_HEADER_UNKNOWN_DIRECTION = """
header {
comment:: "The general policy comment."
target:: gcp_hf displayname BIGRESS
}
"""
BAD_HEADER_INVALID_MAX_COST = """
header {
comment:: "The general policy comment."
target:: gcp_hf displayname INGRESS 888888888
}
"""
BAD_HEADER_WRONG_PLATFORM = """
header {
comment:: "The general policy comment."
target:: wrong_platform
}
"""
TERM_ALLOW_ALL_INTERNAL = """
term allow-internal-traffic {
comment:: "Generic description"
protocol:: tcp icmp udp
action:: next
}
"""
TERM_ALLOW_DNS = """
term allow-dns-traffic {
comment:: "Generic description"
source-address:: PUBLIC_NAT
protocol:: tcp udp
destination-port:: DNS
action:: next
}
"""
TERM_ALLOW_PORT = """
term allow-traffic-to-port {
comment:: "Generic description"
source-address:: PUBLIC_NAT
protocol:: tcp
destination-port:: PORT
action:: next
}
"""
TERM_ALLOW_PORT_RANGE = """
term allow-port-range {
comment:: "Generic description"
source-address:: PUBLIC_NAT
protocol:: tcp
destination-port:: RANGE
action:: next
}
"""
TERM_RESTRICT_EGRESS = """
term restrict_egress {
comment:: "Generic description"
destination-address:: PUBLIC_NAT
protocol:: tcp icmp udp
action:: next
}
"""
TERM_DENY_INGRESS = """
term default-deny-ingress {
comment:: "Generic description"
action:: deny
}
"""
TERM_DENY_EGRESS = """
term default-deny-egress {
comment:: "Generic description"
action:: deny
}
"""
TERM_WITH_TARGET_RESOURCES = """
term default-deny-ingress-on-target {
comment:: "Generic description"
source-address:: ANY
action:: deny
target-resources:: (project1, vpc1)
target-resources:: (project2, vpc2)
}
"""
TERM_WITH_TARGET_RESOURCES_2 = """
term default-deny-ingress-on-target {
comment:: "Generic description"
source-address:: ANY
action:: deny
target-resources:: [(project1, vpc1),(project2,vpc2)]
}
"""
TERM_WITH_LOGGING = """
term term-with-logging {
comment:: "Generic description"
source-address:: ANY
protocol:: tcp
action:: accept
logging:: true
}
"""
TERM_NO_COMMENT = """
term allow-internal-traffic {
source-address:: INTERNAL
protocol:: tcp icmp udp
action:: next
}
"""
TERM_LONG_COMMENT = """
term allow-internal-traffic {
comment:: "This is a very long description, it is longer than sixty-four chars"
source-address:: INTERNAL
protocol:: tcp icmp udp
action:: next
}
"""
BAD_TERM_PROTO = """
term bad-term-unsupp-proto {
comment:: "Generic description"
source-address:: PUBLIC_NAT
protocol:: ggp
action:: next
}
"""
BAD_TERM_USING_SOURCE_TAG = """
term bad-term-with-tag {
comment:: "Generic description"
source-address:: PUBLIC_NAT
source-tag:: a-tag
protocol:: tcp icmp udp
action:: next
}
"""
BAD_TERM_USING_DEST_TAG = """
term bad-term-with-tag {
comment:: "Generic description"
source-address:: PUBLIC_NAT
destination-tag:: a-tag
protocol:: tcp icmp udp
action:: next
}
"""
BAD_TERM_SOURCE_PORT = """
term allow-traffic-from-port {
comment:: "Generic description"
destination-address:: INTERNAL
protocol:: tcp
source-port:: PORT
action:: next
}
"""
BAD_TERM_IP_VERSION_MISMATCH = """
term icmpv6-in-inet-term {
comment:: "Generic description"
source-address:: INTERNAL
protocol:: icmpv6
action:: next
}
"""
BAD_TERM_OPTIONS = """
term term-with-options {
comment:: "Generic description"
destination-address:: INTERNAL
option:: TCP_ESTABLISHED
action:: next
}
"""
BAD_TERM_NON_VALID_PROJECT_ID = """
term default-deny-ingress-on-target {
comment:: "Generic description"
protocol:: tcp
source-address:: ANY
action:: deny
target-resources:: (proj, vpc1)
}
"""
BAD_TERM_NON_VALID_VPC_NAME = """
term default-deny-ingress-on-target {
comment:: "Generic description"
protocol:: tcp
source-address:: ANY
action:: deny
target-resources:: (project, Vpc)
}
"""
EXPECTED_ONE_RULE_INGRESS = """
[
{
"displayName": "displayname",
"type": "FIREWALL",
"rules": [
{
"action": "goto_next",
"description": "allow-internal-traffic: Generic description",
"direction": "INGRESS",
"match": {
"config": {
"layer4Configs": [
{
"ipProtocol": "tcp"
},
{
"ipProtocol": "icmp"
},
{
"ipProtocol": "udp"
}
],
"srcIpRanges": ["0.0.0.0/0"]
},
"versionedExpr": "FIREWALL"
},
"priority": 1,
"enableLogging": false
}
]
}
]
"""
EXPECTED_ONE_RULE_INGRESS_W_LOGGING = """
[
{
"displayName": "displayname",
"type": "FIREWALL",
"rules": [
{
"action": "allow",
"description": "term-with-logging: Generic description",
"direction": "INGRESS",
"match": {
"config": {
"layer4Configs": [
{
"ipProtocol": "tcp"
}
],
"srcIpRanges": ["10.0.0.0/8"]
},
"versionedExpr": "FIREWALL"
},
"priority": 1,
"enableLogging": true
}
]
}
]
"""
EXPECTED_ONE_RULE_EGRESS = """
[
{
"displayName": "displayname",
"type": "FIREWALL",
"rules": [
{
"action": "goto_next",
"description": "restrict_egress: Generic description",
"direction": "EGRESS",
"match": {
"config": {
"layer4Configs": [
{
"ipProtocol": "tcp"
},
{
"ipProtocol": "icmp"
},
{
"ipProtocol": "udp"
}
],
"destIpRanges": ["10.0.0.0/8"]
},
"versionedExpr": "FIREWALL"
},
"priority": 1,
"enableLogging": false
}
]
}
]
"""
EXPECTED_MULTIPLE_RULE_INGRESS = """
[
{
"displayName": "displayname",
"type": "FIREWALL",
"rules": [
{
"action": "goto_next",
"description": "allow-internal-traffic: Generic description",
"direction": "INGRESS",
"enableLogging": false,
"match": {
"config": {
"layer4Configs": [
{
"ipProtocol": "tcp"
},
{
"ipProtocol": "icmp"
},
{
"ipProtocol": "udp"
}
],
"srcIpRanges": ["0.0.0.0/0"]
},
"versionedExpr": "FIREWALL"
},
"priority": 1
},
{
"action": "goto_next",
"description": "allow-dns-traffic: Generic description",
"direction": "INGRESS",
"enableLogging": false,
"match": {
"config": {
"layer4Configs": [
{
"ipProtocol": "tcp",
"ports": ["53"]
},
{
"ipProtocol": "udp",
"ports": ["53"]
}
],
"srcIpRanges": ["0.0.0.0/0"]
},
"versionedExpr": "FIREWALL"
},
"priority": 2
}
]
}
]
"""
EXPECTED_MULTIPLE_RULE_INGRESS_W_DENY = """
[
{
"displayName": "displayname",
"type": "FIREWALL",
"rules": [
{
"action": "goto_next",
"description": "Generic description",
"direction": "INGRESS",
"match": {
"config": {
"layer4Configs": [
{
"ipProtocol": "tcp"
},
{
"ipProtocol": "icmp"
},
{
"ipProtocol": "udp"
}
],
"srcIpRanges": ["10.0.0.0/8"]
},
"versionedExpr": "FIREWALL"
},
"priority": 1,
"enableLogging": false
},
{
"action": "deny",
"description": "Generic description",
"direction": "INGRESS",
"match": {
"config": {
"srcIpRanges": ["0.0.0.0/0"]
},
"versionedExpr": "FIREWALL"
},
"priority": 2,
"enableLogging": false
}
]
}
]
"""
EXPECTED_PORT_RANGE_INGRESS = """
[
{
"displayName": "displayname",
"type": "FIREWALL",
"rules": [
{
"action": "goto_next",
"description": "allow-port-range: Generic description",
"direction": "INGRESS",
"match": {
"config": {
"layer4Configs": [
{
"ipProtocol": "tcp",
"ports": ["8000-9000"]
}
],
"srcIpRanges": ["10.0.0.0/8"]
},
"versionedExpr": "FIREWALL"
},
"priority": 1,
"enableLogging": false
}
]
}
]
"""
EXPECTED_DENY_INGRESS = """
[
{
"displayName": "displayname",
"type": "FIREWALL",
"rules": [
{
"action": "deny",
"description": "default-deny-ingress: Generic description",
"direction": "INGRESS",
"match": {
"config": {
"layer4Configs": [
{
"ipProtocol": "all"
}
],
"srcIpRanges": ["0.0.0.0/0"]
},
"versionedExpr": "FIREWALL"
},
"priority": 1,
"enableLogging": false
}
]
}
]
"""
EXPECTED_DENY_INGRESS_ON_TARGET = """
[
{
"displayName": "displayname",
"type": "FIREWALL",
"rules": [
{
"action": "deny",
"description": "default-deny-ingress-on-target: Generic description",
"direction": "INGRESS",
"match": {
"config": {
"layer4Configs": [
{
"ipProtocol": "all"
}
],
"srcIpRanges": ["0.0.0.0/0"]
},
"versionedExpr": "FIREWALL"
},
"priority": 1,
"enableLogging": false,
"targetResources": ["https://www.googleapis.com/compute/v1/projects/project1/global/networks/vpc1",
"https://www.googleapis.com/compute/v1/projects/project2/global/networks/vpc2"]
}
]
}
]
"""
EXPECTED_INGRESS_AND_EGRESS_W_DENY = """
[
{
"displayName": "displayname",
"type": "FIREWALL",
"rules": [
{
"action": "goto_next",
"description": "allow-internal-traffic: Generic description",
"direction": "INGRESS",
"match": {
"config": {
"layer4Configs": [
{
"ipProtocol": "tcp"
},
{
"ipProtocol": "icmp"
},
{
"ipProtocol": "udp"
}
],
"srcIpRanges": ["0.0.0.0/0"]
},
"versionedExpr": "FIREWALL"
},
"priority": 1,
"enableLogging": false
},
{
"action": "deny",
"description": "default-deny-ingress: Generic description",
"direction": "INGRESS",
"match": {
"config": {
"layer4Configs": [
{
"ipProtocol": "all"
}
],
"srcIpRanges": ["0.0.0.0/0"]
},
"versionedExpr": "FIREWALL"
},
"priority": 2,
"enableLogging": false
},
{
"action": "goto_next",
"description": "restrict_egress: Generic description",
"direction": "EGRESS",
"match": {
"config": {
"layer4Configs": [
{
"ipProtocol": "tcp"
},
{
"ipProtocol": "icmp"
},
{
"ipProtocol": "udp"
}
],
"destIpRanges": ["0.0.0.0/0"]
},
"versionedExpr": "FIREWALL"
},
"priority": 3,
"enableLogging": false
},
{
"action": "deny",
"description": "default-deny-egress: Generic description",
"direction": "EGRESS",
"match": {
"config": {
"destIpRanges": ["0.0.0.0/0"],
"layer4Configs": [
{
"ipProtocol": "all"
}
]
},
"versionedExpr": "FIREWALL"
},
"priority": 4,
"enableLogging": false
}
]
}
]
"""
EXPECTED_DENY_EGRESS = """
[
{
"displayName": "displayname",
"type": "FIREWALL",
"rules": [
{
"action": "deny",
"description": "default-deny-egress: Generic description",
"direction": "EGRESS",
"match": {
"config": {
"destIpRanges": ["0.0.0.0/0"],
"layer4Configs": [
{
"ipProtocol": "all"
}
]
},
"versionedExpr": "FIREWALL"
},
"priority": 1,
"enableLogging": false
}
]
}
]
"""
EXPECTED_COST_OF_ONE = """
[
{
"displayName": "displayname",
"type": "FIREWALL",
"rules": [
{
"action": "goto_next",
"description": "allow-traffic-to-port: Generic description",
"direction": "INGRESS",
"enableLogging": false,
"match": {
"config": {
"layer4Configs": [
{
"ipProtocol": "tcp",
"ports": ["80"]
}
],
"srcIpRanges": ["10.1.1.0/24"]
},
"versionedExpr": "FIREWALL"
},
"priority": 1
}
]
}
]
"""
# Term keywords the gcp_hf generator is expected to accept.
SUPPORTED_TOKENS = frozenset({
    'action',
    'comment',
    'destination_address',
    'destination_port',
    'destination_tag',
    'logging',
    'name',
    'option',
    'protocol',
    'source_address',
    'source_port',
    'source_tag',
    'stateless_reply',
    'target_resources',
    'translated',
})
# Allowed sub-token values per keyword.
SUPPORTED_SUB_TOKENS = {
    'action': {
        'accept', 'deny', 'next'
    }
}
# Expiry-info level passed to the ACL generators under test.
EXP_INFO = 2
# Address fixtures returned by the mocked naming service.
TEST_IP = [nacaddr.IP('10.0.0.0/8')]
ALL_IPS = [nacaddr.IP('0.0.0.0/0')]
class GcpHfTest(parameterized.TestCase):
def setUp(self):
super(GcpHfTest, self).setUp()
self.naming = mock.create_autospec(naming.Naming)
def _StripAclHeaders(self, acl):
return '\n'.join([line for line in str(acl).split('\n')
if not line.lstrip().startswith('#')])
def testDefaultHeader(self):
"""Test that a header without options is accepted."""
self.naming.GetNetAddr.return_value = ALL_IPS
acl = gcp_hf.HierarchicalFirewall(
policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_ALLOW_ALL_INTERNAL,
self.naming),
EXP_INFO)
expected = json.loads(EXPECTED_ONE_RULE_INGRESS)
self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))
def testOptionMaxHeader(self):
    """Test that a header with a default maximum cost is accepted."""
    # The naming mock resolves every network token used by the policy text.
    self.naming.GetNetAddr.return_value = ALL_IPS
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_OPTION_MAX + TERM_ALLOW_ALL_INTERNAL,
                           self.naming),
        EXP_INFO)
    # Compare parsed JSON so formatting/ordering differences are ignored.
    expected = json.loads(EXPECTED_ONE_RULE_INGRESS)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))

def testOptionEgressHeader(self):
    """Test that a header with direction is accepted."""
    self.naming.GetNetAddr.return_value = TEST_IP
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_OPTION_EGRESS + TERM_RESTRICT_EGRESS,
                           self.naming),
        EXP_INFO)
    expected = json.loads(EXPECTED_ONE_RULE_EGRESS)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))

def testOptionAFHeader(self):
    """Test that a header with address family is accepted."""
    self.naming.GetNetAddr.return_value = ALL_IPS
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_OPTION_AF + TERM_ALLOW_ALL_INTERNAL,
                           self.naming),
        EXP_INFO)
    expected = json.loads(EXPECTED_ONE_RULE_INGRESS)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))

def testOptionEgressAndMaxHeader(self):
    """Test a header with direction and default maximum cost is accepted."""
    self.naming.GetNetAddr.return_value = TEST_IP
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_OPTION_EGRESS_AND_MAX + TERM_RESTRICT_EGRESS,
                           self.naming),
        EXP_INFO)
    expected = json.loads(EXPECTED_ONE_RULE_EGRESS)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))

def testOptionEgressAndAF(self):
    """Test a header with a direction and address family is accepted."""
    self.naming.GetNetAddr.return_value = TEST_IP
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_OPTION_EGRESS_AND_AF + TERM_RESTRICT_EGRESS,
                           self.naming),
        EXP_INFO)
    expected = json.loads(EXPECTED_ONE_RULE_EGRESS)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))

def testOptionMaxAndAF(self):
    """Test a header with default maximum cost & address family is accepted."""
    self.naming.GetNetAddr.return_value = ALL_IPS
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_OPTION_MAX_AND_AF + TERM_ALLOW_ALL_INTERNAL,
                           self.naming),
        EXP_INFO)
    expected = json.loads(EXPECTED_ONE_RULE_INGRESS)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))
def testRaisesHeaderErrorOnUnknownOption(self):
    """Test that an unknown header option raises a HeaderError."""
    # Each of these tests feeds a deliberately malformed header through
    # ParsePolicy and expects HierarchicalFirewall to reject it.
    with self.assertRaises(gcp.HeaderError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(BAD_HEADER_UNKNOWN_OPTION
                               + TERM_ALLOW_ALL_INTERNAL,
                               self.naming),
            EXP_INFO)

def testRaisesHeaderErrorOnUnknownDirection(self):
    """Test that an unknown direction option raises a HeaderError."""
    with self.assertRaises(gcp.HeaderError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(BAD_HEADER_UNKNOWN_DIRECTION
                               + TERM_ALLOW_ALL_INTERNAL,
                               self.naming),
            EXP_INFO)

def testRaisesHeaderErrorOnInvalidMaxCost(self):
    """Test that a maximum default cost over 2^16 raises a HeaderError."""
    with self.assertRaises(gcp.HeaderError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(BAD_HEADER_INVALID_MAX_COST
                               + TERM_ALLOW_ALL_INTERNAL,
                               self.naming),
            EXP_INFO)

def testRaisesHeaderErrorOnLongDisplayName(self):
    """Test that a long displayName raises a HeaderError."""
    with self.assertRaises(gcp.HeaderError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(BAD_HEADER_LONG_DISPLAYNAME
                               + TERM_ALLOW_ALL_INTERNAL,
                               self.naming),
            EXP_INFO)

def testRaisesHeaderErrorOnHeaderWithoutDisplayName(self):
    """Test that a header without a policy name raises a HeaderError."""
    with self.assertRaises(gcp.HeaderError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(BAD_HEADER_NO_DISPLAYNAME
                               + TERM_ALLOW_ALL_INTERNAL, self.naming),
            EXP_INFO)

def testRaisesHeaderErrorOnIncorrectDisplayName1(self):
    """Test that an invalid displayName raises a HeaderError."""
    with self.assertRaises(gcp.HeaderError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(BAD_HEADER_INVALID_DISPLAYNAME_1
                               + TERM_ALLOW_ALL_INTERNAL,
                               self.naming),
            EXP_INFO)

def testRaisesHeaderErrorOnIncorrectDisplayName2(self):
    """Test that an invalid displayName raises a HeaderError."""
    with self.assertRaises(gcp.HeaderError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(BAD_HEADER_INVALID_DISPLAYNAME_2
                               + TERM_ALLOW_ALL_INTERNAL,
                               self.naming),
            EXP_INFO)

def testRaisesHeaderErrorOnIncorrectDisplayName3(self):
    """Test that an invalid displayName raises a HeaderError."""
    with self.assertRaises(gcp.HeaderError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(BAD_HEADER_INVALID_DISPLAYNAME_3
                               + TERM_ALLOW_ALL_INTERNAL,
                               self.naming),
            EXP_INFO)
def testRaisesTermErrorOnTermWithDestinationTag(self):
    """Test that a term with a destination tag raises an error.

    Tags are not supported in HF.
    """
    with self.assertRaises(gcp.TermError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_USING_DEST_TAG,
                               self.naming),
            EXP_INFO)

def testRaisesTermErrorOnTermWithSourceTag(self):
    """Test that a term with a source tag raises an error.

    Tags are not supported in HF.
    """
    with self.assertRaises(gcp.TermError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_USING_SOURCE_TAG,
                               self.naming),
            EXP_INFO)

def testRaisesTermErrorOnTermWithUnsupportedProtocol(self):
    """Test that a term with an unsupported protocol raises an error."""
    with self.assertRaises(gcp.TermError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_PROTO, self.naming),
            EXP_INFO)

def testRaisesTermErrorOnTermWithSourcePort(self):
    """Test that a term with a source port raises Term error."""
    self.naming.GetNetAddr.return_value = TEST_IP
    self.naming.GetServiceByProto.side_effect = [['53']]
    with self.assertRaises(gcp.TermError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_SOURCE_PORT,
                               self.naming),
            EXP_INFO)

def testRaisesTermErrorOnTermWithOptions(self):
    """Test that a term with unsupported options raises Term error."""
    self.naming.GetNetAddr.return_value = TEST_IP
    with self.assertRaises(gcp.TermError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_OPTIONS,
                               self.naming),
            EXP_INFO)

def testRaisesTermErrorOnInvalidProjectID(self):
    """Test that an invalid project ID on target resources raises Term error."""
    self.naming.GetNetAddr.return_value = TEST_IP
    with self.assertRaises(gcp.TermError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_NON_VALID_PROJECT_ID,
                               self.naming),
            EXP_INFO)

def testRaisesTermErrorOnInvalidVPCName(self):
    """Test that an invalid VPC name on target resources raises Term error."""
    self.naming.GetNetAddr.return_value = TEST_IP
    with self.assertRaises(gcp.TermError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(HEADER_NO_OPTIONS + BAD_TERM_NON_VALID_VPC_NAME,
                               self.naming),
            EXP_INFO)
def testRaisesDifferentPolicyNameErrorWhenDifferentPolicyNames(self):
    """Test that different policy names raises DifferentPolicyNameError."""
    with self.assertRaises(gcp_hf.DifferentPolicyNameError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_DENY_INGRESS
                               + HEADER_OPTION_EGRESS_2 + TERM_DENY_EGRESS,
                               self.naming),
            EXP_INFO)

def testIgnorePolicyFromADifferentPlatform(self):
    """Test that a policy with a header from a different platform is ignored."""
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(BAD_HEADER_WRONG_PLATFORM
                           + TERM_ALLOW_ALL_INTERNAL,
                           self.naming),
        EXP_INFO)
    # Nothing should be rendered for a foreign platform.
    self.assertEqual([], json.loads(self._StripAclHeaders(str(acl))))

def testIgnoreTermWithICMPv6(self):
    """Test that a term with only an icmpv6 protocol is not rendered."""
    self.naming.GetNetAddr.return_value = TEST_IP
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_OPTION_AF
                           + BAD_TERM_IP_VERSION_MISMATCH,
                           self.naming),
        EXP_INFO)
    # The policy shell is emitted, but the mismatched term renders empty.
    exp = [{'displayName': 'displayname', 'rules': [{}], 'type': 'FIREWALL'}]
    self.assertEqual(exp, json.loads(self._StripAclHeaders(str(acl))))
def testPriority(self):
    """Test that priority is set based on terms' ordering."""
    self.naming.GetNetAddr.return_value = ALL_IPS
    self.naming.GetServiceByProto.side_effect = [['53'], ['53']]
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_ALLOW_ALL_INTERNAL
                           + TERM_ALLOW_DNS, self.naming),
        EXP_INFO)
    expected = json.loads(EXPECTED_MULTIPLE_RULE_INGRESS)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))

def testLogging(self):
    """Test that logging is used when it is set on a term."""
    self.naming.GetNetAddr.return_value = TEST_IP
    self.naming.GetServiceByProto.side_effect = [['53'], ['53']]
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_WITH_LOGGING, self.naming),
        EXP_INFO)
    expected = json.loads(EXPECTED_ONE_RULE_INGRESS_W_LOGGING)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))

def testTargetResources(self):
    """Test that the target resources is used correctly."""
    self.naming.GetNetAddr.return_value = [nacaddr.IP('0.0.0.0/0')]
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_WITH_TARGET_RESOURCES,
                           self.naming),
        EXP_INFO)
    expected = json.loads(EXPECTED_DENY_INGRESS_ON_TARGET)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))

def testSecondWayOfPassingTargetResources(self):
    """Test that the alternate target-resources syntax is used correctly."""
    self.naming.GetNetAddr.return_value = [nacaddr.IP('0.0.0.0/0')]
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_WITH_TARGET_RESOURCES_2,
                           self.naming),
        EXP_INFO)
    expected = json.loads(EXPECTED_DENY_INGRESS_ON_TARGET)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))

def testMultiplePolicies(self):
    """Tests that both ingress and egress rules are included in one policy."""
    self.maxDiff = None
    self.naming.GetNetAddr.return_value = ALL_IPS
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_ALLOW_ALL_INTERNAL +
                           TERM_DENY_INGRESS + HEADER_OPTION_EGRESS +
                           TERM_RESTRICT_EGRESS + TERM_DENY_EGRESS,
                           self.naming),
        EXP_INFO)
    expected = json.loads(EXPECTED_INGRESS_AND_EGRESS_W_DENY)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))

def testPortRange(self):
    """Test that a port range is accepted and used correctly."""
    self.naming.GetNetAddr.return_value = TEST_IP
    self.naming.GetServiceByProto.side_effect = [['8000-9000']]
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_ALLOW_PORT_RANGE,
                           self.naming),
        EXP_INFO)
    expected = json.loads(EXPECTED_PORT_RANGE_INGRESS)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))

def testTermLongComment(self):
    """Test that a term's long comment gets truncated."""
    self.naming.GetNetAddr.return_value = ALL_IPS
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_LONG_COMMENT,
                           self.naming),
        EXP_INFO)
    # Build the expectation by substituting the truncated comment into the
    # baseline rendering.
    comment_truncated = EXPECTED_ONE_RULE_INGRESS.replace(
        'Generic description',
        'This is a very long description, it is l')
    expected = json.loads(comment_truncated)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))

def testDefaultDenyIngressCreation(self):
    """Test that the correct IP is correctly set on a deny all ingress term."""
    self.naming.GetNetAddr.return_value = ALL_IPS
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_DENY_INGRESS, self.naming),
        EXP_INFO)
    expected = json.loads(EXPECTED_DENY_INGRESS)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))

def testDefaultDenyEgressCreation(self):
    """Test that the correct IP is correctly set on a deny all egress term."""
    self.naming.GetNetAddr.return_value = ALL_IPS
    acl = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_OPTION_EGRESS + TERM_DENY_EGRESS,
                           self.naming),
        EXP_INFO)
    expected = json.loads(EXPECTED_DENY_EGRESS)
    self.assertEqual(expected, json.loads(self._StripAclHeaders(str(acl))))
def testBuildTokens(self):
    """Test that _BuildTokens generates the expected list of tokens."""
    self.naming.GetNetAddr.side_effect = [TEST_IP]
    pol1 = gcp_hf.HierarchicalFirewall(
        policy.ParsePolicy(HEADER_NO_OPTIONS + TERM_ALLOW_ALL_INTERNAL,
                           self.naming),
        EXP_INFO)
    # _BuildTokens returns (supported tokens, supported sub-tokens).
    st, sst = pol1._BuildTokens()
    self.assertEqual(st, SUPPORTED_TOKENS)
    self.assertEqual(sst, SUPPORTED_SUB_TOKENS)

def testRaisesExceededCostError(self):
    """Test that ExceededCostError is raised when policy exceeds max cost."""
    self.naming.GetNetAddr.side_effect = [TEST_IP]
    with self.assertRaises(gcp_hf.ExceededCostError):
        gcp_hf.HierarchicalFirewall(
            policy.ParsePolicy(HEADER_VERY_LOW_DEFAULT_MAX
                               + TERM_ALLOW_ALL_INTERNAL, self.naming),
            EXP_INFO)
@parameterized.named_parameters(
    # Each case is (name, dict-form term, expected cost). The cost is the
    # product of IP-range count and per-protocol port entries below.
    ('1 ip, 2 protocols',
     {'match': {
         'config': {
             'destIpRanges': ['0.0.0.0/0'],
             'layer4Configs': [
                 {'ipProtocol': 'tcp'},
                 {'ipProtocol': 'icmp'}
             ]
         }
     }}, 2),
    ('1 ip, 3 protocols, ',
     {'match': {
         'config': {
             'srcIpRanges': ['0.0.0.0/0'],
             'layer4Configs': [
                 {'ipProtocol': 'tcp'},
                 {'ipProtocol': 'icmp'},
                 {'ipProtocol': 'udp'}
             ]
         }
     }}, 3),
    ('1 ip, 1 protocol with 1 port',
     {'match': {
         'config': {
             'srcIpRanges': ['0.0.0.0/0'],
             'layer4Configs': [
                 {'ipProtocol': 'tcp', 'ports': ['22']}
             ]
         }
     }}, 1),
    ('1 ip, 2 protocols with 2 ports each',
     {'match': {
         'config': {
             'srcIpRanges': ['0.0.0.0/0'],
             'layer4Configs': [
                 {'ipProtocol': 'tcp', 'ports': ['22']},
                 {'ipProtocol': 'udp', 'ports': ['22']}
             ]
         }
     }}, 2),
    ('1 ip, 1 protocol with 2 ports',
     {'match': {
         'config': {
             'srcIpRanges': ['0.0.0.0/0'],
             'layer4Configs': [
                 {'ipProtocol': 'tcp', 'ports': ['22', '23']}
             ]
         }
     }}, 2),
    ('2 ips, 1 protocol with 2 ports',
     {'match': {
         'config': {
             'srcIpRanges': ['1.4.6.8/10', '1.2.3.4/5'],
             'layer4Configs': [
                 {'ipProtocol': 'tcp', 'ports': ['22', '23']}
             ]
         }
     }}, 4),
    ('2 ips, 2 protocols with 2 ports each',
     {'match': {
         'config': {
             'srcIpRanges': ['1.4.6.8/10', '1.2.3.4/5'],
             'layer4Configs': [
                 {'ipProtocol': 'tcp', 'ports': ['22', '23']},
                 {'ipProtocol': 'udp', 'ports': ['22', '23']}
             ]
         }
     }}, 8)
)
def testGetCost(self, dict_term, expected):
    """Test that GetCost computes the expected cost of a dict-form term."""
    self.assertEqual(gcp_hf.GetCost(dict_term), expected)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
"""
This script is used to compute the window functions and mode coupling matrices of the SO simulations.
The window function are defined of the product of a survey mask and a galactic mask, there is also an option to use a hitcount maps
To run it you need to specify a dictionnary file, for example global_sims_all.dict provided in the:
https://github.com/simonsobs/PSpipe/tree/master/project/AnalyseSims/NERSC_run folder
The code will run as follow:
python sim_window_and_bbl.py global_sims_all.dict
"""
#!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
from pspy import so_map,so_window,so_mcm,sph_tools,so_spectra, pspy_utils, so_dict
import healpy as hp, numpy as np, pylab as plt
import os,sys
# We start by reading the info in the dictionary passed on the command line.
d = so_dict.so_dict()
d.read_from_file(sys.argv[1])
# Create three folders: one for the plots of the simulations, one for storing
# the mode-coupling matrices, and one for the window functions.
window_dir='window'
pspy_utils.create_directory(window_dir)
mcm_dir='mcm'
pspy_utils.create_directory(mcm_dir)
plot_dir='maps_plot'
pspy_utils.create_directory(plot_dir)
# Build and save the T and P window functions for every experiment/frequency.
experiment = d['experiment']
for exp in experiment:
    freqs = d['freq_%s' % exp]
    for f in freqs:
        # Read the galactic mask for T and P and the survey mask.
        mask_T = so_map.read_map(d['mask_T_%s_%s' % (exp, f)])
        mask_P = so_map.read_map(d['mask_P_%s_%s' % (exp, f)])
        survey_mask = so_map.read_map(d['survey_mask_%s_%s' % (exp, f)])
        # Optionally weight by the hitcount map; otherwise use a unit weight.
        # (Truthiness test assumes the dict stores a boolean here.)
        if d['use_pixel_weight']:
            weight = so_map.read_map(d['hitmaps_%s_%s' % (exp, f)])
        else:
            weight = mask_T.copy()
            weight.data[:] = 1
        mask_T.data *= survey_mask.data
        mask_P.data *= survey_mask.data
        # Apodize the temperature window, multiply it by the weight map,
        # then plot it and write it to disk.
        window_T = so_window.create_apodization(mask_T, apo_type=d['apo_type_survey_%s' % exp], apo_radius_degree=d['apo_radius_survey_%s' % exp])
        window_T.data *= weight.data
        window_T.write_map('%s/window_T_%s_%s.fits' % (window_dir, exp, f))
        window_T.plot(file_name='%s/window_T_%s_%s' % (plot_dir, exp, f))
        # Same for the polarisation window.
        window_P = so_window.create_apodization(mask_P, apo_type=d['apo_type_survey_%s' % exp], apo_radius_degree=d['apo_radius_survey_%s' % exp])
        window_P.data *= weight.data
        window_P.write_map('%s/window_P_%s_%s.fits' % (window_dir, exp, f))
        window_P.plot(file_name='%s/window_P_%s_%s' % (plot_dir, exp, f))
        # Free the large maps before the next frequency.
        del mask_T, mask_P, survey_mask, window_T, window_P, weight
# Compute the mode-coupling and binning matrices for every unordered pair of
# (experiment, frequency) windows.
for id_exp1,exp1 in enumerate(experiment):
    freqs1=d['freq_%s'%exp1]
    for id_f1,f1 in enumerate(freqs1):
        # read the beam file and the window function corresponding to exp1 and f1
        l,bl1= np.loadtxt(d['beam_%s_%s'%(exp1,f1)],unpack=True)
        window1_T=so_map.read_map('%s/window_T_%s_%s.fits'%(window_dir,exp1,f1))
        window1_P=so_map.read_map('%s/window_P_%s_%s.fits'%(window_dir,exp1,f1))
        for id_exp2,exp2 in enumerate(experiment):
            freqs2=d['freq_%s'%exp2]
            for id_f2,f2 in enumerate(freqs2):
                # The following if statements ensure that no wasteful computation is made:
                # LAT 145 x LAT 225 has the same mode coupling matrix as LAT 225 x LAT 145,
                # so only the (exp1, f1) <= (exp2, f2) ordering is computed.
                if (id_exp1==id_exp2) & (id_f1>id_f2) : continue
                if (id_exp1>id_exp2) : continue
                # read the beam file and the window function corresponding to exp2 and f2
                l,bl2= np.loadtxt(d['beam_%s_%s'%(exp2,f2)],unpack=True)
                window2_T=so_map.read_map('%s/window_T_%s_%s.fits'%(window_dir,exp2,f2))
                window2_P=so_map.read_map('%s/window_P_%s_%s.fits'%(window_dir,exp2,f2))
                print (exp1,f1,exp2,f2)
                # compute the mode coupling matrices and binning matrices and save them to disk
                mbb_inv,Bbl=so_mcm.mcm_and_bbl_spin0and2(win1=(window1_T,window1_P),win2=(window2_T,window2_P),bl1=(bl1,bl1),bl2=(bl2,bl2),binning_file= d['binning_file'],niter=0, lmax=d['lmax'], type=d['type'],save_file='%s/%s_%sx%s_%s'%(mcm_dir,exp1,f1,exp2,f2))
|
<reponame>dsavransky/admissions<filename>admissions/utils.py
import numpy as np
import pandas
import scipy.interpolate
from scipy.optimize import curve_fit
from scipy.stats import norm
import country_converter as coco
from fuzzywuzzy import process
from shutil import copyfile
from admissions.rankings import tfit
class utils:
def __init__(
    self,
    rankfile="university_rankings.xlsx",
    aliasfile="university_aliases.xlsx",
    gradefile="grade_data.xlsx",
    utilfile="utils2021.xlsx",
):
    """Load the rankings, alias, grade, and utility workbooks.

    A ``.bck`` backup copy of each workbook is written before anything is
    read, so edits made during a session can be recovered. Dirty flags
    (rankup/aliasup/gradeup/utilup) track which workbooks need re-writing.
    """
    self.rankfile = rankfile
    self.aliasfile = aliasfile
    self.gradefile = gradefile
    self.utilfile = utilfile
    # Dirty flags, one per workbook; set by the update* methods.
    self.rankup = False
    self.aliasup = False
    self.gradeup = False
    self.utilup = False
    copyfile(rankfile, rankfile + ".bck")
    copyfile(aliasfile, aliasfile + ".bck")
    copyfile(gradefile, gradefile + ".bck")
    copyfile(utilfile, utilfile + ".bck")
    self.readFiles()
    # generate per-school GPA interpolants from the "grades" sheet; GPA
    # breakpoints are stored as slash-separated strings.
    tmp = pandas.ExcelFile(self.gradefile, engine="openpyxl")
    grades = tmp.parse("grades")
    tmp.close()
    interps = []
    for row in grades.iterrows():
        xgpa = np.array(row[1]["SchoolGPA"].split("/")).astype(float)
        ygpa = np.array(row[1]["4ptGPA"].split("/")).astype(float)
        # Anchor both scales at zero so the interpolant covers low GPAs.
        if (xgpa.min() != 0) & (ygpa.min() != 0):
            xgpa = np.hstack([xgpa, 0])
            ygpa = np.hstack([ygpa, 0])
        interps.append(scipy.interpolate.interp1d(xgpa, ygpa, kind="linear"))
    grades["Interp"] = interps
    self.grades = grades
    self.cc = coco.CountryConverter()
    # Fit the rank -> median-GPA curve through two anchor points
    # (rank 9 -> 3.3, rank 50 -> 3.5); presumably calibrated elsewhere —
    # TODO confirm the anchors.
    x = np.array([9, 50])
    y = np.array([3.3, 3.5])
    ftrank, _ = curve_fit(tfit, x, y, [-0.5, 2.5])
    self.rankfit = lambda x: tfit(x, ftrank[0], ftrank[1])
def readFiles(self):
    """Load the rankings, aliases/ignore, and utility sheets into DataFrames."""
    with pandas.ExcelFile(self.rankfile, engine="openpyxl") as workbook:
        self.lookup = workbook.parse("lookup")
    with pandas.ExcelFile(self.aliasfile, engine="openpyxl") as workbook:
        self.aliases = workbook.parse("aliases")
        self.ignore = workbook.parse("ignore")
    with pandas.ExcelFile(self.utilfile, engine="openpyxl") as workbook:
        self.renames = workbook.parse("rename")
        self.schoolmatches = workbook.parse("schools")
def __del__(self):
    # Persist any pending updates when the object is garbage collected.
    # NOTE(review): relying on __del__ for saving is fragile — it is not
    # guaranteed to run at interpreter shutdown; consider an explicit
    # close() or context-manager protocol instead.
    self.updateFiles()
def isknownschool(self, name):
# try main list
if name in self.lookup["Name"].values:
return True
if name in self.aliases["Alias"].values:
return True
return False
def matchschool(self, name, country):
    """Resolve a raw (name, country) pair to a standard school name.

    Returns one of:
      * a standard name string,
      * ("skip",) when the school is (now) on the ignore list,
      * ("rename", newname) when the caller should record a per-applicant
        rename.
    May prompt interactively and update the rankings/alias/ignore tables.
    """
    # check ignores first
    if (name in self.ignore["Name"].values) and (
        self.ignore.loc[self.ignore.Name == name, "Country"].values[0] == country
    ):
        return ("skip",)
    # try main list
    if (name in self.lookup["Name"].values) and (
        self.lookup.loc[self.lookup.Name == name, "Country"].values[0] == country
    ):
        return name
    # try aliases
    if name in self.aliases["Alias"].values:
        return self.aliases.loc[
            self.aliases["Alias"] == name, "Standard Name"
        ].values[0]
    # Unknown country: either ignore the school or add it with a rank.
    if country not in self.lookup["Country"].values:
        instr = input(
            "{0}: I don't know any schools in {1}. [new]/[s]kip ".format(
                name, country
            )
        )
        if instr:
            self.updateIgnores(name, country)
            return ("skip",)
        else:
            newname = input("Official Name: [{}] ".format(name))
            if not (newname):
                newname = name
            newrank = input("Rank: [200] ")
            if not (newrank):
                newrank = 200
            # NOTE(review): newrank may still be a str here (no int() cast,
            # unlike the branches below) — confirm intended.
            self.updateRankings(newname, newrank, country)
            return newname
    # try fuzzy match against main list (schools in the same country only)
    res = process.extractOne(
        name, self.lookup.loc[self.lookup["Country"] == country, "Name"].values
    )
    # res is (best match, score); a score of 100 is an exact match.
    if res[1] == 100:
        self.updateAliases(name, res[0])
        return res[0]
    else:
        instr = input(
            "I think {} in {} is {}. [accept]/enter alias/[r]ename/[n]ew/[s]kip ".format(
                name, country, res[0]
            )
        )
        if instr:
            if instr == "r":
                newname = input("Official Name: ")
                if newname not in self.lookup["Name"].values:
                    print("This is a new school.")
                    newrank = input("Rank: [200] ")
                    if not (newrank):
                        newrank = 200
                    self.updateRankings(newname, int(newrank), country)
                return "rename", newname
            elif instr == "n":
                newname = input("Official Name: [accept]")
                if not (newname):
                    newname = name
                newrank = input("Rank: [200] ")
                if not (newrank):
                    newrank = 200
                self.updateRankings(newname, int(newrank), country)
                if newname != name:
                    self.updateAliases(name, newname)
                return newname
            elif instr == "s":
                self.updateIgnores(name, country)
                return ("skip",)
            else:
                # Any other input is treated as an alias target; it must be
                # an existing standard name.
                if instr not in self.lookup["Name"].values:
                    print(
                        "I don't know the school you just entered. Trying again."
                    )
                    return self.matchschool(name, country)
                self.updateAliases(name, instr)
                return instr
        else:
            # Empty input accepts the fuzzy match.
            self.updateAliases(name, res[0])
            return res[0]
def updateAliases(self, alias, standard_name):
self.aliasup = True
self.aliases = self.aliases.append(
pandas.DataFrame({"Alias": [alias], "Standard Name": [standard_name]})
)
self.aliases = self.aliases.sort_values(by=["Standard Name"]).reset_index(
drop=True
)
def updateIgnores(self, name, country):
self.aliasup = True
self.ignore = self.ignore.append(
pandas.DataFrame({"Name": [name], "Country": [country]})
).reset_index(drop=True)
def updateRankings(self, name, rank, country):
self.rankup = True
self.lookup = self.lookup.append(
pandas.DataFrame({"Name": [name], "Rank": [rank], "Country": [country]})
)
self.lookup = self.lookup.sort_values(by=["Rank"]).reset_index(drop=True)
def updateFiles(self):
    """Write every modified table back to its workbook and clear dirty flags.

    Only workbooks whose dirty flag is set are rewritten.
    """
    # The ExcelWriter ``options`` keyword and the explicit ``save()`` call
    # were removed in modern pandas; the context manager saves and closes.
    # openpyxl writes UTF-8 natively, so no encoding option is needed.
    if self.rankup:
        with pandas.ExcelWriter(self.rankfile, engine="openpyxl") as ew:
            self.lookup.to_excel(ew, sheet_name="lookup", index=False)
    if self.aliasup:
        with pandas.ExcelWriter(self.aliasfile, engine="openpyxl") as ew:
            self.aliases.to_excel(ew, sheet_name="aliases", index=False)
            self.ignore.to_excel(ew, sheet_name="ignore", index=False)
    if self.gradeup:
        # Drop the non-serializable interpolant column before writing.
        grades = self.grades.drop(["Interp"], axis=1)
        with pandas.ExcelWriter(self.gradefile, engine="openpyxl") as ew:
            grades.to_excel(ew, sheet_name="grades", index=False)
    if self.utilup:
        renames = self.renames.copy()
        schoolmatches = self.schoolmatches.sort_values(by=["Full_Name"]).reset_index(
            drop=True
        )
        with pandas.ExcelWriter(self.utilfile, engine="openpyxl") as ew:
            renames.to_excel(ew, sheet_name="rename", index=False)
            schoolmatches.to_excel(ew, sheet_name="schools", index=False)
    # flush all the update bools
    self.rankup = False
    self.aliasup = False
    self.gradeup = False
    self.utilup = False
def calc4ptGPA(self, school, country, gpascale, gpa):
    """Convert GPA to 4 point scale.

    Resolution order: exact 4.x scales, then a per-school interpolant,
    then a per-country DEFAULT interpolant, then an interactive prompt.
    Returns the converted GPA, or None if the user chose manual entry.
    """
    if gpascale == 4.0:
        return gpa
    # 4.2/4.3/4.33 scales: simply clip at 4.0.
    if (gpascale == 4.3) | (gpascale == 4.33) | (gpascale == 4.2):
        if gpa > 4.0:
            return 4.0
        else:
            return gpa
    # first try to match the school (name, country, and scale must all agree)
    mtch = (
        (self.grades.Name == school)
        & (self.grades.Country == country)
        & (self.grades.GPAScale == gpascale)
    )
    if mtch.any():
        return self.grades.loc[mtch, "Interp"].values[0](gpa)
    # if that doesn't work, lets try to match the country
    if (
        (self.grades["Name"] == "DEFAULT {}".format(country))
        & (self.grades["GPAScale"] == gpascale)
    ).any():
        return self.grades.loc[
            (self.grades["Name"] == "DEFAULT {}".format(country))
            & (self.grades["GPAScale"] == gpascale),
            "Interp",
        ].values[0](gpa)
    # if we're here, nothing worked, so lets ask for help
    print(
        "No matches for {} in {} with {} GPA scale.".format(
            school, country, gpascale
        )
    )
    action = input("What would you like to do? [manual entry]/[n]ew ")
    if action:
        # Any non-empty answer creates a new grade-conversion entry; any
        # non-empty answer at the next prompt keys it to the school name,
        # an empty answer keys it to the country default.
        newname = input("New Entry: [DEFAULT country]/[s] School Name ")
        if newname:
            newname = school
        else:
            newname = "DEFAULT {}".format(country)
        xgpastr = input("New Entry GPAs: gpascale/.../min ")
        ygpastr = input("New Entry 4pt GPAs: 4.0/.../min ")
        xgpa = np.array(xgpastr.split("/")).astype(float)
        ygpa = np.array(ygpastr.split("/")).astype(float)
        # Anchor both scales at zero, mirroring __init__'s interpolant setup.
        if (xgpa.min() != 0) & (ygpa.min() != 0):
            xgpa = np.hstack([xgpa, 0])
            ygpa = np.hstack([ygpa, 0])
        # NOTE(review): DataFrame.append is removed in pandas 2.0 — this
        # call needs migrating to pandas.concat.
        self.grades = self.grades.append(
            pandas.DataFrame(
                {
                    "Name": [newname],
                    "Country": [country],
                    "GPAScale": [gpascale],
                    "SchoolGPA": [xgpastr],
                    "4ptGPA": [ygpastr],
                    "Interp": [
                        scipy.interpolate.interp1d(xgpa, ygpa, kind="linear")
                    ],
                }
            ),
            ignore_index=True,
        )
        self.gradeup = True
    else:
        # Manual entry: the caller handles the conversion itself.
        return None
    return self.grades.loc[self.grades.Name == newname, "Interp"].values[0](gpa)
def assignschools(self, data):
    """Determine undergrad and grad institutions for all students.

    data - main data table

    Fills ``self.schoolmatches`` with one row per applicant recording which
    of the up-to-three listed schools is the undergrad (UG_School) and which
    (if any) is the grad school (GR_School). Prompts interactively when the
    degree-level fields are ambiguous.
    """
    for row in data.itertuples():
        fullname = row.Full_Name
        if fullname in self.schoolmatches["Full_Name"].values:
            # Already matched: only redo if a recorded school is no longer known.
            redo = False
            ugj = self.schoolmatches.loc[
                self.schoolmatches["Full_Name"] == fullname, "UG_School"
            ].values[0]
            if not (
                self.isknownschool(
                    row.__getattribute__("School_Name_{}".format(int(ugj)))
                )
            ):
                redo = True
            gj = self.schoolmatches.loc[
                self.schoolmatches["Full_Name"] == fullname, "GR_School"
            ].values[0]
            if not (np.isnan(gj)):
                if not (
                    self.isknownschool(
                        row.__getattribute__("School_Name_{}".format(int(gj)))
                    )
                ):
                    redo = True
            if redo:
                self.schoolmatches = self.schoolmatches[
                    self.schoolmatches["Full_Name"] != fullname
                ].reset_index(drop=True)
            else:
                continue
        print("\n")
        print(fullname)
        # Collect the applicant's (up to three) resolvable schools.
        schools = []
        degreetypes = []
        countries = []
        earneddegs = []
        snums = []
        for j in range(1, 4):
            s = row.__getattribute__("School_Name_{}".format(j))
            # NaN != NaN, so "s == s" skips missing school entries.
            if s == s:
                country = self.cc.convert(
                    names=row.__getattribute__("School_Country_{}".format(j)),
                    to="name_short",
                )
                res = self.matchschool(s, country)
                if isinstance(res, tuple):
                    if res[0] == "skip":
                        continue
                    elif res[0] == "rename":
                        # Record a per-applicant field override for later runs.
                        self.renames = self.renames.append(
                            pandas.DataFrame(
                                {
                                    "Full_Name": [fullname],
                                    "Field": ["School_Name_{}".format(j)],
                                    "Value": [res[1]],
                                }
                            ),
                            ignore_index=True,
                        )
                        self.utilup = True
                        n = res[1]
                else:
                    n = res
                schools.append(n)
                countries.append(country)
                tmp = row.__getattribute__("Degree_level_School_{}".format(j))
                # NaN check again: blank degree level becomes empty string.
                if tmp != tmp:
                    tmp = ""
                degreetypes.append(tmp)
                earneddegs.append(
                    row.__getattribute__("Earned_a_degree_School_{}".format(j))
                )
                snums.append(j)
        hasgr = False
        if len(schools) == 1:
            ug = 0
            gr = None
        else:
            # Undergrad school: the unique "under..." degree level, else ask.
            inds = np.where(["under" in d.lower() for d in degreetypes])[0]
            if len(inds) != 1:
                for kk in range(len(schools)):
                    print(
                        "{}: {}, {}, Earned: {}".format(
                            kk, schools[kk], degreetypes[kk], earneddegs[kk]
                        )
                    )
                ug = int(input("Pick UNDERgrad school index (from 0) "))
            else:
                ug = inds[0]
            # Grad school: any non-"under" (or "combined") degree level.
            inds = np.where(
                [
                    ("under" not in d.lower()) | ("combined" in d.lower())
                    for d in degreetypes
                ]
            )[0]
            if len(inds) == 0:
                pass
            elif len(inds) > 1:
                for kk in range(len(schools)):
                    print(
                        "{}: {}, {}, Earned: {}".format(
                            kk, schools[kk], degreetypes[kk], earneddegs[kk]
                        )
                    )
                gr = input("Pick GRAD school index (from 0) or enter for none ")
                if gr:
                    gr = int(gr)
                    hasgr = True
            else:
                gr = inds[0]
                hasgr = True
        # NOTE(review): DataFrame.append is removed in pandas 2.0 — these
        # calls need migrating to pandas.concat.
        if hasgr:
            self.schoolmatches = self.schoolmatches.append(
                pandas.DataFrame(
                    {
                        "Full_Name": [fullname],
                        "UG_School": [snums[ug]],
                        "GR_School": [snums[gr]],
                    }
                ),
                ignore_index=True,
            )
            self.utilup = True
        else:
            self.schoolmatches = self.schoolmatches.append(
                pandas.DataFrame(
                    {
                        "Full_Name": [fullname],
                        "UG_School": [snums[ug]],
                        "GR_School": [np.nan],
                    }
                ),
                ignore_index=True,
            )
            self.utilup = True
def fillSchoolData(self, data):
    """Fill the UGrad_*/Grad_* columns of *data* from the school matches.

    For each applicant, resolves the undergrad (and optional grad) school,
    converts its GPA to the 4-point scale, and derives a rank-normalized
    GPA via the rank-fit median and a normal CDF. Returns the updated table.
    """
    for row in data.itertuples():
        fullname = row.Full_Name
        print(fullname)
        # get ugrad gpa
        j = int(
            self.schoolmatches.loc[
                self.schoolmatches["Full_Name"] == fullname, "UG_School"
            ].values[0]
        )
        s = row.__getattribute__("School_Name_{}".format(j))
        country = self.cc.convert(
            names=row.__getattribute__("School_Country_{}".format(j)),
            to="name_short",
        )
        school = self.matchschool(s, country)
        gpa = row.__getattribute__("GPA_School_{}".format(j))
        gpascale = row.__getattribute__("GPA_Scale_School_{}".format(j))
        # NOTE(review): country is recomputed identically here — redundant.
        country = self.cc.convert(
            names=row.__getattribute__("School_Country_{}".format(j)),
            to="name_short",
        )
        data.at[row.Index, "UGrad_School"] = school
        data.at[row.Index, "UGrad_GPA"] = gpa
        newgpa = self.calc4ptGPA(school, country, gpascale, gpa)
        # calc4ptGPA returns None when the user chose manual entry; record
        # the manually entered values as rename overrides and move on.
        if newgpa is None:
            newgpa = input("GPA: ")
            newgpascale = input("GPA Scale: ")
            self.renames = self.renames.append(
                pandas.DataFrame(
                    {
                        "Full_Name": [fullname, fullname],
                        "Field": [
                            "GPA_School_{}".format(j),
                            "GPA_Scale_School_{}".format(j),
                        ],
                        "Value": [float(newgpa), float(newgpascale)],
                    }
                ),
                ignore_index=True,
            )
            self.utilup = True
            continue
        data.at[row.Index, "UGrad_GPA_4pt"] = newgpa
        rank = self.lookup.loc[self.lookup["Name"] == school, "Rank"].values[0]
        # Normalize against the rank-implied median GPA.
        medgpa = self.rankfit(rank)
        uggpa = norm.cdf(2 * (newgpa - medgpa))
        data.at[row.Index, "UGrad_Rank"] = rank
        data.at[row.Index, "UGrad_GPA_Norm"] = uggpa
        # get grad school gpa if it exists
        if (
            self.schoolmatches.loc[
                self.schoolmatches["Full_Name"] == fullname, "GR_School"
            ]
            .notnull()
            .values[0]
        ):
            j = int(
                self.schoolmatches.loc[
                    self.schoolmatches["Full_Name"] == fullname, "GR_School"
                ].values[0]
            )
            s = row.__getattribute__("School_Name_{}".format(j))
            country = self.cc.convert(
                names=row.__getattribute__("School_Country_{}".format(j)),
                to="name_short",
            )
            school = self.matchschool(s, country)
            data.at[row.Index, "Grad_School"] = school
            gpa = row.__getattribute__("GPA_School_{}".format(j))
            if np.isfinite(gpa):
                gpascale = row.__getattribute__("GPA_Scale_School_{}".format(j))
                country = self.cc.convert(
                    names=row.__getattribute__("School_Country_{}".format(j)),
                    to="name_short",
                )
                data.at[row.Index, "Grad_GPA"] = gpa
                newgpa = self.calc4ptGPA(school, country, gpascale, gpa)
                data.at[row.Index, "Grad_GPA_4pt"] = newgpa
                rank = self.lookup.loc[
                    self.lookup["Name"] == school, "Rank"
                ].values[0]
                medgpa = self.rankfit(rank)
                grgpa = norm.cdf(2 * (newgpa - medgpa))
                data.at[row.Index, "Grad_Rank"] = rank
                data.at[row.Index, "Grad_GPA_Norm"] = grgpa
    return data
def readData(self, fname):
data = pandas.read_csv(fname, header=[0, 1])
data.columns = data.columns.droplevel(-1)
data.drop(data[data["Field Admission Decision"] == "ADMT"].index, inplace=True)
data.reset_index(drop=True, inplace=True)
data = data.drop(
columns=[
"Assigned",
"In Progress",
"Completed",
"Tags",
"Field Admission Decision",
],
errors="ignore",
)
# retain only our concentrations
# concentrations = np.unique(np.hstack([data['Concentration 1'][data['Concentration 1'].notnull()].unique(),data['Concentration 2'][data['Concentration 2'].notnull()].unique(),data['Concentration 3'][data['Concentration 3'].notnull()].unique()]))
# ourconcs = ['Aerodynamics','Aerospace Systems','Dynamics and Control','Dynamics and Space Mechanics','Propulsion']
# inds = (data['Concentration 1'] == ourconcs[0]) | (data['Concentration 2'] == ourconcs[0]) | (data['Concentration 3'] == ourconcs[0])
# for j in range(1,len(ourconcs)):
# inds = inds | ((data['Concentration 1'] == ourconcs[j]) | (data['Concentration 2'] == ourconcs[j]) | (data['Concentration 3'] == ourconcs[j]))
#
# data = data.loc[inds]
# data = data.reset_index(drop=True)
# add some new columns
data["UGrad School"] = None
data["UGrad GPA"] = None
data["Grad School"] = None
data["Grad GPA"] = None
data["UGrad GPA 4pt"] = None
data["Grad GPA 4pt"] = None
data["UGrad GPA Norm"] = None
data["Grad GPA Norm"] = None
data["UGrad Rank"] = None
data["Grad Rank"] = None
data["URM"] = None
data["Total"] = None
# remove all column name spaces and special chars
data.columns = data.columns.str.strip()
data.columns = data.columns.str.replace(" ", "_")
data.columns = data.columns.str.replace("?", "")
data.columns = data.columns.str.replace("(", "")
data.columns = data.columns.str.replace(")", "")
data.columns = data.columns.str.replace('"', "")
# add full name col
fullname = [
"{}, {}".format(row.Last_Name, row.First_Name) for row in data.itertuples()
]
data["Full_Name"] = fullname
# overwrite all fields as needed
for row in self.renames.itertuples():
data.loc[data["Full_Name"] == row.Full_Name, row.Field] = row.Value
# make sure that numeric cols remain numeric
numcols = [
"Verbal_GRE_Unofficial",
"Quantitative_GRE_Unofficial",
"GRE_Analytical_Writing_GRE_Unofficial",
"UGrad_GPA_4pt",
"Grad_GPA_4pt",
]
for j in range(1, 4):
numcols.append("GPA_School_{}".format(j))
numcols.append("GPA_Scale_School_{}".format(j))
for col in numcols:
data[col] = data[col].astype(float)
return data
|
<reponame>imtapps/django-dynamic-validation
import mock
from django import test as unittest
from django.contrib.auth.models import User
from dynamic_rules import models as rule_models
from dynamic_validation import models
from dynamic_validation.dynamic_actions import BaseDynamicValidation, BadViolationType
from dynamic_validation.tests.utils import get_violation
__all__ = (
'BaseDynamicActionTests',
)
class BaseDynamicActionTests(unittest.TestCase):
    """Unit tests for dynamic_validation.dynamic_actions.BaseDynamicValidation.

    Covers: constructor state, the run() pipeline wiring
    (get_current_violations -> get_matching_violations -> save_violations),
    violation cleaning/creation, and matching/saving behaviour.
    Collaborators are isolated with mock.patch.object.
    """
    def setUp(self):
        # Shared fixtures: a rule, a trigger object, and the action under test.
        self.rule_model = rule_models.Rule(pk=1)
        self.trigger_model = User.objects.create(username="test_admin")
        self.action = BaseDynamicValidation(self.rule_model, self.trigger_model)
    # --- constructor / default state ---
    def test_accepted_status_is_unreviewed_by_default(self):
        self.assertEqual(models.ViolationStatus.unreviewed, self.action.accepted_status)
    def test_saves_rule_model_on_instance(self):
        self.assertEqual(self.rule_model, self.action.rule_model)
    def test_saves_validation_object_on_instance(self):
        self.assertEqual(self.trigger_model, self.action.trigger_model)
    # --- run() pipeline wiring ---
    @mock.patch.object(BaseDynamicValidation, 'get_current_violations')
    def test_run_calls_get_current_violations_with_args_and_kwargs(self, get_current_violations):
        get_current_violations.return_value = []
        args = [mock.Mock()]
        kwargs = dict(mock=mock.Mock())
        self.action.run(*args, **kwargs)
        get_current_violations.assert_called_once_with(*args, **kwargs)
    @mock.patch.object(BaseDynamicValidation, 'get_current_violations')
    @mock.patch.object(BaseDynamicValidation, 'get_matching_violations')
    def test_run_calls_gets_matching_violations_with_current_violations(self, get_matching, get_current):
        get_current.return_value = []
        self.action.run()
        get_matching.assert_called_once_with(get_current.return_value)
    @mock.patch.object(BaseDynamicValidation, 'get_current_violations')
    @mock.patch.object(BaseDynamicValidation, 'get_matching_violations')
    @mock.patch.object(BaseDynamicValidation, 'save_violations')
    def test_run_calls_save_violations_with_matching_and_current_violations(self, *args):
        # NOTE: patches are applied bottom-up, so args arrive in reverse order.
        save, get_matching, get_current = args
        get_current.return_value = []
        self.action.run()
        save.assert_called_once_with(get_matching.return_value, get_current.return_value)
    # --- get_current_violations contract / cleaning ---
    def test_get_current_violations_raises_not_implemented_error(self):
        with self.assertRaises(NotImplementedError):
            self.action.get_current_violations()
    @mock.patch.object(BaseDynamicValidation, 'get_current_violations')
    def test_raises_type_error_when_a_current_violations_not_violation_instance(self, get_violations):
        # Anything that is not a models.Violation must be rejected by run().
        get_violations.return_value = [models.Violation(), mock.Mock()]
        with self.assertRaises(BadViolationType):
            self.action.run()
    @mock.patch.object(BaseDynamicValidation, 'get_current_violations', mock.Mock(return_value=None))
    def test_clean_violations_returns_empty_list_when_current_violations_is_none(self):
        self.assertEqual([], self.action.get_cleaned_violations())
    def test_wraps_single_violation_in_list_in_get_cleaned_violations(self):
        violation = models.Violation(pk=1)
        with mock.patch.object(BaseDynamicValidation, 'get_current_violations', mock.Mock(return_value=violation)):
            violations = self.action.get_cleaned_violations()
        self.assertEqual([violation], violations)
    # --- create_violation ---
    def test_create_violation_returns_unsaved_rule_violation(self):
        key = "key"
        message = "message"
        violated_fields = {'my_field': 'value'}
        self.action.accepted_status = models.ViolationStatus.rejected
        violation = self.action.create_violation(
            key=key,
            message=message,
            violated_fields=violated_fields,
        )
        # Unsaved (pk is None) and populated from the action's state.
        self.assertIsInstance(violation, models.Violation)
        self.assertEqual(None, violation.pk)
        self.assertEqual(key, violation.key)
        self.assertEqual(message, violation.message)
        self.assertEqual(violated_fields, violation.violated_fields)
        self.assertEqual(self.rule_model, violation.rule)
        self.assertEqual(self.trigger_model, violation.trigger_model)
        self.assertEqual(models.ViolationStatus.rejected, violation.acceptable)
    def test_create_violation_returns_unsaved_rule_violation_with_silent_indicator_sets_to_value_of_indicator(self):
        key = "key"
        message = "message"
        violated_fields = {'my_field': 'value'}
        self.action.accepted_status = models.ViolationStatus.rejected
        violation = self.action.create_violation(
            key=key,
            message=message,
            violated_fields=violated_fields
        )
        self.assertIsInstance(violation, models.Violation)
        self.assertEqual(None, violation.pk)
        self.assertEqual(key, violation.key)
        self.assertEqual(message, violation.message)
        self.assertEqual(violated_fields, violation.violated_fields)
        self.assertEqual(self.rule_model, violation.rule)
        self.assertEqual(self.trigger_model, violation.trigger_model)
        self.assertEqual(models.ViolationStatus.rejected, violation.acceptable)
    # --- get_matching_violations ---
    @mock.patch.object(models.Violation.objects, 'get_by_rule')
    def test_get_matching_violations_gets_existing_violations(self, get_violations):
        get_violations.return_value = []
        self.action.get_matching_violations([])
        get_violations.assert_called_once_with(self.rule_model, self.trigger_model)
    @mock.patch.object(models.Violation.objects, 'get_by_rule')
    def test_get_matching_violations_returns_list_violations_that_are_existing_and_current(self, get_violations):
        violation = mock.Mock(spec_set=models.Violation)
        violation2 = mock.Mock(spec_set=models.Violation)
        get_violations.return_value = [violation, violation2]
        matched_violations = self.action.get_matching_violations([violation])
        self.assertEqual([violation], matched_violations)
    @mock.patch.object(models.Violation.objects, 'get_by_rule')
    def test_get_matching_violations_deletes_existing_violations_that_are_not_current(self, get_violations):
        violation = mock.Mock(spec_set=models.Violation)
        violation2 = mock.Mock(spec_set=models.Violation)
        get_violations.return_value = [violation, violation2]
        self.action.get_matching_violations([violation])
        # Stale violations (not in the current set) must be deleted.
        self.assertFalse(violation.delete.called)
        violation2.delete.assert_called_once_with()
    # --- save_violations ---
    def test_save_violations_saves_current_violations_not_matched(self):
        violation = mock.Mock(spec_set=models.Violation)
        violation2 = mock.Mock(spec_set=models.Violation)
        violation3 = mock.Mock(spec_set=models.Violation)
        self.action.save_violations([violation3], [violation, violation2])
        violation.save.assert_called_once_with()
        violation2.save.assert_called_once_with()
        self.assertFalse(violation3.save.called)
    @mock.patch('dynamic_validation.models.Violation.save', mock.Mock())
    def test_save_violation_updates_message_when_violation_already_exists(self):
        violation = get_violation(message="A new message")
        existing_violation = get_violation(message="An old message")
        violation2 = mock.Mock(spec_set=models.Violation())
        self.action.save_violations([existing_violation], [violation, violation2])
        # The existing record is updated in place and re-saved.
        self.assertEqual(violation.message, existing_violation.message)
        existing_violation.save.assert_called_once_with()
|
<reponame>mnguyen0226/image-augmentation-dnn-performance<filename>image_preprocessor/preprocess_image.py<gh_stars>0
from scipy import ndarray
import skimage.io
import skimage as sk
from skimage import transform
from skimage import util
import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from Minh.DIP.augmentation.affine.flip import flip_horizontal, flip_vertical
from Minh.DIP.augmentation.affine.rotate import rotate
from Minh.DIP.augmentation.affine.shear import vertical_shear, horizontal_shear
from Minh.DIP.augmentation.affine.translate import translate
from Minh.DIP.augmentation.frequency.frequency_filter import freq_filter
from Minh.DIP.augmentation.intensity.amf import amf
from Minh.DIP.augmentation.intensity.hist_equalization import hist_equalization
from Minh.DIP.augmentation.intensity.invert import invert
from Minh.DIP.augmentation.edge_detection.canny_edge import canny
#################################
# Active input/output directories. Exactly one image_path/save_path pair is
# uncommented at a time; it selects the dataset split (train/val) and class
# folder that the preprocess_* functions below read from and write to.
#image_path = "/home/cdsw/Minh/DIP/data/original/train/n03417042"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-amf/train/n03417042"
image_path = "/home/cdsw/Minh/DIP/data/original/val/n03425413"
save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-amf/val/n03425413"
def preprocess_amf():
    """Apply the adaptive median filter (amf) to every .JPEG in
    ``image_path`` and save results to ``save_path`` with an ``_amf`` suffix."""
    print("Running")
    # BUGFIX: build ONE filtered list. The original built two lists with
    # different filters (extension vs. isfile) and indexed them in parallel,
    # which misaligns names and paths if the folder has any non-JPEG entry.
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_amf.JPEG"
        print(new_image_name)
        transformed_image = amf(image_to_transform)
        if transformed_image is None:
            print("None")
            continue  # filter failed for this image; move on to the next
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/original/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-butterworth-hpf/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/original/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-butterworth-hpf/val/n03888257"
def preprocess_butterworth_hpf():
    """Apply a Butterworth high-pass filter to every .JPEG in ``image_path``;
    results go to ``save_path`` with a ``_butterworth_hpf`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_butterworth_hpf.JPEG"
        print(new_image_name)
        transformed_image = freq_filter(image_to_transform, 8, 2, "butterworth_hpf")
        if transformed_image is None:
            print("None")
            continue  # filter failed for this image; skip it
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/original/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-butterworth-lpf/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/original/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-butterworth-lpf/val/n03888257"
def preprocess_butterworth_lpf():
    """Apply a Butterworth low-pass filter to every .JPEG in ``image_path``;
    results go to ``save_path`` with a ``_butterworth_lpf`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_butterworth_lpf.JPEG"
        print(new_image_name)
        transformed_image = freq_filter(image_to_transform, 8, 2, "butterworth_lpf")
        if transformed_image is None:
            print("None")
            continue  # filter failed for this image; skip it
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/original/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-gaussian-lpf/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/original/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-gaussian-lpf/val/n03888257"
def preprocess_gaussian_lpf():
    """Apply a Gaussian low-pass filter to every .JPEG in ``image_path``;
    results go to ``save_path`` with a ``_gaussian_lpf`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_gaussian_lpf.JPEG"
        print(new_image_name)
        transformed_image = freq_filter(image_to_transform, 8, 2, "gaussian_lpf")
        if transformed_image is None:
            print("None")
            continue  # filter failed for this image; skip it
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/original/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-gaussian-hpf/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/original/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-gaussian-hpf/val/n03888257"
def preprocess_gaussian_hpf():
    """Apply a Gaussian high-pass filter to every .JPEG in ``image_path``;
    results go to ``save_path`` with a ``_gaussian_hpf`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_gaussian_hpf.JPEG"
        print(new_image_name)
        transformed_image = freq_filter(image_to_transform, 8, 2, "gaussian_hpf")
        if transformed_image is None:
            print("None")
            continue  # filter failed for this image; skip it
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-canny-ed/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-canny-ed/val/n03888257"
def preprocess_canny_ed():
    """Run Canny edge detection on every .JPEG in ``image_path``;
    results go to ``save_path`` with a ``_canny_ed`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_canny_ed.JPEG"
        print(new_image_name)
        transformed_image = canny(image_to_transform)
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/original/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-invert/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/original/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-invert/val/n03888257"
def preprocess_invert():
    """Invert the intensities of every .JPEG in ``image_path``;
    results go to ``save_path`` with an ``_invert`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_invert.JPEG"
        print(new_image_name)
        transformed_image = invert(image_to_transform)
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-hist-equal/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-hist-equal/val/n03888257"
def preprocess_hist_equal():
    """Histogram-equalize every .JPEG in ``image_path``;
    results go to ``save_path`` with a ``_hist_equal`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_hist_equal.JPEG"
        print(new_image_name)
        transformed_image = hist_equalization(image_to_transform)
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-hori-shear/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-hori-shear/val/n03888257"
def preprocess_shear_hori():
    """Apply a horizontal shear to every .JPEG in ``image_path``;
    results go to ``save_path`` with a ``_hori_shear`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_hori_shear.JPEG"
        print(new_image_name)
        transformed_image = horizontal_shear(image_to_transform)
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-vert-shear/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-vert-shear/val/n03888257"
def preprocess_shear_verti():
    """Apply a vertical shear to every .JPEG in ``image_path``;
    results go to ``save_path`` with a ``_vert_shear`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_vert_shear.JPEG"
        print(new_image_name)
        transformed_image = vertical_shear(image_to_transform)
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-translater/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-translate/val/n03888257"
def preprocess_translate():
    """Translate every .JPEG in ``image_path``;
    results go to ``save_path`` with a ``_translate`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_translate.JPEG"
        print(new_image_name)
        transformed_image = translate(image_to_transform)
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/original/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-ideal-hpf/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/original/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-ideal-hpf/val/n03888257"
def preprocess_ideal_hpf():
    """Apply an ideal high-pass filter to every .JPEG in ``image_path``;
    results go to ``save_path`` with an ``_ideal_hpf`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_ideal_hpf.JPEG"
        print(new_image_name)
        transformed_image = freq_filter(image_to_transform, 8, 2, "ideal_hpf")
        if transformed_image is None:
            print("None")
            continue  # filter failed for this image; skip it
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/original/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-ideal-lpf/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/original/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-ideal-lpf/val/n03888257"
def preprocess_ideal_lpf():
    """Apply an ideal low-pass filter to every .JPEG in ``image_path``;
    results go to ``save_path`` with an ``_ideal_lpf`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_ideal_lpf.JPEG"
        print(new_image_name)
        transformed_image = freq_filter(image_to_transform, 8, 2, "ideal_lpf")
        if transformed_image is None:
            print("None")
            continue  # filter failed for this image; skip it
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-rand-rotate/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-rand-rotate/val/n03888257"
def preprocess_rotate():
    """Randomly rotate (up to 360 degrees) every .JPEG in ``image_path``;
    results go to ``save_path`` with a ``_rand_rotate`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_rand_rotate.JPEG"
        print(new_image_name)
        transformed_image = rotate(image_to_transform, max_theta = 360, mode = "random")
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-vert-flip/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-vert-flip/val/n03888257"
def preprocess_flip_vert():
    """Vertically flip every .JPEG in ``image_path``;
    results go to ``save_path`` with a ``_vert_flip`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_vert_flip.JPEG"
        print(new_image_name)
        transformed_image = flip_vertical(image_to_transform)
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/train/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-hori-flip/train/n03888257"
#image_path = "/home/cdsw/Minh/DIP/data/imagenette2-160/val/n03888257"
#save_path = "/home/cdsw/Minh/DIP/data/imagenette2-160-hori-flip/val/n03888257"
def preprocess_flip_hori():
    """Horizontally flip every .JPEG in ``image_path``;
    results go to ``save_path`` with a ``_hori_flip`` suffix."""
    print("Running")
    # BUGFIX: one filtered listing keeps names and paths aligned (the two
    # original lists used different filters and could misalign).
    jpeg_names = [f for f in os.listdir(image_path)
                  if os.path.splitext(f)[-1] == '.JPEG'
                  and os.path.isfile(os.path.join(image_path, f))]
    for name in jpeg_names:
        image_to_transform = sk.io.imread(os.path.join(image_path, name))
        new_image_name = name.split('.', 1)[0] + "_hori_flip.JPEG"
        print(new_image_name)
        transformed_image = flip_horizontal(image_to_transform)
        sk.io.imsave(os.path.join(save_path, new_image_name), transformed_image)
#################################
if __name__ == "__main__":
    # Exactly one preprocessing pass is enabled per run; uncomment the
    # desired transform and make sure the module-level image_path/save_path
    # point at the matching input/output directories.
    # preprocess_flip_hori()
    # preprocess_flip_vert()
    # preprocess_rotate()
    # preprocess_shear_hori()
    # preprocess_shear_verti()
    # preprocess_translate()
    # preprocess_ideal_hpf()
    # preprocess_ideal_lpf()
    preprocess_amf()
    # preprocess_hist_equal()
    # preprocess_invert()
    # preprocess_canny_ed()
    # preprocess_gaussian_lpf()
    # preprocess_gaussian_hpf()
<filename>leisure/transports.py
import fcntl
import os
import socket
import errno
from collections import deque
from .event_emmiter import EventEmmiter
class Socket(EventEmmiter):
def __init__(self,address, delegate=None):
self.address = address
self.delegate = delegate
self.event_loop = None
self.read_buffer_size = 4096
self.write_buffer = deque()
self.closing = False
def listen(self, backlog, event_loop = None):
"""Listen for incoming connections on this port.
backlog - the maximum number of queued connectinos
event_loop - the event_loop that will monitor this port for
incomming connections. Defaults to the
current_event_loop() if none is specified.
"""
if type(self.address) == tuple:
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM )
socket_path = None
else:
socket_path = self.address
serversocket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM )
if os.path.exists(socket_path):
# possible stale socket let's see if any one is listning
err = serversocket.connect_ex(socket_path)
if err == errno.ECONNREFUSED:
os.unlink(socket_path)
else:
serversocket._reset()
raise RuntimeError("Socket path %s is in use" % socket_path )
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(self.address)
if socket_path: # ensure the world can read/write this socket
os.chmod(socket_path, 666)
serversocket.listen(backlog)
serversocket.setblocking(0)
self._socket = serversocket
self.listening = True
self.connected = True
if event_loop is None:
event_loop = current_event_loop()
event_loop.add_reader(self._socket, self.new_connection, self._socket)
self.event_loop = event_loop
return self._socket.getsockname()
def new_connection(self, srv_socket):
client, addr = srv_socket.accept()
new_socket = Socket(addr, self.delegate)
new_socket.connection_accepted(client, self.event_loop)
self.fire("accept", new_socket)
def connection_accepted(self, socket, event_loop):
self._socket = socket
self.event_loop = event_loop
self.connected = True
self.event_loop.add_reader(socket, self.can_read, socket)
def close(self):
self.closing = True
if self._socket:
self.event_loop.remove_reader(self._socket)
#self._socket = None
#self.fire('closed', self)
def can_read(self, client):
while True:
try:
buf = bytearray(self.read_buffer_size)
mem = memoryview(buf)
bytes = client.recv_into(buf)
if bytes > 0:
self.fire('data', mem[:bytes])
else:
self.close()
except socket.error,e:
if e[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
# other end of the socket is full, so
# ask the runLoop when we can send more
# data
break
else:
# if we receive any other socket
# error we close the connection
# and raise and notify our delegate
#self._reset()
#self.delegate.onError(self, e)
self.fire('error', e)
self.event_loop.remove_reader(client)
def write(self, data):
self.write_buffer.append(data)
self.event_loop.add_writer(self._socket, self.can_write)
def can_write(self):
while self.write_buffer:
sent = self._socket.send(self.write_buffer[0])
if sent == len(self.write_buffer[0]):
self.write_buffer.popleft()
else:
self.write_buffer[0] = buffer(self.write_buffer[0], sent)
break
if not self.write_buffer:
self.event_loop.remove_writer(self._socket)
if self.closing:
self._socket.close()
class Stream(object):
def __init__(self, fd, delegate=None):
# list of ints, each items represents one outstanding call to Stream.read
self.read_requests = []
# list of raw data to send to the file
self.write_buffer = []
if type(fd) == int:
self.fd = fd
else:
self.fd = os.dup(fd.fileno())
flags = fcntl.fcntl(self.fd,fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, flags | os.O_NDELAY)
self.delegate = delegate
# TODO: For some reason __del__ is firing before the object is being deleted
# which in turn is closing the file to soon. It may be do to a deepcopy issue.
# idea. Log out the ID of this object
# def __del__(self):
# self.close
def __repr__(self):
return "<stream %s>" % self.fd
def fileno(self):
return self.fd
def read(self, bytes):
self.read_requests.append(bytes)
def read_to_end(self):
"""Keeps reading and notifying delegate until the end of stream has
been reached.
Discussion:
This function is synchronous and will block the current thread until
the end of file is reached.
"""
while 1:
data = os.read(self.fd, 1024)
if data:
self.delegate.onRead(self, data)
else:
break
def close(self):
"""Removes the stream from the currentRunLoop and closes the file descriptor"""
#sys.stderr.write('\033[0;32m')
#sys.stderr.write('%s %s %s closing %s\n' % (os.getpid(), threading.currentThread(),sys._getframe(1).f_code.co_name, self.fd))
#sys.stderr.write('\033[m')
if self.fd is not None:
if hasattr(self.delegate, 'on_close'):
self.delegate.on_close(self)
# There will be no more data
del self.read_requests[:]
self.remove_from_event_loop(current_event_loop())
os.close(self.fd)
self.fd=None
def can_read(self):
requestcount = len(self.read_requests)
while requestcount:
requestcount -= 1
# read until we get as much data as we've been
# waiting for, or the socket would block.
bytes_2_read = self.read_requests[0]
try:
data = os.read(self.fd,bytes_2_read)
if data == '':
if hasattr(self.delegate,'end_of_data_for'):
self.delegate.end_of_data_for(self)
return
except OSError, e:
if e.errno != errno.EAGAIN:
raise
# notify our delegate that data's been returned
wait_if_short = self.delegate.on_read(self, data)
bytes_read = len(data)
if bytesRead < bytes_2_read and wait_if_short:
self.read_requests[0] -= bytesRead
else:
# we're done with this request
del self.read_requests[0]
def write(self, data):
self.write_buffer.append(data)
current_event_loop().add_writer(self.fd, self.can_write)
def can_write(self):
bytessent = 0
while self.write_buffer:
try:
data = self.write_buffer[0]
# While we have data in our out going buffer try to send it
sent = os.write(self.fd, data)
if len(data) == sent:
# we sent all the data in one shot
del self.write_buffer[0]
else:
self.write_buffer[0] = data[sent:]
bytessent += sent
except OSError, e:
if e.errno == errno.EAGAIN:
# other end of the socket is full, so
# wait until we can send more
# data
pass
else:
raise
# notify our delegate of how much we wrote in this pass
if bytessent > 0:
self.delegate.on_write(self, bytessent)
def on_errror(self, error):
self.delegate.on_error(self, error)
def remove_from_event_loop(self, event_loop):
for remove in (event_loop.remove_reader, event_loop.remove_writer):
try:
remove(self)
except KeyError:
pass
from .event_loop import current_event_loop
|
<gh_stars>0
# differential expression
import pyspark.sql.functions as F
from pyspark.sql.types import FloatType
from pyspark.sql import Window
from scipy.stats import t
def diff_expr_top_n(df_melt_flt, cluster_id=0, n=10):
    '''
    Take filtered tall-format dataframe, cluster_id of interest and n as inputs.
    Find the top n differentially expressed genes in vs out of cluster_id.
    Uses the p-value from a two-tailed independent two-sample t-test (pooled
    variance) to rank genes; only genes significant at the 5% level are kept,
    so the number of genes returned may be less than n.
    scipy is used to get the final p-value from the estimated t statistic.
    Expected input columns (assumed from usage - confirm against caller):
    'prediction' (cluster label), 'variable' (gene), 'cell', 'value' (expression).
    returns dictionary of top N gene:p-value- {gene1:p-value1, gene2:p-value2, ...}
    '''
    # cluster id marking: 'in' = cells of the cluster of interest, 'out' = the rest
    df = df_melt_flt.withColumn('cluster_group',F.when(F.col('prediction')==cluster_id,'in').otherwise('out'))
    # t-test related computations on all genes
    # 1. sample size, mean, std. dev. per gene per group
    df_ttest = df.groupBy('variable','cluster_group').agg(
                    F.count('cell').alias('size'),
                    F.mean('value').alias('mean'),
                    F.stddev('value').alias('sd'))
    # 2. Pivot in vs out into two column sets (in_size/in_mean/in_sd vs out_*).
    #    F.max is a no-op aggregator here: there is exactly one row per
    #    (variable, cluster_group) after the groupBy above.
    df_ttest_p = df_ttest.groupBy('variable').pivot('cluster_group').agg(
        F.max('size').alias('size'),
        F.max('mean').alias('mean'),
        F.max('sd').alias('sd'))
    # compute interim values and t-test value
    # degrees of freedom: n1 + n2 - 2
    df_ttest_p = df_ttest_p.withColumn('df', F.col('in_size') + F.col('out_size') -2)
    # pooled standard deviation: sqrt(((n1-1)s1^2 + (n2-1)s2^2) / df)
    df_ttest_p = df_ttest_p.withColumn(
        'sdp',
        F.sqrt(
            (
                (F.col('in_size')-1)*F.col('in_sd')*F.col('in_sd') +
                (F.col('out_size')-1)*F.col('out_sd')*F.col('out_sd')
            )/F.col('df')
        )
    )
    # t statistic: (mean_in - mean_out) / (sdp * sqrt(1/n1 + 1/n2))
    df_ttest_p = df_ttest_p.withColumn(
        't_test',
        (
            F.col('in_mean')-F.col('out_mean')
        )/
        (
            F.col('sdp') * F.sqrt(1/F.col('in_size') + 1/F.col('out_size'))
        )
    )
    # use scipy to get p-value: two-tailed (survival function of |t| doubled)
    p_val = F.udf(lambda t_val, df: float(t.sf(abs(t_val), df=df)*2), FloatType())
    df_ttest_p = df_ttest_p.withColumn('p_val', p_val(F.col('t_test'), F.col('df')))
    # persist + count forces evaluation once so the filter/rank below do not
    # recompute the whole pipeline
    df_ttest_p = df_ttest_p.persist()
    df_ttest_p.count()
    # filter for 5% significance & sort using p-value to get top N genes
    # p-value can be used directly because we are using p-values from tests on same two sets which means sample sizes and df are same
    df_ttest_p = df_ttest_p.where(F.col('p_val')<=0.05)
    # NOTE: Window.orderBy without partitionBy pulls all rows to one partition;
    # acceptable here because the gene list is comparatively small.
    df_ttest_p = df_ttest_p.withColumn('rank', F.row_number().over(Window.orderBy(F.col('p_val'))))
    df_ttest_p = df_ttest_p.where(F.col('rank')<=n)
    # return dictionary of top N gene:p-value
    gene_dict = df_ttest_p.select('variable','p_val').toPandas()
    gene_dict = dict(zip(gene_dict['variable'],gene_dict['p_val']))
    return gene_dict
import torch
import torch._prims.utils as utils
from torch._prims.utils import (
TensorLikeType,
NumberType,
ELEMENTWISE_TYPE_PROMOTION_KIND,
)
import torch._refs as refs
from torch._prims.wrappers import (
elementwise_type_promotion_wrapper,
out_wrapper,
)
from typing import Optional
__all__ = [
"celu",
"elu",
"mish",
"selu",
"softplus",
]
# celu is implemented specially because it has an alpha argument
# celu is very similar to elu
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def celu(
    a: TensorLikeType, alpha: Optional[NumberType] = None, inplace: bool = False
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.celu

    celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1))
    """
    if inplace:
        raise NotImplementedError
    neg_branch: TensorLikeType
    if alpha is None:
        neg_branch = refs.expm1(a)
    else:
        # alpha must be representable in the tensor's computation type.
        python_type = utils.dtype_to_type(a.dtype)
        if not utils.is_weakly_lesser_type(type(alpha), python_type):
            raise ValueError(
                f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!"
            )
        neg_branch = refs.mul(alpha, refs.expm1(refs.true_divide(a, alpha)))
    # Positive inputs pass through; negative inputs take the exponential branch.
    return refs.where(refs.gt(a, 0), a, neg_branch)
# elu is implemented specially because it has an alpha argument
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def elu(
    a: TensorLikeType, alpha: Optional[NumberType] = None, inplace: bool = False
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.elu

    elu(x) = x for x > 0, alpha * (exp(x) - 1) otherwise.
    """
    if inplace:
        raise NotImplementedError
    neg_branch: TensorLikeType
    if alpha is None:
        neg_branch = refs.expm1(a)
    else:
        # alpha must be representable in the tensor's computation type.
        python_type = utils.dtype_to_type(a.dtype)
        if not utils.is_weakly_lesser_type(type(alpha), python_type):
            raise ValueError(
                f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!"
            )
        neg_branch = refs.mul(alpha, refs.expm1(a))
    return refs.where(refs.gt(a, 0), a, neg_branch)
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def mish(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.mish

    mish(x) = x * tanh(softplus(x))
    """
    if inplace:
        raise NotImplementedError
    softplus_a = refs.nn.functional.softplus(a)
    return refs.mul(a, refs.tanh(softplus_a))
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def selu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.selu
    """
    if inplace:
        raise NotImplementedError
    # Fixed-point constants used by torch.nn.functional.selu
    # (Klambauer et al., "Self-Normalizing Neural Networks").
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    neg_branch = refs.mul(alpha, refs.expm1(a))
    elu_like = refs.where(refs.gt(a, 0), a, neg_branch)
    return refs.mul(scale, elu_like)
# softplus is implemented specially because it has beta and threshold arguments
@out_wrapper
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def softplus(
    a: TensorLikeType,
    beta: Optional[NumberType] = None,
    threshold: NumberType = 20,
    inplace: bool = False,
) -> TensorLikeType:
    """
    Reference implementation of torch.nn.functional.softplus

    softplus(x) = (1 / beta) * log(1 + exp(beta * x)), reverting to the
    identity when beta * x exceeds *threshold* (numerical stability).
    """
    if inplace:
        raise NotImplementedError
    smooth: TensorLikeType
    if beta is None:
        scaled_input = a
        smooth = refs.log1p(refs.exp(scaled_input))
    else:
        # beta must be representable in the tensor's computation type.
        python_type = utils.dtype_to_type(a.dtype)
        if not utils.is_weakly_lesser_type(type(beta), python_type):
            raise ValueError(
                f"beta argument of type {type(beta)} cannot be safely cast to type {python_type}!"
            )
        scaled_input = refs.mul(a, beta)
        smooth = refs.true_divide(refs.log1p(refs.exp(scaled_input)), beta)
    return refs.where(refs.gt(scaled_input, threshold), a, smooth)
|
<reponame>rodrigofaccioli/drugdesign
#! /usr/bin/env python
"""
Routines to extract compounds from ZINC Database: http://zinc.docking.org/
These routines were developed by:
<NAME> - <EMAIL> / <EMAIL>
<NAME> - <EMAIL> / <EMAIL>
"""
import ConfigParser as configparser
import os
import shutil
import gzip
def number_files_of_molecule(molecule_name, path_save_mol2):
    """
    Return the number of files at path_save_mol2 that contain molecule_name in filename
    Example:
        >>> number = number_files_of_molecule(molecule_name, path_save_mol2)
    @param molecule_name: main name of molecule
    @type molecule_name: string
    @param path_save_mol2: path of mol2 files will be saved
    @type path_save_mol2: string
    @return: the number of .mol2 files that have molecule_name in their names
    @rtype: int
    """
    number = 0
    for root, dirs, files in os.walk(path_save_mol2):
        for f in files:
            # substring match, idiomatic `in` instead of str.find(...) >= 0
            if f.endswith(".mol2") and molecule_name in f:
                number += 1
    return number
def finish_current_molecule(molecule_name, path_save_mol2, temp_file_name_full):
    """
    Last procedures for current molecule: pick a unique output filename and
    move the temp file into place.
    Example:
        >>> finish_current_molecule(molecule_name, path_save_mol2, temp_file_name_full)
    @param molecule_name: main name of molecule
    @type molecule_name: string
    @param path_save_mol2: path of mol2 files will be saved
    @type path_save_mol2: string
    @param temp_file_name_full: full path of temp file
    @type temp_file_name_full: string

    Because of isomers several molecules can share the same main name; in that
    case files are suffixed _1, _2, ...  NOTE: counting is by substring match,
    so a molecule name that is a prefix of another may overcount.
    """
    # Check how many files of molecule_name already exist in path_save_mol2
    number_files = number_files_of_molecule(molecule_name, path_save_mol2)
    if number_files == 0:
        # First molecule with this name: no suffix needed.
        mol_name_aux = molecule_name
    else:
        if number_files == 1:
            # Only the single un-suffixed file exists: rename it with _1 so
            # the numbering stays consistent once a second isomer appears.
            before_molecule = molecule_name + '.mol2'
            before_molecule_mol2 = os.path.join(path_save_mol2, before_molecule)
            new_molecule = molecule_name + '_1' + '.mol2'
            new_molecule_mol2 = os.path.join(path_save_mol2, new_molecule)
            shutil.move(before_molecule_mol2, new_molecule_mol2)
        # The current molecule becomes number (number_files + 1).
        # Fix: the original used str(number_files) when two or more files
        # already existed, which reused an existing suffix and silently
        # overwrote a previously written isomer.
        mol_name_aux = molecule_name + '_' + str(number_files + 1)
    mol2_file_name = mol_name_aux + '.mol2'
    mol2_file_name_full = os.path.join(path_save_mol2, mol2_file_name)
    # creating mol2 file - moving temp file to mol2_file_name_full
    shutil.move(temp_file_name_full, mol2_file_name_full)
def split_molecules_from_mol2(pathfilename, path_save_mol2):
    """
    Split molecules from mol2 file
    Example:
        >>> split_molecules_from_mol2(pathfilename, path_save_mol2)
    @param pathfilename: full path of file that contains all molecules
    @type pathfilename: string
    @param path_save_mol2: path of mol2 files will be saved
    @type path_save_mol2: string

    Each @<TRIPOS>MOLECULE section is streamed into a temp file, which is
    then renamed after its molecule by finish_current_molecule().
    """
    line_name = True
    temp_file_name = 'temp.temp'
    temp_file_name_full = os.path.join(path_save_mol2, temp_file_name)
    # Track the current molecule's name; stays None for an empty input file.
    molecule_name = None
    # with-statement guarantees the source file is closed (the original
    # never closed it)
    with open(pathfilename, "r") as fmol2_all:
        # open temp file for first molecule
        fmol2_temp = open(temp_file_name_full, "w")
        # first line is assumed to be the first @<TRIPOS>MOLECULE header
        line = fmol2_all.readline()
        fmol2_temp.write(line)
        for line in fmol2_all:
            if line.find("@<TRIPOS>MOLECULE") < 0:
                # the first line after a header is the molecule name
                if line_name:
                    molecule_name = str(line).strip()
                    line_name = False
                fmol2_temp.write(line)
            else:  # found @<TRIPOS>MOLECULE: finish previous, start new
                fmol2_temp.close()
                finish_current_molecule(molecule_name, path_save_mol2, temp_file_name_full)
                # open temp file for the new molecule
                fmol2_temp = open(temp_file_name_full, "w")
                line_name = True
                fmol2_temp.write(line)
        fmol2_temp.close()
    # finishing the last molecule (guard avoids an error on empty input,
    # where no molecule name was ever read)
    if molecule_name is not None:
        finish_current_molecule(molecule_name, path_save_mol2, temp_file_name_full)
def get_files_gz(mypath):
    """Return the full paths of every .gz file found under *mypath* (recursive)."""
    found = []
    for dirpath, _dirnames, filenames in os.walk(mypath):
        found.extend(
            os.path.join(dirpath, name)
            for name in filenames
            if name.endswith(".gz")
        )
    return found
def decompress_gz_files(gz_path):
    """
    Decompress every .gz file found under *gz_path*, writing each
    decompressed copy into gz_path itself (same name minus the .gz suffix).
    @param gz_path: directory that holds the downloaded .gz archives
    @type gz_path: string
    """
    for gz_file in get_files_gz(gz_path):
        out_name = str(os.path.basename(gz_file)).replace(".gz", '')
        path_filename = os.path.join(gz_path, out_name)
        # context managers guarantee both handles are closed even on error
        # (the original leaked the output handle on exceptions)
        with gzip.open(gz_file, 'rb') as in_f:
            payload = in_f.read()
        # write in binary mode: gzip yields bytes, so text mode ('w') would
        # fail under Python 3
        with open(path_filename, 'wb') as out_f:
            out_f.write(payload)
def get_files_mol2(mypath):
    """Return the full paths of every .mol2 file found under *mypath* (recursive)."""
    matches = []
    for dirpath, _dirnames, filenames in os.walk(mypath):
        for name in filenames:
            if name.endswith(".mol2"):
                matches.append(os.path.join(dirpath, name))
    return matches
def main():
    """Entry point: decompress the downloaded ZINC archives, then split every
    resulting mol2 file into one file per molecule."""
    config = configparser.ConfigParser()
    config.read('config.ini')
    downloaded_path = config.get('ZINCDB', 'path_downloaded')
    output_path = config.get('DEFAULT', 'mol2_path')
    decompress_gz_files(downloaded_path)
    for f_mol2 in get_files_mol2(downloaded_path):
        split_molecules_from_mol2(f_mol2, output_path)
main() |
<reponame>a-amaral/qiskit-terra
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
Histogram visualization
"""
from string import Template
from collections import Counter
import sys
import time
import re
import numpy as np
if ('ipykernel' in sys.modules) and ('spyder' not in sys.modules):
try:
from IPython.core.display import display, HTML
except ImportError:
print("Error importing IPython.core.display")
def process_data(data, number_to_keep):
    """ Prepare received data for representation.
    Args:
        data (dict): values to represent (ex. {'001' : 130})
        number_to_keep (int): number of elements to show individually.
    Returns:
        dict: processed data to show (probabilities rounded to 5 decimals).
    """
    if number_to_keep != 0:
        # Keep only the most frequent entries; everything else is collapsed
        # into a single 'rest' bucket.
        top = dict(Counter(data).most_common(number_to_keep))
        top['rest'] = sum(data.values()) - sum(top.values())
        data = top
    labels = list(data)
    counts = np.array([data[key] for key in labels], dtype=float)
    # Normalise counts to probabilities.
    probabilities = counts / sum(counts)
    return {label: round(prob, 5) for label, prob in zip(labels, probabilities)}
def iplot_histogram(executions_results, options=None):
    """ Create a histogram representation.
    Graphical representation of the input array using a vertical bars
    style graph, rendered in a Jupyter notebook via an external
    q-visualizations JavaScript bundle.
    Args:
        executions_results (array): Array of dictionaries containing
            - data (dict): values to represent (ex. {'001' : 130})
            - name (string): name to show in the legend
            - device (string): Could be 'real' or 'simulated'
        options (dict): Representation settings containing
            - width (integer): graph horizontal size
            - height (integer): graph vertical size
            - slider (bool): activate slider
            - number_to_keep (integer): groups max values
            - show_legend (bool): show legend of graph content
            - sort (string): Could be 'asc' or 'desc'

    NOTE(review): `display`/`HTML` are only imported at module level when
    running under ipykernel; outside a notebook this call raises NameError.
    """
    # HTML placeholder div; the JS below renders the plot into it
    html_template = Template("""
    <p>
        <div id="histogram_$divNumber"></div>
    </p>
    """)
    # JavaScript
    javascript_template = Template("""
    <script>
        requirejs.config({
            paths: {
                qVisualization: "https://qvisualization.mybluemix.net/q-visualizations"
            }
        });
        require(["qVisualization"], function(qVisualizations) {
            qVisualizations.plotState("histogram_$divNumber",
                                      "histogram",
                                      $executions,
                                      $options);
        });
    </script>
    """)
    # Process data and execute
    # Unique div id derived from the current timestamp (dots stripped) so
    # multiple plots in one notebook do not collide.
    div_number = str(time.time())
    div_number = re.sub('[.]', '', div_number)
    if not options:
        options = {}
    # Normalise boolean options to the 0/1 integers the JS side expects.
    if 'slider' in options and options['slider'] is True:
        options['slider'] = 1
    else:
        options['slider'] = 0
    if 'show_legend' in options and options['show_legend'] is False:
        options['show_legend'] = 0
    else:
        options['show_legend'] = 1
    if 'number_to_keep' not in options:
        options['number_to_keep'] = 0
    data_to_plot = []
    for execution in executions_results:
        # Convert raw counts to rounded probabilities (see process_data).
        data = process_data(execution['data'], options['number_to_keep'])
        data_to_plot.append({'data': data})
    html = html_template.substitute({
        'divNumber': div_number
    })
    javascript = javascript_template.substitute({
        'divNumber': div_number,
        'executions': data_to_plot,
        'options': options
    })
    display(HTML(html + javascript))
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 08:11:50 2021
@author: <NAME>
"""
import os
from pickle import load, dump
import subprocess
from time import time, sleep
from shutil import rmtree
import numpy as np
import pandas as pd
from reificationFusion import model_reification
import concurrent.futures
from multiprocessing import cpu_count
from copy import deepcopy
from util import cartesian, call_model, apply_constraints
from util import calculate_KG, calculate_EI, fused_calculate, calculate_TS, calculate_Greedy, calculate_PI, calculate_UCB
from util import calculate_GPHedge, evaluateFusedModel, batchAcquisitionFunc, kmedoids_max
from util import fused_EHVI, calculate_EHVI, Pareto_finder, storeObject
from gpModel import gp_model, bmarsModel
from sklearn_extra.cluster import KMedoids
import logging
from pyDOE import lhs
import matplotlib.pyplot as plt
import concurrent.futures
def Pool(max_workers=8):
    """Return the executor used for framework-level parallelism.

    Parameters
    ----------
    max_workers : int, optional
        Number of worker threads.  Defaults to 8, the value that was
        previously hard-coded.
    """
    return concurrent.futures.ThreadPoolExecutor(max_workers)
# from ray.util.multiprocessing import Pool
class barefoot():
def __init__(self, ROMModelList=[], TruthModel=[], calcInitData=True,
initDataPathorNum=[], multiNode=0, workingDir=".",
calculationName="Calculation", nDim=1, input_resolution=5, restore_calc=False,
updateROMafterTM=False, externalTM=False, acquisitionFunc="KG",
A=[], b=[], Aeq=[], beq=[], lb=[], ub=[], func=[], keepSubRunning=True,
verbose=False, sampleScheme="LHS", tmSampleOpt="Greedy", logname="BAREFOOT",
maximize=True, train_func=[], reification=True, batch=True,
multiObjective=False, multiObjectRef=[], surrogate="GP", externalROM=False,
temp_input=[]):
self.temp_input = temp_input
"""
Python Class for Batch Reification/Fusion Optimization (BAREFOOT) Framework Calculations
Parameters
----------
ROMModelList : This is the list of functions that are the cheap information sources.
These need to be in a form that ensures that by providing the unit hypercube
input, the function will provide the required output
TruthModel : This is the Truth model, or the function that needs to be optimized.
calcInitData : This variable controls whether the initial data is calculated for
each of the models or is retrieved from a file
initDataPathorNum : This variable holds the number of initial datapoints to evaluate for each
information source (including the Truth Model), or, when initial data is
loaded from a file, holds the path to the initial data file
multiNode : This variable reflects the number of subprocesses that will be used
for the calculations. A value of zero indicates all calculations will
be completed on the main compute node.
workingDir : This is the path to the working directory. In some cases it may be desirable
to store data separately from the code, this will allow the data to be stored
in alternate locations. Can also be used if the relative directory reference
is not working correctly.
calculationName : This is the name for the calculation and will change the results directory name
nDim : The number of dimensions for the input space that will be used
restore_calc : This parameter toggles whether the framework data is set up from the information
provided or retrieved from a save_state file. This can be used to restart a calculation
updateROMafterTM : This parameter allows the reduced order models to be retrained after getting more data
from the Truth Model. The model function calls do not change, so the training needs to
reflect in the same function. Requires a training function to be supplied in the
train_func input.
train_func : Training function used to retrain the reduced order models after the Truth Model
evaluations.
externalTM : In cases where it is necessary to evaluate the Truth Model separate to the
framework (for example, if the Truth Model is an actual experiment), this toggles
the output of the predicted points to a separate file for use externally. The
framework is shut down after the data is output, see test examples for how to restart
the framework after the external Truth Model has been evaluated
acquisitionFunc : The acquisition function to use to evaluate the next best points for the reduced
order models. Currently the options are "KG" for Knowledge Gradient and "EI" for expected
improvement, "PI" Probability of Improvment, "TS" Thompson sampling, "Greedy" Greedy,
"UCB" Upper confidence bound, "Hedge" GP-Hedge Portfolio optimization.
A, b, Aeq, beq : Equality and inequality constraints according to the following equations:
1) A*x <= b
2) Aeq*x == b
ub, lb : Upper bounds and lower bounds for inputs, all inputs must receive a value
(Specify 0 for lb and 1 for ub if there is no bound for that input)
func : function constraints, must take the input matrix (x) and output a vector of length
equal to the number of samples in the input matrix (x) with boolean values.
keepSubRunning : Determines whether the subprocesses are left running while calling the Truth Model
verbose : Determines the logging level for tracking the calculations.
input_resolution : How many decimal places to use in the inputs.
sampleScheme : Sampling scheme for the test points. Options are "Grid", "LHS", "Custom", "CompFunc".
Where the Custom uses preselected test points from a file, and the CompFunc is
specifically designed for sampling composition spaces.
tmSampleOpt : The acquisition function to use when evaluating next-best points for the Truth Model
logname : The name of the log file
maximize : Toggles if the problem is a maximization or minimization problem. Default is Maximization.
reification : Toggles the use of the multi-fidelity Reification approach
batch : Toggles the use of the Batch BO approach
multiObjective : Toggles multi-objective optimization
multiObjectRef : Holds the reference point required by the EHVI acquisition function
"""
if verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
# create logger to output framework progress
self.logger = logging.getLogger(logname)
for h in self.logger.handlers:
self.logger.removeHandler(h)
self.logger.setLevel(log_level)
fh = logging.FileHandler('{}.log'.format(logname))
sh = logging.StreamHandler()
fh.setLevel(log_level)
sh.setLevel(log_level)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
sh.setFormatter(formatter)
# add the handler to the logger
self.logger.addHandler(fh)
self.logger.addHandler(sh)
if not restore_calc:
with open(f"{logname}.log", 'w') as f:
pass
self.logger.info("#########################################################")
self.logger.info("# #")
self.logger.info("# Start BAREFOOT Framework Initialization #")
self.logger.info("# #")
self.logger.info("#########################################################")
self.logger.info("*********************************************************")
self.logger.info("* Calculation Name: {} ".format(calculationName))
self.logger.info("*********************************************************")
# Restore a previous calculation and restart the timer or load new
# information and initialize
if restore_calc:
if externalTM:
self.__external_TM_data_load(workingDir, calculationName)
else:
self.__load_from_save(workingDir, calculationName)
self.restore_calc = restore_calc
self.pool = Pool()
self.timeCheck = time()
self.logger.info("Previous Save State Restored")
else:
self.restore_calc = restore_calc
self.pool = Pool()
self.timeCheck = time()
self.multiObjective = multiObjective
self.MORef = multiObjectRef
self.ROM = ROMModelList
self.TM = TruthModel
self.TMInitInput = []
self.TMInitOutput = []
self.ROMInitInput = []
self.ROMInitOutput = []
self.inputLabels = []
self.multinode = multiNode
self.workingDir = workingDir
self.calculationName = calculationName
self.calcInitData = calcInitData
self.initDataPathorNum = initDataPathorNum
self.currentIteration = -1
self.maximize = maximize
self.surrogate = surrogate
self.updateROMafterTM = updateROMafterTM
self.reification = reification
self.batch = batch
self.externalTM = externalTM
self.externalROM = externalROM
if self.multiObjective:
self.tmSampleOpt = "EHVI"
self.acquisitionFunc = "EHVI"
self.logger.warning("Default multiobjective acquisition function (EHVI) selected!")
elif self.surrogate == "BMARS":
self.acquisitionFunc = "EI-BMARS"
self.tmSampleOpt = "EI-BMARS"
self.logger.warning("BMARS Surrogate Model selected! Default EI for BMARS acquisition function selected!")
self.reification = False
self.logger.warning("BMARS Surrogate Model not compatible with Reification! Reification approach disabled!")
else:
if tmSampleOpt in ["Hedge", "Greedy", "EI", "KG", "TS", "PI", "UCB"]:
self.tmSampleOpt = tmSampleOpt
else:
self.tmSampleOpt = "Greedy"
self.logger.warning("Invalid Truth Model Acquisition Function! Using default (Greedy).")
if acquisitionFunc in ["Hedge", "Greedy", "EI", "KG", "TS", "PI", "UCB"]:
self.acquisitionFunc = acquisitionFunc
else:
self.acquisitionFunc = "KG"
self.logger.warning("Invalid ROM Acquisition Function! Using default (KG).")
self.nDim = nDim
self.res = input_resolution
self.A = A
self.b = b
self.Aeq = Aeq
self.beq = beq
self.ub = ub
self.lb = lb
self.constr_func = func
self.train_func = train_func
if sampleScheme in ["LHS", "Grid", "Custom", "CompFunc"]:
self.sampleScheme = sampleScheme
else:
self.sampleScheme = "LHS"
self.logger.warning("Invalid Sample Scheme! Using default (LHS).")
if self.multinode != 0:
self.keepSubRunning = keepSubRunning
else:
self.keepSubRunning = True
self.__create_dir_and_files()
self.__create_output_dataframes()
self.__get_initial_data__()
self.logger.info("Initialization Completed")
    def __catch_error(func):
        """
        Decorator for framework methods: if an error occurs during the
        wrapped call this decorator catches it and logs it instead of
        letting it propagate.

        NOTE(review): defined inside the class body so it is applied as a
        plain function (name-mangled to _barefoot__catch_error).  The wrapper
        discards the wrapped function's return value and returns a boolean
        success flag instead.
        """
        def close_subs(self, *args, **kwargs):
            # True only if func completed without raising
            no_error = False
            try:
                func(self, *args, **kwargs)
                no_error = True
            except Exception as err:
                self.logger.critical("Initialization Code Failed - See Error Below")
                self.logger.exception(err)
            return no_error
        return close_subs
def __create_dir_and_files(self):
# Create the required directories for saving the results and the subprocess
# information if applicable
try:
os.mkdir('{}/results'.format(self.workingDir))
self.logger.debug("Results Directory Created Successfully")
except FileExistsError:
self.logger.debug("Results Directory Already Exists")
try:
os.mkdir('{}/data'.format(self.workingDir))
self.logger.debug("Data Directory Created Successfully")
except FileExistsError:
self.logger.debug("Data Directory Already Exists")
try:
os.mkdir('{}/data/parameterSets'.format(self.workingDir))
self.logger.debug("Parameter Set Directory Created Successfully")
except FileExistsError:
self.logger.debug("Parameter Set Directory Already Exists")
try:
os.mkdir('{}/results/{}'.format(self.workingDir,
self.calculationName))
self.logger.debug("Calculation Results Directory [{}] Created Successfully".format(self.calculationName))
except FileExistsError:
self.logger.debug("Calculation Results Directory [{}] Already Exists".format(self.calculationName))
# If using subprocesses, create the folder structure needed
if self.multinode != 0:
with open("BAREFOOT.log", 'w') as f:
pass
if os.path.exists('{}/subprocess'.format(self.workingDir)):
rmtree('{}/subprocess'.format(self.workingDir))
self.logger.debug("Existing Subprocess Directory Removed")
os.mkdir('{}/subprocess'.format(self.workingDir))
os.mkdir('{}/subprocess/LSFOut'.format(self.workingDir))
self.logger.debug("Subprocess Directory Created")
def __create_output_dataframes(self):
# The output of the framework is contained in two pandas dataframes
# the iterationData df shows the iterations, model calls and maximum
# value found
if self.multiObjective:
labels2 = ["Iteration", "Calculation Time", "Objective 1", "Objective 2", "Truth Model"]
else:
labels2 = ["Iteration", "Max Found", "Calculation Time", "Truth Model"]
for ii in range(len(self.ROM)):
labels2.append("ROM {}".format(ii))
# the evaluatedPoints df contains all the points that have been
# evaluated from all models
if self.multiObjective:
labels1 = ["Model Index", "Iteration", "y1", "y2"]
else:
labels1 = ["Model Index", "Iteration", "y"]
for ii in range(self.nDim):
labels1.append("x{}".format(ii))
if self.multiObjective:
labels2.append("x{}".format(ii))
self.inputLabels.append("x{}".format(ii))
self.evaluatedPoints = pd.DataFrame(columns=labels1)
self.iterationData = pd.DataFrame(columns=labels2)
self.logger.debug("Output Dataframes Created")
    def __save_output_dataframes(self):
        """Plot live progress comparisons and persist the output dataframes.

        NOTE(review): the plotting section below is hard-wired to one specific
        experiment - it reads "reificationOnlyResults.pickle" and
        "BBOOnlyResults.pickle" from the current working directory and assumes
        exactly three ROMs (isostrain/isostress/isowork).  It will raise
        FileNotFoundError / KeyError in any other setup; confirm whether this
        debug code should remain in the general save path.
        """
        fig,ax = plt.subplots(1,2,figsize=(10,5))
        ax[0].set_xlabel('RVE Evaluations')
        ax[0].set_ylabel('$1/\sigma(d\sigma/d\epsilon_{pl})$')
        ax[0].set_xlim(0,20)
        ax[0].set_xticks([0,2,4,6,8,10,12,14,16,18,20])
        ax[0].set_ylim(0,35)
        ax[1].set_xlabel('Iteration')
        ax[1].set_ylabel('Model Evaluations')
        ax[1].set_xlim(0,20)
        ax[1].set_ylim(0,100)
        def pltsin(ax, fig, x, y, lbls):
            # Update existing lines in place if the axes already hold a plot,
            # otherwise create the three comparison series.
            if ax.lines:
                ii = 0
                for line in ax.lines:
                    line.set_xdata(x[ii])
                    line.set_ydata(y[ii])
                    ii += 1
            else:
                ax.plot(x[0], y[0], 'r-', label=lbls[0])
                ax.plot(x[1], y[1], 'g:', label=lbls[1])
                ax.plot(x[2], y[2], 'b-.', label=lbls[2])
                ax.legend()
            fig.canvas.draw()
        # Baseline results for comparison (hard-coded paths, see NOTE above).
        with open("reificationOnlyResults.pickle", 'rb') as f:
            reifi_out = load(f)
        with open("BBOOnlyResults.pickle", 'rb') as f:
            BBO_out = load(f)
        iteration = np.array(self.iterationData.loc[:,"Iteration"])
        max_val = np.array(self.iterationData.loc[:,"Max Found"])
        rve_calls = np.array(self.iterationData.loc[:,"Truth Model"])
        rve_calls[0] = 0
        isostrain_calls = np.array(self.iterationData.loc[:,"ROM 0"])
        isostress_calls = np.array(self.iterationData.loc[:,"ROM 1"])
        isowork_calls = np.array(self.iterationData.loc[:,"ROM 2"])
        pltsin(ax[0], fig, [rve_calls, reifi_out[0], BBO_out[0]],
                [max_val, reifi_out[1], BBO_out[1]],
                ["BAREFOOT", "Reification/Fusion", "Batch Bayesian Optimization"])
        pltsin(ax[1], fig, [iteration, iteration, iteration],
                [isostrain_calls, isostress_calls, isowork_calls],
                ["Isostrain", "Isostress", "Isowork"])
        plt.show()
        # The dataframes are saved in two forms, first a pickled version of the
        # dataframe, and also a csv version for readability
        with open('{}/results/{}/evaluatedPoints'.format(self.workingDir, self.calculationName), 'wb') as f:
            dump(self.evaluatedPoints, f)
        self.evaluatedPoints.to_csv('{}/results/{}/evaluatedPoints.csv'.format(self.workingDir, self.calculationName))
        with open('{}/results/{}/iterationData'.format(self.workingDir, self.calculationName), 'wb') as f:
            dump(self.iterationData, f)
        self.iterationData.to_csv('{}/results/{}/iterationData.csv'.format(self.workingDir, self.calculationName))
        # for the GP Hedge approach, the choice of model for each iteration is
        # also saved to a separate file
        hedge_out = {"ROM":[], "TM":[]}
        if self.acquisitionFunc == "Hedge":
            hedge_out["ROM"] = self.gpHedgeTrack
        if self.tmSampleOpt == "Hedge":
            hedge_out["TM"] = self.gpHedgeTrackTM
        if self.acquisitionFunc == "Hedge" or self.tmSampleOpt == "Hedge":
            with open('{}/results/{}/hedgeRecord'.format(self.workingDir, self.calculationName), 'wb') as f:
                dump(hedge_out, f)
        if self.multiObjective:
            with open('{}/results/{}/paretoRecord{}'.format(self.workingDir, self.calculationName, self.currentIteration), 'wb') as f:
                dump(self.pareto, f)
        self.logger.info("Dataframes Pickled and Dumped to Results Directory")
def __save_calculation_state(self):
skipValues = ['logger', 'pool']
saveObj = {}
for item in self.__dict__:
if item not in skipValues:
saveObj[item] = self.__dict__[item]
# This function saves the entire barefoot object into a pickle file
with open('{}/data/{}_save_state'.format(self.workingDir, self.calculationName), 'wb') as f:
dump(saveObj, f)
self.logger.info("Calculation State Saved")
# self.logger.info("Calculation State Save Skipped")
def __load_from_save(self, workingDir, calculationName):
# This function restores the barefoot object parameters from a saved
# pickle file. In order for this to work, each variable of the object
# is restored separately.
try:
print('{}/data/{}_save_state'.format(workingDir, calculationName))
with open('{}/data/{}_save_state'.format(workingDir, calculationName), 'rb') as f:
saveState = load(f)
self.logger.debug("Save State File Found")
for item in saveState:
setattr(self, item, saveState[item])
except FileNotFoundError:
self.loadFailed = True
self.logger.warning("Could not find Save State File")
def __add_to_evaluatedPoints(self, modelIndex, eval_x, eval_y):
# Adds new data points to the evaluated datapoints dataframe
if self.multiObjective:
temp = np.zeros((eval_x.shape[0], self.nDim+4))
temp[:,0] = modelIndex
temp[:,1] = self.currentIteration
temp[:,2] = eval_y[:,0]
temp[:,3] = eval_y[:,1]
temp[:,4:] = eval_x[:,0:]
temp = pd.DataFrame(temp, columns=self.evaluatedPoints.columns)
else:
temp = np.zeros((eval_x.shape[0], self.nDim+3))
temp[:,0] = modelIndex
temp[:,1] = self.currentIteration
temp[:,2] = eval_y
temp[:,3:] = eval_x
temp = pd.DataFrame(temp, columns=self.evaluatedPoints.columns)
self.evaluatedPoints = pd.concat([self.evaluatedPoints,temp])
if self.multiObjective:
self.pareto = Pareto_finder(np.array(self.evaluatedPoints.iloc[:,2:4]),self.goal)
self.logger.debug("{} New Points Added to Evaluated Points Dataframe".format(eval_x.shape[0]))
def __add_to_iterationData(self, calcTime, iterData):
# Adds new data points to the Iteration Data Dataframe
if self.multiObjective:
temp = np.zeros((1,5+len(self.ROM)+self.nDim))
temp[0,0] = self.currentIteration
temp[0,1] = calcTime
temp[0,2] = self.maxTM[0]
temp[0,3] = self.maxTM[1]
temp[0,4] = iterData[-1]
temp[0,5:5+len(self.ROM)] = iterData[0:len(self.ROM)]
else:
temp = np.zeros((1,4+len(self.ROM)))
temp[0,0] = self.currentIteration
temp[0,1] = self.maxTM
temp[0,2] = calcTime
temp[0,3] = iterData[-1]
temp[0,4:] = iterData[0:len(self.ROM)]
temp = pd.DataFrame(temp, columns=self.iterationData.columns)
self.iterationData = pd.concat([self.iterationData,temp])
self.logger.debug("Iteration {} Data saved to Dataframe".format(self.currentIteration))
@__catch_error
def __get_initial_data__(self):
# Function for obtaining the initial data either by calculation or by
# extracting the data from a file.
params = []
count = []
param_index = 0
if self.multiObjective:
self.maxTM = [-np.inf,-np.inf]
else:
self.maxTM = -np.inf
if self.acquisitionFunc == "Hedge":
self.gpHedgeHist = [[np.random.random()],[np.random.random()],
[np.random.random()],[np.random.random()],
[np.random.random()],[np.random.random()]]
self.gpHedgeProb = np.sum(self.gpHedgeHist, axis=1)
self.gpHedgeTrack = []
if self.tmSampleOpt == "Hedge":
self.gpHedgeHistTM = [[np.random.random()],[np.random.random()],
[np.random.random()],[np.random.random()],
[np.random.random()],[np.random.random()]]
self.gpHedgeProbTM = np.sum(self.gpHedgeHistTM, axis=1)
self.gpHedgeTrackTM = []
if self.multiObjective:
if type(self.maximize) == list:
self.goal = np.array([-1,-1])
if self.maximize[0]:
self.goal[0] = 1
if self.maximize[1]:
self.goal[1] = 1
else:
if self.maximize:
self.goal = np.array([1,1])
else:
self.goal = np.array([-1,-1])
else:
if self.maximize:
self.goal = 1
else:
self.goal = -1
# Check if data needs to be calculated or extracted
if self.calcInitData:
self.logger.debug("Start Calculation of Initial Data")
# obtain LHS initial data for each reduced order model
if self.reification:
for ii in range(len(self.ROM)):
count.append(0)
initInput, check = apply_constraints(self.initDataPathorNum[ii],
self.nDim, resolution=self.res,
A=self.A, b=self.b, Aeq=self.Aeq, beq=self.beq,
lb=self.lb, ub=self.ub, func=self.constr_func,
sampleScheme=self.sampleScheme,opt_sample_size=True)
if check:
self.logger.debug("ROM {} - Initial Data - All constraints applied successfully".format(ii))
else:
self.logger.critical("ROM {} - Initial Data - Some or All Constraints Could not Be applied! Continuing With {}/{}".format(ii, initInput.shape[0], self.initDataPathorNum[ii]))
for jj in range(initInput.shape[0]):
params.append({"Model Index":ii,
"Model":self.ROM[ii],
"Input Values":initInput[jj,:],
"ParamIndex":param_index})
param_index += 1
self.ROMInitInput.append(np.zeros_like(initInput))
if self.multiObjective:
self.ROMInitOutput.append(np.zeros((initInput.shape[0],2)))
else:
self.ROMInitOutput.append(np.zeros(initInput.shape[0]))
# Obtain LHS initial data for Truth Model
initInput, check = apply_constraints(self.initDataPathorNum[-1],
self.nDim, resolution=self.res,
A=self.A, b=self.b, Aeq=self.Aeq, beq=self.beq,
lb=self.lb, ub=self.ub, func=self.constr_func,
sampleScheme=self.sampleScheme,opt_sample_size=True)
count.append(0)
if check:
self.logger.debug("TM - Initial Data - All constraints applied successfully")
else:
self.logger.critical("TM - Initial Data - Some or All Constraints Could not Be applied! Continuing With {}/{}".format(initInput.shape[0], self.initDataPathorNum[-1]))
for jj in range(initInput.shape[0]):
params.append({"Model Index":-1,
"Model":self.TM,
"Input Values":initInput[jj,:],
"ParamIndex":param_index})
param_index += 1
self.TMInitInput = np.zeros_like(initInput)
if self.multiObjective:
self.TMInitOutput = np.zeros((initInput.shape[0],2))
else:
self.TMInitOutput = np.zeros(initInput.shape[0])
# Calculate all the initial data in parallel
temp_x = np.zeros((len(params), self.nDim))
if self.multiObjective:
temp_y = np.zeros((len(params),2))
else:
temp_y = np.zeros(len(params))
temp_index = np.zeros(len(params))
pass_calculations = []
self.logger.debug("Parameters Defined. Starting Concurrent.Futures Calculation")
try:
self.pool.terminate()
self.pool = Pool()
except AttributeError:
self.pool = Pool()
with self.pool as executor:
for result_from_process in zip(params, executor.map(call_model, params)):
par, results = result_from_process
try:
test = results.shape
if par["Model Index"] != -1:
self.ROMInitInput[par["Model Index"]][count[par["Model Index"]],:] = par["Input Values"]
if self.multiObjective:
self.ROMInitOutput[par["Model Index"]][count[par["Model Index"]]] = np.tile(self.goal, (results.shape[0]))*results
else:
self.ROMInitOutput[par["Model Index"]][count[par["Model Index"]]] = self.goal*results
temp_x[par["ParamIndex"],:] = par["Input Values"]
if self.multiObjective:
temp_y[par["ParamIndex"],:] = self.goal*results
else:
temp_y[par["ParamIndex"]] = self.goal*results
temp_index[par["ParamIndex"]] = par["Model Index"]
else:
self.TMInitInput[count[par["Model Index"]],:] = par["Input Values"]
self.TMInitOutput[count[par["Model Index"]]] = self.goal*results
if self.multiObjective:
if results[0,0] > self.maxTM[0]:
self.maxTM[0] = results[0,0]
if results[0,1] > self.maxTM[1]:
self.maxTM[1] = results[0,1]
else:
if np.max(results) > self.maxTM:
self.maxTM = np.max(results)
temp_x[par["ParamIndex"],:] = par["Input Values"]
if self.multiObjective:
temp_y[par["ParamIndex"],:] = self.goal*results
else:
temp_y[par["ParamIndex"]] = self.goal*results
temp_index[par["ParamIndex"]] = par["Model Index"]
count[par["Model Index"]] += 1
pass_calculations.append(par["ParamIndex"])
except AttributeError:
pass
self.logger.debug("Concurrent.Futures Calculation Completed")
if self.multiObjective:
temp_y = temp_y[pass_calculations,:]
else:
temp_y = temp_y[pass_calculations]
temp_x = temp_x[pass_calculations,:]
temp_index = temp_index[pass_calculations]
else:
# extract the initial data from the file
self.logger.debug("Start Loading Initial Data from Files")
with open(self.initDataPathorNum, 'rb') as f:
data = load(f)
# extract data from dictionary in file and assign to correct variables
self.TMInitOutput = data["TMInitOutput"]
self.TMInitInput = data["TMInitInput"]
if self.reification:
self.ROMInitOutput = data["ROMInitOutput"]
self.ROMInitInput = data["ROMInitInput"]
print(self.TMInitInput)
print(self.TMInitOutput)
ROMSize = 0
for mmm in range(len(self.ROMInitInput)):
ROMSize += self.ROMInitOutput[mmm].shape[0]
temp_x = np.zeros((self.TMInitOutput.shape[0]+ROMSize,
self.nDim))
if self.multiObjective:
temp_y = np.zeros((self.TMInitOutput.shape[0]+ROMSize,2))
else:
temp_y = np.zeros(self.TMInitOutput.shape[0]+ROMSize)
temp_index = np.zeros(self.TMInitOutput.shape[0]+ROMSize)
ind = 0
if self.reification:
for ii in range(len(self.ROM)):
for jj in range(self.ROMInitOutput[ii].shape[0]):
temp_x[ind,:] = self.ROMInitInput[ii][jj,:]
if self.multiObjective:
temp_y[ind,:] = self.goal*self.ROMInitOutput[ii][jj,:]
else:
temp_y[ind] = self.goal*self.ROMInitOutput[ii][jj]
temp_index[ind] = ii
ind += 1
count.append(self.ROMInitInput[ii].shape[0])
for jj in range(self.TMInitOutput.shape[0]):
temp_x[ind,:] = self.TMInitInput[jj,:]
if self.multiObjective:
temp_y[ind,:] = self.goal*self.TMInitOutput[jj,:]
if np.max(temp_y[0,0]) > self.maxTM[0]:
self.maxTM[0] = np.max(temp_y[0,0])
if np.max(temp_y[0,1]) > self.maxTM[1]:
self.maxTM[1] = np.max(temp_y[0,1])
else:
temp_y[ind] = self.TMInitOutput[jj]
if self.TMInitOutput[jj] > self.maxTM:
self.maxTM = self.TMInitOutput[jj]
temp_index[ind] = -1
ind += 1
count.append(self.TMInitInput.shape[0])
self.logger.debug("Loading Data From File Completed")
# Add initial data to dataframes
iterData = np.array(count)
self.__add_to_evaluatedPoints(temp_index, temp_x, temp_y)
self.__add_to_iterationData(time()-self.timeCheck, iterData)
self.logger.debug("Initial Data Saved to Dataframes")
self.timeCheck = time()
@__catch_error
def initialize_parameters(self, modelParam, covFunc="M32", iterLimit=100,
sampleCount=50, hpCount=100, batchSize=5,
tmIter=1e6, totalBudget=1e16, tmBudget=1e16,
upperBound=1, lowBound=0.0001, fusedPoints=500,
fusedHP=[], fusedSamples=10000):
"""
This function sets the conditions for the barefoot framework calculations.
All parameters have default values except the model parameters.
Parameters
----------
modelParam : dictionary
This must be a dictionary with the hyperparameters for the reduced
order models as well as the costs for all the models. The specific
values in the dictionary must be:
'model_l': A list with the characteristic length scale for each
dimension in each reduced order model GP.
eg 2 reduced order - 3 dimension models
[[0.1,0.1,0.1],[0.2,0.2,0.2]]
'model_sf': A list with the signal variance for each reduced
order model GP.
'model_sn': A list with the noise variance for each reduced
order model GP.
'means': A list of the mean of each model. Set to 0 if the mean
is not known
'std': A list of the standard deviations of each model. Set to 1
if the standard deviation is not known.
'err_l': A list with the characteristic length scale for each
dimension in each discrepancy GP. Must match dimensions
of model_l
'err_sf': A list with the signal variance for each discrepancy GP.
'err_sn': A list with the noise variance for each discrepancy GP.
'costs': The model costs, including the Truth Model
eg. 2 ROM : [model 1 cost, model 2 cost, Truth model cost]
covFunc : String, optional
The covariance function to used for the Gaussian Process models.
Options are Squared Exponential ("SE") Matern 3/2 ("M32") and
Matern 5/2 ("M52"). The default is "M32".
iterLimit : Int, optional
How many iterations to run the framework calculation before
terminating. The default is 100.
sampleCount : Int, optional
The number of samples to use for the acquisition function calculations.
The default is 50.
hpCount : Int, optional
The number of hyperparameter sets to use. The default is 100.
batchSize : Int, optional
The batch size for the model evaluations. The default is 5.
tmIter : Int, optional
The number of iterations to complete before querying the Truth Model.
The default is 1e6.
totalBudget : Int/Float, optional
The total time budget to expend before terminating the calculation.
The default is 1e16.
tmBudget : Int/Float, optional
The budget to expend before querying the Truth Model. The default
is 1e16.
upperBound : Float, optional
The upper bound for the hyperparameters. The default is 1.
lowBound : Float, optional
The lower bound for the hyperparameters. The default is 0.0001.
fusedPoints : Int, optional
The number of points to sample from a LHS sampler at which to
evaluate the fused mean and variance for building the fused model.
The default is 500.
fusedHP : List, optional
Holds the hyperparameters for the fused model if the approach does not
use the Batch approach
fusedSamples : Int, optional
The number of samples to take from the design space for evaluating the fused
model for determining next-best points from the Truth model.
"""
self.logger.debug("Start Initializing Reification Object Parameters")
self.covFunc = covFunc
self.iterLimit = iterLimit
self.sampleCount = sampleCount
self.hpCount = hpCount
self.batchSize = batchSize
self.tmIterLim = tmIter
self.totalBudget = totalBudget
self.tmBudget = tmBudget
self.upperBound = upperBound
self.lowBound = lowBound
self.modelParam = modelParam
self.modelCosts = modelParam["costs"]
self.fusedHP = fusedHP
self.fusedSamples = fusedSamples
# The numpy linspace module will contract the distance below 1 if there
# are also values above 1. The approach implemented here avoids that
# situation
temp_max = self.lowBound*10
all_HP = np.linspace(self.lowBound, temp_max, num=self.hpCount)
while temp_max < self.upperBound:
temp_min = deepcopy(temp_max)
temp_max = temp_max*10
if temp_max > self.upperBound:
temp_max = self.upperBound
all_HP = np.append(all_HP, np.linspace(temp_min, temp_max, num=self.hpCount))
# randomly combine the options for the hyperparameters into the hyperparameter sets
self.fusedModelHP = np.zeros((self.hpCount,self.nDim+1))
for i in range(self.hpCount):
for j in range(self.nDim+1):
self.fusedModelHP[i,j] = all_HP[np.random.randint(0,all_HP.shape[0])]
# create the evaluation points for determining the fused mean and
# variance
sampleSize = fusedPoints
if self.sampleScheme == "CompFunc":
sampleOption = "CompFunc"
else:
sampleOption = "LHS"
self.xFused, check = apply_constraints(sampleSize,
self.nDim, resolution=self.res,
A=self.A, b=self.b, Aeq=self.Aeq, beq=self.beq,
lb=self.lb, ub=self.ub, func=self.constr_func,
sampleScheme=sampleOption, opt_sample_size=False)
if check:
self.logger.debug("Fused Points - All constraints applied successfully {}/{}".format(self.xFused.shape[0], sampleSize))
else:
self.logger.critical("Fused Points - Sample Size NOT met due to constraints! Continue with {}/{} Samples".format(self.xFused.shape[0], sampleSize))
if not self.restore_calc:
self.logger.debug("Create Reification Object")
if self.multiObjective:
self.TMInitOutput = [np.array(self.TMInitOutput)[:,0],
np.array(self.TMInitOutput)[:,0]]
# build the reification object with the combined inputs and initial values
if self.reification:
self.ROMInitOutput = np.array(self.ROMInitOutput)
temp = [[],[]]
for pp in range(self.ROMInitOutput.shape[0]):
temp[0].append(self.ROMInitOutput[pp,:,0])
temp[1].append(self.ROMInitOutput[pp,:,1])
self.reificationObj = [model_reification(self.ROMInitInput, temp[0],
self.modelParam['model_l'],
self.modelParam['model_sf'],
self.modelParam['model_sn'],
self.modelParam['means'],
self.modelParam['std'],
self.modelParam['err_l'],
self.modelParam['err_sf'],
self.modelParam['err_sn'],
self.TMInitInput, self.TMInitOutput[0],
len(self.ROM), self.nDim, self.covFunc),
model_reification(self.ROMInitInput, temp[1],
self.modelParam['model_l'],
self.modelParam['model_sf'],
self.modelParam['model_sn'],
self.modelParam['means'],
self.modelParam['std'],
self.modelParam['err_l'],
self.modelParam['err_sf'],
self.modelParam['err_sn'],
self.TMInitInput, self.TMInitOutput[1],
len(self.ROM), self.nDim, self.covFunc)]
else:
if self.surrogate == "GP":
self.modelGP = [gp_model(self.TMInitInput, self.TMInitOutput[0],
np.ones((self.nDim)), 1, 0.05,
self.nDim, self.covFunc),
gp_model(self.TMInitInput, self.TMInitOutput[1],
np.ones((self.nDim)), 1, 0.05,
self.nDim, self.covFunc)]
else:
self.modelGP = [bmarsModel(self.TMInitInput, self.TMInitOutput[0]),
bmarsModel(self.TMInitInput, self.TMInitOutput[1])]
else:
# build the reification object with the combined inputs and initial values
if self.reification:
self.reificationObj = model_reification(self.ROMInitInput, self.ROMInitOutput,
self.modelParam['model_l'],
self.modelParam['model_sf'],
self.modelParam['model_sn'],
self.modelParam['means'],
self.modelParam['std'],
self.modelParam['err_l'],
self.modelParam['err_sf'],
self.modelParam['err_sn'],
self.TMInitInput, self.TMInitOutput,
len(self.ROM), self.nDim, self.covFunc)
else:
if self.surrogate == "GP":
self.modelGP = gp_model(self.TMInitInput, self.TMInitOutput,
np.ones((self.nDim)), 1, 0.05,
self.nDim, self.covFunc)
elif self.surrogate == "BMARS":
self.modelGP = bmarsModel(self.TMInitInput, self.TMInitOutput)
self.allTMInput = []
self.allTMOutput = []
self.tmBudgetLeft = self.tmBudget
self.totalBudgetLeft = self.totalBudget
self.currentIteration += 1
self.tmIterCount = 0
self.logger.info("Reification Object Initialized. Ready for Calculations")
def __restart_subs(self):
# This function restarts the sub processes if they have been closed
# while doing the Truth Model evaluations
for kk in range(self.multinode):
try:
os.remove("{}/subprocess/close{}".format(self.workingDir, kk))
os.remove("{}/subprocess/sub{}.control".format(self.workingDir, kk))
os.remove("{}/subprocess/sub{}.start".format(self.workingDir, kk))
self.logger.debug("Close File {} removed".format(kk))
except FileNotFoundError:
self.logger.debug("Close File {} does not exist".format(kk))
calcPerProcess, all_started = self.__start_subprocesses__(self.multinode)
subProcessWait = True
while subProcessWait:
if all_started:
subProcessWait = False
else:
total_started = 0
for fname in range(self.multinode):
if os.path.exists("{}/subprocess/sub{}.start".format(self.workingDir, fname)):
total_started += 1
if total_started == self.multinode:
all_started = True
self.logger.info("All Subprocess Jobs Started Successfully")
def __run_multinode_acq_func(self, x_test, new_mean, calcPerProcess):
# This function controls the parameter setup and transfer for the
# evaluation of the acquisition functions to determine the next best
# points for evaluating the Reduced Order Models when using subprocesses
self.logger.info("Set Up Parameters for Acquisition Function Evaluation and submit to Subprocesses")
parameters = []
parameterFileData = []
sub_fnames = []
count = 0
sub_count = 0
parameterIndex = 0
parameterFileIndex = 0
# Pickle the reification object to be loaded by each of the subprocesses
# this reduces the amount of memory that needs to be transferred
with open("data/reificationObj", 'wb') as f:
dump(self.reificationObj, f)
# set up the parameters to be used in the calculations
for jj in range(len(self.ROM)):
for kk in range(self.sampleCount):
if self.multiObjective:
means = [np.expand_dims(np.array([new_mean[jj][0][kk]]), axis=0),
np.expand_dims(np.array([new_mean[jj][1][kk]]), axis=0)]
model_temp = [means, self.goal, self.MORef, self.pareto[0]]
else:
model_temp = [np.expand_dims(x_test[kk], axis=0),
np.expand_dims(np.array([new_mean[jj][kk]]), axis=0),
jj]
if self.batch:
for mm in range(self.hpCount):
parameterFileData.append((self.currentIteration+1, model_temp, self.xFused, self.fusedModelHP[mm,:],
self.covFunc, x_test, jj, kk, mm, self.sampleCount,
self.modelParam['costs'], self.maxTM))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
# store every 1000 set of parameters in a file for use in the
# subprocesses
if len(parameterFileData) == 1000:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
parameterFileData = []
parameterFileIndex += 1
parameterIndex = 0
count += 1
if count == calcPerProcess:
fname = "{}".format(sub_count)
sub_fnames.append(fname)
# Send the trigger for the subprocess to pick up the data for
# the calculations
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_count), 'wb') as f:
control_param = [0, "iteration", self.acquisitionFunc]
dump(control_param, f)
# dump the index for the parameter files for the subprocess
# to load
with open("{}/subprocess/{}.dump".format(self.workingDir, fname), 'wb') as f:
dump(parameters, f)
parameters = []
count = 0
sub_count += 1
else:
parameterFileData.append((self.currentIteration+1, model_temp, self.xFused, self.fusedHP,
self.covFunc, x_test, jj, kk, 0, self.sampleCount,
self.modelParam['costs'], self.maxTM))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
# store every 1000 set of parameters in a file for use in the
# subprocesses
if len(parameterFileData) == 1000:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
parameterFileData = []
parameterFileIndex += 1
parameterIndex = 0
count += 1
if count == calcPerProcess:
fname = "{}".format(sub_count)
sub_fnames.append(fname)
# Send the trigger for the subprocess to pick up the data for
# the calculations
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_count), 'wb') as f:
control_param = [0, "iteration", self.acquisitionFunc]
dump(control_param, f)
# dump the index for the parameter files for the subprocess
# to load
with open("{}/subprocess/{}.dump".format(self.workingDir, fname), 'wb') as f:
dump(parameters, f)
parameters = []
count = 0
sub_count += 1
# dump the last of the parameter datasets
if len(parameterFileData) != 0:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
# trigger the last subprocess and dump the index parameters
if parameters != []:
fname = "{}".format(sub_count)
sub_fnames.append(fname)
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_count), 'wb') as f:
control_param = [0, "iteration", self.acquisitionFunc]
dump(control_param, f)
with open("{}/subprocess/{}.dump".format(self.workingDir, fname), 'wb') as f:
dump(parameters, f)
self.logger.info("Start Waiting for Results to Complete")
# the calculations will take some time, so start a sleep timer to wait
# for a minute before starting to check for results
calc_start = time()
sleep(10)
finished = 0
process_costs = np.zeros((len(sub_fnames)))
# check for finished subprocess calculations, and only continue once
# all subprcesses calculations are completed
while finished < len(sub_fnames):
finished = 0
proc_count = 0
for sub_name in sub_fnames:
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_name), 'rb') as f:
control_param = load(f)
if control_param[0] == 1:
finished += 1
if process_costs[proc_count] == 0:
# When a subprocess has completed, record how long
# the subprocess ran for. This is the cost of the
# subprocess calculation
process_costs[proc_count] = time()-calc_start
if finished < len(sub_fnames):
sleep(10)
self.logger.info("Acquisition Function Evaluations Completed")
# Calculate the total subprocess cost.
process_cost = np.sum(process_costs)
# extract all the outputs from the subprocesses and collate them
# into a single array
kg_output = []
for sub_name in sub_fnames:
cont_loop = True
load_failed = True
timer = 0
while cont_loop:
try:
with open("{}/subprocess/{}.output".format(self.workingDir, sub_name), 'rb') as f:
try:
sub_output = load(f)
except EOFError:
raise FileNotFoundError
load_failed = False
cont_loop = False
except FileNotFoundError:
sleep(10)
timer += 30
if timer > 300:
cont_loop = False
if not load_failed:
self.logger.debug("sub_output {} found | length: {}".format(sub_name, len(sub_output)))
for jj in range(len(sub_output)):
kg_output.append(sub_output[jj])
os.remove("{}/subprocess/{}.output".format(self.workingDir, sub_name))
os.remove("{}/subprocess/{}.dump".format(self.workingDir, sub_name))
else:
self.logger.debug("sub_output {} NOT found".format(len(sub_name)))
self.logger.debug("Calculation Results retrieved from Subprocess Jobs")
return kg_output, process_cost
def __run_singlenode_acq_func(self, x_test, new_mean):
# As before, this function calculates the acquisition function values
# for determining the next best points to be queried from the reduced
# order models. This function runs the concurrent.futures calculations
# directly.
parameters = []
parameterFileData = []
count = 0
parameterIndex = 0
parameterFileIndex = 0
self.logger.debug("Set Up Parameters for Acquisition Function Evaluation")
# Save the current reification object to a file for loading
with open("data/reificationObj", 'wb') as f:
dump(self.reificationObj, f)
# Define the parameters for each calculation
for jj in range(len(self.ROM)):
for kk in range(self.sampleCount):
if self.multiObjective:
means = [np.expand_dims(np.array([new_mean[jj][0][kk]]), axis=0),
np.expand_dims(np.array([new_mean[jj][1][kk]]), axis=0)]
model_temp = [means, self.goal, self.MORef, self.pareto[0]]
else:
model_temp = [np.expand_dims(x_test[kk], axis=0),
np.expand_dims(np.array([new_mean[jj][kk]]), axis=0),
jj]
if self.batch:
for mm in range(self.hpCount):
parameterFileData.append((self.currentIteration+1, model_temp, self.xFused, self.fusedModelHP[mm,:],
self.covFunc, x_test, jj, kk, mm, self.sampleCount,
self.modelParam['costs'], self.maxTM))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
# save each 1000 parameter sets to a file to reduce the amount of memory used
if len(parameterFileData) == 1000:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
parameterFileData = []
parameterFileIndex += 1
parameterIndex = 0
count += 1
else:
parameterFileData.append((self.currentIteration+1, model_temp, self.xFused, self.fusedHP,
self.covFunc, x_test, jj, kk, 0, self.sampleCount,
self.modelParam['costs'], self.maxTM))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
# save each 1000 parameter sets to a file to reduce the amount of memory used
if len(parameterFileData) == 1000:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
parameterFileData = []
parameterFileIndex += 1
parameterIndex = 0
count += 1
# save the last of the parameters sets
if len(parameterFileData) != 0:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
# set which acquistion function will be used
if self.acquisitionFunc == "EI":
acqFunc = calculate_EI
elif self.acquisitionFunc == "KG":
acqFunc = calculate_KG
elif self.acquisitionFunc == "TS":
acqFunc = calculate_TS
elif self.acquisitionFunc == "PI":
acqFunc = calculate_PI
elif self.acquisitionFunc == "UCB":
acqFunc = calculate_UCB
elif self.acquisitionFunc == "Hedge":
acqFunc = calculate_GPHedge
elif self.acquisitionFunc == "Greedy":
acqFunc = calculate_Greedy
elif self.acquisitionFunc == "EHVI":
acqFunc = calculate_EHVI
kg_output = []
# Start the concurrent calculations and return the output array
self.logger.info("Start Acquisition Function Evaluations for {} Parameter Sets".format(len(parameters)))
try:
self.pool.terminate()
self.pool = Pool()
except AttributeError:
self.pool = Pool()
with self.pool as executor:
for result_from_process in zip(parameters, executor.map(acqFunc,parameters)):
params, results = result_from_process
kg_output.append(results)
self.logger.info("Acquisition Function Evaluations Completed")
return kg_output, 0
def __run_multinode_fused(self, tm_test):
# As with the reduced order model calculations, this function evaluates
# the selected acquisition function to determine the next best points to
# evaluate from the Truth model
# Since this set of calculations uses only the hyperparameter count,
# a new calculation is needed to determine how many calculations to
# do on each subprocess
calc_limit = (-(-self.hpCount//self.multinode))
self.logger.debug("Define Parameters for Max Value Evaluations")
parameters = []
parameterFileData = []
parameterIndex = 0
parameterFileIndex = 0
count = 0
sub_count = 0
sub_fnames = []
# Save the reification object to a file
with open("data/reificationObj", 'wb') as f:
dump(self.reificationObj, f)
if self.multiObjective:
extra_data = [self.pareto[0], self.goal, self.MORef]
else:
extra_data = []
for mm in range(self.hpCount):
parameterFileData.append((self.currentIteration+1, extra_data, self.xFused, self.fusedModelHP[mm,:],
self.covFunc, tm_test, self.maxTM, 0.01, self.tmSampleOpt))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
count += 1
# Save every 500 parameter sets to a separate file to reduce memory
# usage
if len(parameterFileData) == 500:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
parameterFileData = []
parameterFileIndex += 1
parameterIndex = 0
if count == calc_limit:
fname = "{}".format(sub_count)
sub_fnames.append(fname)
# Trigger the subprocesses with a new calculation set
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_count), 'wb') as f:
control_param = [0, "fused", self.acquisitionFunc]
dump(control_param, f)
# save the parameter indices to a file
with open("{}/subprocess/{}.dump".format(self.workingDir, fname), 'wb') as f:
dump(parameters, f)
parameters = []
count = 0
sub_count += 1
# save the last of the parameter sets to a file
if len(parameterFileData) != 0:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(parameterFileData, f)
if parameters != []:
fname = "{}".format(sub_count)
sub_fnames.append(fname)
# Trigger the final subprocess to start calculations
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_count), 'wb') as f:
control_param = [0, "fused", self.acquisitionFunc]
dump(control_param, f)
# dump the parameter indices to a file
with open("{}/subprocess/{}.dump".format(self.workingDir, fname), 'wb') as f:
dump(parameters, f)
self.logger.info("Parameters for Max Value Calculations Sent to Subprocess")
# wait for calculations to finish
sleep(10)
finished = 0
# check that all calculations have completed before continuing
while finished < len(sub_fnames):
finished = 0
for sub_name in sub_fnames:
with open("{}/subprocess/sub{}.control".format(self.workingDir, sub_name), 'rb') as f:
control_param = load(f)
if control_param[0] == 1:
finished += 1
if finished < len(sub_fnames):
sleep(10)
fused_output = []
# Extract the outputs from the individual subprocess output files and
# collate into a single array
for sub_name in sub_fnames:
cont_loop = True
load_failed = True
timer = 0
while cont_loop:
try:
with open("{}/subprocess/{}.output".format(self.workingDir, sub_name), 'rb') as f:
sub_output = load(f)
load_failed = False
cont_loop = False
except FileNotFoundError:
sleep(10)
timer += 30
if timer > 300:
cont_loop = False
if not load_failed:
self.logger.debug("sub_output {} found | length: {}".format(sub_name, len(sub_output)))
for jj in range(len(sub_output)):
fused_output.append(sub_output[jj])
os.remove("{}/subprocess/{}.output".format(self.workingDir, sub_name))
os.remove("{}/subprocess/{}.dump".format(self.workingDir, sub_name))
else:
self.logger.debug("sub_output {} NOT found".format(len(sub_name)))
# change the format of the output array to be a numpy array
fused_output = np.array(fused_output, dtype=object)
if fused_output.shape[0] == 0:
fused_output = np.array([[0,0]])
self.logger.info("Max Value Calculations Completed")
return fused_output
    def __run_singlenode_fused(self, tm_test):
        # This function achieves the same functionality as the multi-node fused
        # function above, but does it all on the base node, rather than sending the
        # data to subprocesses.
        #
        # tm_test : candidate input points for the Truth Model query.
        # Returns the raw per-acquisition-function lists when using the
        # GP Hedge approach, otherwise a deduplicated array of
        # (max value, sample index) pairs.
        parameters = []
        parameterFileData = []
        # initialize the parameters for the fused model calculations and
        # start the calculation
        self.logger.debug("Define Parameters for Max Value Evaluations")
        parameterIndex = 0
        parameterFileIndex = 0
        # save the reification object to a separate file
        with open("data/reificationObj", 'wb') as f:
            dump(self.reificationObj, f)
        # extra data needed by the EHVI calculation in the multi-objective case
        if self.multiObjective:
            extra_data = [self.pareto[0], self.goal, self.MORef]
        else:
            extra_data = []
        if self.batch:
            # batch approach: one parameter set per hyperparameter set
            for mm in range(self.hpCount):
                parameterFileData.append((self.currentIteration+1, extra_data, self.xFused, self.fusedModelHP[mm,:],
                                      self.covFunc, tm_test, self.maxTM, 0.01, self.tmSampleOpt))
                parameters.append([parameterIndex, parameterFileIndex])
                parameterIndex += 1
                # Save each set of 500 parameters to a separate file
                if len(parameterFileData) == 500:
                    with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
                        dump(parameterFileData, f)
                    parameterFileData = []
                    parameterFileIndex += 1
                    parameterIndex = 0
        else:
            # non-batch approach: a single set of fused hyperparameters
            parameterFileData.append((self.currentIteration+1, extra_data, self.xFused, self.fusedHP,
                                  self.covFunc, tm_test, self.maxTM, 0.01, self.tmSampleOpt))
            parameters.append([parameterIndex, parameterFileIndex])
            parameterIndex += 1
            # Save each set of 500 parameters to a separate file
            if len(parameterFileData) == 500:
                with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
                    dump(parameterFileData, f)
                parameterFileData = []
                parameterFileIndex += 1
                parameterIndex = 0
        # Save the last of the parameter sets to a file
        if len(parameterFileData) != 0:
            with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
                dump(parameterFileData, f)
        # Set up a list of outputs for each of the results from the acquisition
        # functions if using the GP Hedge approach
        if self.tmSampleOpt == "Hedge":
            fused_out = [[],[],[],[],[],[]]
        else:
            # Create just a single list for when using other Acquisition Functions
            fused_output = []
        self.logger.info("Start Max Value Calculations | {} Sets".format(len(parameters)))
        count = 0
        if self.multiObjective:
            func = fused_EHVI
        else:
            func = fused_calculate
        # Run the concurrent processes and save the outputs
        # (terminate any stale pool before creating a fresh one)
        try:
            self.pool.terminate()
            self.pool = Pool()
        except AttributeError:
            self.pool = Pool()
        with self.pool as executor:
            for result_from_process in zip(parameters, executor.map(func,parameters)):
                params, results = result_from_process
                if self.tmSampleOpt == "Hedge":
                    # one sub-list per acquisition function
                    fused_out[0].append(results[0][0])
                    fused_out[1].append(results[0][1])
                    fused_out[2].append(results[0][2])
                    fused_out[3].append(results[0][3])
                    fused_out[4].append(results[0][4])
                    fused_out[5].append(results[0][5])
                else:
                    fused_output.append(results[0])
                count += 1
        # When using the GP Hedge approach the list of outputs are returned
        # as-is
        if self.tmSampleOpt == "Hedge":
            return fused_out
        # when using other acquisition functions process the output to attempt
        # the removal of all duplicates and then return the processed output
        # NOTE(review): `results` here is the value left over from the last
        # loop iteration; results[1] is used as the row count and this raises
        # NameError if `parameters` is empty — confirm callers guarantee at
        # least one parameter set
        max_values = np.zeros((results[1],2))
        for ii in range(len(fused_output)):
            if max_values[fused_output[ii][1],0] != 0:
                # keep the larger value for a sample index seen before
                if max_values[fused_output[ii][1],0] < fused_output[ii][0]:
                    max_values[fused_output[ii][1],0] = fused_output[ii][0]
                    max_values[fused_output[ii][1],1] = fused_output[ii][1]
            else:
                max_values[fused_output[ii][1],0] = fused_output[ii][0]
                max_values[fused_output[ii][1],1] = fused_output[ii][1]
        fused_output = max_values[np.where(max_values[:,0]!=0)]
        if fused_output.shape[0] == 0:
            fused_output = np.array([[0,0]])
        self.logger.info("Max Value Calculations Completed")
        return fused_output
def __call_ROM(self, medoid_out, x_val):
# This function serves to evaluate the Reduced Order Models at the
# determined points. This is done in parallel to reduce the time taken
params = []
count = np.zeros((len(self.ROM)+1))
if self.multiObjective:
current = np.array(self.iterationData.iloc[:,4:5+len(self.ROM)])[-1,:]
else:
current = np.array(self.iterationData.iloc[:,3:])[-1,:]
count[0:len(self.ROM)] = current[1:]
count[-1] = current[0]
param_index = 0
# Define the parameter sets needed for each calculation
self.logger.debug("Define Parameters for ROM Function Evaluations")
for iii in range(medoid_out.shape[0]):
params.append({"Model Index":medoid_out[iii,3],
"Model":self.ROM[medoid_out[iii,3]],
"Input Values":x_val[iii,:],
"ParamIndex":param_index})
param_index += 1
temp_x = np.zeros((len(params), self.nDim))
if self.multiObjective:
temp_y = np.zeros((len(params),2))
else:
temp_y = np.zeros(len(params))
temp_index = np.zeros(len(params))
costs = np.zeros(len(params))
passed_calcs = []
# Run the concurrent calculations and extract the results
self.logger.info("Start ROM Function Evaluations | {} Calculations".format(len(params)))
try:
self.pool.terminate()
self.pool = Pool()
except AttributeError:
self.pool = Pool()
with self.pool as executor:
for result_from_process in zip(params, executor.map(call_model, params)):
par, results = result_from_process
costs[par["ParamIndex"]] += self.modelCosts[par["Model Index"]]
# if the truth function fails to evaluate, it should return false
# and therefore the results are not included in the output
try:
test = results.shape
results_evaluate = True
passed_calcs.append(par["ParamIndex"])
except AttributeError:
results_evaluate = False
# if self.multiObjective:
# try:
# if results == False:
# results_evaluate = False
# except ValueError:
# results_evaluate = True
# passed_calcs.append(par["ParamIndex"])
# else:
# if results != False:
# results_evaluate = True
# passed_calcs.append(par["ParamIndex"])
# else:
# results_evaluate = False
if results_evaluate:
if len(results.shape) == 1:
results = np.expand_dims(results, axis=0)
if self.multiObjective:
results = self.goal*results
temp_y[par["ParamIndex"],:] = results
else:
results = self.goal*results
temp_y[par["ParamIndex"]] = results
temp_x[par["ParamIndex"],:] = par["Input Values"]
temp_index[par["ParamIndex"]] = par["Model Index"]
if self.multiObjective:
self.reificationObj[0].update_GP(par["Input Values"], results[0,0], par["Model Index"])
self.reificationObj[1].update_GP(par["Input Values"], results[0,1], par["Model Index"])
else:
self.reificationObj.update_GP(par["Input Values"], results, par["Model Index"])
count[par["Model Index"]] += 1
# Remove any calculations that failed from the output and save the
# data
temp_x = temp_x[passed_calcs]
temp_y = temp_y[passed_calcs]
temp_index = temp_index[passed_calcs]
return temp_x, temp_y, temp_index, costs, count, len(passed_calcs)
def __call_Truth(self, params, count):
# This function evaluates the truth model at the points defined by the
# framework. The parameters for the calculation are defined elsewhere
# and this framework just runs the evaluations
temp_x = np.zeros((len(params), self.nDim))
if self.multiObjective:
temp_y = np.zeros((len(params),2))
else:
temp_y = np.zeros(len(params))
temp_index = np.zeros(len(params))
costs = np.zeros(len(params))
passed_calcs = []
# Run the concurrent calculations and extract the results
self.logger.info("Start Truth Model Evaluations | {} Sets".format(len(params)))
try:
self.pool.terminate()
self.pool = Pool()
except AttributeError:
self.pool = Pool()
with self.pool as executor:
for result_from_process in zip(params, executor.map(call_model, params)):
par, results = result_from_process
costs[par["ParamIndex"]] += self.modelCosts[par["Model Index"]]
# if the truth function fails to evaluate, it should return false
# and therefore the results are not included in the output
try:
test = results.shape
results_evaluate = True
passed_calcs.append(par["ParamIndex"])
except AttributeError:
results_evaluate = False
# if self.multiObjective:
# try:
# if results == False:
# results_evaluate = False
# except ValueError:
# results_evaluate = True
# passed_calcs.append(par["ParamIndex"])
# else:
# if results != False:
# results_evaluate = True
# passed_calcs.append(par["ParamIndex"])
# else:
# results_evaluate = False
if results_evaluate:
if len(results.shape) == 1:
results = np.expand_dims(results, axis=0)
if self.multiObjective:
results = self.goal*results
temp_y[par["ParamIndex"],:] = results
else:
results = self.goal*results
temp_y[par["ParamIndex"]] = results
temp_x[par["ParamIndex"],:] = par["Input Values"]
temp_index[par["ParamIndex"]] = par["Model Index"]
count[par["Model Index"]] += 1
if self.multiObjective:
if self.reification:
self.reificationObj[0].update_truth(par["Input Values"], results[0,0])
self.reificationObj[1].update_truth(par["Input Values"], results[0,1])
else:
self.modelGP[0].update(par["Input Values"], results[0,0], 0.05, False)
self.modelGP[1].update(par["Input Values"], results[0,1], 0.05, False)
else:
if self.reification:
self.reificationObj.update_truth(par["Input Values"], results)
else:
self.modelGP.update(par["Input Values"], results, 0.05, False)
# Remove any calculations that failed from the output and save the
# data
if passed_calcs != []:
temp_x = temp_x[passed_calcs]
temp_y = temp_y[passed_calcs]
temp_index = temp_index[passed_calcs]
self.logger.info("Truth Model Evaluations Completed")
self.__add_to_evaluatedPoints(temp_index, temp_x, temp_y)
self.totalBudgetLeft -= self.batchSize*self.modelCosts[-1]
if self.multiObjective:
if np.max(temp_y[:,0]) > self.maxTM[0]:
self.maxTM[0] = np.max(temp_y[:,0])
if np.max(temp_y[:,1]) > self.maxTM[1]:
self.maxTM[1] = np.max(temp_y[:,1])
else:
if np.max(temp_y) > self.maxTM:
self.maxTM = np.max(temp_y)
else:
self.logger.critical("All Truth Model Evaluations Failed to Produce Results! Continue with no new results.")
# Return the updated model call counts
return count
def __singleAcqFuncApproach(self, x_test, new_mean, calcPerProcess):
# this function is set up to be used in conjunction with the GP Hedge
# approach to call the required acquisition function calls
if self.multinode > 0:
kg_output, process_cost = self.__run_multinode_acq_func(x_test,
new_mean,
calcPerProcess)
else:
kg_output, process_cost = self.__run_singlenode_acq_func(x_test,
new_mean)
return kg_output, process_cost
def __gpHedgeApproach(self, x_test, new_mean, calcPerProcess):
# This function is for using the GP Hedge Portfolio optimization appraoch
# Calculate the probabilities for each acquisition function
prob = self.gpHedgeProb/np.sum(self.gpHedgeProb)
# determine the index of the function with the highest probability
index_Max_prob = np.where(prob == np.max(prob))[0][0]
self.gpHedgeTrack.append(index_Max_prob)
# run the individual acquisition function evaluations
output, process_cost = self.__singleAcqFuncApproach(x_test, new_mean, calcPerProcess)
# the output will be a list of lists, choose the one corresponding to the
# maximum probability
kg_output = output[index_Max_prob]
clusters = []
# determine the batch of next best points for all acquisition function
# outputs for use in calculating the gain later
for ii in range(6):
cluster_output = np.array(output[ii])
# Cluster the acquisition function output
medoid_out = self.__kg_calc_clustering(cluster_output)
clusters.append(x_test[medoid_out[:,2].astype(int),:])
# save the clusters
with open("data/hedgeClusters", 'wb') as f:
dump(clusters, f)
# return the output from the selected function
return kg_output, process_cost
    def __update_Hedge_Probabilities(self, models, x_val):
        """
        Update the GP Hedge gains for either the ROM or Truth Model stage.

        At each iteration when using the GP Hedge approach it is necessary to
        calculate the gain associated with each of the six acquisition
        functions. The clusters selected earlier by each acquisition function
        (stored in "data/hedgeClusters") are evaluated with the fused model
        for every set of hyperparameters, and the maximum mean prediction is
        appended to that function's gain history.

        Parameters
        ----------
        models : str
            "ROM" to update the ROM-stage gain history/probabilities,
            "TM" for the Truth-Model-stage ones.
        x_val : ndarray
            NOTE(review): not referenced anywhere in this method — appears to
            be kept only for call-signature symmetry; confirm before removing.
        """
        # load the data, which is the clusters from each acquistion function output
        with open("data/hedgeClusters", 'rb') as f:
            clusters = load(f)
        parameters = []
        parameterFileData = []
        # initialize the parameters for the fused model calculations and
        # start the calculation
        self.logger.debug("Define Parameters for Max Value Evaluations")
        parameterIndex = 0
        parameterFileIndex = 0
        # the subprocess workers read the surrogate from "data/reificationObj":
        # store the reification object, or the fused GP when not reifying
        if self.reification:
            with open("data/reificationObj", 'wb') as f:
                dump(self.reificationObj, f)
        else:
            with open("data/reificationObj", 'wb') as f:
                dump(self.modelGP, f)
        # for each set of results, define the parameters and evaluate all the
        # fused model GPs
        for ii in range(6):
            clusters[ii] = np.array(clusters[ii])
            for mm in range(self.hpCount):
                # NOTE(review): both branches below append identical tuples —
                # if the TM stage was meant to use different hyperparameters,
                # this may be a latent bug; confirm against the worker code.
                if models == "ROM":
                    parameterFileData.append((1, self.reification, self.xFused, self.fusedModelHP[mm,:],
                                              self.covFunc, clusters[ii], self.maxTM, 0.01, ii))
                elif models == "TM":
                    parameterFileData.append((1, self.reification, self.xFused, self.fusedModelHP[mm,:],
                                              self.covFunc, clusters[ii], self.maxTM, 0.01, ii))
                parameters.append([parameterIndex, parameterFileIndex])
                parameterIndex += 1
                # save each set of 500 parameters in a file
                if len(parameterFileData) == 500:
                    with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
                        dump(parameterFileData, f)
                    parameterFileData = []
                    parameterFileIndex += 1
                    parameterIndex = 0
        # save the last set of parameters in a file
        if len(parameterFileData) != 0:
            with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
                dump(parameterFileData, f)
        # run all the calculations concurrently and obtain the outputs
        fused_output = [[],[],[],[],[],[]]
        count = 0
        # make sure a fresh pool is used for this batch of evaluations
        try:
            self.pool.terminate()
            self.pool = Pool()
        except AttributeError:
            self.pool = Pool()
        with self.pool as executor:
            for result_from_process in zip(parameters, executor.map(evaluateFusedModel,parameters)):
                params, results = result_from_process
                # results[0] is the acquisition-function index, results[1] its
                # fused-model evaluation
                fused_output[results[0]].append(results[1])
                count += 1
        # update the gain for each acquisition function for either the ROM or TM
        if models == "ROM":
            for ii in range(6):
                # gain = best mean fused-model prediction over the cluster
                # selected by acquisition function ii
                mean_output = np.mean(np.array(fused_output[ii]).transpose(), axis=1)
                self.gpHedgeHist[ii].append(np.max(mean_output))
                # keep the gain history bounded to a rolling window
                if len(self.gpHedgeHist[ii]) > 2*self.tmIterLim:
                    self.gpHedgeHist[ii] = self.gpHedgeHist[ii][1:]
            self.gpHedgeProb = np.sum(self.gpHedgeHist, axis=1)
        elif models == "TM":
            for ii in range(6):
                mean_output = np.mean(np.array(fused_output[ii]).transpose(), axis=1)
                self.gpHedgeHistTM[ii].append(np.max(mean_output))
                if len(self.gpHedgeHistTM[ii]) > 2*self.tmIterLim:
                    self.gpHedgeHistTM[ii] = self.gpHedgeHistTM[ii][1:]
            self.gpHedgeProbTM = np.sum(self.gpHedgeHistTM, axis=1)
def __singleAcqFused(self, tm_test):
# For the GP Hedge appraoch for the Truth Model, this functions
# calls the individual calculations in either single- or multi-node configuration
if self.multinode > 0:
fused_output = self.__run_multinode_fused(tm_test)
else:
fused_output = self.__run_singlenode_fused(tm_test)
return fused_output
def __hedgeFused(self, tm_test):
# This function controls the use of the GP Hedge appraoch in the calculation
# of the next best points for the Truth model
# calculate the most recent probabilities and determine which acquisition
# function has the maximum probability
prob = self.gpHedgeProbTM/np.sum(self.gpHedgeProbTM)
index_Max_prob = np.where(prob == np.max(prob))[0][0]
self.gpHedgeTrackTM.append(prob)
# obtain the outputs from the acquisition functions
output = self.__singleAcqFused(tm_test)
fused_output = output[index_Max_prob]
max_values = np.zeros((tm_test.shape[0],2))
# process the selected output to remove duplicates
for ii in range(len(fused_output)):
if max_values[fused_output[ii][1],0] != 0:
if max_values[fused_output[ii][1],0] < fused_output[ii][0]:
max_values[fused_output[ii][1],0] = fused_output[ii][0]
max_values[fused_output[ii][1],1] = fused_output[ii][1]
else:
max_values[fused_output[ii][1],0] = fused_output[ii][0]
max_values[fused_output[ii][1],1] = fused_output[ii][1]
fused_output = max_values[np.where(max_values[:,0]!=0)]
if fused_output.shape[0] == 0:
fused_output = np.array([[0,0]])
self.logger.info("Max Value Calculations Completed")
clust = []
# cluster all the outputs, for the calculation of the gain at the
# end of the iteration
for ii in range(6):
cluster_output = np.array(output[ii], dtype=object)
# Cluster the acquisition function output
try:
if cluster_output.shape[0] > self.batchSize:
# medoids, clusters = k_medoids(cluster_output, self.batchSize)
# kmedoids = KMedoids(n_clusters=self.batchSize, random_state=0).fit(cluster_output[:,0].reshape((-1,1)))
# medoids = kmedoids.medoid_indices_
# medoids = kmedoids_max(cluster_output[:,0].reshape((-1,1)), self.batchSize)
medoids = kmedoids_max(cluster_output, self.batchSize)
else:
medoids = []
for iii in range(cluster_output.shape[0]):
medoids.append(iii)
except:
# medoids, clusters = k_medoids(cluster_output, 1)
# kmedoids = KMedoids(n_clusters=self.batchSize, random_state=0).fit(cluster_output[:,0].reshape((-1,1)))
# medoids = kmedoids.medoid_indices_
# medoids = kmedoids_max(cluster_output[:,0].reshape((-1,1)), self.batchSize)
medoids = kmedoids_max(cluster_output, 1)
clust.append(np.array(tm_test[medoids,:], dtype=np.float))
# save the clusters for use later
with open("data/hedgeClusters", 'wb') as f:
dump(clust, f)
return fused_output
    def __close_subs_on_error(func):
        """
        If an error occurs during the optimization, a multinode calculation must
        still close all subprocesses to avoid excessive computing hour costs.

        The wrapper returns True when the wrapped call completed without an
        unexpected error, and False otherwise.
        """
        def close_subs(self):
            # tracks whether the wrapped routine completed cleanly
            no_error = False
            try:
                func(self)
                no_error = True
            except Exception as err:
                # the external-ROM workflow deliberately raises with this exact
                # message to pause the framework; it is not treated as a failure
                if str(err) == 'Framework Shut Down to Facilitate External ROM Calculations!':
                    self.logger.info(err)
                    no_error = True
                else:
                    self.logger.critical("Optimization Code Failed - See Error Below")
                    self.logger.exception(err)
            # signal every subprocess to shut down by writing its close file
            if self.multinode > 0:
                for fname in range(self.multinode):
                    with open("{}/subprocess/close{}".format(self.workingDir, fname), 'w') as f:
                        f.write("Close Subprocess {}".format(fname))
            return no_error
        return close_subs
@__close_subs_on_error
def run_BAREFOOT(self):
"""
This is the main optimization control function which handles all the calculations
of the BAREFOOT Framework
"""
if self.batch:
self.logger.info("Start Full BAREFOOT Framework Calculation")
else:
self.logger.info("Start Reification Only Framework Calculation")
# Check if the calculation requires multiple nodes and start them if necessary
if self.multinode > 0:
calcPerProcess, all_started = self.__start_subprocesses__(self.multinode)
else:
calcPerProcess, all_started = (0, True)
self.ROM_Calc_Start = True
# Once all subprocesses have started, start the main calculation
if all_started:
start_process = True
while start_process:
if self.ROM_Calc_Start:
text_num = str(self.currentIteration)
self.logger.info("#########################################################")
self.logger.info("# Start Iteration : {} #".format("0"*(4-len(text_num))+text_num))
self.logger.info("#########################################################")
self.timeCheck = time()
# Check constraints and obtain latin-hypercube sampled test points
evalP = []
for pp in range(len(self.ROM)):
evalP.append(np.array(self.evaluatedPoints.loc[self.evaluatedPoints['Model Index']==pp,self.inputLabels]))
x_test, check = apply_constraints(self.sampleCount,
self.nDim, resolution=self.res,
A=self.A, b=self.b, Aeq=self.Aeq, beq=self.beq,
lb=self.lb, ub=self.ub, func=self.constr_func,
sampleScheme=self.sampleScheme,opt_sample_size=True,
evaluatedPoints=evalP)
# If constraints can't be satisfied, notify the user in the log
if check:
self.logger.debug("ROM - All constraints applied successfully {}/{}".format(x_test.shape[0], self.sampleCount))
else:
self.logger.critical("ROM - Sample Size NOT met due to constraints! Continue with {}/{} Samples".format(x_test.shape[0], self.sampleCount))
if self.multiObjective:
if self.reification:
new_mean = []
# obtain predictions from the low-order GPs
for iii in range(len(self.ROM)):
new1, var1 = self.reificationObj[0].predict_low_order(x_test, iii)
new2, var2 = self.reificationObj[1].predict_low_order(x_test, iii)
new_mean.append([new1, new2])
else:
new_mean = []
new1, var1 = self.modelGP[0].predict_var(x_test)
new2, var2 = self.modelGP[1].predict_var(x_test)
new_mean.append([new1, new2])
else:
if self.reification:
new_mean = []
# obtain predictions from the low-order GPs
for iii in range(len(self.ROM)):
new, var = self.reificationObj.predict_low_order(x_test, iii)
new_mean.append(new)
else:
new_mean, var = self.modelGP.predict_var(x_test)
# Calculate the Acquisition Function for each of the test points in each
# model for each set of hyperparameters
if self.acquisitionFunc == "Hedge":
kg_output, process_cost = self.__gpHedgeApproach(x_test, new_mean, calcPerProcess)
else:
kg_output, process_cost = self.__singleAcqFuncApproach(x_test, new_mean, calcPerProcess)
kg_output = np.array(kg_output, dtype=object)
# Cluster the acquisition function output
medoid_out = self.__kg_calc_clustering(kg_output)
model_cost = time()-self.timeCheck + process_cost
self.timeCheck = time()
if not self.externalROM:
# Call the reduced order models
temp_x, temp_y, temp_index, costs, count, check = self.__call_ROM(medoid_out, x_test[medoid_out[:,2].astype(int),:])
if check != 0:
self.__add_to_evaluatedPoints(temp_index, temp_x, temp_y)
if self.acquisitionFunc == "Hedge":
self.__update_Hedge_Probabilities("ROM", x_test)
else:
self.logger.critical("All ROM Evalutions Failed to produce a result! Continue with no new data")
else:
self.__external_ROM_data_save(medoid_out, x_test[medoid_out[:,2].astype(int),:])
# Set up external ROM
self.ROM_Calc_Start = False
self.__save_calculation_state()
sleep(10)
raise RuntimeWarning("Framework Shut Down to Facilitate External ROM Calculations!")
else:
temp_x, temp_y, temp_index, costs, count, check = self.__external_ROM_data_load(medoid_out, x_test[medoid_out[:,2].astype(int),:])
# Extract external ROM Data
self.ROM_Calc_Start = True
self.totalBudgetLeft -= np.sum(costs) + model_cost
self.tmBudgetLeft -= np.sum(costs) + model_cost
self.logger.info("ROM Function Evaluations Completed")
if (self.tmBudgetLeft < 0) or (self.tmIterCount == self.tmIterLim):
self.logger.info("Start Truth Model Evaluations")
evalP = [np.array(self.evaluatedPoints.loc[self.evaluatedPoints['Model Index']==-1,self.inputLabels])]
# create a test set that is dependent on the number of dimensions
tm_test, check = apply_constraints(self.fusedSamples,
self.nDim, resolution=self.res,
A=self.A, b=self.b, Aeq=self.Aeq, beq=self.beq,
lb=self.lb, ub=self.ub, func=self.constr_func,
sampleScheme=self.sampleScheme, opt_sample_size=True,
evaluatedPoints=evalP)
if check:
self.logger.debug("Truth Model Query - All constraints applied successfully")
else:
self.logger.critical("Truth Model Query - Some or All Constraints Could Not Be Applied! Continuing Without Constraints")
# Evaluate the acquistion function to determine the next best
# points to evaluate
if self.tmSampleOpt == "Hedge":
fused_output = self.__hedgeFused(tm_test)
else:
fused_output = self.__singleAcqFused(tm_test)
fused_output = np.array(fused_output)
if self.batch:
if fused_output.shape[0] > self.batchSize:
# medoids, clusters = k_medoids(fused_output[:,0].reshape((-1,1)), self.batchSize)
# kmedoids = KMedoids(n_clusters=self.batchSize, random_state=0).fit(fused_output[:,0].reshape((-1,1)))
# medoids = kmedoids.medoid_indices_
# medoids = kmedoids_max(fused_output[:,0].reshape((-1,1)), self.batchSize)
medoids = kmedoids_max(fused_output, self.batchSize)
else:
if self.batchSize != 0:
medoids = []
for iii in range(fused_output.shape[0]):
medoids.append(iii)
#storeObject([np.where(fused_output[:,0] == np.max(fused_output[:,0]))[0][0], medoids], "ReifiFusedMedoid-{}".format(self.currentIteration))
else:
max_index = np.where(fused_output[:,0] == np.max(fused_output[:,0]))[0][0]
medoids = [max_index]
# define the parameters for the Truth Model Evaluations
params = []
param_index = 0
self.logger.debug("Define Parameters for Truth Model Evaluations")
for iii in range(len(medoids)):
params.append({"Model Index":-1,
"Model":self.TM,
"Input Values":np.array(tm_test[int(fused_output[medoids[iii],1]),:], dtype=np.float),
"ParamIndex":param_index})
param_index += 1
if len(medoids) < self.batchSize:
for iii in range(self.batchSize - len(medoids)):
params.append({"Model Index":-1,
"Model":self.TM,
"Input Values":np.array(tm_test[np.random.randint(0,tm_test.shape[0]),:], dtype=np.float),
"ParamIndex":param_index})
param_index += 1
self.tmIterCount = 0
self.tmBudgetLeft = self.tmBudget
# If and external Truth Model is used, submit the data for
# saving to output
if self.externalTM:
self.__external_TM_data_save(params, count)
break
else:
# If the subprocesses need to be closed, close them
if not self.keepSubRunning:
for fname in range(self.multinode):
with open("{}/subprocess/close{}".format(self.workingDir, fname), 'w') as f:
f.write("Close Subprocess {}".format(fname))
self.logger.warning("Close Subprocess {}".format(fname))
# otherwise, query the Truth Model directly
count = self.__call_Truth(params, count)
if self.tmSampleOpt == "Hedge":
self.__update_Hedge_Probabilities("TM", tm_test)
# for multinode calculations, check if subprocesses are being kept
# running and restart if not
if self.keepSubRunning:
pass
else:
if (self.totalBudgetLeft < 0) or (self.currentIteration >= self.iterLimit):
pass
else:
if self.multinode != 0:
self.__restart_subs()
# save the required outputs
self.__add_to_iterationData(time()-self.timeCheck + model_cost, count)
self.timeCheck = time()
self.__save_output_dataframes()
# Update the reduced order models if they need to be retrained
if (self.tmBudgetLeft < 0) or (self.tmIterCount == self.tmIterLim):
if self.updateROMafterTM:
self.__update_reduced_order_models__()
self.__save_calculation_state()
self.logger.info("Iteration {} Completed Successfully".format(self.currentIteration))
if (self.totalBudgetLeft < 0) or (self.currentIteration >= self.iterLimit):
self.logger.info("#########################################################")
self.logger.info("# #")
self.logger.info("# Iteration or Budget Limit Met or Exceeded #")
self.logger.info("# BAREFOOT Calculation Completed #")
self.logger.info("# #")
self.logger.info("#########################################################")
start_process = False
self.currentIteration += 1
self.tmIterCount += 1
@__close_subs_on_error
def run_BATCH(self):
self.logger.info("Start Batch Only Framework Calculation")
start_process = True
while start_process:
text_num = str(self.currentIteration)
self.logger.info("#########################################################")
self.logger.info("# Start Iteration : {} #".format("0"*(4-len(text_num))+text_num))
self.logger.info("#########################################################")
self.timeCheck = time()
# Check constraints and obtain latin-hypercube sampled test points
evalP = []
for pp in range(len(self.ROM)):
evalP.append(np.array(self.evaluatedPoints.loc[self.evaluatedPoints['Model Index']==pp,self.inputLabels]))
x_test, check = apply_constraints(self.sampleCount,
self.nDim, resolution=self.res,
A=self.A, b=self.b, Aeq=self.Aeq, beq=self.beq,
lb=self.lb, ub=self.ub, func=self.constr_func,
sampleScheme=self.sampleScheme, opt_sample_size=True,
evaluatedPoints=evalP)
# If constraints can't be satisfied, notify the user in the log
if check:
self.logger.debug("ROM - All constraints applied successfully {}/{}".format(x_test.shape[0], self.sampleCount))
else:
self.logger.critical("ROM - Sample Size NOT met due to constraints! Continue with {}/{} Samples".format(x_test.shape[0], self.sampleCount))
parameters = []
paramFileData = []
count = np.zeros((len(self.ROM)+1))
if self.multiObjective:
current = np.array(self.iterationData.iloc[:,4:5+len(self.ROM)])[-1,:]
extra_data = [self.pareto[0], self.goal, self.MORef]
else:
current = np.array(self.iterationData.iloc[:,3:])[-1,:]
extra_data = []
count[0:len(self.ROM)] = current[1:]
count[-1] = current[0]
parameterIndex = 0
parameterFileIndex = 0
with open("data/reificationObj", 'wb') as f:
dump(self.modelGP, f)
for jj in range(self.hpCount):
paramFileData.append((self.currentIteration+1, x_test, self.fusedModelHP[jj,:],
self.maxTM, self.tmSampleOpt, extra_data))
parameters.append([parameterIndex, parameterFileIndex])
parameterIndex += 1
# save each 1000 parameter sets to a file to reduce the amount of memory used
if len(paramFileData) == 1000:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(paramFileData, f)
paramFileData = []
parameterFileIndex += 1
parameterIndex = 0
# dump the last of the parameter datasets
if len(paramFileData) != 0:
with open("data/parameterSets/parameterSet{}".format(parameterFileIndex), 'wb') as f:
dump(paramFileData, f)
# Set up a list of outputs for each of the results from the acquisition
# functions if using the GP Hedge approach
if self.tmSampleOpt == "Hedge":
prob = self.gpHedgeProbTM/np.sum(self.gpHedgeProbTM)
index_Max_prob = np.where(prob == np.max(prob))[0][0]
self.gpHedgeTrackTM.append(prob)
kg_out = [[],[],[],[],[],[]]
else:
# Create just a single list for when using other Acquisition Functions
kg_output = []
# Start the concurrent calculations and return the output array
self.logger.info("Start Acquisition Function Evaluations for {} Parameter Sets".format(len(parameters)))
try:
self.pool.terminate()
self.pool = Pool()
except AttributeError:
self.pool = Pool()
with self.pool as executor:
for result_from_process in zip(parameters, executor.map(batchAcquisitionFunc,parameters)):
params, results = result_from_process
if self.tmSampleOpt == "Hedge":
kg_out[0].append(results[0])
kg_out[1].append(results[1])
kg_out[2].append(results[2])
kg_out[3].append(results[3])
kg_out[4].append(results[4])
kg_out[5].append(results[5])
else:
kg_output.append(results)
self.logger.info("Acquisition Function Evaluations Completed")
def get_medoids(kg_output):
# Cluster the acquisition function output
if kg_output.shape[0] > self.batchSize:
medoids = kmedoids_max(kg_output[:,0:3], self.batchSize)
else:
medoids = []
for iii in range(kg_output.shape[0]):
medoids.append(iii)
return medoids
if self.tmSampleOpt == "Hedge":
clust = []
for pp in range(6):
kg_out[pp] = np.array(kg_out[pp])
#kg_out[pp][np.isinf(kg_out[pp])] = -1e16
kg_out[pp] = np.unique(kg_out[pp], axis=0)
med = get_medoids(kg_out[pp])
if pp == index_Max_prob:
medoids = med
kg_output = kg_out[pp]
index = np.array(kg_out[pp][med,1],dtype=np.uint8)
clust.append(np.array(x_test[index,:], dtype=np.float))
with open("data/hedgeClusters", 'wb') as f:
dump(clust, f)
else:
kg_output = np.array(kg_output)
kg_output = np.unique(kg_output, axis=0)
medoids = get_medoids(kg_output)
model_cost = time()-self.timeCheck
self.timeCheck = time()
# define the parameters for the Truth Model Evaluations
params = []
param_index = 0
self.logger.debug("Define Parameters for Model Evaluations")
for iii in range(len(medoids)):
params.append({"Model Index":-1,
"Model":self.TM,
"Input Values":np.array(x_test[int(kg_output[medoids[iii],1]),:], dtype=np.float),
"ParamIndex":param_index})
param_index += 1
count = self.__call_Truth(params, count)
if self.acquisitionFunc == "Hedge":
self.__update_Hedge_Probabilities("TM", x_test)
# save the required outputs
self.__add_to_iterationData(time()-self.timeCheck + model_cost, count)
self.timeCheck = time()
self.__save_output_dataframes()
# Update the reduced order models if they need to be retrained
if (self.tmBudgetLeft < 0) or (self.tmIterCount == self.tmIterLim):
if self.updateROMafterTM:
self.__update_reduced_order_models__()
self.__save_calculation_state()
self.logger.info("Iteration {} Completed Successfully".format(self.currentIteration))
if (self.totalBudgetLeft < 0) or (self.currentIteration >= self.iterLimit):
self.logger.info("#########################################################")
self.logger.info("# #")
self.logger.info("# Iteration or Budget Limit Met or Exceeded #")
self.logger.info("# BAREFOOT Calculation Completed #")
self.logger.info("# #")
self.logger.info("#########################################################")
start_process = False
self.currentIteration += 1
self.tmIterCount += 1
def run_optimization(self):
if self.reification:
return self.run_BAREFOOT()
else:
return self.run_BATCH()
    def __kg_calc_clustering(self, kg_output):
        """
        Cluster the acquisition-function output from the Reduced Order Model
        stage.

        Some processing is required to obtain the correct format: for every
        candidate test point only the best acquisition value per model is
        retained, then the surviving (value, point index, model index)
        triples are clustered with k-medoids (or reduced to the single best
        point when not batching), and the matching rows of ``kg_output`` are
        returned.

        Column layout of ``kg_output`` as used here: column 1 holds the
        acquisition value, column 2 the test-point index, and column 3 the
        model index.

        Parameters
        ----------
        kg_output : ndarray
            Acquisition function evaluations from the ROM stage.

        Returns
        -------
        ndarray
            The rows of ``kg_output`` corresponding to the selected medoids.
        """
        # kg_output = np.array(kg_output, dtype=object)
        # point_selection maps test-point index -> {'models': model indices
        # seen, 'nu': best value per model, 'kg_out': kg_output row of that
        # best value}
        point_selection = {}
        self.logger.debug("Extract Points for Clustering from Acquisition Function Evaluations")
        # process the output to obtain the correct format for the clustering
        # (model index, acquisition function value, input index)
        for iii in range(kg_output.shape[0]):
            try:
                # keep only the best acquisition value per (point, model) pair
                if int(kg_output[iii,3]) in point_selection[kg_output[iii,2]]['models']:
                    if kg_output[iii,1] > point_selection[kg_output[iii,2]]['nu'][int(kg_output[iii,3])]:
                        point_selection[kg_output[iii,2]]['nu'][int(kg_output[iii,3])] = kg_output[iii,1]
                        point_selection[kg_output[iii,2]]['kg_out'][int(kg_output[iii,3])] = iii
                else:
                    point_selection[kg_output[iii,2]]['models'].append(int(kg_output[iii,3]))
                    point_selection[kg_output[iii,2]]['nu'][int(kg_output[iii,3])] = kg_output[iii,1]
                    point_selection[kg_output[iii,2]]['kg_out'][int(kg_output[iii,3])] = iii
            except KeyError:
                # first time this test point is seen: initialize its record
                # with placeholder values for every model
                point_selection[kg_output[iii,2]] = {'models':[int(kg_output[iii,3])],
                                                     'nu':[],
                                                     'kg_out':[]}
                for mm in range(len(self.ROM)):
                    point_selection[kg_output[iii,2]]['nu'].append(1e-6)
                    point_selection[kg_output[iii,2]]['kg_out'].append(-1)
                point_selection[kg_output[iii,2]]['nu'][int(kg_output[iii,3])] = kg_output[iii,1]
                point_selection[kg_output[iii,2]]['kg_out'][int(kg_output[iii,3])] = iii
        # flatten the per-point records into rows of
        # [value, point index, model index, kg_output row index]
        med_input = [[],[],[],[]]
        for index in point_selection.keys():
            for jjj in range(len(point_selection[index]['models'])):
                med_input[0].append(point_selection[index]['nu'][point_selection[index]['models'][jjj]])
                med_input[1].append(index)
                med_input[2].append(point_selection[index]['models'][jjj])
                med_input[3].append(point_selection[index]['kg_out'][point_selection[index]['models'][jjj]])
        med_input = np.array(med_input).transpose()
        # Since there may be too many duplicates when using small numbers of
        # test points and hyper-parameters check to make sure and then return
        # all the points if there are less than the required number of points
        self.logger.debug("Cluster Acquistion Function Evaluations | {}".format(med_input.shape))
        if self.batch:
            if med_input.shape[0] > self.batchSize:
                # medoids, clusters = k_medoids(med_input[:,0:3], self.batchSize)
                # kmedoids = KMedoids(n_clusters=self.batchSize, random_state=0).fit(med_input[:,0].reshape((-1,1)))
                # medoids = kmedoids.medoid_indices_
                # medoids = kmedoids_max(med_input[:,0].reshape((-1,1)), self.batchSize)
                medoids = kmedoids_max(med_input[:,0:3], self.batchSize)
            else:
                # fewer candidates than the batch size: cluster everything
                # medoids, clusters = k_medoids(med_input[:,0:3], 1)
                # kmedoids = KMedoids(n_clusters=self.batchSize, random_state=0).fit(med_input[:,0].reshape((-1,1)))
                # medoids = kmedoids.medoid_indices_
                # medoids = kmedoids_max(med_input[:,0].reshape((-1,1)), 1)
                medoids = kmedoids_max(med_input[:,0:3], med_input.shape[0])
            #storeObject([np.where(med_input[:,0] == np.max(med_input[:,0]))[0][0], medoids], "ReifiROM-{}".format(self.currentIteration))
        else:
            # not batching: take the single best acquisition value
            max_index = np.where(med_input[:,0] == np.max(med_input[:,0]))[0][0]
            medoids = [max_index]
        # next, need to get the true values for each of the medoids and update the
        # models before starting next iteration.
        self.logger.debug("Extract True Values for Medoids")
        medoid_index = []
        for i in range(len(medoids)):
            medoid_index.append(int(med_input[medoids[i],3]))
        medoid_out = kg_output[medoid_index,:]
        self.logger.info("Clustering of Acquisition Function Evaluations Completed")
        return medoid_out
def __start_subprocesses__(self, subprocess_count):
    """Create, launch and monitor the cluster subprocess jobs.

    Writes a job script and a submit script per subprocess, launches each
    submit script, then polls (every 10 s, for up to 2 hours) for the
    ``sub<i>.start`` marker files the subprocesses create once running.

    Parameters:
        subprocess_count: number of subprocess jobs to create.

    Returns:
        (calcPerProcess, all_started): the number of calculations assigned
        to each subprocess, and whether every subprocess signalled startup.
    """
    # The subprocesses require a separate directory in the main BAREFOOT
    # directory, so these need to be created if they don't exist
    try:
        os.mkdir('{}/subprocess'.format(self.workingDir))
        self.logger.debug("Subprocess Directory Created")
    except FileExistsError:
        self.logger.debug("Subprocess Directory Already Exists")
        pass
    try:
        os.mkdir('{}/subprocess/LSFOut'.format(self.workingDir))
        self.logger.debug("LSFOut Directory Created")
    except FileExistsError:
        self.logger.debug("LSFOut Directory Already Exists")
        pass
    # These strings are used to create the job files for the subprocesses used
    # when running the calculations in multi-node configuration
    with open("{}/data/processStrings".format(self.workingDir), 'rb') as f:
        processStrings = load(f)
    self.logger.info("Strings for Subprocess Shell Files Loaded")
    # extract the two process strings and calculate how many calculations
    # will be done per subprocess
    subProcessStr = processStrings[0]
    runProcessStr = processStrings[1]
    # total calculation count depends on the framework configuration
    if self.batch and self.reification:
        calculation_count = self.sampleCount*self.hpCount*(len(self.ROM))
    elif self.batch and not self.reification:
        calculation_count = self.hpCount
    else:
        calculation_count = self.sampleCount*(len(self.ROM))
    # ceiling division: the last subprocess may receive fewer calculations
    if calculation_count % subprocess_count == 0:
        calcPerProcess = int(calculation_count/subprocess_count)
    else:
        calcPerProcess = int(calculation_count/subprocess_count) + 1
    self.logger.info("{} Subprocess Jobs | {} Calculations per Subprocess".format(subprocess_count, calcPerProcess))
    # Start all subprocesses
    for fname in range(subprocess_count):
        with open("{}/subprocess/{}.sh".format(self.workingDir, fname), 'w') as f:
            f.write(subProcessStr.format(fname))
        with open("{}/subprocess/submit{}.sh".format(self.workingDir, fname), 'w') as f:
            f.write(runProcessStr.format(fname))
        # the submit script must be executable before it can be run
        os.chmod("{}/subprocess/submit{}.sh".format(self.workingDir, fname), 0o775)
        subprocess.run(["{}/subprocess/submit{}.sh".format(self.workingDir, fname)], shell=True)
    # wait for all subprocesses to start
    all_pending = True
    self.logger.info("Waiting for Subprocess Jobs to start")
    count = 0
    all_started = False
    while all_pending:
        sleep(10)  # poll interval: 10 seconds
        total_started = 0
        for fname in range(subprocess_count):
            # each subprocess drops a sub<i>.start marker file once running
            if os.path.exists("{}/subprocess/sub{}.start".format(self.workingDir, fname)):
                total_started += 1
        count += 1
        if total_started == subprocess_count:
            all_pending = False
            all_started = True
            self.logger.info("All Subprocess Jobs Started Successfully")
        # waiting for 2 hours for all the subprocesses to start will stop the waiting
        # and return false from this function to say that all the processes weren't
        # started yet. This is to save on computational hours if there is a problem
        # but this functionality can be disabled if desired.
        if count == 720:  # 720 polls x 10 s = 2 hours
            all_pending = False
            self.logger.critical("Subprocess Jobs Outstanding after 2 Hours | {}/{} Jobs Started".format(total_started, subprocess_count))
    return calcPerProcess, all_started
def __update_reduced_order_models__(self):
    """Retrain the reduced order models and rebuild the reification object.

    If the reduced order models are configured to be retrained after more
    truth model evaluations have been conducted, this function re-evaluates
    all the previously evaluated ROM points against the retrained models and
    reconstructs the reification object (and the evaluated-points dataframe)
    with the new values. Truth-model points are reused, not re-evaluated.
    """
    self.train_func("results/{}/".format(self.calculationName))
    self.logger.info("Recalculate all evaluated points for ROM to ensure correct model results are used")
    self.ROMInitInput = []
    self.ROMInitOutput = []
    TMDataX = self.reificationObj.x_true
    TMDataY = self.reificationObj.y_true
    params = []           # ROM evaluation tasks to re-run
    params_truth = []     # truth-model points (stored results are reused)
    count = []            # per-model counters; last entry is the truth model
    param_index = 0
    modelIter_record = []  # [model index, iteration] for every evaluated point
    for jj in range(len(self.ROM)+1):
        count.append(0)
    for jj in range(self.evaluatedPoints.shape[0]):
        modelIter_record.append([self.evaluatedPoints.loc[jj,"Model Index"], self.evaluatedPoints.loc[jj,"Iteration"]])
        if self.evaluatedPoints.loc[jj,"Model Index"] != -1:
            # reduced-order model point: schedule a re-evaluation
            params.append({"Model Index":self.evaluatedPoints.loc[jj,"Model Index"],
                           "Model":self.ROM[self.evaluatedPoints.loc[jj,"Model Index"]],
                           "Input Values":self.evaluatedPoints.loc[jj,self.inputLabels],
                           "ParamIndex":param_index})
        else:
            # truth-model point (index -1): keep the stored evaluation
            count[-1] += 1
            params_truth.append({"Model Index":-1,
                                 "Model":self.TM,
                                 "Input Values":self.evaluatedPoints.loc[jj,self.inputLabels],
                                 "ParamIndex":param_index,
                                 "Evaluation": self.evaluatedPoints.loc[jj,"y"]})
        param_index += 1
    for ii in range(len(self.ROM)):
        self.ROMInitInput.append(np.zeros_like(self.reificationObj.x_train[ii]))
        self.ROMInitOutput.append(np.zeros_like(self.reificationObj.y_train[ii]))
    temp_x = np.zeros((len(modelIter_record), self.nDim))
    temp_y = np.zeros(len(modelIter_record))
    temp_index = np.zeros(len(modelIter_record))
    temp_iter = np.array(modelIter_record)  # shape (N, 2): [model index, iteration]
    # Run the evaluations concurrently and store the outputs
    try:
        self.pool.terminate()
        self.pool = Pool()
    except AttributeError:
        self.pool = Pool()
    with self.pool as executor:
        for result_from_process in zip(params, executor.map(call_model, params)):
            par, results = result_from_process
            if par["Model Index"] != -1:
                self.ROMInitInput[par["Model Index"]][count[par["Model Index"]],:] = par["Input Values"]
                self.ROMInitOutput[par["Model Index"]][count[par["Model Index"]]] = results
                temp_x[par["ParamIndex"],:] = par["Input Values"]
                temp_y[par["ParamIndex"]] = results
                temp_index[par["ParamIndex"]] = par["Model Index"]
                count[par["Model Index"]] += 1
    for pp in range(len(params_truth)):
        temp_x[params_truth[pp]["ParamIndex"]] = params_truth[pp]["Input Values"]
        temp_y[params_truth[pp]["ParamIndex"]] = params_truth[pp]["Evaluation"]
        temp_index[params_truth[pp]["ParamIndex"]] = -1
    self.logger.info("Create New Reification Object")
    # Recreate the reification object for further calculations
    self.reificationObj = model_reification(self.ROMInitInput, self.ROMInitOutput,
                                            self.modelParam['model_l'],
                                            self.modelParam['model_sf'],
                                            self.modelParam['model_sn'],
                                            self.modelParam['means'],
                                            self.modelParam['std'],
                                            self.modelParam['err_l'],
                                            self.modelParam['err_sf'],
                                            self.modelParam['err_sn'],
                                            TMDataX, TMDataY,
                                            len(self.ROM), self.nDim, self.covFunc)
    # save the new data
    # Adds new data points to the evaluated datapoints dataframe
    temp = np.zeros((temp_x.shape[0], self.nDim+3))
    temp[:,0] = temp_index
    # BUG FIX: temp_iter has shape (N, 2); assigning the whole array into a
    # single column raised a broadcast ValueError. Only the iteration number
    # (second element of each record) belongs in this column.
    temp[:,1] = temp_iter[:,1]
    temp[:,2] = temp_y
    temp[:,3:] = temp_x
    temp = pd.DataFrame(temp, columns=self.evaluatedPoints.columns)
    self.evaluatedPoints = temp
    self.__add_to_iterationData(time()-self.timeCheck, np.array(count))
    self.timeCheck = time()
    self.logger.info("New Evaluations Saved | Reification Object Updated")
def __external_TM_data_save(self, TMEvaluationPoints, count):
    """Save next-best points for evaluation by an external Truth Model.

    Writes the candidate input points (with zero-filled output placeholder
    columns) to TruthModelEvaluationPoints.csv, pickles the evaluation
    counters, and saves the calculation state so the framework can be
    restarted once external results are available.

    Parameters:
        TMEvaluationPoints: list of dicts, each with an "Input Values" entry.
        count: per-model evaluation counters to persist alongside the points.
    """
    # BUG FIX: allocate one placeholder output column per objective. The
    # original always allocated a single column, so for multi-objective runs
    # the column-name list (y1, y2) was longer than the array and the
    # DataFrame construction below raised a ValueError.
    num_outputs = 2 if self.multiObjective else 1
    outputData = np.zeros((len(TMEvaluationPoints), self.nDim+num_outputs))
    for ii in range(len(TMEvaluationPoints)):
        outputData[ii,0:self.nDim] = TMEvaluationPoints[ii]["Input Values"]
    colNames = deepcopy(self.inputLabels)
    if self.multiObjective:
        colNames.append("y1")
        colNames.append("y2")
    else:
        colNames.append("y")
    outputData = pd.DataFrame(outputData, columns=colNames)
    outputData.to_csv('{}/results/{}/TruthModelEvaluationPoints.csv'.format(self.workingDir,
                                                                            self.calculationName))
    with open('{}/results/{}/countData'.format(self.workingDir, self.calculationName), 'wb') as f:
        dump(count, f)
    self.__save_calculation_state()
    self.logger.critical("Truth Model Evaluation Points Copied to File | Restart Process when results are ready")
def __external_ROM_data_save(self, medoid_out, x_val):
    """Save next-best points for evaluation by external reduced order models.

    Writes one row per selected medoid: the input point, the index of the
    ROM that must evaluate it (column 3 of medoid_out), and zero-filled
    placeholder output column(s) for the external process to fill in.
    """
    # Define the parameter sets needed for each calculation
    self.logger.debug("Define Parameters for ROM Function Evaluations")
    # BUG FIX: allocate one placeholder output column per objective. The
    # original always allocated a single column, so for multi-objective runs
    # the column-name list (y1, y2) exceeded the array width and the
    # DataFrame construction below raised a ValueError.
    num_outputs = 2 if self.multiObjective else 1
    output = np.zeros((medoid_out.shape[0], self.nDim+1+num_outputs))
    for iii in range(medoid_out.shape[0]):
        output[iii,0:self.nDim] = x_val[iii,:]
        output[iii,self.nDim] = medoid_out[iii,3]
        # output column(s) stay 0.0 as placeholders for the external results
    colNames = deepcopy(self.inputLabels)
    colNames.append("Model Index")
    if self.multiObjective:
        colNames.append("y1")
        colNames.append("y2")
    else:
        colNames.append("y")
    outputData = pd.DataFrame(output, columns=colNames)
    outputData.to_csv('{}/results/{}/ROMEvaluationPoints.csv'.format(self.workingDir,
                                                                     self.calculationName))
def __external_TM_data_load(self, workingDir, calculationName):
    """Reload the framework state and ingest external Truth Model results.

    When restarting the framework after using an external Truth Model, the
    saved calculation state is restored, the externally filled-in
    TruthModelEvaluationPoints.csv is read back, and the reification object,
    budget, best-value tracker and iteration records are updated.
    """
    # When restarting the framework after using an external Truth Model
    # the data from the model must be loaded into the framework
    self.__load_from_save(workingDir, calculationName)
    with open('{}/results/{}/countData'.format(self.workingDir, self.calculationName), 'rb') as f:
        count = load(f)
    TMData = pd.read_csv('{}/results/{}/TruthModelEvaluationPoints.csv'.format(self.workingDir,
                                                                               self.calculationName))
    TMData = np.array(TMData)
    temp_x = np.zeros((TMData.shape[0], self.nDim))
    if self.multiObjective:
        temp_y = np.zeros((TMData.shape[0], 2))
    else:
        temp_y = np.zeros((TMData.shape[0]))
    temp_index = np.zeros((TMData.shape[0]))
    for ii in range(TMData.shape[0]):
        # NOTE(review): inputs are read from columns 0:nDim but the output
        # from column nDim+1 — consistent only if the CSV carries one extra
        # (pandas index) column; confirm against __external_TM_data_save,
        # which writes the CSV without index=False.
        temp_x[ii,:] = TMData[ii,0:self.nDim]
        if self.multiObjective:
            temp_y[ii,:] = TMData[ii,self.nDim+1:self.nDim+3]
        else:
            temp_y[ii] = TMData[ii,self.nDim+1]
        temp_index[ii] = -1  # -1 marks truth-model evaluations
        count[-1] += 1
    # After loading the data, the reification object is updated and the new
    # data saved to the normal framework outputs
    self.logger.info("Truth Model Evaluations Loaded")
    # NOTE(review): points are recorded before the goal scaling below, so the
    # dataframe stores unscaled values while the reification object receives
    # goal-scaled ones — presumably intentional; confirm.
    self.__add_to_evaluatedPoints(temp_index, temp_x, temp_y)
    temp_y = self.goal*temp_y
    self.reificationObj.update_truth(temp_x, temp_y)
    self.totalBudgetLeft -= self.batchSize*self.modelCosts[-1]
    # track the best truth-model value observed so far
    if self.multiObjective:
        if np.max(temp_y[:,0]) > self.maxTM[:,0]:
            self.maxTM[:,0] = np.max(temp_y[:,0])
    else:
        if np.max(temp_y) > self.maxTM:
            self.maxTM = np.max(temp_y)
    self.__add_to_iterationData(time()-self.timeCheck, count)
    self.timeCheck = time()
    if self.updateROMafterTM:
        self.__update_reduced_order_models__()
    self.__save_output_dataframes()
    self.__save_calculation_state()
    self.logger.info("Iteration {} Completed Successfully".format(self.currentIteration))
    self.currentIteration += 1
    self.tmIterCount += 1
    self.logger.info("Finished Loading External TM Data")
def __external_ROM_data_load(self):
    """Ingest externally computed ROM evaluations into the framework.

    Reads ROMEvaluationPoints.csv, updates the reification GP(s) with each
    result and returns (temp_x, temp_y, temp_index, costs, count, n_points).
    """
    count = np.zeros((len(self.ROM)+1))
    # recover the running evaluation counters from the last iteration record
    if self.multiObjective:
        current = np.array(self.iterationData.iloc[:,4:5+len(self.ROM)])[-1,:]
    else:
        current = np.array(self.iterationData.iloc[:,3:])[-1,:]
    count[0:len(self.ROM)] = current[1:]
    count[-1] = current[0]
    ROMData = pd.read_csv('{}/results/{}/ROMEvaluationPoints.csv'.format(self.workingDir,
                                                                         self.calculationName))
    ROMData = np.array(ROMData)
    temp_x = np.zeros((ROMData.shape[0], self.nDim))
    if self.multiObjective:
        temp_y = np.zeros((ROMData.shape[0],2))
    else:
        temp_y = np.zeros(ROMData.shape[0])
    temp_index = np.zeros(ROMData.shape[0])
    costs = np.zeros(ROMData.shape[0])
    # Extract the externally computed results
    self.logger.info("ROM Function Evaluations Completed | {} Calculations".format(ROMData.shape[0]))
    for ii in range(ROMData.shape[0]):
        # BUG FIX: the model index column is read back from CSV as a float;
        # cast to int before using it as a list/array index.
        model_index = int(ROMData[ii,(self.nDim)])
        costs[ii] += self.modelCosts[model_index]
        temp_x[ii,:] = ROMData[ii,0:(self.nDim)]
        temp_index[ii] = model_index
        if self.multiObjective:
            results = self.goal*ROMData[ii,(self.nDim+1):]
            temp_y[ii,:] = results
            # NOTE(review): results is indexed as 2-D here — this assumes
            # self.goal has a leading axis; confirm its shape.
            self.reificationObj[0].update_GP(temp_x[ii,:], results[0,0], model_index)
            self.reificationObj[1].update_GP(temp_x[ii,:], results[0,1], model_index)
        else:
            results = self.goal*ROMData[ii,(self.nDim+1)]
            # BUG FIX: temp_y is 1-D in single-objective runs; the original
            # used temp_y[ii,:] unconditionally, which raises IndexError here.
            temp_y[ii] = results
            self.reificationObj.update_GP(temp_x[ii,:], results, model_index)
        count[model_index] += 1
    return temp_x, temp_y, temp_index, costs, count, ROMData.shape[0]
##############################################################################
##############################################################################
##############################################################################
def model1c(x):
    """First analytic reduced-order model of the two-objective test problem.

    Accepts a single point as a 1-D array or several points as an
    (n, 2) array; inputs in [0, 1] are mapped onto [-pi, pi] before the
    sine series are evaluated. Returns an (n, 2) array, one column per
    objective.
    """
    if x.ndim == 1:
        x = x[np.newaxis, :]
    z = np.pi * (2 * x - 1)
    obj1 = (-80 * np.sin(2 * z[:, 0]) / (441 * np.pi)
            - 160 * np.sin(4 * z[:, 0]) / (81 * np.pi)
            + np.pi * np.sin(5 * z[:, 0]) / 2)
    obj2 = (-48 * np.sin(z[:, 1]) / (1225 * np.pi)
            - 16 * np.sin(3 * z[:, 1]) / (81 * np.pi)
            - 240 * np.sin(5 * z[:, 1]) / (121 * np.pi))
    return np.stack((obj1, obj2), axis=-1)
def model2c(x):
    """Second analytic reduced-order model of the two-objective test problem.

    Same interface as model1c: 1-D input is promoted to a single row,
    inputs in [0, 1] are mapped onto [-pi, pi], and an (n, 2) array with
    one column per objective is returned.
    """
    if x.ndim == 1:
        x = x[np.newaxis, :]
    z = np.pi * (2 * x - 1)
    obj1 = (-60 * np.sin(2.5 * z[:, 0]) / (441 * np.pi)
            - 160 * np.sin(4.2 * z[:, 0]) / (81 * np.pi)
            + np.pi * np.sin(5.3 * z[:, 0]) / 2)
    obj2 = (-60 * np.sin(0.5 * z[:, 1]) / (1200 * np.pi)
            - 16 * np.sin(3.2 * z[:, 1]) / (81 * np.pi)
            - 240 * np.sin(5.3 * z[:, 1]) / (121 * np.pi))
    return np.stack((obj1, obj2), axis=-1)
def model3c(x):
    """Third analytic reduced-order model of the two-objective test problem.

    Same interface as model1c: 1-D input is promoted to a single row,
    inputs in [0, 1] are mapped onto [-pi, pi], and an (n, 2) array with
    one column per objective is returned.
    """
    if x.ndim == 1:
        x = x[np.newaxis, :]
    z = np.pi * (2 * x - 1)
    obj1 = (-100 * np.sin(1.5 * z[:, 0]) / (400 * np.pi)
            - 160 * np.sin(4.5 * z[:, 0]) / (81 * np.pi)
            + np.pi * np.sin(5.5 * z[:, 0]) / 2)
    obj2 = (-36 * np.sin(1.5 * z[:, 1]) / (1200 * np.pi)
            - 16 * np.sin(3.5 * z[:, 1]) / (81 * np.pi)
            - 240 * np.sin(5.5 * z[:, 1]) / (121 * np.pi))
    return np.stack((obj1, obj2), axis=-1)
def truthmodel(x):
    """Ground-truth objective of the two-objective test problem.

    After mapping inputs from [0, 1] onto [-pi, pi], each objective is
    |z_k| * sin(c_k * z_k) with c = (5, 6). Accepts a single 1-D point or
    an (n, 2) array and returns an (n, 2) array.
    """
    if x.ndim == 1:
        x = x[np.newaxis, :]
    z = np.pi * (2 * x - 1)
    obj1 = np.abs(z[:, 0]) * np.sin(5 * z[:, 0])
    obj2 = np.abs(z[:, 1]) * np.sin(6 * z[:, 1])
    return np.stack((obj1, obj2), axis=-1)
def runMultiObjective(gg):
    """Configure and run one multi-objective BAREFOOT calculation.

    gg: integer tag appended to the calculation name ("bfIMMI-<gg>") so
    repeated runs write to distinct result directories.
    """
    # Three cheap analytic models stand in for the expensive truth model
    ROMList = [model1c, model2c, model3c]
    framework = barefoot(ROMModelList=ROMList, TruthModel=truthmodel,
                         calcInitData=False, initDataPathorNum="data/testMOInitData", nDim=2,
                         #calcInitData=True, initDataPathorNum=[2,2,2,5], nDim=2,
                         calculationName="bfIMMI-{}".format(gg), acquisitionFunc="EI",
                         restore_calc=False, logname="barefoot", tmSampleOpt="EI", multiNode=0,
                         multiObjective=True, multiObjectRef=np.array([-np.pi, -np.pi]), reification=False)
    # GP hyperparameters for the three ROMs (length scales, signal/noise
    # variances, output means/stds, discrepancy-GP settings and per-model
    # costs) — values presumably pre-fit offline; confirm before reuse.
    modelParam = {'model_l': [[0.1608754, 0.2725361],
                              [0.17094462, 0.28988983],
                              [0.10782092, 0.18832378]],
                  'model_sf': [6.95469898,8.42299498,2.98009081],
                  'model_sn': [0.05, 0.05, 0.05],
                  'means': [-2.753353101070388e-16,-1.554312234475219e-16,4.884981308350689e-17],
                  'std': [1.2460652976290285,1.3396622409903254,1.3429644403939915],
                  'err_l': [[0.1,0.1],[0.1,0.1],[0.1,0.1]],
                  'err_sf': [1,1,1],
                  'err_sn': [0.01, 0.01, 0.01],
                  'costs': [1,1,1,1]}
    framework.initialize_parameters(modelParam=modelParam, covFunc="M32", iterLimit=50,
                                    sampleCount=25, hpCount=50, batchSize=2,
                                    tmIter=1, upperBound=1, lowBound=0.0001, fusedPoints=10)
    framework.run_optimization()
# Script entry point: run a single multi-objective calculation with tag 0.
if __name__ == "__main__":
    runMultiObjective(0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `ndextcgaloader` package."""
import os
import tempfile
import shutil
import unittest
from ndexutil.config import NDExUtilConfig
from ndextcgaloader import ndexloadtcga
from ndextcgaloader.ndexloadtcga import NDExNdextcgaloaderLoader
import json
import ndex2
class dotdict(dict):
    """Dictionary subclass that exposes its keys as attributes.

    Reading a missing attribute returns None (dict.get semantics) rather
    than raising AttributeError; setting and deleting attributes writes to
    and removes the underlying dictionary entries.
    """

    def __getattr__(self, name):
        # only called when normal attribute lookup fails, so real dict
        # methods/attributes are unaffected
        return self.get(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
class TestNdextcgaloader(unittest.TestCase):
    """Tests for `ndextcgaloader` package."""

    def setUp(self):
        """Set up test fixtures, if any."""
        # Resolve package-relative paths (load plan, network list file,
        # sample-network directories) via the loader module's helpers.
        self._package_dir = ndexloadtcga.get_package_dir()
        self._networklistfile = ndexloadtcga.get_networksfile()
        self._loadplan_path = ndexloadtcga.get_load_plan()
        self._networks_for_testing = ndexloadtcga.get_networksdir()
        self._testing_dir = ndexloadtcga.get_testsdir()
        self._sample_networks_in_tests_dir = os.path.join(self._testing_dir, 'sample_networks')
        # Minimal argument namespace; dotdict gives attribute-style access
        # like the argparse Namespace the loader normally receives.
        self._the_args = {
            'conf': None,
            'datadir': self._sample_networks_in_tests_dir,
            'dataurl': None,
            'loadplan': self._loadplan_path,
            'logconf': None,
            'networklistfile': self._networklistfile
        }
        self._the_args = dotdict(self._the_args)
        self.NDExTCGALoader = NDExNdextcgaloaderLoader(self._the_args)

    def tearDown(self):
        """Tear down test fixtures, if any."""

    def _test_parse_arguments(self):
        """Tests parse arguments"""
        # NOTE(review): the leading underscore hides this test from
        # unittest/pytest discovery — presumably disabled on purpose.
        # Defaults with no CLI arguments:
        res = ndexloadtcga._parse_arguments('hi', [])
        self.assertEqual(res.profile, 'ndextcgaloader')
        self.assertEqual(res.verbose, 0)
        self.assertEqual(res.logconf, None)
        self.assertEqual(res.conf, None)
        # Explicit arguments override every default:
        someargs = ['-vv','--conf', 'foo', '--logconf', 'hi',
                    '--profile', 'myprofy']
        res = ndexloadtcga._parse_arguments('hi', someargs)
        self.assertEqual(res.profile, 'myprofy')
        self.assertEqual(res.verbose, 2)
        self.assertEqual(res.logconf, 'hi')
        self.assertEqual(res.conf, 'foo')

    def _test_setup_logging(self):
        """ Tests logging setup"""
        # NOTE(review): leading underscore hides this test from discovery.
        # Passing None must fail (no .logconf attribute on None)
        try:
            ndexloadtcga._setup_logging(None)
            self.fail('Expected AttributeError')
        except AttributeError:
            pass
        # args.logconf is None
        res = ndexloadtcga._parse_arguments('hi', [])
        ndexloadtcga._setup_logging(res)
        # args.logconf set to a file
        try:
            temp_dir = tempfile.mkdtemp()
            logfile = os.path.join(temp_dir, 'log.conf')
            # write a minimal fileConfig-style logging configuration
            with open(logfile, 'w') as f:
                f.write("""[loggers]
keys=root
[handlers]
keys=stream_handler
[formatters]
keys=formatter
[logger_root]
level=DEBUG
handlers=stream_handler
[handler_stream_handler]
class=StreamHandler
level=DEBUG
formatter=formatter
args=(sys.stderr,)
[formatter_formatter]
format=%(asctime)s %(name)-12s %(levelname)-8s %(message)s""")
            res = ndexloadtcga._parse_arguments('hi', ['--logconf',
                                                      logfile])
            ndexloadtcga._setup_logging(res)
        finally:
            shutil.rmtree(temp_dir)

    def validate_network(self, network, sample_network, file_name):
        """Assert that a generated network matches its stored sample:
        same edge/node/node-attribute counts and identical entries."""
        edges, nodes, node_attributes = network.edges, network.nodes, network.nodeAttributes
        sample_edges, sample_nodes, sample_node_attributes = sample_network.edges, sample_network.nodes, sample_network.nodeAttributes
        self.assertEqual(len(edges), len(sample_edges), 'Edges in sample ' + file_name)
        self.assertEqual(len(nodes), len(sample_nodes), 'Nodes in sample ' + file_name)
        self.assertEqual(len(node_attributes), len(sample_node_attributes), 'Node Attributes in sample ' + file_name)
        for key, value in edges.items():
            sample_value = sample_edges[key]
            self.assertEqual(value, sample_value, 'Edges are different in ' + file_name)
        for key, value in nodes.items():
            sample_value = sample_nodes[key]
            self.assertEqual(value, sample_value, 'Nodes are different in ' + file_name)
        for key, value in node_attributes.items():
            sample_value = sample_node_attributes[key]
            self.assertEqual(value, sample_value, 'Nodes attributes are different in ' + file_name)

    def test_main(self):
        """Tests main function"""
        # try where loading config is successful
        try:
            #temp_dir = tempfile.mkdtemp()
            #confile = os.path.join(temp_dir, 'some.conf')
            #with open(confile, 'w') as f:
            #    f.write("""[hi]
            #    {user} = bob
            #    {pw} = smith
            #    {server} = dev.ndexbio.org""".format(user=NDExUtilConfig.USER,
            #                                         pw=NDExUtilConfig.PASSWORD,
            #                                         server=NDExUtilConfig.SERVER))
            #res = ndexloadtcga.main(['myprog.py', '--profile', 'ndextcgaloader'])
            #self.assertEqual(res, 0)
            # Read the list of .txt network files and derive the expected
            # .cx sample file names.
            with open(self._networklistfile, 'r') as networks:
                list_of_network_files = networks.read().splitlines()
            list_of_network_files_in_cx = [network.replace('.txt', '.cx') for network in list_of_network_files]
            with open(self._loadplan_path, 'r') as f:
                self._loadplan = json.load(f)
            count = 1
            self.NDExTCGALoader.parse_load_plan()
            self.NDExTCGALoader.prepare_report_directory()
            # Regenerate every network and compare it against its stored
            # sample CX file.
            for network_file in list_of_network_files:
                network_file_in_cx = network_file.replace('.txt', '.cx')
                # generate NiceCX from network_file
                df, network_description, id_to_gene_dict = self.NDExTCGALoader.get_pandas_dataframe(network_file)
                network = self.NDExTCGALoader.generate_nice_cx_from_panda_df(df,
                    network_file, network_description, id_to_gene_dict)
                path_to_sample_network = os.path.join(self._sample_networks_in_tests_dir, network_file_in_cx)
                network_sample_in_cx = ndex2.create_nice_cx_from_file(path_to_sample_network)
                self.validate_network(network, network_sample_in_cx, network_file_in_cx)
                print('{}) network {} passed'.format(count, network_file.replace('.txt', '')))
                count += 1
        finally:
            print('done')
        #finally:
        #    shutil.rmtree(temp_dir)
|
import numpy as np
class HMM:
    """Discrete Hidden Markov Model with randomly initialised parameters.

    Attributes:
        numStates: number of hidden states (N).
        numObservables: number of observation symbols (M).
        alpha: (N, N) transition matrix; alpha[i][j] = P(state j at t+1 | state i at t).
        beta: (N, M) emission matrix; beta[j][k] = P(observing k | state j).
        pi: (N,) initial state distribution.
    """

    def __init__(self, states, observables):
        # Set states and observables
        self.numStates = states
        self.numObservables = observables
        self.randomizeProbabilities()

    def randomizeProbabilities(self):
        """Sample random row-stochastic alpha/beta matrices and a random pi.

        Each row of alpha and beta, and pi itself, is drawn from a flat
        Dirichlet distribution so it sums to 1.
        """
        # BUG FIX: the original referenced the undefined names `states` and
        # `observables` (parameters of __init__, not in scope here), which
        # raised NameError on construction; use the instance attributes.
        self.alpha = np.ones((self.numStates, self.numStates))
        self.beta = np.ones((self.numStates, self.numObservables))
        for i in range(0, self.numStates):
            self.alpha[i] = np.random.dirichlet(self.alpha[i])
            self.beta[i] = np.random.dirichlet(self.beta[i])
        self.pi = np.random.dirichlet([1] * self.numStates)

    # Decoding algorithms
    # The Viterbi decoding algorithm finds the most likely path of states
    def decodeViterbi(self, obs):
        """Return (most likely state path, its probability) for `obs`."""
        # Initialization
        T = len(obs)
        delta = np.zeros((self.numStates, T))
        path = np.zeros((self.numStates, T), 'int32')
        # t = 0
        delta[:, 0] = np.squeeze(self.pi * self.beta[:, obs[0]])
        # Recursion: delta[j, t] = max_i delta[i, t-1] * alpha[i, j] * beta[j, obs[t]]
        for t in range(1, T):
            delta[:, t] = np.max(np.dot(delta[:, [t-1]], [self.beta[:, obs[t]]]) * self.alpha, 0)
            path[:, t] = np.argmax(np.tile(delta[:, [t-1]], (1, self.numStates)) * self.alpha, 0)
        # Termination
        backtrace = [np.argmax(delta[:, -1])]  # -1 addresses column T-1
        probability = np.max(delta[:, -1])
        # Backtrace: follow stored best-predecessor pointers
        for i in range(T-1, 0, -1):
            backtrace.append(path[backtrace[-1], i])
        # The list was built backwards from time T-1, so reverse it
        return backtrace[::-1], probability

    def decodeViterbiLog(self, obs):
        """Viterbi decoding in the log domain to avoid numerical underflow.

        Returns (most likely state path, log-probability of that path).
        """
        # Initialization: move all parameters to the log domain
        pi = np.log(self.pi)
        beta = np.log(self.beta)
        alpha = np.log(self.alpha)
        T = len(obs)
        delta = np.zeros((self.numStates, T))
        path = np.zeros((self.numStates, T), 'int32')
        # t = 0
        delta[:, 0] = np.squeeze(pi + beta[:, obs[0]])
        # Recursion (products become sums in the log domain)
        for t in range(1, T):
            delta[:, t] = np.max(np.tile(delta[:, [t-1]], (1, self.numStates)) + alpha, 0) + beta[:, obs[t]]
            path[:, t] = np.argmax(np.tile(delta[:, [t-1]], (1, self.numStates)) + alpha, 0)
        # Termination
        backtrace = [np.argmax(delta[:, -1])]  # -1 addresses column T-1
        probability = np.max(delta[:, -1])
        # Backtrace
        for i in range(T-1, 0, -1):
            backtrace.append(path[backtrace[-1], i])
        return backtrace[::-1], probability

    def decodeForwardBackward(self, obs):
        """Posterior (forward-backward) decoding.

        Returns (most likely state per time step, posterior matrix) where
        the posterior has shape (numStates, T) and each column sums to 1.
        The original implementation was an empty stub returning None.
        """
        fwd = self.forward(obs)
        bwd = self.backward(obs)
        posterior = fwd * bwd
        posterior /= posterior.sum(axis=0, keepdims=True)
        return np.argmax(posterior, axis=0), posterior

    def forward(self, obs):
        """Return the forward trellis of shape (numStates, T).

        fwd[j, t] = P(obs[0..t], state j at t). BUG FIX: the original body
        ended in a bare `for`, which is a syntax error.
        """
        T = len(obs)
        fwd = np.zeros((self.numStates, T))
        fwd[:, 0] = self.pi * self.beta[:, obs[0]]
        for t in range(1, T):
            fwd[:, t] = fwd[:, t-1].dot(self.alpha) * self.beta[:, obs[t]]
        return fwd

    def backward(self, obs):
        """Return the backward trellis of shape (numStates, T).

        bwd[i, t] = P(obs[t+1..T-1] | state i at t); the last column is 1.
        """
        T = len(obs)
        bwd = np.ones((self.numStates, T))
        for t in range(T-2, -1, -1):
            bwd[:, t] = self.alpha.dot(self.beta[:, obs[t+1]] * bwd[:, t+1])
        return bwd

    # Training algorithms
    # Viterbi training (Segmental K-means): uses the Viterbi decoder to
    # re-estimate alpha and beta from the best state segmentation.
    def trainViterbi(self, data, epochs=10):
        """Viterbi-training skeleton with random restarts.

        Restarts `epochs` times from random parameters and keeps the set
        that produced the highest-probability Viterbi path. The actual
        alpha/beta re-estimation step is still TODO, so each restart's
        inner loop converges immediately.
        """
        prob = 0.0
        # fall back to the current parameters if no restart improves on them
        newAlpha = self.alpha
        newBeta = self.beta
        for e in range(0, epochs):
            self.randomizeProbabilities()
            while True:
                b, p = self.decodeViterbi(data)
                # BUG FIX: copy rather than alias, otherwise the convergence
                # test below compares each array with itself and is always 0
                oldAlpha = self.alpha.copy()
                oldBeta = self.beta.copy()
                # TODO: Update alpha and beta parameters
                if (np.sum(np.abs(self.alpha-oldAlpha))+np.sum(np.abs(self.beta-oldBeta))) < 0.00001:
                    break
            if p > prob:
                newAlpha = self.alpha
                newBeta = self.beta
                prob = p
        self.alpha = newAlpha
        self.beta = newBeta

    # The Baum-Welch training algorithm combines EM with the
    # forward-backward recursions to update alpha and beta.
    def trainBaumWelch(self, data):
        """Baum-Welch (EM) training - not yet implemented."""
        return None
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# justice-legal-service (1.22.2)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class UploadLocalizedPolicyVersionAttachmentResponse(Model):
    """Upload localized policy version attachment response (UploadLocalizedPolicyVersionAttachmentResponse)

    Properties:
        attachment_checksum: (attachmentChecksum) OPTIONAL str

        attachment_location: (attachmentLocation) OPTIONAL str

        attachment_upload_url: (attachmentUploadUrl) OPTIONAL str
    """

    # region fields

    attachment_checksum: str  # OPTIONAL
    attachment_location: str  # OPTIONAL
    attachment_upload_url: str  # OPTIONAL

    # endregion fields

    # region with_x methods

    def with_attachment_checksum(self, value: str) -> UploadLocalizedPolicyVersionAttachmentResponse:
        self.attachment_checksum = value
        return self

    def with_attachment_location(self, value: str) -> UploadLocalizedPolicyVersionAttachmentResponse:
        self.attachment_location = value
        return self

    def with_attachment_upload_url(self, value: str) -> UploadLocalizedPolicyVersionAttachmentResponse:
        self.attachment_upload_url = value
        return self

    # endregion with_x methods

    # region to methods

    def to_dict(self, include_empty: bool = False) -> dict:
        # Only set attributes are serialised; include_empty emits "" for
        # attributes that were never assigned.
        result: dict = {}
        if hasattr(self, "attachment_checksum"):
            result["attachmentChecksum"] = str(self.attachment_checksum)
        elif include_empty:
            result["attachmentChecksum"] = ""
        if hasattr(self, "attachment_location"):
            result["attachmentLocation"] = str(self.attachment_location)
        elif include_empty:
            result["attachmentLocation"] = ""
        if hasattr(self, "attachment_upload_url"):
            result["attachmentUploadUrl"] = str(self.attachment_upload_url)
        elif include_empty:
            result["attachmentUploadUrl"] = ""
        return result

    # endregion to methods

    # region static methods

    @classmethod
    def create(
        cls,
        attachment_checksum: Optional[str] = None,
        attachment_location: Optional[str] = None,
        attachment_upload_url: Optional[str] = None,
    ) -> UploadLocalizedPolicyVersionAttachmentResponse:
        instance = cls()
        if attachment_checksum is not None:
            instance.attachment_checksum = attachment_checksum
        if attachment_location is not None:
            instance.attachment_location = attachment_location
        if attachment_upload_url is not None:
            instance.attachment_upload_url = attachment_upload_url
        return instance

    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> UploadLocalizedPolicyVersionAttachmentResponse:
        instance = cls()
        if not dict_:
            return instance
        if "attachmentChecksum" in dict_ and dict_["attachmentChecksum"] is not None:
            instance.attachment_checksum = str(dict_["attachmentChecksum"])
        elif include_empty:
            instance.attachment_checksum = ""
        if "attachmentLocation" in dict_ and dict_["attachmentLocation"] is not None:
            instance.attachment_location = str(dict_["attachmentLocation"])
        elif include_empty:
            instance.attachment_location = ""
        if "attachmentUploadUrl" in dict_ and dict_["attachmentUploadUrl"] is not None:
            instance.attachment_upload_url = str(dict_["attachmentUploadUrl"])
        elif include_empty:
            instance.attachment_upload_url = ""
        return instance

    @classmethod
    def create_many_from_dict(cls, dict_: dict, include_empty: bool = False) -> Dict[str, UploadLocalizedPolicyVersionAttachmentResponse]:
        # BUG FIX: iterating a dict yields only its keys, so the original
        # `for k, v in dict_` raised ValueError on unpacking; .items() is
        # required to get (key, value) pairs.
        return {k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_.items()} if dict_ else {}

    @classmethod
    def create_many_from_list(cls, list_: list, include_empty: bool = False) -> List[UploadLocalizedPolicyVersionAttachmentResponse]:
        return [cls.create_from_dict(i, include_empty=include_empty) for i in list_] if list_ else []

    @classmethod
    def create_from_any(cls, any_: any, include_empty: bool = False, many: bool = False) -> Union[UploadLocalizedPolicyVersionAttachmentResponse, List[UploadLocalizedPolicyVersionAttachmentResponse], Dict[Any, UploadLocalizedPolicyVersionAttachmentResponse]]:
        if many:
            if isinstance(any_, dict):
                return cls.create_many_from_dict(any_, include_empty=include_empty)
            elif isinstance(any_, list):
                return cls.create_many_from_list(any_, include_empty=include_empty)
            else:
                raise ValueError()
        else:
            return cls.create_from_dict(any_, include_empty=include_empty)

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        return {
            "attachmentChecksum": "attachment_checksum",
            "attachmentLocation": "attachment_location",
            "attachmentUploadUrl": "attachment_upload_url",
        }

    @staticmethod
    def get_required_map() -> Dict[str, bool]:
        return {
            "attachmentChecksum": False,
            "attachmentLocation": False,
            "attachmentUploadUrl": False,
        }

    # endregion static methods
|
<gh_stars>0
import os.path
from time import time
import numpy as np
from matplotlib import pyplot as plt
import burg_toolkit as burg
def loading_saving_lib():
    """Round-trip an object library definition through YAML.

    Loads the test library, prints its details and writes it back to the
    same file. (The original also assigned an identical, unused path to a
    second variable `fn`; removed as dead code.)
    """
    fn2 = '/home/rudorfem/datasets/object_libraries/test_library/test_library_def.yaml'
    lib = burg.ObjectLibrary.from_yaml(fn2)
    lib.print_details()
    lib.to_yaml(fn2)
def generate_urdfs():
    """Load the round-trip test library, generate URDF files for every
    object and save the updated library definition back to YAML."""
    library_file = '/home/rudorfem/datasets/object_libraries/test_library/test_library_roundtrip.yaml'
    library = burg.ObjectLibrary.from_yaml(library_file)
    library.generate_urdf_files('/home/rudorfem/datasets/object_libraries/test_library/urdf/')
    library.to_yaml(library_file)
def making_thumbnail():
    """Generate a thumbnail image for one object of the test library and
    save the updated library definition back to YAML."""
    library_file = '/home/rudorfem/datasets/object_libraries/test_library/test_library_roundtrip.yaml'
    library_dir = os.path.dirname(library_file)
    library = burg.ObjectLibrary.from_yaml(library_file)
    print(library)
    obj = library['006_mustard_bottle']
    obj.generate_thumbnail(os.path.join(library_dir, 'thumbnails', f'{obj.identifier}.png'))
    library.to_yaml(library_file)
def render_pc():
    """Render depth data of one object from icosphere camera poses, store it
    as point-cloud .npy files and visualise the clouds with the mesh."""
    fn2 = '/home/rudorfem/datasets/object_libraries/test_library/test_library_roundtrip.yaml'
    lib_dir = os.path.dirname(fn2)
    lib = burg.ObjectLibrary.from_yaml(fn2)
    obj = lib['006_mustard_bottle']
    # renderer writes into a per-object output directory
    r = burg.render.MeshRenderer(output_dir=os.path.join(lib_dir, obj.identifier))
    # one scale and no in-plane rotations keeps the pose count small
    poses = burg.render.CameraPoseGenerator().icosphere(subdivisions=1, in_plane_rotations=1, scales=1)
    scene = burg.Scene(burg.ObjectInstance(object_type=obj))
    # 'npy-pc' stores each rendering as a point cloud named pc<i>.npy
    r.render_depth(scene, poses, depth_fn_type='npy-pc', depth_fn_func=lambda i: f'pc{i}')
    # collect a ground plane, the original mesh and every rendered cloud
    pcs = [burg.visualization.create_plane(), obj.mesh]
    for i in range(len(poses)):
        pc = np.load(os.path.join(lib_dir, obj.identifier, f'pc{i}.npy'))
        pcs.append(burg.util.numpy_pc_to_o3d(pc))
    burg.visualization.show_geometries(pcs)
def pybullet_kram():
    # Experiment: load a saved scene, repeatedly let the physics simulation
    # settle it, and track how far the target object drifts from its initial
    # and previous poses.
    fn2 = '/home/rudorfem/datasets/object_libraries/test_library/test_library_roundtrip.yaml'
    lib = burg.ObjectLibrary.from_yaml(fn2)
    scene, lib, printout = burg.Scene.from_yaml('/home/rudorfem/Downloads/moving_box.yaml', object_library=lib)
    target = '003_cracker_box'
    idx = 0  # object to inspect
    # locate the target object's index within the scene
    for i, instance in enumerate(scene.objects):
        print(instance.object_type.identifier)
        if instance.object_type.identifier == target:
            idx = i
    print(f'tracking position of target object: {idx} - {scene.objects[idx].object_type.identifier}')
    burg.visualization.show_geometries([scene])
    pose = scene.objects[idx].pose.copy()
    # wrap poses in single-element GraspSets so burg.metrics can compare them
    gs = burg.GraspSet.from_poses(pose.reshape(1, 4, 4))
    gs_prev = gs
    sim = burg.scene_sim.SceneSimulator(verbose=False, timeout=10)
    for i in range(30):
        print(f'{i}: simulator found rest after {sim.simulate_scene(scene)} seconds')
        gs_new = burg.GraspSet.from_poses(scene.objects[idx].pose.reshape(1, 4, 4))
        # distance to the very first pose and to the previous iteration's pose
        print(f'* init pose dist: pos {burg.metrics.euclidean_distances(gs, gs_new)[0, 0]:.7f}; '
              f'ang {np.rad2deg(burg.metrics.angular_distances(gs, gs_new))[0, 0]:.7f} degree')
        print(f'* prev pose dist: pos {burg.metrics.euclidean_distances(gs_prev, gs_new)[0, 0]:.7f}; '
              f'ang {np.rad2deg(burg.metrics.angular_distances(gs_prev, gs_new))[0, 0]:.7f} degree')
        gs_prev = gs_new
    sim.dismiss()
    print('initial rotation')
    print(gs.rotation_matrices)
    print('final rotation')
    print(gs_new.rotation_matrices)
    # sanity check: distance metrics on two unrelated random poses
    pose1 = burg.GraspSet.from_poses(burg.sampling.random_poses(1))
    pose2 = burg.GraspSet.from_poses(burg.sampling.random_poses(1))
    print(f'* sanity check: pos {burg.metrics.euclidean_distances(pose1, pose2)[0, 0]:.7f}; '
          f'ang {np.rad2deg(burg.metrics.angular_distances(pose1, pose2))[0, 0]:.7f} degree')
def compute_stable_poses():
    """Visualize every precomputed stable pose of the large clamp on a ground plane."""
    library_path = '/home/rudorfem/datasets/object_libraries/test_library/test_library_roundtrip.yaml'
    library = burg.ObjectLibrary.from_yaml(library_path)
    clamp = library['051_large_clamp']
    # alternative: recompute stable poses instead of using the stored ones
    #stable_poses = burg.mesh_processing.compute_stable_poses(clamp, verify_in_sim=False)
    #print(stable_poses)
    for probability, pose in clamp.stable_poses:
        instance = burg.ObjectInstance(clamp, pose)
        burg.visualization.show_geometries([burg.visualization.create_plane(), instance.get_mesh()])
    # alternative: recompute for the whole library and persist the result
    #library.compute_stable_poses()
    #library.to_yaml(library_path)
def check_stable_pose():
    """Randomly rotate/translate one stable pose of the large clamp and display each variant."""
    library_path = '/home/rudorfem/datasets/object_libraries/test_library/test_library_roundtrip.yaml'
    library = burg.ObjectLibrary.from_yaml(library_path)
    clamp = library['051_large_clamp']
    base_pose = clamp.stable_poses.sample_pose()
    from scipy.spatial.transform import Rotation as R
    rng = np.random.default_rng()
    for _ in range(5):
        # random yaw around the z axis keeps the pose stable on the plane
        yaw = rng.random() * np.pi * 2
        rotation = np.eye(4)
        rotation[:3, :3] = R.from_rotvec(yaw * np.array([0, 0, 1])).as_matrix()
        pose = rotation @ base_pose
        # random xy placement within a 0.5 x 0.5 area
        pose[:2, 3] = rng.random(2) * (0.5, 0.5)
        instance = burg.ObjectInstance(clamp, pose)
        burg.visualization.show_geometries([burg.visualization.create_plane(), instance.get_mesh()])
def check_scene_sampling():
    # End-to-end check of scene sampling, printout image/PDF/PNG export and
    # scene YAML round-tripping, with rough timings printed along the way.
    fn2 = '/home/rudorfem/datasets/object_libraries/test_library/test_library_def.yaml'
    lib = burg.ObjectLibrary.from_yaml(fn2)
    object_names = list(lib.keys())  # NOTE(review): unused — kept as-is
    print('all attribs?', lib.objects_have_all_attributes())
    # scene = burg.sampling.sample_scene(lib, burg.Scene.size_A3, instances_per_scene=5, instances_per_object=1)
    # burg.visualization.show_geometries([scene])
    # img = scene.render_printout() # uses pyrender
    scene_size = burg.constants.SIZE_A4
    scene = burg.sampling.sample_scene(lib, scene_size, instances_per_scene=1, instances_per_object=1)
    burg.visualization.show_geometries([scene])
    printout = burg.printout.Printout(scene_size)
    printout.add_scene(scene)
    # time raw image generation
    start_time = time()
    img = printout.get_image()
    elapsed = time() - start_time
    print(f'producing image took {elapsed*1000:.4f} ms')
    plt.imshow(img, cmap='gray')
    plt.show()
    print('marker info:\n', printout.marker_info)
    # time PDF export (includes regenerating the image)
    start_time = time()
    printout.save_pdf(os.path.join(os.path.dirname(fn2), 'printout.pdf'), page_size=burg.constants.SIZE_A4)
    elapsed = time() - start_time
    print(f'producing image, pdf and saving took {elapsed*1000:.4f} ms')
    printout.save_image(os.path.join(os.path.dirname(fn2), 'printout.png'))
    # round-trip the scene through YAML and print both versions for comparison
    yaml_fn = os.path.join(os.path.dirname(fn2), 'scene.yaml')
    scene.to_yaml(yaml_fn, object_library=lib, printout=printout)
    print(scene)
    scene, library, printout = burg.Scene.from_yaml(yaml_fn, lib)
    print(scene)
    print(printout.to_dict())
    # display the printout's marker frame together with the scene
    frame = printout.get_marker_frame()
    print(frame)
    frame = burg.visualization.create_frame(pose=frame)
    burg.visualization.show_geometries([scene, frame])
def check_rendering():
    # Exercise the render engine lifecycle: create, dismiss, and re-create a
    # pybullet engine, then render the same object twice with the fresh engine.
    fn2 = '/home/rudorfem/datasets/object_libraries/test_library/test_library_roundtrip.yaml'
    lib = burg.ObjectLibrary.from_yaml(fn2)
    obj = lib['051_large_clamp']
    engine = burg.render.RenderEngineFactory.create('pybullet')
    print('engine:', engine._p)  # NOTE(review): peeks at a private engine handle
    renderer = burg.render.ThumbnailRenderer(engine, size=128)
    engine.dismiss()
    print('engine:', engine._p)  # handle state after dismissing the engine
    engine2 = burg.render.RenderEngineFactory.create('pybullet')
    print('engine2:', engine2._p)
    #img = renderer.render(obj)
    # re-bind the renderer to the new engine before rendering
    renderer = burg.render.ThumbnailRenderer(engine2, size=128)
    print('engine2:', engine2._p)
    img2 = renderer.render(obj)
    img = renderer.render(obj)
    # show both renders side by side
    fig, ax = plt.subplots(1, 2)
    ax[0].imshow(img)
    ax[1].imshow(img2)
    plt.show()
def show_off():
    """Endlessly sample A4-sized scenes from the YCB test set, save a printout PDF, and display each."""
    library_path = '/home/rudorfem/datasets/l2g-ycb-test-set/object_library.yaml'
    library = burg.ObjectLibrary.from_yaml(library_path)
    print(library)
    while True:  # runs until interrupted by the user
        scene = burg.sampling.sample_scene(library, burg.constants.SIZE_A4, instances_per_scene=5)
        printout = burg.printout.Printout(burg.constants.SIZE_A4)
        printout.add_scene(scene)
        printout.save_pdf(os.path.join(os.path.dirname(library_path), 'printout.pdf'))
        burg.visualization.show_geometries([scene])
def check_z_values():
    """Repeatedly sample scenes and persist scene YAML plus printout PDF to a tmp dir for inspection."""
    library_path = '/home/rudorfem/datasets/l2g-ycb-test-set/object_library.yaml'
    library = burg.ObjectLibrary.from_yaml(library_path)
    while True:  # runs until interrupted by the user
        scene = burg.sampling.sample_scene(library, burg.constants.SIZE_A4, instances_per_scene=5)
        printout = burg.printout.Printout(burg.constants.SIZE_A4)
        printout.add_scene(scene)
        tmp_dir = '/home/rudorfem/tmp'
        scene.to_yaml(os.path.join(tmp_dir, 'scene.yaml'), library, printout)
        printout.save_pdf(os.path.join(tmp_dir, 'printout.pdf'))
        burg.visualization.show_geometries([scene])
def io_scene():
    """Load a scene YAML and immediately write it back (serialization round-trip check)."""
    scene_file = '/home/rudorfem/tmp/scene.yaml'
    scene, library, template = burg.Scene.from_yaml(scene_file)
    scene.to_yaml(scene_file, library, template)
def recreate_urdf():
    """Rewrite every URDF in the folder with its mass value divided by 1000.

    NOTE(review): this is not idempotent — each run divides the mass again;
    intended to be run exactly once (grams -> kilograms conversion).
    """
    from xml.etree import ElementTree
    urdf_dir = '/home/rudorfem/datasets/l2g-ycb-test-set/urdf/'
    for urdf_name in os.listdir(urdf_dir):
        urdf_path = os.path.join(urdf_dir, urdf_name)
        tree = ElementTree.parse(urdf_path)
        mass_node = tree.find('.//mass')
        mass_node.set('value', str(float(mass_node.get('value')) / 1000))
        tree.write(urdf_path)
if __name__ == "__main__":
    # Ad-hoc experiment launcher: uncomment exactly the scenario to run.
    # loading_saving_lib()
    # compute_stable_poses()
    # check_scene_sampling()
    # check_stable_pose()
    # pybullet_kram()
    # check_rendering()
    # show_off()
    # check_z_values()
    # io_scene()
    recreate_urdf()
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Cache temperature specifies how the browser cache should be configured before
the page run.
See design doc for details:
https://docs.google.com/document/u/1/d/12D7tkhZi887g9d0U2askU9JypU_wYiEI7Lw0bfwxUgA
"""
import logging
from telemetry.core import exceptions
# Cache temperature constants consumed by EnsurePageCacheTemperature() below.
# Default Cache Temperature. The page doesn't care which browser cache state
# it is run on.
ANY = 'any'
# Emulates cold runs. Clears various caches and data with using tab.ClearCache()
# and tab.ClearDataForOrigin().
COLD = 'cold'
# Emulates warm browser runs. Ensures that the page was visited once in a
# different renderer.
WARM_BROWSER = 'warm-browser'
# Emulates hot runs. Ensures that the page was visited at least twice in a
# different renderer before the run.
HOT_BROWSER = 'hot-browser'
# Emulates warm renderer runs. Ensures that the page was visited once before the
# run in the same renderer.
WARM = 'warm'
# Emulates hot renderer runs. Ensures that the page was visited at least twice
# in the same renderer before the run.
HOT = 'hot'
# All supported temperatures, in the order they are dispatched.
ALL_CACHE_TEMPERATURES = [ANY, COLD, WARM_BROWSER, HOT_BROWSER, WARM, HOT]
class _MarkTelemetryInternal(object):
def __init__(self, tab, identifier):
self.tab = tab
self.identifier = identifier
def __enter__(self):
# This marker must match the regexp in
# ChromeProcessHelper.findTelemetryInternalRanges_().
self.tab.AddTimelineMarker('telemetry.internal.%s.start' % self.identifier)
return self
def __exit__(self, exception_type, exception_value, traceback):
if exception_type:
return True
# This marker must match the regexp in
# ChromeProcessHelper.findTelemetryInternalRanges_().
self.tab.AddTimelineMarker('telemetry.internal.%s.end' % self.identifier)
return True
def _ClearCacheAndData(tab, url):
tab.ClearCache(force=True)
tab.ClearDataForOrigin(url)
def _WarmCache(page, tab, temperature):
    """Visit `page` once, inside a telemetry-internal marker, so its resources get cached."""
    with _MarkTelemetryInternal(tab, 'warm_cache.%s' % temperature):
        page.RunNavigateSteps(tab.action_runner)
        page.RunPageInteractions(tab.action_runner)
        tab.Navigate("about:blank")
        tab.WaitForDocumentReadyStateToBeComplete()
        # Stop the service worker after each cache warming so service worker
        # script evaluation is executed again on the next navigation.
        tab.StopAllServiceWorkers()
class CacheManipulator(object):
    """Base class for cache-temperature preparation strategies."""

    RENDERER_TEMPERATURE = None
    BROWSER_TEMPERATURE = None

    @staticmethod
    def PrepareRendererCache(page, tab, previous_page):
        """Bring the renderer cache into this manipulator's temperature state."""
        raise NotImplementedError

    @classmethod
    def PrepareBrowserCache(cls, page, browser, previous_page):
        """Prepare the browser-wide cache by warming in a scratch tab."""
        scratch_tab = browser.tabs.New()
        cls.PrepareRendererCache(page, scratch_tab, previous_page)
        scratch_tab.Close()
        # Flush and discard current tracing data, so the trace does not contain
        # events from the tab just closed.
        browser.platform.tracing_controller.FlushTracing(discard_current=True)
class AnyCacheManipulator(CacheManipulator):
    """No-op manipulator: the page accepts any pre-existing cache state."""

    RENDERER_TEMPERATURE = ANY
    BROWSER_TEMPERATURE = None

    @staticmethod
    def PrepareRendererCache(page, tab, previous_page):
        pass  # nothing to do — any cache state is acceptable

    @classmethod
    def PrepareBrowserCache(cls, page, browser, previous_page):
        raise exceptions.Error('Prepare browser cache not supported')
class ColdCacheManipulator(CacheManipulator):
    """Emulates a cold run: caches and origin data are cleared before the page."""

    RENDERER_TEMPERATURE = COLD
    BROWSER_TEMPERATURE = None

    @staticmethod
    def PrepareRendererCache(page, tab, previous_page):
        if previous_page is None:
            # DiskCache initialization is performed asynchronously on Chrome
            # start-up. Navigate to an inexistent URL and wait for the
            # navigation to complete, ensuring DiskCache is initialized before
            # measuring (avoids performance skew).
            # TODO(kouhei) Consider moving this logic to PageCyclerStory
            with _MarkTelemetryInternal(tab, 'ensure_diskcache'):
                tab.Navigate("http://does.not.exist")
                tab.WaitForDocumentReadyStateToBeComplete()
        _ClearCacheAndData(tab, page.url)

    @classmethod
    def PrepareBrowserCache(cls, page, browser, previous_page):
        raise exceptions.Error('Prepare browser cache not supported')
class WarmCacheManipulator(CacheManipulator):
    """Emulates a warm run: the page was visited once before in the same renderer."""

    RENDERER_TEMPERATURE = WARM
    BROWSER_TEMPERATURE = WARM_BROWSER

    @staticmethod
    def PrepareRendererCache(page, tab, previous_page):
        # A preceding cold run of the same URL already left the cache warm.
        already_warm = (previous_page is not None and
                        previous_page.url == page.url and
                        previous_page.cache_temperature == COLD)
        if already_warm:
            if '#' in page.url:
                # TODO(crbug.com/768780): Move this operation to tab.Navigate().
                # Navigate to an inexistent URL to avoid an in-page hash
                # navigation: unlike PCv1, PCv2 iterates the same URL for
                # different cache configurations, which may otherwise issue
                # blink in-page hash navigations.
                with _MarkTelemetryInternal(tab, 'avoid_double_hash_navigation'):
                    tab.Navigate("http://does.not.exist")
                    tab.WaitForDocumentReadyStateToBeComplete()
            # Stop all service workers before running tests so service worker
            # start-up time is included in the measurement.
            tab.StopAllServiceWorkers()
        else:
            _ClearCacheAndData(tab, page.url)
            _WarmCache(page, tab, WARM)
class HotCacheManipulator(CacheManipulator):
    """Emulates a hot run: the page was visited at least twice in the same renderer."""

    RENDERER_TEMPERATURE = HOT
    BROWSER_TEMPERATURE = HOT_BROWSER

    @staticmethod
    def PrepareRendererCache(page, tab, previous_page):
        # A previous non-ANY run of the same URL already touched the cache.
        reusable = (previous_page is not None and
                    previous_page.url == page.url and
                    previous_page.cache_temperature != ANY)
        if reusable:
            if previous_page.cache_temperature == COLD:
                # Previous run left the cache warm; one more visit makes it hot.
                _WarmCache(page, tab, HOT)
            else:
                if '#' in page.url:
                    # TODO(crbug.com/768780): Move this operation to
                    # tab.Navigate(). Navigate to an inexistent URL to avoid an
                    # in-page hash navigation (PCv2 iterates the same URL for
                    # different cache configurations).
                    with _MarkTelemetryInternal(tab, 'avoid_double_hash_navigation'):
                        tab.Navigate("http://does.not.exist")
                        tab.WaitForDocumentReadyStateToBeComplete()
                # Stop all service workers before running tests so service
                # worker start-up time is included in the measurement.
                tab.StopAllServiceWorkers()
        else:
            _ClearCacheAndData(tab, page.url)
            _WarmCache(page, tab, WARM)
            _WarmCache(page, tab, HOT)
def EnsurePageCacheTemperature(page, browser, previous_page=None):
    """Dispatch to the manipulator matching page.cache_temperature.

    Renderer temperatures are prepared in the page's own tab; browser
    temperatures are prepared browser-wide. Raises NotImplementedError for an
    unknown temperature value.
    """
    temperature = page.cache_temperature
    logging.info('PageCacheTemperature: %s', temperature)
    manipulators = (AnyCacheManipulator, ColdCacheManipulator,
                    WarmCacheManipulator, HotCacheManipulator)
    for manipulator in manipulators:
        if temperature == manipulator.RENDERER_TEMPERATURE:
            manipulator.PrepareRendererCache(page, browser.tabs[0], previous_page)
            return
        if temperature == manipulator.BROWSER_TEMPERATURE:
            manipulator.PrepareBrowserCache(page, browser, previous_page)
            return
    raise NotImplementedError('Unrecognized cache temperature: %s' % temperature)
|
<filename>tests/unit_tests/test_cli_funcs/test_train.py
"""tests for vak.cli.train module"""
from configparser import ConfigParser
import os
from pathlib import Path
import shutil
import tempfile
import unittest
import vak.cli.train
# Directory of this test file; fixture locations are resolved relative to it.
HERE = Path(__file__).parent
# Shared test data shipped with the repository.
TEST_DATA_DIR = HERE.joinpath('..', '..', 'test_data')
# Config .ini files used to drive the CLI under test.
TEST_CONFIGS_DIR = TEST_DATA_DIR.joinpath('configs')
# Scripts that (re)generate the test data.
SETUP_SCRIPTS_DIR = HERE.joinpath('..', '..', 'setup_scripts')
class TestTrain(unittest.TestCase):
    """Smoke tests for vak.cli.train: training should run end-to-end on tiny test data.

    Improvements: the two test methods previously duplicated an identical
    12-argument call verbatim — factored into the private helper _run_train;
    assertTrue(len(...) == 1) replaced with the more diagnostic assertEqual.
    """

    def setUp(self):
        # Copy the checked-in train config into a temp dir and point it at the
        # test data plus temporary output dirs, so tests never modify the
        # repository or leave artifacts behind.
        self.tmp_output_dir = tempfile.mkdtemp()
        test_train_config = TEST_CONFIGS_DIR.joinpath('test_train_config.ini')
        self.tmp_config_dir = tempfile.mkdtemp()
        self.tmp_config_path = os.path.join(self.tmp_config_dir, 'tmp_test_train_config.ini')
        shutil.copy(test_train_config, self.tmp_config_path)
        # rewrite config so it points to data for testing + temporary output dirs
        config = ConfigParser()
        config.read(self.tmp_config_path)
        test_data_vds_path = list(TEST_DATA_DIR.glob('vds'))[0]
        for stem in ['train', 'val']:
            vds_path = list(test_data_vds_path.glob(f'*.{stem}.vds.json'))
            # exactly one dataset file per split must exist in the fixtures
            self.assertEqual(len(vds_path), 1)
            config['TRAIN'][f'{stem}_vds_path'] = str(vds_path[0])
        config['PREP']['output_dir'] = self.tmp_output_dir
        config['PREP']['data_dir'] = os.path.join(TEST_DATA_DIR, 'cbins', 'gy6or6', '032312')
        config['TRAIN']['root_results_dir'] = self.tmp_output_dir
        with open(self.tmp_config_path, 'w') as fp:
            config.write(fp)

    def tearDown(self):
        shutil.rmtree(self.tmp_output_dir)
        shutil.rmtree(self.tmp_config_dir)

    def _run_train(self, use_val_set):
        """Parse the temp config and invoke vak.cli.train, with or without validation."""
        config = vak.config.parse.parse_config(self.tmp_config_path)
        val_vds_path = config.train.val_vds_path if use_val_set else None
        val_step = config.train.val_step if use_val_set else None
        vak.cli.train(train_vds_path=config.train.train_vds_path,
                      val_vds_path=val_vds_path,
                      networks=config.networks,
                      num_epochs=config.train.num_epochs,
                      config_file=self.tmp_config_path,
                      val_step=val_step,
                      ckpt_step=config.train.ckpt_step,
                      patience=config.train.patience,
                      save_only_single_checkpoint_file=config.train.save_only_single_checkpoint_file,
                      normalize_spectrograms=config.train.normalize_spectrograms,
                      root_results_dir=config.train.root_results_dir,
                      save_transformed_data=False)

    def test_train_func(self):
        # make sure train runs without crashing when a validation set is supplied
        self._run_train(use_val_set=True)

    def test_train_func_no_val_set(self):
        # make sure train also runs without any validation set
        self._run_train(use_val_set=False)
if __name__ == '__main__':
    # allow running this test module directly
    unittest.main()
|
<filename>DOS/FunctionsLayer1/getNumberMatrix.py
# -*- coding: utf-8 -*-
import numpy as np
from BasicFunctions.generateNumberArray import generateNumberArray
def isRepeatedSample(numberArray, samples_list):
    """Return 0 if numberArray already occurs in samples_list, else 1.

    NOTE: the return convention is inverted relative to the name — 0 means
    "is a repeat", 1 means "is new" — and is relied upon by callers.
    """
    length = len(numberArray)
    for recorded in samples_list:
        if all(recorded[i] == numberArray[i] for i in range(length)):
            return 0
    return 1
def generateNumberMatrix(**args):
    """Sample unique random number-arrays and stack them into a matrix.

    Expected keys in args:
        N_spins: total number of spins.
        max_n_spins_in_basket: spins encoded per array entry ("basket").
        N_samples: number of distinct samples requested.
        sampling_method: 'update' (mutate the previous sample) or 'direct'
            (draw a fresh sample each iteration).

    Returns:
        (samples_matrix, N_sampled): a np.matrix of the accepted samples and
        the number of samples accepted after the first one.

    Fix: the basket-size arithmetic used Python 2 integer division ('/'),
    which yields floats under Python 3 and broke the computation; it now uses
    the modulo operator directly. The previously computed (and unused)
    num_baskets value has been dropped.
    """
    N_spins = args['N_spins']
    max_n_spins_in_basket = args['max_n_spins_in_basket']
    N_samples = args['N_samples']
    sampling_method = args['sampling_method']
    # Spins in the final, possibly partial basket (0 if N_spins divides evenly).
    num_spins_in_the_last_basket = N_spins % max_n_spins_in_basket
    samples_matrix = []
    # the first sample is always accepted
    numberArray_init = generateNumberArray(N_spins, max_n_spins_in_basket)
    samples_matrix.append(numberArray_init)
    N_sampled = 0
    # keep drawing until the requested count is reached, but never beyond the
    # 2**N_spins distinct states that exist
    while N_sampled != N_samples - 1 and N_sampled < 2 ** N_spins:
        if sampling_method == 'update':
            numberArray = updateNumberArray(numberArray_init,
                                            max_n_spins_in_basket,
                                            num_spins_in_the_last_basket)
        if sampling_method == 'direct':
            numberArray = generateNumberArray(N_spins, max_n_spins_in_basket)
        # isRepeatedSample returns 1 when the draw is NOT yet in the list
        if isRepeatedSample(numberArray, samples_matrix) != 0:
            samples_matrix.append(numberArray)
            N_sampled += 1
        numberArray_init = numberArray
    # NOTE: np.matrix kept for backward compatibility, although NumPy
    # recommends plain ndarrays.
    samples_matrix = np.matrix(samples_matrix)
    return samples_matrix, N_sampled
def updateNumberArray(numberArray_init,
                      max_n_spins_in_basket,
                      num_spins_in_the_last_basket):
    """Return a copy of numberArray_init with one randomly chosen entry resampled.

    The new value is drawn uniformly from [0, 2**b) where b is the basket size:
    the last entry uses num_spins_in_the_last_basket when that is non-zero,
    every other entry uses max_n_spins_in_basket. The input array is not
    modified. (RNG call order matches the original implementation exactly.)
    """
    updated = np.copy(numberArray_init)
    col = np.random.randint(len(numberArray_init))
    last_is_partial = (num_spins_in_the_last_basket != 0
                       and col == len(numberArray_init) - 1)
    n_bits = num_spins_in_the_last_basket if last_is_partial else max_n_spins_in_basket
    updated[col] = np.random.randint(2 ** n_bits)
    return updated
#
#### testing
#
#args={}
#args['N_spins']=100
#args['max_n_spins_in_basket']=20
#args['N_samples']=10
##M=generateNumberMatrix(**args)
#M = generateNumberMatrix(**args)
#print M[:6, :]
#print M[6:, :]
|
<gh_stars>0
"""
Import file catalog metadata from the IceProd v2 simulation database.
"""
import sys
import os
import argparse
import hashlib
import pymysql
import requests
# The crawler module (from the external file_crawler project) provides
# directory walking (generate_files) and file metadata (stat); it must be on
# PYTHONPATH, otherwise the importer cannot run at all.
try:
    from crawler import generate_files, stat
except ImportError:
    print('Requires file_crawler in PYTHONPATH')
    sys.exit(1)
# Substrings (lower-case) that identify each processing level in a file path.
level_types = {
    'detector': ['detector'],
    'L1': ['level1'],
    'L2': ['level2'],
    'L3': ['level3'],
    'L4': ['level4'],
}

def get_level(path):
    """Return the processing level for a path, or 'unknown' if nothing matches."""
    lowered = path.lower()
    for level, markers in level_types.items():
        if any(marker in lowered for marker in markers):
            return level
    return 'unknown'
# Substrings (lower-case) that identify each event generator in a file path.
generator_types = {
    'corsika': ['corsika'],
    'nugen': ['nugen','neutrino','numu','nue','nutau'],
}

def get_generator(path):
    """Return the generator type for a path, or 'unknown' if nothing matches."""
    lowered = path.lower()
    for generator, markers in generator_types.items():
        if any(marker in lowered for marker in markers):
            return generator
    return 'unknown'
def get_dataset(path):
    """Extract the dataset number from a file name.

    The dataset number is the first dot-separated part of the file name that
    starts with '01' or '02' and parses as an integer. Raises Exception when
    no such part exists.
    """
    name = path.rsplit('/')[-1]
    for part in name.split('.'):
        if part.startswith('02') or part.startswith('01'):
            try:
                return int(part)
            except Exception:
                continue
    raise Exception('cannot find dataset')
def get_job(path):
    """Extract the job number from a file name.

    The job number is the first integer part that follows the dataset part
    (the first part starting with '01'/'02' that parses as an integer).
    Raises Exception when no such part exists.

    Fix: the flag variable previously shadowed the builtin `next`, and the
    parsed dataset value was stored in an unused local; both cleaned up.
    """
    name = path.rsplit('/')[-1]
    found_dataset = False
    for part in name.split('.'):
        try:
            if found_dataset:
                return int(part)
            if part.startswith('02') or part.startswith('01'):
                int(part)  # must parse as an int to count as the dataset part
                found_dataset = True
        except Exception:
            continue
    raise Exception('cannot find job')
def main():
    """Crawl a filesystem path and register IceProd v2 simulation files in the file catalog.

    Fix: hashlib.sha512() was called with a str, which raises TypeError on
    Python 3 (this file is Python 3: print() calls, requests); it now gets a
    bytes literal.
    """
    parser = argparse.ArgumentParser(description='IceProd v2 simulation importer')
    parser.add_argument('--fc_host', default=None, help='file catalog address')
    parser.add_argument('--fc_auth_token', default=None, help='file catalog auth token')
    parser.add_argument('path', help='filesystem path to crawl')
    args = parser.parse_args()
    s = requests.Session()
    if args.fc_auth_token:
        s.headers.update({'Authorization': 'JWT '+args.fc_auth_token})
    data_template = {
        'data_type':'simulation',
        'content_status':'good',
    }
    # placeholder checksum — computing a real sha512 per file is too expensive here
    fakesha512sum = hashlib.sha512(b'dummysum').hexdigest()
    for name in generate_files(args.path):
        dataset_num = get_dataset(name)
        if dataset_num < 20000:
            # old datasets are not imported
            continue
        #dataset_id = get_dataset_id(name)
        # skip files already registered in the catalog
        r = s.get(args.fc_host+'/api/files', params={'logical_name':name})
        r.raise_for_status()
        if r.json()['files']:
            print('skipping',name)
            continue
        print('adding',name)
        row = stat(name)
        data = data_template.copy()
        data.update({
            'logical_name': name,
            'locations': [
                {'site': 'WIPAC', 'path': name},
            ],
            'file_size': int(row['size']),
            'checksum': {
                'sha512': fakesha512sum,
            },
            'create_date': row['ctime'],
            'processing_level': get_level(name),
            'iceprod':{
                'dataset': dataset_num,
                #'dataset_id': dataset_id,
                'job': get_job(name),
                #'job_id': get_job_id(name),
                #'config': 'https://iceprod2.icecube.wisc.edu/config?dataset_id='+str(dataset_id),
            },
            'simulation': {
                'generator': get_generator(name),
            },
        })
        r = s.post(args.fc_host+'/api/files', json=data)
        r.raise_for_status()
if __name__ == '__main__':
    # command-line entry point
    main()
|
<filename>dalle_pytorch/transformer.py
from functools import partial
from itertools import islice, cycle
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from dalle_pytorch.reversible import ReversibleSequence, SequentialSequence
from dalle_pytorch.attention import Attention, SparseAttention, SparseConvCausalAttention, SparseAxialCausalAttention
# helpers
def exists(val):
    """Return True when val is not None."""
    if val is None:
        return False
    return True
def default(val, d):
    """Return val when it exists (is not None), otherwise the fallback d."""
    if exists(val):
        return val
    return d
def cast_tuple(val, depth = 1):
    """Coerce val to a tuple: lists convert element-wise, tuples pass through,
    and any other value is repeated `depth` times."""
    if isinstance(val, list):
        val = tuple(val)
    if isinstance(val, tuple):
        return val
    return (val,) * depth
# classes
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before calling the wrapped module."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, **kwargs):
        normalized = self.norm(x)
        return self.fn(normalized, **kwargs)
class GEGLU(nn.Module):
    """Gated GELU: split the last dimension in half and gate one half with GELU of the other."""

    def forward(self, x):
        value, gate = x.chunk(2, dim = -1)
        return value * F.gelu(gate)
class FeedForward(nn.Module):
    """Position-wise feed-forward block: Linear (dim -> 2*mult*dim) -> GEGLU -> Dropout -> Linear (mult*dim -> dim).

    The first linear produces twice the hidden width because GEGLU halves the
    last dimension (value/gate split).

    Fix: `mult` defaults to a float (4.), so the derived layer widths were
    floats, which modern torch rejects as nn.Linear sizes; they are now cast
    to int (a no-op for the integral values actually used).
    """

    def __init__(self, dim, dropout = 0., mult = 4.):
        super().__init__()
        inner_dim = int(dim * mult)
        self.net = nn.Sequential(
            nn.Linear(dim, inner_dim * 2),
            GEGLU(),
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim)
        )

    def forward(self, x):
        return self.net(x)
class Transformer(nn.Module):
    """Stack of depth (attention, feed-forward) pairs with pre-norm, optionally reversible.

    `attn_types` cycles over the depth, selecting the attention flavour per
    layer; `sparse_attn` is broadcast to one flag per layer.
    """

    def __init__(
        self,
        *,
        dim,
        depth,
        seq_len,
        reversible = False,
        causal = True,
        heads = 8,
        dim_head = 64,
        ff_mult = 4,
        attn_dropout = 0.,
        ff_dropout = 0.,
        attn_types = None,
        image_fmap_size = None,
        sparse_attn = False
    ):
        super().__init__()

        def pick_attn_class(attn_type):
            # map the attention-type tag to its class (or pre-configured partial)
            if attn_type == 'full':
                return Attention
            if attn_type == 'sparse':
                return SparseAttention
            if attn_type == 'axial_row':
                return partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 0, image_size = image_fmap_size)
            if attn_type == 'axial_col':
                return partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 1, image_size = image_fmap_size)
            if attn_type == 'conv_like':
                return partial(SparseConvCausalAttention, seq_len = seq_len, image_size = image_fmap_size)
            raise ValueError(f'attention type "{attn_type}" is not valid')

        sparse_layer = cast_tuple(sparse_attn, depth)
        attn_type_layer = islice(cycle(cast_tuple(default(attn_types, ('full',)))), depth)

        layers = nn.ModuleList([])
        for _, _sparse_flag, attn_type in zip(range(depth), sparse_layer, attn_type_layer):
            attn_class = pick_attn_class(attn_type)
            layers.append(nn.ModuleList([
                PreNorm(dim, attn_class(dim, causal = causal, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout)),
                PreNorm(dim, FeedForward(dim, mult = ff_mult, dropout = ff_dropout))
            ]))

        execute_type = ReversibleSequence if reversible else SequentialSequence
        # only the attention layer of each pair receives the mask kwarg
        attn_route_map = {'mask': ((True, False),) * depth}
        self.layers = execute_type(layers, args_route = attn_route_map)

    def forward(self, x, **kwargs):
        return self.layers(x, **kwargs)
|
<reponame>mgotz/PyDataProcessing<filename>mg/dataprocessing/savitzky_golay.py
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 12:45:39 2015
1d and 2d savitzky_golay smoothing functions
blatantly copied from scipy cookbook
"""
import numpy as np
from math import factorial
from scipy.signal import fftconvolve
__all__ = ["savitzky_golay", "sg_2d_filter"]
def savitzky_golay(y, windowSize, order, deriv=0, rate=1):
    r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    The Savitzky-Golay filter removes high frequency noise while preserving
    the original shape and features of the signal better than e.g. moving
    averages: each point is replaced by the value (or derivative) of a
    least-squares polynomial fit over an odd-sized window centered at it.

    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    windowSize : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less then `windowSize` - 1.
    deriv : int
        the order of the derivative to compute (default = 0 means only smoothing)
    rate : number
        sample rate; the derivative is scaled by rate**deriv.

    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or it's n-th derivative).

    Raises
    ------
    ValueError
        if windowSize or order cannot be converted to int.
    TypeError
        if windowSize is even, non-positive, or too small for the order.

    References
    ----------
    .. [1] Savitzky, Golay: Smoothing and Differentiation of Data by
       Simplified Least Squares Procedures. Analytical Chemistry, 1964,
       36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing,
       Cambridge University Press, ISBN-13: 9780521880688.
    """
    try:
        # plain int()/abs(): the np.int alias was removed in NumPy 1.24
        windowSize = abs(int(windowSize))
        order = abs(int(order))
    except ValueError:
        raise ValueError("windowSize and order have to be of type int")
    if windowSize % 2 != 1 or windowSize < 1:
        raise TypeError("windowSize size must be a positive odd number")
    if windowSize < order + 2:
        raise TypeError("windowSize is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (windowSize - 1) // 2
    # precompute the filter coefficients from the pseudo-inverse of the
    # polynomial design matrix (np.mat/.A replaced by plain ndarrays, which
    # NumPy recommends and which behave identically here)
    b = np.array([[k**i for i in order_range]
                  for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
    # pad the signal at the extremes with values taken from the signal itself
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
def sg_2d_filter(z, windowSize, order, derivative=None):
    """Two-dimensional Savitzky-Golay filter.

    Fits a 2-d polynomial of the given order in a windowSize x windowSize
    neighbourhood of every sample (mirror-padding the borders) and evaluates
    it at the center, smoothing the surface z.

    Parameters
    ----------
    z : ndarray, shape (rows, cols)
        the 2-d signal to filter.
    windowSize : int
        odd edge length of the square fitting window.
    order : int
        order of the 2-d polynomial; it has (order+1)(order+2)/2 coefficients.
    derivative : None | 'col' | 'row' | 'both'
        None returns the smoothed surface; 'col'/'row' return the respective
        first derivative; 'both' returns the tuple (row, col) of derivatives.

    Returns
    -------
    ndarray with z's shape (or a tuple of two such arrays for 'both').

    Raises
    ------
    ValueError
        if windowSize is even or order is too high for the window size.

    Fixes: `derivative == None` replaced with the idiomatic (and correct for
    arbitrary objects) `derivative is None`; the previously empty docstring
    has been filled in.
    """
    # number of terms in the polynomial expression
    n_terms = (order + 1) * (order + 2) / 2.0
    if windowSize % 2 == 0:
        raise ValueError('windowSize must be odd')
    if windowSize**2 < n_terms:
        raise ValueError('order is too high for the window size')
    half_size = windowSize // 2
    # exponents of the polynomial:
    # p(x,y) = a0 + a1*x + a2*y + a3*x^2 + a4*y^2 + a5*x*y + ...
    # exps = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...]
    exps = [(k - n, n) for k in range(order + 1) for n in range(k + 1)]
    # coordinates of the points inside the window
    ind = np.arange(-half_size, half_size + 1, dtype=np.float64)
    dx = np.repeat(ind, windowSize)
    dy = np.tile(ind, [windowSize, 1]).reshape(windowSize**2, )
    # build the design matrix of the least-squares system
    A = np.empty((windowSize**2, len(exps)))
    for i, exp in enumerate(exps):
        A[:, i] = (dx**exp[0]) * (dy**exp[1])
    # pad the input array with appropriate (mirrored) values at the borders
    new_shape = z.shape[0] + 2*half_size, z.shape[1] + 2*half_size
    Z = np.zeros((new_shape))
    # top band
    band = z[0, :]
    Z[:half_size, half_size:-half_size] = band - np.abs(np.flipud(z[1:half_size+1, :]) - band)
    # bottom band
    band = z[-1, :]
    Z[-half_size:, half_size:-half_size] = band + np.abs(np.flipud(z[-half_size-1:-1, :]) - band)
    # left band
    band = np.tile(z[:, 0].reshape(-1, 1), [1, half_size])
    Z[half_size:-half_size, :half_size] = band - np.abs(np.fliplr(z[:, 1:half_size+1]) - band)
    # right band
    band = np.tile(z[:, -1].reshape(-1, 1), [1, half_size])
    Z[half_size:-half_size, -half_size:] = band + np.abs(np.fliplr(z[:, -half_size-1:-1]) - band)
    # central band
    Z[half_size:-half_size, half_size:-half_size] = z
    # top left corner
    band = z[0, 0]
    Z[:half_size, :half_size] = band - np.abs(np.flipud(np.fliplr(z[1:half_size+1, 1:half_size+1])) - band)
    # bottom right corner
    band = z[-1, -1]
    Z[-half_size:, -half_size:] = band + np.abs(np.flipud(np.fliplr(z[-half_size-1:-1, -half_size-1:-1])) - band)
    # top right corner
    band = Z[half_size, -half_size:]
    Z[:half_size, -half_size:] = band - np.abs(np.flipud(Z[half_size+1:2*half_size+1, -half_size:]) - band)
    # bottom left corner
    band = Z[-half_size:, half_size].reshape(-1, 1)
    Z[-half_size:, :half_size] = band - np.abs(np.fliplr(Z[-half_size:, half_size+1:2*half_size+1]) - band)
    # solve the system and convolve: pseudo-inverse row k is the convolution
    # kernel reproducing polynomial coefficient k at the window center
    if derivative is None:
        m = np.linalg.pinv(A)[0].reshape((windowSize, -1))
        return fftconvolve(Z, m, mode='valid')
    elif derivative == 'col':
        c = np.linalg.pinv(A)[1].reshape((windowSize, -1))
        return fftconvolve(Z, -c, mode='valid')
    elif derivative == 'row':
        r = np.linalg.pinv(A)[2].reshape((windowSize, -1))
        return fftconvolve(Z, -r, mode='valid')
    elif derivative == 'both':
        c = np.linalg.pinv(A)[1].reshape((windowSize, -1))
        r = np.linalg.pinv(A)[2].reshape((windowSize, -1))
        return fftconvolve(Z, -r, mode='valid'), fftconvolve(Z, -c, mode='valid')
# vim: set tabstop=4 expandtab :
###############################################################################
# Copyright (c) 2019-2021 ams AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Authors:
# - <NAME>, ams AG, <EMAIL>
import atexit
import os
import platform
import signal
import subprocess
import time
from abc import ABC, abstractmethod
from pathlib import Path
import psutil
from psutil import NoSuchProcess
import dottmi.target
from dottmi.dottexceptions import DottException
from dottmi.gdb_mi import GdbMi
from dottmi.gdbcontrollerdott import GdbControllerDott
from dottmi.utils import log
class GdbServer(ABC):
    """Abstract base for GDB server frontends.

    Stores the connection endpoint and target device id; concrete subclasses
    implement how the server process is launched and shut down.
    """

    def __init__(self, addr, port, device_id):
        # connection parameters for the GDB remote protocol
        self._addr: str = addr
        self._port: int = port
        self._device_id: str = device_id

    @property
    def device_id(self):
        """Identifier of the debugged target device."""
        return self._device_id

    @property
    def addr(self):
        """Address of the GDB server."""
        return self._addr

    @property
    def port(self):
        """TCP port of the GDB server."""
        return self._port

    @abstractmethod
    def _launch(self):
        """Start the GDB server process."""

    @abstractmethod
    def shutdown(self):
        """Stop the GDB server process."""
class GdbServerJLink(GdbServer):
    """Launches and supervises a SEGGER J-Link GDB server subprocess.

    The server is started in single-run mode; on successful startup the
    instance's address is set to 127.0.0.1 and shutdown is registered with
    atexit.
    """

    def __init__(self, gdb_svr_binary: str, addr: str, port: int, device_id: str, interface: str, endian: str,
                 speed: str = '15000', serial_number: str = None, jlink_addr: str = None):
        super().__init__(addr, port, device_id)
        self._srv_binary: str = gdb_svr_binary
        self._srv_process = None
        self._target_interface: str = interface
        self._target_endian: str = endian
        self._speed: str = speed
        self._serial_number: str = serial_number
        self._jlink_addr: str = jlink_addr

        # Popen.__del__ occasionally complains under Windows about invalid file handles on interpreter shutdown.
        # This is somewhat distracting and is silenced by a custom delete function.
        subprocess.Popen.__del_orig__ = subprocess.Popen.__del__
        subprocess.Popen.__del__ = GdbServerJLink._popen_del

        # No address given -> start a local GDB server instance ourselves.
        if self.addr is None:
            self._launch()

    @staticmethod
    def _popen_del(instance):
        # Replacement for Popen.__del__ which swallows the (harmless)
        # exceptions that can occur during interpreter shutdown.
        try:
            instance.__del_orig__()
        except:
            pass

    def _launch_internal(self):
        """Start the GDB server process and wait until it listens on the port.

        Raises:
            DottException: if the server terminates prematurely or the
                listening port does not open within the timeout.
            psutil.AccessDenied: propagated to the caller (_launch retries).
        """
        args = [self._srv_binary, '-device', self.device_id, '-if', self._target_interface, '-endian',
                self._target_endian, '-vd', '-noir', '-timeout', '2000', '-singlerun', '-silent', '-speed',
                self._speed]
        if self._jlink_addr is not None:
            args.append('-select')
            args.append(f'IP={self._jlink_addr}')
        if self._serial_number is not None:
            if self._jlink_addr is not None:
                log.warn('JLink address and JLINK serial number given. Ignoring serial in favour of address.')
            else:
                args.append('-select')
                args.append(f'USB={self._serial_number}')
        if self._port is not None:
            args.append('-port')
            args.append(f'{self._port}')

        cflags = 0
        if platform.system() == 'Windows':
            # Own process group so CTRL_BREAK_EVENT can be delivered in shutdown().
            cflags = subprocess.CREATE_NEW_PROCESS_GROUP
        self._srv_process = subprocess.Popen(args, shell=False, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
                                             creationflags=cflags)
        p = psutil.Process(self._srv_process.pid)
        startup_done = False
        try:
            # query the started process until it has opened a listening socket on the expected port
            end_time = time.time() + 8
            while not startup_done and time.time() < end_time:
                for c in p.connections():
                    if c.laddr.port == self.port:
                        if self._serial_number is None:
                            log.info(f'GDB server is now listening on port {self.port}!')
                        else:
                            log.info(f'GDB server (JLINK SN: {self._serial_number}) now listening on port {self.port}!')
                        startup_done = True
                if not startup_done:
                    # Avoid busy-spinning while waiting for the port to open.
                    time.sleep(0.1)
        except psutil.AccessDenied as ex:
            # On Linux the situation was observed that from newly launched GDB server processes
            # an AccessDenied exception is raised when accessing them with psutils. This exception
            # is then 'thrown' upwards where it is handled by retrying to create the process.
            raise ex
        except (NoSuchProcess, PermissionError) as ex:
            log.error('JLINK GDB server has terminated!')
            # give the process up to 2 seconds to deliver its exit status
            end_time = time.time() + 2
            res_poll = None
            while not startup_done and time.time() < end_time:
                res_poll = self._srv_process.poll()
                if res_poll is not None:
                    break
                time.sleep(0.05)
            if res_poll is not None:
                err_code, err_str = self._conv_jlink_error(res_poll)
                log.error(f'J-Link gdb server termination reason: {err_code:x} ({err_str})')
                if err_code == -2:
                    log.error('Already a JLINK GDB server instance running?')
                if err_code == -5:
                    log.debug('GDB server command line:')
                    log.debug(' '.join(args))
            raise DottException('Startup of JLINK gdb server failed!') from None
        if not startup_done:
            raise DottException('Startup of JLINK gdb server failed due to timeout!') from None
        else:
            self._addr = '127.0.0.1'
            atexit.register(self.shutdown)

    def _launch(self):
        """Launch the server, retrying while psutil denies access to the
        freshly created process (observed on Linux right after spawn)."""
        start_done: bool = False
        while not start_done:
            try:
                self._launch_internal()
                start_done = True
            except psutil.AccessDenied:
                # brief pause before retrying to avoid a hot retry loop
                time.sleep(0.1)

    def shutdown(self):
        """Terminate the GDB server process if it is still running."""
        if self._srv_process is not None:
            # if the gdb server is still running (despite being started in single run mode) it is terminated here
            try:
                if platform.system() == 'Windows':
                    os.kill(self._srv_process.pid, signal.CTRL_BREAK_EVENT)
                else:
                    os.kill(self._srv_process.pid, signal.SIGINT)
                self._srv_process.communicate(timeout=1)
            except subprocess.TimeoutExpired:
                self._srv_process.terminate()
            self._srv_process = None

    def _conv_jlink_error(self, jlink_error: int) -> (int, str):
        """Map the server's raw exit status to a (signed code, description) pair.

        On Windows the negative J-Link exit codes are reported as unsigned
        32-bit values; interpret those as two's complement. Values without the
        sign bit set (e.g. 0 for a clean exit) are taken verbatim — the
        previous unconditional subtraction of 2**32 mapped exit code 0 to a
        bogus 'unknown' code instead of 'no error'.
        """
        bits_in_word = 32
        if jlink_error >= (1 << (bits_in_word - 1)):
            err_code = jlink_error - (1 << bits_in_word)
        else:
            err_code = jlink_error
        err_str = 'Unknown error code.'
        if err_code == 0:
            err_str = 'No error. Gdb server closed normally.'
        if err_code == -1:
            err_str = 'Unknown error. Should not happen.'
        if err_code == -2:
            err_str = f'Failed to open listener port (default: 2331, current: {self.port}).'
        if err_code == -3:
            err_str = 'Could not connect to target. No target voltage detected or connection failed.'
        if err_code == -4:
            err_str = 'Failed to accept a connection from GDB.'
        if err_code == -5:
            err_str = 'Failed to parse the command line options, wrong or missing command line parameter.'
        if err_code == -6:
            err_str = 'Unknown or no device name set.'
        if err_code == -7:
            err_str = 'Failed to connect to J-Link.'
        return err_code, err_str
class GdbClient(object):
    """Thin wrapper that spawns a GDB client and exposes its machine interface.

    gdb_client_binary: the GDB client executable (in PATH or fully qualified).
    The GDB server endpoint is configured elsewhere; connect() only creates
    the MI controller for an already running server.
    """

    def __init__(self, gdb_client_binary: str) -> None:
        self._gdb_client_binary: str = gdb_client_binary
        self._mi_controller: GdbControllerDott = None
        self._gdb_mi: GdbMi = None

        # GDB-internal command scripts run under Python 2.7; extend the
        # environment so the gdb subprocess can locate that interpreter.
        python27_path = os.environ.get('PYTHONPATH27')
        if python27_path is None:
            raise Exception('PYTHONPATH27 not set. Can not load gdb command support. Aborting.')
        if platform.system() == 'Windows':
            os.environ['PATH'] = f'{python27_path};{os.environ["PATH"]}'
            paths = (python27_path,) * 4
            os.environ['PYTHONPATH'] = '%s;%s\\lib;%s\\lib\\site-packages;%s\\DLLs' % paths
        else:
            os.environ['PYTHONPATH'] = ''
        # Make the DOTT package itself importable from gdb's embedded Python.
        my_dir = os.path.dirname(os.path.realpath(__file__))
        os.environ['PYTHONPATH'] += os.pathsep + str(Path(my_dir + '/..'))

    def connect(self) -> None:
        """Create the 'GDB Machine Interface' instance (async mode)."""
        self._mi_controller = GdbControllerDott([self._gdb_client_binary, "--nx", "--quiet", "--interpreter=mi3"])
        self._gdb_mi = GdbMi(self._mi_controller)

    @property
    def gdb_mi(self) -> GdbMi:
        """GDB machine-interface wrapper; valid only after connect()."""
        return self._gdb_mi
class GdbServerQuirks(object):
    """Captures naming/command differences between GDB server implementations
    (currently SEGGER J-Link vs. OpenOCD)."""

    @staticmethod
    def instantiate_quirks(dt: 'dottmi.target.Target') -> 'GdbServerQuirks':
        # OpenOCD reports the status register as 'xPSR' while Segger uses
        # all-lowercase 'xpsr'; probe the target's register names to decide.
        if 'xPSR' in dt.reg_get_names():
            log.info("Using OpenOCD's xPSR naming")
            quirks = GdbServerQuirks('xPSR', 'monitor rbp all')
        else:
            # default to Segger's conventions
            log.info("Using Segger's xpsr naming")
            quirks = GdbServerQuirks('xpsr', 'monitor clrbp')
        return quirks

    def __init__(self, xpsr_name: str, clear_all_bps: str):
        self._xpsr_name: str = xpsr_name
        self._clear_all_bps: str = clear_all_bps

    @property
    def xpsr_name(self) -> str:
        """Name of the program status register on this server."""
        return self._xpsr_name

    @property
    def clear_all_bps(self) -> str:
        """Monitor command that clears all breakpoints."""
        return self._clear_all_bps
|
<gh_stars>0
"""
2019-May-10 -- incorporates modernizations from <https://github.com/diyclassics/library-callnumber-lc/blob/master/callnumber/__init__.py>
"""
import logging, re

log = logging.getLogger(__name__)

__version__ = '0.1.0'

# Padding characters used when building normalized call numbers:
# 'top' padding sorts before any real data, 'bottom' padding sorts after it
# (bottom padding is used to build the end of a call-number range).
joiner = ''
topspace = ' '
bottomspace = '~'
topdigit = '0'
bottomdigit = '9'

# Call numbers of the form "XX 1.2.3" are not parseable LC numbers here.
weird_re = re.compile(r'^\s*[A-Z]+\s*\d+\.\d+\.\d+')

lccn_re = re.compile(r'''^
        \s*
        (?:VIDEO-D)? # for video stuff
        (?:DVD-ROM)? # DVDs, obviously
        (?:CD-ROM)?  # CDs
        (?:TAPE-C)?  # Tapes
        (?:1-SIZE)?  # Brown specific
        (?:3-SIZE)?
        (?:RIVER)?
        (?:BOX)?
        (?:2-SIZE)?
        (?:SMALL BOX)? # end Brown specific
        \s*
        ([A-Z]{1,3})  # alpha
        \s*
        (?:         # optional numbers with optional decimal point
          (\d+)
          (?:\s*?\.\s*?(\d+))?
        )?
        \s*
        (?:               # optional cutter
          \.? \s*
          ([A-Z])      # cutter letter
          \s*
          (\d+ | \Z)        # cutter numbers
        )?
        \s*
        (?:               # optional cutter
          \.? \s*
          ([A-Z])      # cutter letter
          \s*
          (\d+ | \Z)        # cutter numbers
        )?
        \s*
        (?:               # optional cutter
          \.? \s*
          ([A-Z])      # cutter letter
          \s*
          (\d+ | \Z)        # cutter numbers
        )?
        (\s+.+?)?        # everything else
        \s*$
        ''', re.VERBOSE)


def normalize(lc, bottom=False):
    """Normalize an LC call number into a fixed-width, sortable string.

    Returns None when *lc* cannot be parsed as an LC call number. With
    bottom=True the result is padded so it sorts *after* every call number
    sharing the same prefix (used to build the end of a range).
    """
    lc = lc.upper()
    bottomout = bottom
    if re.match(weird_re, lc):
        return None
    m = re.match(lccn_re, lc)
    if not m:
        return None
    origs = m.groups('')
    (alpha, num, dec, c1alpha, c1num,
     c2alpha, c2num, c3alpha, c3num, extra) = origs
    if len(dec) > 3:  # was ```if (len(dec) > 2):```
        return None
    # alpha-only call number (e.g. just 'QA'): pad and return early
    if alpha and not (num or dec or c1alpha or c1num or c2alpha \
                      or c2num or c3alpha or c3num):
        if extra:
            return None
        if bottomout:
            return alpha + bottomspace * (3 - len(alpha))
        return alpha
    enorm = re.sub(r'[^A-Z0-9]', '', extra)
    log.debug( 'num, `%s`' % num )
    # Fix: use equality, not `is not ''` — identity comparison with a string
    # literal relies on CPython interning and raises SyntaxWarning on 3.8+.
    # (The guard itself was added because '%04d' % int('') raised on
    # callnumber 'BB .S7333 1777 5'.)
    if num != '':
        num = '%04d' % int(num)  # converts, eg, '2' into '0002'
    topnorm = [
        alpha + topspace * (3 - len(alpha)),
        num + topdigit * (4 - len(num)),
        dec + topdigit * (3 - len(dec)),
        c1alpha if c1alpha else topspace,
        c1num + topdigit * (3 - len(c1num)),
        c2alpha if c2alpha else topspace,
        c2num + topdigit * (3 - len(c2num)),
        c3alpha if c3alpha else topspace,
        c3num + topdigit * (3 - len(c3num)),
        ' ' + enorm,
    ]
    bottomnorm = [
        alpha + bottomspace * (3 - len(alpha)),
        num + bottomdigit * (4 - len(num)),
        dec + bottomdigit * (3 - len(dec)),
        c1alpha if c1alpha else bottomspace,
        c1num + bottomdigit * (3 - len(c1num)),
        c2alpha if c2alpha else bottomspace,
        c2num + bottomdigit * (3 - len(c2num)),
        c3alpha if c3alpha else bottomspace,
        c3num + bottomdigit * (3 - len(c3num)),
        ' ' + enorm,
    ]
    if extra:
        return joiner.join(topnorm)
    # No 'extra' part: trim trailing fields down to the last populated group,
    # optionally bottom-padding everything after it.
    topnorm.pop()
    bottomnorm.pop()
    inds = list( range(1, 9) )
    inds.reverse()
    for i in inds:
        end = topnorm.pop()
        if origs[i]:
            if bottomout:
                end = joiner.join(bottomnorm[i:])
            return joiner.join(topnorm) + joiner + end
class LC(object):
    """A single Library of Congress call number with its normalized forms."""

    def __init__(self, callno):
        try:
            self.denormalized = callno.upper()
        except AttributeError:
            message = f'*** ERROR: ```{callno}``` not a string?'
            log.warning( message )
            print( message )
        self.normalized = normalize(callno)

    def __unicode__(self):
        return self.normalized

    def __str__(self):
        return self.normalized

    @property
    def range_start(self):
        """Normalized form; sorts at the start of this call number's range."""
        return self.normalized

    @property
    def range_end(self):
        """Bottom-padded normalization; sorts at the end of the range."""
        return normalize(self.denormalized, True)

    def components(self, include_blanks=False):
        """Return the call number's parts (class letters, number, cutters,
        extra), or None when it cannot be parsed."""
        if re.match(weird_re, self.denormalized):
            return None
        m = re.match(lccn_re, self.denormalized)
        if m is None:
            return None
        (alpha, num, dec, c1alpha, c1num, c2alpha, c2num,
         c3alpha, c3num, extra) = m.groups('')
        if dec:
            num = '%s.%s' % (num, dec)
        cutters = [c1alpha + c1num, c2alpha + c2num, c3alpha + c3num]
        # the first cutter is conventionally written with a leading dot
        if re.search(r'\S', cutters[0]):
            cutters[0] = '.%s' % cutters[0]
        comps = []
        for part in [alpha, num] + cutters + [extra]:
            if include_blanks or re.search(r'\S', part):
                comps.append(re.match(r'^\s*(.*?)\s*$', part).group(1))
        return comps
|
<reponame>gregwinther/predicting-solid-state-qubit-material-hosts
import pandas as pd
import logging
import sys
from pymatgen.symmetry.groups import SYMM_DATA, sg_symbol_from_int_number
def sortByMPID(df: pd.DataFrame) -> pd.DataFrame:
    """Return *df* sorted numerically by Materials Project ID.

    "mp-123" sorts by the integer 123, not lexicographically. The input
    DataFrame is left untouched — the previous implementation leaked a
    temporary 'mpid_num' column into the caller's frame by assigning a new
    column before sorting.
    """
    # Numeric sort key: strip the "mp-" prefix from every material_id.
    key = df["material_id"].str[3:].astype(int).to_numpy()
    return df.iloc[key.argsort(kind="stable")].reset_index(drop=True)
def filterIDs(df: pd.DataFrame) -> pd.DataFrame:
    """Drop rows whose material_id is known to be inconsistent with the rest.

    Returns a new DataFrame with a reset index.
    """
    unsupportedMPIDs = ["mp-555563", "mp-583476", "mp-600205", "mp-600217", "mp-1195290", "mp-1196358", "mp-1196439", "mp-1198652", "mp-1198926", "mp-1199490", "mp-1199686", "mp-1203403", "mp-1204279", "mp-1204629"]
    # In progress. (Extended candidate list; not yet active.)
    unsupportedMPIDs_V2 = ["mp-28709", #C120S32
                    "mp-28905", #Sr6C120
                    "mp-28979", #Ba6C120
                    "mp-29281", #Th24P132
                    "mp-555563", #PH6C2S2NCl2O4 #DOI: 10.17188/1268877
                    "mp-560718", #Te4H48Au4C16S12N4
                    "mp-568028", #C120
                    "mp-568259", #Ta4Si8P4H72C24N8Cl24
                    "mp-574148", #K16Zn8N96
                    "mp-583476", #Nb7S2I19 #DOI: 10.17188/1277059
                    "mp-600172", #Cu8H96C40S32N8
                    "mp-600205", #H10C5SeS2N3Cl #DOI: -
                    "mp-600217", #H80C40Se8S16Br8N24 #DOI: -
                    "mp-603254", #P8H72Au8C24S24Cl8
                    "mp-645279", #C136O2F40
                    "mp-645316", #C140F60
                    "mp-645364", #Sr24P48N96
                    "mp-646059", #C156Cl36
                    "mp-646122", #C160Cl24
                    "mp-646669", #P112Pb20I8
                    "mp-647169", #C120F36
                    "mp-647192", #C112Cl20
                    "mp-647725", #Os20C68O64
                    "mp-648157", #Os24C76O80
                    "mp-680326", #P24C48S48N72
                    "mp-680329", #K48As112
                    "mp-698375", #Cu8H96C40S32N8
                    "mp-705194", #Mn16Sn8C80Br8O80
                    "mp-705526", #H64Au4C24S8N16Cl4O16
                    "mp-706304", #H72Ru4C24S12N12Cl4O12
                    "mp-707239", #H32C8Se4S8Br8N16
                    "mp-720895", #Re4H88C16S16N32Cl32O12 # not enough memory
                    "mp-722571", #Re20H20C80O80
                    "mp-744395", #Ni4H72C16S24N32O16
                    "mp-744919", #Mn6Mo4H68C44N32O10
                    "mp-782100", #As16H96C32S28N8
                    "mp-1195164", #Cu4B4P16H96C32S16F16
                    "mp-1195290", #Ga3Si5P10H36C12N4Cl11 #DOI: -
                    "mp-1195608", #C200Cl44
                    "mp-1195791", #Zr8H256C80I8N40
                    "mp-1196206", #C216Cl24
                    "mp-1196283", #H24C142F8
                    "mp-1196358", #P4H120Pt8C40I8N4Cl8 #DOI: -
                    "mp-1196439", #Sn8P4H128C44N12Cl8O4 #DOI: -
                    "mp-1196461", #C156F84
                    "mp-1196552", #H104Os4C24S24Br12N48O4
                    "mp-1196583", #C240
                    "mp-1198652", #Te4H72C36S24N12Cl4 #DOI: -
                    "mp-1198926", #Re8H96C24S24N48Cl48 #DOI: -
                    "mp-1199490", #Mn4H64C16S16N32Cl8 #DOI: -
                    "mp-1199686", #Mo4P16H152C52N16Cl16 #DOI: -
                    "mp-1203403", #C121S2Cl20 #DOI: -
                    "mp-1204279", #Si16Te8H176Pd8C64Cl16 #DOI: -
                    "mp-1204629"] #P16H216C80N32Cl8 #DOI: -
    print("A total of {} MPIDs are inconsistent with the rest."
          .format(len(unsupportedMPIDs)))
    # Build the set of present ids once instead of re-scanning the whole
    # column for every candidate (previously O(n) per lookup).
    present_ids = set(df["material_id"].values)
    dropped = 0
    for unsupportedMPID in unsupportedMPIDs:
        if unsupportedMPID in present_ids:
            df = df.drop(df[df["material_id"] == str(unsupportedMPID)].index)
            dropped += 1
    # Fix: report the number actually dropped (previously always printed the
    # size of the full unsupported list).
    print("A total of {} MPIDs were dropped from the dataset provided."
          .format(dropped))
    df = df.reset_index(drop=True)
    return df
def countSimilarEntriesWithMP(listOfEntries, nameOfDatabase):
    """Log how many entries overlap with Materials Project.

    An entry counts as similar when its value is >= 0 (a valid match index).
    Handles an empty input list without raising ZeroDivisionError.
    """
    similarEntries = sum(1 for entry in listOfEntries if entry >= 0)
    LOG.info("The amount of similar entries between MP and {} is {},".format(nameOfDatabase, similarEntries))
    if listOfEntries:
        # Fix: multiply by 100 — the message says 'percent' but the original
        # logged a bare fraction.
        LOG.info("which is {} percent".format(100 * similarEntries / len(listOfEntries)))
    else:
        LOG.info("which is undefined (no entries to compare)")
def polarGroupUsedInMP():
    """
    Materials Project has more space groups than normal convention. This function finds
    all the polar groups for materials project extended list of space groups.
    """
    # Point-group label for each of the 230 space groups, as used by pymatgen.
    point_groups = [
        SYMM_DATA['space_group_encoding'][sg_symbol_from_int_number(sg)]['point_group']
        for sg in range(1, 231)
    ]
    # pymatgen uses 40 labels rather than the conventional 32 because several
    # settings of the same point group carry distinct names.
    print("Number of point groups denoted in pymatgen: {}".format(len(set(point_groups))))
    # Translation table from pymatgen's alternative settings to the
    # conventional point-group symbols.
    point_group_conv = {'321' :'32', '312': '32', '3m1' :'3m', '31m': '3m',
                    '-3m1' : '-3m', '-31m': '-3m', '-4m2': '-42m', '-62m': '-6m2' }
    corrected_point_groups = [point_group_conv.get(pg, pg) for pg in point_groups]
    # After correction the conventional count of 32 point groups is recovered.
    print("Number of point groups in conventional notation: {}".format(len(set(corrected_point_groups))))
    # The 10 polar point groups.
    polar_point_groups = ['1', '2', 'm', 'mm2', '4', '4mm', '3', '3m', '6', '6mm']
    # A space group is polar iff its (corrected) point group is polar;
    # corrected_point_groups is aligned with space-group numbers 1..230.
    polar_spacegroups = [
        sg for sg, pg in zip(range(1, 231), corrected_point_groups)
        if pg in polar_point_groups
    ]
    # 68 of the 230 spacegroups are polar.
    print("Number of polar spacegroups: {}".format(len(polar_spacegroups)))
    return polar_spacegroups
# Module-level logging setup: emit INFO-and-above records to stdout using a
# timestamped "name - level - message" format.
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
LOG.addHandler(handler)
|
#!/usr/bin/env python3
import json
import requests
# https://www.notion.so/EXTERNAL-Gather-http-API-3bbf6c59325f40aca7ef5ce14c677444#af100c0dc3a84ea6869cb779d58ff7b7
class Gather:
    """Minimal client for the gather.town HTTP API.

    API reference:
    https://www.notion.so/EXTERNAL-Gather-http-API-3bbf6c59325f40aca7ef5ce14c677444#af100c0dc3a84ea6869cb779d58ff7b7
    """

    def __init__(self, gather_api_key, gather_space_id):
        self.gather_api_key = gather_api_key
        self.gather_space_id = gather_space_id

    def createMap(self, name, sourceSpace=None, map=None):
        """Create a map, either from raw map content or by copying sourceSpace.

        Returns the decoded JSON response; raises on a non-200 status.
        """
        if sourceSpace is None:
            extra = {"map": map}
        else:
            extra = {"sourceSpace": sourceSpace}
        res = requests.post(
            "https://gather.town/api/createMap",
            json={
                "apiKey": self.gather_api_key,
                "spaceId": self.gather_space_id,
                "name": name,
            }
            | extra,  # dict union requires Python 3.9+
        )
        if res.status_code != 200:
            # Fix: message previously said 'getMap' (copy-paste error).
            raise Exception(f"createMap failed {res.status_code}")
        return res.json()

    def getMap(self, mapId):
        """Fetch a map's content by id; raises on a non-200 status."""
        res = requests.get(
            "https://gather.town/api/getMap",
            params={
                "apiKey": self.gather_api_key,
                "spaceId": self.gather_space_id,
                "mapId": mapId,
            },
        )
        if res.status_code != 200:
            raise Exception(f"getMap failed {res.status_code}")
        return res.json()

    def setMap(self, mapId, mapContent):
        """Replace a map's content; raises on a non-200 status."""
        res = requests.post(
            "https://gather.town/api/setMap",
            json={
                "apiKey": self.gather_api_key,
                "spaceId": self.gather_space_id,
                "mapId": mapId,
                "mapContent": mapContent,
            },
        )
        if res.status_code != 200:
            # Fix: message previously said 'getMap' (copy-paste error).
            raise Exception(f"setMap failed {res.status_code}")

    def getGatherEmailDictionary(self):
        """Return the space's email guestlist as a dict.

        The API returns a list when the guestlist is empty; normalize that
        case to an empty dict.
        """
        res = requests.get(
            "https://gather.town/api/getEmailGuestlist",
            headers={"Content-Type": "application/json"},
            params={
                "apiKey": self.gather_api_key,
                "spaceId": self.gather_space_id,
            },
        )
        if res.status_code != 200:
            raise Exception(f"getEmailGuestlist failed {res.status_code}")
        result = res.json()
        if isinstance(result, list):
            return { }
        else:
            return result

    def setGatherEmailDictionary(self, users, mustCreate):
        """Set the space's email guestlist; overwrite when mustCreate is true."""
        res = requests.post(
            "https://gather.town/api/setEmailGuestlist",
            headers={"Content-Type": "application/json"},
            json={
                "overwrite": mustCreate,
                "apiKey": self.gather_api_key,
                "spaceId": self.gather_space_id,
                "guestlist": users,
            },
        )
        if res.status_code != 200:
            raise Exception(f"setEmailGuestlist failed {res.status_code}")
        return res.json()
# print(json.dumps(res.json(), indent=4, sort_keys=True))
# just use the javascript -- too tired to figure out the right
# params to post...
#
# def uploadImage(self, fileName):
# with open(fileName, mode='rb') as file: # b is important -> binary
# fileContent = file.read()
# res = requests.post(
# "https://gather.town/api/uploadImage",
# headers={"Content-Type": "application/json"},
# json={
# "spaceId": self.gather_space_id,
# "bytes": fileContent
# },
# )
# if res.status_code != 200:
# raise Exception(f"uploadImage failed {res.status_code}")
# return res.data()
|
<reponame>floringogianu/wintermute
""" A DQN example using wintermute that is close to the setup in the original
paper.
"""
import time
import random
from functools import partial
from types import SimpleNamespace
from datetime import datetime
import torch
from torch import optim
from termcolor import colored as clr
from rl_logger import Logger
from wintermute.env_wrappers import get_wrapped_atari
from wintermute.estimators import get_estimator
from wintermute.policy_evaluation import EpsilonGreedyPolicy
from wintermute.policy_evaluation import get_epsilon_schedule as get_epsilon
# from wintermute.policy_improvement import get_optimizer
from wintermute.policy_improvement import DQNPolicyImprovement
from wintermute.replay import NaiveExperienceReplay as ER
from wintermute.replay.prioritized_replay import ProportionalSampler as PER
# from wintermute.replay import FlatExperienceReplay as ER
from utils import get_parser, print_namespace
def priority_update(mem, dqn_loss):
    """ Callback for updating priorities in the proportional-based experience
    replay and for computing the importance sampling corrected loss.
    """
    per_sample_losses = dqn_loss.loss
    # new priorities are the absolute TD errors (detached from the graph)
    new_priorities = per_sample_losses.detach().abs()
    mem.update([priority.item() for priority in new_priorities])
    # importance-sampling correction: weight each sample's loss, then average
    weights = mem.weights.to(per_sample_losses.device).view_as(per_sample_losses)
    return (per_sample_losses * weights).mean()
def train(args):
    """ Here we do the training.

    Runs the DQN interaction/learning loop for args.step_no steps: act with
    the epsilon-greedy policy, store transitions, periodically sample a batch
    and improve the policy, and log training metrics.
    """
    env = args.env
    train_log = args.log.groups["training"]
    state, reward, done = env.reset(), 0, False
    warmed_up = False
    ep_cnt = 0
    for step in range(1, args.step_no + 1):
        # take action and save the s to _s and a to _a to be used later
        pi = args.policy_evaluation(state)
        _state, _action = state, pi.action
        state, reward, done, _ = env.step(pi.action)
        # add a (_s, _a, r, d) transition
        args.experience_replay.push((_state, _action, reward, state, done))
        # args.experience_replay.push(_state[0, 3], _action, reward, done)
        # sample a batch and do some learning
        # (only every update_freq steps, and only after warm-up)
        do_training = (step % args.update_freq == 0) and warmed_up
        if do_training:
            batch = args.experience_replay.sample()
            if args.prioritized:
                # cb computes the IS-corrected loss and refreshes priorities
                args.policy_improvement(batch, cb=args.priority_update)
            else:
                args.policy_improvement(batch)
            # sync the target network every 1000 steps
            if step % 1000 == 0:
                args.policy_improvement.update_target_estimator()
        # do some logging
        train_log.update(
            ep_cnt=(1 if done else 0),
            rw_per_ep=(reward, (1 if done else 0)),
            rw_per_step=reward,
            max_q=pi.q_value,
            sampling_fps=1,
            training_fps=32 if do_training else 0,
        )
        if done:
            state, reward, done = env.reset(), 0, False
            ep_cnt += 1
            if ep_cnt % args.log_freq == 0:
                args.log.log(train_log, step)
                train_log.reset()
        # warm-up flag is refreshed at the end of the step, so learning can
        # begin on the step *after* the buffer exceeds learn_start
        warmed_up = len(args.experience_replay) > args.learn_start
    # flush whatever metrics remain after the final step
    args.log.log(train_log, step)
    train_log.reset()
def main(args):
    """ Here we initialize stuff.

    Builds the environment, estimator, policies, replay buffer and logger
    from the parsed arguments, then hands everything to train().
    """
    # Seed 42 is the parser default; replace it with a random one so repeated
    # default runs differ. An explicitly chosen seed is kept as-is.
    # (Fix: randint requires int bounds — 1e4 is a float and is rejected on
    # Python 3.12+.)
    args.seed = random.randint(0, 10_000) if args.seed == 42 else args.seed
    print(f"torch manual seed={args.seed}.")
    torch.manual_seed(args.seed)

    # wrap the gym env
    env = get_wrapped_atari(
        args.game,
        mode="training",
        hist_len=4,
        seed=args.seed,
        no_gym=args.no_gym,
    )
    # (Fix: env was previously printed twice.)
    print(env)
    print("ActionSpace: ", env.action_space)

    # construct an estimator to be used with the policy
    action_no = env.action_space.n
    estimator = get_estimator(
        "atari", hist_len=4, action_no=action_no, hidden_sz=256
    )
    estimator = estimator.cuda()

    # construct an epsilon greedy policy
    # also: epsilon = {'name':'linear', 'start':1, 'end':0.1, 'steps':1000}
    epsilon = get_epsilon(steps=args.epsilon_steps)
    policy_evaluation = EpsilonGreedyPolicy(estimator, action_no, epsilon)

    # construct a policy improvement type
    # optimizer = get_optimizer('Adam', estimator, lr=0.0001, eps=0.0003)
    optimizer = optim.Adam(
        estimator.parameters(), lr=args.lr, eps=args.adam_eps
    )
    policy_improvement = DQNPolicyImprovement(
        estimator, optimizer, gamma=0.99, is_double=args.double_dqn
    )

    # we also need an experience replay
    if args.prioritized:
        experience_replay = PER(
            args.mem_size,
            batch_size=32,
            alpha=0.6,
            optim_steps=((args.step_no - args.learn_start) / args.update_freq),
        )
        priority_update_cb = partial(priority_update, experience_replay)
    else:
        experience_replay = ER(1_000_000, batch_size=32)
        # experience_replay = ER(100000, batch_size=32, hist_len=4)  # flat

    # construct a tester (not implemented yet)
    tester = None

    # construct a logger
    if not args.label:
        sampling = "prioritized" if args.prioritized else "uniform"
        label = f"{datetime.now():%Y%b%d-%H%M%S}_{args.game}_{sampling}"
    else:
        # Fix: 'label' was previously left undefined when args.label was
        # given, crashing with NameError on the Logger call below.
        label = args.label
    log = Logger(label=label, path=f"./results/{label}")
    train_log = log.add_group(
        tag="training",
        metrics=(
            log.SumMetric("ep_cnt", resetable=False),
            log.AvgMetric("rw_per_ep", emph=True),
            log.AvgMetric("rw_per_step"),
            log.MaxMetric("max_q"),
            log.FPSMetric("training_fps"),
            log.FPSMetric("sampling_fps"),
        ),
        console_options=("white", "on_blue", ["bold"]),
    )
    log.log_info(train_log, "date: %s." % time.strftime("%d/%m/%Y | %H:%M:%S"))
    log.log_info(train_log, "pytorch v%s." % torch.__version__)

    # Add the created objects in the args namespace
    args.env = env
    args.policy_evaluation = policy_evaluation
    args.policy_improvement = policy_improvement
    args.experience_replay = experience_replay
    args.tester = tester
    args.log = log
    if args.prioritized:
        args.priority_update = priority_update_cb

    # print the args
    print_namespace(args)

    # start the training
    train(args)
# Script entry point: parse the command-line arguments and start training.
if __name__ == "__main__":
    main(get_parser())
|
<gh_stars>0
"""
Module containing classes that represent single results or sets of results that
are obtained at once. E.g. the several results that come from one test bar
Also classes to represent a result
"""
import os
from typing import List, Tuple
import pandas as pd
from google.cloud import firestore
# How too's say to set this environment variable with a terminal but can't get
# this to work plus it would persist across sessions so doing it at the top of
# this file
# Import-time side effect: point the Firestore SDK at the service-account key
# file, then create one shared client used by the whole module.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'keyfile.json'
db = firestore.Client()
class Measurement:
    """
    Class for measurements with standards attributes (e.g. a mechanical
    strength result)
    """

    def __init__(self,
                 value: float = None,
                 standards: List[str] = None,
                 notes: List[str] = None,
                 units: str = None):
        self.value = value
        self.standards = standards
        self.notes = notes
        self.units = units

    def to_dict(self):
        """Serialize to a dict, dropping attributes that were never set."""
        result = {}
        for key, val in self.__dict__.items():
            if val is not None:
                result[key] = val
        return result

    def __add__(self, other):
        # Two measurements combine into a Properties object holding both.
        if not isinstance(other, Measurement):
            raise TypeError("'+' operator not supported between instances of "
                            f"Measurement and {type(other)}")
        return Properties.from_measurements(self, other)
class ProofStress(Measurement):
    """
    Object representing a single Proof stress measurement.
    """
    # Attribute name used when attaching this measurement to a Properties object.
    name = 'proof_stress'
    def __init__(self, value: float = None, percent: float = None, **kwargs):
        # 'percent' looks like the plastic-strain offset the proof stress
        # refers to (e.g. 0.2) — assumption from naming; TODO confirm.
        self.percent = percent
        super().__init__(value, **kwargs)
    def __repr__(self):
        return f'ProofStress({self.__dict__})'
class Uts(Measurement):
    """
    Object representing a single ultimate tensile strength (UTS) measurement.
    """
    # Attribute name used when attaching this measurement to a Properties object.
    name = 'uts'

    # The explicit __init__ was removed: it only forwarded (value, **kwargs)
    # to Measurement.__init__, which the inherited constructor already does.

    def __repr__(self):
        return f'Uts({self.__dict__})'
class Elongation(Measurement):
    """
    Object representing a single elongation result.
    """
    # Attribute name used when attaching this measurement to a Properties object.
    name = 'elongation'

    # The explicit __init__ was removed: it only forwarded (value, **kwargs)
    # to Measurement.__init__, which the inherited constructor already does.

    def __repr__(self):
        return f'Elongation({self.__dict__})'
class Properties:
    """
    Object representing a set of results for a single item / coupon / bar
    tested.
    """

    def __init__(self,
                 proof_stress: ProofStress = None,
                 uts: Uts = None,
                 elongation: Elongation = None,
                 grade: str = None,
                 temper: str = None,
                 alloy: str = None):
        self.proof_stress = proof_stress
        self.uts = uts
        self.elongation = elongation
        self.grade = grade
        self.temper = temper
        self.alloy = alloy

    def __repr__(self):
        return f'Properties({self.__dict__})'

    def __add__(self, other):
        """Combine with a Measurement or another Properties into a NEW
        Properties object; raises AttributeError on conflicting attributes."""
        if isinstance(other, Measurement):
            # Seems safest to error if the measurement is already an attribute
            if getattr(self, other.name):
                raise AttributeError("Properties object already has "
                                     f"{other.name} attribute")
            # Method - create a new Properties object, transfer attributes
            # then add the new measurement.
            # Fix: this previously called self._copy(), a method that does
            # not exist (AttributeError on every Properties + Measurement).
            new_obj = self.copy()
            setattr(new_obj, other.name, other)
            return new_obj
        elif isinstance(other, Properties):
            # Method - create a copy then transfer the other's set attributes.
            # Error if one is already set. Originals are preserved.
            new_obj = self.copy()
            for attr_name, attr in other.__dict__.items():
                if getattr(other, attr_name):
                    if getattr(new_obj, attr_name):
                        # Fix: message was missing its f-prefix (so the
                        # placeholder never interpolated) and misspelled
                        # 'already'.
                        raise AttributeError(
                            f"instance already has {attr_name} attribute - "
                            "cannot be overwritten"
                        )
                    setattr(new_obj, attr_name, attr)
            return new_obj
        # Unsupported operand: let Python raise the standard TypeError
        # (the original silently returned None here).
        return NotImplemented

    @classmethod
    def _transfer_attrs_to_new(cls, obj):
        # Shallow-copies every attribute of *obj* onto a fresh instance.
        new_obj = cls()
        for attr_name, attr in obj.__dict__.items():
            setattr(new_obj, attr_name, attr)
        return new_obj

    def copy(self):
        """Return a shallow copy of this Properties object."""
        return Properties()._transfer_attrs_to_new(self)

    def to_dict(self):
        """Serialize to a plain dict, recursing into attributes that expose
        their own to_dict (i.e. Measurement subclasses)."""
        rtn_dict = {}
        for attr in self.__dict__:
            if hasattr(getattr(self, attr), 'to_dict'):
                rtn_dict[attr] = getattr(self, attr).to_dict()
            else:
                rtn_dict[attr] = getattr(self, attr)
        return rtn_dict

    @staticmethod
    def from_measurements(*measurements):
        """Build a Properties object from Measurement instances; each is
        stored under its class-level 'name' attribute."""
        new_obj = Properties()
        for measurement in measurements:
            setattr(new_obj, measurement.name, measurement)
        return new_obj

    @classmethod
    def from_row(cls, row: Tuple):
        """Build a Properties object from a pandas row with optional
        'PS'/'UTS'/'Elongation'/'temper'/'grade'/'alloy' fields."""
        props = cls()
        if 'PS' in row.index:
            props.proof_stress = ProofStress(value=row['PS'])
        if 'UTS' in row.index:
            props.uts = Uts(value=row['UTS'])
        if 'Elongation' in row.index:
            props.elongation = Elongation(value=row['Elongation'])
        if 'temper' in row.index:
            props.temper = row['temper']
        if 'grade' in row.index:
            props.grade = row['grade']
        if 'alloy' in row.index:
            props.alloy = row['alloy']
        # todo: refactor above - must be a tidier, expandable way
        return props

    def write_to_db(self):
        """Persist this object as a new document in the 'properties'
        Firestore collection (uses the module-level client)."""
        doc_ref = db.collection('properties').document()
        doc_ref.set(self.to_dict())
class Table:
    """
    To contain sets of Properties
    """

    def __init__(self):
        # parallel bookkeeping: parsed rows and the source column names
        self.properties = []
        self.property_names = []

    def __repr__(self):
        return (f"Table(columns={self.property_names}, "
                f"rows={len(self.properties)})")

    @classmethod
    def from_csv(cls, path: str):
        """Load a CSV file and parse each row into a Properties object."""
        return cls.from_dataframe(pd.read_csv(path))

    @classmethod
    def from_dataframe(cls, df: pd.DataFrame):
        """Parse each DataFrame row into a Properties object."""
        table = cls()
        table.property_names = df.columns
        table.properties = [Properties.from_row(row) for _, row in df.iterrows()]
        return table

    def to_dataframe(self):
        pass

    def to_csv(self):
        pass

    def write_to_db(self):
        """Write all contained Properties to Firestore in one batch."""
        batch = db.batch()
        for item in self.properties:
            doc_ref = db.collection('properties').document()
            batch.set(doc_ref, item.to_dict())
        batch.commit()
|
from collections import defaultdict
import copy
from nltk.parse import DependencyGraph
from common.globals_args import nltk_nlp
def span_tree_to_hybrid_dependency_graph_interface(span_tree=None):
    '''span tree to hybrid dependency graph interface

    Entry point: converts a span tree into a hybrid dependency graph. A
    single-node tree is handled by parsing the root content directly;
    otherwise the graph is built recursively over the span hierarchy.
    '''
    skeleton_span_node = span_tree.get_root_span_node()
    if len(span_tree.nodes) == 1:
        surface_tokens = span_tree.tokens
        hybrid_dependency_graph = nltk_nlp.generate_dependency_graph(skeleton_span_node.content)
        """feats 暂时存放索引位置"""
        # Translation: 'feats' temporarily stores the token's index position.
        for address, node_dict in hybrid_dependency_graph.nodes.items():
            if address == 0:
                # address 0 is the artificial root node; it has no token index
                node_dict['feats'] = -1
            else:
                # map graph address (1-based) back to the span tree token index
                node_dict['feats'] = surface_tokens[address-1].index
    else:
        hybrid_dependency_graph = span_tree_to_dependency_graph_recursion(span_tree=span_tree, current_span_node=skeleton_span_node)
    return hybrid_dependency_graph
def span_tree_to_dependency_graph_recursion(span_tree=None, current_span_node=None):
    """
    recursion method generate hybrid dependency graph

    Parses the current span's content into a dependency graph, records each
    node's span-tree token index in 'feats', then recursively merges the
    graphs (or single-token node dicts) of all child spans beneath their
    headword nodes.

    :param span_tree: the full span tree being converted
    :param current_span_node: the span node handled at this recursion level
    :return: a node-info dict (terminal single-token span) or a DependencyGraph
    """
    current_span_node_dependency_graph = nltk_nlp.generate_dependency_graph(current_span_node.content)
    current_span_node_tokens = current_span_node.tokens
    # process nodes in address order (address 0 is the artificial root)
    node_index_to_dict_list_ranked = list(sorted(current_span_node_dependency_graph.nodes.items(), key=lambda d: d[0], reverse=False))
    for (address, node_dict) in node_index_to_dict_list_ranked:
        """feats 暂时存放索引位置"""
        # Translation: 'feats' temporarily stores the token's index position.
        if address == 0:
            node_dict['feats'] = -1
        elif address <= len(current_span_node_tokens): #-1
            node_dict['feats'] = current_span_node_tokens[address - 1].index
        else:
            # address beyond the span's tokens: no corresponding token index
            node_dict['feats'] = -1 #node_dict['address'] #
    """判断 span 还有没有孩子"""
    # Translation: check whether this span still has children.
    if current_span_node.isTerminal:
        if len(current_span_node_tokens) == 1:
            # single-token terminal span: return a bare node-info dict so the
            # caller can splice it into its own graph (address fixed there)
            current_span_node_token_pos = nltk_nlp.get_pos(current_span_node_tokens[0].value)[0][1]
            current_span_node_info = {
                'address': -1,
                'word': current_span_node_tokens[0].value,
                'lemma': current_span_node_tokens[0].value,
                'ctag': current_span_node_token_pos,
                'tag': current_span_node_token_pos,
                'feats': current_span_node_tokens[0].index,
                'head': 0,
                'deps': defaultdict(list),
                'rel': None
            }
            return current_span_node_info
        else:
            return current_span_node_dependency_graph
    """has children"""
    # recursively convert each child span and merge it under its headword
    for child_span in span_tree.get_children_spans_by_fatherspan(current_span_node):
        child_dependency_dict_or_graph = span_tree_to_dependency_graph_recursion(span_tree=span_tree, current_span_node=child_span)
        headword_node_in_dep = look_for_headword_in_dependencygraph(
            dependency_graph=current_span_node_dependency_graph,
            span_tree_tokens=current_span_node.tokens, #span_tree.tokens,
            headwords_position_in_span_tree_tokens=child_span.headword_position)
        current_span_node_dependency_graph = merge(
            merge_dependency_graph=current_span_node_dependency_graph,
            child_dependency_dict_or_graph=child_dependency_dict_or_graph,
            headword_node_in_dep=headword_node_in_dep,
            modifier_relation=child_span.headword_relation)
    return current_span_node_dependency_graph
def merge(merge_dependency_graph=None, child_dependency_dict_or_graph=None, headword_node_in_dep=None, modifier_relation='other'):
    """Attach a child fragment to the dependency graph under its headword.

    The child is either a single node dict (a terminal span) appended as a
    new node, or a whole DependencyGraph grafted in via add_graph().
    Returns the mutated merge_dependency_graph.
    """
    child = child_dependency_dict_or_graph
    if isinstance(child, dict):
        # Single terminal node: give it the next free address and hang it
        # under the headword with the requested relation.
        child['address'] = len(merge_dependency_graph.nodes)
        child['head'] = headword_node_in_dep['address']
        child['rel'] = modifier_relation
        merge_dependency_graph.add_node(child)
        merge_dependency_graph.add_arc(headword_node_in_dep['address'], child['address'])
    elif isinstance(child, DependencyGraph):
        # Whole sub-graph: graft every node, re-rooting it at the headword.
        add_graph(skeleton_dependency_graph=merge_dependency_graph,
                  head_node_in_dependency_graph=headword_node_in_dep,
                  sub_dependency_graph=child,
                  dependency_rel=modifier_relation)
    return merge_dependency_graph
def add_graph(skeleton_dependency_graph=None, head_node_in_dependency_graph=None, sub_dependency_graph=None, dependency_rel=None):
    """Graft all nodes of sub_dependency_graph into skeleton_dependency_graph.

    Non-root nodes keep their internal structure but receive fresh
    addresses; the sub-graph's root (head == 0) is re-attached under
    head_node_in_dependency_graph with relation *dependency_rel*.
    Mutates skeleton_dependency_graph in place; returns None.
    """
    """维护一个当前子树到新skeleton tree中的对应dict"""
    # Maps old sub-graph addresses -> new addresses in the skeleton graph.
    sub_dependency_graph_node_to_skeleton_node_dict = {}
    """维护一个新地址id"""
    # Next free address counter; incremented before each insert.
    new_address = len(skeleton_dependency_graph.nodes) - 1
    """add nodes in sub_dependency_graph, add node, at the time record 'deps'"""
    # First pass: copy every real node across with a new address.
    node_index_to_dict_list_ranked = list(sorted(sub_dependency_graph.nodes.items(), key=lambda d: d[0], reverse=False))
    for (_, node_info_in_node) in node_index_to_dict_list_ranked:
        if node_info_in_node['head'] is None:
            continue  # skip the sub-graph's artificial TOP node (head is None)
        new_address += 1
        """记住new and old对应关系"""
        # Remember the old -> new address mapping before copying the node.
        sub_dependency_graph_node_to_skeleton_node_dict[node_info_in_node['address']] = new_address
        node_info = copy.deepcopy(node_info_in_node)
        """更新新地址"""
        # Rewrite the copy's address into the skeleton's address space.
        node_info['address'] = sub_dependency_graph_node_to_skeleton_node_dict[node_info_in_node['address']]
        # deps are rebuilt below via add_arc, so start from an empty map.
        node_info['deps'] = defaultdict(list)
        """add skeleton"""
        skeleton_dependency_graph.add_node(node_info)
    """update arc in dependency graph 更新子树的结构信息"""
    # Second pass: now that all nodes exist, remap head pointers and arcs.
    for (_, node_info_in_node) in node_index_to_dict_list_ranked:
        if node_info_in_node['head'] is None:
            continue
        skeleton_node = skeleton_dependency_graph.nodes[sub_dependency_graph_node_to_skeleton_node_dict[node_info_in_node['address']]]
        if node_info_in_node['head'] != 0:
            """非根,更新原先子树中的关系"""
            # Interior node: translate its head into the new address space.
            skeleton_node['head'] = sub_dependency_graph_node_to_skeleton_node_dict[node_info_in_node['head']]
        else:
            """子树的根"""
            # Root of the sub-graph: hang it under the headword node.
            skeleton_node['rel'] = dependency_rel
            skeleton_node['head'] = head_node_in_dependency_graph['address']
        skeleton_dependency_graph.add_arc(skeleton_node['head'], skeleton_node['address'])
def look_for_headword_in_dependencygraph(dependency_graph, span_tree_tokens, headwords_position_in_span_tree_tokens):
    """Find the dependency-graph node corresponding to the headword token.

    The headword is first located in *span_tree_tokens* by its ``index``,
    then matched against graph nodes by surface string.
    Returns the matching node dict, or None when nothing matches.
    """
    headword_node_in_dep = None
    """look for node in skeleton span"""
    headword_token_in_skeleton = None
    for skeleton_token in span_tree_tokens:
        if skeleton_token.index == headwords_position_in_span_tree_tokens:
            headword_token_in_skeleton = skeleton_token
            break
    """look for node in dep 通过字面比较,来判断在依存树上的头,可能会有bug"""
    # Match by literal string comparison (the original author notes this
    # may be buggy); PTB-normalized brackets/quotes are special-cased
    # (-RRB-/-LRB-/'' versus ) ( ").
    # NOTE(review): there is no break here, so when several nodes share the
    # same word form the LAST match wins — confirm that is intended.
    # NOTE(review): if no token carries the requested index,
    # headword_token_in_skeleton stays None and `.value` below raises
    # AttributeError — confirm callers guarantee a hit.
    for index, node_in_dep in dependency_graph.nodes.items():
        if headword_token_in_skeleton.value == node_in_dep['word'] \
                or (headword_token_in_skeleton.value == ')' and node_in_dep['word'] == '-RRB-') \
                or (headword_token_in_skeleton.value == '(' and node_in_dep['word'] == '-LRB-') \
                or (headword_token_in_skeleton.value == '"' and node_in_dep['word'] == '\'\''):
            headword_node_in_dep = node_in_dep
    return headword_node_in_dep
|
from flask import render_template, abort, flash, redirect, url_for, current_app, request, make_response
from . import main
from .. import db
from ..models import User, Role, Post, Permission, Post, Comment
from flask_login import login_required, current_user
from .forms import EditProfileForm, EditProfileAdminForm, PostForm, CommentForm, DelCommentForm
from ..decorators import admin_required, permission_required
import os
from werkzeug.utils import secure_filename
@main.route('/', methods=['GET', 'POST'])
def index():
    """Home page: show the post form and create a post (optionally with photo)."""
    form = PostForm()
    if current_user.can(Permission.WRITE) and form.validate_on_submit():
        upload = form.photo.data
        if upload is not None:
            # Sanitize the client-supplied filename before writing to disk.
            stored_name = secure_filename(upload.filename)
            upload.save(os.path.join(
                current_app.config['UPLOAD_FOLDER'], 'photos', stored_name))
            new_post = Post(body=form.body.data, photo=stored_name,
                            author=current_user._get_current_object())
        else:
            new_post = Post(body=form.body.data,
                            author=current_user._get_current_object())
        db.session.add(new_post)
        db.session.commit()
        flash('已成功送出', 'success')
        return redirect(url_for('main.index'))
    return render_template('index.html', form=form)
@main.route('/user/<username>')
def user(username):
    """Public profile page; 404 when the username is unknown."""
    profile = User.query.filter_by(username=username).first_or_404()
    return render_template('user.html', user=profile)
@main.route('/edit-profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Let the logged-in user edit their own profile fields."""
    form = EditProfileForm()
    profile_fields = ('name', 'location', 'about')
    if form.validate_on_submit():
        # Copy the submitted values onto the real user object and persist.
        for attr in profile_fields:
            setattr(current_user, attr, getattr(form, attr).data)
        db.session.add(current_user._get_current_object())
        db.session.commit()
        flash('修改成功', 'success')
        return redirect(url_for('main.user', username=current_user.username))
    # GET (or failed validation): pre-fill the form with current values.
    for attr in profile_fields:
        getattr(form, attr).data = getattr(current_user, attr)
    return render_template('edit_profile.html', form=form)
@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_profile_admin(id):
    """Admin-only editor for any user's account and profile fields."""
    target = User.query.get_or_404(id)
    form = EditProfileAdminForm(user=target)
    simple_fields = ('email', 'username', 'name', 'location', 'about')
    if form.validate_on_submit():
        for attr in simple_fields:
            setattr(target, attr, getattr(form, attr).data)
        # The form stores the role by id; the model wants the Role object.
        target.role = Role.query.get(form.role.data)
        db.session.add(target)
        db.session.commit()
        flash('修改成功', 'success')
        return redirect(url_for('main.user', username=target.username))
    # Pre-fill the form from the account being edited.
    for attr in simple_fields:
        getattr(form, attr).data = getattr(target, attr)
    form.role.data = target.role_id
    return render_template('edit_profile_admin.html', form=form, user=target)
@main.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
    """Show one post with paginated comments; accept new comments on POST.

    ``page=-1`` is a sentinel meaning "last page", so a freshly submitted
    comment is visible right after the redirect.
    """
    post = Post.query.get_or_404(id)
    form = CommentForm()
    del_form = DelCommentForm()
    if form.validate_on_submit():
        # NOTE(review): no permission/login check guards comment creation
        # here; an anonymous current_user would fail at commit time —
        # confirm comments are meant to require login.
        comment = Comment(body=form.body.data,
                          post=post,
                          author=current_user._get_current_object())
        db.session.add(comment)
        db.session.commit()
        flash('評論已成功送出', 'success')
        return redirect(url_for('main.post', id=post.id, page=-1))
    page = request.args.get('page', 1, type=int)
    if page == -1:
        # Translate the sentinel into the index of the last comment page.
        page = (post.comments.count() - 1) // \
            current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1
    pagination = post.comments.order_by(Comment.id).paginate(
        page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
        error_out=False)
    comments = pagination.items
    return render_template('post.html', posts=[post], form=form, del_form=del_form,
                           comments=comments, pagination=pagination)
@main.route('/comment/<int:id>')
@login_required
def del_com(id):
    """Delete a comment and return to the last comment page of its post.

    Security fix: this destructive route previously had NO authentication
    or authorization at all — any visitor could delete any comment by id.
    It now requires login and allows only the comment's author or an
    administrator to delete.
    """
    comment = Comment.query.get_or_404(id)
    if current_user != comment.author and \
            not current_user.can(Permission.ADMIN):
        abort(403)
    post_id = comment.post_id
    db.session.delete(comment)
    db.session.commit()
    flash('評論已刪除', 'success')
    # page=-1 sends the viewer to the last page of comments (see post()).
    return redirect(url_for('main.post', id=post_id, page=-1))
@main.route('/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit(id):
    """Edit an existing post; only its author or an administrator may."""
    target = Post.query.get_or_404(id)
    is_author = current_user == target.author
    if not (is_author or current_user.can(Permission.ADMIN)):
        abort(403)
    form = PostForm()
    if form.validate_on_submit():
        target.body = form.body.data
        db.session.add(target)
        db.session.commit()
        flash('已成功修改', 'success')
        return redirect(url_for('.post', id=target.id))
    # Pre-fill the editor with the current body on GET.
    form.body.data = target.body
    return render_template('edit_post.html', form=form)
@main.route('/post')
@login_required
def post_all():
    """Paginated post list; a cookie restricts it to followed authors."""
    page = request.args.get('page', 1, type=int)
    show_followed = current_user.is_authenticated and \
        bool(request.cookies.get('show_followed_post', ''))
    query = current_user.followed_posts if show_followed else Post.query
    pagination = query.order_by(Post.id).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    return render_template('post_all.html', posts=pagination.items,
                           show_followed=show_followed, pagination=pagination)
@main.route('/post_user')
@login_required
def post_user():
    """Paginated list of only the logged-in user's own posts."""
    page = request.args.get('page', 1, type=int)
    own_posts = Post.query.filter_by(author_id=current_user.id)
    pagination = own_posts.order_by(Post.id).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    return render_template('post_all.html', posts=pagination.items,
                           show_user=True, pagination=pagination)
@main.route('/follow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def follow(username):
    """Start following *username*; warn on unknown user or duplicate follow."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash('查無此人', 'warning')
        return redirect(url_for('main.index'))
    if current_user.is_following(target):
        flash('您已經 follow 他', 'warning')
    else:
        current_user.follow(target)
        db.session.commit()
        flash('成功 follow %s' % username, 'success')
    return redirect(url_for('main.user', username=username))
@main.route('/unfollow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(username):
    """Stop following *username*; warn when not currently following."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash('查無使用者', 'warning')
        return redirect(url_for('main.index'))
    if not current_user.is_following(target):
        flash('您尚未 follow 他', 'warning')
    else:
        current_user.unfollow(target)
        db.session.commit()
        flash('您成功取消 follow %s' % username, 'success')
    return redirect(url_for('main.user', username=username))
@main.route('/followers/<username>')
def followers(username):
    """Paginated list of accounts that follow *username*."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash('查無使用者', 'warning')
        return redirect(url_for('main.index'))
    page = request.args.get('page', 1, type=int)
    pagination = target.followers.paginate(
        page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
        error_out=False)
    # Flatten the association objects into what the template expects.
    entries = [{'user': item.follower, 'timestamp': item.timestamp}
               for item in pagination.items]
    return render_template('followers.html', user=target, title="Followers of",
                           endpoint='.followers', pagination=pagination,
                           follows=entries)
@main.route('/followed-by/<username>')
def followed_by(username):
    """Paginated list of accounts that *username* follows."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash('查無使用者', 'warning')
        return redirect(url_for('main.index'))
    page = request.args.get('page', 1, type=int)
    pagination = target.followed.paginate(
        page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
        error_out=False)
    # Flatten the association objects into what the template expects.
    entries = [{'user': item.followed, 'timestamp': item.timestamp}
               for item in pagination.items]
    return render_template('followers.html', user=target, title="Followed by",
                           endpoint='.followed_by', pagination=pagination,
                           follows=entries)
@main.route('/post_all')
@login_required
def show_all():
    """Clear the followed-only cookie so /post shows every post."""
    response = make_response(redirect(url_for('main.post_all')))
    # Cookie lives for 30 days.
    response.set_cookie('show_followed_post', '', max_age=30 * 24 * 60 * 60)
    return response
@main.route('/post_followed')
@login_required
def show_followed():
    """Set the followed-only cookie so /post lists followed authors only."""
    response = make_response(redirect(url_for('main.post_all')))
    # Cookie lives for 30 days.
    response.set_cookie('show_followed_post', '1', max_age=30 * 24 * 60 * 60)
    return response
@main.route('/all_user/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def all_user(username):
    """List every registered user; *username* selects one of them.

    NOTE(review): `user` is None when the username does not exist; the
    template presumably tolerates that — confirm.
    """
    selected = User.query.filter_by(username=username).first()
    everyone = User.query.all()
    return render_template('all_user.html', user=selected, users=everyone)
from example_utils import fmt_row, fetch_dataset
import cPickle, numpy as np
import cgt
from cgt import nn
import argparse, time
def rmsprop_updates(cost, params, stepsize=0.001, rho=0.9, epsilon=1e-6):
    """Build RMSProp update pairs for every parameter of *cost*.

    Keeps one accumulator per parameter holding an exponential moving
    average of squared gradients and divides each step by its root;
    *epsilon* guards against division by zero.
    Returns a list of (shared_variable, new_value) pairs for cgt.function.
    """
    updates = []
    for param, grad in zip(params, cgt.grad(cost, params)):
        accum = cgt.shared(param.op.get_value() * 0.)
        accum_next = rho * accum + (1 - rho) * cgt.square(grad)
        scaled_grad = grad / cgt.sqrt(accum_next + epsilon)
        updates.append((accum, accum_next))
        updates.append((param, param - stepsize * scaled_grad))
    return updates
def main():
    """Train a small CIFAR-10 convnet with CGT (Python 2 example script).

    Flags: --profile dumps CGT profiler stats after the first epoch,
    --unittest stops after one (truncated) epoch, --epochs / --devtype
    control training length and device placement.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--profile",action="store_true")
    parser.add_argument("--unittest",action="store_true")
    parser.add_argument("--epochs",type=int,default=10)
    parser.add_argument("--devtype",choices=["cpu","gpu"],default="cpu")
    args = parser.parse_args()
    cgt.update_config(default_device=cgt.core.Device(devtype=args.devtype), backend="native")

    batchsize = 64
    Xshape = (batchsize, 3, 32, 32)
    X = cgt.tensor4("X", fixed_shape = Xshape)
    y = cgt.vector("y", fixed_shape = (batchsize,), dtype='i4')

    # Three conv/pool stages (3->32->32->64 channels, 5x5 kernels, 2px pad).
    conv1 = nn.SpatialConvolution(3, 32, kernelshape=(5,5), pad=(2,2),
        weight_init=nn.IIDGaussian(std=1e-4))(X)
    relu1 = nn.rectify(conv1)
    pool1 = nn.max_pool_2d(relu1, kernelshape=(3,3), stride=(2,2))
    conv2 = nn.SpatialConvolution(32, 32, kernelshape=(5,5), pad=(2,2),
        weight_init=nn.IIDGaussian(std=0.01))(pool1)
    relu2 = nn.rectify(conv2)
    pool2 = nn.max_pool_2d(relu2, kernelshape=(3,3), stride=(2,2))
    conv3 = nn.SpatialConvolution(32, 64, kernelshape=(5,5), pad=(2,2),
        weight_init=nn.IIDGaussian(std=0.01))(pool2)
    # NOTE(review): stage 3 pools before rectifying (earlier stages do the
    # reverse) — max-pool and relu commute, so results match, but confirm.
    pool3 = nn.max_pool_2d(conv3, kernelshape=(3,3), stride=(2,2))
    relu3 = nn.rectify(pool3)
    # Flatten to (batch, features) for the final affine classifier.
    d0,d1,d2,d3 = relu3.shape
    flatlayer = relu3.reshape([d0,d1*d2*d3])
    nfeats = cgt.infer_shape(flatlayer)[1]
    ip1 = nn.Affine(nfeats, 10)(flatlayer)
    logprobs = nn.logsoftmax(ip1)
    # Mean negative log-likelihood of the true class labels.
    loss = -logprobs[cgt.arange(batchsize), y].mean()

    params = nn.get_parameters(loss)
    updates = rmsprop_updates(loss, params, stepsize=1e-3)
    train = cgt.function(inputs=[X, y], outputs=[loss], updates=updates)

    if args.profile: cgt.profiler.start()

    data = fetch_dataset("http://rll.berkeley.edu/cgt-data/cifar10.npz")
    Xtrain = data["X_train"]
    ytrain = data["y_train"]

    print fmt_row(10, ["Epoch","Train NLL","Train Err","Test NLL","Test Err","Epoch Time"])
    for i_epoch in xrange(args.epochs):
        for start in xrange(0, Xtrain.shape[0], batchsize):
            tstart = time.time()
            end = start+batchsize
            print train(Xtrain[start:end], ytrain[start:end]), time.time()-tstart
            # Demo script: truncate each epoch after a handful of batches.
            if start > batchsize*5: break
        # elapsed = time.time() - tstart
        # trainerr, trainloss = computeloss(Xtrain[:len(Xtest)], ytrain[:len(Xtest)])
        # testerr, testloss = computeloss(Xtest, ytest)
        # print fmt_row(10, [i_epoch, trainloss, trainerr, testloss, testerr, elapsed])
        if args.profile:
            cgt.profiler.print_stats()
            return
        if args.unittest:
            break

if __name__ == "__main__":
    main()
import datetime
import os
import subprocess
import time

import click

from tess.src.compiler import should_be_compiled, compile_cmd
from tess.src.directories import Directory
from tess.src.navigator import list_files, cases_absolute_path, \
    build_absolute_path, solutions_absolute_path, debug_build_absolute_path, \
    debug_solutions_absolute_path
from tess.src.resources import runners_meta
def runnable_files(ctx, args, incomplete):
    """Shell-completion callback: runnable artifacts matching *incomplete*.

    Candidates are everything in the build directory plus solution files
    whose language is interpreted (and therefore runs from source).
    """
    meta = runners_meta()
    candidates = list(list_files(Directory.BUILD))
    for lang in meta['interpreted']:
        candidates.extend(list_files(Directory.SOLUTIONS, lang['extension']))
    return [name for name in candidates if incomplete in name]
def solutions(ctx, args, incomplete):
    """Shell-completion callback: solution files matching *incomplete*."""
    matches = []
    for name in list_files(Directory.SOLUTIONS):
        if incomplete in name:
            matches.append(name)
    return matches
def test_cases(ctx, args, incomplete):
    """Shell-completion callback: test-case files matching *incomplete*."""
    return [name for name in list_files(Directory.CASES) if incomplete in name]
def resolve_runnable_filename(source_filename: str):
    """Map a source file name to the artifact that actually runs.

    C++ sources compile to a bare executable named after the file and
    Java sources to a ``.class`` file; anything else runs as-is.
    """
    stem, extension = os.path.splitext(source_filename)
    if extension in ('.cpp', '.cc'):
        return stem
    if extension == '.java':
        return f'{stem}.class'
    return source_filename
def runner_meta(file):
    """Look up runner metadata for *file* by its extension.

    Returns a copy of the matching entry with a ``'type'`` key added
    ('compiled' or 'interpreted'), ``None`` for extension-less files,
    and raises ``ValueError`` for an unknown extension.

    Bug fix: the original did ``tmp = lang`` (an alias, not a copy) and
    then wrote ``tmp['type']`` into it, mutating the shared structure
    returned by ``runners_meta()`` on every lookup.
    """
    _, ext = os.path.splitext(file)
    if not ext:
        return None
    metadata = runners_meta()
    for kind in ('compiled', 'interpreted'):
        for lang in metadata[kind]:
            if lang['extension'] == ext:
                # Copy so callers cannot corrupt the shared metadata.
                meta = dict(lang)
                meta['type'] = kind
                return meta
    raise ValueError(f'Error running {file}')
def concat_cases_absolute_path(tests: list):
    """Prefix each test-case name with the absolute cases directory."""
    root_dir = cases_absolute_path()
    return [f'{root_dir}/{name}' for name in tests]
def resolve_test_content(tests: list):
    """Read and return the full text of every named test case."""
    contents = []
    for path in concat_cases_absolute_path(tests):
        with open(path, 'r') as handle:
            contents.append(handle.read())
    return contents
def run_timed_subprocess(_args, _input):
    """Run *_args* as a subprocess, feeding *_input* on stdin.

    Returns ``(completed_process, millis)`` where *millis* is the wall
    time of the call rounded to two decimals.

    Improvement: timing now uses ``time.perf_counter()`` instead of
    ``datetime.datetime.now()`` — perf_counter is monotonic and
    high-resolution, so the measurement cannot be skewed by system
    clock adjustments (NTP steps, DST, manual changes).
    """
    start = time.perf_counter()
    output = subprocess.run(_args,
                            input=_input,
                            encoding='utf-8',
                            capture_output=True)
    millis = round((time.perf_counter() - start) * 1000, 2)
    return output, millis
def run_tests(args: list, tests: list):
    """Run *args* once per test case, echoing input, timing and output.

    Prints the runner's stderr when present, otherwise its stdout
    together with the elapsed wall time in milliseconds.
    """
    for case_path in tests:
        with open(case_path, 'r') as case_file:
            stdin_text = case_file.read().rstrip()
        click.echo(f'\n\nTest case: {os.path.basename(case_path)}')
        click.echo(stdin_text)
        result, millis = run_timed_subprocess(args, stdin_text)
        if result.stderr:
            click.echo(f'{result.stderr.strip()}')
        else:
            click.echo(
                f'[Output] Time: {millis}ms\n{result.stdout.strip()}')
def concat_absolute_path(file, meta=None, debug=False):
    """Build the absolute path of *file* in the appropriate directory.

    Compiled artifacts (or files with no metadata) live in the build
    directory; interpreted sources live in the solutions directory.
    *debug* switches both to their debug counterparts.
    """
    compiled_like = not meta or meta['type'] == 'compiled'
    if debug:
        base = debug_build_absolute_path() if compiled_like \
            else debug_solutions_absolute_path()
    else:
        base = build_absolute_path() if compiled_like \
            else solutions_absolute_path()
    return f'{base}/{file}'
def resolve_file_name(file, meta=None, debug=False):
    """Absolute path for *file*, dropping the ``.class`` suffix for Java."""
    stem, extension = os.path.splitext(file)
    target = stem if extension == '.class' else file
    return concat_absolute_path(target, meta, debug)
def runner_args(filename: str, debug=False) -> list:
    """Assemble the argv list used to execute *filename*.

    Files without runner metadata are executed directly.  Otherwise the
    configured runner binary comes first; Java additionally gets a
    ``-cp`` pointing at the (debug) build directory plus the bare class
    name, and any extra metadata flags are appended at the end.
    """
    meta = runner_meta(filename)
    if not meta:
        return [f'{resolve_file_name(filename, None, debug)}']
    resolved = resolve_file_name(filename, meta, debug)
    argv = [f'{meta["runner"]}']
    if meta['runner'] == 'java':
        argv.append('-cp')
        argv.append(debug_build_absolute_path() if debug
                    else build_absolute_path())
        # Java wants only the class name, not the full path.
        argv.append(os.path.split(resolved)[1])
    else:
        argv.append(resolved)
    if 'flags' in meta:
        argv.extend(meta['flags'])
    return argv
def run_solution(file, test, debug=False):
    """Compile *file* if needed, then run it against one or all test cases."""
    if should_be_compiled(file):
        compile_cmd(file, debug)
        # Switch from the source name to the produced artifact's name.
        file = resolve_runnable_filename(file)
    if test:
        selected = concat_cases_absolute_path([test])
    else:
        selected = concat_cases_absolute_path(list_files(Directory.CASES))
    run_tests(runner_args(file, debug), selected)
|
import os
import json
import time
import sys
from .socket_server.server import Server
from .socket_server.server import ReturnCode
from .socket_server.server_app import ServerApp
class DmMockServer(Server):
    """Mock of a DMF (tape-migration) daemon behind a unix socket.

    Each managed file is represented by an "inode" record (a JSON dict)
    persisted under ``~/.DmMockServer/data/<inode>.json``.  Records move
    through the DMF state machine — REG (regular) -> MIG (migrating) ->
    DUL (dual-resident) / OFL (offline) -> UNM (unmigrating) -> DUL —
    with pending transitions completed asynchronously by :meth:`tick`.
    """

    @staticmethod
    def get_socket_file():
        """Absolute path of the unix socket the server listens on."""
        return os.path.join(os.path.expanduser("~"),
                            ".DmMockServer", "DmMockServer.socket")

    @staticmethod
    def get_dm_data_dir():
        """Directory holding one ``<inode>.json`` state file per record."""
        return os.path.join(os.path.expanduser("~"),
                            ".DmMockServer", "data")

    @staticmethod
    def _random_hex(nbytes):
        """Return *nbytes* of randomness as a hex string (Py2 and Py3).

        ``bytes.encode('hex')`` exists only on Python 2 and ``bytes.hex()``
        only on Python 3, so the implementation is picked by version.
        """
        if sys.version_info[0] == 2:
            return os.urandom(nbytes).encode('hex')
        return os.urandom(nbytes).hex()

    def __init__(self, socket_file, **kwargs):
        # NOTE(review): the socket_file argument is accepted but ignored —
        # the server always listens on get_socket_file(); confirm intended.
        kwargs['tick_sec'] = 10  # run tick() every 10 seconds
        super(DmMockServer, self).__init__(DmMockServer.get_socket_file(),
                                           **kwargs)
        self.inodes = {}  # inode number -> state record (JSON dict)
        dirname = DmMockServer.get_dm_data_dir()
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        self.read_data()
        self.default_mig_time = 10    # seconds a MIG transition takes
        self.default_unmig_time = 10  # seconds an UNM transition takes
        self.fhandle = DmMockServer._random_hex(32)
        self.default_owner = 45953

    def read_data(self):
        """Load every persisted ``*.json`` record into ``self.inodes``."""
        for root, dirs, files in os.walk(DmMockServer.get_dm_data_dir()):
            for fname in files:
                # (renamed loop variable: the original shadowed it with
                # the open file handle inside the `with` block)
                if fname.endswith(".json"):
                    ticket_file = os.path.join(root, fname)
                    with open(ticket_file) as fp:
                        data = json.load(fp)
                    self.inodes[data['inode']] = data

    def check_delay(self, inode):
        """Return True once a pending state change has run its course.

        Records without timing fields are considered ready immediately.
        """
        if 'change_time' in inode and 'change_duration' in inode:
            elapsed = int(time.time()) - inode['change_time']
            return elapsed > inode['change_duration']
        return True

    def tick(self):
        """Advance pending MIG/UNM transitions whose delay has elapsed."""
        for k, inode in self.inodes.items():
            if inode['state'] == 'MIG':
                if self.check_delay(inode):
                    # Migrate-with-remove ends offline, otherwise dual.
                    inode['state'] = 'OFL' if inode.get('remove', False) else 'DUL'
                    self.update_inode(inode)
            elif inode['state'] == 'UNM':
                if self.check_delay(inode):
                    inode['state'] = 'DUL'
                    self.update_inode(inode)

    def process(self, code, data):
        """Dispatch one JSON request: op in {ls, get, put, is_in_state}.

        NOTE(review): an unknown ``op`` silently returns None — confirm
        the Server base class tolerates that.
        """
        obj = json.loads(data)
        if obj.get('op') == 'ls':
            return (ReturnCode.OK, self.ls_inode(obj.get('path')))
        elif obj.get('op') == 'get':
            return (ReturnCode.OK, self.get_inode(obj.get('path')))
        elif obj.get('op') == 'put':
            return (ReturnCode.OK, self.put_inode(obj.get('path'),
                                                  obj.get('remove', False)))
        elif obj.get('op') == 'is_in_state':
            return (ReturnCode.OK,
                    self.is_in_state(obj.get('paths'),
                                     obj.get('states')))

    def ls_inode(self, path):
        """Return the record for *path*, or a fresh REG record if unknown."""
        inode = os.stat(path).st_ino
        data_path = os.path.join(DmMockServer.get_dm_data_dir(),
                                 "%d.json" % inode)
        if inode in self.inodes:
            return self.inodes[inode]
        return {'state': 'REG',
                'inode': inode,
                '_path': path,
                '_filename': data_path,
                'bfid': 0,
                'fhandle': self.fhandle,
                'flags': 0,
                'nregn': 0,
                'owner': self.default_owner,
                'projid': 0,
                'sitetag': 0,
                'size': 0,
                'space': 0}

    def get_inode(self, path):
        """Recall *path*: OFL records start unmigrating (UNM)."""
        # Bug fix: the original passed the inode *number* to ls_inode(),
        # whose os.stat() expects a path (on Py3 an int is treated as a
        # file descriptor; on Py2 it raises).  It must receive the path.
        obj = self.ls_inode(path)
        if obj.get('state') == 'MIG':
            # A recall during migration cancels any pending remove.
            obj['remove'] = False
            self.update_inode(obj)
        elif obj.get('state') == 'OFL':
            obj['state'] = 'UNM'
            obj['change_duration'] = self.default_unmig_time
            self.update_inode(obj)
        return obj

    def put_inode(self, path, remove):
        """Migrate *path* to tape; with *remove*, drop the online copy."""
        obj = self.ls_inode(path)
        if obj['state'] == 'REG':
            obj['state'] = 'MIG'
            obj['remove'] = remove
            obj['change_duration'] = self.default_mig_time
            self.generate_bfid(obj)
            self.update_inode(obj)
        elif obj['state'] == 'DUL' and remove:
            obj['state'] = 'OFL'
            self.update_inode(obj)
        return obj

    def generate_bfid(self, obj):
        """Populate the tape-side attributes of a freshly migrated record."""
        # Bug fix: the original used os.urandom(32).encode('hex'), which
        # fails on Python 3 (bytes has no encode / 'hex' codec removed);
        # use the same version-aware helper that __init__ uses for fhandle.
        obj['bfid'] = DmMockServer._random_hex(32)
        obj['emask'] = 17000
        obj['fhandle'] = self.fhandle
        obj['flags'] = 0
        obj['owner'] = self.default_owner
        obj['nregn'] = 1
        obj['projid'] = 0
        obj['sitetag'] = 0
        obj['size'] = os.stat(obj['_path']).st_size
        obj['space'] = 0

    def is_in_state(self, paths, states):
        """True iff every *known* path's record is in one of *states*.

        Paths without a tracked record are ignored rather than failing.
        """
        inodes = [os.stat(path).st_ino for path in paths]
        states = [str(s) for s in states]
        for inode in [self.inodes[ind]
                      for ind in inodes
                      if ind in self.inodes]:
            if not inode['state'] in states:
                return {'is_in_state': False}
        return {'is_in_state': True}

    def update_inode(self, obj):
        """Stamp the change time and persist *obj* to its JSON file."""
        obj['change_time'] = int(time.time())
        self.inodes[obj['inode']] = obj
        data_path = os.path.join(DmMockServer.get_dm_data_dir(),
                                 "%d.json" % obj['inode'])
        with open(data_path, 'w') as fp:
            json.dump(obj, fp)
# Entry point: run the mock server as a standalone socket-server app.
if __name__ == "__main__":
    app = ServerApp(DmMockServer,
                    module='dm_irods.dm_mock_server',
                    socket_file=DmMockServer.get_socket_file())
    app.main()
|
<filename>PyTkGui/widgets/_base.py
# -*- coding: utf-8 -*-
import gc
from tkinter import ttk
from typing import List
from ..utils import get_real_master
class _Base:
    """Abstract base for PyTkGui declarative widget wrappers.

    Manages the declarative option set (pack / configure / iterator /
    condition) and parent-child bookkeeping shared by all widgets.
    Iterator rendering relies heavily on exec/eval against module
    globals: the variable named by ``sequence`` is injected into globals
    while each repetition of the widget is rendered.
    """
    # Nesting depth of enclosing iterator widgets (0 = not inside one).
    iter_num = 0

    def __init__(self, parent, **options):
        self.parent = parent
        self.options = options
        self.configures = {}                    # options applied via set_configure()
        self.pack_options = options.pop("pack", {})
        self.ptg_children:List[_Base] = []      # declarative (PyTkGui) children
        self.component = self.options.pop("component", None)
        # Iterator widgets are repeated once per element of eval(iterator),
        # with each element bound to the name(s) given in `sequence`.
        self.has_iter = "iterator" in self.options.keys()
        self.seq = self.options.pop("sequence", None)
        self.iter = self.options.pop("iterator", None)
        self.ptg_iters = []                     # concrete widgets created per iteration
        self.append_method = self.options.pop("append_method", "append")
        self.insert_idx = self.options.pop("insert_idx", 0)
        # Inherit (and extend, inside an iterator) the nesting depth.
        if hasattr(self.parent, "iter_num"):
            if self.parent.has_iter:
                self.iter_num = self.parent.iter_num + 1
            else:
                self.iter_num = self.parent.iter_num
        else:
            self.iter_num = 0
        # Conditional widgets derender themselves when eval(cond) is falsy.
        self.has_cond = "cond" in self.options.keys()
        self.cond = self.options.pop("cond", None)
        if options.pop("append_to_parent", True) and isinstance(self.parent, _Base):
            self.parent.ptg_children.append(self)

    def set_configure(self, opt_name:str, conf_name:str, default = None):
        """Move option *opt_name* into the configure dict under *conf_name*."""
        self.configures[conf_name] = self.options.pop(opt_name, default)

    def render(self):
        """Apply component state and options; expand iterator repetitions."""
        if self.component is not None:
            # Publish every component data field as a module-level global so
            # iterator/cond expressions can reference it by bare name.
            for name in self.component.data:
                value = getattr(self.component, name)
                exec(f"global {name}; {name} = value")
        if self.has_iter and self.iter is not None:
            # Rebuild all per-iteration widgets from scratch.
            for widget in self.ptg_iters:
                widget.destroy()
            self.ptg_iters.clear()
            if self.parent.has_iter:
                # Nested iterator: render once per parent iteration, with the
                # parent's sequence variable(s) bound for each pass.
                _p_seq_idx = 0
                p_seqs = [ item.strip() for item in self.parent.seq.split(",") ]
                for _sequence in eval(self.parent.iter):
                    exec(f"global {self.parent.seq}; {self.parent.seq} = _sequence")
                    self.render_iters(
                        self.parent.ptg_iters[_p_seq_idx],
                        {
                            seq: eval(seq)
                            for seq in p_seqs
                        }
                    )
                    _p_seq_idx += 1
            else:
                self.render_iters()
        else:
            self.configure(**self.options)
            self.configure(**self.configures)
        if self.has_cond and not eval(self.cond):
            self.derender()

    def render_children(self):
        """Recursively render all declarative children."""
        for child in self.ptg_children:
            child.render()
            child.render_children()

    def render_iters(self, parent = None, parent_seq:dict = {}):
        """Instantiate one concrete widget per element of eval(self.iter).

        NOTE(review): parent_seq uses a shared mutable default; it is only
        read here, but confirm no caller mutates it.
        """
        parent = self.parent if parent is None else parent
        insert_idx = self.destroy()
        if parent.has_iter:
            # Recurse once per iteration of the (iterator) parent.
            _p_seq_idx = 0
            p_seqs = [ item.strip() for item in self.parent.seq.split(",") ]
            for _sequence in eval(self.parent.iter):
                exec(f"global {self.parent.seq}; {self.parent.seq} = _sequence")
                self.render_iters(
                    self.parent.ptg_iters[_p_seq_idx],
                    {
                        seq: eval(seq)
                        for seq in p_seqs
                    }
                )
                _p_seq_idx += 1
        else:
            # Bind the parent's sequence values locally, then create one
            # sibling widget per element of this widget's own iterator.
            for key, value in parent_seq.items():
                exec(f"{key} = value")
            for _sequence in eval(self.iter):
                exec(f"{self.seq} = _sequence")
                # Per-iteration option copy, with f-string interpolation for
                # string options containing '{...}' placeholders.
                options = { key: value for key, value in self.options.items() if not key in ("component", "iterator", "sequence") }
                options["append_to_parent"] = False
                for key, value in options.items():
                    if isinstance(value, str) and ("{" in value and "}" in value):
                        options[key] = eval('f"{}"'.format(value))
                if isinstance(self, Base):
                    # Keep concrete widgets at the template's original slot.
                    options["insert_idx"] = insert_idx
                    options["append_method"] = "insert"
                    insert_idx += 1
                widget = self.__class__(parent, **options)
                self.ptg_iters.append(widget)
                widget.render(**self.pack_options)
                widget.render_children()
            # Drop the temporary sequence bindings again.
            exec(f"del {self.seq}")
            for key in parent_seq.keys():
                exec(f"del {key}")
            gc.collect()

    def configure(self, **options):
        """No-op here; concrete subclasses forward to the tk widget."""
        pass

    def derender(self):
        """No-op here; concrete subclasses hide their widget."""
        pass

    def destroy(self) -> int:
        """No-op here; returns the insertion slot the widget occupied."""
        return 0
class Base(_Base):
    """_Base subclass that owns a real ttk widget and a concrete child list.

    Bridges the declarative layer (_Base) to actual tkinter widgets:
    creation, configure(), pack/pack_forget and destruction.
    """

    def __init__(self, tk_cls, parent, **options):
        options["append_to_parent"] = False
        super().__init__(parent, **options)
        self.tk_cls = tk_cls
        self.children:List[Base] = []  # concrete child widgets
        if isinstance(self.parent, Base):
            # Register with the parent either at the end or at a fixed slot
            # (the latter is used when re-creating iterator repetitions).
            if self.append_method == "append":
                self.parent.children.append(self)
            else:
                self.parent.children.insert(self.insert_idx, self)
        self.widget:ttk.Widget = self.tk_cls(get_real_master(self.parent))

    def configure(self, **options):
        """Forward configuration options to the underlying tk widget."""
        self.widget.configure(**options)

    def update_configures(self, options:dict, extras:dict):
        """Replace options/configures wholesale and re-apply them.

        *extras* maps an option name to either a configure name or a
        ``[configure_name, default]`` pair.
        """
        self.options, self.configures = options, {}
        for opt_name, value in extras.items():
            if isinstance(value, list):
                conf_name, default = value
            else:
                conf_name, default = value, None
            self.set_configure(opt_name, conf_name, default)
        self.configure(**self.options)
        self.configure(**self.configures)

    def clear(self, destroy_me:bool = True):
        """Recursively destroy all child widgets; optionally this one too."""
        for child in self.children:
            child.clear()
        self.children.clear()
        if destroy_me:
            self.widget.destroy()

    def render(self, **render_options):
        """Pack the widget (recreating it if absent) and remember pack opts."""
        # Bug fix: None-checking must use `is None`, not `== None` — `==`
        # dispatches to the operand's __eq__ and is unidiomatic for
        # identity tests.
        if self.widget is None:
            self.widget = self.tk_cls(get_real_master(self.parent))
        self.widget.pack(**render_options)
        self.pack_options = self.widget.pack_info()
        # "in" refers to the old master and cannot be re-packed verbatim.
        self.pack_options.pop("in", None)
        super().render()
        return self.widget

    def render_children(self):
        """Re-render every non-iterator child with its remembered pack opts."""
        super().render_children()
        for child in self.children:
            if not child.has_iter:
                child.derender()
                child.render(**child.pack_options)
                child.render_children()

    def derender(self):
        """Hide the widget without destroying it."""
        self.widget.pack_forget()

    def destroy(self) -> int:
        """Destroy the widget and return the slot it occupied in the parent.

        NOTE(review): assumes self is present in parent.children; a missing
        entry would raise ValueError — confirm callers guarantee this.
        """
        self.widget.destroy()
        insert_idx = self.parent.children.index(self)
        self.parent.children.remove(self)
        return insert_idx
|
<reponame>daniellepintz/torchx<filename>torchx/schedulers/kubernetes_scheduler.py<gh_stars>0
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This contains the TorchX Kubernetes scheduler which can be used to run TorchX
components on a Kubernetes cluster.
Prerequisites
==============
TorchX kubernetes scheduler depends on volcano and requires etcd installed for distributed job execution.
Install volcano 1.4.0 version
.. code:: bash
kubectl apply -f https://raw.githubusercontent.com/volcano-sh/volcano/v1.4.0/installer/volcano-development.yaml
TorchX uses `torch.distributed.run <https://pytorch.org/docs/stable/elastic/run.html>`_ to run distributed training.
This requires the installation of etcd service on your kubernetes cluster:
.. code:: bash
kubectl apply -f https://github.com/pytorch/torchx/blob/main/resources/etcd.yaml
Learn more about running distributed trainers :py:mod:`torchx.components.dist`
"""
import json
import logging
import re
import warnings
from dataclasses import dataclass
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, Iterable, Mapping, Optional, Tuple
import torchx
import yaml
from torchx.schedulers.api import (
AppDryRunInfo,
DescribeAppResponse,
Scheduler,
Stream,
filter_regex,
)
from torchx.schedulers.ids import make_unique
from torchx.specs.api import (
AppDef,
AppState,
CfgVal,
ReplicaState,
ReplicaStatus,
RetryPolicy,
Role,
RoleStatus,
SchedulerBackend,
macros,
runopts,
)
from torchx.workspace.docker_workspace import DockerWorkspace
if TYPE_CHECKING:
from docker import DockerClient
from kubernetes.client import ApiClient, CustomObjectsApi
from kubernetes.client.models import ( # noqa: F401 imported but unused
V1Pod,
V1PodSpec,
V1Container,
V1EnvVar,
V1ResourceRequirements,
V1ContainerPort,
)
from kubernetes.client.rest import ApiException
logger: logging.Logger = logging.getLogger(__name__)

# Volcano job-level retry policies, keyed by TorchX RetryPolicy.
RETRY_POLICIES: Mapping[str, Iterable[Mapping[str, str]]] = {
    RetryPolicy.REPLICA: [],
    RetryPolicy.APPLICATION: [
        {"event": "PodEvicted", "action": "RestartJob"},
        {"event": "PodFailed", "action": "RestartJob"},
    ],
}

# Mapping of volcano job phases to TorchX application states.
JOB_STATE: Dict[str, AppState] = {
    # Pending is the phase that job is pending in the queue, waiting for
    # scheduling decision
    "Pending": AppState.PENDING,
    # Aborting is the phase that job is aborted, waiting for releasing pods
    "Aborting": AppState.RUNNING,
    # Aborted is the phase that job is aborted by user or error handling
    "Aborted": AppState.CANCELLED,
    # Running is the phase that minimal available tasks of Job are running
    "Running": AppState.RUNNING,
    # Restarting is the phase that the Job is restarted, waiting for pod
    # releasing and recreating
    "Restarting": AppState.RUNNING,
    # Completed is the phase that all tasks of Job are completed successfully
    "Completed": AppState.SUCCEEDED,
    # Terminating is the phase that the Job is terminated, waiting for releasing
    # pods
    "Terminating": AppState.RUNNING,
    # Terminated is the phase that the job is finished unexpectedly, e.g. events
    "Terminated": AppState.FAILED,
    # Bug fix: this table is declared Dict[str, AppState]; the original
    # mapped "Failed" to ReplicaState.FAILED — the wrong enum.
    "Failed": AppState.FAILED,
}

# Mapping of volcano/kubernetes task (pod) phases to TorchX replica states.
TASK_STATE: Dict[str, ReplicaState] = {
    # Pending means the task is pending in the apiserver.
    "Pending": ReplicaState.PENDING,
    # Allocated means the scheduler assigns a host to it.
    "Allocated": ReplicaState.PENDING,
    # Pipelined means the scheduler assigns a host to wait for releasing
    # resource.
    "Pipelined": ReplicaState.PENDING,
    # Binding means the scheduler send Bind request to apiserver.
    "Binding": ReplicaState.PENDING,
    # Bound means the task/Pod bounds to a host.
    "Bound": ReplicaState.PENDING,
    # Running means a task is running on the host.
    "Running": ReplicaState.RUNNING,
    # Releasing means a task/pod is deleted.
    "Releasing": ReplicaState.RUNNING,
    # Succeeded means that all containers in the pod have voluntarily
    # terminated with a container exit code of 0, and the system is not
    # going to restart any of these containers.
    "Succeeded": ReplicaState.SUCCEEDED,
    # Failed means that all containers in the pod have terminated, and at
    # least one container has terminated in a failure (exited with a
    # non-zero exit code or was stopped by the system).
    "Failed": ReplicaState.FAILED,
    # Unknown means the status of task/pod is unknown to the scheduler.
    "Unknown": ReplicaState.UNKNOWN,
}

# Labels TorchX stamps onto every pod it creates, for discovery/describe.
LABEL_VERSION = "torchx.pytorch.org/version"
LABEL_APP_NAME = "torchx.pytorch.org/app-name"
LABEL_ROLE_INDEX = "torchx.pytorch.org/role-index"
LABEL_ROLE_NAME = "torchx.pytorch.org/role-name"
LABEL_REPLICA_ID = "torchx.pytorch.org/replica-id"
# Annotation used to opt pods out of istio sidecar injection.
ANNOTATION_ISTIO_SIDECAR = "sidecar.istio.io/inject"
def sanitize_for_serialization(obj: object) -> object:
    """Convert a kubernetes client model object into plain JSON-serializable data."""
    from kubernetes import client

    return client.ApiClient().sanitize_for_serialization(obj)
def role_to_pod(name: str, role: Role, service_account: Optional[str]) -> "V1Pod":
    """Build the ``V1Pod`` template for a single replica of ``role``.

    Resource requests/limits are derived from the role's Resource; env vars
    and ports are copied from the role. The istio sidecar is disabled via
    annotation since it would keep finished pods alive.
    """
    from kubernetes.client.models import (  # noqa: F811 redefinition of unused
        V1Pod,
        V1PodSpec,
        V1Container,
        V1EnvVar,
        V1ResourceRequirements,
        V1ContainerPort,
        V1ObjectMeta,
    )

    res = role.resource
    limits = {}
    if res.cpu >= 0:
        limits["cpu"] = f"{int(res.cpu * 1000)}m"
    if res.memMB >= 0:
        limits["memory"] = f"{int(res.memMB)}M"
    if res.gpu >= 0:
        limits["nvidia.com/gpu"] = str(res.gpu)

    env_vars = [V1EnvVar(name=k, value=v) for k, v in role.env.items()]
    ports = [
        V1ContainerPort(name=port_name, container_port=port_no)
        for port_name, port_no in role.port_map.items()
    ]

    container = V1Container(
        command=[role.entrypoint] + role.args,
        image=role.image,
        name=name,
        env=env_vars,
        # requests == limits: ask for exactly what we are capped at
        resources=V1ResourceRequirements(limits=limits, requests=limits),
        ports=ports,
    )

    return V1Pod(
        spec=V1PodSpec(
            containers=[container],
            restart_policy="Never",
            service_account_name=service_account,
        ),
        metadata=V1ObjectMeta(
            annotations={
                # Disable the istio sidecar as it prevents the containers from
                # exiting once finished.
                ANNOTATION_ISTIO_SIDECAR: "false",
            },
            labels={},
        ),
    )
def cleanup_str(data: str) -> str:
    """
    Invokes ``lower`` on the string and removes all
    characters that do not satisfy the ``[a-z0-9-]`` pattern, then strips
    any leading ``-`` characters (kubernetes names may not start with one).

    This method is mostly used to make sure kubernetes scheduler gets
    the job name that does not violate its validation.
    """
    pattern = r"[a-z0-9\-]"
    # strip ALL leading dashes after filtering: the original stripped only a
    # single leading "-" from the raw input, so inputs like "--foo" or
    # "_-foo" still produced names starting with "-"
    return "".join(re.findall(pattern, data.lower())).lstrip("-")
def app_to_resource(
    app: AppDef, queue: str, service_account: Optional[str]
) -> Dict[str, object]:
    """
    app_to_resource creates a volcano job kubernetes resource definition from
    the provided AppDef. The resource definition can be used to launch the
    app on Kubernetes.

    To support macros we generate one task per replica instead of using the
    volcano `replicas` field since macros change the arguments on a per
    replica basis.

    Volcano has two levels of retries: one at the task level and one at the
    job level. When using the APPLICATION retry policy, the job level retry
    count is set to the minimum of the max_retries of the roles.
    """
    tasks = []
    # id must be unique per submission and valid as a kubernetes name
    unique_app_id = cleanup_str(make_unique(app.name))
    for role_idx, role in enumerate(app.roles):
        for replica_id in range(role.num_replicas):
            # substitution values for macros (e.g. replica_id) in the role;
            # VC_<TASK>_0_HOSTS is the env var the Volcano svc plugin uses to
            # publish each task's hostnames -- presumably how peers find rank0
            values = macros.Values(
                img_root="",
                app_id=unique_app_id,
                replica_id=str(replica_id),
                rank0_env=f"VC_{cleanup_str(app.roles[0].name)}_0_HOSTS".upper(),
            )
            if role_idx == 0 and replica_id == 0:
                # rank0 itself reads a plain env var instead (set below)
                values.rank0_env = "TORCHX_RANK0_HOST"
            name = cleanup_str(f"{role.name}-{replica_id}")
            replica_role = values.apply(role)
            if role_idx == 0 and replica_id == 0:
                replica_role.env["TORCHX_RANK0_HOST"] = "localhost"

            pod = role_to_pod(name, replica_role, service_account)
            pod.metadata.labels.update(pod_labels(app, role_idx, role, replica_id))
            # one Volcano task per replica (replicas=1) so per-replica macro
            # substitution above can take effect
            task: Dict[str, Any] = {
                "replicas": 1,
                "name": name,
                "template": pod,
            }
            if role.max_retries > 0:
                task["maxRetry"] = role.max_retries
                task["policies"] = RETRY_POLICIES[role.retry_policy]
                msg = f"""
Role {role.name} configured with restarts: {role.max_retries}. As of 1.4.0 Volcano
does NOT support retries correctly. More info: https://github.com/volcano-sh/volcano/issues/1651
"""
                warnings.warn(msg)
            tasks.append(task)

    # job-level retry budget is bounded by the least retry-tolerant role
    job_retries = min(role.max_retries for role in app.roles)
    resource: Dict[str, object] = {
        "apiVersion": "batch.volcano.sh/v1alpha1",
        "kind": "Job",
        "metadata": {"name": f"{unique_app_id}"},
        "spec": {
            "schedulerName": "volcano",
            "queue": queue,
            "tasks": tasks,
            "maxRetry": job_retries,
            "plugins": {
                # https://github.com/volcano-sh/volcano/issues/533
                "svc": ["--publish-not-ready-addresses"],
                "env": [],
            },
        },
    }
    return resource
@dataclass
class KubernetesJob:
    """Dryrun request payload: the Volcano Job resource plus images to push."""

    images_to_push: Dict[str, Tuple[str, str]]
    resource: Dict[str, object]

    def __str__(self) -> str:
        return yaml.dump(sanitize_for_serialization(self.resource))

    def __repr__(self) -> str:
        return self.__str__()
class KubernetesScheduler(Scheduler, DockerWorkspace):
    """
    KubernetesScheduler is a TorchX scheduling interface to Kubernetes.

    Important: Volcano is required to be installed on the Kubernetes cluster.
    TorchX requires gang scheduling for multi-replica/multi-role execution
    and Volcano is currently the only supported scheduler with Kubernetes.
    For installation instructions see: https://github.com/volcano-sh/volcano

    This has been confirmed to work with Volcano v1.3.0 and Kubernetes versions
    v1.18-1.21. See https://github.com/pytorch/torchx/issues/120 which is
    tracking Volcano support for Kubernetes v1.22.

    .. note::

        AppDefs that have more than 0 retries may not be displayed as pods if they failed.
        This occurs due to known bug in Volcano(as per 1.4.0 release):
        https://github.com/volcano-sh/volcano/issues/1651

    .. code-block:: bash

        $ pip install torchx[kubernetes]
        $ torchx run --scheduler kubernetes --scheduler_args namespace=default,queue=test utils.echo --image alpine:latest --msg hello
        kubernetes://torchx_user/1234
        $ torchx status kubernetes://torchx_user/1234
        ...

    **Config Options**

    .. runopts::
        class: torchx.schedulers.kubernetes_scheduler.KubernetesScheduler

    **Compatibility**

    .. compatibility::
        type: scheduler
        features:
            cancel: true
            logs: true
            distributed: true
            describe: |
                Partial support. KubernetesScheduler will return job and replica
                status but does not provide the complete original AppSpec.
            workspaces: true
    """

    def __init__(
        self,
        session_name: str,
        client: Optional["ApiClient"] = None,
        docker_client: Optional["DockerClient"] = None,
    ) -> None:
        # Both clients are injectable (for tests); real clients are created
        # lazily on first use when left as None.
        Scheduler.__init__(self, "kubernetes", session_name)
        DockerWorkspace.__init__(self, docker_client)

        self._client = client

    def _api_client(self) -> "ApiClient":
        """Lazily create and cache the kubernetes ``ApiClient`` from kube config."""
        from kubernetes import client, config

        c = self._client
        if c is None:
            configuration = client.Configuration()
            try:
                config.load_kube_config(client_configuration=configuration)
            except config.ConfigException as e:
                # best effort: fall back to whatever default config gives us
                warnings.warn(f"failed to load kube config: {e}")

            c = self._client = client.ApiClient(configuration)
        return c

    def _custom_objects_api(self) -> "CustomObjectsApi":
        """Return a ``CustomObjectsApi`` bound to the cached api client."""
        from kubernetes import client

        return client.CustomObjectsApi(self._api_client())

    def _get_job_name_from_exception(self, e: "ApiException") -> Optional[str]:
        """Best-effort extraction of the conflicting job name from an ApiException."""
        try:
            return json.loads(e.body)["details"]["name"]
        except Exception as e:
            # fix: the original passed `e` with no %s placeholder in the
            # message, which makes the logging module emit a formatting error
            # instead of the exception text
            logger.exception("Unable to retrieve job name, got exception: %s", e)
            return None

    def schedule(self, dryrun_info: AppDryRunInfo[KubernetesJob]) -> str:
        """Push any patched images, then create the Volcano Job.

        Returns the app id in ``<namespace>:<job name>`` form.

        Raises:
            ValueError: if a job with the same name already exists (409).
        """
        from kubernetes.client.rest import ApiException

        cfg = dryrun_info._cfg
        assert cfg is not None, f"{dryrun_info} missing cfg"
        namespace = cfg.get("namespace") or "default"

        images_to_push = dryrun_info.request.images_to_push
        self._push_images(images_to_push)

        resource = dryrun_info.request.resource
        try:
            resp = self._custom_objects_api().create_namespaced_custom_object(
                group="batch.volcano.sh",
                version="v1alpha1",
                namespace=namespace,
                plural="jobs",
                body=resource,
            )
        except ApiException as e:
            if e.status == 409 and e.reason == "Conflict":
                job_name = self._get_job_name_from_exception(e)
                raise ValueError(
                    f"Job `{job_name}` already exists. This seems like a transient exception, try resubmitting job"
                ) from e
            else:
                raise

        return f'{namespace}:{resp["metadata"]["name"]}'

    def _submit_dryrun(
        self, app: AppDef, cfg: Mapping[str, CfgVal]
    ) -> AppDryRunInfo[KubernetesJob]:
        """Build the KubernetesJob request without creating any cluster objects."""
        queue = cfg.get("queue")
        if not isinstance(queue, str):
            raise TypeError(f"config value 'queue' must be a string, got {queue}")

        # map any local images to the remote image
        images_to_push = self._update_app_images(app, cfg)

        service_account = cfg.get("service_account")
        assert service_account is None or isinstance(
            service_account, str
        ), "service_account must be a str"

        resource = app_to_resource(app, queue, service_account)
        req = KubernetesJob(
            resource=resource,
            images_to_push=images_to_push,
        )
        info = AppDryRunInfo(req, repr)
        info._app = app
        info._cfg = cfg
        return info

    def _validate(self, app: AppDef, scheduler: SchedulerBackend) -> None:
        # Skip validation step
        pass

    def _cancel_existing(self, app_id: str) -> None:
        """Delete the Volcano Job backing ``app_id`` (``namespace:name``)."""
        namespace, name = app_id.split(":")
        self._custom_objects_api().delete_namespaced_custom_object(
            group="batch.volcano.sh",
            version="v1alpha1",
            namespace=namespace,
            plural="jobs",
            name=name,
        )

    def run_opts(self) -> runopts:
        """Declare the scheduler_args accepted by this scheduler."""
        opts = runopts()
        opts.add(
            "namespace",
            type_=str,
            help="Kubernetes namespace to schedule job in",
            default="default",
        )
        opts.add(
            "queue",
            type_=str,
            help="Volcano queue to schedule job in",
            required=True,
        )
        opts.add(
            "image_repo",
            type_=str,
            help="The image repository to use when pushing patched images, must have push access. Ex: example.com/your/container",
        )
        opts.add(
            "service_account",
            type_=str,
            help="The service account name to set on the pod specs",
        )
        return opts

    def describe(self, app_id: str) -> Optional[DescribeAppResponse]:
        """Query the Volcano Job status and map it to a ``DescribeAppResponse``."""
        namespace, name = app_id.split(":")

        roles = {}
        roles_statuses = {}
        resp = self._custom_objects_api().get_namespaced_custom_object_status(
            group="batch.volcano.sh",
            version="v1alpha1",
            namespace=namespace,
            plural="jobs",
            name=name,
        )
        status = resp.get("status")
        if status:
            app_state = JOB_STATE[status["state"]["phase"]]

            TASK_STATUS_COUNT = "taskStatusCount"
            if TASK_STATUS_COUNT in status:
                # fix: use distinct loop variables; the original reused `name`
                # and `status` as loop targets, shadowing the very objects
                # being iterated
                for task_name, task_status in status[TASK_STATUS_COUNT].items():
                    # task names are "<role>-<replica idx>" (see app_to_resource)
                    role, _, idx = task_name.rpartition("-")

                    state_str = next(iter(task_status["phase"].keys()))
                    state = TASK_STATE[state_str]

                    if role not in roles:
                        roles[role] = Role(name=role, num_replicas=0, image="")
                        roles_statuses[role] = RoleStatus(role, [])
                    roles[role].num_replicas += 1
                    roles_statuses[role].replicas.append(
                        ReplicaStatus(id=int(idx), role=role, state=state, hostname="")
                    )
        else:
            app_state = AppState.UNKNOWN

        return DescribeAppResponse(
            app_id=app_id,
            roles=list(roles.values()),
            roles_statuses=list(roles_statuses.values()),
            state=app_state,
        )

    def log_iter(
        self,
        app_id: str,
        role_name: str,
        k: int = 0,
        regex: Optional[str] = None,
        since: Optional[datetime] = None,
        until: Optional[datetime] = None,
        should_tail: bool = False,
        streams: Optional[Stream] = None,
    ) -> Iterable[str]:
        """Fetch (or tail) the combined log stream of replica ``k`` of ``role_name``."""
        assert until is None, "kubernetes API doesn't support until"

        if streams not in (None, Stream.COMBINED):
            raise ValueError("KubernetesScheduler only supports COMBINED log stream")

        from kubernetes import client, watch

        namespace, name = app_id.split(":")

        # pod name matches the task template generated in app_to_resource:
        # <job>-<role>-<replica>-0
        pod_name = cleanup_str(f"{name}-{role_name}-{k}-0")

        args: Dict[str, object] = {
            "name": pod_name,
            "namespace": namespace,
            "timestamps": True,
        }
        if since is not None:
            # fix: the kubernetes API expects an integer number of seconds,
            # total_seconds() returns a float
            args["since_seconds"] = int((datetime.now() - since).total_seconds())

        core_api = client.CoreV1Api(self._api_client())
        if should_tail:
            w = watch.Watch()
            iterator = w.stream(core_api.read_namespaced_pod_log, **args)
        else:
            resp = core_api.read_namespaced_pod_log(**args)
            iterator = resp.strip().split("\n")

        if regex:
            return filter_regex(regex, iterator)
        else:
            return iterator
def create_scheduler(session_name: str, **kwargs: Any) -> KubernetesScheduler:
    """Factory entry point used by the TorchX scheduler registry.

    Extra keyword arguments are accepted for registry compatibility but ignored.
    """
    return KubernetesScheduler(session_name=session_name)
def pod_labels(
    app: AppDef, role_idx: int, role: Role, replica_id: int
) -> Dict[str, str]:
    """Return the standard torchx identification labels for a replica's pod."""
    labels: Dict[str, str] = {}
    labels[LABEL_VERSION] = torchx.__version__
    labels[LABEL_APP_NAME] = app.name
    labels[LABEL_ROLE_INDEX] = str(role_idx)
    labels[LABEL_ROLE_NAME] = role.name
    labels[LABEL_REPLICA_ID] = str(replica_id)
    return labels
|
import tensorflow
import numpy as np
import cv2
import random
from game import move_conv, find_winner
import time
# Rock-Paper-Scissors against a CNN classifier, played via webcam.
#
# Fixes over the original:
#  * the builtin `exit` was rebound to a bool and later *called* as exit(0),
#    raising TypeError when 'q' was pressed mid-round; quitting now goes
#    through a single `quit_requested` flag
#  * frames are validated (ret) before being flipped/drawn on
#  * dead code removed (unused outer-loop prediction, unused `data` buffer,
#    commented-out normalization block, unused `result` image)

np.set_printoptions(suppress=True)

# classifier trained on 250x250 RGB crops of the hand region
model = tensorflow.keras.models.load_model("RPS-model.h5")

# 0_Rock 1_Paper 2_Scissors 3_YourTurn
s = ["images/0.png", "images/1.png", "images/2.png", "images/3.jfif"]

cap = cv2.VideoCapture(0)  # default webcam

round_active = False    # True while a countdown/round is in progress
quit_requested = False  # set when the player presses 'q'
you = 0                 # player's score
ai = 0                  # A.I.'s score

while True:
    font = cv2.FONT_HERSHEY_SIMPLEX
    ret, frame = cap.read()
    if not ret:
        continue
    frame = cv2.flip(frame, 1)
    frame = cv2.rectangle(frame, (320, 100), (570, 350), (0, 0, 255), 3)

    winner = "None"
    ai_frame = cv2.imread(s[3])
    start = time.time()
    gate = 1  # 1 until the A.I. commits its move for the current round

    window_width = 1200
    window_height = 820
    cv2.namedWindow('Frame', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Frame', window_width, window_height)

    while True:
        check = time.time() - start
        ret, frame = cap.read()
        if not ret:
            continue
        frame = cv2.flip(frame, 1)
        frame = cv2.rectangle(frame, (320, 100), (570, 350), (0, 0, 255), 3)

        # scoreboard in the top-left corner
        cv2.putText(frame, "------", (3, 87), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, "You : {}".format(you), (25, 117), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, "A.I : {}".format(ai), (45, 157), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, "------", (3, 187), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
        for i in range(112, 192, 50):
            cv2.putText(frame, "|", (155, i), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(frame, "|", (0, i), font, 1, (0, 0, 255), 2, cv2.LINE_AA)

        if round_active:
            cv2.imshow('Frame', frame)
            if check < 3:
                # countdown before the player's move is captured
                cv2.putText(frame, "Deliver in {}".format(3 - int(check)),
                            (365, 300), font, 1, (0, 255, 255), 2, cv2.LINE_AA)
            elif gate == 1:
                # countdown done: A.I. picks and shows its move (once per round)
                t = random.choice([0, 1, 2])
                computer_move_name = move_conv(t)
                ai_frame = cv2.imread(s[t])
                cv2.imshow("A.I move", ai_frame)
                gate = 0
            else:
                # classify the player's hand from the red box region and score
                roi = frame[100:350, 320:570]
                image = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
                image = cv2.resize(image, (250, 250))
                pred = model.predict(np.array([image]))
                print(pred)
                move_code = np.argmax(pred[0])
                print(move_code)
                user_move_name = move_conv(move_code)
                if user_move_name == "none":
                    ai_frame = cv2.imread(s[3])
                else:
                    result = find_winner(user_move_name, computer_move_name)
                    if result == 0:
                        ai = ai + 1
                        winner = "A.I"
                    elif result == 1:
                        you = you + 1
                        winner = "Y.O.U"
                    else:
                        winner = "TIE"
                    cv2.putText(frame, "Winner : ", (350, 385), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
                    cv2.putText(frame, winner, (480, 385), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
                    print("user :" + user_move_name + " A.I :" + computer_move_name + " Winner:" + winner)
                round_active = False

        if not round_active:
            # idle screen: last winner plus play/quit prompts
            cv2.putText(frame, "Winner : ", (350, 385), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(frame, winner, (480, 385), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(frame, "Press S to Play", (320, 210), font, 1, (0, 255, 255), 2, cv2.LINE_AA)
            cv2.putText(frame, "Press Q to quit", (40, 445), font, 1, (0, 255, 255), 2, cv2.LINE_AA)
            cv2.imshow('Frame', frame)
            key = cv2.waitKey(1) & 0xff
            if key == ord('s'):
                round_active = True
                start = time.time()
                gate = 1
                break
            if key == ord('q'):
                quit_requested = True
                break

        if cv2.waitKey(1) & 0xff == ord('q'):
            quit_requested = True
            break

    if quit_requested:
        break
    cv2.imshow('Frame', frame)
    cv2.imshow("A.I move", ai_frame)

cap.release()
cv2.destroyAllWindows()
|
<reponame>GMDennis/claf
from overrides import overrides
import torch
from torch.autograd import Variable
from claf.data import utils
class PadCollator:
    """
    Collator apply pad and make tensor
    Minimizes amount of padding needed while producing mini-batch.

    * Kwargs:
        cuda_device_id: tensor assign to cuda device id
            Default is None (CPU)
        pad_value: value used when padding token sequences (default: 0)
        skip_keys: skip to make tensor (default: ["text"])
    """

    def __init__(self, cuda_device_id=None, pad_value=0, skip_keys=None):
        self.cuda_device_id = cuda_device_id
        self.pad_value = pad_value
        # bug fix: the previous literal default (skip_keys=["text"]) was a
        # mutable default argument shared across every instance; build a
        # fresh list per instance instead.
        self.skip_keys = ["text"] if skip_keys is None else skip_keys

    def __call__(self, features, labels):
        self.collate(features, pad_value=self.pad_value)
        self.collate(labels, apply_pad=False, pad_value=self.pad_value)
        return utils.make_batch(features, labels)

    def collate(self, datas, apply_pad=True, pad_value=0):
        """Pad (optionally) and tensorize every entry of ``datas`` in place."""
        for data_name, data in datas.items():
            if isinstance(data, dict):
                for key, value in data.items():
                    data[key] = self._collate(
                        value, apply_pad=apply_pad, token_name=key, pad_value=pad_value)
            else:
                # bug fix: pad_value was previously dropped on this branch,
                # so non-dict data was always padded with 0 regardless of the
                # configured pad_value (the subclass already forwarded it).
                datas[data_name] = self._collate(
                    data, apply_pad=apply_pad, pad_value=pad_value)

    def _collate(self, value, apply_pad=True, token_name=None, pad_value=0):
        # pad first (optional), then convert to a tensor
        if apply_pad:
            value = self._apply_pad(value, token_name=token_name, pad_value=pad_value)
        return self._make_tensor(value)

    def _apply_pad(self, value, token_name=None, pad_value=0):
        return utils.padding_tokens(value, token_name=token_name, pad_value=pad_value)

    def _make_tensor(self, value):
        """Convert ``value`` to a Long/Float tensor and move it to GPU if configured."""
        if not isinstance(value, torch.Tensor):
            value_type = utils.get_token_type(value)
            if value_type == int:
                value = torch.LongTensor(value)
            else:
                value = torch.FloatTensor(value)
        value = Variable(value, requires_grad=False)
        if self.cuda_device_id is not None:
            value = value.cuda(self.cuda_device_id)
        return value
class FeatLabelPadCollator(PadCollator):
    """
    Collator apply pad and make tensor
    Minimizes amount of padding needed while producing mini-batch.
    FeatLabelPadCollator allows applying pad to not only features, but also labels.

    * Kwargs:
        cuda_device_id: tensor assign to cuda device id
            Default is None (CPU)
        skip_keys: skip to make tensor
    """

    @overrides
    def __call__(self, features, labels, apply_pad_labels=(), apply_pad_values=()):
        self.collate(features)
        self.collate(
            labels,
            apply_pad=False,
            apply_pad_labels=apply_pad_labels,
            apply_pad_values=apply_pad_values,
        )
        return utils.make_batch(features, labels)

    @overrides
    def collate(self, datas, apply_pad=True, apply_pad_labels=(), apply_pad_values=()):
        for data_name, data in datas.items():
            # Labels listed in apply_pad_labels are padded even when
            # apply_pad is False, using their positionally paired pad value.
            if not apply_pad and data_name in apply_pad_labels:
                use_pad = True  # ignore apply_pad
                value_for_pad = apply_pad_values[apply_pad_labels.index(data_name)]
            else:
                use_pad = apply_pad
                value_for_pad = 0

            if isinstance(data, dict):
                for key, token_value in data.items():
                    data[key] = self._collate(
                        token_value,
                        apply_pad=use_pad,
                        token_name=key,
                        pad_value=value_for_pad,
                    )
            else:
                datas[data_name] = self._collate(
                    data, apply_pad=use_pad, pad_value=value_for_pad
                )
|
# -*- coding: utf-8 -*-
# pylint: disable-msg = W0613, W0622, W0704
#
# Copyright 2004-2006 <NAME> or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities
=========
This module contains some utility functions and classes used in several
places of the svnmailer. These functions have a quite general character
and can be used easily outside of the svnmailer as well.
"""
__author__ = "<NAME>"
__docformat__ = "epytext en"

# Public API of this module; getPipe2 and the _Dummy*/_LocaleFile helpers
# are deliberately kept private.
__all__ = [
    'TempFile',
    'getPipe4',
    'getSuitableCommandLine',
    'splitCommand',
    'filename',
    'extractX509User',
    'substitute',
    'filterForXml',
    'getParentDirList',
    'getGlobValue',
    'parseQuery',
    'modifyQuery',
    'inherit',
    'commonPaths',
    'ReadOnlyDict',
    'SafeDict',
]

# global imports
import locale, os, sys
class TempFile(object):
    """ Tempfile container class

        Unlike the wrappers in the C{tempfile} module, the created file is
        removed when this object is destroyed, not when the file is closed.

        The mode is fixed to C{w+}; a C{b} is added if the C{text}
        argument is false (see C{__init__})

        @cvar name: C{None}
        @ivar name: The full name of the file
        @type name: C{str}

        @cvar fp: C{None}
        @ivar fp: The file descriptor
        @type fp: file like object

        @cvar _unlink: C{None}
        @ivar _unlink: C{os.unlink}
        @type _unlink: callable
    """
    name = None
    fp = None
    _unlink = None

    def __init__(self, tempdir = None, text = False):
        """ Initialization

            @param tempdir: The temporary directory
            @type tempdir: C{str}

            @param text: want to write text?
            @type text: C{bool}
        """
        import tempfile

        # keep a direct reference: the os module may already be torn
        # down when __del__ runs at interpreter shutdown
        self._unlink = os.unlink
        fd, self.name = tempfile.mkstemp(dir = tempdir, text = text)
        mode = "w+" if text else "w+b"
        self.fp = os.fdopen(fd, mode)

    def __del__(self):
        """ Unlink the file name """
        fp = self.fp
        if fp:
            try:
                fp.close()
            except ValueError:
                pass  # already closed - fine
        if self.name and self._unlink:
            try:
                self._unlink(self.name)
            except OSError:
                pass  # already gone or not removable - ignore

    def close(self):
        """ Close the file (but don't delete it)

            @exception ValueError: The file was already closed
        """
        if self.fp:
            self.fp.close()
def getPipe2(command):
    """ Returns a pipe object (C{Popen3} or C{_DummyPopen3} on win32)

        @param command: The command list (the first item is the command
            itself, the rest represents the arguments)
        @type command: C{list}

        @return: The pipe object
        @rtype: C{popen2.Popen3} or C{_DummyPopen3}
    """
    import popen2

    # platforms whose popen2 lacks Popen3 (win32) get the dummy fallback
    cls = getattr(popen2, "Popen3", _DummyPopen3)
    return cls(getSuitableCommandLine(command))
def getPipe4(command):
    """ Returns a pipe object (C{Popen4} or C{_DummyPopen4} on win32)

        @param command: The command list (the first item is the command
            itself, the rest represents the arguments)
        @type command: C{list}

        @return: The pipe object
        @rtype: C{popen2.Popen4} or C{_DummyPopen4}
    """
    import popen2

    # platforms whose popen2 lacks Popen4 (win32) get the dummy fallback
    cls = getattr(popen2, "Popen4", _DummyPopen4)
    return cls(getSuitableCommandLine(command))
class _DummyPopen4(object):
""" Dummy Popen4 class for platforms which don't provide one in popen2 """
def __init__(self, cmd, bufsize = -1):
""" Initialization """
bufsize = -1 # otherwise error on win32
self.tochild, self.fromchild = os.popen4(cmd, 'b', bufsize)
def wait(self):
""" Dummy wait """
return 0
class _DummyPopen3(object):
""" Dummy Popen3 class for platforms which don't provide one in popen2 """
def __init__(self, cmd, capturestderr = False, bufsize = -1):
""" Initialization """
bufsize = -1 # otherwise error on win32
capturestderr = False # we don't do this on win32
self.tochild, self.fromchild = os.popen2(cmd, 'b', bufsize)
self.childerr = None
def wait(self):
""" Dummy wait """
return 0
def getSuitableCommandLine(command, _platform = None):
    """ Return the revised command suitable for being exec'd

        On Win32 the shell is invoked, so the command is escaped and joined
        into a single string there; on every other platform the list is
        returned untouched.

        @note: This is more or less the same as the stuff in
            svn.fs._escape_msvcrt_shell_command/arg. But it
            belongs somewhere else - e.g. into a util module...
            Perhaps once a day the whole package goes directly
            into the subversion distribution and then it's all
            cool.

        @param command: The command to escape
        @type command: C{list}

        @param _platform: A platform string (for testing purposes only)
        @type _platform: C{str}

        @return: The escaped command string or the original list
        @rtype: C{str} or C{list}
    """
    platform = _platform or sys.platform
    if platform != "win32":
        return command

    # lazily compiled, cached on the function object
    slashre = getattr(getSuitableCommandLine, "_slashre", None)
    if slashre is None:
        import re
        slashre = getSuitableCommandLine._slashre = re.compile(r'(\\+)("|$)')

    # What we do here is:
    # (1) double up slashes, but only before quotes or the string end
    #     (since we surround it by quotes afterwards)
    # (2) Escape " as "^""
    #     This means "string end", "Escaped quote", "string begin" in that
    #     order
    # (See also http://www.microsoft.com/technet/archive/winntas
    # /deploy/prodspecs/shellscr.mspx)
    #
    # cmd strips one outer pair of quotes while parsing, so the whole
    # command line gets an extra surrounding pair.
    escaped = [
        '"%s"' % slashre.sub(r'\1\1\2', arg).replace('"', '"^""')
        for arg in command
    ]
    return '"%s"' % " ".join(escaped)
def splitCommand(command):
    r"""Split a command string with respect to quotes and such

        The command string consists of several tokens:
          - whitespace: separators, except inside quoted items
          - unquoted items: every token that doesn't start with
            a double quote (")
          - quoted items: every token that starts with a double quote (").
            Those items must be closed with a double quote and may contain
            whitespaces. The enclosing quotes are stripped. To put a double
            quote character inside such a token, it has to be escaped with
            a backslash (\). Therefore - backslashes themselves have to be
            escaped as well. The escapes are also stripped from the result.

        Here's an example: C{r'foo bar "baz" "zo\"" "\\nk"'} resolves
        to C{['foo', 'bar', 'baz', 'zo"', r'\nk']}

        @param command: The command string
        @type command: C{str}

        @return: The splitted command
        @rtype: C{list}

        @exception ValueError: The command string is not valid
            (unclosed quote or the like)
    """
    # regexps are compiled once and cached on the function object
    regexps = getattr(splitCommand, "_regexps", None)
    if regexps is None:
        import re
        argpat = r'[^"\s]\S*|"[^\\"]*(?:\\[\\"][^\\"]*)*"'
        checkpat = r'\s*(?:%(arg)s)(?:\s+(?:%(arg)s))*\s*$' % {'arg': argpat}
        escpat = r'\\([\\"])'
        regexps = splitCommand._regexps = (
            re.compile(argpat), re.compile(checkpat), re.compile(escpat)
        )
    argre, checkre, subre = regexps

    if not checkre.match(command or ''):
        raise ValueError("Command string %r is not valid" % command)

    tokens = []
    for arg in argre.findall(command or ''):
        if arg.startswith('"'):
            # strip the enclosing quotes and resolve \" and \\ escapes
            arg = subre.sub(r'\1', arg[1:-1])
        tokens.append(arg)
    return tokens
class _LocaleFile(object):
""" Transform filenames according to locale """
def __init__(self, _locale = locale, _os = os, _sys = sys):
""" Initialization """
self.unicode_system = _os.path.supports_unicode_filenames
self.from_enc = _locale.getpreferredencoding(False) or "us-ascii"
self.to_enc = _sys.getfilesystemencoding() or "us-ascii"
def toLocale(self, name, name_enc = None, locale_enc = None):
""" Transforms a file name to the locale representation
@param name: The name to consider
@type name: C{str} / C{unicode}
@param name_enc: The source encoding of C{name}, if it's
not unicode already
@type name_enc: C{str}
@param locale_enc: The file system encoding (used only
if it's not a unicode supporting OS)
@type locale_enc: C{str}
@return: The name in locale representation
@rtype: C{str}/C{unicode}
@exception UnicodeError: An error happened while recoding
"""
if locale_enc is None:
locale_enc = self.to_enc
if name_enc is None:
name_enc = self.from_enc
if self.unicode_system:
if isinstance(name, unicode):
return name
else:
return name.decode(name_enc, "strict")
if locale_enc.lower() == "none":
if isinstance(name, unicode):
raise RuntimeError("Illegal call")
else:
return name
if not isinstance(name, unicode):
name = name.decode(name_enc, "strict")
return name.encode(locale_enc, "strict")
def fromLocale(self, name, locale_enc = None):
""" Transform a file name from locale repr to unicode (hopefully)
@param name: The name to decode
@type name: C{str}/C{unicode}
@param locale_enc: The locale encoding
@type locale_enc: C{str}
@return: The decoded name
@rtype: C{unicode}/C{str}
@exception UnicodeError: An error happend while recoding
"""
if isinstance(name, unicode):
return name
if locale_enc is None:
locale_enc = self.from_enc
if locale_enc.lower() == "none":
return name # no unicode.
return name.decode(locale_enc, "strict")
# Module-level singleton used for all filename recoding.
filename = _LocaleFile()
def extractX509User(author):
    """ Returns user data extracted from x509 subject string

        @param author: The author string
        @type author: C{str}

        @return: user name, mail address (user name maybe C{None})
        @rtype: C{tuple} or C{None}
    """
    if author:
        # the /CN= and /emailAddress= patterns are compiled once and cached
        # on the function object (NOTE: ur'' literals are Python 2 only)
        try:
            cnre, eare = extractX509User._regexps
        except AttributeError:
            import re
            cnre = re.compile(ur'/CN=([^/]+)', re.I)
            eare = re.compile(ur'/emailAddress=([^/]+)', re.I)
            extractX509User._regexps = (cnre, eare)

        # subjects are treated as utf-8; undecodable bytes are replaced,
        # never fatal
        author = author.decode('utf-8', 'replace')
        ea_match = eare.search(author)
        if ea_match:
            # an email address is required; the common name is optional
            cn_match = cnre.search(author)
            return (cn_match and cn_match.group(1), ea_match.group(1))
    # no author or no email address found
    return None
def substitute(template, subst):
    """ Returns a filled template

        If the L{template} is C{None}, this function returns C{None}
        as well.

        @param template: The template to fill
        @type template: C{unicode}

        @param subst: The substitution parameters
        @type subst: C{dict}

        @return: The filled template (The return type depends on the
            template and the parameters)
        @rtype: C{str} or C{unicode}
    """
    if template is not None:
        # SafeDict leaves unknown %(key)s placeholders untouched
        return template % SafeDict(subst.items())
    return None
def filterForXml(value):
    """ Replaces control characters with replace characters

        @param value: The value to filter
        @type value: C{unicode}

        @return: The filtered value
        @rtype: C{unicode}
    """
    regex = getattr(filterForXml, "_regex", None)
    if regex is None:
        import re
        # XML 1.0 allows only TAB (9), LF (10) and CR (13) below 0x20
        forbidden = u''.join(
            chr(num) for num in range(32) if num not in (9, 10, 13)
        )
        regex = filterForXml._regex = re.compile("[%s]" % forbidden)
    return regex.sub(u'\ufffd', value)
def getParentDirList(path):
    """ Returns the directories up to a (posix) path

        @param path: The path to process
        @type path: C{str}

        @return: The directory list, deepest first, ending with C{'/'}
        @rtype: C{list}
    """
    import posixpath

    path = posixpath.normpath("/%s" % path)
    if path.startswith('//'):
        # normpath may preserve a leading double slash; collapse it
        path = path[1:]

    dirs = []
    parent = posixpath.dirname(path)
    while parent != '/':
        dirs.append(parent)
        parent = posixpath.dirname(parent)
    dirs.append('/')
    return dirs
def getGlobValue(globs, path):
    """ Returns the value of the first glob, where path matches

        @param globs: The glob list (C{[(glob, associated value)]})
        @type globs: C{list} of C{tuple}

        @param path: The path to match
        @type path: C{str}

        @return: The matched value or C{None}
        @rtype: any
    """
    import fnmatch

    return next(
        (item[1] for item in globs if fnmatch.fnmatchcase(path, item[0])),
        None,
    )
def modifyQuery(query, rem = None, add = None, set = None, delim = '&'):
    """ Returns a modified query string

        @note: set is a convenience parameter, it's actually a combination of
            C{rem} and C{add}. The order of processing is:
                1. append the set parameters to C{rem} and C{add}
                2. apply C{rem}
                3. apply C{add}

        @warning: query parameters containing no C{=} character are silently
            dropped.

        @param query: The query string to modify
        @type query: C{str} or C{dict}

        @param rem: parameters to remove (if present)
        @type rem: C{list} of C{str}

        @param add: parameters to add
        @type add: C{list} of C{tuple}

        @param set: parameters to override
        @type set: C{list} of C{tuple}

        @param delim: Delimiter to use when rebuilding the query string
        @type delim: C{str}
    """
    rem = list(rem or [])
    add = list(add or [])
    set = list(set or [])

    # accept either a pre-parsed dict or a raw query string
    if isinstance(query, dict):
        query_dict = query.copy()
    else:
        query_dict = parseQuery(query)

    # "set" means remove-then-add
    rem.extend([key for key, _ in set])
    add.extend(set)

    # apply rem (missing keys are ignored)
    for key in rem:
        query_dict.pop(key, None)

    # apply add
    for key, val in add:
        query_dict.setdefault(key, []).append(val)

    # rebuild the query string
    return delim.join([
        delim.join(["%s=%s" % (key, str(val)) for val in vals])
        for key, vals in query_dict.items()
    ])
def parseQuery(query):
    """ Parses a query string

        @warning: query parameters containing no C{=} character are silently
            dropped.

        @param query: The query string to parse
        @type query: C{str}

        @return: The parsed query (C{{key: [values]}})
        @rtype: C{dict}
    """
    try:
        queryre = parseQuery._regex
    except AttributeError:
        import re
        # both & and ; are accepted as parameter separators
        parseQuery._regex = queryre = re.compile(r'[&;]')

    query_dict = {}
    for pair in queryre.split(query):
        if '=' not in pair:
            continue  # silently drop parameters without '='
        key, val = pair.split('=', 1)
        query_dict.setdefault(key, []).append(val)
    return query_dict
def commonPaths(paths):
    """ Returns the common component and the stripped paths

        It expects that directories do always end with a trailing slash and
        paths never begin with a slash (except root).

        @param paths: The list of paths (C{[str, str, ...]})
        @type paths: C{list}

        @return: The common component (always a directory) and the stripped
                 paths (C{(str, [str, str, ...])})
        @rtype: C{tuple}
    """
    import posixpath

    prefix = ''
    # a single path (or the root path among them) has no common component
    if len(paths) > 1 and "/" not in paths:
        prefix = posixpath.commonprefix(paths)
        # commonprefix works character-wise, so cut back to the last slash
        if not prefix.endswith("/"):
            prefix = prefix[:prefix.rfind("/") + 1]
        cut = len(prefix)
        if cut > 0:
            # a path equal to the prefix becomes "./"
            paths = [path[cut:] or "./" for path in paths]
            prefix = prefix[:-1]  # chop the trailing slash
    return (prefix, paths)
def inherit(cls, *bases):
    """ Inherits class cls from *bases

        @note: cls needs a __dict__, so __slots__ is tabu

        @param cls: The class to inherit from *bases
        @type cls: C{class}

        @param bases: The base class(es)
        @type bases: C{list}
    """
    # BUGFIX: besides __module__, the __dict__ and __weakref__ descriptors a
    # plain class carries must not be copied into the new class namespace --
    # type() rejects them ("__dict__ slot disallowed: we already got one").
    newdict = dict([(key, value)
        for key, value in cls.__dict__.items()
        if key not in ('__module__', '__dict__', '__weakref__')
    ])
    # build the new class with the same name on top of the requested bases
    cls = type(cls.__name__, tuple(bases), newdict)
    # stash a name-mangled backreference to the decorated class itself
    setattr(cls, "_%s__decorator_class" % cls.__name__, cls)
    return cls
def parseContentType(value):
    """ Parses a content type

        (the email module unfortunately doesn't provide a public
        interface for this)

        @warning: comments are not recognized yet

        @param value: The value to parse - must be ascii compatible
        @type value: C{basestring}

        @return: The parsed header (C{(value, {key, [value, value, ...]})})
                 or C{None}
        @rtype: C{tuple}
    """
    try:
        # NOTE(review): `unicode` exists only on Python 2; on Python 3 this
        # raises an uncaught NameError for str input -- confirm the intended
        # interpreter version before touching this branch.
        if isinstance(value, unicode):
            value.encode('us-ascii')
        else:
            value.decode('us-ascii')
    except (AttributeError, UnicodeError):
        # not ascii compatible -> unparseable
        return None

    try:
        # compiled patterns are cached on the function object after first use
        typere, pairre, stripre = parseContentType._regexps
    except AttributeError:
        import re
        # a bit more lenient than RFC 2045
        tokenres = r'[^\000-\040()<>@,;:\\"/[\]?=]+'
        qcontent = r'[^\000\\"]'
        qsres = r'"%(qc)s*(?:\\"%(qc)s*)*"' % {'qc': qcontent}
        valueres = r'(?:%(token)s|%(quoted-string)s)' % {
            'token': tokenres, 'quoted-string': qsres,
        }

        # full header: "type/subtype" followed by optional ;key=value pairs
        typere = re.compile(
            r'\s*([^;/\s]+/[^;/\s]+)((?:\s*;\s*%(key)s\s*=\s*%(val)s)*)\s*$' %
            {'key': tokenres, 'val': valueres,}
        )
        # a single ;key=value parameter
        pairre = re.compile(r'\s*;\s*(%(key)s)\s*=\s*(%(val)s)' % {
            'key': tokenres, 'val': valueres
        })
        # line breaks to strip out of quoted-string values
        stripre = re.compile(r'\r?\n')
        parseContentType._regexps = (typere, pairre, stripre)

    match = typere.match(value)
    if not match:
        return None

    # group 1: lowercased "type/subtype"; group 2: raw parameter tail
    parsed = (match.group(1).lower(), {})
    match = match.group(2)
    if match:
        for key, val in pairre.findall(match):
            if val[:1] == '"':
                # unquote: drop embedded line breaks, unescape \" -> "
                val = stripre.sub(r'', val[1:-1]).replace(r'\"', '"')
            parsed[1].setdefault(key.lower(), []).append(val)

    return parsed
class ReadOnlyDict(dict):
    """ Read only dictionary

        Construction (and C{fromkeys}) work normally; every mutating method
        raises C{TypeError} afterwards.
    """
    # error message raised by every mutator (name-mangled, private)
    __msg = "The dictionary is read-only"

    def __setitem__(self, key, value):
        """ modifiying is not allowed """
        raise TypeError(self.__msg)

    def __delitem__(self, key):
        """ deleting is not allowed """
        raise TypeError(self.__msg)

    def clear(self):
        """ clearing is not allowed """
        raise TypeError(self.__msg)

    def fromkeys(cls, seq, value = None):
        """ Chokes by default, so work around it """
        return cls(dict.fromkeys(seq, value))
    fromkeys = classmethod(fromkeys)

    def pop(self, key, default = None):
        """ popping is not allowed """
        raise TypeError(self.__msg)

    def popitem(self):
        """ popping is not allowed """
        raise TypeError(self.__msg)

    # BUGFIX: the signature was setdefault(self, default=None) -- missing the
    # key parameter -- so d.setdefault(k, v) raised a generic argument-count
    # TypeError instead of the intended read-only message.
    def setdefault(self, key, default = None):
        """ modifying is not allowed """
        raise TypeError(self.__msg)

    # BUGFIX: the signature was update(self, newdict); d.update(a=1) or a
    # no-argument call raised a signature TypeError instead of the intended
    # message. dict.update accepts (mapping-or-iterable, **kwargs).
    def update(self, *args, **kwargs):
        """ updating is not allowed """
        raise TypeError(self.__msg)
class SafeDict(dict):
    """ A dict, which returns '' on unknown keys or false values """

    def __getitem__(self, key):
        """ Returns an empty string on false values or unknown keys """
        value = dict.get(self, key)
        return value if value else ''
|
<gh_stars>1000+
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.framework import Program
from paddle.fluid.compiler import CompiledProgram
from paddle.fluid.executor import Executor
from paddle.fluid.parallel_executor import ParallelExecutor
from paddle.fluid.framework import Variable, Parameter
from .runtime_base import RuntimeBase
from ..base.private_helper_function import wait_server_ready
__all__ = []
class ParameterServerRuntime(RuntimeBase):
    """Parameter-server runtime for fleet distributed training.

    Responsibilities visible in this class:
      * pick a sync/async/geo strategy from the validated DistributedStrategy,
      * start/stop the Communicator on trainer (worker) processes,
      * run the server programs on pserver processes,
      * save/load dense, sparse (SelectedRows) and large-scale (distributed)
        parameters, including inference-model export.
    """

    def __init__(self):
        super(ParameterServerRuntime, self).__init__()
        self._communicator = None

    def _set_basic_info(self, context):
        # cache the pieces of the fleet context this runtime needs
        self.context = context
        self.role_maker = context["role_maker"]
        self.origin_main_program = context["origin_main_program"]
        self.origin_startup_program = context["origin_startup_program"]
        self.async_strategy = self._get_distributed_strategy()
        self.compiled_strategy = self.build_compiled_startegy()

    def _get_distributed_strategy(self):
        """Map (a_sync, k_steps) from the valid strategy onto sync/async/geo."""
        strategy = None

        from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory

        dist_strategy = self.context["valid_strategy"]
        k_steps = dist_strategy.a_sync_configs["k_steps"]

        if not dist_strategy.a_sync and k_steps == 0:
            strategy = StrategyFactory.create_sync_strategy()

        if dist_strategy.a_sync and k_steps == 0:
            strategy = StrategyFactory.create_async_strategy()

        if dist_strategy.a_sync and k_steps > 0:
            strategy = StrategyFactory.create_geo_strategy(k_steps)

        if not strategy:
            # BUGFIX: message previously read "k_steps must be invalid value"
            raise ValueError("k_steps is an invalid value, please check")

        return strategy

    def build_compiled_startegy(self):
        # NOTE(review): method name typo ("startegy") kept -- it is part of
        # the public-facing interface used by _set_basic_info and callers.
        from paddle.fluid.incubate.fleet.parameter_server.ir.public import CompileTimeStrategy

        # NOTE(review): origin_main_program is passed for both the first and
        # second argument -- the second slot looks like it should be the
        # startup program; confirm against CompileTimeStrategy's signature.
        compiled_config = CompileTimeStrategy(
            self.origin_main_program, self.origin_main_program,
            self.async_strategy, self.role_maker)
        return compiled_config

    def _load_sparse_params(self,
                            executor,
                            dirname,
                            varnames,
                            main_program=None):
        """Load SelectedRows (sparse) vars listed in `varnames` from `dirname`."""
        # BUGFIX: was `assert vars != None`, which tested the *builtin* `vars`
        # function (always truthy); validate the actual argument instead.
        assert varnames is not None
        check_vars = []
        load_prog = Program()
        load_block = load_prog.global_block()

        def _in_varnames(var):
            return var.name in varnames

        load_vars = list(
            filter(_in_varnames, fluid.default_main_program().list_vars()))
        if main_program is None:
            main_program = self.origin_main_program

        from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_varname_parts
        for each_var in load_vars:
            assert isinstance(each_var, Variable)

            origin_varname, _, _ = _get_varname_parts(each_var.name)

            new_var = fluid.io._clone_var_in_block_(load_block, each_var)
            var_path = os.path.join(dirname, origin_varname)
            if not os.path.exists(var_path):
                raise ValueError("SelectedRows var {} can not find at {}".
                                 format(new_var.name, var_path))

            if os.path.isfile(var_path):
                load_block.append_op(
                    type='sparse_tensor_load',
                    inputs={},
                    outputs={'Out': [new_var]},
                    attrs={
                        'file_path': os.path.join(dirname, origin_varname),
                        'node_index': self.role_maker._server_index(),
                        'node_num': self.role_maker._server_num(),
                        'shape': each_var.shape
                    })
            check_vars.append(each_var)  # collected but currently unused

        executor.run(load_prog)

    def _load_distributed_params(self, dirname, varnames):
        """Load large-scale (distributed) params into the LargeScaleKV store."""
        from paddle.fluid.communicator import LargeScaleKV
        from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_varname_parts

        scale_kv = LargeScaleKV()
        for varname in varnames:
            origin_varname, _, _ = _get_varname_parts(varname)
            sparse_dir = os.path.join(dirname, origin_varname, varname)
            scale_kv.load(varname, sparse_dir)

    @staticmethod
    def __exclude_vars(exclude_var_names=None):
        """Build a predicate selecting persistable vars worth saving/loading.

        Excludes explicitly named vars, gradients, the learning-rate var and
        feed/fetch/reader infrastructure vars.
        """
        # BUGFIX: default was a mutable list literal
        if exclude_var_names is None:
            exclude_var_names = []

        def is_valid(var):
            if var.name in exclude_var_names:
                return False

            from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_varname_parts

            origin_varname, _, _ = _get_varname_parts(var.name)
            if origin_varname.endswith("@GRAD"):
                return False

            if origin_varname == "learning_rate_0":
                return False

            if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \
                    var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \
                    var.desc.type() == core.VarDesc.VarType.READER:
                return False
            return var.persistable

        return is_valid

    def _init_worker(self):
        """Configure and start the Communicator on a trainer process."""

        def sync_strategy_envs():
            # extra kwargs required by the sync-mode communicator
            kwargs = {}
            kwargs[
                "pserver_endpoints"] = self.role_maker._get_pserver_endpoints()
            kwargs["trainer_id"] = self.role_maker._worker_index()
            return kwargs

        def geo_strategy_envs():
            from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames

            def get_sparse_attrs():
                # initializer op type -> attributes that must be replayed
                # when the server re-initializes sparse tables
                opt_init_map = {}
                opt_init_map["gaussian_random"] = ["seed", "mean", "std"]
                opt_init_map["fill_constant"] = ["value"]
                opt_init_map["uniform_random"] = ["seed", "min", "max"]
                opt_init_map[
                    "truncated_gaussian_random"] = ["seed", "mean", "std"]

                dist_varnames = get_sparse_tablenames(self.origin_main_program,
                                                      True)
                sparse_varnames = get_sparse_tablenames(
                    self.origin_main_program, False)

                if len(dist_varnames) != 0:
                    raise ValueError(
                        "GeoStrategy can not support large scale embeding now, please use fluid.layers.embedding"
                    )

                init_attrs = []
                for value_name in sparse_varnames:
                    value_var = self.origin_main_program.global_block().vars[
                        value_name]
                    value_attr = [
                        value_name,
                        ",".join([str(dim) for dim in value_var.shape])
                    ]
                    # find the startup op that initialized this table and
                    # serialize its init attributes alongside the shape
                    for op in self.origin_startup_program.global_block().ops:
                        if op.type in opt_init_map.keys(
                        ) and value_name == op.output("Out")[0]:
                            init_attr = [op.type]
                            for attr in opt_init_map[op.type]:
                                init_attr.append(str(op.attr(attr)))
                            value_attr.append("&".join(init_attr))
                            init_attrs.append(":".join(value_attr))
                            break
                return "#".join(init_attrs)

            kwargs = {}
            kwargs["trainers"] = self.role_maker._worker_num()
            kwargs["sparse_attrs"] = get_sparse_attrs()
            return kwargs

        from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_lr_ops, _has_global_step
        from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import \
            SyncStrategy, GeoStrategy

        trainer_config = self.async_strategy.get_trainer_runtime_config()
        print(trainer_config)

        dist_strategy = self.context["valid_strategy"]
        launch_barrier = dist_strategy.a_sync_configs["launch_barrier"]
        if launch_barrier:
            # for trainer wait server ready
            wait_server_ready(self.role_maker._get_pserver_endpoints())

            # for ps-heter mode, wait heter worker ready
            if self.role_maker._is_heter_parameter_server_mode and self.role_maker._is_worker(
            ):
                wait_server_ready(self.role_maker._get_heter_worker_endpoints())

        lrs = _has_global_step(_get_lr_ops(self.origin_main_program))

        if lrs:
            kwargs = {"need_global_step": "1"}
        else:
            kwargs = {"need_global_step": "0"}

        if isinstance(self.async_strategy, GeoStrategy):
            geo_kwargs = geo_strategy_envs()
            kwargs.update(geo_kwargs)
        if isinstance(self.async_strategy, SyncStrategy):
            sync_kwargs = sync_strategy_envs()
            kwargs.update(sync_kwargs)

        kwargs = kwargs if kwargs else None

        send_ctx = self.compiled_strategy.get_communicator_send_context()

        if self.compiled_strategy.is_geo_mode():
            recv_ctx = self.compiled_strategy.get_communicator_recv_context(
                recv_type=4)
        else:
            recv_ctx = self.compiled_strategy.get_communicator_recv_context(
                recv_type=1)

        from paddle.fluid.communicator import Communicator
        self._communicator = Communicator(
            trainer_config.mode, kwargs,
            trainer_config.get_communicator_flags())
        self._communicator.init_with_ctx(send_ctx, recv_ctx)

        if not self._communicator.is_running():
            self._communicator.start()
        else:
            warnings.warn("communicator has been initialized, skip")

    def _get_executor(self):
        """Return a CPU executor, or a GPU/XPU executor on heter workers."""
        executor = fluid.Executor(fluid.CPUPlace())
        if self.role_maker._is_heter_parameter_server_mode:
            heter_worker_device_guard = self.context[
                "valid_strategy"].a_sync_configs[
                    "heter_worker_device_guard"].upper()
            if heter_worker_device_guard not in ["GPU", "XPU", "CPU"]:
                raise ValueError("Heter Worker Not Support Device {}".format(
                    heter_worker_device_guard))
            if self.role_maker._is_heter_worker():
                if heter_worker_device_guard == "GPU":
                    executor = Executor(
                        fluid.CUDAPlace(
                            int(os.getenv("FLAGS_selected_gpus", "0"))))
                elif heter_worker_device_guard == "XPU":
                    executor = Executor(
                        fluid.XPUPlace(
                            int(os.getenv("FLAGS_selected_xpus", "0"))))
        return executor

    def _init_server(self, *args, **kwargs):
        """Run the startup program on a pserver; optionally restore a model.

        Accepts at most one positional argument: the model directory to load
        dense/sparse/large-scale parameters from.
        """
        if len(args) > 1:
            raise ValueError("init server can only accept 1 args: `dirname`")
        elif len(args) == 1:
            model_dirname = args[0]
        else:
            model_dirname = None

        executor = self._get_executor()
        if self.role_maker._is_heter_worker() and self.context[
                "valid_strategy"].a_sync_configs["launch_barrier"]:
            # for heter trainer wait server ready
            wait_server_ready(self.role_maker._get_pserver_endpoints())
        executor.run(fluid.default_startup_program())

        if self.role_maker._is_heter_worker():
            self._init_worker()
            return

        # sparse tables living on this pserver plus their optimizer state
        sparse_varnames = self.compiled_strategy.get_sparse_varname_on_ps(False)
        sparse_related_optimize_varnames = []
        for var_name in sparse_varnames:
            sparse_related_optimize_varnames += self.compiled_strategy.get_optimize_varname_on_ps(
                var_name)
        sparse_related_optimize_varnames = list(
            set(sparse_related_optimize_varnames))

        # large-scale (distributed) tables plus their optimizer state
        # (local name fixed from the original's "distribtued" typo)
        distributed_varnames = self.compiled_strategy.get_sparse_varname_on_ps(
            True)
        distributed_related_optimize_varnames = []
        for var_name in distributed_varnames:
            distributed_related_optimize_varnames += self.compiled_strategy.get_optimize_varname_on_ps(
                var_name)
        distributed_related_optimize_varnames = list(
            set(distributed_related_optimize_varnames))

        remaining_vars = list(
            filter(
                ParameterServerRuntime.__exclude_vars(
                    sparse_varnames + distributed_varnames +
                    sparse_related_optimize_varnames +
                    distributed_related_optimize_varnames),
                fluid.default_main_program().list_vars()))

        if not model_dirname:
            return

        if not os.path.isdir(model_dirname):
            # BUGFIX: the directory name was passed as a second ValueError
            # argument instead of being %-formatted into the message.
            raise ValueError("There is no directory named '%s'" % model_dirname)

        # load dense
        fluid.io.load_vars(
            executor,
            main_program=fluid.default_main_program(),
            dirname=model_dirname,
            vars=remaining_vars)

        # load sparse
        self._load_sparse_params(
            executor=executor,
            dirname=model_dirname,
            varnames=sparse_varnames + sparse_related_optimize_varnames)

        # load large scale
        self._load_distributed_params(
            dirname=model_dirname,
            varnames=distributed_varnames +
            distributed_related_optimize_varnames)

    def _run_server(self):
        """Run the (blocking) server main program on a pserver."""
        executor = self._get_executor()
        executor.run(fluid.default_main_program())

    def _stop_worker(self):
        """Stop the communicator and release the executor on a trainer."""
        self._communicator.stop()
        executor = self._get_executor()
        executor.close()

    def _get_optimizer_status(self, op, param_name):
        """Return (reshaped, origin-shaped) optimizer accumulator var names.

        `op` is the optimizer op type; accumulator names are derived as
        "<param_name>_<suffix>".
        """
        supported_opts = [
            "sgd", "adam", "adagrad", "adamax", "momentum", "lars_momentum",
            "rmsprop", "decayed_adagrad", "ftrl"
        ]

        # accumulators shaped like the (possibly sliced) parameter
        reshaped_val_map = {}
        reshaped_val_map["sgd"] = []
        reshaped_val_map["adam"] = ["moment1_0", "moment2_0"]
        reshaped_val_map["adagrad"] = ["moment_0"]
        reshaped_val_map["adamax"] = ["moment_0", "inf_norm_0"]
        reshaped_val_map["momentum"] = ["velocity_0"]
        reshaped_val_map["lars_momentum"] = ["velocity_0"]
        reshaped_val_map[
            "rmsprop"] = ["momentum_0", "mean_square_0", "mean_grad_0"]
        reshaped_val_map["decayed_adagrad"] = ["moment_0"]
        reshaped_val_map["ftrl"] = ["squared_0", "linear_0"]

        # scalar-like accumulators that keep their original shape
        orishaped_val_map = {}
        orishaped_val_map["adam"] = ["beta1_pow_acc_0", "beta2_pow_acc_0"]
        orishaped_val_map["adamax"] = ["beta1_pow_acc_0"]

        if op not in supported_opts:
            raise ValueError(
                "fleet can not support optimizer: {}, only this can be supported: {}".
                format(op, supported_opts))

        reshaped_names = [
            param_name + "_" + val for val in reshaped_val_map[op]
        ]

        if op not in orishaped_val_map:
            origin_names = []
        else:
            origin_names = [
                param_name + "_" + val for val in orishaped_val_map[op]
            ]
        return reshaped_names, origin_names

    def _get_optimizer_op(self, param_name):
        # returns the optimizer op updating `param_name`, or None if absent
        from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_optimize_ops

        opts = _get_optimize_ops(self.origin_main_program)
        for op in opts:
            if "Param" in op.input_names and \
                    "LearningRate" in op.input_names and op.input("Param")[0] == param_name:
                return op

    def _save_dense_params(self, executor, dirname, context, main_program):
        """Pull dense params (and their optimizer state) from pservers to disk.

        Returns the list of saved origin var names.
        """
        self._communicator.recv()

        prog = Program()
        block = prog.global_block()
        local_vars = []

        for name, var_ctx in context.items():
            if len(var_ctx.origin_varnames()) != 1:
                raise ValueError("Dense can not support split now.")

            varname = var_ctx.origin_varnames()[0]
            local_vars.append(varname)

            optimizer = self._get_optimizer_op(varname)
            reshaped_varnames, origin_varnames = self._get_optimizer_status(
                optimizer.type, varname)

            for var_name in [varname] + reshaped_varnames + origin_varnames:
                var = self.origin_main_program.global_block().vars[var_name]
                block.append_op(
                    type='recv_save',
                    attrs={
                        "trainer_id": self.role_maker._worker_index(),
                        "shape": var.shape,
                        "slice_shapes":
                        [",".join([str(i) for i in var.shape])],
                        "slice_varnames": [var.name],
                        "remote_varnames": [var.name],
                        "is_sparse": False,
                        "endpoints": var_ctx.split_endpoints(),
                        "file_path": os.path.join(dirname, var.name)
                    })

        executor.run(prog)
        return local_vars

    def _save_sparse_params(self, executor, dirname, context, main_program):
        """Pull row-sharded sparse params from pservers to disk.

        Returns the saved context keys.
        """
        prog = Program()
        block = prog.global_block()
        local_vars = []

        for name, var_ctx in context.items():
            if len(var_ctx.origin_varnames()) != 1:
                raise ValueError("Dense can not support split now.")

            varname = var_ctx.origin_varnames()[0]
            local_vars.append(varname)

            optimizer = self._get_optimizer_op(varname)
            reshaped_varnames, origin_varnames = self._get_optimizer_status(
                optimizer.type, varname)

            var = self.origin_main_program.global_block().vars[varname]

            slice_shapes = []
            dims1 = ",".join([str(i) for i in var.shape[1:]])

            # NOTE(review): section and the trailing dims are concatenated
            # without a separator -- confirm the slice-shape string format
            # expected by the recv_save op.
            for section in var_ctx.sections():
                slice_shapes.append(str(section) + dims1)

            block.append_op(
                type='recv_save',
                attrs={
                    "trainer_id": self.role_maker._worker_index(),
                    "shape": var.shape,
                    "slice_shapes": slice_shapes,
                    "slice_varnames": var_ctx.split_varnames(),
                    "remote_varnames": var_ctx.split_varnames(),
                    "is_sparse": True,
                    "endpoints": var_ctx.split_endpoints(),
                    "pserver_num":
                    len(self.role_maker._get_pserver_endpoints()),
                    "file_path": os.path.join(dirname, var.name)
                })

            for reshaped_varname in reshaped_varnames:
                var = self.origin_main_program.global_block().vars[
                    reshaped_varname]

                slice_varnames = []
                remote_varnames = []
                for i in range(len(var_ctx.split_varnames())):
                    slice_varnames.append("{}.block{}".format(reshaped_varname,
                                                              i))
                    remote_varnames.append(reshaped_varname)

                block.append_op(
                    type='recv_save',
                    attrs={
                        "trainer_id": self.role_maker._worker_index(),
                        "shape": var.shape,
                        "slice_shapes": slice_shapes,
                        "slice_varnames": slice_varnames,
                        "remote_varnames": remote_varnames,
                        "is_sparse": True,
                        "endpoints": var_ctx.split_endpoints(),
                        "pserver_num":
                        len(self.role_maker._get_pserver_endpoints()),
                        "file_path": os.path.join(dirname, var.name)
                    })

            for origin_varname in origin_varnames:
                var = self.origin_main_program.global_block().vars[
                    origin_varname]

                block.append_op(
                    type='recv_save',
                    attrs={
                        "trainer_id": self.role_maker._worker_index(),
                        "shape": var.shape,
                        "slice_shapes":
                        [",".join([str(i) for i in var.shape])],
                        "slice_varnames": [origin_varname],
                        "remote_varnames": [origin_varname],
                        "is_sparse": False,
                        "endpoints": var_ctx.split_endpoints()[:1],
                        "file_path": os.path.join(dirname, var.name)
                    })

        executor.run(prog)
        return context.keys()

    def _save_distributed_params(self, executor, dirname, context, mode):
        """Ask pservers to checkpoint large-scale params straight to `dirname`."""
        prog = Program()
        block = prog.global_block()

        for name, var_ctx in context.items():
            block.append_op(
                type='checkpoint_notify',
                attrs={
                    "varname": name,
                    "mode": mode,
                    "slice_varnames": var_ctx.split_varnames(),
                    "remote_varnames": var_ctx.split_varnames(),
                    "endpoints": var_ctx.split_endpoints(),
                    "dirname": dirname
                })

        executor.run(prog)
        return context.keys()

    def _save_distributed_persistables(self, executor, dirname, main_program,
                                       mode):
        """Save dense, sparse and large-scale params plus local leftovers."""
        dense_ctx = self.compiled_strategy.get_communicator_recv_context(
            recv_type=1, use_origin_program=True)

        sparse_ctx = self.compiled_strategy.get_communicator_recv_context(
            recv_type=2, use_origin_program=True)

        distributed_ctx = self.compiled_strategy.get_communicator_recv_context(
            recv_type=3, use_origin_program=True)

        recv_dense_varnames = self._save_dense_params(executor, dirname,
                                                      dense_ctx, main_program)

        recv_sparse_varnames = self._save_sparse_params(
            executor, dirname, sparse_ctx, main_program)

        recv_distributed_varnames = self._save_distributed_params(
            executor, dirname, distributed_ctx, mode)

        saved_varnames = recv_dense_varnames + list(
            recv_sparse_varnames) + list(recv_distributed_varnames)

        # everything persistable that was not pulled from the servers
        remaining_vars = list(
            filter(
                ParameterServerRuntime.__exclude_vars(saved_varnames),
                main_program.list_vars()))

        fluid.io.save_vars(
            executor,
            main_program=main_program,
            dirname=dirname,
            vars=remaining_vars)

    def _ps_inference_save_persistables(self,
                                        executor,
                                        dirname,
                                        main_program=None,
                                        mode=0,
                                        **kwargs):
        """
        This function filters out all variables with `persistable==True` from the
        give `main_program` and then saves these variables to the folder `dirname`
        or file `filename`.

        The `dirname` is used to specify the folder where persistable variables
        are going to be saved. If you would like to save variables in separate
        files, set `filename` None; if you would like to save all variables in a
        single file, use `filename` to specify the file name.
        """
        if isinstance(executor, ParallelExecutor):
            raise TypeError(
                "in fleet.save_persistables() function, executor must be as Executor type, ParallelExecutor is not allowed"
            )

        if not isinstance(executor, Executor):
            raise TypeError(
                "in fleet.save_persistables() function, executor must be as Executor type"
            )

        if main_program is None:
            main_program = self.compiled_strategy.get_origin_ps_main_program()

        if isinstance(main_program, CompiledProgram):
            raise TypeError(
                "in fleet.save_persistables() function, main_program must be as Program type, CompiledProgram is not allowed"
            )

        self._save_distributed_persistables(executor, dirname, main_program,
                                            mode)

    def _ps_inference_save_inference_model(self,
                                           executor,
                                           dirname,
                                           feeded_var_names,
                                           target_vars,
                                           main_program=None,
                                           export_for_deployment=True):
        """
        Prune the given `main_program` to build a new program especially for inference,
        and then save it and all related parameters to given `dirname` by the `executor`.
        """
        if isinstance(executor, ParallelExecutor):
            raise TypeError(
                "in fleet.save_inference_model() function, executor must be as Executor type, ParallelExecutor is not allowed"
            )

        if not isinstance(executor, Executor):
            raise TypeError(
                "in fleet.save_inference_model() function, executor must be as Executor type"
            )

        if main_program is not None:
            if isinstance(main_program, CompiledProgram):
                raise TypeError(
                    "in fleet.save_inference_model() function, main_program must be as Program type, CompiledProgram is not allowed"
                )
            fluid.io.save_inference_model(dirname, feeded_var_names,
                                          target_vars, executor, main_program,
                                          None, None, export_for_deployment)
        else:
            fluid.io.save_inference_model(dirname, feeded_var_names,
                                          target_vars, executor,
                                          self.origin_main_program, None, None,
                                          export_for_deployment, True)

            # re-read the exported program and save the distributed
            # persistables against it
            model_basename = "__model__"
            model_filename = os.path.join(dirname, model_basename)

            with open(model_filename, "rb") as f:
                program_desc_str = f.read()

            program = Program.parse_from_string(program_desc_str)
            program._copy_dist_param_info_from(fluid.default_main_program())
            self._ps_inference_save_persistables(
                executor, dirname, program, mode=0)

    def _save_inference_model(self, *args, **kwargs):
        self._ps_inference_save_inference_model(*args, **kwargs)

    def _save_persistables(self, *args, **kwargs):
        self._ps_inference_save_persistables(*args, **kwargs)
|
<reponame>futureseadev/coba
import unittest
import timeit
from typing import List
from coba.pipes import MemorySource
from coba.config import CobaConfig, NoneLogger
from coba.simulations import (
Interaction, MemorySimulation, ClassificationSimulation,
LambdaSimulation, CsvSimulation, ArffSimulation, LibsvmSimulation
)
# silence coba's logging for the duration of the test run
CobaConfig.Logger = NoneLogger()
class Interaction_Tests(unittest.TestCase):
    """Verifies Interaction's normalization of the many accepted context forms."""

    def _make(self, context):
        # every test uses the same actions/feedbacks; only context varies
        return Interaction(context, (1,2,3), (4,5,6))

    def test_context_none(self):
        self.assertEqual(None, self._make(None).context)

    def test_context_str(self):
        self.assertEqual("A", self._make("A").context)

    def test_context_sparse_pairs_1(self):
        self.assertEqual({1:4, 2:5, 3:6}, self._make(((1,2,3),(4,5,6))).context)

    def test_context_sparse_pairs_2(self):
        self.assertEqual(
            {"1_0":0, "1_1":0, "1_2":1, 2:5, 3:6},
            self._make(((1,2,3),((0,0,1),5,6))).context)

    def test_context_bytes(self):
        self.assertEqual((0,0,1,1,0), self._make(bytes([0,0,1,1,0])).context)

    def test_context_dense(self):
        self.assertEqual((1,2,3), self._make((1,2,3)).context)

    def test_context_dense_2(self):
        self.assertEqual((1,2,3,0,0,1), self._make((1,2,3,(0,0,1))).context)

    def test_context_sparse_dict(self):
        self.assertEqual({1:0}, self._make({1:0}).context)

    def test_performance(self):
        ia = self._make([1,2,3]*100)
        elapsed = timeit.timeit(lambda: ia.context, number=10000)

        #old best was 0.6 on my machine
        self.assertLess(elapsed, 1.5)
class ClassificationSimulation_Tests(unittest.TestCase):
    # ClassificationSimulation should turn (features, labels) into one
    # interaction per example, with the distinct labels as actions (same
    # order everywhere) and reward 1 only for the true label.

    def assert_simulation_for_data(self, simulation, features, answers) -> None:
        # Shared checker used by all the constructor tests below.
        interactions = list(simulation.read())

        self.assertEqual(len(interactions), len(features))

        #first we make sure that all the labels are included
        #in the first interactions actions without any concern for order
        self.assertCountEqual(interactions[0].actions, set(answers))

        #then we set our expected actions to the first interaction
        #to make sure that every interaction has the exact same actions
        #with the exact same order
        expected_actions = interactions[0].actions

        for f,l,i in zip(features, answers, interactions):
            expected_context = f
            expected_rewards = [ int(a == l) for a in i.actions]

            actual_context = i.context
            actual_actions = i.actions
            actual_rewards = i.feedbacks

            self.assertEqual(actual_context, expected_context)
            self.assertSequenceEqual(actual_actions, expected_actions)
            self.assertSequenceEqual(actual_rewards, expected_rewards)

    def test_constructor_with_good_features_and_labels1(self) -> None:
        features = [1,2,3,4]
        labels = [1,1,0,0]
        simulation = ClassificationSimulation(features, labels)

        self.assert_simulation_for_data(simulation, features, labels)

    def test_constructor_with_good_features_and_labels2(self) -> None:
        features = ["a","b"]
        labels = ["good","bad"]
        simulation = ClassificationSimulation(features, labels)

        self.assert_simulation_for_data(simulation, features, labels)

    def test_constructor_with_good_features_and_labels3(self) -> None:
        features = [(1,2),(3,4)]
        labels = ["good","bad"]
        simulation = ClassificationSimulation(features, labels)

        self.assert_simulation_for_data(simulation, features, labels)

    def test_constructor_with_too_few_features(self) -> None:
        # mismatched feature/label lengths must be rejected
        with self.assertRaises(AssertionError):
            ClassificationSimulation([1], [1,1])

    def test_constructor_with_too_few_labels(self) -> None:
        with self.assertRaises(AssertionError):
            ClassificationSimulation([1,1], [1])

    def test_sparse(self) -> None:
        # sparse rows are (indexes, values) pairs; contexts become dicts
        feature_rows = [
            ( (0,1), (10,11) ),
            ( (1,2), (20,30) ),
            ( (2,3), (30,40) ),
            ( (2,3), (30,40) )
        ]
        label_column = (1,1,0,2)

        simulation = ClassificationSimulation(feature_rows, label_column)
        interactions = list(simulation.read())

        self.assertEqual(dict(zip(*feature_rows[0])), interactions[0].context)
        self.assertEqual(dict(zip(*feature_rows[1])), interactions[1].context)
        self.assertEqual(dict(zip(*feature_rows[2])), interactions[2].context)
        self.assertEqual(dict(zip(*feature_rows[3])), interactions[3].context)

        self.assertEqual([0,2,1], interactions[0].actions)
        self.assertEqual([0,2,1], interactions[1].actions)
        self.assertEqual([0,2,1], interactions[2].actions)
        self.assertEqual([0,2,1], interactions[3].actions)

        self.assertEqual([0,0,1], interactions[0].feedbacks)
        self.assertEqual([0,0,1], interactions[1].feedbacks)
        self.assertEqual([1,0,0], interactions[2].feedbacks)
        self.assertEqual([0,1,0], interactions[3].feedbacks)
class MemorySimulation_Tests(unittest.TestCase):

    def test_interactions(self):
        simulation = MemorySimulation([Interaction(1, [1,2,3], [0,1,2]), Interaction(2, [4,5,6], [2,3,4])])
        interactions = list(simulation.read())

        # NOTE(review): these assertions compare each interaction with itself,
        # so they can never fail. They presumably were meant to compare against
        # the Interaction objects passed to MemorySimulation above -- confirm
        # Interaction equality semantics and fix.
        self.assertEqual(interactions[0], interactions[0])
        self.assertEqual(interactions[1], interactions[1])
class LambdaSimulation_Tests(unittest.TestCase):
    """Tests for LambdaSimulation built from small generator callables."""

    @staticmethod
    def _make_simulation():
        # context, action and reward generators shared by both tests
        def context(i:int) -> int:
            return [1,2][i]

        def actions(i:int, c:int) -> List[int]:
            return [[1,2,3],[4,5,6]][i]

        def reward(i:int, c:int, a:int) -> int:
            return a-c

        return LambdaSimulation(2, context, actions, reward)

    def test_interactions(self):
        interactions = list(self._make_simulation().read())

        self.assertEqual(1      , interactions[0].context)
        self.assertEqual([1,2,3], interactions[0].actions)
        self.assertEqual([0,1,2], interactions[0].feedbacks)

        self.assertEqual(2      , interactions[1].context)
        self.assertEqual([4,5,6], interactions[1].actions)
        self.assertEqual([2,3,4], interactions[1].feedbacks)

    def test_interactions_len(self):
        interactions = list(self._make_simulation().read())
        self.assertEqual(len(interactions), 2)
class CsvSimulation_Tests(unittest.TestCase):

    def test_simple(self):
        # header + three rows; column 'c' is the label column
        rows = ['a,b,c','1,2,3','4,5,6','7,8,6']
        sim = CsvSimulation(MemorySource(rows), 'c')
        got = list(sim.read())

        self.assertEqual(3, len(got))

        # context = the non-label columns, as strings
        self.assertEqual(('1','2'), got[0].context)
        self.assertEqual(('4','5'), got[1].context)
        self.assertEqual(('7','8'), got[2].context)

        # actions = the distinct labels, same order for every interaction
        self.assertEqual(['3','6'], got[0].actions)
        self.assertEqual(['3','6'], got[1].actions)

        # reward 1 marks the true label
        self.assertEqual([1,0], got[0].feedbacks)
        self.assertEqual([0,1], got[1].feedbacks)
class ArffSimulation_Tests(unittest.TestCase):
    # ArffSimulation parses ARFF headers/data; nominal attributes other than
    # the label column are expected to be one-hot encoded into the context.

    def test_simple(self):
        lines = [
            "@relation news20",
            "@attribute a numeric",
            "@attribute B numeric",
            "@attribute c {0, class_B, class_C, class_D}",
            "@data",
            "1,2,class_B",
            "2,3,0",
        ]

        source = MemorySource(lines)
        simulation = ArffSimulation(source,'c',)
        interactions = list(simulation.read())

        self.assertEqual(2, len(interactions))

        # numeric attributes pass through unchanged
        self.assertEqual((1,2), interactions[0].context)
        self.assertEqual((2,3), interactions[1].context)

        # only labels present in the data become actions
        self.assertEqual(['0','class_B'], interactions[0].actions)
        self.assertEqual(['0','class_B'], interactions[1].actions)

        self.assertEqual([0,1], interactions[0].feedbacks)
        self.assertEqual([1,0], interactions[1].feedbacks)

    def test_one_hot(self):
        lines = [
            "@relation news20",
            "@attribute a numeric",
            "@attribute B {0, 1, 2, 3}",
            "@attribute c {0, class_B, class_C, class_D}",
            "@data",
            "1,2,class_B",
            "2,3,0",
            "3,1,0"
        ]

        source = MemorySource(lines)
        simulation = ArffSimulation(source,'c',)
        interactions = list(simulation.read())

        self.assertEqual(3, len(interactions))

        # nominal attribute B is one-hot encoded after the numeric column
        self.assertEqual((1,0,0,1,0), interactions[0].context)
        self.assertEqual((2,0,0,0,1), interactions[1].context)
        self.assertEqual((3,0,1,0,0), interactions[2].context)

        self.assertEqual(['0','class_B'], interactions[0].actions)
        self.assertEqual(['0','class_B'], interactions[1].actions)
        self.assertEqual(['0','class_B'], interactions[2].actions)

        self.assertEqual([0,1], interactions[0].feedbacks)
        self.assertEqual([1,0], interactions[1].feedbacks)
        self.assertEqual([1,0], interactions[2].feedbacks)
class LibsvmSimulation_Tests(unittest.TestCase):

    def test_simple(self):
        # libsvm format: "<label> <index>:<value> ..."
        source = MemorySource([
            "0 4:2 5:3",
            "1 1:1 2:1",
            "1 3:4"
        ])
        got = list(LibsvmSimulation(source).read())

        self.assertEqual(3, len(got))

        # sparse contexts come back as {remapped_index: value} dicts
        self.assertEqual({0:2,1:3}, got[0].context)
        self.assertEqual({2:1,3:1}, got[1].context)
        self.assertEqual({4:4    }, got[2].context)

        self.assertEqual(['0','1'], got[0].actions)
        self.assertEqual(['0','1'], got[1].actions)

        self.assertEqual([1,0], got[0].feedbacks)
        self.assertEqual([0,1], got[1].feedbacks)
if __name__ == '__main__':
unittest.main() |
# Scrape Pardot extracted emails for leads and contact id's for opportunities
# ----------------------------------------------------------------------------
import io
import sys
import os
from os.path import expanduser as ospath
import numpy as np
import pandas as pd
import xlsxwriter
from collections import OrderedDict
from simple_salesforce import Salesforce
import pytz
import datetime
from salesforce_reporting import Connection, ReportParser

# Read the credential file (one value per line: SFDC username, SFDC password,
# SFDC token, Pardot password, Pardot token).  open() does not expand '~',
# so expanduser (imported as ospath) is required here.
with open(ospath('~/.sfdc')) as f:
    uname, spass, stoken, ppass, ptoken = [x.strip("\n") for x in f.readlines()]

soql_opty = "queries/q_opty.sql"  # stored query
soql_lead = "queries/q_lead.sql"  # stored query
# Expand '~' once here; every later use concatenates onto this path.
excel_path = ospath("~/Four Winds Interactive/Marketing - Documents/s_data/")  # directory - case sensitive
excel_file = "pardot.xlsx"        # excel file - case sensitive
sheet_id_opty = "id_opty"         # sheet - case sensitive
sheet_id_lead = "email_lead"      # sheet - case sensitive

# Authenticate with the Salesforce password and security token read above.
sf = Salesforce(username=uname, password=spass, security_token=stoken)

end = datetime.datetime.now(pytz.UTC)  # salesforce API requires UTC
print("@ ", end)

# ---- Opportunities ----------------------------------------------------------
with open(soql_opty, 'r') as file:              # get opty soql query from file
    soql_opty = file.read().replace('\n', '')   # remove line breaks
    soql_opty = soql_opty.replace('\t', '')     # remove tabs

opty_id = pd.read_excel(excel_path + excel_file, sheet_id_opty)  # read ids from excel file
opty_id = tuple(list(opty_id['x18ContactID']))  # convert dataframe column to list then tuple
opty_id = "','".join(opty_id)                   # convert tuple to comma sep string
# Close the stored query's trailing "IN ('" clause with the id list.
soql_opty = soql_opty + "'" + opty_id + "')"

q_opty = sf.query(soql_opty)
records = [dict(
    IndVert=rec['Account']['Industry_Vertical__c'],
    Name=rec['Account']['Name'],
    optysAct=rec['Account']['of_Active_Opps__c'],
    optyss30=rec['Account']['of_Opps_Created_Last_30_Days__c'],
    optyssYr=rec['Account']['of_Opps_Created_this_Calendar_Year__c'],
    x18actid=rec['Account']['X18_Digit_ID__c'])
    for rec in q_opty['records']]
df_opty = pd.DataFrame(records)
df_opty.to_csv(excel_path + 'p_opty.csv')

# ---- Leads ------------------------------------------------------------------
with open(soql_lead, 'r') as file:              # get lead soql query from file
    soql_lead = file.read().replace('\n', '')   # remove line breaks
    soql_lead = soql_lead.replace('\t', '')     # remove tabs

lead_email = pd.read_excel(excel_path + excel_file, sheet_id_lead)  # read ids from excel file
lead_email = tuple(list(lead_email['Email']))   # dataframe column to list to tuple
lead_email = "','".join(lead_email)             # tuple to comma sep string
soql_lead = soql_lead + "'" + lead_email + "')"

q_lead = sf.query(soql_lead)
records = [dict(
    IndustryVertical=rec['Industry_Vertical__c'],
    Email=rec['Email'],
    LeadSource=rec['LeadSource'],
    Company=rec['Company'],
    CreatedDate=rec['CreatedDate'],
    LeadType=rec['Lead_Type__c'],
    UnqualifiedReason=rec['Unqualified_Reason__c'],
    Id=rec['Id'],
    ConvertedAccountId=rec['ConvertedAccountId'],
    ConvertedOpportunityId=rec['ConvertedOpportunityId'],
    ConvertedContactId=rec['ConvertedContactId'])
    for rec in q_lead['records']]
df_lead = pd.DataFrame(records)
df_lead.to_csv(excel_path + 'p_lead.csv')
|
from njupass import NjuUiaAuth
import time
import datetime
from pytz import timezone
from urllib.parse import urlencode
# ehall endpoints of the daily COVID health check-in app:
# list of existing check-in records for the current user
URL_JKDK_LIST = 'http://ehallapp.nju.edu.cn/xgfw/sys/yqfxmrjkdkappnju/apply/getApplyInfoList.do'
# submit one check-in (parameters are passed in the query string)
URL_JKDK_APPLY = 'http://ehallapp.nju.edu.cn/xgfw/sys/yqfxmrjkdkappnju/apply/saveApplyInfos.do'
# app index page; visited first to set up the session
URL_JKDK_INDEX = 'http://ehallapp.nju.edu.cn/xgfw/sys/mrjkdkappnju/index.do'
def get_zjhs_time(method, last_id):
    """Return the most recent nucleic-acid (PCR) test time as 'YYYY-MM-DD H'.

    :param method: 'YESTERDAY' = the test was yesterday; 'REGULAR' = tests
                   run on a 5-day cycle anchored at 2022-04-04, phase-shifted
                   by ``last_id``; anything else = today.
    :param last_id: day offset within the 5-day regular testing cycle.
    """
    now_sh = datetime.datetime.now(timezone('Asia/Shanghai'))
    if method == 'YESTERDAY':
        PCR_date = now_sh + datetime.timedelta(-1)
    elif method == 'REGULAR':
        # Use the Shanghai-local date here.  The original used
        # datetime.date.today(), which is the *host's* local date and can be
        # off by one day when the host is not in CST.
        days_since_anchor = (now_sh.date() - datetime.date(2022, 4, 4)).days
        delta = (days_since_anchor - last_id) % 5
        PCR_date = now_sh + datetime.timedelta(-delta)
    else:
        PCR_date = now_sh
    # NOTE: '%-H' (hour without zero padding) is a glibc extension and fails
    # on Windows strftime.
    return PCR_date.strftime("%Y-%m-%d %-H")
def apply(curr_location, logger, auth: NjuUiaAuth, covidTestMethod, last_id, force=False):
    """
    Perform one daily health check-in.

    :param `covidTestMethod`: strategy for computing the most recent PCR test time
    :param `force`: submit even if today's check-in has already been done
    :return: True once today's check-in exists, False after 10 failed attempts
    """
    headers = {
        # required since 2022/4/20
        'referer': 'http://ehallapp.nju.edu.cn/xgfw/sys/mrjkdkappnju/index.html',
        "X-Requested-With": "com.wisedu.cpdaily.nju",
        "User-Agent": "Mozilla/5.0 (Linux; Android 11; M2006J10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/92.0.4515.131 Mobile Safari/537.36 cpdaily/8.2.7 wisedu/8.2.7",
        "Host": "ehallapp.nju.edu.cn",
    }
    # Retry loop: at most 10 attempts to fetch the check-in list and submit.
    for _ in range(10):
        logger.info('尝试获取打卡列表信息...')
        # Visit the index page first so the app session is established.
        auth.session.get(URL_JKDK_INDEX)
        r = auth.session.get(URL_JKDK_LIST, headers=headers)
        if r.status_code != 200:
            # Fetch failed; wait a minute and retry.
            logger.error('获取失败,一分钟后再次尝试...')
            time.sleep(60)
            continue
        # First entry is today's record; TBZT == "1" means already submitted.
        dk_info = r.json()['data'][0]
        has_applied = dk_info['TBZT'] == "1"
        wid = dk_info['WID']
        param = {
            'WID': wid,
            'IS_TWZC': 1,  # body temperature normal?
            'CURR_LOCATION': curr_location,  # current location
            'ZJHSJCSJ': get_zjhs_time(covidTestMethod, last_id),  # most recent PCR test time
            'JRSKMYS': 1,  # today's health-code color
            'IS_HAS_JKQK': 1,  # health condition OK?
            'JZRJRSKMYS': 1,  # cohabitants' health-code color today
            'SFZJLN': 0,  # recently left Nanjing?
        }
        # The apply endpoint takes everything in the query string.
        url = URL_JKDK_APPLY + '?' + urlencode(param)
        if not has_applied or force:
            logger.info('正在打卡')
            auth.session.get(url, headers=headers)
            # Only force once; next loop iteration verifies the submission.
            force = False
            time.sleep(1)
        else:
            logger.info('今日已打卡!')
            return True
    logger.error("打卡失败,请尝试手动打卡")
    return False
|
<filename>src/utils/run_reports.py
import logging
import time
def publish_report(wrapper, name, version):
    """Publish one report version through OrderlyWeb.

    Returns whatever the wrapper's execute returns, or False if anything
    raised (the exception is logged, not propagated).
    """
    try:
        logging.info("Publishing report version {}-{}".format(name, version))
        publish = wrapper.ow.publish_report
        return wrapper.execute(publish, name, version)
    except Exception as ex:
        logging.exception(ex)
        return False
def params_to_string(params):
    """Render a parameters dict as 'k1=v1, k2=v2' for log messages."""
    rendered = (f"{key}={value}" for key, value in params.items())
    return ", ".join(rendered)
def run_reports(wrapper, group, disease, touchstone, config, reports,
                success_callback, error_callback, running_reports_repo):
    """Kick off the configured reports, poll them to completion, publish the
    successful versions, and report progress through the callbacks.

    wrapper              - OrderlyWeb API wrapper; wrapper.ow is None when
                           authentication failed
    group, disease       - identify which group/disease this task runs for
    touchstone           - touchstone id; its stem before the last '-' is
                           passed to reports as touchstone_name
    config               - task config (only report_poll_seconds is read)
    reports              - report definitions (.name, .parameters, .timeout)
    success_callback     - called as success_callback(report, version)
    error_callback       - called as error_callback(report, error_message)
    running_reports_repo - shared store mapping (group, disease, name) -> key

    Returns {version: {"published": bool, "report": name}} for each new
    report version produced.
    """
    running_reports = {}
    new_versions = {}
    if wrapper.ow is None:
        error = "Orderlyweb authentication failed; could not begin task"
        for report in reports:
            error_callback(report, error)
        logging.error(error)
        return new_versions
    # Start configured reports
    for report in reports:
        # Kill any currently running report for this group/disease/report
        already_running = running_reports_repo.get(group, disease, report.name)
        if already_running is not None:
            try:
                logging.info("Killing already running report: {}. Key is {}"
                             .format(report.name, already_running))
                wrapper.execute(wrapper.ow.kill_report, already_running)
            except Exception as ex:
                # Best effort: the old run may already have finished or died.
                logging.exception(ex)
        # Assume report requires touchstone and touchstone_name parameters
        parameters = report.parameters or {}
        parameters["touchstone"] = touchstone
        parameters["touchstone_name"] = touchstone.rsplit('-', 1)[0]
        try:
            key = wrapper.execute(wrapper.ow.run_report,
                                  report.name,
                                  parameters,
                                  report.timeout)
            running_reports[key] = report
            # Save key to shared data - may be killed by subsequent task
            running_reports_repo.set(group, disease, report.name, key)
            logging.info("Running report: {} with parameters {}. Key is {}. "
                         "Timeout is {}s."
                         .format(report.name, params_to_string(parameters),
                                 key, report.timeout))
        except Exception as ex:
            # A report that failed to start is reported but does not stop
            # the remaining reports from being started.
            error_callback(report, str(ex))
            logging.exception(ex)
    # Poll running reports until they complete
    report_poll_seconds = config.report_poll_seconds
    while len(running_reports.items()) > 0:
        finished = []
        # Sorted keys give a stable polling order between iterations.
        keys = sorted(running_reports.keys())
        for key in keys:
            report = running_reports[key]
            try:
                result = wrapper.execute(wrapper.ow.report_status, key)
                if result.finished:
                    finished.append(key)
                    if result.success:
                        logging.info("Success for key {}. New version is {}"
                                     .format(key, result.version))
                        version = result.version
                        name = report.name
                        published = publish_report(wrapper, name, version)
                        if published:
                            logging.info(
                                "Successfully published report version {}-{}"
                                .format(name, version))
                            success_callback(report, version)
                        else:
                            error = "Failed to publish report version {}-{}"\
                                .format(name, version)
                            logging.error(error)
                            error_callback(report, error)
                        # Record the new version even if publishing failed.
                        new_versions[version] = {
                            "published": published,
                            "report": name
                        }
                    else:
                        error = "Failure for key {}. Status: {}"\
                            .format(key, result.status)
                        logging.error(error)
                        # don't invoke error callback for cancelled runs
                        if result.status != "interrupted":
                            error_callback(report, error)
            except Exception as ex:
                # A status-poll failure ends tracking for this run.
                error_callback(report, str(ex))
                if key not in finished:
                    finished.append(key)
                logging.exception(ex)
        for key in finished:
            report = running_reports[key]
            running_reports.pop(key)
            # delete finished report, unless it's been updated by another task
            running_reports_repo.delete_if_matches(group, disease, report.name,
                                                   key)
        time.sleep(report_poll_seconds)
    return new_versions
|
#!/usr/bin/python
# Copyright (c) 2010-2013, Regents of the University of California.
# All rights reserved.
#
# Released under the BSD 3-Clause license as published at the link below.
# https://openwsn.atlassian.net/wiki/display/OW/License
import threading
import socket
import logging
import os
import time
import binascii
from openvisualizer.SimEngine import SimEngine
from openvisualizer.BspEmulator import BspBoard
from openvisualizer.BspEmulator import BspDebugpins
from openvisualizer.BspEmulator import BspEui64
from openvisualizer.BspEmulator import BspLeds
from openvisualizer.BspEmulator import BspRadio
from openvisualizer.BspEmulator import BspSctimer
from openvisualizer.BspEmulator import BspUart
from openvisualizer.BspEmulator import HwSupply
from openvisualizer.BspEmulator import HwCrystal
#============================ get notification IDs ============================
# Contains the list of notifIds used in the following functions.
# List of notification names; a name's index in this list is its notif id.
notifString = []

def readNotifIds(headerPath):
    '''
    Contextual parent must call this method before other use of mote handler.

    ``headerPath`` Path to openwsnmodule_obj.h, containing notifIds

    Required since this module cannot know where to find the header file.
    '''
    import re
    global notifString
    # Raw string: '\w' in a plain literal is an invalid escape (deprecated).
    pattern = re.compile(r'MOTE_NOTIF_(\w+)')
    # Context manager guarantees the header file is closed, even on error.
    with open(headerPath) as f:
        for line in f:
            m = pattern.search(line)
            # Keep first-seen order; skip duplicates so ids stay stable.
            if m and m.group(1) not in notifString:
                notifString += [m.group(1)]
def notifId(s):
    """Return the numeric notification id for notification name ``s``.

    Ids are positions in ``notifString``, i.e. the order in which the names
    were found in the header file by ``readNotifIds``.
    """
    assert s in notifString
    return notifString.index(s)
#============================ classes =========================================
class MoteHandler(threading.Thread):
    """Thread hosting one emulated mote.

    Registers every MOTE_NOTIF_* callback of the emulated mote against the
    corresponding BSP/HW emulator object, then runs the mote's firmware in
    this thread.  Execution alternates between the engine's thread and this
    thread through the cpuRunning/cpuDone lock pair (see ``handleEvent``).
    """

    def __init__(self,mote):
        """Wire the emulated hardware and all callbacks for ``mote``."""

        # store params
        self.engine = SimEngine.SimEngine()
        self.mote = mote

        #=== local variables
        self.loghandler = self.engine.loghandler
        # unique identifier of the mote
        self.id = self.engine.idmanager.getId()
        # position of the mote
        self.location = self.engine.locationmanager.getLocation()
        # stats
        self.numRxCommands = 0
        self.numTxCommands = 0
        # hw
        self.hwSupply = HwSupply.HwSupply(self)
        self.hwCrystal = HwCrystal.HwCrystal(self)
        # bsp
        self.bspBoard = BspBoard.BspBoard(self)
        self.bspDebugpins = BspDebugpins.BspDebugpins(self)
        self.bspEui64 = BspEui64.BspEui64(self)
        self.bspLeds = BspLeds.BspLeds(self)
        self.bspSctimer = BspSctimer.BspSctimer(self)
        self.bspRadio = BspRadio.BspRadio(self)
        self.bspUart = BspUart.BspUart(self)
        # status
        self.booted = False
        # Both locks start out held.  The engine releases cpuRunning to let
        # the mote's CPU run; this thread releases cpuDone when the CPU
        # blocks again.  The handshake is driven in handleEvent().
        self.cpuRunning = threading.Lock()
        self.cpuRunning.acquire()
        self.cpuDone = threading.Lock()
        self.cpuDone.acquire()

        #=== install callbacks
        # board
        mote.set_callback(notifId('board_init'), self.bspBoard.cmd_init)
        mote.set_callback(notifId('board_sleep'), self.bspBoard.cmd_sleep)
        # debugpins
        mote.set_callback(notifId('debugpins_init'), self.bspDebugpins.cmd_init)
        mote.set_callback(notifId('debugpins_frame_toggle'), self.bspDebugpins.cmd_frame_toggle)
        mote.set_callback(notifId('debugpins_frame_clr'), self.bspDebugpins.cmd_frame_clr)
        mote.set_callback(notifId('debugpins_frame_set'), self.bspDebugpins.cmd_frame_set)
        mote.set_callback(notifId('debugpins_slot_toggle'), self.bspDebugpins.cmd_slot_toggle)
        mote.set_callback(notifId('debugpins_slot_clr'), self.bspDebugpins.cmd_slot_clr)
        mote.set_callback(notifId('debugpins_slot_set'), self.bspDebugpins.cmd_slot_set)
        mote.set_callback(notifId('debugpins_fsm_toggle'), self.bspDebugpins.cmd_fsm_toggle)
        mote.set_callback(notifId('debugpins_fsm_clr'), self.bspDebugpins.cmd_fsm_clr)
        mote.set_callback(notifId('debugpins_fsm_set'), self.bspDebugpins.cmd_fsm_set)
        mote.set_callback(notifId('debugpins_task_toggle'), self.bspDebugpins.cmd_task_toggle)
        mote.set_callback(notifId('debugpins_task_clr'), self.bspDebugpins.cmd_task_clr)
        mote.set_callback(notifId('debugpins_task_set'), self.bspDebugpins.cmd_task_set)
        mote.set_callback(notifId('debugpins_isr_toggle'), self.bspDebugpins.cmd_isr_toggle)
        mote.set_callback(notifId('debugpins_isr_clr'), self.bspDebugpins.cmd_isr_clr)
        mote.set_callback(notifId('debugpins_isr_set'), self.bspDebugpins.cmd_isr_set)
        mote.set_callback(notifId('debugpins_radio_toggle'), self.bspDebugpins.cmd_radio_toggle)
        mote.set_callback(notifId('debugpins_radio_clr'), self.bspDebugpins.cmd_radio_clr)
        mote.set_callback(notifId('debugpins_radio_set'), self.bspDebugpins.cmd_radio_set)
        mote.set_callback(notifId('debugpins_ka_clr'), self.bspDebugpins.cmd_ka_clr)
        mote.set_callback(notifId('debugpins_ka_set'), self.bspDebugpins.cmd_ka_set)
        mote.set_callback(notifId('debugpins_syncPacket_clr'), self.bspDebugpins.cmd_syncPacket_clr)
        mote.set_callback(notifId('debugpins_syncPacket_set'), self.bspDebugpins.cmd_syncPacket_set)
        mote.set_callback(notifId('debugpins_syncAck_clr'), self.bspDebugpins.cmd_syncAck_clr)
        mote.set_callback(notifId('debugpins_syncAck_set'), self.bspDebugpins.cmd_syncAck_set)
        mote.set_callback(notifId('debugpins_debug_clr'), self.bspDebugpins.cmd_debug_clr)
        mote.set_callback(notifId('debugpins_debug_set'), self.bspDebugpins.cmd_debug_set)
        # eui64
        mote.set_callback(notifId('eui64_get'), self.bspEui64.cmd_get)
        # leds
        mote.set_callback(notifId('leds_init'), self.bspLeds.cmd_init)
        mote.set_callback(notifId('leds_error_on'), self.bspLeds.cmd_error_on)
        mote.set_callback(notifId('leds_error_off'), self.bspLeds.cmd_error_off)
        mote.set_callback(notifId('leds_error_toggle'), self.bspLeds.cmd_error_toggle)
        mote.set_callback(notifId('leds_error_isOn'), self.bspLeds.cmd_error_isOn)
        mote.set_callback(notifId('leds_radio_on'), self.bspLeds.cmd_radio_on)
        mote.set_callback(notifId('leds_radio_off'), self.bspLeds.cmd_radio_off)
        mote.set_callback(notifId('leds_radio_toggle'), self.bspLeds.cmd_radio_toggle)
        mote.set_callback(notifId('leds_radio_isOn'), self.bspLeds.cmd_radio_isOn)
        mote.set_callback(notifId('leds_sync_on'), self.bspLeds.cmd_sync_on)
        mote.set_callback(notifId('leds_sync_off'), self.bspLeds.cmd_sync_off)
        mote.set_callback(notifId('leds_sync_toggle'), self.bspLeds.cmd_sync_toggle)
        mote.set_callback(notifId('leds_sync_isOn'), self.bspLeds.cmd_sync_isOn)
        mote.set_callback(notifId('leds_debug_on'), self.bspLeds.cmd_debug_on)
        mote.set_callback(notifId('leds_debug_off'), self.bspLeds.cmd_debug_off)
        mote.set_callback(notifId('leds_debug_toggle'), self.bspLeds.cmd_debug_toggle)
        mote.set_callback(notifId('leds_debug_isOn'), self.bspLeds.cmd_debug_isOn)
        mote.set_callback(notifId('leds_all_on'), self.bspLeds.cmd_all_on)
        mote.set_callback(notifId('leds_all_off'), self.bspLeds.cmd_all_off)
        mote.set_callback(notifId('leds_all_toggle'), self.bspLeds.cmd_all_toggle)
        mote.set_callback(notifId('leds_circular_shift'), self.bspLeds.cmd_circular_shift)
        mote.set_callback(notifId('leds_increment'), self.bspLeds.cmd_increment)
        # radio
        mote.set_callback(notifId('radio_init'), self.bspRadio.cmd_init)
        mote.set_callback(notifId('radio_reset'), self.bspRadio.cmd_reset)
        mote.set_callback(notifId('radio_setFrequency'), self.bspRadio.cmd_setFrequency)
        mote.set_callback(notifId('radio_rfOn'), self.bspRadio.cmd_rfOn)
        mote.set_callback(notifId('radio_rfOff'), self.bspRadio.cmd_rfOff)
        mote.set_callback(notifId('radio_loadPacket'), self.bspRadio.cmd_loadPacket)
        mote.set_callback(notifId('radio_txEnable'), self.bspRadio.cmd_txEnable)
        mote.set_callback(notifId('radio_txNow'), self.bspRadio.cmd_txNow)
        mote.set_callback(notifId('radio_rxEnable'), self.bspRadio.cmd_rxEnable)
        mote.set_callback(notifId('radio_rxNow'), self.bspRadio.cmd_rxNow)
        mote.set_callback(notifId('radio_getReceivedFrame'), self.bspRadio.cmd_getReceivedFrame)
        # sctimer
        mote.set_callback(notifId('sctimer_init'), self.bspSctimer.cmd_init)
        mote.set_callback(notifId('sctimer_setCompare'), self.bspSctimer.cmd_setCompare)
        mote.set_callback(notifId('sctimer_readCounter'), self.bspSctimer.cmd_readCounter)
        mote.set_callback(notifId('sctimer_enable'), self.bspSctimer.cmd_enable)
        mote.set_callback(notifId('sctimer_disable'), self.bspSctimer.cmd_disable)
        # uart
        mote.set_callback(notifId('uart_init'), self.bspUart.cmd_init)
        mote.set_callback(notifId('uart_enableInterrupts'), self.bspUart.cmd_enableInterrupts)
        mote.set_callback(notifId('uart_disableInterrupts'), self.bspUart.cmd_disableInterrupts)
        mote.set_callback(notifId('uart_clearRxInterrupts'), self.bspUart.cmd_clearRxInterrupts)
        mote.set_callback(notifId('uart_clearTxInterrupts'), self.bspUart.cmd_clearTxInterrupts)
        mote.set_callback(notifId('uart_writeByte'), self.bspUart.cmd_writeByte)
        mote.set_callback(notifId('uart_writeCircularBuffer_FASTSIM'), self.bspUart.cmd_writeCircularBuffer_FASTSIM)
        # NOTE(review): unlike its siblings this handler is not 'cmd_'-prefixed;
        # confirm that 'uart_writeBufferByLen_FASTSIM' is the attribute name in BspUart.
        mote.set_callback(notifId('uart_writeBufferByLen_FASTSIM'), self.bspUart.uart_writeBufferByLen_FASTSIM)
        mote.set_callback(notifId('uart_readByte'), self.bspUart.cmd_readByte)

        # logging this module
        self.log = logging.getLogger('MoteHandler_'+str(self.id))
        self.log.setLevel(logging.INFO)
        self.log.addHandler(logging.NullHandler())

        # logging this mote's modules
        for loggerName in [
                'MoteHandler_'+str(self.id),
                # hw
                'HwSupply_'+str(self.id),
                'HwCrystal_'+str(self.id),
                # bsp
                'BspBoard_'+str(self.id),
                'BspDebugpins_'+str(self.id),
                'BspEui64_'+str(self.id),
                'BspLeds_'+str(self.id),
                'BspSctimer_'+str(self.id),
                'BspRadio_'+str(self.id),
                'BspUart_'+str(self.id),
            ]:
            temp = logging.getLogger(loggerName)
            temp.setLevel(logging.INFO)
            temp.addHandler(self.loghandler)

        # initialize parent class
        threading.Thread.__init__(self)

        # give this thread a name
        self.setName('MoteHandler_'+str(self.id))

        # thread daemon mode
        self.setDaemon(True)

        # log
        self.log.info('thread initialized')

    def run(self):
        """Thread body: switch the mote on and run its firmware."""
        # log
        self.log.info('thread starting')

        # switch on the mote
        self.hwSupply.switchOn()

        # switchOn() is expected never to return; reaching this line is a bug
        assert 0

    #======================== public ==========================================

    def getId(self):
        # unique id assigned by the engine's id manager
        return self.id

    def getLocation(self):
        # (lat, lon) tuple assigned by the engine's location manager
        return self.location

    def setLocation(self,lat,lon):
        # override the mote's position
        self.location = (lat,lon)

    def handleEvent(self,functionToCall):
        """Deliver an event to the mote (called from the engine's thread).

        The first event must be hwSupply.switchOn, which boots the mote and
        starts this thread.  Later events run ``functionToCall`` as if in an
        ISR; if the ISR asks to kick the scheduler, the mote's CPU is
        released and the caller blocks until the CPU is done.
        """
        if not self.booted:
            assert functionToCall==self.hwSupply.switchOn
            # I'm not booted
            self.booted = True
            # start the thread's execution
            self.start()
            # wait for CPU to be done
            self.cpuDone.acquire()
        else:
            # call the function (mote runs in ISR)
            kickScheduler = functionToCall()
            assert kickScheduler in [True,False]
            if kickScheduler:
                # release the mote's CPU (mote runs in task mode)
                self.cpuRunning.release()
                # wait for CPU to be done
                self.cpuDone.acquire()

    #======================== private =========================================
#======================== private =========================================
|
#
# Collective Knowledge (Workflow to automate validation of results from the SysML'19 paper: "AGGREGATHOR: Byzantine Machine Learning")
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: <NAME>, <EMAIL>, http://fursin.net
#
cfg={}   # Will be updated by CK (meta description of this module)
work={}  # Will be updated by CK (temporal data)
ck=None  # Will be updated by CK (initialized CK kernel); module functions call ck.* via this
# Local settings
##############################################################################
# Initialize module
def init(i):
    """
    Input:  {}

    Output: {
              return - return code = 0, if successful
                                     > 0, if error
              (error) - error text if return > 0
            }
    """
    # Nothing to initialize for this module.
    result = {'return': 0}
    return result
##############################################################################
# run workflow
def run(i):
    """
    Run the AGGREGATHOR validation workflow: for every aggregator, run the
    CK program pipeline, collect accuracy/perf lines from stdout into a
    summary, and archive the experiment entries.

    Input:  {
              (cmd_key)    - local-mnist, local-mnist-attack or local-cifar10
              (aggregator) - if not specified, use the list of ['averaged-median', 'krum-co', 'krum-py', 'average', 'median', 'krum-tf', 'average-nan', 'bulyan-co', 'bulyan-py']
            }

    Output: {
              return - return code = 0, if successful
                                     > 0, if error
              (error) - error text if return > 0
            }
    """

    import os
    import shutil
    import copy

    gar=cfg['aggregators'] # See .cm/meta.json of this CK module

    # If output is to console (i.e. interactive mode), pass it to all further modules
    o=i.get('out','')
    oo=''
    if o=='con': oo='con'

    # check customization
    cmd_key=i.get('cmd_key','')
    if cmd_key=='': cmd_key='local-mnist'

    if i.get('aggregator','')!='':
        gar=[i['aggregator']]

    pcur=os.getcwd()
    pres=os.path.join(pcur, 'results')

    resolved_deps={}

    # Accumulated human-readable summary of all runs.
    # (Renamed from 'str', which shadowed the Python builtin.)
    log_text=''

    # Loop through all aggregators
    for agg in sorted(gar):
        ck.out('**************************************************************')
        ck.out('* CMD_KEY: '+cmd_key)
        ck.out('* Aggregator: '+agg)
        ck.out('')

        log_text+='****************************************************************\n'
        log_text+='* CMD_KEY: '+cmd_key+'\n'
        log_text+='* Aggregator: '+agg+'\n\n'

        if not os.path.isdir(pres):
            os.makedirs(pres)

        # How to set CK to platform generic-linux-dummy if there is a problem with sudo on GRID5000:
        #r=ck.access({'action':'detect',
        #             'module_uoa':'platform',
        #             'platform_init_uoa':'generic-linux-dummy',
        #             'update_platform_init':'yes',
        #             'out':oo})
        #if r['return']>0: return r

        # Set program pipeline
        copy_resolved_deps=copy.deepcopy(resolved_deps)
        pipeline={
            'data_uoa':'sysml19-aggregathor',
            'cmd_key':cmd_key,
            'env':{'AGGREGATOR':agg},
            'no_state_check':'yes',
            'dependencies':copy_resolved_deps
        }

        # Run program pipeline
        ii={'action':'autotune',
            'module_uoa':'pipeline',
            'data_uoa':'program',
            'iterations': 1,
            'repetitions': 3, # statistical repetitions of the same program pipeline
            'record':'yes',
            'record_failed':'yes',
            'record_params':{
                'search_point_by_features':'yes'
            },
            'tags':'syml19,experiments,raw,aggregathor',
            'meta':{},
            'record_repo':'local',
            'record_uoa':'sysml19-aggregathor-'+cmd_key+'-'+agg,
            'pipeline':pipeline,
            'out':oo
        }
        r=ck.access(ii)
        if r['return']>0: return r

        ck.save_json_to_file({'json_file':'/tmp/xyz1.json','dict':r})

        # If first time, get resolved deps and record platform and env
        if len(resolved_deps)==0:
            os.system('ck detect platform > '+os.path.join(pres,'ck-platform.log'))
            os.system('ck show env > '+os.path.join(pres,'ck-env.log'))
            resolved_deps=copy.deepcopy(r.get('dependencies',{}))

        # Copy output
        cur_dir=r.get('state',{}).get('cur_dir','')
        p1=os.path.join(cur_dir, 'stdout.log')
        p2=os.path.join(cur_dir, 'stderr.log')

        if os.path.isfile(p1):
            px1=os.path.join(pres, cmd_key+'-'+agg+'-stdout.log')
            shutil.copyfile(p1,px1)

            # Load file and add numbers (accuracy, perf) to the summary
            r=ck.load_text_file({'text_file':p1, 'split_to_list':'yes'})
            if r['return']>0: return r
            lst=r['lst']
            for l in lst:
                if '(test)' in l or '(perf)' in l:
                    j=l.find('[RUNNER]')
                    if j>=0:
                        l=l[j:]
                    log_text+=l.strip()+'\n'

        if os.path.isfile(p2):
            px2=os.path.join(pres, cmd_key+'-'+agg+'-stderr.log')
            shutil.copyfile(p2,px2)

    # Record general info
    pt=os.path.join(pres, 'results.txt')
    r=ck.save_text_file({'text_file':pt, 'string':log_text})
    if r['return']>0: return r

    # Archive results (can reply them)
    ck.out('**************************************************************')

    p=os.path.join(pres, 'ck-results.zip')
    if os.path.isfile(p):
        os.remove(p)

    ck.out('Archiving experimental results: '+p)
    ck.out('')

    os.chdir(pres)
    ii={'action':'zip',
        'cid':'local:experiment:sysml19-aggregathor-*',
        'archive_name':'ck-results.zip',
        'overwrite':'yes',
        'out':oo}
    r=ck.access(ii)
    if r['return']>0: return r

    return {'return':0}
|
from bloom.bloomfilter import BloomFilter, openfile, setup_dict
import hashlib
import logging
from bitarray import bitarray
import subprocess
# Configure root logging for this test module.
logging.basicConfig(level=logging.DEBUG)
# NOTE(review): the logger returned here is discarded; the calls below go to
# the root logger.
logging.getLogger('pytest bloom3')
logging.info("pytest bloomfilter")
logging.debug("debug")
def test_bitmap_creation():
    """A fresh filter of size 1000 starts with every bit cleared."""
    size = 1000
    bf = BloomFilter(size)
    cleared = bf.bit_array.count(False)
    assert cleared == size
def test_bitmap_clear():
    """A fresh filter has no bits set."""
    bf = BloomFilter(1000)
    set_bits = bf.bit_array.count(True)
    assert set_bits == 0
def test_hash():
    """MD5 of the same UTF-8-encoded string is deterministic."""
    # (The original assigned an ASCII string first and immediately
    # overwrote it; the dead assignment has been removed.)
    tstr = "écrit ça dans un fichier"
    # https://stackoverflow.com/questions/5297448/how-to-get-md5-sum-of-a-string-using-python
    # https://stackoverflow.com/questions/7585307/how-to-correct-typeerror-unicode-objects-must-be-encoded-before-hashing
    # md5 operates on bytes so encode str into bytes
    # The MD5 message digest hashing algorithm processes data in 512-bit
    # blocks, broken down into 16 words composed of 32 bits each. The output
    # from MD5 is a 128-bit message digest value.
    h = hashlib.md5(tstr.encode('utf-8')).hexdigest()
    h1 = hashlib.md5(tstr.encode('utf-8')).hexdigest()
    assert (h) == (h1)
def test_insert():
    """Inserting one word sets exactly two bits at known indexes."""
    line = u'écrit'
    bf = BloomFilter(100)
    i1, i2 = bf.insert(line, bf.n)
    logging.debug('\n my line {0}'.format(line.encode('utf-8')))
    # Fixed format string: the original used {0} twice and never consumed
    # argument 1 (the set-bit count), so the count was never shown.
    logging.debug('\n whoset: \n bits {0} set: {1} index: {2} \
 index: {3} '.format(bf.bit_array,
                     bf.bit_array.count(True), i1, i2))
    logging.debug('n')
    # Golden values for this word/size: hash indexes 89 and 61, 2 bits set.
    assert [i1, i2, bf.bit_array.count(True), bf.bit_array.count(False)] == [
        89, 61, 2, 98]
def test_ascii_wordlist():
    """Loading tests/ascii_words.txt must reproduce a known-good bit pattern."""
    n = 100
    file = "tests/ascii_words.txt"
    # NOTE(review): 'n' looks like a typo for '\n' — confirm intent
    print('n')
    bf = BloomFilter(n)
    tfileh, tencode = openfile(file)
    # setup_dict(bf, size_bitarray, dict_file, dfileh, dencode)
    setup_dict(bf, n, file, tfileh, tencode)
    # Golden bit pattern captured from a known-good run over this word list.
    test_bit_array = bitarray('0000000000000000000001000100000000000001010000000001000000000000000001010000000001000000000000101000')
    print('\n *******************\n bf and set \n', bf.bit_array)
    print('\n Test_bit_array \n', test_bit_array)
    print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    assert (bitarray(test_bit_array) == bf.bit_array)
def test_search_one_word():
    """A word just inserted must be found at the same two hash indexes."""
    n = 100
    file = "tests/ascii_words.txt"
    # NOTE(review): 'n' looks like a typo for '\n' — confirm intent
    print('n')
    bf = BloomFilter(n)
    print('n')
    # Detect the file's encoding via the 'file' utility so readline decodes
    # correctly (requires a Unix 'file' command on PATH).
    file_encoding = subprocess.getoutput('file -b --mime-encoding %s' % file)
    fileIN = open(file, encoding=file_encoding)
    line = fileIN.readline()
    i1, i2 = bf.insert(line, bf.n)
    fnd, j, k = bf.find_word(line, n)
    print("\n ***********after word one\n", bf.bit_array)
    # find_word must report found=True and return the insertion indexes.
    assert ([fnd, i1, i2]) == ([True, j, k])
def test_search_two_words():
    """Two inserted words must both be found at their insertion indexes."""
    n = 100
    file = "tests/ascii_words.txt"
    # NOTE(review): 'n' looks like a typo for '\n' — confirm intent
    print('n')
    bf = BloomFilter(n)
    print('n')
    # Detect the file's encoding via the 'file' utility (Unix only).
    file_encoding = subprocess.getoutput('file -b --mime-encoding %s' % file)
    fileIN = open(file, encoding=file_encoding)
    # first word
    line = fileIN.readline()
    i1, i2 = bf.insert(line, bf.n)
    fnd1, j1, j2 = bf.find_word(line, n)
    print("\n ***********after word one\n", bf.bit_array)
    # second word
    line = fileIN.readline()
    i3, i4 = bf.insert(line, bf.n)
    fnd2, j3, j4 = bf.find_word(line, n)
    print("\n ***********after word two\n", bf.bit_array)
    assert ([fnd1, fnd2, i1, i2, i3, i4]) == ([True, True, j1, j2, j3, j4])
def test_no_wordfile():
    """openfile signals a missing word file by returning (0, 0)."""
    handle, encoding = openfile("ThisFileShouldNotBefound.txt")
    assert handle == 0
    assert encoding == 0
def test_write_test_file():
    """Write a small latin-1 fixture file containing accented words."""
    words = ['zzz\n', 'étagère\n', "étui's\n"]
    with open('./data/playdata/testdata.txt', 'w', encoding="iso-8859-1") as out:
        for word in words:
            out.write(word)
|
#!/usr/bin/python
import os
import subprocess
from os.path import isfile, join
# KITTI 02
#start_num = 0
#stop_num = 4660
#frame_step = 1
#left_prefix = "/image_0/"
#right_prefix = "/image_1/"
#left_suffix = ".png"
#right_suffix = ".png"
#out_fname = "kitti_02_lst.xml"
#start_num = 0
##stop_num = 1100
#frame_step = 1
#left_suffix = "_10.png"
#right_suffix = "_10.png"
#
##data_folder = "/home/kivan/Projects/datasets/KITTI/sequences_gray/07/"
##data_folder = "/home/kivan/Projects/datasets/KITTI/dense_stereo/training/"
##stop_num = 193
#data_folder = "/home/kivan/Projects/datasets/KITTI/dense_stereo/testing/"
#stop_num = 194
#
##left_prefix = "/colored_0/"
##right_prefix = "/colored_1/"
##out_folder = "/home/kivan/Projects/datasets/results/dense_stereo/spsstereo/kitti/data"
##binary_path = "/home/kivan/Projects/cv-stereo/build/spsstereo/release/spsstereo"
#
#left_prefix = "/image_0/"
#right_prefix = "/image_1/"
##out_folder = "/home/kivan/Projects/datasets/results/dense_stereo/kitti/testing/our_sgm_5_60/data/"
##binary_path = "/home/kivan/Projects/cv-stereo/build/our_sgm/release/our_sgm"
# Stereo binary to run on each left/right image pair.
binary_path = "/home/kivan/source/cv-stereo/build/spsstereo/release/spsstereo"
#binary_path = "/home/kivan/source/cv-stereo/build/sgm_single/release/sgm_single"

# Active dataset configuration (Validation_07); earlier runs used Training_00.
data_folder = "/home/kivan/datasets/KITTI/segmentation/semantic_segmentation/Validation_07/RGB/"
out_folder = "/home/kivan/datasets/KITTI/segmentation/semantic_segmentation/Validation_07/RGB/depth"
img_right_dir = "/home/kivan/datasets/KITTI/sequences_color/07/image_3/"

# Create the output directory; warn (but continue) if it already exists so
# that stale results are not silently mixed with fresh ones.
if not os.path.exists(out_folder):
    os.makedirs(out_folder)
else:
    print("WARNING: path exists - ", out_folder)

left_dir = data_folder + "/left/"
right_dir = data_folder + "/right/"

# Sort the listing: os.listdir() order is arbitrary, and a deterministic
# order makes runs reproducible and the progress output meaningful.
filelist = sorted(f for f in os.listdir(left_dir) if isfile(join(left_dir, f)))
for filename in filelist:
    print(filename)
    img_left = left_dir + filename
    img_right = right_dir + filename
    # Run the stereo binary on this pair, writing depth into out_folder.
    subprocess.call([binary_path, img_left, img_right, out_folder])
|
<reponame>augustinharter/nlp-bert-project<filename>tests/bert_test.py
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from tqdm import tqdm, trange
import pandas as pd
import io
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
import transformers
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Force CPU execution (the CUDA line above is kept for reference).
device = torch.device("cpu")
# Number of visible GPUs — informational only, since device is forced to CPU.
n_gpu = torch.cuda.device_count()
#torch.cuda.get_device_name(0)
# Directory containing the ATIS pickle files.
DATA_DIR="../data"
# load Pickle file
# load Pickle file
def load_ds(fname=os.path.join(DATA_DIR, 'atis.train.pkl'), verbose=True):
    """Load an ATIS pickle file.

    :param fname: path to the pickle.  Default is the training set under
                  DATA_DIR.  (The original default passed '/atis.train.pkl';
                  os.path.join discards every preceding component when it
                  meets an absolute path, so DATA_DIR was silently ignored.)
    :param verbose: print dataset statistics after loading.
    :return: (ds, dicts) — the dataset dict and its vocabulary dicts.
    """
    with open(fname, 'rb') as stream:
        ds, dicts = pickle.load(stream)
    if verbose:
        print('Done loading: ', fname)
        print(' samples: {:4d}'.format(len(ds['query'])))
        print(' vocab_size: {:4d}'.format(len(dicts['token_ids'])))
        print(' slot count: {:4d}'.format(len(dicts['slot_ids'])))
        print(' intent count: {:4d}'.format(len(dicts['intent_ids'])))
    return ds, dicts
# convert Pickle file to arrays
# convert Pickle file to arrays
def load_atis(filename, add_start_end_token=False, verbose=True):
    """Load an ATIS pickle and unpack it into vocab maps and parallel arrays.

    Returns (t2i, s2i, in2i, i2t, i2s, i2in, input_tensor, target_tensor,
    query_data, intent_data, intent_data_label, slot_data), where *2i map
    names to ids, i2* are their inverses, and the *_data arrays hold the
    human-readable query/intent/slot strings.
    """
    train_ds, dicts = load_ds(os.path.join(DATA_DIR,filename), verbose)
    t2i, s2i, in2i = map(dicts.get, ['token_ids', 'slot_ids','intent_ids'])
    # Invert each name->id dict into an id->name dict.
    i2t, i2s, i2in = map(lambda d: {d[k]:k for k in d.keys()}, [t2i,s2i,in2i])
    query, slots, intent = map(train_ds.get, ['query', 'slot_labels', 'intent_labels'])
    if add_start_end_token:
        # Hard-coded ids 178/179 extend the slot vocabulary with BOS/EOS
        # markers — assumes the ATIS slot vocab has fewer than 178 entries.
        i2s[178] = 'BOS'
        i2s[179] = 'EOS'
        s2i['BOS'] = 178
        s2i['EOS'] = 179
    input_tensor = []
    target_tensor = []
    query_data = []
    intent_data = []
    slot_data = []
    # Pick 5 random sample indexes to pretty-print when verbose.
    to_show = np.random.randint(0, len(query)-1, 5)
    for i in range(len(query)):
        input_tensor.append(query[i])
        slot_text = []
        slot_vector = []
        for j in range(len(query[i])):
            slot_text.append(i2s[slots[i][j]])
            slot_vector.append(slots[i][j])
        if add_start_end_token:
            # Overwrite the first/last slot with the BOS/EOS markers.
            slot_text[0] = 'BOS'
            slot_vector[0] = 178
            slot_text[-1] = 'EOS'
            slot_vector[-1]= 179
        target_tensor.append(slot_vector)
        q = ' '.join(map(i2t.get, query[i]))
        # Strip the BOS/EOS tokens from the readable query text.
        query_data.append(q.replace('BOS', '').replace('EOS',''))
        intent_data.append(i2in[intent[i][0]])
        slot = ' '.join(slot_text)
        # NOTE(review): slices off only the first and last *character* of the
        # joined slot string — presumably meant to drop the BOS/EOS tokens;
        # confirm against downstream consumers.
        slot_data.append(slot[1:-1])
        if i in to_show and verbose:
            print('Query text:', q)
            print('Query vector: ', query[i])
            print('Intent label: ', i2in[intent[i][0]])
            print('Slot text: ', slot)
            print('Slot vector: ', slot_vector)
            print('*'*74)
    query_data = np.array(query_data)
    intent_data = np.array(intent_data)
    slot_data = np.array(slot_data)
    intent_data_label = np.array(intent).flatten()
    return t2i, s2i, in2i, i2t, i2s, i2in, input_tensor, target_tensor, \
        query_data, intent_data, intent_data_label, slot_data
def trim(t, n):
    """Return a list with each sequence in *t* cut to its first *n* items."""
    return [seq[:n] for seq in t]
# load ATIS training dataset
t2i_train, s2i_train, in2i_train, i2t_train, i2s_train, i2in_train, \
input_tensor_train, target_tensor_train, \
query_data_train, intent_data_train, intent_data_label_train, slot_data_train = load_atis('atis.train.pkl')
# One intent id per training sample; used as the classification target.
labels = intent_data_label_train
# NOTE(review): nb_labels is the number of *samples*, not the number of
# distinct intent classes. It is later passed to
# BertForSequenceClassification as num_labels, which looks wrong --
# confirm (len(set(labels)) would give the class count).
nb_labels = len(labels)
# load ATIS testing dataset
t2i_test, s2i_test, in2i_test, i2t_test, i2s_test, i2in_test, \
input_tensor_test, target_tensor_test, \
query_data_test, intent_data_test, intent_data_label_test, slot_data_test = load_atis('atis.test.pkl')
# add special tokens for BERT to work properly ([CLS] ... [SEP])
sentences = ["[CLS] " + query + " [SEP]" for query in query_data_train]
print(sentences[0])
# Tokenize with BERT tokenizer (WordPiece, lowercased to match the model)
tokenizer = transformers.BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
print ("Tokenize the first sentence:")
# Set the maximum sequence length.
MAX_LEN = 128
# Pad our input tokens
# Use the BERT tokenizer to convert the tokens to their index numbers in the BERT vocabulary
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
# Pad/truncate every sequence to exactly MAX_LEN ids (post-padding with 0)
input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
print(input_ids)
print ("TOKENS:",len(input_ids[0]))
# Create attention masks
attention_masks = []
# Create a mask of 1s for each token followed by 0s for padding
# (id 0 is the pad id produced by pad_sequences above, so i > 0 marks real tokens)
for seq in input_ids:
    seq_mask = [float(i>0) for i in seq]
    attention_masks.append(seq_mask)
# Use train_test_split to split our data into train and validation sets for training.
train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(input_ids, labels, random_state=2018, test_size=0.1)
# Reusing the same random_state reproduces the identical split, so the
# masks stay aligned with the inputs (input_ids is just a dummy second array).
train_masks, validation_masks, _, _ = train_test_split(attention_masks, input_ids,random_state=2018, test_size=0.1)
# Convert all of our data into torch tensors, the required datatype for our model
train_inputs = torch.tensor(train_inputs)
validation_inputs = torch.tensor(validation_inputs)
train_labels = torch.tensor(train_labels)
validation_labels = torch.tensor(validation_labels)
train_masks = torch.tensor(train_masks)
validation_masks = torch.tensor(validation_masks)
# Select a batch size for training.
batch_size = 32
# Create an iterator of our data with torch DataLoader
train_data = TensorDataset(train_inputs, train_masks, train_labels)
# Shuffle training batches each epoch
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
# Validation keeps a fixed order; shuffling is unnecessary for evaluation
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)
# Load BertForSequenceClassification, the pretrained BERT model with a single linear classification layer on top.
# NOTE(review): num_labels is set to nb_labels = len(labels), i.e. the number
# of training samples rather than the number of intent classes -- confirm.
model = transformers.BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=nb_labels)
#model.cuda()
# Informational dump of the BERT architecture below (a bare string literal;
# it is never executed and kept only as reference documentation).
"""
# BERT model summary
BertForSequenceClassification(
(bert): BertModel(
(embeddings): BertEmbeddings(
(word_embeddings): Embedding(30522, 768, padding_idx=0)
(position_embeddings): Embedding(512, 768)
(token_type_embeddings): Embedding(2, 768)
(LayerNorm): BertLayerNorm()
(dropout): Dropout(p=0.1)
)
(encoder): BertEncoder(
(layer): ModuleList(
(0): BertLayer(
(attention): BertAttention(
(self): BertSelfAttention(
(query): Linear(in_features=768, out_features=768, bias=True)
(key): Linear(in_features=768, out_features=768, bias=True)
(value): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.1)
)
(output): BertSelfOutput(
(dense): Linear(in_features=768, out_features=768, bias=True)
(LayerNorm): BertLayerNorm()
(dropout): Dropout(p=0.1)
)
)
(intermediate): BertIntermediate(
(dense): Linear(in_features=768, out_features=3072, bias=True)
)
(output): BertOutput(
(dense): Linear(in_features=3072, out_features=768, bias=True)
(LayerNorm): BertLayerNorm()
(dropout): Dropout(p=0.1)
)
)
'ransformers.BertModel.BertAdam
'
'
)
)
(pooler): BertPooler(
(dense): Linear(in_features=768, out_features=768, bias=True)
(activation): Tanh()
)
)
(dropout): Dropout(p=0.1)
(classifier): Linear(in_features=768, out_features=2, bias=True)
)
"""
# BERT fine-tuning parameters
param_optimizer = list(model.named_parameters())
# Parameters whose names contain these substrings get no weight decay
# (biases and the LayerNorm weights, named gamma/beta in this BERT port).
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.01},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.0}
]
# Intended max gradient norm for clipping.
# NOTE(review): no gradient clipping is applied in the visible training
# loop even though this is defined -- confirm.
max_grad_norm = 1.0
num_total_steps = 1000
num_warmup_steps = 100
optimizer = transformers.AdamW(optimizer_grouped_parameters, lr=2e-5, correct_bias=False)
# NOTE(review): WarmupLinearSchedule exists only in the older
# pytorch-transformers releases; newer `transformers` versions use
# get_linear_schedule_with_warmup -- confirm the pinned library version.
scheduler = transformers.WarmupLinearSchedule(optimizer, num_warmup_steps, num_total_steps)
# Accuracy helper shared by the validation loop below.
def flat_accuracy(preds, labels):
    """Fraction of rows whose argmax prediction equals the gold label."""
    predicted = np.argmax(preds, axis=1).flatten()
    gold = labels.flatten()
    return float(np.sum(predicted == gold)) / len(gold)
# Store our loss per batch for plotting at the end
train_loss_set = []
# Number of training epochs
epochs = 4
# BERT training loop: each epoch runs one training pass over
# train_dataloader, then one evaluation pass over validation_dataloader.
for _ in trange(epochs, desc="Epoch"):
    ## TRAINING
    # Set our model to training mode (enables dropout etc.)
    model.train()
    # Tracking variables
    tr_loss = 0
    nb_tr_examples, nb_tr_steps = 0, 0
    # Train the data for one epoch
    for step, batch in enumerate(train_dataloader):
        print("step:", step)
        # Move the batch to the active device
        batch = tuple(t.to(device) for t in batch)
        # Unpack the inputs from our dataloader
        b_input_ids, b_input_mask, b_labels = batch
        # Clear out the gradients (by default they accumulate)
        optimizer.zero_grad()
        # Forward pass; element [0] of the output tuple is the loss
        loss = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)[0]
        train_loss_set.append(loss.item())
        # Backward pass
        loss.backward()
        # FIX: clip gradients to max_grad_norm -- the threshold was defined
        # above (max_grad_norm = 1.0) but clipping was never applied.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        # Update parameters and take a step using the computed gradient
        optimizer.step()
        # FIX: advance the warmup/linear-decay schedule once per batch --
        # the scheduler was created above but never stepped, so the
        # learning rate never changed.
        scheduler.step()
        # Update tracking variables
        tr_loss += loss.item()
        nb_tr_examples += b_input_ids.size(0)
        nb_tr_steps += 1
    print("Train loss: {}".format(tr_loss/nb_tr_steps))
    ## VALIDATION
    # Put model in evaluation mode (disables dropout etc.)
    model.eval()
    # Tracking variables
    eval_accuracy = 0.0
    nb_eval_steps = 0
    # Evaluate data for one epoch
    for batch in validation_dataloader:
        batch = tuple(t.to(device) for t in batch)
        b_input_ids, b_input_mask, b_labels = batch
        # Telling the model not to compute or store gradients, saving memory
        # and speeding up validation
        with torch.no_grad():
            # Forward pass, calculate logit predictions
            logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)[0]
        # Move logits and labels to CPU for numpy-based accuracy
        logits = logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()
        eval_accuracy += flat_accuracy(logits, label_ids)
        nb_eval_steps += 1
    print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
# plot training performance (per-batch loss curve)
plt.figure(figsize=(15,8))
plt.title("Training loss")
plt.xlabel("Batch")
plt.ylabel("Loss")
plt.plot(train_loss_set)
plt.show()
import os
import re
import json
import yaml
import numpy as np
from tps.types import Charset
from tps import symbols as smb
# Grapheme inventory per charset (the CMU / transcription variants share
# the plain-grapheme set of their language).
GRAPHEME_DICT = {
    Charset.en: smb.english.GRAPHEMES_EN,
    Charset.en_cmu: smb.english.GRAPHEMES_EN,
    Charset.ru: smb.russian.GRAPHEMES_RU,
    Charset.ru_trans: smb.russian.GRAPHEMES_RU
}
# Characters that must NOT be treated as punctuation for each charset.
# The hyphen is backslash-escaped so the list is safe to embed inside a
# regex character class.
# FIX: the original wrote the escape as "\%s", an invalid string escape
# sequence (a SyntaxWarning since Python 3.12); "\\%s" produces exactly
# the same backslash-hyphen text explicitly.
NOT_PUNCT_DICT = {
    Charset.en: smb.english.GRAPHEMES_EN + smb.shields + [smb.accent, smb.separator, "\\%s" % smb.hyphen],
    Charset.en_cmu: smb.english.EN_CMU_SET + smb.shields + [smb.accent, smb.separator, "\\%s" % smb.hyphen],
    Charset.ru: smb.russian.GRAPHEMES_RU + smb.shields + [smb.accent, smb.separator, "\\%s" % smb.hyphen],
    Charset.ru_trans: smb.russian.GRAPHEMES_RU + smb.russian.PHONEMES_RU_TRANS + smb.shields +
                      [smb.accent, smb.separator, "\\%s" % smb.hyphen]
}
def prob2bool(prob):
    """Interpret *prob* as a boolean outcome.

    A bool passes through unchanged; a number in [0, 1] is treated as the
    probability of drawing True.
    """
    if isinstance(prob, bool):
        return prob
    return np.random.choice([True, False], p=[prob, 1 - prob])
# Matches any single punctuation or space character; used by
# split_to_tokens (below) to isolate them as standalone tokens.
_punct_re = re.compile("[{}]".format("".join(smb.punctuation + smb.space)))
def split_to_tokens(text, punct_re=None):
    """Split *text* into tokens, keeping each punctuation/space character
    as its own token.

    Works by wrapping every punctuation match in '*' markers and then
    splitting on '*', dropping empty fragments.
    """
    if punct_re is None:
        punct_re = _punct_re
    marked = punct_re.sub(lambda m: "*{}*".format(m.group(0)), text)
    return [token for token in marked.split("*") if token != ""]
def hide_stress(regexp, text):
    """Replace each stress mark matched by *regexp* (e.g. '+a') with the
    uppercased final character of the match (e.g. 'A')."""
    def _to_upper(match):
        return match.group(0)[-1].upper()
    return regexp.sub(_to_upper, text)
def reveal_stress(regexp, text):
    """Inverse of hide_stress: turn each match (an uppercased stressed
    letter) back into '+' followed by its lowercase form."""
    def _to_plus(match):
        return "+" + match.group(0).lower()
    return regexp.sub(_to_plus, text)
def get_stressed_letters_re(charset):
    """Compile a regex matching '+<grapheme>' for the given charset name."""
    graphemes = "".join(GRAPHEME_DICT[Charset[charset]])
    return re.compile(r"\+[{}]".format(graphemes))
def get_capital_letters_re(charset):
    """Compile a regex matching any uppercased grapheme of the charset."""
    graphemes = "".join(GRAPHEME_DICT[Charset[charset]]).upper()
    return re.compile("[{}]".format(graphemes))
def load_dict(dict_source, fmt=None):
    """Load a key->value dictionary from a file, an existing dict, or None.

    Parameters
    ----------
    dict_source : str or dict or None
        Path to a dictionary file, an already-built dict (returned as-is),
        or None (returns an empty dict).
    fmt : str or None, optional
        One of 'json', 'yaml' or 'plane' (this module's name for plain
        "key|value" lines). Inferred from a .json/.yaml extension;
        required for any other extension.

    Returns
    -------
    dict

    Raises
    ------
    ValueError
        If the file format cannot be determined or is unsupported.
    FileNotFoundError
        If dict_source is a path that does not exist.
    TypeError
        If dict_source is of an unsupported type.
    """
    _dict = {}
    if isinstance(dict_source, str):
        _, ext = os.path.splitext(dict_source)
        if ext in [".json", ".yaml"]:
            fmt = ext.replace(".", "")
        elif fmt is None:
            raise ValueError("File format must be specified ['json', 'yaml', 'plane']")
        # FIX: raise an explicit error instead of a bare assert --
        # asserts are stripped when running under `python -O`.
        if not os.path.exists(dict_source):
            raise FileNotFoundError("No such dictionary file: {}".format(dict_source))
        with open(dict_source, "r", encoding="utf-8") as stream:
            if fmt == "json":
                _dict = json.load(stream)
            elif fmt == "yaml":
                _dict = yaml.safe_load(stream)
            elif fmt == "plane":
                # Plain format: one "key|value" pair per line.
                lines = stream.read().splitlines()
                pairs = tuple(line.split("|") for line in lines)
                _dict = {elem[0]: elem[1] for elem in pairs}
            else:
                raise ValueError("File format must be specified ['json', 'yaml', 'plane']")
    elif isinstance(dict_source, dict):
        _dict = dict_source
    elif dict_source is None:
        pass
    else:
        raise TypeError("dict_source must be str, dict or None, got {}".format(type(dict_source)))
    return _dict
def save_dict(dict_obj, filepath, fmt=None):
    """Serialize *dict_obj* to *filepath* as JSON or YAML.

    Parameters
    ----------
    dict_obj : dict
        The dictionary to save.
    filepath : str
        Destination path; a .json/.yaml extension determines the format.
    fmt : str or None, optional
        'json' or 'yaml'; required when the extension is neither.

    Returns
    -------
    str
        The filepath that was written.

    Raises
    ------
    ValueError
        If the format cannot be determined or is unsupported.
    """
    _, ext = os.path.splitext(filepath)
    if ext in [".json", ".yaml"]:
        fmt = ext.replace(".", "")
    # FIX: validate the format *before* opening the target -- the original
    # opened (and truncated) the file first and only then raised, leaving
    # an empty file behind. Also removed the unused `_dict = {}` local and
    # made the first error message match the supported formats (the
    # original advertised 'plane', which save_dict never supported).
    if fmt not in ("json", "yaml"):
        raise ValueError("File format must be specified ['json', 'yaml']")
    with open(filepath, "w", encoding="utf-8") as stream:
        if fmt == "json":
            json.dump(dict_obj, stream, indent=2, ensure_ascii=False)
        else:
            yaml.dump(dict_obj, stream, indent=2, allow_unicode=True)
    return filepath
# scripts/rebuild-endpoints.py (repo: disruptek/boto)
"""Rebuild endpoint config.
Final format looks like this::
{
"autoscaling": {
"ap-northeast-1": "autoscaling.ap-northeast-1.amazonaws.com",
"ap-northeast-2": "autoscaling.ap-northeast-2.amazonaws.com",
"ap-southeast-1": "autoscaling.ap-southeast-1.amazonaws.com",
...
},
"service-name": {
"region": "hostname"
}
}
This will use the EndpointResolver from botocore to regenerate
endpoints. To regen the latest static endpoints, ensure you have
the latest version of botocore installed before running this script.
Usage
=====
To print the newly gen'd endpoints to stdout::
python rebuild-endpoints.py
To overwrite the existing endpoints.json file in boto:
python rebuild-endpoints.py --overwrite
If you have a custom upstream endpoints.json file you'd like
to use, you can provide the ``--endpoints-file``:
    python rebuild-endpoints.py --endpoints-file custom-endpoints.json
"""
import sys
import os
import json
import argparse
try:
import botocore.session
from botocore.regions import EndpointResolver
except ImportError:
print("Couldn't import botocore, make sure it's installed in order "
"to regen endpoint data.")
sys.exit(1)
# Absolute path to boto's bundled endpoints.json, resolved relative to
# this script's own location (<repo-root>/boto/endpoints.json).
EXISTING_ENDPOINTS_FILE = os.path.join(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
    'boto', 'endpoints.json')
def _load_endpoint_services(filename):
    """Return the service names (top-level keys) of an endpoints.json file."""
    with open(filename) as endpoint_file:
        endpoint_data = json.load(endpoint_file)
    return list(endpoint_data)
class StrictEndpointResolver(object):
    """Endpoint Resolver that verifies services in a partition."""
    # It's worth seeing if any of the stuff in this class makes sense
    # to move back into botocore. This might be too specific to boto2's
    # usage. The intent was to try to make the StaticEndpointBuilder
    # as easy to follow as possible, so this class wraps an existing
    # botocore endpoint and provides some extension methods. The main
    # extension points are:
    #
    # * Introspection about known services in a partition.
    # * Chaining partition iteration (for boto2 we just need to create
    #   a list of region->endpoints across all known partitions so this
    #   class provides iterators that allow you to iterate over all known
    #   regions for all known partitions).
    # * Helper method for static hostname lookup by abstracting the
    #   sslCommonName checks into a "get_hostname" method.
    # * Allowing you to use "service names" specific to boto2 when
    #   generating endpoints. Internally this has a mapping of which endpoint
    #   prefixes to use.
    SERVICE_RENAMES = {
        # The botocore resolver is based on endpoint prefix.
        # These don't always sync up to the name that boto2 uses.
        # A mapping can be provided that handles the mapping between
        # "service names" and endpoint prefixes.
        'awslambda': 'lambda',
        'cloudwatch': 'monitoring',
        'ses': 'email',
        'ec2containerservice': 'ecs',
        'configservice': 'config',
    }

    def __init__(self, resolver, endpoint_data,
                 service_name_map=None):
        """
        :param resolver: An instance of botocore.regions.EndpointResolver.
        :param endpoint_data: Parsed endpoints.json data (the dict with a
            top-level 'partitions' key).
        :param service_name_map: Optional dict mapping boto2 service names
            to endpoint prefixes; defaults to SERVICE_RENAMES.
        """
        #: An instance of botocore.regions.EndpointResolver.
        self._resolver = resolver
        self._endpoint_data = endpoint_data
        if service_name_map is None:
            service_name_map = self.SERVICE_RENAMES
        self._service_map = service_name_map

    def regions_for_service(self, service_name):
        """Yield every region the service is in, across all partitions."""
        # "What are all the regions EC2 is in across all known partitions?"
        endpoint_prefix = self._endpoint_prefix(service_name)
        for partition_name in self.get_available_partitions():
            if self.is_service_in_partition(service_name, partition_name):
                for region_name in self._resolver.get_available_endpoints(
                        endpoint_prefix, partition_name):
                    yield region_name

    def regions_for_partition(self, partition_name):
        """Return all non-global region names of the given partition."""
        # "What are all the known regions in a given partition?"
        # This is used in boto to create entries for "cloudfront"
        # for every region:
        # us-east-1: cloudfront.amazonaws.com
        # us-west-2: cloudfront.amazonaws.com
        # ...
        partition_data = self._get_partition_data(partition_name)
        return [r for r in list(partition_data['regions'])
                if 'global' not in r]

    def partitions_for_service(self, service_name):
        """Yield the names of the partitions where the service exists."""
        # "In which partitions is 'cloudfront' available?"
        # This is used because we should *not* generate entries
        # for cn-north-1 for cloudfront, it's not available in China.
        # This can be accomplished by using this method and
        # regions_for_partition. See the _special_case_global_service
        # method in StaticEndpointBuilder.
        for partition_name in self.get_available_partitions():
            if self.is_service_in_partition(service_name, partition_name):
                yield partition_name

    def get_available_partitions(self):
        """Return the partition names known to the wrapped resolver."""
        return self._resolver.get_available_partitions()

    def get_hostname(self, service_name, region_name):
        """Return the static hostname for a service/region pair.

        :raises ValueError: If the service is not available in the
            partition that contains region_name.
        """
        # Static hostname given a service_name/region_name
        # We'll map the service_name to the endpoint_prefix
        # and validate that the service is in the partition.
        partition = self._partition_for_region(region_name)
        if not self.is_service_in_partition(service_name, partition):
            raise ValueError("Unknown service '%s' in partition '%s'" % (
                service_name, partition))
        endpoint_prefix = self._endpoint_prefix(service_name)
        endpoint_config = self._resolver.construct_endpoint(
            endpoint_prefix, region_name)
        # sslCommonName, when present, overrides the plain hostname.
        hostname = endpoint_config.get('sslCommonName',
                                       endpoint_config.get('hostname'))
        return hostname

    def is_service_in_partition(self, service_name, partition_name):
        """Return True if the service's endpoint prefix is in the partition."""
        # Is iam in aws-cn? Yes
        # Is cloudfront in aws-cn? No
        endpoint_prefix = self._endpoint_prefix(service_name)
        partition_data = self._get_partition_data(partition_name)
        return endpoint_prefix in partition_data['services']

    def _partition_for_region(self, region_name):
        """Map a region name to the name of the partition containing it."""
        # us-east-1 -> aws
        # us-west-2 -> aws
        # cn-north-1 -> aws-cn
        for partition in self._endpoint_data['partitions']:
            if region_name in partition['regions']:
                return partition['partition']
        raise ValueError("Unknown region name: %s" % region_name)

    def _get_partition_data(self, partition_name):
        """Return the raw endpoints.json dict for one partition."""
        for partition in self._endpoint_data['partitions']:
            if partition['partition'] == partition_name:
                return partition
        raise ValueError("Could not find partition data for: %s"
                         % partition_name)

    def _endpoint_prefix(self, service_name):
        """Translate a boto2 service name into its endpoint prefix."""
        endpoint_prefix = self._service_map.get(
            service_name, service_name)
        return endpoint_prefix

    def is_global_service(self, service_name):
        """Return True if the service uses a partition-wide endpoint."""
        # This is making the assumption that if a service is
        # a partitionEndpoint for one partition, it will be that
        # way for *all* partitions. Technically possible to be
        # different, but in practice it's not.
        # We need this because this is how we know to trigger
        # special case behavior with services like iam, cloudfront.
        # NOTE(review): this checks the raw service_name (not the renamed
        # endpoint prefix) against partitions[0] only -- confirm intended.
        return (
            'partitionEndpoint' in
            self._endpoint_data['partitions'][0]['services'].get(
                service_name, {}))
class StaticEndpointBuilder(object):
    """Produce boto2-style static endpoint maps from a strict resolver."""

    def __init__(self, resolver):
        self._resolver = resolver

    def build_static_endpoints(self, service_names):
        """Build a set of static endpoints.

        :param service_names: Names of the services to build, using
            boto2's naming (e.g. "ec2containerservice", not "ecs").
        :return: A dict of the form
            ``{"service": {"region": "full.host.name"}}``.
        """
        static_endpoints = {}
        for service_name in service_names:
            region_map = self._build_endpoints_for_service(service_name)
            # Services that resolve to no endpoints at all are simply
            # omitted from the final result.
            if region_map:
                static_endpoints[service_name] = region_map
        self._deal_with_special_cases(static_endpoints)
        return static_endpoints

    def _build_endpoints_for_service(self, service_name):
        # Map 'region' -> 'hostname' for one service; global services
        # (iam, cloudfront, ...) take the partition-wide path instead.
        if self._resolver.is_global_service(service_name):
            return self._special_case_global_service(service_name)
        resolver = self._resolver
        return {region: resolver.get_hostname(service_name, region)
                for region in resolver.regions_for_service(service_name)}

    def _special_case_global_service(self, service_name):
        # boto2 expects an entry for every known region, all pointing at
        # the partition-wide endpoint, for each partition that actually
        # offers the service.
        resolver = self._resolver
        hostnames = {}
        for partition_name in resolver.partitions_for_service(service_name):
            for region in resolver.regions_for_partition(partition_name):
                hostnames[region] = resolver.get_hostname(service_name, region)
        return hostnames

    def _deal_with_special_cases(self, static_endpoints):
        # cloudsearchdomain mirrors cloudsearch's endpoints exactly.
        if 'cloudsearch' in static_endpoints:
            static_endpoints['cloudsearchdomain'] = \
                static_endpoints['cloudsearch']
def main():
    """Regenerate boto2's static endpoints and print or overwrite them."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--overwrite', action='store_true')
    parser.add_argument('--endpoints-file',
                        help=('Path to endpoints.json.  If this argument '
                              'is not given, then the endpoints.json file '
                              'bundled with botocore will be used.'))
    args = parser.parse_args()
    # Only regenerate endpoints for the services boto2 already knows about.
    known_services_in_existing_endpoints = _load_endpoint_services(
        EXISTING_ENDPOINTS_FILE)
    session = botocore.session.get_session()
    if args.endpoints_file:
        with open(args.endpoints_file) as f:
            endpoint_data = json.load(f)
    else:
        # Fall back to the endpoint data bundled with installed botocore.
        endpoint_data = session.get_data('endpoints')
    resolver = EndpointResolver(endpoint_data)
    strict_resolver = StrictEndpointResolver(resolver, endpoint_data)
    builder = StaticEndpointBuilder(strict_resolver)
    static_endpoints = builder.build_static_endpoints(
        known_services_in_existing_endpoints)
    # Deterministic formatting so regenerated diffs stay minimal.
    json_data = json.dumps(
        static_endpoints, indent=4, sort_keys=True, separators=(',', ': '))
    if args.overwrite:
        with open(EXISTING_ENDPOINTS_FILE, 'w') as f:
            f.write(json_data)
    else:
        print(json_data)


if __name__ == '__main__':
    main()
"""
my_args.py
コマンドライン引数の管理
パラメータ管理
"""
import argparse
def get_parser():
    """Build the argparse parser holding all experiment parameters.

    Returns
    -------
    argparse.ArgumentParser
        Parser with general, path, model, training, optimizer and misc
        options (help strings are intentionally left in Japanese).
    """
    parser = argparse.ArgumentParser(description='パラメータ指定')
    # general
    parser.add_argument('--workers', default=0, type=int, help="使用するCPUコア数")
    # paths
    parser.add_argument('--no_check', action='store_true', help="ファイル削除を尋ねるか")
    parser.add_argument('--data_path', default='tmp_data', help="データのPATH指定")
    parser.add_argument('--train_file', default='train', help="trainのファイル名")
    parser.add_argument('--valid_file', default='valid', help="validのファイル名")
    parser.add_argument('--test_file', default='test' , help="testのファイル名" )
    parser.add_argument('--checkpoint', default='checkpoint',type=str, help="checkpointのPATH指定")
    parser.add_argument('--tensorboard', default='save_tb',type=str, help="tensorboardのPATH指定")
    # model
    parser.add_argument('--batch_size', default=300, type=int, help="batch size")
    # parser.add_argument('--numClasses', default=6, type=int, help="カテゴリ分類の数")
    # training
    parser.add_argument('--epochs', default=1000, type=int, help="epoch数") # d: 720
    parser.add_argument('--start_epoch', default=0, type=int, help="開始epoch")
    parser.add_argument('--resume', default='./checkpoint/epoch005_val211.000.pth.tar', type=str, help="checkpointの続きから実行")
    # loss function
    # parser.add_argument('--cos_weight', default=0.98, type=float) # d: 0.98
    # parser.add_argument('--cls_weight', default=0.02, type=float) # d: 0.01
    # optimizer
    parser.add_argument('--lr', default=0.001, type=float, help="学習率") # d: 0.0001
    parser.add_argument('--momentum', default=0.9, type=float, help="モメンタム")
    parser.add_argument('--weight_decay', default=0, type=float, help="weight decay")
    # other
    parser.add_argument('--seed', default=1234, type=int, help="randomのseed")
    parser.add_argument('--cpu', action='store_true', help="cpuで動作させたい場合")
    # image
    # parser.add_argument('--resize', default=224, type=int)
    # parser.add_argument('--preModel', default='resNet18',type=str) # resNet50
    # parser.add_argument('--imfeatDim', default=512, type=int) # d: 2048
    # text
    # parser.add_argument('--maxSeqlen', default=20, type=int)
    # parser.add_argument('--maxIngrs', default=20, type=int)
    # parser.add_argument('--irnnDim', default=64, type=int) # d: 300 ingRNN hidden
    # parser.add_argument('--ingrW2VDim', default=100, type=int) # d:300 ingRNN input_size
    # parser.add_argument('--srnnDim', default=256, type=int) # d:1024 stRNN hidden_size
    # parser.add_argument('--stDim', default=100, type=int) # d:1024 stRNN input_size 現状word2vecのサイズにならざるを得ないのは実装が異なるため。ただ加算して手順ベクトルを作成.
    # im2recipe model
    # parser.add_argument('--semantic_reg', default=True,type=bool)
    # parser.add_argument('--embDim', default=256, type=int) # d: 1024 # 共通の埋め込みcos前
    # parser.add_argument('--nRNNs', default=1, type=int)
    # parser.add_argument('--maxImgs', default=5, type=int)
    # parser.add_argument('--ingrW2V', default='data/vocab.bin',type=str)
    # parser.add_argument('--valfreq', default=10,type=int)
    # toggles?
    # parser.add_argument('--freeVision', default=False, type=bool)
    # parser.add_argument('--freeRecipe', default=True, type=bool)
    # test
    # parser.add_argument('--path_results', default='results/', type=str)
    # parser.add_argument('--model_path', default='snapshots/model_e220_v-4.700.pth.tar', type=str)
    # parser.add_argument('--test_image_path', default='chicken.jpg', type=str)
    # MedR / Recall@1 / Recall@5 / Recall@10
    # parser.add_argument('--embtype', default='image', type=str) # [image|recipe] query type
    # parser.add_argument('--medr', default=100, type=int) # d: 1000
    # dataset
    # parser.add_argument('--maxlen', default=20, type=int)
    # parser.add_argument('--vocab', default = 'vocab.txt', type=str)
    # parser.add_argument('--dataset', default = '../data/recipe1M/', type=str)
    # parser.add_argument('--sthdir', default = '../data/', type=str)
    return parser
import os
import shutil
from joblib import Parallel, delayed
from .data_file import DataFile
from .helpers import (proc_file_input, mp_consol_save, wrap_load_func)
from pandas.util._decorators import doc
from .Dataset import _shared_docs, _sip_docs
import numpy as np
import pandas as pd
def get_file_mapping(self, cols=None):
    '''Access the up-to-date file mapping, optionally restricted to the
    data-file keys that appear in a subset of columns (to limit how much
    information is passed around).

    Returns
    --------
    file_mapping : dict
        Dictionary whose integer keys are the values stored in the
        Dataset, mapping to their Data Files.

    See Also
    --------
    to_data_file : Cast existing columns to type Data File.
    add_data_files : Method for adding new data files
    '''
    # Refresh the mapping before handing anything out
    self._check_file_mapping()

    # No column subset requested: return the full mapping
    if cols is None:
        return self.file_mapping

    # Restrict to the data-file keys actually present in the requested
    # columns, skipping any NaN entries.
    present = np.unique(np.array(self[cols]))
    return {key: self.file_mapping[key]
            for key in present if not pd.isnull(key)}
@doc(load_func=_shared_docs['load_func'], inplace=_shared_docs['inplace'])
def add_data_files(self, files, file_to_subject,
                   load_func=np.load, inplace=False):
    '''This method allows adding columns of type
    'data file' to the Dataset class.

    Parameters
    ----------
    files : dict
        | This argument specifies the files to be loaded as :ref:`data_files`.
          Files must be passed as a python dict where
          each key refers to the name of that feature / column of data files
          to load, and the value is either a list-like of
          str file paths, or a single globbing str which will
          be used to determine the files.

        | In addition to this parameter, you must also pass a
          python function to the file_to_subject param,
          which specifies how to convert from passed
          file path, to a subject name.

    file_to_subject : python function, dict of or 'auto'
        | This parameter represents how the subject name should
          be determined from the passed file paths. This
          parameter can be passed any python function, where
          the first argument on the function takes a full
          file path and returns a subject name.

        | This parameter should be passed as either a single function
          or argument to be used for all columns, or as a dictionary
          corresponding to the passed files dictionary in the case
          that each column requires a different function mapping path
          to subject. If just one function is passed, it will be used
          to load all dictionary entries. For example:

        | You may also pass the custom str 'auto' to
          specify that the subject name should be the base
          file name with the extension removed. For example
          if the path is '/some/path/subj16.npy' then the auto
          subject will be 'subj16'.

        | In the case that the underlying index is a MultiIndex, this
          function should be designed to return the subject in correct
          tuple form. See Examples below.

    {load_func}
    {inplace}

    See Also
    --------
    to_data_file : Cast existing columns to type Data File.
    get_file_mapping : Returns the raw file mapping.

    Examples
    ---------
    Consider the brief example below for loading two fake subjects,
    with the files parameter.

    ::

        files = dict()
        files['feat1'] = ['f1/subj_0.npy', 'f1/subj_1.npy']
        files['feat2'] = ['f2/subj_0.npy', 'f2/subj_1.npy']

    This could be matched with file_to_subject as:

    ::

        def file_to_subject_func(file):
            subject = file.split('/')[1].replace('.npy', '')
            return subject

        file_to_subject = file_to_subject_func
        # or
        file_to_subject = dict()
        file_to_subject['feat1'] = file_to_subject_func
        file_to_subject['feat2'] = file_to_subject_func

    In this example, subjects are loaded as 'subj_0' and 'subj_1',
    and they have associated loaded data files 'feat1' and 'feat2'.

    Next, we consider an example with fake data.
    In this example we will first generate and save some fake data files.
    These fake files will correspond to left hemisphere vertex files.

    .. ipython:: python

        import numpy as np
        import os

        dr = 'data/fake_surface/'
        os.makedirs(dr, exist_ok=True)

        # 20 subjects each with 10,242 vertex values
        X = np.random.random(size=(20, 10242))

        # Save the data as numpy arrays
        for x in range(len(X)):
            np.save(dr + str(x), X[x])

        os.listdir(dr)[:5]

    Next, we will use add data files to add these to
    a :class:`Dataset`.

    .. ipython:: python

        data = bp.Dataset()
        files = dict()
        files['fake_surface'] = dr + '*' # Add * for file globbing
        data = data.add_data_files(files=files, file_to_subject='auto')
        data.head(5)

    Let's also consider lastly a MultiIndex example:

    ::

        # The underlying dataset is indexed by subject and event
        data.set_index(['subject', 'event'], inplace=True)

        # Only one feature
        files = dict()
        files['feat1'] = ['f1/s0_e0.npy',
                          'f1/s0_e1.npy',
                          'f1/s1_e0.npy',
                          'f1/s1_e1.npy']

        def file_to_subject_func(file):

            # This selects the substring
            # at the last part separated by the '/'
            # so e.g. the stub, 's0_e0.npy', 's0_e1.npy', etc...
            subj_split = file.split('/')[-1]

            # This removes the .npy from the end, so
            # stubs == 's0_e0', 's0_e1', etc...
            subj_split = subj_split.replace('.npy', '')

            # Set the subject name as the first part
            # and the eventname as the second part
            subj_name = subj_split.split('_')[0]
            event_name = subj_split.split('_')[1]

            # Lastly put it into the correct return style
            # This is tuple style e.g., ('s0', 'e0'), ('s0', 'e1')
            ind = (subj_name, event_name)

            return ind
    '''

    if not inplace:
        return self._inplace('add_data_files', locals())

    # Wrap load func if needed
    wrapped_load_func = wrap_load_func(load_func, _print=self._print)

    # Init if needed
    self._check_file_mapping()

    # Resolve the files argument into one series of paths per column,
    # indexed by subject.
    file_series = proc_file_input(files, file_to_subject, self.index)

    # For each column
    for file in file_series:

        # For each subject, fill in with Data File
        series = file_series[file]
        self._series_to_data_file(col=file, series=series,
                                  load_func=wrapped_load_func)
@doc(**_sip_docs, load_func=_shared_docs['load_func'])
def to_data_file(self, scope,
                 load_func=np.load,
                 inplace=False):
    '''This method can be used to cast any existing columns
    where the values are file paths, to a data file.

    Parameters
    ----------
    {scope}
    {load_func}
    {inplace}

    Examples
    ----------
    This method can be used as the primary way to prepare data files.
    We will perform a simple example here.

    .. ipython:: python

        import BPt as bp
        data = bp.Dataset()
        data['files'] = ['data/loc1.npy', 'data/loc2.npy']
        data

    We now have a :class:`Dataset`, but our column 'files' is not
    quite ready, as by default it won't know what to do with str.
    To get it to treat it as a data file we will cast it.

    .. ipython:: python

        data = data.to_data_file('files')
        data

    What's happened here? Now it doesn't show paths anymore, but instead
    shows integers. That's actually the desired behavior though, we
    can check it out in file_mapping.

    .. ipython:: python

        data.file_mapping

    The file_mapping is then used internally with :class:`Loader`
    to load objects on the fly.

    See Also
    --------
    add_data_files : Method for adding new data files
    consolidate_data_files : Merge existing data files into one column.
    '''

    if not inplace:
        return self._inplace('to_data_file', locals())

    # Wrap load func if needed
    wrapped_load_func = wrap_load_func(load_func, _print=self._print)

    # Init if needed
    self._check_file_mapping()

    # Cast each column in scope to a data-file column
    for col in self.get_cols(scope):
        self._series_to_data_file(col=col, series=self[col],
                                  load_func=wrapped_load_func)
def _series_to_data_file(self, col, series, load_func):
    """Register each path in *series* as a DataFile and store its integer
    key in the dataset column, then tag the column's scope."""
    # Start numbering new data files at the next free index
    ind = self._get_next_ind()

    for subject in series.index:

        # Register the Data File under the current index ...
        self.file_mapping[ind] = DataFile(series[subject], load_func)

        # ... and store that index in place of the raw path
        self.at[subject, col] = ind

        ind += 1

    # Tag the column so downstream code treats it as data files
    self.add_scope(col, 'data file', inplace=True)
@doc(scope=_shared_docs['scope'])
def consolidate_data_files(self, save_dr, replace_with=None,
                           scope='data file', cast_to=None,
                           clear_existing='fail', n_jobs=-1):
    '''This function is designed as helper to consolidate all
    or a subset of the loaded data files into one column. While this
    removes information, it can provide a speed up in terms of downstream
    loading and reduce the number of files cached when using
    :class:`Loader`.

    This method assumes that the underlying data files
    can be stacked with ::

        np.stack(data, axis=-1)

    After they have been loaded. If this is not the case,
    then this function will break.

    Parameters
    -----------
    save_dr : str or Path
        The file directory in which to
        save the consolidated files. If it
        doesn't exist, then it will be created.

    replace_with : str or None, optional
        By default, if replace_with is left
        as None, then just a saved version of
        the files will be made. Instead,
        if a column name passed as a str is passed,
        then the original data files which were
        consolidated will be deleted, and the new
        consolidated column loaded instead.

        ::

            default = None

    {scope}

        ::

            default = 'data file'

    cast_to : None or numpy dtype, optional
        If not None, then this should be a
        numpy dtype in which the stacked data
        will be cast to before saving.

        ::

            default = None

    clear_existing : bool or 'fail', optional
        If True, then if the save dr already
        has files in it, delete them. If False,
        just overwrite them.

        If 'fail' then if there
        are already files in the save directory,
        raise an error.

        ::

            default = 'fail'

    n_jobs : int, optional
        The number of jobs to use while stacking
        and saving each file.

        If -1, then will try to use all available cpu's.

        ::

            default = -1

    See Also
    --------
    to_data_file : Convert existing column to data file.
    add_data_files : Method for adding new data files
    update_data_file_paths : Update data path saved paths
    '''

    # Make sure file mapping up to date
    self._check_file_mapping()

    # If clear existing requested and the directory exists, wipe it.
    # Note 'is True' is deliberate: clear_existing may also be the
    # string 'fail', which must not match here.
    if clear_existing is True:
        if os.path.exists(save_dr):
            self._print('Removing existing save directory:',
                        str(save_dr), level=0)
            shutil.rmtree(save_dr)

    # Make sure save_dr exists
    os.makedirs(save_dr, exist_ok=True)

    # If 'fail', refuse to write into a non-empty directory
    if clear_existing == 'fail':
        existing_files = len(os.listdir(save_dr))
        if existing_files > 0:
            raise RuntimeError('The save directory ' +
                               str(save_dr) + ' is not empty.'
                               ' Either change clear_existing or provide '
                               'a new save_dr.')

    # Get cols in scope
    cols = self.get_cols(scope)

    # For each subject / data point, stack + save its data files in
    # parallel; mp_consol_save returns the saved file location.
    saved_locs = Parallel(n_jobs=n_jobs)(delayed(mp_consol_save)(
        data_files=[self.file_mapping[self.loc[index, key]]
                    for key in cols],
        index=index, cast_to=cast_to, save_dr=save_dr)
        for index in self.index)

    # If replace with, swap the consolidated column in for the originals
    if replace_with is not None:

        # Drop existing cols
        self.drop(cols, axis=1, inplace=True)

        # Create new series and add as new col
        self[replace_with] = pd.Series(saved_locs, index=self.index)

        # Cast the new column to a data file
        self.to_data_file(scope=replace_with,
                          inplace=True)
def update_data_file_paths(self, old, new):
    '''Apply a substring replacement to every saved file path in this
    Dataset's file mapping.

    Useful when the data files have moved on disk, or when a saved
    dataset is loaded on a different machine. The old / new arguments
    behave exactly like those of the base python str.replace method.

    Parameters
    -----------
    old : str
        Substring to search for in each stored file path.

    new : str
        Replacement substring for every occurrence of old.

    See Also
    --------
    to_data_file : Convert existing column to data file.
    add_data_files : Method for adding new data files
    '''

    # Make sure the file mapping exists before touching it
    self._check_file_mapping()

    # Rewrite the stored location of every registered data file in place
    for data_file in self.file_mapping.values():
        data_file.loc = data_file.loc.replace(old, new)
def _get_next_ind(self):
    """Return the next unused integer key for file_mapping.

    Keys are assigned sequentially, so this is one past the current
    maximum key (np.nanmax, matching the original numeric handling),
    or 0 for an empty mapping.
    """

    existing_keys = list(self.file_mapping.keys())
    if not existing_keys:
        return 0
    return np.nanmax(existing_keys) + 1
|
<filename>install/app_store/tk-multi-workfiles/v0.7.4/python/tk_multi_workfiles/file_list_view.py
# Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
from operator import itemgetter
from datetime import datetime, timedelta
from pprint import pprint
import tank
from tank.platform.qt import QtCore, QtGui
browser_widget = tank.platform.import_framework("tk-framework-widget", "browser_widget")
from .file_item_form import FileItemForm
from .file_item import FileItem
from .file_filter import FileFilter
class FileListView(browser_widget.BrowserWidget):
    """
    Browser widget that lists work files and published files for the
    current work area, grouped by task and then by file name.

    NOTE(review): this module uses dict.iteritems(), so it is Python 2
    only - consistent with the rest of this legacy toolkit code.
    """

    # signals - note, 'object' is used to avoid
    # issues with PyQt when None is passed as PyQt
    # doesn't allow None to be passed to classes
    # other than object (an exception is raised)
    open_previous_workfile = QtCore.Signal(object)  # FileItem
    open_previous_publish = QtCore.Signal(object)  # FileItem
    view_in_shotgun = QtCore.Signal(object)  # FileItem

    # Group label used for files that have no task associated with them
    NO_TASK_NAME = "No Task"

    def __init__(self, parent=None):
        """
        Construction
        """
        browser_widget.BrowserWidget.__init__(self, parent)
        # Current FileFilter being displayed (empty dict until a result
        # has been processed - see process_result)
        self._current_filter = {}
        # tweak style
        self.title_style = "none"
        self._update_title()

    @property
    def selected_published_file(self):
        # Published file of the currently selected list item, or None
        # if nothing is selected.
        selected_item = self.get_selected_item()
        if selected_item:
            return selected_item.published_file
        return None

    @property
    def selected_work_file(self):
        # Work file of the currently selected list item, or None
        # if nothing is selected.
        selected_item = self.get_selected_item()
        if selected_item:
            return selected_item.work_file
        return None

    # Enable to force all work to be done in the main thread
    # which can help when debugging
    # IMPORTANT - set this to False before releasing!!!
    DEBUG_GET_DATA_IN_MAIN_THREAD=False

    def get_data(self, data):
        """
        Called by browser widget in worker thread to query the list
        of files to display for the specified context
        """
        if FileListView.DEBUG_GET_DATA_IN_MAIN_THREAD:
            # debug only - _get_data will be called first in
            # process_result which runs in the main thread
            return data
        else:
            return self._get_data(data)

    def _get_data(self, data):
        """
        Retrieve the list of files to display as well as the various display and grouping options required
        to build the file list.

        :param data:    Dictionary containing:
                        handler - A 'WorkFiles' instance containing the main application business logic
                        filter - The current 'FileFilter' instance being applied to the list
        :returns:       Dictionary containing the various display & grouping options required to build the
                        file list as well as the list of files organised by task.
        """
        result = {"task_groups":{}, "task_name_order":{}}
        handler = data["handler"]
        filter = data.get("filter")
        mode = filter.mode
        # get some additional info from the handler:
        ctx = handler.get_current_work_area()
        result["can_do_new_file"] = handler.can_do_new_file()
        result["have_valid_workarea"] = (ctx and (ctx.entity or ctx.project))
        result["have_valid_configuration"] = handler.have_valid_configuration_for_work_area()
        result["current_task_name"] = ctx.task.get("name") if ctx and ctx.task else None
        result["can_change_work_area"] = handler.can_change_work_area()
        result["filter"] = filter
        result["task_order"] = []
        if result["have_valid_workarea"] and result["have_valid_configuration"]:
            # get the list of files from the handler:
            files = handler.find_files(filter)
            # re-pivot this list of files ready to display:
            #
            # builds the following structure
            # { task_name : { (file)name : { "files" : { 1:file,2:file, ... }, "thumbnail" : path, ... } } }
            task_groups = {}
            for file in files:
                # first level is task group
                task_name = file.task.get("name") if file.task else FileListView.NO_TASK_NAME
                task_group = task_groups.setdefault(task_name, dict())
                # next level is name:
                name_group = task_group.setdefault(file.name, dict())
                # finally, add file to files:
                file_versions = name_group.setdefault("files", dict())
                file_versions[file.version] = file
            # do some pre-processing of file groups:
            filtered_task_groups = {}
            task_modified_pairs = []
            task_name_order = {}
            for task, name_groups in task_groups.iteritems():
                name_modified_pairs = []
                filtered_name_groups = {}
                for name, details in name_groups.iteritems():
                    files_versions = details["files"]
                    # find highest version info:
                    local_versions = [f.version for f in files_versions.values() if f.is_local]
                    if mode == FileFilter.WORKFILES_MODE and not local_versions:
                        # don't have a version of this file to display!
                        continue
                    publish_versions = [f.version for f in files_versions.values() if f.is_published]
                    if mode == FileFilter.PUBLISHES_MODE and not publish_versions:
                        # don't have a version of this file to display!
                        continue
                    highest_local_version = -1
                    if local_versions:
                        highest_local_version = max(local_versions)
                        details["highest_local_file"] = files_versions[highest_local_version]
                    highest_publish_version = -1
                    if publish_versions:
                        highest_publish_version = max(publish_versions)
                        details["highest_publish_file"] = files_versions[highest_publish_version]
                    # find thumbnail to use - walk versions newest-first and
                    # take the first one that actually has a thumbnail:
                    sorted_versions = sorted(files_versions.keys(), reverse=True)
                    thumbnail = None
                    for version in sorted_versions:
                        # skip any versions that are greater than the one we are looking for
                        # Note: we shouldn't choose a thumbnail for versions that aren't
                        # going to be displayed so filter these out
                        if ((mode == FileFilter.WORKFILES_MODE and version > highest_local_version)
                            or (mode == FileFilter.PUBLISHES_MODE and version > highest_publish_version)):
                            continue
                        thumbnail = files_versions[version].thumbnail
                        if thumbnail:
                            # special case - update the thumbnail!
                            if mode == FileFilter.WORKFILES_MODE and version < highest_local_version:
                                files_versions[highest_local_version].set_thumbnail(thumbnail)
                            break
                    details["thumbnail"] = thumbnail
                    # update group with details:
                    filtered_name_groups[name] = details
                    # determine when this file was last updated (modified or published)
                    # this is used to sort the files in the list:
                    last_updated = None
                    if mode == FileFilter.WORKFILES_MODE and highest_local_version >= 0:
                        last_updated = files_versions[highest_local_version].modified_at
                    if highest_publish_version >= 0:
                        published_at = files_versions[highest_publish_version].published_at
                        last_updated = max(last_updated, published_at) if last_updated else published_at
                    name_modified_pairs.append((name, last_updated))
                if not filtered_name_groups:
                    # everything in this group was filtered out!
                    continue
                filtered_task_groups[task] = filtered_name_groups
                # sort names in reverse order of modified date:
                name_modified_pairs.sort(key=itemgetter(1), reverse=True)
                task_name_order[task] = [n for (n, _) in name_modified_pairs]
                task_modified_pairs.append((task, max([m for (_, m) in name_modified_pairs])))
            # sort tasks in reverse order of modified date:
            task_modified_pairs.sort(key=itemgetter(1), reverse=True)
            task_order = [n for (n, _) in task_modified_pairs]
            result["task_groups"] = filtered_task_groups
            result["task_name_order"] = task_name_order
            result["task_order"] = task_order
        return result

    def process_result(self, result):
        """
        Process list of tasks retrieved by get_data on the main thread

        :param result:  Dictionary containing the various display & grouping options required to build the
                        file list as well as the list of files organised by task.
        """
        if FileListView.DEBUG_GET_DATA_IN_MAIN_THREAD:
            # gathering of data was not done in the get_data stage so we
            # should do it here instead - this method gets called in the
            # main thread
            result = self._get_data(result)
        task_groups = result["task_groups"]
        task_name_order = result["task_name_order"]
        task_order = result["task_order"]
        current_task_name = result["current_task_name"]
        self._current_filter = result["filter"]
        self._update_title()
        if not task_groups:
            # build a useful error message using the info we have available:
            msg = ""
            if not result["can_change_work_area"]:
                if not result["have_valid_workarea"]:
                    msg = "The current Work Area is not valid!"
                elif not result["have_valid_configuration"]:
                    msg = ("Shotgun File Manager has not been configured for the environment "
                           "being used by the selected Work Area!")
                elif not result["can_do_new_file"]:
                    msg = "Couldn't find any files in this Work Area!"
                else:
                    msg = "Couldn't find any files!\nClick the New file button to start work."
            else:
                if not result["have_valid_workarea"]:
                    msg = "The current Work Area is not valid!"
                elif not result["have_valid_configuration"]:
                    msg = ("Shotgun File Manager has not been configured for the environment "
                           "being used by the selected Work Area!\n"
                           "Please choose a different Work Area to continue.")
                elif not result["can_do_new_file"]:
                    msg = "Couldn't find any files in this Work Area!\nTry selecting a different Work Area."
                else:
                    msg = "Couldn't find any files!\nClick the New file button to start work."
            self.set_message(msg)
            return
        for task_name in task_order:
            name_groups = task_groups[task_name]
            # NOTE(review): header is shown when there are multiple task
            # groups, or for a task that isn't the current one while no
            # current task is set - confirm the '== None' clause is intended.
            if (len(task_groups) > 1
                or (task_name != current_task_name
                    and task_name != FileListView.NO_TASK_NAME
                    and current_task_name == None)):
                # add header for task:
                h = self.add_item(browser_widget.ListHeader)
                h.set_title("%s" % (task_name))
            ordered_names = task_name_order[task_name]
            for name in ordered_names:
                details = name_groups[name]
                files = details["files"]
                highest_local_file = details.get("highest_local_file")
                highest_publish_file = details.get("highest_publish_file")
                thumbnail = details["thumbnail"]
                # add new item to list:
                item = self._add_file_item(highest_publish_file, highest_local_file)
                if not item:
                    continue
                # set thumbnail if have one:
                if thumbnail:
                    item.set_thumbnail(thumbnail)
                # add context menu:
                item.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
                # if it's a publish then add 'View In Shotgun' item:
                if highest_publish_file:
                    action = QtGui.QAction("View latest Publish in Shotgun", item)
                    # (AD) - the '[()]' syntax in action.triggered[()].connect looks weird right!
                    # 'triggered' is a QtCore.SignalInstance which actually defines multiple
                    # signals: triggered() & triggered(bool). PySide will correctly determine which
                    # one to use but PyQt gets confused and calls the (bool) version instead which
                    # causes problems for us... Luckily, Qt lets us use the argument list () to
                    # index into the SignalInstance object to force the use of the non-bool version - yay!
                    action.triggered[()].connect(lambda f=highest_publish_file: self._on_show_in_shotgun_action_triggered(f))
                    item.addAction(action)
                # build context menu for all publish versions:
                published_versions = [f.version for f in files.values() if f.is_published and isinstance(f.version, int)]
                if published_versions:
                    published_versions.sort(reverse=True)
                    publishes_action = QtGui.QAction("Open Publish Read-Only", item)
                    publishes_sm = QtGui.QMenu(item)
                    publishes_action.setMenu(publishes_sm)
                    item.addAction(publishes_action)
                    # only the 20 most recent versions get a menu entry
                    for v in published_versions[:20]:
                        f = files[v]
                        msg = ("v%03d" % f.version)
                        action = QtGui.QAction(msg, publishes_sm)
                        # see above for explanation of [()] syntax in action.triggered[()].connect...
                        action.triggered[()].connect(lambda f=f: self._on_open_publish_action_triggered(f))
                        publishes_sm.addAction(action)
                # build context menu for all work files:
                wf_versions = [f.version for f in files.values() if f.is_local and isinstance(f.version, int)]
                if wf_versions:
                    wf_versions.sort(reverse=True)
                    wf_action = QtGui.QAction("Open Work File", item)
                    wf_sm = QtGui.QMenu(item)
                    wf_action.setMenu(wf_sm)
                    item.addAction(wf_action)
                    # only the 20 most recent versions get a menu entry
                    for v in wf_versions[:20]:
                        f = files[v]
                        msg = ("v%03d" % f.version)
                        action = QtGui.QAction(msg, wf_sm)
                        # see above for explanation of [()] syntax in action.triggered[()].connect...
                        action.triggered[()].connect(lambda f=f: self._on_open_workfile_action_triggered(f))
                        wf_sm.addAction(action)

    def _update_title(self):
        """
        Update the list title depending on the mode
        """
        if not self._current_filter:
            return
        self.set_label(self._current_filter.list_title)

    def _add_file_item(self, latest_published_file, latest_work_file):
        """
        Add an item to the file list given the latest publish & work files

        :param latest_published_file:   The latest published version of the file to be added
        :param latest_work_file:        The latest work/local version of the file to be added
        """
        details = ""
        tooltip = ""
        # colours for item titles:
        red = "rgb(200, 84, 74)"
        green = "rgb(145, 206, 95)"
        current_mode = self._current_filter.mode
        file = None
        editable = True
        not_editable_reason = ""
        if current_mode == FileFilter.WORKFILES_MODE:
            # work-file view: the work file drives the item, the publish
            # (if any) only affects colour and tooltip.
            file = latest_work_file
            title_colour = None
            if latest_published_file:
                if file.compare_with_publish(latest_published_file) >= 0:
                    # work file is most recent
                    title_colour = green
                    tooltip += "This is the latest version of this file"
                else:
                    # published file is most recent
                    title_colour = red
                    tooltip += "<b>A more recent published version of this file is available:</b>"
                    tooltip += "<br>"
                    tooltip += ("<br><b>Version v%03d</b>" % latest_published_file.version)
                    tooltip += "<br>" + latest_published_file.format_published_by_details()
                    tooltip += "<br>"
                    tooltip += "<br><b>Description:</b>"
                    tooltip += "<br>" + latest_published_file.format_publish_description()
            else:
                tooltip += "This file has never been published"
            if file.version is not None:
                details = "<b>%s, v%03d</b>" % (file.name, file.version)
            else:
                details = "<b>%s</b>" % (file.name)
            if title_colour:
                details = "<span style='color:%s'>%s</span>" % (title_colour, details)
            details += "<br>" + file.format_modified_by_details()
            editable = file.editable
            not_editable_reason = file.not_editable_reason
        elif current_mode == FileFilter.PUBLISHES_MODE:
            # publish view: the published file drives the item, the work
            # file (if any) only affects colour and tooltip.
            file = latest_published_file
            title_colour = None
            tooltip += "<b>Description:</b>"
            tooltip += "<br>" + file.format_publish_description()
            tooltip += "<hr>"
            if latest_work_file:
                if latest_work_file.compare_with_publish(file) <= 0:
                    # published file is most recent
                    title_colour = green
                    tooltip += "This is the latest version of this file"
                else:
                    # work file is most recent
                    #title_colour = red
                    tooltip += "<b>A more recent version of this file was found in your work area:</b>"
                    tooltip += "<br>"
                    #tooltip += "<br><b>Details:</b>"
                    tooltip += ("<br><b>Version v%03d</b>" % latest_work_file.version)
                    tooltip += "<br>" + latest_work_file.format_modified_by_details()
            else:
                title_colour = green
                tooltip += "This is the latest version of this file"
            details = "<b>%s, v%03d</b>" % (file.name, file.version)
            if title_colour:
                details = "<span style='color:%s'>%s</span>" % (title_colour, details)
            details += "<br>" + file.format_published_by_details()
            editable = file.editable
            not_editable_reason = file.not_editable_reason
        else:
            raise TankError("Display mode is not recognised!")
        # update editable info on the tooltip
        if not editable:
            tooltip += "<hr>"
            tooltip += "Read-only: " + not_editable_reason
        # add item:
        item = self.add_item(FileItemForm)
        item.published_file = latest_published_file
        item.work_file = latest_work_file
        # set tool tip
        item.setToolTip(tooltip)
        # build and set details string:
        item.set_details(details)
        item.set_is_editable(editable, not_editable_reason)
        return item

    def _on_open_workfile_action_triggered(self, file):
        """
        Open action triggered from context menu
        """
        self.open_previous_workfile.emit(file)

    def _on_open_publish_action_triggered(self, file):
        """
        Open action triggered from context menu
        """
        self.open_previous_publish.emit(file)

    def _on_show_in_shotgun_action_triggered(self, file):
        """
        Show in Shotgun action triggered from context menu
        """
        self.view_in_shotgun.emit(file)
|
import uuid
from galaxy import model
from galaxy.jobs.rule_helper import RuleHelper
from galaxy.model import mapping
from galaxy.util import bunch
# E-mail addresses identifying the three fixture users. NOTE(review): the
# literal values were redacted to "<EMAIL>" in this corpus copy; the original
# source presumably gave each constant a distinct address - confirm, since
# several assertions below rely on the three users being distinguishable.
USER_EMAIL_1 = "<EMAIL>"
USER_EMAIL_2 = "<EMAIL>"
USER_EMAIL_3 = "<EMAIL>"
def test_job_count():
    """Exercise RuleHelper.job_count over user/destination/state filters."""
    helper = __rule_helper()
    # An empty database reports zero jobs.
    __assert_job_count_is(0, helper)
    __setup_fixtures(helper.app)
    # Raw per-user counts.
    __assert_job_count_is(7, helper, for_user_email=USER_EMAIL_1)
    __assert_job_count_is(2, helper, for_user_email=USER_EMAIL_2)
    __assert_job_count_is(0, helper, for_user_email=USER_EMAIL_3)
    # Per-destination counts.
    __assert_job_count_is(2, helper, for_destination="local")
    __assert_job_count_is(7, helper, for_destination="cluster1")
    __assert_job_count_is(9, helper, for_destinations=["cluster1", "local"])
    # Per-user, per-destination counts.
    __assert_job_count_is(5, helper, for_destination="cluster1", for_user_email=USER_EMAIL_1)
    __assert_job_count_is(2, helper, for_destination="local", for_user_email=USER_EMAIL_1)
    __assert_job_count_is(7, helper, for_destinations=["cluster1", "local"], for_user_email=USER_EMAIL_1)
    __assert_job_count_is(2, helper, for_destination="cluster1", for_user_email=USER_EMAIL_2)
    __assert_job_count_is(0, helper, for_destination="local", for_user_email=USER_EMAIL_2)
    # Per-user, per-state destination counts.
    __assert_job_count_is(3, helper, for_destination="cluster1", for_user_email=USER_EMAIL_1, for_job_states=["queued"])
    __assert_job_count_is(2, helper, for_destination="cluster1", for_user_email=USER_EMAIL_1, for_job_states=["running"])
    __assert_job_count_is(0, helper, for_destination="cluster1", for_user_email=USER_EMAIL_1, for_job_states=["error"])
    __assert_job_count_is(5, helper, for_destination="cluster1", for_user_email=USER_EMAIL_1, for_job_states=["queued", "running", "error"])
def __assert_job_count_is(expected_count, rule_helper, **kwds):
    """Fail unless rule_helper.job_count(**kwds) equals expected_count."""
    actual_count = rule_helper.job_count(**kwds)
    if actual_count != expected_count:
        raise AssertionError(
            "Expected job count %d, actual job count %s for params %s"
            % (expected_count, actual_count, kwds))
def __setup_fixtures(app):
    # user1 has 3 jobs queued and 2 jobs running on cluster1 and one queued and
    # one running job on local. user2 has a queued and running job on the
    # cluster. user3 has no jobs.
    user1 = model.User(email=USER_EMAIL_1, password="<PASSWORD>")
    user2 = model.User(email=USER_EMAIL_2, password="<PASSWORD>")
    # BUG FIX: user3 was previously created with USER_EMAIL_2, silently
    # duplicating user2's address; the fixture comment and the USER_EMAIL_3
    # assertions in test_job_count clearly intend a third distinct user.
    user3 = model.User(email=USER_EMAIL_3, password="<PASSWORD>")
    app.add(user1, user2, user3)
    # user1's jobs.
    for state in ("queued", "queued", "queued", "running", "running"):
        app.add(__new_job(user=user1, destination_id="cluster1", state=state))
    for state in ("queued", "running"):
        app.add(__new_job(user=user1, destination_id="local", state=state))
    # user2's jobs.
    for state in ("queued", "running"):
        app.add(__new_job(user=user2, destination_id="cluster1", state=state))
def test_choose_one_unhashed():
    helper = __rule_helper()
    # Without a hash value choose_one picks randomly, so over enough calls
    # both options should appear.
    picks = set()
    __do_a_bunch(lambda: picks.add(helper.choose_one(['a', 'b'])))
    assert picks == {'a', 'b'}
def test_choose_one_hashed():
    helper = __rule_helper()
    # With a fixed integer hash value every call must pick the same option.
    picks = set()
    __do_a_bunch(lambda: picks.add(helper.choose_one(['a', 'b'], hash_value=1234)))
    assert len(picks) == 1
    # String hash values are also deterministic.
    picks = set()
    __do_a_bunch(lambda: picks.add(helper.choose_one(['a', 'b'], hash_value="i am a string")))
    assert len(picks) == 1
def test_job_hash_unique_by_default():
    rule_helper = __rule_helper()
    job1, job2 = __two_jobs_in_a_history()
    # BUG FIX: the comparison result was previously a bare expression whose
    # value was discarded, so the test could never fail. Assert that by
    # default distinct jobs hash to distinct values.
    assert rule_helper.job_hash(job1) != rule_helper.job_hash(job2)
def test_job_hash_history():
    # Jobs sharing a history hash identically under history-based hashing.
    helper = __rule_helper()
    first, second = __two_jobs_in_a_history()
    __assert_same_hash(helper, first, second, hash_by="history")
def test_job_hash_workflow_invocation():
    helper = __rule_helper()
    job1, job2 = __two_jobs()
    # Tag both jobs with the same workflow invocation UUID so they hash
    # identically under invocation-based hashing.
    invocation_uuid = uuid.uuid1().hex
    for job in (job1, job2):
        job.add_parameter("__workflow_invocation_uuid__", invocation_uuid)
    __assert_same_hash(helper, job1, job2, hash_by="workflow_invocation")
def test_job_hash_fallback():
    helper = __rule_helper()
    first, second = __two_jobs_in_a_history()
    # Neither job carries an invocation UUID, so the comma-separated spec
    # should fall back from workflow_invocation to history.
    __assert_same_hash(helper, first, second, hash_by="workflow_invocation,history")
def test_should_burst():
    helper = __rule_helper()
    __setup_fixtures(helper.app)
    # The cluster1 fixture holds 4 queued jobs and 3 running ones (7 total).
    assert helper.should_burst(["cluster1"], "7")
    assert not helper.should_burst(["cluster1"], "10")
    assert helper.should_burst(["cluster1"], "2", job_states="queued")
    assert not helper.should_burst(["cluster1"], "6", job_states="queued")
def __assert_same_hash(rule_helper, job1, job2, hash_by):
    """Fail unless both jobs hash identically under the given strategy."""
    hashes = [rule_helper.job_hash(job, hash_by=hash_by) for job in (job1, job2)]
    assert hashes[0] == hashes[1]
def __two_jobs_in_a_history():
    """Return two jobs that share history id 4."""
    first, second = __two_jobs()
    for job in (first, second):
        job.history_id = 4
    return first, second
def __two_jobs():
    """Return two fresh model.Job objects with ids 1 and 2."""
    jobs = []
    for job_id in (1, 2):
        job = model.Job()
        job.id = job_id
        jobs.append(job)
    return jobs[0], jobs[1]
def __do_a_bunch(work):
    """Invoke *work* 20 times, enough for randomised behaviour to settle."""
    for _ in range(20):
        work()
def __new_job(**kwds):
    """Create a model.Job with each keyword set as an attribute."""
    job = model.Job()
    for attribute, value in kwds.items():
        setattr(job, attribute, value)
    return job
def __rule_helper():
    """Build a RuleHelper wired to a fresh in-memory mock application."""
    return RuleHelper(MockApp())
class MockApp( object ):
    """Minimal stand-in for the Galaxy application object.

    Exposes only what these tests touch: ``config`` (an empty Bunch) and
    ``model`` (a fresh in-memory SQLite mapping), plus an ``add`` helper
    to persist fixture objects.
    """

    def __init__( self ):
        self.config = bunch.Bunch( )
        # Fresh in-memory database per test run; create_tables builds the
        # schema so fixtures can be inserted immediately.
        self.model = mapping.init(
            "/tmp",
            "sqlite:///:memory:",
            create_tables=True
        )

    def add( self, *args ):
        # Queue every object, then flush once so counts are visible to
        # subsequent queries.
        for arg in args:
            self.model.context.add( arg )
        self.model.context.flush()
|
from math import pi, sqrt
from raytracer.tuple import (
tuple,
point,
vector,
magnitude,
normalize,
dot,
cross,
Color,
)
from raytracer.util import equal
from raytracer.matrices import Matrix, I
from raytracer.transformations import (
translation,
scaling,
rotation_x,
rotation_y,
rotation_z,
shearing,
view_transform
)
def test_multiplying_by_a_translation_matrix():
    # Translation adds its offsets to each point coordinate.
    assert translation(5, -3, 2) * point(-3, 4, 5) == point(2, 1, 7)
def test_multiplying_by_the_inverse_of_a_translation_matrix():
    # The inverse translation moves the point the opposite way.
    inverse_transform = translation(5, -3, 2).inverse()
    assert inverse_transform * point(-3, 4, 5) == point(-8, 7, 3)
def test_translation_does_not_affect_vectors():
    # Translating a direction vector leaves it unchanged.
    direction = vector(-3, 4, 5)
    assert translation(5, -3, 2) * direction == direction
def test_a_scaling_matrix_applied_to_a_point():
    # Each coordinate is multiplied by the corresponding scale factor.
    assert scaling(2, 3, 4) * point(-4, 6, 8) == point(-8, 18, 32)
def test_reflection_is_scaling_by_a_negative_value():
    # Scaling x by -1 mirrors the point across the y/z plane.
    assert scaling(-1, 1, 1) * point(2, 3, 4) == point(-2, 3, 4)
def test_rotating_a_point_around_the_x_axis():
    start = point(0, 1, 0)
    eighth_turn = rotation_x(pi / 4)
    quarter_turn = rotation_x(pi / 2)
    # A point on the y axis sweeps through the y/z plane.
    assert eighth_turn * start == point(0, sqrt(2) / 2, sqrt(2) / 2)
    assert quarter_turn * start == point(0, 0, 1)
def test_rotating_a_point_around_the_y_axis():
    start = point(0, 0, 1)
    eighth_turn = rotation_y(pi / 4)
    quarter_turn = rotation_y(pi / 2)
    # A point on the z axis sweeps through the x/z plane.
    assert eighth_turn * start == point(sqrt(2) / 2, 0, sqrt(2) / 2)
    assert quarter_turn * start == point(1, 0, 0)
def test_rotating_a_point_around_the_z_axis():
    start = point(0, 1, 0)
    eighth_turn = rotation_z(pi / 4)
    quarter_turn = rotation_z(pi / 2)
    # A point on the y axis sweeps through the x/y plane.
    assert eighth_turn * start == point(-sqrt(2) / 2, sqrt(2) / 2, 0)
    assert quarter_turn * start == point(-1, 0, 0)
def test_a_shearing_transformation_moves_x_in_proportion_to_y():
    # x' = x + y = 2 + 3 = 5
    assert shearing(1, 0, 0, 0, 0, 0) * point(2, 3, 4) == point(5, 3, 4)
def test_a_shearing_transformation_moves_x_in_proportion_to_z():
    # x' = x + z = 2 + 4 = 6
    assert shearing(0, 1, 0, 0, 0, 0) * point(2, 3, 4) == point(6, 3, 4)
def test_a_shearing_transformation_moves_y_in_proportion_to_x():
    # y' = y + x = 3 + 2 = 5
    assert shearing(0, 0, 1, 0, 0, 0) * point(2, 3, 4) == point(2, 5, 4)
def test_a_shearing_transformation_moves_y_in_proportion_to_z():
    # y' = y + z = 3 + 4 = 7
    assert shearing(0, 0, 0, 1, 0, 0) * point(2, 3, 4) == point(2, 7, 4)
def test_a_shearing_transformation_moves_z_in_proportion_to_x():
    # z' = z + x = 4 + 2 = 6
    assert shearing(0, 0, 0, 0, 1, 0) * point(2, 3, 4) == point(2, 3, 6)
def test_a_shearing_transformation_moves_z_in_proportion_to_y():
    # z' = z + y = 4 + 3 = 7
    assert shearing(0, 0, 0, 0, 0, 1) * point(2, 3, 4) == point(2, 3, 7)
def test_individual_transformations_are_applied_in_sequence():
    start = point(1, 0, 1)
    rotate = rotation_x(pi / 2)
    scale = scaling(5, 5, 5)
    move = translation(10, 5, 7)
    # Rotation first...
    rotated = rotate * start
    assert rotated == point(1, -1, 0)
    # ...then scaling...
    scaled = scale * rotated
    assert scaled == point(5, -5, 0)
    # ...and finally translation.
    moved = move * scaled
    assert moved == point(15, 0, 7)
def test_chained_transformations_must_be_applied_in_reverse_order():
    start = point(1, 0, 1)
    # Matrix composition applies right-to-left: rotation, then scaling,
    # then translation - matching the step-by-step test above.
    combined = translation(10, 5, 7) @ scaling(5, 5, 5) @ rotation_x(pi / 2)
    assert combined * start == point(15, 0, 7)
def test_the_transformation_matrix_for_the_default_orientation():
    # Looking from the origin toward -z with +y up yields the identity view.
    t = view_transform(point(0, 0, 0), point(0, 0, -1), vector(0, 1, 0))
    assert t == I
def test_a_view_transformation_matrix_looking_in_positive_z_direction():
    # Turning around to face +z is equivalent to mirroring x and z.
    t = view_transform(point(0, 0, 0), point(0, 0, 1), vector(0, 1, 0))
    assert t == scaling(-1, 1, -1)
def test_the_view_transformation_moves_the_world():
    # Placing the eye at z=+8 is equivalent to pushing the world back 8 units.
    t = view_transform(point(0, 0, 8), point(0, 0, 0), vector(0, 1, 0))
    assert t == translation(0, 0, -8)
def test_an_arbitrary_view_transformation():
    # An off-axis eye/target/up combination produces this reference matrix
    # (values to 5 decimal places).
    from_p = point(1, 3, 2)
    to_p = point(4, -2, 8)
    up = vector(1, 1, 0)
    t = view_transform(from_p, to_p, up)
    # BUG FIX: removed a leftover debugging print(t) that polluted test output.
    assert t == Matrix(
        [[-0.50709, 0.50709, 0.67612, -2.36643],
         [0.76772, 0.60609, 0.12122, -2.82843],
         [-0.35857, 0.59761, -0.71714, 0.0],
         [0.0, 0.0, 0.0, 1.0]]
    )
|
<gh_stars>1-10
"""
A Galaxy wrapper script for corrector
<NAME> - GigaScience and BGI-HK
"""
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
import glob
def stop_err(msg):
    """Write *msg* to stderr and abort the script with a failure status."""
    sys.stderr.write(msg)
    # BUG FIX: sys.exit() with no argument exits with status 0 (success),
    # so the wrapper's failure would be invisible to Galaxy / the shell.
    # Exit non-zero to signal the error.
    sys.exit(1)
def cleanup_before_exit(tmp_dir):
    """Recursively remove *tmp_dir* if it was set and still exists."""
    if not tmp_dir:
        return
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
def main():
    """Galaxy tool wrapper around Corrector_HA_v2.0.

    Parses the tool options, runs the corrector binary in a scratch
    directory, copies the QC report to the Galaxy output dataset, then
    renames and moves every per-read-file result so Galaxy's multiple-output
    discovery picks them up.
    """
    #Parse command line
    parser = optparse.OptionParser()
    #List of params
    parser.add_option("", "--filelist", type="string", dest="filelist")
    parser.add_option("", "--freq_gz", type="string", dest="freq_gz")
    parser.add_option("", "--default_full_settings_type", dest="default_full_settings_type")
    #Custom params
    parser.add_option("-k", "--kmer_size", dest="kmer_size")
    parser.add_option("-l", "--low_freq_cutoff", dest="low_freq_cutoff")
    parser.add_option("-m", "--min_length_high_freq_region", dest="min_length_high_freq_region")
    parser.add_option("-c", "--max_read_change", dest="max_read_change")
    parser.add_option("-n", "--max_node_num", dest="max_node_num")
    parser.add_option("-a", "--remove_suspicious_data", dest="remove_suspicious_data")
    parser.add_option("-Q", "--ascii_shift_quality_value", dest="ascii_shift_quality_value")
    parser.add_option("-e", "--trim_suspicious_end_regions_Q", dest="trim_suspicious_end_regions_Q")
    parser.add_option("-w", "--trim_error_bases_Q", dest="trim_error_bases_Q")
    parser.add_option("-q", "--qual_threshold_error_bases", dest="qual_threshold_error_bases")
    parser.add_option("-x", "--length_trim_low_qual_ends", dest="length_trim_low_qual_ends")
    parser.add_option("-r", "--min_length_trimmed_read", dest="min_length_trimmed_read")
    parser.add_option("-t", "--thread_num", dest="thread_num")
    parser.add_option("-j", "--convert_reads_into_paired_end_file", dest="convert_reads_into_paired_end_file")
    parser.add_option("-o", "--output_format", dest="output_format")
    #Multiple outputs; number not known before job execution
    parser.add_option("", "--output1.id", dest='output1_id')
    parser.add_option("", "--output1", dest='output1')
    parser.add_option("", "--__new_file_path__", dest='__new_file_path__')
    opts, args = parser.parse_args()
    #Temp directory for data processing
    temp_dir = tempfile.mkdtemp()
    #Files for std out and std error
    tmp_out_file = tempfile.NamedTemporaryFile(dir=temp_dir).name
    tmp_stdout = open(tmp_out_file, 'wb')
    tmp_err_file = tempfile.NamedTemporaryFile(dir=temp_dir).name
    tmp_stderr = open(tmp_err_file, 'wb')
    #Set up command line call
    # NOTE(review): cmd is later run with shell=True; option values are
    # interpolated unquoted — fine for Galaxy-generated values, but worth
    # confirming none can contain shell metacharacters.
    if opts.default_full_settings_type == "default":
        cmd = "Corrector_HA_v2.0 %s %s" % (opts.freq_gz, opts.filelist)
    elif opts.default_full_settings_type == "full":
        cmd = "Corrector_HA_v2.0 %s %s -k %s -l %s -m %s -c %s -n %s -a %s -Q %s -e %s -w %s -q %s -r %s -t %s -j %s -o %s" % (opts.freq_gz, opts.filelist, opts.kmer_size, opts.low_freq_cutoff, opts.min_length_high_freq_region, opts.max_read_change, opts.max_node_num, opts.remove_suspicious_data, opts.ascii_shift_quality_value, opts.trim_suspicious_end_regions_Q, opts.trim_error_bases_Q, opts.qual_threshold_error_bases, opts.min_length_trimmed_read, opts.thread_num, opts.convert_reads_into_paired_end_file, opts.output_format)
        # -x is optional and only meaningful with the full settings
        if opts.length_trim_low_qual_ends != "":
            cmd = cmd + " -x %s" % opts.length_trim_low_qual_ends
    print "Command executed: ", cmd
    # Chunk size used when draining the captured stdout/stderr files
    buffsize = 1048576
    #Temp directory to perform processing
    dirpath = tempfile.mkdtemp()
    print "Working directory: ", dirpath
    try:
        #Execution occurs in the directory where the input read files are
        proc = subprocess.Popen(args=cmd, shell=True, cwd=dirpath, stdout=tmp_stdout, stderr=tmp_stderr)
        returncode = proc.wait()
        #Get stdout, allowing for case where it's very large
        tmp_stdout = open(tmp_out_file, 'rb')
        stdout = ''
        try:
            while True:
                stdout += tmp_stdout.read(buffsize)
                if not stdout or len(stdout) % buffsize != 0:
                    break
        except OverflowError:
            pass
        print stdout
        #Get stderr, allowing for case where it's very large
        tmp_stderr = open(tmp_err_file, 'rb')
        stderr = ''
        try:
            while True:
                stderr += tmp_stderr.read(buffsize)
                if not stderr or len(stderr) % buffsize != 0:
                    break
        except OverflowError:
            pass
        #Close streams
        tmp_stdout.close()
        tmp_stderr.close()
        if returncode != 0:
            raise Exception, stderr
    except Exception, e:
        raise Exception, 'Problem performing Corrector process: ' + str(e)
    #Read Corrector results into outputs
    print "Unique identifier for file: " + opts.output1_id
    print "Files kept in: " + opts.__new_file_path__
    #Excel file output: copy the QC report produced next to the filelist
    xls_out = open(opts.output1, 'w')
    xlspath = opts.filelist + ".QC.xls"
    xls_in = open(xlspath, 'r')
    data = xls_in.read()
    xls_out.write(data)
    xls_out.close()
    xls_in.close()
    #Create outputs; need to move and rename files for galaxy for display multiple files
    print "Reading filelist contents"
    # NOTE: `file`, `dir` and `format` below shadow Python builtins.
    file = open(opts.filelist)
    index = 1
    #Check file format: map the -o option value to the output file suffix
    if opts.output_format == "0":
        format = ".fa.gz"
    elif opts.output_format =="1":
        format = ".fq.gz"
    elif opts.output_format =="2":
        format = ".fa"
    elif opts.output_format =="3":
        format = ".fq"
    #Read the file paths in read.lst; each line names one input read file
    for line in file:
        print "line:", line
        #Work with cor.pair.fq.gz files
        print "Working on cor.pair files"
        #Create path to access file
        source = line.rstrip() + ".cor.pair_" + str(index) + format
        print "Renaming file: ", source
        #Create string for renaming file
        dir = line[0:line.rindex("/")]
        filename = line[line.rindex("/") + 1:].rstrip()
        # Galaxy's discovery pattern uses '_' as a separator, so strip them
        filename = filename.replace("_", ".")
        dest = dir + "/primary_" + opts.output1_id + "_" + filename + ".cor.pair." + str(index) + "_visible_" + format
        print "New file name: ", dest
        #Rename and move file
        os.rename(source, dest)
        shutil.move(dest, opts.__new_file_path__)
        #Deal with cor.stat files
        print "Working on cor.stat files"
        #Create path to access file
        source = line.rstrip() + ".cor.stat"
        print "Renaming file: ", source
        #Create string for renaming file
        dir = line[0:line.rindex("/")]
        filename = line[line.rindex("/") + 1:].rstrip()
        filename = filename.replace("_", ".")
        dest = dir + "/primary_" + opts.output1_id + "_" + filename + ".cor.stat_visible_txt"
        print "New file name: ", dest
        #Rename and move file
        os.rename(source, dest)
        shutil.move(dest, opts.__new_file_path__)
        #Deal with cor single fq gz files if present
        print "Working on cor single fq gz files"
        #Create path to access file
        source = line.rstrip() + ".cor.single.fq.gz"
        print "Renaming file: ", source
        #Need to check that this file is present
        if os.path.isfile(source):
            #Create string for renaming file
            dir = line[0:line.rindex("/")]
            filename = line[line.rindex("/") + 1:].rstrip()
            filename = filename.replace("_", ".")
            dest = dir + "/primary_" + opts.output1_id + "_" + filename + ".cor.single_visible_" + format
            print "New file name: ", dest
            #Rename and move file
            os.rename(source, dest)
            shutil.move(dest, opts.__new_file_path__)
        #Deal with cor pair single stat files if present
        print "Working on cor single single stat files"
        #Create path to access file
        source = line.rstrip() + ".cor.pair.single.stat"
        print "Renaming file: ", source
        #Need to check that this file is present
        if os.path.isfile(source):
            #Create string for renaming file
            dir = line[0:line.rindex("/")]
            filename = line[line.rindex("/") + 1:].rstrip()
            filename = filename.replace("_", ".")
            dest = dir + "/primary_" + opts.output1_id + "_" + filename + ".cor.pair.single.stat_visible_txt"
            print "New file name: ", dest
            #Rename and move file
            os.rename(source, dest)
            shutil.move(dest, opts.__new_file_path__)
        index = index + 1
    file.close()
    #Clean up temp files
    cleanup_before_exit(temp_dir)
    cleanup_before_exit(dirpath)
    #Check results in output file
    if os.path.getsize(opts.output1) > 0:
        sys.stdout.write('Status complete')
    else:
        stop_err("The output is empty")
if __name__ == "__main__": main()
|
# test_cleanupaccounts.py - functionnal test for CleanAccounts task
#
# This file is part of debexpo
# https://salsa.debian.org/mentors.debian.net-team/debexpo
#
# Copyright © 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from datetime import timedelta, datetime, timezone
from django.test import override_settings
from django.conf import settings
from tests import TestController
from debexpo.accounts.models import User
from debexpo.accounts.tasks import CleanupAccounts
class TestCronjobCleanupAccounts(TestController):
    """Functional tests for the CleanupAccounts cron task."""

    def setUp(self):
        self.task = CleanupAccounts()
        self.index = 0
        # Create two regular accounts to make sure they are not removed
        self._create_accounts(True, activated=True)
        self._create_accounts(False, activated=True)

    def tearDown(self):
        self._remove_all_users()

    # Ask for a cleanup, but no account at all
    def test_cleanup_no_users(self):
        self.task.run()
        self._assert_account_cleaned_up()

    # Ask for a cleanup, but no account are expired
    def test_cleanup_no_expired_accounts(self):
        self._create_accounts(False)
        self.task.run()
        self._assert_account_cleaned_up()

    # Ask for a cleanup, all accounts are expired
    def test_cleanup_all_expired_accounts(self):
        self._create_accounts(True)
        self.task.run()
        self._assert_account_cleaned_up()

    # Ask for a cleanup, some account are expired
    def test_cleanup_mixed_expired_accounts(self):
        self._create_accounts(True)
        self._create_accounts(False)
        self.task.run()
        self._assert_account_cleaned_up()

    # Ask for a cleanup, using a custom expiration date
    @override_settings(REGISTRATION_EXPIRATION_DAYS=2)
    def test_cleanup_custom_expired_accounts(self):
        # Re-setup the plugin
        self.task = CleanupAccounts()
        # Edit expiration date
        self.test_cleanup_mixed_expired_accounts()
        self._assert_account_cleaned_up()

    def _create_accounts(self, expired, activated=False):
        """Create one account.

        expired   -- date_joined is placed one day before/after the cutoff
        activated -- whether the account has a usable password
        """
        if expired:
            creation_date = self._get_expiration_date() - timedelta(days=1)
        else:
            creation_date = self._get_expiration_date() + timedelta(days=1)
        if activated:
            password = 'password'
        else:
            # BUG FIX: the original line was corrupted ("password = <PASSWORD>'",
            # a syntax error). Non-activated accounts must carry a password
            # starting with '!' — Django's unusable-password marker — because
            # _get_expired_accounts()/_assert_account_cleaned_up() filter on
            # that prefix.
            password = '!'
        self.index += 1
        user = User(name='Test expiration user',
                    # Unique address per created account; the original value
                    # was redacted — confirm the expected domain if it matters.
                    email=f'test-expiration-{self.index}@example.org',
                    password=password,
                    date_joined=creation_date)
        user.save()

    def _get_expiration_date(self):
        # Accounts older than REGISTRATION_EXPIRATION_DAYS are expired.
        delta = settings.REGISTRATION_EXPIRATION_DAYS
        return datetime.now(timezone.utc) - timedelta(days=delta)

    def _get_expired_accounts(self, users):
        # Expired = never activated ('!' password), old enough, still active.
        return users.filter(
            password__startswith='!',
            date_joined__lt=self._get_expiration_date(),
            is_active=True)

    def _assert_account_cleaned_up(self):
        users = self._get_accounts()
        users_to_cleanup = self._get_expired_accounts(users)
        users_activated = users.exclude(password__startswith='!')
        # No more expired accounts (assertEquals is a deprecated alias)
        self.assertEqual(users_to_cleanup.count(), 0)
        # Don't touch regular accounts
        self.assertEqual(users_activated.count(), 2)

    def _get_accounts(self):
        return User.objects

    def _remove_all_users(self):
        users = self._get_accounts()
        if users.count() > 0:
            users.all().delete()
|
#!/usr/bin/env python3
import time
from Cluster import Cluster
from TestHelper import TestHelper
from WalletMgr import WalletMgr
from testUtils import Utils
###############################################################
# nodeos_producer_watermark_test
# --dump-error-details <Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout>
# --keep-logs <Don't delete var/lib/node_* folders upon test completion>
###############################################################
def isValidBlockProducer(prodsActive, blockNum, node):
    """Return the active-flag of the producer of *blockNum*, or False when
    that producer is not in *prodsActive* at all."""
    producer = node.getBlockProducerByNum(blockNum)
    return prodsActive.get(producer, False)
def validBlockProducer(prodsActive, prodsSeen, blockNum, node):
    """Validate the producer of *blockNum*, record it in *prodsSeen* and
    return its name; aborts the test on an unknown or inactive producer."""
    producer = node.getBlockProducerByNum(blockNum)
    if producer not in prodsActive:
        Utils.cmdError("unexpected block producer %s at blockNum=%s" % (producer, blockNum))
        Utils.errorExit("Failed because of invalid block producer")
    if not prodsActive[producer]:
        # NOTE(review): ProducerToNode is not imported in this file; this
        # branch would raise NameError if ever reached — confirm the import.
        Utils.cmdError("block producer %s for blockNum=%s not elected, belongs to node %s" % (
            producer, blockNum, ProducerToNode.map[producer]))
        Utils.errorExit("Failed because of incorrect block producer")
    prodsSeen[producer] = True
    return producer
def setProds(sharedProdKey):
    """Push an eosio `setprods` action installing a fixed 4-producer schedule.

    The shrproducera entry is signed with *sharedProdKey*; all other entries
    use the cluster's default producer keys. Relies on the module-level
    `cluster` object.
    """
    entries = []
    for name in ["defproducera", "shrproducera", "defproducerb", "defproducerc"]:
        key = cluster.defProducerAccounts[name].activePublicKey
        if name == "shrproducera":
            key = sharedProdKey
        entries.append(' { "producer_name": "%s", "block_signing_key": "%s" }' % (name, key))
    setProdsStr = '{"schedule": [' + ','.join(entries) + ' ] }'
    Utils.Print("setprods: %s" % (setProdsStr))
    opts = "--permission eosio@active"
    trans = cluster.biosNode.pushMessage("eosio", "setprods", setProdsStr, opts)
    if trans is None or not trans[0]:
        Utils.Print("ERROR: Failed to set producer with cmd %s" % (setProdsStr))
def verifyProductionRounds(trans, node, prodsActive, rounds):
    """Verify that, starting from the next clean production cycle, each of
    *rounds* rounds consists of one 12-block slot per active producer.

    trans       -- transaction used to locate the next clean cycle
    node        -- node whose chain is inspected
    prodsActive -- dict mapping producer name -> expected-active flag
    rounds      -- number of complete rounds to validate
    """
    blockNum = node.getNextCleanProductionCycle(trans)
    Utils.Print("Validating blockNum=%s" % (blockNum))

    # Silence per-call debug output while scanning many blocks.
    temp = Utils.Debug
    Utils.Debug = False
    Utils.Print("FIND VALID BLOCK PRODUCER")
    blockProducer = node.getBlockProducerByNum(blockNum)
    lastBlockProducer = blockProducer
    adjust = False
    # Skip forward until a block produced by a known, active producer.
    while not isValidBlockProducer(prodsActive, blockNum, node):
        adjust = True
        blockProducer = node.getBlockProducerByNum(blockNum)
        if lastBlockProducer != blockProducer:
            # NOTE(review): ProducerToNode is not imported in this file;
            # confirm the import if this path can be reached.
            Utils.Print("blockProducer=%s for blockNum=%s is for node=%s" % (
                blockProducer, blockNum, ProducerToNode.map[blockProducer]))
        lastBlockProducer = blockProducer
        blockNum += 1

    Utils.Print("VALID BLOCK PRODUCER")
    saw = 0
    sawHigh = 0
    startingFrom = blockNum
    doPrint = 0
    invalidCount = 0
    # Align to the start of a full 12-block slot of a single producer.
    while adjust:
        invalidCount += 1
        if lastBlockProducer == blockProducer:
            saw += 1
        else:
            if saw >= 12:
                startingFrom = blockNum
                if saw > 12:
                    Utils.Print("ERROR!!!!!!!!!!!!!! saw=%s, blockProducer=%s, blockNum=%s" % (
                        saw, blockProducer, blockNum))
                break
            else:
                if saw > sawHigh:
                    sawHigh = saw
                    Utils.Print("sawHigh=%s" % (sawHigh))
                if doPrint < 5:
                    doPrint += 1
                    Utils.Print("saw=%s, blockProducer=%s, blockNum=%s" % (saw, blockProducer, blockNum))
                lastBlockProducer = blockProducer
                saw = 1
        blockProducer = node.getBlockProducerByNum(blockNum)
        blockNum += 1
    if adjust:
        blockNum -= 1
        Utils.Print("ADJUSTED %s blocks" % (invalidCount - 1))

    prodsSeen = None
    reportFirstMissedBlock = False
    Utils.Print("Verify %s complete rounds of all producers producing" % (rounds))
    prodsSize = len(prodsActive)
    for i in range(0, rounds):
        prodsSeen = {}
        lastBlockProducer = None
        for j in range(0, prodsSize):
            # each new set of 12 blocks should have a different blockProducer
            if lastBlockProducer is not None and lastBlockProducer == node.getBlockProducerByNum(blockNum):
                Utils.cmdError("expected blockNum %s to be produced by any of the valid producers except %s" % (
                    blockNum, lastBlockProducer))
                Utils.errorExit("Failed because of incorrect block producer order")

            # make sure that the next set of 12 blocks all have the same blockProducer
            lastBlockProducer = node.getBlockProducerByNum(blockNum)
            for k in range(0, 12):
                # BUG FIX: the original called validBlockProducer(..., node1),
                # reaching past the parameter to a module-level global; use
                # the *node* argument so this works for any node passed in.
                blockProducer = validBlockProducer(prodsActive, prodsSeen, blockNum, node)
                if lastBlockProducer != blockProducer:
                    if not reportFirstMissedBlock:
                        printStr = ""
                        newBlockNum = blockNum - 18
                        for l in range(0, 36):
                            printStr += "%s" % (newBlockNum)
                            printStr += ":"
                            newBlockProducer = node.getBlockProducerByNum(newBlockNum)
                            printStr += "%s" % (newBlockProducer)
                            printStr += " "
                            newBlockNum += 1
                        Utils.Print(
                            "NOTE: expected blockNum %s (started from %s) to be produced by %s, but produded by %s: round=%s, prod slot=%s, prod num=%s - %s" % (
                                blockNum, startingFrom, lastBlockProducer, blockProducer, i, j, k, printStr))
                        reportFirstMissedBlock = True
                    break
                blockNum += 1

        # make sure that we have seen all 21 producers
        prodsSeenKeys = prodsSeen.keys()
        if len(prodsSeenKeys) != prodsSize:
            Utils.cmdError(
                "only saw %s producers of expected %d. At blockNum %s only the following producers were seen: %s" % (
                    len(prodsSeenKeys), prodsSize, blockNum, ",".join(prodsSeenKeys)))
            Utils.errorExit("Failed because of missing block producers")

    Utils.Debug = temp
# Convenience aliases for the test-harness helpers.
Print = Utils.Print
errorExit = Utils.errorExit
# Parse the standard nodeos test-harness command-line switches.
args = TestHelper.parse_args(
    {"--prod-count", "--dump-error-details", "--keep-logs", "-v", "--leave-running", "--clean-run",
     "--wallet-port"})
Utils.Debug = args.v
totalNodes = 3
cluster = Cluster(walletd=True)
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
dontKill = args.leave_running
prodCount = args.prod_count
killAll = args.clean_run
walletPort = args.wallet_port
walletMgr = WalletMgr(True, port=walletPort)
testSuccessful = False
killEosInstances = not dontKill
killWallet = not dontKill
WalletdName = Utils.EosWalletName
ClientName = "clio"
try:
    # This test is written for exactly 3 producing nodes plus one shared producer.
    assert (totalNodes == 3)
    TestHelper.printSystemInfo("BEGIN")
    cluster.setWalletMgr(walletMgr)
    cluster.killall(allInstances=killAll)
    cluster.cleanup()
    Print("Stand up cluster")
    if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes,
                      totalProducers=totalNodes, useBiosBootFile=False, onlySetProds=True, sharedProducers=1) is False:
        Utils.cmdError("launcher")
        Utils.errorExit("Failed to stand up eos cluster.")
    Print("Validating system accounts after bootstrap")
    cluster.validateAccounts(None)
    node0 = cluster.getNode(0)
    node1 = cluster.getNode(1)
    node2 = cluster.getNode(2)
    node = node0
    # 3 default producers + 1 shared producer
    numprod = totalNodes + 1
    trans = None
    prodsActive = {}
    prodsActive["shrproducera"] = True
    prodsActive["defproducera"] = True
    prodsActive["defproducerb"] = True
    prodsActive["defproducerc"] = True
    Print(
        "Wait for initial schedule: defproducera(node 0) shrproducera(node 2) defproducerb(node 1) defproducerc(node 2)")
    # Poll until a non-eosio producer shows up at the head (schedule active).
    tries = 10
    while tries > 0:
        node.infoValid = False
        info = node.getInfo()
        if node.infoValid and node.lastRetrievedHeadBlockProducer != "eosio":
            break
        time.sleep(1)
        tries = tries - 1
    if tries == 0:
        Utils.errorExit("failed to wait for initial schedule")
    # try to change signing key of shrproducera, shrproducera will produced by node1 instead of node2
    Print("change producer signing key, shrproducera will be produced by node1 instead of node2")
    shracc_node1 = cluster.defProducerAccounts["shrproducera"]
    shracc_node1.activePublicKey = cluster.defProducerAccounts["defproducerb"].activePublicKey
    setProds(shracc_node1.activePublicKey)
    Print("sleep for 4/3 rounds...")
    # one producer slot is 6 seconds (12 blocks at 0.5s)
    time.sleep(numprod * 6 * 4 / 3)
    verifyProductionRounds(trans, node0, prodsActive, 1)
    # change signing key of shrproducera that no one can sign
    accounts = cluster.createAccountKeys(1)
    Print("change producer signing key of shrproducera that none of the node has")
    shracc_node1.activePublicKey = accounts[0].activePublicKey
    del prodsActive["shrproducera"]
    setProds(shracc_node1.activePublicKey)
    Print("sleep for 4/3 rounds...")
    time.sleep(numprod * 6 * 4 / 3)
    verifyProductionRounds(trans, node0, prodsActive, 1)
    # change signing key back to node1
    Print("change producer signing key of shrproducera so that node1 can produce again")
    shracc_node1.activePublicKey = cluster.defProducerAccounts["defproducerb"].activePublicKey
    prodsActive["shrproducera"] = True
    setProds(shracc_node1.activePublicKey)
    tries = numprod * 6 * 4  # give 4 rounds
    while tries > 0:
        node.infoValid = False
        info = node.getInfo()
        if node.infoValid and node.lastRetrievedHeadBlockProducer == "shrproducera":
            break
        time.sleep(1)
        tries = tries - 1
    if tries == 0:
        Utils.errorExit("shrproducera failed to produce")
    testSuccessful = True
finally:
    TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances,
                        killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails)
exit(0)
|
# gcsl/__init__.py
import copy
import itertools
import time
from collections import deque
from typing import Any, Callable, List, Literal, Sequence, Tuple, Type, Union
import gym
import numpy as np
import torch
import torch.distributions as D
import torch.optim
import torch.nn
import torch.nn.functional as F
# Type aliases describing the GCSL data model.
State = Any  # environment observation
Goal = State  # goals live in the same space as states
Action = int  # discrete action index
Horizon = int  # steps remaining until the episode's final transition
SAGHTuple = Tuple[State, Action, Goal, Horizon]  # one experience tuple
Trajectory = List[SAGHTuple]  # ordered experiences of a single episode
PolicyFn = Callable[[State, Goal, Horizon], Action]  # goal/horizon-conditioned policy
GoalSampleFn = Callable[[], Goal]  # samples a training/eval goal
GoalUpdateFn = Callable[[Goal, Tuple[int, int]], Goal]  # evolves goal given (t, max_steps)
GoalRelabelFn = Callable[[SAGHTuple, SAGHTuple], Goal]  # relabels goal from (t0, t1)
def collect_trajectories(
    env: gym.Env,
    goal_sample_fn: GoalSampleFn,
    policy_fn: PolicyFn,
    num_episodes: int,
    max_steps: int = 50,
) -> List[List[Tuple]]:
    """Collect trajectories by interacting with the environment.

    Runs *num_episodes* episodes of at most *max_steps* steps each and
    returns, per episode, a list of (state, action, goal, horizon) tuples,
    where horizon is the number of steps remaining until the episode's
    final transition.
    """
    collected = []
    for _ in range(num_episodes):
        state = env.reset()
        goal = goal_sample_fn()
        episode = []
        for t in range(max_steps):
            action = policy_fn(state, goal, t)
            next_state, _, done, _ = env.step(action)
            episode.append(copy.copy((state, action, goal)))
            state = next_state
            if done:
                break
        # Attach horizons: the last transition gets 0, the first gets len-1.
        T = len(episode)
        horizons = T - np.arange(T) - 1
        episode = [sag + (h,) for sag, h in zip(episode, horizons)]
        collected.append(episode)
    return collected
@torch.no_grad()
def evaluate_policy(
    env: gym.Env,
    goal_sample_fn: GoalSampleFn,
    policy_fn: PolicyFn,
    num_episodes: int,
    max_steps: int = 50,
    render_freq: bool = False,
    return_images: bool = False,
    goal_dynamics_fn: GoalUpdateFn = None,
) -> Union[Tuple[float, float], Tuple[float, float, List[np.ndarray]]]:
    """Evaluate the policy in the given environment.

    Returns average final goal metric and average episode lengths; when
    *return_images* is True, additionally returns the rendered frames.

    render_freq -- render every render_freq-th episode; a falsy value
                   disables rendering entirely.
    """
    if goal_dynamics_fn is None:
        goal_dynamics_fn = lambda g, _: g
    goal_metrics = []
    all_lengths = []
    all_images = []
    for e in range(1, num_episodes + 1):
        goal = goal_sample_fn()
        # BUG FIX: the original computed `e % render_freq` unconditionally,
        # which raises ZeroDivisionError for the default render_freq=False.
        render = bool(render_freq) and e % render_freq == 0
        state = env.reset()
        for t in range(max_steps):
            goal = goal_dynamics_fn(goal, (t, max_steps))
            action = policy_fn(state, goal, t)
            state, _, done, _ = env.step(action)
            if render:
                if return_images:
                    img = env.render(mode="rgb_array", goal=goal)
                    all_images.append(img)
                else:
                    env.render(mode="human", goal=goal)
                    time.sleep(0.5 if done else 0.01)
            if done:
                # (leftover debug `print(state[1])` removed)
                break
        if hasattr(env, "goal_metric"):
            goal_metrics.append(env.goal_metric(state, goal))
        all_lengths.append(t + 1)
    if return_images:
        return np.mean(goal_metrics), np.mean(all_lengths), all_images
    else:
        return np.mean(goal_metrics), np.mean(all_lengths)
class ExperienceBuffer:
    """Experience buffer of limited capacity.

    This buffers stores (state, action, horizon) tuples of trajectories
    in a flat memory layout. Here horizon is time (offset >= 0) to tuple
    representing the final trajectory state.

    Once the capacity is reached oldest tuples will get discarded first,
    leaving potentially partial trajectories in memory. Since we only
    required that a future state is available for relabeling, this does
    not pose a problem.
    """

    def __init__(self, max_experiences) -> None:
        # deque drops the oldest entries automatically once full.
        self.memory = deque(maxlen=max_experiences)

    def insert(self, trajectories: List[Trajectory]):
        """Append experiences. This might remove oldest values
        if capacity of the buffer is reached."""
        for t in itertools.chain(*trajectories):
            self.memory.append(t)

    def sample(
        self,
        num_experiences: int,
        goal_relabel_fn: GoalRelabelFn,
        max_horizon: int = None,
    ) -> List[SAGHTuple]:
        """Uniform randomly sample N (state,action,goal,horizon) tuples."""
        # Sampling is with replacement over the flat memory.
        indices = np.random.choice(len(self.memory), size=num_experiences)
        if max_horizon is None:
            max_horizon = np.iinfo(int).max
        tuples = [self._relabel(idx, goal_relabel_fn, max_horizon) for idx in indices]
        return tuples

    def __len__(self):
        return len(self.memory)

    def _relabel(
        self, idx: int, goal_relabel_fn: GoalRelabelFn, max_horizon: int
    ) -> SAGHTuple:
        # Pick a future tuple of the same trajectory and relabel the goal
        # with respect to it (hindsight relabeling).
        t0 = self.memory[idx]
        s, a, _, h = t0
        if h > 0:
            # If not last element of trajectory, we can sample
            # a new horizon, which defines the target tuple.
            # NOTE(review): the upper bound min(h + 1, max_horizon) is
            # exclusive, so sampled horizons are < max_horizon, and
            # np.random.randint raises ValueError when max_horizon <= 1 —
            # confirm both are intended.
            h = int(np.random.randint(1, min(h + 1, max_horizon)))
        t1 = self.memory[idx + h]
        # Note, h(t0) >= h(t1)
        g = goal_relabel_fn(t0, t1)
        return (s, a, g, h)
def sample_buffers(
    buffers: Union[ExperienceBuffer, Sequence[ExperienceBuffer]],
    num_experiences: int,
    goal_relabel_fn: GoalRelabelFn,
    max_horizon: int = None,
    buf_probs: Sequence[float] = None,
) -> List[SAGHTuple]:
    """Sample experiences from a number of buffers.

    This function is particularily useful if multiple buffers are to be sampled
    from. In this case, per default, the expected number of experiences sampled
    from each buffers is proportional to the buffer length.

    buf_probs -- optional explicit per-buffer selection probabilities
                 (must sum to 1); defaults to length-proportional weights.
    """
    if isinstance(buffers, ExperienceBuffer):
        buffers = [buffers]
    if buf_probs is None:
        buf_probs = np.array([len(b) for b in buffers]).astype(float)
        buf_probs /= buf_probs.sum()
    else:
        # BUG FIX: the original used np.ndarray(buf_probs), which interprets
        # the probabilities as an array *shape* and returns uninitialized
        # memory; np.asarray converts the given values instead.
        buf_probs = np.asarray(buf_probs, dtype=float)
    # Split the requested sample count across buffers according to buf_probs.
    num_samples_per_buffer = np.random.multinomial(num_experiences, buf_probs)
    nested_tuples = [
        b.sample(n, goal_relabel_fn, max_horizon=max_horizon)
        for b, n in zip(buffers, num_samples_per_buffer)
    ]
    return list(itertools.chain(*nested_tuples))
def to_tensor(
    tuples: List[SAGHTuple],
) -> Tuple[torch.Tensor, torch.Tensor, torch.LongTensor, torch.IntTensor]:
    """Converts lists of (state,action,goal,horizon) tuples to separate tensors."""
    if not tuples:
        states, actions, goals, horizons = [], [], [], []
    else:
        states, actions, goals, horizons = zip(*tuples)
    return (
        torch.tensor(states),
        torch.tensor(actions).long(),
        torch.tensor(goals),
        torch.tensor(horizons).int(),
    )
def gcsl_step(
    net: torch.nn.Module,
    opt: torch.optim.Optimizer,
    buffers: Union[ExperienceBuffer, Sequence[ExperienceBuffer]],
    relabel_goal_fn: GoalRelabelFn,
) -> torch.Tensor:
    """Performs a single training step in the GCSL regime."""
    batch = sample_buffers(buffers, 512, relabel_goal_fn)
    states, actions, goals, horizons = to_tensor(batch)
    # Final states (horizon == 0) carry no action target; drop them.
    keep = horizons > 0
    opt.zero_grad()
    logits = net(states[keep], goals[keep], horizons[keep])
    loss = F.cross_entropy(logits, actions[keep])
    loss.backward()
    opt.step()
    return loss
def make_fc_layers(
    infeatures: int,
    arch: List[Union[int, Literal["D", "A", "N"]]],
    dropout: float = 0.1,
    activation: Type[torch.nn.Module] = torch.nn.ReLU,
) -> torch.nn.Sequential:
    """Helper function to create a fully connected network from an architecture description.

    Integers add a Linear layer of that width; "A" adds the activation,
    "N" a BatchNorm1d over the current width, anything else (i.e. "D")
    a Dropout layer.
    """
    modules = []
    width = infeatures
    for spec in arch:
        if isinstance(spec, int):
            modules.append(torch.nn.Linear(width, spec))
            width = spec
        elif spec == "A":
            modules.append(activation())
        elif spec == "N":
            modules.append(torch.nn.BatchNorm1d(width))
        else:
            modules.append(torch.nn.Dropout(p=dropout))
    return torch.nn.Sequential(*modules)
def make_policy_fn(
    net: torch.nn.Module, greedy: bool = False, tscaling: float = 0.1
) -> PolicyFn:
    """Creates a default policy function wrapping a torch.nn.Module returning action-logits.

    This method will be called for a single (s,a,h) tuple and its components may not be
    torch types.
    """

    def predict(s: State, g: Goal, h: Horizon):
        state = torch.tensor(s).unsqueeze(0)
        goal = torch.tensor(g).unsqueeze(0)
        logits = net(state, goal, h)
        if greedy:
            return torch.argmax(logits).item()
        # NOTE(review): temperature is applied as logits * (1 - tscaling);
        # a conventional temperature would divide by tscaling — confirm.
        return D.Categorical(logits=logits * (1 - tscaling)).sample().item()

    return predict
|
# pointnet_model/models/pointnet2_wss_reg.py
import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tensorflow as tf
import numpy as np
import tf_util
from pointnet_util import pointnet_sa_module, pointnet_sa_module_msg, pointnet_fp_module
def placeholder_inputs(batch_size, num_point, num_feature):
    """Create the TF1 placeholders: a BxNxF point-cloud batch and BxN per-point labels."""
    points_shape = (batch_size, num_point, num_feature)
    labels_shape = (batch_size, num_point)
    pointclouds_pl = tf.compat.v1.placeholder(tf.float32, shape=points_shape)
    labels_pl = tf.compat.v1.placeholder(tf.float32, shape=labels_shape)
    return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, batchnorm=False, bn_decay=None, dropout_rate=0.1):
    """
    PointNet++ for TAWSS regression, input is BxNxF, output BxN
    """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    num_features = point_cloud.get_shape()[2].value
    # First 3 features are xyz coordinates; any extra channel is kept as points.
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3]) # point coordinates
    if num_features == 3: l0_points = None
    else: l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,1]) # scale information
    #mid_xyz = {'l0_xyz': l0_xyz}
    #mid_points = {'l0_points': l0_points}
    # Set Abstraction layers with multi-scale grouping
    # Encoder: downsample to 256, then 16 points, then a single global feature.
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 256, [0.1,0.2,0.4], [16,32,64], [[64,128], [128,128], [128,128]], is_training, bn_decay, dropout_rate, scope='layer1', bn=batchnorm)
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 16, [0.2,0.4,0.8], [16,32,64], [[128],[256],[512]], is_training, bn_decay, dropout_rate, scope='layer2', bn=batchnorm)
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3', bn=batchnorm)
    #mid_xyz['l2_xyz'] = l2_xyz
    #mid_points['l2_points'] = l2_points
    #mid_xyz['l1_xyz'] = l1_xyz
    #mid_points['l1_points'] = l1_points
    #mid_xyz['l3_xyz'] = l3_xyz
    #mid_points['l3_points'] = l3_points
    # Feature Propagation layers
    # Decoder: propagate features back to the full-resolution point set.
    # NOTE(review): fp_layer3 concatenates l0_points, which is None when
    # num_features == 3 — this path appears to assume F > 3; confirm.
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [512], is_training, bn_decay, dropout_rate, scope='fp_layer1', bn=batchnorm)
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256], is_training, bn_decay, dropout_rate, scope='fp_layer2', bn=batchnorm)
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([l0_xyz, l0_points], axis=-1), l1_points, [128], is_training, bn_decay, dropout_rate, scope='fp_layer3', bn=batchnorm)
    # Fully Connected layers
    # Per-point regression head: 128 -> 1 channel, no final activation.
    net = tf_util.conv1d(l0_points, 128, 1, scope='fc1', padding='VALID', is_training=is_training, bn=batchnorm, bn_decay=bn_decay)
    #mid_points['feats'] = net
    net = tf_util.dropout(net, rate=dropout_rate, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 1, 1, scope='fc2', padding='VALID', activation_fn=None, bn=False)
    return net#, mid_xyz, mid_points
def get_loss(pred, label, loss='l1'):
    """
    pred: BxN,
    label: BxN
    """
    # Validate the loss name before adding any ops to the graph.
    known = {'l1', 'mae', 'mean_absolute_error',
             'l2', 'mse', 'mean_squared_error', 'huber'}
    if loss not in known:
        raise NotImplementedError('Unknown loss %s.' % str(loss))
    y_true = tf.squeeze(label)
    y_pred = tf.squeeze(pred)
    if loss in {'l1', 'mae', 'mean_absolute_error'}:
        reg_loss = tf.reduce_mean(tf.abs(y_true - y_pred))
    elif loss in {'l2', 'mse', 'mean_squared_error'}:
        reg_loss = tf.compat.v1.losses.mean_squared_error(labels=y_true, predictions=y_pred)
    else:  # huber
        reg_loss = tf.compat.v1.losses.huber_loss(labels=y_true, predictions=y_pred, delta=0.5)
    tf.compat.v1.summary.scalar(loss + ' loss', reg_loss)
    tf.compat.v1.add_to_collection('losses', reg_loss)
    return reg_loss
|
import RabinKarp
import plotly.plotly as py
import plotly.graph_objs as go
import time
# Authenticate with the plotly service (API key redacted upstream).
py.sign_in(username='aafham', api_key='<KEY>')
# Wall-clock start used for timing the whole run.
start = time.time()
# Load each city's news article, lower-cased for case-insensitive matching.
malaysiaIO = open('news/text/Kuala Lumpur.txt', 'r', encoding='utf-8')
malaysia_text = malaysiaIO.read().lower()
malaysia_text = malaysia_text.replace("\n", " ")
malaysiaIO.close()
jakartaIO = open('news/text/Jakarta.txt', 'r', encoding='utf-8')
jakarta_text = jakartaIO.read().lower()
jakartaIO.close()
manilaIO = open('news/text/Manila.txt', 'r', encoding='utf-8')
manila_text = manilaIO.read().lower()
manilaIO.close()
dhakaIO = open('news/text/Dhaka.txt', 'r', encoding='utf-8')
dhaka_text = dhakaIO.read().lower()
dhakaIO.close()
bandar_seri_begawanIO = open('news/text/Bandar Seri Begawan.txt', 'r', encoding='utf-8')
bandar_seri_begawan_text = bandar_seri_begawanIO.read().lower()
bandar_seri_begawanIO.close()
shanghaiIO = open('news/text/Shanghai.txt', 'r', encoding='utf-8')
shanghai_text = shanghaiIO.read().lower()
shanghaiIO.close()
tokyoIO = open('news/text/Tokyo.txt', 'r', encoding='utf-8')
tokyo_text = tokyoIO.read().lower()
tokyoIO.close()
# Sentiment lexicons: one word/phrase per line.
positive_word = open('wordlist/positivewords.txt', 'r', encoding='utf-8')
positive_text = positive_word.read().lower().split('\n')
negative_word = open('wordlist/negativewords.txt', 'r', encoding='utf-8')
negative_text = negative_word.read().lower().split('\n')
# getting the frequency of positive, negative and neutral words in a text
def wordcount(text):
    """Count sentiment-lexicon hits in *text*.

    Returns (positive, negative, neutral): positive/negative are the numbers
    of lexicon entries found in the text via Rabin-Karp matching (spaces are
    stripped from multi-word entries first); neutral is the total word count
    of the text minus both.
    """
    total_words = len(text.split())
    positive = sum(
        1 for pat in positive_text
        if RabinKarp.rabin_karp_matcher(pat.replace(" ", ""), text)
    )
    negative = sum(
        1 for pat in negative_text
        if RabinKarp.rabin_karp_matcher(pat.replace(" ", ""), text)
    )
    # neutral word is equal to the total words in text minus the total count
    # of words that is positive or negative
    neutral = total_words - (positive + negative)
    return positive, negative, neutral
kl_pos, kl_neg, kl_neutral = wordcount(malaysia_text)
dhaka_pos, dhaka_neg, dhaka_neutral = wordcount(dhaka_text)
jakarta_pos, jakarta_neg, jakarta_neutral = wordcount(jakarta_text)
bsb_pos, bsb_neg, bsb_neutral = wordcount(bandar_seri_begawan_text)
manila_pos, manila_neg, manila_neutral = wordcount(manila_text)
shanghai_pos, shanghai_neg, shanghai_neutral = wordcount(shanghai_text)
tokyo_pos, tokyo_neg, tokyo_neutral = wordcount(tokyo_text)

# One (report header, positive, negative, neutral) row per city, printed
# in the original report order.
_rows = [
    ("\nKuala Lumpur word count", kl_pos, kl_neg, kl_neutral),
    ("\nDhaka word count", dhaka_pos, dhaka_neg, dhaka_neutral),
    ("\nJakarta word count", jakarta_pos, jakarta_neg, jakarta_neutral),
    ("\n<NAME>i Begawan word count", bsb_pos, bsb_neg, bsb_neutral),
    ("\nManila word count", manila_pos, manila_neg, manila_neutral),
    ("\nShanghai word count", shanghai_pos, shanghai_neg, shanghai_neutral),
    ("\nTokyo word count", tokyo_pos, tokyo_neg, tokyo_neutral),
]
for _header, _p, _n, _u in _rows:
    print(_header)
    print("Positive word: " + str(_p) + " word(s)")
    print("Negative word: " + str(_n) + " word(s)")
    print("Neutral word: " + str(_u) + " word(s)")

# Axis data for the grouped histogram, one y-series per sentiment class.
x = ["<NAME>", "Dhaka", "Jakarta", "<NAME>", "Manila", "Shanghai", "Tokyo"]
positive_y = [kl_pos, dhaka_pos, jakarta_pos, bsb_pos, manila_pos, shanghai_pos, tokyo_pos]
negative_y = [kl_neg, dhaka_neg, jakarta_neg, bsb_neg, manila_neg, shanghai_neg, tokyo_neg]
neutral_y = [kl_neutral, dhaka_neutral, jakarta_neutral, bsb_neutral, manila_neutral,
             shanghai_neutral, tokyo_neutral]

# Graph: one summed histogram trace per sentiment class.
data = [
    go.Histogram(histfunc="sum", y=series, x=x, name=label)
    for label, series in (("Positive words", positive_y),
                          ("Negative words", negative_y),
                          ("Neutral words", neutral_y))
]
layout = go.Layout(
    title=go.layout.Title(
        text="Positive, Negative & Neutral Words",
        xref='paper',
        x=0
    )
)
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename='Positive & Negative Word Count')
# sent_value = ['Positive Word', 'Negative Word', 'Neutral Word']
#
# data = {'x': sent_value, 'y': [malaysia_pos, malaysia_neg, malaysia_neutral
# ], 'type': 'bar'}
# # data = {'x': sent_value, 'y': [positive_frequency, negative_frequency, neutral_frequency], 'type': 'bar'}
#
# layout = {'title': 'The Frequency of Positive and Negative Word',
# 'autosize': False,
# 'width': 800,
# 'height': 700,
# 'yaxis': {'title': 'Frequency of Word'},
# 'xaxis': {'title': 'Type of Word'}}
#
# py.plot([data], layout=layout)
# Sentiment & Conclusion
def sentiment(positive_frequency, negative_frequency, city):
    """Print and return the sentiment verdict for *city*.

    Compares positive vs. negative word frequencies.  A tie (including
    0/0) previously fell through both branches and printed nothing after
    the city name; it is now reported explicitly as neutral.  The verdict
    string ('positive' / 'negative' / 'neutral') is returned so callers
    and tests can reuse it; existing callers that ignore the return value
    are unaffected.
    """
    print("\n" + city.upper())
    if positive_frequency > negative_frequency:
        verdict = 'positive'
        print('The article is giving positive sentiment')
        print('So the country has positive political situation')
    elif negative_frequency > positive_frequency:
        verdict = 'negative'
        print('The article is giving negative sentiment')
        print('So the country has negative political situation')
    else:
        # New: make the tie case visible instead of silent.
        verdict = 'neutral'
        print('The article is giving neutral sentiment')
    return verdict
# Emit the verdict for every city in the original order, then report
# the script's wall-clock runtime.
for _pos, _neg, _city in (
    (kl_pos, kl_neg, "kuala lumpur"),
    (dhaka_pos, dhaka_neg, "dhaka"),
    (jakarta_pos, jakarta_neg, "jakarta"),
    (bsb_pos, bsb_neg, "bandar seri begawan"),
    (manila_pos, manila_neg, "manila"),
    (shanghai_pos, shanghai_neg, "shanghai"),
    (tokyo_pos, tokyo_neg, "tokyo"),
):
    sentiment(_pos, _neg, _city)

end = time.time() - start
print("Total running time: " + str(end) + "s")
|
<gh_stars>10-100
"""Tests for the views of the ``payslip`` app."""
from django.test import TestCase
from django.utils import timezone
from django_libs.tests.mixins import ViewRequestFactoryTestMixin
from mixer.backend.django import mixer
from .. import views
class DashboardViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the TemplateView ``DashboardView``."""
    view_class = views.DashboardView

    def setUp(self):
        self.user = mixer.blend('auth.User')

    def test_view(self):
        # Dashboard is staff-gated: denied first, allowed after is_staff set.
        self.is_not_callable(user=self.user)
        self.user.is_staff = True
        self.user.save()
        self.is_callable(user=self.user)
class CompanyCreateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the CreateView ``CompanyCreateView``."""
    view_class = views.CompanyCreateView

    def setUp(self):
        self.user = mixer.blend('auth.User', is_staff=True)

    def test_view(self):
        self.is_callable(user=self.user)
        # Successful creation POST redirects back to the dashboard.
        self.is_postable(data={'name': 'Foo'}, user=self.user,
                         to_url_name='payslip_dashboard')
class CompanyUpdateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the UpdateView ``CompanyUpdateView``."""
    view_class = views.CompanyUpdateView

    def setUp(self):
        # Create the staff user in one step, consistent with the other
        # staff-only test cases in this module (was blend + attribute
        # assignment + save; the resulting fixture is equivalent).
        self.user = mixer.blend('auth.User', is_staff=True)
        self.company = mixer.blend('payslip.Company')

    def get_view_kwargs(self):
        # The view resolves the company from the ``pk`` URL kwarg.
        return {'pk': self.company.pk}

    def test_view(self):
        self.is_callable(user=self.user)
class CompanyDeleteViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the DeleteView ``CompanyDeleteView``."""
    view_class = views.CompanyDeleteView

    def setUp(self):
        self.user = mixer.blend('auth.User')
        self.company = mixer.blend('payslip.Company')

    def get_view_kwargs(self):
        return {'pk': self.company.pk}

    def test_view(self):
        # Staff-gated: denied for a regular user, allowed once is_staff set.
        self.is_not_callable(user=self.user)
        self.user.is_staff = True
        self.user.save()
        self.is_callable(user=self.user)
        self.is_postable(data={'delete': True}, user=self.user,
                         to_url_name='payslip_dashboard')
class EmployeeCreateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the CreateView ``EmployeeCreateView``."""
    view_class = views.EmployeeCreateView

    def setUp(self):
        self.manager = mixer.blend('payslip.Employee', is_manager=True)

    def test_view(self):
        self.is_callable(user=self.manager.user)
        # Complete employee-form payload (credential values are anonymised
        # placeholders from the fixture data).
        data = {
            'first_name': 'Foo',
            'last_name': 'Bar',
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'retype_password': '<PASSWORD>',
            'title': '1',
        }
        self.is_postable(data=data, user=self.manager.user,
                         to_url_name='payslip_dashboard')
class EmployeeUpdateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the UpdateView ``EmployeeUpdateView``."""
    view_class = views.EmployeeUpdateView

    def setUp(self):
        self.manager = mixer.blend('payslip.Employee', is_manager=True)
        self.employee = mixer.blend('payslip.Employee',
                                    company=self.manager.company)
        self.staff = mixer.blend('auth.User', is_staff=True)
        # Extra-field fixtures: a default type, a 'Tax Class' type, and a
        # free-text 'Health' type; a 'Religion' type is created but no
        # field for it is attached to the employee.
        extra_field_type = mixer.blend('payslip.ExtraFieldType')
        extra_field_type2 = mixer.blend('payslip.ExtraFieldType',
                                        name='Tax Class')
        extra_field_type3 = mixer.blend('payslip.ExtraFieldType',
                                        name='Health', fixed_values=False)
        mixer.blend('payslip.ExtraFieldType', name='Religion',
                    fixed_values=False)
        extra_field = mixer.blend('payslip.ExtraField',
                                  field_type=extra_field_type)
        self.employee.extra_fields.add(extra_field)
        extra_field2 = mixer.blend('payslip.ExtraField',
                                   field_type=extra_field_type2, value='II')
        self.employee.extra_fields.add(extra_field2)
        extra_field3 = mixer.blend('payslip.ExtraField',
                                   field_type=extra_field_type3, value='yes')
        self.employee.extra_fields.add(extra_field3)

    def get_view_kwargs(self):
        return {'pk': self.employee.pk}

    def test_view(self):
        # Manager and staff may edit; the employee themselves may not.
        self.is_callable(user=self.manager.user)
        self.is_not_callable(user=self.employee.user)
        self.is_callable(user=self.staff)
        # POST includes the dynamically named extra-field inputs.
        data = {
            'first_name': 'Foo',
            'last_name': 'Bar',
            'email': '{0}'.format(self.employee.user.email),
            'title': '1',
            'Tax Class': 'II',
            'Health': 'no',
            'Religion': 'None',
        }
        self.is_postable(data=data, user=self.manager.user,
                         to_url_name='payslip_dashboard')
class EmployeeDeleteViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the DeleteView ``EmployeeDeleteView``."""
    view_class = views.EmployeeDeleteView

    def setUp(self):
        self.manager = mixer.blend('payslip.Employee', is_manager=True)
        self.employee = mixer.blend('payslip.Employee',
                                    company=self.manager.company)

    def get_view_kwargs(self):
        return {'pk': self.employee.pk}

    def test_view(self):
        # A manager of the same company can view and confirm the deletion.
        self.is_callable(user=self.manager.user)
        self.is_postable(data={'delete': True}, user=self.manager.user,
                         to_url_name='payslip_dashboard')
class ExtraFieldCreateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the CreateView ``ExtraFieldCreateView``."""
    view_class = views.ExtraFieldCreateView

    def setUp(self):
        self.staff = mixer.blend('auth.User', is_staff=True)
        self.extra_field_type = mixer.blend('payslip.ExtraFieldType',
                                            fixed_values=True)

    def test_view(self):
        self.is_callable(user=self.staff)
        # Create a field value bound to the fixed-values type.
        data = {
            'field_type': self.extra_field_type.id,
            'value': 'Bar',
        }
        self.is_postable(data=data, user=self.staff,
                         to_url_name='payslip_dashboard')
class ExtraFieldUpdateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the UpdateView ``ExtraFieldUpdateView``."""
    view_class = views.ExtraFieldUpdateView

    def setUp(self):
        self.extra_field = mixer.blend('payslip.ExtraField')
        self.staff = mixer.blend('auth.User', is_staff=True)

    def get_view_kwargs(self):
        return {'pk': self.extra_field.pk}

    def test_view(self):
        # Only the GET path is exercised here.
        self.is_callable(user=self.staff)
class ExtraFieldDeleteViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the DeleteView ``ExtraFieldDeleteView``."""
    view_class = views.ExtraFieldDeleteView

    def setUp(self):
        self.staff = mixer.blend('auth.User', is_staff=True)
        self.extra_field = mixer.blend('payslip.ExtraField')

    def get_view_kwargs(self):
        return {'pk': self.extra_field.pk}

    def test_view(self):
        self.is_callable(user=self.staff)
        # Confirming the delete redirects back to the dashboard.
        self.is_postable(data={'delete': True}, user=self.staff,
                         to_url_name='payslip_dashboard')
class ExtraFieldTypeCreateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the CreateView ``ExtraFieldTypeCreateView``."""
    view_class = views.ExtraFieldTypeCreateView

    def setUp(self):
        self.staff = mixer.blend('auth.User', is_staff=True)

    def test_view(self):
        self.is_callable(user=self.staff)
        # A name is the only required form field.
        self.is_postable(data={'name': 'Bar'}, user=self.staff,
                         to_url_name='payslip_dashboard')
class ExtraFieldTypeUpdateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the UpdateView ``ExtraFieldTypeUpdateView``."""
    view_class = views.ExtraFieldTypeUpdateView

    def setUp(self):
        self.extra_field_type = mixer.blend('payslip.ExtraFieldType')
        self.staff = mixer.blend('auth.User', is_staff=True)

    def get_view_kwargs(self):
        return {'pk': self.extra_field_type.pk}

    def test_view(self):
        # Only the GET path is exercised here.
        self.is_callable(user=self.staff)
class ExtraFieldTypeDeleteViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the DeleteView ``ExtraFieldTypeDeleteView``."""
    view_class = views.ExtraFieldTypeDeleteView

    def setUp(self):
        self.staff = mixer.blend('auth.User', is_staff=True)
        self.extra_field_type = mixer.blend('payslip.ExtraFieldType')

    def get_view_kwargs(self):
        return {'pk': self.extra_field_type.pk}

    def test_view(self):
        self.is_callable(user=self.staff)
        # Confirming the delete redirects back to the dashboard.
        self.is_postable(data={'delete': True}, user=self.staff,
                         to_url_name='payslip_dashboard')
class PaymentCreateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the CreateView ``PaymentCreateView``."""
    view_class = views.PaymentCreateView

    def setUp(self):
        self.staff = mixer.blend('auth.User', is_staff=True)
        self.payment_type = mixer.blend('payslip.PaymentType')
        self.employee = mixer.blend('payslip.Employee')

    def test_view(self):
        self.is_callable(user=self.staff)
        # Minimal valid payment payload (fixed date string fixture).
        data = {
            'payment_type': self.payment_type.id,
            'employee': self.employee.id,
            'amount': '1001.00',
            'date': '2013-01-08 09:35:18',
        }
        self.is_postable(data=data, user=self.staff,
                         to_url_name='payslip_dashboard')
class PaymentUpdateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the UpdateView ``PaymentUpdateView``."""
    view_class = views.PaymentUpdateView

    def setUp(self):
        self.staff = mixer.blend('auth.User', is_staff=True)
        self.payment = mixer.blend('payslip.Payment')
        self.employee = mixer.blend('payslip.Employee')

    def get_view_kwargs(self):
        return {'pk': self.payment.pk}

    def test_view(self):
        self.is_callable(user=self.staff)
        # POST reassigns the existing payment to a different employee.
        data = {
            'payment_type': self.payment.payment_type.id,
            'employee': self.employee.id,
            'amount': '1001.00',
            'date': '2013-01-08 09:35:18',
        }
        self.is_postable(data=data, user=self.staff,
                         to_url_name='payslip_dashboard')
class PaymentDeleteViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the DeleteView ``PaymentDeleteView``."""
    view_class = views.PaymentDeleteView

    def setUp(self):
        self.staff = mixer.blend('auth.User', is_staff=True)
        self.payment = mixer.blend('payslip.Payment')

    def get_view_kwargs(self):
        return {'pk': self.payment.pk}

    def test_view(self):
        self.is_callable(user=self.staff)
        # Confirming the delete redirects back to the dashboard.
        self.is_postable(data={'delete': True}, user=self.staff,
                         to_url_name='payslip_dashboard')
class PaymentTypeCreateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the CreateView ``PaymentTypeCreateView``."""
    view_class = views.PaymentTypeCreateView

    def setUp(self):
        self.staff = mixer.blend('auth.User', is_staff=True)

    def test_view(self):
        self.is_callable(user=self.staff)
        # A name is the only required form field.
        self.is_postable(data={'name': 'Bar'}, user=self.staff,
                         to_url_name='payslip_dashboard')
class PaymentTypeUpdateViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the UpdateView ``PaymentTypeUpdateView``."""
    view_class = views.PaymentTypeUpdateView

    def setUp(self):
        self.staff = mixer.blend('auth.User', is_staff=True)
        self.payment_type = mixer.blend('payslip.PaymentType')

    def get_view_kwargs(self):
        return {'pk': self.payment_type.pk}

    def test_view(self):
        self.is_callable(user=self.staff)
        # Renaming the payment type redirects back to the dashboard.
        self.is_postable(data={'name': 'Bar'}, user=self.staff,
                         to_url_name='payslip_dashboard')
class PaymentTypeDeleteViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the DeleteView ``PaymentTypeDeleteView``."""
    view_class = views.PaymentTypeDeleteView

    def setUp(self):
        self.staff = mixer.blend('auth.User', is_staff=True)
        self.payment_type = mixer.blend('payslip.PaymentType')

    def get_view_kwargs(self):
        return {'pk': self.payment_type.pk}

    def test_view(self):
        self.is_callable(user=self.staff)
        # Confirming the delete redirects back to the dashboard.
        self.is_postable(data={'delete': True}, user=self.staff,
                         to_url_name='payslip_dashboard')
class PayslipGeneratorViewTestCase(ViewRequestFactoryTestMixin, TestCase):
    """Tests for the FormView ``PayslipGeneratorView``."""
    view_class = views.PayslipGeneratorView

    def setUp(self):
        self.staff = mixer.blend('auth.User', is_staff=True)
        self.manager = mixer.blend('payslip.Employee', is_manager=True)
        # Fixtures to test all context functions
        self.payment = mixer.blend('payslip.Payment',
                                   payment_type__rrule='MONTHLY')
        self.employee = self.payment.employee
        self.employee2 = mixer.blend('payslip.Employee',
                                     company=self.manager.company)
        # Recurring payment that started a year ago ...
        mixer.blend('payslip.Payment', payment_type__rrule='MONTHLY',
                    employee=self.employee,
                    date=timezone.now() - timezone.timedelta(days=365))
        # ... and a negative recurring payment that ended yesterday.
        mixer.blend('payslip.Payment', payment_type__rrule='MONTHLY',
                    employee=self.employee, amount=-100,
                    end_date=timezone.now() - timezone.timedelta(days=1))

    def test_view(self):
        self.is_callable(user=self.staff)
        data = {
            'employee': self.employee.id,
            'year': timezone.now().year,
            'month': timezone.now().month,
        }
        # Generate the current month's payslip via AJAX for both employees,
        self.is_postable(data=data, user=self.staff, ajax=True)
        data.update({'employee': self.employee2.id})
        self.is_postable(data=data, user=self.staff, ajax=True)
        # then as a download triggered by the manager.
        data.update({'download': True})
        self.is_postable(data=data, user=self.manager.user, ajax=True)
|
from copy import deepcopy
import os
import numpy as np
from datetime import datetime, timedelta
from snowav.utils.wyhr import calculate_wyhr_from_date
from snowav.utils.OutputReader import iSnobalReader
import netCDF4 as nc
def outputs(run_dirs, wy, properties, start_date=None,
            end_date=None, flight_dates=None, loglevel=None):
    '''
    This uses start_date and end_date to load the snow.nc and em.nc of interest
    within a report period to the outputs format that will be used in process().
    Also returns a clipped run_dirs that only contains paths with the specified
    date range. If start_date and end_date are not supplied, no run_dirs will
    be clipped.

    Note: this is assuming awsm_daily output folders and dates in the snow.nc
    file.

    Args
    -----
    run_dirs : list
        list of run directories
    wy : int
        water year
    properties : list
        band names to load (keys of bands_map below)
    start_date : datetime
        report period start date (optional)
    end_date : datetime
        report period end date (optional)
    flight_dates : array
        (optional) when given, only timesteps matching these dates are
        loaded and only swe_z/depth/density are gathered
    loglevel : str
        'DEBUG' adds a per-file message to the returned log (optional)

    Returns
    ------
    results : dict
        keys: 'outputs', 'dirs', 'run_dirs', 'rdict', 'log'
    '''

    dirs = deepcopy(run_dirs)
    snowbands = []
    embands = []
    log = []
    rdict = {}               # wyhr (int) -> run directory path
    outputs = {'dates': [], 'time': []}

    # Band index of each named property within snow.nc / em.nc.
    bands_map = {'snow': {'depth': 0,
                          'density': 1,
                          'swe_z': 2,
                          'lwc': 3,
                          'temp_surface': 4,
                          'temp_lower': 5,
                          'temp_bulk': 6,
                          'depth_lower_layer': 7,
                          'h20_sat': 8},
                 'em': {'R_n': 0,
                        'H': 1,
                        'L_v_E': 2,
                        'G': 3,
                        'M': 4,
                        'delta_Q': 5,
                        'evap_z': 6,
                        'melt': 7,
                        'swi_z': 8,
                        'coldcont': 9}}

    # Translate requested property names into band index lists, and make
    # an empty result list per property.
    for band in properties:
        if band in [*bands_map['snow'].keys()]:
            snowbands.append(bands_map['snow'][band])
        if band in [*bands_map['em'].keys()]:
            embands.append(bands_map['em'][band])
        outputs[band] = []

    start = deepcopy(start_date)
    end = deepcopy(end_date)

    # Run this with standard processing, and forecast processing
    if flight_dates is None:
        for path in dirs:
            snowfile = os.path.join(path, 'snow.nc')

            if loglevel == 'DEBUG':
                log.append(' Reading date: {}'.format(snowfile))

            # Consider making this a warning, with an else: .remove(path)
            # to catch other files that are in these directories
            if not os.path.isfile(snowfile):
                log.append(' {} not a valid file'.format(snowfile))
                print(' {} not a valid file, snowav may '
                      'error...'.format(snowfile))
                run_dirs.remove(path)
                results = {'outputs': outputs,
                           'dirs': dirs,
                           'run_dirs': run_dirs,
                           'rdict': rdict,
                           'log': log}
                # NOTE(review): returns immediately on the first missing
                # snow.nc, skipping all remaining run dirs — confirm this
                # early abort is intended rather than a continue.
                return results

            ncf = nc.Dataset(snowfile)

            # Catch 'empty' snow.nc and em.nc file from certain awsm crash
            # scenarios in awsm<=0.10.0
            if 'specific_mass' not in ncf.variables:
                log.append(' No "specific_mass" variable in {}, this may be the result '
                           'of awsm crashing without writing variables to file, '
                           'consider deleting and re-running awsm'.format(snowfile))
                raise Exception(' No "specific_mass" variable in {}'.format(snowfile))

            ta = nc.num2date(ncf.variables['time'][:], ncf.variables['time'].units)
            ncf.close()
            ta = np.sort(ta)

            # Default the report window to this file's full time range.
            if start_date is None:
                start = deepcopy(ta[0])
            if end_date is None:
                end = deepcopy(ta[-1])

            for idx, t in enumerate(ta):
                # Only load the rundirs that we need
                if (t.date() >= start.date()) and (t.date() <= end.date()):
                    log.append(' Loading: {}'.format(snowfile))
                    st_hr = calculate_wyhr_from_date(start)
                    en_hr = calculate_wyhr_from_date(end)

                    # NOTE(review): the reader is re-instantiated for every
                    # in-range timestep of this file — presumably each
                    # awsm_daily folder holds one timestep; verify, since
                    # multi-timestep files would be re-read repeatedly.
                    output = iSnobalReader(path, snowbands=snowbands,
                                           embands=embands, wy=wy,
                                           time_start=st_hr, time_end=en_hr)

                    # Make a dict for wyhr-rundir lookup
                    for ot in output.time:
                        rdict[int(ot)] = path

                    # Apply snow bands to outputs
                    for band in properties:
                        if band in [*bands_map['snow'].keys()]:
                            s = bands_map['snow'][band]
                            outputs[band].append(output.snow_data[s][idx, :, :])
                        if band in [*bands_map['em'].keys()]:
                            e = bands_map['em'][band]
                            outputs[band].append(output.em_data[e][idx, :, :])

                    outputs['dates'].append(output.dates[idx])
                    outputs['time'].append(output.time[idx])

                else:
                    # NOTE(review): removes the path once per out-of-range
                    # timestep; a second removal of the same path would
                    # raise ValueError — safe only if each file has at most
                    # one out-of-range timestep. Confirm.
                    run_dirs.remove(path)

    # Run this when flight updates are present to make custom outputs
    else:
        for path in dirs:
            snowfile = os.path.join(path, 'snow.nc')

            # If the run_dirs isn't empty use it, otherwise remove
            if not os.path.isfile(snowfile):
                raise Exception('{} not a valid file'.format(snowfile))

            ncf = nc.Dataset(snowfile)
            ta = nc.num2date(ncf.variables['time'][:], ncf.variables['time'].units)
            ncf.close()

            for idx, t in enumerate(ta):
                # Load only timesteps whose date matches a flight date;
                # bands 0/1/2 are depth/density/swe_z (see bands_map).
                if (t.date() in [x.date() for x in flight_dates]):
                    output = iSnobalReader(path, snowbands=[0, 1, 2], wy=wy)

                    for ot in output.time:
                        rdict[int(ot)] = path

                    outputs['swe_z'].append(output.snow_data[2][idx, :, :])
                    outputs['depth'].append(output.snow_data[0][idx, :, :])
                    outputs['density'].append(output.snow_data[1][idx, :, :])
                    outputs['dates'].append(output.dates[idx])
                    outputs['time'].append(output.time[idx])

    results = {'outputs': outputs,
               'dirs': dirs,
               'run_dirs': run_dirs,
               'rdict': rdict,
               'log': log}

    return results
|
import tensorflow as tf
# Shared layer hyper-parameters: channels-last BatchNorm axis and the
# default kernel initializer used by every conv/dense layer below.
bn_axis = -1
initializer = 'glorot_normal'
def residual_unit(inputs, num_filter, stride, dim_match, name):
    """Pre-activation residual unit: BN - Conv3x3 - BN - PReLU - Conv3x3 - BN.

    Args:
        inputs: input feature tensor.
        num_filter: output channels for both 3x3 convolutions.
        stride: stride of the second convolution (and of the projection
            shortcut when ``dim_match`` is False).
        dim_match: if True, use the identity shortcut; otherwise project
            the shortcut with a strided 1x1 convolution + BN.
        name: prefix for all layer names in this unit.

    Returns:
        The elementwise sum of the main branch and the shortcut.
    """
    # The original body re-declared locals bn_axis/initializer with the
    # same values as the module-level constants; the duplicates are
    # removed and the regularizer constructor is hoisted for readability.
    l2 = tf.keras.regularizers.l2

    x = tf.keras.layers.BatchNormalization(axis=bn_axis, scale=True,
                                           momentum=0.9, epsilon=2e-5,
                                           gamma_regularizer=l2(l=5e-4),
                                           name=name + '_bn1')(inputs)
    x = tf.keras.layers.ZeroPadding2D(padding=(1, 1),
                                      name=name + '_conv1_pad')(x)
    x = tf.keras.layers.Conv2D(num_filter, (3, 3),
                               strides=(1, 1),
                               padding='valid',
                               kernel_initializer=initializer,
                               use_bias=False,
                               kernel_regularizer=l2(l=5e-4),
                               name=name + '_conv1')(x)
    x = tf.keras.layers.BatchNormalization(axis=bn_axis, scale=True,
                                           momentum=0.9, epsilon=2e-5,
                                           gamma_regularizer=l2(l=5e-4),
                                           name=name + '_bn2')(x)
    x = tf.keras.layers.PReLU(name=name + '_relu1',
                              alpha_regularizer=l2(l=5e-4))(x)
    x = tf.keras.layers.ZeroPadding2D(padding=(1, 1),
                                      name=name + '_conv2_pad')(x)
    # The second conv carries the (possibly strided) spatial downsampling.
    x = tf.keras.layers.Conv2D(num_filter, (3, 3),
                               strides=stride,
                               padding='valid',
                               kernel_initializer=initializer,
                               use_bias=False,
                               kernel_regularizer=l2(l=5e-4),
                               name=name + '_conv2')(x)
    x = tf.keras.layers.BatchNormalization(axis=bn_axis, scale=True,
                                           momentum=0.9, epsilon=2e-5,
                                           gamma_regularizer=l2(l=5e-4),
                                           name=name + '_bn3')(x)

    if dim_match:
        shortcut = inputs
    else:
        # Projection shortcut to match channel count and spatial size.
        shortcut = tf.keras.layers.Conv2D(num_filter, (1, 1),
                                          strides=stride,
                                          padding='valid',
                                          kernel_initializer=initializer,
                                          use_bias=False,
                                          kernel_regularizer=l2(l=5e-4),
                                          name=name + '_conv1sc')(inputs)
        shortcut = tf.keras.layers.BatchNormalization(axis=bn_axis, scale=True,
                                                      momentum=0.9,
                                                      epsilon=2e-5,
                                                      gamma_regularizer=l2(l=5e-4),
                                                      name=name + '_sc')(shortcut)
    return x + shortcut
def head(input_shape=[112, 112, 3]):
    """Input stem: 3x3 conv (stride 1) + BN + PReLU.

    Returns the ``(input_tensor, features)`` pair used to assemble the
    full model.
    """
    img_input = tf.keras.layers.Input(shape=input_shape)
    stem = tf.keras.layers.ZeroPadding2D(padding=(1, 1),
                                         name='conv0_pad')(img_input)
    stem = tf.keras.layers.Conv2D(64, (3, 3),
                                  strides=(1, 1),
                                  padding='valid',
                                  kernel_initializer=initializer,
                                  use_bias=False,
                                  kernel_regularizer=tf.keras.regularizers.l2(l=5e-4),
                                  name='conv0')(stem)
    stem = tf.keras.layers.BatchNormalization(axis=bn_axis,
                                              scale=True,
                                              momentum=0.9,
                                              epsilon=2e-5,
                                              gamma_regularizer=tf.keras.regularizers.l2(l=5e-4),
                                              name='bn0')(stem)
    stem = tf.keras.layers.PReLU(name='prelu0',
                                 alpha_regularizer=tf.keras.regularizers.l2(l=5e-4))(stem)
    return img_input, stem
def out_layer(inputs, out_size=512):
    """Embedding output block: BN -> Dropout(0.4) -> flatten -> Dense -> BN.

    Returns the ``out_size``-dimensional embedding tensor (no activation).
    """
    reg = tf.keras.regularizers.l2
    x = tf.keras.layers.BatchNormalization(axis=bn_axis,
                                           scale=True,
                                           momentum=0.9,
                                           epsilon=2e-5,
                                           gamma_regularizer=reg(l=5e-4),
                                           name='bn1')(inputs)
    x = tf.keras.layers.Dropout(0.4)(x)
    # Flatten the spatial feature map before the dense embedding layer.
    h, w, c = inputs.shape[1], inputs.shape[2], inputs.shape[3]
    x = tf.keras.layers.Reshape([h * w * c], name='reshapelayer')(x)
    x = tf.keras.layers.Dense(out_size,
                              name='E_denseLayer',
                              kernel_initializer=initializer,
                              kernel_regularizer=reg(l=5e-4),
                              bias_regularizer=reg(l=5e-4))(x)
    # Final BN without the gamma scale, matching the original config.
    x = tf.keras.layers.BatchNormalization(axis=bn_axis,
                                           scale=False,
                                           momentum=0.9,
                                           epsilon=2e-5,
                                           name='fc1')(x)
    return x
def body_resnet(x, no_layers=34):
    """Stack the four residual stages of the ResNet body.

    Args:
        x: input feature tensor (output of ``head``).
        no_layers: depth selector; must be one of the supported depths.

    Returns:
        The feature tensor after all residual stages.

    Raises:
        ValueError: if ``no_layers`` is not a supported depth.  (The
            original if/elif chain silently left ``units`` unbound for
            unknown depths and crashed later with ``NameError``.)
    """
    units_by_depth = {
        10: [1, 1, 1, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        # NOTE(review): 50 maps to the same unit counts as 34 because this
        # body uses basic (non-bottleneck) units, unlike canonical
        # ResNet-50 — confirm intended.
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if no_layers not in units_by_depth:
        raise ValueError('Unsupported no_layers: %r' % (no_layers,))
    units = units_by_depth[no_layers]

    num_stage = 4
    filter_list = [64, 64, 128, 256, 512]
    for i in range(num_stage):
        # First unit of each stage downsamples (stride 2) and widens.
        x = residual_unit(x, filter_list[i + 1], (2, 2), False,
                          name='stage%d_unit%d' % (i + 1, 1))
        for j in range(units[i] - 1):
            x = residual_unit(x, filter_list[i + 1], (1, 1), True,
                              name='stage%d_unit%d' % (i + 1, j + 2))
    return x
def ResNet(no_layers=34):
    """Assemble the full network: input stem + residual body + embedding.

    NOTE(review): the model name is hard-coded to 'resnet50' even though
    the default depth is 34 — confirm whether callers rely on that name.
    """
    inputs, features = head()
    features = body_resnet(features, no_layers=no_layers)
    out = out_layer(features)
    model = tf.keras.models.Model(inputs, out, name='resnet50')
    # Explicitly mark the model and every layer trainable.
    model.trainable = True
    for layer in model.layers:
        layer.trainable = True
    return model
import py, sys
from pypy.objspace.std.model import registerimplementation, W_Object
from pypy.objspace.std.register_all import register_all
from pypy.objspace.std.settype import set_typedef as settypedef
from pypy.objspace.std.frozensettype import frozenset_typedef as frozensettypedef
from pypy.interpreter import gateway
from pypy.interpreter.argument import Signature
from pypy.interpreter.error import OperationError, operationerrfmt
from pypy.rlib.objectmodel import r_dict, we_are_translated, specialize
from pypy.rlib.debug import mark_dict_non_null
from pypy.rlib import rerased
def _is_str(space, w_key):
    """True iff *w_key*'s app-level type is exactly ``str``."""
    w_keytype = space.type(w_key)
    return space.is_w(w_keytype, space.w_str)
def _never_equal_to_string(space, w_lookup_type):
    """ Handles the case of a non string key lookup.
    Types that have a sane hash/eq function should allow us to return True
    directly to signal that the key is not in the dict in any case.
    XXX The types should provide such a flag. """
    # XXX there are many more types
    if space.is_w(w_lookup_type, space.w_NoneType):
        return True
    if space.is_w(w_lookup_type, space.w_int):
        return True
    if space.is_w(w_lookup_type, space.w_bool):
        return True
    return space.is_w(w_lookup_type, space.w_float)
class W_DictMultiObject(W_Object):
    """App-level ``dict`` object that delegates all operations to a
    pluggable storage strategy (see the DictStrategy classes below)."""
    from pypy.objspace.std.dicttype import dict_typedef as typedef

    @staticmethod
    def allocate_and_init_instance(space, w_type=None, module=False,
                                   instance=False, strdict=False, kwargs=False):
        # Pick the most specialised starting strategy for the new dict;
        # the hint flags (module/instance/strdict/kwargs) are only legal
        # for plain dicts, hence the w_type asserts.
        if space.config.objspace.std.withcelldict and module:
            from pypy.objspace.std.celldict import ModuleDictStrategy
            assert w_type is None
            # every module needs its own strategy, because the strategy stores
            # the version tag
            strategy = ModuleDictStrategy(space)
        elif instance or strdict or module:
            assert w_type is None
            strategy = space.fromcache(StringDictStrategy)
        elif kwargs:
            assert w_type is None
            from pypy.objspace.std.kwargsdict import KwargsDictStrategy
            strategy = space.fromcache(KwargsDictStrategy)
        else:
            # Generic case: start empty and specialise on first insertion.
            strategy = space.fromcache(EmptyDictStrategy)
        if w_type is None:
            w_type = space.w_dict
        storage = strategy.get_empty_storage()
        w_self = space.allocate_instance(W_DictMultiObject, w_type)
        W_DictMultiObject.__init__(w_self, space, strategy, storage)
        return w_self

    def __init__(self, space, strategy, storage):
        self.space = space
        self.strategy = strategy    # current DictStrategy instance
        self.dstorage = storage     # erased storage owned by the strategy

    def __repr__(w_self):
        """ representation for debugging purposes """
        return "%s(%s)" % (w_self.__class__.__name__, w_self.strategy)

    def unwrap(w_dict, space):
        # Build an interp-level dict by unwrapping every (key, value) pair.
        result = {}
        items = w_dict.items()
        for w_pair in items:
            key, val = space.unwrap(w_pair)
            result[key] = val
        return result

    def missing_method(w_dict, space, w_key):
        # Call __missing__(key) only for dict subclasses, never for plain
        # dicts (matches CPython's lookup rule).
        if not space.is_w(space.type(w_dict), space.w_dict):
            w_missing = space.lookup(w_dict, "__missing__")
            if w_missing is None:
                return None
            return space.get_and_call_function(w_missing, w_dict, w_key)
        else:
            return None

    def initialize_content(w_self, list_pairs_w):
        # Bulk-insert wrapped (key, value) pairs via the normal setitem path.
        for w_k, w_v in list_pairs_w:
            w_self.setitem(w_k, w_v)
def _add_indirections():
    # Generate one forwarding method on W_DictMultiObject per strategy
    # operation: each simply dispatches to the same-named method on the
    # dict's *current* strategy, passing the dict itself first.
    dict_methods = "setitem setitem_str getitem \
                    getitem_str delitem length \
                    clear w_keys values \
                    items iter setdefault \
                    popitem listview_str listview_int \
                    view_as_kwargs".split()

    def make_method(method):
        # 'method' is captured per call, so each generated f binds the
        # right name (no late-binding-closure problem).
        def f(self, *args):
            return getattr(self.strategy, method)(self, *args)
        f.func_name = method   # Python 2 attribute; keeps tracebacks readable
        return f

    for method in dict_methods:
        setattr(W_DictMultiObject, method, make_method(method))

_add_indirections()
class DictStrategy(object):
    """Abstract base for dict storage strategies.

    Provides generic (sometimes slow) fallback implementations built on
    ``self.iter``; concrete strategies override them with faster versions.
    """

    def __init__(self, space):
        self.space = space

    def get_empty_storage(self):
        raise NotImplementedError

    def w_keys(self, w_dict):
        # Generic: drain the iterator; a None key signals exhaustion.
        iterator = self.iter(w_dict)
        result = []
        while 1:
            w_key, w_value = iterator.next()
            if w_key is not None:
                result.append(w_key)
            else:
                return self.space.newlist(result)

    def values(self, w_dict):
        # Generic: drain the iterator; a None value signals exhaustion.
        iterator = self.iter(w_dict)
        result = []
        while 1:
            w_key, w_value = iterator.next()
            if w_value is not None:
                result.append(w_value)
            else:
                return result

    def items(self, w_dict):
        # Generic: build wrapped (key, value) tuples from the iterator.
        iterator = self.iter(w_dict)
        result = []
        while 1:
            w_key, w_value = iterator.next()
            if w_key is not None:
                result.append(self.space.newtuple([w_key, w_value]))
            else:
                return result

    def popitem(self, w_dict):
        # this is a bad implementation: if we call popitem() repeatedly,
        # it ends up taking n**2 time, because the next() calls below
        # will take longer and longer. But all interesting strategies
        # provide a better one.
        space = self.space
        iterator = self.iter(w_dict)
        w_key, w_value = iterator.next()
        self.delitem(w_dict, w_key)
        return (w_key, w_value)

    def clear(self, w_dict):
        # Reset the dict to the empty strategy with fresh empty storage.
        strategy = self.space.fromcache(EmptyDictStrategy)
        storage = strategy.get_empty_storage()
        w_dict.strategy = strategy
        w_dict.dstorage = storage

    def listview_str(self, w_dict):
        # Overridden by strategies that can expose keys as a plain str list.
        return None

    def listview_int(self, w_dict):
        # Overridden by strategies that can expose keys as a plain int list.
        return None

    def view_as_kwargs(self, w_dict):
        # Overridden by strategies that can expose (keys, values) for a
        # keyword-arguments call; (None, None) means "not supported".
        return (None, None)
class EmptyDictStrategy(DictStrategy):
    """Strategy for dicts that contain nothing.

    The storage is an erased ``None``; the first insertion switches the
    dict to a specialised strategy chosen from the key's type.
    """
    erase, unerase = rerased.new_erasing_pair("empty")
    erase = staticmethod(erase)
    unerase = staticmethod(unerase)

    def get_empty_storage(self):
        return self.erase(None)

    def switch_to_correct_strategy(self, w_dict, w_key):
        # Choose the specialised strategy based on the first key's type.
        withidentitydict = self.space.config.objspace.std.withidentitydict
        if type(w_key) is self.space.StringObjectCls:
            self.switch_to_string_strategy(w_dict)
            return
        w_type = self.space.type(w_key)
        if self.space.is_w(w_type, self.space.w_int):
            self.switch_to_int_strategy(w_dict)
        elif withidentitydict and w_type.compares_by_identity():
            self.switch_to_identity_strategy(w_dict)
        else:
            self.switch_to_object_strategy(w_dict)

    def switch_to_string_strategy(self, w_dict):
        strategy = self.space.fromcache(StringDictStrategy)
        storage = strategy.get_empty_storage()
        w_dict.strategy = strategy
        w_dict.dstorage = storage

    def switch_to_int_strategy(self, w_dict):
        strategy = self.space.fromcache(IntDictStrategy)
        storage = strategy.get_empty_storage()
        w_dict.strategy = strategy
        w_dict.dstorage = storage

    def switch_to_identity_strategy(self, w_dict):
        from pypy.objspace.std.identitydict import IdentityDictStrategy
        strategy = self.space.fromcache(IdentityDictStrategy)
        storage = strategy.get_empty_storage()
        w_dict.strategy = strategy
        w_dict.dstorage = storage

    def switch_to_object_strategy(self, w_dict):
        strategy = self.space.fromcache(ObjectDictStrategy)
        storage = strategy.get_empty_storage()
        w_dict.strategy = strategy
        w_dict.dstorage = storage

    def getitem(self, w_dict, w_key):
        #return w_value or None
        # in case the key is unhashable, try to hash it
        self.space.hash(w_key)
        # return None anyway
        return None

    def getitem_str(self, w_dict, key):
        #return w_value or None
        return None

    def setdefault(self, w_dict, w_key, w_default):
        # here the dict is always empty
        self.switch_to_correct_strategy(w_dict, w_key)
        w_dict.setitem(w_key, w_default)
        return w_default

    def setitem(self, w_dict, w_key, w_value):
        # First insertion: specialise, then retry on the new strategy.
        self.switch_to_correct_strategy(w_dict, w_key)
        w_dict.setitem(w_key, w_value)

    def setitem_str(self, w_dict, key, w_value):
        # Interp-level str key: go straight to the string strategy.
        self.switch_to_string_strategy(w_dict)
        w_dict.setitem_str(key, w_value)

    def delitem(self, w_dict, w_key):
        # in case the key is unhashable, try to hash it
        self.space.hash(w_key)
        raise KeyError

    def length(self, w_dict):
        return 0

    def iter(self, w_dict):
        return EmptyIteratorImplementation(self.space, self, w_dict)

    def clear(self, w_dict):
        # Already empty; nothing to reset.
        return

    def popitem(self, w_dict):
        raise KeyError

    def view_as_kwargs(self, w_dict):
        return ([], [])
# Register the dict implementation with the multimethod machinery.
registerimplementation(W_DictMultiObject)

# DictImplementation lattice
# XXX fix me

# Iterator Implementation base classes
class IteratorImplementation(object):
    """Base class for iterators over a W_DictMultiObject.

    Detects concurrent mutation by comparing the dict's length against the
    length captured at creation time, and survives strategy switches by
    re-looking-up the key when the strategy changed under our feet.
    """

    def __init__(self, space, strategy, implementation):
        self.space = space
        self.strategy = strategy
        self.dictimplementation = implementation
        # Length at creation time; compared on every next() call.
        self.len = implementation.length()
        self.pos = 0

    def next(self):
        """Return the next (w_key, w_value) pair, or (None, None) when
        exhausted.  Raises RuntimeError if the dict was mutated."""
        if self.dictimplementation is None:
            # Already exhausted.
            return None, None
        if self.len != self.dictimplementation.length():
            self.len = -1   # Make this error state sticky
            raise OperationError(self.space.w_RuntimeError,
                     self.space.wrap("dictionary changed size during iteration"))
        # look for the next entry
        if self.pos < self.len:
            result = self.next_entry()
            self.pos += 1
            if self.strategy is self.dictimplementation.strategy:
                return result      # common case
            else:
                # waaa, obscure case: the strategy changed, but not the
                # length of the dict. The (key, value) pair in 'result'
                # might be out-of-date. We try to explicitly look up
                # the key in the dict.
                w_key = result[0]
                w_value = self.dictimplementation.getitem(w_key)
                if w_value is None:
                    self.len = -1   # Make this error state sticky
                    raise OperationError(self.space.w_RuntimeError,
                        self.space.wrap("dictionary changed during iteration"))
                return (w_key, w_value)
        # no more entries
        self.dictimplementation = None
        return None, None

    def next_entry(self):
        """ Purely abstract method
        """
        raise NotImplementedError

    def length(self):
        # Number of entries still to be yielded (0 once exhausted).
        if self.dictimplementation is not None:
            return self.len - self.pos
        return 0
class EmptyIteratorImplementation(IteratorImplementation):
    # Iterator over an empty dict: immediately exhausted.
    def next(self):
        return (None, None)
# concrete subclasses of the above

class AbstractTypedStrategy(object):
    """Mixin for strategies whose storage is a plain dict keyed by unwrapped
    values of one specific type.

    Concrete subclasses supply erase/unerase, wrap/unwrap, is_correct_type,
    get_empty_storage and _never_equal_to.  Operations on keys of any other
    type degrade the dict to ObjectDictStrategy and retry there.
    """
    _mixin_ = True

    @staticmethod
    def erase(storage):
        raise NotImplementedError("abstract base class")

    @staticmethod
    def unerase(obj):
        raise NotImplementedError("abstract base class")

    def wrap(self, unwrapped):
        raise NotImplementedError

    def unwrap(self, wrapped):
        raise NotImplementedError

    def is_correct_type(self, w_obj):
        raise NotImplementedError("abstract base class")

    def get_empty_storage(self):
        raise NotImplementedError("abstract base class")

    def _never_equal_to(self, w_lookup_type):
        raise NotImplementedError("abstract base class")

    def setitem(self, w_dict, w_key, w_value):
        # Fast path for a key of the specialized type; otherwise degrade.
        # (Removed an unused local 'space = self.space'.)
        if self.is_correct_type(w_key):
            self.unerase(w_dict.dstorage)[self.unwrap(w_key)] = w_value
            return
        else:
            self.switch_to_object_strategy(w_dict)
            w_dict.setitem(w_key, w_value)

    def setitem_str(self, w_dict, key, w_value):
        # Generic fallback: wrap the unwrapped string key and degrade.
        self.switch_to_object_strategy(w_dict)
        w_dict.setitem(self.space.wrap(key), w_value)

    def setdefault(self, w_dict, w_key, w_default):
        # (Removed an unused local 'space = self.space'.)
        if self.is_correct_type(w_key):
            return self.unerase(w_dict.dstorage).setdefault(self.unwrap(w_key), w_default)
        else:
            self.switch_to_object_strategy(w_dict)
            return w_dict.setdefault(w_key, w_default)

    def delitem(self, w_dict, w_key):
        # (Removed unused locals 'space' and 'w_key_type'.)
        if self.is_correct_type(w_key):
            del self.unerase(w_dict.dstorage)[self.unwrap(w_key)]
            return
        else:
            self.switch_to_object_strategy(w_dict)
            return w_dict.delitem(w_key)

    def length(self, w_dict):
        return len(self.unerase(w_dict.dstorage))

    def getitem_str(self, w_dict, key):
        return self.getitem(w_dict, self.space.wrap(key))

    def getitem(self, w_dict, w_key):
        space = self.space
        if self.is_correct_type(w_key):
            return self.unerase(w_dict.dstorage).get(self.unwrap(w_key), None)
        elif self._never_equal_to(space.type(w_key)):
            # The key's type can never compare equal to stored keys:
            # a guaranteed miss, no need to degrade.
            return None
        else:
            self.switch_to_object_strategy(w_dict)
            return w_dict.getitem(w_key)

    def w_keys(self, w_dict):
        l = [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()]
        return self.space.newlist(l)

    def values(self, w_dict):
        # Values are stored wrapped already.
        return self.unerase(w_dict.dstorage).values()

    def items(self, w_dict):
        space = self.space
        dict_w = self.unerase(w_dict.dstorage)
        return [space.newtuple([self.wrap(key), w_value])
                for (key, w_value) in dict_w.iteritems()]

    def popitem(self, w_dict):
        key, value = self.unerase(w_dict.dstorage).popitem()
        return (self.wrap(key), value)

    def clear(self, w_dict):
        self.unerase(w_dict.dstorage).clear()

    def switch_to_object_strategy(self, w_dict):
        # Copy every (wrapped key, value) pair into a fresh object-strategy
        # storage, then install it on w_dict.
        d = self.unerase(w_dict.dstorage)
        strategy = self.space.fromcache(ObjectDictStrategy)
        d_new = strategy.unerase(strategy.get_empty_storage())
        for key, value in d.iteritems():
            d_new[self.wrap(key)] = value
        w_dict.strategy = strategy
        w_dict.dstorage = strategy.erase(d_new)
class ObjectDictStrategy(AbstractTypedStrategy, DictStrategy):
    """Generic fallback strategy: keys of arbitrary wrapped types stored in
    an r_dict hashed/compared with the application-level hash and eq."""

    erase, unerase = rerased.new_erasing_pair("object")
    erase = staticmethod(erase)
    unerase = staticmethod(unerase)

    def wrap(self, unwrapped):
        # Keys are stored wrapped; wrap/unwrap are identities.
        return unwrapped

    def unwrap(self, wrapped):
        return wrapped

    def is_correct_type(self, w_obj):
        # Every wrapped object is acceptable as a key.
        return True

    def get_empty_storage(self):
        new_dict = r_dict(self.space.eq_w, self.space.hash_w,
                          force_non_null=True)
        return self.erase(new_dict)

    def _never_equal_to(self, w_lookup_type):
        return False

    def iter(self, w_dict):
        return ObjectIteratorImplementation(self.space, self, w_dict)

    def w_keys(self, w_dict):
        # Keys are already wrapped, so no per-key wrapping is needed.
        return self.space.newlist(self.unerase(w_dict.dstorage).keys())
class StringDictStrategy(AbstractTypedStrategy, DictStrategy):
    """Strategy for dicts whose keys are all plain strings; storage is an
    ordinary dict mapping unwrapped strings to wrapped values."""

    erase, unerase = rerased.new_erasing_pair("string")
    erase = staticmethod(erase)
    unerase = staticmethod(unerase)

    def wrap(self, unwrapped):
        return self.space.wrap(unwrapped)

    def unwrap(self, wrapped):
        return self.space.str_w(wrapped)

    def is_correct_type(self, w_obj):
        # Exact str only; str subclasses must not take this fast path.
        space = self.space
        return space.is_w(space.type(w_obj), space.w_str)

    def get_empty_storage(self):
        res = {}
        mark_dict_non_null(res)
        return self.erase(res)

    def _never_equal_to(self, w_lookup_type):
        return _never_equal_to_string(self.space, w_lookup_type)

    def setitem_str(self, w_dict, key, w_value):
        assert key is not None
        self.unerase(w_dict.dstorage)[key] = w_value

    def getitem(self, w_dict, w_key):
        space = self.space
        # -- This is called extremely often. Hack for performance --
        if type(w_key) is space.StringObjectCls:
            return self.getitem_str(w_dict, w_key.unwrap(space))
        # -- End of performance hack --
        return AbstractTypedStrategy.getitem(self, w_dict, w_key)

    def getitem_str(self, w_dict, key):
        assert key is not None
        return self.unerase(w_dict.dstorage).get(key, None)

    def listview_str(self, w_dict):
        # Unwrapped view of the keys, used by space-level optimizations.
        return self.unerase(w_dict.dstorage).keys()

    def iter(self, w_dict):
        return StrIteratorImplementation(self.space, self, w_dict)

    def w_keys(self, w_dict):
        return self.space.newlist_str(self.listview_str(w_dict))
class _WrappedIteratorMixin(object):
    # Mixin for iterating storages with *unwrapped* keys: next_entry()
    # wraps each key before returning it.
    _mixin_ = True

    def __init__(self, space, strategy, dictimplementation):
        IteratorImplementation.__init__(self, space, strategy, dictimplementation)
        self.iterator = strategy.unerase(dictimplementation.dstorage).iteritems()

    def next_entry(self):
        # note that this 'for' loop only runs once, at most
        for key, w_value in self.iterator:
            return self.space.wrap(key), w_value
        else:
            return None, None
class _UnwrappedIteratorMixin(object):
    # Mixin for iterating storages whose keys are already wrapped, so
    # next_entry() returns them unchanged.
    # FIX: made this a new-style class for consistency with
    # _WrappedIteratorMixin(object) above; it was accidentally old-style.
    _mixin_ = True

    def __init__(self, space, strategy, dictimplementation):
        IteratorImplementation.__init__(self, space, strategy, dictimplementation)
        self.iterator = strategy.unerase(dictimplementation.dstorage).iteritems()

    def next_entry(self):
        # note that this 'for' loop only runs once, at most
        for w_key, w_value in self.iterator:
            return w_key, w_value
        else:
            return None, None
class StrIteratorImplementation(_WrappedIteratorMixin, IteratorImplementation):
    # Iterator over a string-keyed storage; keys are wrapped on the fly.
    pass
class IntDictStrategy(AbstractTypedStrategy, DictStrategy):
    """Strategy for dicts whose keys are all plain ints; storage maps
    unwrapped ints to wrapped values."""

    erase, unerase = rerased.new_erasing_pair("int")
    erase = staticmethod(erase)
    unerase = staticmethod(unerase)

    def wrap(self, unwrapped):
        return self.space.wrap(unwrapped)

    def unwrap(self, wrapped):
        return self.space.int_w(wrapped)

    def get_empty_storage(self):
        return self.erase({})

    def is_correct_type(self, w_obj):
        # Exact int only; int subclasses must not take this fast path.
        space = self.space
        return space.is_w(space.type(w_obj), space.w_int)

    def _never_equal_to(self, w_lookup_type):
        space = self.space
        # XXX there are many more types
        return (space.is_w(w_lookup_type, space.w_NoneType) or
                space.is_w(w_lookup_type, space.w_str) or
                space.is_w(w_lookup_type, space.w_unicode)
                )

    def iter(self, w_dict):
        return IntIteratorImplementation(self.space, self, w_dict)

    def listview_int(self, w_dict):
        # Unwrapped view of the keys, used by space-level optimizations.
        return self.unerase(w_dict.dstorage).keys()

    # XXX there is no space.newlist_int yet to implement w_keys more efficiently
class IntIteratorImplementation(_WrappedIteratorMixin, IteratorImplementation):
    # Iterator over an int-keyed storage; keys are wrapped on the fly.
    pass

class ObjectIteratorImplementation(_UnwrappedIteratorMixin, IteratorImplementation):
    # Iterator over an object-keyed storage; keys are already wrapped.
    pass
# Signature of dict() / dict.update(): one optional positional argument
# ('seq_or_map') plus arbitrary keyword arguments.
init_signature = Signature(['seq_or_map'], None, 'kwargs')
init_defaults = [None]
def update1(space, w_dict, w_data):
    """Update w_dict from w_data: a mapping (anything with a 'keys'
    attribute) or a sequence of key/value pairs."""
    if space.findattr(w_data, space.wrap("keys")) is None:
        # no 'keys' method, so we assume it is a sequence of pairs
        for w_pair in space.listview(w_data):
            pair = space.fixedview(w_pair)
            if len(pair) != 2:
                raise OperationError(space.w_ValueError,
                                     space.wrap("sequence of pairs expected"))
            w_key, w_value = pair
            w_dict.setitem(w_key, w_value)
    else:
        if isinstance(w_data, W_DictMultiObject):    # optimization case only
            update1_dict_dict(space, w_dict, w_data)
        else:
            # general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])"
            w_keys = space.call_method(w_data, "keys")
            for w_key in space.listview(w_keys):
                w_value = space.getitem(w_data, w_key)
                w_dict.setitem(w_key, w_value)
def update1_dict_dict(space, w_dict, w_data):
    """Copy every item of the dict w_data into w_dict (fast path that
    bypasses the app-level keys()/__getitem__ protocol)."""
    it = w_data.iter()
    while True:
        w_key, w_value = it.next()
        # A None key signals that the iterator is exhausted.
        if w_key is None:
            break
        w_dict.setitem(w_key, w_value)
def init_or_update(space, w_dict, __args__, funcname):
    # Shared implementation of dict() and dict.update(): parse the single
    # optional positional argument plus keyword arguments, then apply both.
    w_src, w_kwds = __args__.parse_obj(
            None, funcname,
            init_signature, # signature
            init_defaults)  # default argument
    if w_src is not None:
        update1(space, w_dict, w_src)
    if space.is_true(w_kwds):
        update1(space, w_dict, w_kwds)

def init__DictMulti(space, w_dict, __args__):
    # dict.__init__
    init_or_update(space, w_dict, __args__, 'dict')

def dict_update__DictMulti(space, w_dict, __args__):
    # dict.update
    init_or_update(space, w_dict, __args__, 'dict.update')
def getitem__DictMulti_ANY(space, w_dict, w_key):
    # dict.__getitem__, falling back to __missing__ before raising KeyError.
    w_value = w_dict.getitem(w_key)
    if w_value is not None:
        return w_value
    w_missing_item = w_dict.missing_method(space, w_key)
    if w_missing_item is not None:
        return w_missing_item
    space.raise_key_error(w_key)

def setitem__DictMulti_ANY_ANY(space, w_dict, w_newkey, w_newvalue):
    w_dict.setitem(w_newkey, w_newvalue)

def delitem__DictMulti_ANY(space, w_dict, w_key):
    try:
        w_dict.delitem(w_key)
    except KeyError:
        # Translate the interpreter-level KeyError to an app-level one.
        space.raise_key_error(w_key)

def len__DictMulti(space, w_dict):
    return space.wrap(w_dict.length())

def contains__DictMulti_ANY(space, w_dict, w_key):
    # A None result from getitem means "absent" (never a wrapped None).
    return space.newbool(w_dict.getitem(w_key) is not None)

# dict.has_key() shares the implementation of the 'in' operator.
dict_has_key__DictMulti_ANY = contains__DictMulti_ANY
def iter__DictMulti(space, w_dict):
    # Iterating a dict yields its keys.
    return W_DictMultiIterObject(space, w_dict.iter(), KEYSITER)

def eq__DictMulti_DictMulti(space, w_left, w_right):
    # Equal iff identical, or same length and every key maps to an
    # equal value in both dicts.
    if space.is_w(w_left, w_right):
        return space.w_True
    if w_left.length() != w_right.length():
        return space.w_False
    iteratorimplementation = w_left.iter()
    while 1:
        w_key, w_val = iteratorimplementation.next()
        if w_key is None:
            break
        w_rightval = w_right.getitem(w_key)
        if w_rightval is None:
            return space.w_False
        if not space.eq_w(w_val, w_rightval):
            return space.w_False
    return space.w_True
def characterize(space, w_a, w_b):
    """(similar to CPython)
    Returns the smallest key of w_a for which w_b's value is different or
    absent, together with w_a's value for that key (or (None, None))."""
    w_smallest_diff_a_key = None
    w_its_value = None
    iteratorimplementation = w_a.iter()
    while 1:
        w_key, w_val = iteratorimplementation.next()
        if w_key is None:
            break
        if w_smallest_diff_a_key is None or space.is_true(space.lt(w_key, w_smallest_diff_a_key)):
            w_bvalue = w_b.getitem(w_key)
            if w_bvalue is None:
                # Key missing from w_b entirely.
                w_its_value = w_val
                w_smallest_diff_a_key = w_key
            else:
                if not space.eq_w(w_val, w_bvalue):
                    # Same key present but with a different value.
                    w_its_value = w_val
                    w_smallest_diff_a_key = w_key
    return w_smallest_diff_a_key, w_its_value
def lt__DictMulti_DictMulti(space, w_left, w_right):
    # dict ordering (Python 2 semantics): shorter dict is smaller;
    # equal-length dicts compare via their characterizing differences.
    # Different sizes, no problem
    if w_left.length() < w_right.length():
        return space.w_True
    if w_left.length() > w_right.length():
        return space.w_False
    # Same size
    w_leftdiff, w_leftval = characterize(space, w_left, w_right)
    if w_leftdiff is None:
        # No difference at all: the dicts are equal, so not "less than".
        return space.w_False
    w_rightdiff, w_rightval = characterize(space, w_right, w_left)
    if w_rightdiff is None:
        # w_leftdiff is not None, w_rightdiff is None
        return space.w_True
    w_res = space.lt(w_leftdiff, w_rightdiff)
    if (not space.is_true(w_res) and
        space.eq_w(w_leftdiff, w_rightdiff) and
        w_rightval is not None):
        # Keys tie; fall back to comparing the differing values.
        w_res = space.lt(w_leftval, w_rightval)
    return w_res
# Implementations of the remaining dict methods: copy, the various list /
# iterator / view accessors, clear, get and setdefault.

def dict_copy__DictMulti(space, w_self):
    w_new = W_DictMultiObject.allocate_and_init_instance(space)
    update1_dict_dict(space, w_new, w_self)
    return w_new

def dict_items__DictMulti(space, w_self):
    return space.newlist(w_self.items())

def dict_keys__DictMulti(space, w_self):
    return w_self.w_keys()

def dict_values__DictMulti(space, w_self):
    return space.newlist(w_self.values())

def dict_iteritems__DictMulti(space, w_self):
    return W_DictMultiIterObject(space, w_self.iter(), ITEMSITER)

def dict_iterkeys__DictMulti(space, w_self):
    return W_DictMultiIterObject(space, w_self.iter(), KEYSITER)

def dict_itervalues__DictMulti(space, w_self):
    return W_DictMultiIterObject(space, w_self.iter(), VALUESITER)

def dict_viewitems__DictMulti(space, w_self):
    return W_DictViewItemsObject(space, w_self)

def dict_viewkeys__DictMulti(space, w_self):
    return W_DictViewKeysObject(space, w_self)

def dict_viewvalues__DictMulti(space, w_self):
    return W_DictViewValuesObject(space, w_self)

def dict_clear__DictMulti(space, w_self):
    w_self.clear()

def dict_get__DictMulti_ANY_ANY(space, w_dict, w_key, w_default):
    w_value = w_dict.getitem(w_key)
    if w_value is not None:
        return w_value
    else:
        return w_default

def dict_setdefault__DictMulti_ANY_ANY(space, w_dict, w_key, w_default):
    return w_dict.setdefault(w_key, w_default)
def dict_pop__DictMulti_ANY(space, w_dict, w_key, defaults_w):
    # dict.pop(key[, default]); defaults_w carries the optional default.
    len_defaults = len(defaults_w)
    if len_defaults > 1:
        raise operationerrfmt(space.w_TypeError,
                              "pop expected at most 2 arguments, got %d",
                              1 + len_defaults)
    w_item = w_dict.getitem(w_key)
    if w_item is None:
        if len_defaults > 0:
            return defaults_w[0]
        else:
            space.raise_key_error(w_key)
    else:
        w_dict.delitem(w_key)
        return w_item

def dict_popitem__DictMulti(space, w_dict):
    try:
        w_key, w_value = w_dict.popitem()
    except KeyError:
        raise OperationError(space.w_KeyError,
                             space.wrap("popitem(): dictionary is empty"))
    return space.newtuple([w_key, w_value])
# ____________________________________________________________
# Iteration

# Iterator kinds, stored in W_DictMultiIterObject.itertype.
KEYSITER = 0
ITEMSITER = 1
VALUESITER = 2

class W_DictMultiIterObject(W_Object):
    # App-level dict iterator; delegates to an IteratorImplementation.
    from pypy.objspace.std.dicttype import dictiter_typedef as typedef

    _immutable_fields_ = ["iteratorimplementation", "itertype"]

    def __init__(w_self, space, iteratorimplementation, itertype):
        w_self.space = space
        w_self.iteratorimplementation = iteratorimplementation
        w_self.itertype = itertype

registerimplementation(W_DictMultiIterObject)
def iter__DictMultiIterObject(space, w_dictiter):
    # An iterator is its own iterator.
    return w_dictiter

def next__DictMultiIterObject(space, w_dictiter):
    # Advance the underlying iterator; a None key signals exhaustion, in
    # which case app-level StopIteration is raised.
    iteratorimplementation = w_dictiter.iteratorimplementation
    w_key, w_value = iteratorimplementation.next()
    if w_key is not None:
        itertype = w_dictiter.itertype
        if itertype == KEYSITER:
            return w_key
        elif itertype == VALUESITER:
            return w_value
        elif itertype == ITEMSITER:
            return space.newtuple([w_key, w_value])
        else:
            assert 0, "should be unreachable"
    raise OperationError(space.w_StopIteration, space.w_None)
# ____________________________________________________________
# Views

class W_DictViewObject(W_Object):
    # Common base of the three dict views; only stores the viewed dict.
    def __init__(w_self, space, w_dict):
        w_self.w_dict = w_dict

class W_DictViewKeysObject(W_DictViewObject):
    from pypy.objspace.std.dicttype import dict_keys_typedef as typedef
registerimplementation(W_DictViewKeysObject)

class W_DictViewItemsObject(W_DictViewObject):
    from pypy.objspace.std.dicttype import dict_items_typedef as typedef
registerimplementation(W_DictViewItemsObject)

class W_DictViewValuesObject(W_DictViewObject):
    from pypy.objspace.std.dicttype import dict_values_typedef as typedef
registerimplementation(W_DictViewValuesObject)
def len__DictViewKeys(space, w_dictview):
    # All three views have the length of the underlying dict.
    return space.len(w_dictview.w_dict)
len__DictViewItems = len__DictViewValues = len__DictViewKeys

def iter__DictViewKeys(space, w_dictview):
    return dict_iterkeys__DictMulti(space, w_dictview.w_dict)

def iter__DictViewItems(space, w_dictview):
    return dict_iteritems__DictMulti(space, w_dictview.w_dict)

def iter__DictViewValues(space, w_dictview):
    return dict_itervalues__DictMulti(space, w_dictview.w_dict)
def all_contained_in(space, w_dictview, w_otherview):
    # True iff every item of w_dictview is contained in w_otherview.
    w_iter = space.iter(w_dictview)
    assert isinstance(w_iter, W_DictMultiIterObject)
    while True:
        try:
            w_item = space.next(w_iter)
        except OperationError, e:
            if not e.match(space, space.w_StopIteration):
                raise
            break
        if not space.is_true(space.contains(w_otherview, w_item)):
            return space.w_False
    return space.w_True
def eq__DictViewKeys_DictViewKeys(space, w_dictview, w_otherview):
    # Views compare equal iff same length and mutual containment.
    if space.eq_w(space.len(w_dictview), space.len(w_otherview)):
        return all_contained_in(space, w_dictview, w_otherview)
    return space.w_False
# Views also compare against sets and frozensets with the same rule.
eq__DictViewKeys_settypedef = eq__DictViewKeys_DictViewKeys
eq__DictViewKeys_frozensettypedef = eq__DictViewKeys_DictViewKeys

eq__DictViewKeys_DictViewItems = eq__DictViewKeys_DictViewKeys
eq__DictViewItems_DictViewItems = eq__DictViewKeys_DictViewKeys
eq__DictViewItems_settypedef = eq__DictViewItems_DictViewItems
eq__DictViewItems_frozensettypedef = eq__DictViewItems_DictViewItems
def repr__DictViewKeys(space, w_dictview):
    # repr looks like "dict_keys([...])": list the items and format them.
    w_seq = space.call_function(space.w_list, w_dictview)
    w_repr = space.repr(w_seq)
    return space.wrap("%s(%s)" % (space.type(w_dictview).getname(space),
                                  space.str_w(w_repr)))
repr__DictViewItems = repr__DictViewKeys
repr__DictViewValues = repr__DictViewKeys
# Set-like operators on views: build a real set from the view, then apply
# the corresponding in-place set method against the other operand.

def and__DictViewKeys_DictViewKeys(space, w_dictview, w_otherview):
    w_set = space.call_function(space.w_set, w_dictview)
    space.call_method(w_set, "intersection_update", w_otherview)
    return w_set
and__DictViewKeys_settypedef = and__DictViewKeys_DictViewKeys
and__DictViewItems_DictViewItems = and__DictViewKeys_DictViewKeys
and__DictViewItems_settypedef = and__DictViewKeys_DictViewKeys

def or__DictViewKeys_DictViewKeys(space, w_dictview, w_otherview):
    w_set = space.call_function(space.w_set, w_dictview)
    space.call_method(w_set, "update", w_otherview)
    return w_set
or__DictViewKeys_settypedef = or__DictViewKeys_DictViewKeys
or__DictViewItems_DictViewItems = or__DictViewKeys_DictViewKeys
or__DictViewItems_settypedef = or__DictViewKeys_DictViewKeys

def xor__DictViewKeys_DictViewKeys(space, w_dictview, w_otherview):
    w_set = space.call_function(space.w_set, w_dictview)
    space.call_method(w_set, "symmetric_difference_update", w_otherview)
    return w_set
xor__DictViewKeys_settypedef = xor__DictViewKeys_DictViewKeys
xor__DictViewItems_DictViewItems = xor__DictViewKeys_DictViewKeys
xor__DictViewItems_settypedef = xor__DictViewKeys_DictViewKeys
# ____________________________________________________________

# Register every multimethod implementation defined in this module.
from pypy.objspace.std import dicttype
register_all(vars(), dicttype)
|
# oop03 (class methods).py
# oop3
# class methods
# Regular methods in a class automatically receive the instance as their first argument. By convention, we call it "self".
# Class methods in a class automatically receive the class as their first argument. By convention, we call it "cls".
class Employee:
    """A simple employee record used to demonstrate class methods."""

    # Class-level state shared by every instance.
    raise_amount = 1.04
    numemp = 0

    def __init__(self, first, last, pay):
        self.first = first
        self.last = last
        self.pay = pay
        # Count every employee ever constructed, on the class itself.
        Employee.numemp = Employee.numemp + 1

    def fullname(self):
        """Return the employee's full name."""
        return f"{self.first} {self.last}"

    def apply_raise(self):
        """Scale pay by raise_amount (instance attribute shadows class)."""
        self.pay = int(self.pay * self.raise_amount)

    # @classmethod turns a regular method into a class method: the class
    # (conventionally named "cls", since "class" is a keyword) is passed
    # automatically as the first argument instead of an instance.
    @classmethod
    def set_raise_amt(cls, amount):
        """Rebind raise_amount on the class, affecting all instances."""
        cls.raise_amount = amount
# Demo: calling a class method changes class-level state for everybody.
emp1=Employee("ahammad","shawki",200)
emp2=Employee("cristiano","ronaldo",400)
Employee.set_raise_amt(1.05)# changing the raise_amount
# This changes raise_amount for the class and (via lookup) every instance,
# because set_raise_amt is a class method working with the class itself:
# it sets the class variable raise_amount to the amount passed in (1.05).
# What we have done here is the same thing as saying:
Employee.raise_amount=1.05
print(Employee.raise_amount)
print(emp1.raise_amount)
print(emp2.raise_amount)

# We can also use class methods as alternative constructors,
# i.e. provide multiple ways of creating our objects.
# Suppose someone using our class has employee data as hyphen-separated strings:
emp_str_1 ="john-doe-700"
emp_str_2 ="steve-smith-800"
emp_str_3 ="sergio-ramos-900"
# To create objects from such strings we must first split on the hyphen:
first, last, pay =emp_str_1.split("-")
new_emp1=Employee(first,last,pay)
print(new_emp1.pay)
# But doing that at every call site takes code and time, so below we add a
# class method that accepts the string directly, acting as an alternative
# constructor.
class Employee2:
    """Employee variant demonstrating an alternative constructor."""

    raise_amount = 1.04
    numemp = 0

    def __init__(self, first, last, pay):
        self.first = first
        self.last = last
        self.pay = pay
        # BUGFIX: this used to increment Employee.numemp (copy-paste from
        # the class above), so Employee2.numemp stayed stuck at 0.
        Employee2.numemp += 1

    def fullname(self):
        """Return the employee's full name."""
        return f"{self.first} {self.last}"

    def apply_raise(self):
        """Scale pay by raise_amount."""
        self.pay = int(self.pay * self.raise_amount)

    @classmethod
    def form_string(cls, emp_str):  # name kept as-is ("form" sic) for callers
        """Alternative constructor: build an employee from 'first-last-pay'.

        Note: pay stays a string here, exactly as split() returns it.
        Using cls (not Employee2) means subclasses construct themselves.
        """
        first, last, pay = emp_str.split("-")
        return cls(first, last, pay)
# Demo: the alternative constructor in action.
emp_str_1 ="john-doe-700"
emp_str_2 ="steve-smith-800"
emp_str_3 ="sergio-ramos-900"
new_emp1=Employee2.form_string(emp_str_1)
print(new_emp1.pay)

# Characteristics of class methods:
# 1. add the @classmethod decorator on top of the method.
# 2. the first argument of the method is cls.
# 3. use cls inside the method body.
# 4. outside the class, call the method on the class name.
# 5. a class method can serve as an alternative constructor.
|
# src/baselines/PACNet/task_semanticSegmentation/main.py
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import argparse
import os
import time
import math
import random
import glob
from PIL import Image
from collections import OrderedDict
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from . import datasets, models
def fast_conf(pred, gt, nclasses):
    """Build an nclasses x nclasses confusion matrix from integer label
    arrays (rows = ground truth, columns = prediction).

    Ground-truth labels outside [0, nclasses) are ignored.
    """
    if pred.ndim > 1:
        pred = np.ravel(pred)
    if gt.ndim > 1:
        gt = np.ravel(gt)
    valid = (gt >= 0) & (gt < nclasses)
    # Encode each (gt, pred) pair as a single index and histogram them.
    combined = nclasses * gt[valid] + pred[valid]
    counts = np.bincount(combined, minlength=nclasses ** 2)
    return counts.reshape(nclasses, nclasses)
def seg_measures(conf, measures=('miou', 'acc', 'macc')):
    """Compute segmentation scores from a confusion matrix.

    A single measure name returns a scalar; a sequence of names returns a
    list of floats in the same order.  Supported measures: 'miou' (mean
    intersection-over-union), 'acc' (pixel accuracy), 'macc' (mean
    per-class accuracy).
    """
    if isinstance(measures, str):
        # Delegate to the sequence form and unpack the single result.
        return seg_measures(conf, (measures,))[0]
    diag = np.diag(conf)
    scores = []
    for name in measures:
        if name == 'miou':
            union = conf.sum(1) + conf.sum(0) - diag
            scores.append(float((diag / union).mean()))
        elif name == 'acc':
            scores.append(float(diag.sum() / conf.sum()))
        elif name == 'macc':
            scores.append(float((diag / conf.sum(1)).mean()))
    return scores
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0    # most recent value seen
        self.avg = 0    # running (weighted) mean
        self.sum = 0    # weighted sum of all values
        self.count = 0  # total weight seen so far

    def update(self, val, n=1):
        """Record value `val` with weight `n` and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
class ConfMeter(object):
    """Accumulates confusion matrices over batches and derives scores."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.reset()

    def reset(self):
        # Both the last-batch and the running confusion matrix start at zero.
        shape = (self.num_classes, self.num_classes)
        self.val = np.zeros(shape, dtype=np.int64)
        self.sum = np.zeros(shape, dtype=np.int64)

    def update(self, pred, gt):
        """Fold one batch of predictions/ground truth into the meters."""
        batch_conf = fast_conf(pred, gt, self.num_classes)
        self.val = batch_conf
        self.sum = self.sum + batch_conf

    def score_val(self, score_type):
        # Scores of the most recent batch only.
        return seg_measures(self.val, score_type)

    def score_avg(self, score_type):
        # Scores accumulated over all batches seen since reset().
        return seg_measures(self.sum, score_type)
def _compute_loss(output, target, loss_type, ignore_index=-100):
if loss_type == 'ce':
loss = F.cross_entropy(output, target, ignore_index=ignore_index)
elif loss_type.startswith('fce'):
forgiving_threshold = float(loss_type.split('_')[-1])
loss_none = F.cross_entropy(output, target, ignore_index=ignore_index, reduction='none')
loss_none[loss_none < - math.log(forgiving_threshold)] = 0.0
loss = loss_none.mean()
else:
raise ValueError('loss type {} not supported'.format(loss_type))
return loss
def train(model, train_loader, optimizer, device, epoch, lr, perf_measures, num_classes, args):
    """Run one training epoch and return the collected log rows.

    Each log row holds [fractional epoch, lr, avg batch time, avg data time,
    last loss, avg loss] followed by (last, avg) for each perf measure.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    conf = ConfMeter(num_classes)
    model.train()
    log = []
    end = time.time()
    # train_loader may be a factory so a fresh loader can be built per
    # epoch -- assumption from the callable() check; confirm against callers.
    if callable(train_loader):
        train_loader = train_loader()
    for batch_idx, sample in enumerate(train_loader):
        img, target = sample[0].to(device), sample[1].to(device)
        # Per-sample data-loading time.
        data_time.update((time.time() - end) / img.shape[0], img.shape[0])
        optimizer.zero_grad()
        output = model(img, target.shape[-2:])
        # Label 255 marks ignored pixels.
        loss = _compute_loss(output, target, args.loss, ignore_index=255)
        loss.backward()
        optimizer.step()
        pred, gt = output.argmax(dim=1).cpu().numpy(), sample[1].numpy()
        batch_time.update((time.time() - end) / img.shape[0], img.shape[0])
        losses.update(loss.item(), img.shape[0])
        conf.update(pred, gt)
        end = time.time()
        batch_cnt = batch_idx + 1
        sample_cnt = batch_idx * args.batch_size + len(img)
        progress = sample_cnt / len(train_loader.dataset)
        if batch_cnt == len(train_loader) or batch_cnt % args.log_interval == 0:
            log_row = [progress + epoch - 1, lr, batch_time.avg, data_time.avg, losses.val, losses.avg]
            for m in perf_measures:
                log_row.extend([conf.score_val(m), conf.score_avg(m)])
            log.append(log_row)
        if batch_cnt == len(train_loader) or batch_cnt % args.print_interval == 0:
            msg = 'Train Epoch {} [{}/{} ({:3.0f}%)]\tLR {:g}\tTime {:.3f}\tData {:.3f}\t' \
                  'Loss {:.4f} ({:.4f})'.format(epoch, sample_cnt, len(train_loader.dataset), 100. * progress, lr,
                                                batch_time.avg, data_time.avg, losses.val, losses.avg)
            print(msg)
    # Epoch summary.
    msg = '\nTraining (#epochs={})\n'.format(epoch)
    msg += 'Average loss: {:.6f}\n'.format(losses.avg)
    msg += 'Average speed: {:.2f} samples/sec\n'.format(1.0 / batch_time.avg)
    msg += ''.join('{}: {:.6f}\n'.format(m, conf.score_avg(m)) for m in perf_measures)
    print(msg)
    return log
def test(model, test_loader, device, epoch, lr, perf_measures, num_classes, args):
    """Evaluate on test_loader without gradients; returns a single log row
    (wrapped in a list for symmetry with train())."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    conf = ConfMeter(num_classes)
    # With batch size 1, unwrap DataParallel so variable-size inputs work.
    if isinstance(model, torch.nn.DataParallel) and args.test_batch_size == 1:
        model = model.module
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, sample in enumerate(test_loader):
            img, target = sample[0].to(device), sample[1].to(device)
            data_time.update((time.time() - end) / img.shape[0], img.shape[0])
            output = model(img, target.shape[-2:])
            # Label 255 marks ignored pixels.
            loss = _compute_loss(output, target, args.loss, ignore_index=255)
            pred, gt = output.argmax(dim=1).cpu().numpy(), sample[1].numpy()
            batch_time.update((time.time() - end) / img.shape[0], img.shape[0])
            losses.update(loss.item(), img.shape[0])
            conf.update(pred, gt)
            end = time.time()
    log = [float(epoch), lr, batch_time.avg, data_time.avg, losses.avg] + conf.score_avg(perf_measures)
    msg = '\nTesting (#epochs={})\n'.format(epoch)
    msg += 'Average loss: {:.6f}\n'.format(losses.avg)
    msg += 'Average speed: {:.2f} samples/sec ({}+{}ms /sample)\n'.format(1.0 / batch_time.avg,
                                                                          int(1000 * (batch_time.avg - data_time.avg)),
                                                                          int(1000 * data_time.avg))
    msg += ''.join('{}: {:.6f}\n'.format(m, conf.score_avg(m)) for m in perf_measures)
    print(msg)
    return [log]
def evaluate(model, test_loader, device, batch_size, out_type, save_dir, palette):
    """Run inference and save per-image outputs into save_dir.

    out_type 'raw' saves the float32 logit maps as .npy; 'pred' saves
    argmax label maps as paletted PNGs.
    """
    os.makedirs(save_dir, exist_ok=True)
    batch_time = AverageMeter()
    # With batch size 1, unwrap DataParallel so variable-size inputs work.
    if isinstance(model, torch.nn.DataParallel) and batch_size == 1:
        model = model.module
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, sample in enumerate(test_loader):
            imgs = sample[0].to(device)
            img_ids = sample[1]
            img_hw = sample[2]
            outputs = model(imgs)
            for out, img_id, (im_h, im_w) in zip(outputs, img_ids, img_hw):
                # Crop away any padding added to reach the network input size.
                if im_h <= out.shape[1] and im_w <= out.shape[2]:
                    out = out[:, :im_h, :im_w]
                # img_id arrives as a sequence of character codes.
                img_id = ''.join([chr(s) for s in img_id])
                if out_type == 'raw':
                    np.save(os.path.join(save_dir, img_id + '.npy'),
                            out.cpu().numpy().astype(np.float32))
                elif out_type == 'pred':
                    pred = out.argmax(dim=0).cpu().numpy().astype(np.uint8)
                    pred = Image.fromarray(pred)
                    pred.putpalette(palette)
                    pred.save(os.path.join(save_dir, img_id + '.png'))
            batch_time.update((time.time() - end) / imgs.shape[0], imgs.shape[0])
            end = time.time()
    msg = '\nEvaluation\n'
    msg += 'Average speed: {:.2f} samples/sec\n'.format(1.0 / batch_time.avg)
    print(msg)
def prepare_log(log_path, header, last_epoch=0):
    """Prepare a CSV log file for (re)writing.

    Keeps all existing log lines up to and including epoch == last_epoch
    (epoch is read from the first CSV column); otherwise (re)creates the
    file with just `header`.

    FIXES: replaced a bare `except:` with specific exception types, and
    replaced `log != []` -- an ndarray-vs-list comparison whose truth value
    is ambiguous/elementwise -- with an explicit size check.
    """
    try:
        log = np.genfromtxt(log_path, delimiter=',', skip_header=1, usecols=(0,))
    except (OSError, ValueError):
        # Missing or unparsable file: start fresh below.
        log = np.array([])
    # genfromtxt returns a 0-d array for a single data row; normalize.
    log = np.atleast_1d(log)
    if log.size > 0:
        idxs = np.where(log <= last_epoch)[0]
        if len(idxs) > 0:
            # +2: one for the header line, one because idxs is 0-based.
            lines_to_keep = int(max(idxs)) + 2
            with open(log_path) as f:
                lines = f.readlines()
            with open(log_path, 'w') as f:
                f.writelines(lines[:lines_to_keep])
            return
    with open(log_path, 'w') as f:
        f.write(header + '\n')
def main():
    """Command-line entry point for semantic-segmentation training/testing.

    Parses CLI arguments, resumes from the newest matching snapshot in
    --exp-root (unless --overwrite), builds the dataset/model/optimizer,
    and then either runs stand-alone evaluation (--eval) or the
    train/test/snapshot loop, appending CSV logs to train.log / test.log.
    Requires CUDA.
    """
    parser = argparse.ArgumentParser(description='Semantic segmentation',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--data-root', type=str, default='data', metavar='D',
                        help='place to find (or download) data')
    parser.add_argument('--exp-root', type=str, default='exp', metavar='E',
                        help='place to save results')
    parser.add_argument('--download', default=False, action='store_true',
                        help='download dataset if not found locally')
    parser.add_argument('--load-weights', type=str, default='', metavar='L',
                        help='file with pre-trained weights')
    parser.add_argument('--load-weights-backbone', type=str, default='', metavar='L',
                        help='file with pre-trained weights for backbone network')
    parser.add_argument('--model', type=str, default='fcn8s', metavar='M',
                        help='network model type')
    parser.add_argument('--dataset', type=str, default='VOC2012', metavar='D',
                        help='dataset')
    parser.add_argument('--num-data-worker', type=int, default=4, metavar='W',
                        help='number of subprocesses for data loading')
    parser.add_argument('--gpu', type=int, default=None, metavar='GPU',
                        help='GPU id to use')
    parser.add_argument('--val-ratio', type=float, default=0.0, metavar='V',
                        help='use this portion of training set for validation')
    parser.add_argument('--train-split', type=str, default='', metavar='TRAIN',
                        help='specify a subset for training')
    parser.add_argument('--test-split', type=str, default='', metavar='TEST',
                        help='specify a subset for testing')
    parser.add_argument('--eval', type=str, default='', metavar='EVAL', choices=('', 'pred', 'raw'),
                        help='do evaluation instead of train/test')
    parser.add_argument('--batch-size', type=int, default=1, metavar='N',
                        help='input batch size for training')
    parser.add_argument('--test-batch-size', type=int, default=1, metavar='N',
                        help='input batch size for testing')
    parser.add_argument('--train-crop', type=int, default=449, metavar='CROP',
                        help='input crop size during training')
    parser.add_argument('--test-crop', type=int, default=513, metavar='CROP',
                        help='input crop size during testing')
    parser.add_argument('--train-aug-scale', type=float, default=0.0,
                        help='random scaling as data augmentation')
    parser.add_argument('--train-aug-color', default=False, action='store_true',
                        help='random color jittering as data augmentation')
    parser.add_argument('--epochs', type=int, default=0, metavar='N',
                        help='number of epochs to train')
    parser.add_argument('--optimizer', type=str, default='Adam', metavar='O',
                        help='pick which optimizer to use')
    parser.add_argument('--loss', type=str, default='ce', metavar='L',
                        help='pick which loss function to use')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate')
    parser.add_argument('--lr-steps', nargs='+', default=None, metavar='S',
                        help='decrease lr by 10 at these epochs')
    parser.add_argument('--weight-decay', type=float, default=0.0, metavar='WD',
                        help='Adam/SGD weight decay')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum')
    parser.add_argument('--seed', type=int, default=-1, metavar='S',
                        help='random seed')
    parser.add_argument('--overwrite', default=False, action='store_true',
                        help='ignore existing log files and snapshots')
    parser.add_argument('--print-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before displaying training status')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--test-interval', type=int, default=1, metavar='N',
                        help='how many epochs to wait before a testing')
    parser.add_argument('--snapshot-interval', type=int, default=1, metavar='N',
                        help='snapshot intermediate models')
    args = parser.parse_args()
    # seed == -1 means "random seed"; ord() on a 1-byte bytes object gives its int value
    seed_value = ord(os.urandom(1)) if args.seed == -1 else args.seed
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    torch.cuda.manual_seed_all(seed_value)
    assert(torch.cuda.is_available())
    if args.gpu is not None:
        device = torch.device("cuda:{}".format(args.gpu))
    else:
        device = torch.device("cuda:0")
    dl_kwargs = {'num_workers': args.num_data_worker, 'pin_memory': True}
    # find existing snapshots
    os.makedirs(args.exp_root, exist_ok=True)
    # NOTE: rstrip('.pth') strips a character *set*, not a suffix; safe here
    # because the epoch component is all digits
    snapshots_found = sorted([int(s.split('_')[-1].rstrip('.pth'))
                              for s in glob.glob(os.path.join(args.exp_root, 'weights_epoch_*.pth'))])
    load_weights = args.load_weights
    if snapshots_found and not args.overwrite:
        # resume from the newest snapshot not beyond the requested epoch count
        last_epoch = max(snapshots_found) if args.epochs > max(snapshots_found) else args.epochs
        assert last_epoch in snapshots_found
        if load_weights:
            print('Warning: parameter (load_weights={}) ignored!'.format(load_weights))
        load_weights = os.path.join(args.exp_root, 'weights_epoch_{}.pth'.format(last_epoch))
    else:
        last_epoch = 0
    test_only = (args.epochs <= last_epoch)
    if args.eval:
        assert test_only
    # dataset
    if args.dataset == 'VOC2012':
        perf_measures = ('miou','acc','macc')
        num_classes = 21
        palette = datasets.pascal_voc_seg_palette
        train_split = 'train_aug' if not args.train_split else args.train_split
        test_split = 'val' if not args.test_split else args.test_split
        aug_flip = True
        aug_scale = (1.0 - args.train_aug_scale, 1.0 + args.train_aug_scale) if args.train_aug_scale > 0 else None
        aug_color = (0.1, 0.1, 0.1, 0.01) if args.train_aug_color else None
        train_transform = datasets.DeepLabInputs(args.train_crop, pad_label=True,
                                                 aug_flip=aug_flip, aug_scale=aug_scale, aug_color=aug_color)
        if args.eval:
            test_transform = datasets.DeepLabEvalInputs(args.test_crop)
        else:
            test_transform = datasets.DeepLabInputs(args.test_crop, aug_flip=False, pad_label=(args.test_batch_size > 1))
        # --data-root may give VOC and SBD roots as a comma-separated pair
        if ',' in args.data_root:
            voc_root, sbd_root = args.data_root.split(',')
        else:
            voc_root = os.path.join(args.data_root, 'VOCdevkit')
            sbd_root = os.path.join(args.data_root, 'benchmark_RELEASE')
        if args.epochs > 0:
            if '_rand' in train_split:
                # '_rand' splits are re-sampled per epoch, so defer construction
                train_dset = lambda : datasets.PascalVOC(voc_root, sbd_root, split=train_split,
                                                         transform=train_transform, download=args.download)
            else:
                train_dset = datasets.PascalVOC(voc_root, sbd_root, split=train_split, transform=train_transform,
                                                download=args.download)
        else:
            train_dset = None
        test_dset = datasets.PascalVOC(voc_root, sbd_root, split=test_split, transform=test_transform,
                                       download=args.download)
    else:
        raise ValueError('Dataset ({}) not supported.'.format(args.dataset))
    # data loader
    if test_only:
        train_loader = None
    else:
        if callable(train_dset):
            # lazily rebuild the loader when the dataset itself is a factory
            train_loader = lambda : torch.utils.data.DataLoader(train_dset(), batch_size=args.batch_size, shuffle=True,
                                                                **dl_kwargs)
        else:
            train_loader = torch.utils.data.DataLoader(train_dset, batch_size=args.batch_size, shuffle=True, **dl_kwargs)
    test_loader = torch.utils.data.DataLoader(test_dset, batch_size=args.test_batch_size, shuffle=False, **dl_kwargs)
    # model
    model = models.create_model(args.model, num_classes)
    has_backbone = '_' in args.model
    frozen_backbone = has_backbone and args.model.split('_')[0].endswith('frozen')
    if has_backbone and args.load_weights_backbone:
        try:
            model.backbone.load_state_dict(torch.load(args.load_weights_backbone))
        except Exception:
            # fall back to non-strict loading when keys do not match exactly
            print('Warning: strict weight loading fails!')
            model.backbone.load_state_dict(torch.load(args.load_weights_backbone), strict=False)
        print('\nBackbone model weights initialized from: {}'.format(args.load_weights_backbone))
    if load_weights:
        try:
            model.load_state_dict(torch.load(load_weights))
        except Exception:
            print('Warning: strict weight loading fails!')
            model.load_state_dict(torch.load(load_weights), strict=False)
        print('\nModel weights initialized from: {}'.format(load_weights))
    if args.gpu is None and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model = model.to(device)
    # optimizer, scheduler, and logs
    if not test_only:
        if args.optimizer == 'Adam':
            optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        elif args.optimizer == 'SGD':
            optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)
        else:
            raise ValueError('Optimizer type ({}) is not supported.'.format(args.optimizer))
        load_optim_state = os.path.join(args.exp_root, '{}_epoch_{}.pth'.format(args.optimizer.lower(), last_epoch))
        if os.path.exists(load_optim_state):
            optimizer.load_state_dict(torch.load(load_optim_state))
            print('\nOptimizer state initialized from: {}'.format(load_optim_state))
            # resumed optimizer carries 'initial_lr', so the scheduler can resume too
            scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                       [] if not args.lr_steps else [int(v) for v in args.lr_steps],
                                                       gamma=0.1,
                                                       last_epoch=last_epoch-1)
        else:
            # fresh scheduler, manually stepped forward to the resume point
            scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                       [] if not args.lr_steps else [int(v) for v in args.lr_steps],
                                                       gamma=0.1,
                                                       last_epoch=-1)
            for _ in range(last_epoch):
                scheduler.step()
    # log files
    fmt_train = '{:.6f},{:g},{:.3f},{:.3f},{:.6f},{:.6f}'
    fmt_test = '{:.6f},{:g},{:.3f},{:.3f},{:.6f}'
    csvheader_train = 'epoch,lr,time,time-data,loss,loss-avg'
    csvheader_test = 'epoch,lr,time,time-data,loss'
    for m in perf_measures:
        fmt_train += ',{:.6f},{:.6f}'
        fmt_test += ',{:.6f}'
        csvheader_train += ',{},{}-avg'.format(m, m)
        csvheader_test += ',{}'.format(m)
    train_log_path = os.path.join(os.path.join(args.exp_root, 'train.log'))
    test_log_path = os.path.join(os.path.join(args.exp_root, 'test.log'))
    prepare_log(train_log_path, csvheader_train, last_epoch)
    prepare_log(test_log_path, csvheader_test, last_epoch)
    # main computation
    init_lr = 0 if test_only else scheduler.get_lr()[0]
    if args.eval:
        pred_dir = os.path.join(args.exp_root, 'outputs_{}_{}'.format(test_split, args.eval))
        evaluate(model, test_loader, device, args.test_batch_size, args.eval, pred_dir, palette)
    else:
        log_test = test(model, test_loader, device, last_epoch, init_lr, perf_measures, num_classes, args)
        if last_epoch == 0 and not test_only:
            # log entries of -1 are written as empty CSV fields
            with open(test_log_path, 'a') as f:
                f.writelines([','.join([('' if arg == -1 else fmt.format(arg)) for fmt, arg in zip(fmt_test.split(','), l)])
                              + '\n' for l in log_test])
    for epoch in range(last_epoch + 1, args.epochs + 1):
        scheduler.step()
        lr = scheduler.get_lr()[0]
        log_train = train(model, train_loader, optimizer, device, epoch, lr, perf_measures, num_classes, args)
        with open(train_log_path, 'a') as f:
            f.writelines([','.join([('' if arg == -1 else fmt.format(arg)) for fmt, arg in zip(fmt_train.split(','), l)])
                          + '\n' for l in log_train])
        if epoch % args.test_interval == 0:
            log_test = test(model, test_loader, device, epoch, lr, perf_measures, num_classes, args)
            with open(test_log_path, 'a') as f:
                f.writelines(
                    [','.join([('' if arg == -1 else fmt.format(arg)) for fmt, arg in zip(fmt_test.split(','), l)])
                     + '\n' for l in log_test])
        if (args.snapshot_interval > 0 and epoch % args.snapshot_interval == 0) or (epoch == args.epochs):
            save_weights = os.path.join(args.exp_root, 'weights_epoch_{}.pth'.format(epoch))
            save_optim_state = os.path.join(args.exp_root, '{}_epoch_{}.pth'.format(args.optimizer.lower(), epoch))
            # snapshot from CPU so the saved tensors are device-agnostic
            model.to('cpu')
            if isinstance(model, torch.nn.DataParallel):
                weights_dict = model.module.state_dict()
            else:
                weights_dict = model.state_dict()
            if frozen_backbone:
                # frozen backbones are never updated, so don't store them
                weights_dict = OrderedDict((k, v) for k, v in weights_dict.items() if not k.startswith('backbone'))
            torch.save(weights_dict, save_weights)
            torch.save(optimizer.state_dict(), save_optim_state)
            model.to(device)
            print('Snapshot saved to: {}, {}\n'.format(save_weights, save_optim_state))
# script entry point
if __name__ == '__main__':
    main()
|
from __future__ import division
import os
import sys
import time
import random
import string
import numpy as np
from scipy.optimize import root
import zarr
from numcodecs import LZ4, Blosc
# ----------------------------------------------------
# enter parameters
# units: Masses    [solar masses]
#        Distances [km]
#        Time      [s]
Nparticles = int(1e5)  # number of particles in axion clump
# initial conditions for axion clump
AC_r0 = np.array([1e14, 6e3, 0.])  # initial position [km]
AC_v0 = np.array([-300., 0., 0.])  # initial velocity [km/s]
# parameters for axion minicluster
MC_switch = False  # set to true if you want to simulate a minicluster
MC_mass = 1e-11  # minicluster mass in [solar masses]
MC_delta = 1  # initial overdensity of the minicluster
# NFW profile:
MC_NFW_switch = True  # set to true for a NFW density profile for the minicluster
MC_c = 100  # concentration parameter for the minicluster
# Power law profile:
MC_PL_switch = False  # set to true for a power law density profile for the minicluster
# parameters for (dilute) axion star
AS_switch = True
AS_mass = 1e-13  # mass [M_sol]
ax_mass = 2.6e-5  # axion (particle) mass in [eV]
# parameters for neutron star
NS_mass = 1.4  # mass [M_sol]
NS_radius = 10.  # radius [km]
# some parameters for the code and the output
Rdrop = 1e4  # radius [km] at which particles leaving the neutron star are dropped from the calculation
Rsave = 1e3  # radius [km] within which orbits are written to file
nwrite = 100  # number of steps in output to skip when writing to file
mem_size = 1.  # target size of memory [GB] the calculation fills
# switch for the way the results are written to disk
out_format_switch = 1
# if out_format_switch == 1, orbits are written as a collection of plain text files
# if out_format_switch == 2, orbits written as zarr files
# ----------------------------------------------------
# constants
G_N = 1.325e11  # Newton constant in km^3/Msol/s^2
# ----------------------------------------------------
# functions
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
def update_r_v(rx, ry, rz, vx, vy, vz, mu, NSR, rprecision=1e-3, dtmax=1e25):
    """ returns updated r and v and dt for particles

    One explicit integration step in a point-mass potential.
    Inputs may be scalars or equally-shaped numpy arrays:
      rx, ry, rz -- position components [km]
      vx, vy, vz -- velocity components [km/s]
      mu         -- gravitational parameter G_N * M [km^3/s^2]
      NSR        -- neutron-star radius [km]
      rprecision -- adaptive timestep: dt = (r / v) * rprecision
      dtmax      -- cap on the timestep [s]
    Particles that end up inside NSR are parked far outside Rdrop at
    high speed so a later find_inds_active() call discards them.
    """
    r = np.sqrt(rx**2 + ry**2 + rz**2)
    v = np.sqrt(vx**2 + vy**2 + vz**2)
    # adaptive step: a fixed fraction of the instantaneous timescale r/v
    dt = r / v * rprecision
    if np.isscalar(dt):
        dt = np.min([dt, dtmax])
    else:
        dt = np.minimum(dt, dtmax * np.ones(dt.shape))
    # calculate the acceleration
    #r[np.where(r<NSR)]=NSR # soften acceleration inside the neutron star assuming that it has uniform density
    ax = -mu * rx / r**3
    ay = -mu * ry / r**3
    az = -mu * rz / r**3
    # update velocity and position (2nd-order in dt for position, Euler for velocity)
    out_rx = rx + vx * dt + 0.5 * ax * dt**2
    out_ry = ry + vy * dt + 0.5 * ay * dt**2
    out_rz = rz + vz * dt + 0.5 * az * dt**2
    out_vx = vx + ax * dt
    out_vy = vy + ay * dt
    out_vz = vz + az * dt
    # remove particles inside NSR:
    rout2 = out_rx**2 + out_ry**2 + out_rz**2
    if np.isscalar(dt):
        if rout2 < NSR**2:
            out_rx = out_ry = out_rz = 1e10 * Rdrop
            out_vx = out_vy = out_vz = 1e10
    else:
        inds = np.where(rout2 < NSR**2)
        # NOTE(review): the array branch parks at 1e3*sqrt(3)*Rdrop while the
        # scalar branch uses 1e10*Rdrop -- both lie outside Rdrop, but the
        # asymmetry looks unintentional; confirm.
        out_rx[inds] = out_ry[inds] = out_rz[inds] = 1e3 * np.sqrt(3) * Rdrop
        out_vx[inds] = out_vy[inds] = out_vz[inds] = 1e5
    return out_rx, out_ry, out_rz, out_vx, out_vy, out_vz, dt
def rho_MC(delta, rhoeq=4.39e-38):
    """Characteristic density [solar masses/km^3] of an axion minicluster
    that formed from an overdensity with overdensity parameter *delta*.

    *rhoeq* is the matter density at matter-radiation equality in
    [solar masses/km^3].
    """
    return 140 * rhoeq * delta**3 * (1 + delta)
def NFW_rs(MCM, MCrho, MCc):
    """Scale radius [km] of an NFW minicluster.

    MCM   -- mass [solar masses]
    MCrho -- characteristic density [solar masses/km^3]
    MCc   -- concentration parameter
    """
    concentration_term = np.log(1. + MCc) - MCc / (1. + MCc)
    volume = MCM / (4. * np.pi * MCrho * concentration_term)
    return volume**(1. / 3.)
def NFW_R90_fun(x, c):
    """Helper whose root in x (in units of the scale radius) is the radius
    enclosing 90% of the NFW mass for concentration *c*."""
    def enclosed(y):
        return np.log(1. + y) - y / (1. + y)
    return 0.9 * enclosed(c) - enclosed(x)
def R90_NFW(MCM, MCrho, MCc):
    """R90 [km] (radius enclosing 90% of the mass) for an NFW minicluster.

    MCM   -- mass [solar masses]
    MCrho -- characteristic density [solar masses/km^3]
    MCc   -- concentration parameter
    """
    scale_radius = NFW_rs(MCM, MCrho, MCc)
    # solve NFW_R90_fun(x, MCc) = 0 for x = R90 / r_s, seeded at the concentration
    solution = root(NFW_R90_fun, MCc, args=MCc)
    return solution.x[0] * scale_radius
def dens_dist_NFW(x0, y0, z0, MCM, MCrho, MCc, Np):
    """Sample *Np* positions [km] (cartesian) from an NFW profile centered
    at (x0, y0, z0) for an axion minicluster with mass MCM [solar masses],
    characteristic density MCrho [solar masses/km^3], concentration MCc.
    """
    rs = NFW_rs(MCM, MCrho, MCc)
    # tabulate r^2 * rho_NFW(r) on a fine grid out to the truncation radius c*rs
    radii = np.linspace(0, MCc * rs, int(1e6))
    weights = radii**2 / (radii / rs * (1. + radii / rs)**2)
    weights[0] = 0.  # 0/0 at the origin
    # draw isotropic directions and radii from the tabulated pdf
    rng = np.random.default_rng()
    n = int(Np)
    costheta = rng.uniform(-1., 1., size=n)
    phi = rng.uniform(0, 2. * np.pi, size=n)
    r = rng.choice(radii, p=weights / np.sum(weights), size=n)
    sintheta = np.sin(np.arccos(costheta))
    return (x0 + r * sintheta * np.cos(phi),
            y0 + r * sintheta * np.sin(phi),
            z0 + r * costheta)
def R90_PL(MCM, MCrho):
    """R90 [km] for a minicluster with a power-law (index 9/4) density
    profile, given mass MCM [solar masses] and characteristic density
    MCrho [solar masses/km^3]."""
    volume_factor = 3. * MCM / (4. * np.pi * MCrho)
    return (0.9**4 * volume_factor)**(1. / 3.)
def dens_dist_PL(x0, y0, z0, MCM, MCrho, Np):
    """Sample *Np* positions [km] (cartesian) from a power-law (index 9/4)
    profile centered at (x0, y0, z0) for an axion minicluster with mass
    MCM [solar masses] and characteristic density MCrho [solar masses/km^3].

    Fix over the original: the r = 0 grid point is dropped *before* the
    pdf is evaluated, so 0**-0.25 is never computed (the original produced
    an inf plus a RuntimeWarning and then discarded the entry).
    """
    RPL = (3. * MCM / (4. * np.pi * MCrho))**(1. / 3.)  # truncation radius
    rvec = np.linspace(0, RPL, int(1e6))[1:]  # skip r = 0
    rpdf = rvec**-0.25  # r^2 * rho(r) with rho ~ r^-9/4
    # draw isotropic directions and radii from the tabulated pdf
    rng = np.random.default_rng()
    costheta = rng.uniform(-1., 1., size=int(Np))
    phi = rng.uniform(0, 2. * np.pi, size=int(Np))
    r = rng.choice(rvec, p=rpdf / np.sum(rpdf), size=int(Np))
    # generate out vectors
    x = x0 + r * np.sin(np.arccos(costheta)) * np.cos(phi)
    y = y0 + r * np.sin(np.arccos(costheta)) * np.sin(phi)
    z = z0 + r * costheta
    return x, y, z
def R90_AS(ASM, ma):
    """R90 [km] of a dilute axion star.

    ASM -- axion star mass [solar masses]
    ma  -- axion particle mass [eV]
    """
    numerical_factor = 9.9      # from [1710.08910]
    light_speed = 2.998e5       # speed of light [km/s]
    hbar_c = 1.973e-10          # hbar*c [eV.km]
    unit_conversion = light_speed**2 * hbar_c**2
    return unit_conversion * numerical_factor / G_N / ma**2 / ASM
def dens_dist_sech(x0, y0, z0, ASM, ma, Np):
    """Sample *Np* positions [km] (cartesian) from the sech density profile
    of [1710.04729] for a dilute axion star centered at (x0, y0, z0).

    ASM -- axion star mass [solar masses]
    ma  -- axion particle mass [eV]
    """
    Rsech = R90_AS(ASM, ma) / 2.799  # numerical factor from [1710.04729]
    # tabulate r^2 * sech^2(r / Rsech) out to 10 scale radii
    radii = np.linspace(0, 10. * Rsech, int(1e6))
    weights = radii**2 / np.cosh(radii / Rsech)**2
    # draw isotropic directions and radii from the tabulated pdf
    rng = np.random.default_rng()
    n = int(Np)
    costheta = rng.uniform(-1., 1., size=n)
    phi = rng.uniform(0, 2. * np.pi, size=n)
    r = rng.choice(radii, p=weights / np.sum(weights), size=n)
    sintheta = np.sin(np.arccos(costheta))
    return (x0 + r * sintheta * np.cos(phi),
            y0 + r * sintheta * np.sin(phi),
            z0 + r * costheta)
def velocity_dist_flat(vx0, vy0, vz0, Np, vesc):
    """Sample *Np* velocities [km/s] (cartesian), isotropic with speeds
    distributed as rng.power(3) (density ~ v^2) up to *vesc* in the clump
    frame, then boosted by (vx0, vy0, vz0).

    Returns a tuple (vx, vy, vz); units follow the inputs.

    Fix over the original: the vxout/vyout/vzout zero arrays were allocated
    but never used -- removed.
    """
    rng = np.random.default_rng()
    costheta = rng.uniform(-1., 1., size=int(Np))
    phi = rng.uniform(0, 2. * np.pi, size=int(Np))
    v = vesc * rng.power(3, size=int(Np))
    # generate out vectors
    vx = vx0 + v * np.sin(np.arccos(costheta)) * np.cos(phi)
    vy = vy0 + v * np.sin(np.arccos(costheta)) * np.sin(phi)
    vz = vz0 + v * costheta
    return vx, vy, vz
def find_nsteps(Np, target_size=mem_size):
    """Number of integration steps (at least 1) whose stored output for *Np*
    particles stays below roughly *target_size* GB, assuming 40 bytes per
    particle per step."""
    nsteps = int(1e9 * target_size / (Np * 40.))
    return max(nsteps, 1)
def find_inds_active(x, y, z, vx, vy, vz, Rcut=Rdrop):
    """Sorted indices of particles still worth integrating: those moving
    inward (r.v < 0) or currently inside radius *Rcut*."""
    ingoing = set(np.where(x * vx + y * vy + z * vz < 0)[0])
    inside = set(np.where(x**2. + y**2. + z**2. < Rcut**2.)[0])
    return sorted(ingoing | inside)
def run_particles_nsteps(x0, y0, z0, vx0, vy0, vz0, t0, target_size=mem_size):
    """Advance the particles for as many steps as fit in roughly
    *target_size* GB of stored output (see find_nsteps).

    Returns seven lists of per-step arrays: x, y, z, vx, vy, vz, t.
    Element 0 of each list is the input state.
    """
    t_start = time.time()
    nstep = find_nsteps(len(x0), target_size)
    print('the time is ' + time.strftime("%Hh%M"))
    print('starting to run {} active particles for {} steps'.format(
        len(x0), nstep))
    x, y, z = [x0], [y0], [z0]
    vx, vy, vz = [vx0], [vy0], [vz0]
    t = [t0]
    for _ in range(nstep):
        nx, ny, nz, nvx, nvy, nvz, dt = update_r_v(x[-1], y[-1], z[-1],
                                                   vx[-1], vy[-1], vz[-1],
                                                   mu, NS_radius)
        x.append(nx)
        y.append(ny)
        z.append(nz)
        vx.append(nvx)
        vy.append(nvy)
        vz.append(nvz)
        t.append(t[-1] + dt)
    print('finished calculation after {} seconds'.format(
        time.time() - t_start))
    return x, y, z, vx, vy, vz, t
def write_general_info():
    """ write general info to file

    Dumps the run parameters (neutron-star and axion-clump properties,
    initial conditions, and the bookkeeping radii) to <fpath_out>/general.txt.
    Reads module-level globals only; must run after AC_mass, AC_R90,
    AC_vesc and R_dis have been computed.
    """
    fo = open(fpath_out + '/general.txt', 'w')
    fo.write('# neutron star parameters\n')
    fo.write('{:3E} # mass [solar masses]\n'.format(NS_mass))
    fo.write('{:3E} # radius [km]\n'.format(NS_radius))
    fo.write('# axion clump parameters\n')
    if MC_switch and MC_NFW_switch:
        fo.write('# this is an axion minicluster with a NFW density profile\n')
    elif MC_switch and MC_PL_switch:
        fo.write('# this is an axion minicluster with a power law profile\n')
    elif AS_switch:
        fo.write('# this is a dilute axion star\n')
    fo.write('{:3E} # axion mass [eV]\n'.format(ax_mass))
    fo.write('{:3E} # mass [solar masses]\n'.format(AC_mass))
    fo.write('{:3E} # radius R90 [km]\n'.format(AC_R90))
    fo.write('{:3E} # escape velocity [km/s]\n'.format(AC_vesc))
    fo.write('{:3E} {:3E} {:3E} # initial coordinates [km]\n'.format(
        AC_r0[0], AC_r0[1], AC_r0[2]))
    fo.write('{:3E} {:3E} {:3E} # initial velocity [km/s]\n'.format(
        AC_v0[0], AC_v0[1], AC_v0[2]))
    fo.write('{:3E} # disruption radius [km]\n'.format(R_dis))
    fo.write('{:3E} # number of particles in clump\n'.format(Nparticles))
    fo.write(
        '{:3E} # radius [km] at which outgoing particles are dropped in calculation\n'
        .format(Rdrop))
    fo.write(
        '{:3E} # radius [km] within which orbits are written to file\n'.format(
            Rsave))
    fo.write(
        '{} # number of steps skipping when writing output\n'.format(nwrite))
    fo.close()
def write_pointParticle_orbit(AC_x, AC_y, AC_z, AC_vx, AC_vy, AC_vz, t):
    """Write the point-particle orbit of the axion clump as a plain-text
    table (one row per timestep) to <fpath_out>/AC_pointParticle_orbit.txt."""
    row_fmt = '{:.12E} {:.6E} {:.6E} {:.6E} {:.6E} {:.6E} {:.6E}\n'
    with open(fpath_out + '/AC_pointParticle_orbit.txt', 'w') as fo:
        fo.write('# t[s] x[km] y[km] z[km] vx[km] vy[km] vz[km]\n')
        for ti, xi, yi, zi, vxi, vyi, vzi in zip(t, AC_x, AC_y, AC_z,
                                                 AC_vx, AC_vy, AC_vz):
            fo.write(row_fmt.format(ti, xi, yi, zi, vxi, vyi, vzi))
def write_pointParticle_orbit_zarr(AC_x, AC_y, AC_z, AC_vx, AC_vy, AC_vz, t):
    """Write the point-particle orbit of the axion clump to a zarr store at
    <fpath_out>/AC_pointParticle_orbit.zarr, one named array per component."""
    store = zarr.open(fpath_out + '/AC_pointParticle_orbit.zarr', 'w')
    for name, data in (("t", t), ("AC_x", AC_x), ("AC_y", AC_y),
                       ("AC_z", AC_z), ("AC_vx", AC_vx), ("AC_vy", AC_vy),
                       ("AC_vz", AC_vz)):
        store.array(name, data)
def write_orbits_to_disk(x,
                         y,
                         z,
                         vx,
                         vy,
                         vz,
                         t,
                         inds_active,
                         Rcut=Rsave,
                         nskip=nwrite):
    """ appends the orbit files of particles in inds_active with the results
    only every nskip-th timestep is written to file

    x..vz, t    -- lists of per-step arrays as returned by
                   run_particles_nsteps; column i of each array belongs to
                   particle inds_active[i]
    inds_active -- global particle indices, used to select the output file
    Rcut        -- only steps with r < Rcut are recorded
    """
    startTfun = time.time()
    for i in range(len(inds_active)):
        fo = open(fout_orbit_names[inds_active[i]], 'a')
        j = 0
        # stop one short of the final step: the last state is fed back into
        # the next integration chunk, whose step 0 (j == 0) does get written
        while j < len(t) - 1:
            if (x[j][i]**2. + y[j][i]**2. + z[j][i]**2.) < Rcut**2.:
                fo.write(
                    '{:.12E} {:.6E} {:.6E} {:.6E} {:.6E} {:.6E} {:.6E}\n'
                    .format(t[j][i], x[j][i], y[j][i], z[j][i], vx[j][i],
                            vy[j][i], vz[j][i]))
            j += nskip
        fo.close()
    print('finished writing data after {} seconds'.format(time.time() -
                                                          startTfun))
def write_orbits_to_disk_zarr(x,
                              y,
                              z,
                              vx,
                              vy,
                              vz,
                              t,
                              inds_active,
                              Rcut=Rsave,
                              nskip=nwrite):
    """ appends the orbit files of particles in inds_active with the results
    only every nskip-th timestep is written to file

    Same contract as write_orbits_to_disk, but appends to the per-particle
    arrays of the global zarr store `out_zarr` instead of text files.
    """
    startTfun = time.time()
    # thin the per-step lists by nskip, then transpose so axis 0 runs over
    # particles and axis 1 over the kept timesteps
    t = np.array(t[::nskip]).T
    x = np.array(x[::nskip]).T
    y = np.array(y[::nskip]).T
    z = np.array(z[::nskip]).T
    vx = np.array(vx[::nskip]).T
    vy = np.array(vy[::nskip]).T
    vz = np.array(vz[::nskip]).T
    for i, ind in enumerate(inds_active):
        # keep only the timesteps where this particle is inside Rcut
        mask = x[i]**2 + y[i]**2 + z[i]**2 < Rcut**2
        out_zarr[str(ind)].append([
            t[i][mask], x[i][mask], y[i][mask], z[i][mask], vx[i][mask],
            vy[i][mask], vz[i][mask]
        ], axis=1)
    print('finished writing data after {} seconds'.format(time.time() -
                                                          startTfun))
# ----------------------------------------------------
# run
# ----------------------------------------------------
# generate folder for output (timestamp + random suffix so runs never collide)
startT = time.time()
str_startT = time.strftime("%Y%m%d_%H%M")
basedir = './'
fpath_out = basedir + 'run_' + str_startT + id_generator()
os.mkdir(fpath_out)
# compute gravitational parameter
mu = NS_mass * G_N
# check if switches are set up correctly: exactly one clump model must be on
if MC_switch * MC_NFW_switch + MC_switch * MC_PL_switch + AS_switch != 1:
    print(
        "you did not make a reasonable selection of the options for the axion clump (NFW_minicluster/PL_minicluster/axion star)"
    )
    print("aborting the code...")
    sys.exit()
# check if the minicluster is less dense than an dilute axion star
# this assumes that the axion mass is given by ax_mass
if MC_switch:
    if MC_NFW_switch and R90_NFW(MC_mass, rho_MC(MC_delta), MC_c) < R90_AS(
            MC_mass, ax_mass):
        print(
            "your minicluster is denser than an axion star for a {} eV axion".
            format(ax_mass))
        print("aborting the code...")
        sys.exit()
    elif MC_PL_switch and R90_PL(MC_mass, rho_MC(MC_delta)) < R90_AS(
            MC_mass, ax_mass):
        print(
            "your minicluster is denser than an axion star for a {} eV axion".
            format(ax_mass))
        print("aborting the code...")
        sys.exit()
# set up the mk_axclump function and calculate the disruption radius;
# mk_axclump samples particle positions/velocities for the selected profile
if MC_switch:
    AC_mass = MC_mass
    if MC_NFW_switch:
        AC_R90 = R90_NFW(MC_mass, rho_MC(MC_delta), MC_c)
        AC_vesc = np.sqrt(2. * G_N * AC_mass / AC_R90)

        def mk_axclump(x0, y0, z0, vx0, vy0, vz0, Np):
            # NFW positions + flat velocity distribution up to v_esc
            x, y, z = dens_dist_NFW(x0, y0, z0, AC_mass, rho_MC(MC_delta),
                                    MC_c, Np)
            vx, vy, vz = velocity_dist_flat(vx0, vy0, vz0, Np, AC_vesc)
            return x, y, z, vx, vy, vz
    elif MC_PL_switch:
        AC_R90 = R90_PL(MC_mass, rho_MC(MC_delta))
        AC_vesc = np.sqrt(2. * G_N * AC_mass / AC_R90)

        def mk_axclump(x0, y0, z0, vx0, vy0, vz0, Np):
            # power-law positions + flat velocity distribution up to v_esc
            x, y, z = dens_dist_PL(x0, y0, z0, AC_mass, rho_MC(MC_delta), Np)
            vx, vy, vz = velocity_dist_flat(vx0, vy0, vz0, Np, AC_vesc)
            return x, y, z, vx, vy, vz
elif AS_switch:
    AC_mass = AS_mass
    AC_R90 = R90_AS(AC_mass, ax_mass)
    AC_vesc = np.sqrt(2. * G_N * AC_mass / AC_R90)

    def mk_axclump(x0, y0, z0, vx0, vy0, vz0, Np):
        # sech-profile positions + flat velocity distribution up to v_esc
        x, y, z = dens_dist_sech(x0, y0, z0, AC_mass, ax_mass, Np)
        vx, vy, vz = velocity_dist_flat(vx0, vy0, vz0, Np, AC_vesc)
        return x, y, z, vx, vy, vz
# calculate Roche disruption radius
R_dis = AC_R90 * (2. * NS_mass / AC_mass)**(1. / 3.)  #[km]
# run the axion clump as a point particle until it either reaches the disruption radius or flies away from the NS
AC_x = np.array([AC_r0[0]])
AC_y = np.array([AC_r0[1]])
AC_z = np.array([AC_r0[2]])
AC_vx = np.array([AC_v0[0]])
AC_vy = np.array([AC_v0[1]])
AC_vz = np.array([AC_v0[2]])
t = np.array([0.])
flag = 0  # 0: integrating, 1: reached R_dis, 2: outbound
while flag == 0:
    x, y, z, vx, vy, vz, dt = update_r_v(np.array([AC_x[-1]]),
                                         np.array([AC_y[-1]]),
                                         np.array([AC_z[-1]]),
                                         np.array([AC_vx[-1]]),
                                         np.array([AC_vy[-1]]),
                                         np.array([AC_vz[-1]]), mu, NS_radius)
    AC_x = np.append(AC_x, x[0])
    AC_y = np.append(AC_y, y[0])
    AC_z = np.append(AC_z, z[0])
    AC_vx = np.append(AC_vx, vx[0])
    AC_vy = np.append(AC_vy, vy[0])
    AC_vz = np.append(AC_vz, vz[0])
    t = np.append(t, t[-1] + dt)
    if np.sqrt(
            x**2. + y**2. + z**2.
    ) < R_dis:  # check if axion clump has reached the disuption radius
        flag = 1
    elif x * vx + y * vy + z * vz > 0:  # check if axion clump is outbound
        flag = 2
# write results of initial calculation to file, and generate axion clump as collection of particles
print("Calculation of axion clump as point particle finished.")
if flag == 1:
    print("Your clump made it to the disruption radius")
    print("That took {:3E} years".format(t[-1] / 3.154e7))
    print("writing parameters and orbit to file")
    write_general_info()
    if out_format_switch == 1:
        write_pointParticle_orbit(AC_x, AC_y, AC_z, AC_vx, AC_vy, AC_vz, t)
    elif out_format_switch == 2:
        write_pointParticle_orbit_zarr(AC_x, AC_y, AC_z, AC_vx, AC_vy, AC_vz, t)
    print("generating axion clump as {} particles".format(int(Nparticles)))
    # replace the point particle by Nparticles sampled from the clump profile
    pAC_x, pAC_y, pAC_z, pAC_vx, pAC_vy, pAC_vz = mk_axclump(
        AC_x[-1], AC_y[-1], AC_z[-1], AC_vx[-1], AC_vy[-1], AC_vz[-1],
        Nparticles)
elif flag == 2:
    print("Your axion clump never came close enough to the neutron star")
    print("writing parameters and orbit to file")
    write_general_info()
    if out_format_switch == 1:
        write_pointParticle_orbit(AC_x, AC_y, AC_z, AC_vx, AC_vy, AC_vz, t)
    elif out_format_switch == 2:
        write_pointParticle_orbit_zarr(AC_x, AC_y, AC_z, AC_vx, AC_vy, AC_vz, t)
    print("aborting program...")
    sys.exit()
# create file structure for particle output
if out_format_switch == 1:
    # one text file per particle, pre-created with a header line
    os.mkdir(fpath_out + '/orbits')
    fout_orbit_names = [
        fpath_out + '/orbits/p_' + f'{i:06}' + '.txt' for i in range(Nparticles)
    ]
    for i in range(Nparticles):
        fo = open(fout_orbit_names[i], 'w')
        fo.write('# t[s] x[km] y[km] z[km] vx[km] vy[km] vz[km]\n')
        fo.close()
elif out_format_switch == 2:
    # one appendable (7, 0) array per particle in a shared zarr store
    out_zarr = zarr.open(fpath_out + '/orbits.zarr')
    compressor = LZ4()
    #compressor = Blosc(cname='lz4')
    for i in range(Nparticles):
        out_zarr.array(str(i),
                       np.empty((7, 0)),
                       chunks=((7, 25000)),
                       compressor=compressor)
# reset the clock
t = [0.]
# run the particles until all (except at most 5) are outbound and outside Rcut set in find_inds_active
t = np.full(pAC_x.shape, t[-1])
inds_active = find_inds_active(pAC_x, pAC_y, pAC_z, pAC_vx, pAC_vy, pAC_vz)
while len(inds_active) > 5:
    # integrate only the still-active particles in memory-sized chunks
    x_list, y_list, z_list, vx_list, vy_list, vz_list, t_list = run_particles_nsteps(
        pAC_x[inds_active], pAC_y[inds_active], pAC_z[inds_active],
        pAC_vx[inds_active], pAC_vy[inds_active], pAC_vz[inds_active],
        t[inds_active])
    if out_format_switch == 1:
        write_orbits_to_disk(x_list, y_list, z_list, vx_list, vy_list, vz_list,
                             t_list, inds_active)
    elif out_format_switch == 2:
        write_orbits_to_disk_zarr(x_list, y_list, z_list, vx_list, vy_list,
                                  vz_list, t_list, inds_active)
    # prepare next step: the last state of this chunk seeds the next one
    pAC_x[inds_active], pAC_y[inds_active], pAC_z[inds_active], pAC_vx[
        inds_active], pAC_vy[inds_active], pAC_vz[inds_active], t[
            inds_active] = x_list[-1], y_list[-1], z_list[-1], vx_list[
                -1], vy_list[-1], vz_list[-1], t_list[-1]
    inds_active = find_inds_active(pAC_x, pAC_y, pAC_z, pAC_vx, pAC_vy, pAC_vz)
|
from .shell import cast, call
from .routers.linux import LinuxRouter
from .routers.darwin import DarwinRouter
from .routers.windows import WindowsRouter
from .openers.linux import LinuxOpener
from .openers.darwin import DarwinOpener
from .openers.windows import WindowsOpener
from . import observer as gigalixir_observer
from . import user as gigalixir_user
from . import mfa as gigalixir_mfa
from . import app as gigalixir_app
from . import config as gigalixir_config
from . import permission as gigalixir_permission
from . import release as gigalixir_release
from . import app_activity as gigalixir_app_activity
from . import api_key as gigalixir_api_key
from . import ssh_key as gigalixir_ssh_key
from . import log_drain as gigalixir_log_drain
from . import payment_method as gigalixir_payment_method
from . import domain as gigalixir_domain
from . import invoice as gigalixir_invoice
from . import usage as gigalixir_usage
from . import database as gigalixir_database
from . import free_database as gigalixir_free_database
from . import canary as gigalixir_canary
from . import git
import click
import requests
import getpass
import stripe
import subprocess
import sys
import re
import uuid
import rollbar
import logging
import json
import netrc
import os
import platform
from functools import wraps
import pkg_resources
def _show_usage_error(self, file=None):
    """Replacement for click.exceptions.UsageError.show that prints the full
    command help (not just the usage line) before the error message."""
    if file is None:
        file = click._compat.get_text_stderr()
    color = None
    # BUGFIX: the original checked ``self.ctx is not None`` only to read the
    # color, then called ``self.ctx.get_help()`` unconditionally -- crashing
    # with AttributeError whenever a UsageError has no context attached.
    if self.ctx is not None:
        color = self.ctx.color
        click.echo(self.ctx.get_help() + '\n', file=file, color=color)
    click.echo('Error: %s' % self.format_message(), file=file, color=color)
# Monkey-patch: applies to every UsageError click raises from here on.
click.exceptions.UsageError.show = _show_usage_error
# Client-side (post_client_item) rollbar token; safe to ship with the CLI.
ROLLBAR_POST_CLIENT_ITEM = "40403cdd48904a12b6d8d27050b12343"
# kinda sorta duplicated in this file as an option to cli Command.
# we need this at the "top" level so that handle_exception has access to rollbar
# when it was in cli(), it didn't work. I guess that gets run a bit later, after
# the command not found exception is raised.
env = os.environ.get("GIGALIXIR_ENV", "prod")
# Import-time side effect: initialize rollbar.  Reporting is enabled only in
# prod; dev/test still initialize so report_* calls are harmless no-ops.
if env == "prod":
    rollbar.init(ROLLBAR_POST_CLIENT_ITEM, 'production', enabled=True, allow_logging_basic_config=False)
elif env == "dev":
    rollbar.init(ROLLBAR_POST_CLIENT_ITEM, 'development', enabled=False, allow_logging_basic_config=False)
elif env == "test":
    rollbar.init(ROLLBAR_POST_CLIENT_ITEM, 'test', enabled=False, allow_logging_basic_config=False)
else:
    raise Exception("Invalid GIGALIXIR_ENV")
def detect_app_name(f):
    """Decorator for click commands that take an ``app_name`` keyword.

    When the caller did not supply ``--app_name`` (or GIGALIXIR_APP), fill it
    in by inspecting the git remote via ``detect_app()``.
    """
    @wraps(f)
    def wrapper(*args, **kwds):
        app_name = kwds['app_name']
        if app_name is None:
            app_name = detect_app()
        kwds['app_name'] = app_name
        # BUGFIX: propagate the wrapped command's return value (the original
        # implementation silently dropped it).
        return f(*args, **kwds)
    return wrapper
def report_errors(f):
    """Decorator: report any exception raised by a CLI command to rollbar,
    log a one-line error, and exit with status 1 instead of a traceback."""
    @wraps(f)
    def wrapper(*args, **kwds):
        try:
            # Propagate the wrapped command's return value.
            return f(*args, **kwds)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit propagate instead of being reported as app errors.
            version = pkg_resources.get_distribution("gigalixir").version
            rollbar.report_exc_info(sys.exc_info(), payload_data={"version": version})
            logging.getLogger("gigalixir-cli").error(sys.exc_info()[1])
            sys.exit(1)
    return wrapper
def rollbar_fingerprint(e):
    """Group rollbar occurrences by the exception's message text.

    ``e`` is a ``sys.exc_info()``-style triple; element 1 is the exception
    instance.
    """
    return str(e[1])
# TODO: remove localhost from .netrc file
# Make ``-h`` work as a synonym for ``--help`` on every command.
CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"]}
# copied from https://stackoverflow.com/questions/52213375/python-click-exception-handling-under-setuptools/52214480#52214480
def CatchAllExceptions(cls, handler):
    """Wrap a click command class so ``handler(cmd, info_name, exc)`` is
    called for any exception raised during parsing or invocation.

    The original exception is always re-raised afterwards so the user still
    sees it; the handler only gets a chance to report it.
    """

    class Cls(cls):
        # Raw command-line arguments, captured for error reporting.
        _original_args = None

        def make_context(self, info_name, args, parent=None, **extra):
            # Remember the original command line before parsing can fail.
            self._original_args = ' '.join(args)
            try:
                return super().make_context(info_name, args, parent=parent, **extra)
            except Exception as exc:
                handler(self, info_name, exc)
                raise

        def invoke(self, ctx):
            try:
                return super().invoke(ctx)
            except Exception as exc:
                handler(self, ctx.info_name, exc)
                raise

    return Cls
def handle_exception(cmd, info_name, exc):
    """Report a failed CLI invocation to rollbar as a warning, including the
    original command line captured by CatchAllExceptions."""
    message = 'command:{} {} error:{}'.format(info_name, cmd._original_args, exc)
    rollbar.report_message(message, 'warning')
class AliasedGroup(click.Group):
    """click Group that resolves legacy command names to their canonical,
    namespaced equivalents (e.g. ``status`` -> ``ps``)."""

    # Legacy name -> canonical command name.  Hoisted to a class constant so
    # the table is built once instead of on every single lookup.
    ALIASES = {
        "configs": "config",
        "set_config": "deprecated:set_config",
        "databases": "pg",
        "scale_database": "pg:scale",
        "delete_database": "pg:destroy",
        "status": "ps",
        "scale": "ps:scale",
        "rollback": "releases:rollback",
        "remote_console": "ps:remote_console",
        "ssh": "ps:ssh",
        "migrate": "ps:migrate",
        "set_payment_method": "account:payment_method:set",
        "payment_method": "account:payment_method",
        "set_password": "<PASSWORD>:set",
        "change_password": "<PASSWORD>:password:<PASSWORD>",
        "reset_api_key": "account:api_key:reset",
        "upgrade": "account:upgrade",
        "log_drains": "drains",
        "delete_log_drain": "drains:remove",
        "ssh_keys": "account:ssh_keys",
        "add_log_drain": "drains:add",
        "add_ssh_key": "account:ssh_keys:add",
        "delete_ssh_key": "account:ssh_keys:remove",
        "add_domain": "domains:add",
        "send_email_confirmation_token": "account:confirmation:resend",
        "send_reset_password_token": "account:password:reset",
        "delete_app": "apps:destroy",
        "delete_permission": "access:remove",
        "permissions": "access",
        "delete_free_database": "deprecated:delete_free_database",
        "free_databases": "deprecated:free_databases",
        "create_free_database": "deprecated:create_free_database",
        "delete_domain": "domains:remove",
        "delete_config": "config:unset",
        "add_permission": "access:add",
        "create_database": "pg:create",
        "set_git_remote": "git:remote",
        "invoices": "account:invoices",
        "current_period_usage": "account:usage",
        "observer": "ps:observer",
        # permanent
        "create": "apps:create",
        "restart": "ps:restart",
    }

    def get_command(self, ctx, cmd_name):
        """Resolve *cmd_name* directly, then fall back to the alias table.

        Returns None when the name is neither a command nor an alias.
        """
        rv = click.Group.get_command(self, ctx, cmd_name)
        if rv is not None:
            return rv
        alias = self.ALIASES.get(cmd_name)
        if alias is None:
            return None
        return click.Group.get_command(self, ctx, alias)
def detect_app():
    """Infer the app name from the gigalixir git remote of the current repo.

    Raises a user-friendly Exception when no gigalixir remote is found or
    git is unavailable.
    """
    try:
        git.check_for_git()
        remotes = call("git remote -v")
        # First occurrence of git.gigalixir.com/<name> followed by a space:
        #   git.gigalixir.com/foo.git
        #   git.gigalixir.com/foo.git/
        #   git.gigalixir.com/foo
        match = re.search('git.gigalixir.com/(.*) ', remotes)
        repo_name = match.group(1)
        # Drop a trailing ".git" (and anything after it, e.g. a slash).
        git_pos = repo_name.find(".git")
        return repo_name if git_pos < 0 else repo_name[:git_pos]
    except (AttributeError, subprocess.CalledProcessError):
        # AttributeError: re.search returned None (no gigalixir remote).
        raise Exception("Could not detect app name. Try passing the app name explicitly with the `-a` flag or create an app with `gigalixir create`.")
@click.group(cls=AliasedGroup, context_settings=CONTEXT_SETTINGS)
# @click.group(cls=CatchAllExceptions(AliasedGroup, handler=handle_exception), context_settings=CONTEXT_SETTINGS)
@click.option('--env', envvar='GIGALIXIR_ENV', default='prod', help="GIGALIXIR environment [prod, dev, test].")
@click.pass_context
def cli(ctx, env):
    # Root group: configures logging, the API host, stripe, and the
    # platform-specific router/opener, then stashes them on ctx.obj for
    # every subcommand.  (No docstring: it would change --help output.)
    ctx.obj = {}
    logging.basicConfig(format='%(message)s')
    logging.getLogger("gigalixir-cli").setLevel(logging.INFO)
    if env == "prod":
        stripe.api_key = '<KEY>'
        host = "https://api.gigalixir.com"
    elif env == "dev":
        stripe.api_key = '<KEY>'
        host = "http://localhost:4000"
    elif env == "test":
        stripe.api_key = '<KEY>'
        # gets intercepted in tests
        host = "https://api.gigalixir.com"
    else:
        raise Exception("Invalid GIGALIXIR_ENV")
    ctx.obj['host'] = host
    ctx.obj['env'] = env

    system = platform.system().lower()  # linux, darwin, or windows
    if system == "linux":
        router, opener = LinuxRouter(), LinuxOpener()
    elif system == "darwin":
        router, opener = DarwinRouter(), DarwinOpener()
    elif system == "windows":
        # Some Windows environments lack HOME; fall back to USERPROFILE.
        if 'HOME' not in os.environ:
            os.environ['HOME'] = os.environ['USERPROFILE']
        router, opener = WindowsRouter(), WindowsOpener()
    else:
        raise Exception("Unknown platform: %s" % system)
    ctx.obj['router'] = router
    ctx.obj['opener'] = opener
# class TestException(Exception):
# pass
# @cli.command(name="test")
# @click.pass_context
# @report_errors
# def app_info(ctx):
# """
# Test command for debugging
# """
# raise TestException("Test Exception")
def print_help(ctx, subcommand):
    """Echo top-level help when *subcommand* is None, otherwise that
    subcommand's own help (or a not-found message)."""
    if subcommand is None:
        click.echo(ctx.parent.get_help(), color=ctx.color)
        return
    subcommand_obj = cli.get_command(ctx, subcommand)
    if subcommand_obj is None:
        click.echo("command %s not found" % subcommand)
        return
    # Point the context at the subcommand so the usage line is correct.
    ctx.info_name = subcommand
    click.echo(subcommand_obj.get_help(ctx))
@cli.command()
@click.argument('subcommand', required=False)
@click.pass_context
@report_errors
def help(ctx, subcommand):
    """
    Show commands and descriptions.
    """
    # Delegates to print_help(); shows top-level help when SUBCOMMAND is omitted.
    print_help(ctx, subcommand)
@cli.command(name='ps')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def status(ctx, app_name):
    """
    Current app status.
    """
    # Thin wrapper: API host comes from the cli() group via ctx.obj.
    gigalixir_app.status(ctx.obj['host'], app_name)
@cli.command(name='pg:scale')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-d', '--database_id', required=True)
@click.option('-s', '--size', type=float, default=0.6, help='Size of the database can be 0.6, 1.7, 4, 8, 16, 32, 64, or 128.')
@click.pass_context
@report_errors
@detect_app_name
def scale_database(ctx, app_name, database_id, size):
    """
    Scale database. Find the database id by running `gigalixir pg`
    """
    # Thin wrapper around the database API client.
    gigalixir_database.scale(ctx.obj['host'], app_name, database_id, size)
@cli.command(name='ps:scale')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-r', '--replicas', type=int, help='Number of replicas to run.')
@click.option('-s', '--size', type=float, help='Size of each replica between 0.5 and 128 in increments of 0.1.')
@click.pass_context
@report_errors
@detect_app_name
def scale(ctx, app_name, replicas, size):
    """
    Scale app.
    """
    # detect_app_name should have filled app_name; guard against empty string anyway.
    if not app_name:
        raise Exception("app_name is required")
    gigalixir_app.scale(ctx.obj['host'], app_name, replicas, size)
@cli.command(name='releases:rollback')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-r', '--version', default=None, help='The version of the release to revert to. Use gigalixir get releases to find the version. If omitted, this defaults to the second most recent release.')
@click.pass_context
@report_errors
@detect_app_name
def rollback(ctx, app_name, version):
    """
    Rollback to a previous release.
    """
    # version=None lets the server pick the second most recent release.
    gigalixir_app.rollback(ctx.obj['host'], app_name, version)
@cli.command(name='ps:remote_console')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-o', '--ssh_opts', default="", help='Command-line options to pass to ssh.')
@click.option('-c', '--ssh_cmd', default="ssh", help='Which ssh command to use.')
@click.pass_context
@report_errors
@detect_app_name
def remote_console(ctx, app_name, ssh_opts, ssh_cmd):
    """
    Drop into a remote console on a live production node.
    """
    # Thin wrapper: opens an ssh session via the app API client.
    gigalixir_app.remote_console(ctx.obj['host'], app_name, ssh_opts, ssh_cmd)
@cli.command(name='ps:run')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.argument('command', nargs=-1)
@click.option('-o', '--ssh_opts', default="", help='Command-line options to pass to ssh.')
@click.option('-c', '--ssh_cmd', default="ssh", help='Which ssh command to use.')
@click.pass_context
@report_errors
@detect_app_name
def ps_run(ctx, app_name, ssh_opts, ssh_cmd, command):
    """
    Run a shell command on your running container.
    """
    # COMMAND is a variadic tuple; splat it through to the ssh runner.
    gigalixir_app.ps_run(ctx.obj['host'], app_name, ssh_opts, ssh_cmd, *command)
@cli.command(name='ps:ssh')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.argument('command', nargs=-1)
@click.option('-o', '--ssh_opts', default="", help='Command-line options to pass to ssh.')
@click.option('-c', '--ssh_cmd', default="ssh", help='Which ssh command to use.')
@click.pass_context
@report_errors
@detect_app_name
def ssh(ctx, app_name, ssh_opts, ssh_cmd, command):
    """
    Ssh into app. Be sure you added your ssh key using gigalixir create ssh_key. Configs are not loaded automatically.
    """
    # COMMAND is a variadic tuple; empty tuple gives an interactive session.
    gigalixir_app.ssh(ctx.obj['host'], app_name, ssh_opts, ssh_cmd, *command)
@cli.command(name='ps:distillery')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.argument('distillery_command', nargs=-1)
@click.option('-o', '--ssh_opts', default="", help='Command-line options to pass to ssh.')
@click.option('-c', '--ssh_cmd', default="ssh", help='Which ssh command to use.')
@click.pass_context
@report_errors
@detect_app_name
def distillery(ctx, app_name, ssh_opts, ssh_cmd, distillery_command):
    """
    Runs a distillery command to run on the remote container e.g. ping, remote_console. Be sure you've added your ssh key.
    """
    # Thin wrapper: forwards the variadic distillery command over ssh.
    gigalixir_app.distillery_command(ctx.obj['host'], app_name, ssh_opts, ssh_cmd, *distillery_command)
@cli.command(name='ps:restart')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def restart(ctx, app_name):
    """
    Restart app.
    """
    # Thin wrapper around the app API client.
    gigalixir_app.restart(ctx.obj['host'], app_name)
# gigalixir run mix ecto.migrate
@cli.command()
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.argument('command', nargs=-1)
@click.pass_context
@report_errors
@detect_app_name
def run(ctx, app_name, command):
    """
    Run shell command as a job in a separate process. Useful for migrating databases before the app is running.
    """
    # Unlike ps:run, the command tuple is passed whole (server-side job).
    gigalixir_app.run(ctx.obj['host'], app_name, command)
@cli.command(name='ps:migrate')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-m', '--migration_app_name', default=None, help='For umbrella apps, specify which inner app to migrate.')
@click.option('-o', '--ssh_opts', default="", help='Command-line options to pass to ssh.')
@click.option('-c', '--ssh_cmd', default="ssh", help='Which ssh command to use.')
@click.pass_context
@report_errors
@detect_app_name
def ps_migrate(ctx, app_name, migration_app_name, ssh_opts, ssh_cmd):
    """
    Run Ecto Migrations on a production node.
    """
    # Thin wrapper: runs migrations over ssh on a live node.
    gigalixir_app.migrate(ctx.obj['host'], app_name, migration_app_name, ssh_opts, ssh_cmd)
# @update.command()
@cli.command(name='account:payment_method:set')
@click.option('--card_number', prompt=True)
@click.option('--card_exp_month', prompt=True)
@click.option('--card_exp_year', prompt=True)
@click.option('--card_cvc', prompt=True)
@click.pass_context
@report_errors
def set_payment_method(ctx, card_number, card_exp_month, card_exp_year, card_cvc):
    """
    Set your payment method.
    """
    # Card details go to the payment_method client (stripe tokenization there).
    gigalixir_payment_method.update(ctx.obj['host'], card_number, card_exp_month, card_exp_year, card_cvc)
@cli.command(name='account:upgrade')
@click.option('-y', '--yes', is_flag=True)
@click.pass_context
@report_errors
def upgrade(ctx, yes):
    """
    Upgrade from free tier to standard tier.
    """
    # -y skips the interactive confirmation (for scripting).
    if yes or click.confirm('Are you sure you want to upgrade to the standard tier?'):
        gigalixir_user.upgrade(ctx.obj['host'])
@cli.command(name='account:destroy')
@click.option('-y', '--yes', is_flag=True)
@click.option('-e', '--email', prompt=True)
@click.option('-p', '--password', prompt=True, hide_input=True, confirmation_prompt=False)
@click.pass_context
@report_errors
def destroy_account(ctx, yes, email, password):
    """
    Destroy your account
    """
    # Irreversible: warn, then require confirmation unless -y is given.
    logging.getLogger("gigalixir-cli").info("WARNING: Deleting an account can not be undone.")
    if yes or click.confirm('Are you sure you want to delete your account (%s)?' % email):
        gigalixir_user.delete(ctx.obj['host'], email, password)
# @reset.command()
@cli.command(name='account:password:set')
@click.option('-t', '--token', prompt=True)
@click.option('-p', '--password', prompt=True, hide_input=True, confirmation_prompt=False)
@click.pass_context
@report_errors
def set_password(ctx, token, password):
    """
    Set password using reset password token. Deprecated. Use the web form instead.
    """
    # Deprecated flow: consumes the token from account:password:reset.
    gigalixir_user.reset_password(ctx.obj['host'], token, password)
# @update.command()
@cli.command(name='account:password:change')
@click.option('-p', '--current_password', prompt=True, hide_input=True, confirmation_prompt=False)
@click.option('-n', '--new_password', prompt=True, hide_input=True, confirmation_prompt=False)
@click.pass_context
@report_errors
def change_password(ctx, current_password, new_password):
    """
    Change password.
    """
    # Thin wrapper around the user API client.
    gigalixir_user.change_password(ctx.obj['host'], current_password, new_password)
@cli.command()
@click.pass_context
@report_errors
def account(ctx):
    """
    Account information.
    """
    # Thin wrapper around the user API client.
    gigalixir_user.account(ctx.obj['host'])
# @update.command()
@cli.command(name='account:api_key:reset')
@click.option('-p', '--password', prompt=True, hide_input=True, confirmation_prompt=False)
@click.option('-y', '--yes', is_flag=True)
@click.pass_context
@report_errors
def reset_api_key(ctx, password, yes):
    """
    Regenerate a replacement api key.
    """
    # env is passed so the client can update the right .netrc machine entry.
    gigalixir_api_key.regenerate(ctx.obj['host'], password, yes, ctx.obj['env'])
@cli.command(name='account:mfa:activate')
@click.option('-y', '--yes', is_flag=True)
@click.pass_context
@report_errors
def mfa_activate(ctx, yes):
    """
    Start the multi-factor authentication activation process.
    """
    # Thin wrapper around the mfa API client.
    gigalixir_mfa.activate(ctx.obj['host'], yes)
@cli.command(name='account:mfa:deactivate')
@click.option('-y', '--yes', is_flag=True)
@click.pass_context
@report_errors
def mfa_deactivate(ctx, yes):
    """
    Deactivate multi-factor authentication.
    """
    # -y skips the interactive confirmation (for scripting).
    if yes or click.confirm('Are you sure you want to deactivate multi-factor authentication?'):
        gigalixir_mfa.deactivate(ctx.obj['host'])
@cli.command(name='account:mfa:recovery_codes:regenerate')
@click.option('-y', '--yes', is_flag=True)
@click.pass_context
@report_errors
def mfa_recovery_codes_regenerate(ctx, yes):
    """
    Regenerate multi-factor authentication recovery codes.
    """
    # BUGFIX: this function was named ``mfa_deactivate``, redefining (and
    # shadowing) the account:mfa:deactivate handler defined just above at
    # module level.  The CLI command name is unchanged.
    gigalixir_mfa.regenerate_recovery_codes(ctx.obj['host'], yes)
@cli.command()
@click.pass_context
@report_errors
def logout(ctx):
    """
    Logout
    """
    # Only needs env: logout clears the local .netrc entry, no API call host.
    gigalixir_user.logout(ctx.obj['env'])
@cli.command()
@click.option('-e', '--email', prompt=True)
@click.option('-p', '--password', prompt=True, hide_input=True, confirmation_prompt=False)
@click.option('-t', '--mfa_token', prompt=False) # we handle prompting if needed, not always needed
@click.option('-y', '--yes', is_flag=True)
@click.pass_context
@report_errors
def login(ctx, email, password, yes, mfa_token):
    """
    Login and receive an api key.
    """
    # click binds options by keyword, so the (yes, mfa_token) parameter order is fine.
    gigalixir_user.login(ctx.obj['host'], email, password, yes, ctx.obj['env'], mfa_token)
# @get.command()
@cli.command()
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-n', '--num')
@click.option('-t', '--no_tail', is_flag=True)
@click.pass_context
@report_errors
@detect_app_name
def logs(ctx, app_name, num, no_tail):
    """
    Stream logs from app.
    """
    # Streams until interrupted unless --no_tail is given.
    gigalixir_app.logs(ctx.obj['host'], app_name, num, no_tail)
# @get.command()
@cli.command(name='account:payment_method')
@click.pass_context
@report_errors
def payment_method(ctx):
    """
    Get your payment method.
    """
    # Thin wrapper around the payment_method API client.
    gigalixir_payment_method.get(ctx.obj['host'])
# @get.command()
@cli.command(name='drains')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def log_drains(ctx, app_name):
    """
    Get your log drains.
    """
    # Thin wrapper around the log_drain API client.
    gigalixir_log_drain.get(ctx.obj['host'], app_name)
# @get.command()
@cli.command(name='account:ssh_keys')
@click.pass_context
@report_errors
def ssh_keys(ctx):
    """
    Get your ssh keys.
    """
    # Thin wrapper around the ssh_key API client.
    gigalixir_ssh_key.get(ctx.obj['host'])
@cli.command(name="apps:info")
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def app_info(ctx, app_name):
    """
    Get app info
    """
    # Thin wrapper around the app API client.
    gigalixir_app.info(ctx.obj['host'], app_name)
# @get.command()
@cli.command()
@click.pass_context
@report_errors
def apps(ctx):
    """
    Get apps.
    """
    # Lists every app the authenticated user can access.
    gigalixir_app.get(ctx.obj['host'])
@cli.command(name='apps:activity')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def app_activity(ctx, app_name):
    """
    Get activity for an app.
    """
    # Thin wrapper around the app_activity API client.
    gigalixir_app_activity.get(ctx.obj['host'], app_name)
# @get.command()
@cli.command()
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def releases(ctx, app_name):
    """
    Get previous releases for app.
    """
    # Thin wrapper around the release API client.
    gigalixir_release.get(ctx.obj['host'], app_name)
# @get.command()
@cli.command(name='access')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def permissions(ctx, app_name):
    """
    Get permissions for app.
    """
    # Thin wrapper around the permission API client.
    gigalixir_permission.get(ctx.obj['host'], app_name)
# @create.command()
@cli.command(name='drains:add')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.argument('url')
@click.pass_context
@report_errors
@detect_app_name
def add_log_drain(ctx, app_name, url):
    """
    Add a drain to send your logs to.
    """
    # URL is the drain endpoint (e.g. a syslog or HTTPS collector).
    gigalixir_log_drain.create(ctx.obj['host'], app_name, url)
# @create.command()
@cli.command(name='account:ssh_keys:add')
@click.argument('ssh_key')
@click.pass_context
@report_errors
def add_ssh_key(ctx, ssh_key):
    """
    Add an ssh key. Make sure you use the actual key and not the filename as the argument. For example,
    don't use ~/.ssh/id_rsa.pub, use the contents of that file.
    """
    # SSH_KEY is the public key text itself, not a path.
    gigalixir_ssh_key.create(ctx.obj['host'], ssh_key)
# @create.command()
@cli.command(name='domains:add')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.argument('fully_qualified_domain_name')
@click.pass_context
@report_errors
@detect_app_name
def add_domain(ctx, app_name, fully_qualified_domain_name):
    """
    Adds a custom domain name to your app.
    """
    # Thin wrapper around the domain API client.
    gigalixir_domain.create(ctx.obj['host'], app_name, fully_qualified_domain_name)
# @create.command()
@cli.command(name='deprecated:set_config')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.argument('key')
@click.argument('value')
@click.pass_context
@report_errors
@detect_app_name
def set_config(ctx, app_name, key, value):
    """
    Set an app configuration/environment variable.
    """
    # Deprecated: superseded by config:set, which takes KEY=VALUE pairs.
    gigalixir_config.create(ctx.obj['host'], app_name, key, value)
@cli.command(name="config:copy")
@click.option('-s', '--src_app_name', required=True)
@click.option('-d', '--dst_app_name', required=True)
@click.option('-y', '--yes', is_flag=True)
@click.pass_context
@report_errors
# no detecting app name for this one
def config_copy(ctx, src_app_name, dst_app_name, yes):
    """
    Copy configuration variables from one app to another
    and restarts your app.
    """
    # Destructive on the destination: configs may be overwritten, hence confirm.
    logging.getLogger("gigalixir-cli").info("WARNING: This will copy all configs from %s to %s. This might overwrite some configs in %s." % (src_app_name, dst_app_name, dst_app_name))
    if yes or click.confirm('Are you sure you want to continue?'):
        gigalixir_config.copy(ctx.obj['host'], src_app_name, dst_app_name)
@cli.command(name="config:set")
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.argument('assignments', nargs=-1)
@click.pass_context
@report_errors
@detect_app_name
def config_set(ctx, app_name, assignments):
    """
    Set configuration variables and restarts your app.
    ASSIGNMENTS are of the form KEY=VALUE.
    For example,
    gigalixir config:set KEY0=VALUE0 KEY1="VALUE 1"
    """
    # Parse KEY=VALUE pairs; values may themselves contain '=' so split once.
    # (Removed the unused ``colored_keys`` local from the original.)
    configs = {}
    for assignment in assignments:
        try:
            key, value = assignment.split('=', 1)
            configs[key] = value
        except ValueError:
            # Not of the form KEY=VALUE: show usage, then let report_errors exit.
            print_help(ctx, "config:set")
            raise
    gigalixir_config.create_multiple(ctx.obj['host'], app_name, configs)
# @get.command()
@cli.command(name='account:confirmation:resend')
@click.option('-e', '--email', prompt=True)
@click.pass_context
@report_errors
def send_email_confirmation_token(ctx, email):
    """
    Regenerate a email confirmation token and send to email.
    """
    # Thin wrapper around the user API client.
    gigalixir_user.get_confirmation_token(ctx.obj['host'], email)
# @get.command()
@cli.command(name='account:password:reset')
@click.option('-e', '--email', prompt=True)
@click.pass_context
@report_errors
def send_reset_password_token(ctx, email):
    """
    Send reset password token to email.
    """
    # Token is consumed later by account:password:set.
    gigalixir_user.get_reset_password_token(ctx.obj['host'], email)
# @get.command()
@cli.command(name='account:email:set')
@click.option('-p', '--password', prompt=True, hide_input=True, confirmation_prompt=False)
@click.option('-e', '--email', prompt=True)
@click.pass_context
@report_errors
def set_email(ctx, password, email):
    """
    Set account email
    """
    # BUGFIX: this function was named ``send_reset_password_token``,
    # redefining (and shadowing) the account:password:reset handler defined
    # just above at module level.  The CLI command name is unchanged.
    gigalixir_user.set_email(ctx.obj['host'], password, email)
# @get.command()
@cli.command(name='pg')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def databases(ctx, app_name):
    """
    Get databases for your app.
    """
    # Lists both free and standard databases.
    gigalixir_database.get(ctx.obj['host'], app_name)
# @get.command()
# deprecated. pg/databases above lists free and standard.
@cli.command(name='deprecated:free_databases')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def free_databases(ctx, app_name):
    """
    Get free databases for your app.
    """
    # Deprecated: `gigalixir pg` lists free and standard databases together.
    gigalixir_free_database.get(ctx.obj['host'], app_name)
# @get.command()
@cli.command()
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def domains(ctx, app_name):
    """
    Get custom domains for your app.
    """
    # Thin wrapper around the domain API client.
    gigalixir_domain.get(ctx.obj['host'], app_name)
# @get.command()
@cli.command()
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def config(ctx, app_name):
    """
    Get app configuration/environment variables.
    """
    # Thin wrapper around the config API client.
    gigalixir_config.get(ctx.obj['host'], app_name)
# @delete.command()
@cli.command(name='drains:remove')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.argument('drain_id')
@click.pass_context
@report_errors
@detect_app_name
def delete_log_drain(ctx, app_name, drain_id):
    """
    Deletes a log drain. Find the drain_id from gigalixir log_drains.
    """
    # Thin wrapper around the log_drain API client.
    gigalixir_log_drain.delete(ctx.obj['host'], app_name, drain_id)
# @delete.command()
@cli.command(name='account:ssh_keys:remove')
@click.argument('key_id')
@click.pass_context
@report_errors
def delete_ssh_key(ctx, key_id):
    """
    Deletes your ssh key. Find the key_id from gigalixir get_ssh_keys.
    """
    # Thin wrapper around the ssh_key API client.
    gigalixir_ssh_key.delete(ctx.obj['host'], key_id)
@cli.command(name='apps:destroy')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-y', '--yes', is_flag=True)
@click.pass_context
@report_errors
@detect_app_name
def delete_app(ctx, app_name, yes):
    """
    Deletes an app. Can not be undone.
    """
    # Irreversible: warn, then require confirmation unless -y is given.
    logging.getLogger("gigalixir-cli").info("WARNING: Deleting an app can not be undone.")
    if yes or click.confirm('Do you want to delete your app (%s)?' % app_name):
        gigalixir_app.delete(ctx.obj['host'], app_name)
# @delete.command()
@cli.command(name='access:remove')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.argument('email')
@click.pass_context
@report_errors
@detect_app_name
def delete_permission(ctx, app_name, email):
    """
    Denies user access to app.
    """
    # Thin wrapper around the permission API client.
    gigalixir_permission.delete(ctx.obj['host'], app_name, email)
# @delete.command()
@cli.command(name='pg:destroy')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-y', '--yes', is_flag=True)
@click.option('-d', '--database_id', required=True)
@click.pass_context
@report_errors
@detect_app_name
def delete_database(ctx, app_name, yes, database_id):
    """
    Delete database. Find the database id by running `gigalixir pg`
    """
    # Irreversible and destroys backups too: warn, then confirm unless -y.
    logging.getLogger("gigalixir-cli").info("WARNING: Deleting your database will destroy all your data and backups.")
    logging.getLogger("gigalixir-cli").info("WARNING: This can not be undone.")
    logging.getLogger("gigalixir-cli").info("WARNING: Please make sure you backup your data first.")
    if yes or click.confirm('Do you want to delete your database and all backups?'):
        gigalixir_database.delete(ctx.obj['host'], app_name, database_id)
# @delete.command()
# is this command still needed? i think delete_database/pg:destroy above can delete free databases?
@cli.command(name='deprecated:delete_free_database')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-y', '--yes', is_flag=True)
@click.option('-d', '--database_id', required=True)
@click.pass_context
@report_errors
@detect_app_name
def delete_free_database(ctx, app_name, yes, database_id):
    """
    Delete free database. Find the database id by running `gigalixir pg`
    """
    # Deprecated: pg:destroy can likely delete free databases too (see comment above).
    logging.getLogger("gigalixir-cli").info("WARNING: Deleting your database will destroy all your data.")
    logging.getLogger("gigalixir-cli").info("WARNING: This can not be undone.")
    logging.getLogger("gigalixir-cli").info("WARNING: Please make sure you backup your data first.")
    if yes or click.confirm('Do you want to delete your database?'):
        gigalixir_free_database.delete(ctx.obj['host'], app_name, database_id)
# @delete.command()
@cli.command(name='domains:remove')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.argument('fully_qualified_domain_name')
@click.pass_context
@report_errors
@detect_app_name
def delete_domain(ctx, app_name, fully_qualified_domain_name):
    """
    Delete custom domain from your app.
    """
    # Thin wrapper around the domain API client.
    gigalixir_domain.delete(ctx.obj['host'], app_name, fully_qualified_domain_name)
# @delete.command()
@cli.command(name='config:unset')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.argument('key')
@click.pass_context
@report_errors
@detect_app_name
def delete_config(ctx, app_name, key):
    """
    Delete app configuration/environment variables.
    """
    # Thin wrapper around the config API client.
    gigalixir_config.delete(ctx.obj['host'], app_name, key)
# @create.command()
@cli.command(name='access:add')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.argument('email')
@click.pass_context
@report_errors
@detect_app_name
def add_permission(ctx, app_name, email):
    """
    Grants a user permission to deploy an app.
    """
    # Thin wrapper around the permission API client.
    gigalixir_permission.create(ctx.obj['host'], app_name, email)
@cli.command(name='pg:psql')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def pg_psql(ctx, app_name):
    """
    Connect to the database using psql
    """
    # Opens an interactive psql session via the database client.
    gigalixir_database.psql(ctx.obj['host'], app_name)
@cli.command(name='pg:create')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-s', '--size', type=float, default=0.6, help='Size of the database can be 0.6, 1.7, 4, 8, 16, 32, 64, or 128.')
@click.option('-c', '--cloud')
@click.option('-r', '--region')
@click.option('-f', '--free', is_flag=True)
@click.option('-y', '--yes', is_flag=True)
@click.pass_context
@report_errors
@detect_app_name
def create_database(ctx, app_name, size, cloud, region, free, yes):
    """
    Create a new database for app.
    """
    if free:
        # Free tier runs only on gcp/us-central1, so cloud/region conflict.
        # (idiom fix: ``is not None`` instead of ``!= None``)
        if cloud is not None or region is not None:
            raise Exception("Sorry, free tier databases only run on gcp in us-central1. Try creating a standard database instead.")
        if yes or click.confirm("A word of caution: Free tier databases are not suitable for production and migrating from a free db to a standard db is not trivial. Do you wish to continue?"):
            gigalixir_free_database.create(ctx.obj['host'], app_name)
    else:
        gigalixir_database.create(ctx.obj['host'], app_name, size, cloud, region)
@cli.command(name='deprecated:create_free_database')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def create_free_database(ctx, app_name):
    """
    Create a new free database for app.
    """
    # Deprecated: use `gigalixir pg:create --free` instead.
    gigalixir_free_database.create(ctx.obj['host'], app_name)
# @create.command()
@cli.command(name='git:remote')
@click.argument('app_name')
@click.pass_context
@report_errors
def set_git_remote(ctx, app_name):
    """
    Set the gigalixir git remote.
    """
    # APP_NAME is a required positional here (no auto-detection).
    gigalixir_app.set_git_remote(ctx.obj['host'], app_name)
# @create.command()
@cli.command(name='apps:create')
@click.option('-n', '--name')
@click.option('-c', '--cloud')
@click.option('-r', '--region')
@click.option('-s', '--stack')
@click.pass_context
@report_errors
def create(ctx, name, cloud, region, stack):
    """
    Create a new app.
    """
    # All options optional; server picks defaults (incl. a generated name).
    gigalixir_app.create(ctx.obj['host'], name, cloud, region, stack)
@cli.command(name='account:invoices')
@click.pass_context
@report_errors
def invoices(ctx):
    """
    List all previous invoices.
    """
    # Thin wrapper around the invoice API client.
    gigalixir_invoice.get(ctx.obj['host'])
@cli.command(name='account:usage')
@click.pass_context
@report_errors
def current_period_usage(ctx):
    """
    See the usage so far this month.
    """
    # Thin wrapper around the usage API client.
    gigalixir_usage.get(ctx.obj['host'])
# @create.command()
@cli.command()
@click.option('--email')
@click.option('-p', '--password')
@click.option('-y', '--accept_terms_of_service_and_privacy_policy', is_flag=True)
@click.pass_context
@report_errors
def signup(ctx, email, password, accept_terms_of_service_and_privacy_policy):
    """
    Sign up for a new account.
    """
    # ToS/privacy acceptance is mandatory; -y skips the interactive prompt.
    if not accept_terms_of_service_and_privacy_policy:
        logging.getLogger("gigalixir-cli").info("GIGALIXIR Terms of Service: https://www.gigalixir.com/terms")
        logging.getLogger("gigalixir-cli").info("GIGALIXIR Privacy Policy: https://www.gigalixir.com/privacy")
        if not click.confirm('Do you accept the Terms of Service and Privacy Policy?'):
            raise Exception("You must accept the Terms of Service and Privacy Policy to continue.")
    # Prompt for and validate missing credentials up front so the server
    # round-trip only happens with plausible input.
    # (idiom fix: ``is None`` instead of ``== None``)
    if email is None:
        email = click.prompt('Email')
    gigalixir_user.validate_email(ctx.obj['host'], email)
    if password is None:
        password = click.prompt('Password', hide_input=True)
    gigalixir_user.validate_password(ctx.obj['host'], password)
    gigalixir_user.create(ctx.obj['host'], email, password, accept_terms_of_service_and_privacy_policy)
@cli.command(name='ps:observer')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('--cookie')
@click.option('-o', '--ssh_opts', default="", help='Command-line options to pass to ssh.')
@click.option('-c', '--ssh_cmd', default="ssh", help='Which ssh command to use.')
@click.pass_context
@report_errors
@detect_app_name
def observer(ctx, app_name, cookie, ssh_opts, ssh_cmd):
    """
    Launch remote production observer.
    """
    # BUGFIX: '-c' was declared for BOTH --cookie and --ssh_cmd, making the
    # short flag ambiguous.  It is kept on --ssh_cmd for consistency with
    # every other ps:* command; use the long form --cookie to set the cookie.
    gigalixir_observer.observer(ctx, app_name, cookie, ssh_opts, ssh_cmd)
@cli.command()
@click.pass_context
@report_errors
def version(ctx):
    """
    Show the CLI version.
    """
    dist = pkg_resources.get_distribution("gigalixir")
    click.echo(dist.version)
@cli.command(name='open')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def open_app(ctx, app_name):
    """
    Open the app at https://<app_name>.gigalixirapp.com in the default browser.
    """
    # `opener` is injected on the context object; presumably a webbrowser-like
    # interface — confirm against CLI setup code.
    ctx.obj['opener'].open("https://%s.gigalixirapp.com" % app_name)
@cli.command(name='pg:backups')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-d', '--database_id', required=True)
@click.pass_context
@report_errors
@detect_app_name
def pg_backups(ctx, app_name, database_id):
    """
    List available backups. Find the database id by running `gigalixir pg`
    """
    host = ctx.obj['host']
    gigalixir_database.backups(host, app_name, database_id)
@cli.command(name='pg:backups:restore')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-d', '--database_id', required=True)
@click.option('-b', '--backup_id', required=True)
@click.pass_context
@report_errors
@detect_app_name
def pg_backups_restore(ctx, app_name, database_id, backup_id):
    """
    Restore database from backup. Find the database id by running `gigalixir pg`
    """
    host = ctx.obj['host']
    gigalixir_database.restore(host, app_name, database_id, backup_id)
@cli.command(name='stack:set')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-s', '--stack', required=True)
@click.pass_context
@report_errors
@detect_app_name
def set_stack(ctx, app_name, stack):
    """
    Set your app stack.
    """
    host = ctx.obj['host']
    gigalixir_app.set_stack(host, app_name, stack)
@cli.command(name='canary')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.pass_context
@report_errors
@detect_app_name
def canary(ctx, app_name):
    """
    Get canary
    """
    host = ctx.obj['host']
    gigalixir_canary.get(host, app_name)
@cli.command(name='canary:set')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-c', '--canary_name')
@click.option('-w', '--weight', type=int)
@click.pass_context
@report_errors
@detect_app_name
def set_canary(ctx, app_name, canary_name, weight):
    """
    Set a canary and weight for your app.
    """
    host = ctx.obj['host']
    gigalixir_canary.set(host, app_name, canary_name, weight)
@cli.command(name='canary:unset')
@click.option('-a', '--app_name', envvar="GIGALIXIR_APP")
@click.option('-c', '--canary_name', required=True)
@click.pass_context
@report_errors
@detect_app_name
def unset_canary(ctx, app_name, canary_name):
    """
    Unset a canary for your app.
    """
    host = ctx.obj['host']
    gigalixir_canary.delete(host, app_name, canary_name)
|
<filename>falmer/events/migrations/0006_auto_20170817_1119.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-17 11:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
    """Add event taxonomy models (BrandingPeriod, Bundle, Category, Type)
    and new descriptive fields and foreign keys on Event.

    Auto-generated by Django 1.11.3; keep the operation order intact.
    """

    dependencies = [
        ('events', '0005_event_url'),
    ]

    operations = [
        # Lookup tables created first so the Event FKs below can reference them.
        migrations.CreateModel(
            name='BrandingPeriod',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=72)),
                ('website_link', models.URLField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Bundle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=72)),
            ],
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=72)),
                # Slug auto-derived from `name` by django-extensions.
                ('slug', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='name')),
            ],
        ),
        migrations.CreateModel(
            name='Type',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=72)),
                ('slug', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='name')),
            ],
        ),
        # New scalar fields on Event.
        migrations.AddField(
            model_name='event',
            name='alcohol',
            field=models.CharField(choices=[('AV', 'Soft drinks & alcohol available'), ('NO', 'No alcohol'), ('NF', 'Not alcohol focused')], default='NF', max_length=2),
        ),
        migrations.AddField(
            model_name='event',
            name='body',
            field=models.TextField(default=''),
        ),
        migrations.AddField(
            model_name='event',
            name='cost',
            field=models.CharField(choices=[('FREE', 'Free'), ('PAID', 'Paid'), ('NA', 'n/a')], default='NA', max_length=10),
        ),
        migrations.AddField(
            model_name='event',
            name='embargo_until',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='event',
            name='is_over_18_only',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='event',
            name='just_for_pgs',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='event',
            name='slug',
            field=django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='title'),
        ),
        migrations.AddField(
            model_name='event',
            name='suitable_kids_families',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='event',
            name='ticket_level',
            field=models.CharField(choices=[('NA', 'Not applicable'), ('LA', 'Limited availability'), ('SO', 'Sold out')], default='NA', max_length=2),
        ),
        # Optional foreign keys from Event to the new lookup tables.
        migrations.AddField(
            model_name='event',
            name='brand',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='events.BrandingPeriod'),
        ),
        migrations.AddField(
            model_name='event',
            name='bundle',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='events.Bundle'),
        ),
        migrations.AddField(
            model_name='event',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='events.Category'),
        ),
        migrations.AddField(
            model_name='event',
            name='type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='events.Type'),
        ),
    ]
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import get_object_or_404, redirect, render
from recipemaster.recipes.forms import CollectionForm, RecipeForm, SearchForm
from recipemaster.recipes.models import Recipe, RecipeCollection, Tag
from recipemaster.recipes.search import get_query
from .forms import AddUserForm
@login_required
def index(request):
    """Show the signed-in user's collections, ordered by title."""
    user_collections = (RecipeCollection.objects
                        .filter(users=request.user)
                        .order_by('title'))
    return render(request, 'recipes/index.html', {'collections': user_collections})
@login_required
def tag_filter(request, collection_id, slug):
    """Show a collection's recipes carrying a tag, optionally title-searched."""
    collection = get_object_or_404(RecipeCollection, pk=collection_id, users=request.user)
    tag = get_object_or_404(Tag, slug=slug)
    recipes = collection.recipes.filter(tags=tag).order_by('title')
    form = SearchForm()
    if request.GET.get('search'):
        form = SearchForm(request.GET)
        if not form.is_valid():
            messages.error(request, 'Could not filter. Please try again.')
        else:
            query_string = form.cleaned_data['search']
            recipes = collection.recipes.filter(
                get_query(query_string, ['title']), tags=tag).order_by('title')
            return render(request, 'recipes/view_collection.html', {
                'collection': collection,
                'form': form,
                'query': query_string,
                'recipes': recipes
            })
    # No search, or an invalid form: fall back to the unfiltered tag listing.
    return render(request, 'recipes/view_collection.html', {
        'collection': collection,
        'recipes': recipes,
        'form': form
    })
@login_required
def edit_collection(request, collection_id=None):
    """Create a new collection, or edit an existing one owned by the user."""
    if collection_id:
        collection = get_object_or_404(RecipeCollection, pk=collection_id, users=request.user)
    else:
        collection = RecipeCollection()
    form = CollectionForm(instance=collection)
    if request.method == 'POST':
        form = CollectionForm(request.POST, instance=collection)
        if form.is_valid():
            saved = form.save()
            # Ensure the editor keeps (or gains) access to the collection.
            saved.users.add(request.user)
            messages.success(request, 'Saved collection')
            return redirect('recipes:view_collection', collection_id=saved.pk)
    return render(request, 'recipes/edit_collection.html', {
        'form': form
    })
@login_required
def delete_collection(request, collection_id):
    """Delete a collection after an explicit POST confirmation."""
    collection = get_object_or_404(RecipeCollection, pk=collection_id, users=request.user)
    if request.method == 'POST':
        confirmed = request.POST.get('delete') == 'yes'
        if confirmed:
            collection.delete()
            messages.success(request, 'Deleted collection')
        else:
            messages.error(request, 'Could not delete collection. Please try again. ')
    return redirect('recipes:index')
@login_required
def view_collection(request, collection_id):
    """Display a collection's recipes, optionally filtered by a title search."""
    query_string = ''
    collection = get_object_or_404(RecipeCollection, pk=collection_id, users=request.user)
    recipes = collection.recipes.all().order_by('title')
    form = SearchForm()
    if request.GET.get('search'):
        form = SearchForm(request.GET)
        if not form.is_valid():
            messages.error(request, 'Could not search. Please try again.')
        else:
            query_string = form.cleaned_data['search']
            recipes = collection.recipes.filter(
                get_query(query_string, ['title'])).order_by('title')
            return render(request, 'recipes/view_collection.html', {
                'collection': collection,
                'form': form,
                'query': query_string,
                'recipes': recipes
            })
    # No search, or an invalid form: show the whole collection.
    return render(request, 'recipes/view_collection.html', {
        'collection': collection,
        'form': form,
        'recipes': recipes
    })
@login_required
def remove_recipe_from_collection(request, collection_id, recipe_id):
    """Detach a recipe from a collection after POST confirmation."""
    collection = get_object_or_404(RecipeCollection, pk=collection_id, users=request.user)
    recipe = get_object_or_404(Recipe, pk=recipe_id)
    if request.method == 'POST':
        if request.POST.get('delete') != 'yes':
            messages.error(request, 'Could not delete recipe. Please try again. ')
        else:
            collection.recipes.remove(recipe)
            messages.success(
                request,
                'Removed recipe {} from collection {}'.format(recipe.title, collection.title))
    return redirect('recipes:view_collection', collection_id=collection.pk)
@login_required
def edit_recipe_in_collection(request, collection_id, recipe_id=None):
    """Create or edit a recipe and attach it to the given collection."""
    collection = get_object_or_404(RecipeCollection, pk=collection_id, users=request.user)
    if recipe_id:
        recipe = get_object_or_404(Recipe, pk=recipe_id)
    else:
        recipe = Recipe()
    form = RecipeForm(instance=recipe)
    if request.method == 'POST':
        form = RecipeForm(request.POST, instance=recipe)
        if form.is_valid():
            saved = form.save()
            collection.recipes.add(saved)
            messages.success(request, 'Saved recipe')
            return redirect('recipes:view_collection', collection_id=collection.pk)
    return render(request, 'recipes/edit_recipe.html', {
        'form': form
    })
@login_required
def add_user_to_collection(request, collection_id):
    """Grant another user access to a collection, looked up by username."""
    collection = get_object_or_404(RecipeCollection, pk=collection_id, users=request.user)
    form = AddUserForm()
    if request.method == 'POST':
        form = AddUserForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            try:
                user = User.objects.get(username=username)
            except ObjectDoesNotExist:
                messages.error(request, 'User does not exist')
            else:
                collection.users.add(user)
                messages.success(
                    request,
                    'Added {} to collection {}'.format(user.username, collection.title))
                return redirect('recipes:view_collection', collection_id=collection.pk)
    return render(request, 'recipes/add_user_to_collection.html', {
        'form': form, 'collection': collection})
|
# -*- coding: utf-8 -*-
#
# rtk.dao.RTKNSWC.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 <NAME> andrew.rowland <AT> reliaqual <DOT> com
"""
===============================================================================
The RTKNSWC Table
===============================================================================
"""
# pylint: disable=E0401
from sqlalchemy import Column, Float, ForeignKey, Integer
from sqlalchemy.orm import relationship # pylint: disable=E0401
# Import other RTK modules.
from Utilities import error_handler, none_to_default # pylint: disable=E0401
from dao.RTKCommonDB import RTK_BASE # pylint: disable=E0401
# pylint: disable=R0902
class RTKNSWC(RTK_BASE):
    """
    Class to represent the rtk_nswc table in the RTK Program database.

    Holds the NSWC mechanical reliability C-factors for one hardware item.
    This table shares a One-to-One relationship with rtk_hardware.
    """

    __tablename__ = 'rtk_nswc'
    __table_args__ = {'extend_existing': True}

    hardware_id = Column('fld_hardware_id', Integer,
                         ForeignKey('rtk_hardware.fld_hardware_id'),
                         primary_key=True, nullable=False)
    Cac = Column('fld_c_ac', Float, default=0.0)
    Calt = Column('fld_c_alt', Float, default=0.0)
    Cb = Column('fld_c_b', Float, default=0.0)  # pylint: disable=invalid-name
    Cbl = Column('fld_c_bl', Float, default=0.0)
    Cbt = Column('fld_c_bt', Float, default=0.0)
    Cbv = Column('fld_c_bv', Float, default=0.0)
    Cc = Column('fld_c_c', Float, default=0.0)  # pylint: disable=invalid-name
    Ccf = Column('fld_c_cf', Float, default=0.0)
    Ccp = Column('fld_c_cp', Float, default=0.0)
    Ccs = Column('fld_c_cs', Float, default=0.0)
    Ccv = Column('fld_c_cv', Float, default=0.0)
    Ccw = Column('fld_c_cw', Float, default=0.0)
    Cd = Column('fld_c_d', Float, default=0.0)  # pylint: disable=invalid-name
    Cdc = Column('fld_c_dc', Float, default=0.0)
    Cdl = Column('fld_c_dl', Float, default=0.0)
    Cdp = Column('fld_c_dp', Float, default=0.0)
    Cds = Column('fld_c_ds', Float, default=0.0)
    Cdt = Column('fld_c_dt', Float, default=0.0)
    Cdw = Column('fld_c_dw', Float, default=0.0)
    Cdy = Column('fld_c_dy', Float, default=0.0)
    Ce = Column('fld_c_e', Float, default=0.0)  # pylint: disable=invalid-name
    Cf = Column('fld_c_f', Float, default=0.0)  # pylint: disable=invalid-name
    Cg = Column('fld_c_g', Float, default=0.0)  # pylint: disable=invalid-name
    Cga = Column('fld_c_ga', Float, default=0.0)
    Cgl = Column('fld_c_gl', Float, default=0.0)
    Cgp = Column('fld_c_gp', Float, default=0.0)
    Cgs = Column('fld_c_gs', Float, default=0.0)
    Cgt = Column('fld_c_gt', Float, default=0.0)
    Cgv = Column('fld_c_gv', Float, default=0.0)
    Ch = Column('fld_c_h', Float, default=0.0)  # pylint: disable=invalid-name
    Ci = Column('fld_c_i', Float, default=0.0)  # pylint: disable=invalid-name
    Ck = Column('fld_c_k', Float, default=0.0)  # pylint: disable=invalid-name
    Cl = Column('fld_c_l', Float, default=0.0)  # pylint: disable=invalid-name
    Clc = Column('fld_c_lc', Float, default=0.0)
    Cm = Column('fld_c_m', Float, default=0.0)  # pylint: disable=invalid-name
    Cmu = Column('fld_c_mu', Float, default=0.0)
    Cn = Column('fld_c_n', Float, default=0.0)  # pylint: disable=invalid-name
    Cnp = Column('fld_c_np', Float, default=0.0)
    Cnw = Column('fld_c_nw', Float, default=0.0)
    Cp = Column('fld_c_p', Float, default=0.0)  # pylint: disable=invalid-name
    Cpd = Column('fld_c_pd', Float, default=0.0)
    Cpf = Column('fld_c_pf', Float, default=0.0)
    Cpv = Column('fld_c_pv', Float, default=0.0)
    Cq = Column('fld_c_q', Float, default=0.0)  # pylint: disable=invalid-name
    Cr = Column('fld_c_r', Float, default=0.0)  # pylint: disable=invalid-name
    Crd = Column('fld_c_rd', Float, default=0.0)
    Cs = Column('fld_c_s', Float, default=0.0)  # pylint: disable=invalid-name
    Csc = Column('fld_c_sc', Float, default=0.0)
    Csf = Column('fld_c_sf', Float, default=0.0)
    Cst = Column('fld_c_st', Float, default=0.0)
    Csv = Column('fld_c_sv', Float, default=0.0)
    Csw = Column('fld_c_sw', Float, default=0.0)
    Csz = Column('fld_c_sz', Float, default=0.0)
    Ct = Column('fld_c_t', Float, default=0.0)  # pylint: disable=invalid-name
    Cv = Column('fld_c_v', Float, default=0.0)  # pylint: disable=invalid-name
    Cw = Column('fld_c_w', Float, default=0.0)  # pylint: disable=invalid-name
    Cy = Column('fld_c_y', Float, default=0.0)  # pylint: disable=invalid-name

    # Define the relationships to other tables in the RTK Program database.
    hardware = relationship('RTKHardware', back_populates='nswc')

    def get_attributes(self):
        """
        Method to retrieve the current values of the RTKNSWC data model
        attributes.

        :return: (hardware_id, Cac, Calt, Cb, ..., Cy); the hardware ID
                 followed by the 57 NSWC C-factors in column order.
        :rtype: tuple
        """
        # Fix: the original tuple contained `self.Cg` twice and omitted
        # `self.Cga`, silently shifting every later factor out of position
        # relative to set_attributes().
        _attributes = (self.hardware_id, self.Cac, self.Calt, self.Cb,
                       self.Cbl, self.Cbt, self.Cbv, self.Cc, self.Ccf,
                       self.Ccp, self.Ccs, self.Ccv, self.Ccw, self.Cd,
                       self.Cdc, self.Cdl, self.Cdp, self.Cds, self.Cdt,
                       self.Cdw, self.Cdy, self.Ce, self.Cf, self.Cg,
                       self.Cga, self.Cgl, self.Cgp, self.Cgs, self.Cgt,
                       self.Cgv, self.Ch, self.Ci, self.Ck, self.Cl,
                       self.Clc, self.Cm, self.Cmu, self.Cn, self.Cnp,
                       self.Cnw, self.Cp, self.Cpd, self.Cpf, self.Cpv,
                       self.Cq, self.Cr, self.Crd, self.Cs, self.Csc,
                       self.Csf, self.Cst, self.Csv, self.Csw, self.Csz,
                       self.Ct, self.Cv, self.Cw, self.Cy)

        return _attributes

    def set_attributes(self, attributes):
        """
        Method to set the RTKNSWC data model attributes.

        :param tuple attributes: tuple of 57 C-factor values, in the same
            column order produced by get_attributes() (without hardware_id).
        :return: (_error_code, _msg); the error code and error message.
        :rtype: tuple
        """
        _error_code = 0
        _msg = "RTK SUCCESS: Updating RTKNSWC {0:d} attributes.". \
            format(self.hardware_id)

        try:
            self.Cac = float(none_to_default(attributes[0], 0.0))
            self.Calt = float(none_to_default(attributes[1], 0.0))
            self.Cb = float(none_to_default(attributes[2], 0.0))
            self.Cbl = float(none_to_default(attributes[3], 0.0))
            self.Cbt = float(none_to_default(attributes[4], 0.0))
            self.Cbv = float(none_to_default(attributes[5], 0.0))
            self.Cc = float(none_to_default(attributes[6], 0.0))
            self.Ccf = float(none_to_default(attributes[7], 0.0))
            self.Ccp = float(none_to_default(attributes[8], 0.0))
            self.Ccs = float(none_to_default(attributes[9], 0.0))
            self.Ccv = float(none_to_default(attributes[10], 0.0))
            self.Ccw = float(none_to_default(attributes[11], 0.0))
            self.Cd = float(none_to_default(attributes[12], 0.0))
            self.Cdc = float(none_to_default(attributes[13], 0.0))
            self.Cdl = float(none_to_default(attributes[14], 0.0))
            self.Cdp = float(none_to_default(attributes[15], 0.0))
            self.Cds = float(none_to_default(attributes[16], 0.0))
            self.Cdt = float(none_to_default(attributes[17], 0.0))
            self.Cdw = float(none_to_default(attributes[18], 0.0))
            self.Cdy = float(none_to_default(attributes[19], 0.0))
            self.Ce = float(none_to_default(attributes[20], 0.0))
            self.Cf = float(none_to_default(attributes[21], 0.0))
            self.Cg = float(none_to_default(attributes[22], 0.0))
            # Fix: attributes[23] was assigned to Cg a second time, leaving
            # Cga permanently at its default.
            self.Cga = float(none_to_default(attributes[23], 0.0))
            self.Cgl = float(none_to_default(attributes[24], 0.0))
            self.Cgp = float(none_to_default(attributes[25], 0.0))
            self.Cgs = float(none_to_default(attributes[26], 0.0))
            self.Cgt = float(none_to_default(attributes[27], 0.0))
            self.Cgv = float(none_to_default(attributes[28], 0.0))
            self.Ch = float(none_to_default(attributes[29], 0.0))
            self.Ci = float(none_to_default(attributes[30], 0.0))
            self.Ck = float(none_to_default(attributes[31], 0.0))
            self.Cl = float(none_to_default(attributes[32], 0.0))
            self.Clc = float(none_to_default(attributes[33], 0.0))
            self.Cm = float(none_to_default(attributes[34], 0.0))
            self.Cmu = float(none_to_default(attributes[35], 0.0))
            self.Cn = float(none_to_default(attributes[36], 0.0))
            self.Cnp = float(none_to_default(attributes[37], 0.0))
            self.Cnw = float(none_to_default(attributes[38], 0.0))
            self.Cp = float(none_to_default(attributes[39], 0.0))
            self.Cpd = float(none_to_default(attributes[40], 0.0))
            self.Cpf = float(none_to_default(attributes[41], 0.0))
            self.Cpv = float(none_to_default(attributes[42], 0.0))
            self.Cq = float(none_to_default(attributes[43], 0.0))
            self.Cr = float(none_to_default(attributes[44], 0.0))
            self.Crd = float(none_to_default(attributes[45], 0.0))
            self.Cs = float(none_to_default(attributes[46], 0.0))
            self.Csc = float(none_to_default(attributes[47], 0.0))
            self.Csf = float(none_to_default(attributes[48], 0.0))
            self.Cst = float(none_to_default(attributes[49], 0.0))
            self.Csv = float(none_to_default(attributes[50], 0.0))
            self.Csw = float(none_to_default(attributes[51], 0.0))
            self.Csz = float(none_to_default(attributes[52], 0.0))
            self.Ct = float(none_to_default(attributes[53], 0.0))
            self.Cv = float(none_to_default(attributes[54], 0.0))
            self.Cw = float(none_to_default(attributes[55], 0.0))
            self.Cy = float(none_to_default(attributes[56], 0.0))
        except IndexError as _err:
            _error_code = error_handler(_err.args)
            _msg = "RTK ERROR: Insufficient number of input values to " \
                   "RTKNSWC.set_attributes()."
        except (TypeError, ValueError) as _err:
            _error_code = error_handler(_err.args)
            _msg = "RTK ERROR: Incorrect data type when converting one or " \
                   "more RTKNSWC attributes."

        return _error_code, _msg
|
import dgl
import networkx as nx
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
path_object = 'data_dgl/object.csv'
path_object_object = 'data_dgl/object-interact-object.csv'
path_room = 'data_dgl/room.csv'
path_room_object = 'data_dgl/room-interact-object.csv'
path_attribute = 'data_dgl/attribute.csv'
path_attribute_object = 'data_dgl/attribute-behave-object.csv'
# https://networkx.github.io/documentation/stable/tutorial.html#drawing-graphs
def load_heterograph():
    """
    Build a networkx MultiDiGraph from the object/room/attribute CSVs.

    Node ids are made globally unique by offsetting each node type past the
    id range of the previous type (objects first, then rooms, then
    attributes). Edge destinations are object ids in all three edge sets.
    """
    nodes_feature_object = read_node_data(path_object)
    nodes_feature_room = read_node_data(path_room)
    nodes_feature_attribute = read_node_data(path_attribute)

    # object -> object edges use raw object ids.
    src_oo, dst_oo = read_edge(path_object_object)
    edges_objectobject = list(zip(src_oo, dst_oo))

    # Room ids are shifted past the object id range.
    src_ro, dst_ro = read_edge(path_room_object)
    start_id_room = nodes_feature_object.shape[0]
    src_ro += start_id_room
    edges_roomobject = list(zip(src_ro, dst_ro))

    # Attribute ids are shifted past both objects and rooms.
    src_ao, dst_ao = read_edge(path_attribute_object)
    start_id_attribute = start_id_room + nodes_feature_room.shape[0]
    src_ao += start_id_attribute
    edges_attributeobject = list(zip(src_ao, dst_ao))

    G = nx.MultiDiGraph()
    G.add_edges_from(edges_objectobject)
    G.add_edges_from(edges_roomobject)
    G.add_edges_from(edges_attributeobject)
    print(G.number_of_nodes())
    print(G.number_of_edges())
    return G
def load_homogeneous(path_node, path_edge):
    """Build a homogeneous DGL graph from CSVs and return it as networkx."""
    features = read_node_data(path_node)
    src, dst = read_edge(path_edge)
    graph = dgl.graph((src, dst))
    graph.ndata['feature'] = features
    return dgl.to_networkx(graph, node_attrs=['feature'])
def read_node_data(path, num_features=300):
    """
    Read per-node feature vectors from a CSV.

    The CSV is expected to contain columns 'feature', 'feature.1', ...,
    'feature.<num_features-1>' (pandas' naming for repeated headers).

    :param path: path to the node CSV file.
    :param num_features: width of the feature vector; defaults to 300 to
        preserve the previous hard-coded behavior.
    :return: float tensor of shape (num_nodes, num_features).
    """
    nodes_data = pd.read_csv(path)
    columns = ['feature'] + ['feature.{}'.format(i) for i in range(1, num_features)]
    # Collect one list per feature column, then transpose to (nodes, features).
    rows = [nodes_data[col].to_list() for col in columns]
    return torch.tensor(rows).float().transpose(0, 1)
def read_edge(path):
    """Read an edge-list CSV; return (src, dst) numpy arrays of node ids."""
    frame = pd.read_csv(path)
    return frame['Src'].to_numpy(), frame['Dst'].to_numpy()
def draw_graph(G):
    """Render graph G with a spring layout and show it interactively."""
    # import pdb; pdb.set_trace()
    # plt.subplot(121)
    # Spring layout is non-deterministic unless a seed is supplied.
    pos = nx.spring_layout(G)
    nx.draw(G, with_labels=True, font_weight='bold', pos=pos)
    # plt.subplot(122)
    # nx.draw_shell(G, nlist=[range(5, 10), range(5)], with_labels=True, font_weight='bold', pos=pos)
    # Blocks until the window is closed (with an interactive backend).
    plt.show()
if __name__ == '__main__':
    # ('object', 'interacts', 'object')
    # g1 = load_homogeneous(path_object, path_object_object)
    # load_heterograph
    # Build the heterogeneous graph from the CSVs and display it.
    g_multi = load_heterograph()
    draw_graph(g_multi)
|
<reponame>kandouss/kamarl<gh_stars>1-10
import numpy as np
import torch
import tqdm
from collections import namedtuple, defaultdict
import random
import gc
import numba
import pickle
import gym
import itertools
def chunked_iterable(iterable, size):
    """Yield successive tuples of at most ``size`` items from ``iterable``.

    The final chunk may be shorter; nothing is yielded for an empty input.
    """
    iterator = iter(iterable)
    for first in iterator:
        yield (first,) + tuple(itertools.islice(iterator, size - 1))
def init_array(spec, length, array_hook=np.zeros):
    """
    Allocate a zeroed array holding ``length`` steps of data described by
    ``spec`` (a gym Box/Discrete space or a ``(shape, dtype)`` tuple).

    :raises ValueError: for unsupported gym spaces or spec types.
    """
    if isinstance(spec, gym.spaces.Box):
        return array_hook(shape=(length, *spec.shape), dtype=spec.dtype)
    elif isinstance(spec, gym.spaces.Discrete):
        # Fix: `np.int` was a deprecated alias of the builtin `int` and was
        # removed in NumPy 1.24; use the builtin directly.
        return array_hook(shape=(length,), dtype=int)
    elif isinstance(spec, gym.Space):
        raise ValueError("Unsupported gym space.")
    elif isinstance(spec, tuple):
        shape, dtype = spec
        return array_hook(shape=(length, *shape), dtype=dtype)
    else:
        raise ValueError
def init_array_recursive(spec, length, key_list=None, array_hook=np.zeros, array_kwargs=None):
    """
    Recursively allocate (possibly nested) arrays matching ``spec``.

    :param spec: gym Box/Discrete space, a ``(shape, dtype)`` tuple, or a
        (possibly nested) dict / gym.spaces.Dict of those.
    :param length: leading dimension(s); an int or a tuple of ints.
    :param key_list: key path accumulated during recursion (internal use).
    :param array_hook: allocator, e.g. ``np.zeros`` or ``torch.zeros``.
    :param array_kwargs: extra keyword arguments forwarded to ``array_hook``.
    :return: ``(arrays, leaf_key_paths)`` where ``leaf_key_paths`` lists the
        key path of every allocated leaf array.
    :raises ValueError: for unsupported spec types.
    """
    # Fix: avoid mutable default arguments; rebind fresh objects per call.
    if key_list is None:
        key_list = []
    if array_kwargs is None:
        array_kwargs = {}

    def dtype_fun(d):
        # torch allocators expect torch dtypes, not numpy ones.
        if array_hook in (torch.zeros,):
            return getattr(torch, np.dtype(d).name)
        else:
            return d

    if not isinstance(length, (tuple, list)):
        length = (length,)
    if isinstance(spec, gym.spaces.Box):
        return array_hook((*length, *spec.shape), dtype=dtype_fun(spec.dtype), **array_kwargs), [key_list]
    elif isinstance(spec, gym.spaces.Discrete):
        return array_hook((*length,), dtype=dtype_fun(np.dtype('int')), **array_kwargs), [key_list]
    elif isinstance(spec, tuple):
        shape, dtype = spec
        return array_hook((*length, *shape), dtype=dtype_fun(dtype), **array_kwargs), [key_list]
    elif isinstance(spec, (dict, gym.spaces.Dict)):
        if isinstance(spec, gym.spaces.Dict):
            spec = spec.spaces
        out = {}
        leaf_keys = []
        for k, v in spec.items():
            out[k], tmp = init_array_recursive(v, length, [*key_list, k], array_hook=array_hook, array_kwargs=array_kwargs)
            leaf_keys.extend(tuple(tmp))
        return out, leaf_keys
    else:
        # Fix: the message was a plain string missing the f-prefix, so the
        # spec was never interpolated into the error text.
        raise ValueError(f"Unsupported space {spec}.")
class Episode:
    """
    Fixed-capacity buffer holding one episode of (possibly nested) step data.

    Storage is pre-allocated from ``spaces`` (a gym space, a
    ``(shape, dtype)`` tuple, or a nested dict of those) via
    ``init_array_recursive``; ``append`` fills it one step at a time.
    """

    def __init__(self, spaces, max_length=1000):
        super().__init__()
        self.max_length = max_length
        self.length = 0
        # `flat_keys` records the key path of every leaf array in `buffers`.
        self.buffers, self.flat_keys = init_array_recursive(spaces, length=max_length)
        self.frozen = False
        self.tensor_mode = False
        self.tensor_device = None

    def __len__(self):
        return self.length

    def __getattr__(self, x):
        # Convenience: `ep.reward` returns the filled prefix of
        # buffers['reward']. Only meaningful for top-level buffer keys.
        return self.buffers[x][:self.length]

    def append(self, data):
        """Copy one step of ``data`` (a possibly nested dict) into the buffers.

        Keys missing from ``data`` are skipped, leaving that slot at its zero
        initialization.

        :raises ValueError: if the episode is full or frozen.
        """
        if self.length >= self.max_length:
            raise ValueError(f"Didn't allocate enough space in array. Trying to append step {self.length} to episode with size {self.max_length}.")
        if self.frozen:
            raise ValueError("Can't append to frozen episode.")
        for flat_key in self.flat_keys:
            src = data
            tgt = self.buffers
            for key in flat_key[:-1]:
                if key not in src:
                    break
                src = src[key]
                tgt = tgt[key]
            else:
                if flat_key[-1] in src:
                    # Fix: a bare `except:` here dropped into pdb.set_trace()
                    # (debugging leftover); shape/dtype errors now propagate.
                    tgt[flat_key[-1]][self.length] = src[flat_key[-1]]
        self.length += 1

    def _iter_buffers(self):
        """Yield every leaf array in ``buffers``."""
        for key_tuple in self.flat_keys:
            ret = self.buffers
            for k in key_tuple:
                ret = ret[k]
            yield ret

    def to_tensor(self, device):
        """Convert every leaf numpy array to a torch tensor on ``device``,
        in place, and return self."""
        for key_tuple in self.flat_keys:
            ret = self.buffers
            for k in key_tuple[:-1]:
                ret = ret[k]
            ret[key_tuple[-1]] = torch.from_numpy(ret[key_tuple[-1]]).to(device=device)
        self.tensor_mode = True
        self.tensor_device = device
        return self

    def freeze(self):
        """Shrink buffers to the filled length and forbid further appends.

        NOTE(review): `ndarray.resize(..., refcheck=False)` is numpy-only, so
        freezing after `to_tensor` would fail — confirm intended call order.
        """
        for buffer in self._iter_buffers():
            buffer.resize((self.length, *buffer.shape[1:]), refcheck=False)
        self.frozen = True

    def _get_indices(self, ix):
        """Normalize an index expression into ``(keys, step_index)``.

        Accepted forms: ``ep[key]``, ``ep[(k1, k2)]`` (all-string tuple),
        ``ep[5]``, ``ep[2:7]``, and ``ep[keys, steps]``.
        """
        if isinstance(ix, tuple) and len(ix) == 2 and not isinstance(ix[1], str):
            ind_keys, ind_steps = ix
        elif (isinstance(ix, tuple) and all(isinstance(x, str) for x in ix)) or isinstance(ix, str):
            ind_keys, ind_steps = ix, slice(None)
        elif isinstance(ix, (slice, int)):
            ind_keys, ind_steps = None, ix
        else:
            raise ValueError
        if ind_keys is None or ind_keys == slice(None):
            ind_keys = list(self.buffers.keys())
        if isinstance(ind_steps, slice):
            # Clamp the step slice to the filled portion of the episode.
            ind_steps = slice(*ind_steps.indices(self.length))
        return ind_keys, ind_steps

    def __getitem__(self, ix):
        """Select by key(s) and/or step index; returns a dict for multi-key
        selections, a bare array/tensor for a single key."""
        ind_keys, ind_steps = self._get_indices(ix)
        if not isinstance(ind_keys, (list, tuple)):
            ind_keys = [ind_keys]
            return_dict = False
        else:
            return_dict = True
        ret = {}
        for key_tuple in self.flat_keys:
            if key_tuple[0] in ind_keys:
                src = self.buffers
                tgt = ret
                for key in key_tuple[:-1]:
                    tgt = tgt.setdefault(key, {})
                    src = src[key]
                tgt[key_tuple[-1]] = src[key_tuple[-1]][ind_steps]
        if return_dict is False:
            return next(iter(ret.values()))
        return ret

    def __setitem__(self, ix, val):
        """Assign ``val[ind_steps]`` into the selected keys/steps.

        ``val`` must be array-like, not a dict.
        """
        if isinstance(val, dict):
            raise ValueError("Current implementation doesn't support dict assignment for values stored in episodes.")
        ind_keys, ind_steps = self._get_indices(ix)
        if not isinstance(ind_keys, (list, tuple)):
            ind_keys = [ind_keys]
        for key_tuple in self.flat_keys:
            if key_tuple[0] in ind_keys:
                tgt = self.buffers
                for key in key_tuple[:-1]:
                    tgt = tgt[key]
                tgt[key_tuple[-1]][ind_steps] = val[ind_steps]
def moving_average(a, n=3):
    """Simple moving average of ``a`` over a window of ``n`` samples.

    Returns an array of length ``len(a) - n + 1``.
    """
    csum = np.cumsum(a, dtype=float)
    csum[n:] -= csum[:-n]
    return csum[n - 1:] / n
def pad_to_length(A, target_length, axis=0):
    """Zero-pad ``A`` (an array, or a dict of arrays, padded per value) to
    ``target_length`` along ``axis``; returns ``A`` unchanged if already
    that length."""
    if isinstance(A, dict):
        return {key: pad_to_length(val, target_length=target_length, axis=axis)
                for key, val in A.items()}
    current = A.shape[axis]
    if current == target_length:
        return A
    widths = [(0, 0) for _ in A.shape]
    widths[axis] = (0, int(target_length - current))
    return np.pad(A, pad_width=tuple(widths), mode='constant', constant_values=0)
class RecurrentReplayMemory:
def __init__(
self,
spaces,
max_episode_length=1000,
max_num_steps=100000,
max_num_episodes=None,
):
if (max_num_steps is None) == (max_num_episodes is None):
raise ValueError(
"Exactly one of max_steps and max_num_episodes must be specified."
)
self.max_num_episodes = max_num_episodes
self.max_num_steps = max_num_steps
self.max_episode_length = max_episode_length
self.episodes = []#Episode(spaces, max_length=max_episode_length)]
self.spaces = spaces
    @property
    def current_episode(self):
        # The most recently started/added episode. Raises IndexError when
        # the buffer is empty.
        return self.episodes[-1]
@property
def episode_lengths(self):
return np.array([e.length for e in self.episodes], dtype=np.int)
    def __length__(self):
        # NOTE(review): `__length__` is not a Python protocol method — `len()`
        # dispatches to `__len__`, which is defined later in this class. This
        # method appears to be dead code; confirm no caller invokes it by name
        # before removing.
        return self.episode_lengths.sum()
@property
def full(self):
if self.max_num_steps is not None:
return len(self) >= self.max_num_steps
else:
return len(self.episodes) >= self.max_num_episodes
def clear(self):
del self.episodes
gc.collect()
self.episodes = []
def start_episode(self):
self.episodes.append(Episode(self.spaces, max_length=self.max_episode_length))
    def get_new_episode(self):
        # Construct (but do not store) an empty episode with this buffer's
        # space spec and maximum length.
        return Episode(self.spaces, max_length=self.max_episode_length)
    def add_episode(self, ep):
        # Store a finished episode, then evict until back under capacity.
        self.episodes.append(ep)
        while self.full:
            self.remove_an_episode()
def remove_an_episode(self, cmp=None):
if len(self.episodes) == 1:
raise ValueError("Attempting to delete only episode, but only episode might be active!")
if cmp is None:
del self.episodes[0]
return []
else:
assert len(cmp) == len(self.episodes)
ind = np.argmin(cmp[:-1])
del self.episodes[ind]
del cmp[ind]
print(f"Removing episode {ind}/{len(cmp)}.")
return cmp
def end_episode(self, drop_key=None):
self.current_episode.freeze()
if drop_key is not None:
drop_vals = [ep[drop_key].mean() for ep in self.episodes]
while self.full:
drop_vals = self.remove_an_episode(drop_vals)
else:
while self.full:
self.remove_an_episode()
    def get_obs(self, X):
        # Return the observation portion of X: the first element, or the
        # first `n_obs` elements when `n_obs` is set.
        # NOTE(review): `self.n_obs` is never assigned in the visible part of
        # this class (not set in __init__), so this likely raises
        # AttributeError unless external code sets it — confirm.
        if self.n_obs is None:
            return X[0]
        else:
            return X[: self.n_obs]
    @property
    def n_episodes(self):
        # Number of episodes currently stored (including the active one).
        return len(self.episodes)
def __len__(self):
"""
Total number of steps in all episodes in the buffer.
"""
return sum([len(ep) for ep in self.episodes], 0)
def sample_sequence(
    self,
    batch_size,
    seq_len,
    include_current=True,
    return_indices=False,
    equal_weight_episodes=False,
    through_end=True,
    priority_key=None,
    compute_hidden_hook=None,
):
    """Sample ``batch_size`` subsequences of length ``seq_len`` from the buffer.

    Parameters
    ----------
    batch_size : number of sequences to draw (with replacement).
    seq_len : target length of each sequence; windows that hit an episode's
        end are truncated and the output stays zero-padded there.
    include_current : if False, never sample from the last (active) episode.
    return_indices : additionally return an array of (episode, start, end)
        indices, one row per sampled sequence.
    equal_weight_episodes : weight every sufficiently long episode equally
        instead of proportionally to its usable length.
    through_end : if True, windows may extend past an episode's final step
        (they get truncated); if False, only fully contained windows are drawn.
    priority_key : if given, episodes and start positions are weighted by the
        values stored under this key instead of by length.
    compute_hidden_hook : optional callable mapping the observations that
        precede a sampled window to an initial hidden state; results are
        stacked into ``ret['hx_cx']``.

    Returns the filled (possibly nested) array structure matching
    ``self.spaces`` — or an empty list when no episode is long enough.
    """
    with torch.no_grad():
        # through_end==True <=> we're OK with sampling sequences that end after sequences terminate.
        subtract_len = 1 if through_end else (1 + seq_len)
        if include_current:
            episodes_to_sample = self.episodes
            episode_lengths = self.episode_lengths
        else:
            episodes_to_sample = self.episodes[:-1]
            episode_lengths = self.episode_lengths[:-1]
        if equal_weight_episodes:
            episode_sample_weights = ((episode_lengths - subtract_len) > 0).astype(float)
        elif priority_key is not None:
            # BUG FIX: previously ``for e, l in episodes_to_sample`` tried to
            # unpack each episode into two values and raised at runtime;
            # iterate the episodes directly.
            episode_sample_weights = np.array(
                [e[priority_key, :len(e)].sum() for e in episodes_to_sample])
        else:
            episode_sample_weights = (episode_lengths - subtract_len).clip(0)
        # episodes too short to yield even one valid window can never be drawn
        episode_sample_weights[episode_lengths <= (subtract_len)] = 0
        if episode_sample_weights.sum() == 0:
            return []
        to_sample = np.random.choice(
            len(episodes_to_sample),
            size=batch_size,
            replace=True,
            p=episode_sample_weights/episode_sample_weights.sum(),
        )
        if priority_key is None:
            sample_start_ixs = [
                np.random.choice(episode_lengths[ix] - subtract_len) for ix in to_sample
            ]
        else:
            norm = lambda A: A/A.sum()
            sample_start_ixs = [
                np.random.choice(
                    episode_lengths[ix] - subtract_len,
                    p=norm(episodes_to_sample[ix][priority_key, :len(episodes_to_sample[ix])-subtract_len])
                ) for ix in to_sample
            ]
        end_ixs = []
        hiddens = []
        # allocate the output structure; use torch tensors on the episodes'
        # device when the buffer holds tensors, numpy arrays otherwise
        if self.episodes[0].tensor_mode:
            ret, ret_keys = init_array_recursive(
                self.spaces, (batch_size, seq_len),
                array_hook=torch.zeros, array_kwargs={'device': self.episodes[0].tensor_device})
        else:
            ret, ret_keys = init_array_recursive(
                self.spaces, (batch_size, seq_len),
            )
        sample_info = []
        for ix_in_batch, (ep_ix, start_ix) in enumerate(zip(to_sample, sample_start_ixs)):
            # clamp the window to the episode's end; the tail stays zero-padded
            end_ix = min(start_ix + seq_len, len(episodes_to_sample[ep_ix]))
            end_ixs.append(end_ix)
            sample_info.append((ix_in_batch, ep_ix, start_ix, end_ix))
            tmp = episodes_to_sample[ep_ix][start_ix:end_ix]
            # walk the (possibly nested) key structure, copying each slice in
            for key_list in ret_keys:
                src = tmp
                tgt = ret
                for k in key_list[:-1]:
                    src = src[k]
                    tgt = tgt[k]
                tgt[key_list[-1]][ix_in_batch, :end_ix-start_ix, ...] = src[key_list[-1]]
            if compute_hidden_hook is not None:
                hiddens.append(compute_hidden_hook(episodes_to_sample[ep_ix]['obs', :start_ix]))
        if compute_hidden_hook is not None:
            ret['hx_cx'] = torch.stack(hiddens, -2)
        if return_indices:
            return ret, np.array([to_sample, sample_start_ixs, end_ixs]).T
        else:
            return ret
|
from rest_framework.test import APITestCase, APIRequestFactory, force_authenticate
from api.v2.views import PlatformTypeViewSet as ViewSet
from api.tests.factories import UserFactory, AnonymousUserFactory, GroupFactory, PlatformTypeFactory
from django.core.urlresolvers import reverse
class GetListTests(APITestCase):
    """List endpoint for platform types: authenticated-only, paginated.

    Note: ``assertEquals`` (the deprecated alias removed in Python 3.12) has
    been replaced by ``assertEqual`` throughout.
    """

    def setUp(self):
        self.view = ViewSet.as_view({'get': 'list'})
        self.anonymous_user = AnonymousUserFactory()
        self.user = UserFactory.create()
        self.group = GroupFactory.create(name=self.user.username)
        self.staff_user = UserFactory.create(is_staff=True)
        self.platform_type = PlatformTypeFactory.create()
        factory = APIRequestFactory()
        url = reverse('api:v2:platformtype-list')
        self.request = factory.get(url)
        force_authenticate(self.request, user=self.user)
        self.response = self.view(self.request)

    def test_is_not_public(self):
        """Anonymous users get a 403."""
        force_authenticate(self.request, user=self.anonymous_user)
        response = self.view(self.request)
        self.assertEqual(response.status_code, 403)

    def test_is_visible_to_authenticated_user(self):
        force_authenticate(self.request, user=self.user)
        response = self.view(self.request)
        self.assertEqual(response.status_code, 200)

    def test_response_is_paginated(self):
        response = self.response
        self.assertIn('count', response.data)
        self.assertIn('results', response.data)

    def test_response_contains_expected_fields(self):
        force_authenticate(self.request, user=self.user)
        response = self.view(self.request)
        data = response.data.get('results')[0]
        self.assertEqual(len(data), 5)
        self.assertIn('id', data)
        self.assertIn('url', data)
        self.assertIn('name', data)
        self.assertIn('start_date', data)
        self.assertIn('end_date', data)
class GetDetailTests(APITestCase):
    """Detail endpoint for a single platform type: authenticated-only.

    Note: ``assertEquals`` (the deprecated alias removed in Python 3.12) has
    been replaced by ``assertEqual`` throughout.
    """

    def setUp(self):
        self.view = ViewSet.as_view({'get': 'retrieve'})
        self.anonymous_user = AnonymousUserFactory()
        self.user = UserFactory.create()
        self.group = GroupFactory.create(name=self.user.username)
        self.staff_user = UserFactory.create(is_staff=True)
        self.platform_type = PlatformTypeFactory.create()
        factory = APIRequestFactory()
        url = reverse(
            'api:v2:platformtype-detail',
            args=(
                self.platform_type.id,
            ))
        self.request = factory.get(url)
        force_authenticate(self.request, user=self.user)
        self.response = self.view(self.request, pk=self.platform_type.id)

    def test_is_not_public(self):
        """Anonymous users get a 403."""
        force_authenticate(self.request, user=self.anonymous_user)
        response = self.view(self.request, pk=self.platform_type.id)
        self.assertEqual(response.status_code, 403)

    def test_is_visible_to_authenticated_user(self):
        force_authenticate(self.request, user=self.user)
        response = self.view(self.request, pk=self.platform_type.id)
        self.assertEqual(response.status_code, 200)

    def test_response_contains_expected_fields(self):
        force_authenticate(self.request, user=self.user)
        response = self.view(self.request, pk=self.platform_type.id)
        data = response.data
        self.assertEqual(len(data), 5)
        self.assertIn('id', data)
        self.assertIn('url', data)
        self.assertIn('name', data)
        self.assertIn('start_date', data)
        self.assertIn('end_date', data)
class CreateTests(APITestCase):
    def test_endpoint_does_not_exist(self):
        """POST is not an allowed method on the platform-type viewset."""
        # assertNotIn reports the container on failure, unlike assertTrue(... not in ...)
        self.assertNotIn('post', ViewSet.http_method_names)
class UpdateTests(APITestCase):
    def test_endpoint_does_not_exist(self):
        """PUT is not an allowed method on the platform-type viewset."""
        # assertNotIn reports the container on failure, unlike assertTrue(... not in ...)
        self.assertNotIn('put', ViewSet.http_method_names)
class DeleteTests(APITestCase):
    def test_endpoint_does_not_exist(self):
        """DELETE is not an allowed method on the platform-type viewset."""
        # assertNotIn reports the container on failure, unlike assertTrue(... not in ...)
        self.assertNotIn('delete', ViewSet.http_method_names)
|
#!/usr/bin/env python3
import json
import logging
import os
import os.path
import sys
import time
import unittest
from unittest.mock import MagicMock
from unittest.mock import call
import uuid
import praw
from praw.config import Config
import prawcore
import requests
import scrape
# I didn't know this before creating the test
hsbot = __import__("hearthscan-bot")
import commentDB
import credentials
import formatter
from cardDB import CardDB
from constants import Constants
from helper import HSHelper
from helper import SpellChecker
from praww import RedditBot
from praww import _SeenDB
# start with 'test.py online' to start slow tests requiring internet and working credentials
# (checked by the @unittest.skipIf decorators throughout this file)
SKIP_INTERNET_TESTS = len(sys.argv) < 2 or sys.argv[1] != "online"
def removeFile(path):
    """Delete *path* if it is an existing file; missing files are ignored."""
    if not os.path.isfile(path):
        return
    os.remove(path)
class TempJson():
    """Context manager: writes *obj* as formatted JSON to a uniquely named
    file, yields the file name, and deletes the file on exit."""

    def __init__(self, obj):
        self.obj = obj
        self.file = str(uuid.uuid4()) + '.json'

    def __enter__(self):
        with open(self.file, "w", newline="\n") as handle:
            json.dump(self.obj, handle, sort_keys=True, indent=2, separators=(',', ':'))
        return self.file

    def __exit__(self, exc_type, exc_value, tb):
        # inlined removeFile(): delete only if present, never raise
        if os.path.isfile(self.file):
            os.remove(self.file)
class TempFile():
    """Context manager yielding a unique file name ending in *suffix*; any
    file the caller creates under that name is deleted on exit."""

    def __init__(self, suffix):
        self.file = str(uuid.uuid4()) + '.' + suffix

    def __enter__(self):
        return self.file

    def __exit__(self, exc_type, exc_value, tb):
        # inlined removeFile(): delete only if present, never raise
        if os.path.isfile(self.file):
            os.remove(self.file)
class TestScrape(unittest.TestCase):
    """scrape.py"""

    def test_camelCase(self):
        # SNAKE_CASE becomes space-separated capitalised words; empty/None pass through as None
        self.assertEqual(scrape.camelCase("SPELL"), "Spell")
        self.assertEqual(scrape.camelCase("HERO_POWER"), "Hero Power")
        self.assertEqual(scrape.camelCase(""), None)
        self.assertEqual(scrape.camelCase(None), None)

    # @unittest.skipIf(SKIP_INTERNET_TESTS, "requires internet (and is slow)")
    # NOTE(review): the skip guard above is commented out, so this test always
    # hits the network — confirm that is intentional.
    def test_hearthhead(self):
        # card name + type -> hearthstone-db URL slug
        with requests.Session() as s:
            self.assertEqual(scrape.getHTDId('Quick Shot', 'Spell', s),
                'quick-shot')
            self.assertEqual(scrape.getHTDId('Undercity Valiant',
                'Minion', s), 'undercity-valiant')
            self.assertEqual(scrape.getHTDId('Gorehowl', 'Weapon', s),
                'gorehowl')
            self.assertEqual(scrape.getHTDId('V-07-TR-0N',
                'Minion', s), 'v-07-tr-0n')
            self.assertEqual(scrape.getHTDId("Al'Akir the Windlord",
                'Minion', s), 'alakir-the-windlord')

    @unittest.skipIf(SKIP_INTERNET_TESTS, "requires internet (and is slow)")
    def test_Hearthpwn(self):
        # (name, set, type) -> (Hearthpwn numeric id, CDN image URL)
        with requests.Session() as s:
            self.assertEqual(scrape.getHearthpwnIdAndUrl('Quick Shot',
                'Blackrock Mountain', 'Spell', False, s),
                (14459, 'https://media-hearth.cursecdn.com/avatars/328/302/14459.png'))
            self.assertEqual(scrape.getHearthpwnIdAndUrl('Upgrade!',
                'Classic', 'Spell', False, s),
                (638, 'https://media-hearth.cursecdn.com/avatars/330/899/638.png'))

    @unittest.skipIf(SKIP_INTERNET_TESTS, "requires internet (and is slow)")
    def test_full(self):
        # end-to-end scrape of one card from one set id
        expected = {
            'Quick Shot': {
                'type': 'Spell',
                'hpwn': 14459,
                'cdn': 'https://media-hearth.cursecdn.com/avatars/328/302/14459.png',
                'desc': 'Deal 3 damage. If your hand is empty, draw a card.',
                'hp': None,
                'class': 'Hunter',
                'subType': None,
                'set': 'Blackrock Mountain',
                'rarity': 'Common',
                'atk': None,
                'head': 'quick-shot',
                'name': 'Quick Shot',
                'cost': 2
            }
        }
        # scrape just one card
        cards = {
            "ignoredId" : {
                'type': 'Spell',
                'desc': 'Deal 3 damage. If your hand is empty, draw a card.',
                'hp': None,
                'class': 'Hunter',
                'subType': None,
                'set': 'Blackrock Mountain',
                'rarity': 'Common',
                'atk': None,
                'name': 'Quick Shot',
                'cost': 2
            }
        }
        # this file is created to cache results
        removeFile('data/07 Blackrock Mountain.json')
        scraped = scrape.loadSets(cards, ['07'])
        removeFile('data/07 Blackrock Mountain.json')
        self.assertEqual(scraped, expected)

    @unittest.skipIf(SKIP_INTERNET_TESTS, "requires internet (and is slow)")
    def test_full_tokens(self):
        self.maxDiff = None
        expected = {
            'Quick Shot': {
                'type': 'Spell',
                'hpwn': 14459,
                'cdn': 'https://media-hearth.cursecdn.com/avatars/328/302/14459.png',
                'desc': 'Deal 3 damage. If your hand is empty, draw a card.',
                'hp': None,
                'class': 'Hunter',
                'subType': None,
                'set': 'Blackrock Mountain',
                'rarity': 'Common',
                'atk': None,
                'head': 'quick-shot',
                'name': 'Quick Shot',
                'cost': 2
            }
        }
        # scrape just one card
        wantedtokens = {
            "Quick Shot": {
                "id" : "BRM_013",
                "hpwn": 14459
            }
        }
        tokens = {
            "BRM_013" : {
                'type': 'Spell',
                'desc': 'Deal 3 damage. If your hand is empty, draw a card.',
                'hp': None,
                'class': 'Hunter',
                'subType': None,
                'set': 'Blackrock Mountain',
                'rarity': 'Common',
                'atk': None,
                'name': '<NAME>',
                'cost': 2
            }
        }
        self.assertEqual(scrape.loadTokens(tokens, wantedtokens), expected)

    @unittest.skipIf(SKIP_INTERNET_TESTS, "requires internet (and is slow)")
    def test_JsonCards_loadFixer(self):
        # loadJsonCards applies description/class fix-ups to the raw json dump
        cards, tokens = scrape.loadJsonCards()
        # description
        self.assertEqual(cards['LOE_079']['desc'],
            "Battlecry: Shuffle the 'Map to the Golden Monkey' into your deck.")
        self.assertEqual(cards['GVG_085']['desc'], "Taunt Divine Shield")
        self.assertEqual(cards['GVG_012']['desc'][:16], "Restore 3 Health")
        self.assertEqual(cards['EX1_279']['desc'], "Deal 10 damage.")
        self.assertEqual(cards['BRM_013']['desc'],
            "Deal 3 damage. If your hand is empty, draw a card.")
        self.assertEqual(cards['EX1_298']['desc'][:13], "Can't attack.")
        self.assertEqual(cards['CFM_902']['desc'],
            "Battlecry and Deathrattle: Summon a Jade Golem.")
        # multi class
        self.assertEqual(cards['CFM_902']['class'], "Lotus (DRS)")

    @unittest.skipIf(SKIP_INTERNET_TESTS, "requires internet (and is slow)")
    def test_single(self):
        # parseSingle scrapes one card directly from its Hearthpwn id
        expected = {
            'type': 'Spell',
            'hpwn': 14459,
            'cdn': 'https://media-hearth.cursecdn.com/avatars/328/302/14459.png',
            'desc': 'Deal 3 damage.If your hand is empty, draw a card.',
            'hp': None,
            'class': 'Hunter',
            'subType': None,
            'set': 'Blackrock Mountain',
            'rarity': 'Common',
            'atk': None,
            'head': 'quick-shot',
            'name': '<NAME>',
            'cost': 2
        }
        name, card = scrape.parseSingle(14459)
        self.assertEqual(card, expected)
class TestConst(unittest.TestCase):
    """constants.py constants.json"""

    def test_ScrapeConstSetLength(self):
        # easy to miss one when a new set is added
        c = Constants()
        self.assertEqual(len(scrape.jsonToCCSet), len(c.sets),
            'okay to fail during spoiler season')
        self.assertEqual(len(scrape.setids), len(c.sets))
        self.assertEqual(len(scrape.setNameIds), len(c.sets))

    def test_SpecialReplacements(self):
        constantJson = {
            'sets' : { },
            'specials' : {
                "dream cards" : ["dream", "emeralddrake", "laughingsister",
                    "nightmare", "ysera awakens"]
            },
            'alternative_names' : { }
        }
        # NOTE(review): the context variable shadows the imported ``json``
        # module inside this with-block.
        with TempJson(constantJson) as json:
            c = Constants(json)
            # tests replace: the special group name expands in place
            replaced = c.replaceSpecial(["111", "dreamcards", "333", "444"])
            self.assertEqual(replaced, ["111",
                "dream",
                "emeralddrake",
                "laughingsister",
                "nightmare",
                "yseraawakens",
                "333",
                "444"])

    def test_AlternativeReplacements(self):
        constantJson = {
            'sets' : { },
            'specials' : { },
            'alternative_names' : {
                'carda' : 'ca',
                'card b' : ['cb', 'cb b']
            }
        }
        with TempJson(constantJson) as json:
            c = Constants(json)
            # alternative names map back to the (cleaned) canonical card name
            self.assertEqual(c.translateAlt("ca"), "carda")
            self.assertEqual(c.translateAlt("cb"), "cardb")
            self.assertEqual(c.translateAlt("cc"), "cc")
class TestCommentDB(unittest.TestCase):
    """commentDB.py"""
    # file name of the throwaway test database
    testDBName = "test.db"

    def test_CreateFindFailParent(self):
        removeFile(self.testDBName)
        db = commentDB.DB(self.testDBName)
        # exists() inserts the unseen (parent, cards) combination as a side
        # effect, so the same query is False first and True afterwards.
        self.assertFalse(db.exists("abc", ["a card"]))
        # inserted on exists
        self.assertTrue(db.exists("abc", ["a card"]))
        self.assertFalse(db.exists("abc", ["b card"]))
        self.assertTrue(db.exists("abc", ["a card", "b card"]))
        self.assertFalse(db.exists("abc", ["a card", "b card", "c card"]))
        self.assertFalse(db.exists("123", ["a card"]))
        db.close()
        removeFile(self.testDBName)
class TestPRAWW(unittest.TestCase):
    """praww.py"""

    @unittest.skipIf(SKIP_INTERNET_TESTS, "requires internet (and is slow)")
    def test_RedditAuth(self):
        # will fail for missing/bad praw.ini
        with TempFile('db') as seenDB:
            RedditBot(subreddits=[], newLimit=1, sleep=0, connectAttempts=1,
                    dbName=seenDB) \
                .run(lambda: removeFile(RedditBot.LOCK_FILE))

    @unittest.skipIf(SKIP_INTERNET_TESTS, "requires internet (and is slow)")
    def test_RedditAuthFail(self):
        def raiseError():
            raise Exception('unexpected')
        try:
            # backup existing praw ini, create our own
            if os.path.isfile('praw.ini'):
                os.rename('praw.ini', '_praw.ini')
            with open('praw.ini', 'w', newline="\n") as f:
                f.write('[testbot]\n')
                f.write('check_for_updates=false\n')
                f.write('client_id=badid\n')
                f.write('client_secret=badsecret\n')
                f.write('refresh_token=badtoken\n')
                f.write('user_agent=praw:hearthscan-test:1.0 (by /u/b0ne123)')
            # presumably resets praw's cached config so the new ini is read — verify
            Config.CONFIG = None
            with self.assertRaises(prawcore.exceptions.ResponseException), \
                    TempFile('db') as seenDB:
                RedditBot(subreddits=[], newLimit=1, sleep=0, connectAttempts=1,
                        iniSite='testbot', dbName=seenDB) \
                    .run(raiseError)
        finally:
            # restore the user's original praw.ini
            removeFile('praw.ini')
            if os.path.isfile('_praw.ini'):
                os.rename('_praw.ini', 'praw.ini')

    def test_seenDB(self):
        with TempFile('db') as dbfile:
            db = _SeenDB(dbfile)
            class Thing():
                fullname = "t1_thingid"
            thing = Thing()
            # first lookup records the thing; second lookup finds it
            self.assertFalse(db.isSeen(thing))
            self.assertTrue(db.isSeen(thing))
            # cleanup with age 0 forgets everything recorded so far
            db.cleanup(secondsOld = 0)
            self.assertFalse(db.isSeen(thing))
            self.assertTrue(db.isSeen(thing))
            db.close()
class TestCardDB(unittest.TestCase):
    """cardDB.py

    The four CardDB tests feed the same single-card dict through different
    JSON slots (cards/tokens/duels/vanilla) and check the resulting lookup
    keys: duels cards gain a ``d!`` prefixed alias, vanilla cards a ``c!``
    prefixed alias.
    """

    def test_CleanName(self):
        # cleanName lowercases and strips punctuation/digits/whitespace
        self.assertEqual(CardDB.cleanName('Ab: 1c'), 'abc')

    def test_CardDB(self):
        # card supplied via cardJSON: plain name only, no prefixed aliases
        cardDict = {
            'Quick Shot': {
                'type': 'Spell',
                'hpwn': 14459,
                'cdn': 'https://media-Hearth.cursecdn.com/14459.png',
                'desc': 'Deal 3 damage. Draw a card.',
                'hp': 1,
                'class': 'Hunter',
                'subType': 'Mech',
                'set': 'Basic',
                'rarity': 'Common',
                'atk': 3,
                'head': 'quick-shot',
                'name': 'Quick Shot',
                'cost': 2
            }
        }
        constantDict = {
            'sets' : { '01' : {'name' : 'Basic'} },
            'specials' : { },
            'alternative_names' : { }
        }
        with TempJson(constantDict) as constJson, \
                TempJson(cardDict) as cardJson, \
                TempJson({}) as emptyJson:
            c = Constants(constJson)
            db = CardDB(constants=c, cardJSON=cardJson, duelsJSON=emptyJson, vanillaJSON=emptyJson, tokenJSON=emptyJson, tempJSON=emptyJson)
            self.assertEqual(db.cardNames(), ['quickshot'])
            self.assertEqual(db.tokens, [])
            self.assertTrue('quickshot' in db)
            self.assertFalse('slowshot' in db)
            self.assertFalse('d!quickshot' in db)
            self.assertFalse('c!quickshot' in db)
            self.assertTrue('Quick Shot' in db['quickshot'])

    def test_CardDBTokens(self):
        # card supplied via tokenJSON: appears in db.tokens as well
        cardDict = {
            'Quick Shot': {
                'type': 'Spell',
                'hpwn': 14459,
                'cdn': 'https://media-Hearth.cursecdn.com/14459.png',
                'desc': 'Deal 3 damage. Draw a card.',
                'hp': 1,
                'class': 'Hunter',
                'subType': 'Mech',
                'set': 'Basic',
                'rarity': 'Token',
                'atk': 3,
                'head': 'quick-shot',
                'name': 'Quick Shot',
                'cost': 2
            }
        }
        constantDict = {
            'sets' : { '01' : {'name' : 'Basic'} },
            'specials' : { },
            'alternative_names' : { }
        }
        with TempJson(constantDict) as constJson, \
                TempJson(cardDict) as cardJson, \
                TempJson({}) as emptyJson:
            c = Constants(constJson)
            db = CardDB(constants=c, cardJSON=emptyJson, duelsJSON=emptyJson, vanillaJSON=emptyJson, tokenJSON=cardJson, tempJSON=emptyJson)
            self.assertEqual(db.cardNames(), ['quickshot'])
            self.assertEqual(db.tokens, ['quickshot'])
            self.assertTrue('quickshot' in db)
            self.assertFalse('d!quickshot' in db)
            self.assertFalse('c!quickshot' in db)
            self.assertTrue('Quick Shot' in db['quickshot'])

    def test_CardDBDuels(self):
        # card supplied via duelsJSON: reachable as both plain and 'd!' key
        cardDict = {
            'Quick Shot': {
                'type': 'Spell',
                'hpwn': 14459,
                'cdn': 'https://media-Hearth.cursecdn.com/14459.png',
                'desc': 'Deal 3 damage. Draw a card.',
                'hp': 1,
                'class': 'Hunter',
                'subType': 'Mech',
                'set': 'Basic',
                'rarity': 'Token',
                'atk': 3,
                'head': 'quick-shot',
                'name': 'Quick Shot',
                'cost': 2
            }
        }
        constantDict = {
            'sets' : { '01' : {'name' : 'Basic'} },
            'specials' : { },
            'alternative_names' : { }
        }
        with TempJson(constantDict) as constJson, \
                TempJson(cardDict) as cardJson, \
                TempJson({}) as emptyJson:
            c = Constants(constJson)
            db = CardDB(constants=c, cardJSON=emptyJson, duelsJSON=cardJson, vanillaJSON=emptyJson, tokenJSON=emptyJson, tempJSON=emptyJson)
            self.assertEqual(db.cardNames(), ['quickshot'])
            self.assertEqual(db.tokens, [])
            self.assertTrue('quickshot' in db)
            self.assertTrue('d!quickshot' in db)
            self.assertFalse('c!quickshot' in db)
            self.assertTrue('Quick Shot' in db['quickshot'])
            self.assertTrue('Quick Shot' in db['d!quickshot'])

    def test_CardDBVanilla(self):
        # card supplied via vanillaJSON: reachable as both plain and 'c!' key
        cardDict = {
            'Quick Shot': {
                'type': 'Spell',
                'hpwn': 14459,
                'cdn': 'https://media-Hearth.cursecdn.com/14459.png',
                'desc': 'Deal 3 damage. Draw a card.',
                'hp': 1,
                'class': 'Hunter',
                'subType': 'Mech',
                'set': 'Basic',
                'rarity': 'Token',
                'atk': 3,
                'head': 'quick-shot',
                'name': 'Quick Shot',
                'cost': 2
            }
        }
        constantDict = {
            'sets' : { '01' : {'name' : 'Basic'} },
            'specials' : { },
            'alternative_names' : { }
        }
        with TempJson(constantDict) as constJson, \
                TempJson(cardDict) as cardJson, \
                TempJson({}) as emptyJson:
            c = Constants(constJson)
            db = CardDB(constants=c, cardJSON=emptyJson, duelsJSON=emptyJson, vanillaJSON=cardJson, tokenJSON=emptyJson, tempJSON=emptyJson)
            self.assertEqual(db.cardNames(), ['quickshot'])
            self.assertEqual(db.tokens, [])
            self.assertTrue('quickshot' in db)
            self.assertTrue('c!quickshot' in db)
            self.assertFalse('d!quickshot' in db)
            self.assertTrue('Quick Shot' in db['quickshot'])
            self.assertTrue('Quick Shot' in db['c!quickshot'])

    def test_RefreshCardDB(self):
        # refreshTemp() reloads the temp JSON after the path is swapped in
        cardDict = {
            'Quick Shot': {
                'type': 'Spell',
                'hpwn': 14459,
                'cdn': 'https://media-Hearth.cursecdn.com/14459.png',
                'desc': 'Deal 3 damage. Draw a card.',
                'hp': 1,
                'class': 'Hunter',
                'subType': "Mech",
                'set': 'Basic',
                'rarity': 'Common',
                'atk': 3,
                'head': 'quick-shot',
                'name': '<NAME>',
                'cost': 2
            }
        }
        constantDict = {
            'sets' : { '01' : {'name' : 'Basic'} },
            'specials' : { },
            'alternative_names' : { }
        }
        with TempJson(constantDict) as constJson, \
                TempJson(cardDict) as cardJson, \
                TempJson({}) as emptyJson:
            c = Constants(constJson)
            db = CardDB(constants=c, cardJSON=emptyJson, duelsJSON=emptyJson, vanillaJSON=emptyJson, tokenJSON=emptyJson, tempJSON='notexisting.json')
            self.assertEqual(db.cardNames(), [])
            self.assertFalse('quickshot' in db)
            db.tempJSON = cardJson
            db.refreshTemp()
            self.assertTrue('quickshot' in db)
            self.assertTrue('Quick Shot' in db['quickshot'])
class TestHelper(unittest.TestCase):
    """helper.py HSHelper"""

    def test_QuoteCleaner(self):
        # quoted reddit lines ("> ...") are dropped, the rest joined with spaces
        self.assertEqual(HSHelper.removeQuotes("> b\na\n> b\nc"), "a c")
        self.assertEqual(HSHelper.removeQuotes("> abc"), "")

    def test_getCardsFromComment(self):
        cardDict = {
            'Quick Shot': {
                'type': 'Spell',
                'hpwn': 14459,
                'cdn': 'https://media-Hearth.cursecdn.com/14459.png',
                'desc': 'Deal 3 damage. Draw a card.',
                'hp': 1,
                'class': 'Hunter',
                'subType': "Mech",
                'set': 'Basic',
                'rarity': 'Common',
                'atk': 3,
                'head': 'quick-shot',
                'name': 'Quick Shot',
                'cost': 2
            }
        }
        # we need more cards (Card AA - Card UU)
        for i in range(21):
            name = 'Card ' + chr(97 + i)
            cardDict[name] = cardDict['Quick Shot'].copy()
            cardDict[name]['name'] = name
        constantDict = {
            'sets' : { '01' : {'name' : 'Basic'} },
            'specials' : { },
            'alternative_names' : { "quickshot" : "qs" }
        }
        with TempJson(constantDict) as constJson, \
                TempJson(cardDict) as cardJson, \
                TempJson({}) as emptyJson:
            c = Constants(constJson)
            db = CardDB(constants=c, cardJSON=cardJson, duelsJSON=emptyJson, vanillaJSON=emptyJson, tokenJSON=emptyJson, tempJSON=emptyJson)
            helper = HSHelper(db, c)
            # simple find
            text = '[[Quick Shot]]'
            cards, text = helper.parseText(text)
            self.assertEqual(cards, ['quickshot'], 'simple card')
            self.assertTrue('Quick Shot' in text)
            # escaped simple find
            text = '\\[\\[quickshot\\]\\]'
            cards, text = helper.parseText(text)
            self.assertEqual(cards, ['quickshot'], 'simple card')
            self.assertTrue('Quick Shot' in text)
            # two cards, cleanName
            text = ' [[card a]] world [[quickshot 42]] '
            cards, text = helper.parseText(text)
            self.assertEqual(cards, ['carda', 'quickshot'], 'multi cards, clean')
            self.assertTrue('Quick Shot' in text)
            self.assertTrue('Card a' in text)
            # spell check
            text = '[[Quic Shot]]'
            cards, text = helper.parseText(text)
            self.assertEqual(cards, ['quickshot'], 'simple card')
            self.assertTrue('Quick Shot' in text)
            # alternative name
            text = '[[QS]]'
            cards, text = helper.parseText(text)
            self.assertEqual(cards, ['quickshot'], 'alternative name')
            self.assertTrue('Quick Shot' in text)
            # test card limit always working
            cardsNames = ['card' + chr(97 + i) for i in range(c.CARD_LIMIT + 1)]
            cardsNames = ['no card'] + cardsNames
            text = '[[' + ']][['.join(cardsNames) + ']]'
            cards, text = helper.parseText(text)
            self.assertEqual(cards, cardsNames[1:-1],
                'CARD_LIMIT cards expected')
            self.assertTrue('no card' not in text, 'unknown should be skipped')
            for i in range(c.CARD_LIMIT):
                self.assertTrue('Card ' + chr(97 + i) in text)
            # test short text
            text = '[[a]]'
            cards, text = helper.parseText(text)
            self.assertEqual(len(cards), 0, 'no cards')
            self.assertEqual(len(text), 0, 'no cards')
            # test no valid text
            text = '[[123]] [abc]'
            cards, text = helper.parseText(text)
            self.assertEqual(len(cards), 0, 'no valid text')
            self.assertEqual(len(text), 0, 'no valid text')
            # card too long
            text = '[[123456789012345678901234567890abc]]'
            cards, text = helper.parseText(text)
            self.assertEqual(len(cards), 0, 'card too long')
            self.assertEqual(len(text), 0, 'card too long')

    def test_loadInfoTempl_simple(self):
        # the info template placeholders are filled from constants and the DB
        constantDict = {
            'sets' : { },
            'specials' : { 'dream' : ['no'] },
            'alternative_names' : { 'quickshot' : 'qs' }
        }
        try:
            # back up a real template if one exists, then write a minimal one
            if os.path.isfile('data/info_msg.templ'):
                os.rename('data/info_msg.templ', 'data/_info_msg.templ')
            with TempJson(constantDict) as constJson, \
                    TempJson({}) as emptyJson:
                with open('data/info_msg.templ', 'w', newline="\n") as f:
                    f.write('{user}-{alts}-{tokens}-{special}')
                c = Constants(constJson)
                db = CardDB(constants=c, cardJSON=emptyJson, duelsJSON=emptyJson, vanillaJSON=emptyJson, tokenJSON=emptyJson, tempJSON=emptyJson)
                helper = HSHelper(db, c)
                info = helper.getInfoText('user')
                self.assertEqual(info, 'user-qs--dream')
        finally:
            removeFile('data/info_msg.templ')
            if os.path.isfile('data/_info_msg.templ'):
                os.rename('data/_info_msg.templ', 'data/info_msg.templ')

    def test_JsonFiles(self):
        # every shipped data file (if present) must be valid JSON
        if os.path.isfile('data/tempinfo.json'):
            with open('data/tempinfo.json', 'r') as infofile:
                json.load(infofile)
        if os.path.isfile("data/tokens.json"):
            with open('data/tokens.json', 'r') as infofile:
                json.load(infofile)
        if os.path.isfile("data/cards.json"):
            with open('data/cards.json', 'r') as infofile:
                json.load(infofile)
        if os.path.isfile("data/duels.json"):
            with open('data/duels.json', 'r') as infofile:
                json.load(infofile)
class TestSpelling(unittest.TestCase):
    """helper.py SpellChecker"""

    def test_Spellchecker(self):
        checker = SpellChecker(["abcdef"])
        # exact match is returned unchanged
        self.assertEqual(checker.correct("abcdef"), "abcdef")
        # single-character deletion, insertion, swap and replacement are fixed
        self.assertEqual(checker.correct("abcde"), "abcdef")
        self.assertEqual(checker.correct("bcdef"), "abcdef")
        self.assertEqual(checker.correct("acdef"), "abcdef")
        self.assertEqual(checker.correct("bacdef"), "abcdef")
        self.assertEqual(checker.correct("abcdeg"), "abcdef")
        self.assertEqual(checker.correct("aabcdef"), "abcdef")
        # only distance 1 errors are fixed
        self.assertEqual(checker.correct("abcd"), "abcd")
class TestBot(unittest.TestCase):
    """hearthscan-bot.py"""

    def test_AnswerMail_UserOnSpam(self):
        # a user already in the PM cache is ignored: no reddit or helper calls
        r = MagicMock()
        msg = MagicMock()
        msg.subreddit = None
        msg.author.name = 'user'
        msg.id = 'msgidus'
        msg.distinguished = None
        pmUserCache = {'user' : 1234}
        helper = MagicMock()
        # test
        hsbot.answerPM(r, msg, pmUserCache, helper)
        self.assertEqual(r.method_calls, [], 'no reddit calls')
        self.assertEqual(helper.method_calls, [], 'no helper calls')

    def test_AnswerMail_Success(self):
        # a PM containing cards gets a reply and the user is rate-limited
        r = MagicMock()
        msg = MagicMock()
        msg.subreddit = None
        msg.author.name = 'user'
        msg.id = 'msgids'
        msg.distinguished = None
        msg.subject = 'sub'
        msg.body = 'body'
        pmUserCache = { }
        helper = MagicMock()
        helper.parseText = MagicMock(return_value=(['card'], 'text'))
        # test
        hsbot.answerPM(r, msg, pmUserCache, helper)
        self.assertTrue('user' in pmUserCache, 'user added to cache')
        self.assertEqual(r.method_calls, [], 'no reddit calls')
        expected = [call.parseText('sub body')]
        self.assertEqual(helper.method_calls, expected, 'parseText')
        expected = [call.reply('text')]
        self.assertEqual(msg.method_calls, expected, 'reply')

    def test_Forward_PM(self):
        # a cardless PM is forwarded to the admin instead of answered
        r = MagicMock()
        msg = MagicMock()
        msg.subreddit = None
        msg.author.name = 'user'
        msg.id = 'msgidpm'
        msg.distinguished = None
        msg.subject = 'sub'
        msg.body = 'body'
        pmUserCache = { }
        helper = MagicMock()
        helper.parseText = MagicMock(return_value=([], ''))
        redMsg = MagicMock()
        r.redditor = MagicMock(return_value=redMsg)
        # test
        hsbot.answerPM(r, msg, pmUserCache, helper)
        self.assertTrue('user' in pmUserCache, 'user added to cache')
        expected = [call.redditor(credentials.admin_username)]
        self.assertEqual(r.method_calls, expected, 'get redditor')
        expected = [call.message('#msgidpm /u/user: "sub"', msg.body)]
        self.assertEqual(redMsg.method_calls, expected, 'set message')
        expected = [call.parseText('sub body')]
        self.assertEqual(helper.method_calls, expected, 'parseText')
        self.assertEqual(msg.method_calls, [], 'no reply')

    def test_Forward_PM_Answer(self):
        # an admin reply "re: #<id> ..." is relayed back to the original sender
        r = MagicMock()
        msg = MagicMock()
        msg.subreddit = None
        msg.author.name = credentials.admin_username
        msg.id = 'msgid2'
        msg.distinguished = None
        msg.subject = 're: #msgid1 /u/user: "sub"'
        msg.body = 'body'
        pmUserCache = { }
        helper = MagicMock()
        helper.parseText = MagicMock(return_value=([], 'text'))
        oldMsg = MagicMock()
        r.inbox.message = MagicMock(return_value=oldMsg)
        # test
        hsbot.answerPM(r, msg, pmUserCache, helper)
        self.assertTrue(msg.author.name not in pmUserCache, "don't admin")
        expected = [call.inbox.message('msgid1')]
        self.assertEqual(r.method_calls, expected, 'reddit call')
        expected = [call.message('msgid1')]
        self.assertEqual(r.inbox.method_calls, expected, 'get old msg')
        expected = [call.reply('body')]
        self.assertEqual(oldMsg.method_calls, expected, 'reply old')
        expected = [call.reply('answer forwarded')]
        self.assertEqual(msg.method_calls, expected, 'reply new')
        self.assertEqual(helper.method_calls, [], 'no helper calls')

    def test_CleamPMUserCache(self):
        # expired cache entries are dropped, future ones are kept
        future = int(time.time()) + 60
        cache = {"aaa": 123, "bbb": future}
        hsbot.cleanPMUserCache(cache)
        self.assertIsNone(cache.get("aaa"))
        self.assertEqual(cache["bbb"], future)
if __name__ == '__main__':
    # start each run with a fresh log file
    removeFile("test.log")
    logging.basicConfig(filename="test.log",
        format='%(asctime)s %(levelname)s %(name)s %(message)s',
        level=logging.DEBUG)
    print("run 'test.py online' to test online scraping functionality")
    # lazy argv fix: trim argv so unittest does not try to parse the
    # optional 'online' flag as a test name
    unittest.main(warnings='ignore', argv=[sys.argv[0]])
|
<filename>fdm-devito-notebooks/01_vib/exer-vib/vib_undamped_verify_mms.py<gh_stars>1-10
import sympy as sym
import numpy as np
V, t, I, w, dt = sym.symbols('V t I w dt') # global symbols
f = None # global variable for the source term in the ODE
def ode_source_term(u):
    """Return what the source term must balance: u'' + w**2*u.

    u is a symbolic Python function of t.
    """
    u_tt = sym.diff(u(t), t, t)
    return u_tt + w**2*u(t)
def residual_discrete_eq(u):
    """Return the (simplified) residual of the discrete equation
    DtDt(u) + w**2*u - f with u inserted."""
    residual = DtDt(u, dt) + w**2*u(t) - f
    return sym.simplify(residual)
def residual_discrete_eq_step1(u):
    """Return the residual of the discrete eq. at the first
    step with u inserted.

    The special first step uses both initial conditions:
    u(dt) = I + dt*V + (dt**2/2)*(f(0) - w**2*I).
    """
    half = sym.Rational(1,2)
    R = u(t+dt) - I - dt*V - \
        half*dt**2*f.subs(t, 0) + half*dt**2*w**2*I
    R = R.subs(t, 0) # t=0 in the rhs of the first step eq.
    return sym.simplify(R)
def DtDt(u, dt):
    """Second-order centered difference approximation of u''(t).

    u is a symbolic Python function of t; dt is the step length.
    """
    forward = u(t+dt)
    backward = u(t-dt)
    return (forward - 2*u(t) + backward)/dt**2
def main(u):
    """
    Given some chosen solution u (as a function of t, implemented
    as a Python function), use the method of manufactured solutions
    to compute the source term f, and check if u also solves
    the discrete equations.
    """
    # Converted to print() calls: the original Python 2 print statements are
    # syntax errors under Python 3.
    print('=== Testing exact solution: %s ===' % u(t))
    print("Initial conditions u(0)=%s, u'(0)=%s:" %
          (u(t).subs(t, 0), sym.diff(u(t), t).subs(t, 0)))
    # Method of manufactured solution requires fitting f
    global f  # source term in the ODE
    f = sym.simplify(ode_source_term(u))
    # Residual in discrete equations (should be 0)
    print('residual step1:', residual_discrete_eq_step1(u))
    print('residual:', residual_discrete_eq(u))
def linear():
    """Test linear function V*t+I: u(0)=I, u'(0)=V."""
    # A linear solution is reproduced exactly by the scheme, so both
    # residuals should simplify to 0.
    main(lambda t: V*t + I)
def quadratic():
    """Test quadratic function q*t**2 + V*t + I."""
    q = sym.Symbol('q') # arbitrary constant in t**2 term
    u_e = lambda t: q*t**2 + V*t + I
    main(u_e)
def cubic():
    """Test cubic function r*t**3 + q*t**2 + V*t + I."""
    r, q = sym.symbols('r q')
    main(lambda t: r*t**3 + q*t**2 + V*t + I)
def solver(I, V, f, w, dt, T):
    """
    Solve u'' + w**2*u = f for t in (0,T], u(0)=I and u'(0)=V,
    by a central finite difference method with time step dt.
    f(t) is a callable Python function.

    Returns (u, t): the solution samples and the mesh points.
    """
    dt = float(dt)
    Nt = int(round(T/dt))
    t = np.linspace(0, Nt*dt, Nt+1)
    u = np.zeros(Nt+1)
    # initial condition, then the special first step using u'(0)=V
    u[0] = I
    u[1] = u[0] - 0.5*dt**2*w**2*u[0] + 0.5*dt**2*f(t[0]) + dt*V
    # central-difference time stepping for the remaining points
    for n in range(1, Nt):
        u[n+1] = 2*u[n] - u[n-1] - dt**2*w**2*u[n] + dt**2*f(t[n])
    return u, t
def test_quadratic_exact_solution():
    """Verify solver function via quadratic solution.

    A quadratic is reproduced exactly by the central scheme, so the max
    error must be at round-off level.
    """
    # Transform global symbolic variables to functions and numbers
    # for numerical computations
    global p, V, I, w
    p, V, I, w = 2.3, 0.9, 1.2, 1.5
    global f, t
    u_e = lambda t: p*t**2 + V*t + I  # use p, V, I, w as numbers
    f = ode_source_term(u_e)          # fit source term
    f = sym.lambdify(t, f)            # make function numerical
    dt = 2./w
    u, t = solver(I=I, V=V, f=f, w=w, dt=dt, T=3)
    u_e = u_e(t)
    error = np.abs(u - u_e).max()
    tol = 1E-12
    assert error < tol
    # Converted to print(): the original Python 2 print statement is a
    # syntax error under Python 3.
    print('Error in computing a quadratic solution:', error)
if __name__ == '__main__':
    # run every manufactured-solution verification case
    linear()
    quadratic()
    cubic()
    test_quadratic_exact_solution()
|
"""Decorators and small standalone functions for api module"""
import logging
import urllib.parse
from functools import wraps
from typing import Sequence, Union, Iterable, Optional, List
from collections.abc import Mapping
import fnmatch
import pandas as pd
from iblutil.io import parquet
import numpy as np
import one.alf.exceptions as alferr
from one.alf.files import rel_path_parts, get_session_path
from one.alf.spec import FILE_SPEC, regex as alf_regex
import one.alf.io as alfio
logger = logging.getLogger(__name__)
def Listable(t):
    """Return a typing.Union of the input type and a sequence of that type."""
    return Union[t, Sequence[t]]
def ses2records(ses: dict) -> [pd.Series, pd.DataFrame]:
    """Extract session cache record and datasets cache from a remote session data record.

    TODO Fix for new tables; use to update caches from remote queries.

    Parameters
    ----------
    ses : dict
        Session dictionary from Alyx REST endpoint

    Returns
    -------
    pd.Series
        Session record
    pd.DataFrame
        Datasets frame
    """
    # Extract session record; the eid is the trailing 36-char UUID of the session URL
    eid = parquet.str2np(ses['url'][-36:])
    session_keys = ('subject', 'start_time', 'lab', 'number', 'task_protocol', 'project')
    session_data = {k: v for k, v in ses.items() if k in session_keys}
    # session_data['id_0'], session_data['id_1'] = eid.flatten().tolist()
    session = (
        (pd.Series(data=session_data, name=tuple(eid.flatten()))
         .rename({'start_time': 'date'}, axis=1))
    )
    # Keep only the date part (YYYY-MM-DD) of the ISO start time
    session['date'] = session['date'][:10]

    # Extract datasets table
    def _to_record(d):
        # NOTE(review): assumes each record has 'file_size', 'hash', 'id' and
        # 'data_url' keys as returned by this Alyx endpoint — confirm against API.
        rec = dict(file_size=d['file_size'], hash=d['hash'], exists=True)
        rec['id_0'], rec['id_1'] = parquet.str2np(d['id']).flatten().tolist()
        rec['eid_0'], rec['eid_1'] = session.name
        file_path = urllib.parse.urlsplit(d['data_url'], allow_fragments=False).path.strip('/')
        # Strip the embedded UUID from the file name to obtain the ALF dataset path
        file_path = alfio.remove_uuid_file(file_path, dry=True).as_posix()
        rec['session_path'] = get_session_path(file_path).as_posix()
        rec['rel_path'] = file_path[len(rec['session_path']):].strip('/')
        if 'default_revision' in d:
            # endpoint serializes the flag as the string 'True'/'False'
            rec['default_revision'] = d['default_revision'] == 'True'
        return rec

    records = map(_to_record, ses['data_dataset_session_related'])
    datasets = pd.DataFrame(records).set_index(['id_0', 'id_1']).sort_index()
    return session, datasets
def datasets2records(datasets) -> pd.DataFrame:
    """Build a datasets cache frame from one or more Alyx dataset records.

    Parameters
    ----------
    datasets : dict, list
        One or more records from the Alyx 'datasets' endpoint

    Returns
    -------
    pd.DataFrame
        Datasets frame

    Examples
    --------
    >>> datasets = ONE().alyx.rest('datasets', 'list', subject='foobar')
    >>> df = datasets2records(datasets)
    """
    index_cols = ['id_0', 'id_1']
    records = []
    for record in ensure_list(datasets):
        # Keep only datasets with at least one accessible file record
        accessible = (x for x in record['file_records'] if x['data_url'] and x['exists'])
        file_record = next(accessible, None)
        if file_record is None:
            continue
        row = dict(file_size=record['file_size'], hash=record['hash'], exists=True)
        row['id_0'], row['id_1'] = parquet.str2np(record['url'][-36:]).flatten().tolist()
        row['eid_0'], row['eid_1'] = parquet.str2np(record['session'][-36:]).flatten().tolist()
        parsed_url = urllib.parse.urlsplit(file_record['data_url'], allow_fragments=False)
        # Strip the embedded UUID from the file name to obtain the ALF dataset path
        file_path = alfio.remove_uuid_file(parsed_url.path.strip('/'), dry=True).as_posix()
        row['session_path'] = get_session_path(file_path).as_posix()
        row['rel_path'] = file_path[len(row['session_path']):].strip('/')
        row['default_revision'] = record['default_dataset']
        records.append(row)

    if not records:
        # Return an empty frame with the expected columns/index
        columns = index_cols + ['eid_0', 'eid_1', 'file_size', 'hash', 'session_path',
                                'rel_path', 'default_revision']
        return pd.DataFrame(columns=columns).set_index(index_cols)
    return pd.DataFrame(records).set_index(index_cols).sort_index()
def parse_id(method):
    """
    Ensures the input experiment identifier is an experiment UUID string

    Parameters
    ----------
    method : function
        An ONE method whose second arg is an experiment ID

    Returns
    -------
    function
        A wrapper function that parses the ID to the expected string

    Raises
    ------
    ValueError
        Unable to convert input to a valid experiment ID
    """
    @wraps(method)
    def wrapper(self, id, *args, **kwargs):
        # Normalize the identifier first; to_eid returns None when it cannot
        eid = self.to_eid(id)
        if eid is not None:
            return method(self, eid, *args, **kwargs)
        raise ValueError(f'Cannot parse session ID "{id}" (session may not exist)')
    return wrapper
def refresh(method):
    """Decorator that refreshes the cache before calling ``method``.

    The cache mode comes from the ``query_type`` keyword argument when given
    (and not 'auto' or falsy); otherwise the instance's own mode is used.
    """
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        requested = kwargs.get('query_type', None)
        if not requested or requested == 'auto':
            requested = self.mode
        self.refresh_cache(mode=requested)
        return method(self, *args, **kwargs)
    return wrapper
def validate_date_range(date_range) -> (pd.Timestamp, pd.Timestamp):
    """
    Validate a date range and arrange it as a (start, end) Timestamp pair.

    Parameters
    ----------
    date_range : str, datetime.date, datetime.datetime, pd.Timestamp, np.datetime64, list, None
        A single date or tuple/list of two dates.  None represents no bound.

    Returns
    -------
    tuple of pd.Timestamp
        The start and end timestamps

    Examples
    --------
    >>> validate_date_range('2020-01-01')  # On this day
    >>> validate_date_range(datetime.date(2020, 1, 1))
    >>> validate_date_range(np.array(['2022-01-30', '2022-01-30'], dtype='datetime64[D]'))
    >>> validate_date_range(pd.Timestamp(2020, 1, 1))
    >>> validate_date_range(['2020-01-01'])  # from date
    >>> validate_date_range(['2020-01-01', None])  # from date
    >>> validate_date_range([None, '2020-01-01'])  # up to date

    Raises
    ------
    ValueError
        Size of date range tuple must be 1 or 2
    """
    if date_range is None:
        return

    # Ensure we end up with exactly two bounds
    if isinstance(date_range, str) or not isinstance(date_range, Iterable):
        # A single date: span that whole day (up to 1 ms before midnight)
        day_span = pd.Timedelta(days=1) - pd.Timedelta(milliseconds=1)
        date_range = (date_range, pd.Timestamp(date_range) + day_span)
    elif len(date_range) == 1:
        date_range = [date_range[0], pd.Timestamp.max]
    elif len(date_range) != 2:
        raise ValueError

    # None (or other falsy) bounds become the widest possible range; everything
    # is coerced to pd.Timestamp for comparisons (datetime, date, datetime64 ok)
    start, end = date_range
    start = pd.Timestamp(start or pd.Timestamp.min)
    end = pd.Timestamp(end or pd.Timestamp.max)
    return start, end
def _collection_spec(collection=None, revision=None) -> str:
"""
Return a template string for a collection/revision regular expression. Because both are
optional in the ALF spec, None will match any (including absent), while an empty string will
match absent.
Parameters
----------
collection : None, str
An optional collection regular expression
revision : None, str
An optional revision regular expression
Returns
-------
str
A string format for matching the collection/revision
"""
spec = ''
for value, default in zip((collection, revision), ('{collection}/', '#{revision}#/')):
if not value:
default = f'({default})?' if value is None else ''
spec += default
return spec
def _file_spec(**kwargs):
    """
    Return a template string for an ALF dataset regular expression.  Because
    'namespace', 'timescale', and 'extra' are optional, None will match any
    value (including absent).  This function removes the regex flags from the
    file spec string that make those parts optional.

    TODO an empty string should only match absent; this could be achieved by
     removing parts from spec string

    Parameters
    ----------
    namespace : None, str
        If not None, the namespace section of the returned file spec will not
        be optional.
    timescale : None, str
        If not None, the timescale section of the returned file spec will not
        be optional.
    extra : None, str
        If not None, the extra section of the returned file spec will not be
        optional.

    Returns
    -------
    str
        A string format for matching an ALF dataset
    """
    OPTIONAL = {'namespace': '?', 'timescale': '?', 'extra': '*'}
    filespec = FILE_SPEC
    for key, value in kwargs.items():
        if key not in OPTIONAL or value is None:
            continue
        # Locate the regex flag that makes this part optional and drop its
        # first occurrence after the part's name.
        flag = OPTIONAL[key]
        i = filespec.find(key) + len(key)
        i += filespec[i:].find(flag)
        filespec = filespec[:i] + filespec[i:].replace(flag, '', 1)
    return filespec
def filter_datasets(all_datasets, filename=None, collection=None, revision=None,
                    revision_last_before=True, assert_unique=True, wildcards=False):
    """
    Filter the datasets cache table by the relative path (dataset name, collection and revision).
    When None is passed, all values will match.  To match on empty parts, use an empty string.
    When revision_last_before is true, None means return latest revision.

    Parameters
    ----------
    all_datasets : pandas.DataFrame
        A datasets cache table
    filename : str, dict, None
        A filename str or a dict of alf parts.  Regular expressions permitted.
    collection : str, None
        A collection string.  Regular expressions permitted.
    revision : str, None
        A revision string to match.  If revision_last_before is true, regular
        expressions are not permitted.
    revision_last_before : bool
        When true and no exact match exists, the (lexicographically) previous
        revision is used instead.  When false the revision string is matched
        like collection and filename, with regular expressions permitted.
    assert_unique : bool
        When true an error is raised if multiple collections or datasets are found
    wildcards : bool
        If true, use unix shell style matching instead of regular expressions

    Returns
    -------
    pd.DataFrame
        A slice of all_datasets that match the filters

    Examples
    --------
    Filter by dataset name and collection

    >>> datasets = filter_datasets(all_datasets, '.*spikes.times.*', 'alf/probe00')

    Filter datasets not in a collection

    >>> datasets = filter_datasets(all_datasets, collection='')

    Filter by matching revision

    >>> datasets = filter_datasets(all_datasets, 'spikes.times.npy',
    ...                            revision='2020-01-12', revision_last_before=False)

    Filter by filename parts

    >>> datasets = filter_datasets(all_datasets, dict(object='spikes', attribute='times'))
    """
    # Create a regular expression string to match relative path against
    filename = filename or {}
    regex_args = {'collection': collection}
    # When selecting the last revision before a date, the revision is matched
    # later by filter_revision_last_before, not by the regex
    spec_str = _collection_spec(collection, None if revision_last_before else revision)

    if isinstance(filename, dict):
        spec_str += _file_spec(**filename)
        regex_args.update(**filename)
    else:
        # Convert to regex if necessary and assert end of string
        filename = [fnmatch.translate(x) if wildcards else x + '$' for x in ensure_list(filename)]
        spec_str += '|'.join(filename)

    # If matching revision name, add to regex string
    if not revision_last_before:
        regex_args.update(revision=revision)

    for k, v in regex_args.items():
        if v is None:
            continue
        if wildcards:
            # Convert to regex, remove \\Z which asserts end of string
            v = (fnmatch.translate(x).replace('\\Z', '') for x in ensure_list(v))
        if not isinstance(v, str):
            regex_args[k] = '|'.join(v)  # logical OR

    # Build regex string and slice the table by relative path
    pattern = alf_regex('^' + spec_str, **regex_args)
    match = all_datasets[all_datasets['rel_path'].str.match(pattern)]
    if len(match) == 0 or not (revision_last_before or assert_unique):
        return match

    # Extract the revision part ('' when absent) of each matching path
    revisions = [rel_path_parts(x)[1] or '' for x in match.rel_path.values]
    if assert_unique:
        collections = set(rel_path_parts(x)[0] or '' for x in match.rel_path.values)
        if len(collections) > 1:
            _list = '"' + '", "'.join(collections) + '"'
            raise alferr.ALFMultipleCollectionsFound(_list)
        if not revision_last_before:
            if filename and len(match) > 1:
                _list = '"' + '", "'.join(match['rel_path']) + '"'
                raise alferr.ALFMultipleObjectsFound(_list)
            if len(set(revisions)) > 1:
                _list = '"' + '", "'.join(set(revisions)) + '"'
                raise alferr.ALFMultipleRevisionsFound(_list)
            else:
                return match
        elif filename and len(set(revisions)) != len(revisions):
            # Multiple matches for the same dataset within a revision
            _list = '"' + '", "'.join(match['rel_path']) + '"'
            raise alferr.ALFMultipleObjectsFound(_list)

    return filter_revision_last_before(match, revision, assert_unique=assert_unique)
def filter_revision_last_before(datasets, revision=None, assert_unique=True):
    """
    Filter datasets by revision, returning the previous revision in the
    ordered list when the given revision doesn't exactly match.

    Parameters
    ----------
    datasets : pandas.DataFrame
        A datasets cache table
    revision : str
        A revision string to match (regular expressions not permitted)
    assert_unique : bool
        When true an alferr.ALFMultipleRevisionsFound exception is raised when
        multiple default revisions are found; an alferr.ALFError when no
        default revision is found

    Returns
    -------
    pd.DataFrame
        A datasets DataFrame with 0 or 1 row per unique dataset
    """
    def _last_before(df):
        """Takes a DataFrame with only one dataset and multiple revisions, returns matching row"""
        # With no requested revision, prefer the flagged default revision
        if revision is None and 'default_revision' in df.columns:
            if assert_unique and sum(df.default_revision) > 1:
                revisions = df['revision'][df.default_revision.values]
                rev_list = '"' + '", "'.join(revisions) + '"'
                raise alferr.ALFMultipleRevisionsFound(rev_list)
            if sum(df.default_revision) == 1:
                return df[df.default_revision]
            # default_revision column all False; the default isn't copied to the remote repository
            dset_name = df['rel_path'].iloc[0]
            if assert_unique:
                raise alferr.ALFError(f'No default revision for dataset {dset_name}')
            else:
                logger.warning(f'No default revision for dataset {dset_name}; using most recent')
        # Compare revisions lexicographically
        if assert_unique and len(df['revision'].unique()) > 1:
            rev_list = '"' + '", "'.join(df['revision'].unique()) + '"'
            raise alferr.ALFMultipleRevisionsFound(rev_list)
        # Square brackets forces 1 row DataFrame returned instead of Series
        idx = index_last_before(df['revision'].tolist(), revision)
        return df.iloc[slice(0, 0) if idx is None else [idx], :]

    with pd.option_context('mode.chained_assignment', None):  # FIXME Explicitly copy?
        # Add a 'revision' column extracted from each relative path ('' if absent)
        datasets['revision'] = [rel_path_parts(x)[1] or '' for x in datasets.rel_path]
    # Group rows by their path with the revision part removed (i.e. by dataset)
    groups = datasets.rel_path.str.replace('#.*#/', '', regex=True).values
    grouped = datasets.groupby(groups, group_keys=False)
    return grouped.apply(_last_before)
def index_last_before(revisions: List[str], revision: Optional[str]) -> Optional[int]:
    """
    Return the index of the string that occurs directly before the provided
    revision string when lexicographically sorted.  If revision is None, the
    index of the most recent revision is returned.

    Parameters
    ----------
    revisions : list of strings
        A list of revision strings
    revision : None, str
        The revision string to match on

    Returns
    -------
    int, None
        Index of revision before matching string in sorted list, or None

    Examples
    --------
    >>> idx = index_last_before([], '2020-08-01')
    """
    if not revisions:
        return None  # no revisions to choose from
    ordered = sorted(revisions, reverse=True)
    if revision is None:  # most recent revision wins
        return revisions.index(ordered[0])
    # Walk from most recent; the first entry <= revision is the one directly before
    for candidate in ordered:
        if candidate <= revision:
            return revisions.index(candidate)
    return None
def autocomplete(term, search_terms) -> str:
    """
    Validate a (possibly partial) search term and return the complete name,
    e.g. autocomplete('subj') == 'subject'
    """
    term = term.lower()
    if term in search_terms:
        return term  # already a complete term
    matches = [x for x in search_terms if x.lower().startswith(term)]
    if not matches:
        raise ValueError(f'Invalid search term "{term}", see `one.search_terms()`')
    if len(matches) > 1:
        raise ValueError(f'Ambiguous search term "{term}"')
    return matches[0]
def ensure_list(value):
    """Wrap ``value`` in a list unless it is already a non-str/dict iterable."""
    if isinstance(value, (str, dict)) or not isinstance(value, Iterable):
        return [value]
    return value
class LazyId(Mapping):
    """
    Using a paginated response object or list of session records, extracts eid string when required
    """

    def __init__(self, pg):
        self._pg = pg  # the underlying (possibly paginated) session records

    def __getitem__(self, item):
        return self.ses2eid(self._pg[item])

    def __len__(self):
        return len(self._pg)

    def __iter__(self):
        for ses in self._pg:
            yield self.ses2eid(ses)

    @staticmethod
    def ses2eid(ses):
        """Return the experiment ID string(s) for one or more session records.

        Parameters
        ----------
        ses : one.webclient._PaginatedResponse, dict, list
            A collection of Alyx REST sessions endpoint records

        Returns
        -------
        str, list
            One or more experiment ID strings
        """
        if isinstance(ses, list):
            return [LazyId.ses2eid(x) for x in ses]
        # Prefer the explicit 'id' field; fall back to the URL's last segment
        return ses.get('id', None) or ses['url'].split('/').pop()
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Comparison functions for `astropy.cosmology.Cosmology`.
This module is **NOT** public API. To use these functions, import them from
the top-level namespace -- :mod:`astropy.cosmology`.
This module will be moved.
"""
from __future__ import annotations
import functools
import inspect
from typing import Any, Callable, Set, Tuple, Union
import numpy as np
from numpy import False_, True_, ndarray
from astropy import table
from astropy.cosmology.core import Cosmology
__all__ = []  # Nothing is scoped here


##############################################################################
# PARAMETERS

# Accepted 'format' argument values: a single flag/str, or a tuple of them.
_FormatType = Union[bool, None, str]
_FormatsT = Union[_FormatType, Tuple[_FormatType, ...]]
# Signature of a per-element parse function: (object, format) -> Cosmology.
_CompFnT = Callable[[Any, _FormatType], Cosmology]

_COSMO_AOK: Set[Any] = {None, True_, False_, "astropy.cosmology"}
# The numpy bool also catches real bool for ops "==" and "in"
##############################################################################
# UTILITIES
class _CosmologyWrapper:
    """
    A private wrapper class to hide things from :mod:`numpy`.
    This should never be exposed to the user.
    """

    __slots__ = ("wrapped", )
    # Use less memory and speed up initialization.

    _cantbroadcast: Tuple[type, ...] = (table.Row, table.Table)
    """
    Have to deal with things that do not broadcast well. e.g.
    `~astropy.table.Row` cannot be used in an array, even if ``dtype=object``
    and will raise a segfault when used in a `numpy.ufunc`.
    """

    wrapped: Any

    def __init__(self, wrapped: Any) -> None:
        self.wrapped = wrapped
# TODO! when py3.9+ use @functools.partial(np.frompyfunc, nin=2, nout=1)
# TODO! https://github.com/numpy/numpy/issues/9477 segfaults on astropy.row
# and np.vectorize can't coerce table to dtypes
def _wrap_to_ufunc(nin: int, nout: int) -> Callable[[_CompFnT], np.ufunc]:
def wrapper(pyfunc: _CompFnT) -> np.ufunc:
ufunc = np.frompyfunc(pyfunc, 2, 1)
return ufunc
return wrapper
@_wrap_to_ufunc(2, 1)
def _parse_format(cosmo: Any, format: _FormatType, /,) -> Cosmology:
    """Parse Cosmology-like input into Cosmologies, given a format hint.

    Parameters
    ----------
    cosmo : |Cosmology|-like, positional-only
        |Cosmology| to parse.
    format : bool or None or str, positional-only
        Whether to allow, before equivalence is checked, the object to be
        converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent
        to a |Cosmology|. `False` (default) will not allow conversion. `True` or
        `None` will, and will use the auto-identification to try to infer the
        correct format. A `str` is assumed to be the correct format to use when
        converting.

    Returns
    -------
    |Cosmology| or generator thereof

    Raises
    ------
    TypeError
        If ``cosmo`` is not a |Cosmology| and ``format`` equals `False`.
    ValueError
        If ``cosmo`` is a |Cosmology| and ``format`` is not `None`, a bool, or
        "astropy.cosmology" (the docstring previously claimed TypeError here,
        but the code raises ValueError).
    """
    # Deal with private wrapper
    if isinstance(cosmo, _CosmologyWrapper):
        cosmo = cosmo.wrapped

    # Shortcut if already a cosmology
    if isinstance(cosmo, Cosmology):
        if format not in _COSMO_AOK:
            allowed = '/'.join(map(str, _COSMO_AOK))
            raise ValueError(f"for parsing a Cosmology, 'format' must be {allowed}, not {format}")
        return cosmo
    # Convert, if allowed.
    elif format == False_:  # catches False and False_
        raise TypeError(f"if 'format' is False, arguments must be a Cosmology, not {cosmo}")
    else:
        format = None if format == True_ else format  # str->str, None/True/True_->None
        out = Cosmology.from_format(cosmo, format=format)  # this can error!

    return out
def _parse_formats(*cosmos: object, format: _FormatsT) -> ndarray:
    """Parse Cosmology-like to |Cosmology|, using provided formats.

    ``format`` is broadcast to match the shape of the cosmology arguments.
    Note that the cosmology arguments are not broadcast against ``format``,
    so it cannot determine the output shape.

    Parameters
    ----------
    *cosmos : |Cosmology|-like
        The objects to compare. Must be convertible to |Cosmology|, as
        specified by the corresponding ``format``.
    format : bool or None or str or array-like thereof, positional-only
        Whether to allow, before equivalence is checked, the object to be
        converted to a |Cosmology|. This allows, e.g. a |Table| to be
        equivalent to a |Cosmology|. `False` (default) will not allow
        conversion. `True` or `None` will, and will use the
        auto-identification to try to infer the correct format. A `str` is
        assumed to be the correct format to use when converting. Note
        ``format`` is broadcast as an object array to match the shape of
        ``cosmos`` so ``format`` cannot determine the output shape.

    Raises
    ------
    TypeError
        If any in ``cosmos`` is not a |Cosmology| and the corresponding
        ``format`` equals `False`.
    """
    formats = np.broadcast_to(np.array(format, dtype=object), len(cosmos))

    # `astropy.table.Row`/`Table` segfault inside a ufunc even with
    # dtype=object, so wrap anything that cannot broadcast before handing
    # the whole batch to the parsing ufunc.
    wrapped = [_CosmologyWrapper(c) if isinstance(c, _CosmologyWrapper._cantbroadcast) else c
               for c in cosmos]

    return _parse_format(wrapped, formats)
def _comparison_decorator(pyfunc: Callable[..., Any]) -> Callable[..., Any]:
"""Decorator to make wrapper function that parses |Cosmology|-like inputs.
Parameters
----------
pyfunc : Python function object
An arbitrary Python function.
Returns
-------
callable[..., Any]
Wrapped `pyfunc`, as described above.
Notes
-----
All decorated functions should add the following to 'Parameters'.
format : bool or None or str or array-like thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
allows, e.g. a |Table| to be given instead a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. Note ``format``
is broadcast as an object array to match the shape of ``cosmos`` so
``format`` cannot determine the output shape.
"""
sig = inspect.signature(pyfunc)
nin = sum(p.kind == 0 for p in sig.parameters.values())
# Make wrapper function that parses cosmology-like inputs
@functools.wraps(pyfunc)
def wrapper(*cosmos: Any, format: _FormatsT = False, **kwargs: Any) -> bool:
if len(cosmos) > nin:
raise TypeError(f"{wrapper.__wrapped__.__name__} takes {nin} positional"
f" arguments but {len(cosmos)} were given")
# Parse cosmologies to format. Only do specified number.
cosmos = _parse_formats(*cosmos, format=format)
# Evaluate pyfunc, erroring if didn't match specified number.
result = wrapper.__wrapped__(*cosmos, **kwargs)
# Return, casting to correct type casting is possible.
return result
return wrapper
##############################################################################
# COMPARISON FUNCTIONS
@_comparison_decorator
def cosmology_equal(cosmo1: Any, cosmo2: Any, /, *, allow_equivalent: bool=False) -> bool:
    r"""Return element-wise equality check on the cosmologies.

    .. note::

        Cosmologies are currently scalar in their parameters.

    Parameters
    ----------
    cosmo1, cosmo2 : |Cosmology|-like
        The objects to compare. Must be convertible to |Cosmology|, as specified
        by ``format``.

    format : bool or None or str or tuple thereof, optional keyword-only
        Whether to allow the arguments to be converted to a |Cosmology|. This
        allows, e.g. a |Table| to be given instead a |Cosmology|. `False`
        (default) will not allow conversion. `True` or `None` will, and will use
        the auto-identification to try to infer the correct format. A `str` is
        assumed to be the correct format to use when converting. Note ``format``
        is broadcast as an object array to match the shape of ``cosmos`` so
        ``format`` cannot determine the output shape.

    allow_equivalent : bool, optional keyword-only
        Whether to allow cosmologies to be equal even if not of the same class.
        For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
        and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.

    Examples
    --------
    Assuming the following imports

    >>> import astropy.units as u
    >>> from astropy.cosmology import FlatLambdaCDM

    Two identical cosmologies are equal.

    >>> cosmo1 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
    >>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
    >>> cosmology_equal(cosmo1, cosmo2)
    True

    And cosmologies with different parameters are not.

    >>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.4)
    >>> cosmology_equal(cosmo1, cosmo3)
    False

    Two cosmologies may be equivalent even if not of the same class. In these
    examples the |LambdaCDM| has :attr:`~astropy.cosmology.LambdaCDM.Ode0` set
    to the same value calculated in |FlatLambdaCDM|.

    >>> from astropy.cosmology import LambdaCDM
    >>> cosmo3 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
    >>> cosmology_equal(cosmo1, cosmo3)
    False
    >>> cosmology_equal(cosmo1, cosmo3, allow_equivalent=True)
    True

    While in this example, the cosmologies are not equivalent.

    >>> cosmo4 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
    >>> cosmology_equal(cosmo3, cosmo4, allow_equivalent=True)
    False

    Also, using the keyword argument, the notion of equality is extended to any
    Python object that can be converted to a |Cosmology|.

    >>> mapping = cosmo2.to_format("mapping")
    >>> cosmology_equal(cosmo1, mapping, format=True)
    True

    Either (or both) arguments can be |Cosmology|-like.

    >>> cosmology_equal(mapping, cosmo2, format=True)
    True

    The list of valid formats, e.g. the |Table| in this example, may be checked
    with ``Cosmology.from_format.list_formats()``.

    As can be seen in the list of formats, not all formats can be
    auto-identified by ``Cosmology.from_format.registry``. Objects of these
    kinds can still be checked for equality, but the correct format string must
    be used.

    >>> yml = cosmo2.to_format("yaml")
    >>> cosmology_equal(cosmo1, yml, format=(None, "yaml"))
    True

    This also works with an array of ``format`` matching the number of
    cosmologies.

    >>> cosmology_equal(mapping, yml, format=[True, "yaml"])
    True
    """
    # Check parameter equality
    if not allow_equivalent:
        eq = (cosmo1 == cosmo2)

    else:
        # Check parameter equivalence
        # The options are: 1) same class & parameters; 2) same class, different
        # parameters; 3) different classes, equivalent parameters; 4) different
        # classes, different parameters. (1) & (3) => True, (2) & (4) => False.
        eq = cosmo1.__equiv__(cosmo2)
        if eq is NotImplemented:
            eq = cosmo2.__equiv__(cosmo1)  # that failed, try from 'other'

        eq = False if eq is NotImplemented else eq

    # TODO! include equality check of metadata
    return eq
@_comparison_decorator
def _cosmology_not_equal(cosmo1: Any, cosmo2: Any, /, *, allow_equivalent: bool=False) -> bool:
    r"""Return element-wise cosmology non-equality check.

    .. note::

        Cosmologies are currently scalar in their parameters.

    Parameters
    ----------
    cosmo1, cosmo2 : |Cosmology|-like
        The objects to compare. Must be convertible to |Cosmology|, as specified
        by ``format``.

    format : bool or None or str or tuple thereof, optional keyword-only
        Whether to allow the arguments to be converted to a |Cosmology|. This
        allows, e.g. a |Table| to be given instead a Cosmology. `False`
        (default) will not allow conversion. `True` or `None` will, and will use
        the auto-identification to try to infer the correct format. A `str` is
        assumed to be the correct format to use when converting. ``format`` is
        broadcast to match the shape of the cosmology arguments. Note that the
        cosmology arguments are not broadcast against ``format``, so it cannot
        determine the output shape.

    allow_equivalent : bool, optional keyword-only
        Whether to allow cosmologies to be equal even if not of the same class.
        For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
        and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.

    See Also
    --------
    astropy.cosmology.cosmology_equal
        Element-wise equality check, with argument conversion to Cosmology.
    """
    # (The previously documented ``out`` parameter does not exist in the
    # signature and was removed from the docstring.)
    neq = not cosmology_equal(cosmo1, cosmo2, allow_equivalent=allow_equivalent)
    # TODO! it might eventually be worth the speed boost to implement some of
    # the internals of cosmology_equal here, but for now it's a hassle.

    return neq
|
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
# Used for Atari
class Conv_Q(nn.Module):
    """Convolutional Q-network for image observations (e.g. Atari frames)."""

    def __init__(self, frames, num_actions):
        super(Conv_Q, self).__init__()
        # Standard DQN convolutional torso followed by a two-layer head.
        self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
        self.c2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.c3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        # 64 channels * 7 * 7 spatial positions = 3136 features for 84x84 inputs.
        self.l1 = nn.Linear(3136, 512)
        self.l2 = nn.Linear(512, num_actions)

    def forward(self, state):
        """Return per-action Q-values for a batch of image states."""
        x = F.relu(self.c1(state))
        x = F.relu(self.c2(x))
        x = F.relu(self.c3(x))
        x = x.reshape(-1, 3136)
        x = F.relu(self.l1(x))
        return self.l2(x)
# Used for Box2D / Toy problems
class FC_Q(nn.Module):
    """Fully-connected Q-network for low-dimensional state vectors."""

    def __init__(self, state_dim, num_actions):
        super(FC_Q, self).__init__()
        # Two 256-unit hidden layers, then a linear output per action.
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, num_actions)

    def forward(self, state):
        """Return per-action Q-values for a batch of state vectors."""
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class DQN(object):
    """DQN agent with epsilon-greedy exploration and a target network
    updated either by full copy every N updates or by Polyak averaging."""

    def __init__(self, parameters, env_properties, device):
        self.device = device

        print("---------------------------------------")
        print("--------------- DQN    ----------------")
        print("---------------------------------------")

        # Make Q network, target network and initialize optimizer
        self.Q = Conv_Q(4, env_properties["num_actions"]).to(self.device) if env_properties["atari"] \
            else FC_Q(env_properties["state_dim"], env_properties["num_actions"]).to(self.device)
        self.Q_target = deepcopy(self.Q)
        # Optimizer class is looked up by name, e.g. parameters["optimizer"] == "Adam"
        self.Q_optimizer = getattr(torch.optim, parameters["optimizer"])(
            self.Q.parameters(), **parameters["optimizer_parameters"]
        )

        # Parameters for train()
        self.discount = parameters["discount"]

        # Select target update rule
        # copy: copy full target network every "target_update_frequency" iterations
        # polyak: update every timestep with proportion tau
        self.maybe_update_target = self.polyak_target_update if parameters["polyak_target_update"] \
            else self.copy_target_update
        # Expressed in training iterations (env steps / update_frequency)
        self.target_update_frequency = parameters["target_update_frequency"] \
            / parameters["update_frequency"]
        self.tau = parameters["tau"]

        # Parameters for exploration + Compute linear decay for epsilon
        self.initial_epsilon = parameters["initial_epsilon"]
        self.end_epsilon = parameters["end_epsilon"]
        # Negative slope: epsilon decays linearly per training iteration
        self.slope = (self.end_epsilon - self.initial_epsilon) \
            / parameters["epsilon_decay_period"] * parameters["update_frequency"]

        # Parameters for evaluation
        self.state_shape = (-1, 4, 84, 84) if env_properties["atari"] else (-1, env_properties["state_dim"])  # need to unroll gridworld into the right form
        self.evaluation_epsilon = parameters["evaluation_epsilon"]
        self.num_actions = env_properties["num_actions"]

        # Number of training iterations
        self.iterations = 0

        # List of elements returned by train(), to be displayed by logger
        self.display_list = ["Batch_Q", "Batch_TD", "Epsilon"]

    # NOTE(review): the `eval` parameter shadows the builtin of the same name.
    def select_action(self, state, eval=False):
        """Epsilon-greedy action selection; greedy w.p. (1 - eps)."""
        eps = self.evaluation_epsilon if eval \
            else max(self.slope * self.iterations + self.initial_epsilon, self.end_epsilon)

        # Select action according to policy with probability (1-eps)
        # otherwise, select random action
        if np.random.uniform(0,1) > eps:
            with torch.no_grad():
                #state = utils.get_obss_preprocessor(state)
                # NOTE(review): state.sum()['image'] looks environment-specific
                # (a dict-like observation with an 'image' key) — confirm the
                # observation format against the training loop.
                state = torch.FloatTensor(state.sum()['image']).reshape(self.state_shape).to(self.device)
                return int(self.Q(state).argmax(1))
        else:
            return np.random.randint(self.num_actions)
            # return 0

    def train(self, replay_buffer, batch_size):
        """One gradient step on a sampled minibatch; returns logging stats."""
        # Sample replay buffer
        state, action, next_state, reward, done = replay_buffer.sample(batch_size)

        # Compute the target Q value
        with torch.no_grad():
            # NOTE(review): target = reward + done * discount * max Q assumes
            # `done` is stored as a continuation ("not done") mask — confirm
            # the replay buffer's convention.
            target_Q = (
                reward + done * self.discount *
                self.Q_target(next_state).max(1, keepdim=True)[0]
            )

        # Get current Q estimate
        current_Q = self.Q(state).gather(1, action.long())

        # Compute critic loss
        critic_loss = F.smooth_l1_loss(current_Q, target_Q)

        # Optimize the critic
        self.Q_optimizer.zero_grad()
        critic_loss.backward()
        self.Q_optimizer.step()

        # Update target network by polyak or full copy every X iterations.
        self.iterations += 1
        self.maybe_update_target()

        # Return values for logger to display
        eps = max(self.slope * self.iterations + self.initial_epsilon, self.end_epsilon)
        return {
            "Batch_Q": current_Q.cpu().data.numpy().mean(),
            "Batch_TD": critic_loss.cpu().data.numpy(),
            "Epsilon": eps
        }

    def polyak_target_update(self):
        """Soft-update the target network towards Q with proportion tau."""
        for param, target_param in zip(self.Q.parameters(), self.Q_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def copy_target_update(self):
        """Copy the full Q network into the target every target_update_frequency iterations."""
        if self.iterations % self.target_update_frequency == 0:
            self.Q_target.load_state_dict(self.Q.state_dict())

    def save(self, filename):
        """Persist the Q-network weights to <filename>/model.pth."""
        torch.save(self.Q.state_dict(), filename + "/model.pth")

    def load(self, filename):
        """Restore the Q-network weights from <filename>/model.pth."""
        self.Q.load_state_dict(torch.load(filename + "/model.pth"))
|
"""
Find modules used by a script, using bytecode analysis.
Based on the stdlib modulefinder by <NAME> and <NAME>,
but uses a graph data structure and 2.3 features
"""
from pkg_resources import require
require("altgraph")
import dis
import imp
import marshal
import os
import sys
import new
import struct
import urllib
from itertools import ifilter, imap
from altgraph.Dot import Dot
from altgraph.ObjectGraph import ObjectGraph
from altgraph.GraphUtil import filter_stack
from altgraph.compat import *
READ_MODE = "U" # universal line endings
# Single-character opcode markers used to pattern-match the raw bytecode
# string in scan_opcodes()/scan_opcodes_25().
# NOTE(review): chr()-based comparisons against co_code slices are a
# Python 2 idiom (bytecode is a str there); this will not work on Python 3.
LOAD_CONST = chr(dis.opname.index('LOAD_CONST'))
IMPORT_NAME = chr(dis.opname.index('IMPORT_NAME'))
STORE_NAME = chr(dis.opname.index('STORE_NAME'))
STORE_GLOBAL = chr(dis.opname.index('STORE_GLOBAL'))
STORE_OPS = [STORE_NAME, STORE_GLOBAL]
HAVE_ARGUMENT = chr(dis.HAVE_ARGUMENT)
# Modulegraph does a good job at simulating Python's, but it can not
# handle packagepath modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.
# Note: this is a mapping of package names to lists of paths.
packagePathMap = {}
def moduleInfoForPath(path, suffixes=None):
    """Return (module_name, read_mode, type) for a file *path*, or None.

    Matches *path* against the recognized import suffixes (defaulting to
    imp.get_suffixes()) and strips the matching extension from the basename.

    Fix: the *suffixes* parameter was previously accepted but ignored — the
    loop always re-queried imp.get_suffixes(). The default is now resolved
    lazily instead of once at definition time.
    """
    if suffixes is None:
        suffixes = imp.get_suffixes()
    for (ext, readmode, typ) in suffixes:
        if path.endswith(ext):
            return os.path.basename(path)[:-len(ext)], readmode, typ
    return None
# A Public interface
def AddPackagePath(packagename, path):
    """Public interface: register an extra search *path* for *packagename*.

    Registered paths are honored by load_package() in addition to the
    package's real directory (see packagePathMap above).
    """
    packagePathMap.setdefault(packagename, []).append(path)
# Mapping of real package name -> replacement name used by load_package().
replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around the
# way the _xmlplus package injects itself under the name "xml" into
# sys.modules at runtime by calling ReplacePackage("_xmlplus", "xml")
# before running ModuleGraph.
def ReplacePackage(oldname, newname):
    """Public interface: record that *oldname* should be treated as *newname*."""
    replacePackageMap[oldname] = newname
class Node(object):
    """A node in the module graph: one module, package or script.

    `namespace` maps names assigned in this module to the nodes/values they
    refer to; `graphident` is the key used by the underlying ObjectGraph.
    """
    def __init__(self, identifier):
        self.graphident = identifier
        self.identifier = identifier
        self.namespace = {}
        self.filename = None
        self.packagepath = None
        self.code = None
        # The set of global names that are assigned to in the module.
        # This includes those names imported through starimports of
        # Python modules.
        self.globalnames = set()
        # The set of starimports this module did that could not be
        # resolved, ie. a starimport from a non-Python module.
        self.starimports = set()
    def __contains__(self, name):
        # Membership tests against the module's namespace.
        return name in self.namespace
    def __getitem__(self, name):
        return self.namespace[name]
    def __setitem__(self, name, value):
        self.namespace[name] = value
    def get(self, *args):
        # Same signature as dict.get (optional default).
        return self.namespace.get(*args)
    def __cmp__(self, other):
        # Python 2 ordering protocol: nodes order by graph identifier.
        return cmp(self.graphident, other.graphident)
    def __hash__(self):
        return hash(self.graphident)
    def infoTuple(self):
        # Overridden by subclasses to add filename/path details to repr().
        return (self.identifier,)
    def __repr__(self):
        return '%s%r' % (type(self).__name__, self.infoTuple())
class Alias(str):
    """Marker string: an `implies` value of this type names another module
    that the key should be resolved to (see ModuleGraph.findNode)."""
    pass
class AliasNode(Node):
    """A node that presents another node's module under a different name."""
    def __init__(self, name, node):
        super(AliasNode, self).__init__(name)
        # Mirror the aliased node's metadata onto this alias.
        # Fix: 'starimports' was misspelled 'startimports', so the aliased
        # node's unresolved star-import set was never actually copied
        # (Node defines `starimports`, not `startimports`).
        for k in ['identifier', 'packagepath', 'namespace', 'globalnames', 'starimports']:
            setattr(self, k, getattr(node, k, None))
    def infoTuple(self):
        # (alias name, real identifier)
        return (self.graphident, self.identifier)
class BadModule(Node):
    """Base class for modules that could not be imported cleanly."""
    pass
class ExcludedModule(BadModule):
    """A module deliberately excluded via the `excludes` option."""
    pass
class MissingModule(BadModule):
    """A module that was imported somewhere but could not be found."""
    pass
class Script(Node):
    """A top-level script file, identified by its (real) path."""
    def __init__(self, filename):
        # The filename doubles as the graph identifier for scripts.
        super(Script, self).__init__(filename)
        self.filename = filename
    def infoTuple(self):
        return (self.filename,)
class BaseModule(Node):
    """Common base for importable modules: dotted name plus optional
    source filename and (for packages) search path."""
    def __init__(self, name, filename=None, path=None):
        super(BaseModule, self).__init__(name)
        self.filename = filename
        self.packagepath = path
    def infoTuple(self):
        # Drop None entries so repr() only shows what is known.
        return tuple(filter(None, (self.identifier, self.filename, self.packagepath)))
class BuiltinModule(BaseModule):
    """A module compiled into the interpreter (imp.C_BUILTIN)."""
    pass
class SourceModule(BaseModule):
    """A module loaded from a .py source file (imp.PY_SOURCE)."""
    pass
class CompiledModule(BaseModule):
    """A module loaded from a .pyc/.pyo file (imp.PY_COMPILED)."""
    pass
class Package(BaseModule):
    """A package directory with an __init__ module."""
    pass
class FlatPackage(BaseModule):
    """A package whose contents are treated as a single flat unit."""
    pass
class Extension(BaseModule):
    """A C extension module; its code cannot be scanned."""
    pass
class NamespaceModule(BaseModule):
    """A namespace package (no __init__); also used as a type sentinel
    by find_module()/load_module()."""
    pass
class ModuleGraph(ObjectGraph):
    """Dependency graph of Python modules built by bytecode scanning.

    Nodes are the Node subclasses above; edges point from importer to
    imported module. NOTE(review): this class relies on Python 2-only
    constructs (`raise E, msg`, tuple parameters, `print >>`, str bytecode).
    """
    def __init__(self, path=None, excludes=(), replace_paths=(), implies=(), graph=None, debug=0):
        """path: module search path (defaults to sys.path); excludes: names
        to record as ExcludedModule; replace_paths: (old, new) filename
        prefix pairs for rewriting co_filename; implies: name -> implied
        dependencies, or an Alias naming a substitute module."""
        super(ModuleGraph, self).__init__(graph=graph, debug=debug)
        if path is None:
            path = sys.path
        self.path = path
        # name -> None (excluded) | Alias | iterable of implied dependencies;
        # entries are materialized lazily by findNode().
        self.lazynodes = {}
        # excludes is stronger than implies
        self.lazynodes.update(dict(implies))
        for m in excludes:
            self.lazynodes[m] = None
        self.replace_paths = replace_paths
    def implyNodeReference(self, node, other):
        """
        Imply that one node depends on another.
        other may be a module name or another node.
        For use by extension modules and tricky import code
        """
        if not isinstance(other, Node):
            # A bare name: import it (relative to node) and link each result.
            if not isinstance(other, tuple):
                other = (other, node)
            others = self.import_hook(*other)
            for other in others:
                self.createReference(node, other)
        elif isinstance(other, AliasNode):
            self.addNode(other)
            other.connectTo(node)
        else:
            self.createReference(node, other)
    def createReference(self, fromnode, tonode, edge_data='direct'):
        """Add an importer -> imported edge; annotation defaults to 'direct'."""
        return super(ModuleGraph, self).createReference(fromnode, tonode, edge_data=edge_data)
    def findNode(self, name):
        """
        Find a node by identifier.  If a node by that identifier exists,
        it will be returned.
        If a lazy node exists by that identifier with no dependencies (excluded),
        it will be instantiated and returned.
        If a lazy node exists by that identifier with dependencies, it and its
        dependencies will be instantiated and scanned for additional dependencies.
        """
        data = super(ModuleGraph, self).findNode(name)
        if data is not None:
            return data
        if name in self.lazynodes:
            deps = self.lazynodes.pop(name)
            if deps is None:
                # excluded module
                m = self.createNode(ExcludedModule, name)
            elif isinstance(deps, Alias):
                # Alias: import the real module and alias it under `name`.
                other = self._safe_import_hook(deps, None, None).pop()
                m = self.createNode(AliasNode, name, other)
                self.implyNodeReference(m, other)
            else:
                m = self._safe_import_hook(name, None, None).pop()
                for dep in deps:
                    self.implyNodeReference(m, dep)
            return m
        return None
    def run_script(self, pathname, caller=None):
        """
        Create a node by path (not module name). It is expected to be a Python
        source file, and will be scanned for dependencies.
        """
        self.msg(2, "run_script", pathname)
        pathname = os.path.realpath(pathname)
        m = self.findNode(pathname)
        if m is not None:
            return m
        # NOTE(review): file() is a Python 2 builtin.
        co = compile(file(pathname, READ_MODE).read()+'\n', pathname, 'exec')
        if self.replace_paths:
            co = self.replace_paths_in_code(co)
        m = self.createNode(Script, pathname)
        m.code = co
        self.createReference(caller, m)
        self.scan_code(co, m)
        return m
    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        """
        Import a module
        """
        self.msg(3, "import_hook", name, caller, fromlist)
        parent = self.determine_parent(caller, level=level)
        q, tail = self.find_head_package(parent, name)
        m = self.load_tail(q, tail)
        modules = set([m])
        if fromlist and m.packagepath:
            modules.update(self.ensure_fromlist(m, fromlist))
        for m in modules:
            self.createReference(caller, m)
        return modules
    def determine_parent(self, caller, level=-1):
        """Return the package node an import issued by *caller* is relative
        to, or None for absolute imports (mirrors the interpreter's rules)."""
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.identifier
        if level >= 1: # relative import
            if caller.packagepath:
                level -= 1
            if level == 0:
                parent = self.findNode(pname)
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError, "relative importpath too deep"
            pname = ".".join(pname.split(".")[:-level])
            parent = self.findNode(pname)
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if caller.packagepath:
            parent = self.findNode(pname)
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.findNode(pname)
            if parent:
                assert parent.identifier == pname
                self.msgout(4, "determine_parent ->", parent)
                return parent
        self.msgout(4, "determine_parent -> None")
        return None
    def find_head_package(self, parent, name):
        """
        Given a calling parent package and an import name determine the containing
        package for the name
        """
        self.msgin(4, "find_head_package", parent, name)
        if '.' in name:
            head, tail = name.split('.', 1)
        else:
            head, tail = name, ''
        if parent:
            qname = parent.identifier + '.' + head
        else:
            qname = head
        q = self.import_module(head, qname, parent)
        if q:
            self.msgout(4, "find_head_package ->", (q, tail))
            return q, tail
        if parent:
            # Fall back to an absolute import of the head.
            qname = head
            parent = None
            q = self.import_module(head, qname, parent)
            if q:
                self.msgout(4, "find_head_package ->", (q, tail))
                return q, tail
        self.msgout(4, "raise ImportError: No module named", qname)
        raise ImportError, "No module named " + qname
    def load_tail(self, q, tail):
        """Import each remaining dotted component of *tail* under head
        package node *q*; return the final (deepest) node."""
        self.msgin(4, "load_tail", q, tail)
        m = q
        while tail:
            i = tail.find('.')
            if i < 0: i = len(tail)
            head, tail = tail[:i], tail[i+1:]
            mname = "%s.%s" % (m.identifier, head)
            m = self.import_module(head, mname, m)
            if not m:
                self.msgout(4, "raise ImportError: No module named", mname)
                raise ImportError, "No module named " + mname
        self.msgout(4, "load_tail ->", m)
        return m
    def ensure_fromlist(self, m, fromlist):
        """Yield the submodule nodes of package *m* named in *fromlist*
        ('*' expands to all discoverable submodules)."""
        fromlist = set(fromlist)
        self.msg(4, "ensure_fromlist", m, fromlist)
        if '*' in fromlist:
            fromlist.update(self.find_all_submodules(m))
            fromlist.remove('*')
        for sub in fromlist:
            submod = m.get(sub)
            if submod is None:
                fullname = m.identifier + '.' + sub
                submod = self.import_module(sub, fullname, m)
                if submod is None:
                    raise ImportError, "No module named " + fullname
            yield submod
    def find_all_submodules(self, m):
        """Yield the names of all candidate submodules found on disk under
        package *m*'s packagepath."""
        if not m.packagepath:
            return
        # 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
        # But we must also collect Python extension modules - although
        # we cannot separate normal dlls from Python extensions.
        suffixes = [triple[0] for triple in imp.get_suffixes()]
        for path in m.packagepath:
            try:
                names = os.listdir(path)
            except os.error:
                self.msg(2, "can't list directory", path)
                continue
            for (path, mode, typ) in ifilter(None, imap(moduleInfoForPath, names)):
                if path != '__init__':
                    yield path
    def import_module(self, partname, fqname, parent):
        """Import a single component *partname* as *fqname* under *parent*;
        return its node, or None if it cannot be found."""
        self.msgin(3, "import_module", partname, fqname, parent)
        m = self.findNode(fqname)
        if m is not None:
            # Already in the graph: just record the reference.
            self.msgout(3, "import_module ->", m)
            if parent:
                self.createReference(m, parent)
            return m
        if parent and parent.packagepath is None:
            self.msgout(3, "import_module -> None")
            return None
        try:
            fp, pathname, stuff = self.find_module(partname,
                    parent and parent.packagepath, parent)
        except ImportError:
            self.msgout(3, "import_module ->", None)
            return None
        m = self.load_module(fqname, fp, pathname, stuff)
        if parent:
            self.createReference(m, parent)
            parent[partname] = m
        self.msgout(3, "import_module ->", m)
        return m
    def load_module(self, fqname, fp, pathname, (suffix, mode, typ)):
        """Create a node of the appropriate class for the found module and
        scan its code (if any) for further imports. The last argument is the
        (suffix, mode, type) triple from find_module()."""
        self.msgin(2, "load_module", fqname, fp and "fp", pathname)
        packagepath = None
        if typ == imp.PKG_DIRECTORY:
            m = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", m)
            return m
        if typ == imp.PY_SOURCE:
            co = compile(fp.read()+'\n', pathname, 'exec')
            cls = SourceModule
        elif typ == imp.PY_COMPILED:
            if fp.read(4) != imp.get_magic():
                self.msgout(2, "raise ImportError: Bad magic number", pathname)
                raise ImportError, "Bad magic number in %s" % pathname
            # Skip the timestamp field, then unmarshal the code object.
            fp.read(4)
            co = marshal.load(fp)
            cls = CompiledModule
        elif typ == imp.C_BUILTIN:
            cls = BuiltinModule
            co = None
        elif typ == NamespaceModule:
            # Sentinel returned by find_module() for namespace packages.
            cls = NamespaceModule
            co = None
            packagepath = sys.modules[fqname].__path__
        else:
            cls = Extension
            co = None
        m = self.createNode(cls, fqname)
        m.filename = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.code = co
            self.scan_code(co, m)
        if packagepath is not None:
            m.packagepath = packagepath
        self.msgout(2, "load_module ->", m)
        return m
    def _safe_import_hook(self, name, caller, fromlist, level=-1):
        # wrapper for self.import_hook() that won't raise ImportError
        # Failed imports become MissingModule nodes instead. Returns the set
        # of nodes the import statement binds (module plus fromlist entries).
        try:
            mods = self.import_hook(name, caller, level=level)
        except ImportError, msg:
            self.msg(2, "ImportError:", str(msg))
            m = self.createNode(MissingModule, name)
            self.createReference(caller, m)
        else:
            assert len(mods) == 1
            m = list(mods)[0]
        subs = set([m])
        for sub in (fromlist or ()):
            # If this name is in the module namespace already,
            # then add the entry to the list of substitutions
            if sub in m:
                sm = m[sub]
                if sm is not None:
                    subs.add(sm)
                    self.createReference(caller, sm)
                continue
            # See if we can load it
            fullname = name + '.' + sub
            sm = self.findNode(fullname)
            if sm is None:
                try:
                    sm = self.import_hook(name, caller, [sub], level=level)
                except ImportError, msg:
                    self.msg(2, "ImportError:", str(msg))
                    sm = self.createNode(MissingModule, fullname)
                else:
                    sm = self.findNode(fullname)
            m[sub] = sm
            if sm is not None:
                self.createReference(sm, m)
                subs.add(sm)
        return subs
    def scan_opcodes(self, co,
                     unpack = struct.unpack):
        # Scan the code, and yield 'interesting' opcode combinations
        # Version for Python 2.4 and older
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        while code:
            c = code[0]
            if c in STORE_OPS:
                oparg, = unpack('<H', code[1:3])
                yield "store", (names[oparg],)
                code = code[3:]
                continue
            # LOAD_CONST fromlist immediately followed by IMPORT_NAME.
            if c == LOAD_CONST and code[3] == IMPORT_NAME:
                oparg_1, oparg_2 = unpack('<xHxH', code[:6])
                yield "import", (consts[oparg_1], names[oparg_2])
                code = code[6:]
                continue
            if c >= HAVE_ARGUMENT:
                code = code[3:]
            else:
                code = code[1:]
    def scan_opcodes_25(self, co,
                        unpack = struct.unpack):
        # Scan the code, and yield 'interesting' opcode combinations
        # Python 2.5 version (has absolute and relative imports)
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        # LOAD_CONST level, LOAD_CONST fromlist, IMPORT_NAME name.
        LOAD_LOAD_AND_IMPORT = LOAD_CONST + LOAD_CONST + IMPORT_NAME
        while code:
            c = code[0]
            if c in STORE_OPS:
                oparg, = unpack('<H', code[1:3])
                yield "store", (names[oparg],)
                code = code[3:]
                continue
            if code[:9:3] == LOAD_LOAD_AND_IMPORT:
                oparg_1, oparg_2, oparg_3 = unpack('<xHxHxH', code[:9])
                level = consts[oparg_1]
                if level == -1: # normal import
                    yield "import", (consts[oparg_2], names[oparg_3])
                elif level == 0: # absolute import
                    yield "absolute_import", (consts[oparg_2], names[oparg_3])
                else: # relative import
                    yield "relative_import", (level, consts[oparg_2], names[oparg_3])
                code = code[9:]
                continue
            if c >= HAVE_ARGUMENT:
                code = code[3:]
            else:
                code = code[1:]
    def scan_code(self, co, m):
        """Walk code object *co* (and, recursively, nested code objects),
        recording stores and resolving the imports it performs for node *m*."""
        code = co.co_code
        if sys.version_info >= (2, 5):
            scanner = self.scan_opcodes_25
        else:
            scanner = self.scan_opcodes
        for what, args in scanner(co):
            if what == "store":
                name, = args
                m.globalnames.add(name)
            elif what in ("import", "absolute_import"):
                fromlist, name = args
                have_star = 0
                if fromlist is not None:
                    if "*" in fromlist:
                        have_star = 1
                    fromlist = [f for f in fromlist if f != "*"]
                if what == "absolute_import": level = 0
                else: level = -1
                self._safe_import_hook(name, m, fromlist, level=level)
                if have_star:
                    # We've encountered an "import *". If it is a Python module,
                    # the code has already been parsed and we can suck out the
                    # global names.
                    mm = None
                    if m.packagepath:
                        # At this point we don't know whether 'name' is a
                        # submodule of 'm' or a global module. Let's just try
                        # the full name first.
                        mm = self.findNode(m.identifier+ "." + name)
                    if mm is None:
                        mm = self.findNode(name)
                    if mm is not None:
                        m.globalnames.update(mm.globalnames)
                        m.starimports.update(mm.starimports)
                        if mm.code is None:
                            m.starimports.add(name)
                    else:
                        m.starimports.add(name)
            elif what == "relative_import":
                level, fromlist, name = args
                if name:
                    self._safe_import_hook(name, m, fromlist, level=level)
                else:
                    # "from . import x" style: import from the parent package.
                    parent = self.determine_parent(m, level=level)
                    self._safe_import_hook(parent.identifier, None, fromlist, level=0)
            else:
                # We don't expect anything else from the generator.
                raise RuntimeError(what)
        for c in co.co_consts:
            if isinstance(c, type(co)):
                self.scan_code(c, m)
    def load_package(self, fqname, pathname):
        """Create a Package node for directory *pathname*, honoring
        ReplacePackage() renames and AddPackagePath() additions, then load
        its __init__ module."""
        self.msgin(2, "load_package", fqname, pathname)
        newname = replacePackageMap.get(fqname)
        if newname:
            fqname = newname
        m = self.createNode(Package, fqname)
        m.filename = pathname
        # As per comment at top of file, simulate runtime packagepath additions.
        additions = packagePathMap.get(fqname, [])
        if pathname in additions:
            m.packagepath = additions
        else:
            m.packagepath = [pathname]+additions
        fp, buf, stuff = self.find_module("__init__", m.packagepath)
        self.load_module(fqname, fp, buf, stuff)
        self.msgout(2, "load_package ->", m)
        return m
    def find_module(self, name, path, parent=None):
        """Locate *name* on *path* via imp.find_module(), with special cases
        for builtins and pip-installed namespace packages. Raises ImportError
        if the node already exists or the module cannot be found."""
        if parent is not None:
            # assert path is not None
            fullname = parent.identifier+'.'+name
        else:
            fullname = name
        node = self.findNode(fullname)
        if node is not None:
            self.msgout(3, "find_module -> already included?", node)
            raise ImportError, name
        if path is None:
            if name in sys.builtin_module_names:
                return (None, None, ("", "", imp.C_BUILTIN))
            path = self.path
        try:
            fp, buf, stuff = imp.find_module(name, path)
        except ImportError:
            # pip installed namespace packages without a __init__
            m = sys.modules.get(fullname)
            if m is None or getattr(m, "__file__", None) or not getattr(m, "__path__", None):
                raise
            # NamespaceModule class doubles as the 'type' sentinel here.
            return (None, None, ("", "", NamespaceModule))
        if buf:
            buf = os.path.realpath(buf)
        return (fp, buf, stuff)
    def create_xref(self, out=None):
        """Write an HTML cross-reference of all graph nodes to *out*
        (defaults to stdout)."""
        if out is None:
            out = sys.stdout
        scripts = []
        mods = []
        for mod in self.flatten():
            name = os.path.basename(mod.identifier)
            if isinstance(mod, Script):
                scripts.append((name, mod))
            else:
                mods.append((name, mod))
        scripts.sort()
        mods.sort()
        scriptnames = [name for name, m in scripts]
        scripts.extend(mods)
        mods = scripts
        title = "modulegraph cross reference for " + ', '.join(scriptnames)
        print >>out, """<html><head><title>%s</title></head>
<body><h1>%s</h1>""" % (title, title)
        def sorted_namelist(mods):
            # Basename list for the import / imported-by link sections.
            lst = [os.path.basename(mod.identifier) for mod in mods if mod]
            lst.sort()
            return lst
        for name, m in mods:
            if isinstance(m, BuiltinModule):
                print >>out, """<a name="%s" /><tt>%s</tt>
<i>(builtin module)</i> <br />""" % (name, name)
            elif isinstance(m, Extension):
                print >>out, """<a name="%s" /><tt>%s</tt> <tt>%s</tt></a>
<br />""" % (name, name, m.filename)
            else:
                url = urllib.pathname2url(m.filename or "")
                print >>out, """<a name="%s" />
<a target="code" href="%s" type="text/plain"><tt>%s</tt></a>
<br />""" % (name, url, name)
            oute, ince = map(sorted_namelist, self.get_edges(m))
            if oute:
                print >>out, 'imports:'
                for n in oute:
                    print >>out, """<a href="#%s">%s</a>""" % (n, n)
                print >>out, '<br />'
            if ince:
                print >>out, 'imported by:'
                for n in ince:
                    print >>out, """<a href="#%s">%s</a>""" % (n, n)
                print >>out, '<br />'
            print >>out, '<br/>'
        print >>out, '</body></html>'
    def itergraphreport(self, name='G', flatpackages=()):
        """Yield the lines of a Dot (graphviz) digraph describing the graph,
        clustering package members into subgraphs."""
        nodes = map(self.graph.describe_node, self.graph.iterdfs(self))
        describe_edge = self.graph.describe_edge
        edges = deque()
        packagenodes = set()
        packageidents = {}
        nodetoident = {}
        inpackages = {}
        mainedges = set()
        # XXX - implement
        flatpackages = dict(flatpackages)
        def nodevisitor(node, data, outgoing, incoming):
            # Dot attributes for one node ('record' shape for real modules).
            if not isinstance(data, Node):
                return {'label': str(node)}
            #if isinstance(d, (ExcludedModule, MissingModule, BadModule)):
            #    return None
            s = '<f0> ' + type(data).__name__
            for i,v in izip(count(1), data.infoTuple()[:1]):
                s += '| <f%d> %s' % (i,v)
            return {'label':s, 'shape':'record'}
        def edgevisitor(edge, data, head, tail):
            # Dot attributes for one edge, by annotation.
            if data == 'orphan':
                return {'style':'dashed'}
            elif data == 'pkgref':
                return {'style':'dotted'}
            return {}
        yield 'digraph %s {\n' % (name,)
        attr = dict(rankdir='LR', concentrate='true')
        cpatt  = '%s="%s"'
        for item in attr.iteritems():
            yield '\t%s;\n' % (cpatt % item,)
        # find all packages (subgraphs)
        for (node, data, outgoing, incoming) in nodes:
            nodetoident[node] = getattr(data, 'identifier', None)
            if isinstance(data, Package):
                packageidents[data.identifier] = node
                inpackages[node] = set([node])
                packagenodes.add(node)
        # create sets for subgraph, write out descriptions
        for (node, data, outgoing, incoming) in nodes:
            # update edges
            for edge in imap(describe_edge, outgoing):
                edges.append(edge)
            # describe node
            yield '\t"%s" [%s];\n' % (
                node,
                ','.join([
                    (cpatt % item) for item in
                    nodevisitor(node, data, outgoing, incoming).iteritems()
                ]),
            )
            inside = inpackages.get(node)
            if inside is None:
                inside = inpackages[node] = set()
            ident = nodetoident[node]
            if ident is None:
                continue
            pkgnode = packageidents.get(ident[:ident.rfind('.')])
            if pkgnode is not None:
                inside.add(pkgnode)
        graph = []
        subgraphs = {}
        for key in packagenodes:
            subgraphs[key] = []
        while edges:
            edge, data, head, tail = edges.popleft()
            if ((head, tail)) in mainedges:
                continue
            mainedges.add((head, tail))
            tailpkgs = inpackages[tail]
            common = inpackages[head] & tailpkgs
            if not common and tailpkgs:
                # Route cross-package edges through the package node.
                usepkgs = sorted(tailpkgs)
                if len(usepkgs) != 1 or usepkgs[0] != tail:
                    edges.append((edge, data, head, usepkgs[0]))
                    edges.append((edge, 'pkgref', usepkgs[-1], tail))
                continue
            if common:
                common = common.pop()
                if tail == common:
                    edges.append((edge, data, tail, head))
                elif head == common:
                    subgraphs[common].append((edge, 'pkgref', head, tail))
                else:
                    edges.append((edge, data, common, head))
                    edges.append((edge, data, common, tail))
            else:
                graph.append((edge, data, head, tail))
        def do_graph(edges, tabs):
            # Emit edge lines at the given indentation level.
            edgestr = tabs + '"%s" -> "%s" [%s];\n'
            # describe edge
            for (edge, data, head, tail) in edges:
                attribs = edgevisitor(edge, data, head, tail)
                yield edgestr % (
                    head,
                    tail,
                    ','.join([(cpatt % item) for item in attribs.iteritems()]),
                )
        for g, edges in subgraphs.iteritems():
            yield '\tsubgraph "cluster_%s" {\n' % (g,)
            yield '\t\tlabel="%s";\n' % (nodetoident[g],)
            for s in do_graph(edges, '\t\t'):
                yield s
            yield '\t}\n'
        for s in do_graph(graph, '\t'):
            yield s
        yield '}\n'
    def graphreport(self, fileobj=None, flatpackages=()):
        """Write the Dot graph report to *fileobj* (defaults to stdout)."""
        if fileobj is None:
            fileobj = sys.stdout
        fileobj.writelines(self.itergraphreport(flatpackages=flatpackages))
    def report(self):
        """Print a report to stdout, listing the found modules with their
        paths, as well as modules that are missing, or seem to be missing.
        """
        print
        print "%-15s %-25s %s" % ("Class", "Name", "File")
        print "%-15s %-25s %s" % ("----", "----", "----")
        # Print modules found
        # NOTE(review): local name 'sorted' shadows the builtin.
        sorted = [(os.path.basename(mod.identifier), mod) for mod in self.flatten()]
        sorted.sort()
        for (name, m) in sorted:
            print "%-15s %-25s %s" % (type(m).__name__, name, m.filename or "")
    def replace_paths_in_code(self, co):
        """Return a copy of code object *co* (recursively) with co_filename
        rewritten according to self.replace_paths prefix pairs."""
        new_filename = original_filename = os.path.normpath(co.co_filename)
        for f, r in self.replace_paths:
            f = os.path.join(f, '')
            r = os.path.join(r, '')
            if original_filename.startswith(f):
                new_filename = r + original_filename[len(f):]
                break
        consts = list(co.co_consts)
        for i in range(len(consts)):
            if isinstance(consts[i], type(co)):
                consts[i] = self.replace_paths_in_code(consts[i])
        return new.code(co.co_argcount, co.co_nlocals, co.co_stacksize,
                        co.co_flags, co.co_code, tuple(consts), co.co_names,
                        co.co_varnames, new_filename, co.co_name,
                        co.co_firstlineno, co.co_lnotab,
                        co.co_freevars, co.co_cellvars)
def main():
    """Command-line driver: build a ModuleGraph for the given script(s).

    Options: -d increase debug, -q quiet, -m treat remaining args as module
    names, -p add search paths, -x exclude a module, -g emit a Dot graph.
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dgmp:qx:")
    except getopt.error, msg:
        print msg
        return
    # Process options
    debug = 1
    domods = 0
    dodot = False
    addpath = []
    excludes = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            excludes.append(a)
        if o == '-g':
            dodot = True
    # Provide default arguments
    if not args:
        script = __file__
    else:
        script = args[0]
    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print "path:"
        for item in path:
            print "   ", repr(item)
    # Create the module finder and turn its crank
    mf = ModuleGraph(path, excludes=excludes, debug=debug)
    for arg in args[1:]:
        if arg == '-m':
            domods = 1
            continue
        if domods:
            # Module-name mode; a trailing '.*' imports all submodules.
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.run_script(arg)
    mf.run_script(script)
    if dodot:
        mf.graphreport()
    else:
        mf.report()
    return mf  # for -i debugging
if __name__ == '__main__':
    # Allow a clean Ctrl-C exit; `mf` stays available under `python -i`.
    try:
        mf = main()
    except KeyboardInterrupt:
        print "\n[interrupt]"
|
import datetime as dt
import matplotlib.dates as dates
import matplotlib.pyplot as plt
import pandas as pd
import pylab
from matplotlib import ticker
from mplfinance.original_flavor import candlestick_ohlc
from pandas import DataFrame
import vnpy.analyze.data.data_prepare as dp
import vnpy.analyze.view.view_util as vutil
from vnpy.trader.constant import Direction
from vnpy.trader.utility import round_to
import math
# Moving-average window lengths, in bars.
# NOTE(review): the names look swapped — slow_ma (5) is the *shorter* window
# (plotted as the "short" SMA below) while fast_ma (10) is the longer one.
slow_ma = 5
fast_ma = 10
def draw(days: DataFrame, trades=None):
    """Plot a candlestick chart with SMA overlays plus volume, RSI and MACD
    panels; optionally annotate buy/sell trades.

    days: DataFrame with date/open/high/low/close/volume columns.
    trades: optional mapping of trade objects (datetime/price/volume/direction).
    """
    days['date'] = pd.to_datetime(days['date'])
    days['date'] = days['date'].apply(lambda x: dates.date2num(x))
    # drop the date index from the dataframe & make a copy
    days_reshape = days.reset_index()
    days_reshape.drop('volume', axis=1, inplace=True)
    days_reshape = days_reshape.reindex(columns=['date', 'open', 'high', 'low', 'close'])
    fig = plt.figure(facecolor='#07000d', figsize=(days.__len__() * 0.1, 10))
    ax = plt.subplot2grid((6, 4), (1, 0), rowspan=4, colspan=4)
    ax.set_facecolor('#07000d')
    candlestick_ohlc(ax, days_reshape.values, width=.6, colorup='#ff1717', colordown='#53c156')
    # Add moving-average lines
    short_rolling = days_reshape.close.rolling(window=slow_ma).mean()
    long_rolling = days_reshape.close.rolling(window=fast_ma).mean()
    # Plot only from the first bar where the longer MA is defined.
    SP = len(days_reshape.date.values[fast_ma - 1:])
    ax.plot(days_reshape.date.values[-SP:], short_rolling[-SP:], '#e1edf9', label=str(slow_ma) + 'SMA', linewidth=1.5)
    ax.plot(days_reshape.date.values[-SP:], long_rolling[-SP:], '#4ee6fd', label=str(fast_ma) + 'SMA', linewidth=1.5)
    # Style settings
    ax.grid(True, color='w')
    ax.xaxis.set_major_locator(ticker.MaxNLocator(math.floor(days_reshape.__len__() / 22)))
    ax.xaxis.set_major_formatter(dates.DateFormatter('%Y-%m-%d'))
    ax.yaxis.label.set_color("w")
    ax.spines['bottom'].set_color("#5998ff")
    ax.spines['top'].set_color("#5998ff")
    ax.spines['left'].set_color("#5998ff")
    ax.spines['right'].set_color("#5998ff")
    ax.tick_params(axis='y', colors='w')
    plt.gca().yaxis.set_major_locator(ticker.MaxNLocator(prune='upper'))
    ax.tick_params(axis='x', colors='w')
    ax.set_ylabel('Stock price and Volume')
    # 1. Volume panel (overlaid on the price axis via twinx)
    volumeMin = 0
    ax1v = ax.twinx()
    ax1v.fill_between(days.date.values, volumeMin, days.volume.values, facecolor='#00ffe8',
                      alpha=.4)
    ax1v.axes.yaxis.set_ticklabels([])
    ax1v.grid(False)
    # Edit this to 3, so it's a bit larger
    ax1v.set_ylim(0, 3 * days.volume.values.max())
    ax1v.spines['bottom'].set_color("#5998ff")
    ax1v.spines['top'].set_color("#5998ff")
    ax1v.spines['left'].set_color("#5998ff")
    ax1v.spines['right'].set_color("#5998ff")
    ax1v.tick_params(axis='x', colors='w')
    ax1v.tick_params(axis='y', colors='w')
    # 2. RSI panel
    maLeg = plt.legend(loc=9, ncol=2, prop={'size': 7},
                       fancybox=True, borderaxespad=0.)
    maLeg.get_frame().set_alpha(0.4)
    textEd = pylab.gca().get_legend().get_texts()
    pylab.setp(textEd[0:5], color='w')
    ax0 = plt.subplot2grid((6, 4), (0, 0), sharex=ax, rowspan=1, colspan=4)
    ax0.set_facecolor('#07000d')
    rsi = vutil.relative_strength_index(days_reshape, 5)['RSI_5']
    rsiCol = '#c1f9f7'
    posCol = '#386d13'
    negCol = '#8f2020'
    ax0.plot(days_reshape.date.values[-SP:], rsi[-SP:], rsiCol, linewidth=1.5)
    # Standard RSI overbought/oversold bands at 70/30.
    ax0.axhline(70, color=negCol)
    ax0.axhline(30, color=posCol)
    ax0.fill_between(days_reshape.date.values[-SP:], rsi[-SP:], 70, where=(rsi[-SP:] >= 70), facecolor=negCol,
                     edgecolor=negCol, alpha=0.5)
    ax0.fill_between(days_reshape.date.values[-SP:], rsi[-SP:], 30, where=(rsi[-SP:] <= 30), facecolor=posCol,
                     edgecolor=posCol, alpha=0.5)
    ax0.set_yticks([30, 70])
    ax0.yaxis.label.set_color("w")
    ax0.spines['bottom'].set_color("#5998ff")
    ax0.spines['top'].set_color("#5998ff")
    ax0.spines['left'].set_color("#5998ff")
    ax0.spines['right'].set_color("#5998ff")
    ax0.tick_params(axis='y', colors='w')
    ax0.tick_params(axis='x', colors='w')
    ax0.set_ylabel('RSI')
    # 3. MACD panel
    ax2 = plt.subplot2grid((6, 4), (5, 0), sharex=ax, rowspan=1, colspan=4)
    ax2.set_facecolor('#07000d')
    fillcolor = '#00ffe8'
    # NOTE(review): nslow/nfast/nema are unused — macd() is called with
    # literal 12/26 below.
    nslow = 26
    nfast = 12
    nema = 9
    df = vutil.macd(days_reshape, 12, 26)
    ema9 = df['MACDsign_12_26']
    macd = df['MACD_12_26']
    ax2.plot(days_reshape.date.values[-SP:], macd[-SP:], color='#4ee6fd', lw=2)
    ax2.plot(days_reshape.date.values[-SP:], ema9[-SP:], color='#e1edf9', lw=1)
    # Histogram fill: red where MACD is above its signal, green below.
    ax2.fill_between(days_reshape.date.values[-SP:], macd[-SP:] - ema9[-SP:], 0, where=(macd[-SP:] - ema9[-SP:] > 0),
                     alpha=0.5, facecolor=negCol,
                     edgecolor=negCol)
    ax2.fill_between(days_reshape.date.values[-SP:], macd[-SP:] - ema9[-SP:], 0, where=(macd[-SP:] - ema9[-SP:] < 0),
                     alpha=0.5, facecolor=posCol,
                     edgecolor=posCol)
    plt.gca().yaxis.set_major_locator(ticker.MaxNLocator(prune='upper'))
    ax2.spines['bottom'].set_color("#5998ff")
    ax2.spines['top'].set_color("#5998ff")
    ax2.spines['left'].set_color("#5998ff")
    ax2.spines['right'].set_color("#5998ff")
    ax2.tick_params(axis='x', colors='w')
    ax2.tick_params(axis='y', colors='w')
    plt.ylabel('MACD', color='w')
    ax2.yaxis.set_major_locator(ticker.MaxNLocator(nbins=5, prune='upper'))
    for label in ax2.xaxis.get_ticklabels():
        label.set_rotation(45)
    ax2.set_ylabel('MACD')
    # Trade annotations
    plt.suptitle('000001.XSHG', color='w')
    plt.setp(ax0.get_xticklabels(), visible=False)
    plt.setp(ax.get_xticklabels(), visible=False)
    # buy and sell annotate
    # TODO: arrow length could be derived from ATR
    if trades is not None:
        for trade in trades.values():
            info = '%s,%s,%s' % (trade.datetime.strftime("%Y/%m/%d"), round_to(trade.price, 0.01), trade.volume)
            if Direction.LONG == trade.direction:
                ax.annotate(info, xy=(dates.date2num(trade.datetime), trade.price), xycoords='data', color='yellow',
                            bbox=dict(boxstyle="round", fc="none", ec="yellow"),
                            xytext=(0, -40), textcoords='offset points', ha='center',
                            arrowprops=dict(color='yellow', arrowstyle="->"))
            if Direction.SHORT == trade.direction:
                ax.annotate(info, xy=(dates.date2num(trade.datetime), trade.price), xycoords='data', color='green',
                            bbox=dict(boxstyle="round", fc="none", ec="green"),
                            xytext=(0, 40), textcoords='offset points', ha='center',
                            arrowprops=dict(color='green', arrowstyle="->"))
    plt.subplots_adjust(left=.09, bottom=.14, right=.94, top=.95, wspace=.20, hspace=0)
    plt.show()
def draw_backtesting(his_data, trades=None):
    """Convert backtesting bar objects into a DataFrame and plot them.

    his_data: iterable of bar objects exposing datetime/open_price/high_price/
    low_price/close_price attributes. trades: optional trade mapping passed
    through to draw().

    Fix: per-row DataFrame.append was O(n^2) and was removed in pandas 2.0;
    build a list of dicts once and construct the frame in one call.
    NOTE(review): 'volume' is filled from data.low_price — looks like a
    copy/paste slip, preserved for behavior compatibility; confirm against
    the bar data source.
    """
    rows = [
        {'date': data.datetime, 'open': data.open_price, 'high': data.high_price,
         'low': data.low_price, 'close': data.close_price,
         'volume': data.low_price}
        for data in his_data
    ]
    df = pd.DataFrame(rows, columns=('date', 'open', 'high', 'low', 'close', 'volume'))
    draw(df, trades)
if __name__ == "__main__":
    # Call-auction example: get_call_auction('002594.XSHE', "2020-04-07", "2020-04-08", fields=None)
    # NOTE(review): keyword 'end_data' looks like a typo for 'end_date' —
    # confirm against dp.load_bar_data's signature.
    days = dp.load_bar_data('000001', 'XSHG', start_date=dt.datetime(2013, 1, 1), end_data=dt.datetime(2020, 1, 1))
    draw(days)
|
import json
from base64 import b64encode
import boto3
from prefect import Task
from prefect.client import Secret
from prefect.utilities.tasks import defaults_from_attrs
class LambdaCreate(Task):
    """
    Task for creating a Lambda function.

    Args:
        - function_name (str): name of the Lambda function to create
        - runtime (str): the identifier of the function's runtime
        - role (str): the Amazon Resource Name of the function's execution role
        - handler (str): the name of the method within your code that Lambda calls
            to execute your function
        - zip_file (str): path to zip file containing code for Lambda function,
            either zip_file or (bucket and bucket_key) must be passed
        - bucket (str): an S3 bucket in the same AWS region as your function
        - bucket_key (str): the Amazon S3 key of the deployment package
        - object_version (str, optional): for versioned S3 objects, the version of the
            deployment package to use
        - description (str, optional): description of Lambda function
        - function_timeout (int, optional): Lambda function timeout in seconds, default is 3 seconds
        - memorysize (int, optional): amount of memory that Lambda function has
            access to in MB, must be a multiple of 64 MB, default is 128
        - publish (bool, optional): set to True to publish the first version of the
            function during creation, defaults to True
        - subnet_ids (List[str], optional): list of subnet ids for vpc
            configuration
        - security_group_ids (List[str], optional): list of security
            group ids for vpc configuration
        - dead_letter_config (dict, optional): a dead letter queue configuration that
            specifies the queue or topic where Lambda sends asynchronous events
            when they fail processing
        - environment_variables (dict, optional): key-value pairs of environment
            variables to pass to the Lambda function
        - kms_key_arn (str, optional): the ARN of the AWS key management service used
            to encrypt your function's environment variables, if not provided, AWS
            Lambda uses a default service key
        - function_tags (dict, optional): a list of tags to apply to the function, string
            to string map
        - tracing_config (str, optional): set to Active to sample and trace a
            subset of incoming requests with Amazon X-Ray
        - layers (List[str], optional): a list of function layers to add to
            the function's execution environment, specify each layer by its ARN
        - aws_credentials_secret (str, optional): the name of the Prefect Secret
            that stores your AWS credentials; this Secret must be a JSON string
            with two keys: `ACCESS_KEY` and `SECRET_ACCESS_KEY`
        - **kwargs (dict, optional): additional keyword arguments to pass to the
            Task constructor
    """

    def __init__(
        self,
        function_name: str,
        runtime: str,
        role: str,
        handler: str,
        zip_file: str = None,
        bucket: str = "",
        bucket_key: str = "",
        object_version: str = None,
        description: str = "",
        function_timeout: int = 3,
        memorysize: int = 128,
        publish: bool = True,
        subnet_ids: list = None,
        security_group_ids: list = None,
        dead_letter_config: dict = None,
        environment_variables: dict = None,
        kms_key_arn: str = "",
        function_tags: dict = None,
        tracing_config: str = "PassThrough",
        layers: list = None,
        aws_credentials_secret: str = "AWS_CREDENTIALS",
        **kwargs
    ):
        self.aws_credentials_secret = aws_credentials_secret
        self.function_name = function_name
        self.runtime = runtime
        self.role = role
        self.handler = handler
        # If a zip file is provided, pass its contents to boto3's create call;
        # otherwise point at the S3 deployment package.
        if zip_file:
            # Bug fix: read via a context manager so the file handle is closed
            # deterministically (the original `open(...).read()` leaked it
            # until garbage collection).
            with open(zip_file, "rb") as package:
                self.code = {"ZipFile": package.read()}
        else:
            self.code = {"S3Bucket": bucket, "S3Key": bucket_key}
            if object_version:
                self.code["S3ObjectVersion"] = object_version
        self.description = description
        self.function_timeout = function_timeout
        self.memorysize = memorysize
        self.publish = publish
        # Only populate the VPC configuration with the pieces that were given.
        self.vpc_config = {}
        if subnet_ids:
            self.vpc_config["SubnetIds"] = subnet_ids
        if security_group_ids:
            self.vpc_config["SecurityGroupIds"] = security_group_ids
        self.dead_letter_config = dead_letter_config
        self.environment_variables = environment_variables
        self.kms_key_arn = kms_key_arn
        self.function_tags = function_tags
        self.tracing_config = tracing_config
        self.layers = layers
        super().__init__(**kwargs)

    @defaults_from_attrs("aws_credentials_secret")
    def run(self, aws_credentials_secret: str = "AWS_CREDENTIALS"):
        """
        Task run method. Creates Lambda function.

        Args:
            - aws_credentials_secret (str, optional): the name of the Prefect Secret
                that stores your AWS credentials; this Secret must be a JSON string
                with two keys: `ACCESS_KEY` and `SECRET_ACCESS_KEY`

        Returns:
            - json: response from AWS CreateFunction endpoint
        """
        # Get AWS credentials from the configured Prefect Secret.
        aws_credentials = Secret(aws_credentials_secret).get()
        aws_access_key = aws_credentials["ACCESS_KEY"]
        aws_secret_access_key = aws_credentials["SECRET_ACCESS_KEY"]
        lambda_client = boto3.client(
            "lambda",
            aws_access_key_id=aws_access_key,
            aws_secret_access_key=aws_secret_access_key,
        )
        # Create the Lambda function, substituting empty containers for the
        # optional settings that were not provided.
        response = lambda_client.create_function(
            FunctionName=self.function_name,
            Runtime=self.runtime,
            Role=self.role,
            Handler=self.handler,
            Code=self.code,
            Description=self.description,
            Timeout=self.function_timeout,
            MemorySize=self.memorysize,
            Publish=self.publish,
            VpcConfig=self.vpc_config,
            DeadLetterConfig=self.dead_letter_config or {},
            Environment={"Variables": self.environment_variables or {}},
            KMSKeyArn=self.kms_key_arn,
            TracingConfig={"Mode": self.tracing_config},
            Tags=self.function_tags or {},
            Layers=self.layers or [],
        )
        return response
class LambdaDelete(Task):
    """
    Task for deleting a Lambda function.

    Args:
        - function_name (str): name of the Lambda function to delete
        - qualifier (str, optional): a specific version to delete; when empty,
            the function is deleted entirely
        - aws_credentials_secret (str, optional): the name of the Prefect Secret
            that stores your AWS credentials; this Secret must be a JSON string
            with two keys: `ACCESS_KEY` and `SECRET_ACCESS_KEY`
        - **kwargs (dict, optional): additional keyword arguments to pass to the
            Task constructor
    """

    def __init__(
        self,
        function_name: str,
        qualifier: str = "",
        aws_credentials_secret: str = "AWS_CREDENTIALS",
        **kwargs
    ):
        self.function_name = function_name
        self.qualifier = qualifier
        self.aws_credentials_secret = aws_credentials_secret
        super().__init__(**kwargs)

    @defaults_from_attrs("aws_credentials_secret")
    def run(self, aws_credentials_secret: str = "AWS_CREDENTIALS"):
        """
        Task run method. Deletes Lambda function.

        Args:
            - aws_credentials_secret (str, optional): the name of the Prefect Secret
                that stores your AWS credentials; this Secret must be a JSON string
                with two keys: `ACCESS_KEY` and `SECRET_ACCESS_KEY`

        Returns:
            - dict: response from AWS DeleteFunction endpoint
        """
        # Build a Lambda client from the stored Prefect Secret credentials.
        credentials = Secret(aws_credentials_secret).get()
        lambda_client = boto3.client(
            "lambda",
            aws_access_key_id=credentials["ACCESS_KEY"],
            aws_secret_access_key=credentials["SECRET_ACCESS_KEY"],
        )
        # Only include Qualifier when a version was supplied; without it the
        # whole function is deleted.
        delete_kwargs = {"FunctionName": self.function_name}
        if self.qualifier:
            delete_kwargs["Qualifier"] = self.qualifier
        return lambda_client.delete_function(**delete_kwargs)
class LambdaInvoke(Task):
    """
    Task to invoke a Lambda function.

    Args:
        - function_name (str): the name of the Lambda function to invoke
        - invocation_type (str, optional): the invocation type of the Lambda
            function; default is RequestResponse, other options include
            Event and DryRun
        - log_type (str, optional): set to 'Tail' to include the execution
            log in the response
        - client_context (dict, optional): data to pass to the function in the
            context object; the dict is transformed into base64-encoded JSON
            automatically
        - payload (bytes or seekable file-like object): the JSON provided to
            the Lambda function as input
        - qualifier (str, optional): specify a version or alias to invoke a
            published version of the function, defaults to $LATEST
        - aws_credentials_secret (str, optional): the name of the Prefect Secret
            that stores your AWS credentials; this Secret must be a JSON string
            with two keys: `ACCESS_KEY` and `SECRET_ACCESS_KEY`
        - **kwargs (dict, optional): additional keyword arguments to pass to the
            Task constructor
    """

    def __init__(
        self,
        function_name: str,
        invocation_type: str = "RequestResponse",
        log_type: str = "None",
        client_context: dict = None,
        payload: str = json.dumps(None),
        qualifier: str = "$LATEST",
        aws_credentials_secret: str = "AWS_CREDENTIALS",
        **kwargs
    ):
        self.function_name = function_name
        self.invocation_type = invocation_type
        self.log_type = log_type
        # AWS expects the client context as base64-encoded JSON; encode it up
        # front so run() can pass it straight through.
        self.client_context = self._encode_lambda_context(**(client_context or {}))
        self.payload = payload
        self.qualifier = qualifier
        self.aws_credentials_secret = aws_credentials_secret
        super().__init__(**kwargs)

    def _encode_lambda_context(self, custom=None, env=None, client=None):
        """
        Encode the Lambda client context as base64 JSON.

        Args:
            - custom (dict, optional): key-value pairs for the custom context
            - env (dict, optional): key-value pairs for the environment context
            - client (dict, optional): key-value pairs for the client context

        Returns:
            - str: base64-encoded JSON object
        """
        context = {"custom": custom, "env": env, "client": client}
        encoded = json.dumps(context).encode("utf-8")
        return b64encode(encoded).decode("utf-8")

    @defaults_from_attrs("function_name", "payload", "aws_credentials_secret")
    def run(
        self,
        function_name: str = None,
        payload: str = None,
        aws_credentials_secret: str = "AWS_CREDENTIALS",
    ):
        """
        Task run method. Invokes the Lambda function.

        Args:
            - function_name (str): the name of the Lambda function to invoke
            - payload (bytes or seekable file-like object): the JSON provided to
                the Lambda function as input
            - aws_credentials_secret (str, optional): the name of the Prefect Secret
                that stores your AWS credentials; this Secret must be a JSON string
                with two keys: `ACCESS_KEY` and `SECRET_ACCESS_KEY`

        Returns:
            - dict: response from AWS Invoke endpoint
        """
        # Build a Lambda client from the stored Prefect Secret credentials.
        credentials = Secret(aws_credentials_secret).get()
        lambda_client = boto3.client(
            "lambda",
            aws_access_key_id=credentials["ACCESS_KEY"],
            aws_secret_access_key=credentials["SECRET_ACCESS_KEY"],
        )
        return lambda_client.invoke(
            FunctionName=function_name,
            InvocationType=self.invocation_type,
            LogType=self.log_type,
            ClientContext=self.client_context,
            Payload=payload,
            Qualifier=self.qualifier,
        )
class LambdaList(Task):
    """
    Task to list Lambda functions.

    Args:
        - master_region (str, optional): for Lambda@Edge functions, the AWS
            region of the master function
        - function_version (str, optional): the version of a function,
            default is 'ALL'
        - marker (str, optional): the pagination token returned by a previous
            request, used to retrieve the next page of results
        - max_items (int, optional): a value between 1 and 50 limiting the
            number of functions in the response
        - aws_credentials_secret (str, optional): the name of the Prefect Secret
            that stores your AWS credentials; this Secret must be a JSON string
            with two keys: `ACCESS_KEY` and `SECRET_ACCESS_KEY`
        - **kwargs (dict, optional): additional keyword arguments to pass to the
            Task constructor
    """

    def __init__(
        self,
        master_region: str = "ALL",
        function_version: str = "ALL",
        marker: str = None,
        max_items: int = 50,
        aws_credentials_secret: str = "AWS_CREDENTIALS",
        **kwargs
    ):
        self.master_region = master_region
        self.function_version = function_version
        self.marker = marker
        self.max_items = max_items
        self.aws_credentials_secret = aws_credentials_secret
        super().__init__(**kwargs)

    @defaults_from_attrs("aws_credentials_secret")
    def run(self, aws_credentials_secret: str = "AWS_CREDENTIALS"):
        """
        Task run method. Lists all Lambda functions.

        Args:
            - aws_credentials_secret (str, optional): the name of the Prefect Secret
                that stores your AWS credentials; this Secret must be a JSON string
                with two keys: `ACCESS_KEY` and `SECRET_ACCESS_KEY`

        Returns:
            - dict: a list of Lambda functions from the AWS ListFunctions endpoint
        """
        # Build a Lambda client from the stored Prefect Secret credentials.
        credentials = Secret(aws_credentials_secret).get()
        lambda_client = boto3.client(
            "lambda",
            aws_access_key_id=credentials["ACCESS_KEY"],
            aws_secret_access_key=credentials["SECRET_ACCESS_KEY"],
        )
        # Marker is only valid when continuing a previous paginated request,
        # so add it to the call only when it was supplied.
        list_kwargs = dict(
            MasterRegion=self.master_region,
            FunctionVersion=self.function_version,
            MaxItems=self.max_items,
        )
        if self.marker:
            list_kwargs["Marker"] = self.marker
        return lambda_client.list_functions(**list_kwargs)
|
<reponame>LaudateCorpus1/PACE
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This program parses JMH benchmarks and graphs them.
"""
from collections import namedtuple
import json
import locale
from os import mkdir, path
from cycler import cycler
from enum import Enum
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
# Make sure the output directory exists.
# Every generated figure below is saved into this directory.
output_directory = path.join('./performance_graphs/')
if not path.exists(output_directory):
    mkdir(output_directory)
# Named tuples and enum
# Benchmark: one JMH result file plus its {display name: config file} mapping.
Benchmark = namedtuple('Benchmark', ['filename', 'config_files'])
# MeasurementPlot: everything needed to render one figure.
MeasurementPlot = namedtuple('MeasurementPlot', ['benchmark_name', 'configs', 'params', 'plot_options'])
# RunParams: the JMH parameter combination identifying a run (values kept as strings,
# matching the JSON's string-valued params).
RunParams = namedtuple('RunParams', ['rows', 'columns', 'keySize', 'valueSize'])
# RunKey -> RunValue: aggregated (mean score, confidence-interval half-width) per run.
RunKey = namedtuple('RunKey', ['config_file', 'run_params'])
RunValue = namedtuple('RunValue', ['score', 'error'])
class GraphType(Enum):
    """How raw JMH timing scores are transformed before plotting."""
    total_time = 1  # plot the raw measured score
    time_per_op = 2  # score divided by the number of operations
    ops_per_time = 3  # operations divided by the score (throughput)
# Available benchmarks:
# display name -> Benchmark(JMH result file, {config display name: config file}).
# The empty config file '' presumably selects the unmodified Accumulo baseline
# — confirm against the benchmark harness.
benchmarks = {
    'Encryption (Read)': Benchmark('jmh-encryption-read.json', {
        'Accumulo': '',
        'Field-Level': 'encryption/encrypt-baseline.ini',
        'CEABAC': 'encryption/encrypt-value.ini',
        'CEABAC-Entry': 'encryption/encrypt-entry.ini',
        'Searchable': 'encryption/searchable.ini',
    }),
    'Encryption (Write)': Benchmark('jmh-encryption-write.json', {
        'Accumulo': '',
        'Field-Level': 'encryption/encrypt-baseline.ini',
        'CEABAC': 'encryption/encrypt-value.ini',
        'CEABAC-Entry': 'encryption/encrypt-entry.ini',
        'Searchable': 'encryption/searchable.ini',
    }),
    'Signature (Read)': Benchmark('jmh-signature-read.json', {
        'Accumulo': '',
        'Value': 'signature/read/value.ini',
        'Visibility': 'signature/read/column.ini',
        'Table': 'signature/read/table.ini',
        'RSA-PKCS1': 'signature/rsa-pkcs1.ini',
        # NOTE(review): same file as 'Value' above — looks like a copy-paste
        # slip (expected 'signature/rsa-pss.ini'?); confirm before trusting
        # the RSA-PSS read numbers.
        'RSA-PSS': 'signature/read/value.ini',
        'DSA': 'signature/dsa.ini',
        'ECDSA': 'signature/ecdsa.ini'
    }),
    'Signature (Write)': Benchmark('jmh-signature-write.json', {
        'Accumulo': '',
        'Value': 'signature/write/value.ini',
        'Visibility': 'signature/write/column.ini',
        'Table': 'signature/write/table.ini',
        'RSA-PKCS1': 'signature/rsa-pkcs1.ini',
        'RSA-PSS': 'signature/rsa-pss.ini',
        'DSA': 'signature/dsa.ini',
        # NOTE(review): same file as 'Value' above — possible copy-paste slip
        # (expected 'signature/ecdsa.ini'?); confirm.
        'ECDSA': 'signature/write/value.ini'
    })
}
# Options list and various configurations.
# default_options is copied and overlaid with per-plot options in
# plot_measurements(); keys not overridden keep these defaults.
default_options = {
    'title': None,  # graph's (optional) title
    'x_label': '',  # graph's X label
    'x_tick_labels': '',  # graph's X tick labels
    'y_label': 'Operations / sec',  # graph's Y label
    'confidence_interval': .95,  # confidence interval to calculate
    'mode': GraphType.ops_per_time,  # mode to graph
    'scale': 1000.0,  # multiply each data point by scale
    'figure_size': [8.0, 3.0],  # size of the figure area in inches
    'number_format': '%.0f',  # number format for data point labels
    'number_rotation': 75,  # rotation of numbers
    'number_y_offset': 0,  # offset of the y value to the bars
    'legend': True,  # whether to display a legend
    'legend_location': 'best',  # legend location
    'legend_bounding_box': None,  # legend bounding box
    'legend_ncol': 2,  # number of columns in the legend
    'filename': None  # name of the file to save
}
# Configuration groups plotted together in one figure.
configs_encryption = ['Accumulo', 'Field-Level', 'CEABAC', 'CEABAC-Entry', 'Searchable']
configs_signature = ['Accumulo', 'Value', 'Visibility', 'Table']
configs_signature_modes = ['RSA-PKCS1', 'RSA-PSS', 'DSA', 'ECDSA']
# Parameter sets: one bar group per entry, in list order.
params_set_data_size = [
    RunParams(rows='1000', columns='10', keySize='10', valueSize='10'),
    RunParams(rows='1000', columns='10', keySize='100', valueSize='10'),
    RunParams(rows='1000', columns='10', keySize='10', valueSize='1000'),
    RunParams(rows='1000', columns='10', keySize='100', valueSize='1000')
]
params_set_operations = [
    RunParams(rows='10', columns='1', keySize='100', valueSize='1000'),
    RunParams(rows='100', columns='1', keySize='100', valueSize='1000'),
    RunParams(rows='1000', columns='1', keySize='100', valueSize='1000'),
    RunParams(rows='1000', columns='10', keySize='100', valueSize='1000')
]
x_label_operations = 'Batch Size'
x_label_data_size = 'Data Size'
x_tick_labels_operations = ('10', '100', '1,000', '10,000')
# NOTE(review): the order of these labels (value size varies fastest) does not
# match params_set_data_size above (key size varies fastest) — entries 2 and 3
# appear swapped relative to the data; confirm which ordering was intended.
x_tick_labels_data_size = ('key=10 bytes,\nvalue=10 bytes',
                           'key=10 bytes,\nvalue=1,000 bytes',
                           'key=100 bytes,\nvalue=10 bytes',
                           'key=100 bytes,\nvalue=1,000 bytes')
figure_size_normal = [8.0, 3.0]
figure_size_wide = [16.0, 3.0]
# Load runs
def load_runs(benchmark_name, configs, params, options):
    """Load and aggregate the measurement runs for one benchmark.

    Args:
        benchmark_name: key into the module-level ``benchmarks`` dict.
        configs: configuration display names to include.
        params: RunParams combinations to include.
        options: resolved options dict (uses 'mode', 'scale',
            'confidence_interval').

    Returns:
        dict mapping RunKey(config_file, run_params) to
        RunValue(mean, confidence-interval half-width).

    Raises:
        ValueError: if options['mode'] is not a GraphType member.
    """
    benchmark = benchmarks[benchmark_name]
    with open(benchmark.filename) as file_handle:
        contents = json.load(file_handle)
    included_config_files = [benchmark.config_files[name] for name in configs]
    runs = dict()
    for run in contents:
        config_file = run['params']['configFile']
        run_params = RunParams(
            run['params']['rowCount'],
            run['params']['columnCount'],
            run['params']['keyFieldSize'],
            run['params']['valueFieldSize'])
        # Only keep runs that are going to be plotted.
        if config_file not in included_config_files:
            continue
        if run_params not in params:
            continue
        # Transform the raw JMH scores into the requested metric.
        ops = float(run_params.rows) * float(run_params.columns)
        data = np.array([score for scores in run['primaryMetric']['rawData'] for score in scores])
        mode = options['mode']
        if mode is GraphType.total_time:
            pass
        elif mode is GraphType.time_per_op:
            data = data / ops
        elif mode is GraphType.ops_per_time:
            data = ops / data
        else:
            # Bug fix: the original executed `raise 'invalid graph type'`,
            # which itself raises a TypeError in Python 3 (exceptions must
            # derive from BaseException). Raise a proper exception instead.
            raise ValueError('invalid graph type')
        data = data * options['scale']
        mean = np.mean(data)
        standard_error = stats.sem(data)
        # Half-width of the confidence interval around the mean, using
        # Student's t with len(data) - 1 degrees of freedom.
        interval = standard_error * stats.t.ppf((1 + options['confidence_interval']) / 2, len(data) - 1)
        runs[RunKey(config_file, run_params)] = RunValue(mean, interval)
    return runs
# Create graphs
# Global matplotlib/locale setup shared by every figure below.
plt.style.use('ggplot')
locale.setlocale(locale.LC_ALL, 'en_US')
# Start from ggplot's default color cycle, drop the 4th color and extend the
# palette so there are enough distinct colors for all configurations.
colors = []
for item in plt.rcParams['axes.prop_cycle']:
    colors.append(item['color'])
colors.pop(3)
colors.append('purple')
colors.append('orange')
plt.rc('axes', prop_cycle=cycler('color', colors))
def plot_measurements(benchmark_name, configs, params, plot_options, show_figure=False):
    """Render one grouped bar chart for a benchmark and save it to a file.

    Args:
        benchmark_name: key into the module-level ``benchmarks`` dict.
        configs: configuration display names; one bar per config in each group.
        params: RunParams combinations; one bar group per combination.
        plot_options: per-plot overrides merged on top of ``default_options``.
        show_figure: when True, also display the figure interactively.
    """
    options = default_options.copy()
    options.update(plot_options)
    runs = load_runs(benchmark_name, configs, params, options)
    # Create the bar chart
    fig, ax = plt.subplots(figsize=options['figure_size'])
    extra_artists = []
    # One slot per config within each param group plus one slot of spacing.
    bar_locs = np.arange(len(params)) * (len(configs) + 1) + 1
    bar_graphs = []
    for index, config in enumerate(configs):
        config_file = benchmarks[benchmark_name].config_files[config]
        means = []
        stds = []
        for run_params in params:
            run = runs[RunKey(config_file, run_params)]
            means.append(run.score)
            stds.append(run.error)
        # One bar series per configuration, offset by its index in the group;
        # error bars show the confidence-interval half-width from load_runs.
        bar_graph = ax.bar(bar_locs + index,
                           means,
                           color=colors[index % len(colors)],
                           yerr=stds,
                           error_kw=dict(ecolor='black', lw=.5, capsize=4, capthick=.5))
        bar_graphs.append(bar_graph)
    # Add some text for labels, title and axes ticks
    if options['title'] is not None:
        extra_artists.append(ax.set_title(options['title']))
    ax.set_xlabel(options['x_label'])
    ax.set_xticks(bar_locs + len(configs) / 2.)
    ax.set_xticklabels(options['x_tick_labels'])
    ax.set_ylabel(options['y_label'])
    if options['legend']:
        extra_artists.append(ax.legend(
            tuple([bar_graph[0] for bar_graph in bar_graphs]),
            tuple(configs),
            loc=options['legend_location'],
            bbox_to_anchor=options['legend_bounding_box'],
            ncol=options['legend_ncol']))
    # Label every bar with its (locale-formatted) height.
    # NOTE(review): locale.format() is deprecated and removed in Python 3.12;
    # locale.format_string() is the replacement — confirm the target Python.
    for bar_graph in bar_graphs:
        for bar in bar_graph:
            height = bar.get_height()
            extra_artists.append(ax.text(
                bar.get_x() + bar.get_width() * 2./3.,
                height + options['number_y_offset'],
                locale.format(options['number_format'], height, grouping=True),
                ha='center',
                va='bottom',
                rotation=options['number_rotation']))
    # Show the plot
    if show_figure:
        plt.show()
    fig.savefig(path.join(output_directory, options['filename']),
                bbox_extra_artists=tuple(extra_artists),
                bbox_inches='tight')
def plot_legend(configs, filename, ncol=None):
    """Render a standalone legend (no axes) and save it to *filename*."""
    fig = plt.figure(figsize=(0.1, 0.1))
    # One colored patch per configuration, paired with the shared color cycle.
    handles = []
    for label, colour in zip(configs, colors):
        handles.append(mpatches.Patch(color=colour, label=label))
    columns = len(configs) if ncol is None else ncol
    legend = fig.legend(handles, configs, loc='center', ncol=columns)
    fig.savefig(filename, bbox_extra_artists=(legend,), bbox_inches='tight')
# Plot legends.
# NOTE(review): plot_legend() is defined above but never called in this file —
# the legend-plotting step seems to have been removed; confirm.
# List all measurement plots and create them.
plots = [
    MeasurementPlot('Encryption (Write)', configs_encryption, params_set_data_size, {
        'x_label': x_label_data_size,
        'x_tick_labels': x_tick_labels_data_size,
        'number_y_offset': 2500,
        'filename': 'encryption_write_data_size.pdf'
    }),
    MeasurementPlot('Encryption (Read)', configs_encryption, params_set_data_size, {
        'x_label': x_label_data_size,
        'x_tick_labels': x_tick_labels_data_size,
        'number_y_offset': 1000,
        'filename': 'encryption_read_data_size.pdf'
    }),
    MeasurementPlot('Encryption (Write)', configs_encryption, params_set_operations, {
        'x_label': x_label_operations,
        'x_tick_labels': x_tick_labels_operations,
        'number_y_offset': 250,
        'filename': 'encryption_write_operations.pdf'
    }),
    MeasurementPlot('Encryption (Read)', configs_encryption, params_set_operations, {
        'x_label': x_label_operations,
        'x_tick_labels': x_tick_labels_operations,
        'number_y_offset': 250,
        'filename': 'encryption_read_operations.pdf'
    }),
    MeasurementPlot('Signature (Write)', configs_signature, params_set_data_size, {
        'x_label': x_label_data_size,
        'x_tick_labels': x_tick_labels_data_size,
        'number_y_offset': 2500,
        'filename': 'signature_write_data_size.pdf'
    }),
    MeasurementPlot('Signature (Read)', configs_signature, params_set_data_size, {
        'x_label': x_label_data_size,
        'x_tick_labels': x_tick_labels_data_size,
        'number_y_offset': 1250,
        'filename': 'signature_read_data_size.pdf'
    }),
    MeasurementPlot('Signature (Write)', configs_signature, params_set_operations, {
        'x_label': x_label_operations,
        'x_tick_labels': x_tick_labels_operations,
        'number_y_offset': 125,
        'filename': 'signature_write_operations.pdf'
    }),
    MeasurementPlot('Signature (Read)', configs_signature, params_set_operations, {
        'x_label': x_label_operations,
        'x_tick_labels': x_tick_labels_operations,
        'number_y_offset': 125,
        'filename': 'signature_read_operations.pdf'
    }),
    MeasurementPlot('Signature (Write)', configs_signature_modes, params_set_data_size, {
        'x_label': x_label_data_size,
        'x_tick_labels': x_tick_labels_data_size,
        'number_y_offset': 250,
        'filename': 'signature_write_data_size_modes.pdf'
    }),
    MeasurementPlot('Signature (Read)', configs_signature_modes, params_set_data_size, {
        'x_label': x_label_data_size,
        'x_tick_labels': x_tick_labels_data_size,
        'number_y_offset': 250,
        'filename': 'signature_read_data_size_modes.pdf'
    }),
    MeasurementPlot('Signature (Write)', configs_signature_modes, params_set_operations, {
        'x_label': x_label_operations,
        'x_tick_labels': x_tick_labels_operations,
        'number_y_offset': 125,
        'filename': 'signature_write_operations_modes.pdf'
    }),
    MeasurementPlot('Signature (Read)', configs_signature_modes, params_set_operations, {
        'x_label': x_label_operations,
        'x_tick_labels': x_tick_labels_operations,
        'number_y_offset': 125,
        'filename': 'signature_read_operations_modes.pdf'
    })
]
# Render every figure listed above into the output directory.
for plot in plots:
    plot_measurements(plot.benchmark_name, plot.configs, plot.params, plot.plot_options)
|
<gh_stars>0
from tests.integration.star_wars import star_wars_test_urls, \
STAR_WARS_TRIVIA_PART_2_DEFAULT_ANSWERS, \
STAR_WARS_TRIVIA_PART_3_DEFAULT_ANSWERS, \
STAR_WARS_TRIVIA_PART_1_DEFAULT_ANSWERS
from tests.integration.star_wars.star_wars_tests import StarWarsTestCase
class TestNavigation(StarWarsTestCase):
    """Integration test walking the Star Wars survey forwards and backwards."""
    def test_light_side_path(self):
        """Complete the survey end-to-end, revisiting earlier pages on the way."""
        self.launchSurvey()
        self.start_questionnaire_and_navigate_routing()
        # navigate back to the introduction
        self.get(star_wars_test_urls.STAR_WARS_INTRODUCTION)
        # navigate to the first page
        self.get(star_wars_test_urls.STAR_WARS_TRIVIA_PART_1)
        # Form submission with no errors
        self.post(STAR_WARS_TRIVIA_PART_1_DEFAULT_ANSWERS)
        # Check the second page (we're on it already)
        self.assertInUrl('star-wars-trivia-part-2')
        self.check_second_quiz_page()
        # now navigate back to the first page
        self.get(star_wars_test_urls.STAR_WARS_TRIVIA_PART_1)
        self._check_quiz_first_page()
        # now go back to the second page
        self.get(star_wars_test_urls.STAR_WARS_TRIVIA_PART_2)
        self.check_second_quiz_page()
        # And continue
        self.assertInUrl('star-wars-trivia-part-2')
        self.post(STAR_WARS_TRIVIA_PART_2_DEFAULT_ANSWERS)
        # third page
        self.assertInUrl('star-wars-trivia-part-3')
        self.assertInPage('Finally, which is your favourite film?')
        self.post(STAR_WARS_TRIVIA_PART_3_DEFAULT_ANSWERS)
        # There are no validation errors
        self.assertRegexUrl(star_wars_test_urls.STAR_WARS_SUMMARY_REGEX)
        # We are on the review answers page; every answer given earlier should
        # appear next to its question ((?s) lets .*? span newlines).
        self.assertInPage('>Star Wars</')
        self.assertInPage('>Check your answers and submit<')
        self.assertRegexPage('(?s)How old is Chewy?.*?234')
        self.assertRegexPage('(?s)How many Octillions do Nasa reckon it would cost to build a death star?.*?£40')
        self.assertRegexPage('(?s)How hot is a lightsaber in degrees C?.*?1,370')
        self.assertRegexPage("(?s)What animal was used to create the engine sound of the Empire's TIE fighters?.*?Elephant") # NOQA
        self.assertRegexPage('(?s)Which of these Darth Vader quotes is wrong?.*?Luke, I am your father')
        self.assertRegexPage('(?s)Which 3 have wielded a green lightsaber?.*?Yoda') # NOQA
        self.assertRegexPage('(?s)Which 3 appear in any of the opening crawlers?')
        self.assertRegexPage("(?s)When was The Empire Strikes Back released?.*?<span class='date'>28 May 1983</span> "
                             "to <span class='date'>29 May 1983</span>") # NOQA
        self.assertRegexPage('(?s)What was the total number of Ewoks?.*?')
        self.assertRegexPage("(?s)Why doesn't Chewbacca receive a medal at the end of A New Hope?.*?"
                             'Wookiees don’t place value in material rewards and refused the medal initially') # NOQA
        self.assertInPage('>You can check your answers below<')
        self.assertInPage('>Submit answers<')
        # Submit answers
        self.post(action=None)
        self.assertInUrl('thank-you')
    def _check_quiz_first_page(self):
        """Assert every question from trivia part 1 is rendered on the page."""
        self.assertInPage('>Save and continue<')
        self.assertInPage('Star Wars Quiz')
        self.assertInPage('May the force be with you young EQ developer')
        # Integer question
        self.assertInPage('How old is Chewy?')
        self.assertInPage('chewies-age-answer')
        # Currency question
        self.assertInPage('How many Octillions do Nasa reckon it would cost to build a death star?')
        self.assertInPage('death-star-cost-answer')
        # Radio box question
        self.assertInPage("What animal was used to create the engine sound of the Empire's TIE fighters?") # NOQA
        self.assertInPage('Lion')
        self.assertInPage('Cow')
        self.assertInPage('Elephant')
        self.assertInPage('Hippo')
        self.assertInPage('tie-fighter-sound-answer')
        # Checkbox question
        self.assertInPage('Which 3 have wielded a green lightsaber?')
        self.assertInPage('Luke Skywalker')
        self.assertInPage('Anakin Skywalker')
        self.assertInPage('Obi-Wan Kenobi')
        self.assertInPage('Yoda')
        self.assertInPage('Rey')
        self.assertInPage('Qui-<NAME>')
        self.assertInPage('green-lightsaber-answer')
        # Date Range question
        self.assertInPage('When was The Empire Strikes Back released?')
        self.assertInPage('Period from')
        self.assertInPage('Period to')
        self.assertInPage('Day')
        self.assertInPage('Month')
        self.assertInPage('Year')
        self.assertInPage('empire-strikes-back-from-answer')
        self.assertInPage('empire-strikes-back-to-answer')
        # Pipe Test for question description
        self.assertInPage("It could be between <span class='date'>1 April 2016</span> and "
                          "<span class='date'>30 April 2016</span>. But that might just be a test") # NOQA
|
# SPDX-License-Identifier: BSD-3-Clause
from platform import python_version
from typing import Iterator, List, cast
from softfab.FabPage import FabPage
from softfab.Page import PageProcessor
from softfab.packaging import dependencies, getDistribution
from softfab.projectlib import getBootTime
from softfab.timeview import formatTime
from softfab.userlib import User
from softfab.utils import parseVersion
from softfab.version import VERSION as softFabVersion
from softfab.webgui import Column, Table, docLink, maybeLink
from softfab.xmlgen import XMLContent, xhtml
class About_GET(FabPage[FabPage.Processor, FabPage.Arguments]):
    """The "About" page: version, status, installed packages and browser info."""
    icon = 'IconHome'
    description = 'About'
    def checkAccess(self, user: User) -> None:
        # Intentionally empty: every user may view this page.
        pass
    def presentContent(self, **kwargs: object) -> XMLContent:
        """Yield the page body section by section."""
        yield xhtml.h2[ 'SoftFab ', softFabVersion ]
        yield xhtml.h3[ 'Status' ]
        yield StatusTable.instance.present(**kwargs)
        yield xhtml.h3[ 'Installation' ]
        yield xhtml.p[
            'This Control Center runs on the following '
            'open source software:'
            ]
        yield InstallationTable.instance.present(**kwargs)
        proc = cast(FabPage.Processor, kwargs['proc'])
        yield xhtml.h3[ 'Web Browser' ]
        yield BrowserTable.instance.present(**kwargs)
        yield (
            xhtml.p[ 'Raw user agent string:' ],
            xhtml.pre(style = 'white-space: pre-wrap')[
                proc.req.userAgent.rawUserAgent
                ]
            )
        yield xhtml.h3[ 'Documentation' ]
        yield (
            xhtml.p[
                'The complete set of SoftFab documentation can be found '
                'on the ', docLink('/')['documentation pages'],
                '.'
                ]
            )
class StatusTable(Table):
    """Two-column status table: uptime and database schema version."""
    columns = None, None

    def iterRows(self, **kwargs: object) -> Iterator[XMLContent]:
        proc = cast(FabPage.Processor, kwargs['proc'])
        yield 'Up since', formatTime(getBootTime())
        dbVersion = proc.project.dbVersion
        # Render the database version in red when its major.minor does not
        # match the running software's version.
        if parseVersion(dbVersion)[:2] == parseVersion(softFabVersion)[:2]:
            versionCell: XMLContent = dbVersion
        else:
            versionCell = xhtml.span(style = 'color: red')[
                dbVersion + ' (database must be upgraded)'
                ]
        yield 'Database version', versionCell
class InstallationTable(Table):
    """Lists the open source packages this Control Center runs on."""
    packageColumn = Column('Package')
    versionColumn = Column('Version', cellStyle='centeralign')
    descriptionColumn = Column('Description')
    def showVersions(self, **kwargs: object) -> bool:
        """Return whether the current user may see package versions."""
        proc = cast(FabPage.Processor, kwargs['proc'])
        return proc.user.hasPrivilege('sysver')
    def iterColumns(self, **kwargs: object) -> Iterator[Column]:
        # The version column is only shown to privileged users.
        yield self.packageColumn
        if self.showVersions(**kwargs):
            yield self.versionColumn
        yield self.descriptionColumn
    def iterRows(self, **kwargs: object) -> Iterator[XMLContent]:
        """Yield one row per dependency of 'softfab', plus Python itself."""
        showVersions = self.showVersions(**kwargs)
        names = list(dependencies('softfab'))
        names.append('Python')
        names.sort(key=str.casefold)
        for name in names:
            if name == 'Python':
                # Python is not a distribution; hard-code its metadata.
                version = python_version()
                url = 'https://www.python.org/'
                desc = "An interpreted, interactive, " \
                       "object-oriented programming language"
            else:
                # Skip packages for which no distribution metadata is found.
                dist = getDistribution(name)
                if dist is None:
                    continue
                version = dist.version
                metadata = dist.metadata
                url = metadata['Home-page']
                desc = metadata['Summary'].rstrip('.')
            row: List[XMLContent]
            row = [ maybeLink(url)[name] ]
            if showVersions:
                row.append(version)
            row.append(desc)
            yield row
class BrowserTable(Table):
    """Two-column table describing the visitor's web browser."""
    columns = None, None

    def iterRows(self, **kwargs: object) -> Iterator[XMLContent]:
        proc = cast(PageProcessor, kwargs['proc'])
        agent = proc.req.userAgent
        yield 'Browser:', agent.family or 'unknown'
        # The version is a tuple of numeric components, or None when unknown.
        parts = agent.version
        yield 'Version:', (
            'unknown' if parts is None else '.'.join(str(part) for part in parts)
            )
        yield 'Operating system:', agent.operatingSystem or 'unknown'
        yield 'Accepts XHTML:', 'yes' if agent.acceptsXHTML else 'no'
|
from alambi import db, app
from flask_login import UserMixin
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
import datetime
# Wire up Flask-Migrate / Flask-Script so schema migrations can be run from
# the command line via the 'db' command group.
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
class User(db.Model, UserMixin):
    """Application user account used for authentication and password resets."""

    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # 60 characters — presumably a bcrypt hash; confirm against the auth code.
    password = db.Column(db.String(60), nullable=False)

    def get_reset_token(self, expires_sec=1800):
        """Return a signed, time-limited (default 30 min) password-reset token."""
        s = Serializer(app.config['SECRET_KEY'], expires_sec)
        return s.dumps({'user_id': self.id}).decode('utf-8')

    @staticmethod
    def verify_reset_token(token):
        """Return the User for a valid reset token, or None if invalid/expired."""
        s = Serializer(app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token)['user_id']
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # not swallowed; any deserialization failure still yields None.
        except Exception:
            return None
        return User.query.get(user_id)

    def __repr__(self):
        # Bug fix: the original string lacked the f-prefix and therefore
        # returned the literal "User('{self.email}')" for every instance.
        return f"User('{self.email}')"
# Association table for the many-to-many Blog <-> Tag relationship.
tags = db.Table('tags',
    db.Column('blog_id', db.Integer, db.ForeignKey('blog.blog_id')),
    db.Column('tag_id', db.Integer, db.ForeignKey('tag.tag_id'))
)
class Blog(db.Model):
    """A blog post, with tags, comments and engagement counters."""
    blog_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(200), unique=False, nullable=False)  # post title
    date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    text = db.Column(db.Text)  # post body
    like = db.Column(db.Integer, default=0)  # like counter
    sticky = db.Column(db.Boolean, default=False)  # pinned post when True
    category = db.Column(db.String(200), default='No Category')
    # Many-to-many to Tag through the `tags` association table above.
    tags = db.relationship('Tag', secondary=tags,
        backref=db.backref('post_tags', lazy='dynamic'))
    # Comments are deleted together with their post.
    comments = db.relationship("Comment", backref='post', cascade="all, delete-orphan")
    comment_count = db.Column(db.Integer, default=0)
class Tag(db.Model):
    """A label that can be attached to many blog posts."""
    tag_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(200))
class Comment(db.Model):
    """A reader comment attached to a blog post."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    post_id = db.Column(db.Integer, db.ForeignKey('blog.blog_id'))
    name = db.Column(db.String(200), nullable=False)  # commenter display name
    email = db.Column(db.String(120))
    date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    text = db.Column(db.Text)  # comment body
class GeneralSettings(db.Model):
    """Site-wide settings: blog name, author, and display options."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    init = db.Column(db.Boolean, unique=False, default=False, nullable=True)  # presumably "setup completed" flag — TODO confirm
    name = db.Column(db.String(500), unique=False, nullable=False)  # blog name
    author = db.Column(db.String(500), unique=False, nullable=False)
    post_count = db.Column(db.Integer)  # presumably posts shown per page — TODO confirm
    excerpt = db.Column(db.Boolean, unique=False, default=False, nullable=True)  # show excerpts instead of full posts
    comments = db.Column(db.Boolean, unique=False, default=False, nullable=True)  # comments enabled
class SidebarSettings(db.Model):
    """Sidebar layout: which widgets appear and how many items each shows."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    main_position = db.Column(db.Integer, default=2, nullable=False)
    post_position = db.Column(db.Integer, default=2, nullable=False)
    show_blog_name = db.Column(db.Boolean, nullable=True)
    show_logo = db.Column(db.Boolean, nullable=True)
    text = db.Column(db.Text)  # free-form sidebar text widget
    # Each widget toggle is paired with a max item count below.
    search = db.Column(db.Boolean, default=True, nullable=True)
    recent_posts = db.Column(db.Boolean, default=True, nullable=True)
    max_recent = db.Column(db.Integer, default=3, nullable=True)
    popular_posts = db.Column(db.Boolean, default=True, nullable=True)
    max_popular = db.Column(db.Integer, default=3, nullable=True)
    category = db.Column(db.Boolean, default=True, nullable=True)
    max_category = db.Column(db.Integer, default=5, nullable=True)
    tag = db.Column(db.Boolean, default=True, nullable=True)
    max_tag = db.Column(db.Integer, default=5, nullable=True)
class Theme(db.Model):
    """A color/font theme; `selected` marks the active one."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    selected = db.Column(db.Boolean, nullable=False)  # currently active theme
    name = db.Column(db.String(500), unique=False, nullable=False)
    # Color values (presumably CSS hex strings, hence String(10) — TODO confirm).
    bg_color = db.Column(db.String(10), unique=False, nullable=False)
    text_color = db.Column(db.String(10), unique=False, nullable=False)
    post_container_color = db.Column(db.String(10), unique=False, nullable=False)
    blog_name_color = db.Column(db.String(10), unique=False, nullable=False)
    header_color = db.Column(db.String(10), unique=False, nullable=False)
    alt_header_color = db.Column(db.String(10), unique=False, nullable=False)
    link_color = db.Column(db.String(10), unique=False, nullable=False)
    like_color = db.Column(db.String(10), unique=False, nullable=False)
    comment_color = db.Column(db.String(10), unique=False, nullable=False)
    sticky_color = db.Column(db.String(10), unique=False, nullable=False)
    main_font = db.Column(db.String(200), unique=False, nullable=False)
    header_font = db.Column(db.String(200), unique=False, nullable=False)
# Script entry point: run the Flask-Script manager (migration commands).
# FIX: removed a stray trailing "|" artifact that made this line a
# syntax error.
if __name__ == '__main__':
    manager.run()
import numpy as np
import os.path as path
import datetime
from keras.layers import Dense, LSTM, Input
from keras.models import Model
from keras.callbacks import TensorBoard
from utils import print_sequence
import dataset as ds
from argparse import ArgumentParser
def run(args):
    """Train a seq2seq POS model and greedily decode one random sample.

    Builds an LSTM encoder-decoder on the data in ``args.corpus_dir``,
    trains it with TensorBoard logging, saves the model, then decodes a
    random training sequence token by token.
    """
    # Read the data from the corpus directory.
    encoder_input_data,\
        decoder_input_data,\
        decoder_target_data,\
        pos_to_int_dict = ds.read_data(args.corpus_dir)

    # Input data has the shape
    # (num_phrases, max_phrase_length, embedding_size).
    embedding_dim = encoder_input_data.shape[2]

    # Encoder: accepts matrices with an unknown number of rows and
    # `embedding_dim` columns, and returns its final hidden/cell states,
    # which 'seed' the decoder.
    encoder_inputs = Input(shape=(None, embedding_dim))
    encoder = LSTM(args.hidden_size, return_state=True)
    encoder_outputs, state_h, state_c = encoder(encoder_inputs)
    encoder_states = [state_h, state_c]

    # Decoder: same input shape, seeded with the encoder states.
    decoder_inputs = Input(shape=(None, embedding_dim))
    decoder_lstm = LSTM(
        args.hidden_size, return_sequences=True, return_state=True)
    decoder_outputs, _, _ = decoder_lstm(
        decoder_inputs, initial_state=encoder_states)

    # Project the decoder output onto the token space through the chosen
    # activation.
    decoder_dense = Dense(embedding_dim, activation=args.activation)
    decoder_outputs = decoder_dense(decoder_outputs)

    # TensorBoard visualization: one log directory per run configuration.
    current_time = datetime.datetime.now().strftime('%Y-%m-%d-%H%M')
    run_config = "iter-{}-bs-{}-hs-{}-act-{}-{}".format(
        args.num_iterations, args.batch_size, args.hidden_size,
        args.activation, current_time)
    # BUG FIX: honor --tensorboard-log-dir (it was parsed but ignored;
    # the log directory was hard-coded to './logs').
    logdir = path.join(args.tensorboard_log_dir, run_config)
    tensorboardDisplay = TensorBoard(
        log_dir=logdir,
        histogram_freq=0,
        write_graph=True,
        write_images=True,
        write_grads=True,
        batch_size=16)

    # Build, train and save the model.
    model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
    model.summary()
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
    model.fit([encoder_input_data, decoder_input_data],
              decoder_target_data,
              batch_size=args.batch_size,
              epochs=args.num_iterations,
              callbacks=[tensorboardDisplay])
    model.save("{}.h5".format(run_config))

    # Sampling models: the encoder encodes the input sequence; the decoder
    # consumes the encoded states plus the previously emitted token.
    encoder_model = Model(encoder_inputs, encoder_states)
    decoder_state_input_h = Input(shape=(args.hidden_size, ))
    decoder_state_input_c = Input(shape=(args.hidden_size, ))
    decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
    decoder_outputs, state_h, state_c = decoder_lstm(
        decoder_inputs, initial_state=decoder_states_inputs)
    decoder_states = [state_h, state_c]
    decoder_outputs = decoder_dense(decoder_outputs)
    decoder_model = Model([decoder_inputs] + decoder_states_inputs,
                          [decoder_outputs] + decoder_states)

    # Feed a random training sample to the encoder to see it in action.
    idx = np.random.randint(encoder_input_data.shape[0])
    input_seq = encoder_input_data[idx]
    input_seq = np.reshape(input_seq,
                           (1, input_seq.shape[0], input_seq.shape[1]))
    print_sequence(input_seq, pos_to_int_dict)
    print("-" * 10)
    states_value = encoder_model.predict(input_seq)

    # Greedy decoding: start from the one-hot '<start>' token and feed the
    # sampled token back in until '<end>' or the maximum sequence length.
    target_seq = np.zeros((1, 1, embedding_dim))
    idx = pos_to_int_dict['<start>']
    target_seq[0, 0, idx] = 1
    output_seq = np.copy(target_seq)
    stop = False
    max_seq_len = encoder_input_data.shape[1]
    while not stop:
        output_tokens, h, c = decoder_model.predict([target_seq] +
                                                    states_value)
        token_index = np.argmax(output_tokens[0, -1, :])
        sampled_token = np.zeros((1, 1, embedding_dim))
        sampled_token[0, 0, token_index] = 1
        if token_index == pos_to_int_dict[
                '<end>'] or output_seq.shape[1] >= max_seq_len:
            stop = True
        target_seq = np.zeros((1, 1, embedding_dim))
        target_seq[0, 0, token_index] = 1
        output_seq = np.concatenate((output_seq, sampled_token), axis=1)
        states_value = [h, c]
    print_sequence(output_seq, pos_to_int_dict)
def parse_arguments():
    """Build and evaluate the command-line interface for the trainer."""
    arg_parser = ArgumentParser()
    # The corpus directory is the only mandatory argument.
    arg_parser.add_argument(
        '--corpus-dir',
        required=True,
        help='The directory with training data.')
    arg_parser.add_argument(
        '--num-iterations',
        type=int,
        default=100,
        required=False,
        help='Number of training iterations.')
    arg_parser.add_argument(
        '--hidden-size',
        type=int,
        default=128,
        required=False,
        help='Number of hidden units.')
    arg_parser.add_argument(
        '--activation',
        required=False,
        default='softmax',
        choices=[
            'softmax', 'elu', 'selu', 'relu', 'tanh', 'sigmoid', 'linear'
        ],
        help='Name of the activation function for the decoder.')
    arg_parser.add_argument(
        '--batch-size',
        type=int,
        default=16,
        required=False,
        help='The size of the training batch.')
    arg_parser.add_argument(
        '--tensorboard-log-dir',
        required=False,
        default='./logs/',
        help='Log directory for TensorBoard.')
    return arg_parser.parse_args()
# Script entry point: parse CLI arguments, then train and sample.
if __name__ == '__main__':
    args = parse_arguments()
    run(args)
|
# repo: jelic98/raf_pp
from app.type import *
from app.token import Token
class Lexer():
    """Hand-written tokenizer for the RAF-PP toy language.

    Scans `text` left to right and produces `Token` objects on demand via
    `get_next_token()`.  Token type constants come from `app.type`.
    """

    def __init__(self, text):
        self.text = text
        self.pos = 0  # index of the character currently being examined

    def skip_whitespace(self):
        """Advance `pos` past any run of whitespace."""
        while self.pos < len(self.text) and self.text[self.pos].isspace():
            self.pos += 1

    def parse_number(self):
        """Consume a digit run and return it as an int.

        Leaves `pos` on the LAST digit; `get_next_token` performs the
        final increment.
        """
        number = ''
        while self.pos < len(self.text) and self.text[self.pos].isdigit():
            number += self.text[self.pos]
            self.pos += 1
        self.pos -= 1
        return int(number)

    def parse_string(self):
        """Consume a double-quoted string (no escapes) and return its body.

        Entered with `pos` on the opening quote; leaves `pos` on the
        closing quote (or at end of text for an unterminated string).
        """
        string = ''
        self.pos += 1
        while self.pos < len(self.text) and self.text[self.pos] != '"':
            string += self.text[self.pos]
            self.pos += 1
        return string

    def parse_keyword(self):
        """Consume an identifier/keyword and return the matching Token.

        Identifiers may contain letters, digits, '_', '#' and '~'.
        Leaves `pos` on the last consumed character.  Words not in the
        keyword table become NAZIV (name) tokens.
        """
        string = ''
        while (self.pos < len(self.text)
               and (self.text[self.pos].isalpha()
                    or self.text[self.pos].isdigit()
                    or self.text[self.pos] in '_#~')):
            string += self.text[self.pos]
            self.pos += 1
        self.pos -= 1
        # Exact-match keyword table (replaces a 30-branch elif chain).
        keywords = {
            '#program': PROGRAM_POCETAK,
            '##program': PROGRAM_KRAJ,
            '#postojanje': POSTOJANJE_POCETAK,
            '##postojanje': POSTOJANJE_KRAJ,
            '#dodela': DODELA_POCETAK,
            '##dodela': DODELA_KRAJ,
            '#polje': POLJE_POCETAK,
            '##polje': POLJE_KRAJ,
            '#naredba': NAREDBA_POCETAK,
            '##naredba': NAREDBA_KRAJ,
            '#celina': CELINA_POCETAK,
            '##celina': CELINA_KRAJ,
            '#rutina': RUTINA_POCETAK,
            '##rutina': RUTINA_KRAJ,
            '#~rutina': RUTINA_POZIV_POCETAK,
            '##~rutina': RUTINA_POZIV_KRAJ,
            '#~ugradjena_rutina': UGRADJENA_RUTINA_POZIV_POCETAK,
            '##~ugradjena_rutina': UGRADJENA_RUTINA_POZIV_KRAJ,
            '#vrati': VRATI_POCETAK,
            '##vrati': VRATI_KRAJ,
            '#prekini_ponavljanje': PREKINI_PONAVLJANJE,
            'uslov': NAREDBA_USLOV,
            'ponavljanje': NAREDBA_PONAVLJANJE,
            'pitanje': CELINA_PITANJE,
            'da': CELINA_DA,
            'ne': CELINA_NE,
            'ponovi': CELINA_PONOVI,
            'polja': CELINA_POLJA,
            'sadrzaj_rutine': CELINA_SADRZAJ_RUTINE,
            '~ceo_broj': TIP_PODATKA,
            '~struna': TIP_PODATKA,
            '~jeste_nije': TIP_PODATKA,
            'jeste': JESTE_NIJE,
            'nije': JESTE_NIJE,
        }
        return Token(keywords.get(string, NAZIV), string)

    def advance(self):
        """Move to the next character, erroring out at end of input."""
        self.pos += 1
        if self.pos == len(self.text):
            self.error("")
        self.current_char = self.text[self.pos]

    def _peek(self):
        """Return the character after the current one, or None at the end."""
        nxt = self.pos + 1
        return self.text[nxt] if nxt < len(self.text) else None

    def get_next_token(self):
        """Return the next Token, or Token(EOF, None) at end of input.

        Fixes over the original implementation:
        * '<=' and '>=' are now recognized via one-character lookahead
          (the old branches compared a single character against a
          two-character string and were unreachable — and carried wrong
          token values).
        * the duplicated operator elif chain was removed.
        * a lone '&' now raises instead of silently returning None and
          skipping a character.
        """
        self.skip_whitespace()
        if self.pos >= len(self.text):
            return Token(EOF, None)
        current_char = self.text[self.pos]
        token = None
        # One-character tokens that need no lookahead.
        simple = {
            '+': PLUS, '-': MINUS, '*': MNOZENJE, '/': DELJENJE,
            '%': OSTATAK, '(': ZAGRADA_OTVORENA, ')': ZAGRADA_ZATVORENA,
            '=': JEDNAKO, ':': COLON, ',': COMMA,
        }
        if current_char.isdigit():
            token = Token(CEO_BROJ, self.parse_number())
        elif current_char.isalpha() or current_char in '#~':
            token = self.parse_keyword()
        elif current_char == '"':
            token = Token(STRUNA, self.parse_string())
        elif current_char in simple:
            token = Token(simple[current_char], current_char)
        elif current_char == '<':
            if self._peek() == '=':
                self.pos += 1
                token = Token(MANJE_JEDNAKO, '<=')
            else:
                token = Token(MANJE, '<')
        elif current_char == '>':
            if self._peek() == '=':
                self.pos += 1
                token = Token(VECE_JEDNAKO, '>=')
            else:
                token = Token(VECE, '>')
        elif current_char == '!':
            if self._peek() == '=':
                self.pos += 1
                token = Token(NIJE_JEDNAKO, '!=')
            else:
                token = Token(LOGICKO_NE, '!')
        elif current_char == '&':
            if self._peek() == '&':
                self.pos += 1
                token = Token(LOGICKO_I, '&&')
            else:
                # The language has no single-'&' token.
                self.error(current_char)
        elif current_char == '|':
            if self._peek() == '|':
                self.pos += 1
                token = Token(LOGICKO_ILI, '||')
            else:
                token = Token(PIPE, '|')
        else:
            self.error(current_char)
        self.pos += 1
        return token

    def error(self, current_char):
        """Abort lexing with a diagnostic for an unexpected character."""
        raise Exception("Unexpected character {}".format(current_char))
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import subprocess
import signal
import os
import sys
# Make the parent directory importable so the DCGM Python bindings are
# found when this plugin is loaded directly by collectd.
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))
sys.path.insert(0, parent_dir_path)

import pydcgm
import dcgm_fields
import dcgm_structs
import threading
from DcgmReader import DcgmReader

# Under the DCGM testing framework, prefer the stub collectd module when
# available; otherwise fall back to the real collectd bindings.
if 'DCGM_TESTING_FRAMEWORK' in os.environ:
    try:
        import collectd_tester_api as collectd
    except:
        import collectd
else:
    import collectd

# Set default values for the hostname and the library path
g_dcgmLibPath = '/usr/lib'
g_dcgmHostName = 'localhost'

# Allow overriding the defaults through the environment.
if 'DCGM_HOSTNAME' in os.environ:
    g_dcgmHostName = os.environ['DCGM_HOSTNAME']
if 'DCGMLIBPATH' in os.environ:
    g_dcgmLibPath = os.environ['DCGMLIBPATH']

# Polling interval in seconds (overridable via the collectd config block).
g_intervalSec = 10

g_dcgmIgnoreFields = [dcgm_fields.DCGM_FI_DEV_UUID] #Fields not to publish
g_publishFieldIds = [
dcgm_fields.DCGM_FI_DEV_UUID, #Needed for plugin instance
dcgm_fields.DCGM_FI_DEV_POWER_USAGE,
dcgm_fields.DCGM_FI_DEV_GPU_TEMP,
dcgm_fields.DCGM_FI_DEV_SM_CLOCK,
dcgm_fields.DCGM_FI_DEV_GPU_UTIL,
dcgm_fields.DCGM_FI_DEV_RETIRED_PENDING,
dcgm_fields.DCGM_FI_DEV_RETIRED_SBE,
dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
dcgm_fields.DCGM_FI_DEV_ECC_SBE_VOL_TOTAL,
dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL,
dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL,
dcgm_fields.DCGM_FI_DEV_ECC_DBE_AGG_TOTAL,
dcgm_fields.DCGM_FI_DEV_FB_TOTAL,
dcgm_fields.DCGM_FI_DEV_FB_FREE,
dcgm_fields.DCGM_FI_DEV_FB_USED,
dcgm_fields.DCGM_FI_DEV_PCIE_REPLAY_COUNTER,
dcgm_fields.DCGM_FI_DEV_POWER_VIOLATION,
dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION,
dcgm_fields.DCGM_FI_DEV_XID_ERRORS,
dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_MEM_CLOCK,
dcgm_fields.DCGM_FI_DEV_MEMORY_TEMP,
dcgm_fields.DCGM_FI_DEV_TOTAL_ENERGY_CONSUMPTION,
dcgm_fields.DCGM_FI_DEV_MEM_COPY_UTIL,
dcgm_fields.DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_NVLINK_BANDWIDTH_TOTAL,
dcgm_fields.DCGM_FI_DEV_PCIE_TX_THROUGHPUT,
dcgm_fields.DCGM_FI_DEV_PCIE_RX_THROUGHPUT
]
class DcgmCollectdPlugin(DcgmReader):
    """DcgmReader subclass that publishes DCGM field values to collectd."""

    ###########################################################################
    def __init__(self):
        collectd.debug('Initializing DCGM with interval={}s'.format(g_intervalSec))
        # updateFrequency is in microseconds.
        DcgmReader.__init__(self, fieldIds=g_publishFieldIds,
                            ignoreList=g_dcgmIgnoreFields,
                            fieldGroupName='collectd_plugin',
                            updateFrequency=g_intervalSec * 1000000)

    ###########################################################################
    def CustomDataHandler(self, fvs):
        """Dispatch the latest value of every watched field to collectd.

        `fvs` maps gpuId -> {fieldId -> [values]}; only the newest value
        of each field is published, as a gauge keyed by the GPU's UUID.
        """
        value = collectd.Values(type='gauge')  # pylint: disable=no-member
        value.plugin = 'dcgm_collectd'

        for gpuId in fvs.keys():
            gpuFv = fvs[gpuId]
            uuid = self.m_gpuIdToUUId[gpuId]
            value.plugin_instance = '%s' % (uuid)
            typeInstance = str(gpuId)

            for fieldId in gpuFv.keys():
                # Skip fields on the ignore list.
                if fieldId in self.m_dcgmIgnoreFields:
                    continue

                fieldTag = self.m_fieldIdToInfo[fieldId].tag
                val = gpuFv[fieldId][-1]

                # Skip blank values. Otherwise, we'd have to insert a
                # placeholder blank value based on the fieldId.
                if val.isBlank:
                    continue

                # BUG FIX: use floor division. Under Python 3, `/` yields a
                # float instead of rounding down to whole seconds.
                valTimeSec1970 = (val.ts // 1000000)
                valueArray = [val.value, ]
                value.dispatch(type=fieldTag, type_instance=typeInstance,
                               time=valTimeSec1970, values=valueArray,
                               plugin=value.plugin)

                collectd.debug("gpuId %d, tag %s, value %s" % (gpuId, fieldTag, str(val.value)))  # pylint: disable=no-member

    ###########################################################################
    def LogInfo(self, msg):
        """Route informational messages to collectd's logger."""
        collectd.info(msg)  # pylint: disable=no-member

    ###########################################################################
    def LogError(self, msg):
        """Route error messages to collectd's logger."""
        collectd.error(msg)  # pylint: disable=no-member
###############################################################################
##### Wrapper the Class methods for collectd callbacks
###############################################################################
def config_dcgm(config):
    """collectd config callback: honor an `Interval` setting if present."""
    global g_intervalSec
    for entry in config.children:
        if entry.key == 'Interval':
            g_intervalSec = int(entry.values[0])
###############################################################################
def init_dcgm():
    """collectd init callback: create and initialize the DCGM reader."""
    global g_dcgmCollectd

    # restore default SIGCHLD behavior to avoid exceptions with new processes
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)

    g_dcgmCollectd = DcgmCollectdPlugin()
    g_dcgmCollectd.Init()
###############################################################################
def shutdown_dcgm():
    """collectd shutdown callback: tear down the DCGM reader."""
    g_dcgmCollectd.Shutdown()
###############################################################################
def read_dcgm(data=None):
    """collectd read callback: poll DCGM and dispatch current values."""
    g_dcgmCollectd.Process()
def register_collectd_callbacks():
    """Register this plugin's config/init/read/shutdown hooks with collectd."""
    collectd.register_config(config_dcgm)  # pylint: disable=no-member
    collectd.register_init(init_dcgm)  # pylint: disable=no-member
    collectd.register_read(read_dcgm)  # pylint: disable=no-member
    collectd.register_shutdown(shutdown_dcgm)  # pylint: disable=no-member

###############################################################################
##### Main
###############################################################################
register_collectd_callbacks()
|
# repo: AllenInstitute/em_stitch
#!/usr/bin/env python
''' Converts metafiles to p->q collections.
<NAME> 2019.02.15
'''
import argparse
import glob
import json
import os
import sys
from enum import IntEnum
# Position codes in metafile
class Edge(IntEnum):
    """Neighbor direction codes used by metafile matcher entries."""
    INVALID = 0
    CENTER = 1 # unused
    LEFT = 2
    TOP = 3
    RIGHT = 4
class MetaToCollection(object):
    ''' Converts a raw TEMCA metafile into a collection
        json file which the render stack can consume.
    '''

    def tile_from_raster_pos(self, args, col, row, direction=None):
        ''' Return the tile at (col, row) or one of its neighbors.

            direction is None (return this tile), Edge.LEFT, Edge.RIGHT,
            or Edge.TOP.  Returns None when the tile has no neighbor in
            the given direction.
        '''
        if direction is None:
            # Direct lookup; a missing key raises KeyError, matching the
            # original behavior.
            return args.raster_pos_lookup[str(col) + "_" + str(row)]
        # Shift (col, row) toward the requested neighbor, or bail out at
        # the grid border.
        if direction == Edge.LEFT:
            if col <= 0:
                return None
            col -= 1
        elif direction == Edge.RIGHT:
            if col >= args.tcols:
                return None
            col += 1
        elif direction == Edge.TOP:
            if row <= 0:
                return None
            row -= 1
        else:
            # INVALID/CENTER or any unknown direction has no neighbor.
            return None
        try:
            return args.raster_pos_lookup[str(col) + "_" + str(row)]
        except KeyError:
            # Grid position is in range but holds no tile
            # (was a bare `except:`).
            return None

    def tile_from_tile(self, args, tile, direction=None):
        ''' Return a neighboring tile given a tile.

            direction is either None (return this tile), LEFT, RIGHT, or TOP.
        '''
        col, row = tile['img_meta']['raster_pos']
        return self.tile_from_raster_pos(args, col, row, direction)

    def create_raster_pos_dict(self, args):
        ''' Create the "col_row" -> tile lookup dictionary. '''
        args.raster_pos_lookup = {}
        for tile in args.data:
            col, row = tile['img_meta']['raster_pos']
            args.raster_pos_lookup[str(col) + "_" + str(row)] = tile

    def get_meta_and_montage_files(self, rootdir):
        ''' Return the (meta, montage) file names found under rootdir.

            Either entry is None when no matching file exists.  (The
            original left `meta` unbound and crashed with NameError when
            there was no metafile.)
        '''
        meta = None
        for name in glob.glob(os.path.join(rootdir, r"_meta*.*")):
            meta = name
        montage = None
        for name in glob.glob(os.path.join(rootdir, r"_montage*.*")):
            montage = name
        return (meta, montage)

    def process(self, args):
        ''' Read the metafile from args.directory and convert its match
            data into a list of p->q point-match records.
        '''
        rootdir = args.directory
        try:
            args.meta_file, args.montage_file = \
                self.get_meta_and_montage_files(rootdir)
            with open(args.meta_file) as data_file:
                json_data = json.load(data_file)
        except Exception:
            # Covers a missing/unreadable metafile and malformed JSON
            # (was a bare `except:`).
            raise Exception("Cannot find or parse metafile in: " +
                            args.directory)
        metadata = args.metadata = json_data[0]['metadata']
        data = args.data = json_data[1]['data']
        temca_id = metadata["temca_id"]
        session_id = metadata["session_id"]
        grid = metadata["grid"]
        specimen_id = metadata["specimen_id"]
        # tape_id is optional in older metafiles.
        if "tape_id" in metadata:
            tape_id = metadata["tape_id"]
        else:
            tape_id = None
        # Group id shared by both sides of every match pair.
        gid = (
            str(specimen_id) + '_' +
            str(temca_id) + '_' +
            str(tape_id) + '_' +
            str(session_id) + '_' +
            str(grid))
        qGroupId = pGroupId = gid
        # total number of rows and cols
        args.trows = max([tile['img_meta']['raster_pos'][1] for tile in data])
        args.tcols = max([tile['img_meta']['raster_pos'][0] for tile in data])
        print('rows: ', args.trows, ', cols: ', args.tcols)
        # create a dictionary to look up neighboring tiles
        self.create_raster_pos_dict(args)
        samples = []
        tilespecs = []
        # for all tiles
        for tile in data:
            # tile == 'q' tile, where the template search is taking place
            qId = tile["img_path"]
            # strip the extension to form the tile id
            qId = qId.replace(".tif", "")
            tilespec = {
                'tileId': qId,
                'xstage': float(tile["img_meta"]['stage_pos'][0]),
                'ystage': float(tile["img_meta"]['stage_pos'][1])
            }
            tilespecs.append(tilespec)
            p = [[], []]
            q = [[], []]
            w = []
            if 'matcher' in tile:
                for match in tile['matcher']:
                    position = match['position']
                    match_quality = match['match_quality']
                    if match_quality == -1:
                        # -1 is a flag indicating no matches
                        # are possible for this tile edge
                        continue
                    neighbor = self.tile_from_tile(args, tile, position)
                    # neighbor == 'p' tile, which
                    # contains the original template
                    if neighbor:
                        p = [match["pX"], match["pY"]]
                        q = [match["qX"], match["qY"]]
                        w = [1] * len(match["pX"])
                        pId = neighbor["img_path"]
                        pId = pId.replace(".tif", "")
                        samples.append({
                            'pId': pId,
                            'qId': qId,
                            'pGroupId': pGroupId,
                            'qGroupId': qGroupId,
                            'matches': {
                                'p': p,
                                'q': q,
                                'w': w,
                                'match_count': len(w),
                            }
                        })
        return samples
def main(args):
    """Parse CLI args and convert the metafile in the given directory."""
    parent_parser = argparse.ArgumentParser(
        description='Converts raw TEMCA metadata files to render collections')
    parent_parser.add_argument(
        'directory',
        help='the directory to process.',
        metavar="",
        nargs='?',
        default="/allen/programs/celltypes/workgroups/em-connectomics/danielk/lcdata/lens_correction16/000000/0")
    parent_parser.add_argument(
        '-o',
        '--output_file',
        type=str,
        default="test.json",
        metavar="",
        help='name of the json output file')
    args = parent_parser.parse_args(args)
    m2c = MetaToCollection()
    # NOTE(review): output_file is parsed but the writing code in
    # process() is commented out; the samples are only returned.
    return m2c.process(args)
# Command-line entry point.
if __name__ == "__main__":
    main(sys.argv[1:])
|
import codecs
import json
import os
import random
import asyncio
import re
from cloudbot import hook
from cloudbot.util import textgen
nick_re = re.compile("^[A-Za-z0-9_|@|<|>|.\-\]\[\{\}]*$", re.I)


def is_valid(target):
    """ Checks if a string is a valid IRC nick. """
    return bool(nick_re.match(target))
def is_self(conn, target):
    """ Checks if a string is "****self" or contains conn.name. """
    pattern = "(^..?.?.?self|{})".format(re.escape(conn.nick))
    return re.search(pattern, target, re.I) is not None
@hook.on_start()
def load_attacks(bot):
    """Load all attack/compliment phrase data from the bot's data dir.

    :type bot: cloudbot.bot.CloudBot
    """
    global larts, flirts, kills, slaps, north_korea, insults, strax, compliments, presents

    # Plain-text phrase lists: one phrase per line, '//' lines are comments.
    with codecs.open(os.path.join(bot.data_dir, "larts.txt"), encoding="utf-8") as f:
        larts = [line.strip() for line in f.readlines() if not line.startswith("//")]

    with codecs.open(os.path.join(bot.data_dir, "flirts.txt"), encoding="utf-8") as f:
        flirts = [line.strip() for line in f.readlines() if not line.startswith("//")]

    with codecs.open(os.path.join(bot.data_dir, "insults.txt"), encoding="utf-8") as f:
        insults = [line.strip() for line in f.readlines() if not line.startswith("//")]

    # JSON files hold textgen "templates"/"parts" structures.
    with codecs.open(os.path.join(bot.data_dir, "kills.json"), encoding="utf-8") as f:
        kills = json.load(f)

    with codecs.open(os.path.join(bot.data_dir, "slaps.json"), encoding="utf-8") as f:
        slaps = json.load(f)

    # NOTE(review): this rebinds the module-level name `strax`, which is
    # also the name of the strax() command function below.
    with codecs.open(os.path.join(bot.data_dir, "strax.json"), encoding="utf-8") as f:
        strax = json.load(f)

    with codecs.open(os.path.join(bot.data_dir, "compliments.json"), encoding="utf-8") as f:
        compliments = json.load(f)

    with codecs.open(os.path.join(bot.data_dir, "north_korea.txt"), encoding="utf-8") as f:
        north_korea = [line.strip() for line in f.readlines() if not line.startswith("//")]

    with codecs.open(os.path.join(bot.data_dir, "presents.json"), encoding="utf-8") as f:
        presents = json.load(f)
@asyncio.coroutine
@hook.command
def lart(text, conn, nick, action):
    """<user> - LARTs <user>"""
    target = text.strip()
    if not is_valid(target):
        return "I can't lart that."

    if is_self(conn, target):
        # user is trying to make the bot attack itself!
        target = nick

    # `larts` is loaded from larts.txt by load_attacks() at startup.
    phrase = random.choice(larts)

    # act out the message
    action(phrase.format(user=target))
@asyncio.coroutine
@hook.command("flirt", "sexup", "jackmeoff")
def flirt(text, conn, nick, message):
    """<user> - flirts with <user>"""
    target = text.strip()
    if not is_valid(target):
        return "I can't flirt with that."

    if is_self(conn, target):
        # user is trying to make the bot attack itself!
        target = nick

    # `flirts` is loaded from flirts.txt by load_attacks() at startup.
    message('{}, {}'.format(target, random.choice(flirts)))
@asyncio.coroutine
@hook.command("kill", "end")
def kill(text, conn, nick, action):
    """<user> - kills <user>"""
    target = text.strip()
    if not is_valid(target):
        return "I can't attack that."

    if is_self(conn, target):
        # user is trying to make the bot attack itself!
        target = nick

    # Templates/parts come from kills.json, loaded by load_attacks().
    generator = textgen.TextGenerator(kills["templates"], kills["parts"], variables={"user": target})

    # act out the message
    action(generator.generate_string())
@asyncio.coroutine
@hook.command
def slap(text, action, nick, conn):
    """<user> -- Makes the bot slap <user>."""
    target = text.strip()
    if not is_valid(target):
        return "I can't slap that."

    if is_self(conn, target):
        # user is trying to make the bot attack itself!
        target = nick

    variables = {
        "user": target
    }

    # Templates/parts come from slaps.json, loaded by load_attacks().
    generator = textgen.TextGenerator(slaps["templates"], slaps["parts"], variables=variables)

    # act out the message
    action(generator.generate_string())
@asyncio.coroutine
@hook.command
def compliment(text, action, nick, conn):
    """<user> -- Makes the bot compliment <user>."""
    target = text.strip()
    if not is_valid(target):
        return "I can't compliment that."

    if is_self(conn, target):
        # user is trying to make the bot attack itself!
        target = nick

    variables = {
        "user": target
    }

    # Templates/parts come from compliments.json, loaded by load_attacks().
    generator = textgen.TextGenerator(compliments["templates"], compliments["parts"], variables=variables)

    # act out the message
    action(generator.generate_string())
@hook.command(autohelp=False)
def strax(text, conn, message, nick):
    """Strax quote."""
    # NOTE(review): this function shares the name `strax` with the global
    # JSON data rebound by load_attacks(); the command keeps working only
    # because the hook captures the function before load_attacks() runs.
    if text:
        # A target was given: use the targeted quote templates.
        target = text.strip()
        if not is_valid(target):
            return "I can't do that."
        if is_self(conn, target):
            # user is trying to make the bot attack itself!
            target = nick
        variables = {
            "user": target
        }

        generator = textgen.TextGenerator(strax["target_template"], strax["parts"], variables=variables)
    else:
        # No target: emit a generic Strax quote.
        generator = textgen.TextGenerator(strax["template"], strax["parts"])

    # Become Strax
    message(generator.generate_string())
@hook.command(autohelp=False)
def nk(chan, message):
    """outputs a random North Korea propaganda slogan"""
    # random.choice replaces the manual randint/index idiom.
    # `north_korea` is loaded from north_korea.txt by load_attacks().
    slogan = random.choice(north_korea)
    message(slogan, chan)
@asyncio.coroutine
@hook.command()
def insult(text, conn, nick, notice, message):
    """<user> - insults <user>

    :type text: str
    :type conn: cloudbot.client.Client
    :type nick: str
    """
    target = text.strip()
    # NOTE(review): unlike the other commands, this only rejects names
    # containing spaces instead of calling is_valid() — confirm intent.
    if " " in target:
        notice("Invalid username!")
        return

    # if the user is trying to make the bot target itself, target them
    if is_self(conn, target):
        target = nick

    message("{}, {}".format(target, random.choice(insults)))
@asyncio.coroutine
@hook.command("present", "gift")
def present(text, conn, nick, action):
    """<user> - gives gift to <user>"""
    target = text.strip()
    if not is_valid(target):
        return "I can't gift that."

    if is_self(conn, target):
        # user is trying to make the bot gift itself!
        target = nick

    variables = {
        "user": target
    }

    # Templates/parts come from presents.json, loaded by load_attacks().
    generator = textgen.TextGenerator(presents["templates"], presents["parts"], variables=variables)
    action(generator.generate_string())
|
from django.http import HttpResponse
from django.db.models import Q
from drf_yasg.utils import swagger_auto_schema
from drf_yasg.openapi import Parameter, Schema, Response, TYPE_INTEGER, TYPE_OBJECT, TYPE_STRING, IN_QUERY
from json import dumps
from .. import models
from .StudentsInformation import StudentsInformation
from .Public import responses_fail, get_request_args, content_type_tmp, patch_error, data_page_response, post_search, post_error
from rest_framework.views import APIView
from django.views.decorators.csrf import csrf_exempt
class TeachersInformation(StudentsInformation):
    '''
    API view listing the personal information of every teacher and
    administrator, i.e. all TCyuser rows except students (attr=4).

    GET  returns one page of all such users.
    POST additionally filters by a free-form input_string, matched
    against numeric columns when it parses as an integer and against
    text columns otherwise.
    '''

    # Columns selected by both the GET and the POST queries, kept in one
    # place so the two handlers cannot drift apart.
    _USER_FIELDS = (
        'id', 'nocard', 'nouser', 'name', 'psw', 'deptid__name', 'sex',
        'attr', 'timeupdate', 'userex_related_to_user_information__rem',
        'localid', 'userex_related_to_user_information__timeupdate',
        'userex_related_to_user_information__idmanager__name')

    # Swagger schema describing a single user record.
    data_schema = {
        'id': Schema(
            title='ID',
            description='使用者的id',
            type=TYPE_INTEGER,
            format='int32',
        ),
        'nocard': Schema(
            title='ID of card ',
            description='用户的卡号',
            type=TYPE_STRING,
            format='string',
        ),
        'nouser': Schema(
            title='ID of user ',
            description='用户的身份id(比如学生的id就是他自己的学号)',
            type=TYPE_STRING,
            format='string',
        ),
        'name': Schema(
            title='用户的姓名',
            description='用户的姓名',
            type=TYPE_STRING,
            format='string'
        ),
        'psw': Schema(
            title='用户的密码',
            description='用户的密码',
            type=TYPE_STRING,
            format='string'
        ),
        'deptid__name': Schema(
            title='部门',
            description='用户所属的部门名称',
            type=TYPE_STRING,
            format='string'
        ),
        'sex': Schema(
            title='性别',
            description='用户的性别,0代表女性,1代表男性',
            enum=[0, 1],
            type=TYPE_INTEGER,
            format='int32',
        ),
        'attr': Schema(
            title='权限',
            description='用户管理权限,0代表超级管理员,1代表教务处管理员,2代表辅导员,3代表教师,4代表学生',
            enum=[0, 1, 2, 3, 4],
            type=TYPE_INTEGER,
            format='int32',
        ),
        'timeupdate': Schema(
            title='信息更新时间',
            description='用户表的更新时间',
            type=TYPE_INTEGER,
            format='int32',
        ),
        'localid': Schema(
            title='管理员地点',
            description=' 管理员所在的地点',
            type=TYPE_STRING,
            format='string'
        ),
        'userex_related_to_user_information__timeupdate': Schema(
            title='timeupdate',
            description='用户附件表的更新时间',
            type=TYPE_INTEGER,
            format='int32',
        ),
        'userex_related_to_user_information__idmanager__name': Schema(
            title='管理员姓名',
            description='修改账户的管理员姓名',
            type=TYPE_STRING,
            format='string'
        ),
        'userex_related_to_user_information__rem': Schema(
            title='描述',
            description='照片的描述',
            type=TYPE_STRING,
            format='string'
        )
    }
    data_schema_present = Schema(
        title='查询成功的返回',
        description='查询成功返回的函数值',
        type=TYPE_OBJECT,
        properties=data_schema)
    get_responses_success = Schema(
        title='成功获取查询数据',
        description='这个接口用于展示成功获取全部数据的格式',
        type=TYPE_OBJECT,
        properties={
            'page': Schema(
                title='页码',
                description='用于表示展示的页码数',
                type=TYPE_INTEGER,
                format='int32',
            ),
            'limits': Schema(
                title='页码',
                description='用于表示每页展示的行数',
                type=TYPE_INTEGER,
                format='int32',
            ),
            'error_code': Schema(
                title='是否有报错数据',
                description='用于传达是否有报错数据,0表示没有报错数据,1表示有报错数据',
                type=TYPE_INTEGER,
                format='int32',
            ),
            'data': Schema(
                title='数据',
                description='用于传递查询到的全部数据',
                type=TYPE_OBJECT,
                # NOTE(review): drf_yasg expects `properties` to be a dict of
                # name -> Schema; this list of two identical schemas likely
                # renders oddly in swagger -- confirm intended output.
                properties=[data_schema_present, data_schema_present]
            ),
        }
    )
    TeachersInformation_get_responses_success = Response(
        description='成功获取信息的响应',
        schema=get_responses_success,
        examples=None
    )
    UsertInformation_get_responses_fail = Response(
        description='查询所有教师和管理员的个人信息失败的响应',
        schema=responses_fail,
        examples={
            'error_code': 1,
            'message': patch_error
        })
    page_get_parammeter = Parameter(
        name='page',
        in_=IN_QUERY,
        description='查询时设定的页码数',
        required=True,
        type=TYPE_INTEGER,
        format='int32',
    )
    limits_get_parammeter = Parameter(
        name='limits',
        in_=IN_QUERY,
        description='查询时设定的每页行数',
        required=True,
        type=TYPE_INTEGER,
        format='int32',
    )

    @swagger_auto_schema(
        request_body=None,
        manual_parameters=[
            page_get_parammeter, limits_get_parammeter],
        operation_id=None,
        operation_description='用于获取所有教师和管理员的个人信息',
        operation_summary=None,
        security=None,
        responses={
            200: TeachersInformation_get_responses_success,
            401: UsertInformation_get_responses_fail
        },
        tags=None)
    @get_request_args
    @csrf_exempt
    def get(self, request, args, session):
        """Return one page of all teacher/administrator records."""
        # NOTE(review): the cookie *value* of 'is_login' is used as the
        # session key (same pattern as post below) -- confirm this is the
        # project-wide login convention.
        is_login = request.COOKIES.get('is_login')
        if not request.session.get(is_login, None):
            return HttpResponse(dumps({'code': 0}), content_type=content_type_tmp, charset='utf-8')
        pages = int(args.get('page', 1))
        limits = int(args.get('limits', 20))
        data_user = models.TCyuser.objects.filter().exclude(attr=4).values(
            *self._USER_FIELDS).distinct().order_by('id')
        return data_page_response(data_user, pages, limits)

    # POST: search teachers/administrators by a free-form input string.
    TeachersInformation_post_request_body = Schema(
        title=' 查询个人数据所需要的查询数据',  # title
        description=' 这个端口用于查询所有老师和管理员的个人信息 ',  # endpoint description
        type=TYPE_OBJECT,  # one of "object", "string", "number", "integer", "boolean", "array", "file"
        format=None,  # e.g. date, date-time, password, binary, bytes, float, double, int32, int64, email, ipv4, ipv6, uri, uuid, slug, decimal
        enum=None,  # [list] of allowed request values
        pattern=None,  # only meaningful when format is a string type
        # when type is object: dict of {'name': Schema, ...}
        properties=post_search,
        required=['input_string', 'page', 'limits'],  # [list of required properties]
        items=None,  # only meaningful when type is array
    )
    TeachersInformation_post_responses_success = Response(
        description='查询所有教师和管理员的个人信息成功的响应',
        schema=Schema(
            title='成功数据',
            description='成功数据',
            type=TYPE_OBJECT,
            properties=data_schema
        ), examples=None
    )
    TeachersInformation_post_responses_fail = Response(
        description='查询所有教师和管理员的个人信息失败的响应',
        schema=responses_fail,
        examples={
            'error_code': 1,
            'message': post_error
        })

    @swagger_auto_schema(
        request_body=TeachersInformation_post_request_body,
        manual_parameters=None,
        operation_id=None,
        operation_description='这个端口用于获取所有老师和管理员个人信息',
        operation_summary=None,
        security=None,
        responses={
            201: TeachersInformation_post_responses_success,
            400: TeachersInformation_post_responses_fail
        },
        tags=None)
    @get_request_args
    @csrf_exempt
    def post(self, request, args, session):
        """Search teacher/administrator records by ``input_string``.

        Integer input is matched against id/card/user-number/department/
        sex/attr/timeupdate; anything else is substring-matched against
        the text columns.  Missing input returns everything.
        """
        is_login = request.COOKIES.get('is_login')
        if not request.session.get(is_login, None):
            return HttpResponse(dumps({'code': 0}), content_type=content_type_tmp, charset='utf-8')
        input_string = args.get('input_string', None)
        pages = int(args.get('page', 1))
        limits = int(args.get('limits', 20))
        if input_string is None:
            data_user_information = models.TCyuser.objects.filter().exclude(attr=4).values(
                *self._USER_FIELDS).distinct().order_by('id')
        else:
            input_string = input_string.strip()
            # SECURITY FIX: the original passed the raw request value to
            # eval(), allowing arbitrary code execution.  The only thing the
            # branch below needs to know is whether the input is an integer,
            # so parse it safely instead.
            try:
                test_input = int(input_string)
            except ValueError:
                test_input = input_string
            if isinstance(test_input, int):
                data_user_information = models.TCyuser.objects.filter(
                    Q(id=test_input) |
                    Q(nocard__icontains=str(test_input)) |
                    Q(nouser__icontains=str(test_input)) |
                    Q(deptid=test_input) |
                    Q(sex=test_input) |
                    Q(attr=test_input) |
                    Q(timeupdate=test_input)
                ).exclude(attr=4).values(*self._USER_FIELDS).distinct().order_by('id')
            else:
                data_user_information = models.TCyuser.objects.filter(
                    Q(name__icontains=test_input) |
                    Q(psw__icontains=test_input) |
                    Q(deptid__name__icontains=test_input) |
                    Q(userex_related_to_user_information__rem=test_input)
                ).exclude(attr=4).values(*self._USER_FIELDS).distinct().order_by('id')
        return data_page_response(data_user_information, pages, limits)
|
# !/usr/bin/python
"""
Copyright ©️: 2020 Seniatical / _-*™#7519
License: Apache 2.0
A permissive license whose main conditions require preservation of copyright and license notices.
Contributors provide an express grant of patent rights.
Licensed works, modifications, and larger works may be distributed under different terms and without source code.
FULL LICENSE CAN BE FOUND AT:
https://www.apache.org/licenses/LICENSE-2.0.html
Any violation to the license, will result in moderate action
You are legally required to mention (original author, license, source and any changes made)
"""
# Word banks for the guessing game, grouped by difficulty (word length and
# obscurity increase from easy to impossible).  Order is preserved from the
# original lists; selection elsewhere is random, so only membership matters.
easy = [
    "tempt", "jodia", "viren", "peeve", "below", "sheep", "smelt", "venal",
    "yours", "emnet", "ceets", "sowse", "index", "eyrar", "small", "swing",
    "corax", "cessy", "whizz", "mutal", "lower", "stalk", "friar", "whirr",
    "mount", "frold", "ensen", "honky", "firds", "ample", "pilot", "slimy",
    "turst", "vouch", "benno", "houry", "shimy", "alive", "yikes", "field",
    "dvala", "vivan", "trout", "alarm", "giddy", "otter", "gield", "smell",
    "singe", "vowel",
]

medium = [
    "penitent", "swindler", "dominant", "fanaholm", "obliging", "mylohyus",
    "dinosaur", "organize", "vomitory", "griphook", "unfasten", "epirrhoe",
    "unlawful", "deformed", "spending", "moderate", "adalbert", "comitata",
    "onlooker", "juggling", "guttural", "sculling", "graminum", "terrible",
    "exposure", "rotating", "myolepta", "liberian", "kneepads", "shallots",
    "scratchy", "kidneyed", "lukewarm", "plebejus", "forelimb", "disaster",
    "richardi", "petowker", "beddinge", "broccoli", "pallipes", "animagus",
    "burhinus", "cavities", "hornbill", "finished", "halftime", "accurate",
    "fossarum", "resolved",
]

hard = [
    "nothophantes", "leptopsammia", "heterodermia", "monocephalus",
    "strawberries", "penicillatus", "jamesoniella", "dactylorhiza",
    "acrocephalus", "hydrochoerus", "blackberries", "cercotrichas",
    "alexandrinus", "delawarensis", "eschrichtius", "megarhynchos",
    "hydrodamalis", "corallioides", "serrirostris", "caerulescens",
    "puncticollis", "dermocorynus", "circumspecta", "hypocreopsis",
    "lymnocryptes", "xanthomelana", "campsicnemus", "gelochelidon",
    "nitidicollis", "erythrophaea", "inisitijitty", "angustifolia",
    "aztlanolagus", "quisquilious", "polrumptious", "schistisagus",
    "subterraneus", "cureckitycoo", "phaeophyscia", "lycopodiella",
    "cheeseburger", "nematostella", "mediterranea", "sublivescens",
    "subturgidula", "hydrochaeris", "densirostris", "pallavicinia",
    "hyssopifolia", "philadelphia",
]

impossible = [
    "slackumtrance", "tarantallegra", "horripilation", "processionary",
    "pandiculating", "firkytoodling", "snickerdoodle", "stiltstalking",
    "shackbaggerly", "recumbentibus", "expergefactor",
    "pseudopseudohypoparathyroidism", "floccinaucinihilipilification",
    "antidisestablishmentarianism", "supercalifragilisticexpialidocious",
    "incomprehensibilities", "strengths", "euouae", "unimaginatively",
    "uncopyrightable", "subdermatoglyphic", "sesquipedalianism",
    "abiogenetically", "abnormalization", "aboriginalities",
    "absorbabilities", "absorptiometers", "absorptiometric",
    "absquatulations", "abstractionisms", "abstractionists",
    "abstractiveness", "acanthocephalan", "accelerationist",
    "acceptabilities", "acceptingnesses", "accessarinesses",
    "accessibilities", "accessorinesses", "accessorization",
    "accidentalities", "acclimatization", "accommodatingly",
    "accommodational", "accomplishments", "accountableness",
    "accountantships", "acculturational", "acetometrically",
    "acetophenetidin", "acetylcysteines", "achlorophyllous",
    "achondroplasias", "achondroplastic", "achromaticities",
    "achromatization", "achromatophilia", "acidimetrically",
    "acknowledgeable", "acknowledgement", "acknowledgments",
    "acleistocardias", "acoenaesthesias", "acoustoelectric",
    "acoustoopticses", "acquirabilities", "acquisitiveness",
]
|
from functools import partial
import numpy as np
class Covariance:
    """Assumed a priori covariances for a joint {ln[H2O], ln[HDO]} retrieval.

    Implements the state transformation P and the covariance constructions
    referenced by equation numbers in the method docstrings.
    """

    def __init__(self, nol: int, alt: np.ma.MaskedArray):
        """assumed covariances

        :param nol: number of atmospheric levels
        :param alt: level altitudes (same unit as ``correlation_length``,
            presumably meters -- TODO confirm against caller)
        """
        self.nol = nol
        self.alt = alt

    def gaussian(self, x, mu, sig):
        """Unnormalized Gaussian function.

        :param x: Input value
        :param mu: Mean value of gaussian
        :param sig: Standard deviation of gaussian
        """
        return np.exp(-((x - mu) * (x - mu)) / (2 * sig * sig))

    def traf(self) -> np.ndarray:
        """P (see equation 6)

        Used to transform the {ln[H2O], ln[HDO]} state into the new
        coordinate system {(ln[H2O]+ln[HDO])/2, ln[HDO]-ln[H2O]}.
        """
        eye = np.identity(self.nol)
        return np.block([[eye * 0.5, eye * 0.5],
                         [-eye, eye]])

    def assumed_covariance(self, species=2, w1=1.0, w2=0.01, correlation_length=2500) -> np.ndarray:
        """Sa' (see equation 7)

        A priori covariance of the {(ln[H2O]+ln[HDO])/2, ln[HDO]-ln[H2O]}
        state (Sa itself is equation 5 in the paper).

        :param species: Number of atmospheric species (1 or 2)
        :param w1: Weight for upper left quadrant
        :param w2: Weight for lower right quadrant (ignored with 1 species)
        :param correlation_length: Assumed correlation of atmospheric levels in meter
        """
        # only 1 or 2 species are allowed
        assert (species >= 1) and (species <= 2)
        result = np.zeros((species * self.nol, species * self.nol))
        for i in range(self.nol):
            for j in range(self.nol):
                # w1 (100%) correlation weight for the (ln[H2O]+ln[HDO])/2 state
                result[i, j] = w1 * \
                    self.gaussian(self.alt[i], self.alt[j], correlation_length)
                if species == 2:
                    # w2 (default 0.01 = 10% std dev) for the ln[HDO]-ln[H2O] state
                    result[i + self.nol, j + self.nol] = w2 * \
                        self.gaussian(
                            self.alt[i], self.alt[j], correlation_length)
        return result

    def apriori_covariance(self) -> np.ndarray:
        """Sa (see equation 5)

        A priori covariance of the {ln[H2O], ln[HDO]} state.
        Sa' = P * Sa * P.T (equation 7)  =>  Sa = inv(P) * Sa' * inv(P.T)
        """
        P = self.traf()
        # BUG FIX: the original called self.apriori_covariance_traf(), a
        # method that does not exist anywhere in this class and would raise
        # AttributeError.  Sa' is produced by assumed_covariance() (compare
        # smoothing_error, which already uses it for the same role).
        return np.linalg.inv(P) @ self.assumed_covariance() @ np.linalg.inv(P.T)

    def type1_of(self, matrix) -> np.ndarray:
        """A' (see equation 10)

        Return the transformed matrix P @ A @ inv(P).
        """
        P = self.traf()
        return P @ matrix @ np.linalg.inv(P)

    def c_by_type1(self, A_) -> np.ndarray:
        # C operator assembled from the lower blocks of A' (used in eq. 15).
        return np.block([[A_[self.nol:, self.nol:], np.zeros((self.nol, self.nol))],
                         [-A_[self.nol:, :self.nol], np.identity(self.nol)]])

    def c_by_avk(self, avk):
        """Convenience: C computed directly from an averaging kernel."""
        A_ = self.type1_of(avk)
        return self.c_by_type1(A_)

    def type2_of(self, matrix) -> np.ndarray:
        """A'' (see equation 15)

        A posteriori transformed matrix C @ A'.
        """
        A_ = self.type1_of(matrix)
        C = self.c_by_type1(A_)
        return C @ A_

    def smoothing_error(self, actual_matrix, to_compare, **kwargs) -> np.ndarray:
        """S's (see equation 11)

        (A - A~) Sa' (A - A~).T for two (transformed) kernels; ``kwargs``
        are forwarded to assumed_covariance().
        """
        diff = actual_matrix - to_compare
        return diff @ self.assumed_covariance(**kwargs) @ diff.T
|
<filename>cyner/tner/checkpoint_versioning.py
""" checkpoint versioning tool """
import os
import hashlib
import json
import shutil
import logging
import requests
from glob import glob
# BUG FIX: __all__ must be a sequence of names; as a bare string,
# `from module import *` would iterate its characters and fail.
__all__ = ['Argument']
class Argument:
    """ Model training arguments manager """

    def __init__(self, checkpoint_dir: str, **kwargs):
        """ Model training arguments manager

         Parameter
        -------------------
        checkpoint_dir: str
            Directory to organize the checkpoint files
        kwargs: model arguments
        """
        assert isinstance(checkpoint_dir, str)
        self.checkpoint_dir, self.parameter, self.is_trained = self.version(checkpoint_dir, parameter=kwargs)
        logging.info('checkpoint: {}'.format(self.checkpoint_dir))
        for k, v in self.parameter.items():
            logging.info(' - [arg] {}: {}'.format(k, str(v)))
        # expose every argument as an attribute (e.g. self.transformers_model)
        self.__dict__.update(self.parameter)

    @staticmethod
    def md5(file_name):
        """ get MD5 checksum of a file, reading in 4 KiB chunks """
        hash_md5 = hashlib.md5()
        with open(file_name, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()

    def version(self, checkpoint_dir: str, parameter: dict = None):
        """ Checkpoint versioning

        Returns (checkpoint_dir, parameter, is_trained).
        """
        # NOTE(review): only json.JSONDecodeError is handled below; network
        # failures from requests.get (offline use with a local checkpoint)
        # would propagate -- confirm whether that is intended.
        is_trained = True
        try:
            # load checkpoint on huggingface.transformers that trained with TNER
            url = 'https://huggingface.co/{}/raw/main/parameter.json'.format(parameter['transformers_model'])
            parameter['tner_config'] = json.loads(requests.get(url).content)
            logging.info('load TNER finetuned checkpoint: {}'.format(parameter['transformers_model']))
            checkpoint_dir = self.issue_new_checkpoint(checkpoint_dir, parameter)
            return checkpoint_dir, parameter, is_trained
        except json.JSONDecodeError:
            if os.path.exists(parameter['transformers_model']):
                # load local checkpoint that trained with TNER
                logging.info('load local checkpoint: {}'.format(parameter['transformers_model']))
            else:
                # new check point for finetuning
                is_trained = False
                logging.info('create new checkpoint')
            checkpoint_dir = self.issue_new_checkpoint(checkpoint_dir, parameter)
            return checkpoint_dir, parameter, is_trained

    def issue_new_checkpoint(self, checkpoint_dir, parameter):
        """Create (or reuse) a checkpoint directory for ``parameter``.

        The first checkpoint uses ``checkpoint_dir`` itself; subsequent ones
        get a ``<dir>_<md5-of-parameters>`` suffix.  Exits when an existing
        checkpoint already holds an identical configuration.
        """
        checkpoints = self.cleanup_checkpoint_dir(checkpoint_dir)
        if len(checkpoints) == 0:
            os.makedirs(checkpoint_dir, exist_ok=True)
            # create a new checkpoint
            with open('{}/parameter.json'.format(checkpoint_dir), 'w') as f:
                json.dump(parameter, f)
            return checkpoint_dir
        for _dir in checkpoints:
            with open('{}/parameter.json'.format(_dir), 'r') as f:
                if parameter == json.load(f):
                    exit('find same configuration at: {}'.format(_dir))
        # create a new checkpoint named by the MD5 of its parameter file
        with open('{}/tmp.json'.format(checkpoint_dir), 'w') as f:
            json.dump(parameter, f)
        _id = self.md5('{}/tmp.json'.format(checkpoint_dir))
        new_checkpoint_dir = '{}_{}'.format(checkpoint_dir, _id)
        os.makedirs(new_checkpoint_dir, exist_ok=True)
        shutil.move('{}/tmp.json'.format(checkpoint_dir), '{}/parameter.json'.format(new_checkpoint_dir))
        return new_checkpoint_dir

    @staticmethod
    def cleanup_checkpoint_dir(checkpoint_dir):
        """Remove incomplete checkpoints; return the remaining ones.

        A checkpoint is complete when it holds parameter.json,
        pytorch_model.bin and tokenizer_config.json.
        """
        all_dir = glob('{}*'.format(checkpoint_dir))
        if len(all_dir) == 0:
            return []
        required = ('parameter.json', 'pytorch_model.bin', 'tokenizer_config.json')
        for _dir in all_dir:
            # BUG FIX: the original tested the files under `checkpoint_dir`
            # (the glob prefix) instead of the candidate `_dir`, so the
            # completeness check never looked at the directory being judged
            # and complete checkpoints could be deleted.
            if not all(os.path.exists(os.path.join(_dir, name)) for name in required):
                logging.info('removed incomplete checkpoint {}'.format(_dir))
                shutil.rmtree(_dir)
        return glob('{}*'.format(checkpoint_dir))
|
#!/usr/bin/python
#
# Copyright (c) 2017 <NAME>, <<EMAIL>>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_sqlelasticpool_facts
version_added: "2.5"
short_description: Get SQL Elastic Pool facts.
description:
- Get facts of SQL Elastic Pool.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
server_name:
description:
- The name of the server.
required: True
elastic_pool_name:
description:
- The name of the elastic pool to be retrieved.
extends_documentation_fragment:
- azure
author:
- "<NAME> (@zikalino)"
'''
EXAMPLES = '''
- name: Get instance of SQL Elastic Pool
azure_rm_sqlelasticpool_facts:
resource_group: resource_group_name
server_name: server_name
elastic_pool_name: elastic_pool_name
- name: List instances of SQL Elastic Pool
azure_rm_sqlelasticpool_facts:
resource_group: resource_group_name
server_name: server_name
'''
RETURN = '''
elastic_pools:
description: A list of dict results where the key is the name of the SQL Elastic Pool and the values are the facts for that SQL Elastic Pool.
returned: always
type: complex
contains:
sqlelasticpool_name:
description: The key is the name of the server that the values relate to.
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/sqlcrudtest-2369/providers/Microsoft.Sql/servers/sqlcrudtest-
8069/elasticPools/sqlcrudtest-8102"
name:
description:
- Resource name.
returned: always
type: str
sample: sqlcrudtest-8102
type:
description:
- Resource type.
returned: always
type: str
sample: Microsoft.Sql/servers/elasticPools
location:
description:
- Resource location.
returned: always
type: str
sample: Japan East
state:
description:
- "The state of the elastic pool. Possible values include: 'Creating', 'Ready', 'Disabled'"
returned: always
type: str
sample: Ready
edition:
description:
- "The edition of the elastic pool. Possible values include: 'Basic', 'Standard', 'Premium'"
returned: always
type: str
sample: Basic
dtu:
description:
- The total shared DTU for the database elastic pool.
returned: always
type: int
sample: 50
kind:
description:
- Kind of elastic pool. This is metadata used for the Azure portal experience.
returned:
type: str
sample: kind
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.sql import SqlManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMElasticPoolsFacts(AzureRMModuleBase):
    """Ansible facts module for Azure SQL elastic pools.

    Fetches one pool when ``elastic_pool_name`` is supplied; otherwise lists
    every pool on the given server.
    """

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            elastic_pool_name=dict(
                type='str'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False,
            ansible_facts=dict()
        )
        self.mgmt_client = None
        self.resource_group = None
        self.server_name = None
        self.elastic_pool_name = None
        super(AzureRMElasticPoolsFacts, self).__init__(self.module_arg_spec)

    def exec_module(self, **kwargs):
        """Entry point called by Ansible; dispatches to get() or list_by_server()."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(SqlManagementClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        if (self.resource_group is not None and
                self.server_name is not None and
                self.elastic_pool_name is not None):
            self.results['elastic_pools'] = self.get()
        elif (self.resource_group is not None and
              self.server_name is not None):
            self.results['elastic_pools'] = self.list_by_server()
        return self.results

    def get(self):
        '''
        Gets facts of the specified SQL Elastic Pool.

        :return: dict mapping the pool name to its serialized state
        '''
        response = None
        results = {}
        try:
            response = self.mgmt_client.elastic_pools.get(resource_group_name=self.resource_group,
                                                          server_name=self.server_name,
                                                          elastic_pool_name=self.elastic_pool_name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            # FIX: the exception was bound but never reported, leaving
            # failures undiagnosable; include the error in the module log.
            self.log('Could not get facts for ElasticPools. Error: {0}'.format(str(e)))

        if response is not None:
            results[response.name] = response.as_dict()
        return results

    def list_by_server(self):
        '''
        Lists facts for every SQL Elastic Pool on the server.

        :return: dict mapping each pool name to its serialized state
        '''
        response = None
        results = {}
        try:
            response = self.mgmt_client.elastic_pools.list_by_server(resource_group_name=self.resource_group,
                                                                     server_name=self.server_name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            # FIX: report the caught error (was silently discarded).
            self.log('Could not get facts for ElasticPools. Error: {0}'.format(str(e)))

        if response is not None:
            for item in response:
                results[item.name] = item.as_dict()
        return results
def main():
    """Module entry point: instantiating the facts class hands control to Ansible."""
    AzureRMElasticPoolsFacts()


if __name__ == '__main__':
    main()
|
'''
This module contains the handler for web requests pertaining to
the list of oversubscribed modules.
'''
from app import RENDER
import web
from components import model, session
from components.handlers.fixed_module_mountings import Fixed
from components.handlers.tentative_module_mountings import Tentative
class OversubModule(object):
    '''
    This class contains the implementations of the GET
    requests for the oversubscribed-modules page.
    '''

    @staticmethod
    def _collect_oversub(full_mounting_plan, target_ay_sem, quota_index, students_index):
        '''
        Build the list of oversubscribed modules from a mounting plan.

        A module is oversubscribed when the number of interested students
        exceeds its quota, or when it has any students while its quota is
        unknown ('?') or the module is unmounted ('-').

        :param quota_index: subplan column holding the semester's quota
        :param students_index: subplan column holding the student count
        :return: list of (code, name, ay_sem, quota, num_students,
                 oversub_amount) tuples
        '''
        oversub_mods = []
        for subplan in full_mounting_plan:
            module_code = subplan[0]
            module_name = subplan[1]
            quota = subplan[quota_index]
            num_students = subplan[students_index]
            quota_unknown = quota == '?' or quota == '-'
            if (not quota_unknown and num_students > quota) \
                    or (quota_unknown and num_students > 0):
                # without a numeric quota, every interested student counts
                oversub_amount = num_students if quota_unknown \
                    else num_students - quota
                oversub_mods.append((module_code, module_name, target_ay_sem,
                                     quota, num_students, oversub_amount))
        return oversub_mods

    def GET(self, *test_data):
        '''
        Renders the oversubscribed modules page if users requested
        for the page through the GET method.
        '''
        if test_data:
            target_ay_sem = test_data[0]
        else:
            web.header('X-Frame-Options', 'SAMEORIGIN')
            web.header('X-Content-Type-Options', 'nosniff')
            web.header('X-XSS-Protection', '1')
            if not session.validate_session():
                raise web.seeother('/login')
            input_data = model.validate_input(web.input(), ["aysem"],
                                              aysem_specific=False, attr_required=False)
            try:
                target_ay_sem = input_data.aysem
            except AttributeError:
                target_ay_sem = model.get_current_ay_sem()

        all_ay_sems = model.get_all_ay_sems()
        current_ay = model.get_current_ay()
        # Current-AY semesters read the fixed mounting plan; future ones the
        # tentative plan (which additionally needs is_testing=True).
        if target_ay_sem[0:8] == current_ay:
            mounting_handler = Fixed()
            mounting_handler.GET(to_render=False, logged_in=True)
        else:
            mounting_handler = Tentative()
            mounting_handler.GET(to_render=False, logged_in=True, is_testing=True)
        full_mounting_plan = mounting_handler.full_mounting_plan

        # Quota / student-count columns differ between Sem 1 (4, 6) and Sem 2 (5, 7).
        if target_ay_sem[9:15] == "Sem 1":
            list_of_oversub_mod = self._collect_oversub(
                full_mounting_plan, target_ay_sem, 4, 6)
        else:
            list_of_oversub_mod = self._collect_oversub(
                full_mounting_plan, target_ay_sem, 5, 7)

        if not test_data:
            return RENDER.oversubscribedModules(list_of_oversub_mod, all_ay_sems, target_ay_sem)
        else:
            return list_of_oversub_mod
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.