# Copyright (c) 2019 <NAME>
"""
A collection of Poker games often used in computational poker research.
"""
from pathlib import Path
import numpy as np
from PokerRL.game.Poker import Poker
from PokerRL.game._.rl_env.game_rules import HoldemRules, LeducRules, FlopHoldemRules, BigLeducRules
from PokerRL.game._.rl_env.poker_types.DiscretizedPokerEnv import DiscretizedPokerEnv
from PokerRL.game._.rl_env.poker_types.LimitPokerEnv import LimitPokerEnv
from PokerRL.game._.rl_env.poker_types.NoLimitPokerEnv import NoLimitPokerEnv
from PokerRL.game.PokerEnvStateDictEnums import EnvDictIdxs, PlayerDictIdxs
from PokerRL.game._.rl_env.base._Deck import DeckOfCards
# """""""""""""""
# Leduc Family
# """""""""""""""
class StandardLeduc(LeducRules, LimitPokerEnv):
"""
Leduc Hold'em is a very small poker game meant for fast experimentation with new algorithms. It is played with 3
ranks and 2 suits. Typically players place an ante of 1, the small_bet is 2, and the big_bet is 4.
"""
RULES = LeducRules
IS_FIXED_LIMIT_GAME = True
IS_POT_LIMIT_GAME = False
MAX_N_RAISES_PER_ROUND = {
Poker.PREFLOP: 2,
Poker.FLOP: 2,
}
SMALL_BLIND = 0
BIG_BLIND = 0
ANTE = 1
SMALL_BET = 2
BIG_BET = 4
DEFAULT_STACK_SIZE = 13
EV_NORMALIZER = 1000.0 / ANTE # Milli Antes
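    # Example: an average winning of 0.5 antes/game reads as 0.5 * EV_NORMALIZER
    # = 500 milli-antes/game after normalization.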
WIN_METRIC = Poker.MeasureAnte
ROUND_WHERE_BIG_BET_STARTS = Poker.FLOP
def __init__(self, env_args, lut_holder, is_evaluating):
LeducRules.__init__(self)
LimitPokerEnv.__init__(self,
env_args=env_args,
lut_holder=lut_holder,
is_evaluating=is_evaluating)
class BigLeduc(BigLeducRules, LimitPokerEnv):
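    """
    Leduc variant with deeper stacks (100 chips) and a raise cap of 6 per round.
    """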
RULES = BigLeducRules
IS_FIXED_LIMIT_GAME = True
IS_POT_LIMIT_GAME = False
MAX_N_RAISES_PER_ROUND = {
Poker.PREFLOP: 6,
Poker.FLOP: 6,
}
SMALL_BLIND = 0
BIG_BLIND = 0
ANTE = 1
SMALL_BET = 2
BIG_BET = 4
DEFAULT_STACK_SIZE = 100
EV_NORMALIZER = 1000.0 / ANTE # Milli Antes
WIN_METRIC = Poker.MeasureAnte
ROUND_WHERE_BIG_BET_STARTS = Poker.FLOP
def __init__(self, env_args, lut_holder, is_evaluating):
BigLeducRules.__init__(self)
LimitPokerEnv.__init__(self,
env_args=env_args,
lut_holder=lut_holder,
is_evaluating=is_evaluating)
class MidLeduc(BigLeducRules, LimitPokerEnv):
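    """
    BigLeduc rules with the standard raise cap of 2 per round, but 100-chip stacks.
    """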
RULES = BigLeducRules
IS_FIXED_LIMIT_GAME = True
IS_POT_LIMIT_GAME = False
MAX_N_RAISES_PER_ROUND = {
Poker.PREFLOP: 2,
Poker.FLOP: 2,
}
SMALL_BLIND = 0
BIG_BLIND = 0
ANTE = 1
SMALL_BET = 2
BIG_BET = 4
DEFAULT_STACK_SIZE = 100
EV_NORMALIZER = 1000.0 / ANTE # Milli Antes
WIN_METRIC = Poker.MeasureAnte
ROUND_WHERE_BIG_BET_STARTS = Poker.FLOP
def __init__(self, env_args, lut_holder, is_evaluating):
BigLeducRules.__init__(self)
LimitPokerEnv.__init__(self,
env_args=env_args,
lut_holder=lut_holder,
is_evaluating=is_evaluating)
class NoLimitLeduc(LeducRules, NoLimitPokerEnv):
"""
A variant of Leduc with no bet-cap in the no-limit format. It uses blinds instead of antes.
"""
RULES = LeducRules
IS_FIXED_LIMIT_GAME = False
IS_POT_LIMIT_GAME = False
SMALL_BLIND = 50
BIG_BLIND = 100
ANTE = 0
DEFAULT_STACK_SIZE = 20000
EV_NORMALIZER = 1000.0 / BIG_BLIND # Milli BB
WIN_METRIC = Poker.MeasureBB
def __init__(self, env_args, lut_holder, is_evaluating):
LeducRules.__init__(self)
NoLimitPokerEnv.__init__(self,
env_args=env_args,
lut_holder=lut_holder,
is_evaluating=is_evaluating)
class DiscretizedNLLeduc(LeducRules, DiscretizedPokerEnv):
"""
    Discretized version of No-Limit Leduc Hold'em (i.e. agents can only select from a predefined set of bet sizes)
"""
RULES = LeducRules
IS_FIXED_LIMIT_GAME = False
IS_POT_LIMIT_GAME = False
SMALL_BLIND = 50
BIG_BLIND = 100
ANTE = 0
DEFAULT_STACK_SIZE = 20000
EV_NORMALIZER = 1000.0 / BIG_BLIND # Milli BB
WIN_METRIC = Poker.MeasureBB
def __init__(self, env_args, lut_holder, is_evaluating):
LeducRules.__init__(self)
DiscretizedPokerEnv.__init__(self,
env_args=env_args,
lut_holder=lut_holder,
is_evaluating=is_evaluating)
# """""""""""""""
# Hold'em Family
# """""""""""""""
class LimitHoldem(HoldemRules, LimitPokerEnv):
"""
    Fixed-Limit Texas Hold'em is a long-standing benchmark game that has been essentially solved by Bowling et al.
(http://science.sciencemag.org/content/347/6218/145) using an efficient distributed implementation of CFR+, an
optimized version of regular CFR.
"""
RULES = HoldemRules
IS_FIXED_LIMIT_GAME = True
IS_POT_LIMIT_GAME = False
MAX_N_RAISES_PER_ROUND = {
Poker.PREFLOP: 4,
Poker.FLOP: 4,
Poker.TURN: 4,
Poker.RIVER: 4,
}
ROUND_WHERE_BIG_BET_STARTS = Poker.TURN
SMALL_BLIND = 1
BIG_BLIND = 2
ANTE = 0
SMALL_BET = 2
BIG_BET = 4
DEFAULT_STACK_SIZE = 48
EV_NORMALIZER = 1000.0 / BIG_BLIND # Milli BB
WIN_METRIC = Poker.MeasureBB
def __init__(self, env_args, lut_holder, is_evaluating):
HoldemRules.__init__(self)
LimitPokerEnv.__init__(self,
env_args=env_args,
lut_holder=lut_holder,
is_evaluating=is_evaluating)
class NoLimitHoldem(HoldemRules, NoLimitPokerEnv):
"""
    No-Limit Texas Hold'em is the largest poker game in which AI beat humans as of 31.08.2018. It has been the focus of
work such as DeepStack (https://arxiv.org/abs/1701.01724) and Libratus
(http://science.sciencemag.org/content/early/2017/12/15/science.aao1733).
"""
RULES = HoldemRules
IS_FIXED_LIMIT_GAME = False
IS_POT_LIMIT_GAME = False
SMALL_BLIND = 50
BIG_BLIND = 100
ANTE = 0
DEFAULT_STACK_SIZE = 20000
EV_NORMALIZER = 1000.0 / BIG_BLIND # Milli BB
WIN_METRIC = Poker.MeasureBB
def __init__(self, env_args, lut_holder, is_evaluating):
HoldemRules.__init__(self)
NoLimitPokerEnv.__init__(self,
env_args=env_args,
lut_holder=lut_holder,
is_evaluating=is_evaluating)
class DiscretizedNLHoldem(HoldemRules, DiscretizedPokerEnv):
"""
    Discretized version of No-Limit Texas Hold'em (i.e. agents can only select from a predefined set of bet sizes)
"""
RULES = HoldemRules
IS_FIXED_LIMIT_GAME = False
IS_POT_LIMIT_GAME = False
SMALL_BLIND = 50
BIG_BLIND = 100
ANTE = 0
DEFAULT_STACK_SIZE = 20000
EV_NORMALIZER = 1000.0 / BIG_BLIND # Milli BB
WIN_METRIC = Poker.MeasureBB
def __init__(self, env_args, lut_holder, is_evaluating):
HoldemRules.__init__(self)
DiscretizedPokerEnv.__init__(self,
env_args=env_args,
lut_holder=lut_holder,
is_evaluating=is_evaluating)
class Flop5Holdem(FlopHoldemRules, LimitPokerEnv):
RULES = FlopHoldemRules
IS_FIXED_LIMIT_GAME = True
IS_POT_LIMIT_GAME = False
SMALL_BLIND = 50
BIG_BLIND = 100
ANTE = 0
DEFAULT_STACK_SIZE = 20000
EV_NORMALIZER = 1000.0 / BIG_BLIND # Milli BB
WIN_METRIC = Poker.MeasureBB
MAX_N_RAISES_PER_ROUND = {
Poker.PREFLOP: 2, # is actually 1, but BB counts as a raise in this codebase
Poker.FLOP: 2,
}
ROUND_WHERE_BIG_BET_STARTS = Poker.TURN
UNITS_SMALL_BET = None
UNITS_BIG_BET = None
FIRST_ACTION_NO_CALL = True
def __init__(self, env_args, lut_holder, is_evaluating):
FlopHoldemRules.__init__(self)
LimitPokerEnv.__init__(self,
env_args=env_args,
lut_holder=lut_holder,
is_evaluating=is_evaluating)
def _adjust_raise(self, raise_total_amount_in_chips):
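        # ignore the requested raise size: this game fixes every raise to
        # exactly pot-size (fraction=1.0)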
return self.get_fraction_of_pot_raise(fraction=1.0, player_that_bets=self.current_player)
def read_subgame_file(subgame_id):
    file = Path(__file__).parent.parent.parent / "LibratusEndgames" / "subgame{}.txt".format(subgame_id)
    with file.open("r") as f:
        for line in f:
            if "round" in line:
                game_round = int(line.strip().split(" ")[1])
            if "board" in line:
                board = line.strip().split(" ")[1]
                # split the board string into 2-character card codes
                board = [board[i * 2: (i + 1) * 2] for i in range(len(board) // 2)]
            if "pot" in line:
                pot = int(line.strip().split(" ")[1])
            if "reach" in line:
                reach = list(map(float, line.strip().split(" ")[1:]))
    # the first 1326 reach probabilities belong to player 0, the rest to player 1
    return game_round, board, pot, reach[:1326], reach[1326:]
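# Subgame file layout inferred from the parser above (an assumption, not a
# verified spec): a "round <n>" line, a "board <cards>" line with concatenated
# 2-character card codes, a "pot <chips>" line, and a "reach" line carrying
# 2 * 1326 floats (one reach probability per hole-card combo per player).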
class DiscretizedNLHoldemSubGame(DiscretizedNLHoldem):
CURRENT_ROUND = NotImplemented
SUBGAME_ID = NotImplemented
def __init__(self, env_args, lut_holder, is_evaluating):
super().__init__(env_args, lut_holder, is_evaluating)
self.build_str_to_id_dict()
self.build_str_to_hand_id_dict()
self.build_libratus_hand_id_to_str_dict()
self.create_root_env_state()
def build_str_to_id_dict(self):
self.str_to_id_dict = {}
self.id_2d_to_str_dict = {}
self.id_1d_to_str_dict = {}
for i in range(4):
for j in range(13):
cardid = [j, i]
cardstr = self.cards2str([cardid])[:2]
self.str_to_id_dict[cardstr] = cardid
self.id_2d_to_str_dict[(j, i)] = cardstr
self.id_2d_to_str_dict[(i, j)] = cardstr
card_1d = self.lut_holder.get_1d_cards(np.array([[j, i]]))[0]
self.id_1d_to_str_dict[card_1d] = cardstr
def build_str_to_hand_id_dict(self):
self.str_to_hand_id_dict = {}
self.hand_id_to_str_dict = {}
card_strs = list(self.str_to_id_dict.keys())
for i in range(52):
for j in range(i + 1, 52):
card1_str = card_strs[i]
card2_str = card_strs[j]
card1_id = self.str_to_id_dict[card1_str]
card2_id = self.str_to_id_dict[card2_str]
if card1_id > card2_id:
card1_id, card2_id = card2_id, card1_id
hand_2d_id = np.array([card1_id, card2_id])
hand_id = self.lut_holder.get_range_idx_from_hole_cards(hand_2d_id)
hand_str = card1_str + card2_str
self.str_to_hand_id_dict[hand_str] = hand_id
self.hand_id_to_str_dict[hand_id] = hand_str
hand_str = card2_str + card1_str
self.str_to_hand_id_dict[hand_str] = hand_id
def build_libratus_hand_id_to_str_dict(self):
self.libratus_hand_id_to_str_dict = {}
card_ranks = ["2", "3", "4", "5", "6", "7", "8", "9", "T", "J", "Q", "K", "A"]
card_suits = ["s", "h", "d", 'c']
card_strs = []
for card_rank in card_ranks:
for card_suit in card_suits:
card_str = card_rank + card_suit
card_strs.append(card_str)
count = 0
for i in range(52):
for j in range(i + 1, 52):
card1_str = card_strs[i]
card2_str = card_strs[j]
hand_str = card1_str + card2_str
libratus_hand_id = count
count += 1
self.libratus_hand_id_to_str_dict[libratus_hand_id] = hand_str
def libratus_reach_to_pokerRL_reach(self, libratus_reach):
        pokerRL_reach = [0] * 1326
for libratus_hand_id in range(1326):
hand_str = self.libratus_hand_id_to_str_dict[libratus_hand_id]
pokerRL_hand_id = self.str_to_hand_id_dict[hand_str]
pokerRL_reach[pokerRL_hand_id] = libratus_reach[libratus_hand_id]
return pokerRL_reach
def create_root_env_state(self):
        game_round, board, pot, reach1, reach2 = read_subgame_file(self.SUBGAME_ID)
        deck = DeckOfCards(num_suits=4, num_ranks=13)
        # pad the board out to 5 cards with placeholder values for undealt cards
        board = np.array([self.str_to_id_dict[card_str] for card_str in board] + [[-127, -127] for _ in range(5 - len(board))])
hand1 = np.array([[0, 0], [0, 1]])
hand2 = np.array([[0, 2], [0, 3]])
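        # placeholder hole cards; the players' actual ranges presumably live in
        # self.reach_probs (set below) rather than in these concrete hands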
for cards in [board, hand1, hand2]:
deck.remove_cards(cards)
main_pot = pot
stack = 20000
self.root_env_state = {
EnvDictIdxs.is_evaluating: True,
EnvDictIdxs.current_round: self.CURRENT_ROUND,
EnvDictIdxs.side_pots: [0, 0],
EnvDictIdxs.main_pot: main_pot, # int by value
EnvDictIdxs.board_2d: board, # np array
EnvDictIdxs.last_action: [1, 2, 1],
EnvDictIdxs.capped_raise: None,
EnvDictIdxs.current_player: 1, # idx in _env.seats
EnvDictIdxs.last_raiser: None,
EnvDictIdxs.deck: deck.state_dict(),
EnvDictIdxs.n_actions_this_episode: 0, # int
EnvDictIdxs.seats:
[
{
PlayerDictIdxs.seat_id: 0,
PlayerDictIdxs.hand: hand1, # np array
PlayerDictIdxs.hand_rank: None, # int by value
PlayerDictIdxs.stack: stack - main_pot // 2, # int by value
PlayerDictIdxs.current_bet: 0, # int by value
PlayerDictIdxs.is_allin: False, # bool by value
PlayerDictIdxs.folded_this_episode: False, # bool by value
PlayerDictIdxs.has_acted_this_round: False, # bool by value
PlayerDictIdxs.side_pot_rank: -1 # int by value
},
{
PlayerDictIdxs.seat_id: 1,
PlayerDictIdxs.hand: hand2, # np array
PlayerDictIdxs.hand_rank: None, # int by value
PlayerDictIdxs.stack: stack - main_pot // 2, # int by value
PlayerDictIdxs.current_bet: 0, # int by value
PlayerDictIdxs.is_allin: False, # bool by value
PlayerDictIdxs.folded_this_episode: False, # bool by value
PlayerDictIdxs.has_acted_this_round: False, # bool by value
PlayerDictIdxs.side_pot_rank: -1 # int by value
}
],
EnvDictIdxs.n_raises_this_round: 0
}
reach1 = self.libratus_reach_to_pokerRL_reach(reach1)
reach2 = self.libratus_reach_to_pokerRL_reach(reach2)
self.reach_probs = np.array([reach1, reach2], dtype=np.float32)
self.reach_probs /= np.sum(self.reach_probs, axis=1, keepdims=True)
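        # normalize so each player's reach probabilities sum to 1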
class DiscretizedNLHoldemSubGame4(DiscretizedNLHoldemSubGame):
CURRENT_ROUND = 3
SUBGAME_ID = 4
class DiscretizedNLHoldemSubGame3(DiscretizedNLHoldemSubGame):
CURRENT_ROUND = 3
SUBGAME_ID = 3
class DiscretizedNLHoldemSubGame2(DiscretizedNLHoldemSubGame):
CURRENT_ROUND = 2
SUBGAME_ID = 2
class DiscretizedNLHoldemSubGame1(DiscretizedNLHoldemSubGame):
CURRENT_ROUND = 2
SUBGAME_ID = 1
"""
register all new envs here!
"""
ALL_ENVS = [
StandardLeduc,
BigLeduc,
MidLeduc,
NoLimitLeduc,
DiscretizedNLLeduc,
LimitHoldem,
NoLimitHoldem,
DiscretizedNLHoldem,
Flop5Holdem,
DiscretizedNLHoldemSubGame4,
DiscretizedNLHoldemSubGame3,
DiscretizedNLHoldemSubGame2,
DiscretizedNLHoldemSubGame1,
]
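# A minimal lookup sketch (not part of the original API; assumes class names
# are unique): resolve a registered game class by name.
ENV_BY_NAME = {env.__name__: env for env in ALL_ENVS}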
import numpy as np
import pandas as pd
import lightgbm as lgb
from catboost import CatBoost
from catboost import Pool
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, f1_score
from tqdm import tqdm
class Report:
def __init__(self, y_train_proba, y_valid_proba, y_train_eval, y_valid_eval):
self.y_train_proba = y_train_proba
self.y_valid_proba = y_valid_proba
self.y_train_eval = y_train_eval
self.y_valid_eval = y_valid_eval
def get_proba(self, is_train=True):
return self.y_train_proba if is_train else self.y_valid_proba
def get_eval(self, is_train=True):
return self.y_train_eval if is_train else self.y_valid_eval
def get_text_model(train, valid):
    def get_c_with_prefix(prefix):
        return [column for column in train.columns.tolist() if column.startswith(prefix)]
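    # e.g. get_c_with_prefix('word_') would pick up columns such as 'word_fire'
    # (a hypothetical column name) while skipping 'target'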
c_word, c_url, c_hashtag = get_c_with_prefix('word_'), get_c_with_prefix('url_'), get_c_with_prefix('hashtag_')
train.fillna(0, inplace=True)
X_train, X_valid = train[c_word+c_url+c_hashtag].values, valid[c_word+c_url+c_hashtag].values
y_train, y_valid = train.target.values, valid.target.values
lgb_train = lgb.Dataset(X_train, y_train)
lgb_valid = lgb.Dataset(X_valid, y_valid, reference=lgb_train)
def lgb_f1_score(y_hat, data):
y_true = data.get_label()
        y_hat = np.round(y_hat)  # scikit-learn's f1_score expects hard labels, not probabilities
return 'f1', f1_score(y_true, y_hat), True
lgbm_params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbosity': -1,
'boosting_type': 'gbdt',
'learning_rate': 0.1,
'lambda_l1': 3.642434329823594,
'lambda_l2': 1.0401748765492007e-08,
'num_leaves': 172,
'feature_fraction': 0.8251431673667773,
'bagging_fraction': 0.9755605959841563,
'bagging_freq': 2,
'min_child_samples': 5,
'random_state': 68
}
model = lgb.train(
lgbm_params,
lgb_train,
valid_sets=lgb_valid,
verbose_eval=False,
feval=lgb_f1_score,
num_boost_round=300,
)
def get_pred_f1(X, y):
y_pred = model.predict(X, num_iteration=model.best_iteration)
y_pred_cls = y_pred >= 0.5
return y_pred, f1_score(y, y_pred_cls, average=None)[0]
y_train_proba, train_f1 = get_pred_f1(X_train, y_train)
y_valid_proba, valid_f1 = get_pred_f1(X_valid, y_valid)
report = Report(y_train_proba, y_valid_proba, train_f1, valid_f1)
return report
def get_category_model(train, valid):
c_text = ['keyword', 'location']
    X_train, X_valid = train[c_text].values, valid[c_text].values
y_train, y_valid = train.target, valid.target
train_pool = Pool(X_train, label=y_train)
valid_pool = Pool(X_valid, label=y_valid)
params = {
'used_ram_limit': '3gb',
'eval_metric': 'F1',
'verbose': None,
'silent': False,
'learning_rate': 0.3,
'num_boost_round': 1000,
'objective': 'CrossEntropy',
'colsample_bylevel': 0.010419852115438836,
'depth': 7,
'boosting_type': 'Ordered',
'bootstrap_type': 'Bayesian',
'random_state': 19,
'bagging_temperature': 9.096903904222094
}
model = CatBoost(params)
model.fit(train_pool, logging_level='Silent')
def get_pred_f1(X_pool, y):
y_pred = model.predict(X_pool, prediction_type='Class')
y_pred_proba = model.predict(X_pool, prediction_type='Probability')[:, 1]
return y_pred_proba, f1_score(y, y_pred, average=None)[0]
y_train_proba, train_f1 = get_pred_f1(train_pool, y_train)
y_valid_proba, valid_f1 = get_pred_f1(valid_pool, y_valid)
report = Report(y_train_proba, y_valid_proba, train_f1, valid_f1)
return report
def get_bert_model(train, valid):
    def get_c_with_prefix(train, prefix):
        return [column for column in train.columns.tolist() if column.startswith(prefix)]
c_vecs = get_c_with_prefix(train, 'vecs')
X_train, X_valid = train[c_vecs], valid[c_vecs]
y_train, y_valid = train.target.values, valid.target.values
lgb_train = lgb.Dataset(X_train, y_train)
lgb_valid = lgb.Dataset(X_valid, y_valid, reference=lgb_train)
def lgb_f1_score(y_hat, data):
y_true = data.get_label()
        y_hat = np.round(y_hat)  # scikit-learn's f1_score expects hard labels, not probabilities
return 'f1', f1_score(y_true, y_hat, average=None)[0], True
lgbm_params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbosity': -1,
'boosting_type': 'gbdt',
'learning_rate': 0.1,
'lambda_l1': 0.0003543420203502818,
'lambda_l2': 4.468466658809475,
'num_leaves': 169,
'feature_fraction': 0.8390907205934592,
'bagging_fraction': 0.8070674146918868,
'bagging_freq': 5,
'min_child_samples': 65,
'random_state': 5
}
model = lgb.train(
lgbm_params,
lgb_train,
valid_sets=lgb_valid,
verbose_eval=False,
feval=lgb_f1_score,
num_boost_round=300,
)
def get_pred_f1(X, y):
y_pred = model.predict(X, num_iteration=model.best_iteration)
y_pred_cls = y_pred >= 0.5
return y_pred, f1_score(y, y_pred_cls, average=None)[0]
y_train_proba, train_f1 = get_pred_f1(X_train, y_train)
y_valid_proba, valid_f1 = get_pred_f1(X_valid, y_valid)
report = Report(y_train_proba, y_valid_proba, train_f1, valid_f1)
return report
def get_merge_model(train, valid):
c_merge = ['text', 'category', 'bert']
    X_train, X_valid = train[c_merge].values, valid[c_merge].values
y_train, y_valid = train.target, valid.target
params = {
'max_iter': 100,
'verbose': 0,
'n_jobs': -1,
'penalty': 'l2',
'class_weight': None,
'warm_start': False,
'random_state': 33,
'multi_class': 'auto',
'solver': 'saga',
'C': 0.0017866172292125638
}
model = LogisticRegression(**params)
model.fit(X_train, y_train)
def get_pred_f1(X, y):
y_pred = model.predict(X)
        y_pred_proba = model.predict_proba(X)[:, 1]
return y_pred_proba, f1_score(y, y_pred, average=None)[0]
y_train_proba, train_f1 = get_pred_f1(X_train, y_train)
y_valid_proba, valid_f1 = get_pred_f1(X_valid, y_valid)
report = Report(y_train_proba, y_valid_proba, train_f1, valid_f1)
return report
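# Stacking: the predicted probabilities of the three base models above
# (word/url/hashtag LightGBM, keyword/location CatBoost, BERT-vector LightGBM)
# become the 'text', 'category' and 'bert' feature columns that the logistic
# regression in get_merge_model() combines into the final prediction.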
def cross_validation():
eval_text_train, eval_text_valid = [], []
eval_cat_train, eval_cat_valid = [], []
eval_bert_train, eval_bert_valid = [], []
eval_merge_train, eval_merge_valid = [], []
vecs_by_bert = pd.read_csv('./fact/train_vecs.csv')
for i in tqdm(range(1, 6)):
trainfile = './fact/train_cv_{}.csv'.format(i)
validfile = './fact/valid_cv_{}.csv'.format(i)
df_train = pd.merge(pd.read_csv(trainfile), vecs_by_bert, on='id')
df_valid = pd.merge(pd.read_csv(validfile), vecs_by_bert, on='id')
# text model
report_text = get_text_model(df_train, df_valid)
eval_text_train.append(report_text.get_eval(True))
eval_text_valid.append(report_text.get_eval(False))
# category model
report_cat = get_category_model(df_train, df_valid)
eval_cat_train.append(report_cat.get_eval(True))
eval_cat_valid.append(report_cat.get_eval(False))
# bert model
report_bert = get_bert_model(df_train, df_valid)
eval_bert_train.append(report_bert.get_eval(True))
eval_bert_valid.append(report_bert.get_eval(False))
# merge model
df_train_merge = pd.DataFrame(
{'text': report_text.get_proba(True),
'category': report_cat.get_proba(True),
'bert': report_bert.get_proba(True),
'target': df_train.target})
df_train_merge.to_csv('./fact/train_cv_merge_{}.csv'.format(i), index=None)
df_valid_merge = pd.DataFrame(
{'text': report_text.get_proba(False),
'category': report_cat.get_proba(False),
'bert': report_bert.get_proba(False),
'target': df_valid.target})
df_valid_merge.to_csv('./fact/valid_cv_merge_{}.csv'.format(i), index=None)
report_merge = get_merge_model(df_train_merge, df_valid_merge)
eval_merge_train.append(report_merge.get_eval(True))
eval_merge_valid.append(report_merge.get_eval(False))
pd.DataFrame(
{'id': df_valid.id,
'proba': report_merge.get_proba(False),
'target': df_valid.target}).to_csv('./fact/valid_merge_{}.csv'.format(i), index=None)
    def print_f1(evals, modelname, split):
        print('f1 of {0} for {1}: {2:.3f} +- {3:.3f} in ({4})'.format(
            split,
            modelname,
            np.mean(evals),
            np.std(evals),
            ', '.join(['{:.3f}'.format(eva) for eva in evals])
        ))
    print_f1(eval_text_train, 'text', 'train')
    print_f1(eval_text_valid, 'text', 'valid')
    print_f1(eval_cat_train, 'category', 'train')
    print_f1(eval_cat_valid, 'category', 'valid')
    print_f1(eval_bert_train, 'bert', 'train')
    print_f1(eval_bert_valid, 'bert', 'valid')
    print_f1(eval_merge_train, 'merge', 'train')
    print_f1(eval_merge_valid, 'merge', 'valid')
def test():
trainfile = './fact/train.csv'
testfile = './fact/test.csv'
vecs_train_by_bert = pd.read_csv('./fact/train_vecs.csv')
vecs_test_by_bert = pd.read_csv('./fact/test_vecs.csv')
df_train = pd.merge(pd.read_csv(trainfile), vecs_train_by_bert, on='id')
df_test = pd.merge(pd.read_csv(testfile), vecs_test_by_bert, on='id')
def get_report(train, test, get_model, model_name):
report = get_model(train, test)
print('f1 of train for {0}: {1:.3f}'.format(model_name, report.get_eval(True)))
print('f1 of test for {0}: {1:.3f}'.format(model_name, report.get_eval(False)))
df_submit = pd.DataFrame(
{'id': test.id,
'target': (report.get_proba(False) >= 0.5).astype(int)})
df_submit.to_csv('./output/submit_{}.csv'.format(model_name), index=None)
return report
report_text = get_report(df_train, df_test, get_text_model, 'text')
report_cat = get_report(df_train, df_test, get_category_model, 'category')
report_bert = get_report(df_train, df_test, get_bert_model, 'bert')
# merge
df_train_merge = pd.DataFrame(
{
'id': df_train.id,
'text': report_text.get_proba(True),
'category': report_cat.get_proba(True),
'bert': report_bert.get_proba(True),
'target': df_train.target})
df_test_merge = pd.DataFrame(
{
'id': df_test.id,
'text': report_text.get_proba(False),
'category': report_cat.get_proba(False),
'bert': report_bert.get_proba(False),
'target': df_test.target})
report_merge = get_report(df_train_merge, df_test_merge, get_merge_model, 'merge')
if __name__ == '__main__':
cross_validation()
test()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import seaborn as sns
data = pd.read_csv('/home/atrides/Desktop/R/statistics_with_Python/04_Exploring_Data_with_Graphs/Data_Files/Exam Anxiety.dat', sep=r'\s+')
data.set_index(['Code'],drop=True,inplace=True)
print(data.head())
fig, ax = plt.subplots(figsize=(10,10))
ax.set_xlabel('Anxiety')
ax.set_ylabel('Exam')
ax.set_title('Exam Performance vs Anxiety')
ax.scatter(data['Anxiety'], data['Exam'], marker='o')
plt.show()
corr_coeff = data['Exam'].corr(data['Anxiety'])
print(corr_coeff)
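# pandas .corr() defaults to Pearson's r, i.e. the linear correlation between
# anxiety and exam performance.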
x = np.array(data['Anxiety'])
y = np.array(data['Exam'])
coef = np.polyfit(x, y, 1)
poly1d_fn = np.poly1d(coef)
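# np.polyfit with degree 1 returns [slope, intercept]; np.poly1d wraps the
# coefficients into a callable polynomial, so poly1d_fn(x) evaluates the fit line.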
_ = plt.plot(x, y, 'yo', x, poly1d_fn(x), '--k')
plt.show()
# fitting a regression curve of higher order (cubic)
_ = sns.regplot(x="Anxiety", y='Exam', data=data,order=3)
plt.show()
_ = sns.lmplot(x='Anxiety', y='Exam', data=data, hue='Gender')
plt.show()
import luigi
import mlflow
import numpy as np
import random
import yaml
# from src.models import get_model_task_by_name
from src.utils.params_to_filename import encode_task_to_filename
from src.visualization.log_metrics import LogMetrics
_inf = np.finfo(np.float64).max
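# largest representable float64, used as an initial "worse than anything" score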
class SearchRandom(luigi.Task):
model_name = luigi.Parameter(
default='logistic-regression',
description='model name (e.g. logistic-regression)'
)
metric = luigi.Parameter(
default='accuracy',
description='metric to optimize on'
)
max_runs = luigi.IntParameter(
default=10,
description='maximum number of runs to evaluate'
)
random_seed = luigi.IntParameter(
default=12345,
description='seed for the random generator'
)
def output(self):
# TODO: Do I really need to store best solution?
filename = encode_task_to_filename(self)
return luigi.LocalTarget(
f'reports/scores/best_random_search__{filename}.yaml'
)
def run(self):
print('Search Random # 1')
random.seed(self.random_seed)
np.random.seed(self.random_seed)
params_space = [{
            # remove saga (because it is way too slow, x10 or so)
            # also newton-cg, because it is x100 slower
# 'solver': random.choice(['newton-cg', 'sag', 'saga', 'lbfgs']),
'solver': random.choice(['sag', 'lbfgs']),
'C': np.random.uniform(0, 1.0)
} for _ in range(self.max_runs)]
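        # random search: draw max_runs independent (solver, C) configurations
        # up front; each one is evaluated as its own LogMetrics task below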
print('Search Random # 2')
with mlflow.start_run() as run:
experiment_id = run.info.experiment_id
print('Search Random # 3')
# run all random tasks in parallel
# TODO: pass train, test, and validation sets?
tasks = yield [LogMetrics(
model_name=self.model_name,
model_params={
**params,
'random_seed': self.random_seed,
},
experiment_id=experiment_id,
) for params in params_space]
print('Search Random # 4')
# find the best params (based on validation metric)
best_run = None
best_val_train = -_inf
best_val_valid = -_inf
best_val_test = -_inf
for model_output in tasks:
# TODO: get the score and compare with the best
with model_output['score'].open('r') as f:
                    res = yaml.safe_load(f)  # yaml.load without an explicit Loader is deprecated
                # TODO: we don't yet have a validation set (should add one)
# new_val_valid = res['valid'][self.metric]
new_val_valid = res['test'][self.metric]
# TODO: in case of accuracy it is "<"
# in case of loss it should be ">"
print('best_val_valid < new_val_valid', best_val_valid, new_val_valid)
if best_val_valid < new_val_valid:
print('find better', new_val_valid)
best_run = res['run_id']
best_val_train = res['train'][self.metric]
best_val_valid = new_val_valid
best_val_test = res['test'][self.metric]
metrics = {
f'train_{self.metric}': float(best_val_train),
# f'val_{self.metric}': best_val_valid,
f'test_{self.metric}': float(best_val_test),
}
mlflow.set_tag('best_run', best_run)
mlflow.log_metrics(metrics)
with self.output().open('w') as f:
yaml.dump({
'metrics': metrics,
'best_run_id': best_run,
}, f, default_flow_style=False)
if __name__ == '__main__':
luigi.run()
| [
"mlflow.start_run",
"numpy.random.uniform",
"yaml.load",
"numpy.random.seed",
"src.utils.params_to_filename.encode_task_to_filename",
"mlflow.set_tag",
"luigi.run",
"yaml.dump",
"src.visualization.log_metrics.LogMetrics",
"random.choice",
"numpy.finfo",
"random.seed",
"luigi.LocalTarget",
... | [((247, 267), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (255, 267), True, 'import numpy as np\n'), ((323, 427), 'luigi.Parameter', 'luigi.Parameter', ([], {'default': '"""logistic-regression"""', 'description': '"""model name (e.g. logistic-regression)"""'}), "(default='logistic-regression', description=\n 'model name (e.g. logistic-regression)')\n", (338, 427), False, 'import luigi\n'), ((458, 530), 'luigi.Parameter', 'luigi.Parameter', ([], {'default': '"""accuracy"""', 'description': '"""metric to optimize on"""'}), "(default='accuracy', description='metric to optimize on')\n", (473, 530), False, 'import luigi\n'), ((568, 653), 'luigi.IntParameter', 'luigi.IntParameter', ([], {'default': '(10)', 'description': '"""maximum number of runs to evaluate"""'}), "(default=10, description='maximum number of runs to evaluate'\n )\n", (586, 653), False, 'import luigi\n'), ((689, 767), 'luigi.IntParameter', 'luigi.IntParameter', ([], {'default': '(12345)', 'description': '"""seed for the random generator"""'}), "(default=12345, description='seed for the random generator')\n", (707, 767), False, 'import luigi\n'), ((3848, 3859), 'luigi.run', 'luigi.run', ([], {}), '()\n', (3857, 3859), False, 'import luigi\n'), ((889, 918), 'src.utils.params_to_filename.encode_task_to_filename', 'encode_task_to_filename', (['self'], {}), '(self)\n', (912, 918), False, 'from src.utils.params_to_filename import encode_task_to_filename\n'), ((934, 1006), 'luigi.LocalTarget', 'luigi.LocalTarget', (['f"""reports/scores/best_random_search__{filename}.yaml"""'], {}), "(f'reports/scores/best_random_search__{filename}.yaml')\n", (951, 1006), False, 'import luigi\n'), ((1092, 1121), 'random.seed', 'random.seed', (['self.random_seed'], {}), '(self.random_seed)\n', (1103, 1121), False, 'import random\n'), ((1130, 1162), 'numpy.random.seed', 'np.random.seed', (['self.random_seed'], {}), '(self.random_seed)\n', (1144, 1162), True, 'import numpy as np\n'), ((1577, 1595), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (1593, 1595), False, 'import mlflow\n'), ((3529, 3565), 'mlflow.set_tag', 'mlflow.set_tag', (['"""best_run"""', 'best_run'], {}), "('best_run', best_run)\n", (3543, 3565), False, 'import mlflow\n'), ((3578, 3605), 'mlflow.log_metrics', 'mlflow.log_metrics', (['metrics'], {}), '(metrics)\n', (3596, 3605), False, 'import mlflow\n'), ((1411, 1442), 'random.choice', 'random.choice', (["['sag', 'lbfgs']"], {}), "(['sag', 'lbfgs'])\n", (1424, 1442), False, 'import random\n'), ((1461, 1486), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (1478, 1486), True, 'import numpy as np\n'), ((3670, 3759), 'yaml.dump', 'yaml.dump', (["{'metrics': metrics, 'best_run_id': best_run}", 'f'], {'default_flow_style': '(False)'}), "({'metrics': metrics, 'best_run_id': best_run}, f,\n default_flow_style=False)\n", (3679, 3759), False, 'import yaml\n'), ((1828, 1957), 'src.visualization.log_metrics.LogMetrics', 'LogMetrics', ([], {'model_name': 'self.model_name', 'model_params': "{**params, 'random_seed': self.random_seed}", 'experiment_id': 'experiment_id'}), "(model_name=self.model_name, model_params={**params,\n 'random_seed': self.random_seed}, experiment_id=experiment_id)\n", (1838, 1957), False, 'from src.visualization.log_metrics import LogMetrics\n'), ((2530, 2542), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (2539, 2542), False, 'import yaml\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
# # Computing surface energy from OER slabs and bulk formation energy
# ### Import Modules
# +
import os
print(os.getcwd())
import sys
import time; ti = time.time()
import pickle
import numpy as np
import pandas as pd
pd.set_option("display.max_columns", None)
import plotly.express as px
# #########################################################
from ase_modules.ase_methods import create_species_element_dict
# #########################################################
from proj_data import metal_atom_symbol
from proj_data import stoich_color_dict
# #########################################################
from methods import (
get_df_dft,
get_df_jobs_data,
get_df_jobs,
get_df_features_targets,
get_df_slab,
)
# #########################################################
# Data from PROJ_irox
sys.path.insert(0, os.path.join(
os.environ["PROJ_irox"], "data"))
from proj_data_irox import h2_ref, h2o_ref
# -
from methods import isnotebook
isnotebook_i = isnotebook()
if isnotebook_i:
from tqdm.notebook import tqdm
verbose = True
else:
from tqdm import tqdm
verbose = False
# ### Read Data
# +
df_features_targets = get_df_features_targets()
df_jobs = get_df_jobs()
df_dft = get_df_dft()
df_jobs_data = get_df_jobs_data()
# +
# # TEMP
# print(222 * "TEMP | ")
# df_features_targets = df_features_targets.sample(n=30)
# -
# ### Preparing oxygen reference energy
G_O = -1 * ((-1.23 * 2) - h2o_ref + h2_ref)
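# Oxygen reference from the water-splitting equilibrium (H2O -> O* + H2):
# G_O = G_H2O - G_H2 + 2 * 1.23 eV, i.e. two electrons at the ideal 1.23 V.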
# ### Main loop
# +
# #########################################################
data_dict_list = []
# #########################################################
for name_i, row_i in df_features_targets.iterrows():
# print(name_i)
# #####################################################
data_dict_i = dict()
# #####################################################
name_dict_i = dict(zip(
df_features_targets.index.names,
name_i))
# #####################################################
job_id_o_i = row_i[("data", "job_id_o", "")]
stoich_i = row_i[("data", "stoich", "")]
# #####################################################
# #####################################################
row_data_i = df_jobs_data.loc[job_id_o_i]
# #####################################################
elec_energy_i = row_data_i.pot_e
atoms_init_i = row_data_i.init_atoms
# #####################################################
# #####################################################
row_jobs_i = df_jobs.loc[job_id_o_i]
# #####################################################
bulk_id_i = row_jobs_i.bulk_id
# #####################################################
# #####################################################
row_dft_i = df_dft.loc[bulk_id_i]
# #####################################################
bulk_energy_pa_i = row_dft_i.energy_pa
# #####################################################
# Calculate surface area of slab
cell = atoms_init_i.cell
cross_prod_i = np.cross(cell[0], cell[1])
area_i = np.linalg.norm(cross_prod_i)
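    # |cell_a x cell_b| is the area of the surface unit cell spanned by the
    # two in-plane lattice vectors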
elem_dict_i = create_species_element_dict(
atoms_init_i,
include_all_elems=False,
elems_to_always_include=None,
)
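    # stoich strings look like "AB2"/"AB3" (assumed from the parsing below);
    # the numeric suffix is the number of O atoms per metal atom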
stoich_B_i = int(stoich_i[2:])
num_atoms_in_form_unit = stoich_B_i + 1
num_metal_atoms = elem_dict_i[metal_atom_symbol]
N_stoich_units = num_metal_atoms
num_stoich_O = num_metal_atoms * stoich_B_i
num_nonstoich_O = elem_dict_i["O"] - num_stoich_O
assert num_nonstoich_O >= 0, "Must have non-negative number of non-stoich Os"
surf_energy_i_0 = elec_energy_i - \
(N_stoich_units * num_atoms_in_form_unit * bulk_energy_pa_i) - \
(num_nonstoich_O * G_O)
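    # Unnormalized surface energy: slab energy minus the bulk reference for the
    # same number of formula units, minus the off-stoichiometric oxygens at G_O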
norm_mode = "area"
units = "J/m^2"
if norm_mode == "area":
norm_term = 2 * area_i
surf_energy_i_1 = surf_energy_i_0 / norm_term
    else:
        raise ValueError("Unsupported norm_mode: {}".format(norm_mode))
    if norm_mode == "area":
        if units == "eV/A^2":
            surf_energy_i_2 = surf_energy_i_1
        elif units == "J/m^2":
            # Convert eV/A^2 to J/m^2
            # (1E10 A/m) ^ 2 * (1.6022E-19 J/eV) = 16.022
            ev_A2__to__J_m2 = 16.022
            surf_energy_i_2 = surf_energy_i_1 * ev_A2__to__J_m2
surf_energy__area_J_m2 = surf_energy_i_2
# print(
# "SE: ",
# # str(np.round(surf_energy_i_2, 3)).zfill(5),
# np.round(surf_energy_i_2, 3),
# " J/m2",
# sep="")
# #####################################################
data_dict_i.update(name_dict_i)
# #####################################################
data_dict_i["SE__area_J_m2"] = surf_energy__area_J_m2
data_dict_i["num_nonstoich_O"] = num_nonstoich_O
data_dict_i["N_stoich_units"] = N_stoich_units
data_dict_i["stoich"] = stoich_i
# data_dict_i[""] =
# #####################################################
data_dict_list.append(data_dict_i)
# #####################################################
# #########################################################
df_SE = pd.DataFrame(data_dict_list)
df_SE = df_SE.set_index(["compenv", "slab_id", "active_site"])
# #########################################################
# -
df_SE
# ### Plot surface energy data histogram
# +
fig = px.histogram(df_SE,
x="SE__area_J_m2",
color="stoich",
barmode="overlay",
barnorm="percent",
color_discrete_map=stoich_color_dict,
    # barnorm: 'fraction' or 'percent'
)
# barmode options: 'group', 'overlay', 'relative'
fig.show()
# -
# ### Writting data to file
# Pickling data ###########################################
directory = os.path.join(
os.environ["PROJ_irox_oer"],
"workflow/surface_energy/out_data")
file_name_i = "df_SE.pickle"
path_i = os.path.join(directory, file_name_i)
if not os.path.exists(directory):
    os.makedirs(directory)
with open(path_i, "wb") as fle:
pickle.dump(df_SE, fle)
# #########################################################
# +
from methods import get_df_SE
df_SE_tmp = get_df_SE()
df_SE_tmp
# -
# #########################################################
print(20 * "# # ")
print("All done!")
print("Run time:", np.round((time.time() - ti) / 60, 3), "min")
print("surface_energy.ipynb")
print(20 * "# # ")
# #########################################################
# + active=""
#
#
#
# + jupyter={"source_hidden": true}
# dir(atoms_init_i)
# + jupyter={"source_hidden": true}
# row_i.data
# + jupyter={"source_hidden": true}
# elem_dict_i
# + jupyter={"source_hidden": true}
# data_dict_i
# + jupyter={"source_hidden": true}
# str(np.round(surf_energy_i_2, 3)).zfill(5)
# + jupyter={"source_hidden": true}
# norm_mode
# units
# + jupyter={"source_hidden": true}
# name_dict_i = dict(zip(
# df_features_targets.index.names,
# name_i))
# + jupyter={"source_hidden": true}
# norm_mode = "area"
# # units = "eV/A^2" # 'eV/A^2' or 'J/m^2'
# units = "J/m^2" # 'eV/A^2' or 'J/m^2'
# + jupyter={"source_hidden": true}
# surf_energy_i_0 = elec_energy_i - \
# (N_stoich_units * num_atoms_in_form_unit * bulk_energy_pa_i) - \
# (num_nonstoich_O * G_O)
# N_stoich_units
# num_nonstoich_O
# + jupyter={"source_hidden": true}
# df_features_targets.head()
# + jupyter={"source_hidden": true}
# def get_df_SE():
# """
# The data object is created by the following notebook:
# $PROJ_irox_oer/workflow/surface_energy/surface_energy.ipynb
# """
# #| - get_df_jobs
# # #####################################################
# # Reading df_jobs dataframe from pickle
# import pickle; import os
# path_i = os.path.join(
# os.environ["PROJ_irox_oer"],
# "workflow/surface_energy",
# "out_data/df_SE.pickle")
# with open(path_i, "rb") as fle:
# df_SE = pickle.load(fle)
# return(df_SE)
# #__|
# + jupyter={"source_hidden": true}
# num_nonstoich_O
# +
# # px.histogram?
import numpy as np
import random
ROW=10
COLUMN=10
def myboard():
board=np.zeros((ROW,COLUMN))
return board
def location(board, gotti, dicevalue):
    # map a square number 1..100 onto its (row, column) cell and mark it
    row = (dicevalue - 1) // 10
    col = (dicevalue - 1) % 10
    board[row][col] = gotti
def flip(board):
print(np.flip(board, 0))
def end(dicevalue, dicevalue2):
    if dicevalue >= 100:
        print("player 1 won")
        return True
    elif dicevalue2 >= 100:
        print("player 2 won")
        return True
    return False
gameover = False
board=myboard()
print(board)
turn=0
dicevalue=1
dicevalue2=1
while not gameover:
    if turn == 0:
        input("player 1 chance, press Enter to roll")
        col = random.randint(1, 6)
        print(col)
        dicevalue = min(dicevalue + col, 100)  # cap at the final square
        print(dicevalue)
        location(board, 1, dicevalue)
        turn += 1
    elif turn == 1:
        input("player 2 chance, press Enter to roll")
        col = random.randint(1, 6)
        print(col)
        dicevalue2 = min(dicevalue2 + col, 100)  # cap at the final square
        print(dicevalue2)
        location2(board, 2, dicevalue2)
        turn -= 1
    gameover = end(dicevalue, dicevalue2)
flip(board) | [
"numpy.zeros",
"random.randint",
"numpy.flip"
] | [((78, 101), 'numpy.zeros', 'np.zeros', (['(ROW, COLUMN)'], {}), '((ROW, COLUMN))\n', (86, 101), True, 'import numpy as np\n'), ((1918, 1935), 'numpy.flip', 'np.flip', (['board', '(0)'], {}), '(board, 0)\n', (1925, 1935), True, 'import numpy as np\n'), ((2286, 2306), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (2300, 2306), False, 'import random\n'), ((2483, 2503), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (2497, 2503), False, 'import random\n')] |
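The ten-branch `elif` ladders in `location`/`location2` implement a row-major mapping from a 1-based square number (1-100) onto the 10x10 board. A minimal sketch of the same mapping done arithmetically (the helper name `place_gotti` is illustrative, not from the original script):

```python
import numpy as np

def place_gotti(board, gotti, square):
    """Mark `gotti` on a 10x10 board for a 1-based square number (1-100)."""
    board[board == gotti] = 0          # clear the previous position
    row, col = divmod(square - 1, 10)  # same mapping as the elif chain
    board[row, col] = gotti
    return board

board = np.zeros((10, 10))
place_gotti(board, 1, 37)  # player 1 lands on square 37 -> board[3, 6]
```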
"""
Added file for pre-processing the testing dataset for testing
"""
import os
import shutil
import json
import numpy as np
import sys
class PoseParser:
def __init__(self, camera_json, gt_json, images_path, diameter, output_path, obj_dict=None):
self.camera_file_path = camera_json
with open(os.path.abspath(camera_json), 'r') as cfile:
cam = json.load(cfile)
self.cam_dict = cam
self.gt_file_path = gt_json
with open(os.path.abspath(gt_json), 'r') as gtfile:
gt = json.load(gtfile)
self.gt_dict = gt
self.class_id, self.instance_id = self.get_instance(obj_dict)
print(f"Getting object with class id {self.class_id} and instance id {self.instance_id}")
        if isinstance(diameter, list) and len(diameter) == 3:
            self.diameter = self.calculate_diameter(diameter)
        else:
            self.diameter = diameter
self.output_path = output_path
self.images_path = images_path
@staticmethod
def calculate_diameter(bbox_sizes):
return np.sqrt(bbox_sizes[0] ** 2 + bbox_sizes[1] ** 2 + bbox_sizes[2] ** 2)
def get_instance(self, obj_dict):
json_file = obj_dict["classes_object"]
object_type = obj_dict["object_type"]
object_name = obj_dict["object_name"]
class_id = -1
instance_id = -1
with open(os.path.abspath(json_file), 'r') as gtfile:
ids = json.load(gtfile)
for key in ids.keys():
if ids[key]["class_name"] == object_type:
class_id = int(key)
break
if class_id == -1:
print(f"Object class {object_type} not found")
for key, value in ids[str(class_id)]["objs"].items():
if value == object_name:
instance_id = int(key)
break
if instance_id == -1:
print(f"Object name {object_name} not found")
return class_id, instance_id
def create_txt_files(self):
cam_K = self.cam_dict["rs"]
cam_out_file = self.output_path + "camera.txt"
if os.path.exists(cam_out_file):
os.remove(cam_out_file)
K = [cam_K["fx"], 0, cam_K["cx"],
0, cam_K["fy"], cam_K["cy"],
0, 0, 1]
cam_str = ""
for i, k in enumerate(K):
if i in [2, 5]:
cam_str += str(k).zfill(16) + " \n"
else:
cam_str += str(k).zfill(16) + " "
with open(os.path.abspath(cam_out_file), 'w') as cam_file:
cam_file.write(cam_str)
diam_out_file = self.output_path + "diameter.txt"
if os.path.exists(diam_out_file):
os.remove(diam_out_file)
with open(os.path.abspath(diam_out_file), 'w') as diam_file:
diam_file.write(str(self.diameter))
    def create_npy_files(self):
        out_path = self.output_path + "pose/"
        if os.path.exists(out_path):
            shutil.rmtree(out_path)
        os.mkdir(out_path)
for i in range(len(self.gt_dict.keys())):
gt_params = [gt_dict for gt_dict in self.gt_dict[str(i)] if
(gt_dict["class_id"] == self.class_id and gt_dict["inst_id"] == self.instance_id)]
            assert len(gt_params) == 1, f"Error, exactly one object with class_id={self.class_id} and inst_id={self.instance_id} should be found, however gt_params={gt_params}"
gt_params = gt_params[0]
cam_R = np.array(gt_params["cam_R_m2c"]).reshape((3, 3))
# cam_T = np.array(gt_params["cam_t_m2c"]) / 1000.0
cam_T = np.array(gt_params["cam_t_m2c"])
rot_mat = np.zeros(shape=(3, 4))
rot_mat[:3, :3] = cam_R
rot_mat[:3, 3] = cam_T.flatten()
np.save(out_path + f"pose{i}.npy", rot_mat)
print("All poses successfully created")
def create_test_images(self):
rgb_path = self.output_path + "rgb/"
if os.path.exists(rgb_path):
shutil.rmtree(rgb_path)
os.mkdir(rgb_path)
all_folders = sorted(os.listdir(self.images_path))
if "lava" in all_folders:
all_folders.remove("lava")
for fold in all_folders:
shutil.copy2(self.images_path + fold + "/stokes_s0.jpg", rgb_path + str(fold) + ".jpg")
def assert_all_folders_okay(self):
# this function asserts that all the poses and values are stored correctly
list_files = os.listdir(self.output_path)
assert "model.ply" in list_files, "Error, no model.ply found in the dataset"
assert "camera.txt" in list_files, "Error, no camera file found in the dataset"
assert "diameter.txt" in list_files, "Error, no diameter file found in the dataset"
assert "rgb" in list_files, "Error, no RGB folder found in the dataset"
assert "mask" in list_files, "Error, no mask folder found in the dataset"
assert "pose" in list_files, "Error, no pose folder found in the dataset"
len_rgb_pics = len(os.listdir(self.output_path + "rgb/"))
len_mask_pics = len(os.listdir(self.output_path + "mask/"))
len_poses = len(os.listdir(self.output_path + "pose/"))
assert len_rgb_pics == len_mask_pics and len_rgb_pics == len_poses, "Error, the amount of images does not " \
"match the masks or poses "
def run_all(self):
# self.create_test_images()
self.create_txt_files()
self.create_npy_files()
print("All processes finished and tested okay")
if __name__ == "__main__":
files_path = "/home/arturo/datasets/testset_glass/"
camera_json = "/home/arturo/datasets/test_dataset_arturo/scene_camera.json"
ground_truth_json = "/home/arturo/datasets/sequence_12/scene_gt.json"
images_path = "/home/arturo/renders/glass/mitsuba_glass/output/"
object_get = {"classes_object": "/home/arturo/datasets/test_dataset_arturo/class_obj_taxonomy.json",
"object_name": "glass_beer_mug",
"object_type": "glass"
}
diameter = 0.163514
new_diameter_glass = [0.131568, 0.086612, 0.16365] # 3D sizes of the bbox are also supported
simple_parser = PoseParser(camera_json=camera_json, gt_json=ground_truth_json, images_path=images_path,
diameter=new_diameter_glass, output_path=files_path, obj_dict=object_get)
try:
arg = sys.argv[1]
except:
arg = None
if arg == "txt":
print("Creating txt files")
simple_parser.create_txt_files()
elif arg == "npy":
print("Creating npy poses")
simple_parser.create_npy_files()
else:
print("No argument provided, creating both txt and npy poses")
simple_parser.run_all()
| [
"os.mkdir",
"os.remove",
"json.load",
"numpy.save",
"os.path.abspath",
"os.path.exists",
"numpy.zeros",
"numpy.array",
"shutil.rmtree",
"os.listdir",
"numpy.sqrt"
] | [((1089, 1158), 'numpy.sqrt', 'np.sqrt', (['(bbox_sizes[0] ** 2 + bbox_sizes[1] ** 2 + bbox_sizes[2] ** 2)'], {}), '(bbox_sizes[0] ** 2 + bbox_sizes[1] ** 2 + bbox_sizes[2] ** 2)\n', (1096, 1158), True, 'import numpy as np\n'), ((2133, 2161), 'os.path.exists', 'os.path.exists', (['cam_out_file'], {}), '(cam_out_file)\n', (2147, 2161), False, 'import os\n'), ((2682, 2711), 'os.path.exists', 'os.path.exists', (['diam_out_file'], {}), '(diam_out_file)\n', (2696, 2711), False, 'import os\n'), ((2983, 3007), 'os.path.exists', 'os.path.exists', (['out_path'], {}), '(out_path)\n', (2997, 3007), False, 'import os\n'), ((3053, 3071), 'os.mkdir', 'os.mkdir', (['out_path'], {}), '(out_path)\n', (3061, 3071), False, 'import os\n'), ((4086, 4110), 'os.path.exists', 'os.path.exists', (['rgb_path'], {}), '(rgb_path)\n', (4100, 4110), False, 'import os\n'), ((4156, 4174), 'os.mkdir', 'os.mkdir', (['rgb_path'], {}), '(rgb_path)\n', (4164, 4174), False, 'import os\n'), ((4584, 4612), 'os.listdir', 'os.listdir', (['self.output_path'], {}), '(self.output_path)\n', (4594, 4612), False, 'import os\n'), ((378, 394), 'json.load', 'json.load', (['cfile'], {}), '(cfile)\n', (387, 394), False, 'import json\n'), ((537, 554), 'json.load', 'json.load', (['gtfile'], {}), '(gtfile)\n', (546, 554), False, 'import json\n'), ((1465, 1482), 'json.load', 'json.load', (['gtfile'], {}), '(gtfile)\n', (1474, 1482), False, 'import json\n'), ((2175, 2198), 'os.remove', 'os.remove', (['cam_out_file'], {}), '(cam_out_file)\n', (2184, 2198), False, 'import os\n'), ((2725, 2749), 'os.remove', 'os.remove', (['diam_out_file'], {}), '(diam_out_file)\n', (2734, 2749), False, 'import os\n'), ((3021, 3044), 'shutil.rmtree', 'shutil.rmtree', (['out_path'], {}), '(out_path)\n', (3034, 3044), False, 'import shutil\n'), ((3701, 3733), 'numpy.array', 'np.array', (["gt_params['cam_t_m2c']"], {}), "(gt_params['cam_t_m2c'])\n", (3709, 3733), True, 'import numpy as np\n'), ((3756, 3778), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3, 4)'}), '(shape=(3, 4))\n', (3764, 3778), True, 'import numpy as np\n'), ((3872, 3915), 'numpy.save', 'np.save', (["(out_path + f'pose{i}.npy')", 'rot_mat'], {}), "(out_path + f'pose{i}.npy', rot_mat)\n", (3879, 3915), True, 'import numpy as np\n'), ((4124, 4147), 'shutil.rmtree', 'shutil.rmtree', (['rgb_path'], {}), '(rgb_path)\n', (4137, 4147), False, 'import shutil\n'), ((4204, 4232), 'os.listdir', 'os.listdir', (['self.images_path'], {}), '(self.images_path)\n', (4214, 4232), False, 'import os\n'), ((5150, 5187), 'os.listdir', 'os.listdir', (["(self.output_path + 'rgb/')"], {}), "(self.output_path + 'rgb/')\n", (5160, 5187), False, 'import os\n'), ((5217, 5255), 'os.listdir', 'os.listdir', (["(self.output_path + 'mask/')"], {}), "(self.output_path + 'mask/')\n", (5227, 5255), False, 'import os\n'), ((5281, 5319), 'os.listdir', 'os.listdir', (["(self.output_path + 'pose/')"], {}), "(self.output_path + 'pose/')\n", (5291, 5319), False, 'import os\n'), ((315, 343), 'os.path.abspath', 'os.path.abspath', (['camera_json'], {}), '(camera_json)\n', (330, 343), False, 'import os\n'), ((478, 502), 'os.path.abspath', 'os.path.abspath', (['gt_json'], {}), '(gt_json)\n', (493, 502), False, 'import os\n'), ((1403, 1429), 'os.path.abspath', 'os.path.abspath', (['json_file'], {}), '(json_file)\n', (1418, 1429), False, 'import os\n'), ((2527, 2556), 'os.path.abspath', 'os.path.abspath', (['cam_out_file'], {}), '(cam_out_file)\n', (2542, 2556), False, 'import os\n'), ((2768, 2798), 'os.path.abspath', 'os.path.abspath', 
(['diam_out_file'], {}), '(diam_out_file)\n', (2783, 2798), False, 'import os\n'), ((3568, 3600), 'numpy.array', 'np.array', (["gt_params['cam_R_m2c']"], {}), "(gt_params['cam_R_m2c'])\n", (3576, 3600), True, 'import numpy as np\n')] |
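`PoseParser.calculate_diameter` treats the three bounding-box extents as the sides of a box and returns the length of its space diagonal, sqrt(dx^2 + dy^2 + dz^2). A quick check with the `new_diameter_glass` values from the `__main__` block (result rounded, for illustration):

```python
import numpy as np

bbox = [0.131568, 0.086612, 0.16365]  # x, y, z extents of the bounding box
diameter = np.sqrt(bbox[0] ** 2 + bbox[1] ** 2 + bbox[2] ** 2)
print(round(diameter, 6))             # -> 0.227141 (approx.)
```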
'''I/O operations'''
import numpy as np
import nibabel as nib
def load_bvec(fpath):
'''Loads bvec into numpy array
Args:
fpath (str): path to bvec file
Returns:
bvec (np.ndarray): bvec array, shape -> (3, b)
'''
bvec = np.genfromtxt(fpath, dtype=np.float32)
if bvec.shape[1] == 3:
bvec = bvec.T
return bvec
def load_bval(fpath):
'''Loads bval into numpy array
Args:
fpath (str): path to bvec file
Returns:
bval (np.ndarray): bval array, shape -> (b,)
'''
return np.genfromtxt(fpath, dtype=np.float32)
def load_nifti(nifti_fpath, dtype=np.float32, force_ras=False):
'''Loads NIfTI image into memory
Args:
nifti_fpath (str): Filepath to nifti image
dtype (type): Datatype to load array with.
Default: `np.float32`
force_ras (bool): Forces data into RAS data ordering scheme.
Default: `False`.
Returns:
data (np.ndarray): image data
affine (np.ndarray): affine transformation -> shape (4, 4)
'''
img = nib.load(nifti_fpath)
if force_ras:
if nib.aff2axcodes(img.affine) != ('R', 'A', 'S'):
print(f'Converting {img.get_filename()} to RAS co-ords')
img = nib.as_closest_canonical(img)
data = np.asarray(img.dataobj, dtype=dtype)
return data, img.affine
def save_nifti(data, affine, fpath, descrip=None):
'''Saves NIfTI image to disk.
Args:
data (np.ndarray): Data array
affine (np.ndarray): affine transformation -> shape (4, 4)
fpath (str): Filepath to save to.
descrip (str): Additional info to add to header description
Default: `None`.
'''
img = nib.Nifti2Image(data, affine)
if descrip is not None:
img.header['descrip'] = descrip
nib.save(img, fpath)
def save_bvec(bvec, fpath):
'''Saves bvec to file in shape (3, b)
'bval': (np.ndarray) -> shape (b,)
Args:
bvec (np.ndarray): bvec array, accepts shapes
(3, b) or (b, 3).
fpath (str): filepath to save bvec to.
'''
if bvec.shape[1] == 3:
bvec = bvec.T
np.savetxt(fpath, bvec, fmt='%1.6f')
def save_bval(bval, fpath):
'''Saves bval to file
Args:
bval (np.ndarray): bval array shape -> (b,)
fpath (str): filepath to save bval to
'''
np.savetxt(fpath, bval, newline=' ', fmt='%g')
def autocrop_dmri(dmri, mask):
'''Crops `dmri` and `mask` data
so dimensions that contain only zeros are cropped
Args:
dmri (np.ndarray): shape -> (i, j, k, b)
mask (np.ndarray): shape -> (i, j, k)
Returns:
dmri (np.ndarray): shape -> (i-ix, k-kx, j-jx, b)
mask (np.ndarray): shape -> (i-ix, k-kx, j-jx)
'''
def _get_data_mask(dmri, mask, axis):
new_mask = np.expand_dims(mask, axis=-1)
data_mask = np.concatenate([dmri, new_mask], axis=-1)
data_mask = np.sum(data_mask, axis=axis).astype(bool)
return data_mask
# Axis 0
data_mask = _get_data_mask(dmri, mask, (1, 2, 3))
dmri = dmri[data_mask, ...]
mask = mask[data_mask, ...]
# Axis 1
data_mask = _get_data_mask(dmri, mask, (0, 2, 3))
dmri = dmri[:, data_mask, ...]
mask = mask[:, data_mask, :]
# Axis 2
data_mask = _get_data_mask(dmri, mask, (0, 1, 3))
dmri = dmri[:, :, data_mask, :]
mask = mask[:, :, data_mask]
return dmri, mask
def split_image_to_octants(data):
'''Splits `data` in half in each
spatial dimension, yielding 8 patches at an 8th of
the size.
Args:
data (np.ndarray): shape -> (i, j, k, ...)
Returns:
klist (List[np.ndarray,]): list of split images
'''
i, j, k = data.shape[0], data.shape[1], data.shape[2]
idx = i // 2
ilist = []
ilist.append(data[0:idx, ...])
ilist.append(data[idx:, ...])
jdx = j // 2
jlist = []
for idata in ilist:
jlist.append(idata[:, 0:jdx, ...])
jlist.append(idata[:, jdx:, ...])
kdx = k // 2
klist = []
for jdata in jlist:
klist.append(jdata[:, :, 0:kdx, ...])
klist.append(jdata[:, :, kdx:, ...])
return klist
| [
"nibabel.as_closest_canonical",
"numpy.sum",
"nibabel.load",
"numpy.asarray",
"numpy.savetxt",
"numpy.genfromtxt",
"numpy.expand_dims",
"nibabel.save",
"nibabel.aff2axcodes",
"numpy.concatenate",
"nibabel.Nifti2Image"
] | [((260, 298), 'numpy.genfromtxt', 'np.genfromtxt', (['fpath'], {'dtype': 'np.float32'}), '(fpath, dtype=np.float32)\n', (273, 298), True, 'import numpy as np\n'), ((560, 598), 'numpy.genfromtxt', 'np.genfromtxt', (['fpath'], {'dtype': 'np.float32'}), '(fpath, dtype=np.float32)\n', (573, 598), True, 'import numpy as np\n'), ((1085, 1106), 'nibabel.load', 'nib.load', (['nifti_fpath'], {}), '(nifti_fpath)\n', (1093, 1106), True, 'import nibabel as nib\n'), ((1312, 1348), 'numpy.asarray', 'np.asarray', (['img.dataobj'], {'dtype': 'dtype'}), '(img.dataobj, dtype=dtype)\n', (1322, 1348), True, 'import numpy as np\n'), ((1738, 1767), 'nibabel.Nifti2Image', 'nib.Nifti2Image', (['data', 'affine'], {}), '(data, affine)\n', (1753, 1767), True, 'import nibabel as nib\n'), ((1842, 1862), 'nibabel.save', 'nib.save', (['img', 'fpath'], {}), '(img, fpath)\n', (1850, 1862), True, 'import nibabel as nib\n'), ((2185, 2221), 'numpy.savetxt', 'np.savetxt', (['fpath', 'bvec'], {'fmt': '"""%1.6f"""'}), "(fpath, bvec, fmt='%1.6f')\n", (2195, 2221), True, 'import numpy as np\n'), ((2399, 2445), 'numpy.savetxt', 'np.savetxt', (['fpath', 'bval'], {'newline': '""" """', 'fmt': '"""%g"""'}), "(fpath, bval, newline=' ', fmt='%g')\n", (2409, 2445), True, 'import numpy as np\n'), ((2876, 2905), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (2890, 2905), True, 'import numpy as np\n'), ((2926, 2967), 'numpy.concatenate', 'np.concatenate', (['[dmri, new_mask]'], {'axis': '(-1)'}), '([dmri, new_mask], axis=-1)\n', (2940, 2967), True, 'import numpy as np\n'), ((1136, 1163), 'nibabel.aff2axcodes', 'nib.aff2axcodes', (['img.affine'], {}), '(img.affine)\n', (1151, 1163), True, 'import nibabel as nib\n'), ((1271, 1300), 'nibabel.as_closest_canonical', 'nib.as_closest_canonical', (['img'], {}), '(img)\n', (1295, 1300), True, 'import nibabel as nib\n'), ((2988, 3016), 'numpy.sum', 'np.sum', (['data_mask'], {'axis': 'axis'}), '(data_mask, axis=axis)\n', (2994, 3016), True, 'import numpy as np\n')] |
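A minimal round-trip sketch for the helpers above (the file name and the import path `io_module` are placeholders, assuming the module above is importable under that name):

```python
import numpy as np
from io_module import load_nifti, save_nifti, split_image_to_octants

data = np.random.rand(64, 64, 64).astype(np.float32)
affine = np.eye(4)  # identity affine, i.e. voxel == world coordinates
save_nifti(data, affine, "example.nii.gz", descrip="random test volume")
loaded, loaded_affine = load_nifti("example.nii.gz", force_ras=True)
octants = split_image_to_octants(loaded)
assert len(octants) == 8 and octants[0].shape == (32, 32, 32)
```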
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
interview_meter.py: Analyze camera time for studio interviews.
Author: <NAME> github.com/joaquincabezas
Date: 10/04/2020
Instructions:
Extract the reference frames using ffmpeg
ffmpeg -ss 00:00:XX -t 00:00:00.01 -i YOURMOVIE.MP4 -r 25.0 REFERENCE_NAME.jpg
Replace XX with the exact second where the scene is displaying
(from https://stackoverflow.com/questions/8287759/extracting-frames-from-mp4-flv)
You have to extract every reference image and leave it in the directory
Now run the script with the argument -f VIDEOFILE.mp4
The script generates simple stats and a file VIDEOFILE.csv to further analyze stats
Please see the Notebook in the project to analyze and create plots
TODO:
1. Automatic scene change detection
2. Classifier for different scenes
3. Background removal
4. Face detection
5. Face identification
"""
import argparse
import glob
import os
import numpy as np
import cv2
from skimage.metrics import structural_similarity as ssim
def scenes():
""" Gather the different scenes available to compare each video frame
Args:
None
    Return:
        scenarios (dict): The grayscale image of each scenario, prepared for comparison
        scenes_name (dict): The index and name of each scenario
"""
scenarios = {}
scenes_name = {}
index = 0
# loop over the images we already extracted (see instructions)
for image_path in glob.glob("./*.png"):
filename = image_path[image_path.rfind("/") + 1:]
image = cv2.imread(image_path)
# We strip out the "./" and the extension of the file to get just the name
name = os.path.splitext(filename)[0][2:]
# For SSIM we use grayscale images
scenarios[name] = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# We create a dictionary with the indexes to use just numbers in numpy array
scenes_name[index] = name
index += 1
return scenarios, scenes_name
def stats(project_name, matches, scenes_name):
""" Creates the statistics and outputs some simple stats
Args:
project_name (str): Name of the video we are processing
matches (numpy array): Array containing which scene is displayed in each second
        scenes_name (dict): Names of the different scenarios
Return:
None
"""
np.savetxt(project_name + '.csv', matches, delimiter=',', fmt='%1d')
print("Simple stats. Display time:")
for idx, name in scenes_name.items():
percentage = round(100*np.count_nonzero(matches == idx)/len(matches))
print('For ' + name + ': ' + str(percentage) + '%')
def open_video(input_file):
""" Open the video file and returns basic information
Args:
input_file (str): Route to the video file
Return:
cap (VideoCapture) OpenCV2 object
total_frames (int): Number of frames of the video (we will be using less, only one per sec)
frames_per_sec (int): Frame rate
"""
    cap = cv2.VideoCapture(input_file)
    frames_per_sec = round(cap.get(cv2.CAP_PROP_FPS))  # frame rate
    total_frames = round(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    return cap, total_frames, frames_per_sec
def main():
""" Main function
Prepare the input data and go through a loop for every frame in the video
Compared every key_frame (1 per sec) to the different scenes and prepare an array
Then call stats to save this information for future use
"""
# Get arguments from the command line
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', required=True)
parser.add_argument('-s', '--show')
args = parser.parse_args()
input_file = args.file
show_video = args.show
project_name = input_file[0:-4]
# Open the input file video and return also the total number of frames
cap, total_frames, frames_per_sec = open_video(input_file)
# We prepare an array with every keyframe (1 frame per second)
# We use numpy so you can develop any statistics study easily
total_keyframes = round(total_frames/frames_per_sec)
matches = np.zeros(total_keyframes, dtype=np.int32)
scenarios, scenes_name = scenes()
results = {}
keyframe_count = 0
frame_count = 0
while cap.isOpened():
ret, frame = cap.read()
# We evaluate only one frame per second
is_key_frame = (frame_count % frames_per_sec) == 0
# We analyze each key frame
if ret and is_key_frame:
video_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# We compare ech scenario with the current keyframe
for key, scenario_image in scenarios.items():
score, _diff = ssim(scenario_image, video_image, full=True)
results[key] = score
# We select the most probable match and add to the array in the current keyframe
max_result = max(results, key=results.get)
max_result_idx = list(scenes_name.keys())[list(scenes_name.values()).index(max_result)]
matches[keyframe_count] = max_result_idx
# We can show the video to check the analysis is working properly
if show_video:
                cv2.putText(frame, str(matches[keyframe_count]),
                            (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
cv2.imshow('Frame', frame)
# And finish it at any time by pressing 'q' (typical for CV)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
keyframe_count += 1
        if keyframe_count % max(1, round(total_keyframes / 20)) == 0:
print_progress(keyframe_count, total_keyframes, 'Progress:', 'Complete', length=70)
# Once we reach the end of the video, we exit the loop
if keyframe_count == total_keyframes:
break
# In case not every keyframe is available, we exit once arriving at the end of the video
if frame_count == total_frames:
break
frame_count += 1
# When everything is done, release the video capture object
cap.release()
# Closes all the frames
cv2.destroyAllWindows()
stats(project_name, matches, scenes_name)
def print_progress(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""
from https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filled_length = int(length * iteration // total)
progress_bar = fill * filled_length + '-' * (length - filled_length)
print('\r%s |%s| %s%% %s' % (prefix, progress_bar, percent, suffix))
# Print New Line on Complete
if iteration == total:
print()
if __name__ == "__main__":
main()
| [
"cv2.putText",
"argparse.ArgumentParser",
"numpy.count_nonzero",
"cv2.cvtColor",
"cv2.waitKey",
"numpy.savetxt",
"numpy.zeros",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.imread",
"skimage.metrics.structural_similarity",
"os.path.splitext",
"glob.glob",
"cv2.destroyAllWindows"
] | [((1516, 1536), 'glob.glob', 'glob.glob', (['"""./*.png"""'], {}), "('./*.png')\n", (1525, 1536), False, 'import glob\n'), ((2426, 2494), 'numpy.savetxt', 'np.savetxt', (["(project_name + '.csv')", 'matches'], {'delimiter': '""","""', 'fmt': '"""%1d"""'}), "(project_name + '.csv', matches, delimiter=',', fmt='%1d')\n", (2436, 2494), True, 'import numpy as np\n'), ((3079, 3107), 'cv2.VideoCapture', 'cv2.VideoCapture', (['input_file'], {}), '(input_file)\n', (3095, 3107), False, 'import cv2\n'), ((3567, 3592), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3590, 3592), False, 'import argparse\n'), ((4156, 4197), 'numpy.zeros', 'np.zeros', (['total_keyframes'], {'dtype': 'np.int32'}), '(total_keyframes, dtype=np.int32)\n', (4164, 4197), True, 'import numpy as np\n'), ((6216, 6239), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6237, 6239), False, 'import cv2\n'), ((1613, 1635), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1623, 1635), False, 'import cv2\n'), ((1839, 1878), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1851, 1878), False, 'import cv2\n'), ((4560, 4599), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (4572, 4599), False, 'import cv2\n'), ((1735, 1761), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1751, 1761), False, 'import os\n'), ((4754, 4798), 'skimage.metrics.structural_similarity', 'ssim', (['scenario_image', 'video_image'], {'full': '(True)'}), '(scenario_image, video_image, full=True)\n', (4758, 4798), True, 'from skimage.metrics import structural_similarity as ssim\n'), ((5260, 5364), 'cv2.putText', 'cv2.putText', (['frame', 'matches[keyframe_count]', '(10, 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(255, 255, 255)'], {}), '(frame, matches[keyframe_count], (10, 20), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))\n', (5271, 5364), False, 'import cv2\n'), ((5404, 5430), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (5414, 5430), False, 'import cv2\n'), ((2610, 2642), 'numpy.count_nonzero', 'np.count_nonzero', (['(matches == idx)'], {}), '(matches == idx)\n', (2626, 2642), True, 'import numpy as np\n'), ((5528, 5543), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (5539, 5543), False, 'import cv2\n')] |
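The heart of the matcher is one SSIM comparison per (key frame, reference scene) pair; `ssim` requires both grayscale images to have the same shape. A standalone sketch of that step with synthetic frames:

```python
import numpy as np
from skimage.metrics import structural_similarity as ssim

reference = np.random.randint(0, 256, (120, 160), dtype=np.uint8)
frame = np.roll(reference, 5, axis=1)  # simulate a slightly shifted camera

score, _diff = ssim(reference, frame, full=True)
print(f"SSIM score: {score:.3f}")  # 1.0 would mean identical frames
```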
import logging
import os
import sys
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
from bisect import bisect_right
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
import torch
from torch import nn
from .RawDeformationNetSolverV0 import RawDeformationNetSolverV0
from model import define_net
from model.loss.ChamferDistancePytorch.chamfer3D.dist_chamfer_3D import chamfer_3DDist
from util.util_dir import mkdir
from util.util_visual import plot_3d_point_cloud
from metrics.miou_shape import calc_miou
logger = logging.getLogger('base')
class RawDeformationNetSolverV1(RawDeformationNetSolverV0):
    """Variant of the V0 solver that additionally evaluates segmentation (mIoU)."""
def feed_data(self, data, test=False):
self.src_shape = data['src_shape'].to(self.device)
self.src_path = data['src_path']
self.src_seg = data['src_seg']
self.tgt_shape = data['tgt_shape'].to(self.device)
self.tgt_path = data['tgt_path']
self.tgt_seg = data['tgt_seg']
self.parts = data['label'][0]
def evaluate(self):
return_dict = OrderedDict()
with torch.no_grad():
for k, v in self.cri_dict.items():
if 'fit' in k:
loss_fit = v['cri'](self.tgt_shape, self.deform_shape)
return_dict['loss_' + k] = loss_fit.item()
elif 'sym' in k:
flipped = v['sym_vec'] * self.deform_shape
loss_sym = v['cri'](flipped, self.deform_shape)
return_dict['loss_' + k] = loss_sym.item()
# evaluate iou
miou = calc_miou(self.tgt_shape,
self.deform_shape,
self.src_seg,
self.tgt_seg,
self.parts,
self.nnd)
return_dict['miou'] = miou
return return_dict
def update_learning_rate(self):
for s in self.scheduler_list:
s.step(self.step)
def calc_nnd(self, pc1, pc2):
dist1, dist2, _, _ = self.nnd(
pc1.transpose(2, 1).contiguous(),
pc2.transpose(2, 1).contiguous())
return dist1.mean() + dist2.mean()
def calc_emd(self, pc1, pc2):
emdist = self.emd(
pc1.transpose(2, 1).contiguous(),
pc2.transpose(2, 1).contiguous())
return emdist.mean()
def get_current_log(self):
return self.log_dict
def log_current(self, epoch, tb_logger=None):
logs = self.log_dict
message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(
epoch, self.step, self.get_current_learning_rate())
for k, v in logs.items():
message += '{:s}: {:.4e} '.format(k, v)
# tensorboard logger
logger.info(message)
if tb_logger is not None:
for k, v in logs.items():
tb_logger.add_scalar('loss/%s' % k, v,
self.step)
# log loss weights
for k, v in self.cri_dict.items():
tb_logger.add_scalar('weight/weight_%s' % k, v['weight'], self.step)
def get_current_visual(self):
fig = plt.figure(figsize=(9, 3))
num_point = 2048
colors = np.linspace(start=0, stop=2*np.pi, num=2048)
ax_src = fig.add_subplot(1, 3, 1, projection='3d')
pc_src = self.src_shape.cpu().numpy()[0]
plot_3d_point_cloud(pc_src[2], -pc_src[0], pc_src[1],
axis=ax_src, show=False, lim=[((-1, 1))] * 3,
c=colors, cmap='hsv')
ax_src.set_title('source shape')
ax_tgt = fig.add_subplot(1, 3, 2, projection='3d')
pc_tgt = self.tgt_shape.cpu().numpy()[0]
plot_3d_point_cloud(pc_tgt[2], -pc_tgt[0], pc_tgt[1],
axis=ax_tgt, show=False, lim=[((-1, 1))] * 3,
c=colors, cmap='hsv')
ax_tgt.set_title('target shape')
ax_deform = fig.add_subplot(1, 3, 3, projection='3d')
pc_deform = self.deform_shape.cpu().numpy()[0]
plot_3d_point_cloud(pc_deform[2], -pc_deform[0], pc_deform[1],
axis=ax_deform, show=False, lim=[((-1, 1))] * 3,
c=colors, cmap='hsv')
ax_deform.set_title('deform shape')
plt.tight_layout()
return fig
def print_network(self):
# Generator
s, n = self.get_network_description(self.model)
if isinstance(self.model, nn.DataParallel):
net_struc_str = '{} - {}'.format(
self.model.__class__.__name__,
self.model.module.__class__.__name__)
else:
net_struc_str = '{}'.format(self.model.__class__.__name__)
logger.info('Network G structure: {}, with parameters: {:,d}'.format(
net_struc_str, n))
# logger.info(s)
def load(self):
if self.opt['path']['strict_load'] is None:
strict = True
else:
strict = self.opt['path']['strict_load']
load_path = self.opt['path']['pretrain_model']
if load_path is not None:
logger.info('Loading model from [{:s}] ...'.format(load_path))
self.load_network(load_path, self.model, strict)
def save(self, save_label):
self.save_network(self.model, 'model', save_label)
| [
"matplotlib.pyplot.tight_layout",
"metrics.miou_shape.calc_miou",
"warnings.filterwarnings",
"matplotlib.pyplot.figure",
"numpy.linspace",
"collections.OrderedDict",
"torch.no_grad",
"util.util_visual.plot_3d_point_cloud",
"logging.getLogger"
] | [((171, 226), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (194, 226), False, 'import warnings\n'), ((576, 601), 'logging.getLogger', 'logging.getLogger', (['"""base"""'], {}), "('base')\n", (593, 601), False, 'import logging\n'), ((1106, 1119), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1117, 1119), False, 'from collections import OrderedDict\n'), ((3258, 3284), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 3)'}), '(figsize=(9, 3))\n', (3268, 3284), True, 'import matplotlib.pyplot as plt\n'), ((3327, 3373), 'numpy.linspace', 'np.linspace', ([], {'start': '(0)', 'stop': '(2 * np.pi)', 'num': '(2048)'}), '(start=0, stop=2 * np.pi, num=2048)\n', (3338, 3373), True, 'import numpy as np\n'), ((3497, 3621), 'util.util_visual.plot_3d_point_cloud', 'plot_3d_point_cloud', (['pc_src[2]', '(-pc_src[0])', 'pc_src[1]'], {'axis': 'ax_src', 'show': '(False)', 'lim': '([(-1, 1)] * 3)', 'c': 'colors', 'cmap': '"""hsv"""'}), "(pc_src[2], -pc_src[0], pc_src[1], axis=ax_src, show=\n False, lim=[(-1, 1)] * 3, c=colors, cmap='hsv')\n", (3516, 3621), False, 'from util.util_visual import plot_3d_point_cloud\n'), ((3842, 3966), 'util.util_visual.plot_3d_point_cloud', 'plot_3d_point_cloud', (['pc_tgt[2]', '(-pc_tgt[0])', 'pc_tgt[1]'], {'axis': 'ax_tgt', 'show': '(False)', 'lim': '([(-1, 1)] * 3)', 'c': 'colors', 'cmap': '"""hsv"""'}), "(pc_tgt[2], -pc_tgt[0], pc_tgt[1], axis=ax_tgt, show=\n False, lim=[(-1, 1)] * 3, c=colors, cmap='hsv')\n", (3861, 3966), False, 'from util.util_visual import plot_3d_point_cloud\n'), ((4197, 4333), 'util.util_visual.plot_3d_point_cloud', 'plot_3d_point_cloud', (['pc_deform[2]', '(-pc_deform[0])', 'pc_deform[1]'], {'axis': 'ax_deform', 'show': '(False)', 'lim': '([(-1, 1)] * 3)', 'c': 'colors', 'cmap': '"""hsv"""'}), "(pc_deform[2], -pc_deform[0], pc_deform[1], axis=\n ax_deform, show=False, lim=[(-1, 1)] * 3, c=colors, cmap='hsv')\n", (4216, 4333), False, 'from util.util_visual import plot_3d_point_cloud\n'), ((4450, 4468), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4466, 4468), True, 'import matplotlib.pyplot as plt\n'), ((1133, 1148), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1146, 1148), False, 'import torch\n'), ((1639, 1737), 'metrics.miou_shape.calc_miou', 'calc_miou', (['self.tgt_shape', 'self.deform_shape', 'self.src_seg', 'self.tgt_seg', 'self.parts', 'self.nnd'], {}), '(self.tgt_shape, self.deform_shape, self.src_seg, self.tgt_seg,\n self.parts, self.nnd)\n', (1648, 1737), False, 'from metrics.miou_shape import calc_miou\n')] |
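`calc_nnd` hands (batch, 3, n_points) tensors to the compiled chamfer kernel after transposing them to (batch, n_points, 3). For reference, a dependency-free PyTorch sketch of the symmetric Chamfer distance it is assumed to compute (the CUDA kernel returning squared nearest-neighbour distances is an assumption here):

```python
import torch

def chamfer_distance(pc1, pc2):
    """Symmetric Chamfer distance for point clouds shaped (batch, n_points, 3)."""
    d = torch.cdist(pc1, pc2) ** 2      # pairwise squared distances (batch, n1, n2)
    dist1 = d.min(dim=2).values        # nearest point in pc2 for each point of pc1
    dist2 = d.min(dim=1).values        # nearest point in pc1 for each point of pc2
    return dist1.mean() + dist2.mean()

pc1 = torch.rand(2, 1024, 3)
pc2 = torch.rand(2, 1024, 3)
print(chamfer_distance(pc1, pc2))
```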
"""Module to read, check and write a HDSR meetpuntconfiguratie."""
__title__ = "histTags2mpt"
__description__ = "evaluate a HDSR FEWS-config against a csv of CAW histTags"
__version__ = "0.1.0"
__author__ = "<NAME>"
__author_email__ = "<EMAIL>"
__license__ = "MIT License"
from meetpuntconfig.fews_utilities import FewsConfig, xml_to_dict
from pathlib import Path
import json
import numpy as np
import pandas as pd
import logging
from openpyxl import load_workbook
from openpyxl.styles import Font, PatternFill
import os
import sys
import re
from shapely.geometry import Point
pd.options.mode.chained_assignment = None
def idmap2tags(row, idmap):
"""Add FEWS-locationIds to hist_tags in df.apply() method."""
exloc, expar = row["serie"].split("_", 1)
fews_locs = [
col["internalLocation"]
for col in idmap
if col["externalLocation"] == exloc and col["externalParameter"] == expar
]
if len(fews_locs) == 0:
fews_locs = np.NaN
return fews_locs
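# Illustrative example (hypothetical idmap record, not from the real config):
#   idmap = [{"externalLocation": "610", "externalParameter": "Q.B.1",
#             "internalLocation": "KW761001"}]
#   row = pd.Series({"serie": "610_Q.B.1"})
#   idmap2tags(row, idmap)  # -> ["KW761001"]
# The hist_tag splits on the first "_" into external location "610" and
# external parameter "Q.B.1"; np.NaN is returned when nothing matches.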
def get_validation_attribs(validation_rules, int_pars=None, loc_type=None):
"""Get attributes from validationRules."""
if int_pars is None:
int_pars = [rule["parameter"] for rule in validation_rules]
result = []
for rule in validation_rules:
if "type" in rule.keys():
if rule["type"] == loc_type:
if any(re.match(rule["parameter"], int_par) for int_par in int_pars):
for key, attribute in rule["extreme_values"].items():
if isinstance(attribute, list):
result += [value["attribute"] for value in attribute]
else:
result += [attribute]
elif any(re.match(rule["parameter"], int_par) for int_par in int_pars):
for key, attribute in rule["extreme_values"].items():
if isinstance(attribute, list):
result += [value["attribute"] for value in attribute]
else:
result += [attribute]
return result
def update_hlocs(row, h_locs, mpt_df):
"""Add startdate and enddate op hoofdloc dataframe with df.apply() method."""
loc_id = row.name
start_date = row["STARTDATE"]
end_date = row["ENDDATE"]
if loc_id in h_locs:
start_date = (
mpt_df[mpt_df.index.str.contains(loc_id[0:-1])]["STARTDATE"].dropna().min()
)
end_date = (
mpt_df[mpt_df.index.str.contains(loc_id[0:-1])]["ENDDATE"].dropna().max()
)
return start_date, end_date
def update_date(row, mpt_df, date_threshold):
"""Return start and end-date in df.apply() method."""
int_loc = row["LOC_ID"]
if int_loc in mpt_df.index:
start_date = mpt_df.loc[int_loc]["STARTDATE"].strftime("%Y%m%d")
end_date = mpt_df.loc[int_loc]["ENDDATE"]
if end_date > date_threshold:
end_date = pd.Timestamp(year=2100, month=1, day=1)
end_date = end_date.strftime("%Y%m%d")
else:
start_date = row["START"]
end_date = row["EIND"]
return start_date, end_date
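# Note: update_date treats a location whose last measurement lies beyond
# date_threshold as still active, stretching its end date to 2100-01-01;
# earlier end dates are written out as measured.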
def update_histtag(row, grouper):
"""Assign last histTag to waterstandsloc in df.apply method."""
return next(
(
df.sort_values("total_max_end_dt", ascending=False)["serie"].values[0]
for loc_id, df in grouper
if loc_id == row["LOC_ID"]
),
None,
)
def _sort_validation_attribs(rule):
result = {}
for key, value in rule.items():
if isinstance(value, str):
result[key] = [value]
elif isinstance(value, list):
periods = [val["period"] for val in value]
attribs = [val["attribute"] for val in value]
result[key] = [attrib for _, attrib in sorted(zip(periods, attribs))]
return result
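# Illustrative example (hypothetical rule, not from the real validation config):
#   rule = {"hmax": "HARDMAX",
#           "smax": [{"period": 2, "attribute": "WIN_SMAX"},
#                    {"period": 1, "attribute": "ZOM_SMAX"}]}
#   _sort_validation_attribs(rule)
#   # -> {"hmax": ["HARDMAX"], "smax": ["ZOM_SMAX", "WIN_SMAX"]}
# String attributes are wrapped in a one-item list; listed attributes are
# returned ordered by their period.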
class MeetpuntConfig:
"""Meetpuntconfig class."""
def __init__(self, config_path, log_level="INFO"):
self.paths = dict()
self.fews_config = None
self.location_sets = dict()
self.hist_tags = None
self.hist_tags_ignore = None
self.fixed_sheets = None
self.idmap_files = None
self.idmap_sections = None
self.external_parameters_allowed = None
self.consistency = None
self.parameter_mapping = None
self.validation_rules = None
self.logging = logging
self.hoofdloc = None
self.subloc = None
self.waterstandloc = None
self.mswloc = None
self.mpt_hist_tags = None
self._locs_mapping = dict(
hoofdlocaties="hoofdloc",
sublocaties="subloc",
waterstandlocaties="waterstandloc",
mswlocaties="mswloc",
)
self.logging.basicConfig(level=os.environ.get("LOGLEVEL", log_level))
self._read_config(Path(config_path))
def _read_config(self, config_json):
if config_json.exists():
with open(config_json) as src:
config = json.load(src)
workdir = Path(config_json).parent
else:
self.logging.error(f"{config_json} does not exist")
sys.exit()
# add paths to config
for key, path in config["paden"].items():
path = Path(path)
if not path.is_absolute():
path = workdir.joinpath(path).resolve()
if path.exists():
self.paths[key] = path
else:
                if path.suffix == "":
                    self.logging.warning(f"{path} does not exist. Folder will be created")
                    path.mkdir()
                    self.paths[key] = path
else:
self.logging.error(
(
f"{path} does not exist. "
f"Please define existing file "
f"in {config_json}."
)
)
sys.exit()
# add fews_config
self.fews_config = FewsConfig(self.paths["fews_config"])
# add location_sets
for key, value in config["location_sets"].items():
if value in self.fews_config.locationSets.keys():
if "csvFile" in self.fews_config.locationSets[value].keys():
self.location_sets[key] = {
"id": value,
"gdf": self.fews_config.get_locations(value),
}
else:
self.logging.error((f"{key} not a csvFile location-set"))
else:
self.logging.error(
(
f"locationSet {key} specified in {config_json} "
f"not in fews-config"
)
)
# add rest of config
self.idmap_files = config["idmap_files"]
self.idmap_sections = config["idmap_sections"]
self.external_parameters_allowed = config["external_parameters_allowed"]
self.parameter_mapping = config["parameter_mapping"]
self.validation_rules = config["validation_rules"]
self.fixed_sheets = config["fixed_sheets"]
# read consistency df from input-excel
self.consistency = pd.read_excel(
self.paths["consistency_xlsx"], sheet_name=None, engine="openpyxl"
)
self.consistency = {
key: value
for key, value in self.consistency.items()
if key in self.fixed_sheets
}
def _read_hist_tags(self, force=False):
if (not self.hist_tags) or force:
if "hist_tags_csv" in self.paths.keys():
self.logging.info(f"reading histags: {self.paths['hist_tags_csv']}")
dtype_cols = ["total_min_start_dt", "total_max_end_dt"]
self.hist_tags = pd.read_csv(
self.paths["hist_tags_csv"],
parse_dates=dtype_cols,
sep=None,
engine="python",
)
for col in dtype_cols:
if not pd.api.types.is_datetime64_dtype(self.hist_tags[col]):
self.logging.error(
(
f"col '{col}' in '{self.paths['hist_tags_csv']} "
"can't be converted to np.datetime64 format. "
"Check if values are dates."
)
)
sys.exit()
def _read_hist_tags_ignore(self, force=False):
if (not self.hist_tags_ignore) or force:
if "mpt_ignore_csv" in self.paths.keys():
self.logging.info(
f"Reading hist tags to be ingored from "
f"{self.paths['mpt_ignore_csv']}"
)
self.hist_tags_ignore = pd.read_csv(
self.paths["mpt_ignore_csv"], sep=None, header=0, engine="python"
)
elif "histTag_ignore" in self.consistency.keys():
self.hist_tags_ignore = self.consistency["histTag_ignore"]
self.logging.info(
f"Reading hist tags to be ignored from "
f"{self.paths['consistency_xlsx']}"
)
else:
self.logging.error(
(
f"specify a histTag_ignore worksheet in "
f"{self.paths['consistency_xlsx']} or a csv-file "
"in the config-json"
)
)
sys.exit()
self.hist_tags_ignore["UNKNOWN_SERIE"] = self.hist_tags_ignore[
"UNKNOWN_SERIE"
].str.replace("#", "")
def _get_idmaps(self, idmap_files=None):
if not idmap_files:
idmap_files = self.idmap_files
idmaps = [
xml_to_dict(self.fews_config.IdMapFiles[idmap])["idMap"]["map"]
for idmap in idmap_files
]
return [item for sublist in idmaps for item in sublist]
def _read_locs(self):
self.hoofdloc = self.fews_config.get_locations("OPVLWATER_HOOFDLOC")
self.subloc = self.fews_config.get_locations("OPVLWATER_SUBLOC")
self.waterstandloc = self.fews_config.get_locations(
"OPVLWATER_WATERSTANDEN_AUTO"
)
self.mswloc = self.fews_config.get_locations("MSW_STATIONS")
def _update_staff_gauge(self, row):
"""Assign upstream and downstream staff gauges to subloc."""
result = {"HBOV": "", "HBEN": ""}
for key in result.keys():
df = self.waterstandloc.loc[self.waterstandloc["LOC_ID"] == row[key]]
if not df.empty:
result[key] = df["PEILSCHAAL"].values[0]
return result["HBOV"], result["HBEN"]
def hist_tags_to_mpt(self, sheet_name="mpt"):
"""Convert histTag-ids to mpt-ids."""
if self.hist_tags is None:
self._read_hist_tags()
idmaps = self._get_idmaps()
hist_tags_df = self.hist_tags.copy()
hist_tags_df["fews_locid"] = hist_tags_df.apply(
idmap2tags, args=[idmaps], axis=1
)
hist_tags_df = hist_tags_df[hist_tags_df["fews_locid"].notna()]
mpt_hist_tags_df = hist_tags_df.explode("fews_locid").reset_index(drop=True)
self.mpt_hist_tags = mpt_hist_tags_df
mpt_df = pd.concat(
[
mpt_hist_tags_df.groupby(["fews_locid"], sort=False)[
"total_min_start_dt"
].min(),
mpt_hist_tags_df.groupby(["fews_locid"], sort=False)[
"total_max_end_dt"
].max(),
],
axis=1,
)
mpt_df = mpt_df.sort_index(axis=0)
mpt_df.columns = ["STARTDATE", "ENDDATE"]
mpt_df.index.name = "LOC_ID"
kw_locs = list(mpt_df[mpt_df.index.str.contains("KW", regex=False)].index)
h_locs = np.unique(["{}0".format(loc[0:-1]) for loc in kw_locs])
h_locs_missing = [loc for loc in h_locs if loc not in list(mpt_df.index)]
h_locs_df = pd.DataFrame(
data={
"LOC_ID": h_locs_missing,
"STARTDATE": [pd.NaT] * len(h_locs_missing),
"ENDDATE": [pd.NaT] * len(h_locs_missing),
}
)
h_locs_df = h_locs_df.set_index("LOC_ID")
mpt_df = pd.concat([mpt_df, h_locs_df], axis=0)
mpt_df[["STARTDATE", "ENDDATE"]] = mpt_df.apply(
update_hlocs, args=[h_locs, mpt_df], axis=1, result_type="expand"
)
mpt_df = mpt_df.sort_index()
self.consistency["mpt"] = mpt_df
def check_idmap_sections(self, sheet_name="idmap section error"):
"""Check if all KW/OW locations are in the correct section."""
self.consistency[sheet_name] = pd.DataFrame(
columns=[
"bestand",
"externalLocation",
"externalParameter",
"internalLocation",
"internalParameter",
]
)
for idmap, subsecs in self.idmap_sections.items():
for section_type, sections in subsecs.items():
for section in sections:
if section_type == "KUNSTWERKEN":
prefix = "KW"
if section_type == "WATERSTANDLOCATIES":
prefix = "OW"
if section_type == "MSWLOCATIES":
prefix = "(OW|KW)"
pattern = fr"{prefix}\d{{6}}$"
idmapping = xml_to_dict(
self.fews_config.IdMapFiles[idmap], **section
)["idMap"]["map"]
idmap_wrong_section = [
idmap
for idmap in idmapping
if not bool(re.match(pattern, idmap["internalLocation"]))
]
if idmap_wrong_section:
section_start = (
section["section_start"]
if "section_start" in section.keys()
else ""
)
section_end = (
section["section_end"]
if "section_end" in section.keys()
else ""
)
self.logging.warning(
(
f"{len(idmap_wrong_section)} "
f"internalLocations not {prefix}XXXXXX "
f"between {section_start} and {section_end} "
f"in {idmap}."
)
)
df = pd.DataFrame(idmap_wrong_section)
df["sectie"] = section_start
df["bestand"] = idmap
self.consistency[sheet_name] = pd.concat(
[self.consistency[sheet_name], df], axis=0
)
def check_missing_hist_tags(self, sheet_name="histTags noMatch"):
"""Check if hisTags are missing in config."""
if self.hist_tags_ignore is None:
self._read_hist_tags_ignore()
if self.hist_tags is None:
self._read_hist_tags()
hist_tags_df = self.hist_tags.copy()
idmaps = self._get_idmaps()
hist_tags_df["fews_locid"] = self.hist_tags.apply(
idmap2tags, args=[idmaps], axis=1
)
hist_tags_no_match_df = hist_tags_df[hist_tags_df["fews_locid"].isna()]
hist_tags_no_match_df = hist_tags_no_match_df[
~hist_tags_no_match_df["serie"].isin(self.hist_tags_ignore["UNKNOWN_SERIE"])
]
hist_tags_no_match_df = hist_tags_no_match_df.drop("fews_locid", axis=1)
hist_tags_no_match_df.columns = ["UNKNOWN_SERIE", "STARTDATE", "ENDDATE"]
hist_tags_no_match_df = hist_tags_no_match_df.set_index("UNKNOWN_SERIE")
self.consistency[sheet_name] = hist_tags_no_match_df
if not self.consistency[sheet_name].empty:
self.logging.warning(
"{} histTags not in idMaps".format(len(self.consistency[sheet_name]))
)
else:
self.logging.info("all histTags in idMaps")
def check_ignored_hist_tags(
self, sheet_name="histTags ignore match", idmap_files=["IdOPVLWATER"]
):
"""Check if ignored histTags do match with idmap."""
if self.hist_tags_ignore is None:
self._read_hist_tags_ignore()
if self.hist_tags is None:
self._read_hist_tags()
hist_tags_opvlwater_df = self.hist_tags.copy()
idmaps = self._get_idmaps(idmap_files=idmap_files)
hist_tags_opvlwater_df["fews_locid"] = self.hist_tags.apply(
idmap2tags, args=[idmaps], axis=1
)
hist_tags_opvlwater_df = hist_tags_opvlwater_df[
hist_tags_opvlwater_df["fews_locid"].notna()
]
hist_tag_ignore_match_df = self.hist_tags_ignore[
self.hist_tags_ignore["UNKNOWN_SERIE"].isin(hist_tags_opvlwater_df["serie"])
]
hist_tag_ignore_match_df = hist_tag_ignore_match_df.set_index("UNKNOWN_SERIE")
self.consistency[sheet_name] = hist_tag_ignore_match_df
if not self.consistency[sheet_name].empty:
self.logging.warning(
(
f"{len(self.consistency[sheet_name])} "
r"histTags should not be in histTags ignore."
)
)
else:
self.logging.info("hisTags ignore list consistent with idmaps")
def check_double_idmaps(self, sheet_name="idmaps double"):
"""Check if identical idmaps are doubled."""
self.consistency[sheet_name] = pd.DataFrame(
columns=[
"bestand",
"externalLocation",
"externalParameter",
"internalLocation",
"internalParameter",
]
)
for idmap_file in self.idmap_files:
idmaps = self._get_idmaps(idmap_files=[idmap_file])
idmap_doubles = [idmap for idmap in idmaps if idmaps.count(idmap) > 1]
if len(idmap_doubles) > 0:
idmap_doubles = list(
{
idmap["externalLocation"]: idmap for idmap in idmap_doubles
}.values()
)
df = pd.DataFrame(
idmap_doubles,
columns=[
"internalLocation",
"externalLocation",
"internalParameter",
"externalParameter",
],
)
df["bestand"] = idmap_file
self.consistency[sheet_name] = pd.concat(
[self.consistency[sheet_name], df], axis=0
)
self.logging.warning(
"{} double idmap(s) in {}".format(len(idmap_doubles), idmap_file)
)
else:
self.logging.info("No double idmaps in {}".format(idmap_file))
def check_missing_pars(self, sheet_name="pars missing"):
"""Check if internal parameters in idmaps are missing in paramters.xml."""
config_parameters = list(
self.fews_config.get_parameters(dict_keys="parameters").keys()
)
idmaps = self._get_idmaps()
id_map_parameters = [id_map["internalParameter"] for id_map in idmaps]
params_missing = [
parameter
for parameter in id_map_parameters
if parameter not in config_parameters
]
if len(params_missing) == 0:
self.logging.info("all internal paramters are in config")
else:
self.logging.warning(
"{} parameter(s) in idMaps are missing in config".format(
len(params_missing)
)
)
self.consistency[sheet_name] = pd.DataFrame({"parameters": params_missing})
# self.consistency[sheet_name] = self.consistency[sheet_name].set_index(
# "parameters"
# )
def check_hloc_consistency(self, sheet_name="hloc error"):
"""Check if all sublocs of same hloc have consistent parameters."""
if "xy_ignore" in self.consistency.keys():
xy_ignore_df = self.consistency["xy_ignore"]
else:
xy_ignore_df = pd.DataFrame({"internalLocation": [], "x": [], "y": []})
if self.hoofdloc is None:
self._read_locs()
hloc_errors = {
"LOC_ID": [],
"SUB_LOCS": [],
"LOC_NAME": [],
"GEOMETRY": [],
"SYSTEEM": [],
"RAYON": [],
"KOMPAS": [],
}
grouper = self.subloc.groupby("PAR_ID")
par_dict = {
"LOC_ID": [],
"LOC_NAME": [],
"X": [],
"Y": [],
"ALLE_TYPES": [],
"START": [],
"EIND": [],
"SYSTEEM": [],
"RAYON": [],
"KOMPAS": [],
}
for loc_id, gdf in grouper:
caw_code = loc_id[2:-2]
errors = dict.fromkeys(
["LOC_NAME", "GEOMETRY", "SYSTEEM", "RAYON", "KOMPAS"], False
)
fields = dict.fromkeys(par_dict.keys(), None)
fields["LOC_ID"] = loc_id
loc_names = np.unique(
gdf["LOC_NAME"]
.str.extract(pat=f"([A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*)")
.values
)
if not len(loc_names) == 1:
errors["LOC_NAME"] = ",".join(loc_names)
else:
fields["LOC_NAME"] = loc_names[0]
if any([re.match(loc, loc_id) for loc in xy_ignore_df["internalLocation"]]):
fields["X"], fields["Y"] = next(
[row["x"], row["y"]]
for index, row in xy_ignore_df.iterrows()
if re.match(row["internalLocation"], loc_id)
)
else:
geoms = gdf["geometry"].unique()
if not len(geoms) == 1:
errors["GEOMETRY"] = ",".join(
[f"({geom.x} {geom.y})" for geom in geoms]
)
else:
fields["X"] = geoms[0].x
fields["Y"] = geoms[0].y
all_types = list(gdf["TYPE"].unique())
all_types.sort()
fields["ALLE_TYPES"] = "/".join(all_types)
fields["START"] = gdf["START"].min()
fields["EIND"] = gdf["EIND"].max()
for attribuut in ["SYSTEEM", "RAYON", "KOMPAS"]:
vals = gdf[attribuut].unique()
if not len(vals) == 1:
errors[attribuut] = ",".join(vals)
else:
fields[attribuut] = vals[0]
if None not in fields.values():
for key, value in fields.items():
par_dict[key].append(value)
if any(errors.values()):
hloc_errors["LOC_ID"].append(loc_id)
hloc_errors["SUB_LOCS"].append(",".join(gdf["LOC_ID"].values))
for key, value in errors.items():
if value is False:
value = ""
hloc_errors[key].append(value)
self.consistency[sheet_name] = pd.DataFrame(hloc_errors)
if self.consistency[sheet_name].empty:
self.logging.info("no consistency errors. Hlocs rewritten from sublocs")
par_gdf = pd.DataFrame(par_dict)
columns = list(self.hoofdloc.columns)
drop_cols = [
col
for col in self.hoofdloc.columns
if (col in par_gdf.columns) & (not col == "LOC_ID")
]
drop_cols = drop_cols + ["geometry"]
self.hoofdloc = self.hoofdloc.drop(drop_cols, axis=1)
self.hoofdloc = par_gdf.merge(self.hoofdloc, on="LOC_ID")
self.hoofdloc["geometry"] = self.hoofdloc.apply(
(lambda x: Point(float(x["X"]), float(x["Y"]))), axis=1
)
self.hoofdloc = self.hoofdloc[columns]
else:
self.logging.warning(
"{} Errors in consistency hlocs".format(
len(self.consistency[sheet_name])
)
)
self.logging.warning(
(
"Hoofdlocaties will only be re-written "
"when consistency errors are resolved"
)
)
def check_expar_errors_intloc_missing(
self, expar_sheet="exPar error", intloc_sheet="intLoc missing"
):
"""Check on wrong external parameters and missing internal locations."""
expars_allowed = self.external_parameters_allowed
if self.hoofdloc is None:
self._read_locs()
ex_par_errors = {
"internalLocation": [],
"locationType": [],
"exParError": [],
"types": [],
"FQ": [],
"I.X": [],
"IX.": [],
"SS./SM.": [],
}
int_loc_missing = []
idmap_df = pd.DataFrame.from_dict(self._get_idmaps(["IdOPVLWATER"]))
for int_loc, loc_group in idmap_df.groupby("internalLocation"):
errors = dict.fromkeys(["I.X", "IX.", "FQ", "SS./SM."], False)
ex_pars = np.unique(loc_group["externalParameter"].values)
ex_pars_gen = [re.sub(r"\d", ".", ex_par) for ex_par in ex_pars]
if int_loc in self.hoofdloc["LOC_ID"].values:
loc_properties = self.hoofdloc[self.hoofdloc["LOC_ID"] == int_loc]
loc_type = "hoofdloc"
elif int_loc in self.subloc["LOC_ID"].values:
loc_properties = self.subloc[self.subloc["LOC_ID"] == int_loc]
loc_type = "subloc"
regexes = ["HR.$"]
elif int_loc in self.waterstandloc["LOC_ID"].values:
loc_type = "waterstandloc"
elif int_loc in self.mswloc["LOC_ID"].values:
loc_type = "mswloc"
else:
loc_type = None
int_loc_missing += [int_loc]
if loc_type in ["hoofdloc", "subloc"]:
all_types = loc_properties["ALLE_TYPES"].values[0].split("/")
all_types = [item.lower() for item in all_types]
elif loc_type == "waterstandloc":
all_types = ["waterstandloc"]
if loc_type == "subloc":
sub_type = self.subloc[self.subloc["LOC_ID"] == int_loc]["TYPE"].values[
0
]
regexes += [
j
for i in [
values
for keys, values in expars_allowed.items()
if keys in all_types
]
for j in i
]
                regexes = list(dict.fromkeys(regexes))  # de-duplicate, keep order
ex_par_error = [
ex_par
for ex_par in ex_pars
if not any(
[
regex.match(ex_par)
for regex in [re.compile(rex) for rex in regexes]
]
)
]
if sub_type == "schuif":
if not any(
[ex_par for ex_par in ex_pars_gen if ex_par in ["SS.", "SM."]]
):
errors["SS./SM."] = True
if any(
[
ex_par
for ex_par in ex_pars_gen
if ex_par in ["I.B", "I.H", "I.L"]
]
):
if not any(
[
ex_par
for ex_par in ex_pars_gen
if ex_par in ["IB.", "IH.", "IL."]
]
):
errors["IX."] = True
elif any(
[
ex_par
for ex_par in ex_pars_gen
if ex_par in ["IB.", "IH.", "IL."]
]
):
errors["I.X"] = True
if "FQ." in ex_pars_gen:
if not any(
[
ex_par
for ex_par in ex_pars_gen
if ex_par in ["IB.", "IH.", "IL.", "I.B", "I.H", "I.L"]
]
):
errors["FQ"] = True
elif loc_type == "hoofdloc":
regexes = ["HS.$", "QR.$", "QS.$", "WR", "WS"]
ex_par_error = [
ex_par
for ex_par in ex_pars
if not any(
[
regex.match(ex_par)
for regex in [re.compile(rex) for rex in regexes]
]
)
]
else:
ex_par_error = []
            if (len(ex_par_error) > 0) or any(errors.values()):
ex_par_errors["internalLocation"].append(int_loc)
ex_par_errors["locationType"].append(loc_type)
ex_par_errors["exParError"].append(",".join(ex_par_error))
ex_par_errors["types"].append(",".join(all_types))
for key, value in errors.items():
ex_par_errors[key].append(value)
self.consistency[expar_sheet] = pd.DataFrame(ex_par_errors)
self.consistency[intloc_sheet] = pd.DataFrame(
{"internalLocation": int_loc_missing}
)
if len(self.consistency[expar_sheet]) == 0:
self.logging.info("geen ExPar errors")
else:
self.logging.warning(
"{} locaties met ExPar errors".format(
len(self.consistency[expar_sheet])
)
)
if len(self.consistency[intloc_sheet]) == 0:
self.logging.info("All internal locations are in locationSets")
else:
self.logging.warning(
"{} Internal locations are not in locationSets.".format(
len(self.consistency[intloc_sheet])
)
)
def check_expar_missing(self, sheet_name="exPar missing"):
"""Check if external paramters are missing on locations."""
ex_par_missing = {
"internalLocation": [],
"exPars": [],
"QR": [],
"QS": [],
"HS": [],
}
if self.hoofdloc is None:
self._read_locs()
idmap_df = pd.DataFrame.from_dict(self._get_idmaps(["IdOPVLWATER"]))
for index, row in self.hoofdloc.iterrows():
missings = dict.fromkeys(["QR", "QS", "HS"], False)
int_loc = row["LOC_ID"]
loc_group = next(
(
df
for loc, df in idmap_df.groupby("internalLocation")
if loc == int_loc
),
pd.DataFrame(),
)
if not loc_group.empty:
ex_pars = np.unique(loc_group["externalParameter"].values)
ex_pars_gen = [re.sub(r"\d", ".", ex_par) for ex_par in ex_pars]
else:
ex_pars = []
ex_pars_gen = []
if not ("HS." in ex_pars_gen):
missings["HS"] = True
if not ("QR." in ex_pars_gen):
missings["QR"] = True
if not ("QS." in ex_pars_gen):
missings["QS"] = True
if any(missings.values()):
ex_par_missing["internalLocation"].append(int_loc)
ex_par_missing["exPars"].append(",".join(ex_pars))
for key, value in missings.items():
ex_par_missing[key].append(value)
self.consistency[sheet_name] = pd.DataFrame(ex_par_missing)
if len(self.consistency[sheet_name]) == 0:
self.logging.info("No ExPar missing")
else:
self.logging.warning(
"{} Locations with ExPar missing".format(
len(self.consistency[sheet_name])
)
)
def check_exloc_intloc_consistency(self, sheet_name="exLoc error"):
"""Check if external locations are consistent with internal locations."""
ex_loc_errors = {"internalLocation": [], "externalLocation": []}
idmap_df = pd.DataFrame.from_dict(self._get_idmaps(["IdOPVLWATER"]))
for loc_group in idmap_df.groupby("externalLocation"):
int_loc_error = []
ex_loc = loc_group[0]
int_locs = np.unique(loc_group[1]["internalLocation"].values)
if len(ex_loc) == 3:
if not bool(re.match("8..$", ex_loc)):
int_loc_error = [
int_loc
for int_loc in int_locs
if not bool(re.match(f"...{ex_loc}..$", int_loc))
]
else:
for loc_type in ["KW", "OW"]:
int_locs_select = [
int_loc
for int_loc in int_locs
if bool(re.match(f"{loc_type}.", int_loc))
]
if (
len(
np.unique([int_loc[:-1] for int_loc in int_locs_select])
)
> 1
):
int_loc_error += list(int_locs_select)
if len(ex_loc) == 4:
if not bool(re.match(".8..$", ex_loc)):
int_loc_error += [
int_loc
for int_loc in int_locs
if not bool(re.match(f"..{ex_loc}..$", int_loc))
]
else:
for loc_type in ["KW", "OW"]:
int_locs_select = [
int_loc
for int_loc in int_locs
if bool(re.match(f"{loc_type}.", int_loc))
]
if (
len(
np.unique([int_loc[:-1] for int_loc in int_locs_select])
)
> 1
):
int_loc_error += list(int_locs_select)
if "exLoc_ignore" in self.consistency.keys():
if (
int(ex_loc)
in self.consistency["exLoc_ignore"]["externalLocation"].values
):
int_loc_error = [
int_loc
for int_loc in int_loc_error
if int_loc
not in self.consistency["exLoc_ignore"][
self.consistency["exLoc_ignore"]["externalLocation"]
== int(ex_loc)
]["internalLocation"].values
]
for int_loc in int_loc_error:
ex_loc_errors["internalLocation"].append(int_loc)
ex_loc_errors["externalLocation"].append(ex_loc)
self.consistency[sheet_name] = pd.DataFrame(ex_loc_errors)
if len(self.consistency[sheet_name]) == 0:
self.logging.info("all external and internal locations consistent")
else:
self.logging.warning(
"{} external locations inconsistent with internal locations".format(
len(self.consistency[sheet_name])
)
)
def check_timeseries_logic(self, sheet_name="timeSeries error"):
"""Check if timeseries are consistent with internal locations and parameters."""
if "TS800_ignore" in self.consistency.keys():
ts_ignore_df = self.consistency["TS800_ignore"]
else:
ts_ignore_df = pd.DataFrame(
{"internalLocation": [], "externalLocation": []}
)
idmap_df = pd.DataFrame.from_dict(self._get_idmaps(["IdOPVLWATER"]))
if self.subloc is None:
self._read_locs()
        idmap_subloc_df = idmap_df[
            idmap_df["internalLocation"].isin(self.subloc["LOC_ID"].values)
        ].copy()  # copy so the .loc assignments below do not write to a view
idmap_subloc_df.loc[:, "type"] = idmap_subloc_df["internalLocation"].apply(
(lambda x: self.subloc[self.subloc["LOC_ID"] == x]["TYPE"].values[0])
)
idmap_subloc_df.loc[:, "loc_groep"] = idmap_subloc_df["internalLocation"].apply(
(lambda x: x[0:-1])
)
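        # sub-locations that share all but their last ID character form one
        # "loc_groep": they belong to the same parent location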
ts_errors = {
"internalLocation": [],
"eind": [],
"internalParameters": [],
"externalParameters": [],
"externalLocations": [],
"type": [],
"fout": [],
}
for loc_group, group_df in idmap_subloc_df.groupby("loc_groep"):
ex_locs = np.unique(group_df["externalLocation"].values)
ex_locs_dict = {ex_loc: idx for idx, ex_loc in enumerate(ex_locs)}
split_ts = [
key
for key in ex_locs_dict.keys()
if any(
[
regex.match(key)
for regex in [re.compile(rex) for rex in ["8..", ".8.."]]
]
)
]
ex_locs_skip = ts_ignore_df[
ts_ignore_df["internalLocation"].isin(group_df["internalLocation"])
]["externalLocation"]
split_ts = [
key
for key in split_ts
                if str(key) not in ex_locs_skip.values.astype(str)
]
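            # Remap each external location to the group index of its parent
            # (its code without the leading digit), unless it is itself a
            # split-timeseries location.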
ex_locs_dict = {
k: (
ex_locs_dict[k[1:]]
if (k[1:] in ex_locs_dict.keys()) and (k not in split_ts)
else v
)
for (k, v) in ex_locs_dict.items()
}
org_uniques = np.unique(
[val for key, val in ex_locs_dict.items() if key not in split_ts]
)
if (len(org_uniques) == 1) & (len(split_ts) == 1):
ex_locs_dict = {
k: (org_uniques[0] if k in split_ts else v)
for (k, v) in ex_locs_dict.items()
}
group_df["ex_loc_group"] = group_df["externalLocation"].apply(
(lambda x: ex_locs_dict[x])
)
for int_loc, loc_df in group_df.groupby("internalLocation"):
                sub_type = self.subloc[self.subloc["LOC_ID"] == int_loc]["TYPE"].values[0]
end_time = pd.to_datetime(
self.subloc[self.subloc["LOC_ID"] == int_loc]["EIND"].values[0]
)
ex_pars = np.unique(loc_df["externalParameter"].values)
int_pars = np.unique(loc_df["internalParameter"].values)
ex_locs = np.unique(loc_df["externalLocation"].values)
if sub_type in ["krooshek", "debietmeter"]:
if any([re.match("HR.", ex_par) for ex_par in ex_pars]):
ts_errors["internalLocation"].append(int_loc)
ts_errors["eind"].append(end_time)
ts_errors["internalParameters"].append(",".join(int_pars))
ts_errors["externalParameters"].append(",".join(ex_pars))
ts_errors["externalLocations"].append(",".join(ex_locs))
ts_errors["type"].append(sub_type)
ts_errors["fout"].append(f"{sub_type} met stuurpeil")
else:
if not any([re.match("HR.", ex_par) for ex_par in ex_pars]):
if any(
[
re.match("HR.", ex_par)
for ex_par in np.unique(group_df["externalParameter"])
]
):
if sub_type not in ["totaal", "vispassage"]:
if pd.Timestamp.now() < end_time:
sp_locs = np.unique(
group_df[
group_df["externalParameter"].str.match(
"HR."
)
]["internalLocation"]
)
ts_errors["internalLocation"].append(int_loc)
ts_errors["eind"].append(end_time)
ts_errors["internalParameters"].append(
",".join(int_pars)
)
ts_errors["externalParameters"].append(
",".join(ex_pars)
)
ts_errors["externalLocations"].append(
",".join(ex_locs)
)
ts_errors["type"].append(sub_type)
ts_errors["fout"].append(
(
f"{sub_type} zonder stuurpeil "
f"({','.join(sp_locs)} wel)"
)
)
else:
time_series = loc_df.groupby(
["ex_loc_group", "externalParameter"]
)
sp_series = [
series
for series in time_series
if bool(re.match("HR.", series[0][1]))
]
for idx, series in enumerate(sp_series):
ex_par = series[0][1]
ex_locs = series[1]["externalLocation"]
int_par = np.unique(series[1]["internalParameter"])
if len(int_par) > 1:
ts_errors["internalLocation"].append(int_loc)
ts_errors["eind"].append(end_time)
ts_errors["internalParameters"].append(
",".join(int_pars)
)
ts_errors["externalParameters"].append(
",".join(ex_pars)
)
ts_errors["externalLocations"].append(",".join(ex_locs))
ts_errors["type"].append(sub_type)
ts_errors["fout"].append(
(
f'{",".join(int_par)} coupled to 1 sp-series ('
f"exPar: {ex_par}, exLoc(s)): "
f'{",".join(ex_locs)}'
)
)
other_series = [
series
for idy, series in enumerate(sp_series)
if not idy == idx
]
other_int_pars = [
np.unique(series[1]["internalParameter"])
for series in other_series
]
if len(other_int_pars) > 0:
other_int_pars = np.concatenate(other_int_pars)
conflicting_pars = [
par for par in int_par if par in other_int_pars
]
if len(conflicting_pars) > 0:
                                    # 2 sp-series coupled to the same FEWS parameter
ts_errors["internalLocation"].append(int_loc)
ts_errors["eind"].append(end_time)
ts_errors["internalParameters"].append(
",".join(int_pars)
)
ts_errors["externalParameters"].append(
",".join(ex_pars)
)
ts_errors["externalLocations"].append(",".join(ex_locs))
ts_errors["type"].append(sub_type)
ts_errors["fout"].append(
(
f'{",".join(conflicting_pars)} gekoppeld aan '
f"sp-serie (exPar: {ex_par}, exLoc(s)):"
f'{",".join(ex_locs)}'
)
)
self.consistency[sheet_name] = pd.DataFrame(ts_errors)
if len(self.consistency[sheet_name]) == 0:
self.logging.info(
(
"logical coupling of all timeseries to internal "
"locations/parameters"
)
)
else:
self.logging.warning(
(
f"{len(self.consistency[sheet_name])} timeseries "
r"coupled illogical to internal locations/parameters"
)
)
def check_validation_rules(self, sheet_name="validation error"):
"""Check if validation rules are consistent."""
valid_errors = {
"internalLocation": [],
"start": [],
"eind": [],
"internalParameters": [],
"fout_type": [],
"fout_beschrijving": [],
}
location_sets_dict = xml_to_dict(
self.fews_config.RegionConfigFiles["LocationSets"]
)["locationSets"]["locationSet"]
if self.hoofdloc is None:
self._read_locs()
for set_name in self.validation_rules.keys():
location_set_meta = next(
loc_set
for loc_set in location_sets_dict
if loc_set["id"] == self.location_sets[set_name]["id"]
)["csvFile"]
location_set_gdf = getattr(self, self._locs_mapping[set_name])
attrib_files = location_set_meta["attributeFile"]
if not isinstance(attrib_files, list):
attrib_files = [attrib_files]
attrib_files = [
attrib_file
for attrib_file in attrib_files
if "attribute" in attrib_file.keys()
]
for attrib_file in attrib_files:
attribs = attrib_file["attribute"]
join_id = attrib_file["id"].replace("%", "")
if not isinstance(attrib_file["attribute"], list):
attribs = [attribs]
attribs = [
attrib["number"].replace("%", "")
for attrib in attribs
if "number" in attrib.keys()
]
attrib_df = pd.read_csv(
self.fews_config.MapLayerFiles[
attrib_file["csvFile"].replace(".csv", "")
],
sep=None,
engine="python",
)
attrib_df.rename(columns={join_id: "LOC_ID"}, inplace=True)
drop_cols = [
col for col in attrib_df if col not in attribs + ["LOC_ID"]
]
attrib_df = attrib_df.drop(columns=drop_cols, axis=1)
location_set_gdf = location_set_gdf.merge(
attrib_df, on="LOC_ID", how="outer"
)
validation_rules = self.validation_rules[set_name]
            validation_attributes = get_validation_attribs(validation_rules)
idmap_df = pd.DataFrame.from_dict(self._get_idmaps(["IdOPVLWATER"]))
params_df = pd.DataFrame.from_dict(
{
int_loc: [df["internalParameter"].values]
for int_loc, df in idmap_df.groupby("internalLocation")
},
orient="index",
columns=["internalParameters"],
)
for (idx, row) in location_set_gdf.iterrows():
int_loc = row["LOC_ID"]
row = row.dropna()
# if set_name == 'sublocaties':
# loc_type = row['TYPE']
if int_loc in params_df.index:
int_pars = np.unique(params_df.loc[int_loc]["internalParameters"])
else:
int_pars = []
attribs_required = get_validation_attribs(validation_rules, int_pars)
attribs_missing = [
attrib for attrib in attribs_required if attrib not in row.keys()
]
attribs_obsolete = [
attrib
                    for attrib in validation_attributes
if (attrib not in attribs_required) and (attrib in row.keys())
]
attribs = [
attrib
for attrib in attribs_required
if attrib not in attribs_missing
]
for key, value in {
"missend": attribs_missing,
"overbodig": attribs_obsolete,
}.items():
if len(value) > 0:
valid_errors["internalLocation"] += [int_loc]
valid_errors["start"] += [row["START"]]
valid_errors["eind"] += [row["EIND"]]
valid_errors["internalParameters"] += [",".join(int_pars)]
valid_errors["fout_type"] += [key]
valid_errors["fout_beschrijving"] += [",".join(value)]
for validation_rule in validation_rules:
errors = {"fout_type": None, "fout_beschrijving": []}
param = validation_rule["parameter"]
if any(re.match(param, int_par) for int_par in int_pars):
rule = validation_rule["extreme_values"]
rule = _sort_validation_attribs(rule)
if all(key in ["hmax", "hmin"] for key in rule.keys()):
for hmin, hmax in zip(rule["hmin"], rule["hmax"]):
if all(attrib in row.keys() for attrib in [hmin, hmax]):
                            if row[hmax] < row[hmin]:
                                errors["fout_type"] = "value"
                                errors["fout_beschrijving"] += [
                                    f"{hmax} < {hmin}"
                                ]
elif all(
key in rule.keys()
for key in ["hmax", "smax", "smin", "hmin"]
):
hmax = rule["hmax"][0]
hmin = rule["hmin"][0]
for smin, smax in zip(rule["smin"], rule["smax"]):
if all(attrib in row.keys() for attrib in [smin, smax]):
                                if row[smax] <= row[smin]:
                                    errors["fout_type"] = "value"
                                    errors["fout_beschrijving"] += [
                                        f"{smax} <= {smin}"
                                    ]
                                if row[hmax] < row[smax]:
                                    errors["fout_type"] = "value"
                                    errors["fout_beschrijving"] += [
                                        f"{hmax} < {smax}"
                                    ]
                                if row[smin] < row[hmin]:
                                    errors["fout_type"] = "value"
                                    errors["fout_beschrijving"] += [
                                        f"{smin} < {hmin}"
                                    ]
valid_errors["internalLocation"] += [row["LOC_ID"]] * len(
errors["fout_beschrijving"]
)
valid_errors["start"] += [row["START"]] * len(
errors["fout_beschrijving"]
)
valid_errors["eind"] += [row["EIND"]] * len(
errors["fout_beschrijving"]
)
valid_errors["internalParameters"] += [",".join(int_pars)] * len(
errors["fout_beschrijving"]
)
valid_errors["fout_type"] += [errors["fout_type"]] * len(
errors["fout_beschrijving"]
)
valid_errors["fout_beschrijving"] += errors["fout_beschrijving"]
self.consistency[sheet_name] = pd.DataFrame(valid_errors)
self.consistency[sheet_name] = self.consistency[sheet_name].drop_duplicates()
if len(self.consistency[sheet_name]) == 0:
self.logging.info("No missing incorrect validation rules")
else:
self.logging.warning(
"{} validation rules contain errors/are missing".format(
len(self.consistency[sheet_name])
)
)
def check_intpar_expar_consistency(self, sheet_name="par mismatch"):
"""Check if internal and external parameters are consistent."""
par_errors = {
"internalLocation": [],
"internalParameter": [],
"externalParameter": [],
"fout": [],
}
# internal_parameters = [mapping[
# 'internal'] for mapping in self.parameter_mapping]
idmap_df = pd.DataFrame.from_dict(self._get_idmaps(["IdOPVLWATER"]))
for idx, row in idmap_df.iterrows():
            error = None
            ext_par = [
mapping["external"]
for mapping in self.parameter_mapping
if re.match(f'{mapping["internal"]}[0-9]', row["internalParameter"])
]
if ext_par:
if not any(re.match(par, row["externalParameter"]) for par in ext_par):
error = "parameter mismatch"
else:
error = "pars niet opgenomen in config"
if error:
par_errors["internalLocation"].append(row["internalLocation"])
par_errors["internalParameter"].append(row["internalParameter"])
par_errors["externalParameter"].append(row["externalParameter"])
par_errors["fout"].append(error)
self.consistency[sheet_name] = pd.DataFrame(par_errors)
if len(self.consistency[sheet_name]) == 0:
self.logging.info("geen regex fouten op interne en externe parameters")
else:
self.logging.warning(
"{} regex fouten op interne en externe parameters".format(
len(self.consistency[sheet_name])
)
)
def check_location_set_errors(self, sheet_name="locSet error"):
"""Check on errors in locationsets."""
# fews_config = self.fews_config
# config = self
xy_ignore_df = self.consistency["xy_ignore"]
# from fews_utilities import xml_to_dict
# import regex as re
location_sets = self.location_sets
idmap_sections = self.idmap_sections
loc_set_errors = {
"locationId": [],
"caw_code": [],
"caw_name": [],
"csv_file": [],
"location_name": [],
"type": [],
"functie": [],
"name_error": [],
"caw_name_inconsistent": [],
"missing_in_map": [],
"missing_in_set": [],
"missing_peilschaal": [],
"missing_hbov": [],
"missing_hben": [],
"missing_hbovps": [],
"missing_hbenps": [],
"missing_hloc": [],
"xy_not_same": [],
}
sets = {
"waterstandlocaties": "WATERSTANDLOCATIES",
"sublocaties": "KUNSTWERKEN",
"hoofdlocaties": "KUNSTWERKEN",
}
for set_name, section_name in sets.items():
self.logging.info(set_name)
location_set = location_sets[set_name]
location_gdf = location_set["gdf"]
csv_file = self.fews_config.locationSets[location_set["id"]]["csvFile"][
"file"
]
int_locs = []
for idmap in ["IdOPVLWATER", "IdOPVLWATER_HYMOS"]:
for section in idmap_sections[idmap][section_name]:
int_locs += [
item["internalLocation"]
for item in xml_to_dict(
self.fews_config.IdMapFiles[idmap], **section
)["idMap"]["map"]
]
if set_name == "sublocaties":
int_locs = [loc for loc in int_locs if not loc[-1] == "0"]
par_gdf = location_sets["hoofdlocaties"]["gdf"]
elif set_name == "hoofdlocaties":
int_locs = [loc for loc in int_locs if loc[-1] == "0"]
# idx, row = list(location_gdf.iterrows())[0]
for idx, row in list(location_gdf.iterrows()):
error = {
"name_error": False,
"caw_name_inconsistent": False,
"missing_in_map": False,
"type": "",
"functie": "",
"missing_in_set": False,
"missing_peilschaal": False,
"missing_hbov": False,
"missing_hben": False,
"missing_hbovps": False,
"missing_hbenps": False,
"missing_hloc": False,
"xy_not_same": False,
}
loc_id = row["LOC_ID"]
caw_code = loc_id[2:-2]
loc_name = row["LOC_NAME"]
caw_name = ""
if set_name == "sublocaties":
loc_functie = row["FUNCTIE"]
sub_type = row["TYPE"]
if sub_type in [
"afsluiter",
"debietmeter",
"krooshek",
"vispassage",
]:
if not re.match(
f"[A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*-{sub_type}", loc_name
):
error["name_error"] = True
else:
if not re.match((
f"[A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*-"
f"{sub_type}[0-9]_{loc_functie}"),
loc_name,
):
error["name_error"] = True
if not error["name_error"]:
caw_name = re.match("([A-Z0-9 ]*)_", loc_name).group(1)
if not all(
location_gdf[
location_gdf["LOC_ID"].str.match(f"..{caw_code}")
]["LOC_NAME"].str.match(f"({caw_name}_{caw_code}-K)")
):
error["caw_name_inconsistent"] = True
                if (
                    row["HBOV"]
                    not in location_sets["waterstandlocaties"]["gdf"]["LOC_ID"].values
                ):
                    error["missing_hbov"] = True
                if (
                    row["HBEN"]
                    not in location_sets["waterstandlocaties"]["gdf"]["LOC_ID"].values
                ):
                    error["missing_hben"] = True
                if (
                    row["HBOVPS"]
                    not in location_sets["peilschalen"]["gdf"]["LOC_ID"].values
                ):
                    error["missing_hbovps"] = True
                if (
                    row["HBENPS"]
                    not in location_sets["peilschalen"]["gdf"]["LOC_ID"].values
                ):
                    error["missing_hbenps"] = True
                if (
                    row["PAR_ID"]
                    not in location_sets["hoofdlocaties"]["gdf"]["LOC_ID"].values
                ):
                    error["missing_hloc"] = True
else:
if not any(
[
re.match(loc, loc_id)
for loc in xy_ignore_df["internalLocation"]
]
):
if (
not par_gdf[par_gdf["LOC_ID"] == row["PAR_ID"]][
"geometry"
]
.values[0]
.equals(row["geometry"])
):
error["xy_not_same"] = True
if any(error.values()):
error["type"] = sub_type
error["functie"] = loc_functie
elif set_name == "hoofdlocaties":
if not re.match(f"[A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*", loc_name):
error["name_error"] = True
elif set_name == "waterstandlocaties":
if not re.match(f"[A-Z0-9 ]*_{caw_code}-w_.*", loc_name):
error["name_error"] = True
if not error["name_error"]:
caw_name = re.match("([A-Z0-9 ]*)_", loc_name).group(1)
if not all(
location_gdf[
location_gdf["LOC_ID"].str.match(f"..{caw_code}")
]["LOC_NAME"].str.match(f"({caw_name}_{caw_code}-w)")
):
error["caw_name_inconsistent"] = True
                if (
                    row["PEILSCHAAL"]
                    not in location_sets["peilschalen"]["gdf"]["LOC_ID"].values
                ):
                    error["missing_peilschaal"] = True
if loc_id not in int_locs:
error["missing_in_map"] = True
if any(error.values()):
loc_set_errors["locationId"].append(loc_id)
loc_set_errors["caw_name"].append(caw_name)
loc_set_errors["caw_code"].append(caw_code)
loc_set_errors["csv_file"].append(csv_file)
loc_set_errors["location_name"].append(loc_name)
for key, value in error.items():
loc_set_errors[key].append(value)
self.consistency["locSet error"] = pd.DataFrame(loc_set_errors)
# opname in samenvatting
if len(self.consistency["locSet error"]) == 0:
self.logging.info("no errors in locationSets")
else:
self.logging.warning(
"{} errors in locationSets".format(
len(self.consistency["locSet error"])
)
)
def write_excel(self):
"""Write consistency to excel."""
consistency_xlsx = self.paths["consistency_xlsx"]
consistency_out_xlsx = consistency_xlsx.parent.joinpath(
f"{consistency_xlsx.stem}_uit.xlsx"
)
index = self.consistency["inhoudsopgave"]
index.index = index["werkblad"]
summary = {
key: len(df)
for key, df in self.consistency.items()
if key not in self.fixed_sheets + ["inhoudsopgave"]
}
# read input xlsx and empty all warning sheets
book = load_workbook(consistency_xlsx)
for worksheet in book.worksheets:
if worksheet.title not in self.fixed_sheets:
book.remove(worksheet)
# add summary
worksheet = book.create_sheet("samenvatting", 1)
worksheet.sheet_properties.tabColor = "92D050"
worksheet.append(["controle", "aantal", "beschrijving"])
for cell in worksheet["{}".format(worksheet.max_row)]:
cell.font = Font(bold=True)
for key, value in summary.items():
worksheet.append([key, value, index.loc[key]["beschrijving"]])
if (value > 0) and (key != "mpt"):
worksheet[worksheet.max_row][1].fill = PatternFill(
fgColor="FF0000", fill_type="solid"
)
else:
worksheet[worksheet.max_row][1].fill = PatternFill(
fgColor="92D050", fill_type="solid"
)
worksheet.column_dimensions["A"].width = 40
worksheet.column_dimensions["C"].width = 100
worksheet.auto_filter.ref = worksheet.dimensions
# add warning sheets
xls_writer = pd.ExcelWriter(consistency_out_xlsx, engine="openpyxl")
xls_writer.book = book
for sheet_name in summary.keys():
df = self.consistency[sheet_name]
if not df.empty:
if df.index.name is None:
df.to_excel(xls_writer, sheet_name=sheet_name, index=False)
else:
df.to_excel(xls_writer, sheet_name=sheet_name, index=True)
worksheet = xls_writer.sheets[sheet_name]
for col in worksheet.columns:
worksheet.column_dimensions[col[0].column_letter].width = 20
worksheet.auto_filter.ref = worksheet.dimensions
worksheet.freeze_panes = worksheet["B2"]
if not df.empty:
if sheet_name == "mpt":
worksheet.sheet_properties.tabColor = "92D050"
else:
worksheet.sheet_properties.tabColor = "FF0000"
xls_writer.book.active = xls_writer.book["samenvatting"]
xls_writer.save()
def write_csvs(self):
"""Write locationSets to CSV."""
if "mpt" not in self.consistency.keys():
self.hist_tags_to_mpt()
mpt_df = self.consistency["mpt"]
if self.waterstandloc is None:
self._read_locs()
date_threshold = mpt_df["ENDDATE"].max() - pd.Timedelta(weeks=26)
location_sets = {
key: value
for key, value in self.location_sets.items()
if value["id"]
in ["OPVLWATER_HOOFDLOC", "OPVLWATER_WATERSTANDEN_AUTO", "OPVLWATER_SUBLOC"]
}
for key, value in location_sets.items():
self.logging.info(f"writing CSV for set: {key}")
gdf = value["gdf"]
df = gdf.drop("geometry", axis=1)
df[["START", "EIND"]] = df.apply(
update_date, args=(mpt_df, date_threshold), axis=1, result_type="expand"
)
if value["id"] == "OPVLWATER_WATERSTANDEN_AUTO":
grouper = self.mpt_hist_tags.groupby(["fews_locid"])
df["HIST_TAG"] = df.apply(
update_histtag, args=[grouper], axis=1, result_type="expand"
)
elif value["id"] == "OPVLWATER_SUBLOC":
grouper = df.groupby(["PAR_ID"])
par_types_df = (
grouper["TYPE"]
.unique()
.apply(lambda x: sorted(x))
.transform(lambda x: "/".join(x))
)
df["PAR_ID"] = gdf["LOC_ID"].str[0:-1] + "0"
df["ALLE_TYPES"] = df["PAR_ID"].apply(lambda x: par_types_df.loc[x])
df[["HBOVPS", "HBENPS"]] = df.apply(
self._update_staff_gauge, axis=1, result_type="expand"
)
csv_file = self.paths["csv_out"].joinpath(
self.fews_config.locationSets[value["id"]]["csvFile"]["file"]
)
if csv_file.suffix == "":
csv_file = Path(f"{csv_file}.csv")
df.to_csv(csv_file, index=False)
| [
"pandas.read_csv",
"openpyxl.load_workbook",
"pathlib.Path",
"pandas.api.types.is_datetime64_dtype",
"pandas.Timestamp.now",
"openpyxl.styles.PatternFill",
"numpy.unique",
"pandas.DataFrame",
"logging.warning",
"pandas.Timedelta",
"re.sub",
"pandas.ExcelWriter",
"pandas.concat",
"meetpuntc... | [((6040, 6077), 'meetpuntconfig.fews_utilities.FewsConfig', 'FewsConfig', (["self.paths['fews_config']"], {}), "(self.paths['fews_config'])\n", (6050, 6077), False, 'from meetpuntconfig.fews_utilities import FewsConfig, xml_to_dict\n'), ((7278, 7364), 'pandas.read_excel', 'pd.read_excel', (["self.paths['consistency_xlsx']"], {'sheet_name': 'None', 'engine': '"""openpyxl"""'}), "(self.paths['consistency_xlsx'], sheet_name=None, engine=\n 'openpyxl')\n", (7291, 7364), True, 'import pandas as pd\n'), ((12480, 12518), 'pandas.concat', 'pd.concat', (['[mpt_df, h_locs_df]'], {'axis': '(0)'}), '([mpt_df, h_locs_df], axis=0)\n', (12489, 12518), True, 'import pandas as pd\n'), ((12923, 13042), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['bestand', 'externalLocation', 'externalParameter', 'internalLocation',\n 'internalParameter']"}), "(columns=['bestand', 'externalLocation', 'externalParameter',\n 'internalLocation', 'internalParameter'])\n", (12935, 13042), True, 'import pandas as pd\n'), ((18031, 18150), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['bestand', 'externalLocation', 'externalParameter', 'internalLocation',\n 'internalParameter']"}), "(columns=['bestand', 'externalLocation', 'externalParameter',\n 'internalLocation', 'internalParameter'])\n", (18043, 18150), True, 'import pandas as pd\n'), ((23823, 23848), 'pandas.DataFrame', 'pd.DataFrame', (['hloc_errors'], {}), '(hloc_errors)\n', (23835, 23848), True, 'import pandas as pd\n'), ((30245, 30272), 'pandas.DataFrame', 'pd.DataFrame', (['ex_par_errors'], {}), '(ex_par_errors)\n', (30257, 30272), True, 'import pandas as pd\n'), ((30314, 30365), 'pandas.DataFrame', 'pd.DataFrame', (["{'internalLocation': int_loc_missing}"], {}), "({'internalLocation': int_loc_missing})\n", (30326, 30365), True, 'import pandas as pd\n'), ((32693, 32721), 'pandas.DataFrame', 'pd.DataFrame', (['ex_par_missing'], {}), '(ex_par_missing)\n', (32705, 32721), True, 'import pandas as pd\n'), ((36194, 36221), 'pandas.DataFrame', 'pd.DataFrame', (['ex_loc_errors'], {}), '(ex_loc_errors)\n', (36206, 36221), True, 'import pandas as pd\n'), ((46306, 46329), 'pandas.DataFrame', 'pd.DataFrame', (['ts_errors'], {}), '(ts_errors)\n', (46318, 46329), True, 'import pandas as pd\n'), ((54604, 54630), 'pandas.DataFrame', 'pd.DataFrame', (['valid_errors'], {}), '(valid_errors)\n', (54616, 54630), True, 'import pandas as pd\n'), ((56444, 56468), 'pandas.DataFrame', 'pd.DataFrame', (['par_errors'], {}), '(par_errors)\n', (56456, 56468), True, 'import pandas as pd\n'), ((64913, 64941), 'pandas.DataFrame', 'pd.DataFrame', (['loc_set_errors'], {}), '(loc_set_errors)\n', (64925, 64941), True, 'import pandas as pd\n'), ((65864, 65895), 'openpyxl.load_workbook', 'load_workbook', (['consistency_xlsx'], {}), '(consistency_xlsx)\n', (65877, 65895), False, 'from openpyxl import load_workbook\n'), ((67019, 67074), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['consistency_out_xlsx'], {'engine': '"""openpyxl"""'}), "(consistency_out_xlsx, engine='openpyxl')\n", (67033, 67074), True, 'import pandas as pd\n'), ((2933, 2972), 'pandas.Timestamp', 'pd.Timestamp', ([], {'year': '(2100)', 'month': '(1)', 'day': '(1)'}), '(year=2100, month=1, day=1)\n', (2945, 2972), True, 'import pandas as pd\n'), ((4876, 4893), 'pathlib.Path', 'Path', (['config_path'], {}), '(config_path)\n', (4880, 4893), False, 'from pathlib import Path\n'), ((5194, 5204), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5202, 5204), False, 'import sys\n'), ((5305, 5315), 'pathlib.Path', 'Path', 
(['path'], {}), '(path)\n', (5309, 5315), False, 'from pathlib import Path\n'), ((20311, 20355), 'pandas.DataFrame', 'pd.DataFrame', (["{'parameters': params_missing}"], {}), "({'parameters': params_missing})\n", (20323, 20355), True, 'import pandas as pd\n'), ((20786, 20842), 'pandas.DataFrame', 'pd.DataFrame', (["{'internalLocation': [], 'x': [], 'y': []}"], {}), "({'internalLocation': [], 'x': [], 'y': []})\n", (20798, 20842), True, 'import pandas as pd\n'), ((24003, 24025), 'pandas.DataFrame', 'pd.DataFrame', (['par_dict'], {}), '(par_dict)\n', (24015, 24025), True, 'import pandas as pd\n'), ((25888, 25936), 'numpy.unique', 'np.unique', (["loc_group['externalParameter'].values"], {}), "(loc_group['externalParameter'].values)\n", (25897, 25936), True, 'import numpy as np\n'), ((33473, 33523), 'numpy.unique', 'np.unique', (["loc_group[1]['internalLocation'].values"], {}), "(loc_group[1]['internalLocation'].values)\n", (33482, 33523), True, 'import numpy as np\n'), ((36887, 36949), 'pandas.DataFrame', 'pd.DataFrame', (["{'internalLocation': [], 'externalLocation': []}"], {}), "({'internalLocation': [], 'externalLocation': []})\n", (36899, 36949), True, 'import pandas as pd\n'), ((37902, 37948), 'numpy.unique', 'np.unique', (["group_df['externalLocation'].values"], {}), "(group_df['externalLocation'].values)\n", (37911, 37948), True, 'import numpy as np\n'), ((66321, 66336), 'openpyxl.styles.Font', 'Font', ([], {'bold': '(True)'}), '(bold=True)\n', (66325, 66336), False, 'from openpyxl.styles import Font, PatternFill\n'), ((68407, 68429), 'pandas.Timedelta', 'pd.Timedelta', ([], {'weeks': '(26)'}), '(weeks=26)\n', (68419, 68429), True, 'import pandas as pd\n'), ((4810, 4847), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', 'log_level'], {}), "('LOGLEVEL', log_level)\n", (4824, 4847), False, 'import os\n'), ((5038, 5052), 'json.load', 'json.load', (['src'], {}), '(src)\n', (5047, 5052), False, 'import json\n'), ((7870, 7965), 'pandas.read_csv', 'pd.read_csv', (["self.paths['hist_tags_csv']"], {'parse_dates': 'dtype_cols', 'sep': 'None', 'engine': '"""python"""'}), "(self.paths['hist_tags_csv'], parse_dates=dtype_cols, sep=None,\n engine='python')\n", (7881, 7965), True, 'import pandas as pd\n'), ((8895, 8973), 'pandas.read_csv', 'pd.read_csv', (["self.paths['mpt_ignore_csv']"], {'sep': 'None', 'header': '(0)', 'engine': '"""python"""'}), "(self.paths['mpt_ignore_csv'], sep=None, header=0, engine='python')\n", (8906, 8973), True, 'import pandas as pd\n'), ((18709, 18832), 'pandas.DataFrame', 'pd.DataFrame', (['idmap_doubles'], {'columns': "['internalLocation', 'externalLocation', 'internalParameter',\n 'externalParameter']"}), "(idmap_doubles, columns=['internalLocation', 'externalLocation',\n 'internalParameter', 'externalParameter'])\n", (18721, 18832), True, 'import pandas as pd\n'), ((19098, 19151), 'pandas.concat', 'pd.concat', (['[self.consistency[sheet_name], df]'], {'axis': '(0)'}), '([self.consistency[sheet_name], df], axis=0)\n', (19107, 19151), True, 'import pandas as pd\n'), ((25964, 25990), 're.sub', 're.sub', (['"""\\\\d"""', '"""."""', 'ex_par'], {}), "('\\\\d', '.', ex_par)\n", (25970, 25990), False, 'import re\n'), ((31828, 31842), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (31840, 31842), True, 'import pandas as pd\n'), ((31921, 31969), 'numpy.unique', 'np.unique', (["loc_group['externalParameter'].values"], {}), "(loc_group['externalParameter'].values)\n", (31930, 31969), True, 'import numpy as np\n'), ((39680, 39759), 'pandas.to_datetime', 
'pd.to_datetime', (["self.subloc[self.subloc['LOC_ID'] == int_loc]['EIND'].values[0]"], {}), "(self.subloc[self.subloc['LOC_ID'] == int_loc]['EIND'].values[0])\n", (39694, 39759), True, 'import pandas as pd\n'), ((39825, 39870), 'numpy.unique', 'np.unique', (["loc_df['externalParameter'].values"], {}), "(loc_df['externalParameter'].values)\n", (39834, 39870), True, 'import numpy as np\n'), ((39898, 39943), 'numpy.unique', 'np.unique', (["loc_df['internalParameter'].values"], {}), "(loc_df['internalParameter'].values)\n", (39907, 39943), True, 'import numpy as np\n'), ((39970, 40014), 'numpy.unique', 'np.unique', (["loc_df['externalLocation'].values"], {}), "(loc_df['externalLocation'].values)\n", (39979, 40014), True, 'import numpy as np\n'), ((47198, 47261), 'meetpuntconfig.fews_utilities.xml_to_dict', 'xml_to_dict', (["self.fews_config.RegionConfigFiles['LocationSets']"], {}), "(self.fews_config.RegionConfigFiles['LocationSets'])\n", (47209, 47261), False, 'from meetpuntconfig.fews_utilities import FewsConfig, xml_to_dict\n'), ((66558, 66606), 'openpyxl.styles.PatternFill', 'PatternFill', ([], {'fgColor': '"""FF0000"""', 'fill_type': '"""solid"""'}), "(fgColor='FF0000', fill_type='solid')\n", (66569, 66606), False, 'from openpyxl.styles import Font, PatternFill\n'), ((66718, 66766), 'openpyxl.styles.PatternFill', 'PatternFill', ([], {'fgColor': '"""92D050"""', 'fill_type': '"""solid"""'}), "(fgColor='92D050', fill_type='solid')\n", (66729, 66766), False, 'from openpyxl.styles import Font, PatternFill\n'), ((70100, 70123), 'pathlib.Path', 'Path', (['f"""{csv_file}.csv"""'], {}), "(f'{csv_file}.csv')\n", (70104, 70123), False, 'from pathlib import Path\n'), ((1743, 1779), 're.match', 're.match', (["rule['parameter']", 'int_par'], {}), "(rule['parameter'], int_par)\n", (1751, 1779), False, 'import re\n'), ((5079, 5096), 'pathlib.Path', 'Path', (['config_json'], {}), '(config_json)\n', (5083, 5096), False, 'from pathlib import Path\n'), ((5556, 5621), 'logging.warning', 'logging.warning', (['f"""{path} does not exist. Folder will be created"""'], {}), "(f'{path} does not exist. 
Folder will be created')\n", (5571, 5621), False, 'import logging\n'), ((5975, 5985), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5983, 5985), False, 'import sys\n'), ((8120, 8173), 'pandas.api.types.is_datetime64_dtype', 'pd.api.types.is_datetime64_dtype', (['self.hist_tags[col]'], {}), '(self.hist_tags[col])\n', (8152, 8173), True, 'import pandas as pd\n'), ((8520, 8530), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8528, 8530), False, 'import sys\n'), ((9640, 9650), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9648, 9650), False, 'import sys\n'), ((9942, 9989), 'meetpuntconfig.fews_utilities.xml_to_dict', 'xml_to_dict', (['self.fews_config.IdMapFiles[idmap]'], {}), '(self.fews_config.IdMapFiles[idmap])\n', (9953, 9989), False, 'from meetpuntconfig.fews_utilities import FewsConfig, xml_to_dict\n'), ((22128, 22149), 're.match', 're.match', (['loc', 'loc_id'], {}), '(loc, loc_id)\n', (22136, 22149), False, 'import re\n'), ((32001, 32027), 're.sub', 're.sub', (['"""\\\\d"""', '"""."""', 'ex_par'], {}), "('\\\\d', '.', ex_par)\n", (32007, 32027), False, 'import re\n'), ((50029, 50084), 'numpy.unique', 'np.unique', (["params_df.loc[int_loc]['internalParameters']"], {}), "(params_df.loc[int_loc]['internalParameters'])\n", (50038, 50084), True, 'import numpy as np\n'), ((55776, 55841), 're.match', 're.match', (['f"""{mapping[\'internal\']}[0-9]"""', "row['internalParameter']"], {}), '(f"{mapping[\'internal\']}[0-9]", row[\'internalParameter\'])\n', (55784, 55841), False, 'import re\n'), ((1371, 1407), 're.match', 're.match', (["rule['parameter']", 'int_par'], {}), "(rule['parameter'], int_par)\n", (1379, 1407), False, 'import re\n'), ((14939, 14972), 'pandas.DataFrame', 'pd.DataFrame', (['idmap_wrong_section'], {}), '(idmap_wrong_section)\n', (14951, 14972), True, 'import pandas as pd\n'), ((15127, 15180), 'pandas.concat', 'pd.concat', (['[self.consistency[sheet_name], df]'], {'axis': '(0)'}), '([self.consistency[sheet_name], df], axis=0)\n', (15136, 15180), True, 'import pandas as pd\n'), ((33585, 33609), 're.match', 're.match', (['"""8..$"""', 'ex_loc'], {}), "('8..$', ex_loc)\n", (33593, 33609), False, 'import re\n'), ((34495, 34520), 're.match', 're.match', (['""".8..$"""', 'ex_loc'], {}), "('.8..$', ex_loc)\n", (34503, 34520), False, 'import re\n'), ((13687, 13745), 'meetpuntconfig.fews_utilities.xml_to_dict', 'xml_to_dict', (['self.fews_config.IdMapFiles[idmap]'], {}), '(self.fews_config.IdMapFiles[idmap], **section)\n', (13698, 13745), False, 'from meetpuntconfig.fews_utilities import FewsConfig, xml_to_dict\n'), ((22372, 22413), 're.match', 're.match', (["row['internalLocation']", 'loc_id'], {}), "(row['internalLocation'], loc_id)\n", (22380, 22413), False, 'import re\n'), ((40103, 40126), 're.match', 're.match', (['"""HR."""', 'ex_par'], {}), "('HR.', ex_par)\n", (40111, 40126), False, 'import re\n'), ((43250, 43291), 'numpy.unique', 'np.unique', (["series[1]['internalParameter']"], {}), "(series[1]['internalParameter'])\n", (43259, 43291), True, 'import numpy as np\n'), ((51601, 51625), 're.match', 're.match', (['param', 'int_par'], {}), '(param, int_par)\n', (51609, 51625), False, 'import re\n'), ((55908, 55947), 're.match', 're.match', (['par', "row['externalParameter']"], {}), "(par, row['externalParameter'])\n", (55916, 55947), False, 'import re\n'), ((60305, 60373), 're.match', 're.match', (['f"""[A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*-{sub_type}"""', 'loc_name'], {}), "(f'[A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*-{sub_type}', loc_name)\n", (60313, 60373), False, 'import re\n'), ((60542, 60633), 
're.match', 're.match', (['f"""[A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*-{sub_type}[0-9]_{loc_functie}"""', 'loc_name'], {}), "(f'[A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*-{sub_type}[0-9]_{loc_functie}',\n loc_name)\n", (60550, 60633), False, 'import re\n'), ((63296, 63353), 're.match', 're.match', (['f"""[A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*"""', 'loc_name'], {}), "(f'[A-Z0-9 ]*_{caw_code}-K_[A-Z0-9 ]*', loc_name)\n", (63304, 63353), False, 'import re\n'), ((34221, 34277), 'numpy.unique', 'np.unique', (['[int_loc[:-1] for int_loc in int_locs_select]'], {}), '([int_loc[:-1] for int_loc in int_locs_select])\n', (34230, 34277), True, 'import numpy as np\n'), ((35132, 35188), 'numpy.unique', 'np.unique', (['[int_loc[:-1] for int_loc in int_locs_select]'], {}), '([int_loc[:-1] for int_loc in int_locs_select])\n', (35141, 35188), True, 'import numpy as np\n'), ((40719, 40742), 're.match', 're.match', (['"""HR."""', 'ex_par'], {}), "('HR.', ex_par)\n", (40727, 40742), False, 'import re\n'), ((40862, 40885), 're.match', 're.match', (['"""HR."""', 'ex_par'], {}), "('HR.', ex_par)\n", (40870, 40885), False, 'import re\n'), ((44672, 44713), 'numpy.unique', 'np.unique', (["series[1]['internalParameter']"], {}), "(series[1]['internalParameter'])\n", (44681, 44713), True, 'import numpy as np\n'), ((44909, 44939), 'numpy.concatenate', 'np.concatenate', (['other_int_pars'], {}), '(other_int_pars)\n', (44923, 44939), True, 'import numpy as np\n'), ((60887, 60922), 're.match', 're.match', (['"""([A-Z0-9 ]*)_"""', 'loc_name'], {}), "('([A-Z0-9 ]*)_', loc_name)\n", (60895, 60922), False, 'import re\n'), ((63489, 63538), 're.match', 're.match', (['f"""[A-Z0-9 ]*_{caw_code}-w_.*"""', 'loc_name'], {}), "(f'[A-Z0-9 ]*_{caw_code}-w_.*', loc_name)\n", (63497, 63538), False, 'import re\n'), ((13966, 14010), 're.match', 're.match', (['pattern', "idmap['internalLocation']"], {}), "(pattern, idmap['internalLocation'])\n", (13974, 14010), False, 'import re\n'), ((33766, 33802), 're.match', 're.match', (['f"""...{ex_loc}..$"""', 'int_loc'], {}), "(f'...{ex_loc}..$', int_loc)\n", (33774, 33802), False, 'import re\n'), ((34066, 34099), 're.match', 're.match', (['f"""{loc_type}."""', 'int_loc'], {}), "(f'{loc_type}.', int_loc)\n", (34074, 34099), False, 'import re\n'), ((34678, 34713), 're.match', 're.match', (['f"""..{ex_loc}..$"""', 'int_loc'], {}), "(f'..{ex_loc}..$', int_loc)\n", (34686, 34713), False, 'import re\n'), ((34977, 35010), 're.match', 're.match', (['f"""{loc_type}."""', 'int_loc'], {}), "(f'{loc_type}.', int_loc)\n", (34985, 35010), False, 'import re\n'), ((38245, 38260), 're.compile', 're.compile', (['rex'], {}), '(rex)\n', (38255, 38260), False, 'import re\n'), ((40932, 40972), 'numpy.unique', 'np.unique', (["group_df['externalParameter']"], {}), "(group_df['externalParameter'])\n", (40941, 40972), True, 'import numpy as np\n'), ((41138, 41156), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (41154, 41156), True, 'import pandas as pd\n'), ((42972, 43001), 're.match', 're.match', (['"""HR."""', 'series[0][1]'], {}), "('HR.', series[0][1])\n", (42980, 43001), False, 'import re\n'), ((58597, 58655), 'meetpuntconfig.fews_utilities.xml_to_dict', 'xml_to_dict', (['self.fews_config.IdMapFiles[idmap]'], {}), '(self.fews_config.IdMapFiles[idmap], **section)\n', (58608, 58655), False, 'from meetpuntconfig.fews_utilities import FewsConfig, xml_to_dict\n'), ((62528, 62549), 're.match', 're.match', (['loc', 'loc_id'], {}), '(loc, loc_id)\n', (62536, 62549), False, 'import re\n'), ((63675, 63710), 're.match', 're.match', 
(['"""([A-Z0-9 ]*)_"""', 'loc_name'], {}), "('([A-Z0-9 ]*)_', loc_name)\n", (63683, 63710), False, 'import re\n'), ((27754, 27769), 're.compile', 're.compile', (['rex'], {}), '(rex)\n', (27764, 27769), False, 'import re\n'), ((29613, 29628), 're.compile', 're.compile', (['rex'], {}), '(rex)\n', (29623, 29628), False, 'import re\n')] |
import numpy as np
import cv2
from glob import glob
import os
import os.path as path
import random
import pickle
import scipy.stats as st
import torch
import torch.utils.data as Data
def process_pts(line):
line = line.replace(',', '')
line = line.split(' ')
fname = line[0]
pts = line[1:-3]
ang = line[-3:]
ang = [float(i) for i in ang]
ang = np.float32(ang)
pts = [float(i) for i in pts]
pts = np.float32(pts)
pts = pts.reshape([-1,3])
return fname, pts, ang
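# Each annotation line is expected to look like (values here are hypothetical):
#   "0001.jpg x1 y1 z1 ... x68 y68 z68 yaw pitch roll"
# so process_pts returns the file name, a (68, 3) landmark array and a (3,)
# head-pose array:
#   fname, pts, ang = process_pts(line)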
def plot_gaussian_kernel(pos, size=25):
sigma = (size-1) / 6
xx = np.linspace(-3,3,size)
x, y = pos[0], pos[1]
xbias = (x - (size-1)/2) / sigma
x = xx + xbias
ybias = (y - (size-1)/2) / sigma
y = xx + ybias
x = st.norm.pdf(x)
y = st.norm.pdf(y)
exp = np.outer(y,x)
hmap = exp / exp.max()
return hmap
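# Note: the kernel is normalized so its peak value is 1 and the peak sits at
# the (sub-pixel) position `pos` inside a `size` x `size` patch, e.g.:
#   k = plot_gaussian_kernel([12, 12], size=25)   # centered kernel, k.max() == 1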
def plot_gaussian(hmap, pos, size, ksize=25):
x, y = pos[0]/(384/size), pos[1]/(384/size)
x1 = int(np.floor(x - ksize//2))
x2 = x1 + ksize
y1 = int(np.floor(y - ksize//2))
y2 = y1 + ksize
x = x - x1
y = y - y1
kernel = plot_gaussian_kernel([x,y], size=ksize)
kernel_x1 = kernel_y1 = 0
kernel_x2 = kernel_y2 = ksize
if x1<0:
kernel_x1 = -x1
x1 = 0
if y1<0:
kernel_y1 = -y1
y1 = 0
if y2>size:
kernel_y2 = ksize - (y2 - size)
y2 = size
if x2 > size:
kernel_x2 = ksize - (x2 - size)
x2 = size
    hmap[y1:y2, x1:x2] = kernel[kernel_y1:kernel_y2, kernel_x1:kernel_x2]
def get_hmap(pts, size=128):
hmap = np.zeros([size, size, 68])
for i in range(len(pts)):
plot_gaussian(hmap[:,:,i], pts[i], size=size)
return hmap
# def get_hmap(pts, size=256):
# pos = np.dstack(np.mgrid[0:size:1, 0:size:1])
# hmap = np.zeros([size, size, 68])
# for i, point in enumerate(pts):
# p_resize = point / 256 * size
# hmap[:, :, i] = st.multivariate_normal(mean=[p_resize[1], p_resize[0]], cov=16).pdf(pos)
# return hmap
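# Usage sketch: landmarks are given in 384x384 image coordinates (see the
# 384/size rescaling in plot_gaussian) and become one Gaussian heatmap channel
# per point, e.g. get_hmap(pts) returns an array of shape (128, 128, 68).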
def Seg2map(seg, size=128, interpolation=cv2.INTER_NEAREST):
seg_new = np.zeros(seg.shape, dtype='float32')
seg_new[seg > 7.5] = 1
seg = np.copy(cv2.resize(seg_new, (size, size), interpolation=interpolation))
return seg
def Cv2tensor(img):
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img.astype(np.float32))
return img
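# Cv2tensor converts an OpenCV HWC array into the CHW float32 tensor layout
# PyTorch expects, e.g. a (128, 128, 3) image becomes a [3, 128, 128] tensor.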
class Reenactset(Data.Dataset):
def __init__(self, pkl_path='', img_path='', max_iter=80000, consistency_iter=3, image_size=128):
super(Reenactset, self).__init__()
self.img_path = img_path
self.data = pickle.load(open(pkl_path, 'rb'))
self.idx = list(self.data.keys())
self.size = max_iter
self.image_size = image_size
self.consistency_iter = consistency_iter
assert self.consistency_iter > 0
def __getitem__(self, index):
ID = random.choice(self.idx)
samples = random.sample(self.data[ID], self.consistency_iter+1)
source = samples[0]
target = samples[1]
mid_samples = samples[2:]
source_name, _, _ = process_pts(source)
target_name, pts, _ = process_pts(target)
m_pts = []
for m_s in mid_samples:
_, m_pt, _ = process_pts(m_s)
m_pts.append(m_pt)
# pts = torch.from_numpy(pts[:, 0:2].astype(np.float32))
# pts = torch.unsqueeze(pts, 0)
m_hmaps = []
hmap = Cv2tensor(get_hmap(pts, size=self.image_size))
for m_pt in m_pts:
m_hmaps.append(Cv2tensor(get_hmap(m_pt, size=self.image_size)))
source_file = self.img_path + f'/img/{ID}/{source_name}'
target_file = self.img_path + f'/img/{ID}/{target_name}'
target_seg_file = self.img_path + f'/seg/{ID}/seg_{target_name}'
source_img = cv2.imread(source_file)
source_img = cv2.resize(source_img, (self.image_size, self.image_size), interpolation=cv2.INTER_LINEAR)
source_img = source_img / 127.5 - 1
source_img = Cv2tensor(source_img)
target_img = cv2.imread(target_file)
target_img = cv2.resize(target_img, (self.image_size, self.image_size), interpolation=cv2.INTER_LINEAR)
target_img = target_img / 127.5 - 1
target_img = Cv2tensor(target_img)
# source_seg = cv2.imread(self.img_path + f'/seg/{ID}/seg_{source_name}')
# source_seg = Seg2map(source_seg, size=self.image_size)
# source_seg = Cv2tensor(source_seg)
target_seg = cv2.imread(target_seg_file)
target_seg = Seg2map(target_seg, size=self.image_size)
target_seg = Cv2tensor(target_seg)
return source_img, hmap, target_img, target_seg, m_hmaps
def __len__(self):
return self.size
class Reenactset_new(Data.Dataset):
def __init__(self, img_path='', seg_path='', max_iter=80000, consistency_iter=3, image_size=256):
super(Reenactset_new, self).__init__()
self.img_path = img_path
self.seg_path = seg_path
self.data_list = sorted(glob(self.img_path + '/*.txt'))
self.size = max_iter
self.image_size = image_size
self.consistency_iter = consistency_iter
assert self.consistency_iter > 0
        # build a filtered list instead of removing items from self.data_list
        # while iterating over it, which would silently skip entries
        self.data_list = [
            data
            for data in self.data_list
            if len(open(data, 'r').readlines()) >= self.consistency_iter + 1
        ]
def __getitem__(self, index):
while True:
try:
ID = random.choice(self.data_list)
# ID = '/home/yy/FSGAN/ijbc_clean_no_align_v2/145.txt'
lineList = [line.rstrip('\n') for line in open(ID, 'r')]
samples = random.sample(lineList, self.consistency_iter+1)
source = samples[0]
target = samples[1]
mid_samples = samples[2:]
source_name, _, _ = process_pts(source)
target_name, pts, _ = process_pts(target)
source_file = self.img_path + f'/img/{path.basename(path.split(source_name)[0])}/{path.basename(source_name)}'
target_file = self.img_path + f'/img/{path.basename(path.split(target_name)[0])}/{path.basename(target_name)}'
target_seg_file = self.seg_path + f'/seg/{path.basename(path.split(target_name)[0])}/seg_{path.basename(target_name)}'[:-4] + '.png'
                if not (os.path.isfile(source_file) and os.path.isfile(target_file) and os.path.isfile(target_seg_file)):
continue
m_pts = []
for m_s in mid_samples:
_, m_pt, _ = process_pts(m_s)
m_pts.append(m_pt)
# pts = torch.from_numpy(pts[:, 0:2].astype(np.float32))
# pts = torch.unsqueeze(pts, 0)
m_hmaps = []
hmap = Cv2tensor(get_hmap(pts, size=self.image_size))
for m_pt in m_pts:
m_hmaps.append(Cv2tensor(get_hmap(m_pt, size=self.image_size)))
            except Exception:
continue
source_img = cv2.imread(source_file)
source_img = cv2.resize(source_img, (self.image_size, self.image_size), interpolation=cv2.INTER_LINEAR)
source_img = source_img / 127.5 - 1
source_img = Cv2tensor(source_img)
target_img = cv2.imread(target_file)
target_img = cv2.resize(target_img, (self.image_size, self.image_size), interpolation=cv2.INTER_LINEAR)
target_img = target_img / 127.5 - 1
target_img = Cv2tensor(target_img)
# source_seg = cv2.imread(self.img_path + f'/seg/{ID}/seg_{source_name}')
# source_seg = Seg2map(source_seg, size=self.image_size)
# source_seg = Cv2tensor(source_seg)
target_seg = cv2.imread(target_seg_file)
target_seg = Seg2map(target_seg, size=self.image_size)
target_seg = Cv2tensor(target_seg)
break
return source_img, hmap, target_img, target_seg, m_hmaps
def __len__(self):
return self.size
def low_pass(data, cutoff):
    assert cutoff < data.shape[0] // 2, 'cutoff should be less than half seq'
    print('Apply low pass with stop band:', cutoff)
    for pt in range(data.shape[1]):
        for dim in range(data.shape[2]):
            pts1 = data[:, pt, dim]
            fft1 = np.fft.fft(pts1)
            # zero the high-frequency bins of the (unshifted) spectrum
            fft1[cutoff:-cutoff] = 0
            recover = np.fft.ifft(fft1)
            # np.fft.ifft returns a complex array; keep only the real part
            data[:, pt, dim] = recover.real
return data
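# Minimal smoke test sketch (illustrative only; assumes landmark sequences of
# shape [n_frames, n_points, n_dims] as consumed by low_pass above):
if __name__ == '__main__':
    t = np.linspace(0, 2 * np.pi, 100)
    noisy = np.sin(t) + 0.1 * np.random.randn(100)
    seq = np.tile(noisy[:, None, None], (1, 68, 2))
    smoothed = low_pass(seq.copy(), cutoff=5)
    print('std before:', seq.std(), 'after:', smoothed.std())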
| [
"numpy.fft.ifft",
"numpy.outer",
"numpy.abs",
"os.path.basename",
"random.sample",
"numpy.fft.fft",
"numpy.float32",
"numpy.floor",
"numpy.zeros",
"scipy.stats.norm.pdf",
"random.choice",
"cv2.imread",
"os.path.isfile",
"numpy.fft.fftshift",
"numpy.linspace",
"glob.glob",
"os.path.sp... | [((354, 369), 'numpy.float32', 'np.float32', (['ang'], {}), '(ang)\n', (364, 369), True, 'import numpy as np\n'), ((408, 423), 'numpy.float32', 'np.float32', (['pts'], {}), '(pts)\n', (418, 423), True, 'import numpy as np\n'), ((544, 568), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', 'size'], {}), '(-3, 3, size)\n', (555, 568), True, 'import numpy as np\n'), ((695, 709), 'scipy.stats.norm.pdf', 'st.norm.pdf', (['x'], {}), '(x)\n', (706, 709), True, 'import scipy.stats as st\n'), ((715, 729), 'scipy.stats.norm.pdf', 'st.norm.pdf', (['y'], {}), '(y)\n', (726, 729), True, 'import scipy.stats as st\n'), ((737, 751), 'numpy.outer', 'np.outer', (['y', 'x'], {}), '(y, x)\n', (745, 751), True, 'import numpy as np\n'), ((1539, 1565), 'numpy.zeros', 'np.zeros', (['[size, size, 68]'], {}), '([size, size, 68])\n', (1547, 1565), True, 'import numpy as np\n'), ((2022, 2058), 'numpy.zeros', 'np.zeros', (['seg.shape'], {'dtype': '"""float32"""'}), "(seg.shape, dtype='float32')\n", (2030, 2058), True, 'import numpy as np\n'), ((890, 914), 'numpy.floor', 'np.floor', (['(x - ksize // 2)'], {}), '(x - ksize // 2)\n', (898, 914), True, 'import numpy as np\n'), ((941, 965), 'numpy.floor', 'np.floor', (['(y - ksize // 2)'], {}), '(y - ksize // 2)\n', (949, 965), True, 'import numpy as np\n'), ((2098, 2160), 'cv2.resize', 'cv2.resize', (['seg_new', '(size, size)'], {'interpolation': 'interpolation'}), '(seg_new, (size, size), interpolation=interpolation)\n', (2108, 2160), False, 'import cv2\n'), ((2736, 2759), 'random.choice', 'random.choice', (['self.idx'], {}), '(self.idx)\n', (2749, 2759), False, 'import random\n'), ((2772, 2827), 'random.sample', 'random.sample', (['self.data[ID]', '(self.consistency_iter + 1)'], {}), '(self.data[ID], self.consistency_iter + 1)\n', (2785, 2827), False, 'import random\n'), ((3535, 3558), 'cv2.imread', 'cv2.imread', (['source_file'], {}), '(source_file)\n', (3545, 3558), False, 'import cv2\n'), ((3574, 3669), 'cv2.resize', 'cv2.resize', (['source_img', '(self.image_size, self.image_size)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(source_img, (self.image_size, self.image_size), interpolation=\n cv2.INTER_LINEAR)\n', (3584, 3669), False, 'import cv2\n'), ((3756, 3779), 'cv2.imread', 'cv2.imread', (['target_file'], {}), '(target_file)\n', (3766, 3779), False, 'import cv2\n'), ((3795, 3890), 'cv2.resize', 'cv2.resize', (['target_img', '(self.image_size, self.image_size)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(target_img, (self.image_size, self.image_size), interpolation=\n cv2.INTER_LINEAR)\n', (3805, 3890), False, 'import cv2\n'), ((4152, 4179), 'cv2.imread', 'cv2.imread', (['target_seg_file'], {}), '(target_seg_file)\n', (4162, 4179), False, 'import cv2\n'), ((4631, 4661), 'glob.glob', 'glob', (["(self.img_path + '/*.txt')"], {}), "(self.img_path + '/*.txt')\n", (4635, 4661), False, 'from glob import glob\n'), ((6311, 6334), 'cv2.imread', 'cv2.imread', (['source_file'], {}), '(source_file)\n', (6321, 6334), False, 'import cv2\n'), ((6351, 6446), 'cv2.resize', 'cv2.resize', (['source_img', '(self.image_size, self.image_size)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(source_img, (self.image_size, self.image_size), interpolation=\n cv2.INTER_LINEAR)\n', (6361, 6446), False, 'import cv2\n'), ((6536, 6559), 'cv2.imread', 'cv2.imread', (['target_file'], {}), '(target_file)\n', (6546, 6559), False, 'import cv2\n'), ((6576, 6671), 'cv2.resize', 'cv2.resize', (['target_img', '(self.image_size, self.image_size)'], {'interpolation': 'cv2.INTER_LINEAR'}), 
'(target_img, (self.image_size, self.image_size), interpolation=\n cv2.INTER_LINEAR)\n', (6586, 6671), False, 'import cv2\n'), ((6939, 6966), 'cv2.imread', 'cv2.imread', (['target_seg_file'], {}), '(target_seg_file)\n', (6949, 6966), False, 'import cv2\n'), ((7463, 7479), 'numpy.fft.fft', 'np.fft.fft', (['pts1'], {}), '(pts1)\n', (7473, 7479), True, 'import numpy as np\n'), ((7494, 7515), 'numpy.fft.fftshift', 'np.fft.fftshift', (['fft1'], {}), '(fft1)\n', (7509, 7515), True, 'import numpy as np\n'), ((7530, 7546), 'numpy.abs', 'np.abs', (['fft1_amp'], {}), '(fft1_amp)\n', (7536, 7546), True, 'import numpy as np\n'), ((7589, 7606), 'numpy.fft.ifft', 'np.fft.ifft', (['fft1'], {}), '(fft1)\n', (7600, 7606), True, 'import numpy as np\n'), ((5030, 5059), 'random.choice', 'random.choice', (['self.data_list'], {}), '(self.data_list)\n', (5043, 5059), False, 'import random\n'), ((5194, 5244), 'random.sample', 'random.sample', (['lineList', '(self.consistency_iter + 1)'], {}), '(lineList, self.consistency_iter + 1)\n', (5207, 5244), False, 'import random\n'), ((5823, 5850), 'os.path.isfile', 'os.path.isfile', (['target_file'], {}), '(target_file)\n', (5837, 5850), False, 'import os\n'), ((5855, 5886), 'os.path.isfile', 'os.path.isfile', (['target_seg_file'], {}), '(target_seg_file)\n', (5869, 5886), False, 'import os\n'), ((5791, 5818), 'os.path.isfile', 'os.path.isfile', (['source_file'], {}), '(source_file)\n', (5805, 5818), False, 'import os\n'), ((5499, 5525), 'os.path.basename', 'path.basename', (['source_name'], {}), '(source_name)\n', (5512, 5525), True, 'import os.path as path\n'), ((5614, 5640), 'os.path.basename', 'path.basename', (['target_name'], {}), '(target_name)\n', (5627, 5640), True, 'import os.path as path\n'), ((5469, 5492), 'os.path.split', 'path.split', (['source_name'], {}), '(source_name)\n', (5479, 5492), True, 'import os.path as path\n'), ((5584, 5607), 'os.path.split', 'path.split', (['target_name'], {}), '(target_name)\n', (5594, 5607), True, 'import os.path as path\n'), ((5737, 5763), 'os.path.basename', 'path.basename', (['target_name'], {}), '(target_name)\n', (5750, 5763), True, 'import os.path as path\n'), ((5703, 5726), 'os.path.split', 'path.split', (['target_name'], {}), '(target_name)\n', (5713, 5726), True, 'import os.path as path\n')] |
import sys
sys.path.append('../../')
import cnvfc
import numpy as np
import pandas as pd
import pathlib as pal
root_p = pal.Path('../../data/')
profile_p = root_p / 'processed/fc_profiles/cnv_FC_profile.tsv'
connectomes_p = root_p / 'processed/residual_connectomes/icc_residual_connectomes.npy'
out_p = root_p / 'processed/weights/'
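# Lower-triangular mask (diagonal included) selects each undirected connection
# exactly once when mapping between vectorized profiles and 64x64 matrices.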
conn_mask = np.tril(np.ones((64, 64))).astype(bool)
profile = pd.read_csv(profile_p, sep='\t')
profile_mat = cnvfc.tools.conn2mat(profile.betas.values, conn_mask)
connectomes = np.load(connectomes_p)
n_sub = connectomes.shape[0]
# Cast the vectorized connectomes back to matrices
connectome_mat = np.array([cnvfc.tools.conn2mat(connectomes[i, :], conn_mask)
                           for i in range(n_sub)])
w = cnvfc.stats.make_weights(connectome_mat, profile_mat)
np.save(out_p / 'icc_cnv_weights.npy', w)
| [
"sys.path.append",
"cnvfc.tools.conn2mat",
"numpy.load",
"numpy.save",
"pandas.read_csv",
"numpy.ones",
"pathlib.Path",
"cnvfc.stats.make_weights"
] | [((11, 36), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (26, 36), False, 'import sys\n'), ((121, 144), 'pathlib.Path', 'pal.Path', (['"""../../data/"""'], {}), "('../../data/')\n", (129, 144), True, 'import pathlib as pal\n'), ((398, 430), 'pandas.read_csv', 'pd.read_csv', (['profile_p'], {'sep': '"""\t"""'}), "(profile_p, sep='\\t')\n", (409, 430), True, 'import pandas as pd\n'), ((445, 498), 'cnvfc.tools.conn2mat', 'cnvfc.tools.conn2mat', (['profile.betas.values', 'conn_mask'], {}), '(profile.betas.values, conn_mask)\n', (465, 498), False, 'import cnvfc\n'), ((514, 536), 'numpy.load', 'np.load', (['connectomes_p'], {}), '(connectomes_p)\n', (521, 536), True, 'import numpy as np\n'), ((766, 819), 'cnvfc.stats.make_weights', 'cnvfc.stats.make_weights', (['connectome_mat', 'profile_mat'], {}), '(connectome_mat, profile_mat)\n', (790, 819), False, 'import cnvfc\n'), ((820, 861), 'numpy.save', 'np.save', (["(out_p / 'icc_cnv_weights.npy')", 'w'], {}), "(out_p / 'icc_cnv_weights.npy', w)\n", (827, 861), True, 'import numpy as np\n'), ((644, 694), 'cnvfc.tools.conn2mat', 'cnvfc.tools.conn2mat', (['connectomes[i, :]', 'conn_mask'], {}), '(connectomes[i, :], conn_mask)\n', (664, 694), False, 'import cnvfc\n'), ((355, 372), 'numpy.ones', 'np.ones', (['(64, 64)'], {}), '((64, 64))\n', (362, 372), True, 'import numpy as np\n')] |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import numpy as np
import matplotlib.pyplot as plt
from indra import trips
from indra.assemblers import PysbAssembler
from indra.util.plot_formatting import *
from pysb import Observable, Parameter
from pysb.integrate import Solver
def assemble_model(model_name, reread=False):
xml_fname = model_name + '.xml'
if not reread:
print('Processing %s' % xml_fname)
if os.path.exists(xml_fname):
with open(xml_fname, 'rb') as fh:
tp = trips.process_xml(fh.read())
else:
reread = True
if reread:
fname = model_name + '.txt'
print('Reading %s' % fname)
with open(fname, 'rb') as fh:
tp = trips.process_text(fh.read(), xml_fname)
print('Assembling statements:')
for i, st in enumerate(tp.statements):
print('%d: %s' % (i, st))
print('----------------------')
pa = PysbAssembler()
pa.add_statements(tp.statements)
model = pa.make_model()
model.name = model_name
p53 = model.monomers['TP53']
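    # Observable tracking the total amount of active p53 over time; this is
    # the readout plotted by run_model below.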
obs = Observable(b'p53_active', p53(activity='active'))
model.add_component(obs)
if not model_name.endswith('var'):
model.parameters['kf_aa_act_1'].value = 5e-06
model.parameters['kf_pt_act_1'].value = 1e-05
if model_name == 'p53_ATM':
model.add_component(Parameter('ATMa_0', 1))
atm = model.monomers['ATM']
model.initial(atm(activity='active'),
model.parameters['ATMa_0'])
model.parameters['kf_pa_act_1'].value = 1e-04
obs = Observable(b'atm_active', atm(activity='active'))
model.add_component(obs)
if model_name == 'p53_ATR':
model.add_component(Parameter('ATRa_0', 1))
atr = model.monomers['ATR']
model.initial(atr(activity='active'),
model.parameters['ATRa_0'])
obs = Observable(b'atr_active', atr(activity='active'))
model.add_component(obs)
if model_name == 'p53_ATM_var':
#model.add_component(Parameter('ATMa_0', 1))
#atm = model.monomers['ATM']
#model.initial(atm(activity='active'),
# model.parameters['ATMa_0'])
model.add_component(Parameter('ATMa_0', 1))
atm = model.monomers['ATM']
model.initial(atm(phospho='p'),
model.parameters['ATMa_0'])
model.parameters['kf_pa_dephosphorylation_1'].value = 1e-04
model.parameters['MDM2_0'].value = 0
model.parameters['kf_m_deg_1'].value = 8e-01
model.parameters['kf_tm_synth_1'].value = 0.2
model.parameters['kf_aa_phosphorylation_1'].value = 5e-06
obs = Observable(b'atm_active', atm(phospho='p'))
model.add_component(obs)
pa.model = model
pa.save_model('%s.py' % model_name)
return model
def run_model(model):
sim_hours = 200
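    # simulate sim_hours hours with one time point per simulated minute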
ts = np.linspace(0, sim_hours*3600, sim_hours*60)
solver = Solver(model, ts)
solver.run()
plt.figure(figsize=(2,2), dpi=300)
set_fig_params()
plt.plot(ts, solver.yobs['p53_active'], 'r')
#if model.name == 'p53_ATR':
# plt.plot(ts, solver.yobs['atr_active'], 'b')
#else:
# plt.plot(ts, solver.yobs['atm_active'], 'b')
plt.xticks([])
plt.xlabel('Time (a.u.)', fontsize=12)
plt.ylabel('Active p53', fontsize=12)
plt.yticks([])
plt.savefig(model.name + '.pdf')
return ts, solver
if __name__ == '__main__':
reread = False
#model_names = ['p53_ATR', 'p53_ATM', 'p53_ATM_var']
model_names = ['p53_ATM_var']
for model_name in model_names:
model = assemble_model(model_name, reread=reread)
ts, solver = run_model(model)
| [
"matplotlib.pyplot.savefig",
"pysb.Parameter",
"matplotlib.pyplot.plot",
"pysb.integrate.Solver",
"matplotlib.pyplot.yticks",
"os.path.exists",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"indra.assemblers.PysbAssembler",
"matplotlib.py... | [((1008, 1023), 'indra.assemblers.PysbAssembler', 'PysbAssembler', ([], {}), '()\n', (1021, 1023), False, 'from indra.assemblers import PysbAssembler\n'), ((2976, 3024), 'numpy.linspace', 'np.linspace', (['(0)', '(sim_hours * 3600)', '(sim_hours * 60)'], {}), '(0, sim_hours * 3600, sim_hours * 60)\n', (2987, 3024), True, 'import numpy as np\n'), ((3034, 3051), 'pysb.integrate.Solver', 'Solver', (['model', 'ts'], {}), '(model, ts)\n', (3040, 3051), False, 'from pysb.integrate import Solver\n'), ((3073, 3108), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, 2)', 'dpi': '(300)'}), '(figsize=(2, 2), dpi=300)\n', (3083, 3108), True, 'import matplotlib.pyplot as plt\n'), ((3133, 3177), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', "solver.yobs['p53_active']", '"""r"""'], {}), "(ts, solver.yobs['p53_active'], 'r')\n", (3141, 3177), True, 'import matplotlib.pyplot as plt\n'), ((3334, 3348), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (3344, 3348), True, 'import matplotlib.pyplot as plt\n'), ((3353, 3391), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (a.u.)"""'], {'fontsize': '(12)'}), "('Time (a.u.)', fontsize=12)\n", (3363, 3391), True, 'import matplotlib.pyplot as plt\n'), ((3396, 3433), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Active p53"""'], {'fontsize': '(12)'}), "('Active p53', fontsize=12)\n", (3406, 3433), True, 'import matplotlib.pyplot as plt\n'), ((3438, 3452), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3448, 3452), True, 'import matplotlib.pyplot as plt\n'), ((3457, 3489), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(model.name + '.pdf')"], {}), "(model.name + '.pdf')\n", (3468, 3489), True, 'import matplotlib.pyplot as plt\n'), ((502, 527), 'os.path.exists', 'os.path.exists', (['xml_fname'], {}), '(xml_fname)\n', (516, 527), False, 'import os\n'), ((1444, 1466), 'pysb.Parameter', 'Parameter', (['"""ATMa_0"""', '(1)'], {}), "('ATMa_0', 1)\n", (1453, 1466), False, 'from pysb import Observable, Parameter\n'), ((1812, 1834), 'pysb.Parameter', 'Parameter', (['"""ATRa_0"""', '(1)'], {}), "('ATRa_0', 1)\n", (1821, 1834), False, 'from pysb import Observable, Parameter\n'), ((2318, 2340), 'pysb.Parameter', 'Parameter', (['"""ATMa_0"""', '(1)'], {}), "('ATMa_0', 1)\n", (2327, 2340), False, 'from pysb import Observable, Parameter\n')] |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
'''
Vasp CHGCAR file format
See also
https://cms.mpi.univie.ac.at/vasp/vasp/CHGCAR_file.html
'''
import collections
import time
import numpy
import pyscf
from pyscf import lib
from pyscf.pbc import gto as pbcgto
from pyscf.pbc.dft import numint, gen_grid
from pyscf.tools import cubegen
def density(cell, outfile, dm, nx=60, ny=60, nz=60, moN=-1, fileFormat="cube"):  # note: moN and fileFormat are currently unused
    '''Calculate the electron density and write it out in CHGCAR format.
Args:
cell : Cell
pbc Cell
outfile : str
            Name of the CHGCAR-format file to be written.
dm : ndarray
Density matrix of molecule.
Kwargs:
nx : int
Number of grid point divisions in x direction.
            Note this is a function of the molecule's size; a larger molecule
will have a coarser representation than a smaller one for the
same value.
ny : int
Number of grid point divisions in y direction.
nz : int
Number of grid point divisions in z direction.
Returns:
        No return value. This function writes out a VASP CHGCAR-like file
        (with phase if desired); it can be opened in VESTA, VMD, and many
        other visualization programs.
Examples:
>>> # generates the first MO from the list of mo_coefficents
>>> from pyscf.pbc import gto, scf
>>> from pyscf.tools import chgcar
>>> cell = gto.M(atom='H 0 0 0; H 0 0 1', a=numpy.eye(3)*3)
>>> mf = scf.RHF(cell).run()
>>> chgcar.density(cell, 'h2.CHGCAR', mf.make_rdm1())
'''
    assert isinstance(cell, pbcgto.Cell)
cc = CHGCAR(cell, nx=nx, ny=ny, nz=nz)
coords = cc.get_coords()
ngrids = cc.get_ngrids()
blksize = min(8000, ngrids)
rho = numpy.empty(ngrids)
for ip0, ip1 in lib.prange(0, ngrids, blksize):
ao = numint.eval_ao(cell, coords[ip0:ip1])
rho[ip0:ip1] = numint.eval_rho(cell, ao, dm)
rho = rho.reshape(nx,ny,nz)
cc.write(rho, outfile)
def orbital(cell, outfile, coeff, nx=60, ny=60, nz=60):
'''Calculate orbital value on real space grid and write out in
CHGCAR format.
Args:
cell : Cell
pbc Cell
outfile : str
            Name of the CHGCAR-format file to be written.
        coeff : 1D array
            Orbital coefficients (a single column of ``mo_coeff``).
Kwargs:
nx : int
Number of grid point divisions in x direction.
            Note this is a function of the molecule's size; a larger molecule
will have a coarser representation than a smaller one for the
same value.
ny : int
Number of grid point divisions in y direction.
nz : int
Number of grid point divisions in z direction.
Returns:
        No return value. This function writes out a VASP CHGCAR-like file
        (with phase if desired); it can be opened in VESTA, VMD, and many
        other visualization programs.
Examples:
>>> # generates the first MO from the list of mo_coefficents
>>> from pyscf.pbc import gto, scf
>>> from pyscf.tools import chgcar
>>> cell = gto.M(atom='H 0 0 0; H 0 0 1', a=numpy.eye(3)*3)
>>> mf = scf.RHF(cell).run()
>>> chgcar.orbital(cell, 'h2_mo1.CHGCAR', mf.mo_coeff[:,0])
'''
    assert isinstance(cell, pbcgto.Cell)
cc = CHGCAR(cell, nx=nx, ny=ny, nz=nz)
coords = cc.get_coords()
ngrids = cc.get_ngrids()
blksize = min(8000, ngrids)
orb_on_grid = numpy.empty(ngrids)
for ip0, ip1 in lib.prange(0, ngrids, blksize):
ao = numint.eval_ao(cell, coords[ip0:ip1])
orb_on_grid[ip0:ip1] = numpy.dot(ao, coeff)
orb_on_grid = orb_on_grid.reshape(nx,ny,nz)
cc.write(orb_on_grid, outfile, comment='Orbital value in real space (1/Bohr^3)')
class CHGCAR(cubegen.Cube):
''' Read-write of the Vasp CHGCAR files '''
def __init__(self, cell, nx=60, ny=60, nz=60):
self.nx = nx
self.ny = ny
self.nz = nz
self.cell = cell
self.box = cell.lattice_vectors()
self.boxorig = numpy.zeros(3)
self.xs = numpy.arange(nx) * (1./nx)
self.ys = numpy.arange(ny) * (1./ny)
self.zs = numpy.arange(nz) * (1./nz)
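        # fractional grid coordinates in [0, 1); mapped to Cartesian in get_coords()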
    def get_coords(self):
""" Result: set of coordinates to compute a field which is to be stored
in the file.
"""
xyz = lib.cartesian_prod((self.xs, self.ys, self.zs))
coords = numpy.dot(xyz, self.box)
return numpy.asarray(coords, order='C')
def write(self, field, fname, comment=None):
""" Result: .vasp file with the field in the file fname. """
assert(field.ndim == 3)
assert(field.shape == (self.nx, self.ny, self.nz))
if comment is None:
comment = 'VASP file: Electron density in real space (e/Bohr^3) '
cell = self.cell
# See CHGCAR format https://cms.mpi.univie.ac.at/vasp/vasp/CHGCAR_file.html
field = field * cell.vol
boxA = self.box * lib.param.BOHR
atomList= [cell.atom_pure_symbol(i) for i in range(cell.natm)]
Axyz = zip(atomList, cell.atom_coords().tolist())
Axyz = sorted(Axyz, key = lambda x: x[0])
swappedCoords = [(vec[1]+self.boxorig) * lib.param.BOHR for vec in Axyz]
vaspAtomicInfo = collections.Counter([xyz[0] for xyz in Axyz])
vaspAtomicInfo = sorted(vaspAtomicInfo.items())
with open(fname, 'w') as f:
f.write(comment)
f.write('PySCF Version: %s Date: %s\n' % (pyscf.__version__, time.ctime()))
f.write('1.0000000000\n')
f.write('%14.8f %14.8f %14.8f \n' % (boxA[0,0],boxA[0,1],boxA[0,2]))
f.write('%14.8f %14.8f %14.8f \n' % (boxA[1,0],boxA[1,1],boxA[1,2]))
f.write('%14.8f %14.8f %14.8f \n' % (boxA[2,0],boxA[2,1],boxA[2,2]))
f.write(''.join(['%5.3s'%atomN[0] for atomN in vaspAtomicInfo]) + '\n')
f.write(''.join(['%5d'%atomN[1] for atomN in vaspAtomicInfo]) + '\n')
f.write('Cartesian \n')
for ia in range(cell.natm):
f.write(' %14.8f %14.8f %14.8f\n' % tuple(swappedCoords[ia]))
f.write('\n')
f.write('%6.5s %6.5s %6.5s \n' % (self.nx,self.ny,self.nz))
fmt = ' %14.8e '
            for iz in range(self.nz):  # z varies slowest in the CHGCAR layout
                for iy in range(self.ny):
                    f.write('\n')
                    for ix in range(self.nx):  # x varies fastest
f.write(fmt % field[ix,iy,iz])
if __name__ == '__main__':
from pyscf.pbc import gto, scf
from pyscf.tools import chgcar
cell = gto.M(atom='H 0 0 0; H 0 0 1', a=numpy.eye(3)*3)
mf = scf.RHF(cell).run()
chgcar.density(cell, 'h2.CHGCAR', mf.make_rdm1()) #makes total density
chgcar.orbital(cell, 'h2_mo1.CHGCAR', mf.mo_coeff[:,0]) # makes mo#1 (sigma)
chgcar.orbital(cell, 'h2_mo2.CHGCAR', mf.mo_coeff[:,1]) # makes mo#2 (sigma*)
| [
"numpy.eye",
"pyscf.lib.prange",
"numpy.empty",
"numpy.asarray",
"collections.Counter",
"pyscf.pbc.dft.numint.eval_rho",
"numpy.zeros",
"time.ctime",
"pyscf.pbc.dft.numint.eval_ao",
"numpy.arange",
"numpy.dot",
"pyscf.tools.chgcar.orbital",
"pyscf.pbc.scf.RHF",
"pyscf.lib.cartesian_prod"
] | [((2451, 2470), 'numpy.empty', 'numpy.empty', (['ngrids'], {}), '(ngrids)\n', (2462, 2470), False, 'import numpy\n'), ((2491, 2521), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'ngrids', 'blksize'], {}), '(0, ngrids, blksize)\n', (2501, 2521), False, 'from pyscf import lib\n'), ((4162, 4181), 'numpy.empty', 'numpy.empty', (['ngrids'], {}), '(ngrids)\n', (4173, 4181), False, 'import numpy\n'), ((4202, 4232), 'pyscf.lib.prange', 'lib.prange', (['(0)', 'ngrids', 'blksize'], {}), '(0, ngrids, blksize)\n', (4212, 4232), False, 'from pyscf import lib\n'), ((7455, 7511), 'pyscf.tools.chgcar.orbital', 'chgcar.orbital', (['cell', '"""h2_mo1.CHGCAR"""', 'mf.mo_coeff[:, 0]'], {}), "(cell, 'h2_mo1.CHGCAR', mf.mo_coeff[:, 0])\n", (7469, 7511), False, 'from pyscf.tools import chgcar\n'), ((7536, 7592), 'pyscf.tools.chgcar.orbital', 'chgcar.orbital', (['cell', '"""h2_mo2.CHGCAR"""', 'mf.mo_coeff[:, 1]'], {}), "(cell, 'h2_mo2.CHGCAR', mf.mo_coeff[:, 1])\n", (7550, 7592), False, 'from pyscf.tools import chgcar\n'), ((2536, 2573), 'pyscf.pbc.dft.numint.eval_ao', 'numint.eval_ao', (['cell', 'coords[ip0:ip1]'], {}), '(cell, coords[ip0:ip1])\n', (2550, 2573), False, 'from pyscf.pbc.dft import numint, gen_grid\n'), ((2597, 2626), 'pyscf.pbc.dft.numint.eval_rho', 'numint.eval_rho', (['cell', 'ao', 'dm'], {}), '(cell, ao, dm)\n', (2612, 2626), False, 'from pyscf.pbc.dft import numint, gen_grid\n'), ((4247, 4284), 'pyscf.pbc.dft.numint.eval_ao', 'numint.eval_ao', (['cell', 'coords[ip0:ip1]'], {}), '(cell, coords[ip0:ip1])\n', (4261, 4284), False, 'from pyscf.pbc.dft import numint, gen_grid\n'), ((4316, 4336), 'numpy.dot', 'numpy.dot', (['ao', 'coeff'], {}), '(ao, coeff)\n', (4325, 4336), False, 'import numpy\n'), ((4755, 4769), 'numpy.zeros', 'numpy.zeros', (['(3)'], {}), '(3)\n', (4766, 4769), False, 'import numpy\n'), ((5061, 5108), 'pyscf.lib.cartesian_prod', 'lib.cartesian_prod', (['(self.xs, self.ys, self.zs)'], {}), '((self.xs, self.ys, self.zs))\n', (5079, 5108), False, 'from pyscf import lib\n'), ((5126, 5150), 'numpy.dot', 'numpy.dot', (['xyz', 'self.box'], {}), '(xyz, self.box)\n', (5135, 5150), False, 'import numpy\n'), ((5166, 5198), 'numpy.asarray', 'numpy.asarray', (['coords'], {'order': '"""C"""'}), "(coords, order='C')\n", (5179, 5198), False, 'import numpy\n'), ((5989, 6034), 'collections.Counter', 'collections.Counter', (['[xyz[0] for xyz in Axyz]'], {}), '([xyz[0] for xyz in Axyz])\n', (6008, 6034), False, 'import collections\n'), ((4788, 4804), 'numpy.arange', 'numpy.arange', (['nx'], {}), '(nx)\n', (4800, 4804), False, 'import numpy\n'), ((4833, 4849), 'numpy.arange', 'numpy.arange', (['ny'], {}), '(ny)\n', (4845, 4849), False, 'import numpy\n'), ((4878, 4894), 'numpy.arange', 'numpy.arange', (['nz'], {}), '(nz)\n', (4890, 4894), False, 'import numpy\n'), ((7356, 7369), 'pyscf.pbc.scf.RHF', 'scf.RHF', (['cell'], {}), '(cell)\n', (7363, 7369), False, 'from pyscf.pbc import gto, scf\n'), ((7331, 7343), 'numpy.eye', 'numpy.eye', (['(3)'], {}), '(3)\n', (7340, 7343), False, 'import numpy\n'), ((6230, 6242), 'time.ctime', 'time.ctime', ([], {}), '()\n', (6240, 6242), False, 'import time\n')] |
import logging
from numpy.random import uniform
from problems.test_case import TestCase, TestCaseTypeEnum
from problems.solutions.plump_moose import moose_body_mass
logger = logging.getLogger(__name__)
FUNCTION_NAME = "moose_body_mass"
INPUT_VARS = ["latitude"]
OUTPUT_VARS = ["mass"]
STATIC_RESOURCES = []
PHYSICAL_CONSTANTS = {}
ATOL = {}
RTOL = {
"mass": 1e-6
}
class TestCaseType(TestCaseTypeEnum):
MALMO = ("Moose from Malmö", 1)
STOCKHOLM = ("Moose from Stockholm", 1)
KIRUNA = ("Moose from Kiruna", 1)
SOUTHERN = ("Southern moose", 1)
NORTHERN = ("Northern moose", 1)
RANDOM = ("Random", 2)
class ProblemTestCase(TestCase):
def input_tuple(self):
return self.input["latitude"],
def output_tuple(self):
return self.output["mass"],
def generate_test_case(test_type):
test_case = ProblemTestCase(test_type)
if test_type is TestCaseType.MALMO:
latitude = 55.60587
elif test_type is TestCaseType.STOCKHOLM:
latitude = 59.33258
elif test_type is TestCaseType.KIRUNA:
latitude = 67.85507
elif test_type is TestCaseType.SOUTHERN:
latitude = uniform(58, 62)
elif test_type is TestCaseType.NORTHERN:
latitude = uniform(62, 66)
elif test_type is TestCaseType.RANDOM:
latitude = uniform(57, 67)
else:
raise ValueError(f"Unrecognized test case: {test_type}")
test_case.input["latitude"] = latitude
test_case.output["mass"] = moose_body_mass(latitude)
return test_case
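# Illustrative usage (not part of the test harness):
#   tc = generate_test_case(TestCaseType.MALMO)
#   tc.input_tuple()  # -> (55.60587,)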
| [
"numpy.random.uniform",
"problems.solutions.plump_moose.moose_body_mass",
"logging.getLogger"
] | [((177, 204), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (194, 204), False, 'import logging\n'), ((1488, 1513), 'problems.solutions.plump_moose.moose_body_mass', 'moose_body_mass', (['latitude'], {}), '(latitude)\n', (1503, 1513), False, 'from problems.solutions.plump_moose import moose_body_mass\n'), ((1161, 1176), 'numpy.random.uniform', 'uniform', (['(58)', '(62)'], {}), '(58, 62)\n', (1168, 1176), False, 'from numpy.random import uniform\n'), ((1242, 1257), 'numpy.random.uniform', 'uniform', (['(62)', '(66)'], {}), '(62, 66)\n', (1249, 1257), False, 'from numpy.random import uniform\n'), ((1321, 1336), 'numpy.random.uniform', 'uniform', (['(57)', '(67)'], {}), '(57, 67)\n', (1328, 1336), False, 'from numpy.random import uniform\n')] |
import numpy as np
import pytest
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs import WCS
from astropy.nddata import NDData
from ..utils import parse_input_data, parse_output_projection
def test_parse_input_data(tmpdir):
header = fits.Header.fromtextfile(get_pkg_data_filename('data/gc_ga.hdr'))
data = np.arange(200).reshape((10, 20))
hdu = fits.ImageHDU(data)
# As HDU
array, coordinate_system = parse_input_data(hdu)
np.testing.assert_allclose(array, data)
# As filename
filename = tmpdir.join('test.fits').strpath
hdu.writeto(filename)
with pytest.raises(ValueError) as exc:
array, coordinate_system = parse_input_data(filename)
assert exc.value.args[0] == ("More than one HDU is present, please specify "
"HDU to use with ``hdu_in=`` option")
array, coordinate_system = parse_input_data(filename, hdu_in=1)
np.testing.assert_allclose(array, data)
# As array, header
array, coordinate_system = parse_input_data((data, header))
np.testing.assert_allclose(array, data)
# As array, WCS
wcs = WCS(hdu.header)
array, coordinate_system = parse_input_data((data, wcs))
np.testing.assert_allclose(array, data)
ndd = NDData(data, wcs=wcs)
array, coordinate_system = parse_input_data(ndd)
np.testing.assert_allclose(array, data)
assert coordinate_system is wcs
# Invalid
with pytest.raises(TypeError) as exc:
parse_input_data(data)
assert exc.value.args[0] == ("input_data should either be an HDU object or "
"a tuple of (array, WCS) or (array, Header)")
def test_parse_output_projection(tmpdir):
header = fits.Header.fromtextfile(get_pkg_data_filename('data/gc_ga.hdr'))
wcs = WCS(header)
# As header
with pytest.raises(ValueError) as exc:
parse_output_projection(header)
assert exc.value.args[0] == ("Need to specify shape since output header "
"does not contain complete shape information")
parse_output_projection(header, shape_out=(200, 200))
header['NAXIS'] = 2
header['NAXIS1'] = 200
header['NAXIS2'] = 300
parse_output_projection(header)
# As WCS
with pytest.raises(ValueError) as exc:
parse_output_projection(wcs)
assert exc.value.args[0] == ("Need to specify shape_out when specifying "
"output_projection as WCS object")
parse_output_projection(wcs, shape_out=(200, 200))
| [
"astropy.io.fits.ImageHDU",
"astropy.nddata.NDData",
"astropy.utils.data.get_pkg_data_filename",
"astropy.wcs.WCS",
"pytest.raises",
"numpy.arange",
"numpy.testing.assert_allclose"
] | [((412, 431), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', (['data'], {}), '(data)\n', (425, 431), False, 'from astropy.io import fits\n'), ((503, 542), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['array', 'data'], {}), '(array, data)\n', (529, 542), True, 'import numpy as np\n'), ((967, 1006), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['array', 'data'], {}), '(array, data)\n', (993, 1006), True, 'import numpy as np\n'), ((1099, 1138), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['array', 'data'], {}), '(array, data)\n', (1125, 1138), True, 'import numpy as np\n'), ((1170, 1185), 'astropy.wcs.WCS', 'WCS', (['hdu.header'], {}), '(hdu.header)\n', (1173, 1185), False, 'from astropy.wcs import WCS\n'), ((1251, 1290), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['array', 'data'], {}), '(array, data)\n', (1277, 1290), True, 'import numpy as np\n'), ((1302, 1323), 'astropy.nddata.NDData', 'NDData', (['data'], {'wcs': 'wcs'}), '(data, wcs=wcs)\n', (1308, 1323), False, 'from astropy.nddata import NDData\n'), ((1381, 1420), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['array', 'data'], {}), '(array, data)\n', (1407, 1420), True, 'import numpy as np\n'), ((1839, 1850), 'astropy.wcs.WCS', 'WCS', (['header'], {}), '(header)\n', (1842, 1850), False, 'from astropy.wcs import WCS\n'), ((315, 354), 'astropy.utils.data.get_pkg_data_filename', 'get_pkg_data_filename', (['"""data/gc_ga.hdr"""'], {}), "('data/gc_ga.hdr')\n", (336, 354), False, 'from astropy.utils.data import get_pkg_data_filename\n'), ((646, 671), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (659, 671), False, 'import pytest\n'), ((1481, 1505), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1494, 1505), False, 'import pytest\n'), ((1788, 1827), 'astropy.utils.data.get_pkg_data_filename', 'get_pkg_data_filename', (['"""data/gc_ga.hdr"""'], {}), "('data/gc_ga.hdr')\n", (1809, 1827), False, 'from astropy.utils.data import get_pkg_data_filename\n'), ((1878, 1903), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1891, 1903), False, 'import pytest\n'), ((2309, 2334), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2322, 2334), False, 'import pytest\n'), ((368, 382), 'numpy.arange', 'np.arange', (['(200)'], {}), '(200)\n', (377, 382), True, 'import numpy as np\n')] |
from EQTransformer.core.EqT_utils import f1, SeqSelfAttention, FeedForward, LayerNormalization
from EQTransformer.core.mseed_predictor import (
mseed_predictor,
_mseed2nparry,
PreLoadGeneratorTest,
_picker,
_get_snr,
_output_writter_prediction,
_plotter_prediction,
_resampling,
)
import keras
from keras.models import load_model
from keras.optimizers import Adam
from keras.engine.training_utils import iter_sequence_infinite
import keras2onnx
import platform
from os import listdir
from os.path import join
import pprint as pp
import numpy as np
import onnxruntime
import sys
import os
import csv
import shutil
import time
import pandas as pd
import json
import obspy
from obspy import read
"""
params_pred = {'batch_size': 500,
'norm_mode': 'std'}
args = {'input_dir': 'downloads_mseeds',
'input_model': 'EqT_model.h5',
'stations_json': 'station_list.json',
'output_dir': 'detections2',
'loss_weights': [0.02, 0.40, 0.58],
'detection_threshold': 0.3,
'P_threshold': 0.1,
'S_threshold': 0.1,
'number_of_plots': 10,
'plot_mode': 'time_frequency',
'normalization_mode': 'std',
'batch_size': 500,
'overlap': 0.3,
'gpuid': None,
'gpu_limit': None}
overwrite = False
"""
params_pred = {'batch_size': 1,
'norm_mode': 'std'}
args = {'input_dir': 'test_dataset',
'input_model': 'EqT_model.h5',
'stations_json': 'test_dataset.json',
'output_dir': 'test_detections',
'loss_weights': [0.02, 0.40, 0.58],
'detection_threshold': 0.3,
'P_threshold': 0.1,
'S_threshold': 0.1,
'number_of_plots': 10,
'plot_mode': 'time_frequency',
'normalization_mode': 'std',
'batch_size': 1,
'overlap': 0.3,
'gpuid': None,
'gpu_limit': None}
overwrite = False
# ONNX Runtime stand-in for Keras' predict_generator (duck-typed replacement)
def onnx_predict_generator(pred_generator, sess):
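    """Run inference with an ONNX Runtime session over a Keras Sequence and
    return the three output arrays (detection, P pick, S pick), mirroring
    ``model.predict_generator``. The feed name 'input' below must match the
    input name of the exported ONNX graph."""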
all_outs = []
out_pred_generator = iter_sequence_infinite(pred_generator)
steps_done = 0
steps = len(pred_generator)
while steps_done < steps:
generator_output = next(out_pred_generator)
x = generator_output
x_test = list(x.values())[0].astype(np.float32)
outs = sess.run(None, input_feed={'input': x_test})
if not all_outs:
for out in outs:
all_outs.append([])
for i, out in enumerate(outs):
all_outs[i].append(out)
steps_done += 1
results = [np.concatenate(out) for out in all_outs]
#print(f'{len(results[0])},{len(results[1])},{len(results[2])}')
return results[0], results[1], results[2]
def mseed2nparry_one_minute(args, matching, time_slots, comp_types, st_name):
    '''Read miniSEED files given a list of matching file names; return the
    metadata dict, the updated time-slot and component-type lists, and a dict
    of numpy arrays (one 6000x3 window keyed by its start time).'''
    # close the station file promptly instead of leaking the handle
    with open(args['stations_json']) as json_file:
        stations_ = json.load(json_file)
st = obspy.core.Stream()
tsw = False
for m in matching:
temp_st = read(os.path.join(str(args['input_dir']), m),debug_headers=True)
if tsw == False and temp_st:
tsw = True
for tr in temp_st:
time_slots.append((tr.stats.starttime, tr.stats.endtime))
try:
temp_st.merge(fill_value=0)
except Exception:
            temp_st = _resampling(temp_st)
temp_st.merge(fill_value=0)
temp_st.detrend('demean')
st += temp_st
st.filter(type='bandpass', freqmin = 1.0, freqmax = 45, corners=2, zerophase=True)
st.taper(max_percentage=0.001, type='cosine', max_length=2)
if len([tr for tr in st if tr.stats.sampling_rate != 100.0]) != 0:
try:
st.interpolate(100, method="linear")
except Exception:
st=_resampling(st)
st.trim(min([tr.stats.starttime for tr in st]), max([tr.stats.endtime for tr in st]), pad=True, fill_value=0)
start_time = st[0].stats.starttime
end_time = st[0].stats.endtime
meta = {"start_time":start_time,
"end_time": end_time,
"trace_name":m
}
chanL = [tr.stats.channel[-1] for tr in st]
comp_types.append(len(chanL))
tim_shift = int(60-(args['overlap']*60))
next_slice = start_time+60
data_set={}
sl = 0; st_times = []
#while next_slice <= end_time:
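    # The sliding-window loop above is intentionally disabled: this one-minute
    # variant extracts a single 60 s window, i.e. 60 s x 100 Hz = 6000 samples.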
npz_data = np.zeros([6000, 3])
st_times.append(str(start_time).replace('T', ' ').replace('Z', ''))
w = st.slice(start_time, next_slice)
if 'Z' in chanL:
npz_data[:,2] = w[chanL.index('Z')].data[:6000]
if ('E' in chanL) or ('1' in chanL):
try:
npz_data[:,0] = w[chanL.index('E')].data[:6000]
except Exception:
npz_data[:,0] = w[chanL.index('1')].data[:6000]
if ('N' in chanL) or ('2' in chanL):
try:
npz_data[:,1] = w[chanL.index('N')].data[:6000]
except Exception:
npz_data[:,1] = w[chanL.index('2')].data[:6000]
data_set.update( {str(start_time).replace('T', ' ').replace('Z', '') : npz_data})
start_time = start_time+tim_shift
next_slice = next_slice+tim_shift
sl += 1
meta["trace_start_time"] = st_times
try:
meta["receiver_code"]=st[0].stats.station
meta["instrument_type"]=st[0].stats.channel[:2]
meta["network_code"]=stations_[st[0].stats.station]['network']
meta["receiver_latitude"]=stations_[st[0].stats.station]['coords'][0]
meta["receiver_longitude"]=stations_[st[0].stats.station]['coords'][1]
meta["receiver_elevation_m"]=stations_[st[0].stats.station]['coords'][2]
except Exception:
meta["receiver_code"]=st_name
meta["instrument_type"]=stations_[st_name]['channels'][0][:2]
meta["network_code"]=stations_[st_name]['network']
meta["receiver_latitude"]=stations_[st_name]['coords'][0]
meta["receiver_longitude"]=stations_[st_name]['coords'][1]
meta["receiver_elevation_m"]=stations_[st_name]['coords'][2]
return meta, time_slots, comp_types, data_set
if __name__ == '__main__':
# original
# detection.ipynb
print('Keras:')
model = load_model('EqT_model.h5',
custom_objects={
'SeqSelfAttention': SeqSelfAttention,
'FeedForward': FeedForward,
'LayerNormalization': LayerNormalization,
'f1': f1})
model.compile(loss = ['binary_crossentropy', 'binary_crossentropy', 'binary_crossentropy'],
loss_weights = [0.02, 0.40, 0.58],
optimizer = Adam(lr = 0.001),
metrics = [f1])
out_dir = os.path.join(os.getcwd(), str(args['output_dir']))
if os.path.isdir(out_dir):
# print('============================================================================')
# print(f' *** {out_dir} already exists!')
print(f"*** {out_dir} already exists!")
if overwrite == True:
inp = "y"
print("Overwriting your previous results")
else:
inp = input(" --> Type (Yes or y) to create a new empty directory! This will erase your previous results so make a copy if you want them.")
if inp.lower() == "yes" or inp.lower() == "y":
shutil.rmtree(out_dir)
os.makedirs(out_dir)
else:
print("Okay.")
sys.exit(1)
if platform.system() == 'Windows':
station_list = [ev.split(".")[0] for ev in listdir(args['input_dir']) if ev.split("\\")[-1] != ".DS_Store"]
else:
station_list = [ev.split(".")[0] for ev in listdir(args['input_dir']) if ev.split("/")[-1] != ".DS_Store"]
station_list = sorted(set(station_list))
for ct, st in enumerate(station_list):
# create output directories
save_dir = os.path.join(out_dir, str(st)+'_outputs')
save_figs = os.path.join(save_dir, 'figures')
if os.path.isdir(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
if args['number_of_plots']:
os.makedirs(save_figs)
plt_n = 0
csvPr_gen = open(os.path.join(save_dir,'X_prediction_results.csv'), 'w')
predict_writer = csv.writer(csvPr_gen, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
predict_writer.writerow(['file_name',
'network',
'station',
'instrument_type',
'station_lat',
'station_lon',
'station_elv',
'event_start_time',
'event_end_time',
'detection_probability',
'detection_uncertainty',
'p_arrival_time',
'p_probability',
'p_uncertainty',
'p_snr',
's_arrival_time',
's_probability',
's_uncertainty',
's_snr'
])
csvPr_gen.flush()
print(f"Started working on {st}, {ct+1} out of {len(station_list)} ...", flush=True)
start_Predicting = time.time()
if platform.system() == 'Windows':
file_list = [join(st, ev) for ev in listdir(args["input_dir"]+"\\"+st) if ev.split("\\")[-1].split(".")[-1].lower() == "mseed"]
else:
file_list = [join(st, ev) for ev in listdir(args["input_dir"]+"/"+st) if ev.split("/")[-1].split(".")[-1].lower() == "mseed"]
mon = [ev.split('__')[1]+'__'+ev.split('__')[2] for ev in file_list]
uni_list = list(set(mon))
uni_list.sort()
time_slots, comp_types = [], []
for _, month in enumerate(uni_list):
matching = [s for s in file_list if month in s]
print(f'{month}')
#meta, time_slots, comp_types, data_set = _mseed2nparry(args, matching, time_slots, comp_types, st)
meta, time_slots, comp_types, data_set = mseed2nparry_one_minute(args, matching, time_slots, comp_types, st)
pred_generator = PreLoadGeneratorTest(meta["trace_start_time"], data_set, **params_pred)
predD, predP, predS = model.predict_generator(pred_generator)
detection_memory = []
for ix in range(len(predD)):
matches, pick_errors, yh3 = _picker(args, predD[ix][:, 0], predP[ix][:, 0], predS[ix][:, 0])
if (len(matches) >= 1) and ((matches[list(matches)[0]][3] or matches[list(matches)[0]][6])):
snr = [_get_snr(data_set[meta["trace_start_time"][ix]], matches[list(matches)[0]][3], window = 100), _get_snr(data_set[meta["trace_start_time"][ix]], matches[list(matches)[0]][6], window = 100)]
pre_write = len(detection_memory)
detection_memory=_output_writter_prediction(meta, predict_writer, csvPr_gen, matches, snr, detection_memory, ix)
post_write = len(detection_memory)
if plt_n < args['number_of_plots'] and post_write > pre_write:
_plotter_prediction(data_set[meta["trace_start_time"][ix]], args, save_figs, predD[ix][:, 0], predP[ix][:, 0], predS[ix][:, 0], meta["trace_start_time"][ix], matches)
plt_n += 1
end_Predicting = time.time()
delta = (end_Predicting - start_Predicting)
hour = int(delta / 3600)
delta -= hour * 3600
minute = int(delta / 60)
delta -= minute * 60
seconds = delta
dd = pd.read_csv(os.path.join(save_dir,'X_prediction_results.csv'))
print(f"Finished the prediction in: {hour} hours and {minute} minutes and {round(seconds, 2)} seconds.", flush=True)
print(f'*** Detected: '+str(len(dd))+' events.', flush=True)
print(' *** Wrote the results into --> " ' + str(save_dir)+' "', flush=True)
"""
# ONNX port
print('ONNX:')
sess_options = onnxruntime.SessionOptions()
sess = onnxruntime.InferenceSession('eqt_model.onnx', sess_options)
#sess = onnxruntime.InferenceSession('eqt_optimized.onnx', sess_options)
#args['output_dir'] = 'detections_onnx_optimized'
out_dir = os.path.join(os.getcwd(), str(args['output_dir']))
if os.path.isdir(out_dir):
# print('============================================================================')
# print(f' *** {out_dir} already exists!')
print(f"*** {out_dir} already exists!")
if overwrite == True:
inp = "y"
print("Overwriting your previous results")
else:
inp = input(" --> Type (Yes or y) to create a new empty directory! This will erase your previous results so make a copy if you want them.")
if inp.lower() == "yes" or inp.lower() == "y":
shutil.rmtree(out_dir)
os.makedirs(out_dir)
else:
print("Okay.")
sys.exit(1)
if platform.system() == 'Windows':
station_list = [ev.split(".")[0] for ev in listdir(args['input_dir']) if ev.split("\\")[-1] != ".DS_Store"]
else:
station_list = [ev.split(".")[0] for ev in listdir(args['input_dir']) if ev.split("/")[-1] != ".DS_Store"]
station_list = sorted(set(station_list))
for ct, st in enumerate(station_list):
# create output directories
save_dir = os.path.join(out_dir, str(st)+'_outputs')
save_figs = os.path.join(save_dir, 'figures')
if os.path.isdir(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
if args['number_of_plots']:
os.makedirs(save_figs)
plt_n = 0
csvPr_gen = open(os.path.join(save_dir,'X_prediction_results.csv'), 'w')
predict_writer = csv.writer(csvPr_gen, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
predict_writer.writerow(['file_name',
'network',
'station',
'instrument_type',
'station_lat',
'station_lon',
'station_elv',
'event_start_time',
'event_end_time',
'detection_probability',
'detection_uncertainty',
'p_arrival_time',
'p_probability',
'p_uncertainty',
'p_snr',
's_arrival_time',
's_probability',
's_uncertainty',
's_snr'
])
csvPr_gen.flush()
print(f"Started working on {st}, {ct+1} out of {len(station_list)} ...", flush=True)
start_Predicting = time.time()
if platform.system() == 'Windows':
file_list = [join(st, ev) for ev in listdir(args["input_dir"]+"\\"+st) if ev.split("\\")[-1].split(".")[-1].lower() == "mseed"]
else:
file_list = [join(st, ev) for ev in listdir(args["input_dir"]+"/"+st) if ev.split("/")[-1].split(".")[-1].lower() == "mseed"]
mon = [ev.split('__')[1]+'__'+ev.split('__')[2] for ev in file_list]
uni_list = list(set(mon))
uni_list.sort()
time_slots, comp_types = [], []
for _, month in enumerate(uni_list):
matching = [s for s in file_list if month in s]
print(f'{month}', flush=True)
meta, time_slots, comp_types, data_set = mseed2nparry_one_minute(args, matching, time_slots, comp_types, st)
pred_generator = PreLoadGeneratorTest(meta["trace_start_time"], data_set, **params_pred)
predD, predP, predS = onnx_predict_generator(pred_generator)
detection_memory = []
for ix in range(len(predD)):
matches, pick_errors, yh3 = _picker(args, predD[ix][:, 0], predP[ix][:, 0], predS[ix][:, 0])
if (len(matches) >= 1) and ((matches[list(matches)[0]][3] or matches[list(matches)[0]][6])):
snr = [_get_snr(data_set[meta["trace_start_time"][ix]], matches[list(matches)[0]][3], window = 100), _get_snr(data_set[meta["trace_start_time"][ix]], matches[list(matches)[0]][6], window = 100)]
pre_write = len(detection_memory)
detection_memory=_output_writter_prediction(meta, predict_writer, csvPr_gen, matches, snr, detection_memory, ix)
post_write = len(detection_memory)
if plt_n < args['number_of_plots'] and post_write > pre_write:
_plotter_prediction(data_set[meta["trace_start_time"][ix]], args, save_figs, predD[ix][:, 0], predP[ix][:, 0], predS[ix][:, 0], meta["trace_start_time"][ix], matches)
plt_n += 1
end_Predicting = time.time()
delta = (end_Predicting - start_Predicting)
hour = int(delta / 3600)
delta -= hour * 3600
minute = int(delta / 60)
delta -= minute * 60
seconds = delta
dd = pd.read_csv(os.path.join(save_dir,'X_prediction_results.csv'))
print(f"Finished the prediction in: {hour} hours and {minute} minutes and {round(seconds, 2)} seconds.", flush=True)
print(f'*** Detected: '+str(len(dd))+' events.', flush=True)
print(' *** Wrote the results into --> " ' + str(save_dir)+' "', flush=True)
"""
| [
"keras.models.load_model",
"EQTransformer.core.mseed_predictor._picker",
"obspy.core.Stream",
"shutil.rmtree",
"os.path.join",
"EQTransformer.core.mseed_predictor.PreLoadGeneratorTest",
"csv.writer",
"keras.optimizers.Adam",
"EQTransformer.core.mseed_predictor._resampling",
"platform.system",
"o... | [((2147, 2185), 'keras.engine.training_utils.iter_sequence_infinite', 'iter_sequence_infinite', (['pred_generator'], {}), '(pred_generator)\n', (2169, 2185), False, 'from keras.engine.training_utils import iter_sequence_infinite\n'), ((3108, 3128), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3117, 3128), False, 'import json\n'), ((3139, 3158), 'obspy.core.Stream', 'obspy.core.Stream', ([], {}), '()\n', (3156, 3158), False, 'import obspy\n'), ((4562, 4581), 'numpy.zeros', 'np.zeros', (['[6000, 3]'], {}), '([6000, 3])\n', (4570, 4581), True, 'import numpy as np\n'), ((6353, 6522), 'keras.models.load_model', 'load_model', (['"""EqT_model.h5"""'], {'custom_objects': "{'SeqSelfAttention': SeqSelfAttention, 'FeedForward': FeedForward,\n 'LayerNormalization': LayerNormalization, 'f1': f1}"}), "('EqT_model.h5', custom_objects={'SeqSelfAttention':\n SeqSelfAttention, 'FeedForward': FeedForward, 'LayerNormalization':\n LayerNormalization, 'f1': f1})\n", (6363, 6522), False, 'from keras.models import load_model\n'), ((6961, 6983), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (6974, 6983), False, 'import os\n'), ((2678, 2697), 'numpy.concatenate', 'np.concatenate', (['out'], {}), '(out)\n', (2692, 2697), True, 'import numpy as np\n'), ((6916, 6927), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6925, 6927), False, 'import os\n'), ((7660, 7677), 'platform.system', 'platform.system', ([], {}), '()\n', (7675, 7677), False, 'import platform\n'), ((8140, 8173), 'os.path.join', 'os.path.join', (['save_dir', '"""figures"""'], {}), "(save_dir, 'figures')\n", (8152, 8173), False, 'import os\n'), ((8186, 8209), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (8199, 8209), False, 'import os\n'), ((8257, 8278), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (8268, 8278), False, 'import os\n'), ((8510, 8588), 'csv.writer', 'csv.writer', (['csvPr_gen'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(csvPr_gen, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (8520, 8588), False, 'import csv\n'), ((9715, 9726), 'time.time', 'time.time', ([], {}), '()\n', (9724, 9726), False, 'import time\n'), ((11895, 11906), 'time.time', 'time.time', ([], {}), '()\n', (11904, 11906), False, 'import time\n'), ((6836, 6850), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (6840, 6850), False, 'from keras.optimizers import Adam\n'), ((7528, 7550), 'shutil.rmtree', 'shutil.rmtree', (['out_dir'], {}), '(out_dir)\n', (7541, 7550), False, 'import shutil\n'), ((7565, 7585), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (7576, 7585), False, 'import os\n'), ((7640, 7651), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7648, 7651), False, 'import sys\n'), ((8223, 8246), 'shutil.rmtree', 'shutil.rmtree', (['save_dir'], {}), '(save_dir)\n', (8236, 8246), False, 'import shutil\n'), ((8328, 8350), 'os.makedirs', 'os.makedirs', (['save_figs'], {}), '(save_figs)\n', (8339, 8350), False, 'import os\n'), ((8419, 8469), 'os.path.join', 'os.path.join', (['save_dir', '"""X_prediction_results.csv"""'], {}), "(save_dir, 'X_prediction_results.csv')\n", (8431, 8469), False, 'import os\n'), ((9739, 9756), 'platform.system', 'platform.system', ([], {}), '()\n', (9754, 9756), False, 'import platform\n'), ((10639, 10710), 'EQTransformer.core.mseed_predictor.PreLoadGeneratorTest', 'PreLoadGeneratorTest', (["meta['trace_start_time']", 'data_set'], {}), 
"(meta['trace_start_time'], data_set, **params_pred)\n", (10659, 10710), False, 'from EQTransformer.core.mseed_predictor import mseed_predictor, _mseed2nparry, PreLoadGeneratorTest, _picker, _get_snr, _output_writter_prediction, _plotter_prediction, _resampling\n'), ((12133, 12183), 'os.path.join', 'os.path.join', (['save_dir', '"""X_prediction_results.csv"""'], {}), "(save_dir, 'X_prediction_results.csv')\n", (12145, 12183), False, 'import os\n'), ((3546, 3566), 'EQTransformer.core.mseed_predictor._resampling', '_resampling', (['temp_st'], {}), '(temp_st)\n', (3557, 3566), False, 'from EQTransformer.core.mseed_predictor import mseed_predictor, _mseed2nparry, PreLoadGeneratorTest, _picker, _get_snr, _output_writter_prediction, _plotter_prediction, _resampling\n'), ((3989, 4004), 'EQTransformer.core.mseed_predictor._resampling', '_resampling', (['st'], {}), '(st)\n', (4000, 4004), False, 'from EQTransformer.core.mseed_predictor import mseed_predictor, _mseed2nparry, PreLoadGeneratorTest, _picker, _get_snr, _output_writter_prediction, _plotter_prediction, _resampling\n'), ((7743, 7769), 'os.listdir', 'listdir', (["args['input_dir']"], {}), "(args['input_dir'])\n", (7750, 7769), False, 'from os import listdir\n'), ((7869, 7895), 'os.listdir', 'listdir', (["args['input_dir']"], {}), "(args['input_dir'])\n", (7876, 7895), False, 'from os import listdir\n'), ((9796, 9808), 'os.path.join', 'join', (['st', 'ev'], {}), '(st, ev)\n', (9800, 9808), False, 'from os.path import join\n'), ((9950, 9962), 'os.path.join', 'join', (['st', 'ev'], {}), '(st, ev)\n', (9954, 9962), False, 'from os.path import join\n'), ((10919, 10983), 'EQTransformer.core.mseed_predictor._picker', '_picker', (['args', 'predD[ix][:, 0]', 'predP[ix][:, 0]', 'predS[ix][:, 0]'], {}), '(args, predD[ix][:, 0], predP[ix][:, 0], predS[ix][:, 0])\n', (10926, 10983), False, 'from EQTransformer.core.mseed_predictor import mseed_predictor, _mseed2nparry, PreLoadGeneratorTest, _picker, _get_snr, _output_writter_prediction, _plotter_prediction, _resampling\n'), ((9819, 9857), 'os.listdir', 'listdir', (["(args['input_dir'] + '\\\\' + st)"], {}), "(args['input_dir'] + '\\\\' + st)\n", (9826, 9857), False, 'from os import listdir\n'), ((9973, 10010), 'os.listdir', 'listdir', (["(args['input_dir'] + '/' + st)"], {}), "(args['input_dir'] + '/' + st)\n", (9980, 10010), False, 'from os import listdir\n'), ((11407, 11506), 'EQTransformer.core.mseed_predictor._output_writter_prediction', '_output_writter_prediction', (['meta', 'predict_writer', 'csvPr_gen', 'matches', 'snr', 'detection_memory', 'ix'], {}), '(meta, predict_writer, csvPr_gen, matches, snr,\n detection_memory, ix)\n', (11433, 11506), False, 'from EQTransformer.core.mseed_predictor import mseed_predictor, _mseed2nparry, PreLoadGeneratorTest, _picker, _get_snr, _output_writter_prediction, _plotter_prediction, _resampling\n'), ((11665, 11840), 'EQTransformer.core.mseed_predictor._plotter_prediction', '_plotter_prediction', (["data_set[meta['trace_start_time'][ix]]", 'args', 'save_figs', 'predD[ix][:, 0]', 'predP[ix][:, 0]', 'predS[ix][:, 0]', "meta['trace_start_time'][ix]", 'matches'], {}), "(data_set[meta['trace_start_time'][ix]], args, save_figs,\n predD[ix][:, 0], predP[ix][:, 0], predS[ix][:, 0], meta[\n 'trace_start_time'][ix], matches)\n", (11684, 11840), False, 'from EQTransformer.core.mseed_predictor import mseed_predictor, _mseed2nparry, PreLoadGeneratorTest, _picker, _get_snr, _output_writter_prediction, _plotter_prediction, _resampling\n')] |
#-------by HYH -------#
import numpy as np
p=[0,0.5,0,0.5,0]
u=2
pExact=0.8
pOvershoot=0.1
pUndershoot=0.1
def move(p, u, pExact, pOvershoot, pUndershoot):
    """Histogram-filter motion update: cyclically shift the belief p by u
    cells, accounting for inexact motion (exact/overshoot/undershoot)."""
    n = len(p)
    q = np.zeros(n)
    for i in range(n):
        q[i] = pExact * p[(i - u) % n]                  # moved exactly u cells
        q[i] = q[i] + pOvershoot * p[(i - u - 1) % n]   # overshot by one cell
        q[i] = q[i] + pUndershoot * p[(i - u + 1) % n]  # undershot by one cell
    return q
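# Quick sanity check (illustrative): the cyclic motion update conserves total
# probability, since pExact + pOvershoot + pUndershoot == 1.
assert abs(sum(move(p, u, pExact, pOvershoot, pUndershoot)) - sum(p)) < 1e-12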
q=move(p, u, pExact, pOvershoot, pUndershoot)
print(q) | [
"numpy.zeros"
] | [((165, 176), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (173, 176), True, 'import numpy as np\n')] |
import numpy as np
from sk_dsp_comm import fec_conv
from sk_dsp_comm import digitalcom as dc
np.random.seed(100)
cc = fec_conv.FecConv()
print(cc.Nstates)
import matplotlib.pyplot as plt
import numpy as np
from sk_dsp_comm import fec_conv as fc
SNRdB = np.arange(2,12,.1)
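# In sk_dsp_comm, conv_Pb_bound(R, dfree, Ck, SNRdB, hard_soft) takes a final
# mode flag; here 2 yields the uncoded BPSK reference (Pb_uc) and 1 the
# soft-decision union bound (Pb_s), matching the variable names below.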
Pb_uc = fc.conv_Pb_bound(1/2,5,[1,4,12,32,80,192,448,1024],SNRdB,2)
Pb_s = fc.conv_Pb_bound(1/2,5,[1,4,12,32,80,192,448,1024],SNRdB,1)
plt.figure(figsize=(5,5))
plt.semilogy(SNRdB,Pb_uc)
plt.semilogy(SNRdB,Pb_s)
plt.axis([2,12,1e-7,1e0])
plt.xlabel(r'$E_b/N_0$ (dB)')
plt.ylabel(r'Symbol Error Probability')
#plt.legend(('Uncoded BPSK','R=1/2, K=5, Soft'),loc='best')
plt.grid();
plt.show()
| [
"sk_dsp_comm.fec_conv.conv_Pb_bound",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"sk_dsp_comm.fec_conv.FecConv",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid... | [((95, 114), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (109, 114), True, 'import numpy as np\n'), ((121, 139), 'sk_dsp_comm.fec_conv.FecConv', 'fec_conv.FecConv', ([], {}), '()\n', (137, 139), False, 'from sk_dsp_comm import fec_conv\n'), ((257, 278), 'numpy.arange', 'np.arange', (['(2)', '(12)', '(0.1)'], {}), '(2, 12, 0.1)\n', (266, 278), True, 'import numpy as np\n'), ((284, 356), 'sk_dsp_comm.fec_conv.conv_Pb_bound', 'fc.conv_Pb_bound', (['(1 / 2)', '(5)', '[1, 4, 12, 32, 80, 192, 448, 1024]', 'SNRdB', '(2)'], {}), '(1 / 2, 5, [1, 4, 12, 32, 80, 192, 448, 1024], SNRdB, 2)\n', (300, 356), True, 'from sk_dsp_comm import fec_conv as fc\n'), ((351, 423), 'sk_dsp_comm.fec_conv.conv_Pb_bound', 'fc.conv_Pb_bound', (['(1 / 2)', '(5)', '[1, 4, 12, 32, 80, 192, 448, 1024]', 'SNRdB', '(1)'], {}), '(1 / 2, 5, [1, 4, 12, 32, 80, 192, 448, 1024], SNRdB, 1)\n', (367, 423), True, 'from sk_dsp_comm import fec_conv as fc\n'), ((411, 437), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (421, 437), True, 'import matplotlib.pyplot as plt\n'), ((437, 463), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['SNRdB', 'Pb_uc'], {}), '(SNRdB, Pb_uc)\n', (449, 463), True, 'import matplotlib.pyplot as plt\n'), ((463, 488), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['SNRdB', 'Pb_s'], {}), '(SNRdB, Pb_s)\n', (475, 488), True, 'import matplotlib.pyplot as plt\n'), ((488, 517), 'matplotlib.pyplot.axis', 'plt.axis', (['[2, 12, 1e-07, 1.0]'], {}), '([2, 12, 1e-07, 1.0])\n', (496, 517), True, 'import matplotlib.pyplot as plt\n'), ((514, 542), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$E_b/N_0$ (dB)"""'], {}), "('$E_b/N_0$ (dB)')\n", (524, 542), True, 'import matplotlib.pyplot as plt\n'), ((544, 582), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Symbol Error Probability"""'], {}), "('Symbol Error Probability')\n", (554, 582), True, 'import matplotlib.pyplot as plt\n'), ((644, 654), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (652, 654), True, 'import matplotlib.pyplot as plt\n'), ((656, 666), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (664, 666), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""various utilities not related to optimization"""
from __future__ import (absolute_import, division, print_function,
) #unicode_literals, with_statement)
import os, sys, time
import warnings
import ast # ast.literal_eval is safe eval
import numpy as np
from collections import defaultdict # since Python 2.5
from .python3for2 import abc, range
del absolute_import, division, print_function #, unicode_literals, with_statement
PY2 = sys.version_info[0] == 2
global_verbosity = 1
# array([]) does not work but np.size(.) == 0
# here is the problem:
# bool(array([0])) is False
# bool(list(array([0]))) is True
# bool(list(array([0, 1]))) is True
# bool(array([0, 1])) raises ValueError
#
# "x in emptysets" cannot be well replaced by "not x"
# which is also True for array([]) and None, but also for 0 and False,
# and False for NaN, and an exception for array([0,1]), see also
# http://google-styleguide.googlecode.com/svn/trunk/pyguide.html#True/False_evaluations
def seval(s, *args, **kwargs):
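    """``eval(s)`` guarded by a crude substring blacklist; raise `ValueError`
    if `s` looks unsafe to evaluate. A convenience filter, not a security
    boundary."""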
if any(substring in s for substring in ("import", "sys.", "sys ", "shutil", "val(")):
raise ValueError('"%s" seems unsafe to evaluate' % s)
return eval(s, *args, **kwargs)
def is_(var):
"""intuitive handling of variable truth value also for `numpy` arrays.
Return `True` for any non-empty container, otherwise the truth value of the
scalar `var`.
Caveat of the most unintuitive case: [0] evaluates to True, like [0, 0].
>>> import numpy as np
>>> from cma.utilities.utils import is_
>>> is_({}) or is_(()) or is_(0) or is_(None) or is_(np.array(0))
False
>>> is_({0:0}) and is_((0,)) and is_(np.array([0]))
True
"""
try: # cases: ('', (), [], {}, np.array([]))
return True if len(var) else False
except TypeError: # cases None, False, 0
return True if var else False
def is_one(var):
"""return True if var == 1 or ones vector"""
try: return np.all(np.asarray(var) == 1)
except: return var == 1 # should never happen!?
def is_not(var):
"""see `is_`"""
return not is_(var)
def is_any(var_list):
"""return ``any(is_(v) for v in var_list)``"""
return any(is_(var) for var in var_list)
def is_all(var_list):
"""return ``all(is_(v) for v in var_list)``"""
return all(is_(var) for var in var_list)
def is_str(var):
"""`bytes` (in Python 3) also fit the bill.
>>> from cma.utilities.utils import is_str
>>> assert is_str(b'a') * is_str('a') * is_str(u'a') * is_str(r'b')
>>> assert not is_str([1]) and not is_str(1)
"""
types_ = (bytes, str)
if PY2:
types_ = types_ + (basestring, unicode) # == types.StrTypes
return any(isinstance(var, type_) for type_ in types_)
def is_nan(var):
"""return ``np.isnan(var)`` or `False` if `var` is not numeric"""
try:
return np.isnan(var)
except TypeError:
return False
def is_vector_list(x):
"""make an educated guess whether ``x`` is a list of vectors.
>>> from cma.utilities.utils import is_vector_list as ivl
>>> assert ivl([[0], [0]]) and not ivl([1,2,3])
"""
try:
return np.isscalar(x[0][0])
except:
return False
def as_vector_list(X):
"""a tool to handle a vector or a list of vectors in the same way,
return a list of vectors and a function to revert the "list making".
Useful when we might either have a single solution vector or a
set/list/population of vectors to deal with.
Namely, this function allows to replace a slightly more verbose::
was_list = utils.is_vector_list(X)
X = X if was_list else [X]
# work work work on X, e.g.
res = [x[0] + 1 for x in X]
res = res if was_list else res[0]
with::
X, revert = utils.as_vector_list(X)
# work work work on X, e.g.
res = [x[0] + 2 for x in X]
res, ... = revert(res, ...) # also allows to revert X, if desired
Testing:
>>> from cma.utilities import utils
>>> X = [3] # a single vector
>>> X, revert_vlist = utils.as_vector_list(X) # BEGIN
>>> assert X == [[3]] # a list with one element
>>> # work work work on X as a list of vectors, e.g.
>>> res = [x[0] + 1 for x in X]
>>> X, res = revert_vlist(X, res) # END
>>> assert res == 4
>>> assert X[0] == 3
"""
if is_vector_list(X):
return X, lambda x: x
else:
return [X], lambda *args: args[0][0] if len(args) == 1 else (
arg[0] for arg in args)
def rglen(ar):
"""return generator ``range(len(.))`` with shortcut ``rglen(.)``
"""
return range(len(ar))
def recycled(vec, dim=None, as_=None):
"""return ``vec`` with the last element recycled to ``dim`` if
``len(vec)`` doesn't fail, else ``vec``.
If ``dim`` is not given, ``len(as_)`` is used if available, else a
scalar is returned.
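    Examples (behavior as implemented below):
    >>> from cma.utilities.utils import recycled
    >>> list(recycled([1, 2], dim=4))
    [1, 2, 2, 2]
    >>> recycled([1, 2, 3], dim=2)
    [1, 2]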
"""
try:
len_ = len(vec)
except TypeError:
return vec
if dim is None:
try:
dim = len(as_)
except TypeError:
return vec[0]
if dim == len_:
return vec
elif dim < len_:
return vec[:dim]
elif dim > len_:
return np.asarray(list(vec) + (dim - len_) * [vec[-1]])
def argsort(a, reverse=False):
"""return index list to get `a` in order, ie
``a[argsort(a)[i]] == sorted(a)[i]``, which leads to unexpected
results with `np.nan` entries, because any comparison with `np.nan`
is `False`.
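    >>> from cma.utilities.utils import argsort
    >>> argsort([3, 1, 2])
    [1, 2, 0]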
"""
return sorted(range(len(a)), key=a.__getitem__, reverse=reverse) # a.__getitem__(i) is a[i]
def ranks(a, reverse=False):
"""return ranks of entries starting with zero based on Pythons `sorted`.
This leads to unreasonable results with `np.nan` values.
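    >>> from cma.utilities.utils import ranks
    >>> ranks([3, 1, 2])
    [2, 0, 1]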
"""
idx = argsort(a)
return [len(idx) - 1 - idx.index(i) if reverse else idx.index(i)
for i in range(len(idx))]
def zero_values_indices(diffs):
"""generate increasing index pairs ``(i, j)`` with ``all(diffs[i:j] == 0)``
and ``diffs[j] != 0 or j == len(diffs)``, thereby identifying "flat
spots/areas" in `diffs`.
Returns the respective generator type.
Not anymore used to smoothen ECDFs.
Example:
>>> from cma.utilities.utils import zero_values_indices
>>> for i, j in zero_values_indices([0, 0.1, 0, 0, 3.2, 0, 2.1]):
... print((i, j))
(0, 1)
(2, 4)
(5, 6)
"""
i = 0
while i < len(diffs):
if diffs[i] == 0:
j = i
while j < len(diffs) and diffs[j] == 0:
j += 1
yield i, j
i = j + 1 # next possibly zero value
else:
i += 1
def pprint(to_be_printed):
"""nicely formated print"""
try:
import pprint as pp
# generate an instance PrettyPrinter
# pp.PrettyPrinter().pprint(to_be_printed)
pp.pprint(to_be_printed)
except ImportError:
if isinstance(to_be_printed, dict):
print('{')
for k, v in to_be_printed.items():
print("'" + k + "'" if str(k) == k else k,
': ',
"'" + v + "'" if str(v) == v else v,
sep="")
print('}')
else:
        print('could not import pprint module, applying regular print')
print(to_be_printed)
def num2str(val, significant_digits=2, force_rounding=False,
max_predecimal_digits=5, max_postdecimal_leading_zeros=1,
remove_trailing_zeros=True, desired_length=None):
"""returns the shortest string representation.
Generally, display either ``significant_digits`` digits or its true
value, whichever is shorter.
``force_rounding`` shows no more than the desired number of significant
digits, which means, e.g., ``12345`` becomes ``12000``.
    ``remove_trailing_zeros`` removes trailing zeros if and only if the
    value is represented exactly.
``desired_length`` adds digits up to the desired length.
>>> from cma.utilities import utils
>>> print([utils.num2str(val) for val in [12345, 1234.5, 123.45,
... 12.345, 1.2345, .12345, .012345, .0012345]])
['12345', '1234', '123', '12', '1.2', '0.12', '0.012', '1.2e-3']
"""
if val == 0:
return '0'
if not significant_digits > 0:
raise ValueError('need significant_digits=%s > 0'
% str(significant_digits))
is_negative = val < 0
original_value = val
val = float(np.abs(val))
order_of_magnitude = int(np.floor(np.log10(val)))
# number of digits before decimal point == order_of_magnitude + 1
fac = 10**(significant_digits - 1 - order_of_magnitude)
val_rounded = np.round(fac * val) / fac
# the strategy is now to produce two string representations
# cut each down to the necessary length and return the better
# the first is %f format
if order_of_magnitude + 1 >= significant_digits:
s = str(int(val_rounded if force_rounding else np.round(val)))
else:
s = str(val_rounded)
idx1 = 0 # first non-zero index
while idx1 < len(s) and s[idx1] in ('-', '0', '.'):
idx1 += 1 # find index of first significant number
idx2 = idx1 + significant_digits + (s.find('.') > idx1)
# print(val, val_rounded, s, len(s), idx1, idx2)
# pad some zeros in the end, in case
if val != val_rounded:
if len(s) < idx2:
s += '0' * (idx2 - len(s))
# remove zeros from the end, in case
if val == val_rounded and remove_trailing_zeros:
while s[-1] == '0':
s = s[0:-1]
if s[-1] == '.':
s = s[0:-1]
s_float = ('-' if is_negative else '') + s
# now the second, %e format
s = ('%.' + str(significant_digits - 1) + 'e') % val
if seval(s) == val and s.find('.') > 0:
while s.find('0e') > 0:
s = s.replace('0e', 'e')
s = s.replace('.e', 'e')
s = s.replace('e+', 'e')
while s.find('e0') > 0:
s = s.replace('e0', 'e')
while s.find('e-0') > 0:
s = s.replace('e-0', 'e-')
if s[-1] == 'e':
s = s[:-1]
s_exp = ('-' if is_negative else '') + s
# print(s_float, s_exp)
# now return the better (most of the time the shorter) representation
if (len(s_exp) < len(s_float) or
s_float.find('0.' + '0' * (max_postdecimal_leading_zeros + 1)) > -1 or
np.abs(val_rounded) >= 10**(max_predecimal_digits + 1)
):
s_ret = s_exp
else:
s_ret = s_float
if desired_length:
s_old = ''
while len(s_ret) < desired_length and len(s_old) < len(s_ret):
s_old = s_ret
s_ret = num2str(original_value,
significant_digits + desired_length - len(s_ret),
force_rounding,
max_predecimal_digits,
max_postdecimal_leading_zeros,
remove_trailing_zeros,
desired_length=None)
return s_ret
# todo: this should rather be a class instance
def print_warning(msg, method_name=None, class_name=None, iteration=None,
verbose=None, maxwarns=None):
"""Poor man's maxwarns: warn only if ``iteration<=maxwarns``"""
if verbose is None:
verbose = global_verbosity
if maxwarns is not None and iteration is None:
raise ValueError('iteration must be given to activate maxwarns')
if verbose >= -2 and (iteration is None or maxwarns is None or
iteration <= maxwarns):
warnings.warn(msg + ' (' +
('class=%s ' % str(class_name) if class_name else '') +
('method=%s ' % str(method_name) if method_name else '') +
('iteration=%s' % str(iteration) if iteration else '') +
')')
def print_message(msg, method_name=None, class_name=None, iteration=None,
verbose=None):
if verbose is None:
verbose = global_verbosity
if verbose >= 0:
print('NOTE (module=cma' + # __name__ +
(', class=' + str(class_name) if class_name else '') +
(', method=' + str(method_name) if method_name else '') +
(', iteration=' + str(iteration) if iteration is not None else '') +
'): ', msg)
def set_attributes_from_dict(self, dict_, initial_params_dict_name=None):
"""assign, for example, all arguments given to an ``__init__``
method to attributes in ``self`` or ``self.params`` or ``self.args``.
If ``initial_params_dict_name`` is given, ``dict_`` is also copied
into an attribute of ``self`` with name ``initial_params_dict_name``::
setattr(self, initial_params_dict_name, dict_.copy())
and the ``self`` key is removed from the copied `dict` if present.
>>> from cma.utilities.utils import set_attributes_from_dict
>>> class C(object):
... def __init__(self, arg1, arg2, arg3=None):
... assert len(locals()) == 4 # arguments are locally visible
... set_attributes_from_dict(self, locals())
>>> c = C(1, 22)
>>> assert c.arg1 == 1 and c.arg2 == 22 and c.arg3 is None
>>> assert len(c.__dict__) == 3 and not hasattr(c, 'self')
Details:
- The entry ``dict_['self']`` is always ignored.
- Alternatively::
self.args = locals().copy()
self.args.pop('self', None) # not strictly necessary
puts all arguments into ``self.args: dict``.
"""
if initial_params_dict_name:
setattr(self, initial_params_dict_name, dict_.copy())
getattr(self, initial_params_dict_name).pop('self', None)
for key, val in dict_.items():
if key != 'self': # avoid self referencing
setattr(self, key, val)
def download_file(url, target_dir='.', target_name=None):
    try:  # urllib2 exists only on Python 2
        from urllib2 import urlopen
    except ImportError:
        from urllib.request import urlopen
    if target_name is None:
        target_name = url.split('/')[-1]  # URLs use '/', not os.path.sep
    with open(os.path.join(target_dir, target_name), 'wb') as f:
        f.write(urlopen(url).read())
def extract_targz(tarname, filename=None, target_dir='.'):
"""filename must be a valid path in the tar"""
import tarfile
tmp_dir = '._tmp_'
if filename is None:
tarfile.TarFile.gzopen(tarname).extractall(target_dir)
else:
import shutil
tarfile.TarFile.gzopen(tarname).extractall(tmp_dir)
shutil.copy2(os.path.join(tmp_dir, filename),
os.path.join(target_dir, filename.split(os.path.sep)[-1]))
shutil.rmtree(tmp_dir)
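# Illustrative usage of the two helpers above (URL and paths are hypothetical):
#   download_file('https://example.com/archive.tar.gz', target_dir='.')
#   extract_targz('archive.tar.gz', filename='archive/data.txt')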
class BlancClass(object):
"""blanc container class to have a collection of attributes.
    For rapid shell work or prototyping. In the process of improving the
    code, this class might at some point be replaced with a more
    tailored class.
Usage:
>>> from cma.utilities.utils import BlancClass
>>> p = BlancClass()
>>> p.value1 = 0
>>> p.value2 = 1
"""
class DictClass(dict):
"""A class wrapped over `dict` to use class .-notation.
>>> from cma.utilities.utils import DictClass
>>> dict_ = dict((3 * c, c) for c in 'abcd')
>>> as_class = DictClass(dict_)
>>> assert as_class.__dict__ == dict_ == as_class
>>> assert as_class.aaa == 'a'
>>> as_class.new = 33
>>> assert 'new' in as_class
>>> as_class['nnew'] = 44
>>> assert as_class.nnew == 44
>>> assert len(as_class) == 6
"""
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__ = self
def __dir__(self):
return self.keys()
class DerivedDictBase(abc.MutableMapping):
"""for conveniently adding methods/functionality to a dictionary.
The actual dictionary is in ``self.data``. Derive from this
class and copy-paste and modify setitem, getitem, and delitem,
if necessary.
    Details: This is the clean way to subclass the built-in `dict`;
    however, it depends on `MutableMapping`.
"""
def __init__(self, *args, **kwargs):
# abc.MutableMapping.__init__(self)
super(DerivedDictBase, self).__init__()
# super(SolutionDict, self).__init__() # the same
self.data = dict()
self.data.update(dict(*args, **kwargs))
def __len__(self):
return len(self.data)
def __contains__(self, key):
return key in self.data
def __iter__(self):
return iter(self.data)
def __setitem__(self, key, value):
"""define ``self[key] = value``"""
self.data[key] = value
def __getitem__(self, key):
"""define access ``self[key]``"""
return self.data[key]
def __delitem__(self, key):
del self.data[key]
class SolutionDict(DerivedDictBase):
"""dictionary with computation of an hash key.
The hash key is generated from the inserted solution and a stack of
previously inserted same solutions is provided. Each entry is meant
to store additional information related to the solution.
>>> import cma.utilities.utils as utils, numpy as np
>>> d = utils.SolutionDict()
>>> x = np.array([1,2,4])
>>> d[x] = {'f': sum(x**2), 'iteration': 1}
>>> assert d[x]['iteration'] == 1
>>> assert d.get(x) == (d[x] if d.key(x) in d.keys() else None)
TODO: data_with_same_key behaves like a stack (see setitem and
delitem), but rather should behave like a queue?! A queue is less
consistent with the operation self[key] = ..., if
self.data_with_same_key[key] is not empty.
TODO: iteration key is used to clean up without error management
"""
def __init__(self, *args, **kwargs):
# DerivedDictBase.__init__(self, *args, **kwargs)
super(SolutionDict, self).__init__(*args, **kwargs)
self.data_with_same_key = {}
self.last_iteration = 0
@staticmethod
def _hash(x):
return x
def key(self, x):
"""compute key of ``x``"""
try:
return self._hash(np.ascontiguousarray(x).data.tobytes()) # much faster than tuple(.)
except AttributeError:
try:
return self._hash(tuple(x)) # using sum(x) is slower, using x[0] is slightly faster
except TypeError:
return self._hash(x)
def __setitem__(self, key, value):
"""define ``self[key] = value``"""
key = self.key(key)
if key in self.data_with_same_key:
self.data_with_same_key[key] += [self.data[key]]
elif key in self.data:
self.data_with_same_key[key] = [self.data[key]]
self.data[key] = value
def __getitem__(self, key): # 50% of time of
"""define access ``self[key]``"""
return self.data[self.key(key)]
def __delitem__(self, key):
"""remove only most current key-entry of list with same keys"""
key = self.key(key)
if key in self.data_with_same_key:
if len(self.data_with_same_key[key]) == 1:
self.data[key] = self.data_with_same_key.pop(key)[0]
else:
self.data[key] = self.data_with_same_key[key].pop(-1)
elif key in self.data:
del self.data[key]
def truncate(self, max_len, min_iter):
"""delete old entries to prevent bloat"""
if len(self) > max_len:
for k in list(self.keys()):
if self[k]['iteration'] < min_iter:
del self[k]
# deletes one item with k as key, better delete all?
class DataDict(defaultdict):
"""a dictionary of lists (of data)"""
def __init__(self, filename='_data.py'):
self.filename = filename
defaultdict.__init__(self, list)
self.load()
def load(self):
"""element-wise append/merge data of loaded `dict` to self,
by calling `update`.
To load cleanly without merge use `clear` + `load` or the class
constructor with a new `filename`.
"""
with open(self.filename, 'rt') as f:
dd = ast.literal_eval(f.read())
self.update(dd)
return self
def update(self, dict_):
"""append data of entries in `dict_` to entries in self"""
for k in dict_:
            self[k] += dict_[k]  # self is a dict of lists
return self
def save(self):
with open(self.filename, 'wt') as f:
f.write(repr(dict(self)))
def clear(self):
for key in [k for k in self]:
del self[key]
return self
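    # Minimal usage sketch (hypothetical file contents): because `update`
    # appends element-wise, repeated loads accumulate rather than overwrite.
    # Assuming '_data.py' exists and contains the dict literal {'f': [0.1]}:
    #   d = DataDict('_data.py')   # d['f'] == [0.1] (loaded once in __init__)
    #   d.load()                   # d['f'] == [0.1, 0.1] (appended)
    #   d.clear().load()           # d['f'] == [0.1] again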
class ExclusionListOfVectors(list):
"""For delayed selective mirrored sampling"""
def __contains__(self, vec):
for v in self:
if 1 - 1e-9 < np.dot(v, vec) / (sum(np.asarray(v)**2) * sum(np.asarray(vec)**2))**0.5 < 1 + 1e-9:
return True
return False
class ElapsedWCTime(object):
"""measure elapsed cumulative time while not paused and elapsed time
since last tic.
Use attribute `tic` and methods `pause` () and `reset` ()
to control the timer. Use attributes `toc` and `elapsed` to see
timing results.
>>> import cma
>>> e = cma.utilities.utils.ElapsedWCTime().pause() # (re)start later
>>> assert e.paused and e.elapsed == e.toc < 0.1
>>> assert e.toc == e.tic < 0.1 # timer starts here
>>> assert e.toc <= e.tic # toc is usually a few microseconds smaller
>>> assert not e.paused # the timer is now running due to tic
    Details: the attribute ``paused`` equals the time [s] when paused or
to zero when the timer is running.
"""
def __init__(self, time_offset=0):
"""add time offset in seconds and start timing"""
self._time_offset = time_offset
self.reset()
def reset(self):
"""reset to initial state and start timing"""
self.cum_time = self._time_offset
self.paused = 0
"""time when paused or 0 while running"""
self.last_tic = time.time()
return self
def pause(self):
"""pause timer, resume with `tic`"""
if not self.paused:
self.paused = time.time()
return self
def __call__(self):
"""depreciated return elapsed time (for backwards compatibility)
"""
raise DeprecationWarning()
return self.elapsed
@property
def tic(self):
"""return `toc` and restart tic/toc last-round-timer.
        If paused, also resume from `pause`.
"""
return_ = self.toc
if self.paused:
if self.paused < self.last_tic:
print_warning("""paused time=%f < last_tic=%f, which
should never happen, but has been observed at least once.
""" % (self.paused, self.last_tic),
"tic", "ElapsedWCTime")
self.paused = self.last_tic
self.cum_time += self.paused - self.last_tic
else:
self.cum_time += time.time() - self.last_tic
self.paused = 0
self.last_tic = time.time()
return return_
@property
def elapsed(self):
"""elapsed time while not paused, measured since creation or last
`reset`
"""
return self.cum_time + self.toc
@property
def toc(self):
"""return elapsed time since last `tic`"""
if self.paused:
return self.paused - self.last_tic
return time.time() - self.last_tic
class TimingWrapper(object):
"""wrap a timer around a callable.
Attribute ``timer`` collects the timing data in an `ElapsedWCTime`
class instance, in particular the overall elapsed time in
``timer.elapsed`` and the time of the last call in ``timer.toc``.
"""
def __init__(self, callable_):
"""``callable_`` is the `callable` to be timed when called"""
self._callable = callable_
self.timer = ElapsedWCTime().pause()
def __call__(self, *args, **kwargs):
self.timer.tic
res = self._callable(*args, **kwargs)
self.timer.pause()
return res
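    # Minimal usage sketch (``f`` stands for any callable):
    #   timed_f = TimingWrapper(f)
    #   y = timed_f(x)           # same result as f(x)
    #   timed_f.timer.toc        # wall-clock time of the last call [s]
    #   timed_f.timer.elapsed    # cumulative time spent inside f [s]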
class DictFromTagsInString(dict):
"""read from a string or file all key-value pairs within all
``<python>...</python>`` tags and return a `dict`.
Within the tags valid Python code is expected: either a list of
key-value pairs ``[[key1, value1], [key2, value2], ...]`` or a
dictionary ``{ key1: value1, key2: value2, ...}``. A key can be any
immutable object, while it is often a string or a number.
The `as_python_tag` attribute provides the respective (tagged) string.
The ``tag_string`` attribute defines the tag identifier, 'python' by
    default, and can be changed if desired at any time.
>>> from cma.utilities.utils import DictFromTagsInString
>>> s = '<python> [[33, 44], ["annotations", [None, 2]]] </python>'
>>> s += '<python> {"annotations": [2, 3]} </python>'
>>> d = DictFromTagsInString(s)
>>> # now d.update can be used to read more tagged strings/files/...
>>> assert d.tag_string == 'python' # can be set to any other value
>>> d.tag_string = 'pyt'
>>> # now 'pyt' tags can/will be read (only)
>>> assert str(d).startswith('<pyt>{') and str(d).endswith('}</pyt>')
>>> assert len(d) == 2 and d[33] == 44 and d['annotations'] == [2, 3]
When the same key appears several times, its value is overwritten.
"""
def __init__(self, *args, **kwargs):
"""for input args see `update` method."""
super(DictFromTagsInString, self).__init__() # not necessary!?
self.tag_string = "python"
if is_(args) or is_(kwargs):
self.update(*args, **kwargs)
def update(self, string_=None, filename=None, file_=None, dict_=None,
tag_string=None):
"""only one of the first four arguments is accepted at a time,
return ``self``.
If the first argument has no keyword, it is assumed to be a string
to be parsed for tags.
"""
args = 4 - ((string_ is None) + (filename is None) +
(file_ is None) + (dict_ is None))
if not args:
raise ValueError('''nothing to update''')
if args > 1:
raise ValueError('''
use either string_ or filename or file_ or dict_ as
input, but not several of them''')
if tag_string is not None:
self.tag_string = tag_string
if filename is not None:
            with open(filename, 'r') as f:
                string_ = f.read()
elif file_ is not None:
string_ = file_.read()
elif dict_ is not None:
super(DictFromTagsInString,
self).update(dict_)
return self
super(DictFromTagsInString,
self).update(self._eval_python_tag(string_))
return self
@property
def as_python_tag(self):
return self._start + repr(dict(self)) + self._end
def __repr__(self):
return self.as_python_tag
@property
def _start(self):
return '<' + self.tag_string + '>'
@property
def _end(self):
return '</' + self.tag_string + '>'
def _eval_python_tag(self, str_):
"""read [key, value] pairs from a `list` or a `dict` within all
``<self.tag_str>`` tags in ``str_`` and return a `dict`.
>>> from cma.utilities.utils import DictFromTagsInString
>>> s = '<py> [[33, 44], ["annotations", []]] </py>'
>>> s += '<py>[["annotations", [1,2,3]]] </py>'
>>> d = DictFromTagsInString()
>>> assert len(d) == 0
>>> d.update(s) # still empty as default tag is not <py>
<python>{}</python>
>>> assert len(d) == 0
>>> d.tag_string = "py" # set desired tag
>>> d.update(s) # doctest:+ELLIPSIS
<py>{...
>>> assert len(d) == 2
>>> assert d[33] == 44 and len(d["annotations"]) == 3
"""
values = {}
str_lower = str_.lower()
start = str_lower.find(self._start)
while start >= 0:
start += len(self._start) # move behind begin tag
end = str_lower.find(self._end, start)
values.update(ast.literal_eval(str_[start:end].strip()))
start = str_lower.find(self._start, start + 1)
return values
class MoreToWrite(list):
"""make sure that this list does not grow unbounded"""
def __init__(self):
self._lenhist = []
def check(self):
self._lenhist += [len(self)]
if len(self._lenhist) > 3:
if all(np.diff(self._lenhist) > 0):
del self[:]
self._lenhist = []
class DefaultSettings(object):
"""resembling somewhat `types.SimpleNamespace` from Python >=3.3
but with instantiation and resembling even more the `dataclass` decorator
from Python >=3.7.
``MyClassSettings(DefaultSettings)`` is preferably used by assigning a settings
attribute in ``__init__`` like:
>>> class MyClass:
... def __init__(self, a, b=None, param1=None, c=3):
... self.settings = MyClassSettings(locals(), 1, self)
The `1` signals, purely for consistency checking, that one parameter defined
in ``MyClassSettings`` is to be set from ``locals()``. ``MyClassSettings``
doesn't use any names which are already defined in ``self.__dict__``. The
settings are defined in a derived parameter class like
>>> from cma.fitness_models import DefaultSettings
>>> class MyClassSettings(DefaultSettings):
... param1 = 123
... val2 = False
... another_par = None # we need to assign at least None always
The main purpose is, with the least effort, (i) to separate
parameters/settings of a class from its remaining attributes, and (ii) to be
flexible as to which of these parameters are arguments to ``__init__``.
Parameters can always be modified after instantiation. Further advantages
are (a) no typing of ``self.`` to assign the default value or the passed
parameter value (the latter are assigned "automatically") and (b) no
confusing name change between the passed option and attribute name is
possible.
    The class does not allow overwriting the default value with `None`.
Now any of these parameters can be used or re-assigned like
>>> c = MyClass(0.1)
>>> c.settings.param1 == 123
True
>>> c = MyClass(2, param1=False)
>>> c.settings.param1 is False
True
"""
def __init__(self, params, number_of_params, obj):
"""Overwrite default settings in case.
:param params: A dictionary (usually locals()) containing the parameters to set/overwrite
:param number_of_params: Number of parameters to set/overwrite
:param obj: elements of obj.__dict__ are in the ignore list.
"""
self.inparams = dict(params)
self._number_of_params = number_of_params
self.obj = obj
self.inparams.pop('self', None)
self._set_from_defaults()
self._set_from_input()
    def __str__(self):
        # return str(self.__dict__)
        return ("{" + '\n'.join(r"%s: %s" % (str(k), str(v)) for k, v in self.__dict__.items()) + "}")
def _set_from_defaults(self):
"""defaults are taken from the class attributes"""
self.__dict__.update(((key, val)
for (key, val) in type(self).__dict__.items()
if not key.startswith('_')))
def _set_from_input(self):
"""Only existing parameters/attributes and non-None values are set.
The number of parameters is cross-checked.
Remark: we could select only the last arguments
of obj.__init__.__func__.__code__.co_varnames
which have defaults obj.__init__.__func__.__defaults__ (we do
not need the defaults)
"""
discarded = {} # discard name if not in self.__dict__
for key in list(self.inparams):
if key not in self.__dict__ or key in self.obj.__dict__:
discarded[key] = self.inparams.pop(key)
elif self.inparams[key] is not None:
setattr(self, key, self.inparams[key])
if len(self.inparams) != self._number_of_params:
warnings.warn("%s: %d parameters desired; remaining: %s; discarded: %s "
% (str(type(self)), self._number_of_params, str(self.inparams),
str(discarded)))
# self.__dict__.update(self.inparams)
delattr(self, 'obj') # prevent circular reference self.obj.settings where settings is self
class ListOfCallables(list):
"""A `list` of callables that can be called like a single `callable`.
The simplest usecase of this minitool is single-shot usage like::
res = ListOfCallables(callable_or_list_of_callables)(args)
as a one-line simplification of either::
if callable(callable_or_list_of_callables):
res = [callable_or_list_of_callables(args)]
else:
res = [c(args) for c in callable_or_list_of_callables]
or::
try:
res = [c(args) for c in callable_or_list_of_callables]
except TypeError:
res = [callable_or_list_of_callables(args)]
"""
def __init__(self, callback):
"""return a list of callables as a `callable` itself.
``callback`` can be a `callable` or a `list` (or iterable) of
callables. Otherwise a `ValueError` exception is raised.
Possible usecase: termination callback(s) of CMA-ES::
self.opts['termination_callback'](self)
becomes::
ListOfCallables(self.opts['termination_callback'])(self)
"""
self._input_callback = callback
if callback is None:
callback = []
if callable(callback):
callback = [callback]
try:
for c in callback:
if not callable(c):
raise ValueError("""callback argument %s is not
callable""" % str(c))
except TypeError:
raise ValueError("""callback argument must be a `callable` or
an iterable (e.g. a list) of callables, after some
processing it was %s""" % str(callback))
list.__init__(self, callback)
def __call__(self, *args, **kwargs):
"""call each element of the list and return a list of return values
"""
res = [c(*args, **kwargs) for c in self]
if 11 < 3 and self._input_callback is None:
assert len(res) == 0
return None
if 11 < 3 and callable(self._input_callback):
assert len(self) == len(res) == 1
return res[0] # for backwards compatibility when a single callable is used
return res
| [
"numpy.abs",
"os.path.join",
"tarfile.TarFile.gzopen",
"numpy.isscalar",
"numpy.asarray",
"numpy.ascontiguousarray",
"numpy.isnan",
"time.time",
"numpy.log10",
"numpy.diff",
"pprint.pprint",
"numpy.dot",
"shutil.rmtree",
"collections.defaultdict.__init__",
"numpy.round",
"urllib2.urlop... | [((2891, 2904), 'numpy.isnan', 'np.isnan', (['var'], {}), '(var)\n', (2899, 2904), True, 'import numpy as np\n'), ((3186, 3206), 'numpy.isscalar', 'np.isscalar', (['x[0][0]'], {}), '(x[0][0])\n', (3197, 3206), True, 'import numpy as np\n'), ((6940, 6964), 'pprint.pprint', 'pp.pprint', (['to_be_printed'], {}), '(to_be_printed)\n', (6949, 6964), True, 'import pprint as pp\n'), ((8557, 8568), 'numpy.abs', 'np.abs', (['val'], {}), '(val)\n', (8563, 8568), True, 'import numpy as np\n'), ((8773, 8792), 'numpy.round', 'np.round', (['(fac * val)'], {}), '(fac * val)\n', (8781, 8792), True, 'import numpy as np\n'), ((14632, 14654), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (14645, 14654), False, 'import shutil\n'), ((19766, 19798), 'collections.defaultdict.__init__', 'defaultdict.__init__', (['self', 'list'], {}), '(self, list)\n', (19786, 19798), False, 'from collections import defaultdict\n'), ((22028, 22039), 'time.time', 'time.time', ([], {}), '()\n', (22037, 22039), False, 'import os, sys, time\n'), ((23099, 23110), 'time.time', 'time.time', ([], {}), '()\n', (23108, 23110), False, 'import os, sys, time\n'), ((8609, 8622), 'numpy.log10', 'np.log10', (['val'], {}), '(val)\n', (8617, 8622), True, 'import numpy as np\n'), ((10515, 10534), 'numpy.abs', 'np.abs', (['val_rounded'], {}), '(val_rounded)\n', (10521, 10534), True, 'import numpy as np\n'), ((14061, 14098), 'os.path.join', 'os.path.join', (['target_dir', 'target_name'], {}), '(target_dir, target_name)\n', (14073, 14098), False, 'import os, sys, time\n'), ((14511, 14542), 'os.path.join', 'os.path.join', (['tmp_dir', 'filename'], {}), '(tmp_dir, filename)\n', (14523, 14542), False, 'import os, sys, time\n'), ((22180, 22191), 'time.time', 'time.time', ([], {}), '()\n', (22189, 22191), False, 'import os, sys, time\n'), ((23483, 23494), 'time.time', 'time.time', ([], {}), '()\n', (23492, 23494), False, 'import os, sys, time\n'), ((1999, 2014), 'numpy.asarray', 'np.asarray', (['var'], {}), '(var)\n', (2009, 2014), True, 'import numpy as np\n'), ((14343, 14374), 'tarfile.TarFile.gzopen', 'tarfile.TarFile.gzopen', (['tarname'], {}), '(tarname)\n', (14365, 14374), False, 'import tarfile\n'), ((14438, 14469), 'tarfile.TarFile.gzopen', 'tarfile.TarFile.gzopen', (['tarname'], {}), '(tarname)\n', (14460, 14469), False, 'import tarfile\n'), ((23023, 23034), 'time.time', 'time.time', ([], {}), '()\n', (23032, 23034), False, 'import os, sys, time\n'), ((9068, 9081), 'numpy.round', 'np.round', (['val'], {}), '(val)\n', (9076, 9081), True, 'import numpy as np\n'), ((14128, 14148), 'urllib2.urlopen', 'urllib2.urlopen', (['url'], {}), '(url)\n', (14143, 14148), False, 'import urllib2\n'), ((20775, 20789), 'numpy.dot', 'np.dot', (['v', 'vec'], {}), '(v, vec)\n', (20781, 20789), True, 'import numpy as np\n'), ((28597, 28619), 'numpy.diff', 'np.diff', (['self._lenhist'], {}), '(self._lenhist)\n', (28604, 28619), True, 'import numpy as np\n'), ((18084, 18107), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['x'], {}), '(x)\n', (18104, 18107), True, 'import numpy as np\n'), ((20797, 20810), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (20807, 20810), True, 'import numpy as np\n'), ((20821, 20836), 'numpy.asarray', 'np.asarray', (['vec'], {}), '(vec)\n', (20831, 20836), True, 'import numpy as np\n')] |
import requests
import json
import pickle
import sys
text = "please cal"
with open('input_lang.pkl', 'rb') as input1:
input_lang = pickle.load(input1)
with open('output_lang.pkl', 'rb') as target:
target_lang = pickle.load(target)
with open('output_lang1.pkl', 'rb') as target1:
lang_target = pickle.load(target1)
import numpy as np
len_target = 23
len_input = 23
def sentence_to_vector(sentence, lang):
pre = sentence
vec = np.zeros(len_input)
sentence_list = [lang[s] for s in pre.split(' ')]
for i,w in enumerate(sentence_list):
vec[i] = w
return vec
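# Illustration (hypothetical vocabulary): with input_lang == {'please': 5, 'cal': 9}
# and len_input == 23, sentence_to_vector('please cal', input_lang) returns
# array([5., 9., 0., ..., 0.]), i.e. the word indices followed by zero padding.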
# Given an input string, query the served encoder and decoder models to produce the translated sentence.
def translate(input_sentence):
sv = sentence_to_vector(input_sentence, input_lang)
#sv = sv.reshape(1,len(sv))
output_sentence = ""
print(sv.shape)
print(sv)
payload = {
"instances":[{"input_image": sv.tolist()}]
}
try:
r = requests.post('http://localhost:9000/v1/models/encoder_model:predict', json=payload)
r.raise_for_status()
except requests.exceptions.HTTPError as e:
print(r)
print(r.content)
return "Error: " + str(e)
#json.loads(r.content)
#sys.exit(1)
epred= json.loads(r.content)['predictions']
emb_out = epred[0]['bidirectional_3/concat:0']
sh = epred[0]['concatenate_6/concat:0']
sc = epred[0]['concatenate_7/concat:0']
#[emb_out, sh, sc] = loaded_enc_model.predict(x=sv)
print(epred[0].keys())
i = 0
start_vec = target_lang["<start>"]
stop_vec = target_lang["<end>"]
    cur_vec = np.zeros(1)
cur_vec[0] = start_vec
cur_word = "<start>"
output_sentence = ""
print(cur_vec.shape)
while cur_word != "<end>" and i < (len_target-1):
i += 1
if cur_word != "<start>":
output_sentence = output_sentence + " " + cur_word
#x_in = [cur_vec,sh, sc]
####
payload = {
"instances":[{
"inf_decoder_inputs:0": cur_vec.tolist(),
"state_input_c:0" : sh,
"state_input_h:0" : sc
}
]
}
try:
r = requests.post('http://localhost:9001/v1/models/decoder_model:predict', json=payload)
r.raise_for_status()
except requests.exceptions.HTTPError as e:
print(r)
print(r.content)
return "Error: " + str(e)
####
dpred= json.loads(r.content)['predictions']
print(dpred[0].keys())
nvec = dpred[0]['dense_7/truediv:0']
sh = dpred[0]['lstm_1/while/Exit_2:0']
sc = dpred[0]['lstm_1/while/Exit_3:0']
#[nvec, sh, sc] = loaded_dec_model.predict(x=x_in)
cur_vec[0] = np.argmax(nvec[0])
cur_word = lang_target[cur_vec[0]]
return output_sentence
prediction = translate(text.lower())
print(prediction) | [
"json.loads",
"numpy.argmax",
"numpy.zeros",
"pickle.load",
"requests.post"
] | [((137, 156), 'pickle.load', 'pickle.load', (['input1'], {}), '(input1)\n', (148, 156), False, 'import pickle\n'), ((221, 240), 'pickle.load', 'pickle.load', (['target'], {}), '(target)\n', (232, 240), False, 'import pickle\n'), ((307, 327), 'pickle.load', 'pickle.load', (['target1'], {}), '(target1)\n', (318, 327), False, 'import pickle\n'), ((450, 469), 'numpy.zeros', 'np.zeros', (['len_input'], {}), '(len_input)\n', (458, 469), True, 'import numpy as np\n'), ((1639, 1650), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1647, 1650), True, 'import numpy as np\n'), ((982, 1071), 'requests.post', 'requests.post', (['"""http://localhost:9000/v1/models/encoder_model:predict"""'], {'json': 'payload'}), "('http://localhost:9000/v1/models/encoder_model:predict', json\n =payload)\n", (995, 1071), False, 'import requests\n'), ((1280, 1301), 'json.loads', 'json.loads', (['r.content'], {}), '(r.content)\n', (1290, 1301), False, 'import json\n'), ((2852, 2870), 'numpy.argmax', 'np.argmax', (['nvec[0]'], {}), '(nvec[0])\n', (2861, 2870), True, 'import numpy as np\n'), ((2283, 2372), 'requests.post', 'requests.post', (['"""http://localhost:9001/v1/models/decoder_model:predict"""'], {'json': 'payload'}), "('http://localhost:9001/v1/models/decoder_model:predict', json\n =payload)\n", (2296, 2372), False, 'import requests\n'), ((2560, 2581), 'json.loads', 'json.loads', (['r.content'], {}), '(r.content)\n', (2570, 2581), False, 'import json\n')] |
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import numpy as np
from pyiron_atomistics.atomistics.job.atomistic import AtomisticGenericJob
from pyiron_atomistics.atomistics.structure.atoms import Atoms
from typing import Tuple, Union
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Nov 1, 2021"
class WaterGeometryCalculator:
"""
Class to analyze the geometries of water molecules in an atomistic simulation.
"""
def __init__(self, job: AtomisticGenericJob, fixed_bonds: bool = True, water_bond_cutoff: float = 1.3):
"""Initializing the class
Args:
job (pyiron_atomistics.atomistics.job.atomistic.AtomisticGenericJob): The given atomistic job
fixed_bonds (bool): True of the water bonds remain unbroken throughout the simulation
water_bond_cutoff (float): The cutoff radius of a sphere centered at the nucleus of an oxygen atom
used to determine the number of hydrogen atoms covalently bonded to it
"""
self._job = job
self._fixed_bonds = fixed_bonds
self._water_bond_cutoff = water_bond_cutoff
self._water_oxygen_indices = []
self._water_hydrogen_indices = []
self._oh_vec_1, self._oh_vec_2 = [], []
self._intra_oh_distances, self._intra_oh_angles = [], []
if fixed_bonds:
self._compute_water_bonds()
else:
raise NotImplementedError("Currently this class can only analyze trajectories"
" where the water bonds are intact")
@property
def structure(self) -> Atoms:
"""
The initial structure of the trajectory
Returns:
pyiron_atomistics.atomistics.structure.atoms.Atoms
"""
return self._job.structure
@property
def water_oxygen_indices(self) -> Union[np.ndarray, list]:
"""Indices of oxygen atoms that are part of water molecules."""
return self._water_oxygen_indices
@property
def water_hydrogen_indices(self) -> Union[np.ndarray, list]:
"""Indices of hydrogen atoms that are part of water molecules."""
return self._water_hydrogen_indices
def _compute_water_bonds(self) -> None:
neighbors = self.structure.get_neighbors(num_neighbors=5)
oxy_indices = self.structure.select_index("O")
hyd_indices = self.structure.select_index("H")
oxy_neigh_indices = np.array(neighbors.indices)[oxy_indices]
oxy_neigh_distances = np.array(neighbors.distances)[oxy_indices]
within_cutoff_bool = oxy_neigh_distances <= self._water_bond_cutoff
oxy_hyd_indices_list = [np.intersect1d(oxy_neigh_indices[i, bool_ind], hyd_indices)
for i, bool_ind in enumerate(within_cutoff_bool)]
water_oxy_indices = list()
water_hyd_indices = list()
for i, oxy_hyd_ind in enumerate(oxy_hyd_indices_list):
if len(oxy_hyd_ind) == 2:
water_oxy_indices.append(oxy_indices[i])
water_hyd_indices.append(oxy_hyd_ind)
self._water_oxygen_indices = np.array(water_oxy_indices)
self._water_hydrogen_indices = np.array(water_hyd_indices)
self._oh_vec_1, self._oh_vec_2 = self._get_intra_oh_vec()
self._intra_oh_distances = np.stack(np.array([np.linalg.norm(val, axis=2)
for val in [self._oh_vec_1, self._oh_vec_2]]))
self._intra_oh_angles = get_angle_traj_vectors(self._oh_vec_1, self._oh_vec_2)
def _get_intra_oh_vec(self) -> Tuple[np.ndarray, np.ndarray]:
positions = self._job.output.unwrapped_positions
oh_vec_1 = positions[:, self._water_hydrogen_indices[:, 0], :] - positions[:, self._water_oxygen_indices, :]
oh_vec_2 = positions[:, self._water_hydrogen_indices[:, 1], :] - positions[:, self._water_oxygen_indices, :]
return oh_vec_1, oh_vec_2
@property
def intra_oh_distances(self) -> Union[list, np.ndarray]:
"""Returns list of intra-molecular OH distances."""
return self._intra_oh_distances
@property
def bond_angles(self) -> Union[list, np.ndarray]:
"""Returns list of water bond angles (in radians)."""
return self._intra_oh_angles
def get_angle_traj_vectors(vec_1: np.ndarray, vec_2: np.ndarray) -> np.ndarray:
"""
Returns the angles between the trajectories of two vectors of the same shape
Args:
vec_1 (ndarray): Vector 1
vec_2 (ndarray): Vector 2
Returns:
        ndarray: The angle (in radians) between the two vectors
"""
return np.arccos(np.sum(vec_1 * vec_2, axis=-1) / (np.linalg.norm(vec_1, axis=-1) * np.linalg.norm(vec_2, axis=-1)))
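# Quick sanity check (not part of the module API above): two orthogonal unit
# vectors should give an angle of pi/2.
# >>> import numpy as np
# >>> get_angle_traj_vectors(np.array([1., 0., 0.]), np.array([0., 1., 0.]))
# 1.5707963267948966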
| [
"numpy.linalg.norm",
"numpy.intersect1d",
"numpy.array",
"numpy.sum"
] | [((3353, 3380), 'numpy.array', 'np.array', (['water_oxy_indices'], {}), '(water_oxy_indices)\n', (3361, 3380), True, 'import numpy as np\n'), ((3420, 3447), 'numpy.array', 'np.array', (['water_hyd_indices'], {}), '(water_hyd_indices)\n', (3428, 3447), True, 'import numpy as np\n'), ((2670, 2697), 'numpy.array', 'np.array', (['neighbors.indices'], {}), '(neighbors.indices)\n', (2678, 2697), True, 'import numpy as np\n'), ((2741, 2770), 'numpy.array', 'np.array', (['neighbors.distances'], {}), '(neighbors.distances)\n', (2749, 2770), True, 'import numpy as np\n'), ((2892, 2951), 'numpy.intersect1d', 'np.intersect1d', (['oxy_neigh_indices[i, bool_ind]', 'hyd_indices'], {}), '(oxy_neigh_indices[i, bool_ind], hyd_indices)\n', (2906, 2951), True, 'import numpy as np\n'), ((4878, 4908), 'numpy.sum', 'np.sum', (['(vec_1 * vec_2)'], {'axis': '(-1)'}), '(vec_1 * vec_2, axis=-1)\n', (4884, 4908), True, 'import numpy as np\n'), ((4912, 4942), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_1'], {'axis': '(-1)'}), '(vec_1, axis=-1)\n', (4926, 4942), True, 'import numpy as np\n'), ((4945, 4975), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_2'], {'axis': '(-1)'}), '(vec_2, axis=-1)\n', (4959, 4975), True, 'import numpy as np\n'), ((3568, 3595), 'numpy.linalg.norm', 'np.linalg.norm', (['val'], {'axis': '(2)'}), '(val, axis=2)\n', (3582, 3595), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
#
# @Author: <NAME>
# @Date: Oct 10, 2017
# @Filename: tiling.py
# @License: BSD 3-Clause
# @Copyright: <NAME>
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import copy
import numpy as np
from astropy import coordinates as coo
from astropy import units as uu
import shapely.affinity
import shapely.geometry
from .ifu import IFU
from ..target.target import Target
__all__ = ['Tiling', 'Tile']
class Tile(object):
"""A tiling element.
A `.Tile` is basically an `.IFU` with a position and a rotation. A
`.Tiling` is composed of multiple Tiles that optimally cover a region or
target.
Parameters:
ifu (`.IFU`):
The `.IFU` to use.
coords (`~astropy.coordinates.SkyCoord` or tuple):
The ``(RA, Dec)`` coordinates on which the centre of mass of the
`.IFU` will be placed.
scale (float):
The plate scale, used to convert IFU physical units to on-sky
positions and angles. In units of arcsec/mm.
angle (`~astropy.coordinates.Angle` or float):
The rotation angle of the `.IFU` measured from North to East.
plot_params (dict):
A dictionary of matplotlib keywords to be used when plotting the
Tile.
"""
def __init__(self, ifu, coords, scale, angle=0, plot_params=None):
assert isinstance(ifu, IFU), 'ifu is not a valid input type.'
self.ifu = ifu
if not isinstance(coords, coo.SkyCoord):
coords = coo.SkyCoord(*coords, unit='deg')
self.coords = coords
self.angle = coo.Angle(angle, unit='deg')
self._plot_params = plot_params
self.shapely = self._create_shapely(scale)
def __repr__(self):
return (f'<Tile RA={self.coords.ra.deg:.3f}, '
f'Dec={self.coords.dec.deg:.3f}, '
f'angle={self.angle.deg:.1f}>')
def _create_shapely(self, scale):
"""Creates a shapely region representing the IFU on the sky."""
if not isinstance(scale, uu.Quantity):
scale = scale * uu.arcsec / uu.mm
subifus_polygons = []
for subifu in self.ifu.subifus:
# The radius of each subifu (the central row) is 1 by definition.
subifu_size = (subifu.n_rows * subifu.n_fibres) / 1000. * uu.mm
# Scales shapely geometry to mm.
subifu_mm = shapely.affinity.scale(subifu.geometry,
subifu_size.to('mm').value,
subifu_size.to('mm').value, center=(0, 0))
# Applies the plate scale and RA correction
subifu_arcsec = shapely.affinity.scale(
subifu_mm, scale, scale / np.cos(np.radians(self.coords.dec.deg)),
center=(0, 0))
# Translates the IFU to its location on the region.
subifu_translated = shapely.affinity.translate(subifu_arcsec,
self.coords.ra.deg,
self.coords.dec.deg)
# Rotates the IFU
sub_ifu_rotated = shapely.affinity.rotate(subifu_translated, -self.angle.deg)
subifus_polygons.append(sub_ifu_rotated)
return shapely.geometry.MultiPolygon(subifus_polygons)
def set_plot_params(self, **kwargs):
"""Sets the default plotting parameters for this tile."""
self._plot_params = kwargs
def plot(self, ax, **kwargs):
"""Plots the tile.
Parameters:
ax (`~matplotlib.axes.Axes`):
The matplotlib axes on which the tile will be plotted.
kwargs (dict):
Matplotlib keywords to be passed to the tile
`~matplotlib.patches.Patch` to customise its style. If no
keywords are passed and ``plot_params`` were defined when
initialising the `.Tile`, those parameters will be used.
Otherwise, the default style will be used.
"""
        # Plotting has not been implemented yet.
        pass
class Tiling(object):
"""Performs tiling on a target on the sky.
Parameters:
target (`.Target`):
The `.Target` to be tiled.
telescope (`~lvmsurveysim.telescope.Telescope`):
The telescope that will be used to tile the target.
ifu (`.IFU` or None):
The `.IFU` to be used as tiling unit. If ``None``, the IFU
can be defined when calling the object. Otherwise, the tiling will
be run during instantiation.
Attributes:
tiles (list):
A list of `.Tile` objects describing the optimal tile for the
target.
Example:
        When a `.Tiling` is instantiated with a `.Target` and an `.IFU`, the
tiling process is run on init ::
>>> apo1 = Telescope('APO-1m')
>>> m81 = Target.from_target_list('M81')
>>> mono = MonolithicIFU()
>>> m81_tiling = Tiling(m81, apo1, ifu=mono)
>>> m81_tiling.tiles
[<Tile RA=169.1, Dec=69.2, angle=3.01>, ...]
Alternatively, a `.Tiling` can be started with just a `.Target`. In
that case, the tiling is executed when the object is called with a
valid `.IFU` ::
>>> m81_tiling = Tiling(m81, apo1)
>>> m81_tiling.tiles
[]
>>> m81_tiling(mono)
>>> m81_tiling.tiles
[<Tile RA=169.1, Dec=69.2, angle=3.01>, ...]
"""
def __init__(self, target, telescope, ifu=None):
assert isinstance(target, Target), 'target is of invalid type.'
self.target = target
self.telescope = telescope
self.tiles = []
# If ifu is defined, we call the tiling routine.
if ifu is not None:
self.__call__(ifu)
def __repr__(self):
return f'<Tiling target={self.target.name!r}, tiles={self.tiles!r}>'
def __call__(self, ifu):
"""Runs the tiling process using ``ifu`` as the tiling unit."""
self._ifu = ifu
# First pass. We overtile target.
untiled_shapely = copy.deepcopy(self.target.region.shapely)
while not untiled_shapely.is_empty:
repr_point = untiled_shapely.representative_point()
new_tile = Tile(self.ifu, (repr_point.x, repr_point.y),
self.telescope.plate_scale.to('arcsec/mm'))
self.tiles.append(new_tile)
            # Subtract the tile's on-sky footprint from the untiled region.
            untiled_shapely = untiled_shapely.difference(new_tile.shapely)
def plot(self, **kwargs):
"""Plots the tiles on the target region.
Parameters:
kwargs (dict):
Keyword arguments to be passed to `.Target.plot`. The style
of each `.Tile` can be configured by calling
`.Tile.set_plot_params`.
Returns:
fig, ax:
The matplotlib `~matplotlib.figure.Figure` and
`~matplotlib.axes.Axes` objects for this plot.
"""
fig, ax = self.target.plot(**kwargs)
for tile in self.tiles:
tile.plot(ax)
return fig, ax
@property
def ifu(self):
"""Returns the `.IFU` used for tiling."""
if self._ifu is None:
raise ValueError('ifu has not yet been defined. '
'Use Tiling(ifu) to set it in runtime.')
return self._ifu
| [
"numpy.radians",
"copy.deepcopy",
"astropy.coordinates.Angle",
"astropy.coordinates.SkyCoord"
] | [((1690, 1718), 'astropy.coordinates.Angle', 'coo.Angle', (['angle'], {'unit': '"""deg"""'}), "(angle, unit='deg')\n", (1699, 1718), True, 'from astropy import coordinates as coo\n'), ((6275, 6316), 'copy.deepcopy', 'copy.deepcopy', (['self.target.region.shapely'], {}), '(self.target.region.shapely)\n', (6288, 6316), False, 'import copy\n'), ((1605, 1638), 'astropy.coordinates.SkyCoord', 'coo.SkyCoord', (['*coords'], {'unit': '"""deg"""'}), "(*coords, unit='deg')\n", (1617, 1638), True, 'from astropy import coordinates as coo\n'), ((2857, 2888), 'numpy.radians', 'np.radians', (['self.coords.dec.deg'], {}), '(self.coords.dec.deg)\n', (2867, 2888), True, 'import numpy as np\n')] |
# Modified from: vispy: gallery 2
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Controls:
* 1 - toggle camera between first person (fly), regular 3D (turntable) and
arcball
* 2 - toggle between volume rendering methods
* 3 - toggle between rendered images
* 4 - toggle between colormaps
* 0 - reset cameras
* [] - decrease/increase isosurface threshold
With fly camera:
* WASD or arrow keys - move around
* SPACE - brake
* FC - move up-down
* IJKL or mouse - look around
"""
from itertools import cycle
from tocmfastpy import *
import numpy as np
from scipy import ndimage
from skimage import measure, morphology, segmentation
from skimage.feature import peak_local_max
import os
import watershed
from vispy import app, scene, io
from vispy.color import get_colormaps, BaseColormap
from vispy.visuals.transforms import STTransform
def get_data():
if not os.path.exists('./basins.npy'):
PATH = '/home/yunfanz/Data/21cmFast/Boxes/xH_nohalos_z010.00_nf0.865885_eff20.0_effPLindex0.0_HIIfilter1_Mmin4.3e+08_RHIImax20_500_500Mpc'
d1 = 1 - boxio.readbox(PATH).box_data
ionized = d1 > 0.999
ionized = ionized*morphology.remove_small_objects(ionized, 3) #speeds up later process
EDT = ndimage.distance_transform_edt(ionized)
smoothed_arr = np.load('smoothed.npy')
maxima = watershed.local_maxima(smoothed_arr, ionized, h_transform=False)
basins = np.where(maxima*EDT>5)
basins = np.asarray(basins).T
#import IPython; IPython.embed()
#basins = morphology.remove_small_objects(maxima, 9) #speeds up later process
#markers = measure.label(maxima, connectivity=2)
#R = measure.regionprops(markers)
#basins = np.vstack([np.mean(r.coords, axis=0) for r in R])
np.save('basins.npy', basins)
np.save('EDT.npy', EDT)
else:
EDT = np.load('./EDT.npy')
basins = np.load('./basins.npy')
return EDT, basins
vol1, basins = get_data()
#import IPython; IPython.embed()
# Read volume
#vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']
# vol2 = np.load(io.load_data_file('brain/mri.npz'))['data']
# vol2 = np.flipud(np.rollaxis(vol2, 1))
vol2 = np.load('./smoothed.npy')
# Prepare canvas
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
canvas.measure_fps()
# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()
# Set whether we are emulating a 3D texture
emulate_texture = False
# Create the volume visuals, only one is visible
volume1 = scene.visuals.Volume(vol1, parent=view.scene, threshold=0.5,
emulate_texture=emulate_texture)
#volume1.transform = scene.STTransform(translate=(64, 64, 0))
volume2 = scene.visuals.Volume(vol2, parent=view.scene, threshold=0.5,
emulate_texture=emulate_texture)
volume2.visible = False
scatter = scene.visuals.Markers()
scatter.set_data(basins, edge_color=None, face_color=(1, 0, 0, 1), size=5)
view.add(scatter)
# Create three cameras (Fly, Turntable and Arcball)
fov = 60.
cam1 = scene.cameras.FlyCamera(parent=view.scene, fov=fov, name='Fly')
cam2 = scene.cameras.TurntableCamera(parent=view.scene, fov=fov,
name='Turntable')
cam3 = scene.cameras.ArcballCamera(parent=view.scene, fov=fov, name='Arcball')
view.camera = cam2 # Select turntable at first
# Create an XYZAxis visual
axis = scene.visuals.XYZAxis(parent=view)
s = STTransform(translate=(50, 50), scale=(50, 50, 50, 1))
affine = s.as_matrix()
axis.transform = affine
# create colormaps that work well for translucent and additive volume rendering
class TransFire(BaseColormap):
glsl_map = """
vec4 translucent_fire(float t) {
return vec4(pow(t, 0.5), t, t*t, max(0, t*1.05 - 0.05));
}
"""
class TransGrays(BaseColormap):
glsl_map = """
vec4 translucent_grays(float t) {
return vec4(t, t, t, t*0.05);
}
"""
# Setup colormap iterators
opaque_cmaps = cycle(get_colormaps())
translucent_cmaps = cycle([TransFire(), TransGrays()])
opaque_cmap = next(opaque_cmaps)
translucent_cmap = next(translucent_cmaps)
# Implement axis connection with cam2
@canvas.events.mouse_move.connect
def on_mouse_move(event):
if event.button == 1 and event.is_dragging:
axis.transform.reset()
axis.transform.rotate(cam2.roll, (0, 0, 1))
axis.transform.rotate(cam2.elevation, (1, 0, 0))
axis.transform.rotate(cam2.azimuth, (0, 1, 0))
axis.transform.scale((50, 50, 0.001))
axis.transform.translate((50., 50.))
axis.update()
# Implement key presses
@canvas.events.key_press.connect
def on_key_press(event):
global opaque_cmap, translucent_cmap
if event.text == '1':
cam_toggle = {cam1: cam2, cam2: cam3, cam3: cam1}
view.camera = cam_toggle.get(view.camera, cam2)
print(view.camera.name + ' camera')
if view.camera is cam2:
axis.visible = True
else:
axis.visible = False
elif event.text == '2':
methods = ['mip', 'translucent', 'iso', 'additive']
method = methods[(methods.index(volume1.method) + 1) % 4]
print("Volume render method: %s" % method)
cmap = opaque_cmap if method in ['mip', 'iso'] else translucent_cmap
volume1.method = method
volume1.cmap = cmap
volume2.method = method
volume2.cmap = cmap
elif event.text == '3':
volume1.visible = not volume1.visible
volume2.visible = not volume1.visible
elif event.text == '4':
if volume1.method in ['mip', 'iso']:
cmap = opaque_cmap = next(opaque_cmaps)
else:
cmap = translucent_cmap = next(translucent_cmaps)
volume1.cmap = cmap
volume2.cmap = cmap
elif event.text == '0':
cam1.set_range()
cam3.set_range()
elif event.text != '' and event.text in '[]':
s = -0.025 if event.text == '[' else 0.025
volume1.threshold += s
volume2.threshold += s
th = volume1.threshold if volume1.visible else volume2.threshold
print("Isosurface threshold: %0.3f" % th)
# for testing performance
# @canvas.connect
# def on_draw(ev):
# canvas.update()
if __name__ == '__main__':
print(__doc__)
app.run()
| [
"vispy.color.get_colormaps",
"vispy.visuals.transforms.STTransform",
"numpy.load",
"scipy.ndimage.distance_transform_edt",
"numpy.save",
"vispy.scene.cameras.TurntableCamera",
"vispy.scene.cameras.ArcballCamera",
"vispy.app.run",
"vispy.scene.cameras.FlyCamera",
"os.path.exists",
"numpy.asarray"... | [((2470, 2495), 'numpy.load', 'np.load', (['"""./smoothed.npy"""'], {}), "('./smoothed.npy')\n", (2477, 2495), True, 'import numpy as np\n'), ((2523, 2588), 'vispy.scene.SceneCanvas', 'scene.SceneCanvas', ([], {'keys': '"""interactive"""', 'size': '(800, 600)', 'show': '(True)'}), "(keys='interactive', size=(800, 600), show=True)\n", (2540, 2588), False, 'from vispy import app, scene, io\n'), ((2846, 2943), 'vispy.scene.visuals.Volume', 'scene.visuals.Volume', (['vol1'], {'parent': 'view.scene', 'threshold': '(0.5)', 'emulate_texture': 'emulate_texture'}), '(vol1, parent=view.scene, threshold=0.5,\n emulate_texture=emulate_texture)\n', (2866, 2943), False, 'from vispy import app, scene, io\n'), ((3043, 3140), 'vispy.scene.visuals.Volume', 'scene.visuals.Volume', (['vol2'], {'parent': 'view.scene', 'threshold': '(0.5)', 'emulate_texture': 'emulate_texture'}), '(vol2, parent=view.scene, threshold=0.5,\n emulate_texture=emulate_texture)\n', (3063, 3140), False, 'from vispy import app, scene, io\n'), ((3203, 3226), 'vispy.scene.visuals.Markers', 'scene.visuals.Markers', ([], {}), '()\n', (3224, 3226), False, 'from vispy import app, scene, io\n'), ((3391, 3454), 'vispy.scene.cameras.FlyCamera', 'scene.cameras.FlyCamera', ([], {'parent': 'view.scene', 'fov': 'fov', 'name': '"""Fly"""'}), "(parent=view.scene, fov=fov, name='Fly')\n", (3414, 3454), False, 'from vispy import app, scene, io\n'), ((3462, 3537), 'vispy.scene.cameras.TurntableCamera', 'scene.cameras.TurntableCamera', ([], {'parent': 'view.scene', 'fov': 'fov', 'name': '"""Turntable"""'}), "(parent=view.scene, fov=fov, name='Turntable')\n", (3491, 3537), False, 'from vispy import app, scene, io\n'), ((3582, 3653), 'vispy.scene.cameras.ArcballCamera', 'scene.cameras.ArcballCamera', ([], {'parent': 'view.scene', 'fov': 'fov', 'name': '"""Arcball"""'}), "(parent=view.scene, fov=fov, name='Arcball')\n", (3609, 3653), False, 'from vispy import app, scene, io\n'), ((3737, 3771), 'vispy.scene.visuals.XYZAxis', 'scene.visuals.XYZAxis', ([], {'parent': 'view'}), '(parent=view)\n', (3758, 3771), False, 'from vispy import app, scene, io\n'), ((3776, 3830), 'vispy.visuals.transforms.STTransform', 'STTransform', ([], {'translate': '(50, 50)', 'scale': '(50, 50, 50, 1)'}), '(translate=(50, 50), scale=(50, 50, 50, 1))\n', (3787, 3830), False, 'from vispy.visuals.transforms import STTransform\n'), ((4318, 4333), 'vispy.color.get_colormaps', 'get_colormaps', ([], {}), '()\n', (4331, 4333), False, 'from vispy.color import get_colormaps, BaseColormap\n'), ((6620, 6629), 'vispy.app.run', 'app.run', ([], {}), '()\n', (6627, 6629), False, 'from vispy import app, scene, io\n'), ((1138, 1168), 'os.path.exists', 'os.path.exists', (['"""./basins.npy"""'], {}), "('./basins.npy')\n", (1152, 1168), False, 'import os\n'), ((1502, 1541), 'scipy.ndimage.distance_transform_edt', 'ndimage.distance_transform_edt', (['ionized'], {}), '(ionized)\n', (1532, 1541), False, 'from scipy import ndimage\n'), ((1565, 1588), 'numpy.load', 'np.load', (['"""smoothed.npy"""'], {}), "('smoothed.npy')\n", (1572, 1588), True, 'import numpy as np\n'), ((1606, 1670), 'watershed.local_maxima', 'watershed.local_maxima', (['smoothed_arr', 'ionized'], {'h_transform': '(False)'}), '(smoothed_arr, ionized, h_transform=False)\n', (1628, 1670), False, 'import watershed\n'), ((1688, 1714), 'numpy.where', 'np.where', (['(maxima * EDT > 5)'], {}), '(maxima * EDT > 5)\n', (1696, 1714), True, 'import numpy as np\n'), ((2052, 2081), 'numpy.save', 'np.save', (['"""basins.npy"""', 
'basins'], {}), "('basins.npy', basins)\n", (2059, 2081), True, 'import numpy as np\n'), ((2090, 2113), 'numpy.save', 'np.save', (['"""EDT.npy"""', 'EDT'], {}), "('EDT.npy', EDT)\n", (2097, 2113), True, 'import numpy as np\n'), ((2138, 2158), 'numpy.load', 'np.load', (['"""./EDT.npy"""'], {}), "('./EDT.npy')\n", (2145, 2158), True, 'import numpy as np\n'), ((2176, 2199), 'numpy.load', 'np.load', (['"""./basins.npy"""'], {}), "('./basins.npy')\n", (2183, 2199), True, 'import numpy as np\n'), ((1418, 1461), 'skimage.morphology.remove_small_objects', 'morphology.remove_small_objects', (['ionized', '(3)'], {}), '(ionized, 3)\n', (1449, 1461), False, 'from skimage import measure, morphology, segmentation\n'), ((1728, 1746), 'numpy.asarray', 'np.asarray', (['basins'], {}), '(basins)\n', (1738, 1746), True, 'import numpy as np\n')] |
# Author: <NAME>
# <EMAIL>
#
# Counter needs to be configured to print to serial port at 1 Hz.
#
# Line choices optimized for D2 lamp and CsTe PMT.
import serial
import numpy as np
import sys
import vm502
# Wavelengths for scan
LAMBDA = ['65.0', '116.9', '117.9', '118.9', '119.7',
'120.6', '121.4', '122.8', '123.5',
'124.2', '125.9', '127.4', '130.4',
'131.3', '132.2', '135.0', '137.0', '139.0',
'141.0', '142.0', '144.0', '146.0',
'148.0', '149.5', '151.7', '153.3',
'155.0', '156.5', '158.5', '160.8',
'162.9', '164.5', '167.0', '170.0', '65.0']
# LAMBDA = ['91.9', '93.1', '97.2', '102.5',
# '104.7', '106.6', '109.9', '111.5',
# '114.4', '116.0', '117.4', '119.8',
# '121.4']
# Number of samples to average per wavelength
#N = 10
N = 5
# Read N samples from counter and return average
def read_n_samples(s, n):
s.reset_input_buffer()
print(s.read_until())
#print(s.read_until())
l = []
for i in range(n):
v = s.read_until()
#print(v)
        v = float(v.decode('ASCII').replace(',', ''))  # built-in float; np.float was removed in NumPy 1.24
l.append(v)
#l.append(float(s.read_until()))
a = np.array(l)
return(np.average(a), np.std(a))
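# Example: with the counter printing one comma-formatted reading per line,
# read_n_samples(cs, 5) returns the (mean, standard deviation) of five readings.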
def main(mp, cp, fname):
ms = serial.Serial(mp, 9600, timeout = 5.0)
cs = serial.Serial(cp, 9600, timeout = 3.0)
cs.reset_input_buffer()
print(cs.read_until())
cl = vm502.vm502_get_lambda(ms)
print("Wavelength: {0:s}".format(cl))
flux = []
fstd = []
for wav in LAMBDA:
cl = vm502.vm502_goto(ms, wav)
print("Wavelength: {0:s}".format(cl))
f, fdev = read_n_samples(cs, N)
flux.append(f)
fstd.append(fdev)
print("Flux: {0:f}, std: {1:f}".format(f, fdev))
print(LAMBDA)
print(flux)
print(np.column_stack((LAMBDA,flux, fstd)))
np.savetxt(fname, np.column_stack((LAMBDA, flux, fstd)), delimiter = ',', fmt = '%s')
cl = vm502.vm502_goto(ms, '90.0')
cs.close()
ms.close()
if __name__ == '__main__':
if (len(sys.argv) < 4):
print("Usage: monopmtd2scan.py <mono port> <counter port> filename")
exit()
mono_port = str(sys.argv[1])
counter_port = str(sys.argv[2])
print("Monochromator Port: {0:s}".format(mono_port))
print("Counter Port: {0:s}".format(counter_port))
fname = sys.argv[3]
print("Saving to file: {0:s}".format(fname))
main(mono_port, counter_port, fname)
| [
"serial.Serial",
"vm502.vm502_get_lambda",
"vm502.vm502_goto",
"numpy.average",
"numpy.std",
"numpy.array",
"numpy.column_stack"
] | [((1218, 1229), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (1226, 1229), True, 'import numpy as np\n'), ((1304, 1340), 'serial.Serial', 'serial.Serial', (['mp', '(9600)'], {'timeout': '(5.0)'}), '(mp, 9600, timeout=5.0)\n', (1317, 1340), False, 'import serial\n'), ((1352, 1388), 'serial.Serial', 'serial.Serial', (['cp', '(9600)'], {'timeout': '(3.0)'}), '(cp, 9600, timeout=3.0)\n', (1365, 1388), False, 'import serial\n'), ((1457, 1483), 'vm502.vm502_get_lambda', 'vm502.vm502_get_lambda', (['ms'], {}), '(ms)\n', (1479, 1483), False, 'import vm502\n'), ((2009, 2037), 'vm502.vm502_goto', 'vm502.vm502_goto', (['ms', '"""90.0"""'], {}), "(ms, '90.0')\n", (2025, 2037), False, 'import vm502\n'), ((1241, 1254), 'numpy.average', 'np.average', (['a'], {}), '(a)\n', (1251, 1254), True, 'import numpy as np\n'), ((1256, 1265), 'numpy.std', 'np.std', (['a'], {}), '(a)\n', (1262, 1265), True, 'import numpy as np\n'), ((1597, 1622), 'vm502.vm502_goto', 'vm502.vm502_goto', (['ms', 'wav'], {}), '(ms, wav)\n', (1613, 1622), False, 'import vm502\n'), ((1866, 1903), 'numpy.column_stack', 'np.column_stack', (['(LAMBDA, flux, fstd)'], {}), '((LAMBDA, flux, fstd))\n', (1881, 1903), True, 'import numpy as np\n'), ((1931, 1968), 'numpy.column_stack', 'np.column_stack', (['(LAMBDA, flux, fstd)'], {}), '((LAMBDA, flux, fstd))\n', (1946, 1968), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import str
from builtins import map
from past.utils import old_div
import numpy as np
from math import radians, cos, sin
from .spacegroup import SpaceGroup
try:
# raise ImportError
import tinyarray as ta
except ImportError:
import numpy as ta
TINYARRAY = False
else:
TINYARRAY = True
def comp2dict(composition):
"""Takes composition: Si20 O10, returns dict of atoms {'Si':20,'O':10}"""
import re
    pat = re.compile('([A-Za-z]+|[0-9]+)')  # alternate runs of letters and digits
    m = re.findall(pat, composition)
    return dict(zip(m[::2], map(int, m[1::2])))
def dict2comp(d):
"""Takes a composition dictionary and turns it into a string"""
return " ".join(["{}{}".format(*item) for item in list(d.items())])
class UnitCell(SpaceGroup):
"""Class for unit cell/space group functions"""
def __init__(self, cell_params, spgr, name="", composition={}):
if isinstance(spgr, SpaceGroup):
self.__dict__.update(spgr.__dict__)
else:
super(UnitCell, self).__init__(spgr)
self.name = name
if isinstance(composition, str):
composition = comp2dict(composition)
self.composition = composition
if len(cell_params) != 6:
cell_params = self.parse_cellparams(cell_params)
self.parameters = list(float(par) for par in cell_params)
if not self.is_valid_cell():
print("\n >> Warning: Unit cell parameters do not fit with space group {}".format(self.space_group))
def __repr__(self):
if self.name:
return "{}: {} - {}".format(self.name, str(self.parameters), self.spgr_name)
else:
return "{} - {}".format(str(self.parameters), self.spgr_name)
def __iter__(self):
for par in self.parameters:
yield par
@property
def a(self):
return self.parameters[0]
@property
def b(self):
return self.parameters[1]
@property
def c(self):
return self.parameters[2]
@property
def al(self):
return self.parameters[3]
@property
def be(self):
return self.parameters[4]
@property
def ga(self):
return self.parameters[5]
def to_dict(self):
d = {"name": self.name,
"spgr": self.spgr_name,
"params": self.parameters }
if self.composition:
d["composition"] = dict2comp(self.composition)
return d
@classmethod
def from_dict(cls, d):
"""Create UnitCell instance from dict"""
if "params" in d:
cell_params = d["params"]
else:
cell_params = [d[key] for key in ("a", "b", "c", "al", "be", "ga")]
spgr = d["spgr"]
name = d.get("name", "NoName")
composition = d.get("composition", None)
return cls(cell_params=cell_params, spgr=spgr, name=name, composition=composition)
def info(self):
comp = " ({})".format(dict2comp(self.composition)) if self.composition else ""
print("Cell {}{}".format(self.name, comp))
print(" a {:12.4f} al {:12.2f}".format(self.a, self.al))
print(" b {:12.4f} be {:12.2f}".format(self.b, self.be))
print(" c {:12.4f} ga {:12.2f}".format(self.c, self.ga))
print("Vol. {:10.2f}".format(self.volume))
print("Spgr {}".format(self.spgr_name))
print()
def metric_tensor(self, inverse=False):
"""Returns the metric tensor
http://reference.iucr.org/dictionary/Metric_tensor
Dunitz, 1979, p227"""
a, b, c, al, be, ga = self.parameters
al = radians(al)
be = radians(be)
ga = radians(ga)
vol = self.volume
if inverse:
m11 = (b*c*sin(al)/vol)**2
m22 = (c*a*sin(be)/vol)**2
m33 = (a*b*sin(ga)/vol)**2
m12 = a*b*(old_div(c,vol))**2 * (cos(al)*cos(be)-cos(ga))
m23 = b*c*(old_div(a,vol))**2 * (cos(be)*cos(ga)-cos(al))
m13 = a*c*(old_div(b,vol))**2 * (cos(ga)*cos(al)-cos(be))
mat = ta.array([[m11, m12, m13],
[m12, m22, m23],
[m13, m23, m33]])
else:
mat = ta.array([[a*a, a*b*cos(ga), a*c*cos(be)],
[a*b*cos(ga), b*b, b*c*cos(al)],
[a*c*cos(be), b*c*cos(al), c*c]])
return mat
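    # Reference relations (standard crystallographic conventions): for a vector
    # x in fractional coordinates, its squared length is x . G . x with G the
    # direct metric tensor, and 1/d_hkl**2 = h . inverse(G) . h for indices h.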
def orthogonalization_matrix(self, inverse=False):
"""orthogonalization matrix for crystal to cartesian coordinates
not to be confused with the unit cell orthogonalization matrix, which is the transpose of this one
<NAME>, Dunitz, 1979, p237"""
a, b, c, al, be, ga = self.parameters
al = radians(al)
be = radians(be)
ga = radians(ga)
vol = self.volume
if inverse:
mat = ta.array([[old_div(1,a), old_div((-1*cos(ga)), (a*sin(ga))), old_div((cos(ga) * cos(al) - cos(be)), (a*vol * sin(ga)))],
[0, old_div(1,
(b*sin(ga))), old_div((cos(ga) * cos(be) - cos(al)), (b*vol * sin(ga)))],
[0, 0, old_div((a*b*sin(ga)), (vol))]])
else:
mat = ta.array([[a, b*cos(ga), c*cos(be)],
[0, b*sin(ga),
c*(cos(al)-cos(be)*cos(ga))/sin(ga)],
[0, 0, old_div(vol,(a*b*sin(ga)))]])
return mat
def _calc_dspacing(self, idx):
"""Calc dspacing at given index (i.e. idx= (1,0,0)
Calculates d-spacing based on given parameters.
a,b,c,al,be,ge are given as floats
al,be,ga can be given as ndarrays or floats
kind specifies the type of cell -> triclinic works for the general case, but is a bit slower
although still fast enough
Tested: orthorhombic cell on (orthorhombic, monoclinic, triclinic)
Tested: triclinic cell with dvalues from topas
"""
kind = self.crystal_system
a, b, c, al, be, ga = self.parameters
h = idx[0]
k = idx[1]
l = idx[2]
if kind == 'Cubic':
idsq = old_div((h**2 + k**2 + l**2), a**2)
elif kind == 'Tetragonal':
idsq = old_div((h**2 + k**2), a**2) + old_div(l**2, c**2)
elif kind == 'Orthorhombic':
idsq = old_div(h**2, a**2) + old_div(k**2, b**2) + old_div(l**2, c**2)
elif kind == "Trigonal":
if self.setting == "R":
al = radians(al)
num = (h**2 + k**2 + l**2) * sin(al)**2 + 2*(h*k + k*l + h*l)*(cos(al)**2 - cos(al))
denom = a**2 * (1 - 3*cos(al)**2 + 2*cos(al)**3)
idsq = old_div(num, denom)
else:
idsq = (old_div(4.0,3.0)) * (h**2 + h*k + k**2) / (a**2) + old_div(l**2, c**2)
elif kind == 'Hexagonal':
idsq = (old_div(4.0,3.0)) * (h**2 + h*k + k**2) / (a**2) + old_div(l**2, c**2)
elif kind == 'Monoclinic':
be = radians(be)
idsq = (old_div(1,sin(be)**2)) * (old_div(h**2,a**2) + k**2 * sin(be)**2 /
b**2 + old_div(l**2,c**2) - old_div((2*h*l*cos(be)), (a*c)))
elif kind == 'Triclinic':
V = self.volume
al = radians(al)
be = radians(be)
ga = radians(ga)
idsq = (old_div(1,V**2)) * (
h**2 * b**2 * c**2 * sin(al)**2
+ k**2 * a**2 * c**2 * sin(be)**2
+ l**2 * a**2 * b**2 * sin(ga)**2
+ 2*h*k*a*b*c**2 * (cos(al) * cos(be) - cos(ga))
+ 2*k*l*b*c*a**2 * (cos(be) * cos(ga) - cos(al))
+ 2*h*l*c*a*b**2 * (cos(al) * cos(ga) - cos(be))
)
else:
raise ValueError("Unknown crystal system {}, fallback to Triclinic".format(kind))
        # where=idsq != 0 skips zero entries (e.g. the (0, 0, 0) reflection);
        # note that those positions are left uninitialized by np.power
        return np.power(idsq, -0.5, where=idsq != 0)
def calc_dspacing(self, idx):
"""When passing a single index [h, k, l]"""
return self._apply_along_index(idx, self._calc_dspacing)
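    # Illustrative sketch (not in the original class): d-spacings are usually
    # converted to diffraction angles via Bragg's law, 2*d*sin(theta) = lambda.
    # `calc_two_theta` and its `wavelength` parameter (Angstrom, Cu K-alpha by
    # default) are hypothetical additions for demonstration only.
    def calc_two_theta(self, idx, wavelength=1.5406):
        """Sketch: Bragg angle 2-theta in degrees for a single reflection idx."""
        from math import asin, degrees
        d = float(self.calc_dspacing(idx))
        return 2.0 * degrees(asin(wavelength / (2.0 * d)))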
@property
def volume(self):
"""Returns volume for the general case from cell parameters"""
if hasattr(self, "_volume"):
return self._volume
a, b, c, al, be, ga = self.parameters
al = radians(al)
be = radians(be)
ga = radians(ga)
vol = a*b*c * \
((1+2*cos(al)*cos(be)*cos(ga)-cos(al)**2-cos(be)**2-cos(ga)**2)
** .5)
self._volume = vol
return vol
def is_valid_cell(self):
a,b,c,al,be,ga = self.parameters
system = self.crystal_system
setting = self.setting
if system == "Triclinic":
return True
elif system == "Monoclinic":
if self.unique_axis == "y":
return al == ga == 90.0
elif self.unique_axis == "x":
return be == ga == 90.0
elif self.unique_axis == "z":
return al == be == 90.0
elif system == "Orthorhombic":
return al == be == ga
elif system == "Tetragonal":
return (a == b) and (al == be == ga == 90.0)
elif system == "Trigonal":
if setting == "R":
return (a == b == c) and (al == be == ga)
else:
return (a == b) and (al == be == 90.0) and (ga == 120.0)
elif system == "Hexagonal":
return (a == b) and (al == be == 90.0) and (ga == 120.0)
elif system == "Cubic":
return (a == b == c) and (al == be == ga == 90.0)
        else:
            raise ValueError("Unknown crystal system {}".format(system))
def parse_cellparams(self, parameters):
system = self.crystal_system
if system == "Triclinic":
assert len(parameters) == 6, "Expect 6 cell parameters"
elif system == "Monoclinic":
assert len(parameters) == 4, "Expect 4 cell parameters"
a, b, c, angle = parameters
if self.unique_axis == "y":
parameters = [a, b, c, 90.0, angle, 90.0]
elif self.unique_axis == "x":
parameters = [a, b, c, angle, 90.0, 90.0]
elif self.unique_axis == "z":
parameters = [a, b, c, 90.0, 90.0, angle]
elif system == "Orthorhombic":
assert len(parameters) == 3, "Expect 3 cell parameters"
a, b, c = parameters
parameters = [a, b, c, 90.0, 90.0, 90.0]
elif system == "Tetragonal":
assert len(parameters) == 2, "Expect 2 cell parameters"
a, c = parameters
parameters = [a, a, c, 90.0, 90.0, 90.0]
elif system == "Trigonal":
if self.setting == "R":
assert len(parameters) == 2, "Expect 2 cell parameters"
a, al = parameters
parameters = [a, a, a, al, al, al]
else:
assert len(parameters) == 2, "Expect 2 cell parameters"
a, c = parameters
parameters = [a, a, c, 90.0, 90.0, 120.0]
elif system == "Hexagonal":
assert len(parameters) == 2, "Expect 2 cell parameters"
a, c = parameters
parameters = [a, a, c, 90.0, 90.0, 120.0]
elif system == "Cubic":
            assert len(parameters) == 1, "Expect 1 cell parameter"
a = parameters[0]
parameters = [a, a, a, 90.0, 90.0, 90.0]
        else:
            raise ValueError("Unknown crystal system {}".format(system))
assert len(parameters) == 6, "Expect 6 cell parameters"
return parameters
def get_dmin(self, indices):
return np.min(self.calc_dspacing(indices))
def generate_hkl(self, hmax=10, expand=False, include_sysabs=False, include_friedels=False, get_raw=False):
import subprocess as sp
from spacegroup import expand_to_p1
import re
        line_match = r'#?\s+(?P<h>-?\d+)\s+(?P<k>-?\d+)\s+(?P<l>-?\d+)\s+(?P<m>\d+)\s+(\((?P<sysabs>\d+)\)|\[(?P<friedel>-?\d+)\])?'
        find_block = r'>Begin hklList.*?\n(.*?)>End hklList'
cmd = [ 'sginfo',
self.space_group,
'-UnitCell={}'.format(" ".join([str(par) for par in self.parameters])),
'-hklList={:d}'.format(hmax),
]
if include_sysabs:
cmd.append('-v')
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT)
out, err = p.communicate()
out = out.decode()
refl_block = re.findall(find_block, out, re.S)
if len(refl_block) != 1:
raise IOError(f"Could not find reflection block (space group: {self.space_group}).")
lines = refl_block[0].splitlines()
        lines = (re.match(line_match, line) for line in lines)
        lines = (line for line in lines if line is not None)  # drop lines that are not reflections
if not include_sysabs:
lines = (line for line in lines if not line["sysabs"])
if not include_friedels:
lines = (line for line in lines if not line["friedel"])
if get_raw:
return lines
else:
hkl = np.array([(int(line["h"]), int(line["k"]), int(line["l"])) for line in lines])
if expand:
hkl = expand_to_p1(hkl, self)
return hkl
if __name__ == '__main__':
params = (13.0, 19.0, 20.0, 90.0, 90.0, 90.0)
spgr = "Fmmm"
cell = UnitCell(params, spgr, name="test")
refls = cell.generate_hkl(hmax=20, get_raw=True)
hkl1 = cell.generate_hkl(hmax=20, include_sysabs=False)
hkl2 = cell.generate_hkl(hmax=20, include_sysabs=True)
print()
print(len(hkl1))
print(len(hkl2))
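    # Illustrative addition (not in the original demo; assumes calc_dspacing
    # accepts an (N, 3) index array via _apply_along_index):
    print("dmin: {:.4f}".format(cell.get_dmin(hkl2)))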
from spacegroup import generate_hkl_listing
generate_hkl_listing(cell)
from IPython import embed
embed()
| [
"subprocess.Popen",
"spacegroup.expand_to_p1",
"spacegroup.generate_hkl_listing",
"past.utils.old_div",
"math.radians",
"numpy.power",
"re.match",
"IPython.embed",
"math.sin",
"re.findall",
"numpy.array",
"math.cos",
"builtins.str",
"builtins.map",
"re.compile"
] | [((609, 638), 're.compile', 're.compile', (['"""([A-z]+|[0-9]+)"""'], {}), "('([A-z]+|[0-9]+)')\n", (619, 638), False, 'import re\n'), ((647, 675), 're.findall', 're.findall', (['pat', 'composition'], {}), '(pat, composition)\n', (657, 675), False, 'import re\n'), ((14072, 14098), 'spacegroup.generate_hkl_listing', 'generate_hkl_listing', (['cell'], {}), '(cell)\n', (14092, 14098), False, 'from spacegroup import generate_hkl_listing\n'), ((14134, 14141), 'IPython.embed', 'embed', ([], {}), '()\n', (14139, 14141), False, 'from IPython import embed\n'), ((3813, 3824), 'math.radians', 'radians', (['al'], {}), '(al)\n', (3820, 3824), False, 'from math import radians, cos, sin\n'), ((3838, 3849), 'math.radians', 'radians', (['be'], {}), '(be)\n', (3845, 3849), False, 'from math import radians, cos, sin\n'), ((3863, 3874), 'math.radians', 'radians', (['ga'], {}), '(ga)\n', (3870, 3874), False, 'from math import radians, cos, sin\n'), ((4968, 4979), 'math.radians', 'radians', (['al'], {}), '(al)\n', (4975, 4979), False, 'from math import radians, cos, sin\n'), ((4993, 5004), 'math.radians', 'radians', (['be'], {}), '(be)\n', (5000, 5004), False, 'from math import radians, cos, sin\n'), ((5018, 5029), 'math.radians', 'radians', (['ga'], {}), '(ga)\n', (5025, 5029), False, 'from math import radians, cos, sin\n'), ((8253, 8290), 'numpy.power', 'np.power', (['idsq', '(-0.5)'], {'where': '(idsq != 0)'}), '(idsq, -0.5, where=idsq != 0)\n', (8261, 8290), True, 'import numpy as np\n'), ((8677, 8688), 'math.radians', 'radians', (['al'], {}), '(al)\n', (8684, 8688), False, 'from math import radians, cos, sin\n'), ((8702, 8713), 'math.radians', 'radians', (['be'], {}), '(be)\n', (8709, 8713), False, 'from math import radians, cos, sin\n'), ((8727, 8738), 'math.radians', 'radians', (['ga'], {}), '(ga)\n', (8734, 8738), False, 'from math import radians, cos, sin\n'), ((12791, 12838), 'subprocess.Popen', 'sp.Popen', (['cmd'], {'stdout': 'sp.PIPE', 'stderr': 'sp.STDOUT'}), '(cmd, stdout=sp.PIPE, stderr=sp.STDOUT)\n', (12799, 12838), True, 'import subprocess as sp\n'), ((12923, 12956), 're.findall', 're.findall', (['find_block', 'out', 're.S'], {}), '(find_block, out, re.S)\n', (12933, 12956), False, 'import re\n'), ((4270, 4331), 'numpy.array', 'ta.array', (['[[m11, m12, m13], [m12, m22, m23], [m13, m23, m33]]'], {}), '([[m11, m12, m13], [m12, m22, m23], [m13, m23, m33]])\n', (4278, 4331), True, 'import numpy as ta\n'), ((6515, 6556), 'past.utils.old_div', 'old_div', (['(h ** 2 + k ** 2 + l ** 2)', '(a ** 2)'], {}), '(h ** 2 + k ** 2 + l ** 2, a ** 2)\n', (6522, 6556), False, 'from past.utils import old_div\n'), ((13150, 13176), 're.match', 're.match', (['line_match', 'line'], {}), '(line_match, line)\n', (13158, 13176), False, 'import re\n'), ((1782, 1802), 'builtins.str', 'str', (['self.parameters'], {}), '(self.parameters)\n', (1785, 1802), False, 'from builtins import str\n'), ((1870, 1890), 'builtins.str', 'str', (['self.parameters'], {}), '(self.parameters)\n', (1873, 1890), False, 'from builtins import str\n'), ((13599, 13622), 'spacegroup.expand_to_p1', 'expand_to_p1', (['hkl', 'self'], {}), '(hkl, self)\n', (13611, 13622), False, 'from spacegroup import expand_to_p1\n'), ((712, 729), 'builtins.map', 'map', (['int', 'm[1::2]'], {}), '(int, m[1::2])\n', (715, 729), False, 'from builtins import map\n'), ((4102, 4109), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (4105, 4109), False, 'from math import radians, cos, sin\n'), ((4172, 4179), 'math.cos', 'cos', (['al'], {}), '(al)\n', (4175, 4179), False, 
'from math import radians, cos, sin\n'), ((4242, 4249), 'math.cos', 'cos', (['be'], {}), '(be)\n', (4245, 4249), False, 'from math import radians, cos, sin\n'), ((6606, 6638), 'past.utils.old_div', 'old_div', (['(h ** 2 + k ** 2)', '(a ** 2)'], {}), '(h ** 2 + k ** 2, a ** 2)\n', (6613, 6638), False, 'from past.utils import old_div\n'), ((6637, 6660), 'past.utils.old_div', 'old_div', (['(l ** 2)', '(c ** 2)'], {}), '(l ** 2, c ** 2)\n', (6644, 6660), False, 'from past.utils import old_div\n'), ((3946, 3953), 'math.sin', 'sin', (['al'], {}), '(al)\n', (3949, 3953), False, 'from math import radians, cos, sin\n'), ((3985, 3992), 'math.sin', 'sin', (['be'], {}), '(be)\n', (3988, 3992), False, 'from math import radians, cos, sin\n'), ((4024, 4031), 'math.sin', 'sin', (['ga'], {}), '(ga)\n', (4027, 4031), False, 'from math import radians, cos, sin\n'), ((4064, 4079), 'past.utils.old_div', 'old_div', (['c', 'vol'], {}), '(c, vol)\n', (4071, 4079), False, 'from past.utils import old_div\n'), ((4086, 4093), 'math.cos', 'cos', (['al'], {}), '(al)\n', (4089, 4093), False, 'from math import radians, cos, sin\n'), ((4094, 4101), 'math.cos', 'cos', (['be'], {}), '(be)\n', (4097, 4101), False, 'from math import radians, cos, sin\n'), ((4134, 4149), 'past.utils.old_div', 'old_div', (['a', 'vol'], {}), '(a, vol)\n', (4141, 4149), False, 'from past.utils import old_div\n'), ((4156, 4163), 'math.cos', 'cos', (['be'], {}), '(be)\n', (4159, 4163), False, 'from math import radians, cos, sin\n'), ((4164, 4171), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (4167, 4171), False, 'from math import radians, cos, sin\n'), ((4204, 4219), 'past.utils.old_div', 'old_div', (['b', 'vol'], {}), '(b, vol)\n', (4211, 4219), False, 'from past.utils import old_div\n'), ((4226, 4233), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (4229, 4233), False, 'from math import radians, cos, sin\n'), ((4234, 4241), 'math.cos', 'cos', (['al'], {}), '(al)\n', (4237, 4241), False, 'from math import radians, cos, sin\n'), ((5107, 5120), 'past.utils.old_div', 'old_div', (['(1)', 'a'], {}), '(1, a)\n', (5114, 5120), False, 'from past.utils import old_div\n'), ((6758, 6781), 'past.utils.old_div', 'old_div', (['(l ** 2)', '(c ** 2)'], {}), '(l ** 2, c ** 2)\n', (6765, 6781), False, 'from past.utils import old_div\n'), ((8827, 8834), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (8830, 8834), False, 'from math import radians, cos, sin\n'), ((12629, 12637), 'builtins.str', 'str', (['par'], {}), '(par)\n', (12632, 12637), False, 'from builtins import str\n'), ((4450, 4457), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (4453, 4457), False, 'from math import radians, cos, sin\n'), ((4463, 4470), 'math.cos', 'cos', (['be'], {}), '(be)\n', (4466, 4470), False, 'from math import radians, cos, sin\n'), ((4506, 4513), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (4509, 4513), False, 'from math import radians, cos, sin\n'), ((4532, 4539), 'math.cos', 'cos', (['al'], {}), '(al)\n', (4535, 4539), False, 'from math import radians, cos, sin\n'), ((4575, 4582), 'math.cos', 'cos', (['be'], {}), '(be)\n', (4578, 4582), False, 'from math import radians, cos, sin\n'), ((4588, 4595), 'math.cos', 'cos', (['al'], {}), '(al)\n', (4591, 4595), False, 'from math import radians, cos, sin\n'), ((5534, 5541), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (5537, 5541), False, 'from math import radians, cos, sin\n'), ((5571, 5578), 'math.cos', 'cos', (['be'], {}), '(be)\n', (5574, 5578), False, 'from math import radians, cos, sin\n'), ((5615, 5622), 'math.sin', 'sin', (['ga'], {}), '(ga)\n', 
(5618, 5622), False, 'from math import radians, cos, sin\n'), ((5681, 5688), 'math.sin', 'sin', (['ga'], {}), '(ga)\n', (5684, 5688), False, 'from math import radians, cos, sin\n'), ((6714, 6737), 'past.utils.old_div', 'old_div', (['(h ** 2)', '(a ** 2)'], {}), '(h ** 2, a ** 2)\n', (6721, 6737), False, 'from past.utils import old_div\n'), ((6736, 6759), 'past.utils.old_div', 'old_div', (['(k ** 2)', '(b ** 2)'], {}), '(k ** 2, b ** 2)\n', (6743, 6759), False, 'from past.utils import old_div\n'), ((6869, 6880), 'math.radians', 'radians', (['al'], {}), '(al)\n', (6876, 6880), False, 'from math import radians, cos, sin\n'), ((7070, 7089), 'past.utils.old_div', 'old_div', (['num', 'denom'], {}), '(num, denom)\n', (7077, 7089), False, 'from past.utils import old_div\n'), ((8816, 8823), 'math.cos', 'cos', (['be'], {}), '(be)\n', (8819, 8823), False, 'from math import radians, cos, sin\n'), ((5133, 5140), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (5136, 5140), False, 'from math import radians, cos, sin\n'), ((5146, 5153), 'math.sin', 'sin', (['ga'], {}), '(ga)\n', (5149, 5153), False, 'from math import radians, cos, sin\n'), ((5186, 5193), 'math.cos', 'cos', (['be'], {}), '(be)\n', (5189, 5193), False, 'from math import radians, cos, sin\n'), ((5205, 5212), 'math.sin', 'sin', (['ga'], {}), '(ga)\n', (5208, 5212), False, 'from math import radians, cos, sin\n'), ((5306, 5313), 'math.sin', 'sin', (['ga'], {}), '(ga)\n', (5309, 5313), False, 'from math import radians, cos, sin\n'), ((5346, 5353), 'math.cos', 'cos', (['al'], {}), '(al)\n', (5349, 5353), False, 'from math import radians, cos, sin\n'), ((5365, 5372), 'math.sin', 'sin', (['ga'], {}), '(ga)\n', (5368, 5372), False, 'from math import radians, cos, sin\n'), ((5466, 5473), 'math.sin', 'sin', (['ga'], {}), '(ga)\n', (5469, 5473), False, 'from math import radians, cos, sin\n'), ((5769, 5776), 'math.sin', 'sin', (['ga'], {}), '(ga)\n', (5772, 5776), False, 'from math import radians, cos, sin\n'), ((7183, 7206), 'past.utils.old_div', 'old_div', (['(l ** 2)', '(c ** 2)'], {}), '(l ** 2, c ** 2)\n', (7190, 7206), False, 'from past.utils import old_div\n'), ((7309, 7332), 'past.utils.old_div', 'old_div', (['(l ** 2)', '(c ** 2)'], {}), '(l ** 2, c ** 2)\n', (7316, 7332), False, 'from past.utils import old_div\n'), ((7382, 7393), 'math.radians', 'radians', (['be'], {}), '(be)\n', (7389, 7393), False, 'from math import radians, cos, sin\n'), ((8805, 8812), 'math.cos', 'cos', (['al'], {}), '(al)\n', (8808, 8812), False, 'from math import radians, cos, sin\n'), ((5166, 5173), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (5169, 5173), False, 'from math import radians, cos, sin\n'), ((5176, 5183), 'math.cos', 'cos', (['al'], {}), '(al)\n', (5179, 5183), False, 'from math import radians, cos, sin\n'), ((5326, 5333), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (5329, 5333), False, 'from math import radians, cos, sin\n'), ((5336, 5343), 'math.cos', 'cos', (['be'], {}), '(be)\n', (5339, 5343), False, 'from math import radians, cos, sin\n'), ((5656, 5663), 'math.cos', 'cos', (['al'], {}), '(al)\n', (5659, 5663), False, 'from math import radians, cos, sin\n'), ((7660, 7671), 'math.radians', 'radians', (['al'], {}), '(al)\n', (7667, 7671), False, 'from math import radians, cos, sin\n'), ((7689, 7700), 'math.radians', 'radians', (['be'], {}), '(be)\n', (7696, 7700), False, 'from math import radians, cos, sin\n'), ((7718, 7729), 'math.radians', 'radians', (['ga'], {}), '(ga)\n', (7725, 7729), False, 'from math import radians, cos, sin\n'), ((8797, 8804), 
'math.cos', 'cos', (['ga'], {}), '(ga)\n', (8800, 8804), False, 'from math import radians, cos, sin\n'), ((5664, 5671), 'math.cos', 'cos', (['be'], {}), '(be)\n', (5667, 5671), False, 'from math import radians, cos, sin\n'), ((5672, 5679), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (5675, 5679), False, 'from math import radians, cos, sin\n'), ((6926, 6933), 'math.sin', 'sin', (['al'], {}), '(al)\n', (6929, 6933), False, 'from math import radians, cos, sin\n'), ((6973, 6980), 'math.cos', 'cos', (['al'], {}), '(al)\n', (6976, 6980), False, 'from math import radians, cos, sin\n'), ((7132, 7149), 'past.utils.old_div', 'old_div', (['(4.0)', '(3.0)'], {}), '(4.0, 3.0)\n', (7139, 7149), False, 'from past.utils import old_div\n'), ((7258, 7275), 'past.utils.old_div', 'old_div', (['(4.0)', '(3.0)'], {}), '(4.0, 3.0)\n', (7265, 7275), False, 'from past.utils import old_div\n'), ((7751, 7769), 'past.utils.old_div', 'old_div', (['(1)', '(V ** 2)'], {}), '(1, V ** 2)\n', (7758, 7769), False, 'from past.utils import old_div\n'), ((8789, 8796), 'math.cos', 'cos', (['be'], {}), '(be)\n', (8792, 8796), False, 'from math import radians, cos, sin\n'), ((6960, 6967), 'math.cos', 'cos', (['al'], {}), '(al)\n', (6963, 6967), False, 'from math import radians, cos, sin\n'), ((7035, 7042), 'math.cos', 'cos', (['al'], {}), '(al)\n', (7038, 7042), False, 'from math import radians, cos, sin\n'), ((7424, 7431), 'math.sin', 'sin', (['be'], {}), '(be)\n', (7427, 7431), False, 'from math import radians, cos, sin\n'), ((7525, 7548), 'past.utils.old_div', 'old_div', (['(l ** 2)', '(c ** 2)'], {}), '(l ** 2, c ** 2)\n', (7532, 7548), False, 'from past.utils import old_div\n'), ((8781, 8788), 'math.cos', 'cos', (['al'], {}), '(al)\n', (8784, 8788), False, 'from math import radians, cos, sin\n'), ((7020, 7027), 'math.cos', 'cos', (['al'], {}), '(al)\n', (7023, 7027), False, 'from math import radians, cos, sin\n'), ((7440, 7463), 'past.utils.old_div', 'old_div', (['(h ** 2)', '(a ** 2)'], {}), '(h ** 2, a ** 2)\n', (7447, 7463), False, 'from past.utils import old_div\n'), ((7561, 7568), 'math.cos', 'cos', (['be'], {}), '(be)\n', (7564, 7568), False, 'from math import radians, cos, sin\n'), ((8106, 8113), 'math.cos', 'cos', (['be'], {}), '(be)\n', (8109, 8113), False, 'from math import radians, cos, sin\n'), ((8041, 8048), 'math.cos', 'cos', (['al'], {}), '(al)\n', (8044, 8048), False, 'from math import radians, cos, sin\n'), ((8086, 8093), 'math.cos', 'cos', (['al'], {}), '(al)\n', (8089, 8093), False, 'from math import radians, cos, sin\n'), ((8096, 8103), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (8099, 8103), False, 'from math import radians, cos, sin\n'), ((7468, 7475), 'math.sin', 'sin', (['be'], {}), '(be)\n', (7471, 7475), False, 'from math import radians, cos, sin\n'), ((7976, 7983), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (7979, 7983), False, 'from math import radians, cos, sin\n'), ((8021, 8028), 'math.cos', 'cos', (['be'], {}), '(be)\n', (8024, 8028), False, 'from math import radians, cos, sin\n'), ((8031, 8038), 'math.cos', 'cos', (['ga'], {}), '(ga)\n', (8034, 8038), False, 'from math import radians, cos, sin\n'), ((7909, 7916), 'math.sin', 'sin', (['ga'], {}), '(ga)\n', (7912, 7916), False, 'from math import radians, cos, sin\n'), ((7956, 7963), 'math.cos', 'cos', (['al'], {}), '(al)\n', (7959, 7963), False, 'from math import radians, cos, sin\n'), ((7966, 7973), 'math.cos', 'cos', (['be'], {}), '(be)\n', (7969, 7973), False, 'from math import radians, cos, sin\n'), ((7809, 7816), 'math.sin', 'sin', (['al'], 
{}), '(al)\n', (7812, 7816), False, 'from math import radians, cos, sin\n'), ((7859, 7866), 'math.sin', 'sin', (['be'], {}), '(be)\n', (7862, 7866), False, 'from math import radians, cos, sin\n')] |
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error, mean_squared_error
from PyEMD import CEEMDAN # Empirical Mode Decomposition (EMD). Most popular expansion is Ensemble Empirical Mode Decomposition (EEMD)
from keras.preprocessing.sequence import TimeseriesGenerator
RESULT_COLS = ['train_actual', 'train_pred', 'train_x_axis', \
'val_actual', 'val_pred', 'val_x_axis', \
'test_actual', 'test_pred', 'test_x_axis']
MAX_IMFS = 6 # components are often called Intrinsic Mode Functions (IMF) to highlight
# that they contain an intrinsic property which is a specific oscillation (mode)
WIN_SIZE_FOR_IMFS = {
'IMF1': 2, # use smaller window for high frequency component
'IMF2': 2,
'IMF3': 3,
'IMF4': 3,
'IMF5': 4,
'IMF6': 4,
'IMF7': 5,
'IMF8': 5,
'Rsd' : 6,
'DEFAULT': 4
}
SKIP_REMAIN_FOR_IMFS = {
'IMF1': 4, # 6 (Rsd) - 2
'IMF2': 4,
'IMF3': 3,
'IMF4': 3,
'IMF5': 2,
'IMF6': 2,
'IMF7': 1,
'IMF8': 1,
'Rsd': 0,
'DEFAULT': 2
}
'''
This class takes in a sequence of data-points gathered at equal intervals, along with time series parameters such as stride,
length of history, etc., to produce batches for training/validation.
'''
class ManyToOneTimeSeriesGenerator(TimeseriesGenerator):
def __getitem__(self, idx):
x, y = super().__getitem__(idx)
        last_element_index = y.shape[1]-1 # y has shape (batch_size, n_features)
return x, y[:,last_element_index].reshape(1,-1) # subclassing it so that we only return the last column in batch_size rows
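# Minimal usage sketch of the generator above (illustrative; the data is
# synthetic). With length=3, each batch pairs a (1, 3, n_features) window with
# a (1, 1) target holding only the last column of the following timestep.
def _demo_many_to_one_generator():
    data = np.arange(20, dtype=float).reshape(10, 2)  # 10 timesteps, col 1 is the target
    gen = ManyToOneTimeSeriesGenerator(data, data, length=3, batch_size=1)
    x, y = gen[0]
    return x.shape, y.shape  # (1, 3, 2), (1, 1)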
class DataHandler_LSTM:
def __init__(self, data, target, timeframe, log_return, test_size, window_size, use_EMD=False, use_sentiment=False):
assert(target == 'High')
assert(timeframe == -1)
if use_EMD:
assert(log_return == False)
assert(use_sentiment == False)
if use_sentiment:
self.features = ['Open','High','Low','Close','Volume', 'compound', 'Target']
else:
self.features = ['Open','High','Low','Close','Volume', 'Target']
self.data = data
self.target = target
self.timeframe = timeframe
self.log_return = log_return
self.test_size = test_size
self.window_size = window_size
# self.data.set_index(['Date'], inplace=True)
self.data[self.features[-1]] = self.normalize_target() # last column is the target
if use_EMD:
self.decompose()
else:
self.build_generators()
def get_train_val_size(self):
test_size = self.test_size
train_size = 1 - test_size # ratio of training samples to total data
cut = round(train_size*self.data.shape[0])
val_size = round(test_size*self.data.shape[0]/2)
return cut, val_size
'''
Neural net models try to focus on learning the behavior of a series from its data, without prior explicit assumptions,
such as linearity or stationarity. An ideal approach is to divide the tough task of forecasting the original time series
into several subtasks, and each of them forecasts a relatively simpler subsequence.
And then the results of all subtasks are accumulated as the final result.
We will use Empirical Mode Decomposition to decompose each of the 6 (Open, Close, High, Low, Volume, Target) into components.
PyEMD is a Python implementation of Empirical Mode Decomposition (EMD) and its variations. One of the most popular expansion is
Ensemble Empirical Mode Decomposition (EEMD), which utilises an ensemble of noise-assisted executions.
'''
def decompose(self):
data = self.data
features = self.features
ceemdan = CEEMDAN(parallel = True, processes=8)
# data[FEATURES[-1]].fillna(0, inplace=True) # cannot have NaN in CEEMDAN
cut, val_size = self.get_train_val_size()
# First scale
scaled_features_series = {}
self.scalerTgt = None
for col in features:
series = data[col].values.reshape(-1,1)
if col == features[-1]:
series = series[:-1] # leave out NaN in the target column (cannot have NaN in CEEMDAN)
            # ravel() flattens the column vector; the original np.frombuffer call
            # assumed a float64 buffer and broke for integer columns such as Volume
            feature_time_series = series.ravel()
train_ser = feature_time_series[:cut]
scaler = MinMaxScaler()
scaler.fit(train_ser.reshape(-1,1))
scaled_features_series[col] = scaler.transform(feature_time_series.reshape(-1,1)).flatten()
if col == features[-1]:
self.scalerTgt = scaler # save the scaler for inverse_transform after prediction
# Then decompose each input feature using the EMD library
print('Decomposing using EMD library')
decomposed_features_series = {}
for col in features: # decompose the 6 time series (Open, High, Low, Close, Volume, Target)
decomposed_features_series[col] = {}
try:
# decompose
                feature_time_series = np.asarray(scaled_features_series[col])
feature_time_series_imfs = ceemdan(feature_time_series, max_imf=MAX_IMFS)
# iterating every IMF
for i, imf_series in enumerate(feature_time_series_imfs):
if i < len(feature_time_series_imfs)-1: # last one is residual
decomposed_features_series[col][f'IMF{i+1}'] = imf_series
                    else:  # the last component is the residue
                        decomposed_features_series[col]['Rsd'] = imf_series
print(f'Finished Decomposing {col}: #IMFS: {len(feature_time_series_imfs)}, {len(imf_series)}')
            except Exception as exc:
                print(f'ERROR decomposing [{col}]: {exc}')
                decomposed_features_series[col] = 'ERROR'
# Coupling together the IMFs of the same level for different features to create exogenous input
series = {}
self.target_max_imf_level = None
for col in decomposed_features_series.keys(): # Open, High,..., Target
# 6 features, each one has 7 IMFs
imfs = pd.DataFrame.from_dict(decomposed_features_series[col])
# print("Feature", feature , imfs.shape) # len(data), 7 (IMF1, .., IMF6, Rsd)
for imf in imfs:
if imf not in series:
series[imf] = [] # empty list
_series = imfs[imf].values
if col != features[-1]: # other than Target, each series has one more entry
_series = _series[:-1]
_series = _series.reshape((len(_series),1)) # reshaping to get into column format
series[imf] += [_series] # list of (len(data)-1, 1)
# print(feature, imf, _series.shape, len(series[ticker][imf]))
if col == features[-1]:
self.target_max_imf_level = imf
assert(self.target_max_imf_level == 'Rsd')
# horizontal stack
full_data = {}
for imf_level in series:
assert(len(series[imf_level]) == 6)
full_data[imf_level] = np.hstack(tuple(series[imf_level]))
print(imf_level, full_data[imf_level].shape) # (len(data)-1, 6)
self.train_data = {} # needed while modeling for number of input features (6)
val_data = {}
self.test_data = {}
for imf_level in full_data:
# splitting data sets according to rates
self.train_data[imf_level] = full_data[imf_level][:cut, :]
val_data[imf_level] = full_data[imf_level][cut:cut+val_size, :]
self.test_data[imf_level] = full_data[imf_level][cut+val_size:, :] # Note that test_data has one more entry
self.train_gen = {}
self.val_gen = {}
self.test_gen = {}
for imf_level in full_data:
if imf_level in WIN_SIZE_FOR_IMFS:
window_size = WIN_SIZE_FOR_IMFS[imf_level]
else:
window_size = WIN_SIZE_FOR_IMFS['DEFAULT']
# windowing
self.train_gen[imf_level] = ManyToOneTimeSeriesGenerator(self.train_data[imf_level], # data
self.train_data[imf_level], # target
                                                                  length = window_size, batch_size = 1) # number of timesteps, with sampling_rate=1
self.val_gen[imf_level] = ManyToOneTimeSeriesGenerator(val_data[imf_level],
val_data[imf_level],
length = window_size, batch_size = 1)
self.test_gen[imf_level] = ManyToOneTimeSeriesGenerator(self.test_data[imf_level],
self.test_data[imf_level],
length = window_size, batch_size = 1)
return
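    # Standalone sketch of the CEEMDAN call used above (illustrative; the input
    # signal is synthetic, not market data). `_demo_ceemdan` is a hypothetical
    # helper added for demonstration.
    @staticmethod
    def _demo_ceemdan():
        """Decompose a two-tone signal and report the reconstruction error."""
        t = np.linspace(0, 1, 256)
        signal = np.sin(2 * np.pi * 5 * t) + 0.5 * np.sin(2 * np.pi * 50 * t)
        imfs = CEEMDAN()(signal, max_imf=MAX_IMFS)  # last row is treated as the residue
        return np.abs(imfs.sum(axis=0) - signal).max()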
def build_generators(self):
features = self.features
data = self.data
self.scalers_dict = {}
train_dict = {}
val_dict = {}
test_dict = {}
# First scale by applying min max scaler
# This estimator scales and translates each feature individually such that it is in the given range on the
# training set, e.g. between zero and one.
cut, val_size = self.get_train_val_size()
for feature in features:
series = data[feature].values.reshape(-1,1)
feature_time_series = np.frombuffer(series)
scaler = MinMaxScaler()
self.scalers_dict[feature] = scaler
train_ser = feature_time_series[:cut].reshape(-1,1)
scaler.fit(train_ser)
scaled_feature_ser = scaler.transform(feature_time_series.reshape(-1,1)).flatten()
train_dict[feature] = scaled_feature_ser[:cut]
val_dict[feature] = scaled_feature_ser[cut:cut+val_size]
test_dict[feature] = scaled_feature_ser[cut+val_size:]
print("# Training samples:", cut, " # val samples:", val_size,
" # test samples:", self.data.shape[0] - cut - val_size)
train_df = pd.DataFrame(train_dict, columns=features)
val_df = pd.DataFrame(val_dict, columns=features)
test_df = pd.DataFrame(test_dict, columns=features)
#print(train_df.shape, val_df.shape, test_df.shape)
self.train_gen = ManyToOneTimeSeriesGenerator(train_df.values,
train_df.values,
length = self.window_size,
batch_size = 1)
self.val_gen = ManyToOneTimeSeriesGenerator(val_df.values,
val_df.values,
length = self.window_size,
batch_size = 1)
self.test_gen = ManyToOneTimeSeriesGenerator(test_df.values,
test_df.values,
length = self.window_size,
batch_size = 1)
return
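    # Sketch of the scaling pattern above (illustrative): fit the MinMaxScaler
    # on the training slice only, then transform the whole series, so later
    # values may exceed [0, 1] without leaking test statistics into training.
    # `_demo_train_only_scaling` is a hypothetical helper for demonstration.
    @staticmethod
    def _demo_train_only_scaling():
        series = np.arange(10, dtype=float).reshape(-1, 1)
        scaler = MinMaxScaler().fit(series[:6])  # statistics from the train slice only
        return scaler.transform(series).ravel()     # trailing values exceed 1.0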
def normalize_target(self):
target = self.target
data = self.data
timeframe = self.timeframe
        if target != 'High':
            raise NotImplementedError("only target == 'High' is supported")
else:
if self.log_return:
return np.log(data[target].shift(timeframe) / data['Close'])
# else: # simple return
return data[target].shift(timeframe) / data['Close']
'''
def unnormalize_target(self, data):
if self.target != 'High':
assert(0)
else:
if self.log_return:
return np.exp(data['target']) * data['Close']
return data['target'] * data['Close']
'''
def calculate_results(self, df_final_results, plot=True, plot_title='Title'):
accuracies_detailed = {}
y_train = df_final_results[RESULT_COLS[0]][~np.isnan(df_final_results[RESULT_COLS[0]])]
yhat_train = df_final_results[RESULT_COLS[1]][~np.isnan(df_final_results[RESULT_COLS[1]])]
y_val = df_final_results[RESULT_COLS[3]][~np.isnan(df_final_results[RESULT_COLS[3]])]
yhat_val = df_final_results[RESULT_COLS[4]][~np.isnan(df_final_results[RESULT_COLS[4]])]
# need to shave the end because we don't have next day data
y_test = df_final_results[RESULT_COLS[6]][~np.isnan(df_final_results[RESULT_COLS[6]])]
yhat_test = df_final_results[RESULT_COLS[7]][~np.isnan(df_final_results[RESULT_COLS[7]])][:-1]
accuracies_detailed['mse'] = {
'train':mean_squared_error(y_train, yhat_train),
'validation':mean_squared_error(y_val, yhat_val),
'test':mean_squared_error(y_test, yhat_test),
}
accuracies_detailed['rmse'] = {
'train':mean_squared_error(y_train, yhat_train, squared=False),
'validation':mean_squared_error(y_val, yhat_val, squared=False),
'test':mean_squared_error(y_test, yhat_test, squared=False),
}
accuracies_detailed['mae'] = {
'train':mean_absolute_error(y_train, yhat_train),
'validation':mean_absolute_error(y_val, yhat_val),
'test':mean_absolute_error(y_test, yhat_test),
}
accuracies_detailed['mape'] = {
'train':np.mean(np.abs((y_train - yhat_train) / y_train)) * 100,
'validation':np.mean(np.abs((y_val - yhat_val) / y_val)) * 100,
'test':np.mean(np.abs((y_test - yhat_test) / y_test)) * 100,
}
if plot:
today = datetime.datetime.today().strftime('%Y/%m/%d')
fig, ax = plt.subplots(1, 1, figsize=(18,6))
ax.xaxis.set_major_locator(mdates.YearLocator(1))
plt.plot(y_train.index, y_train, color='blue', label='Train', alpha=0.5)
plt.plot(y_val.index, yhat_val, color='black', alpha=0.8, label='Validation predict')
plt.plot(y_val.index, y_val, color='yellow', alpha=0.5, label='Validation actual')
plt.plot(y_test.index, yhat_test, color='red', alpha=0.8, label = 'Test predict')
plt.plot(y_test.index, y_test, color='yellow', alpha=0.5, label='Test actual')
plt.title(f'{plot_title} {today}')
plt.legend()
plt.show()
return accuracies_detailed
def process_forecasts(self, df_concatenated, plot=True, plot_title='Title'):
        # Apply the scaler's inverse transform to the predicted target columns
        # (note: scalers_dict is only populated on the non-EMD path)
        scaler = self.scalers_dict['Target']
for col in df_concatenated.columns:
df_concatenated[col] = scaler.inverse_transform(df_concatenated[col].values.reshape(-1,1))
data = self.data.copy()
data['Back_Shifted_Actual'] = data[self.target].shift(self.timeframe)
win_size = self.window_size
cut, val_size = self.get_train_val_size()
train_dates = data['Date'][win_size:cut] # skip the win_size rows for which we do not have a prediction
val_dates = data['Date'][win_size + cut: cut + val_size] # similarly skip win_size rows from validation and test
test_dates = data['Date'][win_size + cut + val_size:]
print(len(train_dates), len(val_dates), len(test_dates))
df_data_slc = pd.DataFrame()
for ti in [train_dates, val_dates, test_dates]:
tmp_slice = data[(data['Date'].isin(ti))]
df_data_slc = pd.concat([df_data_slc, tmp_slice])
print(df_data_slc.shape)
df_concatenated = df_concatenated.set_index(df_data_slc['Date'])
df_data_slc.set_index('Date', inplace=True)
# join predicted and real
df_recompiled = df_concatenated.join(df_data_slc)
# change prediction back into original feature space
for col in [RESULT_COLS[1], RESULT_COLS[4], RESULT_COLS[7]]: # train_pred, val_pred, test_pred
if self.log_return:
df_recompiled[col] = np.exp(df_recompiled[col]) * df_recompiled['Close']
else:
df_recompiled[col] = df_recompiled[col] * df_recompiled['Close']
for col in [RESULT_COLS[0], RESULT_COLS[3], RESULT_COLS[6]]:
df_recompiled[col] = df_recompiled.apply(lambda x: np.nan if np.isnan(x[col]) else x['Back_Shifted_Actual'], axis=1)
return df_recompiled, self.calculate_results(df_recompiled, plot=plot, plot_title=plot_title) | [
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.plot",
"numpy.abs",
"datetime.datetime.today",
"numpy.frombuffer",
"matplotlib.pyplot.legend",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.metrics.mean_absolute_error",
... | [((3878, 3913), 'PyEMD.CEEMDAN', 'CEEMDAN', ([], {'parallel': '(True)', 'processes': '(8)'}), '(parallel=True, processes=8)\n', (3885, 3913), False, 'from PyEMD import CEEMDAN\n'), ((10470, 10512), 'pandas.DataFrame', 'pd.DataFrame', (['train_dict'], {'columns': 'features'}), '(train_dict, columns=features)\n', (10482, 10512), True, 'import pandas as pd\n'), ((10530, 10570), 'pandas.DataFrame', 'pd.DataFrame', (['val_dict'], {'columns': 'features'}), '(val_dict, columns=features)\n', (10542, 10570), True, 'import pandas as pd\n'), ((10589, 10630), 'pandas.DataFrame', 'pd.DataFrame', (['test_dict'], {'columns': 'features'}), '(test_dict, columns=features)\n', (10601, 10630), True, 'import pandas as pd\n'), ((15830, 15844), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15842, 15844), True, 'import pandas as pd\n'), ((4393, 4414), 'numpy.frombuffer', 'np.frombuffer', (['series'], {}), '(series)\n', (4406, 4414), True, 'import numpy as np\n'), ((4486, 4500), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4498, 4500), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((6314, 6369), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['decomposed_features_series[col]'], {}), '(decomposed_features_series[col])\n', (6336, 6369), True, 'import pandas as pd\n'), ((9809, 9830), 'numpy.frombuffer', 'np.frombuffer', (['series'], {}), '(series)\n', (9822, 9830), True, 'import numpy as np\n'), ((9853, 9867), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (9865, 9867), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((13079, 13118), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train', 'yhat_train'], {}), '(y_train, yhat_train)\n', (13097, 13118), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((13149, 13184), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_val', 'yhat_val'], {}), '(y_val, yhat_val)\n', (13167, 13184), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((13209, 13246), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'yhat_test'], {}), '(y_test, yhat_test)\n', (13227, 13246), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((13334, 13388), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train', 'yhat_train'], {'squared': '(False)'}), '(y_train, yhat_train, squared=False)\n', (13352, 13388), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((13419, 13469), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_val', 'yhat_val'], {'squared': '(False)'}), '(y_val, yhat_val, squared=False)\n', (13437, 13469), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((13494, 13546), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'yhat_test'], {'squared': '(False)'}), '(y_test, yhat_test, squared=False)\n', (13512, 13546), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((13625, 13665), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_train', 'yhat_train'], {}), '(y_train, yhat_train)\n', (13644, 13665), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((13696, 13732), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_val', 'yhat_val'], {}), '(y_val, yhat_val)\n', (13715, 13732), False, 'from sklearn.metrics import mean_absolute_error, 
mean_squared_error\n'), ((13757, 13795), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_test', 'yhat_test'], {}), '(y_test, yhat_test)\n', (13776, 13795), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((14209, 14244), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(18, 6)'}), '(1, 1, figsize=(18, 6))\n', (14221, 14244), True, 'import matplotlib.pyplot as plt\n'), ((14318, 14390), 'matplotlib.pyplot.plot', 'plt.plot', (['y_train.index', 'y_train'], {'color': '"""blue"""', 'label': '"""Train"""', 'alpha': '(0.5)'}), "(y_train.index, y_train, color='blue', label='Train', alpha=0.5)\n", (14326, 14390), True, 'import matplotlib.pyplot as plt\n'), ((14403, 14493), 'matplotlib.pyplot.plot', 'plt.plot', (['y_val.index', 'yhat_val'], {'color': '"""black"""', 'alpha': '(0.8)', 'label': '"""Validation predict"""'}), "(y_val.index, yhat_val, color='black', alpha=0.8, label=\n 'Validation predict')\n", (14411, 14493), True, 'import matplotlib.pyplot as plt\n'), ((14501, 14588), 'matplotlib.pyplot.plot', 'plt.plot', (['y_val.index', 'y_val'], {'color': '"""yellow"""', 'alpha': '(0.5)', 'label': '"""Validation actual"""'}), "(y_val.index, y_val, color='yellow', alpha=0.5, label=\n 'Validation actual')\n", (14509, 14588), True, 'import matplotlib.pyplot as plt\n'), ((14596, 14675), 'matplotlib.pyplot.plot', 'plt.plot', (['y_test.index', 'yhat_test'], {'color': '"""red"""', 'alpha': '(0.8)', 'label': '"""Test predict"""'}), "(y_test.index, yhat_test, color='red', alpha=0.8, label='Test predict')\n", (14604, 14675), True, 'import matplotlib.pyplot as plt\n'), ((14690, 14768), 'matplotlib.pyplot.plot', 'plt.plot', (['y_test.index', 'y_test'], {'color': '"""yellow"""', 'alpha': '(0.5)', 'label': '"""Test actual"""'}), "(y_test.index, y_test, color='yellow', alpha=0.5, label='Test actual')\n", (14698, 14768), True, 'import matplotlib.pyplot as plt\n'), ((14781, 14815), 'matplotlib.pyplot.title', 'plt.title', (['f"""{plot_title} {today}"""'], {}), "(f'{plot_title} {today}')\n", (14790, 14815), True, 'import matplotlib.pyplot as plt\n'), ((14828, 14840), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14838, 14840), True, 'import matplotlib.pyplot as plt\n'), ((14853, 14863), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14861, 14863), True, 'import matplotlib.pyplot as plt\n'), ((15981, 16016), 'pandas.concat', 'pd.concat', (['[df_data_slc, tmp_slice]'], {}), '([df_data_slc, tmp_slice])\n', (15990, 16016), True, 'import pandas as pd\n'), ((5180, 5222), 'numpy.frombuffer', 'np.frombuffer', (['scaled_features_series[col]'], {}), '(scaled_features_series[col])\n', (5193, 5222), True, 'import numpy as np\n'), ((12413, 12455), 'numpy.isnan', 'np.isnan', (['df_final_results[RESULT_COLS[0]]'], {}), '(df_final_results[RESULT_COLS[0]])\n', (12421, 12455), True, 'import numpy as np\n'), ((12512, 12554), 'numpy.isnan', 'np.isnan', (['df_final_results[RESULT_COLS[1]]'], {}), '(df_final_results[RESULT_COLS[1]])\n', (12520, 12554), True, 'import numpy as np\n'), ((12607, 12649), 'numpy.isnan', 'np.isnan', (['df_final_results[RESULT_COLS[3]]'], {}), '(df_final_results[RESULT_COLS[3]])\n', (12615, 12649), True, 'import numpy as np\n'), ((12704, 12746), 'numpy.isnan', 'np.isnan', (['df_final_results[RESULT_COLS[4]]'], {}), '(df_final_results[RESULT_COLS[4]])\n', (12712, 12746), True, 'import numpy as np\n'), ((12868, 12910), 'numpy.isnan', 'np.isnan', (['df_final_results[RESULT_COLS[6]]'], {}), 
'(df_final_results[RESULT_COLS[6]])\n', (12876, 12910), True, 'import numpy as np\n'), ((14283, 14304), 'matplotlib.dates.YearLocator', 'mdates.YearLocator', (['(1)'], {}), '(1)\n', (14301, 14304), True, 'import matplotlib.dates as mdates\n'), ((12966, 13008), 'numpy.isnan', 'np.isnan', (['df_final_results[RESULT_COLS[7]]'], {}), '(df_final_results[RESULT_COLS[7]])\n', (12974, 13008), True, 'import numpy as np\n'), ((13883, 13923), 'numpy.abs', 'np.abs', (['((y_train - yhat_train) / y_train)'], {}), '((y_train - yhat_train) / y_train)\n', (13889, 13923), True, 'import numpy as np\n'), ((13969, 14003), 'numpy.abs', 'np.abs', (['((y_val - yhat_val) / y_val)'], {}), '((y_val - yhat_val) / y_val)\n', (13975, 14003), True, 'import numpy as np\n'), ((14043, 14080), 'numpy.abs', 'np.abs', (['((y_test - yhat_test) / y_test)'], {}), '((y_test - yhat_test) / y_test)\n', (14049, 14080), True, 'import numpy as np\n'), ((14140, 14165), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (14163, 14165), False, 'import datetime\n'), ((16504, 16530), 'numpy.exp', 'np.exp', (['df_recompiled[col]'], {}), '(df_recompiled[col])\n', (16510, 16530), True, 'import numpy as np\n'), ((16807, 16823), 'numpy.isnan', 'np.isnan', (['x[col]'], {}), '(x[col])\n', (16815, 16823), True, 'import numpy as np\n')] |
import os
import cv2
import numpy as np
import io
import random, math
from utils.data_aug import random_crop, random_translate, random_scale, scale_crop
from utils.lpr_util import sparse_tuple_from, CHARS_DICT, decode_sparse_tensor
provinces = ["皖", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "京", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂",
"琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "警", "学", "O"]
alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
'X', 'Y', 'Z', 'O']
ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']
class DataReader(object):
def __init__(self, img_dir, config=None):
        '''Data reader for a CCPD-style license plate dataset: the filename of
        each image encodes the bounding box, four corner points, and plate number.
        '''
self.config = config
self.img_dir = img_dir
self.max_objs = 1
self.num_classes = 1
self.num_joints = 4
self.images = self.get_img_list(self.img_dir)
self.num_samples = len(self.images)
self.shuffle()
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
img_id = self.images[idx]
bboxes, kpts, lpnumber = self.parse_lp(img_id)
img_path = os.path.join(self.img_dir, img_id)
img = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# image resize and cut to 512x512
size = (self.config.IMAGE_WIDTH, self.config.IMAGE_HEIGHT)
img, bboxes, kpts = scale_crop(img, bboxes, kpts, size)
#print(img.shape)
# data augmentation np.random.randint(5) 0,1,2,3,4
flag = np.random.randint(5)
if flag < 2:
img, bboxes, kpts = random_scale(img, bboxes, kpts)
elif flag == 2:
img, bboxes, kpts = random_crop(img, bboxes, kpts)
elif flag == 3:
img, bboxes, kpts = random_translate(img, bboxes, kpts)
        # resize the image to the configured input size
img, scale_factor = self.imrescale_wh(img, self.config.IMAGE_WIDTH, self.config.IMAGE_HEIGHT)
img = self.imnormalize(img, self.config.MEAN_PIXEL, self.config.STD_PIXEL)
img = self.impad_to_wh(img, self.config.IMAGE_WIDTH, self.config.IMAGE_HEIGHT)
bboxes = bboxes.astype(np.float32)
kpts = kpts.astype(np.float32)
labels = np.ones((1, 13), dtype=np.float32)
labels[:, :4] = bboxes
kpts_f = np.reshape(kpts, (1, 8))
labels[:, 5:] = kpts_f
labels = labels*scale_factor / 4
output_h = self.config.IMAGE_HEIGHT//4
output_w = self.config.IMAGE_WIDTH//4
hm = np.zeros((output_h, output_w, self.num_classes), dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
ind = np.zeros((self.max_objs), dtype=np.float32)
reg_mask = np.zeros((self.max_objs), dtype=np.float32)
hm_hp = np.zeros((output_h, output_w, self.num_joints), dtype=np.float32)
hp_offset = np.zeros((self.max_objs * self.num_joints, 2), dtype=np.float32)
hp_ind = np.zeros((self.max_objs * self.num_joints), dtype=np.float32)
hp_mask = np.zeros((self.max_objs * self.num_joints), dtype=np.float32)
kps = np.zeros((self.max_objs, self.num_joints*2), dtype=np.float32)
kps_mask = np.zeros((self.max_objs, self.num_joints*2), dtype=np.float32)
gt_det = []
for k in range(self.max_objs):
bbox = bboxes[k]
cls_id = 0
pts = kpts[k]
# process bbox
            bbox = bbox * scale_factor / 4  # rescale, then map to the 1/4-resolution output grid
            bbox = np.clip(bbox, 0, output_h - 1)  # clips both axes with output_h (assumes a square output map)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h > 0 and w > 0:
rh, rw = self.gaussian_radius((math.ceil(h),math.ceil(w)))
rh = max(0, int(rh))
rw = max(0, int(rw))
ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
wh[k] = 1. * w, 1. * h
ind[k] = ct_int[1] * output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
self.draw_umich_gaussian(hm[:,:, cls_id], ct_int, rw, rh)
hp_radius = self.gaussian_radius_c((math.ceil(h), math.ceil(w)))
hp_radius = max(0, int(hp_radius))
for j in range(self.num_joints):
                    pts[j] = pts[j] * scale_factor/4  # rescale, then map to the 1/4-resolution output grid
if pts[j, 0] >= 0 and pts[j, 0] < output_w and pts[j, 1] >= 0 and pts[j, 1] < output_h:
kps[k, j * 2: j * 2 + 2] = pts[j] - ct_int
kps_mask[k, j * 2: j * 2 + 2] = 1
pt_int = pts[j].astype(np.int32)
hp_offset[k * self.num_joints + j] = pts[j] - pt_int
hp_ind[k * self.num_joints + j] = pt_int[1] * output_w + pt_int[0]
hp_mask[k * self.num_joints + j] = 1
self.draw_umich_gaussian_c(hm_hp[..., j], pt_int, hp_radius)
return img, hm, wh, reg, reg_mask, ind, hm_hp, hp_offset, \
hp_ind, hp_mask, kps, kps_mask, lpnumber, labels
def shuffle(self):
random.shuffle(self.images)
def get_img_list(self, img_path, exts=['jpg', 'png', 'jpeg', 'JPG']):
img_list = os.listdir(img_path)
new_list = []
for img_name in img_list:
for ext in exts:
if img_name.endswith(ext):
new_list.append(img_name)
break
return new_list
def parse_lp(self, img_name):
fn, _ = os.path.splitext(img_name)
#print(fn)
plist = fn.split('-')
bbox = plist[2].split('_')
box = [[int(pt.split('&')[0]), int(pt.split('&')[1])] for pt in bbox]
box = sum(box, []) #[178, 467, 410, 539]
        box = [box,]  # [[178, 467, 410, 539]] rectangle as x1, y1, x2, y2
box = np.array(box)
#print(box)
pt4 = plist[3].split('_')
pt4 = np.array(pt4)[[2, 3, 0, 1]]
pts = np.array([[int(pt.split('&')[0]), int(pt.split('&')[1])] for pt in pt4])
pts = pts[np.newaxis, ...]
# lpnumber
lpn7 = plist[4].split('_')
#lpnum = ''.join(lpn7)
#0_9_19_30_29_33_29 皖kv6595
pro = provinces[int(lpn7[0])]
lpnumber = []
lpnumber.append(pro)
for i in range(6):
lpnumber.append(ads[int(lpn7[i+1])])
lp_code = []
for nu in lpnumber:
lp_code.append(CHARS_DICT[nu])
lp_code = np.array(lp_code)
return box, pts, lp_code
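    # Illustrative check (not in the original class): decoding one '&'/'_'
    # separated bbox field of a CCPD-style filename; coordinates are made up.
    @staticmethod
    def _demo_parse_bbox_field():
        bbox_field = '178&467_410&539'  # "x1&y1_x2&y2"
        box = [[int(v) for v in pt.split('&')] for pt in bbox_field.split('_')]
        return sum(box, [])             # [178, 467, 410, 539]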
def imrescale(self, img, scale):
h, w = img.shape[:2]
max_long_edge = max(scale)
max_short_edge = min(scale)
scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w))
new_size = (int(w * float(scale_factor) + 0.5), int(h * float(scale_factor) + 0.5))
rescaled_img = cv2.resize(img, new_size, interpolation=cv2.INTER_LINEAR)
return rescaled_img, scale_factor
def imrescale_wh(self, img, width, height):
h, w = img.shape[:2]
scale_factor = min(width*1.0/w, height*1.0/h)
new_size = (int(w * float(scale_factor) + 0.5), int(h * float(scale_factor) + 0.5))
rescaled_img = cv2.resize(img, new_size, interpolation=cv2.INTER_LINEAR)
return rescaled_img, scale_factor
def imnormalize(self, img, mean, std):
img = (img - mean) / std
return img.astype(np.float32)
def impad_to_square(self, img, pad_size):
h,w = img.shape[:2]
if len(img.shape) == 2:
pad_size = [[0,pad_size-h], [0,pad_size-w]]
else:
pad_size = [[0,pad_size-h], [0,pad_size-w], [0,0]]
pad = np.pad(img, pad_size, 'constant')
return pad
def impad_to_wh(self, img, width, height):
h,w = img.shape[:2]
pad_size = [[0,height-h], [0,width-w], [0,0]]
pad = np.pad(img, pad_size, 'constant')
return pad
def gaussian_radius(self, det_size, min_overlap=0.7):
height, width = det_size
ra = 0.1155*height
rb = 0.1155*width
return ra, rb
def gaussian2D(self, shape, sigmah=1, sigmaw=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp(-(x * x / (2*sigmaw*sigmaw) + y * y / (2*sigmah*sigmah)))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_umich_gaussian(self, heatmap, center, rw, rh, k=1):
diameterw = 2 * rw + 1
diameterh = 2 * rh + 1
gaussian = self.gaussian2D((diameterh, diameterw), sigmah=diameterh/6, sigmaw=diameterw/6)
x, y = center
height, width = heatmap.shape[0:2]
left, right = min(x, rw), min(width - x, rw + 1)
top, bottom = min(y, rh), min(height - y, rh + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[rh - top:rh + bottom, rw - left:rw + right]
if min(masked_gaussian.shape)>0 and min(masked_heatmap.shape)>0:
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
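    # Illustrative sketch (not in the original class): render one elliptical
    # Gaussian into an empty map; the centre pixel should come out ~1.0.
    # `_demo_heatmap` is a hypothetical helper added for demonstration.
    def _demo_heatmap(self):
        hm = np.zeros((32, 32), dtype=np.float32)
        self.draw_umich_gaussian(hm, (16, 16), rw=6, rh=3)
        return hm.max()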
    def gaussian_radius_c(self, det_size, min_overlap=0.7):
        # CornerNet-style radius: solve three quadratics so that a keypoint shifted
        # by up to r still yields IoU >= min_overlap with the ground-truth box
        height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 - sq1) / (2 * a1)
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 - sq2) / (2 * a2)
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / (2 * a3)
return min(r1, r2, r3)
def gaussian2D_c(self, shape, sigma=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_umich_gaussian_c(self, heatmap, center, radius, k=1):
diameter = 2 * radius + 1
gaussian = self.gaussian2D_c((diameter, diameter), sigma=diameter / 6)
x, y = center
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap) | [
"numpy.maximum",
"random.shuffle",
"numpy.ones",
"numpy.clip",
"utils.data_aug.random_crop",
"numpy.random.randint",
"numpy.exp",
"os.path.join",
"numpy.pad",
"utils.data_aug.random_scale",
"cv2.cvtColor",
"numpy.finfo",
"numpy.reshape",
"cv2.resize",
"math.ceil",
"utils.data_aug.scale... | [((1375, 1409), 'os.path.join', 'os.path.join', (['self.img_dir', 'img_id'], {}), '(self.img_dir, img_id)\n', (1387, 1409), False, 'import os\n'), ((1509, 1545), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1521, 1545), False, 'import cv2\n'), ((1684, 1719), 'utils.data_aug.scale_crop', 'scale_crop', (['img', 'bboxes', 'kpts', 'size'], {}), '(img, bboxes, kpts, size)\n', (1694, 1719), False, 'from utils.data_aug import random_crop, random_translate, random_scale, scale_crop\n'), ((1820, 1840), 'numpy.random.randint', 'np.random.randint', (['(5)'], {}), '(5)\n', (1837, 1840), True, 'import numpy as np\n'), ((2509, 2543), 'numpy.ones', 'np.ones', (['(1, 13)'], {'dtype': 'np.float32'}), '((1, 13), dtype=np.float32)\n', (2516, 2543), True, 'import numpy as np\n'), ((2592, 2616), 'numpy.reshape', 'np.reshape', (['kpts', '(1, 8)'], {}), '(kpts, (1, 8))\n', (2602, 2616), True, 'import numpy as np\n'), ((2796, 2862), 'numpy.zeros', 'np.zeros', (['(output_h, output_w, self.num_classes)'], {'dtype': 'np.float32'}), '((output_h, output_w, self.num_classes), dtype=np.float32)\n', (2804, 2862), True, 'import numpy as np\n'), ((2876, 2922), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (2884, 2922), True, 'import numpy as np\n'), ((2937, 2983), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (2945, 2983), True, 'import numpy as np\n'), ((2998, 3039), 'numpy.zeros', 'np.zeros', (['self.max_objs'], {'dtype': 'np.float32'}), '(self.max_objs, dtype=np.float32)\n', (3006, 3039), True, 'import numpy as np\n'), ((3061, 3102), 'numpy.zeros', 'np.zeros', (['self.max_objs'], {'dtype': 'np.float32'}), '(self.max_objs, dtype=np.float32)\n', (3069, 3102), True, 'import numpy as np\n'), ((3122, 3187), 'numpy.zeros', 'np.zeros', (['(output_h, output_w, self.num_joints)'], {'dtype': 'np.float32'}), '((output_h, output_w, self.num_joints), dtype=np.float32)\n', (3130, 3187), True, 'import numpy as np\n'), ((3208, 3272), 'numpy.zeros', 'np.zeros', (['(self.max_objs * self.num_joints, 2)'], {'dtype': 'np.float32'}), '((self.max_objs * self.num_joints, 2), dtype=np.float32)\n', (3216, 3272), True, 'import numpy as np\n'), ((3290, 3349), 'numpy.zeros', 'np.zeros', (['(self.max_objs * self.num_joints)'], {'dtype': 'np.float32'}), '(self.max_objs * self.num_joints, dtype=np.float32)\n', (3298, 3349), True, 'import numpy as np\n'), ((3370, 3429), 'numpy.zeros', 'np.zeros', (['(self.max_objs * self.num_joints)'], {'dtype': 'np.float32'}), '(self.max_objs * self.num_joints, dtype=np.float32)\n', (3378, 3429), True, 'import numpy as np\n'), ((3447, 3511), 'numpy.zeros', 'np.zeros', (['(self.max_objs, self.num_joints * 2)'], {'dtype': 'np.float32'}), '((self.max_objs, self.num_joints * 2), dtype=np.float32)\n', (3455, 3511), True, 'import numpy as np\n'), ((3529, 3593), 'numpy.zeros', 'np.zeros', (['(self.max_objs, self.num_joints * 2)'], {'dtype': 'np.float32'}), '((self.max_objs, self.num_joints * 2), dtype=np.float32)\n', (3537, 3593), True, 'import numpy as np\n'), ((5519, 5546), 'random.shuffle', 'random.shuffle', (['self.images'], {}), '(self.images)\n', (5533, 5546), False, 'import random, math\n'), ((5650, 5670), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (5660, 5670), False, 'import os\n'), ((5947, 5973), 'os.path.splitext', 'os.path.splitext', (['img_name'], {}), 
'(img_name)\n', (5963, 5973), False, 'import os\n'), ((6263, 6276), 'numpy.array', 'np.array', (['box'], {}), '(box)\n', (6271, 6276), True, 'import numpy as np\n'), ((6895, 6912), 'numpy.array', 'np.array', (['lp_code'], {}), '(lp_code)\n', (6903, 6912), True, 'import numpy as np\n'), ((7283, 7340), 'cv2.resize', 'cv2.resize', (['img', 'new_size'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, new_size, interpolation=cv2.INTER_LINEAR)\n', (7293, 7340), False, 'import cv2\n'), ((7631, 7688), 'cv2.resize', 'cv2.resize', (['img', 'new_size'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, new_size, interpolation=cv2.INTER_LINEAR)\n', (7641, 7688), False, 'import cv2\n'), ((8105, 8138), 'numpy.pad', 'np.pad', (['img', 'pad_size', '"""constant"""'], {}), "(img, pad_size, 'constant')\n", (8111, 8138), True, 'import numpy as np\n'), ((8302, 8335), 'numpy.pad', 'np.pad', (['img', 'pad_size', '"""constant"""'], {}), "(img, pad_size, 'constant')\n", (8308, 8335), True, 'import numpy as np\n'), ((8676, 8748), 'numpy.exp', 'np.exp', (['(-(x * x / (2 * sigmaw * sigmaw) + y * y / (2 * sigmah * sigmah)))'], {}), '(-(x * x / (2 * sigmaw * sigmaw) + y * y / (2 * sigmah * sigmah)))\n', (8682, 8748), True, 'import numpy as np\n'), ((9775, 9805), 'numpy.sqrt', 'np.sqrt', (['(b1 ** 2 - 4 * a1 * c1)'], {}), '(b1 ** 2 - 4 * a1 * c1)\n', (9782, 9805), True, 'import numpy as np\n'), ((9957, 9987), 'numpy.sqrt', 'np.sqrt', (['(b2 ** 2 - 4 * a2 * c2)'], {}), '(b2 ** 2 - 4 * a2 * c2)\n', (9964, 9987), True, 'import numpy as np\n'), ((10168, 10198), 'numpy.sqrt', 'np.sqrt', (['(b3 ** 2 - 4 * a3 * c3)'], {}), '(b3 ** 2 - 4 * a3 * c3)\n', (10175, 10198), True, 'import numpy as np\n'), ((10411, 10457), 'numpy.exp', 'np.exp', (['(-(x * x + y * y) / (2 * sigma * sigma))'], {}), '(-(x * x + y * y) / (2 * sigma * sigma))\n', (10417, 10457), True, 'import numpy as np\n'), ((11085, 11152), 'numpy.maximum', 'np.maximum', (['masked_heatmap', '(masked_gaussian * k)'], {'out': 'masked_heatmap'}), '(masked_heatmap, masked_gaussian * k, out=masked_heatmap)\n', (11095, 11152), True, 'import numpy as np\n'), ((1438, 1475), 'numpy.fromfile', 'np.fromfile', (['img_path'], {'dtype': 'np.uint8'}), '(img_path, dtype=np.uint8)\n', (1449, 1475), True, 'import numpy as np\n'), ((1894, 1925), 'utils.data_aug.random_scale', 'random_scale', (['img', 'bboxes', 'kpts'], {}), '(img, bboxes, kpts)\n', (1906, 1925), False, 'from utils.data_aug import random_crop, random_translate, random_scale, scale_crop\n'), ((3838, 3868), 'numpy.clip', 'np.clip', (['bbox', '(0)', '(output_h - 1)'], {}), '(bbox, 0, output_h - 1)\n', (3845, 3868), True, 'import numpy as np\n'), ((6346, 6359), 'numpy.array', 'np.array', (['pt4'], {}), '(pt4)\n', (6354, 6359), True, 'import numpy as np\n'), ((9458, 9525), 'numpy.maximum', 'np.maximum', (['masked_heatmap', '(masked_gaussian * k)'], {'out': 'masked_heatmap'}), '(masked_heatmap, masked_gaussian * k, out=masked_heatmap)\n', (9468, 9525), True, 'import numpy as np\n'), ((1982, 2012), 'utils.data_aug.random_crop', 'random_crop', (['img', 'bboxes', 'kpts'], {}), '(img, bboxes, kpts)\n', (1993, 2012), False, 'from utils.data_aug import random_crop, random_translate, random_scale, scale_crop\n'), ((4128, 4206), 'numpy.array', 'np.array', (['[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]'], {'dtype': 'np.float32'}), '([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n', (4136, 4206), True, 'import numpy as np\n'), ((2069, 2104), 'utils.data_aug.random_translate', 'random_translate', (['img', 'bboxes', 
'kpts'], {}), '(img, bboxes, kpts)\n', (2085, 2104), False, 'from utils.data_aug import random_crop, random_translate, random_scale, scale_crop\n'), ((4005, 4017), 'math.ceil', 'math.ceil', (['h'], {}), '(h)\n', (4014, 4017), False, 'import random, math\n'), ((4018, 4030), 'math.ceil', 'math.ceil', (['w'], {}), '(w)\n', (4027, 4030), False, 'import random, math\n'), ((4545, 4557), 'math.ceil', 'math.ceil', (['h'], {}), '(h)\n', (4554, 4557), False, 'import random, math\n'), ((4559, 4571), 'math.ceil', 'math.ceil', (['w'], {}), '(w)\n', (4568, 4571), False, 'import random, math\n'), ((8755, 8772), 'numpy.finfo', 'np.finfo', (['h.dtype'], {}), '(h.dtype)\n', (8763, 8772), True, 'import numpy as np\n'), ((10472, 10489), 'numpy.finfo', 'np.finfo', (['h.dtype'], {}), '(h.dtype)\n', (10480, 10489), True, 'import numpy as np\n')] |
from render_util import get_shader_dirname
import glob
import os
import numpy
import numpy as np
import skimage
import skimage.io
import scipy.ndimage
import sys
sys.path += ['..']
import argparse_util
tolerance = 2.0
dtype = 'float32'
lo_pct = 20
hi_pct = 80
# Make a simplified normalization assumption:
# once enough data points have been seen to roughly estimate the normalization
# parameters, store them and apply them directly to other datasets.
def normalize(X_train, feature_bias=None, feature_scale=None):
global lo_pct, hi_pct
if feature_bias is None:
feature_bias = numpy.zeros(X_train.shape[-1], dtype)
feature_scale = numpy.zeros(X_train.shape[-1], dtype)
global tolerance
print(X_train.shape[-1])
for m in range(X_train.shape[3]):
sorted_arr = numpy.sort(X_train[..., m], axis=None)
sorted_arr = sorted_arr[numpy.isnan(sorted_arr) == 0]
sorted_arr = sorted_arr[numpy.isinf(sorted_arr) == 0]
print("Sorted", m)
min_val = sorted_arr[0]
max_val = sorted_arr[-1]
epsilon = min(1e-8 * (max_val - min_val), 1e-8)
print("epsilon", epsilon)
finite = 10
cluster_count = 0
current_min = min_val
apply_outlier = True
for iter in range(finite):
ind = numpy.searchsorted(sorted_arr, current_min + epsilon, side='right')
if ind == sorted_arr.shape[0]:
cluster_count = iter + 1
apply_outlier = False
break
else:
current_min = sorted_arr[ind]
print(sorted_arr[ind])
print("calculating bias and scale to do 0-1 scaling")
if apply_outlier:
print("outlier detection")
Q1 = sorted_arr[int(lo_pct / 100 * (sorted_arr.shape[0] - 1))]
Q3 = sorted_arr[int(hi_pct / 100 * (sorted_arr.shape[0] - 1))]
IQR = Q3 - Q1
if IQR == 0:
Q1 = min_val
Q3 = max_val
IQR = max_val - min_val
print("IQR=0, does not apply clipping")
else:
min_val = max(min_val, Q1 - tolerance * IQR)
max_val = min(max_val, Q3 + tolerance * IQR)
print("clipping outlier")
else:
print("finite set", cluster_count)
Q1 = min_val
Q3 = max_val
IQR = max_val - min_val
tiny = numpy.finfo(dtype).tiny
feature_bias[m] = -min_val
diff = max_val - min_val
feature_scale[m] = 1.0/(diff) if diff >= tiny else 1.0
print("normalized")
return
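# A minimal usage sketch (hypothetical shapes): `normalize` fills the two
# pre-allocated arrays in place and returns nothing, so callers must pass them
# in; `(X + bias) * scale` then maps each channel's clipped range to [0, 1].
#   X = numpy.random.rand(4, 32, 32, 7).astype(dtype)
#   bias = numpy.zeros(X.shape[-1], dtype)
#   scale = numpy.zeros(X.shape[-1], dtype)
#   normalize(X, feature_bias=bias, feature_scale=scale)
#   X_normalized = (X + bias) * scale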
def read_filename(filename):
d = numpy.load(filename)
nfeatures = d.shape[0]
ans = d.reshape([nfeatures, 1, d.shape[1], d.shape[2]])
ans = numpy.moveaxis(ans, [0, 1, 2, 3], [3, 2, 0, 1])
ans = ans.reshape([ans.shape[0], ans.shape[1], nfeatures])
return ans
def load(subdir, filename):
ans = numpy.asarray(read_filename(os.path.join(subdir, filename)), dtype)
nan_num = numpy.sum(numpy.isnan(ans))
inf_num = numpy.sum(numpy.isinf(ans))
if nan_num > 0 or inf_num > 0:
print(filename, nan_num, inf_num)
return ans
def main():
parser = argparse_util.ArgumentParser(description='PreprocessRawData')
parser.add_argument('--base_dirs', dest='base_dirs', default='', help='dirs to read files from')
parser.add_argument('--shadername', dest='shadername', default='render_zigzag', help='shader name to evaluate on')
parser.add_argument('--geometry', dest='geometry', default='plane', help='geometry to evaluate on')
parser.add_argument('--lo_pct', dest='lo_pct', type=int, default=25, help='set the low percentile in outlier removal')
args = parser.parse_args()
# color channels are not normalized
base_dirs = args.base_dirs
shader_name = args.shadername
geometry = args.geometry
normal_map = 'none'
global lo_pct, hi_pct
lo_pct = args.lo_pct
hi_pct = 100 - args.lo_pct
base_dirs = base_dirs.split(',')
all_n = []
features = []
dirs = []
for base_dir in base_dirs:
dirname = get_shader_dirname(base_dir, shader_name, normal_map, geometry, render_prefix=True)
n = len(glob.glob(os.path.join(dirname, 'g_intermediates*.npy')))
print(n)
all_n.append(n)
dirs.append(dirname)
if not geometry.startswith('boids'):
for i in range(n):
try:
feature = load(dirname, 'g_intermediates%05d.npy'%(i))
except:
print(dirname, i)
raise
print(i, feature.shape)
features.append(feature)
else:
features.append(np.load(os.path.join(dirname, 'g_intermediates.npy')))
features = numpy.asarray(features)
print('finished reading features')
feature_bias = numpy.zeros(features.shape[-1], dtype)
feature_scale = numpy.zeros(features.shape[-1], dtype)
normalize(features, feature_bias=feature_bias, feature_scale=feature_scale)
print(os.path.join(dirs[0], 'feature_bias_%d_%d%s.npy' % (lo_pct, hi_pct, '')))
numpy.save(os.path.join(dirs[0], 'feature_bias_%d_%d%s.npy' % (lo_pct, hi_pct, '')), feature_bias)
numpy.save(os.path.join(dirs[0], 'feature_scale_%d_%d%s.npy' % (lo_pct, hi_pct, '')), feature_scale)
if __name__ == '__main__':
main()
| [
"numpy.load",
"numpy.moveaxis",
"render_util.get_shader_dirname",
"numpy.asarray",
"numpy.zeros",
"numpy.isinf",
"argparse_util.ArgumentParser",
"numpy.isnan",
"numpy.searchsorted",
"numpy.sort",
"numpy.finfo",
"os.path.join"
] | [((2698, 2718), 'numpy.load', 'numpy.load', (['filename'], {}), '(filename)\n', (2708, 2718), False, 'import numpy\n'), ((2827, 2874), 'numpy.moveaxis', 'numpy.moveaxis', (['ans', '[0, 1, 2, 3]', '[3, 2, 0, 1]'], {}), '(ans, [0, 1, 2, 3], [3, 2, 0, 1])\n', (2841, 2874), False, 'import numpy\n'), ((3270, 3331), 'argparse_util.ArgumentParser', 'argparse_util.ArgumentParser', ([], {'description': '"""PreprocessRawData"""'}), "(description='PreprocessRawData')\n", (3298, 3331), False, 'import argparse_util\n'), ((4892, 4915), 'numpy.asarray', 'numpy.asarray', (['features'], {}), '(features)\n', (4905, 4915), False, 'import numpy\n'), ((4977, 5015), 'numpy.zeros', 'numpy.zeros', (['features.shape[-1]', 'dtype'], {}), '(features.shape[-1], dtype)\n', (4988, 5015), False, 'import numpy\n'), ((5036, 5074), 'numpy.zeros', 'numpy.zeros', (['features.shape[-1]', 'dtype'], {}), '(features.shape[-1], dtype)\n', (5047, 5074), False, 'import numpy\n'), ((601, 638), 'numpy.zeros', 'numpy.zeros', (['X_train.shape[-1]', 'dtype'], {}), '(X_train.shape[-1], dtype)\n', (612, 638), False, 'import numpy\n'), ((663, 700), 'numpy.zeros', 'numpy.zeros', (['X_train.shape[-1]', 'dtype'], {}), '(X_train.shape[-1], dtype)\n', (674, 700), False, 'import numpy\n'), ((816, 854), 'numpy.sort', 'numpy.sort', (['X_train[..., m]'], {'axis': 'None'}), '(X_train[..., m], axis=None)\n', (826, 854), False, 'import numpy\n'), ((3090, 3106), 'numpy.isnan', 'numpy.isnan', (['ans'], {}), '(ans)\n', (3101, 3106), False, 'import numpy\n'), ((3132, 3148), 'numpy.isinf', 'numpy.isinf', (['ans'], {}), '(ans)\n', (3143, 3148), False, 'import numpy\n'), ((4207, 4294), 'render_util.get_shader_dirname', 'get_shader_dirname', (['base_dir', 'shader_name', 'normal_map', 'geometry'], {'render_prefix': '(True)'}), '(base_dir, shader_name, normal_map, geometry,\n render_prefix=True)\n', (4225, 4294), False, 'from render_util import get_shader_dirname\n'), ((5167, 5239), 'os.path.join', 'os.path.join', (['dirs[0]', "('feature_bias_%d_%d%s.npy' % (lo_pct, hi_pct, ''))"], {}), "(dirs[0], 'feature_bias_%d_%d%s.npy' % (lo_pct, hi_pct, ''))\n", (5179, 5239), False, 'import os\n'), ((5256, 5328), 'os.path.join', 'os.path.join', (['dirs[0]', "('feature_bias_%d_%d%s.npy' % (lo_pct, hi_pct, ''))"], {}), "(dirs[0], 'feature_bias_%d_%d%s.npy' % (lo_pct, hi_pct, ''))\n", (5268, 5328), False, 'import os\n'), ((5359, 5432), 'os.path.join', 'os.path.join', (['dirs[0]', "('feature_scale_%d_%d%s.npy' % (lo_pct, hi_pct, ''))"], {}), "(dirs[0], 'feature_scale_%d_%d%s.npy' % (lo_pct, hi_pct, ''))\n", (5371, 5432), False, 'import os\n'), ((1321, 1388), 'numpy.searchsorted', 'numpy.searchsorted', (['sorted_arr', '(current_min + epsilon)'], {'side': '"""right"""'}), "(sorted_arr, current_min + epsilon, side='right')\n", (1339, 1388), False, 'import numpy\n'), ((2464, 2482), 'numpy.finfo', 'numpy.finfo', (['dtype'], {}), '(dtype)\n', (2475, 2482), False, 'import numpy\n'), ((3021, 3051), 'os.path.join', 'os.path.join', (['subdir', 'filename'], {}), '(subdir, filename)\n', (3033, 3051), False, 'import os\n'), ((887, 910), 'numpy.isnan', 'numpy.isnan', (['sorted_arr'], {}), '(sorted_arr)\n', (898, 910), False, 'import numpy\n'), ((949, 972), 'numpy.isinf', 'numpy.isinf', (['sorted_arr'], {}), '(sorted_arr)\n', (960, 972), False, 'import numpy\n'), ((4318, 4363), 'os.path.join', 'os.path.join', (['dirname', '"""g_intermediates*.npy"""'], {}), "(dirname, 'g_intermediates*.npy')\n", (4330, 4363), False, 'import os\n'), ((4828, 4872), 'os.path.join', 'os.path.join', 
(['dirname', '"""g_intermediates.npy"""'], {}), "(dirname, 'g_intermediates.npy')\n", (4840, 4872), False, 'import os\n')] |
import cv2
import numpy as np
import dlib
from imutils import face_utils
import math
def sigmoid(x):
return 1 / (1 + math.exp(-x))
from src.analysis_module import PoseAnalyser
## Template borrowed from Openface project (for author credits view README.md)
### https://github.com/cmusatyalab/openface/blob/master/openface/align_dlib.py
TEMPLATE = np.float32([
(0.0792396913815, 0.339223741112), (0.0829219487236, 0.456955367943),
(0.0967927109165, 0.575648016728), (0.122141515615, 0.691921601066),
(0.168687863544, 0.800341263616), (0.239789390707, 0.895732504778),
(0.325662452515, 0.977068762493), (0.422318282013, 1.04329000149),
(0.531777802068, 1.06080371126), (0.641296298053, 1.03981924107),
(0.738105872266, 0.972268833998), (0.824444363295, 0.889624082279),
(0.894792677532, 0.792494155836), (0.939395486253, 0.681546643421),
(0.96111933829, 0.562238253072), (0.970579841181, 0.441758925744),
(0.971193274221, 0.322118743967), (0.163846223133, 0.249151738053),
(0.21780354657, 0.204255863861), (0.291299351124, 0.192367318323),
(0.367460241458, 0.203582210627), (0.4392945113, 0.233135599851),
(0.586445962425, 0.228141644834), (0.660152671635, 0.195923841854),
(0.737466449096, 0.182360984545), (0.813236546239, 0.192828009114),
(0.8707571886, 0.235293377042), (0.51534533827, 0.31863546193),
(0.516221448289, 0.396200446263), (0.517118861835, 0.473797687758),
(0.51816430343, 0.553157797772), (0.433701156035, 0.604054457668),
(0.475501237769, 0.62076344024), (0.520712933176, 0.634268222208),
(0.565874114041, 0.618796581487), (0.607054002672, 0.60157671656),
(0.252418718401, 0.331052263829), (0.298663015648, 0.302646354002),
(0.355749724218, 0.303020650651), (0.403718978315, 0.33867711083),
(0.352507175597, 0.349987615384), (0.296791759886, 0.350478978225),
(0.631326076346, 0.334136672344), (0.679073381078, 0.29645404267),
(0.73597236153, 0.294721285802), (0.782865376271, 0.321305281656),
(0.740312274764, 0.341849376713), (0.68499850091, 0.343734332172),
(0.353167761422, 0.746189164237), (0.414587777921, 0.719053835073),
(0.477677654595, 0.706835892494), (0.522732900812, 0.717092275768),
(0.569832064287, 0.705414478982), (0.635195811927, 0.71565572516),
(0.69951672331, 0.739419187253), (0.639447159575, 0.805236879972),
(0.576410514055, 0.835436670169), (0.525398405766, 0.841706377792),
(0.47641545769, 0.837505914975), (0.41379548902, 0.810045601727),
(0.380084785646, 0.749979603086), (0.477955996282, 0.74513234612),
(0.523389793327, 0.748924302636), (0.571057789237, 0.74332894691),
(0.672409137852, 0.744177032192), (0.572539621444, 0.776609286626),
(0.5240106503, 0.783370783245), (0.477561227414, 0.778476346951)])
TPL_MIN, TPL_MAX = np.min(TEMPLATE, axis=0), np.max(TEMPLATE, axis=0)
MINMAX_TEMPLATE = (TEMPLATE - TPL_MIN) / (TPL_MAX - TPL_MIN)
## END OF BORROWED CODE ##
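# Approximate camera intrinsic matrix K (fx, 0, cx; 0, fy, cy; 0, 0, 1) and lens
# distortion coefficients D used by solvePnP below; the values come from the
# referenced head-pose-estimation repo and appear to assume a 640x480 frame.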
K = [6.5308391993466671e+002, 0.0, 3.1950000000000000e+002,
0.0, 6.5308391993466671e+002, 2.3950000000000000e+002,
0.0, 0.0, 1.0]
D = [7.0834633684407095e-002, 6.9140193737175351e-002, 0.0, 0.0, -1.3073460323689292e+000]
cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)
object_pts = np.float32([[6.825897, 6.760612, 4.402142],
[1.330353, 7.122144, 6.903745],
[-1.330353, 7.122144, 6.903745],
[-6.825897, 6.760612, 4.402142],
[5.311432, 5.485328, 3.987654],
[1.789930, 5.393625, 4.413414],
[-1.789930, 5.393625, 4.413414],
[-5.311432, 5.485328, 3.987654],
[2.005628, 1.409845, 6.165652],
[-2.005628, 1.409845, 6.165652],
[2.774015, -2.080775, 5.048531],
[-2.774015, -2.080775, 5.048531],
[0.000000, -3.116408, 6.097667],
[0.000000, -7.415691, 4.070434]])
reprojectsrc = np.float32([[10.0, 10.0, 10.0],
[10.0, 10.0, -10.0],
[10.0, -10.0, -10.0],
[10.0, -10.0, 10.0],
[-10.0, 10.0, 10.0],
[-10.0, 10.0, -10.0],
[-10.0, -10.0, -10.0],
[-10.0, -10.0, 10.0]])
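# reprojectsrc holds the 8 corners of a 3D cube; line_pairs below lists the 12
# edges drawn between their 2D reprojections to visualize head orientation.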
line_pairs = [[0, 1], [1, 2], [2, 3], [3, 0],
[4, 5], [5, 6], [6, 7], [7, 4],
[0, 4], [1, 5], [2, 6], [3, 7]]
class HeadPoseEstimator(PoseAnalyser):
    #: Landmark indices.
INNER_EYES_AND_BOTTOM_LIP = [39, 42, 57]
OUTER_EYES_AND_NOSE = [36, 45, 33]
    ## Get head pose.
    ## Original source: https://github.com/lincolnhard/head-pose-estimation/blob/master/video_test_shape.py
def get_head_pose(self, shape):
image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
shape[39], shape[42], shape[45], shape[31], shape[35],
shape[48], shape[54], shape[57], shape[8]])
_, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)
reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,
dist_coeffs)
reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))
# calc euler angle
rotation_mat, _ = cv2.Rodrigues(rotation_vec)
pose_mat = cv2.hconcat((rotation_mat, translation_vec))
_, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)
return reprojectdst, euler_angle
def __init__(self, model="./BEGR/models/dlib/shape_predictor_68_face_landmarks.dat"):
self.detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor(model)
self.detected = []
self.predicted = []
self.projections = []
self.show_video = False
def infer(self, img):
self.detected = self.detector(img, 0)
self.predicted = []
self.projections = []
for d in self.detected:
shape = self.predictor(img, d)
shape = face_utils.shape_to_np(shape)
reprojectdst, euler_angle = self.get_head_pose(shape)
self.projections.append( (reprojectdst, euler_angle) )
self.predicted.append(shape)
return (self.detected, self.predicted, self.projections)
def analyse(self, video_file, out_file, show_video=False):
        self.show_video = show_video
super().analyse(video_file, out_file, show_video, infer_method=self.infer)
def drawKeypoints(self, img, index):
shape = self.predicted[index]
for (x, y) in shape:
cv2.circle(img, (x, y), 1, (0, 0, 255), -1)
def drawProjection(self, img, index, col=(0,0,255) ):
reprojectdst, euler_angle = self.projections[index]
for start, end in line_pairs:
cv2.line(img, reprojectdst[start], reprojectdst[end], col)
def getShapes(self):
return self.predicted
class HeadPoseVisualizer(PoseAnalyser):
def drawKeypoints(self, img, shape):
for (x, y) in shape:
cv2.circle(img, (x, y), 1, (0, 0, 255), -1)
def drawProjection(self, img, projections, col=(0,0,255) ):
reprojectdst, euler_angle = projections
for start, end in line_pairs:
cv2.line(img, reprojectdst[start], reprojectdst[end], col)
def draw_func(self, img, kp, options={}):
if len(kp[1]) > 0:
k = kp[1][0]
self.drawKeypoints(img, k)
if len(kp[2]) > 0:
k = kp[2][0]
self.drawProjection(img, k)
_x = k[1][0,0]
_y = k[1][1, 0]
_z = k[1][2,0]
#print("(%.2f, %.2f, %.2f)"%(_x, _y, _z))
            avg = (_x + _y + _z) / 3
interest = 1-sigmoid(avg)
cv2.putText(img,
"Interest=%.2f"%(interest),
(10, 30),
cv2.FONT_HERSHEY_SIMPLEX,
0.75, (0, 0, 0), thickness=2)
return img
def viewKeypointsOnSample(self, sample_dir, options={}):
super().viewKeypointsOnSample(sample_dir, "head", self.draw_func, options)
#h = HeadPoseEstimator("./BEGR/models/dlib/shape_predictor_68_face_landmarks.dat")
#cap = cv2.VideoCapture(0)
#while cap.isOpened():
# ret, img = cap.read()
# if ret:
# result = h.infer(img)
# if len(result) > 0:
# for i, f in enumerate(result):
# h.drawKeypoints(img, i)
# h.drawProjection(img, i)
# cv2.imshow("demo", img)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
| [
"cv2.line",
"math.exp",
"cv2.circle",
"cv2.putText",
"numpy.float32",
"cv2.solvePnP",
"cv2.projectPoints",
"numpy.max",
"numpy.min",
"cv2.Rodrigues",
"cv2.hconcat",
"dlib.get_frontal_face_detector",
"imutils.face_utils.shape_to_np",
"numpy.array",
"dlib.shape_predictor",
"cv2.decompose... | [((356, 2827), 'numpy.float32', 'np.float32', (['[(0.0792396913815, 0.339223741112), (0.0829219487236, 0.456955367943), (\n 0.0967927109165, 0.575648016728), (0.122141515615, 0.691921601066), (\n 0.168687863544, 0.800341263616), (0.239789390707, 0.895732504778), (\n 0.325662452515, 0.977068762493), (0.422318282013, 1.04329000149), (\n 0.531777802068, 1.06080371126), (0.641296298053, 1.03981924107), (\n 0.738105872266, 0.972268833998), (0.824444363295, 0.889624082279), (\n 0.894792677532, 0.792494155836), (0.939395486253, 0.681546643421), (\n 0.96111933829, 0.562238253072), (0.970579841181, 0.441758925744), (\n 0.971193274221, 0.322118743967), (0.163846223133, 0.249151738053), (\n 0.21780354657, 0.204255863861), (0.291299351124, 0.192367318323), (\n 0.367460241458, 0.203582210627), (0.4392945113, 0.233135599851), (\n 0.586445962425, 0.228141644834), (0.660152671635, 0.195923841854), (\n 0.737466449096, 0.182360984545), (0.813236546239, 0.192828009114), (\n 0.8707571886, 0.235293377042), (0.51534533827, 0.31863546193), (\n 0.516221448289, 0.396200446263), (0.517118861835, 0.473797687758), (\n 0.51816430343, 0.553157797772), (0.433701156035, 0.604054457668), (\n 0.475501237769, 0.62076344024), (0.520712933176, 0.634268222208), (\n 0.565874114041, 0.618796581487), (0.607054002672, 0.60157671656), (\n 0.252418718401, 0.331052263829), (0.298663015648, 0.302646354002), (\n 0.355749724218, 0.303020650651), (0.403718978315, 0.33867711083), (\n 0.352507175597, 0.349987615384), (0.296791759886, 0.350478978225), (\n 0.631326076346, 0.334136672344), (0.679073381078, 0.29645404267), (\n 0.73597236153, 0.294721285802), (0.782865376271, 0.321305281656), (\n 0.740312274764, 0.341849376713), (0.68499850091, 0.343734332172), (\n 0.353167761422, 0.746189164237), (0.414587777921, 0.719053835073), (\n 0.477677654595, 0.706835892494), (0.522732900812, 0.717092275768), (\n 0.569832064287, 0.705414478982), (0.635195811927, 0.71565572516), (\n 0.69951672331, 0.739419187253), (0.639447159575, 0.805236879972), (\n 0.576410514055, 0.835436670169), (0.525398405766, 0.841706377792), (\n 0.47641545769, 0.837505914975), (0.41379548902, 0.810045601727), (\n 0.380084785646, 0.749979603086), (0.477955996282, 0.74513234612), (\n 0.523389793327, 0.748924302636), (0.571057789237, 0.74332894691), (\n 0.672409137852, 0.744177032192), (0.572539621444, 0.776609286626), (\n 0.5240106503, 0.783370783245), (0.477561227414, 0.778476346951)]'], {}), '([(0.0792396913815, 0.339223741112), (0.0829219487236, \n 0.456955367943), (0.0967927109165, 0.575648016728), (0.122141515615, \n 0.691921601066), (0.168687863544, 0.800341263616), (0.239789390707, \n 0.895732504778), (0.325662452515, 0.977068762493), (0.422318282013, \n 1.04329000149), (0.531777802068, 1.06080371126), (0.641296298053, \n 1.03981924107), (0.738105872266, 0.972268833998), (0.824444363295, \n 0.889624082279), (0.894792677532, 0.792494155836), (0.939395486253, \n 0.681546643421), (0.96111933829, 0.562238253072), (0.970579841181, \n 0.441758925744), (0.971193274221, 0.322118743967), (0.163846223133, \n 0.249151738053), (0.21780354657, 0.204255863861), (0.291299351124, \n 0.192367318323), (0.367460241458, 0.203582210627), (0.4392945113, \n 0.233135599851), (0.586445962425, 0.228141644834), (0.660152671635, \n 0.195923841854), (0.737466449096, 0.182360984545), (0.813236546239, \n 0.192828009114), (0.8707571886, 0.235293377042), (0.51534533827, \n 0.31863546193), (0.516221448289, 0.396200446263), (0.517118861835, \n 0.473797687758), (0.51816430343, 
0.553157797772), (0.433701156035, \n 0.604054457668), (0.475501237769, 0.62076344024), (0.520712933176, \n 0.634268222208), (0.565874114041, 0.618796581487), (0.607054002672, \n 0.60157671656), (0.252418718401, 0.331052263829), (0.298663015648, \n 0.302646354002), (0.355749724218, 0.303020650651), (0.403718978315, \n 0.33867711083), (0.352507175597, 0.349987615384), (0.296791759886, \n 0.350478978225), (0.631326076346, 0.334136672344), (0.679073381078, \n 0.29645404267), (0.73597236153, 0.294721285802), (0.782865376271, \n 0.321305281656), (0.740312274764, 0.341849376713), (0.68499850091, \n 0.343734332172), (0.353167761422, 0.746189164237), (0.414587777921, \n 0.719053835073), (0.477677654595, 0.706835892494), (0.522732900812, \n 0.717092275768), (0.569832064287, 0.705414478982), (0.635195811927, \n 0.71565572516), (0.69951672331, 0.739419187253), (0.639447159575, \n 0.805236879972), (0.576410514055, 0.835436670169), (0.525398405766, \n 0.841706377792), (0.47641545769, 0.837505914975), (0.41379548902, \n 0.810045601727), (0.380084785646, 0.749979603086), (0.477955996282, \n 0.74513234612), (0.523389793327, 0.748924302636), (0.571057789237, \n 0.74332894691), (0.672409137852, 0.744177032192), (0.572539621444, \n 0.776609286626), (0.5240106503, 0.783370783245), (0.477561227414, \n 0.778476346951)])\n', (366, 2827), True, 'import numpy as np\n'), ((3319, 3804), 'numpy.float32', 'np.float32', (['[[6.825897, 6.760612, 4.402142], [1.330353, 7.122144, 6.903745], [-1.330353,\n 7.122144, 6.903745], [-6.825897, 6.760612, 4.402142], [5.311432, \n 5.485328, 3.987654], [1.78993, 5.393625, 4.413414], [-1.78993, 5.393625,\n 4.413414], [-5.311432, 5.485328, 3.987654], [2.005628, 1.409845, \n 6.165652], [-2.005628, 1.409845, 6.165652], [2.774015, -2.080775, \n 5.048531], [-2.774015, -2.080775, 5.048531], [0.0, -3.116408, 6.097667],\n [0.0, -7.415691, 4.070434]]'], {}), '([[6.825897, 6.760612, 4.402142], [1.330353, 7.122144, 6.903745],\n [-1.330353, 7.122144, 6.903745], [-6.825897, 6.760612, 4.402142], [\n 5.311432, 5.485328, 3.987654], [1.78993, 5.393625, 4.413414], [-1.78993,\n 5.393625, 4.413414], [-5.311432, 5.485328, 3.987654], [2.005628, \n 1.409845, 6.165652], [-2.005628, 1.409845, 6.165652], [2.774015, -\n 2.080775, 5.048531], [-2.774015, -2.080775, 5.048531], [0.0, -3.116408,\n 6.097667], [0.0, -7.415691, 4.070434]])\n', (3329, 3804), True, 'import numpy as np\n'), ((4131, 4323), 'numpy.float32', 'np.float32', (['[[10.0, 10.0, 10.0], [10.0, 10.0, -10.0], [10.0, -10.0, -10.0], [10.0, -\n 10.0, 10.0], [-10.0, 10.0, 10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, \n -10.0], [-10.0, -10.0, 10.0]]'], {}), '([[10.0, 10.0, 10.0], [10.0, 10.0, -10.0], [10.0, -10.0, -10.0],\n [10.0, -10.0, 10.0], [-10.0, 10.0, 10.0], [-10.0, 10.0, -10.0], [-10.0,\n -10.0, -10.0], [-10.0, -10.0, 10.0]])\n', (4141, 4323), True, 'import numpy as np\n'), ((2814, 2838), 'numpy.min', 'np.min', (['TEMPLATE'], {'axis': '(0)'}), '(TEMPLATE, axis=0)\n', (2820, 2838), True, 'import numpy as np\n'), ((2840, 2864), 'numpy.max', 'np.max', (['TEMPLATE'], {'axis': '(0)'}), '(TEMPLATE, axis=0)\n', (2846, 2864), True, 'import numpy as np\n'), ((4990, 5164), 'numpy.float32', 'np.float32', (['[shape[17], shape[21], shape[22], shape[26], shape[36], shape[39], shape[42\n ], shape[45], shape[31], shape[35], shape[48], shape[54], shape[57],\n shape[8]]'], {}), '([shape[17], shape[21], shape[22], shape[26], shape[36], shape[39\n ], shape[42], shape[45], shape[31], shape[35], shape[48], shape[54],\n shape[57], shape[8]])\n', (5000, 5164), True, 'import 
numpy as np\n'), ((5256, 5316), 'cv2.solvePnP', 'cv2.solvePnP', (['object_pts', 'image_pts', 'cam_matrix', 'dist_coeffs'], {}), '(object_pts, image_pts, cam_matrix, dist_coeffs)\n', (5268, 5316), False, 'import cv2\n'), ((5344, 5435), 'cv2.projectPoints', 'cv2.projectPoints', (['reprojectsrc', 'rotation_vec', 'translation_vec', 'cam_matrix', 'dist_coeffs'], {}), '(reprojectsrc, rotation_vec, translation_vec, cam_matrix,\n dist_coeffs)\n', (5361, 5435), False, 'import cv2\n'), ((5600, 5627), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rotation_vec'], {}), '(rotation_vec)\n', (5613, 5627), False, 'import cv2\n'), ((5647, 5691), 'cv2.hconcat', 'cv2.hconcat', (['(rotation_mat, translation_vec)'], {}), '((rotation_mat, translation_vec))\n', (5658, 5691), False, 'import cv2\n'), ((5732, 5771), 'cv2.decomposeProjectionMatrix', 'cv2.decomposeProjectionMatrix', (['pose_mat'], {}), '(pose_mat)\n', (5761, 5771), False, 'import cv2\n'), ((5930, 5962), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (5960, 5962), False, 'import dlib\n'), ((5988, 6015), 'dlib.shape_predictor', 'dlib.shape_predictor', (['model'], {}), '(model)\n', (6008, 6015), False, 'import dlib\n'), ((121, 133), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (129, 133), False, 'import math\n'), ((6359, 6388), 'imutils.face_utils.shape_to_np', 'face_utils.shape_to_np', (['shape'], {}), '(shape)\n', (6381, 6388), False, 'from imutils import face_utils\n'), ((6950, 6993), 'cv2.circle', 'cv2.circle', (['img', '(x, y)', '(1)', '(0, 0, 255)', '(-1)'], {}), '(img, (x, y), 1, (0, 0, 255), -1)\n', (6960, 6993), False, 'import cv2\n'), ((7163, 7221), 'cv2.line', 'cv2.line', (['img', 'reprojectdst[start]', 'reprojectdst[end]', 'col'], {}), '(img, reprojectdst[start], reprojectdst[end], col)\n', (7171, 7221), False, 'import cv2\n'), ((7409, 7452), 'cv2.circle', 'cv2.circle', (['img', '(x, y)', '(1)', '(0, 0, 255)', '(-1)'], {}), '(img, (x, y), 1, (0, 0, 255), -1)\n', (7419, 7452), False, 'import cv2\n'), ((7616, 7674), 'cv2.line', 'cv2.line', (['img', 'reprojectdst[start]', 'reprojectdst[end]', 'col'], {}), '(img, reprojectdst[start], reprojectdst[end], col)\n', (7624, 7674), False, 'import cv2\n'), ((8148, 8263), 'cv2.putText', 'cv2.putText', (['img', "('Interest=%.2f' % interest)", '(10, 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.75)', '(0, 0, 0)'], {'thickness': '(2)'}), "(img, 'Interest=%.2f' % interest, (10, 30), cv2.\n FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), thickness=2)\n", (8159, 8263), False, 'import cv2\n'), ((3201, 3212), 'numpy.array', 'np.array', (['K'], {}), '(K)\n', (3209, 3212), True, 'import numpy as np\n'), ((3260, 3271), 'numpy.array', 'np.array', (['D'], {}), '(D)\n', (3268, 3271), True, 'import numpy as np\n')] |
"""End-to-end, Variational Autoencoder (VAE) - MNIST
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import time
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import numpy
from encoder import FC_Encoder
from decoder import FC_Decoder
from vae import VAE
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
WORK_DIRECTORY = 'data'
EVAL_FREQUENCY = 500
TRAIN_SIZE = 60000
TEST_SIZE = 10000
PIXEL_DEPTH = 255
NUM_LABELS = 10
SEED = 66478 # Set to None for random seed.
NUM_EPOCHS = 75
BATCH_SIZE = 64
NUM_NODES = 500
NUM_LAYERS = 2
SIZE = 784
LATENT_SIZE = 20
IMAGE_SIZE = 28
FLAGS = tf.app.flags.FLAGS
def maybe_download(filename):
"""Download the data from Yann's website, unless it's already here."""
if not tf.gfile.Exists(WORK_DIRECTORY):
tf.gfile.MakeDirs(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not tf.gfile.Exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.Size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [0.0, 1.0].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(SIZE * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32) / 255.0
return data.reshape(num_images, SIZE)
def extract_labels(filename, num_images):
"""Extract the labels into a vector of int64 label IDs."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)
return labels
# Get the data.
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
data = tf.placeholder(tf.float32, shape=(BATCH_SIZE, SIZE))
encoder_network = FC_Encoder(SIZE, LATENT_SIZE, NUM_NODES, NUM_LAYERS)
decoder_network = FC_Decoder(BATCH_SIZE, SIZE, LATENT_SIZE, NUM_NODES, NUM_LAYERS)
vae = VAE(encoder_network, decoder_network)
loss = vae.loss_func(data)
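# Presumably the negative ELBO: reconstruction error plus the KL divergence of
# the approximate posterior (an assumption about vae.loss_func's internals).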
optimizer = tf.train.AdamOptimizer().minimize(loss)
generated_images = vae.random_generate(BATCH_SIZE, LATENT_SIZE, IMAGE_SIZE, 1)
image_summary_op = tf.image_summary("vae_images", generated_images, max_images=BATCH_SIZE)
# Create a local session to run the training.
start_time = time.time()
with tf.Session() as sess:
# Run all the initializers to prepare the trainable parameters.
tf.initialize_all_variables().run()
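  # Reuse variables so the generation graph presumably shares the decoder
  # weights built for the loss graph (an assumption about the VAE class).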
tf.get_variable_scope().reuse_variables()
print('Initialized!')
# Loop through training steps.
for epoch in range(NUM_EPOCHS):
for step in range(int(TRAIN_SIZE / BATCH_SIZE)):
offset = step * BATCH_SIZE
batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
feed_dict = {data: batch_data}
# Run the graph and fetch some of the nodes.
_, loss_value = sess.run([optimizer, loss], feed_dict=feed_dict)
elapsed_time = time.time() - start_time
start_time = time.time()
print("Epoch {0}, Time: {1}, Loss: {2}".format(epoch, 1000.0 * elapsed_time / EVAL_FREQUENCY, loss_value))
writer = tf.train.SummaryWriter("logs/mnist", sess.graph)
images, summary = sess.run([generated_images, image_summary_op])
writer.add_summary(summary)
writer.flush()
| [
"tensorflow.gfile.Exists",
"tensorflow.gfile.MakeDirs",
"gzip.open",
"numpy.frombuffer",
"encoder.FC_Encoder",
"tensorflow.train.SummaryWriter",
"tensorflow.Session",
"tensorflow.get_variable_scope",
"decoder.FC_Decoder",
"vae.VAE",
"tensorflow.placeholder",
"time.time",
"tensorflow.gfile.GF... | [((2667, 2719), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(BATCH_SIZE, SIZE)'}), '(tf.float32, shape=(BATCH_SIZE, SIZE))\n', (2681, 2719), True, 'import tensorflow as tf\n'), ((2738, 2790), 'encoder.FC_Encoder', 'FC_Encoder', (['SIZE', 'LATENT_SIZE', 'NUM_NODES', 'NUM_LAYERS'], {}), '(SIZE, LATENT_SIZE, NUM_NODES, NUM_LAYERS)\n', (2748, 2790), False, 'from encoder import FC_Encoder\n'), ((2809, 2873), 'decoder.FC_Decoder', 'FC_Decoder', (['BATCH_SIZE', 'SIZE', 'LATENT_SIZE', 'NUM_NODES', 'NUM_LAYERS'], {}), '(BATCH_SIZE, SIZE, LATENT_SIZE, NUM_NODES, NUM_LAYERS)\n', (2819, 2873), False, 'from decoder import FC_Decoder\n'), ((2880, 2917), 'vae.VAE', 'VAE', (['encoder_network', 'decoder_network'], {}), '(encoder_network, decoder_network)\n', (2883, 2917), False, 'from vae import VAE\n'), ((3096, 3167), 'tensorflow.image_summary', 'tf.image_summary', (['"""vae_images"""', 'generated_images'], {'max_images': 'BATCH_SIZE'}), "('vae_images', generated_images, max_images=BATCH_SIZE)\n", (3112, 3167), True, 'import tensorflow as tf\n'), ((3229, 3240), 'time.time', 'time.time', ([], {}), '()\n', (3238, 3240), False, 'import time\n'), ((982, 1020), 'os.path.join', 'os.path.join', (['WORK_DIRECTORY', 'filename'], {}), '(WORK_DIRECTORY, filename)\n', (994, 1020), False, 'import os\n'), ((3246, 3258), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3256, 3258), True, 'import tensorflow as tf\n'), ((4073, 4121), 'tensorflow.train.SummaryWriter', 'tf.train.SummaryWriter', (['"""logs/mnist"""', 'sess.graph'], {}), "('logs/mnist', sess.graph)\n", (4095, 4121), True, 'import tensorflow as tf\n'), ((892, 923), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['WORK_DIRECTORY'], {}), '(WORK_DIRECTORY)\n', (907, 923), True, 'import tensorflow as tf\n'), ((933, 966), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['WORK_DIRECTORY'], {}), '(WORK_DIRECTORY)\n', (950, 966), True, 'import tensorflow as tf\n'), ((1032, 1057), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['filepath'], {}), '(filepath)\n', (1047, 1057), True, 'import tensorflow as tf\n'), ((1081, 1140), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['(SOURCE_URL + filename)', 'filepath'], {}), '(SOURCE_URL + filename, filepath)\n', (1107, 1140), False, 'from six.moves import urllib\n'), ((1524, 1543), 'gzip.open', 'gzip.open', (['filename'], {}), '(filename)\n', (1533, 1543), False, 'import gzip\n'), ((1913, 1932), 'gzip.open', 'gzip.open', (['filename'], {}), '(filename)\n', (1922, 1932), False, 'import gzip\n'), ((2957, 2981), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (2979, 2981), True, 'import tensorflow as tf\n'), ((3933, 3944), 'time.time', 'time.time', ([], {}), '()\n', (3942, 3944), False, 'import time\n'), ((1154, 1178), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['filepath'], {}), '(filepath)\n', (1168, 1178), True, 'import tensorflow as tf\n'), ((3340, 3369), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (3367, 3369), True, 'import tensorflow as tf\n'), ((3380, 3403), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (3401, 3403), True, 'import tensorflow as tf\n'), ((3887, 3898), 'time.time', 'time.time', ([], {}), '()\n', (3896, 3898), False, 'import time\n'), ((2038, 2078), 'numpy.frombuffer', 'numpy.frombuffer', (['buf'], {'dtype': 'numpy.uint8'}), '(buf, dtype=numpy.uint8)\n', (2054, 2078), False, 'import numpy\n'), ((1651, 1691), 
'numpy.frombuffer', 'numpy.frombuffer', (['buf'], {'dtype': 'numpy.uint8'}), '(buf, dtype=numpy.uint8)\n', (1667, 1691), False, 'import numpy\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import datetime
import logging
import time
from collections import OrderedDict
from contextlib import contextmanager
import torch
import cv2
import numpy as np
import os.path as osp
from detectron2.utils.comm import is_main_process
from detectron2.structures import Instances
class DatasetEvaluator:
"""
Base class for a dataset evaluator.
The function :func:`inference_on_dataset` runs the model over
    all samples in the dataset, and uses a DatasetEvaluator to process the inputs/outputs.
This class will accumulate information of the inputs/outputs (by :meth:`process`),
and produce evaluation results in the end (by :meth:`evaluate`).
"""
def reset(self):
"""
Preparation for a new round of evaluation.
Should be called before starting a round of evaluation.
"""
pass
def process(self, input, output):
"""
Process an input/output pair.
Args:
input: the input that's used to call the model.
            output: the return value of `model(input)`
"""
pass
def evaluate(self):
"""
Evaluate/summarize the performance, after processing all input/output pairs.
Returns:
dict:
A new evaluator class can return a dict of arbitrary format
as long as the user can process the results.
In our train_net.py, we expect the following format:
* key: the name of the task (e.g., bbox)
* value: a dict of {metric name: score}, e.g.: {"AP50": 80}
"""
pass
class DatasetEvaluators(DatasetEvaluator):
def __init__(self, evaluators):
assert len(evaluators)
super().__init__()
self._evaluators = evaluators
def reset(self):
for evaluator in self._evaluators:
evaluator.reset()
def process(self, input, output):
for evaluator in self._evaluators:
evaluator.process(input, output)
def evaluate(self):
results = OrderedDict()
for evaluator in self._evaluators:
result = evaluator.evaluate()
if is_main_process():
for k, v in result.items():
assert (
k not in results
), "Different evaluators produce results with the same key {}".format(k)
results[k] = v
return results
def inference_on_dataset(model, data_loader, evaluator, vis=False, vis_dir=None):
"""
Run model on the data_loader and evaluate the metrics with evaluator.
The model will be used in eval mode.
Args:
model (nn.Module): a module which accepts an object from
`data_loader` and returns some outputs. It will be temporarily set to `eval` mode.
If you wish to evaluate a model in `training` mode instead, you can
wrap the given model and override its behavior of `.eval()` and `.train()`.
data_loader: an iterable object with a length.
The elements it generates will be the inputs to the model.
evaluator (DatasetEvaluator): the evaluator to run. Use
:class:`DatasetEvaluators([])` if you only want to benchmark, but
don't want to do any evaluation.
Returns:
The return value of `evaluator.evaluate()`
"""
num_devices = torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1
logger = logging.getLogger(__name__)
logger.info("Start inference on {} images".format(len(data_loader)))
total = len(data_loader) # inference data loader must have a fixed length
evaluator.reset()
logging_interval = 50
num_warmup = min(5, logging_interval - 1, total - 1)
start_time = time.time()
total_compute_time = 0
with inference_context(model), torch.no_grad():
for idx, inputs in enumerate(data_loader):
if idx == num_warmup:
start_time = time.time()
total_compute_time = 0
start_compute_time = time.time()
outputs = model(inputs)
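            # CUDA launches are asynchronous; synchronize before reading the
            # clock so the measured compute time is accurate.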
torch.cuda.synchronize()
total_compute_time += time.time() - start_compute_time
evaluator.process(inputs, outputs)
if vis:
vis_gt_pred_heatmap(inputs, outputs, vis_dir)
if (idx + 1) % logging_interval == 0:
duration = time.time() - start_time
seconds_per_img = duration / (idx + 1 - num_warmup)
eta = datetime.timedelta(
seconds=int(seconds_per_img * (total - num_warmup) - duration)
)
logger.info(
"Inference done {}/{}. {:.4f} s / img. ETA={}".format(
idx + 1, total, seconds_per_img, str(eta)
)
)
# Measure the time only for this worker (before the synchronization barrier)
total_time = int(time.time() - start_time)
total_time_str = str(datetime.timedelta(seconds=total_time))
# NOTE this format is parsed by grep
logger.info(
"Total inference time: {} ({:.6f} s / img per device, on {} devices)".format(
total_time_str, total_time / (total - num_warmup), num_devices
)
)
total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
logger.info(
"Total inference pure compute time: {} ({:.6f} s / img per device, on {} devices)".format(
total_compute_time_str, total_compute_time / (total - num_warmup), num_devices
)
)
results = evaluator.evaluate()
# An evaluator may return None when not in main process.
# Replace it by an empty dict instead to make it easier for downstream code to handle
if results is None:
results = {}
return results
@contextmanager
def inference_context(model):
"""
A context where the model is temporarily changed to eval mode,
and restored to previous mode afterwards.
Args:
model: a torch Module
"""
training_mode = model.training
model.eval()
yield
model.train(training_mode)
def vis_gt_pred_heatmap(inputs, outputs, vis_root, top_n=3):
# gt name and bbox
name = inputs[0]['file_name'].split('/')[-1].split('.')[0]
img, gt_instances = get_gt_img_instances(inputs[0]) # img in rgb order
gt_boxes = gt_instances.gt_boxes.tensor.int().tolist()
    # pred bbox and score
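    # NOTE: this reassignment overrides the function's top_n argument so that
    # predictions are truncated to the number of ground-truth boxes.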
top_n = len(gt_boxes)
pred_instances = outputs[0]['instances'].to('cpu')
pred_boxes = pred_instances.pred_boxes.tensor.int().tolist()[:top_n]
pred_scores = pred_instances.scores.tolist()[:top_n]
# draw gt
gt_color = (255, 0, 0)
gt_img = img.copy()
for x1, y1, x2, y2 in gt_boxes:
cv2.rectangle(gt_img, (x1, y1), (x2, y2), gt_color, 3)
cv2.imwrite(osp.join(vis_root, '{}_gt.jpg'.format(name)), gt_img[:, :, ::-1])
# draw pred
pred_color = (0, 255, 0)
pred_img = img.copy()
for (x1, y1, x2, y2), score in zip(pred_boxes, pred_scores):
if score < 0.3:
continue
cv2.rectangle(pred_img, (x1, y1), (x2, y2), pred_color, 3)
cv2.putText(pred_img, '{:.2f}'.format(score), (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 1.5, (255, 255, 255), 2)
cv2.imwrite(osp.join(vis_root, '{}_pred.jpg'.format(name)), pred_img[:, :, ::-1])
# draw gt and pred on same img
gt_pred_img = img.copy()
for x1, y1, x2, y2 in gt_boxes:
cv2.rectangle(gt_pred_img, (x1, y1), (x2, y2), gt_color, 3)
for (x1, y1, x2, y2), score in zip(pred_boxes, pred_scores):
if score < 0.3:
continue
cv2.rectangle(gt_pred_img, (x1, y1), (x2, y2), pred_color, 3)
cv2.putText(gt_pred_img, '{:.2f}'.format(score), (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 1.5, (255, 255, 255), 2)
cv2.imwrite(osp.join(vis_root, '{}_gt_pred.jpg'.format(name)), gt_pred_img[:, :, ::-1])
# cam_heatmap
if 'heatmaps' in outputs[0]:
heatmaps = outputs[0]['heatmaps']
h, w = img.shape[:2]
for i, x in enumerate(heatmaps):
heatmap = get_heatmap(x.cpu().numpy()[0], (w, h))
heatmap_overlay = cv2.addWeighted(heatmap, 0.3, img, 0.5, 0)
cv2.imwrite(osp.join(vis_root, '{}_heatmap_{}_cam.jpg'.format(name, i+3)), heatmap_overlay[:, :, ::-1])
# raw_heatmap
if 'raw_heatmaps' in outputs[0]:
heatmaps = outputs[0]['raw_heatmaps']
h, w = img.shape[:2]
for i, x in enumerate(heatmaps):
heatmap = get_heatmap(x.cpu().numpy()[0], (w, h))
heatmap_overlay = cv2.addWeighted(heatmap, 0.3, img, 0.5, 0)
cv2.imwrite(osp.join(vis_root, '{}_heatmap_{}_raw.jpg'.format(name, i+3)), heatmap_overlay[:, :, ::-1])
def get_gt_img_instances(input_dict):
"""
    The image and instances in input_dict were resized by the mapper; restore them to the original size.
"""
instance = input_dict['instances']
target_h, target_w = input_dict['height'], input_dict['width']
h, w = instance.image_size
img = input_dict['image'].permute(1, 2, 0).byte().numpy()[:, :, ::-1] # h, w, c, has been resized by mapper
target_img = cv2.resize(img, dsize=(target_w, target_h)) # resize to ori size
scale_x, scale_y = (target_w / w, target_h / h)
target_instances = Instances((target_h, target_w), **instance.get_fields())
if target_instances.has('gt_boxes'):
output_boxes = target_instances.gt_boxes
output_boxes.scale(scale_x, scale_y)
output_boxes.clip(target_instances.image_size)
target_instances = target_instances[output_boxes.nonempty()]
return target_img, target_instances
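# Min-max normalize an activation map, resize it to the requested size, and
# colorize it with the JET colormap (converted to RGB before returning).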
def get_heatmap(x, size):
x = x - np.min(x)
heatmap = x / np.max(x)
heatmap = np.uint8(255 * heatmap)
heatmap = cv2.resize(heatmap, dsize=size)
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
return heatmap
| [
"torch.distributed.is_initialized",
"torch.cuda.synchronize",
"numpy.uint8",
"cv2.cvtColor",
"logging.getLogger",
"time.time",
"cv2.rectangle",
"cv2.addWeighted",
"numpy.min",
"numpy.max",
"datetime.timedelta",
"torch.distributed.get_world_size",
"cv2.applyColorMap",
"collections.OrderedDi... | [((3571, 3598), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3588, 3598), False, 'import logging\n'), ((3875, 3886), 'time.time', 'time.time', ([], {}), '()\n', (3884, 3886), False, 'import time\n'), ((9279, 9322), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(target_w, target_h)'}), '(img, dsize=(target_w, target_h))\n', (9289, 9322), False, 'import cv2\n'), ((9859, 9882), 'numpy.uint8', 'np.uint8', (['(255 * heatmap)'], {}), '(255 * heatmap)\n', (9867, 9882), True, 'import numpy as np\n'), ((9897, 9928), 'cv2.resize', 'cv2.resize', (['heatmap'], {'dsize': 'size'}), '(heatmap, dsize=size)\n', (9907, 9928), False, 'import cv2\n'), ((9944, 9988), 'cv2.applyColorMap', 'cv2.applyColorMap', (['heatmap', 'cv2.COLORMAP_JET'], {}), '(heatmap, cv2.COLORMAP_JET)\n', (9961, 9988), False, 'import cv2\n'), ((10003, 10043), 'cv2.cvtColor', 'cv2.cvtColor', (['heatmap', 'cv2.COLOR_BGR2RGB'], {}), '(heatmap, cv2.COLOR_BGR2RGB)\n', (10015, 10043), False, 'import cv2\n'), ((2129, 2142), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2140, 2142), False, 'from collections import OrderedDict\n'), ((3516, 3550), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (3548, 3550), False, 'import torch\n'), ((3478, 3512), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (3510, 3512), False, 'import torch\n'), ((3949, 3964), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3962, 3964), False, 'import torch\n'), ((5125, 5163), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'total_time'}), '(seconds=total_time)\n', (5143, 5163), False, 'import datetime\n'), ((6903, 6957), 'cv2.rectangle', 'cv2.rectangle', (['gt_img', '(x1, y1)', '(x2, y2)', 'gt_color', '(3)'], {}), '(gt_img, (x1, y1), (x2, y2), gt_color, 3)\n', (6916, 6957), False, 'import cv2\n'), ((7230, 7288), 'cv2.rectangle', 'cv2.rectangle', (['pred_img', '(x1, y1)', '(x2, y2)', 'pred_color', '(3)'], {}), '(pred_img, (x1, y1), (x2, y2), pred_color, 3)\n', (7243, 7288), False, 'import cv2\n'), ((7599, 7658), 'cv2.rectangle', 'cv2.rectangle', (['gt_pred_img', '(x1, y1)', '(x2, y2)', 'gt_color', '(3)'], {}), '(gt_pred_img, (x1, y1), (x2, y2), gt_color, 3)\n', (7612, 7658), False, 'import cv2\n'), ((7777, 7838), 'cv2.rectangle', 'cv2.rectangle', (['gt_pred_img', '(x1, y1)', '(x2, y2)', 'pred_color', '(3)'], {}), '(gt_pred_img, (x1, y1), (x2, y2), pred_color, 3)\n', (7790, 7838), False, 'import cv2\n'), ((9807, 9816), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (9813, 9816), True, 'import numpy as np\n'), ((9835, 9844), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (9841, 9844), True, 'import numpy as np\n'), ((2243, 2260), 'detectron2.utils.comm.is_main_process', 'is_main_process', ([], {}), '()\n', (2258, 2260), False, 'from detectron2.utils.comm import is_main_process\n'), ((4165, 4176), 'time.time', 'time.time', ([], {}), '()\n', (4174, 4176), False, 'import time\n'), ((4225, 4249), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (4247, 4249), False, 'import torch\n'), ((5074, 5085), 'time.time', 'time.time', ([], {}), '()\n', (5083, 5085), False, 'import time\n'), ((8305, 8347), 'cv2.addWeighted', 'cv2.addWeighted', (['heatmap', '(0.3)', 'img', '(0.5)', '(0)'], {}), '(heatmap, 0.3, img, 0.5, 0)\n', (8320, 8347), False, 'import cv2\n'), ((8728, 8770), 'cv2.addWeighted', 'cv2.addWeighted', (['heatmap', '(0.3)', 'img', '(0.5)', '(0)'], {}), '(heatmap, 0.3, img, 0.5, 
0)\n', (8743, 8770), False, 'import cv2\n'), ((4080, 4091), 'time.time', 'time.time', ([], {}), '()\n', (4089, 4091), False, 'import time\n'), ((4284, 4295), 'time.time', 'time.time', ([], {}), '()\n', (4293, 4295), False, 'import time\n'), ((4525, 4536), 'time.time', 'time.time', ([], {}), '()\n', (4534, 4536), False, 'import time\n')] |
# pylint: disable=missing-function-docstring, missing-module-docstring
# coding: utf-8
from pyccel.stdlib.internal.mpi import mpi_init
from pyccel.stdlib.internal.mpi import mpi_finalize
from pyccel.stdlib.internal.mpi import mpi_comm_size
from pyccel.stdlib.internal.mpi import mpi_comm_rank
from pyccel.stdlib.internal.mpi import mpi_comm_world
from pyccel.stdlib.internal.mpi import mpi_status_size
from pyccel.stdlib.internal.mpi import mpi_comm_split
from pyccel.stdlib.internal.mpi import mpi_comm_free
from pyccel.stdlib.internal.mpi import mpi_bcast
from pyccel.stdlib.internal.mpi import MPI_INTEGER8
import numpy as np
if __name__ == '__main__':
    # These variables must be declared and initialized up front,
    # since we pass them to the MPI subroutines
ierr = np.int32(-1)
sizes = np.int32(-1)
rank_in_world = np.int32(-1)
mpi_init(ierr)
comm = mpi_comm_world
mpi_comm_size(comm, sizes, ierr)
mpi_comm_rank(comm, rank_in_world, ierr)
master = np.int32(0)
m = np.int32(8)
a = np.zeros(m, 'int')
if rank_in_world == 1:
a[:] = 1
if rank_in_world == 2:
a[:] = 2
key = rank_in_world
if rank_in_world == 1:
key = np.int32(-1)
if rank_in_world == 2:
key = np.int32(-1)
two = 2
c = rank_in_world % two
color = np.int32(c)
newcomm = np.int32(-1)
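    # Processes that share the same color end up in the same new communicator;
    # key orders the ranks within it (ties are broken by the old rank).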
    mpi_comm_split(comm, color, key, newcomm, ierr)
    # Broadcast the message from the master process of each communicator
    # to the other processes in its group
    mpi_bcast(a, m, MPI_INTEGER8, master, newcomm, ierr)
print("> processor ", rank_in_world, " has a = ", a)
    # Free the new communicator
    mpi_comm_free(newcomm, ierr)
mpi_finalize(ierr)
| [
"pyccel.stdlib.internal.mpi.mpi_bcast",
"numpy.zeros",
"pyccel.stdlib.internal.mpi.mpi_finalize",
"pyccel.stdlib.internal.mpi.mpi_init",
"pyccel.stdlib.internal.mpi.mpi_comm_free",
"pyccel.stdlib.internal.mpi.mpi_comm_size",
"numpy.int32",
"pyccel.stdlib.internal.mpi.mpi_comm_split",
"pyccel.stdlib.... | [((764, 776), 'numpy.int32', 'np.int32', (['(-1)'], {}), '(-1)\n', (772, 776), True, 'import numpy as np\n'), ((789, 801), 'numpy.int32', 'np.int32', (['(-1)'], {}), '(-1)\n', (797, 801), True, 'import numpy as np\n'), ((822, 834), 'numpy.int32', 'np.int32', (['(-1)'], {}), '(-1)\n', (830, 834), True, 'import numpy as np\n'), ((840, 854), 'pyccel.stdlib.internal.mpi.mpi_init', 'mpi_init', (['ierr'], {}), '(ierr)\n', (848, 854), False, 'from pyccel.stdlib.internal.mpi import mpi_init\n'), ((886, 918), 'pyccel.stdlib.internal.mpi.mpi_comm_size', 'mpi_comm_size', (['comm', 'sizes', 'ierr'], {}), '(comm, sizes, ierr)\n', (899, 918), False, 'from pyccel.stdlib.internal.mpi import mpi_comm_size\n'), ((923, 963), 'pyccel.stdlib.internal.mpi.mpi_comm_rank', 'mpi_comm_rank', (['comm', 'rank_in_world', 'ierr'], {}), '(comm, rank_in_world, ierr)\n', (936, 963), False, 'from pyccel.stdlib.internal.mpi import mpi_comm_rank\n'), ((978, 989), 'numpy.int32', 'np.int32', (['(0)'], {}), '(0)\n', (986, 989), True, 'import numpy as np\n'), ((1003, 1014), 'numpy.int32', 'np.int32', (['(8)'], {}), '(8)\n', (1011, 1014), True, 'import numpy as np\n'), ((1024, 1042), 'numpy.zeros', 'np.zeros', (['m', '"""int"""'], {}), "(m, 'int')\n", (1032, 1042), True, 'import numpy as np\n'), ((1319, 1330), 'numpy.int32', 'np.int32', (['c'], {}), '(c)\n', (1327, 1330), True, 'import numpy as np\n'), ((1345, 1357), 'numpy.int32', 'np.int32', (['(-1)'], {}), '(-1)\n', (1353, 1357), True, 'import numpy as np\n'), ((1362, 1409), 'pyccel.stdlib.internal.mpi.mpi_comm_split', 'mpi_comm_split', (['comm', 'color', 'key', 'newcomm', 'ierr'], {}), '(comm, color, key, newcomm, ierr)\n', (1376, 1409), False, 'from pyccel.stdlib.internal.mpi import mpi_comm_split\n'), ((1531, 1583), 'pyccel.stdlib.internal.mpi.mpi_bcast', 'mpi_bcast', (['a', 'm', 'MPI_INTEGER8', 'master', 'newcomm', 'ierr'], {}), '(a, m, MPI_INTEGER8, master, newcomm, ierr)\n', (1540, 1583), False, 'from pyccel.stdlib.internal.mpi import mpi_bcast\n'), ((1687, 1715), 'pyccel.stdlib.internal.mpi.mpi_comm_free', 'mpi_comm_free', (['newcomm', 'ierr'], {}), '(newcomm, ierr)\n', (1700, 1715), False, 'from pyccel.stdlib.internal.mpi import mpi_comm_free\n'), ((1722, 1740), 'pyccel.stdlib.internal.mpi.mpi_finalize', 'mpi_finalize', (['ierr'], {}), '(ierr)\n', (1734, 1740), False, 'from pyccel.stdlib.internal.mpi import mpi_finalize\n'), ((1198, 1210), 'numpy.int32', 'np.int32', (['(-1)'], {}), '(-1)\n', (1206, 1210), True, 'import numpy as np\n'), ((1252, 1264), 'numpy.int32', 'np.int32', (['(-1)'], {}), '(-1)\n', (1260, 1264), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import skimage
import utils
def convolve_im(im: np.array,
fft_kernel: np.array,
verbose=True):
""" Convolves the image (im) with the frequency kernel (fft_kernel),
and returns the resulting image.
"verbose" can be used for visualizing different parts of the
convolution
Args:
im: np.array of shape [H, W]
fft_kernel: np.array of shape [H, W]
verbose: bool
Returns:
im: np.array of shape [H, W]
"""
### START YOUR CODE HERE ### (You can change anything inside this block)
conv_result = im
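    # By the convolution theorem, f * g = IFFT(FFT(f) * FFT(g)); the kernel is
    # already given in the frequency domain, so only the image needs an FFT.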
# Compute the Fourier transform of the image
fft_im = np.fft.fft2(im)
# Compute the inverse Fourier transform of F(im)*F(kernel)
inverse_fft_im = np.fft.ifft2(fft_im * fft_kernel)
    # Keep only the real part of the (numerically) complex result
conv_result = np.real(inverse_fft_im)
if verbose:
        # Use plt.subplot to place two or more images beside each other
plt.figure(figsize=(20, 5))
# plt.subplot(num_rows, num_cols, position (1-indexed))
plt.subplot(1, 4, 1)
plt.title("Original image")
plt.imshow(im, cmap="gray")
plt.subplot(1, 4, 2)
plt.title("Absolute value of F(f)")
plt.imshow(np.fft.fftshift(np.log(np.abs(fft_im))), cmap="gray")
plt.subplot(1, 4, 3)
plt.title("Absolute value of F(f*g)")
plt.imshow(np.fft.fftshift(np.log(np.abs(fft_im * fft_kernel))), cmap="gray")
plt.subplot(1, 4, 4)
plt.title("Filtered image")
plt.imshow(conv_result, cmap="gray")
### END YOUR CODE HERE ###
return conv_result
if __name__ == "__main__":
verbose = True
# Changing this code should not be needed
im = skimage.data.camera()
im = utils.uint8_to_float(im)
# DO NOT CHANGE
frequency_kernel_low_pass = utils.create_low_pass_frequency_kernel(im, radius=50)
image_low_pass = convolve_im(im, frequency_kernel_low_pass,
verbose=verbose)
# DO NOT CHANGE
frequency_kernel_high_pass = utils.create_high_pass_frequency_kernel(im, radius=50)
image_high_pass = convolve_im(im, frequency_kernel_high_pass,
verbose=verbose)
if verbose:
plt.show()
utils.save_im("camera_low_pass.png", image_low_pass)
utils.save_im("camera_high_pass.png", image_high_pass)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.abs",
"utils.uint8_to_float",
"matplotlib.pyplot.imshow",
"utils.create_low_pass_frequency_kernel",
"matplotlib.pyplot.figure",
"utils.create_high_pass_frequency_kernel",
"numpy.fft.fft2",
"numpy.real",
"s... | [((720, 735), 'numpy.fft.fft2', 'np.fft.fft2', (['im'], {}), '(im)\n', (731, 735), True, 'import numpy as np\n'), ((821, 854), 'numpy.fft.ifft2', 'np.fft.ifft2', (['(fft_im * fft_kernel)'], {}), '(fft_im * fft_kernel)\n', (833, 854), True, 'import numpy as np\n'), ((918, 941), 'numpy.real', 'np.real', (['inverse_fft_im'], {}), '(inverse_fft_im)\n', (925, 941), True, 'import numpy as np\n'), ((1811, 1832), 'skimage.data.camera', 'skimage.data.camera', ([], {}), '()\n', (1830, 1832), False, 'import skimage\n'), ((1842, 1866), 'utils.uint8_to_float', 'utils.uint8_to_float', (['im'], {}), '(im)\n', (1862, 1866), False, 'import utils\n'), ((1919, 1972), 'utils.create_low_pass_frequency_kernel', 'utils.create_low_pass_frequency_kernel', (['im'], {'radius': '(50)'}), '(im, radius=50)\n', (1957, 1972), False, 'import utils\n'), ((2140, 2194), 'utils.create_high_pass_frequency_kernel', 'utils.create_high_pass_frequency_kernel', (['im'], {'radius': '(50)'}), '(im, radius=50)\n', (2179, 2194), False, 'import utils\n'), ((2352, 2404), 'utils.save_im', 'utils.save_im', (['"""camera_low_pass.png"""', 'image_low_pass'], {}), "('camera_low_pass.png', image_low_pass)\n", (2365, 2404), False, 'import utils\n'), ((2409, 2463), 'utils.save_im', 'utils.save_im', (['"""camera_high_pass.png"""', 'image_high_pass'], {}), "('camera_high_pass.png', image_high_pass)\n", (2422, 2463), False, 'import utils\n'), ((1038, 1065), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (1048, 1065), True, 'import matplotlib.pyplot as plt\n'), ((1139, 1159), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (1150, 1159), True, 'import matplotlib.pyplot as plt\n'), ((1168, 1195), 'matplotlib.pyplot.title', 'plt.title', (['"""Original image"""'], {}), "('Original image')\n", (1177, 1195), True, 'import matplotlib.pyplot as plt\n'), ((1204, 1231), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {'cmap': '"""gray"""'}), "(im, cmap='gray')\n", (1214, 1231), True, 'import matplotlib.pyplot as plt\n'), ((1241, 1261), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(2)'], {}), '(1, 4, 2)\n', (1252, 1261), True, 'import matplotlib.pyplot as plt\n'), ((1270, 1305), 'matplotlib.pyplot.title', 'plt.title', (['"""Absolute value of F(f)"""'], {}), "('Absolute value of F(f)')\n", (1279, 1305), True, 'import matplotlib.pyplot as plt\n'), ((1388, 1408), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(3)'], {}), '(1, 4, 3)\n', (1399, 1408), True, 'import matplotlib.pyplot as plt\n'), ((1417, 1454), 'matplotlib.pyplot.title', 'plt.title', (['"""Absolute value of F(f*g)"""'], {}), "('Absolute value of F(f*g)')\n", (1426, 1454), True, 'import matplotlib.pyplot as plt\n'), ((1550, 1570), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(4)'], {}), '(1, 4, 4)\n', (1561, 1570), True, 'import matplotlib.pyplot as plt\n'), ((1579, 1606), 'matplotlib.pyplot.title', 'plt.title', (['"""Filtered image"""'], {}), "('Filtered image')\n", (1588, 1606), True, 'import matplotlib.pyplot as plt\n'), ((1615, 1651), 'matplotlib.pyplot.imshow', 'plt.imshow', (['conv_result'], {'cmap': '"""gray"""'}), "(conv_result, cmap='gray')\n", (1625, 1651), True, 'import matplotlib.pyplot as plt\n'), ((2337, 2347), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2345, 2347), True, 'import matplotlib.pyplot as plt\n'), ((1348, 1362), 'numpy.abs', 'np.abs', (['fft_im'], {}), '(fft_im)\n', (1354, 1362), True, 'import numpy as np\n'), ((1497, 
1524), 'numpy.abs', 'np.abs', (['(fft_im * fft_kernel)'], {}), '(fft_im * fft_kernel)\n', (1503, 1524), True, 'import numpy as np\n')] |
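# The extract above belongs to a frequency-domain filtering script (low/high-pass kernels
# applied to skimage's camera image). Its core operation, convolution as pointwise
# multiplication in Fourier space, reduces to the following sketch (the function name is
# illustrative, not taken from the script):
import numpy as np

def fft_filter(im, fft_kernel):
    fft_im = np.fft.fft2(im)                     # image into frequency space
    filtered = np.fft.ifft2(fft_im * fft_kernel)  # multiply by the kernel, transform back
    return np.real(filtered)                   # keep the real part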
import unittest
import numpy as np
import pandas as pd
from grafener.energyplus import process_csv
class TestEnergyPlusDataProcessing(unittest.TestCase):
def test_sim_year(self):
df = pd.DataFrame.from_dict({"Date/Time": [" 01/01 00:15:00", " 01/01 00:30:00", " 01/01 00:45:00"],
"Value": np.arange(3)})
df_2020 = process_csv(df, sim_year=2020)
self.assertEqual(np.datetime64('2020-01-01T00:15:00.000000000'), df_2020.index.values[0])
df_2021 = process_csv(df, sim_year=2021)
self.assertEqual(np.datetime64('2021-01-01T00:15:00.000000000'), df_2021.index.values[0])
def test_timestep_reporting_date_processing(self):
df = pd.DataFrame.from_dict({"Date/Time": [" 01/01 00:15:00", " 01/01 00:30:00", " 01/01 00:45:00"],
"Value": np.arange(3)})
df = process_csv(df, sim_year=2021)
self.assertEqual(3, len(df))
self.assertEqual(np.datetime64('2021-01-01T00:15:00.000000000'), df.index.values[0])
def test_monthly_reporting_date_processing(self):
df = pd.DataFrame.from_dict({"Date/Time": ["January", "February", "March"],
"Value": np.arange(3)})
df = process_csv(df, sim_year=2021)
self.assertEqual(3, len(df))
for i in range(1, 4):
self.assertEqual(np.datetime64('2021-0{}-01T00:00:00.000000000'.format(i)), df.index.values[i - 1])
def test_daily_reporting_date_processing(self):
df = pd.DataFrame.from_dict({"Date/Time": ["01/01", "01/02", "01/03"],
"Value": np.arange(3)})
df = process_csv(df, sim_year=2021)
self.assertEqual(3, len(df))
for i in range(1, 4):
self.assertEqual(np.datetime64('2021-01-0{}T00:00:00.000000000'.format(i)), df.index.values[i - 1])
| [
"numpy.datetime64",
"numpy.arange",
"grafener.energyplus.process_csv"
] | [((379, 409), 'grafener.energyplus.process_csv', 'process_csv', (['df'], {'sim_year': '(2020)'}), '(df, sim_year=2020)\n', (390, 409), False, 'from grafener.energyplus import process_csv\n'), ((526, 556), 'grafener.energyplus.process_csv', 'process_csv', (['df'], {'sim_year': '(2021)'}), '(df, sim_year=2021)\n', (537, 556), False, 'from grafener.energyplus import process_csv\n'), ((897, 927), 'grafener.energyplus.process_csv', 'process_csv', (['df'], {'sim_year': '(2021)'}), '(df, sim_year=2021)\n', (908, 927), False, 'from grafener.energyplus import process_csv\n'), ((1271, 1301), 'grafener.energyplus.process_csv', 'process_csv', (['df'], {'sim_year': '(2021)'}), '(df, sim_year=2021)\n', (1282, 1301), False, 'from grafener.energyplus import process_csv\n'), ((1687, 1717), 'grafener.energyplus.process_csv', 'process_csv', (['df'], {'sim_year': '(2021)'}), '(df, sim_year=2021)\n', (1698, 1717), False, 'from grafener.energyplus import process_csv\n'), ((435, 481), 'numpy.datetime64', 'np.datetime64', (['"""2020-01-01T00:15:00.000000000"""'], {}), "('2020-01-01T00:15:00.000000000')\n", (448, 481), True, 'import numpy as np\n'), ((582, 628), 'numpy.datetime64', 'np.datetime64', (['"""2021-01-01T00:15:00.000000000"""'], {}), "('2021-01-01T00:15:00.000000000')\n", (595, 628), True, 'import numpy as np\n'), ((990, 1036), 'numpy.datetime64', 'np.datetime64', (['"""2021-01-01T00:15:00.000000000"""'], {}), "('2021-01-01T00:15:00.000000000')\n", (1003, 1036), True, 'import numpy as np\n'), ((346, 358), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (355, 358), True, 'import numpy as np\n'), ((869, 881), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (878, 881), True, 'import numpy as np\n'), ((1243, 1255), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (1252, 1255), True, 'import numpy as np\n'), ((1659, 1671), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (1668, 1671), True, 'import numpy as np\n')] |
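# EnergyPlus timestep rows carry no year (e.g. " 01/01 00:15:00"), so a loader such as
# process_csv has to graft a simulation year on before parsing. A minimal, hedged sketch of
# that idea in plain pandas; attach_sim_year is illustrative, not the grafener API, and it
# ignores EnergyPlus's "24:00:00" end-of-day rows, which the real loader must handle:
import pandas as pd

def attach_sim_year(df, sim_year):
    stamps = pd.to_datetime(str(sim_year) + "/" + df["Date/Time"].str.strip(),
                           format="%Y/%m/%d %H:%M:%S")
    return df.drop(columns=["Date/Time"]).set_index(stamps)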
import numpy as np
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='remove odd data for 3c beyond some criteria')
## args
parser.add_argument('-i', '--input', default='fit2.value', nargs='?',
help='input list file of transition densities/temperatures')
parser.add_argument('-t1', '--temp1', default=0.5, nargs='?', type=float,
help='lowest temperature/density1')
parser.add_argument('-t2', '--temp2', default=1.0, nargs='?', type=float,
help='highest temperature/density2')
parser.add_argument('-c', '--crit', default=0.02, nargs='?', type=float,
help='acceptance deviation of transition temperature')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
# read args
args = parser.parse_args()
# check args
print(" input arguments: {0}".format(args))
list_val=np.loadtxt(args.input)
list_val=list_val.reshape(-1,3)
b1=np.array([])
b2=np.array([])
b3=np.array([])
for iline in list_val:
if (iline[0] <= iline[2]) and (iline[1] >= iline[2]):
if (iline[0] > args.temp1+args.crit) and (iline[1] < args.temp2-args.crit):
b1=np.append(b1,iline[0])
b2=np.append(b2,iline[1])
b3=np.append(b3,iline[2])
print("b1 = {:.5f} {:.5f}".format(np.average(b1),np.std(b1)))
print("b2 = {:.5f} {:.5f}".format(np.average(b2),np.std(b2)))
print("b3 = {:.5f} {:.5f}".format(np.average(b3),np.std(b3)))
| [
"numpy.average",
"argparse.ArgumentParser",
"numpy.std",
"numpy.append",
"numpy.array",
"numpy.loadtxt"
] | [((44, 192), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""remove odd data for 3c beyond some criteria"""'}), "(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n 'remove odd data for 3c beyond some criteria')\n", (67, 192), False, 'import argparse\n'), ((930, 952), 'numpy.loadtxt', 'np.loadtxt', (['args.input'], {}), '(args.input)\n', (940, 952), True, 'import numpy as np\n'), ((988, 1000), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (996, 1000), True, 'import numpy as np\n'), ((1004, 1016), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1012, 1016), True, 'import numpy as np\n'), ((1020, 1032), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1028, 1032), True, 'import numpy as np\n'), ((1311, 1325), 'numpy.average', 'np.average', (['b1'], {}), '(b1)\n', (1321, 1325), True, 'import numpy as np\n'), ((1326, 1336), 'numpy.std', 'np.std', (['b1'], {}), '(b1)\n', (1332, 1336), True, 'import numpy as np\n'), ((1373, 1387), 'numpy.average', 'np.average', (['b2'], {}), '(b2)\n', (1383, 1387), True, 'import numpy as np\n'), ((1388, 1398), 'numpy.std', 'np.std', (['b2'], {}), '(b2)\n', (1394, 1398), True, 'import numpy as np\n'), ((1435, 1449), 'numpy.average', 'np.average', (['b3'], {}), '(b3)\n', (1445, 1449), True, 'import numpy as np\n'), ((1450, 1460), 'numpy.std', 'np.std', (['b3'], {}), '(b3)\n', (1456, 1460), True, 'import numpy as np\n'), ((1195, 1218), 'numpy.append', 'np.append', (['b1', 'iline[0]'], {}), '(b1, iline[0])\n', (1204, 1218), True, 'import numpy as np\n'), ((1224, 1247), 'numpy.append', 'np.append', (['b2', 'iline[1]'], {}), '(b2, iline[1])\n', (1233, 1247), True, 'import numpy as np\n'), ((1253, 1276), 'numpy.append', 'np.append', (['b3', 'iline[2]'], {}), '(b3, iline[2])\n', (1262, 1276), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Check Points class (constructed from ROOT objects)"""
from load import ROOT as R
from matplotlib import pyplot as plt
import numpy as np
import gna.constructors as C
from gna import context
from mpl_toolkits.mplot3d import Axes3D
from mpl_tools import bindings
from mpl_tools.helpers import savefig
import os
from gna.unittest import *
def test_histogram_v01_1d(tmp_path):
edges = np.logspace(-3, 3, 40)
data = np.arange(1.0, edges.size, dtype='d')
hist = C.Histogram(edges, data)
res = hist.hist.hist()
edges_dt = np.array(hist.hist.hist.datatype().edges)
# Plot
fig = plt.figure()
ax = plt.subplot( 111 )
ax.minorticks_on()
ax.grid()
ax.set_xlabel('X label, log scale')
ax.set_ylabel('entries')
ax.set_title('Example histogram')
ax.set_xscale('log')
hist.hist.hist.plot_hist(label='label')
ax.legend()
suffix = 'histogram1d'
path = os.path.join(str(tmp_path), suffix+'.png')
savefig(path, dpi=300)
allure_attach_file(path)
plt.close()
path = os.path.join(str(tmp_path), suffix+'_graph.png')
savegraph(hist.hist, path)
allure_attach_file(path)
plt.close()
# Test consistency
assert np.all(res==data)
assert np.all(edges==edges_dt)
def test_histogram_v02_2d(tmp_path):
edgesx = np.logspace(0, 3, 6, base=2)
edgesy = np.linspace(0, 10, 20)
data = np.arange(1.0, (edgesx.size-1)*(edgesy.size-1)+1, dtype='d').reshape(edgesx.size-1, edgesy.size-1)
hist = C.Histogram2d(edgesx, edgesy, data)
res = hist.hist.hist()
edgesx_dt = np.array(hist.hist.hist.datatype().edgesNd[0])
edgesy_dt = np.array(hist.hist.hist.datatype().edgesNd[1])
# Plot
fig = plt.figure()
ax = plt.subplot( 111 )
ax.minorticks_on()
ax.grid()
ax.set_xlabel('X (column), log scale')
ax.set_ylabel('Y row')
ax.set_title('2d histogram example')
ax.set_xscale('log')
hist.hist.hist.plot_pcolor(colorbar=True)
suffix = 'histogram2d'
path = os.path.join(str(tmp_path), suffix+'.png')
savefig(path, dpi=300)
allure_attach_file(path)
plt.close()
fig = plt.figure()
ax = plt.subplot( 111, projection='3d' )
ax.minorticks_on()
ax.grid()
ax.set_xlabel('X (column)')
ax.set_ylabel('Y (row)')
ax.set_title('2d histogram example (3d)')
ax.azim-=70
hist.hist.hist.plot_bar3d(cmap=True, colorbar=True)
suffix = 'histogram2d_3d'
path = os.path.join(str(tmp_path), suffix+'.png')
savefig(path, dpi=300)
allure_attach_file(path)
plt.close()
path = os.path.join(str(tmp_path), suffix+'_graph.png')
savegraph(hist.hist, path)
allure_attach_file(path)
plt.close()
# Test consistency
assert np.all(res==data)
assert np.all(edgesx==edgesx_dt)
assert np.all(edgesy==edgesy_dt)
if __name__ == "__main__":
run_unittests(globals())
| [
"matplotlib.pyplot.subplot",
"numpy.logspace",
"matplotlib.pyplot.close",
"gna.constructors.Histogram2d",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.linspace",
"mpl_tools.helpers.savefig",
"gna.constructors.Histogram",
"numpy.all"
] | [((413, 435), 'numpy.logspace', 'np.logspace', (['(-3)', '(3)', '(40)'], {}), '(-3, 3, 40)\n', (424, 435), True, 'import numpy as np\n'), ((448, 485), 'numpy.arange', 'np.arange', (['(1.0)', 'edges.size'], {'dtype': '"""d"""'}), "(1.0, edges.size, dtype='d')\n", (457, 485), True, 'import numpy as np\n'), ((497, 521), 'gna.constructors.Histogram', 'C.Histogram', (['edges', 'data'], {}), '(edges, data)\n', (508, 521), True, 'import gna.constructors as C\n'), ((629, 641), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (639, 641), True, 'from matplotlib import pyplot as plt\n'), ((651, 667), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (662, 667), True, 'from matplotlib import pyplot as plt\n'), ((986, 1008), 'mpl_tools.helpers.savefig', 'savefig', (['path'], {'dpi': '(300)'}), '(path, dpi=300)\n', (993, 1008), False, 'from mpl_tools.helpers import savefig\n'), ((1042, 1053), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1051, 1053), True, 'from matplotlib import pyplot as plt\n'), ((1179, 1190), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1188, 1190), True, 'from matplotlib import pyplot as plt\n'), ((1226, 1245), 'numpy.all', 'np.all', (['(res == data)'], {}), '(res == data)\n', (1232, 1245), True, 'import numpy as np\n'), ((1255, 1280), 'numpy.all', 'np.all', (['(edges == edges_dt)'], {}), '(edges == edges_dt)\n', (1261, 1280), True, 'import numpy as np\n'), ((1330, 1358), 'numpy.logspace', 'np.logspace', (['(0)', '(3)', '(6)'], {'base': '(2)'}), '(0, 3, 6, base=2)\n', (1341, 1358), True, 'import numpy as np\n'), ((1372, 1394), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(20)'], {}), '(0, 10, 20)\n', (1383, 1394), True, 'import numpy as np\n'), ((1518, 1553), 'gna.constructors.Histogram2d', 'C.Histogram2d', (['edgesx', 'edgesy', 'data'], {}), '(edgesx, edgesy, data)\n', (1531, 1553), True, 'import gna.constructors as C\n'), ((1730, 1742), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1740, 1742), True, 'from matplotlib import pyplot as plt\n'), ((1752, 1768), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1763, 1768), True, 'from matplotlib import pyplot as plt\n'), ((2077, 2099), 'mpl_tools.helpers.savefig', 'savefig', (['path'], {'dpi': '(300)'}), '(path, dpi=300)\n', (2084, 2099), False, 'from mpl_tools.helpers import savefig\n'), ((2133, 2144), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2142, 2144), True, 'from matplotlib import pyplot as plt\n'), ((2156, 2168), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2166, 2168), True, 'from matplotlib import pyplot as plt\n'), ((2178, 2211), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'projection': '"""3d"""'}), "(111, projection='3d')\n", (2189, 2211), True, 'from matplotlib import pyplot as plt\n'), ((2520, 2542), 'mpl_tools.helpers.savefig', 'savefig', (['path'], {'dpi': '(300)'}), '(path, dpi=300)\n', (2527, 2542), False, 'from mpl_tools.helpers import savefig\n'), ((2576, 2587), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2585, 2587), True, 'from matplotlib import pyplot as plt\n'), ((2713, 2724), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2722, 2724), True, 'from matplotlib import pyplot as plt\n'), ((2760, 2779), 'numpy.all', 'np.all', (['(res == data)'], {}), '(res == data)\n', (2766, 2779), True, 'import numpy as np\n'), ((2789, 2816), 'numpy.all', 'np.all', (['(edgesx == edgesx_dt)'], {}), '(edgesx == edgesx_dt)\n', (2795, 2816), True, 'import 
numpy as np\n'), ((2826, 2853), 'numpy.all', 'np.all', (['(edgesy == edgesy_dt)'], {}), '(edgesy == edgesy_dt)\n', (2832, 2853), True, 'import numpy as np\n'), ((1407, 1475), 'numpy.arange', 'np.arange', (['(1.0)', '((edgesx.size - 1) * (edgesy.size - 1) + 1)'], {'dtype': '"""d"""'}), "(1.0, (edgesx.size - 1) * (edgesy.size - 1) + 1, dtype='d')\n", (1416, 1475), True, 'import numpy as np\n')] |
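# plot_hist() and plot_pcolor() above appear to be bindings attached by the mpl_tools import;
# in plain matplotlib the 1d case reduces to drawing bin contents against their edges, roughly:
import numpy as np
from matplotlib import pyplot as plt

edges = np.logspace(-3, 3, 40)            # 40 edges, i.e. 39 bins on a log axis
counts = np.arange(1.0, edges.size)       # one value per bin
plt.stairs(counts, edges, label='label')  # step outline of the histogram (matplotlib >= 3.4)
plt.xscale('log')
plt.legend()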
import numpy as np
from astropy.table import Table
from xwavecal.tests.utils import FakeContext, FakeImage
from xwavecal.utils.basic_utils import median_subtract_channels_y
from xwavecal import basic
class TestBasic:
CONTEXT = FakeContext()
def test_gain_normalizer(self):
image = FakeImage()
image.data = np.ones((10, 10))
image.set_header_val('gain', 2)
image = basic.GainNormalizer(self.CONTEXT).do_stage(image)
assert image.get_header_val('gain') == 1
assert np.allclose(image.data, 2)
def test_overscan_subtractor(self):
image = FakeImage()
image.data = np.ones((10, 12)).astype(float)
image.data[:, 10:] = .5
image.set_header_val('data_section', '[1:10,1:10]')
image.set_header_val('overscan_section', '[1:10, 11:12]')
image = basic.OverscanSubtractor(self.CONTEXT).do_stage(image)
assert np.allclose(image.data[:10, :10], 0.5)
def test_overscan_trimmer(self):
image = FakeImage()
image.data = np.ones((10, 12)).astype(float)
image.data[:, 10:] = .5
image.set_header_val('data_section', '[1:10,1:10]')
image = basic.Trimmer(self.CONTEXT).do_stage(image)
assert np.allclose(image.data.shape, (10, 10))
class TestBackgroundSubtract1dSpectrum:
def test_do_stage(self):
stage = basic.BackgroundSubtractSpectrum(FakeContext())
image = FakeImage()
image.data_tables = {'SPECBOX': Table({'fiber': [1, 1], 'flux': np.ones((2, 10)),
'stderr': np.ones((2, 10))})}
image = stage.do_stage(image)
assert np.allclose(image.data_tables['SPECBOX']['flux'].data, np.zeros((2, 10)))
class TestMedianSubtractReadoutsAlongY:
def test_do_stage(self):
image = FakeImage()
image.data = np.ones((4, 4))
image.header['num_rd_channels'] = 1
image = basic.MedianSubtractReadoutsAlongY(None).do_stage(image)
assert np.allclose(image.data, 0)
class TestUtils:
def test_median_subtract_channels(self):
a = (np.arange(3) * np.ones((3, 3))).T
assert np.allclose(median_subtract_channels_y(a, 3), 0)
a = (np.array([1, 1, 1, 2, 2, 2]) * np.ones((6, 6))).T
assert np.allclose(median_subtract_channels_y(a, 2), 0)
| [
"xwavecal.basic.MedianSubtractReadoutsAlongY",
"xwavecal.basic.Trimmer",
"numpy.allclose",
"numpy.zeros",
"numpy.ones",
"xwavecal.basic.GainNormalizer",
"xwavecal.tests.utils.FakeContext",
"xwavecal.tests.utils.FakeImage",
"numpy.arange",
"numpy.array",
"xwavecal.basic.OverscanSubtractor",
"xw... | [((234, 247), 'xwavecal.tests.utils.FakeContext', 'FakeContext', ([], {}), '()\n', (245, 247), False, 'from xwavecal.tests.utils import FakeContext, FakeImage\n'), ((301, 312), 'xwavecal.tests.utils.FakeImage', 'FakeImage', ([], {}), '()\n', (310, 312), False, 'from xwavecal.tests.utils import FakeContext, FakeImage\n'), ((334, 351), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (341, 351), True, 'import numpy as np\n'), ((523, 549), 'numpy.allclose', 'np.allclose', (['image.data', '(2)'], {}), '(image.data, 2)\n', (534, 549), True, 'import numpy as np\n'), ((607, 618), 'xwavecal.tests.utils.FakeImage', 'FakeImage', ([], {}), '()\n', (616, 618), False, 'from xwavecal.tests.utils import FakeContext, FakeImage\n'), ((916, 954), 'numpy.allclose', 'np.allclose', (['image.data[:10, :10]', '(0.5)'], {}), '(image.data[:10, :10], 0.5)\n', (927, 954), True, 'import numpy as np\n'), ((1009, 1020), 'xwavecal.tests.utils.FakeImage', 'FakeImage', ([], {}), '()\n', (1018, 1020), False, 'from xwavecal.tests.utils import FakeContext, FakeImage\n'), ((1241, 1280), 'numpy.allclose', 'np.allclose', (['image.data.shape', '(10, 10)'], {}), '(image.data.shape, (10, 10))\n', (1252, 1280), True, 'import numpy as np\n'), ((1432, 1443), 'xwavecal.tests.utils.FakeImage', 'FakeImage', ([], {}), '()\n', (1441, 1443), False, 'from xwavecal.tests.utils import FakeContext, FakeImage\n'), ((1825, 1836), 'xwavecal.tests.utils.FakeImage', 'FakeImage', ([], {}), '()\n', (1834, 1836), False, 'from xwavecal.tests.utils import FakeContext, FakeImage\n'), ((1858, 1873), 'numpy.ones', 'np.ones', (['(4, 4)'], {}), '((4, 4))\n', (1865, 1873), True, 'import numpy as np\n'), ((2006, 2032), 'numpy.allclose', 'np.allclose', (['image.data', '(0)'], {}), '(image.data, 0)\n', (2017, 2032), True, 'import numpy as np\n'), ((1401, 1414), 'xwavecal.tests.utils.FakeContext', 'FakeContext', ([], {}), '()\n', (1412, 1414), False, 'from xwavecal.tests.utils import FakeContext, FakeImage\n'), ((1719, 1736), 'numpy.zeros', 'np.zeros', (['(2, 10)'], {}), '((2, 10))\n', (1727, 1736), True, 'import numpy as np\n'), ((2171, 2203), 'xwavecal.utils.basic_utils.median_subtract_channels_y', 'median_subtract_channels_y', (['a', '(3)'], {}), '(a, 3)\n', (2197, 2203), False, 'from xwavecal.utils.basic_utils import median_subtract_channels_y\n'), ((2298, 2330), 'xwavecal.utils.basic_utils.median_subtract_channels_y', 'median_subtract_channels_y', (['a', '(2)'], {}), '(a, 2)\n', (2324, 2330), False, 'from xwavecal.utils.basic_utils import median_subtract_channels_y\n'), ((408, 442), 'xwavecal.basic.GainNormalizer', 'basic.GainNormalizer', (['self.CONTEXT'], {}), '(self.CONTEXT)\n', (428, 442), False, 'from xwavecal import basic\n'), ((640, 657), 'numpy.ones', 'np.ones', (['(10, 12)'], {}), '((10, 12))\n', (647, 657), True, 'import numpy as np\n'), ((846, 884), 'xwavecal.basic.OverscanSubtractor', 'basic.OverscanSubtractor', (['self.CONTEXT'], {}), '(self.CONTEXT)\n', (870, 884), False, 'from xwavecal import basic\n'), ((1042, 1059), 'numpy.ones', 'np.ones', (['(10, 12)'], {}), '((10, 12))\n', (1049, 1059), True, 'import numpy as np\n'), ((1182, 1209), 'xwavecal.basic.Trimmer', 'basic.Trimmer', (['self.CONTEXT'], {}), '(self.CONTEXT)\n', (1195, 1209), False, 'from xwavecal import basic\n'), ((1934, 1974), 'xwavecal.basic.MedianSubtractReadoutsAlongY', 'basic.MedianSubtractReadoutsAlongY', (['None'], {}), '(None)\n', (1968, 1974), False, 'from xwavecal import basic\n'), ((2110, 2122), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', 
(2119, 2122), True, 'import numpy as np\n'), ((2125, 2140), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (2132, 2140), True, 'import numpy as np\n'), ((2221, 2249), 'numpy.array', 'np.array', (['[1, 1, 1, 2, 2, 2]'], {}), '([1, 1, 1, 2, 2, 2])\n', (2229, 2249), True, 'import numpy as np\n'), ((2252, 2267), 'numpy.ones', 'np.ones', (['(6, 6)'], {}), '((6, 6))\n', (2259, 2267), True, 'import numpy as np\n'), ((1516, 1532), 'numpy.ones', 'np.ones', (['(2, 10)'], {}), '((2, 10))\n', (1523, 1532), True, 'import numpy as np\n'), ((1591, 1607), 'numpy.ones', 'np.ones', (['(2, 10)'], {}), '((2, 10))\n', (1598, 1607), True, 'import numpy as np\n')] |
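# The tests above pin down the behaviour of median_subtract_channels_y without showing it; an
# implementation consistent with them (split the rows into n readout channels, subtract each
# channel's median) could look like the sketch below. This is illustrative, not the xwavecal
# source.
import numpy as np

def median_subtract_channels_y_sketch(data, n):
    out = np.array(data, dtype=float)
    rows_per_channel = data.shape[0] // n
    for i in range(n):
        block = slice(i * rows_per_channel, (i + 1) * rows_per_channel)
        out[block] -= np.median(data[block])  # remove the per-channel pedestal
    return out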
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
__author__ = '<NAME>'
from collections import defaultdict
from itertools import tee  # zip is a builtin in Python 3; itertools has no zip to import
import matplotlib.pyplot as plt
import numpy as np
from numpy import float64
p_x, p_y = lambda p: p[0], lambda p: p[1]
def get_rightest_point(points):
return max(points, key=p_x)
def get_leftest_point(points):
return min(points, key=p_x)
def get_highest_point(points):
return max(points, key=p_y)
def get_lowest_point(points):
return min(points, key=p_y)
def last_abscissa(x_bin):
return p_x(get_rightest_point(x_bin))
def last_ordinate(y_bin):
return p_y(get_highest_point(y_bin))
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def is_sorted_increasing_by(D, increasing_by='x'):
assert increasing_by == 'x' or increasing_by == 'y'
if increasing_by == 'x':
return all(p_x(D[i]) <= p_x(D[i + 1]) for i in range(len(D) - 1))
else:
return all(p_y(D[i]) <= p_y(D[i + 1]) for i in range(len(D) - 1))
def get_distribution_of_points(ordinals):
return np.fromiter((o2 + 1 if o1 < 0 else o2 - o1 for o1, o2 in pairwise(ordinals)), dtype=int)
def number_of_points_in_partition(ordinals):
return ordinals[-1] - ordinals[0]
def get_partition_histogram(ordinals):
distribution_of_points = get_distribution_of_points(ordinals)
histogram = distribution_of_points / float64(number_of_points_in_partition(ordinals))
return histogram
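# Worked example of the ordinal convention (ordinals hold the index of the last point of each
# partition, with a leading -1 sentinel), e.g. for ordinals = [-1, 2, 5]:
#   get_distribution_of_points([-1, 2, 5])    -> array([3, 3])   (points 0..2 and 3..5)
#   number_of_points_in_partition([-1, 2, 5]) -> 6
#   get_partition_histogram([-1, 2, 5])       -> array([0.5, 0.5])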
def sort_D_increasing_by(D, increasing_by='x'):
assert increasing_by == 'x' or increasing_by == 'y'
return sorted(D, key=p_x) if increasing_by == 'x' else sorted(D, key=p_y)
def GroupPointsByPartition(P):
    d = defaultdict(list)
    for k, v in P.items():  # dict.iteritems() is Python 2 only
        d[v].append(k)
    return dict(d)
def visualize(x_axis_parition={}, y_axis_partition={}, step=0.2):
points = set()
for partition in x_axis_parition.values():
for p in partition:
points.add(p)
for partition in y_axis_partition.values():
for p in partition:
points.add(p)
fig = plt.figure()
ax = fig.add_subplot(111)
# Scatter points
    ax.scatter(list(map(p_x, points)), list(map(p_y, points)))  # materialize the maps for matplotlib
x_bin_edge = lambda x_bin: last_abscissa(x_bin) + step
y_bin_edge = lambda y_bin: last_ordinate(y_bin) + step
    x_ticks = list(map(x_bin_edge, x_axis_parition.values()))
    y_ticks = list(map(y_bin_edge, y_axis_partition.values()))
ax.get_xaxis().set_ticks(x_ticks)
ax.get_yaxis().set_ticks(y_ticks)
# Format grid appearance
ax.grid(True, alpha=0.5, color='red', linestyle='-', linewidth=1.5)
x_partition_size = len(x_axis_parition.values())
y_partition_size = len(y_axis_partition.values())
plt.title(str(x_partition_size) + ' - by - ' + str(y_partition_size) + ' Grid')
plt.show()
def GetPartitionMapFromOrdinals(D, ordinals, axis='x'):
assert is_sorted_increasing_by(D, axis)
partition_map = {}
current_partition = 0
for p_begin, p_end in pairwise(ordinals):
partition_points = []
for point_index in range(p_begin + 1, p_end + 1):
partition_points.append(D[point_index])
partition_map[current_partition] = partition_points
current_partition += 1
return partition_map
def partition_size(ordinals):
return len(ordinals) - 1
def GetGridHistogram(P_ordinals, Q):
Dx = sort_D_increasing_by(Q.keys(), 'x')
rows = GroupPointsByPartition(Q)
columns = GetPartitionMapFromOrdinals(Dx, P_ordinals)
m = number_of_points_in_partition(P_ordinals)
    def grid_cell_size(row_index, column_index):
        return len(set(rows[row_index]) & set(columns[column_index]))
    grid_points_distribution = (grid_cell_size(r, c) for r in reversed(range(len(rows))) for c in range(len(columns)))
grid_distribution = np.fromiter(grid_points_distribution, dtype=int)
histogram = grid_distribution / float64(m)
return histogram
def GetPartitionOrdinalsFromMap(D, P, axis='x'):
assert is_sorted_increasing_by(D, axis)
P_tilde = GroupPointsByPartition(P)
if axis == 'x':
return [D.index(get_leftest_point(P_tilde[0])) - 1] + [D.index(get_rightest_point(P_tilde[k])) for k in
sorted(P_tilde.keys())]
elif axis == 'y':
return [D.index(get_lowest_point(P_tilde[0])) - 1] + [D.index(get_highest_point(P_tilde[k])) for k in
sorted(P_tilde.keys())]
| [
"matplotlib.pyplot.show",
"collections.defaultdict",
"itertools.zip",
"matplotlib.pyplot.figure",
"numpy.fromiter",
"itertools.tee",
"numpy.float64"
] | [((741, 754), 'itertools.tee', 'tee', (['iterable'], {}), '(iterable)\n', (744, 754), False, 'from itertools import tee, zip\n'), ((784, 793), 'itertools.zip', 'zip', (['a', 'b'], {}), '(a, b)\n', (787, 793), False, 'from itertools import tee, zip\n'), ((1765, 1782), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1776, 1782), False, 'from collections import defaultdict\n'), ((2151, 2163), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2161, 2163), True, 'import matplotlib.pyplot as plt\n'), ((2875, 2885), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2883, 2885), True, 'import matplotlib.pyplot as plt\n'), ((3897, 3945), 'numpy.fromiter', 'np.fromiter', (['grid_points_distribution'], {'dtype': 'int'}), '(grid_points_distribution, dtype=int)\n', (3908, 3945), True, 'import numpy as np\n'), ((3983, 3993), 'numpy.float64', 'float64', (['m'], {}), '(m)\n', (3990, 3993), False, 'from numpy import float64\n')] |
import colorsys
import random
import os
import numpy as np
from yolo import YOLO
from PIL import Image
import cv2
import math
#import cv2 as cv
#import argparse
import matplotlib.pyplot as plt
video_path = "D:/test.mp4"
output_path = "D:/0.mp4"
ImageDir = os.listdir("D:/test/testimages")
j = 0
a = 0
b = 0
c = 0
detected_theata = 0
detected_theata1 = 0
detected_theata2 = 0
detected_theata3 = 0
jiaodu = 0
# this loads the parameters of the already trained YOLOv3 model
yolov3_args = {
"model_path": 'logs/000/trained_weights_final.h5',
"anchors_path": 'model_data/yolo_anchors.txt',
"classes_path": 'model_data/coco_classes.txt',
"score": 0.08,
"iou": 0.3,
"model_image_size": (416, 416),
"gpu_num": 1,
}
def image(pic_path):
if pic_path == 0:
yolov3 = YOLO(**yolov3_args)
for i in range(len(ImageDir)):
ImagePath = "D:/test/testimages/" + ImageDir[i]
ImageName = "D:/test/testimages/" + str(i) + ".jpg"
img = Image.open(ImagePath)
image, boxes, scores, classes = yolov3.detect_image_mul(img)
            origin = np.asarray(image)  # convert the PIL image to a numpy array
            image_bgr = cv2.cvtColor(np.asarray(origin), cv2.COLOR_RGB2BGR)  # convert RGB to OpenCV's BGR channel order
cv2.imwrite(ImageName, image_bgr)
elif pic_path != 0:
yolov3 = YOLO(**yolov3_args)
        img = Image.open(pic_path)  # open the image
img2 = cv2.imread(pic_path)
        image, boxes, scores, classes = yolov3.detect_image_mul(img)  # run YOLOv3 detection
        origin = np.asarray(image)  # convert the PIL image to a numpy array
        image_bgr = cv2.cvtColor(np.asarray(origin), cv2.COLOR_RGB2BGR)  # convert RGB to OpenCV's BGR channel order
cv2.imwrite("D:/git/work/keras-yolo3/kuangxuanimages/detected.jpg", image_bgr)
        # boxes holds the bounding-box coordinates predicted by YOLO; the original image is cropped with them
for i in range(boxes.shape[0]):
top, left, bottom, right = boxes[i]
            # or equivalently, element by element:
#top = boxes[0][0]
#left = boxes[0][1]
#bottom = boxes[0][2]
#right = boxes[0][3]
top = top - 5
left = left - 5
bottom = bottom + 5
right = right + 5
            # coordinates of the top-left corner
top = int(max(0, np.floor(top + 0.5).astype('int32')))
left = int(max(0, np.floor(left + 0.5).astype('int32')))
            # coordinates of the bottom-right corner
bottom = int(min(np.shape(image)[0], np.floor(bottom + 0.5).astype('int32')))
right = int(min(np.shape(image)[1], np.floor(right + 0.5).astype('int32')))
            # record the height and width of the crop
a = bottom - top
b = right - left
            print('height', a)
            print('width', b)
            croped_region = image_bgr[top:bottom, left:right]  # numpy slicing: height first, then width
#cv2.imshow("cropimage", croped_region)
            # save the cropped target to disk, indexed by box so multiple detections do not overwrite each other
            cv2.imwrite("D:/git/work/keras-yolo3/kuangxuanimages/cutted_img_" + str(i) + ".jpg", croped_region)
            print('cropped successfully')
cv2.waitKey(0)
cv2.destroyAllWindows()
def vameterdetect(num):
if num == 1:
origin = cv2.imread("D:/git/work/keras-yolo3/kuangxuanimages/cutted_img_"+str(j)+".jpg", 0)
        nor = cv2.resize(origin, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)  # downscale by 0.5 in x and y, bilinear interpolation (the default)
        image_bgr = cv2.cvtColor(nor, cv2.COLOR_RGB2BGR)  # convert to OpenCV's BGR channel order
        gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)
        median = cv2.medianBlur(origin, 1)  # median filter for denoising; the second argument is the kernel size
        edges = cv2.Canny(median, 250, 350, apertureSize=3)  # edge detection: cv2.Canny(image, low threshold, high threshold, Sobel aperture)
        # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))  # rectangular structuring element
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))  # elliptical structuring element
        # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))  # cross-shaped structuring element
        # cv2.getStructuringElement(shape, kernel size, anchor) returns a structuring element of the given shape and size
        # Hough line transform
        lines = cv2.HoughLines(edges, 1, np.pi / 180, 60)
result = edges.copy()
        for line in lines[5]:
            rho = line[0]  # first element: the distance rho
            theta = line[1]  # second element: the angle theta
            detected_theata1 = ((theta / np.pi) * 180)
            print('distance:' + str(rho), 'theta:' + str(((theta / np.pi) * 180)))
            label_text = 'distance:' + str(round(rho)) + 'theta:' + str(round((theta / np.pi) * 180 - 90, 2))
            if (theta > 3 * (np.pi / 3)) or (theta < (np.pi / 2)):  # vertical line
                # intersection of the line with the first row
                pt1 = (int(rho / np.cos(theta)), 0)
                # intersection of the line with the last row
                pt2 = (int((rho - result.shape[0] * np.sin(theta)) / np.cos(theta)), result.shape[0])
                # draw a white line
                cv2.line(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)
                # print('theta > 180 or theta < 90')
            else:  # horizontal line
                # intersection of the line with the first column
                pt1 = (0, int(rho / np.sin(theta)))
                # intersection of the line with the last column
                pt2 = (result.shape[1], int((rho - result.shape[1] * np.cos(theta)) / np.sin(theta)))
                # draw the line
                cv2.line(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)
                # print('theta < 180 and theta > 90')
        for line in lines[18]:
            rho = line[0]  # first element: the distance rho
            theta = line[1]  # second element: the angle theta
            detected_theata2 = ((theta / np.pi) * 180)
            print('distance:' + str(rho), 'theta:' + str(((theta / np.pi) * 180)))
            label_text = 'distance:' + str(round(rho)) + 'theta:' + str(round((theta / np.pi) * 180 - 90, 2))
            if (theta > 3 * (np.pi / 3)) or (theta < (np.pi / 2)):  # vertical line
                # intersection of the line with the first row
                pt1 = (int(rho / np.cos(theta)), 0)
                # intersection of the line with the last row
                pt2 = (int((rho - result.shape[0] * np.sin(theta)) / np.cos(theta)), result.shape[0])
                # draw a white line
                cv2.line(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)
                # print('theta > 180 or theta < 90')
            else:  # horizontal line
                # intersection of the line with the first column
                pt1 = (0, int(rho / np.sin(theta)))
                # intersection of the line with the last column
                pt2 = (result.shape[1], int((rho - result.shape[1] * np.cos(theta)) / np.sin(theta)))
                # draw the line
                cv2.line(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)
                # print('theta < 180 and theta > 90')
        for line in lines[4]:
            rho = line[0]  # first element: the distance rho
            theta = line[1]  # second element: the angle theta
            detected_theata3 = ((theta / np.pi) * 180)
            print('distance:' + str(rho), 'theta:' + str(((theta / np.pi) * 180)))
            label_text = 'distance:' + str(round(rho)) + 'theta:' + str(round((theta / np.pi) * 180 - 90, 2))
            if (theta > 3 * (np.pi / 3)) or (theta < (np.pi / 2)):  # vertical line
                # intersection of the line with the first row
                pt1 = (int(rho / np.cos(theta)), 0)
                # intersection of the line with the last row
                pt2 = (int((rho - result.shape[0] * np.sin(theta)) / np.cos(theta)), result.shape[0])
                # draw a white line
                cv2.line(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)
                # print('theta > 180 or theta < 90')
            else:  # horizontal line
                # intersection of the line with the first column
                pt1 = (0, int(rho / np.sin(theta)))
                # intersection of the line with the last column
                pt2 = (result.shape[1], int((rho - result.shape[1] * np.cos(theta)) / np.sin(theta)))
                # draw the line
                cv2.line(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)
                # print('theta < 180 and theta > 90')
#cv2.imwrite("D:/git/work/keras-yolo3/kuangxuanimages/median.jpg", median)
cv2.imwrite("D:/git/work/keras-yolo3/kuangxuanimages/edge.jpg", edges)
cv2.imwrite("D:/git/work/keras-yolo3/kuangxuanimages/result.jpg", result)
#detected_theata = ((detected_theata2 - detected_theata3) / (detected_theata3 - detected_theata1)) * 800
#detected_theata = ((detected_theata1 - detected_theata3) / (detected_theata2 - detected_theata3)) * 500
#detected_theata = ((detected_theata2 - detected_theata1 + 180) / (detected_theata3 - detected_theata1 + 180)) * 2.5
#detected_theata = ((detected_theata1 - detected_theata2) / (detected_theata3 - detected_theata2 + 180)) * 120 - 10
#detected_theata = (180 - (detected_theata2 - detected_theata1)) / (360 - (detected_theata2 - detected_theata3)) * 1
detected_theata = ((180 + detected_theata3 - detected_theata1)) / (360 - (detected_theata1 - detected_theata2)) * 1.6 + 0.03
return detected_theata
def caculatejiaodu(num):
if num == 1 :
jiaodu = vameterdetect(1)
print('readnum = ', jiaodu)
image_detected = cv2.imread("D:/git/work/keras-yolo3/kuangxuanimages/detected.jpg", 0)
#image_cov = cv2.cvtColor(image_detected, cv2.COLOR_GRAY2BGR)
cv2.putText(image_detected, 'Readnum = {}'.format(jiaodu), (11, 11 + 22), cv2.FONT_HERSHEY_COMPLEX, 1, [230, 0, 0], 2)
cv2.imwrite("D:/git/work/keras-yolo3/kuangxuanimages/read_num.jpg", image_detected)
cv2.imshow("ReadNum", image_detected)
print('Read success!')
cv2.waitKey(0)
cv2.destroyAllWindows()
return jiaodu
def video():
#jiaodu = caculatejiaodu(1)
#mode = 1
yolov3 = YOLO(**yolov3_args)
video_cap = cv2.VideoCapture(video_path)
if not video_cap.isOpened():
raise IOError
video_FourCC = int(video_cap.get(cv2.CAP_PROP_FOURCC))
video_fps = video_cap.get(cv2.CAP_PROP_FPS)
video_size = (int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
frame_index = 0
name = 4228
while True:
#RecDraw.clear()
return_value, frame = video_cap.read()
frame_index = frame_index + 1
if frame is None:
break
if frame_index % 2 == 1:
x, y = frame.shape[0:2]
new_image = cv2.resize(frame, (int(y / 2), int(x / 2)))
name += 1
strname = "D:/test/" + str(name) + ".jpg"
cv2.imwrite(strname, new_image)
image_new = Image.fromarray(frame)
image, boxes, scores, classes = yolov3.detect_image_mul(image_new)
origin = np.asarray(image)
image_bgr = cv2.cvtColor(np.asarray(origin), cv2.COLOR_RGB2BGR) # cv2下的色彩空间灰度化
cv2.imwrite("D:/git/work/keras-yolo3/kuangxuanimages/detected.jpg", image_bgr)
        # boxes holds the bounding-box coordinates predicted by YOLO; the original image is cropped with them
for i in range(boxes.shape[0]):
top, left, bottom, right = boxes[i]
            # or equivalently, element by element:
# top = boxes[0][0]
# left = boxes[0][1]
# bottom = boxes[0][2]
# right = boxes[0][3]
top = top - 5
left = left - 5
bottom = bottom + 5
right = right + 5
            # coordinates of the top-left corner
top = int(max(0, np.floor(top + 0.5).astype('int32')))
left = int(max(0, np.floor(left + 0.5).astype('int32')))
            # coordinates of the bottom-right corner
bottom = int(min(np.shape(image)[0], np.floor(bottom + 0.5).astype('int32')))
right = int(min(np.shape(image)[1], np.floor(right + 0.5).astype('int32')))
            # record the height and width of the crop
a = bottom - top
b = right - left
print('height', a)
            print('width', b)
            croped_region = image_bgr[top:bottom, left:right]  # numpy slicing: height first, then width
# cv2.imshow("cropimage", croped_region)
            nor = cv2.resize(croped_region, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)  # downscale by 0.5 in x and y, bilinear interpolation (the default)
            image_bgr = cv2.cvtColor(nor, cv2.COLOR_RGB2BGR)  # convert to OpenCV's BGR channel order
            gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)
            median = cv2.medianBlur(origin, 1)  # median filter for denoising; the second argument is the kernel size
            edges = cv2.Canny(median, 250, 350, apertureSize=3)  # edge detection: cv2.Canny(image, low threshold, high threshold, Sobel aperture)
            # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))  # rectangular structuring element
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))  # elliptical structuring element
            # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))  # cross-shaped structuring element
            # cv2.getStructuringElement(shape, kernel size, anchor) returns a structuring element of the given shape and size
            # Hough line transform
            lines = cv2.HoughLines(edges, 1, np.pi / 180, 60)
result = edges.copy()
            for line in lines[5]:
                rho = line[0]  # first element: the distance rho
                theta = line[1]  # second element: the angle theta
                detected_theata1 = ((theta / np.pi) * 180)
                print('distance:' + str(rho), 'theta:' + str(((theta / np.pi) * 180)))
                label_text = 'distance:' + str(round(rho)) + 'theta:' + str(round((theta / np.pi) * 180 - 90, 2))
                if (theta > 3 * (np.pi / 3)) or (theta < (np.pi / 2)):  # vertical line
                    # intersection of the line with the first row
                    pt1 = (int(rho / np.cos(theta)), 0)
                    # intersection of the line with the last row
                    pt2 = (int((rho - result.shape[0] * np.sin(theta)) / np.cos(theta)), result.shape[0])
                    # draw a white line
                    cv2.line(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)
                    # print('theta > 180 or theta < 90')
                else:  # horizontal line
                    # intersection of the line with the first column
                    pt1 = (0, int(rho / np.sin(theta)))
                    # intersection of the line with the last column
                    pt2 = (result.shape[1], int((rho - result.shape[1] * np.cos(theta)) / np.sin(theta)))
                    # draw the line
                    cv2.line(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)
                    # print('theta < 180 and theta > 90')
            for line in lines[18]:
                rho = line[0]  # first element: the distance rho
                theta = line[1]  # second element: the angle theta
                detected_theata2 = ((theta / np.pi) * 180)
                print('distance:' + str(rho), 'theta:' + str(((theta / np.pi) * 180)))
                label_text = 'distance:' + str(round(rho)) + 'theta:' + str(round((theta / np.pi) * 180 - 90, 2))
                if (theta > 3 * (np.pi / 3)) or (theta < (np.pi / 2)):  # vertical line
                    # intersection of the line with the first row
                    pt1 = (int(rho / np.cos(theta)), 0)
                    # intersection of the line with the last row
                    pt2 = (int((rho - result.shape[0] * np.sin(theta)) / np.cos(theta)), result.shape[0])
                    # draw a white line
                    cv2.line(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)
                    # print('theta > 180 or theta < 90')
                else:  # horizontal line
                    # intersection of the line with the first column
                    pt1 = (0, int(rho / np.sin(theta)))
                    # intersection of the line with the last column
                    pt2 = (result.shape[1], int((rho - result.shape[1] * np.cos(theta)) / np.sin(theta)))
                    # draw the line
                    cv2.line(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)
                    # print('theta < 180 and theta > 90')
            for line in lines[4]:
                rho = line[0]  # first element: the distance rho
                theta = line[1]  # second element: the angle theta
                detected_theata3 = ((theta / np.pi) * 180)
                print('distance:' + str(rho), 'theta:' + str(((theta / np.pi) * 180)))
                label_text = 'distance:' + str(round(rho)) + 'theta:' + str(round((theta / np.pi) * 180 - 90, 2))
                if (theta > 3 * (np.pi / 3)) or (theta < (np.pi / 2)):  # vertical line
                    # intersection of the line with the first row
                    pt1 = (int(rho / np.cos(theta)), 0)
                    # intersection of the line with the last row
                    pt2 = (int((rho - result.shape[0] * np.sin(theta)) / np.cos(theta)), result.shape[0])
                    # draw a white line
                    cv2.line(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)
                    # print('theta > 180 or theta < 90')
                else:  # horizontal line
                    # intersection of the line with the first column
                    pt1 = (0, int(rho / np.sin(theta)))
                    # intersection of the line with the last column
                    pt2 = (result.shape[1], int((rho - result.shape[1] * np.cos(theta)) / np.sin(theta)))
                    # draw the line
                    cv2.line(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)
                    # print('theta < 180 and theta > 90')
detected_theata = ((180 + detected_theata3 - detected_theata1)) / (360 - (detected_theata1 - detected_theata2)) * 1.6 + 0.29
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.putText(origin, 'Readnum = {}'.format(detected_theata), (11, 11 + 22), cv2.FONT_HERSHEY_COMPLEX, 1, [230, 0, 0], 2)
cv2.imshow("result", origin)
if isOutput:
out.write(origin)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if __name__ == '__main__':
# print("please input the type of your want to identify")
# m = input("pic or video? Answer: ")
# if m == "video":image
# elif m == "pic":
# pic_path = input("please input image path : ")
# image(pic_path)
#image("D:/git/work/keras-yolo3/images/6959.jpg")
#meterdetect(1)
#vameterdetect(1)
caculatejiaodu(1)
#video()
# image("D:/r.jpg")
# image(0)
| [
"cv2.medianBlur",
"numpy.floor",
"numpy.shape",
"numpy.sin",
"cv2.VideoWriter",
"cv2.imshow",
"cv2.line",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.destroyAllWindows",
"cv2.resize",
"yolo.YOLO",
"cv2.Canny",
"cv2.waitKey",
"numpy.asarray",
"cv2.HoughLines",
"numpy.cos",
"os.listdir",
"... | [((271, 303), 'os.listdir', 'os.listdir', (['"""D:/test/testimages"""'], {}), "('D:/test/testimages')\n", (281, 303), False, 'import os\n'), ((9468, 9487), 'yolo.YOLO', 'YOLO', ([], {}), '(**yolov3_args)\n', (9472, 9487), False, 'from yolo import YOLO\n'), ((9505, 9533), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (9521, 9533), False, 'import cv2\n'), ((800, 819), 'yolo.YOLO', 'YOLO', ([], {}), '(**yolov3_args)\n', (804, 819), False, 'from yolo import YOLO\n'), ((3217, 3289), 'cv2.resize', 'cv2.resize', (['origin', 'None'], {'fx': '(0.5)', 'fy': '(0.5)', 'interpolation': 'cv2.INTER_LINEAR'}), '(origin, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)\n', (3227, 3289), False, 'import cv2\n'), ((3373, 3409), 'cv2.cvtColor', 'cv2.cvtColor', (['nor', 'cv2.COLOR_RGB2BGR'], {}), '(nor, cv2.COLOR_RGB2BGR)\n', (3385, 3409), False, 'import cv2\n'), ((3433, 3476), 'cv2.cvtColor', 'cv2.cvtColor', (['image_bgr', 'cv2.COLOR_BGR2GRAY'], {}), '(image_bgr, cv2.COLOR_BGR2GRAY)\n', (3445, 3476), False, 'import cv2\n'), ((3497, 3522), 'cv2.medianBlur', 'cv2.medianBlur', (['origin', '(1)'], {}), '(origin, 1)\n', (3511, 3522), False, 'import cv2\n'), ((3578, 3621), 'cv2.Canny', 'cv2.Canny', (['median', '(250)', '(350)'], {'apertureSize': '(3)'}), '(median, 250, 350, apertureSize=3)\n', (3587, 3621), False, 'import cv2\n'), ((3759, 3811), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(5, 5)'], {}), '(cv2.MORPH_ELLIPSE, (5, 5))\n', (3784, 3811), False, 'import cv2\n'), ((4006, 4047), 'cv2.HoughLines', 'cv2.HoughLines', (['edges', '(1)', '(np.pi / 180)', '(60)'], {}), '(edges, 1, np.pi / 180, 60)\n', (4020, 4047), False, 'import cv2\n'), ((7798, 7868), 'cv2.imwrite', 'cv2.imwrite', (['"""D:/git/work/keras-yolo3/kuangxuanimages/edge.jpg"""', 'edges'], {}), "('D:/git/work/keras-yolo3/kuangxuanimages/edge.jpg', edges)\n", (7809, 7868), False, 'import cv2\n'), ((7878, 7951), 'cv2.imwrite', 'cv2.imwrite', (['"""D:/git/work/keras-yolo3/kuangxuanimages/result.jpg"""', 'result'], {}), "('D:/git/work/keras-yolo3/kuangxuanimages/result.jpg', result)\n", (7889, 7951), False, 'import cv2\n'), ((8871, 8940), 'cv2.imread', 'cv2.imread', (['"""D:/git/work/keras-yolo3/kuangxuanimages/detected.jpg"""', '(0)'], {}), "('D:/git/work/keras-yolo3/kuangxuanimages/detected.jpg', 0)\n", (8881, 8940), False, 'import cv2\n'), ((9149, 9236), 'cv2.imwrite', 'cv2.imwrite', (['"""D:/git/work/keras-yolo3/kuangxuanimages/read_num.jpg"""', 'image_detected'], {}), "('D:/git/work/keras-yolo3/kuangxuanimages/read_num.jpg',\n image_detected)\n", (9160, 9236), False, 'import cv2\n'), ((9242, 9279), 'cv2.imshow', 'cv2.imshow', (['"""ReadNum"""', 'image_detected'], {}), "('ReadNum', image_detected)\n", (9252, 9279), False, 'import cv2\n'), ((9321, 9335), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (9332, 9335), False, 'import cv2\n'), ((9345, 9368), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9366, 9368), False, 'import cv2\n'), ((9917, 9982), 'cv2.VideoWriter', 'cv2.VideoWriter', (['output_path', 'video_FourCC', 'video_fps', 'video_size'], {}), '(output_path, video_FourCC, video_fps, video_size)\n', (9932, 9982), False, 'import cv2\n'), ((10483, 10505), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (10498, 10505), False, 'from PIL import Image\n'), ((10600, 10617), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (10610, 10617), True, 'import numpy as np\n'), ((10716, 10794), 'cv2.imwrite', 
'cv2.imwrite', (['"""D:/git/work/keras-yolo3/kuangxuanimages/detected.jpg"""', 'image_bgr'], {}), "('D:/git/work/keras-yolo3/kuangxuanimages/detected.jpg', image_bgr)\n", (10727, 10794), False, 'import cv2\n'), ((16831, 16875), 'cv2.namedWindow', 'cv2.namedWindow', (['"""result"""', 'cv2.WINDOW_NORMAL'], {}), "('result', cv2.WINDOW_NORMAL)\n", (16846, 16875), False, 'import cv2\n'), ((17014, 17042), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'origin'], {}), "('result', origin)\n", (17024, 17042), False, 'import cv2\n'), ((1005, 1026), 'PIL.Image.open', 'Image.open', (['ImagePath'], {}), '(ImagePath)\n', (1015, 1026), False, 'from PIL import Image\n'), ((1123, 1140), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1133, 1140), True, 'import numpy as np\n'), ((1253, 1286), 'cv2.imwrite', 'cv2.imwrite', (['ImageName', 'image_bgr'], {}), '(ImageName, image_bgr)\n', (1264, 1286), False, 'import cv2\n'), ((1330, 1349), 'yolo.YOLO', 'YOLO', ([], {}), '(**yolov3_args)\n', (1334, 1349), False, 'from yolo import YOLO\n'), ((1365, 1385), 'PIL.Image.open', 'Image.open', (['pic_path'], {}), '(pic_path)\n', (1375, 1385), False, 'from PIL import Image\n'), ((1407, 1427), 'cv2.imread', 'cv2.imread', (['pic_path'], {}), '(pic_path)\n', (1417, 1427), False, 'import cv2\n'), ((1525, 1542), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1535, 1542), True, 'import numpy as np\n'), ((1652, 1730), 'cv2.imwrite', 'cv2.imwrite', (['"""D:/git/work/keras-yolo3/kuangxuanimages/detected.jpg"""', 'image_bgr'], {}), "('D:/git/work/keras-yolo3/kuangxuanimages/detected.jpg', image_bgr)\n", (1663, 1730), False, 'import cv2\n'), ((10430, 10461), 'cv2.imwrite', 'cv2.imwrite', (['strname', 'new_image'], {}), '(strname, new_image)\n', (10441, 10461), False, 'import cv2\n'), ((10652, 10670), 'numpy.asarray', 'np.asarray', (['origin'], {}), '(origin)\n', (10662, 10670), True, 'import numpy as np\n'), ((11877, 11956), 'cv2.resize', 'cv2.resize', (['croped_region', 'None'], {'fx': '(0.5)', 'fy': '(0.5)', 'interpolation': 'cv2.INTER_LINEAR'}), '(croped_region, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)\n', (11887, 11956), False, 'import cv2\n'), ((12047, 12083), 'cv2.cvtColor', 'cv2.cvtColor', (['nor', 'cv2.COLOR_RGB2BGR'], {}), '(nor, cv2.COLOR_RGB2BGR)\n', (12059, 12083), False, 'import cv2\n'), ((12114, 12157), 'cv2.cvtColor', 'cv2.cvtColor', (['image_bgr', 'cv2.COLOR_BGR2GRAY'], {}), '(image_bgr, cv2.COLOR_BGR2GRAY)\n', (12126, 12157), False, 'import cv2\n'), ((12182, 12207), 'cv2.medianBlur', 'cv2.medianBlur', (['origin', '(1)'], {}), '(origin, 1)\n', (12196, 12207), False, 'import cv2\n'), ((12269, 12312), 'cv2.Canny', 'cv2.Canny', (['median', '(250)', '(350)'], {'apertureSize': '(3)'}), '(median, 250, 350, apertureSize=3)\n', (12278, 12312), False, 'import cv2\n'), ((12461, 12513), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(5, 5)'], {}), '(cv2.MORPH_ELLIPSE, (5, 5))\n', (12486, 12513), False, 'import cv2\n'), ((12723, 12764), 'cv2.HoughLines', 'cv2.HoughLines', (['edges', '(1)', '(np.pi / 180)', '(60)'], {}), '(edges, 1, np.pi / 180, 60)\n', (12737, 12764), False, 'import cv2\n'), ((1188, 1206), 'numpy.asarray', 'np.asarray', (['origin'], {}), '(origin)\n', (1198, 1206), True, 'import numpy as np\n'), ((1588, 1606), 'numpy.asarray', 'np.asarray', (['origin'], {}), '(origin)\n', (1598, 1606), True, 'import numpy as np\n'), ((2999, 3013), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3010, 3013), False, 'import cv2\n'), ((3027, 3050), 
'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3048, 3050), False, 'import cv2\n'), ((4785, 4840), 'cv2.line', 'cv2.line', (['result', 'pt1', 'pt2', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)\n', (4793, 4840), False, 'import cv2\n'), ((5178, 5233), 'cv2.line', 'cv2.line', (['result', 'pt1', 'pt2', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)\n', (5186, 5233), False, 'import cv2\n'), ((5993, 6048), 'cv2.line', 'cv2.line', (['result', 'pt1', 'pt2', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)\n', (6001, 6048), False, 'import cv2\n'), ((6386, 6441), 'cv2.line', 'cv2.line', (['result', 'pt1', 'pt2', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)\n', (6394, 6441), False, 'import cv2\n'), ((7200, 7255), 'cv2.line', 'cv2.line', (['result', 'pt1', 'pt2', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)\n', (7208, 7255), False, 'import cv2\n'), ((7593, 7648), 'cv2.line', 'cv2.line', (['result', 'pt1', 'pt2', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)\n', (7601, 7648), False, 'import cv2\n'), ((17108, 17122), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (17119, 17122), False, 'import cv2\n'), ((13558, 13613), 'cv2.line', 'cv2.line', (['result', 'pt1', 'pt2', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)\n', (13566, 13613), False, 'import cv2\n'), ((13983, 14038), 'cv2.line', 'cv2.line', (['result', 'pt1', 'pt2', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)\n', (13991, 14038), False, 'import cv2\n'), ((14854, 14909), 'cv2.line', 'cv2.line', (['result', 'pt1', 'pt2', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)\n', (14862, 14909), False, 'import cv2\n'), ((15279, 15334), 'cv2.line', 'cv2.line', (['result', 'pt1', 'pt2', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)\n', (15287, 15334), False, 'import cv2\n'), ((16149, 16204), 'cv2.line', 'cv2.line', (['result', 'pt1', 'pt2', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)\n', (16157, 16204), False, 'import cv2\n'), ((16574, 16629), 'cv2.line', 'cv2.line', (['result', 'pt1', 'pt2', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(result, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)\n', (16582, 16629), False, 'import cv2\n'), ((11433, 11448), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (11441, 11448), True, 'import numpy as np\n'), ((11523, 11538), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (11531, 11538), True, 'import numpy as np\n'), ((2364, 2379), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (2372, 2379), True, 'import numpy as np\n'), ((2454, 2469), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (2462, 2469), True, 'import numpy as np\n'), ((4589, 4602), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4595, 4602), True, 'import numpy as np\n'), ((4709, 4722), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4715, 4722), True, 'import numpy as np\n'), ((4985, 4998), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4991, 4998), True, 'import numpy as np\n'), ((5119, 5132), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5125, 5132), True, 'import numpy as 
np\n'), ((5797, 5810), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5803, 5810), True, 'import numpy as np\n'), ((5917, 5930), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5923, 5930), True, 'import numpy as np\n'), ((6193, 6206), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6199, 6206), True, 'import numpy as np\n'), ((6327, 6340), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6333, 6340), True, 'import numpy as np\n'), ((7004, 7017), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7010, 7017), True, 'import numpy as np\n'), ((7124, 7137), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7130, 7137), True, 'import numpy as np\n'), ((7400, 7413), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7406, 7413), True, 'import numpy as np\n'), ((7534, 7547), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7540, 7547), True, 'import numpy as np\n'), ((11272, 11291), 'numpy.floor', 'np.floor', (['(top + 0.5)'], {}), '(top + 0.5)\n', (11280, 11291), True, 'import numpy as np\n'), ((11341, 11361), 'numpy.floor', 'np.floor', (['(left + 0.5)'], {}), '(left + 0.5)\n', (11349, 11361), True, 'import numpy as np\n'), ((11453, 11475), 'numpy.floor', 'np.floor', (['(bottom + 0.5)'], {}), '(bottom + 0.5)\n', (11461, 11475), True, 'import numpy as np\n'), ((11543, 11564), 'numpy.floor', 'np.floor', (['(right + 0.5)'], {}), '(right + 0.5)\n', (11551, 11564), True, 'import numpy as np\n'), ((2203, 2222), 'numpy.floor', 'np.floor', (['(top + 0.5)'], {}), '(top + 0.5)\n', (2211, 2222), True, 'import numpy as np\n'), ((2272, 2292), 'numpy.floor', 'np.floor', (['(left + 0.5)'], {}), '(left + 0.5)\n', (2280, 2292), True, 'import numpy as np\n'), ((2384, 2406), 'numpy.floor', 'np.floor', (['(bottom + 0.5)'], {}), '(bottom + 0.5)\n', (2392, 2406), True, 'import numpy as np\n'), ((2474, 2495), 'numpy.floor', 'np.floor', (['(right + 0.5)'], {}), '(right + 0.5)\n', (2482, 2495), True, 'import numpy as np\n'), ((13346, 13359), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (13352, 13359), True, 'import numpy as np\n'), ((13474, 13487), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (13480, 13487), True, 'import numpy as np\n'), ((13774, 13787), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (13780, 13787), True, 'import numpy as np\n'), ((13916, 13929), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (13922, 13929), True, 'import numpy as np\n'), ((14642, 14655), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (14648, 14655), True, 'import numpy as np\n'), ((14770, 14783), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (14776, 14783), True, 'import numpy as np\n'), ((15070, 15083), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (15076, 15083), True, 'import numpy as np\n'), ((15212, 15225), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (15218, 15225), True, 'import numpy as np\n'), ((15937, 15950), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (15943, 15950), True, 'import numpy as np\n'), ((16065, 16078), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (16071, 16078), True, 'import numpy as np\n'), ((16365, 16378), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (16371, 16378), True, 'import numpy as np\n'), ((16507, 16520), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (16513, 16520), True, 'import numpy as np\n'), ((4692, 4705), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4698, 4705), True, 'import numpy as np\n'), ((5102, 5115), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', 
(5108, 5115), True, 'import numpy as np\n'), ((5900, 5913), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5906, 5913), True, 'import numpy as np\n'), ((6310, 6323), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6316, 6323), True, 'import numpy as np\n'), ((7107, 7120), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7113, 7120), True, 'import numpy as np\n'), ((7517, 7530), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7523, 7530), True, 'import numpy as np\n'), ((13457, 13470), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (13463, 13470), True, 'import numpy as np\n'), ((13899, 13912), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (13905, 13912), True, 'import numpy as np\n'), ((14753, 14766), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (14759, 14766), True, 'import numpy as np\n'), ((15195, 15208), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (15201, 15208), True, 'import numpy as np\n'), ((16048, 16061), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (16054, 16061), True, 'import numpy as np\n'), ((16490, 16503), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (16496, 16503), True, 'import numpy as np\n')] |
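# The meter reading in the script above is recovered by linearly interpolating the needle's
# Hough angle between the angles of the two scale end-marks; stripped of the hand-tuned
# offsets, the core mapping reduces to this sketch (the function and its arguments are
# illustrative, not part of the script):
def angle_to_reading(needle_deg, zero_deg, full_deg, full_scale_value):
    # fraction of the sweep covered by the needle, clamped to the scale
    fraction = (needle_deg - zero_deg) / (full_deg - zero_deg)
    fraction = min(max(fraction, 0.0), 1.0)
    return fraction * full_scale_value

# e.g. a needle at 135 degrees on a gauge whose scale spans 45 to 315 degrees, full scale 1.6:
print(angle_to_reading(135, 45, 315, 1.6))  # -> 0.533...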
import os
import time
import math
from typing import Optional, Tuple
from itertools import zip_longest
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from ufotest.util import cprint, cresult
from ufotest.util import setup_environment, random_string, force_aspect
from ufotest.camera import save_frame, import_raw, get_frame, UfoCamera
from ufotest.config import CONFIG
from ufotest.exceptions import PciError, FrameDecodingError
from ufotest.testing import (AbstractTest,
TestRunner,
ImageTestResult,
CombinedTestResult,
DictTestResult,
FigureTestResult)
from ufotest.testing import MessageTestResult
# This test case is essentially supposed to capture a single frame and potentially even display this frame inside of
# the test report. Obviously this is not a very methodical test. Its purpose is more to heuristically check whether
# anything is working at all. If the image can be displayed, it might even help a human operator
# who checks the report to get a feeling for how the camera is doing.
class AcquireSingleFrame(AbstractTest):
"""
Acquires a single frame from the camera and puts the image into the test report.
"""
MAX_PIXEL_VALUE = 4095
LOW_PERCENTILE = 1
HIGH_PERCENTILE = 99
name = 'single_frame'
description = (
'Requests a single frame from the camera. The contents of this frame are not tested in any way. '
'The test is simply meant to test if a frame can be requested from the camera at all. If the frame was '
'successfully taken, this frame is being converted into an image and this image is then also displayed in the '
'test report. Additionally, the histogram of the frame will be plotted.'
)
def __init__(self, test_runner: TestRunner):
AbstractTest.__init__(self, test_runner)
# So the intention is to somehow include the frame image into the test report. For that purpose we will have
# to save the image into the folder for this test run first.
# So this is actually pretty easy since I reworked the testing system: Each test case gets passed the test
# runner object which will ultimately execute the test. Thus, the test case also has access to the test context
# which contains all the information we could wish for!
self.file_name = 'single_frame.png'
self.frame_path = os.path.join(self.context.folder_path, self.file_name)
self.frame: Optional[np.array] = None
self.frame_flat: Optional[np.array] = None
self.histogram_values: Optional[np.array] = None
self.histogram_x: Optional[np.array] = None
self.top_percentile: Optional[int] = None
self.top_x: Optional[int] = None
self.bottom_percentile: Optional[int] = None
self.bottom_x: Optional[int] = None
def run(self):
# -- SETTING UP ENV VARIABLES
setup_environment()
# -- REQUESTING FRAME FROM CAMERA
# The "capture_frame" method will query the camera object to get a frame in the format of a numpy 2D array and
# then save this into the self.frame property and a flattened 1D version of the frame to the self.frame_flat
# variable
self.capture_frame()
cprint(f'captured frame with {len(self.frame_flat)} pixels')
# -- CREATING THE HISTOGRAM
self.calculate_histogram()
cprint(f'created histogram')
fig_frame = self.create_frame_figure()
fig_frame_description = (
f'A single frame acquired from the camera. (left) The image as it was taken from the camera. The image '
f'itself is not colored, but the pixel range is converted into a color map, where 0 corresponds to dark '
f'blue colors and the maximum pixel value {self.MAX_PIXEL_VALUE} to bright yellow. (right) The frame with '
f'a contrast increasing algorithm applied to it, which will stretch the histogram to take up all the '
f'available space up to the max pixel value.'
)
fig_hist = self.create_histogram_figure()
fig_hist_description = (
f'The histogram of the frame. (left) Shows the histogram, where the bounds of the plot are set to the '
f'minimum and maximum possible pixel values with the currently used detector. (right) Shows the zoomed '
f'version of the histogram, where the borders of the plot are adjusted to start at the 1st percentile and '
f'end at the 99th percentile.'
)
cprint('saved final figures')
return CombinedTestResult(
FigureTestResult(0, self.context, fig_frame, fig_frame_description),
FigureTestResult(0, self.context, fig_hist, fig_hist_description)
)
def capture_frame(self):
self.camera.set_prop('exposure_time', 25)
self.frame = self.camera.get_frame()
self.frame_flat = self.frame.flatten()
def calculate_histogram(self):
self.histogram_values, self.histogram_x = np.histogram(self.frame_flat, bins=range(0, self.MAX_PIXEL_VALUE))
self.histogram_x = self.histogram_x[:-1]
histogram_cumsum = np.cumsum(self.histogram_values)
histogram_sum = np.sum(self.histogram_values)
        # default=0 guards against an empty generator, i.e. the case where even the
        # first bin already exceeds the low-percentile threshold
        self.bottom_x = max((x
                             for x, val in zip(self.histogram_x, histogram_cumsum)
                             if val <= (self.LOW_PERCENTILE / 100) * histogram_sum), default=0)
self.top_x = min(x
for x, val in zip(self.histogram_x, histogram_cumsum)
if val >= (self.HIGH_PERCENTILE / 100) * histogram_sum)
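        # Worked example (illustrative): with histogram_sum = 1000, bottom_x ends up
        # as the largest bin whose cumulative count is still <= 10 (LOW_PERCENTILE = 1),
        # and top_x as the smallest bin whose cumulative count already reaches 990
        # (HIGH_PERCENTILE = 99).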
def create_frame_figure(self) -> plt.Figure:
fig, (ax_frame, ax_frame_mod) = plt.subplots(nrows=1, ncols=2, figsize=(20, 15))
norm = mcolors.Normalize(vmin=0, vmax=self.MAX_PIXEL_VALUE)
# ~ plotting the frame as an image
ax_frame.imshow(self.frame, norm=norm)
ax_frame.set_title('Captured Frame')
# ~ plotting the frame with increased contrast
frame_mod = self.increase_frame_contrast(self.frame)
ax_frame_mod.imshow(frame_mod, norm=norm)
ax_frame_mod.set_title('Captured Frame - Increased Contrast')
return fig
def create_histogram_figure(self) -> plt.Figure:
fig, (ax_hist, ax_hist_zoom) = plt.subplots(nrows=1, ncols=2, figsize=(20, 15))
hist_bins = list(range(0, self.MAX_PIXEL_VALUE))
ax_hist.hist(self.frame_flat, bins=hist_bins)
ax_hist.set_title('Captured Frame - Histogram')
ax_hist.set_xlabel('Pixel Values')
ax_hist.set_ylabel('Occurrences')
self.force_aspect(ax_hist, aspect=0.9)
ax_hist_zoom.hist(self.frame_flat, bins=hist_bins)
ax_hist_zoom.set_title('Captured Frame - Zoomed Histogram')
ax_hist_zoom.set_xlabel('Pixel Values')
ax_hist_zoom.set_ylabel('Occurrences')
ax_hist_zoom.set_xlim([self.bottom_x, self.top_x])
self.force_aspect(ax_hist_zoom, aspect=0.9)
return fig
def increase_frame_contrast(self, frame: np.ndarray) -> np.ndarray:
low_value = self.bottom_x
high_value = self.top_x
difference = high_value - low_value
# The absence of this section caused an error before. There is quite a reasonable probability, that this
# difference is actually 0 because the image just is so homogeneous. If that is the case we do not perform
# the procedure to increase the contrast (Division by zero!) and instead return the original frame
if difference == 0:
return frame
        # operate on the `frame` argument (not self.frame) so the method honors its parameter
        frame_result = np.zeros(shape=frame.shape, dtype=np.int32)
        for i in range(len(frame_result)):
            for j in range(len(frame_result[i])):
                new_value = (self.MAX_PIXEL_VALUE / difference) * frame[i][j] - \
                            (self.MAX_PIXEL_VALUE / difference) * low_value
                frame_result[i][j] = min(new_value, self.MAX_PIXEL_VALUE)
return frame_result
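    # A vectorized equivalent of the loop above (sketch, kept as a comment so the
    # original element-wise behavior stays untouched):
    #   scale = self.MAX_PIXEL_VALUE / difference
    #   frame_result = np.minimum(scale * (frame - low_value),
    #                             self.MAX_PIXEL_VALUE).astype(np.int32)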
@classmethod
def force_aspect(cls, ax, aspect: float = 1):
x_min, x_max = ax.get_xlim()
y_min, y_max = ax.get_ylim()
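        # matplotlib interprets `aspect` in data coordinates, so to obtain a fixed
        # on-screen height/width ratio we rescale the requested aspect by the ratio
        # of the data ranges.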
base = abs(x_max - x_min) / abs(y_max - y_min)
ax.set_aspect(aspect * base)
class FrameAcquisitionTime(AbstractTest):
"""
Acquires multiple frames to determine the average time needed to per frame acquisition.
"""
FRAME_COUNT = 25
BATCH_COUNT = 5
name = 'frame_time'
description = (
'This test case acquires multiple frames from the camera and records the time needed for this process. It '
'then calculates the average time required to fetch a frame. This is done for multiple batches and the average '
        'times for all these batches are plotted. Individual errors during frame acquisition are ignored, but if more '
'than half of the frame requests fail, the test is declared as not passed.'
)
def __init__(self, test_runner: TestRunner, frame_count=FRAME_COUNT, batch_count=BATCH_COUNT):
AbstractTest.__init__(self, test_runner)
self.frame_count = frame_count
self.batch_count = batch_count
self.times = np.zeros(shape=(self.batch_count, self.frame_count), dtype=np.float32)
        self.errors = np.zeros(shape=(self.batch_count, self.frame_count), dtype=bool)  # np.bool is deprecated
def run(self):
# This method will actually request all the frames from the camera and save each individual time into the
# self.times matrix and also fills out the self.errors matrix to true at that location if a frame request
# results in an error. Generally, this takes some time.
self.acquire_frames()
# This test case will be declared a failure if more than half the frames could not be acquired due to error.
total_errors = np.sum(self.errors)
exit_code = bool(total_errors >= 0.5 * self.batch_count * self.frame_count)
fig = self.create_figure()
fig_description = (
            f'A total of {self.batch_count} batches was recorded, each consisting of {self.frame_count} independent frames. '
f'(left) The plot shows the average acquisition time in milliseconds for every batch as well as the '
            f'standard deviation within that batch. (right) This bar chart shows how many frames in each batch could '
f'not be acquired due to some error.'
)
figure_result = FigureTestResult(exit_code, self.context, fig, fig_description)
return figure_result
def acquire_frames(self):
for b in range(self.batch_count):
for n in range(self.frame_count):
try:
start_time = time.time()
self.camera.get_frame()
                    # time.time() measures seconds, but we want milliseconds, so we multiply by 1000
total_time = time.time() - start_time
self.times[b][n] = total_time * 1000
except (PciError, FrameDecodingError) as e:
self.errors[b][n] = True
self.logger.warning(f'Batch {b}, frame {n} failed with error: {e.__class__}')
def create_figure(self):
fig, (ax_times, ax_errors) = plt.subplots(1, 2, figsize=(15, 20))
masked_times = np.ma.masked_array(self.times, self.errors)
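        # Entries masked as errors (self.errors == True) are excluded from the
        # per-batch mean and standard deviation computed below.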
batch_indices = np.array(range(self.batch_count))
batch_times = np.mean(masked_times, axis=1)
batch_stds = np.std(masked_times, axis=1)
# https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.errorbar.html
ax_times.plot(batch_indices, batch_times, color='blue')
ax_times.errorbar(batch_indices, batch_times, yerr=batch_stds, capsize=4.0)
ax_times.set_title('Average acquisition time per batch')
ax_times.set_ylim(0)
ax_times.set_xticks(batch_indices)
ax_times.set_ylabel('Avg. time [ms]')
ax_times.set_xlabel('Batch index')
force_aspect(ax_times, aspect=1)
# https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.bar.html
batch_errors = np.sum(self.errors, axis=1)
ax_errors.bar(batch_indices, batch_errors, color='red')
        ax_errors.set_title('Failed frames per batch')
ax_errors.set_ylim([0, 10])
ax_errors.set_xlabel('Batch index')
force_aspect(ax_errors, aspect=1)
return fig
class SingleFrameStatistics(AbstractTest):
"""
Acquires a single frame from the camera and calculates some simple statistics for this frame. Such as the
mean, min and max value and the standard deviation. It also creates a histogram for the frame and that plot is
displayed in the test report.
"""
NDIGITS = 3
name = 'frame_statistics'
description = (
'Requests a single frame from the camera. This frame is then used to compute some simple statistical '
'properties. These are the mean grayscale value, the min value, the max value, the variance and the '
        'standard deviation. Additionally, a histogram of the grayscale values within the frame is computed. '
'This histogram is saved as a plot and displayed in the test report. This test relies on the assumption, '
'that the camera is properly setup and ready to accept frame requests.'
)
def __init__(self, test_runner: TestRunner):
AbstractTest.__init__(self, test_runner)
def run(self):
setup_environment()
# -- ACQUIRE FRAME AS MATRIX
file_path = get_frame()
frames = import_raw(file_path, 1, self.config.get_sensor_width(), self.config.get_sensor_height())
frame = frames[0]
stats = self.create_frame_statistics(frame)
dict_result = DictTestResult(0, stats)
fig = self.create_histogram_figure(frame)
figure_description = (
'This figure shows a histogram of the pixel values within the captured frame.'
)
figure_result = FigureTestResult(0, self.context, fig, figure_description)
return CombinedTestResult(
dict_result,
figure_result
)
def create_frame_statistics(self, frame: np.ndarray) -> dict:
return {
'average': round(float(np.mean(frame)), ndigits=self.NDIGITS),
'variance': round(float(np.var(frame)), ndigits=self.NDIGITS),
'standard deviation': round(float(np.std(frame)), ndigits=self.NDIGITS),
'min value': np.min(frame),
'max value': np.max(frame)
}
@classmethod
def create_histogram_figure(cls, frame: np.ndarray) -> plt.Figure:
fig, ax = plt.subplots(nrows=1, ncols=1)
frame_data = frame.flatten()
ax.hist(frame_data, bins=list(range(0, 4096)))
ax.set_title('Histogram of frame values')
ax.set_xlabel('Pixel value')
ax.set_ylabel('Number of occurrences')
return fig
class ExposureTimeImagesTest(AbstractTest):
COLUMN_COUNT = 3
MAX_PIXEL_VALUE = 4095
EXPOSURE_TIME_VALUES = list(range(0, 101, 5))
name = 'exposure_time_images'
description = (
f'This test varies the exposure time between the following values: '
f'{", ".join(map(str, EXPOSURE_TIME_VALUES))}. For each '
f'exposure time, one frame is taken from the camera and all the resulting frames are shown in a figure. This '
        f'serves merely as a visual indication of whether the exposure time setting works.'
)
def __init__(self, test_runner: TestRunner):
super(ExposureTimeImagesTest, self).__init__(test_runner)
self.frames = {}
def run(self):
# The idea of the test is to set the exposure time to different values and then simple show all
# the images which have been taken
for exposure_time in self.EXPOSURE_TIME_VALUES:
try:
self.camera.set_prop('exposure_time', exposure_time)
frame = self.camera.get_frame()
self.frames[exposure_time] = frame
cprint(f'Acquired frame for exp time: {exposure_time}')
except (FrameDecodingError, PciError):
self.frames[exposure_time] = None
cprint(f'Failed for exp time: {exposure_time}')
fig = self.create_frames_figure()
description = 'none'
return FigureTestResult(0, self.test_runner.context, fig, description)
def create_frames_figure(self) -> plt.Figure:
row_count = math.ceil(len(self.frames) / self.COLUMN_COUNT)
fig, rows = plt.subplots(ncols=self.COLUMN_COUNT, nrows=row_count, figsize=(20, 5 * row_count))
# "rows" is a list of lists, where each sub list contains the column Axes objects for that row. Here
# we flatten it and turn it into just a single list of Axes instances
axs = [axes for sublist in rows for axes in sublist]
norm = mcolors.Normalize(vmin=0, vmax=self.MAX_PIXEL_VALUE)
for (exposure_time, frame), ax in zip_longest(self.frames.items(), axs, fillvalue=(None, None)):
# Disable the axis ticks, not needed for images
ax.tick_params(
axis='x',
which='both',
bottom=False,
top=False,
labelbottom=False
)
ax.tick_params(
axis='y',
which='both',
left=False,
right=False,
labelleft=False
)
if frame is None:
fig.delaxes(ax)
else:
average = np.mean(frame)
ax.imshow(frame, norm=norm)
ax.set_title(f'Exposure time: {exposure_time} - avg: {average:0.2f}')
return fig
import random
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from scipy.sparse import identity
import os
import pickle
def parallel_generate_walks(d_graph: dict, global_walk_length: int, num_walks: int, cpu_num: int,
sampling_strategy: dict = None, num_walks_key: str = None, walk_length_key: str = None,
neighbors_key: str = None, probabilities_key: str = None, first_travel_key: str = None,
quiet: bool = False) -> list:
"""
Generates the random walks which will be used as the skip-gram input.
:return: List of walks. Each walk is a list of nodes.
"""
    walks = list()
    # Treat a missing sampling strategy as an empty one: the dict is probed with `in`
    # below, which would raise a TypeError on the default value None
    if sampling_strategy is None:
        sampling_strategy = {}
if not quiet:
pbar = tqdm(total=num_walks, desc='Generating walks (CPU: {})'.format(cpu_num))
for n_walk in range(num_walks):
# Update progress bar
if not quiet:
pbar.update(1)
# Shuffle the nodes
shuffled_nodes = list(d_graph.keys())
random.shuffle(shuffled_nodes)
# Start a random walk from every node
for source in shuffled_nodes:
# Skip nodes with specific num_walks
if source in sampling_strategy and \
num_walks_key in sampling_strategy[source] and \
sampling_strategy[source][num_walks_key] <= n_walk:
continue
# Start walk
walk = [source]
# Calculate walk length
if source in sampling_strategy:
walk_length = sampling_strategy[source].get(walk_length_key, global_walk_length)
else:
walk_length = global_walk_length
# Perform walk
while len(walk) < walk_length:
walk_options = d_graph[walk[-1]].get(neighbors_key, None)
# Skip dead end nodes
if not walk_options:
break
if len(walk) == 1: # For the first step
probabilities = d_graph[walk[-1]][first_travel_key]
walk_to = np.random.choice(walk_options, size=1, p=probabilities)[0]
else:
probabilities = d_graph[walk[-1]][probabilities_key][walk[-2]]
walk_to = np.random.choice(walk_options, size=1, p=probabilities)[0]
walk.append(walk_to)
walk = list(map(str, walk)) # Convert all to strings
walks.append(walk)
if not quiet:
pbar.close()
return walks
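# Minimal usage sketch (hypothetical d_graph layout; the key names below are
# assumptions that must match the *_key arguments):
# >>> d_graph = {'a': {'neighbors': ['b'], 'first_travel': np.array([1.0]),
# ...                  'probabilities': {'b': np.array([1.0])}},
# ...            'b': {'neighbors': ['a'], 'first_travel': np.array([1.0]),
# ...                  'probabilities': {'a': np.array([1.0])}}}
# >>> walks = parallel_generate_walks(d_graph, global_walk_length=5, num_walks=2,
# ...                                 cpu_num=0, sampling_strategy={},
# ...                                 num_walks_key='num_walks',
# ...                                 walk_length_key='walk_length',
# ...                                 neighbors_key='neighbors',
# ...                                 probabilities_key='probabilities',
# ...                                 first_travel_key='first_travel', quiet=True)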
def get_probabilities(current_node, A, node_labels_to_int, probabilities_key: str = None, p: float = 1, q: float = 1):
results = []
current_node_pos = node_labels_to_int[current_node]
int_to_node_labels = {v:k for k,v in node_labels_to_int.items()}
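    # For each possible previous node `source` (a neighbor of `current_node`), build
    # the second-order transition distribution out of `current_node`. Reading the
    # weight expression below: a candidate next node x gets weight A[current, x]
    # (1 for an unweighted edge), scaled to 1/p when x is also adjacent to `source`,
    # with an extra (1/q - 1) added for x == source (stepping straight back),
    # assuming no self-loops.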
    id_mat = identity(A.shape[0]).tocsr()  # constant across the loop, so build it once
    for source_pos in A[current_node_pos, :].nonzero()[1]:
        unnormalized_weights = (A[current_node_pos, :] + ((1/p)-1)*(A[current_node_pos, :].multiply(A[source_pos, :])) + ((1/q)-1)*(id_mat[source_pos, :])).data
normalized_weights = unnormalized_weights / unnormalized_weights.sum()
source = int_to_node_labels[source_pos]
results.append((source, normalized_weights))
pd = (current_node, {probabilities_key: dict(results)})
return pd
def get_first_travel(node, A, node_labels_to_int, first_travel_key: str = None, p: float = 1, q: float = 1):
node_pos = node_labels_to_int[node]
ftd = (node, {first_travel_key: (A[node_pos, :] / np.sum(A[node_pos, :])).data})
return ftd
def get_neighbors(node, A, node_labels_to_int):
node_pos = node_labels_to_int[node]
int_to_node_labels = {v:k for k,v in node_labels_to_int.items()}
neighbor_positions = list(A[node_pos, :].nonzero()[1])
neighbors = {"neighbors": [int_to_node_labels[neighbor_pos] for neighbor_pos in neighbor_positions]}
return (node, neighbors)
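# Sketch of how these helpers could be assembled into the d_graph consumed by
# parallel_generate_walks (hypothetical glue code; the key strings are assumptions):
# >>> d_graph = {}
# >>> for node in node_labels_to_int:
# ...     d_graph.setdefault(node, {}).update(get_neighbors(node, A, node_labels_to_int)[1])
# ...     d_graph[node].update(get_first_travel(node, A, node_labels_to_int, 'first_travel')[1])
# ...     d_graph[node].update(get_probabilities(node, A, node_labels_to_int, 'probabilities')[1])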
def get_probabilities_chunked(chunk, chunkid, output_folder, *args, quiet=True):
if not quiet:
pbar = tqdm(total=len(chunk), desc='Generating probabilities (chunk: {})'.format(chunkid), position=chunkid)
probs = []
for node in chunk:
r = get_probabilities(node, *args)
probs.append(r)
if not quiet:
pbar.update(1)
filename = "".join([str(chunkid), '.pkl'])
filename = os.path.join(output_folder, filename)
pickle.dump(probs, open( filename, "wb" ) )
if not quiet:
pbar.close()
def get_first_travel_chunked(chunk, chunkid, output_folder, *args):
pbar = tqdm(total=len(chunk), desc='Generating first travel probabilities (chunk: {})'.format(chunkid), position=chunkid)
first_travel = []
for node in chunk:
r = get_first_travel(node, *args)
first_travel.append(r)
pbar.update(1)
filename = "".join([str(chunkid), '.pkl'])
filename = os.path.join(output_folder, filename)
pickle.dump(first_travel, open( filename, "wb" ) )
pbar.close()
def get_neighbors_chunked(chunk, chunkid, output_folder, *args):
pbar = tqdm(total=len(chunk), desc='Generating neighbours (chunk: {})'.format(chunkid), position=chunkid)
neighbors = []
for node in chunk:
r = get_neighbors(node, *args)
neighbors.append(r)
pbar.update(1)
filename = "".join([str(chunkid), '.pkl'])
filename = os.path.join(output_folder, filename)
pickle.dump(neighbors, open( filename, "wb" ) )
    pbar.close()
# -*- coding: utf-8 -*-
import os
from typing import Dict, Callable, List, Optional, Tuple
import numpy as np
import matplotlib.pyplot as plt
from time import time
try:
import sklearn.cluster
except ModuleNotFoundError as e:
print(e)
try:
import pyclustering.cluster.kmedoids
import pyclustering.cluster.kmedians
except ModuleNotFoundError as e:
print(e)
try:
    import kmodes.kprototypes
    import kmodes.kmodes  # run_kprototypes falls back to KModes when all features are categorical
except ModuleNotFoundError as e:
print(e)
try:
import sklearn_extra.cluster
except ModuleNotFoundError as e:
print(e)
from stochoptim.scenclust.cost_space_partition import CostSpaceScenarioPartitioning
from stochoptim.scengen.scenario_tree import ScenarioTree
from stochoptim.scengen.decision_process import DecisionProcess
from stochoptim.stochprob.stochastic_problem_basis import StochasticProblemBasis
from stochoptim.stochprob.stochastic_solution_basis import StochasticSolutionBasis
class ScenarioClustering:
"""
Class that wraps together several algorithms to perform scenario reduction on a set of (equal-weight) scenarios
for a two-stage stochastic problem (e.g., K-means, K-medoids, Cost-Space-Scenario-Clustering, Monte Carlo, etc.).
"""
def __init__(self,
scenario_tree: ScenarioTree,
stochastic_problem: StochasticProblemBasis,
is_categorical: Optional[np.ndarray] = None,
opport_cost_matrix: Optional[np.ndarray] = None,
reference_solution: Optional[StochasticSolutionBasis] = None,
load_one_hot_scenarios: bool = False):
"""
Arguments:
----------
scenario_tree: instance of ScenarioTree
The uncertainty representation of the stochastic problem.
stochastic_problem: subclass of StochasticProblemBasis
The two-stage stochastic problem.
is_categorical: 1d-array of shape (n_features,) or None (default: None)
(where `n_features` is the number of features in one scenario.)
Array has True at pos i if the i-th feature is a categorical variable; else False.
opportunity_cost_matrix: 2d-array of shape (n_scenarios, n_scenarios) or None (default: None)
(where `n_scenarios` is the number of scenarios in the scenario tree, i.e., the number of child nodes)
This matrix is used for the Cost Space Scenario Clustering (CSSC) algorithm.
If not provided, the algorithm is not available but the other clustering methods still are.
reference_solution: instance of StochasticSolutionBasis (or subclass) or None (default: None)
            A solution of the stochastic problem that will serve as the reference when computing the error gap.
load_one_hot_scenarios: bool (default: False)
            If True and if the scenarios have some categorical features, a one-hot representation of these
            features is created. It may then be used by K-means, K-medoids, K-medians, etc. to perform clustering on
            categorical variables. (Note that these scenarios will be much larger than the original ones because of
            the one-hot representation.)
"""
# scenarios
self._scenario_tree = scenario_tree
self._scenarios = self._scenario_tree.to_numpy()
self._n_scenarios = len(self._scenarios)
self._n_features = self._scenarios.shape[1]
# categorical features
self._is_categorical = is_categorical
self._unique_cat = set()
self._n_unique_cat = None
self._n_num_features = None
self._n_cat_features = None
self._scen_num_bin = None
# stochastic problem
self._stochastic_problem = stochastic_problem
self._opport_cost_matrix = opport_cost_matrix
self._reference_solution = reference_solution
self._map_rvar_to_nb = self._stochastic_problem.map_rvar_name_to_nb[1]
# clustering methods
self._methods_type_a = ["CSSC", "Kmedoids", "MC"] # methods with representatives
self._methods_type_b = ["Kmeans", "Kmedians"] # methods without representatives
# Kprototypes only available if array `is_categorical` is provided
if self._is_categorical is not None:
self._methods_type_b.append("Kprototypes")
self._methods_available = self._methods_type_a + self._methods_type_b
self._results = {}
self._solutions = {}
self._start = None
self._set_categorical_attributes(load_one_hot_scenarios)
self._check_sanity()
def _set_categorical_attributes(self, load_one_hot_scenarios):
if self._is_categorical is not None:
# check that categories are all integers
unique_cat = np.unique(self._scenarios[:, self._is_categorical])
assert (unique_cat.astype('int') == unique_cat).all(), "Some categories are not integers."
# set unique categories
self._unique_cat = set(int(cat) for cat in unique_cat)
self._n_unique_cat = len(self._unique_cat)
# numerical features
scen_num = self._scenarios[:, ~self._is_categorical]
self._n_num_features = scen_num.shape[1]
# categorical categorical
scen_cat = self._scenarios[:, self._is_categorical]
self._n_cat_features = scen_cat.shape[1]
if load_one_hot_scenarios:
# categorical to binary with more features (one-hot encoding)
                # one-hot encoding of the categorical features: for each category, an
                # indicator block with 1 where the feature equals that category
                scen_bin_3d = {cat: (scen_cat == cat).astype('int') for cat in self._unique_cat}
                scen_bin = np.concatenate([scen_bin_3d[cat] for cat in self._unique_cat], axis=1)
self._scen_num_bin = np.concatenate([scen_num, scen_bin], axis=1)
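                # Layout example (illustrative, assuming the category set iterates as
                # (0, 1)): with categories {0, 1} and two categorical features, a row
                # [0, 1] becomes [1, 0, 0, 1] -- first the indicators for category 0,
                # then for category 1.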
else:
self._n_unique_cat = 0
self._n_num_features = self._n_features
self._n_cat_features = 0
# --- Properties ---
@property
def results(self):
return self._results
@property
def solutions(self):
return self._solutions
@property
def scenarios(self):
return self._scenarios
@property
def n_features(self):
"""Total number of features in a scenario (i.e., number of parameters included in a scenario)."""
return self._n_features
@property
def n_runs(self) -> Dict[str, Dict[int, int]]:
return {method: {card: self.get_n_runs(method, card) for card in self.cardinalities(method)}
for method in self._methods_available}
def cardinalities(self, method):
return sorted([card for meth, card in self._results.keys() if meth == method])
def n_random_features(self, method=None, cardinality=None, index_sample=0):
"""Number of actually random features in the original set of scenarios. A feature is random if it
has a standard deviation > 0."""
n_rand_features = lambda scenarios: len((np.max(scenarios, axis=0) - np.min(scenarios, axis=0)).nonzero()[0])
if method is None:
return n_rand_features(self._scenarios)
else:
return n_rand_features(self.get_scenarios(method, cardinality, index_sample))
# --- Clustering methods ---
def run_cssc(self,
cardinality,
is_cardinality_fixed=True,
solve_mip=True,
n_random_warmstarts=10**3,
timelimit=None,
**kwargs):
"""Scenario clustering by the Cost Space Scenario Clustering (CSSC) method (implemented in the module
`cost_space_partition`).
Arguments:
----------
cardinality: int >= 1
The number of clusters in the partition.
solve_mip: bool (default: True)
If True, the CSSC problem is tackled via the MIP formulation solved using an exact solver.
If False, the CSSC problem is tackled by sampling partitions randomly and picking the best one (in that
            case the `n_random_warmstarts` argument must be >= 1).
n_random_warmstarts: int >= 0 (default: 10**3)
Number of randomly generated partitions out of which the one with the lowest clustering score will
be picked as the best partition or used to initialize the MIP formulation.
timelimit: int >= 1 or None (default: None)
Time limit for the partioning problem. If None, no limit is set.
kwargs:
-------
logfile: bool (default: True)
If True, the solver's log is saved in a file. This file is located in a folder 'logfiles' in the
current directory (the folder is created if it doesn't already exist).
warmstart_partition: tuple of tuples of int, or None (default: None)
Must be None if warmstart is not None.
If provided, this partition is used as warmstart (along with the representatives).
lowerbound: int >= 1 (default: 1)
The minimum number of elements in each subset of the randomly generated warmstart partitions.
uniform_sampling: bool (default: False)
If True, the partitions are sampled from the uniform distribution. However, this is much slower.
If False, a non-uniform distribution is sampled (this distribution still has a positive probability
of selecting any partition).
"""
assert self._opport_cost_matrix is not None, "Provide the opportunity cost matrix to run the CSSC algorithm."
self._check_opportunity_cost_matrix()
assert solve_mip or n_random_warmstarts >= 1, "`n_random_warmstarts` should be >= 1 if solve_mip=False"
self.cssc = CostSpaceScenarioPartitioning(self._opport_cost_matrix)
self._start = time()
if solve_mip:
self.cssc.solve_mip(cardinality, is_cardinality_fixed, timelimit, n_random_warmstarts, **kwargs)
representatives = self.cssc.solution_mip['representatives']
partition = self.cssc.solution_mip['partition']
score = self.cssc.solution_mip['score']
else:
self.cssc.best_random_partition(cardinality, n_random_warmstarts, **kwargs)
representatives = self.cssc.solution_random['representatives']
partition = self.cssc.solution_random['partition']
score = self.cssc.solution_random['score']
self._set_results('CSSC', len(representatives), representatives, partition, None, score)
def run_kmeans(self, cardinality, **kwargs):
"""Scenario clustering by Kmeans method."""
self._start = time()
self.kmeans = sklearn.cluster.KMeans(n_clusters=cardinality, **kwargs)
if self._n_unique_cat > 0: # if some categorical features, clustering done on one-hot encoding of features
self.kmeans.fit(self._scen_num_bin)
num_centroids = self.kmeans.cluster_centers_[:, :self._n_num_features]
bin_centroids = self.kmeans.cluster_centers_[:, -(self._n_cat_features * self._n_unique_cat):]
cat_centroids = self._one_hot_centroids_to_cat(bin_centroids)
self.kmeans.scenarios = np.zeros((cardinality, self.n_features))
self.kmeans.scenarios[:, ~self._is_categorical] = num_centroids
self.kmeans.scenarios[:, self._is_categorical] = cat_centroids
else:
self.kmeans.fit(self._scenarios)
self.kmeans.scenarios = np.array(self.kmeans.cluster_centers_)
self.kmeans.partition = tuple(tuple((self.kmeans.labels_ == i).nonzero()[0]) for i in range(cardinality))
self._set_results('Kmeans', cardinality, None, self.kmeans.partition,
self.kmeans.scenarios, self.kmeans.inertia_)
def run_kmedians(self, cardinality, **kwargs):
"""Scenario clustering by the Kmedian algorithm of 'pyclustering'"""
self._start = time()
random_indices = np.random.choice(range(self._n_scenarios), size=cardinality, replace=False)
if self._n_unique_cat > 0: # if some categorical features, clustering done on one-hot encoding of features
initial_medians = self._scen_num_bin[random_indices]
self.kmedians = pyclustering.cluster.kmedians.kmedians(self._scen_num_bin, initial_medians, **kwargs)
self.kmedians.process()
num_centroids = np.array(self.kmedians.get_medians())[:, :self._n_num_features]
bin_centroids = np.array(self.kmedians.get_medians())[:, -(self._n_cat_features * self._n_unique_cat):]
cat_centroids = self._one_hot_centroids_to_cat(bin_centroids)
self.kmedians.scenarios = np.zeros((cardinality, self.n_features))
self.kmedians.scenarios[:, ~self._is_categorical] = num_centroids
self.kmedians.scenarios[:, self._is_categorical] = cat_centroids
else:
initial_medians = self._scenarios[random_indices]
self.kmedians = pyclustering.cluster.kmedians.kmedians(self._scenarios, initial_medians, **kwargs)
self.kmedians.process()
self.kmedians.scenarios = np.array(self.kmedians.get_medians())
self.kmedians.partition = tuple(tuple(part) for part in self.kmedians.get_clusters())
self.kmedians.score = self.kmedians.get_total_wce()
self._set_results('Kmedians', cardinality, None, self.kmedians.partition,
self.kmedians.scenarios, self.kmedians.score)
def run_kmedoids(self, cardinality, which_kmedoids='pyclustering', **kwargs):
"""Scenario clustering by the Kmedoids algorithm of the 'pyclustering' or 'sklearn_extra' library.
Arguments:
----------
which_kmedoids: {'pyclustering', 'sklearn_extra'}
"""
param = {**{'which_kmedoids': which_kmedoids}, **kwargs}
self._start = time()
if which_kmedoids == 'pyclustering':
initial_medoids = list(np.random.choice(range(self._n_scenarios), size=cardinality, replace=False))
if self._n_unique_cat > 0: # if some categorical features, clustering done on one-hot encoding of features
self.kmedoids = pyclustering.cluster.kmedoids.kmedoids(self._scen_num_bin, initial_medoids, **kwargs)
else:
self.kmedoids = pyclustering.cluster.kmedoids.kmedoids(self._scenarios, initial_medoids, **kwargs)
self.kmedoids.process()
self.kmedoids.score = None
self.kmedoids.partition = tuple([tuple(part) for part in self.kmedoids.get_clusters()])
self.kmedoids.representatives = tuple(self.kmedoids.get_medoids())
elif which_kmedoids == 'sklearn_extra':
self.kmedoids = sklearn_extra.cluster.KMedoids(n_clusters=cardinality, **kwargs)
if self._n_unique_cat > 0: # if some categorical features, clustering done on one-hot encoding of features
self.kmedoids.fit(self._scen_num_bin)
else:
self.kmedoids.fit(self._scenarios)
self.kmedoids.score = self.kmedoids.inertia_
self.kmedoids.representatives = tuple(self.kmedoids.medoid_indices_)
self.kmedoids.partition = tuple(tuple((self.kmedoids.labels_ == i).nonzero()[0]) for i in range(cardinality))
else:
raise ValueError("Wrong `which_kmedoids` argument: should be 'pyclustering' or 'sklearn_extra', "
f"not {which_kmedoids}")
self._set_results('Kmedoids', cardinality, self.kmedoids.representatives, self.kmedoids.partition,
score=self.kmedoids.score, param=param)
def run_kprototypes(self, cardinality, init='Huang', **kwargs):
"""Scenario clustering by Kprototypes method.
init: {'Huang', 'Cao', 'random'} (default: 'Huang')
If 'random', both the centroids of the numerical and categorical variables are initialized by picking a
scenario randomly.
If not 'random', the categorical centroids are initialized by 'Huang' or 'Cao' and the numerical by
k-means++.
"""
assert self._is_categorical is not None, \
"Provide the mask on the categorical variables using `is_categorical`"
self._start = time()
if self._n_cat_features < self.n_features: # use Kprototype
self.kprot = kmodes.kprototypes.KPrototypes(n_clusters=cardinality, init=init, **kwargs)
categorical_indices = self._is_categorical.nonzero()[0]
self.kprot.fit(self._scenarios, categorical=list(categorical_indices))
self.kprot.partition = tuple(tuple((self.kprot.labels_ == i).nonzero()[0]) for i in range(cardinality))
self.kprot.scenarios = -np.ones((cardinality, self._scenarios.shape[1]))
self.kprot.scenarios[:, ~self._is_categorical] = self.kprot.cluster_centroids_[0]
self.kprot.scenarios[:, self._is_categorical] = self.kprot.cluster_centroids_[1]
else: # use Kmodes instead of Kprototype
self.kprot = kmodes.kmodes.KModes(n_clusters=cardinality, init=init, **kwargs)
self.kprot.fit(self._scenarios)
self.kprot.scenarios = np.array(self.kprot.cluster_centroids_)
self.kprot.partition = tuple(tuple((self.kprot.labels_ == i).nonzero()[0]) for i in range(cardinality))
self._set_results('Kprototypes', cardinality, None, self.kprot.partition,
self.kprot.scenarios, self.kprot.cost_)
def run_monte_carlo(self, cardinality):
"""Random scenario clustering by Monte Carlo method."""
self._start = time()
self.mc_representatives = np.random.choice(range(len(self._scenarios)), size=cardinality, replace=False)
self._set_results('MC', cardinality, tuple(self.mc_representatives))
# --- Clustering results ---
def get_n_runs(self, method, cardinality):
"""Return the number of clustering samples performed"""
return len(self._results.get((method, cardinality), []))
def get_scenarios(self, method, cardinality, index_sample=0):
try:
if method in self._methods_type_a:
return np.array([self._scenarios[rep] for rep in self.get_representatives(method, cardinality,
index_sample)])
elif method in self._methods_type_b:
return self._results[method, cardinality][index_sample]['scenarios']
except (KeyError, IndexError):
return None
def get_weights(self, method, cardinality, index_sample=0):
try:
return self._results[method, cardinality][index_sample]['weights']
except (KeyError, IndexError):
return None
def get_representatives(self, method, cardinality, index_sample=0):
try:
return self._results[method, cardinality][index_sample]['reps']
except (KeyError, IndexError):
return None
def get_partition(self, method, cardinality, index_sample=0):
try:
return self._results[method, cardinality][index_sample]['partition']
except (KeyError, IndexError):
return None
def get_clust_time(self, method, cardinality, index_sample=0):
try:
return self._results[method, cardinality][index_sample]['time']
except (KeyError, IndexError):
return None
def get_score(self, method, cardinality, index_sample=0):
try:
return self._results[method, cardinality][index_sample]['score']
except (KeyError, IndexError):
return None
def get_param(self, method, cardinality, index_sample=0):
try:
return self._results[method, cardinality][index_sample]['param']
except (KeyError, IndexError):
return dict()
def _set_results(self, method, cardinality, representatives, partition=None, scenarios=None, score=None, param=None):
if self.get_n_runs(method, cardinality) == 0:
self._results[method, cardinality] = []
# check param
assert isinstance(param, dict) or param is None, f"`param` must be a dictionary or None, not {param}"
param = param if param is not None else dict()
if method in self._methods_type_a:
self._set_results_type_a(method, cardinality, representatives, partition, score, param)
elif method in self._methods_type_b:
self._set_results_type_b(method, cardinality, partition, scenarios, score, param)
else:
raise ValueError(f"{method} is neither of type 'a' nor of type 'b'.")
def _set_results_type_a(self, method, cardinality, representatives, partition, score, param):
"""Set results for methods of type 'a', i.e., those with representatives (e.g., CSSC, Kmedoids, MC)."""
if partition is None: # equal weights
self._results[method, cardinality].append({'reps': representatives,
'weights': np.ones((cardinality,)) / cardinality,
'partition': None,
'score': score,
'param': param,
'time': time() - self._start})
else:
self._results[method, cardinality].append({'reps': representatives,
'weights': np.array([len(part) / self._n_scenarios
for part in partition]),
'partition': partition,
'score': score,
'param': param,
'time': time() - self._start})
def _set_results_type_b(self, method, cardinality, partition, scenarios, score, param):
"""Set results for methods of type 'b', i.e., those with partition but no representatives (e.g., Kmeans)"""
self._results[method, cardinality].append({'scenarios': scenarios,
'weights': np.array([len(part) / self._n_scenarios
for part in partition]),
'partition': partition,
'score': score,
'param': param,
'time': time() - self._start})
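    # Weight example (illustrative): with n_scenarios = 4 and partition
    # ((0, 1, 2), (3,)), the resulting cluster weights are [0.75, 0.25].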
def _one_hot_centroids_to_cat(self, binary_centroids: np.ndarray):
"""
Argument:
---------
binary_centroids: 2d-array of shape (cardinality, n_cat * n_cat_features)
Centroids of the one-hot encoding of categories (with continuous values inside the interval [0, 1])
Returns:
--------
cat_centroids: 2d-array of shape (cardinality, n_cat_feeatures)
Centroids of the categories (with values in `unique_cat`)
"""
cardinality = binary_centroids.shape[0]
assert binary_centroids.shape[1] == self._n_unique_cat * self._n_cat_features, \
(f"Wrong shape for binary centroids, should be ({cardinality}, {self._n_unique_cat * self._n_cat_features}), not "
f"{binary_centroids.shape}")
bin_centroids_3d = np.zeros((self._n_unique_cat, cardinality, self._n_cat_features))
for k, cat in enumerate(self._unique_cat):
bin_centroids_3d[k] = binary_centroids[:, :self._n_cat_features]
binary_centroids = binary_centroids[:, self._n_cat_features:]
cat_centroids = bin_centroids_3d.argmax(axis=0)
# turn category index (k) to actual category (cat)
for k, cat in enumerate(self._unique_cat):
cat_centroids[(cat_centroids == k)] = cat
return cat_centroids
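    # Worked example (illustrative, assuming the set {3, 7} iterates as (3, 7)): with
    # one categorical feature and binary_centroids = [[0.2, 0.8]], the argmax over the
    # category axis picks index 1, which is mapped back to the actual category 7.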
def delete_result(self, method, cardinality, indices_sample):
"""Delete all results that are associated to a specific clustering method, cardinality and sample index.
This deletes the results of the clustering instance as well as all the stochastic problem solutions
obtained for that instance.
Arguments:
----------
indices_sample: int or list of ints
The sample index (or indices) to be deleted.
"""
if isinstance(indices_sample, int):
indices_sample = [indices_sample]
assert len(set(indices_sample)) == len(indices_sample), "Each index should appear only once."
        for index in reversed(sorted(indices_sample)):  # remove from largest to smallest so earlier indices stay valid
# delete from results attribute
self._results[method, cardinality].pop(index)
# delete from solution attribute
self._solutions.pop((method, cardinality, index), None)
def delete_solution(self, method, cardinality, index_sample, timelimit):
"""Delete the solution associated to a specific method, cardinality, sample index and time limit.
Arguments:
----------
timelimit: int/float or 2-tuple of ints/floats
"""
# delete solution with this timelimit (whether it is a clustered or implementation solution)
self._solutions.get((method, cardinality, index_sample), {}).pop(timelimit, None)
# if clustered solution, then delete also the implementation solution obtained from it
if isinstance(timelimit, (float, int)):
for tlimit in list(self._solutions.get((method, cardinality, index_sample), {}).keys()):
if isinstance(tlimit, tuple) and tlimit[0] == timelimit:
self._solutions[method, cardinality, index_sample].pop(tlimit, None)
# --- Solve stochastic problems ---
def inner_solve(self,
cardinality: int,
methods: Optional[List[str]] = None,
indices_sample: Optional[Dict[str, List[int]]] = None,
timelimit_opt: Optional[int] = None,
timelimit_eval: Optional[int] = None,
logfile_opt: Optional[str] = None,
logfile_eval: Optional[str] = None,
**kwargs):
"""Solve the stochastic problem over the clustered set of scenarios and evaluate the output decisions.
        All problems are solved via the problem's `.solve` method; if that method does not fit your needs, see
        `outer_solve`, which lets you supply your own optimizer and evaluator functions.
Arguments:
----------
cardinality: int
            The cardinality of the clustering for which the stochastic problem will be solved.
methods: list of str or None (default: None)
The clustering methods for which the stochastic problem will be solved.
If None, all methods are solved.
indices_sample: Dict[str, List[int]] or None (default: None)
Mapping from the method to the indices of samples for which the stochastic problem will be solved.
If None, all samples are solved.
timelimit_opt: int >= 1 or None (default: None)
Time limit to solve the proxy problem with the clustered scenarios.
timelimit_eval: int >= 1 or None (default: None)
Time limit to solve the problem that evaluates the solution of the clustered problem.
        logfile_opt: str or None (default: None)
This string is appended to the log filename of the optimizer. This file is located in a folder 'logfiles'
in the current directory (the folder is created if it doesn't already exist).
If None, no log is available.
        logfile_eval: str or None (default: None)
This string is appended to the log filename of the evaluator. This file is located in a folder 'logfiles'
in the current directory (the folder is created if it doesn't already exist).
If None, no log is available.
kwargs:
-------
all kwargs of StochasticProblemBasis.solve() except:
`timelimit`, `logfile`, `clear_between_trees`
"""
methods = self._check_and_set_methods(methods)
indices_sample = self._check_and_set_samples(indices_sample, methods, cardinality)
method_samples = [(method, index) for method in methods for index in indices_sample[method]]
# create list of reduced scenarios and weights using all the methods
scenarios_sets = [self.get_scenarios(method, cardinality, index) for method, index in method_samples]
weights_sets = [self.get_weights(method, cardinality, index) for method, index in method_samples]
scenario_trees = [ScenarioTree.twostage_from_scenarios(scenarios, self._map_rvar_to_nb, weights)
for scenarios, weights in zip(scenarios_sets, weights_sets)]
# solve the stochastic problem on the reduced sets
solutions = self._stochastic_problem.solve(*scenario_trees,
timelimit=timelimit_opt,
clear_between_trees=True,
logfile=logfile_opt,
fill_scenario_tree=True,
**kwargs)
if len(scenario_trees) == 1:
solutions = [solutions]
for i, (method, index) in enumerate(method_samples):
key = (method, cardinality, index)
if self._solutions.get(key) is None:
self._solutions[key] = {}
self._solutions[key][timelimit_opt] = solutions[i]
# get the implementation solution
for method, index in method_samples:
key = (method, cardinality, index)
decision_process = DecisionProcess(self._stochastic_problem.map_dvar_to_index,
{0: self._solutions[key][timelimit_opt].x0})
self._solutions[key][(timelimit_opt, timelimit_eval)] = \
self._stochastic_problem.solve(self._scenario_tree,
decision_process=decision_process,
timelimit=timelimit_eval,
logfile=logfile_eval,
**kwargs)
def outer_solve(self,
cardinality: int,
optimize_fct: Callable[[ScenarioTree], Tuple[StochasticSolutionBasis, DecisionProcess, int]],
evaluate_fct: Callable[[ScenarioTree, DecisionProcess], Tuple[StochasticSolutionBasis, int]],
methods: Optional[List[str]] = None,
indices_sample: Optional[Dict[str, List[int]]] = None,
**kwargs):
"""Solve the stochastic problem over the clustered set of scenarios and evaluate the solution using the optimizer
and evaluator functions provided as input.
Arguments:
----------
cardinality: int
            The cardinality of the clustering for which the stochastic problem will be solved.
optimize_fct: The optimizer function that takes a scenario tree (ScenarioTree) and
returns a solution to the stochastic problem (StochasticSolutionBasis), the decision process
(DecisionProcess) to be evaluated in the next step, and the time (int) it took to solve the problem.
evaluate_fct: The evaluator function that takes a scenario tree (ScenarioTree), a decision process
(DecisionProcess), and returns a solution to the problem (StochasticSolutionBasis) with
the time (int) it took to solve the problem.
methods: list of str or None (default: None)
The clustering methods for which the stochastic problem will be solved.
If None, all methods are solved.
indices_sample: Dict[str, List[int]] or None (default: None)
Mapping from the method to the indices of samples for which the stochastic problem will be solved.
If None, all samples are solved.
"""
methods = self._check_and_set_methods(methods)
indices_sample = self._check_and_set_samples(indices_sample, methods, cardinality)
for method in methods:
for index_sample in indices_sample[method]:
key = (method, cardinality, index_sample)
if self._solutions.get(key) is None:
self._solutions[key] = {}
scenarios = self.get_scenarios(*key)
weights = self.get_weights(*key)
scenario_tree = ScenarioTree.twostage_from_scenarios(scenarios, self._map_rvar_to_nb, weights)
sol_opt, dec_pro, time_opt = optimize_fct(scenario_tree)
sol_eval, time_eval = evaluate_fct(self._scenario_tree, dec_pro)
self._solutions[key][time_opt] = sol_opt
self._solutions[key][(time_opt, time_eval)] = sol_eval
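    # Sketch of a compatible optimizer/evaluator pair for `outer_solve` (hypothetical
    # glue code; it reuses the `solve` signature already used by `inner_solve` above):
    # >>> def optimize_fct(tree):
    # ...     t0 = time()
    # ...     sol = problem.solve(tree)
    # ...     dp = DecisionProcess(problem.map_dvar_to_index, {0: sol.x0})
    # ...     return sol, dp, time() - t0
    # >>> def evaluate_fct(tree, dp):
    # ...     t0 = time()
    # ...     return problem.solve(tree, decision_process=dp), time() - t0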
def _check_and_set_samples(self, indices_sample, methods, cardinality):
"""Check and set indices_sample input"""
if indices_sample is None:
indices_sample = {method: range(self.get_n_runs(method, cardinality)) for method in methods}
else:
# check methods name
assert set(indices_sample.keys()).issubset(set(self._methods_available)), \
f"Methods {set(methods).difference(set(self._methods_available))} in `indices_sample` do not exist."
# check indices sample
for method in methods:
all_samples = range(self.get_n_runs(method, cardinality))
if indices_sample.get(method) is not None:
assert set(indices_sample[method]).issubset(set(all_samples)), \
(f"Sample indices {set(indices_sample[method]).difference(set(all_samples))} do not exist "
f"for {method} and cardinality {cardinality}.")
else:
indices_sample[method] = all_samples
return indices_sample
def _check_and_set_methods(self, methods):
"""Check and set methods input"""
if methods is None:
methods = self._methods_available
else:
assert isinstance(methods, list), f"`methods` must be of type list, not {type(methods)}."
# check methods name
assert set(methods).issubset(set(self._methods_available)), \
f"Methods {set(methods).difference(set(self._methods_available))} do not exist."
return methods
def get_opt_solution(self, method, cardinality, index_sample=None, timelimit=None):
try:
if index_sample is None:
index_sample = 0
solutions = self._solutions[method, cardinality, index_sample]
if timelimit is None:
timelimit = [t for t in solutions.keys() if isinstance(t, (int, float)) or t is None][0]
return solutions[timelimit]
except (KeyError, IndexError):
return None
def get_eval_solution(self, method, cardinality, index_sample=None, timelimit_tuple=None):
try:
if index_sample is None:
index_sample = 0
solutions = self._solutions[method, cardinality, index_sample]
if timelimit_tuple is None:
timelimit_tuple = [t for t in solutions.keys() if isinstance(t, tuple)][0]
return solutions[timelimit_tuple]
except (KeyError, IndexError):
return None
def get_gap(self, method, cardinality, index_sample=None, timelimit_tuple=None):
"""Return the gap (in %) from the reference solution"""
try:
v_ref = self._reference_solution.objective_value
v_sol = self.get_eval_solution(method, cardinality, index_sample, timelimit_tuple).objective_value
gap = round(100 * (v_ref - v_sol) / (10**-10 + abs(v_ref)), 5)
return gap if self._stochastic_problem.objective_sense == 'max' else -gap
except (AttributeError, KeyError):
return None
# --- Display Results ---
def plot_method(self,
method: str,
to_show: str = 'obj',
samples: Optional[Dict[int, List[int]]] = None,
card_fct: Callable[[int], bool] = lambda card: True,
x_in_percent: bool = False,
show_mean=False,
title: Optional[str] = None,
figsize=(7,5),
ax=None):
"""
samples: mapping between the cardinality and the list of sample indices to be plotted.
to_show: {'gap', 'obj', 'clust-time', 'solve-time'}
"""
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if samples is None:
samples = {card: range(n_sample) for card, n_sample in self.n_runs[method].items()}
if show_mean:
y_list, card_list = [], []
for card in sorted(list(samples.keys())):
if not card_fct(card):
continue
if show_mean:
y_list.append([])
card_list.append(card)
for index in samples[card]:
key = (method, card, index)
if self._solutions.get(key) is None:
continue
timelimits = self._solutions[key].keys()
timelimits_tuple = [time for time in timelimits if isinstance(time, tuple)]
for timelimit in timelimits_tuple:
# pick what to display
if to_show == 'gap':
assert self._reference_solution is not None, \
"Impossible to plot the gap if the reference solution is not provided"
y = self.get_gap(method, card, index, timelimit)
ax.axhline(0)
elif to_show == 'obj':
y = self._solutions[key][timelimit].objective_value
if self._reference_solution is not None:
ax.axhline(self._reference_solution.objective_value)
elif to_show == 'clust-time':
y = self.get_clust_time(*key)
elif to_show == 'solve-time':
y = self._solutions[key][timelimit[0]].scenario_tree.data['time']
if self._reference_solution is not None:
ax.axhline(self._reference_solution.scenario_tree.data['time'])
else:
raise NotImplementedError("Wrong `to_show` argument: should be 'gap', 'obj', 'clust-time', "
f"or 'solve-time', not {to_show}")
if x_in_percent:
ax.scatter(100 * card / self._n_scenarios, y, marker="x")
else:
ax.scatter(card, y, marker="x")
if show_mean:
y_list[-1].append(y)
if show_mean:
ax.plot(card_list, [np.mean(ys) for ys in y_list])
if x_in_percent:
ax.set_xlabel("cardinality (%)")
else:
ax.set_xlabel("cardinality")
ax.set_ylabel(to_show)
if title:
ax.set_title(title)
else:
ax.set_title(f"{method}")
return ax
def plot_results(self, methods=None, sharey=True, sharex=False, figsize=(15,3), **kwargs):
methods = self._methods_available if methods is None else methods
methods_with_data = [method for method in methods if self.n_runs[method] != dict()]
fig, ax = plt.subplots(1, len(methods_with_data), figsize=figsize, sharey=sharey, sharex=sharex)
if len(methods_with_data) == 1:
self.plot_method(methods_with_data[0], ax=ax, **kwargs)
else:
for i, method in enumerate(methods_with_data):
self.plot_method(method, ax=ax[i], **kwargs)
return ax
# --- Save and load results ----
def to_file(self, wdir, tree_extension='pickle', show_files=True, remove_tree_keys=None):
"""Save the clustering instance (clustering results and clustered solution of the stochastic problem).
        Arguments:
        ----------
        wdir: string
            Path to the working directory where the files will be saved.
        tree_extension: {'txt', 'pickle'} (default: 'pickle')
        show_files: bool (default: True)
remove_tree_keys: List[str] or None (default: None)
Data keys that will not be saved in the scenario tree in addition to those that are not saved by default
(see `_save_tree()`).
"""
# Create folder if doesn't exist
if not os.path.exists(wdir):
os.mkdir(wdir)
else:
print(f"Directory already exists. Files may be overwritten.")
if show_files:
print(f"These files have been saved at {wdir}:")
# (1) Save string representation just for readable information
filename = "clustering summary.txt"
with open(wdir + filename, "w") as f:
f.write(self.__repr__())
if show_files:
print(f" {filename}")
# (2) Save result dictionary
import pickle
filename = "results.pickle"
with open(wdir + filename, "wb") as f:
pickle.dump(self._results, f)
if show_files:
print(f" {filename}")
# (3) Save scenario trees
for method, cardinality, index_sample in self._solutions.keys():
key = (method, cardinality, index_sample)
for timelimit in self._solutions[key].keys():
filename = self._save_tree(wdir, tree_extension, method, cardinality, index_sample,
timelimit, remove_tree_keys)
if show_files:
print(f" {filename}")
def _save_tree(self, wdir, tree_extension, method, cardinality, index_sample, timelimit, remove_tree_keys):
if remove_tree_keys is None:
remove_tree_keys = []
key = (method, cardinality, index_sample)
if isinstance(timelimit, (int, float)) or timelimit is None: # tree from clustered problem
scenario_tree = self._solutions[key][timelimit].scenario_tree
filename = f"tree_clust-{method}-K{cardinality}-#{index_sample}-{timelimit}sec"
scenario_tree.to_file(wdir + filename, 'txt',
without_keys=['scenario', 'memory', 'decision', 'W', 'w'] + remove_tree_keys)
elif isinstance(timelimit, tuple): # tree from implementation problem
scenario_tree = self._solutions[key][timelimit].scenario_tree
filename = f"tree_impl-{method}-K{cardinality}-#{index_sample}-{timelimit}sec"
scenario_tree.to_file(wdir + filename, tree_extension,
without_keys=['scenario', 'memory'] + remove_tree_keys)
else:
raise TypeError(f"Wrong type for for `timelimit`. Should be int, float or tuple, not {type(timelimit)}")
return filename
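    # Note: the file names written by `_save_tree` encode (method, cardinality,
    # index_sample, timelimit) as 'tree_<kind>-<method>-K<cardinality>-#<index>-<timelimit>sec';
    # `_parse_filename` below relies on this exact convention (and on `method`
    # containing no '-') when loading.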
def from_file(self, wdir, tree_extension='pickle', show_files=False):
"""Load the clustering results.
        Specifically, this builds the `_results` and `_solutions` attributes of the instance. This is done by
loading the 'results.pickle' file and all the scenario-tree files saved in the working directory.
Arguments:
---------
wdir: string
Path to the working directory that contains all the files.
tree_extension: {'txt', 'pickle'} (default: 'pickle')
Format in which the scenario trees of the implementation problems are saved.
(Note that the format for the scenario trees of the clustered problems are always 'txt'.)
"""
from os import listdir
from os.path import isfile, join
import pickle
# (1) Load result dictionary
loaded_files = ["results.pickle"]
try:
with open(wdir + "results.pickle", "rb") as f:
self._results = pickle.load(f)
if show_files:
print(f"These files have been loaded from {wdir}: \n results.pickle\n")
except FileNotFoundError:
print(f"File '{wdir}results.pickle' not found. Impossible to load the clustering results.")
return
# (2) Load scenario-tree solution
        files_in_wdir = [f for f in listdir(wdir) if isfile(join(wdir, f))]  # get all files in the working directory
for file in files_in_wdir:
# if file doesn't contain a clustered or implementation scenario tree
if 'tree_clust' not in file and 'tree_impl' not in file:
continue
loaded_files.append(file)
# load tree
filename = file.split('.')[0] # remove extension
extension = 'txt' if 'clust' in filename else tree_extension
scenario_tree = ScenarioTree.from_file(wdir + filename, extension)
# get tuple (method, cardinality, index_sample, timelimit)
method, cardinality, index_sample, timelimit = ScenarioClustering._parse_filename(filename)
key = (method, cardinality, index_sample)
if self._solutions.get(key) is None:
self._solutions[key] = {}
if 'clust' in filename:
self._solutions[key][timelimit] = self._stochastic_problem._solution_class(self._stochastic_problem,
scenario_tree)
else:
# append scenarios to the scenario tree
scenario_tree.append_data_dict(self._scenario_tree.get_data_dict(['scenario']))
self._solutions[key][timelimit] = self._stochastic_problem._solution_class(self._stochastic_problem,
scenario_tree)
if show_files:
print(file)
@staticmethod
def _parse_filename(filename):
method, cardinality_str, index_sample_str, timelimit_str = filename.split('-')[1:]
cardinality = int(cardinality_str[1:]) # remove prefix 'K' and convert to integer
index_sample = int(index_sample_str[1:]) # remove prefix '#' and convert to integer
        timelimit = eval(timelimit_str[:-3])  # remove suffix 'sec'; eval() restores the tuple, number, or None
return method, cardinality, index_sample, timelimit
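    # Example (illustrative): 'tree_impl-kmeans-K10-#0-(60, 60)sec' parses to
    # ('kmeans', 10, 0, (60, 60)). eval() is only applied to the time-limit token,
    # which is written by `_save_tree` itself.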
# --- Representations ---
def __str__(self):
string = ("Scenario Clustering: \n"
f" Scenarios: {self._n_scenarios} \n"
f" Features: {self.n_features} \n"
f" - Random: {self.n_random_features()} (with std > 0) \n"
f" - Numeric: {self._n_num_features} \n"
f" - Categorical: {self._n_cat_features} \n"
f" Clustering methods: {self._methods_available}\n\n")
string += "Clustering performed:\n"
for method in self._methods_available:
string += (f" {method}: {self.n_runs[method]}\n")
string += ("\nReference solution: \n"
f" {self._reference_solution} \n")
return string
def print_results(self, method=None, cardinality=None, index_sample=None, return_string=False):
# limit print of the weights
np.set_printoptions(threshold=15, linewidth=120) # max 15 weights
# get methods, cardinality and indices to print
methods = self._methods_available if method is None else [method]
is_card_valid = lambda card: True if cardinality is None else card == cardinality
is_index_valid = lambda index: True if index_sample is None else index == index_sample
string = f"Reference solution: \n {self._reference_solution} \n"
for method in methods:
string += f"\n{method}: \n" + "#" * (len(method)+1) + "\n"
for card in self.cardinalities(method):
if not is_card_valid(card): continue
string += f" Cardinality: {card} \n " + "-" * len(f"Cardinality: {card}") + "\n"
for index in range(self.get_n_runs(method, card)):
if not is_index_valid(index): continue
key = (method, card, index)
string += (f" Weights: {self.get_weights(*key)} \n"
f" Representatives: {self.get_representatives(*key)} \n"
f" Score: {self.get_score(*key)} \n"
f" Param: {self.get_param(*key)} \n"
" Random features: "
f"{self.n_random_features(*key)}/{self.n_random_features()} \n"
f" Time: {self.get_clust_time(*key):.2f} sec\n")
if self._solutions.get(key) is None:
string += (f" Clustered problem: None \n"
f" Implement. problem: None \n"
f" Gap to ref. sol. (%): None\n\n")
else:
timelimits = self._solutions[key].keys()
timelimits_clust = [time for time in timelimits if isinstance(time, (int, float)) or time is None]
timelimits_tuple = [time for time in timelimits if isinstance(time, tuple)]
string += f" Clustered problem: \n"
for timelimit in timelimits_clust:
string += f" {timelimit} sec: {self.get_opt_solution(*key, timelimit)} \n"
string += f" Implement. problem: \n"
for timelimit in timelimits_tuple:
string += f" {timelimit} sec: {self.get_eval_solution(*key, timelimit)} \n"
string += f" Gap to ref. sol. (%): \n"
for timelimit in timelimits_tuple:
string += f" {timelimit} sec: {self.get_gap(*key, timelimit)} \n"
string += "\n"
if return_string:
return string
else:
print(string, end="")
# --- Sanity check ---
def _check_opportunity_cost_matrix(self):
if self._opport_cost_matrix is None:
return
assert self._opport_cost_matrix.shape == (self._n_scenarios, self._n_scenarios), \
("Shape of opportunity cost matrix doesn't match the number of scenarios: "
f"{self._opport_cost_matrix.shape} != {(self._n_scenarios, self._n_scenarios)}")
def _check_sanity(self):
self._check_opportunity_cost_matrix()
assert len(self._scenarios) >= 3, \
f"Clustering the scenarios makes sense for 3 scenarios or more, not {len(self._scenarios)}." | [
"os.mkdir",
"pickle.dump",
"stochoptim.scengen.scenario_tree.ScenarioTree.from_file",
"numpy.ones",
"pickle.load",
"numpy.mean",
"os.path.join",
"numpy.unique",
"numpy.set_printoptions",
"os.path.exists",
"numpy.max",
"matplotlib.pyplot.subplots",
"stochoptim.scengen.decision_process.Decisio... | [((10192, 10247), 'stochoptim.scenclust.cost_space_partition.CostSpaceScenarioPartitioning', 'CostSpaceScenarioPartitioning', (['self._opport_cost_matrix'], {}), '(self._opport_cost_matrix)\n', (10221, 10247), False, 'from stochoptim.scenclust.cost_space_partition import CostSpaceScenarioPartitioning\n'), ((10270, 10276), 'time.time', 'time', ([], {}), '()\n', (10274, 10276), False, 'from time import time\n'), ((11134, 11140), 'time.time', 'time', ([], {}), '()\n', (11138, 11140), False, 'from time import time\n'), ((12441, 12447), 'time.time', 'time', ([], {}), '()\n', (12445, 12447), False, 'from time import time\n'), ((14408, 14414), 'time.time', 'time', ([], {}), '()\n', (14412, 14414), False, 'from time import time\n'), ((16858, 16864), 'time.time', 'time', ([], {}), '()\n', (16862, 16864), False, 'from time import time\n'), ((18266, 18272), 'time.time', 'time', ([], {}), '()\n', (18270, 18272), False, 'from time import time\n'), ((24383, 24448), 'numpy.zeros', 'np.zeros', (['(self._n_unique_cat, cardinality, self._n_cat_features)'], {}), '((self._n_unique_cat, cardinality, self._n_cat_features))\n', (24391, 24448), True, 'import numpy as np\n'), ((49519, 49567), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': '(15)', 'linewidth': '(120)'}), '(threshold=15, linewidth=120)\n', (49538, 49567), True, 'import numpy as np\n'), ((4947, 4998), 'numpy.unique', 'np.unique', (['self._scenarios[:, self._is_categorical]'], {}), '(self._scenarios[:, self._is_categorical])\n', (4956, 4998), True, 'import numpy as np\n'), ((11692, 11732), 'numpy.zeros', 'np.zeros', (['(cardinality, self.n_features)'], {}), '((cardinality, self.n_features))\n', (11700, 11732), True, 'import numpy as np\n'), ((11979, 12017), 'numpy.array', 'np.array', (['self.kmeans.cluster_centers_'], {}), '(self.kmeans.cluster_centers_)\n', (11987, 12017), True, 'import numpy as np\n'), ((13208, 13248), 'numpy.zeros', 'np.zeros', (['(cardinality, self.n_features)'], {}), '((cardinality, self.n_features))\n', (13216, 13248), True, 'import numpy as np\n'), ((17814, 17853), 'numpy.array', 'np.array', (['self.kprot.cluster_centroids_'], {}), '(self.kprot.cluster_centroids_)\n', (17822, 17853), True, 'import numpy as np\n'), ((29955, 30033), 'stochoptim.scengen.scenario_tree.ScenarioTree.twostage_from_scenarios', 'ScenarioTree.twostage_from_scenarios', (['scenarios', 'self._map_rvar_to_nb', 'weights'], {}), '(scenarios, self._map_rvar_to_nb, weights)\n', (29991, 30033), False, 'from stochoptim.scengen.scenario_tree import ScenarioTree\n'), ((31154, 31265), 'stochoptim.scengen.decision_process.DecisionProcess', 'DecisionProcess', (['self._stochastic_problem.map_dvar_to_index', '{(0): self._solutions[key][timelimit_opt].x0}'], {}), '(self._stochastic_problem.map_dvar_to_index, {(0): self.\n _solutions[key][timelimit_opt].x0})\n', (31169, 31265), False, 'from stochoptim.scengen.decision_process import DecisionProcess\n'), ((38495, 38524), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (38507, 38524), True, 'import matplotlib.pyplot as plt\n'), ((42555, 42575), 'os.path.exists', 'os.path.exists', (['wdir'], {}), '(wdir)\n', (42569, 42575), False, 'import os\n'), ((42590, 42604), 'os.mkdir', 'os.mkdir', (['wdir'], {}), '(wdir)\n', (42598, 42604), False, 'import os\n'), ((43210, 43239), 'pickle.dump', 'pickle.dump', (['self._results', 'f'], {}), '(self._results, f)\n', (43221, 43239), False, 'import pickle\n'), ((46950, 
47000), 'stochoptim.scengen.scenario_tree.ScenarioTree.from_file', 'ScenarioTree.from_file', (['(wdir + filename)', 'extension'], {}), '(wdir + filename, extension)\n', (46972, 47000), False, 'from stochoptim.scengen.scenario_tree import ScenarioTree\n'), ((5937, 6007), 'numpy.concatenate', 'np.concatenate', (['[scen_bin_3d[cat] for cat in self._unique_cat]'], {'axis': '(1)'}), '([scen_bin_3d[cat] for cat in self._unique_cat], axis=1)\n', (5951, 6007), True, 'import numpy as np\n'), ((6045, 6089), 'numpy.concatenate', 'np.concatenate', (['[scen_num, scen_bin]'], {'axis': '(1)'}), '([scen_num, scen_bin], axis=1)\n', (6059, 6089), True, 'import numpy as np\n'), ((17346, 17394), 'numpy.ones', 'np.ones', (['(cardinality, self._scenarios.shape[1])'], {}), '((cardinality, self._scenarios.shape[1]))\n', (17353, 17394), True, 'import numpy as np\n'), ((34194, 34272), 'stochoptim.scengen.scenario_tree.ScenarioTree.twostage_from_scenarios', 'ScenarioTree.twostage_from_scenarios', (['scenarios', 'self._map_rvar_to_nb', 'weights'], {}), '(scenarios, self._map_rvar_to_nb, weights)\n', (34230, 34272), False, 'from stochoptim.scengen.scenario_tree import ScenarioTree\n'), ((46045, 46059), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (46056, 46059), False, 'import pickle\n'), ((46438, 46451), 'os.listdir', 'listdir', (['wdir'], {}), '(wdir)\n', (46445, 46451), False, 'from os import listdir\n'), ((5720, 5770), 'numpy.ones', 'np.ones', (['(self._n_scenarios, self._n_cat_features)'], {}), '((self._n_scenarios, self._n_cat_features))\n', (5727, 5770), True, 'import numpy as np\n'), ((23507, 23513), 'time.time', 'time', ([], {}), '()\n', (23511, 23513), False, 'from time import time\n'), ((40904, 40915), 'numpy.mean', 'np.mean', (['ys'], {}), '(ys)\n', (40911, 40915), True, 'import numpy as np\n'), ((46462, 46475), 'os.path.join', 'join', (['wdir', 'f'], {}), '(wdir, f)\n', (46466, 46475), False, 'from os.path import isfile, join\n'), ((21805, 21828), 'numpy.ones', 'np.ones', (['(cardinality,)'], {}), '((cardinality,))\n', (21812, 21828), True, 'import numpy as np\n'), ((22119, 22125), 'time.time', 'time', ([], {}), '()\n', (22123, 22125), False, 'from time import time\n'), ((22726, 22732), 'time.time', 'time', ([], {}), '()\n', (22730, 22732), False, 'from time import time\n'), ((7319, 7344), 'numpy.max', 'np.max', (['scenarios'], {'axis': '(0)'}), '(scenarios, axis=0)\n', (7325, 7344), True, 'import numpy as np\n'), ((7347, 7372), 'numpy.min', 'np.min', (['scenarios'], {'axis': '(0)'}), '(scenarios, axis=0)\n', (7353, 7372), True, 'import numpy as np\n')] |
import numpy as np
import torch
import csv
import os
import cv2
import math
import random
import json
import pickle
import os.path as osp
from lietorch import SE3
from .stream import RGBDStream
from .rgbd_utils import loadtum
intrinsics_dict = {
'freiburg1': [517.3, 516.5, 318.6, 255.3],
'freiburg2': [520.9, 521.0, 325.1, 249.7],
'freiburg3': [535.4, 539.2, 320.1, 247.6],
}
distortion_dict = {
'freiburg1': [0.2624, -0.9531, -0.0054, 0.0026, 1.1633],
'freiburg2': [0.2312, -0.7849, -0.0033, -0.0001, 0.9172],
'freiburg3': [0, 0, 0, 0, 0],
}
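# Per the TUM RGB-D calibration, intrinsics are stored as [fx, fy, cx, cy] and the
# distortion coefficients follow OpenCV's (k1, k2, p1, p2, k3) convention, as
# expected by cv2.undistort below.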
def as_intrinsics_matrix(intrinsics):
K = np.eye(3)
K[0,0] = intrinsics[0]
K[1,1] = intrinsics[1]
K[0,2] = intrinsics[2]
K[1,2] = intrinsics[3]
return K
class TUMStream(RGBDStream):
def __init__(self, datapath, **kwargs):
super(TUMStream, self).__init__(datapath=datapath, **kwargs)
def _build_dataset_index(self):
""" build list of images, poses, depths, and intrinsics """
images, depths, poses, intrinsics = loadtum(self.datapath, self.frame_rate)
intrinsic, _ = TUMStream.calib_read(self.datapath)
intrinsics = np.tile(intrinsic[None], (len(images), 1))
# set first pose to identity
poses = SE3(torch.as_tensor(poses))
poses = poses[[0]].inv() * poses
poses = poses.data.cpu().numpy()
self.images = images
self.poses = poses
self.depths = depths
self.intrinsics = intrinsics
@staticmethod
def calib_read(datapath):
if 'freiburg1' in datapath:
intrinsic = intrinsics_dict['freiburg1']
d_coef = distortion_dict['freiburg1']
elif 'freiburg2' in datapath:
intrinsic = intrinsics_dict['freiburg2']
d_coef = distortion_dict['freiburg2']
        elif 'freiburg3' in datapath:
            intrinsic = intrinsics_dict['freiburg3']
            d_coef = distortion_dict['freiburg3']
        else:
            raise ValueError(f"Could not infer the TUM sequence (freiburg1/2/3) from datapath: {datapath}")
        return np.array(intrinsic), np.array(d_coef)
@staticmethod
def image_read(image_file):
intrinsics, d_coef = TUMStream.calib_read(image_file)
K = as_intrinsics_matrix(intrinsics)
image = cv2.imread(image_file)
return cv2.undistort(image, K, d_coef)
@staticmethod
def depth_read(depth_file):
depth = cv2.imread(depth_file, cv2.IMREAD_ANYDEPTH)
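        # TUM RGB-D depth maps are 16-bit PNGs with a scale factor of 5000
        # (i.e. a pixel value of 5000 corresponds to 1 meter).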
return depth.astype(np.float32) / 5000.0
| [
"numpy.eye",
"cv2.imread",
"numpy.array",
"torch.as_tensor",
"cv2.undistort"
] | [((621, 630), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (627, 630), True, 'import numpy as np\n'), ((2195, 2217), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (2205, 2217), False, 'import cv2\n'), ((2233, 2264), 'cv2.undistort', 'cv2.undistort', (['image', 'K', 'd_coef'], {}), '(image, K, d_coef)\n', (2246, 2264), False, 'import cv2\n'), ((2332, 2375), 'cv2.imread', 'cv2.imread', (['depth_file', 'cv2.IMREAD_ANYDEPTH'], {}), '(depth_file, cv2.IMREAD_ANYDEPTH)\n', (2342, 2375), False, 'import cv2\n'), ((1266, 1288), 'torch.as_tensor', 'torch.as_tensor', (['poses'], {}), '(poses)\n', (1281, 1288), False, 'import torch\n'), ((1983, 2002), 'numpy.array', 'np.array', (['intrinsic'], {}), '(intrinsic)\n', (1991, 2002), True, 'import numpy as np\n'), ((2004, 2020), 'numpy.array', 'np.array', (['d_coef'], {}), '(d_coef)\n', (2012, 2020), True, 'import numpy as np\n')] |
# Copyright 2021 University College London. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for module `image_ops`."""
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from tensorflow_mri.python.ops import image_ops
from tensorflow_mri.python.util import io_util
from tensorflow_mri.python.util import test_util
class PeakSignalToNoiseRatioTest(test_util.TestCase):
"""Tests for PSNR op."""
@classmethod
def setUpClass(cls):
"""Prepare tests."""
super().setUpClass()
cls.data = io_util.read_hdf5('tests/data/image_ops_data.h5')
@test_util.run_in_graph_and_eager_modes
def test_psnr_2d_scalar(self):
"""Test 2D PSNR with scalar batch."""
img1 = self.data['psnr/2d/img1']
img2 = self.data['psnr/2d/img2']
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
result = image_ops.psnr(img1, img2, max_val=255, rank=2)
self.assertAllClose(result, 22.73803845)
result = image_ops.psnr2d(img1, img2, max_val=255)
self.assertAllClose(result, 22.73803845)
@test_util.run_in_graph_and_eager_modes
def test_psnr_2d_trivial_batch(self):
"""Test 2D PSNR with trivial batch of size 1."""
img1 = self.data['psnr/2d/img1']
img2 = self.data['psnr/2d/img2']
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
img1 = tf.expand_dims(img1, 0)
img2 = tf.expand_dims(img2, 0)
result = image_ops.psnr(img1, img2, max_val=255, rank=2)
self.assertAllClose(result, [22.73803845])
@test_util.run_in_graph_and_eager_modes
def test_psnr_2d_batch_multichannel(self):
"""Test 2D PSNR with multichannel batch of images."""
img1 = self.data['psnr/2d/batch/img1']
img2 = self.data['psnr/2d/batch/img2']
ref = [16.35598558,
16.96981631,
17.80788841,
18.1842858,
18.06558658,
17.16817389]
result = image_ops.psnr(img1, img2, max_val=255)
self.assertAllClose(result, ref)
# Test without specifying dynamic range, which should default to 255 for
# `tf.uint8`.
result = image_ops.psnr(img1, img2)
self.assertAllClose(result, ref)
@test_util.run_in_graph_and_eager_modes
def test_psnr_2d_nd_batch(self):
"""Test 2D PSNR with N-D batch of images."""
img1 = self.data['psnr/2d/batch/img1']
img2 = self.data['psnr/2d/batch/img2']
img1 = tf.reshape(img1, (3, 2) + img1.shape[1:])
img2 = tf.reshape(img2, (3, 2) + img2.shape[1:])
ref = [[16.35598558, 16.96981631],
[17.80788841, 18.18428580],
[18.06558658, 17.16817389]]
result = image_ops.psnr(img1, img2, max_val=255, rank=2)
self.assertAllClose(result, ref)
@test_util.run_in_graph_and_eager_modes
def test_psnr_2d_batch_multichannel_float(self):
"""Test 2D PSNR with multichannel batch of floating point images."""
img1 = self.data['psnr/2d/batch/img1']
img2 = self.data['psnr/2d/batch/img2']
ref = [16.35598558,
16.96981631,
17.80788841,
18.1842858,
18.06558658,
17.16817389]
img1 = tf.cast(img1, tf.float32) / 255.0
img2 = tf.cast(img2, tf.float32) / 255.0
result = image_ops.psnr(img1, img2, max_val=1)
self.assertAllClose(result, ref)
# Test without specifying dynamic range, which should default to 1 for
# `tf.float32`.
result = image_ops.psnr(img1, img2)
self.assertAllClose(result, ref)
@test_util.run_in_graph_and_eager_modes
def test_psnr_3d_scalar(self):
"""Test 3D PSNR with scalar batch."""
img1 = self.data['psnr/3d/img1']
img2 = self.data['psnr/3d/img2']
img1 = img1[0, ...]
img2 = img2[0, ...]
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
result = image_ops.psnr(img1, img2, rank=3)
self.assertAllClose(result, 32.3355765)
@test_util.run_in_graph_and_eager_modes
def test_psnr_3d_batch(self):
"""Test 3D PSNR with scalar batch."""
img1 = self.data['psnr/3d/img1']
img2 = self.data['psnr/3d/img2']
ref = [32.335575,
31.898806,
31.149742,
34.818497,
30.58971 ,
32.17367 ]
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
result = image_ops.psnr(img1, img2, max_val=255)
self.assertAllClose(result, ref, rtol=1e-5, atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_psnr_3d_mdbatch(self):
"""Test 3D PSNR with multidimensional batch."""
img1 = self.data['psnr/3d/img1']
img2 = self.data['psnr/3d/img2']
ref = [[32.335575, 31.898806],
[31.149742, 34.818497],
[30.58971 , 32.17367 ]]
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
img1 = tf.reshape(img1, (3, 2) + img1.shape[1:])
img2 = tf.reshape(img2, (3, 2) + img2.shape[1:])
result = image_ops.psnr(img1, img2, max_val=255, rank=3)
self.assertAllClose(result, ref, rtol=1e-3, atol=1e-3)
result = image_ops.psnr3d(img1, img2, max_val=255)
self.assertAllClose(result, ref, rtol=1e-3, atol=1e-3)
@test_util.run_in_graph_and_eager_modes
def test_psnr_3d_multichannel(self):
"""Test 3D PSNR with multichannel inputs."""
img1 = self.data['psnr/3d/img1']
img2 = self.data['psnr/3d/img2']
ref = [32.111702, 32.607716, 31.309875]
img1 = tf.reshape(img1, (3, 2) + img1.shape[1:])
img2 = tf.reshape(img2, (3, 2) + img2.shape[1:])
img1 = tf.transpose(img1, [0, 2, 3, 4, 1])
img2 = tf.transpose(img2, [0, 2, 3, 4, 1])
result = image_ops.psnr(img1, img2, max_val=255, rank=3)
self.assertAllClose(result, ref, rtol=1e-4, atol=1e-4)
def test_psnr_invalid_rank(self):
"""Test PSNR with an invalid rank."""
img1 = self.data['psnr/2d/img1']
img2 = self.data['psnr/2d/img2']
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
"`rank` must be >= 2"):
image_ops.psnr(img1, img2, 255)
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
"`rank` must be >= 2"):
image_ops.psnr(img1, img2, 255)
class StructuralSimilarityTest(test_util.TestCase):
"""Tests for SSIM op."""
@classmethod
def setUpClass(cls):
"""Prepare tests."""
super().setUpClass()
cls.data = io_util.read_hdf5('tests/data/image_ops_data.h5')
@test_util.run_in_graph_and_eager_modes
def test_ssim_2d_scalar(self):
"""Test 2D SSIM with scalar batch."""
img1 = self.data['psnr/2d/img1']
img2 = self.data['psnr/2d/img2']
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
result = image_ops.ssim(img1, img2, max_val=255, rank=2)
self.assertAllClose(result, 0.5250339, rtol=1e-5, atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_ssim_2d_trivial_batch(self):
"""Test 2D SSIM with trivial batch of size 1."""
img1 = self.data['psnr/2d/img1']
img2 = self.data['psnr/2d/img2']
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
img1 = tf.expand_dims(img1, 0)
img2 = tf.expand_dims(img2, 0)
result = image_ops.ssim(img1, img2, max_val=255, rank=2)
self.assertAllClose(result, [0.5250339], rtol=1e-5, atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_ssim_2d_batch_multichannel(self):
"""Test 2D SSIM with multichannel batch of images."""
img1 = self.data['psnr/2d/batch/img1']
img2 = self.data['psnr/2d/batch/img2']
ref = [0.250783,
0.293936,
0.33806 ,
0.366984,
0.38121 ,
0.366342]
result = image_ops.ssim(img1, img2, max_val=255)
self.assertAllClose(result, ref, rtol=1e-4, atol=1e-4)
# Test without specifying dynamic range, which should default to 255 for
# `tf.uint8`.
result = image_ops.ssim(img1, img2)
self.assertAllClose(result, ref, rtol=1e-4, atol=1e-4)
@test_util.run_in_graph_and_eager_modes
def test_ssim_2d_nd_batch(self):
"""Test 2D SSIM with N-D batch of images."""
img1 = self.data['psnr/2d/batch/img1']
img2 = self.data['psnr/2d/batch/img2']
img1 = tf.reshape(img1, (3, 2) + img1.shape[1:])
img2 = tf.reshape(img2, (3, 2) + img2.shape[1:])
ref = [[0.250783, 0.293936],
[0.33806 , 0.366984],
[0.38121 , 0.366342]]
result = image_ops.ssim(img1, img2, max_val=255, rank=2)
self.assertAllClose(result, ref, rtol=1e-4, atol=1e-4)
@test_util.run_in_graph_and_eager_modes
def test_ssim_2d_batch_multichannel_float(self):
"""Test 2D SSIM with multichannel batch of floating point images."""
img1 = self.data['psnr/2d/batch/img1']
img2 = self.data['psnr/2d/batch/img2']
ref = [0.250783,
0.293936,
0.33806 ,
0.366984,
0.38121 ,
0.366342]
img1 = tf.cast(img1, tf.float32) / 255.0
img2 = tf.cast(img2, tf.float32) / 255.0
result = image_ops.ssim(img1, img2, max_val=1)
self.assertAllClose(result, ref, rtol=1e-4, atol=1e-4)
# Test without specifying dynamic range, which should default to 1 for
# `tf.float32`.
result = image_ops.ssim(img1, img2)
self.assertAllClose(result, ref, rtol=1e-4, atol=1e-4)
@test_util.run_in_graph_and_eager_modes
def test_ssim_3d_scalar(self):
"""Test 3D SSIM with scalar batch."""
img1 = self.data['psnr/3d/img1']
img2 = self.data['psnr/3d/img2']
img1 = img1[0, ...]
img2 = img2[0, ...]
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
result = image_ops.ssim(img1, img2, rank=3)
self.assertAllClose(result, 0.93111473)
@test_util.run_in_graph_and_eager_modes
def test_ssim_3d_batch(self):
"""Test 3D SSIM with batch."""
img1 = self.data['psnr/3d/img1']
img2 = self.data['psnr/3d/img2']
ref = [0.93111473,
0.90337730]
img1 = img1[:2, ...]
img2 = img2[:2, ...]
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
result = image_ops.ssim(img1, img2, max_val=255)
self.assertAllClose(result, ref, rtol=1e-5, atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_ssim_3d_mdbatch(self):
"""Test 3D SSIM with multidimensional batch."""
img1 = self.data['psnr/3d/img1']
img2 = self.data['psnr/3d/img2']
ref = [[0.93111473, 0.90337730],
[0.90820014, 0.92448730]]
img1 = img1[:4, ...]
img2 = img2[:4, ...]
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
img1 = tf.reshape(img1, (2, 2) + img1.shape[1:])
img2 = tf.reshape(img2, (2, 2) + img2.shape[1:])
result = image_ops.ssim(img1, img2, max_val=255, rank=3)
self.assertAllClose(result, ref)
@test_util.run_in_graph_and_eager_modes
def test_ssim_3d_multichannel(self):
"""Test 3D SSIM with multichannel inputs."""
# Does not work on CPU currently - GPU only.
# img1 = self.data['psnr/3d/img1']
# img2 = self.data['psnr/3d/img2']
# ref = [[0.93111473, 0.90337730],
# [0.90820014, 0.92448730],
# [0.90630510, 0.92143655]]
# img1 = tf.reshape(img1, (3, 2) + img1.shape[1:])
# img2 = tf.reshape(img2, (3, 2) + img2.shape[1:])
# img1 = tf.transpose(img1, [0, 2, 3, 4, 1])
# img2 = tf.transpose(img2, [0, 2, 3, 4, 1])
# result = image_ops.ssim(img1, img2, max_val=255, rank=3)
# self.assertAllClose(result, tf.math.reduce_mean(ref, axis=1))
def test_ssim_invalid_rank(self):
"""Test SSIM with an invalid rank."""
img1 = self.data['psnr/2d/img1']
img2 = self.data['psnr/2d/img2']
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
"`rank` must be >= 2"):
image_ops.ssim(img1, img2, 255)
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
"`rank` must be >= 2"):
image_ops.ssim(img1, img2, 255)
class MultiscaleStructuralSimilarityTest(test_util.TestCase):
"""Tests for MS-SSIM op."""
@classmethod
def setUpClass(cls):
"""Prepare tests."""
super().setUpClass()
cls.data = io_util.read_hdf5('tests/data/image_ops_data.h5')
@test_util.run_in_graph_and_eager_modes
def test_msssim_2d_scalar(self):
"""Test 2D MS-SSIM with scalar batch."""
img1 = self.data['psnr/2d/img1']
img2 = self.data['psnr/2d/img2']
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
result = image_ops.ssim_multiscale(img1, img2, max_val=255, rank=2)
self.assertAllClose(result, 0.8270784)
result = image_ops.ssim2d_multiscale(img1, img2, max_val=255)
self.assertAllClose(result, 0.8270784)
@test_util.run_in_graph_and_eager_modes
def test_msssim_2d_trivial_batch(self):
"""Test 2D MS-SSIM with trivial batch of size 1."""
img1 = self.data['psnr/2d/img1']
img2 = self.data['psnr/2d/img2']
img1 = tf.expand_dims(img1, -1)
img2 = tf.expand_dims(img2, -1)
img1 = tf.expand_dims(img1, 0)
img2 = tf.expand_dims(img2, 0)
result = image_ops.ssim_multiscale(img1, img2, max_val=255, rank=2)
self.assertAllClose(result, [0.8270784])
@test_util.run_in_graph_and_eager_modes
def test_msssim_2d_batch_multichannel(self):
"""Test 2D MS-SSIM with multichannel batch of images."""
img1 = self.data['psnr/2d/batch/img1']
img2 = self.data['psnr/2d/batch/img2']
ref = [0.47854424,
0.60964876,
0.71863150,
0.76113180,
0.77840980,
0.71724670]
result = image_ops.ssim_multiscale(img1, img2, max_val=255)
self.assertAllClose(result, ref, rtol=1e-5, atol=1e-5)
# Test without specifying dynamic range, which should default to 255 for
# `tf.uint8`.
result = image_ops.ssim_multiscale(img1, img2)
self.assertAllClose(result, ref, rtol=1e-5, atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_msssim_2d_nd_batch(self):
"""Test 2D MS-SSIM with N-D batch of images."""
img1 = self.data['psnr/2d/batch/img1']
img2 = self.data['psnr/2d/batch/img2']
img1 = tf.reshape(img1, (3, 2) + img1.shape[1:])
img2 = tf.reshape(img2, (3, 2) + img2.shape[1:])
ref = [[0.47854424, 0.60964876],
[0.71863150, 0.76113180],
[0.77840980, 0.71724670]]
result = image_ops.ssim_multiscale(img1, img2, max_val=255, rank=2)
self.assertAllClose(result, ref, rtol=1e-5, atol=1e-5)
result = image_ops.ssim2d_multiscale(img1, img2, max_val=255)
self.assertAllClose(result, ref, rtol=1e-5, atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_msssim_2d_batch_multichannel_float(self):
"""Test 2D MS-SSIM with multichannel batch of floating point images."""
img1 = self.data['psnr/2d/batch/img1']
img2 = self.data['psnr/2d/batch/img2']
ref = [0.47854424,
0.60964876,
0.71863150,
0.76113180,
0.77840980,
0.71724670]
img1 = tf.cast(img1, tf.float32) / 255.0
img2 = tf.cast(img2, tf.float32) / 255.0
result = image_ops.ssim_multiscale(img1, img2, max_val=1)
self.assertAllClose(result, ref, rtol=1e-5, atol=1e-5)
# Test without specifying dynamic range, which should default to 1 for
# `tf.float32`.
result = image_ops.ssim_multiscale(img1, img2)
self.assertAllClose(result, ref, rtol=1e-5, atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_msssim_3d_scalar(self):
"""Test 3D MS-SSIM with scalar batch."""
# Kills testing hardware.
# img1 = self.data['psnr/3d/img1']
# img2 = self.data['psnr/3d/img2']
# def upsample_3d(img, scale):
# img = tf.repeat(img, scale, axis=1)
# img = tf.repeat(img, scale, axis=2)
# img = tf.repeat(img, scale, axis=3)
# return img
# img1 = upsample_3d(img1, 3)
# img2 = upsample_3d(img2, 3)
# img1 = img1[0, ...]
# img2 = img2[0, ...]
# img1 = tf.expand_dims(img1, -1)
# img2 = tf.expand_dims(img2, -1)
# result = image_ops.ssim_multiscale(img1, img2, rank=3)
# self.assertAllClose(result, 0.96301770)
def test_msssim_input_size_error(self):
"""Test MS-SSIM with an invalid rank."""
img1 = tf.zeros((4, 160, 160, 1))
img2 = tf.zeros((4, 160, 160, 1))
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
"spatial dimensions must have size of at least 161"):
image_ops.ssim_multiscale(img1, img2)
img1 = tf.zeros((4, 161, 161, 1))
img2 = tf.zeros((4, 161, 161, 1))
image_ops.ssim_multiscale(img1, img2)
class CentralCropTest(test_util.TestCase):
"""Tests for central cropping operation."""
# pylint: disable=missing-function-docstring
@test_util.run_in_graph_and_eager_modes
def test_cropping(self):
"""Test cropping."""
shape = [2, 2]
x_np = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
y_np = np.array([[6, 7], [10, 11]])
y_tf = image_ops.central_crop(x_np, shape)
self.assertAllEqual(y_tf, y_np)
@test_util.run_in_graph_and_eager_modes
def test_cropping_unknown_dim(self):
"""Test cropping with an unknown dimension."""
shape = [-1, 2]
x_np = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
y_np = np.array([[2, 3], [6, 7], [10, 11], [14, 15]])
y_tf = image_ops.central_crop(x_np, shape)
self.assertAllEqual(y_tf, y_np)
class SymmetricPadOrCropTest(test_util.TestCase):
"""Tests for symmetric padding/cropping operation."""
# pylint: disable=missing-function-docstring
@test_util.run_in_graph_and_eager_modes
def test_cropping(self):
"""Test cropping."""
shape = [2, 2]
x_np = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
y_np = np.array([[6, 7], [10, 11]])
y_tf = image_ops.resize_with_crop_or_pad(x_np, shape)
self.assertAllEqual(y_tf, y_np)
@test_util.run_in_graph_and_eager_modes
def test_padding(self):
"""Test padding."""
shape = [4, 4]
x_np = np.array([[1, 2], [3, 4]])
y_np = np.array([[0, 0, 0, 0],
[0, 1, 2, 0],
[0, 3, 4, 0],
[0, 0, 0, 0]])
y_tf = image_ops.resize_with_crop_or_pad(x_np, shape)
self.assertAllEqual(y_tf, y_np)
@test_util.run_in_graph_and_eager_modes
def test_padding_non_default_mode(self):
"""Test padding."""
shape = [7]
x_np = np.array([1, 2, 3])
y_np = np.array([3, 2, 1, 2, 3, 2, 1])
y_tf = image_ops.resize_with_crop_or_pad(x_np, shape,
padding_mode='reflect')
self.assertAllEqual(y_tf, y_np)
@test_util.run_in_graph_and_eager_modes
def test_padding_cropping(self):
"""Test combined cropping and padding."""
shape = [1, 5]
x_np = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
y_np = np.array([[0, 4, 5, 6, 0]])
y_tf = image_ops.resize_with_crop_or_pad(x_np, shape)
self.assertAllEqual(y_tf, y_np)
@test_util.run_in_graph_and_eager_modes
def test_padding_cropping_unknown_dimension(self):
"""Test combined cropping and padding with an unknown dimension."""
shape = [1, -1]
x_np = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
y_np = np.array([[4, 5, 6]])
y_tf = image_ops.resize_with_crop_or_pad(x_np, shape)
self.assertAllEqual(y_tf, y_np)
def test_static_shape(self):
"""Test static shapes."""
def get_fn(target_shape):
return lambda x: image_ops.resize_with_crop_or_pad(x, target_shape)
self._assert_static_shape(get_fn([1, -1]), [None, 3], [1, 3])
self._assert_static_shape(get_fn([-1, -1]), [None, 3], [None, 3])
self._assert_static_shape(get_fn([-1, 5]), [None, 3], [None, 5])
self._assert_static_shape(get_fn([5, 5]), [None, None], [5, 5])
self._assert_static_shape(get_fn([-1, -1]), [None, None], [None, None])
self._assert_static_shape(
get_fn([144, 144, 144, -1]), [None, None, None, 1], [144, 144, 144, 1])
def _assert_static_shape(self, fn, input_shape, expected_output_shape):
"""Asserts that function returns the expected static shapes."""
@tf.function
def graph_fn(x):
return fn(x)
input_spec = tf.TensorSpec(shape=input_shape)
concrete_fn = graph_fn.get_concrete_function(input_spec)
self.assertAllEqual(concrete_fn.structured_outputs.shape,
expected_output_shape)
class TotalVariationTest(test_util.TestCase):
"""Tests for operation `total_variation`."""
@test_util.run_in_graph_and_eager_modes
def test_total_variation(self):
"""Test total variation."""
# Example image.
img = [[1, 2, 4, 4],
[4, 7, 2, 1],
[8, 2, 4, 3],
[2, 2, 1, 6]]
# The following are the sum of absolute differences between the pixels.
# sum row dif = (4-1) + (8-4) + (8-2) + (7-2) + (7-2) + (2-2)
# + (4-2) + (4-2) + (4-1) + (4-1) + (3-1) + (6-3)
# = (3 + 4 + 6) + (5 + 5 + 0) + (2 + 2 + 3) + (3 + 2 + 3)
# = 13 + 10 + 7 + 8 = 38
# sum col dif = (2-1) + (4-2) + (4-4) + (7-4) + (7-2) + (2-1)
# + (8-2) + (4-2) + (4-3) + (2-2) + (2-1) + (6-1)
    #             = (1 + 2 + 0) + (3 + 5 + 1) + (6 + 2 + 1) + (0 + 1 + 5)
    #             = 3 + 9 + 9 + 6 = 27
    # total variation = 38 + 27 = 65
result = image_ops.total_variation(img)
self.assertAllClose(result, 65)
result = image_ops.total_variation(img, axis=0)
self.assertAllClose(result, [13, 10, 7, 8])
result = image_ops.total_variation(img, axis=1)
self.assertAllClose(result, [3, 9, 9, 6])
# Test with `keepdims=True`.
result = image_ops.total_variation(img, axis=0, keepdims=True)
self.assertAllClose(result, tf.reshape([13, 10, 7, 8], [1, 4]))
result = image_ops.total_variation(img, axis=1, keepdims=True)
self.assertAllClose(result, tf.reshape([3, 9, 9, 6], [4, 1]))
# Test float by scaling pixel values. Total variation scales as well.
result = image_ops.total_variation(1.25 * np.array(img))
self.assertAllClose(result, 1.25 * 65)
# Test complex image.
result = image_ops.total_variation(tf.dtypes.complex(
1.25 * np.array(img), 1.5 * np.array(img)))
self.assertAllClose(result, np.sqrt((1.25 * 65) ** 2 + (1.5 * 65) ** 2))
class ExtractGlimpsesTest(test_util.TestCase):
"""Tests for the `extract_glimpses` operation."""
@test_util.run_in_graph_and_eager_modes
def test_extract_glimpses(self):
"""Test `extract_glimpses` operation."""
images = tf.reshape(tf.range(40), [1, 4, 5, 2])
sizes = [2, 3]
offsets = [[2, 2], [0, 1]]
expected = [[[24, 25, 26, 27, 28, 29, 34, 35, 36, 37, 38, 39],
[2, 3, 4, 5, 6, 7, 12, 13, 14, 15, 16, 17]]]
patches = image_ops.extract_glimpses(images, sizes, offsets)
self.assertAllEqual(patches, expected)
class PhantomTest(test_util.TestCase):
"""Tests for `phantom` op."""
@classmethod
def setUpClass(cls):
"""Prepare tests."""
super().setUpClass()
cls.data = io_util.read_hdf5('tests/data/phantoms.h5')
@parameterized.parameters('shepp_logan', 'modified_shepp_logan')
@test_util.run_in_graph_and_eager_modes
def test_shepp_logan(self, phantom_type):
"""Test 2D Shepp-Logan phantom against MATLAB results."""
expected = self.data[phantom_type + '/2d']
result = image_ops.phantom(phantom_type=phantom_type)
self.assertAllClose(result, expected)
@parameterized.parameters('kak_roberts', 'modified_kak_roberts')
@test_util.run_in_graph_and_eager_modes
def test_kak_roberts(self, phantom_type):
"""Test 3D Kak-Roberts phantom against saved results."""
expected = self.data[phantom_type + '/3d']
result = image_ops.phantom(phantom_type=phantom_type, shape=[128, 128, 128])
self.assertAllClose(result, expected)
@test_util.run_in_graph_and_eager_modes
def test_default_2d(self):
"""Test 2D default."""
expected = self.data['modified_shepp_logan/2d']
result = image_ops.phantom()
self.assertAllClose(result, expected)
@test_util.run_in_graph_and_eager_modes
def test_default_3d(self):
"""Test 3D default."""
expected = self.data['modified_kak_roberts/3d']
result = image_ops.phantom(shape=[128, 128, 128])
self.assertAllClose(result, expected)
@parameterized.product(rank=[2, 3],
dtype=[tf.float32, tf.complex64])
@test_util.run_in_graph_and_eager_modes
def test_parallel_imaging(self, rank, dtype): # pylint: disable=missing-param-doc
"""Test parallel imaging phantom."""
image, sens = image_ops.phantom(shape=[64] * rank,
num_coils=12,
dtype=dtype,
return_sensitivities=True)
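    # Reference check: each coil image is expected to equal the single-coil
    # phantom modulated by the corresponding birdcage sensitivity map.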
sens_ref = image_ops._birdcage_sensitivities([64] * rank, 12, dtype=dtype) # pylint: disable=protected-access
image_ref = image_ops.phantom(shape=[64] * rank, dtype=dtype) * sens
self.assertAllClose(image, image_ref)
self.assertAllClose(sens, sens_ref)
@parameterized.product(shape=[[32, 32], [128, 64], [32, 32, 32]],
num_coils=[4, 6],
birdcage_radius=[1.5, 1.3],
num_rings=[2])
@test_util.run_in_graph_and_eager_modes
def test_birdcage_sensitivities(self, # pylint: disable=missing-param-doc
shape,
num_coils,
birdcage_radius,
num_rings):
"""Test birdcage sensitivities."""
tf_sens = image_ops._birdcage_sensitivities(shape, # pylint: disable=protected-access
num_coils,
birdcage_radius=birdcage_radius,
num_rings=num_rings)
np_sens = self._np_birdcage_sensitivities(
[num_coils] + shape, r=birdcage_radius,
nzz=np.ceil(num_coils / num_rings))
self.assertAllClose(tf_sens, np_sens, rtol=1e-4, atol=1e-4)
def _np_birdcage_sensitivities(self, shape, r=1.5, nzz=8, dtype=np.complex64): # pylint: disable=missing-param-doc
"""Simulate birdcage coil sensitivities.
Implementation from:
https://github.com/mikgroup/sigpy/blob/v0.1.23/sigpy/mri/sim.py
"""
if len(shape) == 3:
nc, ny, nx = shape
c, y, x = np.mgrid[:nc, :ny, :nx]
coilx = r * np.cos(c * (2 * np.pi / nc))
coily = r * np.sin(c * (2 * np.pi / nc))
coil_phs = -c * (2 * np.pi / nc)
x_co = (x - nx / 2.0) / (nx / 2.0) - coilx
y_co = (y - ny / 2.0) / (ny / 2.0) - coily
rr = np.sqrt(x_co ** 2 + y_co ** 2)
phi = np.arctan2(x_co, -y_co) + coil_phs
out = (1.0 / rr) * np.exp(1j * phi)
elif len(shape) == 4:
nc, nz, ny, nx = shape
c, z, y, x = np.mgrid[:nc, :nz, :ny, :nx]
coilx = r * np.cos(c * (2 * np.pi / nzz))
coily = r * np.sin(c * (2 * np.pi / nzz))
coilz = np.floor(c / nzz) - 0.5 * (np.ceil(nc / nzz) - 1)
coil_phs = -(c + np.floor(c / nzz)) * (2 * np.pi / nzz)
x_co = (x - nx / 2.0) / (nx / 2.0) - coilx
y_co = (y - ny / 2.0) / (ny / 2.0) - coily
z_co = (z - nz / 2.0) / (nz / 2.0) - coilz
rr = (x_co**2 + y_co**2 + z_co**2)**0.5
phi = np.arctan2(x_co, -y_co) + coil_phs
out = (1 / rr) * np.exp(1j * phi)
else:
raise ValueError('Can only generate shape with length 3 or 4')
rss = sum(abs(out) ** 2, 0)**0.5
out /= rss
return out.astype(dtype)
if __name__ == '__main__':
tf.test.main()
| [
"tensorflow_mri.python.ops.image_ops.psnr3d",
"numpy.arctan2",
"tensorflow_mri.python.util.io_util.read_hdf5",
"tensorflow_mri.python.ops.image_ops.total_variation",
"tensorflow.reshape",
"numpy.floor",
"tensorflow_mri.python.ops.image_ops.extract_glimpses",
"tensorflow_mri.python.ops.image_ops._birdc... | [((24037, 24100), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['"""shepp_logan"""', '"""modified_shepp_logan"""'], {}), "('shepp_logan', 'modified_shepp_logan')\n", (24061, 24100), False, 'from absl.testing import parameterized\n'), ((24400, 24463), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['"""kak_roberts"""', '"""modified_kak_roberts"""'], {}), "('kak_roberts', 'modified_kak_roberts')\n", (24424, 24463), False, 'from absl.testing import parameterized\n'), ((25258, 25326), 'absl.testing.parameterized.product', 'parameterized.product', ([], {'rank': '[2, 3]', 'dtype': '[tf.float32, tf.complex64]'}), '(rank=[2, 3], dtype=[tf.float32, tf.complex64])\n', (25279, 25326), False, 'from absl.testing import parameterized\n'), ((26011, 26141), 'absl.testing.parameterized.product', 'parameterized.product', ([], {'shape': '[[32, 32], [128, 64], [32, 32, 32]]', 'num_coils': '[4, 6]', 'birdcage_radius': '[1.5, 1.3]', 'num_rings': '[2]'}), '(shape=[[32, 32], [128, 64], [32, 32, 32]], num_coils=\n [4, 6], birdcage_radius=[1.5, 1.3], num_rings=[2])\n', (26032, 26141), False, 'from absl.testing import parameterized\n'), ((28576, 28590), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (28588, 28590), True, 'import tensorflow as tf\n'), ((1144, 1193), 'tensorflow_mri.python.util.io_util.read_hdf5', 'io_util.read_hdf5', (['"""tests/data/image_ops_data.h5"""'], {}), "('tests/data/image_ops_data.h5')\n", (1161, 1193), False, 'from tensorflow_mri.python.util import io_util\n'), ((1398, 1422), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (1412, 1422), True, 'import tensorflow as tf\n'), ((1434, 1458), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (1448, 1458), True, 'import tensorflow as tf\n'), ((1473, 1520), 'tensorflow_mri.python.ops.image_ops.psnr', 'image_ops.psnr', (['img1', 'img2'], {'max_val': '(255)', 'rank': '(2)'}), '(img1, img2, max_val=255, rank=2)\n', (1487, 1520), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((1580, 1621), 'tensorflow_mri.python.ops.image_ops.psnr2d', 'image_ops.psnr2d', (['img1', 'img2'], {'max_val': '(255)'}), '(img1, img2, max_val=255)\n', (1596, 1621), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((1889, 1913), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (1903, 1913), True, 'import tensorflow as tf\n'), ((1925, 1949), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (1939, 1949), True, 'import tensorflow as tf\n'), ((1961, 1984), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(0)'], {}), '(img1, 0)\n', (1975, 1984), True, 'import tensorflow as tf\n'), ((1996, 2019), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(0)'], {}), '(img2, 0)\n', (2010, 2019), True, 'import tensorflow as tf\n'), ((2034, 2081), 'tensorflow_mri.python.ops.image_ops.psnr', 'image_ops.psnr', (['img1', 'img2'], {'max_val': '(255)', 'rank': '(2)'}), '(img1, img2, max_val=255, rank=2)\n', (2048, 2081), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((2518, 2557), 'tensorflow_mri.python.ops.image_ops.psnr', 'image_ops.psnr', (['img1', 'img2'], {'max_val': '(255)'}), '(img1, img2, max_val=255)\n', (2532, 2557), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((2704, 2730), 'tensorflow_mri.python.ops.image_ops.psnr', 'image_ops.psnr', (['img1', 'img2'], {}), '(img1, img2)\n', 
(2718, 2730), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((2992, 3033), 'tensorflow.reshape', 'tf.reshape', (['img1', '((3, 2) + img1.shape[1:])'], {}), '(img1, (3, 2) + img1.shape[1:])\n', (3002, 3033), True, 'import tensorflow as tf\n'), ((3045, 3086), 'tensorflow.reshape', 'tf.reshape', (['img2', '((3, 2) + img2.shape[1:])'], {}), '(img2, (3, 2) + img2.shape[1:])\n', (3055, 3086), True, 'import tensorflow as tf\n'), ((3218, 3265), 'tensorflow_mri.python.ops.image_ops.psnr', 'image_ops.psnr', (['img1', 'img2'], {'max_val': '(255)', 'rank': '(2)'}), '(img1, img2, max_val=255, rank=2)\n', (3232, 3265), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((3804, 3841), 'tensorflow_mri.python.ops.image_ops.psnr', 'image_ops.psnr', (['img1', 'img2'], {'max_val': '(1)'}), '(img1, img2, max_val=1)\n', (3818, 3841), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((3988, 4014), 'tensorflow_mri.python.ops.image_ops.psnr', 'image_ops.psnr', (['img1', 'img2'], {}), '(img1, img2)\n', (4002, 4014), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((4305, 4329), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (4319, 4329), True, 'import tensorflow as tf\n'), ((4341, 4365), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (4355, 4365), True, 'import tensorflow as tf\n'), ((4380, 4414), 'tensorflow_mri.python.ops.image_ops.psnr', 'image_ops.psnr', (['img1', 'img2'], {'rank': '(3)'}), '(img1, img2, rank=3)\n', (4394, 4414), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((4795, 4819), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (4809, 4819), True, 'import tensorflow as tf\n'), ((4831, 4855), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (4845, 4855), True, 'import tensorflow as tf\n'), ((4870, 4909), 'tensorflow_mri.python.ops.image_ops.psnr', 'image_ops.psnr', (['img1', 'img2'], {'max_val': '(255)'}), '(img1, img2, max_val=255)\n', (4884, 4909), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((5290, 5314), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (5304, 5314), True, 'import tensorflow as tf\n'), ((5326, 5350), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (5340, 5350), True, 'import tensorflow as tf\n'), ((5363, 5404), 'tensorflow.reshape', 'tf.reshape', (['img1', '((3, 2) + img1.shape[1:])'], {}), '(img1, (3, 2) + img1.shape[1:])\n', (5373, 5404), True, 'import tensorflow as tf\n'), ((5416, 5457), 'tensorflow.reshape', 'tf.reshape', (['img2', '((3, 2) + img2.shape[1:])'], {}), '(img2, (3, 2) + img2.shape[1:])\n', (5426, 5457), True, 'import tensorflow as tf\n'), ((5472, 5519), 'tensorflow_mri.python.ops.image_ops.psnr', 'image_ops.psnr', (['img1', 'img2'], {'max_val': '(255)', 'rank': '(3)'}), '(img1, img2, max_val=255, rank=3)\n', (5486, 5519), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((5593, 5634), 'tensorflow_mri.python.ops.image_ops.psnr3d', 'image_ops.psnr3d', (['img1', 'img2'], {'max_val': '(255)'}), '(img1, img2, max_val=255)\n', (5609, 5634), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((5956, 5997), 'tensorflow.reshape', 'tf.reshape', (['img1', '((3, 2) + img1.shape[1:])'], {}), '(img1, (3, 2) + img1.shape[1:])\n', (5966, 5997), True, 'import tensorflow as tf\n'),
((6009, 6050), 'tensorflow.reshape', 'tf.reshape', (['img2', '((3, 2) + img2.shape[1:])'], {}), '(img2, (3, 2) + img2.shape[1:])\n', (6019, 6050), True, 'import tensorflow as tf\n'), ((6063, 6098), 'tensorflow.transpose', 'tf.transpose', (['img1', '[0, 2, 3, 4, 1]'], {}), '(img1, [0, 2, 3, 4, 1])\n', (6075, 6098), True, 'import tensorflow as tf\n'), ((6110, 6145), 'tensorflow.transpose', 'tf.transpose', (['img2', '[0, 2, 3, 4, 1]'], {}), '(img2, [0, 2, 3, 4, 1])\n', (6122, 6145), True, 'import tensorflow as tf\n'), ((6160, 6207), 'tensorflow_mri.python.ops.image_ops.psnr', 'image_ops.psnr', (['img1', 'img2'], {'max_val': '(255)', 'rank': '(3)'}), '(img1, img2, max_val=255, rank=3)\n', (6174, 6207), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((6591, 6615), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (6605, 6615), True, 'import tensorflow as tf\n'), ((6627, 6651), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (6641, 6651), True, 'import tensorflow as tf\n'), ((6996, 7045), 'tensorflow_mri.python.util.io_util.read_hdf5', 'io_util.read_hdf5', (['"""tests/data/image_ops_data.h5"""'], {}), "('tests/data/image_ops_data.h5')\n", (7013, 7045), False, 'from tensorflow_mri.python.util import io_util\n'), ((7250, 7274), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (7264, 7274), True, 'import tensorflow as tf\n'), ((7286, 7310), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (7300, 7310), True, 'import tensorflow as tf\n'), ((7325, 7372), 'tensorflow_mri.python.ops.image_ops.ssim', 'image_ops.ssim', (['img1', 'img2'], {'max_val': '(255)', 'rank': '(2)'}), '(img1, img2, max_val=255, rank=2)\n', (7339, 7372), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((7661, 7685), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (7675, 7685), True, 'import tensorflow as tf\n'), ((7697, 7721), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (7711, 7721), True, 'import tensorflow as tf\n'), ((7733, 7756), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(0)'], {}), '(img1, 0)\n', (7747, 7756), True, 'import tensorflow as tf\n'), ((7768, 7791), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(0)'], {}), '(img2, 0)\n', (7782, 7791), True, 'import tensorflow as tf\n'), ((7806, 7853), 'tensorflow_mri.python.ops.image_ops.ssim', 'image_ops.ssim', (['img1', 'img2'], {'max_val': '(255)', 'rank': '(2)'}), '(img1, img2, max_val=255, rank=2)\n', (7820, 7853), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((8293, 8332), 'tensorflow_mri.python.ops.image_ops.ssim', 'image_ops.ssim', (['img1', 'img2'], {'max_val': '(255)'}), '(img1, img2, max_val=255)\n', (8307, 8332), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((8501, 8527), 'tensorflow_mri.python.ops.image_ops.ssim', 'image_ops.ssim', (['img1', 'img2'], {}), '(img1, img2)\n', (8515, 8527), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((8811, 8852), 'tensorflow.reshape', 'tf.reshape', (['img1', '((3, 2) + img1.shape[1:])'], {}), '(img1, (3, 2) + img1.shape[1:])\n', (8821, 8852), True, 'import tensorflow as tf\n'), ((8864, 8905), 'tensorflow.reshape', 'tf.reshape', (['img2', '((3, 2) + img2.shape[1:])'], {}), '(img2, (3, 2) + img2.shape[1:])\n', (8874, 8905), True, 'import tensorflow as tf\n'),
((9019, 9066), 'tensorflow_mri.python.ops.image_ops.ssim', 'image_ops.ssim', (['img1', 'img2'], {'max_val': '(255)', 'rank': '(2)'}), '(img1, img2, max_val=255, rank=2)\n', (9033, 9066), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((9610, 9647), 'tensorflow_mri.python.ops.image_ops.ssim', 'image_ops.ssim', (['img1', 'img2'], {'max_val': '(1)'}), '(img1, img2, max_val=1)\n', (9624, 9647), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((9816, 9842), 'tensorflow_mri.python.ops.image_ops.ssim', 'image_ops.ssim', (['img1', 'img2'], {}), '(img1, img2)\n', (9830, 9842), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((10155, 10179), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (10169, 10179), True, 'import tensorflow as tf\n'), ((10191, 10215), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (10205, 10215), True, 'import tensorflow as tf\n'), ((10230, 10264), 'tensorflow_mri.python.ops.image_ops.ssim', 'image_ops.ssim', (['img1', 'img2'], {'rank': '(3)'}), '(img1, img2, rank=3)\n', (10244, 10264), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((10603, 10627), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (10617, 10627), True, 'import tensorflow as tf\n'), ((10639, 10663), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (10653, 10663), True, 'import tensorflow as tf\n'), ((10678, 10717), 'tensorflow_mri.python.ops.image_ops.ssim', 'image_ops.ssim', (['img1', 'img2'], {'max_val': '(255)'}), '(img1, img2, max_val=255)\n', (10692, 10717), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((11118, 11142), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (11132, 11142), True, 'import tensorflow as tf\n'), ((11154, 11178), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (11168, 11178), True, 'import tensorflow as tf\n'), ((11191, 11232), 'tensorflow.reshape', 'tf.reshape', (['img1', '((2, 2) + img1.shape[1:])'], {}), '(img1, (2, 2) + img1.shape[1:])\n', (11201, 11232), True, 'import tensorflow as tf\n'), ((11244, 11285), 'tensorflow.reshape', 'tf.reshape', (['img2', '((2, 2) + img2.shape[1:])'], {}), '(img2, (2, 2) + img2.shape[1:])\n', (11254, 11285), True, 'import tensorflow as tf\n'), ((11300, 11347), 'tensorflow_mri.python.ops.image_ops.ssim', 'image_ops.ssim', (['img1', 'img2'], {'max_val': '(255)', 'rank': '(3)'}), '(img1, img2, max_val=255, rank=3)\n', (11314, 11347), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((12428, 12452), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (12442, 12452), True, 'import tensorflow as tf\n'), ((12464, 12488), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (12478, 12488), True, 'import tensorflow as tf\n'), ((12846, 12895), 'tensorflow_mri.python.util.io_util.read_hdf5', 'io_util.read_hdf5', (['"""tests/data/image_ops_data.h5"""'], {}), "('tests/data/image_ops_data.h5')\n", (12863, 12895), False, 'from tensorflow_mri.python.util import io_util\n'), ((13105, 13129), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (13119, 13129), True, 'import tensorflow as tf\n'), ((13141, 13165), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (13155, 13165), True, 'import tensorflow as tf\n'),
((13180, 13238), 'tensorflow_mri.python.ops.image_ops.ssim_multiscale', 'image_ops.ssim_multiscale', (['img1', 'img2'], {'max_val': '(255)', 'rank': '(2)'}), '(img1, img2, max_val=255, rank=2)\n', (13205, 13238), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((13296, 13348), 'tensorflow_mri.python.ops.image_ops.ssim2d_multiscale', 'image_ops.ssim2d_multiscale', (['img1', 'img2'], {'max_val': '(255)'}), '(img1, img2, max_val=255)\n', (13323, 13348), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((13619, 13643), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(-1)'], {}), '(img1, -1)\n', (13633, 13643), True, 'import tensorflow as tf\n'), ((13655, 13679), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(-1)'], {}), '(img2, -1)\n', (13669, 13679), True, 'import tensorflow as tf\n'), ((13691, 13714), 'tensorflow.expand_dims', 'tf.expand_dims', (['img1', '(0)'], {}), '(img1, 0)\n', (13705, 13714), True, 'import tensorflow as tf\n'), ((13726, 13749), 'tensorflow.expand_dims', 'tf.expand_dims', (['img2', '(0)'], {}), '(img2, 0)\n', (13740, 13749), True, 'import tensorflow as tf\n'), ((13764, 13822), 'tensorflow_mri.python.ops.image_ops.ssim_multiscale', 'image_ops.ssim_multiscale', (['img1', 'img2'], {'max_val': '(255)', 'rank': '(2)'}), '(img1, img2, max_val=255, rank=2)\n', (13789, 13822), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((14257, 14307), 'tensorflow_mri.python.ops.image_ops.ssim_multiscale', 'image_ops.ssim_multiscale', (['img1', 'img2'], {'max_val': '(255)'}), '(img1, img2, max_val=255)\n', (14282, 14307), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((14476, 14513), 'tensorflow_mri.python.ops.image_ops.ssim_multiscale', 'image_ops.ssim_multiscale', (['img1', 'img2'], {}), '(img1, img2)\n', (14501, 14513), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((14802, 14843), 'tensorflow.reshape', 'tf.reshape', (['img1', '((3, 2) + img1.shape[1:])'], {}), '(img1, (3, 2) + img1.shape[1:])\n', (14812, 14843), True, 'import tensorflow as tf\n'), ((14855, 14896), 'tensorflow.reshape', 'tf.reshape', (['img2', '((3, 2) + img2.shape[1:])'], {}), '(img2, (3, 2) + img2.shape[1:])\n', (14865, 14896), True, 'import tensorflow as tf\n'), ((15022, 15080), 'tensorflow_mri.python.ops.image_ops.ssim_multiscale', 'image_ops.ssim_multiscale', (['img1', 'img2'], {'max_val': '(255)', 'rank': '(2)'}), '(img1, img2, max_val=255, rank=2)\n', (15047, 15080), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((15154, 15206), 'tensorflow_mri.python.ops.image_ops.ssim2d_multiscale', 'image_ops.ssim2d_multiscale', (['img1', 'img2'], {'max_val': '(255)'}), '(img1, img2, max_val=255)\n', (15181, 15206), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((15767, 15815), 'tensorflow_mri.python.ops.image_ops.ssim_multiscale', 'image_ops.ssim_multiscale', (['img1', 'img2'], {'max_val': '(1)'}), '(img1, img2, max_val=1)\n', (15792, 15815), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((15984, 16021), 'tensorflow_mri.python.ops.image_ops.ssim_multiscale', 'image_ops.ssim_multiscale', (['img1', 'img2'], {}), '(img1, img2)\n', (16009, 16021), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((16906, 16932), 'tensorflow.zeros', 'tf.zeros', (['(4, 160, 160, 1)'], {}), '((4, 160, 160, 1))\n', (16914, 16932), True, 'import tensorflow as tf\n'), ((16944, 16970), 'tensorflow.zeros', 'tf.zeros', (['(4, 160, 160, 1)'], {}), '((4, 160, 160, 1))\n', (16952, 16970), True, 'import tensorflow as tf\n'), ((17163, 17189), 'tensorflow.zeros', 'tf.zeros', (['(4, 161, 161, 1)'], {}), '((4, 161, 161, 1))\n', (17171, 17189), True, 'import tensorflow as tf\n'),
((17201, 17227), 'tensorflow.zeros', 'tf.zeros', (['(4, 161, 161, 1)'], {}), '((4, 161, 161, 1))\n', (17209, 17227), True, 'import tensorflow as tf\n'), ((17233, 17270), 'tensorflow_mri.python.ops.image_ops.ssim_multiscale', 'image_ops.ssim_multiscale', (['img1', 'img2'], {}), '(img1, img2)\n', (17258, 17270), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((17534, 17607), 'numpy.array', 'np.array', (['[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]'], {}), '([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])\n', (17542, 17607), True, 'import numpy as np\n'), ((17682, 17710), 'numpy.array', 'np.array', (['[[6, 7], [10, 11]]'], {}), '([[6, 7], [10, 11]])\n', (17690, 17710), True, 'import numpy as np\n'), ((17723, 17758), 'tensorflow_mri.python.ops.image_ops.central_crop', 'image_ops.central_crop', (['x_np', 'shape'], {}), '(x_np, shape)\n', (17745, 17758), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((17960, 18033), 'numpy.array', 'np.array', (['[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]'], {}), '([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])\n', (17968, 18033), True, 'import numpy as np\n'), ((18108, 18154), 'numpy.array', 'np.array', (['[[2, 3], [6, 7], [10, 11], [14, 15]]'], {}), '([[2, 3], [6, 7], [10, 11], [14, 15]])\n', (18116, 18154), True, 'import numpy as np\n'), ((18167, 18202), 'tensorflow_mri.python.ops.image_ops.central_crop', 'image_ops.central_crop', (['x_np', 'shape'], {}), '(x_np, shape)\n', (18189, 18202), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((18520, 18593), 'numpy.array', 'np.array', (['[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]'], {}), '([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])\n', (18528, 18593), True, 'import numpy as np\n'), ((18668, 18696), 'numpy.array', 'np.array', (['[[6, 7], [10, 11]]'], {}), '([[6, 7], [10, 11]])\n', (18676, 18696), True, 'import numpy as np\n'), ((18709, 18755), 'tensorflow_mri.python.ops.image_ops.resize_with_crop_or_pad', 'image_ops.resize_with_crop_or_pad', (['x_np', 'shape'], {}), '(x_np, shape)\n', (18742, 18755), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((18916, 18942), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (18924, 18942), True, 'import numpy as np\n'), ((18954, 19020), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [0, 1, 2, 0], [0, 3, 4, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [0, 1, 2, 0], [0, 3, 4, 0], [0, 0, 0, 0]])\n', (18962, 19020), True, 'import numpy as np\n'), ((19096, 19142), 'tensorflow_mri.python.ops.image_ops.resize_with_crop_or_pad', 'image_ops.resize_with_crop_or_pad', (['x_np', 'shape'], {}), '(x_np, shape)\n', (19129, 19142), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((19317, 19336), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (19325, 19336), True, 'import numpy as np\n'), ((19348, 19379), 'numpy.array', 'np.array', (['[3, 2, 1, 2, 3, 2, 1]'], {}), '([3, 2, 1, 2, 3, 2, 1])\n', (19356, 19379), True, 'import numpy as np\n'), ((19392, 19462), 'tensorflow_mri.python.ops.image_ops.resize_with_crop_or_pad', 'image_ops.resize_with_crop_or_pad', (['x_np', 'shape'], {'padding_mode': '"""reflect"""'}), "(x_np, shape, padding_mode='reflect')\n", (19425, 19462), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((19699, 19742), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (19707, 19742), True, 'import numpy as np\n'),
((19796, 19823), 'numpy.array', 'np.array', (['[[0, 4, 5, 6, 0]]'], {}), '([[0, 4, 5, 6, 0]])\n', (19804, 19823), True, 'import numpy as np\n'), ((19836, 19882), 'tensorflow_mri.python.ops.image_ops.resize_with_crop_or_pad', 'image_ops.resize_with_crop_or_pad', (['x_np', 'shape'], {}), '(x_np, shape)\n', (19869, 19882), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((20119, 20162), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (20127, 20162), True, 'import numpy as np\n'), ((20216, 20237), 'numpy.array', 'np.array', (['[[4, 5, 6]]'], {}), '([[4, 5, 6]])\n', (20224, 20237), True, 'import numpy as np\n'), ((20250, 20296), 'tensorflow_mri.python.ops.image_ops.resize_with_crop_or_pad', 'image_ops.resize_with_crop_or_pad', (['x_np', 'shape'], {}), '(x_np, shape)\n', (20283, 20296), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((21179, 21211), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (21192, 21211), True, 'import tensorflow as tf\n'), ((22285, 22315), 'tensorflow_mri.python.ops.image_ops.total_variation', 'image_ops.total_variation', (['img'], {}), '(img)\n', (22310, 22315), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((22366, 22404), 'tensorflow_mri.python.ops.image_ops.total_variation', 'image_ops.total_variation', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (22391, 22404), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((22467, 22505), 'tensorflow_mri.python.ops.image_ops.total_variation', 'image_ops.total_variation', (['img'], {'axis': '(1)'}), '(img, axis=1)\n', (22492, 22505), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((22599, 22652), 'tensorflow_mri.python.ops.image_ops.total_variation', 'image_ops.total_variation', (['img'], {'axis': '(0)', 'keepdims': '(True)'}), '(img, axis=0, keepdims=True)\n', (22624, 22652), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((22735, 22788), 'tensorflow_mri.python.ops.image_ops.total_variation', 'image_ops.total_variation', (['img'], {'axis': '(1)', 'keepdims': '(True)'}), '(img, axis=1, keepdims=True)\n', (22760, 22788), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((23718, 23768), 'tensorflow_mri.python.ops.image_ops.extract_glimpses', 'image_ops.extract_glimpses', (['images', 'sizes', 'offsets'], {}), '(images, sizes, offsets)\n', (23744, 23768), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((23989, 24032), 'tensorflow_mri.python.util.io_util.read_hdf5', 'io_util.read_hdf5', (['"""tests/data/phantoms.h5"""'], {}), "('tests/data/phantoms.h5')\n", (24006, 24032), False, 'from tensorflow_mri.python.util import io_util\n'), ((24309, 24353), 'tensorflow_mri.python.ops.image_ops.phantom', 'image_ops.phantom', ([], {'phantom_type': 'phantom_type'}), '(phantom_type=phantom_type)\n', (24326, 24353), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((24671, 24738), 'tensorflow_mri.python.ops.image_ops.phantom', 'image_ops.phantom', ([], {'phantom_type': 'phantom_type', 'shape': '[128, 128, 128]'}), '(phantom_type=phantom_type, shape=[128, 128, 128])\n', (24688, 24738), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((24945, 24964), 'tensorflow_mri.python.ops.image_ops.phantom', 'image_ops.phantom', ([], {}), '()\n', (24962, 24964), False, 'from tensorflow_mri.python.ops import image_ops\n'),
((25171, 25211), 'tensorflow_mri.python.ops.image_ops.phantom', 'image_ops.phantom', ([], {'shape': '[128, 128, 128]'}), '(shape=[128, 128, 128])\n', (25188, 25211), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((25537, 25631), 'tensorflow_mri.python.ops.image_ops.phantom', 'image_ops.phantom', ([], {'shape': '([64] * rank)', 'num_coils': '(12)', 'dtype': 'dtype', 'return_sensitivities': '(True)'}), '(shape=[64] * rank, num_coils=12, dtype=dtype,\n return_sensitivities=True)\n', (25554, 25631), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((25752, 25815), 'tensorflow_mri.python.ops.image_ops._birdcage_sensitivities', 'image_ops._birdcage_sensitivities', (['([64] * rank)', '(12)'], {'dtype': 'dtype'}), '([64] * rank, 12, dtype=dtype)\n', (25785, 25815), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((26566, 26676), 'tensorflow_mri.python.ops.image_ops._birdcage_sensitivities', 'image_ops._birdcage_sensitivities', (['shape', 'num_coils'], {'birdcage_radius': 'birdcage_radius', 'num_rings': 'num_rings'}), '(shape, num_coils, birdcage_radius=\n birdcage_radius, num_rings=num_rings)\n', (26599, 26676), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((3711, 3736), 'tensorflow.cast', 'tf.cast', (['img1', 'tf.float32'], {}), '(img1, tf.float32)\n', (3718, 3736), True, 'import tensorflow as tf\n'), ((3756, 3781), 'tensorflow.cast', 'tf.cast', (['img2', 'tf.float32'], {}), '(img2, tf.float32)\n', (3763, 3781), True, 'import tensorflow as tf\n'), ((6547, 6578), 'tensorflow_mri.python.ops.image_ops.psnr', 'image_ops.psnr', (['img1', 'img2', '(255)'], {}), '(img1, img2, 255)\n', (6561, 6578), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((6779, 6810), 'tensorflow_mri.python.ops.image_ops.psnr', 'image_ops.psnr', (['img1', 'img2', '(255)'], {}), '(img1, img2, 255)\n', (6793, 6810), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((9517, 9542), 'tensorflow.cast', 'tf.cast', (['img1', 'tf.float32'], {}), '(img1, tf.float32)\n', (9524, 9542), True, 'import tensorflow as tf\n'), ((9562, 9587), 'tensorflow.cast', 'tf.cast', (['img2', 'tf.float32'], {}), '(img2, tf.float32)\n', (9569, 9587), True, 'import tensorflow as tf\n'), ((12384, 12415), 'tensorflow_mri.python.ops.image_ops.ssim', 'image_ops.ssim', (['img1', 'img2', '(255)'], {}), '(img1, img2, 255)\n', (12398, 12415), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((12616, 12647), 'tensorflow_mri.python.ops.image_ops.ssim', 'image_ops.ssim', (['img1', 'img2', '(255)'], {}), '(img1, img2, 255)\n', (12630, 12647), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((15674, 15699), 'tensorflow.cast', 'tf.cast', (['img1', 'tf.float32'], {}), '(img1, tf.float32)\n', (15681, 15699), True, 'import tensorflow as tf\n'), ((15719, 15744), 'tensorflow.cast', 'tf.cast', (['img2', 'tf.float32'], {}), '(img2, tf.float32)\n', (15726, 15744), True, 'import tensorflow as tf\n'), ((17113, 17150), 'tensorflow_mri.python.ops.image_ops.ssim_multiscale', 'image_ops.ssim_multiscale', (['img1', 'img2'], {}), '(img1, img2)\n', (17138, 17150), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((22685, 22719), 'tensorflow.reshape', 'tf.reshape', (['[13, 10, 7, 8]', '[1, 4]'], {}), '([13, 10, 7, 8], [1, 4])\n', (22695, 22719), True, 'import tensorflow as tf\n'), ((22821, 22853), 'tensorflow.reshape', 'tf.reshape', (['[3, 9, 9, 6]', '[4, 1]'], {}), '([3, 9, 9, 6], [4, 1])\n', (22831, 22853), True, 'import tensorflow as tf\n'),
((23203, 23246), 'numpy.sqrt', 'np.sqrt', (['((1.25 * 65) ** 2 + (1.5 * 65) ** 2)'], {}), '((1.25 * 65) ** 2 + (1.5 * 65) ** 2)\n', (23210, 23246), True, 'import numpy as np\n'), ((23496, 23508), 'tensorflow.range', 'tf.range', (['(40)'], {}), '(40)\n', (23504, 23508), True, 'import tensorflow as tf\n'), ((25868, 25917), 'tensorflow_mri.python.ops.image_ops.phantom', 'image_ops.phantom', ([], {'shape': '([64] * rank)', 'dtype': 'dtype'}), '(shape=[64] * rank, dtype=dtype)\n', (25885, 25917), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((27655, 27685), 'numpy.sqrt', 'np.sqrt', (['(x_co ** 2 + y_co ** 2)'], {}), '(x_co ** 2 + y_co ** 2)\n', (27662, 27685), True, 'import numpy as np\n'), ((20449, 20499), 'tensorflow_mri.python.ops.image_ops.resize_with_crop_or_pad', 'image_ops.resize_with_crop_or_pad', (['x', 'target_shape'], {}), '(x, target_shape)\n', (20482, 20499), False, 'from tensorflow_mri.python.ops import image_ops\n'), ((22976, 22989), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (22984, 22989), True, 'import numpy as np\n'), ((26959, 26989), 'numpy.ceil', 'np.ceil', (['(num_coils / num_rings)'], {}), '(num_coils / num_rings)\n', (26966, 26989), True, 'import numpy as np\n'), ((27430, 27458), 'numpy.cos', 'np.cos', (['(c * (2 * np.pi / nc))'], {}), '(c * (2 * np.pi / nc))\n', (27436, 27458), True, 'import numpy as np\n'), ((27477, 27505), 'numpy.sin', 'np.sin', (['(c * (2 * np.pi / nc))'], {}), '(c * (2 * np.pi / nc))\n', (27483, 27505), True, 'import numpy as np\n'), ((27698, 27721), 'numpy.arctan2', 'np.arctan2', (['x_co', '(-y_co)'], {}), '(x_co, -y_co)\n', (27708, 27721), True, 'import numpy as np\n'), ((27758, 27776), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (27764, 27776), True, 'import numpy as np\n'), ((23134, 23147), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (23142, 23147), True, 'import numpy as np\n'), ((23155, 23168), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (23163, 23168), True, 'import numpy as np\n'), ((27898, 27927), 'numpy.cos', 'np.cos', (['(c * (2 * np.pi / nzz))'], {}), '(c * (2 * np.pi / nzz))\n', (27904, 27927), True, 'import numpy as np\n'), ((27946, 27975), 'numpy.sin', 'np.sin', (['(c * (2 * np.pi / nzz))'], {}), '(c * (2 * np.pi / nzz))\n', (27952, 27975), True, 'import numpy as np\n'), ((27990, 28007), 'numpy.floor', 'np.floor', (['(c / nzz)'], {}), '(c / nzz)\n', (27998, 28007), True, 'import numpy as np\n'), ((28308, 28331), 'numpy.arctan2', 'np.arctan2', (['x_co', '(-y_co)'], {}), '(x_co, -y_co)\n', (28318, 28331), True, 'import numpy as np\n'), ((28366, 28384), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (28372, 28384), True, 'import numpy as np\n'), ((28017, 28034), 'numpy.ceil', 'np.ceil', (['(nc / nzz)'], {}), '(nc / nzz)\n', (28024, 28034), True, 'import numpy as np\n'), ((28063, 28080), 'numpy.floor', 'np.floor', (['(c / nzz)'], {}), '(c / nzz)\n', (28071, 28080), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Provided by <NAME>. Thanks!
"""
import sys
import numpy as np
from astropy import constants as c
import pyPLUTO as pp
GAMMA= 5.0 / 3.0
#First get scaling factors from the definitions file
inp=open('definitions.h','r')
for line in inp.readlines():
data=line.split()
if len(data)>1:
if data[1]=='UNIT_DENSITY':
UNIT_DENSITY=float(data[2])
elif data[1]=='UNIT_LENGTH':
UNIT_LENGTH=float(data[2])
elif data[1]=='UNIT_VELOCITY':
UNIT_VELOCITY=float(data[2])
#Compute derived scaling factors
UNIT_MASS=(UNIT_DENSITY*UNIT_LENGTH*UNIT_LENGTH*UNIT_LENGTH)
UNIT_ACCELERATION=(UNIT_VELOCITY*UNIT_VELOCITY/UNIT_LENGTH)
UNIT_FORCE=(UNIT_MASS*UNIT_ACCELERATION)
UNIT_TIME=(UNIT_LENGTH/UNIT_VELOCITY)
UNIT_PRESSURE=(UNIT_DENSITY*UNIT_VELOCITY*UNIT_VELOCITY)
#Compute the number that converts pressure to temperature
KELVIN=UNIT_VELOCITY*UNIT_VELOCITY*c.m_p.cgs/c.k_B.cgs
inp.close()
#open the actual data file
fname=int(sys.argv[1])
D=pp.pload(fname)
#Get the pressure and density
density=np.transpose(D.rho)*UNIT_DENSITY
pressure=np.transpose(D.prs)*UNIT_PRESSURE
#Compute the internal energy from the pressure
energy=pressure/(GAMMA - 1.)
#Compute/get number densities
#nd=density/(1.43*c.m_p.value)
try:
ne=np.transpose(D.ne)
nh=np.transpose(D.nh)
except AttributeError:
print("No ne or nh fields, using 1.43 as scaling to nh")
nh=density/(1.43*c.m_p.cgs).value
ne=nh*1.21
#Get the velocities
v_r=np.transpose(D.vx1)*UNIT_VELOCITY
v_t=np.transpose(D.vx2)*UNIT_VELOCITY
v_p=np.transpose(D.vx3)*UNIT_VELOCITY
#And compute the poloidal speed (the azimuthal component v_p is excluded)
v=np.sqrt(v_r**2+v_t**2)
#Get the cooling rates (if present)
try:
line_c=np.transpose(D.line_c)
xray_h=np.transpose(D.xray_h)
comp_c=np.transpose(D.comp_c)
comp_h=np.transpose(D.comp_h)
brem_c=np.transpose(D.brem_c)
except AttributeError:
print("No cooling rate info")
try:
line_c_pre=np.transpose(D.line_c_pre)
xray_h_pre=np.transpose(D.xray_h_pre)
comp_c_pre=np.transpose(D.comp_c_pre)
comp_h_pre=np.transpose(D.comp_h_pre)
brem_c_pre=np.transpose(D.brem_c_pre)
except AttributeError:
print("No cooling rate prefactors")
#Get the optically thin ionization parameter (if present)
try:
xi=np.transpose(D.XI)
except AttributeError:
print("No ionization parameter")
#Get the temperature if present, otherwise compute it
try:
temperature=np.transpose(D.T)
except AttributeError:
print("No temperature data - computing")
temperature=pressure/UNIT_PRESSURE*KELVIN*0.6/(density/UNIT_DENSITY)
#Get the geometric quantities
r=D.x1*UNIT_LENGTH
theta=D.x2
| [
"pyPLUTO.pload",
"numpy.transpose",
"numpy.sqrt"
] | [((987, 1002), 'pyPLUTO.pload', 'pp.pload', (['fname'], {}), '(fname)\n', (995, 1002), True, 'import pyPLUTO as pp\n'), ((1588, 1616), 'numpy.sqrt', 'np.sqrt', (['(v_r ** 2 + v_t ** 2)'], {}), '(v_r ** 2 + v_t ** 2)\n', (1595, 1616), True, 'import numpy as np\n'), ((1044, 1063), 'numpy.transpose', 'np.transpose', (['D.rho'], {}), '(D.rho)\n', (1056, 1063), True, 'import numpy as np\n'), ((1086, 1105), 'numpy.transpose', 'np.transpose', (['D.prs'], {}), '(D.prs)\n', (1098, 1105), True, 'import numpy as np\n'), ((1270, 1288), 'numpy.transpose', 'np.transpose', (['D.ne'], {}), '(D.ne)\n', (1282, 1288), True, 'import numpy as np\n'), ((1293, 1311), 'numpy.transpose', 'np.transpose', (['D.nh'], {}), '(D.nh)\n', (1305, 1311), True, 'import numpy as np\n'), ((1451, 1470), 'numpy.transpose', 'np.transpose', (['D.vx1'], {}), '(D.vx1)\n', (1463, 1470), True, 'import numpy as np\n'), ((1489, 1508), 'numpy.transpose', 'np.transpose', (['D.vx2'], {}), '(D.vx2)\n', (1501, 1508), True, 'import numpy as np\n'), ((1527, 1546), 'numpy.transpose', 'np.transpose', (['D.vx3'], {}), '(D.vx3)\n', (1539, 1546), True, 'import numpy as np\n'), ((1659, 1681), 'numpy.transpose', 'np.transpose', (['D.line_c'], {}), '(D.line_c)\n', (1671, 1681), True, 'import numpy as np\n'), ((1690, 1712), 'numpy.transpose', 'np.transpose', (['D.xray_h'], {}), '(D.xray_h)\n', (1702, 1712), True, 'import numpy as np\n'), ((1721, 1743), 'numpy.transpose', 'np.transpose', (['D.comp_c'], {}), '(D.comp_c)\n', (1733, 1743), True, 'import numpy as np\n'), ((1752, 1774), 'numpy.transpose', 'np.transpose', (['D.comp_h'], {}), '(D.comp_h)\n', (1764, 1774), True, 'import numpy as np\n'), ((1783, 1805), 'numpy.transpose', 'np.transpose', (['D.brem_c'], {}), '(D.brem_c)\n', (1795, 1805), True, 'import numpy as np\n'), ((1863, 1889), 'numpy.transpose', 'np.transpose', (['D.line_c_pre'], {}), '(D.line_c_pre)\n', (1875, 1889), True, 'import numpy as np\n'), ((1902, 1928), 'numpy.transpose', 'np.transpose', (['D.xray_h_pre'], {}), '(D.xray_h_pre)\n', (1914, 1928), True, 'import numpy as np\n'), ((1941, 1967), 'numpy.transpose', 'np.transpose', (['D.comp_c_pre'], {}), '(D.comp_c_pre)\n', (1953, 1967), True, 'import numpy as np\n'), ((1980, 2006), 'numpy.transpose', 'np.transpose', (['D.comp_h_pre'], {}), '(D.comp_h_pre)\n', (1992, 2006), True, 'import numpy as np\n'), ((2019, 2045), 'numpy.transpose', 'np.transpose', (['D.brem_c_pre'], {}), '(D.brem_c_pre)\n', (2031, 2045), True, 'import numpy as np\n'), ((2152, 2170), 'numpy.transpose', 'np.transpose', (['D.XI'], {}), '(D.XI)\n', (2164, 2170), True, 'import numpy as np\n'), ((2281, 2298), 'numpy.transpose', 'np.transpose', (['D.T'], {}), '(D.T)\n', (2293, 2298), True, 'import numpy as np\n')] |
from . import *
from .arctor import *
# from . import Arctor
import exoplanet as xo
import joblib
import multiprocessing as mp
import numpy as np
import os
import pandas as pd
import pygtc
import pymc3 as pm
import starry
import theano.tensor as tt
from exomast_api import exoMAST_API  # assumed source of the exoMAST_API class
from statsmodels.robust.scale import mad
from astropy.io import fits
from astropy.modeling.models import Gaussian1D, Linear1D
from astropy.stats import mad_std, sigma_clip
from astropy.time import Time
from astropy import units
from time import time
from tqdm import tqdm
def debug_message(message, end='\n'):
print(f'[DEBUG] {message}', end=end)
def warning_message(message, end='\n'):
print(f'[WARNING] {message}', end=end)
def info_message(message, end='\n'):
print(f'[INFO] {message}', end=end)
def create_raw_lc_stddev(planet):
ppm = 1e6
phot_vals = planet.photometry_df
lc_std_rev = phot_vals.iloc[planet.idx_rev].std(axis=0)
lc_std_fwd = phot_vals.iloc[planet.idx_fwd].std(axis=0)
lc_med_rev = np.median(phot_vals.iloc[planet.idx_rev], axis=0)
    lc_med_fwd = np.median(phot_vals.iloc[planet.idx_fwd], axis=0)
lc_std = np.mean([lc_std_rev, lc_std_fwd], axis=0)
lc_med = np.mean([lc_med_rev, lc_med_fwd], axis=0)
return lc_std / lc_med * ppm
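# A usage sketch (assumes `planet` is a loaded photometry object with
# `photometry_df`, `idx_fwd`, and `idx_rev` attributes):
#     lc_std_ppm = create_raw_lc_stddev(planet)  # one entry per column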
# def get_flux_idx_from_df(planet, aper_width, aper_height):
# # There *must* be a faster way!
# aperwidth_columns = [colname
# for colname in planet.photometry_df.columns
# if 'aper_width' in colname]
# aperheight_columns = [colname
# for colname in planet.photometry_df.columns
# if 'aper_height' in colname]
# trace_length = np.median(planet.trace_lengths) - 0.1
# aperwidths_df = (planet.photometry_df[aperwidth_columns] - trace_length)
# aperwidths_df = aperwidths_df.astype(int)
# aperheight_df = planet.photometry_df[aperheight_columns].astype(int)
# aperwidth_flag = aperwidths_df.values[0] == aper_width
# aperheight_flag = aperheight_df.values[0] == aper_height
# return np.where(aperwidth_flag * aperheight_flag) # [0][0]
def print_flux_stddev(planet, aper_width, aper_height):
# There *must* be a faster way!
fluxes = planet.photometry_df[f'aperture_sum_{aper_width}x{aper_height}']
fluxes = fluxes / np.median(fluxes)
info_message(f'{aper_width}x{aper_height}: {np.std(fluxes)*1e6:0.0f} ppm')
def find_flux_stddev(planet, flux_std, aper_widths, aper_heights):
# There *must* be a faster way!
for aper_width in tqdm(aper_widths):
for aper_height in tqdm(aper_heights):
flux_key = f'aperture_sum_{aper_width}x{aper_height}'
fluxes = planet.photometry_df[flux_key]
fluxes = fluxes / np.median(fluxes)
if np.std(fluxes) * 1e6 < flux_std:
info_message(f'{aper_width}x{aper_height}: '
f'{np.std(fluxes)*1e6:0.0f} ppm')
def setup_and_plot_GTC(mcmc_fit, plotName='',
varnames=None,
smoothingKernel=1,
square_edepth=False):
trace = mcmc_fit['trace']
map_soln = mcmc_fit['map_soln']
if trace is None:
return
if varnames is None:
varnames = [key for key in map_soln.keys()
if '__' not in key and 'light' not in key
and 'line' not in key
and 'le_edepth_0' not in key]
samples = pm.trace_to_dataframe(trace, varnames=varnames)
varnames = [key for key in map_soln.keys()
if '__' not in key and 'light' not in key
and 'line' not in key]
truths = [float(val) for key, val in map_soln.items() if key in varnames]
pygtc.plotGTC(samples, plotName=plotName, # truths=truths,
smoothingKernel=smoothingKernel,
labelRotation=[True] * 2,
customLabelFont={'rotation': 45},
nContourLevels=3, figureSize='MNRAS_page')
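# A usage sketch (assumes `mcmc_fit` is one entry of the dictionary built by
# `run_multiple_pymc3` below, i.e. {'trace': trace, 'map_soln': map_soln}):
#     setup_and_plot_GTC(mcmc_fit, plotName='corner_plot.pdf')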
def run_pymc3_multi_dataset(times, data, dataerr, t0, u, period, b,
idx_fwd, idx_rev, random_state=42,
xcenters=None, tune=5000, draws=5000,
target_accept=0.9, do_mcmc=True,
use_log_edepth=False,
allow_negative_edepths=False):
with pm.Model() as model:
# The baseline flux
mean = pm.Normal("mean", mu=1.0, sd=1.0, shape=2,
testval=np.array([0.999, 1.001]))
if use_log_edepth:
edepth = pm.Uniform("log_edepth", lower=-20, upper=-2)
edepth = pm.Deterministic("edepth", pm.math.exp(logP))
edepth = 10**(0.5 * edepth)
else:
if allow_negative_edepths:
edepth = pm.Uniform("edepth", lower=-0.01, upper=0.01)
                # a Python `if` cannot branch on a symbolic tensor, so
                # carry the sign through the sqrt symbolically instead
                edepth_sign = pm.math.sgn(edepth)
                edepth = edepth_sign * pm.math.sqrt(pm.math.abs_(edepth))
else:
edepth = pm.Uniform("edepth", lower=0, upper=0.01)
edepth = pm.math.sqrt(edepth)
# Set up a Keplerian orbit for the planets
orbit = xo.orbits.KeplerianOrbit(period=period, t0=t0, b=b)
# Compute the model light curve using starry
light_curves = xo.LimbDarkLightCurve(u).get_light_curve(
            orbit=orbit, r=edepth, t=times)
light_curve = pm.math.sum(light_curves, axis=-1) + mean
# Here we track the value of the model light curve for plotting
# purposes
pm.Deterministic("light_curves", light_curves)
# The likelihood function assuming known Gaussian uncertainty
pm.Normal("obs", mu=light_curve, sd=dataerr, observed=data)
# Fit for the maximum a posteriori parameters given the simuated
# dataset
map_soln = xo.optimize(start=model.test_point)
np.random.seed(random_state)
trace = pm.sample(
tune=tune,
draws=draws,
start=map_soln,
chains=mp.cpu_count(),
step=xo.get_dense_nuts_step(target_accept=target_accept),
cores=mp.cpu_count()
)
return trace, map_soln
def run_pymc3_fwd_rev(times, data, dataerr, t0, u, period, b, idx_fwd, idx_rev,
xcenters=None, tune=5000, draws=5000, target_accept=0.9,
do_mcmc=True, use_log_edepth=False,
allow_negative_edepths=False):
times_bg = times - np.median(times)
with pm.Model() as model:
# The baseline flux
mean_fwd = pm.Normal("mean_fwd", mu=1.0, sd=1.0)
mean_rev = pm.Normal("mean_rev", mu=1.0, sd=1.0)
assert(not (allow_negative_edepths and use_log_edepth)),\
'Cannot have `allow_negative_edepths` with `use_log_edepth`'
if use_log_edepth:
log_edepth = pm.Uniform("log_edepth", lower=-20, upper=-2)
edepth = pm.Deterministic("edepth", 10**(0.5 * log_edepth))
else:
if allow_negative_edepths:
edepth = pm.Uniform("edepth", lower=-0.01, upper=0.01)
else:
edepth = pm.Uniform("edepth", lower=0, upper=0.01)
edepth = pm.math.sqrt(edepth)
slope_time = pm.Uniform("slope_time", lower=-0.1, upper=0.1)
line_fwd = mean_fwd + slope_time * times_bg[idx_fwd]
line_rev = mean_rev + slope_time * times_bg[idx_rev]
if xcenters is not None:
slope_xc = pm.Uniform("slope_xcenter", lower=-0.1, upper=0.1)
line_fwd = line_fwd + slope_xc * xcenters[idx_fwd]
line_rev = line_rev + slope_xc * xcenters[idx_rev]
# Set up a Keplerian orbit for the planets
orbit = xo.orbits.KeplerianOrbit(period=period, t0=t0, b=b)
# # Compute the model light curve using starry
star = xo.LimbDarkLightCurve(u)
light_curves_fwd = star.get_light_curve(
orbit=orbit, r=edepth, t=times[idx_fwd])
light_curves_rev = star.get_light_curve(
orbit=orbit, r=edepth, t=times[idx_rev])
light_curve_fwd = pm.math.sum(light_curves_fwd, axis=-1)
light_curve_rev = pm.math.sum(light_curves_rev, axis=-1)
# # Here we track the value of the model light curve for plotting
# # purposes
pm.Deterministic("light_curves_fwd", light_curves_fwd)
pm.Deterministic("light_curves_rev", light_curves_rev)
# # The likelihood function assuming known Gaussian uncertainty
pm.Normal("obs_fwd", mu=light_curve_fwd + line_fwd,
sd=dataerr[idx_fwd], observed=data[idx_fwd])
pm.Normal("obs_rev", mu=light_curve_rev + line_rev,
sd=dataerr[idx_rev], observed=data[idx_rev])
# Fit for the maximum a posteriori parameters
# given the simuated dataset
map_soln = xo.optimize(start=model.test_point)
if use_log_edepth:
map_soln_edepth = 10**map_soln["log_edepth"]
else:
map_soln_edepth = map_soln["edepth"]
info_message(f'map_soln_edepth:{map_soln_edepth*1e6}')
np.random.seed(42)
if do_mcmc:
trace = pm.sample(
tune=tune,
draws=tune,
start=map_soln,
chains=mp.cpu_count(),
step=xo.get_dense_nuts_step(target_accept=target_accept),
cores=mp.cpu_count()
)
else:
trace = None
return trace, map_soln
def run_pymc3_direct(times, data, dataerr, t0, u, period, b, xcenters=None,
tune=5000, draws=5000, target_accept=0.9, do_mcmc=True,
use_log_edepth=False, allow_negative_edepths=False):
times_bg = times - np.median(times)
with pm.Model() as model:
# The baseline flux
mean = pm.Normal("mean", mu=1.0, sd=1.0)
assert(not (allow_negative_edepths and use_log_edepth)),\
'Cannot have `allow_negative_edepths` with `use_log_edepth`'
if use_log_edepth:
log_edepth = pm.Uniform("log_edepth", lower=-20, upper=-2)
edepth = pm.Deterministic("edepth", 10**(0.5 * log_edepth))
else:
if allow_negative_edepths:
edepth = pm.Uniform("edepth", lower=-0.01, upper=0.01)
else:
edepth = pm.Uniform("edepth", lower=0, upper=0.01)
edepth = pm.math.sqrt(edepth)
slope_time = pm.Uniform("slope_time", lower=-0.1, upper=0.1)
line = mean + slope_time * times_bg
if xcenters is not None:
slope_xc = pm.Uniform("slope_xcenter", lower=-0.1, upper=0.1)
line = line + slope_xc * xcenters
# Set up a Keplerian orbit for the planets
orbit = xo.orbits.KeplerianOrbit(period=period, t0=t0, b=b)
# Compute the model light curve using starry
light_curves = xo.LimbDarkLightCurve(
u).get_light_curve(orbit=orbit, r=edepth, t=times)
light_curve = pm.math.sum(light_curves, axis=-1)
pm.Deterministic("light_curves", light_curves)
# Combined model: light curve and background
model_ = light_curve + line
# The likelihood function assuming known Gaussian uncertainty
pm.Normal("obs", mu=model_, sd=dataerr, observed=data)
# Fit for the maximum a posteriori parameters given the simuated
# dataset
map_soln = xo.optimize(start=model.test_point)
if use_log_edepth:
map_soln_edepth = 10**map_soln["log_edepth"]
else:
map_soln_edepth = map_soln["edepth"]
info_message(f'map_soln_edepth:{map_soln_edepth*1e6}')
line_map_soln = (map_soln['mean'] +
map_soln['slope_time'] * times_bg.flatten())
if xcenters is not None:
line_map_soln = line_map_soln + \
map_soln['slope_xcenter'] * xcenters
np.random.seed(42)
if do_mcmc:
trace = pm.sample(
tune=tune,
draws=draws,
start=map_soln,
chains=mp.cpu_count(),
step=xo.get_dense_nuts_step(target_accept=target_accept),
cores=mp.cpu_count()
)
else:
trace = None
return trace, map_soln
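# A usage sketch with hypothetical placeholder inputs (`times`, `flux`, and
# `uncs` are arrays; the orbital parameters below are illustrative only):
#     trace, map_soln = run_pymc3_direct(
#         times, flux, uncs, t0=0.0, u=[0.1, 0.3], period=0.8135, b=0.5)
#     print(map_soln['edepth'] * 1e6)  # MAP eclipse depth in ppm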
def build_gp_pink_noise(times, data, dataerr,
log_Q=np.log(1.0 / np.sqrt(2))):
log_w0 = pm.Normal("log_w0", mu=0.0, sigma=15.0,
testval=np.log(3.0))
log_Sw4 = pm.Normal("log_variance_r", mu=0.0, sigma=15.0,
testval=np.log(np.var(data)))
log_s2 = pm.Normal("log_variance_w", mu=0.0, sigma=15.0,
testval=np.log(np.var(data)))
kernel = xo.gp.terms.SHOTerm(
log_Sw4=log_Sw4, log_w0=log_w0, log_Q=log_Q)
return xo.gp.GP(kernel, times, dataerr ** 2 + pm.math.exp(log_s2))
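# A minimal sketch (hypothetical `times`, `flux`, and `uncs` arrays); the
# pm.Normal priors above require an active pymc3 model context:
#     with pm.Model():
#         gp = build_gp_pink_noise(times, flux, uncs)
#         gp.marginal("gp", observed=flux)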
def run_pymc3_both(times, data, dataerr, t0, u, period, b,
xcenters=None, ycenters=None,
                   trace_angles=None, trace_lengths=None,
                   log_Q=np.log(1.0 / np.sqrt(2)),
idx_fwd=None, idx_rev=None, tune=5000, draws=5000,
target_accept=0.9, do_mcmc=True, use_log_edepth=False,
allow_negative_edepths=False, use_pink_gp=False):
if idx_fwd is None or idx_rev is None:
# Make use of idx_fwd and idx_rev trivial
idx_fwd = np.ones_like(times, dtype=bool)
strfwd = ''
strrev = ''
else:
assert(len(idx_fwd) + len(idx_rev) == len(times)),\
f"`idx_fwd` + `idx_rev` must include all idx from `times`"
strfwd = '_fwd'
strrev = '_rev'
times_bg = times - np.median(times)
with pm.Model() as model:
# The baseline flux
mean_fwd = pm.Normal(f"mean{strfwd}", mu=0.0, sd=1.0)
if idx_rev is not None:
mean_rev = pm.Normal(f"mean{strrev}", mu=0.0, sd=1.0)
assert(not (allow_negative_edepths and use_log_edepth)),\
'Cannot have `allow_negative_edepths` with `use_log_edepth`'
if use_log_edepth:
log_edepth = pm.Uniform("log_edepth", lower=-20, upper=-2)
edepth = pm.Deterministic("edepth", 10**(0.5 * log_edepth))
else:
if allow_negative_edepths:
edepth = pm.Uniform("edepth", lower=-0.01, upper=0.01)
else:
edepth = pm.Uniform("edepth", lower=0, upper=0.01)
edepth = pm.math.sqrt(edepth)
slope_time = pm.Uniform("slope_time", lower=-1, upper=1)
line_fwd = mean_fwd + slope_time * times_bg[idx_fwd]
if idx_rev is not None:
line_rev = mean_rev + slope_time * times_bg[idx_rev]
if xcenters is not None:
slope_xc = pm.Uniform("slope_xcenter", lower=-1, upper=1)
line_fwd = line_fwd + slope_xc * xcenters[idx_fwd]
if idx_rev is not None:
line_rev = line_rev + slope_xc * xcenters[idx_rev]
if ycenters is not None:
slope_yc = pm.Uniform("slope_ycenter", lower=-1, upper=1)
line_fwd = line_fwd + slope_yc * ycenters[idx_fwd]
if idx_rev is not None:
line_rev = line_rev + slope_yc * ycenters[idx_rev]
if trace_angles is not None:
slope_ta = pm.Uniform("slope_trace_angle", lower=-1, upper=1)
line_fwd = line_fwd + slope_ta * trace_angles[idx_fwd]
if idx_rev is not None:
line_rev = line_rev + slope_ta * trace_angles[idx_rev]
if trace_lengths is not None:
slope_tl = pm.Uniform("slope_trace_length", lower=-1, upper=1)
line_fwd = line_fwd + slope_tl * trace_lengths[idx_fwd]
if idx_rev is not None:
line_rev = line_rev + slope_tl * trace_lengths[idx_rev]
pm.Deterministic(f'line_model{strfwd}', line_fwd)
if idx_rev is not None:
pm.Deterministic(f'line_model{strrev}', line_rev)
# Set up a Keplerian orbit for the planets
orbit = xo.orbits.KeplerianOrbit(period=period, t0=t0, b=b)
# # Compute the model light curve using starry
star = xo.LimbDarkLightCurve(u)
light_curves_fwd = star.get_light_curve(
orbit=orbit, r=edepth, t=times[idx_fwd])
if idx_rev is not None:
light_curves_rev = star.get_light_curve(
orbit=orbit, r=edepth, t=times[idx_rev])
light_curve_fwd = pm.math.sum(light_curves_fwd, axis=-1)
if idx_rev is not None:
light_curve_rev = pm.math.sum(light_curves_rev, axis=-1)
# # Here we track the value of the model light curve for plotting
# # purposes
pm.Deterministic(f"light_curves{strfwd}", light_curve_fwd)
if idx_rev is not None:
pm.Deterministic(f"light_curves{strrev}", light_curve_rev)
# The likelihood function assuming known Gaussian uncertainty
model_fwd = light_curve_fwd + line_fwd
if idx_rev is not None:
model_rev = light_curve_rev + line_rev
if use_pink_gp:
gp = build_gp_pink_noise(
times, data, dataerr, log_Q=log_Q)
# gp = build_gp_pink_noise(times, data, dataerr, log_Q=log_Q)
# mu, _ = xo.eval_in_model(model.test_point)
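            # NOTE: this GP path assumes no fwd/rev split, i.e. `data` and
            # `model_fwd` must have the same length here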
gp.marginal("gp", observed=data - model_fwd.flatten())
else:
pm.Normal(f"obs{strfwd}", mu=model_fwd,
sd=dataerr[idx_fwd], observed=data[idx_fwd])
if idx_rev is not None:
pm.Normal(f"obs{strrev}", mu=light_curve_rev + line_rev,
sd=dataerr[idx_rev], observed=data[idx_rev])
# Fit for the maximum a posteriori parameters
# given the simuated dataset
map_soln = xo.optimize(start=model.test_point)
if use_log_edepth:
map_soln_edepth = 10**map_soln["log_edepth"]
else:
map_soln_edepth = map_soln["edepth"]
info_message(f'Map Soln Edepth:{map_soln_edepth*1e6}')
np.random.seed(42)
if do_mcmc:
trace = pm.sample(
tune=tune,
draws=tune,
start=map_soln,
chains=mp.cpu_count(),
step=xo.get_dense_nuts_step(target_accept=target_accept),
cores=mp.cpu_count()
)
else:
trace = None
return trace, map_soln
def run_pymc3_w_gp(times, data, dataerr, t0, u, period, b,
xcenters=None, ycenters=None,
trace_angles=None, trace_lengths=None,
                   log_Q=np.log(1.0 / np.sqrt(2)), tune=5000, draws=5000,
target_accept=0.9, do_mcmc=False, normalize=False,
use_pink_gp=False, verbose=False):
times_bg = times - np.median(times)
with pm.Model() as model:
# The baseline flux
mean = pm.Normal(f"mean", mu=0.0, sd=1.0)
edepth = pm.Uniform("edepth", lower=0, upper=0.01)
edepth = pm.math.sqrt(edepth)
slope_time = pm.Uniform("slope_time", lower=-1, upper=1)
line_model = mean + slope_time * times_bg
if xcenters is not None:
med_ = np.median(xcenters)
std_ = np.std(xcenters)
xcenters = (xcenters - med_) / std_ if normalize else xcenters
slope_xc = pm.Uniform("slope_xcenter", lower=-1, upper=1)
line_model = line_model + slope_xc * xcenters
if ycenters is not None:
med_ = np.median(ycenters)
std_ = np.std(ycenters)
ycenters = (ycenters - med_) / std_ if normalize else ycenters
slope_yc = pm.Uniform("slope_ycenter", lower=-1, upper=1)
line_model = line_model + slope_yc * ycenters
if trace_angles is not None:
med_ = np.median(trace_angles)
std_ = np.std(trace_angles)
if normalize:
trace_angles = (trace_angles - med_) / std_
slope_angles = pm.Uniform("slope_trace_angle", lower=-1, upper=1)
line_model = line_model + slope_angles * trace_angles
if trace_lengths is not None:
med_ = np.median(trace_lengths)
std_ = np.std(trace_lengths)
if normalize:
trace_lengths = (trace_lengths - med_) / std_
slope_tl = pm.Uniform("slope_trace_length", lower=-1, upper=1)
line_model = line_model + slope_tl * trace_lengths
pm.Deterministic(f'line_model', line_model)
# Set up a Keplerian orbit for the planets
orbit = xo.orbits.KeplerianOrbit(period=period, t0=t0, b=b)
# # Compute the model light curve using starry
star = xo.LimbDarkLightCurve(u)
light_curves = star.get_light_curve(
orbit=orbit, r=edepth, t=times)
light_curve = pm.math.sum(light_curves, axis=-1)
# # Here we track the value of the model light curve for plotting
# # purposes
pm.Deterministic("light_curve", light_curve)
# The likelihood function assuming known Gaussian uncertainty
model_full = light_curve + line_model
if use_pink_gp:
gp = build_gp_pink_noise(
times, data, dataerr, log_Q=log_Q)
# gp = build_gp_pink_noise(times, data, dataerr, log_Q=log_Q)
# mu, _ = xo.eval_in_model(model.test_point)
gp.marginal("gp", observed=data - model_full.flatten())
mu, _ = gp.predict(times, return_var=True, predict_mean=True)
# pm.Deterministic("light_curve", light_curve)
# help(pm.Deterministic)
# print(type("light_curve2"))
pm.Deterministic(name="gp_mu", var=mu)
else:
pm.Normal(f"obs", mu=model_full, sd=dataerr, observed=data)
# with pm.Model() as model:
# Fit for the maximum a posteriori parameters
# given the simuated dataset
map_soln = xo.optimize(start=model.test_point)
if verbose:
ppm = 1e6
info_message(f'Map Soln Edepth:{map_soln["edepth"]*ppm}')
# with pm.Model() as model:
np.random.seed(42)
trace = None
if do_mcmc:
trace = pm.sample(
tune=tune,
draws=tune,
start=map_soln,
chains=mp.cpu_count(),
step=xo.get_dense_nuts_step(target_accept=target_accept),
cores=mp.cpu_count()
)
return trace, map_soln
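# A usage sketch with hypothetical placeholder inputs; `normalize=True`
# standardizes each decorrelation vector before fitting:
#     trace, map_soln = run_pymc3_w_gp(
#         times, flux, uncs, t0=0.0, u=[0.1, 0.3], period=0.8135, b=0.5,
#         xcenters=xcenters, normalize=True, do_mcmc=False)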
def run_multiple_pymc3(times, fine_snr_flux, fine_snr_uncs, aper_sum_columns,
t0=0, u=[0], period=1.0, b=0.0, xcenters=None,
idx_fwd=None, idx_rev=None, tune=3000, draws=3000,
target_accept=0.9, do_mcmc=False, save_as_you_go=False,
allow_negative_edepths=False, use_rev_fwd_split=False,
use_log_edepth=False, injected_light_curve=1.0,
base_name=None, working_dir='./'):
if use_rev_fwd_split and (idx_fwd is None or idx_rev is None):
assert(False), (f'if `use_rev_fwd_split` is {use_rev_fwd_split}, '
'then you must provide `idx_fwd` and `idx_rev`. '
'One or both are current set to `None`')
varnames = None
filename = configure_save_name(
base_name=base_name,
do_mcmc=do_mcmc,
use_xcenter=xcenters is not None,
use_log_edepth=use_log_edepth,
use_rev_fwd_split=use_rev_fwd_split,
use_injection=hasattr(injected_light_curve, '__iter__'),
allow_negative_edepths=allow_negative_edepths
)
filename = os.path.join(working_dir, filename)
fine_grain_mcmcs = {}
for colname in aper_sum_columns:
start = time()
info_message(f'Working on {colname} for MAP/Trace MCMCs')
data = fine_snr_flux[colname] * injected_light_curve
dataerr = fine_snr_uncs[colname]
if use_rev_fwd_split:
trace, map_soln = run_pymc3_fwd_rev(
times, data, dataerr, t0, u, period, b,
idx_fwd, idx_rev, xcenters,
tune=tune, draws=draws,
target_accept=target_accept,
do_mcmc=do_mcmc,
use_log_edepth=use_log_edepth,
allow_negative_edepths=allow_negative_edepths
)
else:
trace, map_soln = run_pymc3_direct(
times, data, dataerr,
t0, u, period, b, xcenters,
tune=tune, draws=draws,
target_accept=target_accept,
do_mcmc=do_mcmc,
use_log_edepth=use_log_edepth,
allow_negative_edepths=allow_negative_edepths
)
fine_grain_mcmcs[colname] = {}
fine_grain_mcmcs[colname]['trace'] = trace
fine_grain_mcmcs[colname]['map_soln'] = map_soln
if save_as_you_go and False:
info_message(f'Saving MCMCs to {filename}')
joblib.dump(fine_grain_mcmcs, filename)
if varnames is None:
varnames = [key for key in map_soln.keys()
if '__' not in key and 'light' not in key]
if trace is not None:
print(pm.summary(trace, var_names=varnames))
stop = time() - start
info_message(f'Completed {colname} for Trace MCMCs took {stop}')
if save_as_you_go:
info_message(f'Saving MCMCs to {filename}')
joblib.dump(fine_grain_mcmcs, filename)
return fine_grain_mcmcs, filename
def configure_save_name(base_name=None, working_dir='', do_mcmc=True,
use_xcenter=False, use_log_edepth=False,
use_rev_fwd_split=False, use_injection=False,
allow_negative_edepths=False, planet_name="planet_name"):
if base_name is None:
base_name = f'{planet_name}_fine_grain_photometry_20x20_208ppm'
fname_split = {True: 'w_fwd_rev_split',
False: 'no_fwd_rev_split'}[use_rev_fwd_split]
fname_mcmc = {True: 'MCMCs_w_MAPS', False: 'MAPS_only'}[do_mcmc]
fname_xcenter = {True: 'fit_xcenter', False: 'no_xcenter'}[use_xcenter]
fname_logedepth = {True: 'fit_log_edepth',
False: 'fit_linear_edepth'}[use_log_edepth]
fname_injected = {True: '_injected_signal', False: ''}[use_injection]
fname_neg_ecl = {True: '_allow_neg_edepth',
False: ''}[allow_negative_edepths]
filename = f'{base_name}_{fname_mcmc}_{fname_split}_with_{fname_xcenter}_{fname_logedepth}{fname_injected}{fname_neg_ecl}.joblib.save'
return filename
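# For example, the defaults (do_mcmc=True, all flags False) produce
#     planet_name_fine_grain_photometry_20x20_208ppm_MCMCs_w_MAPS_no_fwd_rev_split_with_no_xcenter_fit_linear_edepth.joblib.save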
def compute_xo_lightcurve(planet_name, times, depth_ppm=1000, u=[0], b=0.0):
    planet_params = exoMAST_API(planet_name)
    period = planet_params.orbital_period  # e.g. 0.813475 days # exo.mast.stsci.edu
    t0 = planet_params.transit_time
    edepth = np.sqrt(depth_ppm / 1e6)  # convert 'depth' to 'radius ratio'
    orbit = xo.orbits.KeplerianOrbit(period=period, t0=t0, b=b)
return xo.LimbDarkLightCurve(u).get_light_curve(
orbit=orbit, r=edepth, t=times).eval().flatten()
def instantiate_arctor(planet_name, data_dir, working_dir, file_type,
joblib_filename='', sort_by_time=False):
assert(False), ("This needs to be places in the examples. "
"For some reason, `from .arctor import Arctor` "
"does not work as it is expected to work.")
planet = Arctor(
planet_name=planet_name,
data_dir=data_dir,
working_dir=working_dir,
file_type=file_type)
if os.path.exists(joblib_filename):
info_message('Loading Data from Save File')
planet.load_dict(joblib_filename)
else:
info_message('Loading New Data Object')
planet.load_data(sort_by_time=sort_by_time)
return planet
def instantiate_star_planet_system( # Stellar parameters
star_ydeg=0, star_udeg=2, star_L=1.0, star_inc=90.0,
star_obl=0.0, star_m=1.0, star_r=1.0, star_prot=1.0,
star_t0=0, star_theta0=0.0, star_A1=1.0, star_A2=0.0,
star_length_unit=units.Rsun, star_mass_unit=units.Msun,
# Planetary parameters
planet_B1=1.0, planet_ydeg=1, planet_udeg=0, planet_L=1.0,
planet_a=1.0, planet_phase_offset=0., planet_inc=90.0, planet_porb=1.0,
planet_t0=0.0, planet_obl=0.0, planet_m=0.0, planet_r=0.1,
planet_ecc=0.0, planet_w=90.0, planet_Omega=0.0, planet_theta0=0.0,
planet_length_unit=units.Rjup, planet_mass_unit=units.Mjup,
# Universal Parmaeters
time_unit=units.day, angle_unit=units.degree):
stellar_map = starry.Map(ydeg=star_ydeg,
udeg=star_udeg,
L=star_L,
inc=star_inc,
obl=star_obl)
A = starry.Primary(
stellar_map,
m=star_m,
r=star_r,
prot=star_prot,
t0=star_t0,
theta0=star_theta0,
length_unit=star_length_unit,
mass_unit=star_mass_unit,
time_unit=time_unit,
angle_unit=angle_unit
)
A.map[1] = star_A1
A.map[2] = star_A2
planet_map = starry.Map(ydeg=planet_ydeg,
udeg=planet_udeg,
L=planet_L,
inc=planet_inc,
obl=planet_obl)
b = starry.Secondary(
planet_map,
m=tt.as_tensor_variable(planet_m).astype("float64"),
r=planet_r,
a=planet_a,
inc=planet_inc,
t0=planet_t0,
prot=planet_porb, # synchronous rotation
porb=planet_porb,
ecc=planet_ecc,
w=planet_w,
Omega=planet_Omega,
theta0=planet_theta0,
length_unit=planet_length_unit,
mass_unit=planet_mass_unit,
time_unit=time_unit,
angle_unit=angle_unit
)
if planet_ydeg > 0:
b.map[1, 0] = planet_B1
b.theta0 = 180.0 + planet_phase_offset
return starry.System(A, b)
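# A usage sketch (hypothetical values; `times` is a placeholder array of
# observation times in days):
#     sys = instantiate_star_planet_system(planet_porb=0.8135, planet_r=0.1)
#     flux = sys.flux(times).eval()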
def previous_instantiate_arctor(planet_name, data_dir, working_dir, file_type,
save_name_base='savedict'):
planet = Arctor(
planet_name=planet_name,
data_dir=data_dir,
working_dir=working_dir,
file_type=file_type)
joblib_filename = f'{planet_name}_{save_name_base}.joblib.save'
joblib_filename = f'{working_dir}/{joblib_filename}'
if os.path.exists(joblib_filename):
info_message('Loading Data from Save File')
planet.load_data(joblib_filename)
else:
info_message('Loading New Data Object')
planet.load_data()
return planet
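# NOTE: the `create_raw_lc_stddev` below redefines (and therefore replaces)
# the version defined near the top of this module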
def create_raw_lc_stddev(planet, reject_outliers=True):
ppm = 1e6
phot_vals = planet.photometry_df
n_columns = len(planet.photometry_df.columns)
# lc_med_fwd = np.zeros_like(n_columns)
# lc_med_rev = np.zeros_like(n_columns)
# lc_std_fwd = np.zeros_like(n_columns)
# lc_std_rev = np.zeros_like(n_columns)
lc_med = np.zeros(n_columns)
lc_std = np.zeros(n_columns)
for k, colname in enumerate(phot_vals.columns):
if reject_outliers:
inliers_fwd, inliers_rev = compute_inliers(
planet, aper_colname=colname, n_sig=2
)
else:
inliers_fwd = np.arange(planet.idx_fwd.size)
inliers_rev = np.arange(planet.idx_rev.size)
phots_rev = phot_vals[colname].iloc[planet.idx_rev].iloc[inliers_rev]
phots_fwd = phot_vals[colname].iloc[planet.idx_fwd].iloc[inliers_fwd]
lc_std_rev = mad_std(phots_rev)
lc_std_fwd = mad_std(phots_fwd)
        lc_med_rev = np.median(phots_rev)
lc_med_fwd = np.median(phots_fwd)
lc_std[k] = np.mean([lc_std_rev, lc_std_fwd])
lc_med[k] = np.mean([lc_med_rev, lc_med_fwd])
return lc_std / lc_med * ppm
def center_one_trace(kcol, col, fitter, stddev, y_idx, inds, idx_buffer=10):
model = Gaussian1D(amplitude=col.max(),
mean=y_idx, stddev=stddev)
# idx_narrow = abs(inds - y_idx) < idx_buffer
# results = fitter(model, inds[idx_narrow], col[idx_narrow])
results = fitter(model, inds, col)
return kcol, results, fitter
def fit_one_slopes(kimg, means, fitter, y_idx, slope_guess=2.0 / 466):
model = Linear1D(slope=slope_guess, intercept=y_idx)
inds = np.arange(len(means))
inds = inds - np.median(inds)
results = fitter(model, inds, means)
return kimg, results, fitter
def cosmic_ray_flag_simple(image_, n_sig=5, window=7):
cosmic_rays_ = np.zeros(image_.shape, dtype=bool)
for k, row in enumerate(image_):
row_Med = np.median(row)
row_Std = np.std(row)
cosmic_rays_[k] += abs(row - row_Med) > n_sig * row_Std
image_[k][cosmic_rays_[k]] = row_Med
return image_, cosmic_rays_
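# A usage sketch (assumes `image` is a 2D numpy array from one exposure; the
# `window` argument is unused by this simple row-wise version):
#     cleaned, cr_mask = cosmic_ray_flag_simple(image.copy(), n_sig=5)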
def cosmic_ray_flag_rolling(image_, n_sig=5, window=7):
cosmic_rays_ = np.zeros(image_.shape, dtype=bool)
for k, row in enumerate(image_):
row_rMed = pd.Series(row).rolling(window).median()
row_rStd = pd.Series(row).rolling(window).std()
cosmic_rays_[k] += abs(row - row_rMed) > n_sig * row_rStd
image_[k][cosmic_rays_[k]] = row_rMed[cosmic_rays_[k]]
return image_, cosmic_rays_
def aper_table_2_df(aper_phots, aper_widths, aper_heights, n_images):
info_message(f'Restructuring Aperture Photometry into DataFrames')
if len(aper_phots) > 1:
aper_df = aper_phots[0].to_pandas()
for kimg in aper_phots[1:]:
aper_df = pd.concat([aper_df, kimg.to_pandas()], ignore_index=True)
else:
aper_df = aper_phots.to_pandas()
photometry_df_ = aper_df.reset_index().drop(['index', 'id'], axis=1)
mesh_widths, mesh_heights = np.meshgrid(aper_widths, aper_heights)
mesh_widths = mesh_widths.flatten()
mesh_heights = mesh_heights.flatten()
aperture_columns = [colname
for colname in photometry_df_.columns
if 'aperture_sum_' in colname]
photometry_df = pd.DataFrame([])
for colname in aperture_columns:
aper_id = int(colname.replace('aperture_sum_', ''))
aper_width_ = mesh_widths[aper_id].astype(int)
aper_height_ = mesh_heights[aper_id].astype(int)
newname = f'aperture_sum_{aper_width_}x{aper_height_}'
photometry_df[newname] = photometry_df_[colname]
photometry_df['xcenter'] = photometry_df_['xcenter']
photometry_df['ycenter'] = photometry_df_['ycenter']
return photometry_df
def make_mask_cosmic_rays_temporal_simple(val, kcol, krow, n_sig=5):
val_Med = np.median(val)
val_Std = np.std(val)
mask = abs(val - val_Med) > n_sig * val_Std
return kcol, krow, mask, val_Med
def check_if_column_exists(existing_photometry_df, new_photometry_df, colname):
existing_columns = existing_photometry_df.columns
exists = False
similar = False
if colname in existing_columns:
existing_vec = existing_photometry_df[colname]
new_vec = new_photometry_df[colname]
exists = True
similar = np.allclose(existing_vec, new_vec)
if similar:
return exists, similar, colname
else:
same_name = []
for colname in existing_columns:
if f'colname_{len(same_name)}' in existing_columns:
same_name.append(colname)
return exists, similar, f'colname_{len(same_name)}'
else:
return exists, similar, colname
def run_all_12_options(times, flux, uncs,
list_of_aper_columns,
xcenters=None, ycenters=None,
trace_angles=None, trace_lengths=None,
t0=0, u=[0], period=1.0, b=0.0,
idx_fwd=None, idx_rev=None,
tune=3000, draws=3000, target_accept=0.9,
do_mcmc=False, save_as_you_go=False,
injected_light_curve=1.0, working_dir='./',
                       base_name='planet_name_fine_grain_photometry_208ppm'):
decor_set = [xcenters, ycenters, trace_angles, trace_lengths]
    # list.extend returns None, so build the 12-entry list by concatenation
    decor_options = [None] * 6 + [decor_set] * 6
neg_ecl_options = [True, True, True, True,
False, False, False, False,
False, False, False, False]
use_split_options = [True, False, True, False,
True, False, True, False,
True, False, True, False]
log_edepth_options = [False, False, False, False,
False, False, False, False,
True, True, True, True]
pymc3_options = zip(decor_options, neg_ecl_options,
use_split_options, log_edepth_options)
mcmc_fits = {}
start0 = time()
for decor_set_, allow_neg_, use_split_, use_log_edepth_ in pymc3_options:
start1 = time()
        if decor_set_ is not None:
            xcenters_, ycenters_, trace_angles_, trace_lengths_ = decor_set_
        else:
            xcenters_, ycenters_, trace_angles_, trace_lengths_ = [None] * 4
        print(f'Fit xCenters: {xcenters_ is not None}')
        print(f'Allow Negative Eclipse Depth: {allow_neg_}')
        print(f'Use Fwd/Rev Split: {use_split_}')
        print(f'Use Log Edepth: {use_log_edepth_}')
idx_fwd_ = idx_fwd if use_split_ else None
idx_rev_ = idx_rev if use_split_ else None
        # run_multiple_pymc3 returns the (mcmcs, filename) pair expected
        # below; note that it only decorrelates against xcenters, so the
        # ycenter, trace-angle, and trace-length vectors are unpacked above
        # but not passed through
        fine_grain_mcmcs, filename = run_multiple_pymc3(
            times, flux, uncs, list_of_aper_columns,
            t0=t0, u=u, period=period, b=b,
            idx_fwd=idx_fwd_, idx_rev=idx_rev_,
            tune=tune, draws=draws, target_accept=target_accept,
            do_mcmc=do_mcmc, save_as_you_go=save_as_you_go,
            injected_light_curve=injected_light_curve,
            base_name=base_name, working_dir=working_dir,
            xcenters=xcenters_,
            allow_negative_edepths=allow_neg_,
            use_rev_fwd_split=use_split_,
            use_log_edepth=use_log_edepth_
        )
mcmc_fits[filename] = fine_grain_mcmcs
        print(f'[INFO] These MCMCs took {(time() - start1)/60:0.2f} minutes')
del fine_grain_mcmcs, filename
n_mcmcs = len(decor_options)
full_time = (time() - start0) / 60
print(f'[INFO] All {n_mcmcs} MCMCs took {full_time:0.2f} minutes')
return mcmc_fits
def run_all_12_options_plain(times, fine_snr_flux, fine_snr_uncs,
near_best_apertures_NxN_small,
t0=0, u=[0], period=1.0, b=0.0,
idx_fwd=None, idx_rev=None,
tune=3000, draws=3000, target_accept=0.9,
do_mcmc=False, save_as_you_go=False,
                             injected_light_curve=1.0, xcenters=None,
                             n_space=20, working_dir='./',
                             base_name='planet_name_fine_grain_photometry_208ppm'):
    # `xcenters`, `n_space`, and `working_dir` are keyword arguments here;
    # `n_space` only labels the save name (the default of 20 assumes the
    # 20x20 aperture grid used elsewhere in this module)
    base_name = f'{base_name}_near_best_{n_space}x{n_space}'
start0 = time()
# Linear Eclipse Depths with Negative Allowed
start1 = time()
print('Linear Eclipse depth fits - Default everything')
fine_grain_mcmcs, filename = run_multiple_pymc3(
times, fine_snr_flux, fine_snr_uncs, near_best_apertures_NxN_small,
        t0=t0, u=u, period=period, b=b,
idx_fwd=idx_fwd, idx_rev=idx_rev,
tune=tune, draws=draws, target_accept=target_accept,
do_mcmc=do_mcmc, save_as_you_go=save_as_you_go,
injected_light_curve=injected_light_curve,
base_name=base_name, working_dir=working_dir,
xcenters=None,
allow_negative_edepths=True,
use_rev_fwd_split=False,
use_log_edepth=False
)
    fine_grain_mcmcs_with_no_xcenter_lin_edepth_no_split_w_negEcl = fine_grain_mcmcs
    filename_with_no_xcenter_lin_edepth_no_split_w_negEcl = filename
    print(f'[INFO] This MCMC took {(time() - start1)/60:0.2f} minutes')
del fine_grain_mcmcs, filename
start1 = time()
print('Linear Eclipse depth fits - Everything with splitting fwd rev')
fine_grain_mcmcs, filename = run_multiple_pymc3(
times, fine_snr_flux, fine_snr_uncs, near_best_apertures_NxN_small,
t0=t0_guess, u=u, period=period_planet, b=b_planet,
idx_fwd=idx_fwd, idx_rev=idx_rev,
tune=tune, draws=draws, target_accept=target_accept,
do_mcmc=do_mcmc, save_as_you_go=save_as_you_go,
injected_light_curve=injected_light_curve,
base_name=base_name, working_dir=working_dir,
xcenters=None, # SAME
allow_negative_edepths=True, # SAME
use_rev_fwd_split=True, # DIFFERENT
use_log_edepth=False # SAME
)
fine_grain_mcmcs_with_no_xcenter_lin_edepth_w_split_w_negEcl = fine_grain_mcmcs
filename_with_no_xcenter_lin_edepth_w_split_w_negEcl = filename
    print(f'[INFO] This MCMC took {(time() - start1)/60:0.2f} minutes')
del fine_grain_mcmcs, filename
start1 = time()
print('Linear Eclipse depth fits - Everything with xcenter')
fine_grain_mcmcs, filename = run_multiple_pymc3(
times, fine_snr_flux, fine_snr_uncs, near_best_apertures_NxN_small,
t0=t0_guess, u=u, period=period_planet, b=b_planet,
idx_fwd=idx_fwd, idx_rev=idx_rev,
tune=tune, draws=draws, target_accept=target_accept,
do_mcmc=do_mcmc, save_as_you_go=save_as_you_go,
injected_light_curve=injected_light_curve,
base_name=base_name, working_dir=working_dir,
xcenters=xcenters_mod, # DIFFERENT
allow_negative_edepths=True, # SAME
use_rev_fwd_split=False, # SAME
use_log_edepth=False, # SAME
)
fine_grain_mcmcs_with_w_xcenter_lin_edepth_no_split_w_negEcl = fine_grain_mcmcs
filename_with_w_xcenter_lin_edepth_no_split_w_negEcl = filename
    print(f'[INFO] This MCMC took {(time() - start1)/60:0.2f} minutes')
del fine_grain_mcmcs, filename
start1 = time()
print('Linear Eclipse depth fits - '
'Everything with xcenter and splitting fwd rev')
fine_grain_mcmcs, filename = run_multiple_pymc3(
times, fine_snr_flux, fine_snr_uncs, near_best_apertures_NxN_small,
t0=t0_guess, u=u, period=period_planet, b=b_planet,
idx_fwd=idx_fwd, idx_rev=idx_rev,
tune=tune, draws=draws, target_accept=target_accept,
do_mcmc=do_mcmc, save_as_you_go=save_as_you_go,
injected_light_curve=injected_light_curve,
base_name=base_name, working_dir=working_dir,
xcenters=xcenters_mod, # SAME
allow_negative_edepths=True, # SAME
use_rev_fwd_split=True, # DIFFERENT
use_log_edepth=False # SAME
)
fine_grain_mcmcs_with_w_xcenter_lin_edepth_w_split_w_negEcl = fine_grain_mcmcs
filename_with_w_xcenter_lin_edepth_w_split_w_negEcl = filename
    print(f'[INFO] This MCMC took {(time() - start1)/60:0.2f} minutes')
del fine_grain_mcmcs, filename
start1 = time()
# Linear Eclipse Depths without Negative Allowed
print('Linear Eclipse depth fits - Default everything')
fine_grain_mcmcs, filename = run_multiple_pymc3(
times, fine_snr_flux, fine_snr_uncs, near_best_apertures_NxN_small,
        t0=t0_guess, u=u, period=period_planet, b=b_planet,
        idx_fwd=idx_fwd, idx_rev=idx_rev,
tune=tune, draws=draws, target_accept=target_accept,
injected_light_curve=injected_light_curve,
base_name=base_name, working_dir=working_dir,
do_mcmc=do_mcmc, save_as_you_go=save_as_you_go,
xcenters=None, # DIFFERENT
allow_negative_edepths=False, # DIFFERENT
use_rev_fwd_split=False, # DIFFERENT
use_log_edepth=False # SAME
)
fine_grain_mcmcs_with_no_xcenter_lin_edepth_no_split = fine_grain_mcmcs
filename_with_no_xcenter_lin_edepth_no_split = filename
    print(f'[INFO] This MCMC took {(time() - start1)/60:0.2f} minutes')
del fine_grain_mcmcs, filename
start1 = time()
print('Linear Eclipse depth fits - Everything with splitting fwd rev')
fine_grain_mcmcs, filename = run_multiple_pymc3(
times, fine_snr_flux, fine_snr_uncs, near_best_apertures_NxN_small,
        t0=t0_guess, u=u, period=period_planet, b=b_planet,
        idx_fwd=idx_fwd, idx_rev=idx_rev,
tune=tune, draws=draws, target_accept=target_accept,
injected_light_curve=injected_light_curve,
base_name=base_name, working_dir=working_dir,
do_mcmc=do_mcmc, save_as_you_go=save_as_you_go,
xcenters=None, # SAME
allow_negative_edepths=False, # SAME
use_rev_fwd_split=True, # DIFFERENT
use_log_edepth=False, # SAME
)
fine_grain_mcmcs_with_no_xcenter_lin_edepth_w_split = fine_grain_mcmcs
filename_with_no_xcenter_lin_edepth_w_split = filename
    print(f'[INFO] This MCMC took {(time() - start1)/60:0.2f} minutes')
del fine_grain_mcmcs, filename
start1 = time()
print('Linear Eclipse depth fits - Everything with xcenter')
fine_grain_mcmcs, filename = run_multiple_pymc3(
times, fine_snr_flux, fine_snr_uncs, near_best_apertures_NxN_small,
t0=t0_guess, u=u, period=period_planet, b=b_planet,
idx_fwd=idx_fwd, idx_rev=idx_rev,
tune=tune, draws=draws, target_accept=target_accept,
do_mcmc=do_mcmc, save_as_you_go=save_as_you_go,
injected_light_curve=injected_light_curve,
base_name=base_name, working_dir=working_dir,
xcenters=xcenters_mod, # DIFFERENT
allow_negative_edepths=False, # SAME
use_rev_fwd_split=False, # DIFFERENT
use_log_edepth=False # SAME
)
fine_grain_mcmcs_with_w_xcenter_lin_edepth_no_split = fine_grain_mcmcs
filename_with_w_xcenter_lin_edepth_no_split = filename
    print(f'[INFO] This MCMC took {(time() - start1)/60:0.2f} minutes')
del fine_grain_mcmcs, filename
start1 = time()
print('Linear Eclipse depth fits - '
'Everything with xcenter and splitting fwd rev')
fine_grain_mcmcs, filename = run_multiple_pymc3(
times, fine_snr_flux, fine_snr_uncs, near_best_apertures_NxN_small,
t0=t0_guess, u=u, period=period_planet, b=b_planet,
idx_fwd=idx_fwd, idx_rev=idx_rev,
tune=tune, draws=draws, target_accept=target_accept,
do_mcmc=do_mcmc, save_as_you_go=save_as_you_go,
injected_light_curve=injected_light_curve,
base_name=base_name, working_dir=working_dir,
xcenters=xcenters_mod, # SAME
allow_negative_edepths=False, # SAME
use_rev_fwd_split=True, # DIFFERENT
use_log_edepth=False) # SAME
fine_grain_mcmcs_with_w_xcenter_lin_edepth_w_split = fine_grain_mcmcs
filename_with_w_xcenter_lin_edepth_w_split = filename
    print(f'[INFO] This MCMC took {(time() - start1)/60:0.2f} minutes')
del fine_grain_mcmcs, filename
# Logarithmic Eclipse Depths
start1 = time()
print('Log Eclipse depth fits - Default everything')
fine_grain_mcmcs, filename = run_multiple_pymc3(
times, fine_snr_flux, fine_snr_uncs, near_best_apertures_NxN_small,
t0=t0_guess, u=u, period=period_planet, b=b_planet,
idx_fwd=idx_fwd, idx_rev=idx_rev,
tune=tune, draws=draws, target_accept=target_accept,
do_mcmc=do_mcmc, save_as_you_go=save_as_you_go,
injected_light_curve=injected_light_curve,
base_name=base_name, working_dir=working_dir,
xcenters=None, # DIFFERENT
allow_negative_edepths=False, # SAME
use_rev_fwd_split=False, # DIFFERENT
use_log_edepth=True # DIFFERENT
)
fine_grain_mcmcs_with_no_xcenter_log_edepth_no_split = fine_grain_mcmcs
filename_with_no_xcenter_log_edepth_no_split = filename
    print(f'[INFO] This MCMC took {(time() - start1)/60:0.2f} minutes')
del fine_grain_mcmcs, filename
start1 = time()
print('Log Eclipse depth fits - Everything with splitting fwd rev')
fine_grain_mcmcs, filename = run_multiple_pymc3(
times, fine_snr_flux, fine_snr_uncs, near_best_apertures_NxN_small,
t0=t0_guess, u=u, period=period_planet, b=b_planet,
idx_fwd=idx_fwd, idx_rev=idx_rev,
tune=tune, draws=draws, target_accept=target_accept,
do_mcmc=do_mcmc, save_as_you_go=save_as_you_go,
injected_light_curve=injected_light_curve,
base_name=base_name, working_dir=working_dir,
xcenters=None, # SAME
allow_negative_edepths=False, # SAME
use_rev_fwd_split=True, # DIFFERENT
use_log_edepth=True # SAME
)
fine_grain_mcmcs_with_no_xcenter_log_edepth_w_split = fine_grain_mcmcs
filename_with_no_xcenter_log_edepth_w_split = filename
    print(f'[INFO] This MCMC took {(time() - start1)/60:0.2f} minutes')
start1 = time()
print('Log Eclipse depth fits - Everything with xcenter')
fine_grain_mcmcs, filename = run_multiple_pymc3(
times, fine_snr_flux, fine_snr_uncs, near_best_apertures_NxN_small,
t0=t0_guess, u=u, period=period_planet, b=b_planet,
idx_fwd=idx_fwd, idx_rev=idx_rev,
tune=tune, draws=draws, target_accept=target_accept,
do_mcmc=do_mcmc, save_as_you_go=save_as_you_go,
injected_light_curve=injected_light_curve,
base_name=base_name, working_dir=working_dir,
xcenters=xcenters_mod, # DIFFERENT
allow_negative_edepths=False, # SAME
use_rev_fwd_split=False, # DIFFERENT
use_log_edepth=True # SAME
)
fine_grain_mcmcs_with_w_xcenter_log_edepth_no_split = fine_grain_mcmcs
filename_with_w_xcenter_log_edepth_no_split = filename
    print(f'[INFO] This MCMC took {(time() - start1)/60:0.2f} minutes')
del fine_grain_mcmcs, filename
start1 = time()
print('Log Eclipse depth fits - Everything with xcenter and splitting fwd rev')
fine_grain_mcmcs, filename = run_multiple_pymc3(
times, fine_snr_flux, fine_snr_uncs, near_best_apertures_NxN_small,
t0=t0_guess, u=u, period=period_planet, b=b_planet,
idx_fwd=idx_fwd, idx_rev=idx_rev,
tune=tune, draws=draws, target_accept=target_accept,
do_mcmc=do_mcmc, save_as_you_go=save_as_you_go,
injected_light_curve=injected_light_curve,
base_name=base_name, working_dir=working_dir,
xcenters=xcenters_mod, # SAME
allow_negative_edepths=False, # SAME
use_rev_fwd_split=True, # DIFFERENT
use_log_edepth=True # SAME
)
fine_grain_mcmcs_with_w_xcenter_log_edepth_w_split = fine_grain_mcmcs
filename_with_w_xcenter_log_edepth_w_split = filename
    print(f'[INFO] This MCMC took {(time() - start1)/60:0.2f} minutes')
print(f'[INFO] All 12 MCMCs took {(time() - start0)/60:0.2f} minutes')
return [fine_grain_mcmcs_with_no_xcenter_lin_edepth_no_split_w_negEcl,
fine_grain_mcmcs_with_no_xcenter_lin_edepth_w_split_w_negEcl,
fine_grain_mcmcs_with_w_xcenter_lin_edepth_no_split_w_negEcl,
fine_grain_mcmcs_with_w_xcenter_lin_edepth_w_split_w_negEcl,
fine_grain_mcmcs_with_no_xcenter_lin_edepth_no_split,
fine_grain_mcmcs_with_no_xcenter_lin_edepth_w_split,
fine_grain_mcmcs_with_w_xcenter_lin_edepth_no_split,
fine_grain_mcmcs_with_w_xcenter_lin_edepth_w_split,
fine_grain_mcmcs_with_no_xcenter_log_edepth_no_split,
fine_grain_mcmcs_with_no_xcenter_log_edepth_w_split,
fine_grain_mcmcs_with_w_xcenter_log_edepth_no_split,
fine_grain_mcmcs_with_w_xcenter_log_edepth_w_split
]
def rename_file(filename, data_dir='./', base_time=2400000.5,
format='jd', scale='utc'):
path_in = os.path.join(data_dir, filename)
header = fits.getheader(path_in, ext=0)
time_stamp = 0.5 * (header['EXPSTART'] + header['EXPEND'])
time_obj = astropy.time.Time(val=time_stamp, val2=base_time,
format=format, scale=scale)
out_filename = f'{time_obj.isot}_{filename}'
path_out = os.path.join(data_dir, out_filename)
os.rename(path_in, path_out)
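# rename_file prefixes each file with its mid-exposure timestamp in ISOT form.
# EXPSTART/EXPEND are taken to be MJD header keywords (an assumption), and
# base_time=2400000.5 shifts MJD to JD before astropy formats the date, e.g.
# 'ibxy01a0q_flt.fits' -> '2016-07-04T12:34:56.789_ibxy01a0q_flt.fits'
# (hypothetical filename for illustration).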
def compute_delta_sdnr(map_soln, phots, idx_fwd, idx_rev):
ppm = 1e6
phots_std_fwd = phots[idx_fwd].std()
phots_std_rev = phots[idx_rev].std()
phots_std = np.mean([phots_std_fwd, phots_std_rev])
if 'mean_fwd' not in map_soln.keys():
map_model = map_soln['light_curve'].flatten() + map_soln['line_model']
else:
        map_model = np.zeros_like(phots)  # same length as the data being modeled
map_model[idx_fwd] = map_soln['light_curve_fwd'].flatten() + \
map_soln['line_model_fwd']
map_model[idx_rev] = map_soln['light_curve_rev'].flatten() + \
map_soln['line_model_rev']
    varnames = [key for key in map_soln.keys()
                if '__' not in key and 'light' not in key
                and 'line' not in key and 'le_edepth_0' not in key]
res_fwd = np.std(map_model[idx_fwd] - phots[idx_fwd])
res_rev = np.std(map_model[idx_rev] - phots[idx_rev])
res_std = np.mean([res_fwd, res_rev])
print(f'{str(varnames):<80}')
    print(f'{res_std*ppm:0.2f}, {phots_std*ppm:0.2f}, '
          f'{(phots_std - res_std)*ppm:0.2f} ppm difference')
return res_std * ppm, phots_std * ppm, (phots_std - res_std) * ppm
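# SDNR here is the standard deviation of the normalized residuals, computed
# per scan direction and averaged, then scaled to ppm. The returned triple is
# (residual SDNR, raw photometric scatter, their difference): a positive third
# value means the MAP model reduced the scatter.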
def compute_chisq_aic(planet, aper_column, map_soln, idx_fwd, idx_rev,
use_idx_fwd_, use_xcenters_, use_ycenters_,
use_trace_angles_, use_trace_lengths_, use_pink_gp):
ppm = 1e6
phots = planet.normed_photometry_df[aper_column].values
uncs = planet.normed_uncertainty_df[aper_column].values
phots = phots - np.median(phots)
n_pts = len(phots)
# 2 == eclipse depth + mean
n_params = (2 + use_idx_fwd_ + use_xcenters_ + use_ycenters_ +
use_trace_angles_ + use_trace_lengths_ + 3 * use_pink_gp)
if 'mean_fwd' not in map_soln.keys():
map_model = map_soln['light_curve'].flatten() + map_soln['line_model']
else:
map_model = np.zeros_like(planet.times)
map_model[idx_fwd] = map_soln['light_curve_fwd'].flatten() + \
map_soln['line_model_fwd']
map_model[idx_rev] = map_soln['light_curve_rev'].flatten() + \
map_soln['line_model_rev']
# if we split Fwd/Rev, then there are now 2 means
n_params = n_params + 1
correction = 2 * n_params * (n_params + 1) / (n_pts - n_params - 1)
sdnr_ = np.std(map_model - phots) * ppm
chisq_ = np.sum((map_model - phots)**2 / uncs**2)
aic_ = chisq_ + 2 * n_params + correction
    bic_ = chisq_ + n_params * np.log(n_pts)  # BIC penalty uses the natural log
return chisq_, aic_, bic_, sdnr_
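# With a Gaussian likelihood, -2 ln L = chisq + const, so the criteria above
# reduce to the usual small-sample-corrected forms:
#   AICc = chisq + 2k + 2k(k + 1) / (n - k - 1)
#   BIC  = chisq + k * ln(n)
# with k = n_params and n = n_pts.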
def compute_inliers(instance, aper_colname='aperture_sum_176x116', n_sig=2):
phots_ = instance.normed_photometry_df[aper_colname]
inliers_fwd = ~sigma_clip(phots_[instance.idx_fwd],
sigma=n_sig,
maxiters=1,
stdfunc=mad_std).mask
inliers_rev = ~sigma_clip(phots_[instance.idx_rev],
sigma=n_sig,
maxiters=1,
stdfunc=mad_std).mask
inliers_fwd = np.where(inliers_fwd)[0]
inliers_rev = np.where(inliers_rev)[0]
return inliers_fwd, inliers_rev
def compute_outliers(instance, aper_colname='aperture_sum_176x116', n_sig=2):
phots_ = instance.normed_photometry_df[aper_colname]
outliers_fwd = sigma_clip(phots_[instance.idx_fwd],
sigma=n_sig,
maxiters=1,
stdfunc=mad_std).mask
outliers_rev = sigma_clip(phots_[instance.idx_rev],
sigma=n_sig,
maxiters=1,
stdfunc=mad_std).mask
outliers_fwd = np.where(outliers_fwd)[0]
outliers_rev = np.where(outliers_rev)[0]
return outliers_fwd, outliers_rev
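# compute_inliers and compute_outliers are exact complements: the former
# negates the sigma_clip mask before np.where. Sanity-check sketch
# (hypothetical `planet` instance):
#   in_fwd, _ = compute_inliers(planet)
#   out_fwd, _ = compute_outliers(planet)
#   assert len(in_fwd) + len(out_fwd) == len(planet.idx_fwd)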
def extract_map_only_data(planet, idx_fwd, idx_rev,
maps_only_filename=None,
data_dir='../savefiles',
use_pink_gp=False):
if maps_only_filename is None:
maps_only_filename = 'results_decor_span_MAPs_all400_SDNR_only.joblib.save'
maps_only_filename = os.path.join(data_dir, maps_only_filename)
info_message('Loading Decorrelation Results for MAPS only Results')
decor_span_MAPs = joblib.load(maps_only_filename)
decor_aper_columns_list = list(decor_span_MAPs.keys())
n_apers = len(decor_span_MAPs)
aper_widths = []
aper_heights = []
idx_split = []
use_xcenters = []
use_ycenters = []
use_trace_angles = []
use_trace_lengths = []
sdnr_apers = []
chisq_apers = []
aic_apers = []
bic_apers = []
res_std_ppm = []
phots_std_ppm = []
res_diff_ppm = []
keys_list = []
n_pts = len(planet.normed_photometry_df)
map_solns = {}
fine_grain_mcmcs_s = {}
generator = enumerate(decor_span_MAPs.items())
for m, (aper_column, map_results) in tqdm(generator, total=n_apers):
if aper_column in ['xcenter', 'ycenter']:
continue
n_results_ = len(map_results)
for map_result in map_results:
aper_width_, aper_height_ = np.int32(
aper_column.split('_')[-1].split('x'))
aper_widths.append(aper_width_)
aper_heights.append(aper_height_)
idx_split.append(map_result[0])
use_xcenters.append(map_result[2])
use_ycenters.append(map_result[3])
use_trace_angles.append(map_result[4])
use_trace_lengths.append(map_result[5])
fine_grain_mcmcs_ = map_result[6]
map_soln_ = map_result[7]
res_std_ppm.append(map_result[8])
phots_std_ppm.append(map_result[9])
res_diff_ppm.append(map_result[10])
key = (f'aper_column:{aper_column}-'
f'idx_split:{idx_split[-1]}-'
f'_use_xcenters:{use_xcenters[-1]}-'
f'_use_ycenters:{use_ycenters[-1]}-'
f'_use_trace_angles:{use_trace_angles[-1]}-'
f'_use_trace_lengths:{use_trace_lengths[-1]}')
keys_list.append(key)
fine_grain_mcmcs_s[key] = fine_grain_mcmcs_
map_solns[key] = map_soln_
chisq_, aic_, bic_, sdnr_ = compute_chisq_aic(
planet,
aper_column,
map_soln_,
idx_fwd,
idx_rev,
idx_split[-1],
use_xcenters[-1],
use_ycenters[-1],
use_trace_angles[-1],
use_trace_lengths[-1],
use_pink_gp=use_pink_gp)
sdnr_apers.append(sdnr_)
chisq_apers.append(chisq_)
aic_apers.append(aic_)
bic_apers.append(bic_)
aper_widths = np.array(aper_widths)
aper_heights = np.array(aper_heights)
idx_split = np.array(idx_split)
use_xcenters = np.array(use_xcenters)
use_ycenters = np.array(use_ycenters)
use_trace_angles = np.array(use_trace_angles)
use_trace_lengths = np.array(use_trace_lengths)
sdnr_apers = np.array(sdnr_apers)
chisq_apers = np.array(chisq_apers)
aic_apers = np.array(aic_apers)
bic_apers = np.array(bic_apers)
res_std_ppm = np.array(res_std_ppm)
phots_std_ppm = np.array(phots_std_ppm)
res_diff_ppm = np.array(res_diff_ppm)
keys_list = np.array(keys_list)
return (decor_span_MAPs, keys_list, aper_widths, aper_heights,
idx_split, use_xcenters, use_ycenters,
use_trace_angles, use_trace_lengths,
fine_grain_mcmcs_s, map_solns,
res_std_ppm, phots_std_ppm, res_diff_ppm,
sdnr_apers, chisq_apers, aic_apers, bic_apers)
def create_sub_sect(n_options, idx_split, use_xcenters, use_ycenters,
use_trace_angles, use_trace_lengths,
idx_split_, use_xcenters_, use_ycenters_,
use_trace_angles_, use_trace_lengths_):
sub_sect = np.ones(n_options).astype(bool)
_idx_split = idx_split == idx_split_
_use_xcenters = use_xcenters == use_xcenters_
_use_ycenters = use_ycenters == use_ycenters_
_use_trace_angles = use_trace_angles == use_trace_angles_
_use_tracelengths = use_trace_lengths == use_trace_lengths_
sub_sect = np.bitwise_and(sub_sect, _idx_split)
sub_sect = np.bitwise_and(sub_sect, _use_xcenters)
sub_sect = np.bitwise_and(sub_sect, _use_ycenters)
sub_sect = np.bitwise_and(sub_sect, _use_trace_angles)
sub_sect = np.bitwise_and(sub_sect, _use_tracelengths)
return np.where(sub_sect)[0]
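# Equivalent, more compact form of create_sub_sect (a sketch with unchanged
# semantics, assuming all inputs are length-n_options boolean arrays):
#   mask = ((idx_split == idx_split_) & (use_xcenters == use_xcenters_) &
#           (use_ycenters == use_ycenters_) &
#           (use_trace_angles == use_trace_angles_) &
#           (use_trace_lengths == use_trace_lengths_))
#   return np.where(mask)[0]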
def organize_results_ppm_chisq_aic(n_options, idx_split, use_xcenters,
use_ycenters, use_trace_angles,
use_trace_lengths, res_std_ppm,
sdnr_apers, chisq_apers,
aic_apers, bic_apers,
aper_widths,
aper_heights,
idx_split_, use_xcenters_, use_ycenters_,
use_trace_angles_, use_trace_lengths_):
sub_sect = create_sub_sect(n_options,
idx_split,
use_xcenters,
use_ycenters,
use_trace_angles,
use_trace_lengths,
idx_split_,
use_xcenters_,
use_ycenters_,
use_trace_angles_,
use_trace_lengths_)
aper_widths_sub = aper_widths[sub_sect]
aper_heights_sub = aper_heights[sub_sect]
argbest_ppm = res_std_ppm[sub_sect].argmin()
best_ppm_sub = res_std_ppm[sub_sect][argbest_ppm]
best_sdnr_sub = sdnr_apers[sub_sect][argbest_ppm]
best_chisq_sub = chisq_apers[sub_sect][argbest_ppm]
best_aic_sub = aic_apers[sub_sect][argbest_ppm]
width_best = aper_widths_sub[argbest_ppm]
height_best = aper_heights_sub[argbest_ppm]
sdnr_res_sub_min = sdnr_apers[sub_sect].min()
std_res_sub_min = res_std_ppm[sub_sect].min()
chisq_sub_min = chisq_apers[sub_sect].min()
aic_sub_min = aic_apers[sub_sect].min()
    entry = {'idx_split': idx_split_,
             'xcenters': use_xcenters_,
             'ycenters': use_ycenters_,
             'trace_angles': use_trace_angles_,
             'trace_lengths': use_trace_lengths_,
             'width_best': width_best,
             'height_best': height_best,
             'best_ppm_sub': best_ppm_sub,
             'best_sdnr_sub': best_sdnr_sub,
             'best_chisq_sub': best_chisq_sub,
             'best_aic_sub': best_aic_sub,
             'std_res_sub_min': std_res_sub_min,
             'sdnr_res_sub_min': sdnr_res_sub_min,
             'chisq_sub_min': chisq_sub_min,
             'aic_sub_min': aic_sub_min}
return entry
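# Each `entry` summarizes one decorrelation combination: the aperture that
# minimizes the residual scatter (the width/height/best_* fields) plus the
# per-metric minima over that sub-selection (the *_sub_min fields).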
def get_map_results_models(times, map_soln, idx_fwd, idx_rev):
if 'mean_fwd' not in map_soln.keys():
map_model = map_soln['light_curve'].flatten()
line_model = map_soln['line_model'].flatten()
else:
map_model = np.zeros_like(times)
line_model = np.zeros_like(times)
map_model[idx_fwd] = map_soln['light_curve_fwd'].flatten()
line_model[idx_fwd] = map_soln['line_model_fwd'].flatten()
map_model[idx_rev] = map_soln['light_curve_rev'].flatten()
line_model[idx_rev] = map_soln['line_model_rev'].flatten()
return map_model, line_model
def create_results_df(aper_widths, aper_heights,
res_std_ppm, sdnr_apers, chisq_apers,
aic_apers, bic_apers, idx_split,
use_xcenters, use_ycenters,
use_trace_angles, use_trace_lengths):
n_options = len(aper_widths)
results_dict = {}
for idx_split_ in [True, False]:
for use_xcenters_ in [True, False]:
for use_ycenters_ in [True, False]:
for use_trace_angles_ in [True, False]:
for use_trace_lengths_ in [True, False]:
entry = organize_results_ppm_chisq_aic(
n_options, idx_split, use_xcenters, use_ycenters,
use_trace_angles, use_trace_lengths, res_std_ppm,
sdnr_apers, chisq_apers, aic_apers, bic_apers,
aper_widths, aper_heights,
idx_split_, use_xcenters_, use_ycenters_,
use_trace_angles_, use_trace_lengths_)
for key, val in entry.items():
if key not in results_dict.keys():
results_dict[key] = []
results_dict[key].append(val)
return pd.DataFrame(results_dict)
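# The five nested loops enumerate all 2**5 = 32 on/off combinations, so the
# returned DataFrame has 32 rows. Equivalent sketch with itertools
# (hypothetical refactor):
#   from itertools import product
#   for flags in product([True, False], repeat=5):
#       entry = organize_results_ppm_chisq_aic(
#           n_options, idx_split, use_xcenters, use_ycenters,
#           use_trace_angles, use_trace_lengths, res_std_ppm,
#           sdnr_apers, chisq_apers, aic_apers, bic_apers,
#           aper_widths, aper_heights, *flags)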
#!/usr/bin/env python
import os
import glob
import sys
import shutil
import pdb
import re
from argparse import ArgumentParser
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.insert(0,'..')
import ESM_utils as esm
from scipy.optimize import curve_fit
from sklearn.preprocessing import MinMaxScaler
def create_ab_prob_all_visits_df(file_paths, genetic_df, clinical_df, pib_df):
ab_prob_df_list = []
    for fp in file_paths:
        ab_curr_prob_df = pd.read_csv(fp, index_col=0)
        visit = fp.split(".")[-2].split("_")[-1]
ab_curr_prob_df.loc[:, 'visit'] = visit
#drop participants that did not pass QC according to PUP's PET processing
for sub in ab_curr_prob_df.index:
if not ((pib_df['IMAGID'] == sub) & (pib_df['visit'] == visit)).any():
ab_curr_prob_df = ab_curr_prob_df[ab_curr_prob_df.index != sub]
ab_prob_df_list.append(ab_curr_prob_df)
#concatenate all dataframes
ab_prob_all_visits_df = pd.concat(ab_prob_df_list)
#add metadata to the dataframe
ab_prob_all_visits_df = add_metadata_to_amyloid_df(ab_prob_all_visits_df,
genetic_df,
clinical_df)
return ab_prob_all_visits_df
def add_metadata_to_amyloid_df(df, genetic_df, clinical_df):
for sub in df.index:
sub_df = df[df.index == sub]
visits = list(sub_df.visit)
mutation = genetic_df[(genetic_df.IMAGID == sub)].Mutation.values[0]
for i in range(0, len(visits)):
visit = visits[i]
dian_eyo = clinical_df[(clinical_df.IMAGID == sub) & (clinical_df.visit == visit)].DIAN_EYO.values
age = clinical_df[(clinical_df.IMAGID == sub) & (clinical_df.visit == visit)].VISITAGEc.values
if len(dian_eyo) == 0:
print(sub + " " + visit)
if len(dian_eyo) > 0:
df.loc[(df.index == sub) & (df.visit == visit), "DIAN_EYO"] = dian_eyo[0]
df.loc[(df.index == sub) & (df.visit == visit), "VISITAGEc"] = age[0]
df.loc[(df.index == sub) & (df.visit == visit), "visitNumber"] = i + 1
df.loc[(df.index == sub) & (df.visit == visit), "Mutation"] = mutation
return df
def get_rois_to_analyze(roi_colnames, rois_to_exclude):
roi_cols_to_exclude = []
for col in roi_colnames:
for rte in rois_to_exclude:
if rte in col.lower():
roi_cols_to_exclude.append(col)
    roi_cols_to_keep = [x for x in roi_colnames if x not in roi_cols_to_exclude]
    return roi_cols_to_keep, roi_cols_to_exclude
def exclude_subcortical_rois(df, roi_cols_to_exclude):
df[roi_cols_to_exclude] = 0
return df
def stripplot_subcortical_mc_nc(ab_prob_df):
plt.figure(figsize=(10,10))
nrows = 2
ncols = 2
subcortical_rois = ["Left Thalamus", "Left Caudate", "Left Putamen", "Left Globus Pallidus"]
for i, roi in enumerate(subcortical_rois):
j = i + 1
plt.subplot(nrows, ncols, j)
sns.stripplot(x="Mutation", y=roi, data=ab_prob_df, size=3)
plt.title(roi, fontsize=12)
plt.ylabel("")
#plt.xticks(["Noncarrier", "Mutation Carrier"])
plt.tight_layout()
plt.savefig(os.path.join("../../figures", "mc_nc_roi_stripplot.png"))
plt.close()
def sort_df(ab_prob_df):
# sort subjects
ind_sorter = pd.DataFrame(ab_prob_df,copy=True)
ind_sorter.loc[:,'mean'] = ab_prob_df.mean(axis=1)
ind_order = ind_sorter.sort_values('mean',axis=0,ascending=True).index
# column sorter
col_sorter = pd.DataFrame(ab_prob_df,copy=True)
col_sorter.loc['mean'] = ab_prob_df.mean(axis=0)
col_order = col_sorter.sort_values('mean',axis=1,ascending=False).columns
ab_prob_df_sorted = ab_prob_df.loc[ind_order, col_order]
return ab_prob_df_sorted
def fsigmoid(x, a, b):
# Define sigmoid function
return 1.0 / (1.0 + np.exp(-a*(x-b)))
def zscore_mc_nc(ab_prob_df_mc, ab_prob_df_nc, roi_cols):
ab_prob_df_mc_zscore = ab_prob_df_mc.copy()
for roi in roi_cols:
mc_roi_vals = ab_prob_df_mc.loc[:, roi]
nc_roi_vals = ab_prob_df_nc.loc[:, roi]
mc_roi_vals_zscore = (mc_roi_vals-nc_roi_vals.mean())/nc_roi_vals.std()
ab_prob_df_mc_zscore.loc[:, roi] = np.absolute(mc_roi_vals_zscore)
scaler = MinMaxScaler()
ab_prob_df_mc_zscore[roi_cols] = scaler.fit_transform(ab_prob_df_mc_zscore[roi_cols])
return ab_prob_df_mc_zscore
def sigmoid_normalization(ab_prob_df):
'''
For each ROI, a sigmoidal function is fit to the values across all
individuals to estimate the parameters of a sigmoid for this ROI.
    The original ROI signal is then blended with the fitted sigmoid
    (1/2 the original signal + 1/2 the fitted sigmoid).
ab_prob_df -- A subject x ROI matrix of AB binding probabilities (pandas DataFrame).
'''
# sort the original signal first
ab_prob_df_sorted = sort_df(ab_prob_df)
ab_prob_df_scaled = pd.DataFrame(index=ab_prob_df_sorted.index, columns=ab_prob_df_sorted.columns)
for roi in ab_prob_df_sorted.columns:
vals = ab_prob_df_sorted[roi]
vals_idx = np.arange(0, len(vals))
popt, pcov = curve_fit(fsigmoid, vals_idx, vals, method='dogbox', bounds=([0,0],[1, len(vals)]))
x = np.linspace(0, len(vals), num=len(vals))
y = fsigmoid(x, *popt)
# wt1 and wt2 correspond to how much we're scaling the contribution of original
# and rescaled signals
wt1, wt2 = 1, 1
vals_scaled = (wt1*y + wt2*vals) / 2
ab_prob_df_scaled.loc[:, roi] = vals_scaled.values
ab_prob_df_scaled = ab_prob_df_scaled.loc[ab_prob_df.index, ab_prob_df.columns]
return ab_prob_df_scaled
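# A minimal usage sketch (hypothetical data, for illustration only):
#   demo = pd.DataFrame(np.sort(np.random.rand(20, 3), axis=0), columns=["a", "b", "c"])
#   demo_scaled = sigmoid_normalization(demo)  # same shape; each ROI column blended with its fitted sigmoid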
def plot_roi_sub_heatmap(ab_prob_df, roi_cols):
    path = os.path.join("../../figures", "roi_sub_heatmap.png")
esm.Plot_Probabilites(ab_prob_df[roi_cols], cmap="Spectral_r", figsize=(20,10), path=path)
def main(args):
parser = ArgumentParser()
parser.add_argument("--ab_prob_matrix_dir",
help="Please pass the files directory containing the PiB-PET probability matrices")
parser.add_argument("--esm_input_file",
help="Please provide desired ESM input filename.")
parser.add_argument("--connectivity_type",
help="Specify type of connectivity, e.g. FC or ACP",
default="ACP")
parser.add_argument("--epicenters_for_esm",
help="Please provide a list of regions to test as \
epicenters (all lower-case)",
nargs="+",
type=str,
default=None)
parser.add_argument("--zscore",
help="Should the amyloid beta probabilities be z-scored.",
default=False,
type=bool)
parser.add_argument("--threshold",
help="Should the amyloid beta probabilities be thresholded.",
default=False,
type=bool)
parser.add_argument("--scale",
type=bool,
default=False,
help="Should the amyloid beta probabilities be within ROI sigmoid normalized.")
parser.add_argument("--visitNumber",
default=1,
type=int)
results = parser.parse_args() if args is None else parser.parse_args(args)
#results = parser.parse_args(args)
ab_prob_matrix_dir = results.ab_prob_matrix_dir
print(ab_prob_matrix_dir)
esm_input_file = results.esm_input_file
connectivity_type = results.connectivity_type
epicenters_for_esm = results.epicenters_for_esm
zscore = results.zscore
scale = results.scale
threshold = results.threshold
visitNumber = results.visitNumber
if connectivity_type == "ACP":
conn_file = '../../data/DIAN/connectivity_matrices/Matrix_ACP.mat'
elif connectivity_type == "FC":
conn_file = '../../data/DIAN/connectivity_matrices/DIAN_FC_NC_Correlation_Matrix_Avg_ReducedConfounds.mat'
if scale == True:
esm_input_file = esm_input_file + "_scaled"
file_paths = sorted(glob.glob(ab_prob_matrix_dir))
pib_df = pd.read_csv("../../data/DIAN/participant_metadata/pib_D1801.csv")
genetic_df = pd.read_csv("../../data/DIAN/participant_metadata/GENETIC_D1801.csv")
clinical_df = pd.read_csv("../../data/DIAN/participant_metadata/CLINICAL_D1801.csv")
ab_prob_all_visits_df = create_ab_prob_all_visits_df(file_paths, genetic_df, clinical_df, pib_df)
# get column names corresponding to ROIs
roi_cols = ab_prob_all_visits_df.columns[0:78]
roi_cols_to_keep = [y for y in roi_cols if not all([x==0 for x in ab_prob_all_visits_df[y]])]
# get MATLAB compatible indices of ROIs to use as epicenters
epicenters_idx = []
for i, roi in enumerate(roi_cols_to_keep):
        if epicenters_for_esm is not None and roi.lower() in epicenters_for_esm:
print(roi)
epicenters_idx.append(i+1)
#stripplot_subcortical_mc_nc(ab_prob_all_visits_df)
# extract df for subjects' first timepoint for both mutation carriers and noncarriers
# For each region, create a null distribution from noncarriers' signal
# Calculate a z-score for each subject (with regards the non-carrier distribution)
# Take the absolute value of this z-score
# Normalize to 0-1
ab_prob_mc = ab_prob_all_visits_df[ab_prob_all_visits_df.Mutation == 1]
ab_prob_t1_mc = ab_prob_mc[ab_prob_mc.visitNumber == visitNumber]
ab_prob_nc = ab_prob_all_visits_df[ab_prob_all_visits_df.Mutation == 0]
if zscore == True:
        ab_prob_mc_zscore = zscore_mc_nc(ab_prob_mc, ab_prob_nc, roi_cols_to_keep)
ab_prob_t1_mc_zscore = ab_prob_mc_zscore[ab_prob_mc_zscore.visitNumber == visitNumber]
if scale == True:
ab_prob_t1_mc_zscore_sigmoid = ab_prob_t1_mc_zscore.copy()
ab_prob_t1_mc_zscore_sigmoid[roi_cols_to_keep] = sigmoid_normalization(ab_prob_t1_mc_zscore[roi_cols_to_keep])
if threshold == True:
ab_prob_t1_mc_zscore_threshold = ab_prob_t1_mc_zscore.copy()
for col in roi_cols:
ab_prob_t1_mc_zscore_threshold[col].values[ab_prob_t1_mc_zscore_threshold[col] < 0.15] = 0
# prepare inputs for ESM
output_dir = '../../data/DIAN/esm_input_mat_files/'
conn_matrices = [conn_file, '../../data/DIAN/connectivity_matrices/Matrix_LONG.mat']
conn_mat_names = ['Map', 'Map']
conn_out_names = ['ACP', 'LONG']
file_names = esm_input_file + '.mat'
ages = list(ab_prob_t1_mc.loc[:, 'VISITAGEc'])
sub_ids = list(ab_prob_t1_mc.index)
visit_labels = list(ab_prob_t1_mc.loc[:, 'visit'])
    # specify whether sigmoid normalized data is used as the test data. always include the un-normalized data.
    # NB: the --scale and --threshold options assume --zscore is also set, since they operate on the z-scored data.
    if zscore == True:
        plot_roi_sub_heatmap(ab_prob_t1_mc_zscore, roi_cols)
if scale == True:
prob_matrices = {'test_data': ab_prob_t1_mc_zscore_sigmoid.loc[:, roi_cols], 'orig_data': ab_prob_t1_mc_zscore.loc[:, roi_cols]}
elif threshold == True:
prob_matrices = {'test_data': ab_prob_t1_mc_zscore_threshold.loc[:, roi_cols], 'orig_data': ab_prob_t1_mc_zscore_threshold.loc[:, roi_cols]}
elif zscore == True:
prob_matrices = {'test_data': ab_prob_t1_mc_zscore.loc[:, roi_cols], 'orig_data': ab_prob_t1_mc_zscore.loc[:, roi_cols]}
else:
prob_matrices = {'test_data': ab_prob_t1_mc.loc[:, roi_cols], 'orig_data': ab_prob_t1_mc.loc[:, roi_cols]}
esm.Prepare_Inputs_for_ESM(prob_matrices,
ages,
output_dir,
file_names,
conn_matrices,
conn_mat_names,
conn_out_names,
epicenters_idx,
sub_ids,
visit_labels,
roi_cols_to_keep,
figure=False)
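# Example invocation (illustrative only; the script name, glob pattern and epicenter names are hypothetical):
#   python <this_script>.py --ab_prob_matrix_dir "../../data/DIAN/ab_prob_matrices/*.csv" \
#       --esm_input_file esm_input --connectivity_type ACP \
#       --epicenters_for_esm "precuneus" --zscore True
# NB: since --zscore, --threshold and --scale use `type=bool`, argparse converts any
# non-empty string (even "False") to True; omit a flag entirely to keep it False.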
if __name__ == "__main__":
main(sys.argv[1:]) | [
"matplotlib.pyplot.title",
"numpy.absolute",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.preprocessing.MinMaxScaler",
"ESM_utils.Prepare_Inputs_for_ESM",
"matplotlib.pyplot.figure",
"numpy.exp",
"glob.glob",
"os.path.join",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"ma... | [((237, 261), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (252, 261), False, 'import sys\n'), ((1093, 1119), 'pandas.concat', 'pd.concat', (['ab_prob_df_list'], {}), '(ab_prob_df_list)\n', (1102, 1119), True, 'import pandas as pd\n'), ((2921, 2949), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2931, 2949), True, 'import matplotlib.pyplot as plt\n'), ((3369, 3387), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3385, 3387), True, 'import matplotlib.pyplot as plt\n'), ((3466, 3477), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3475, 3477), True, 'import matplotlib.pyplot as plt\n'), ((3545, 3580), 'pandas.DataFrame', 'pd.DataFrame', (['ab_prob_df'], {'copy': '(True)'}), '(ab_prob_df, copy=True)\n', (3557, 3580), True, 'import pandas as pd\n'), ((3748, 3783), 'pandas.DataFrame', 'pd.DataFrame', (['ab_prob_df'], {'copy': '(True)'}), '(ab_prob_df, copy=True)\n', (3760, 3783), True, 'import pandas as pd\n'), ((4507, 4521), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4519, 4521), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((5169, 5247), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'ab_prob_df_sorted.index', 'columns': 'ab_prob_df_sorted.columns'}), '(index=ab_prob_df_sorted.index, columns=ab_prob_df_sorted.columns)\n', (5181, 5247), True, 'import pandas as pd\n'), ((5987, 6036), 'os.path.join', 'os.path.join', (['"""../../figures/roi_sub_heatmap.png"""'], {}), "('../../figures/roi_sub_heatmap.png')\n", (5999, 6036), False, 'import os\n'), ((6041, 6136), 'ESM_utils.Plot_Probabilites', 'esm.Plot_Probabilites', (['ab_prob_df[roi_cols]'], {'cmap': '"""Spectral_r"""', 'figsize': '(20, 10)', 'path': 'path'}), "(ab_prob_df[roi_cols], cmap='Spectral_r', figsize=(20,\n 10), path=path)\n", (6062, 6136), True, 'import ESM_utils as esm\n'), ((6163, 6179), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (6177, 6179), False, 'from argparse import ArgumentParser\n'), ((8509, 8574), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/DIAN/participant_metadata/pib_D1801.csv"""'], {}), "('../../data/DIAN/participant_metadata/pib_D1801.csv')\n", (8520, 8574), True, 'import pandas as pd\n'), ((8592, 8661), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/DIAN/participant_metadata/GENETIC_D1801.csv"""'], {}), "('../../data/DIAN/participant_metadata/GENETIC_D1801.csv')\n", (8603, 8661), True, 'import pandas as pd\n'), ((8680, 8750), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/DIAN/participant_metadata/CLINICAL_D1801.csv"""'], {}), "('../../data/DIAN/participant_metadata/CLINICAL_D1801.csv')\n", (8691, 8750), True, 'import pandas as pd\n'), ((11825, 12022), 'ESM_utils.Prepare_Inputs_for_ESM', 'esm.Prepare_Inputs_for_ESM', (['prob_matrices', 'ages', 'output_dir', 'file_names', 'conn_matrices', 'conn_mat_names', 'conn_out_names', 'epicenters_idx', 'sub_ids', 'visit_labels', 'roi_cols_to_keep'], {'figure': '(False)'}), '(prob_matrices, ages, output_dir, file_names,\n conn_matrices, conn_mat_names, conn_out_names, epicenters_idx, sub_ids,\n visit_labels, roi_cols_to_keep, figure=False)\n', (11851, 12022), True, 'import ESM_utils as esm\n'), ((543, 582), 'pandas.read_csv', 'pd.read_csv', (['file_paths[i]'], {'index_col': '(0)'}), '(file_paths[i], index_col=0)\n', (554, 582), True, 'import pandas as pd\n'), ((3151, 3179), 'matplotlib.pyplot.subplot', 'plt.subplot', (['nrows', 'ncols', 'j'], {}), '(nrows, ncols, 
j)\n', (3162, 3179), True, 'import matplotlib.pyplot as plt\n'), ((3188, 3247), 'seaborn.stripplot', 'sns.stripplot', ([], {'x': '"""Mutation"""', 'y': 'roi', 'data': 'ab_prob_df', 'size': '(3)'}), "(x='Mutation', y=roi, data=ab_prob_df, size=3)\n", (3201, 3247), True, 'import seaborn as sns\n'), ((3256, 3283), 'matplotlib.pyplot.title', 'plt.title', (['roi'], {'fontsize': '(12)'}), '(roi, fontsize=12)\n', (3265, 3283), True, 'import matplotlib.pyplot as plt\n'), ((3293, 3307), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (3303, 3307), True, 'import matplotlib.pyplot as plt\n'), ((3404, 3460), 'os.path.join', 'os.path.join', (['"""../../figures"""', '"""mc_nc_roi_stripplot.png"""'], {}), "('../../figures', 'mc_nc_roi_stripplot.png')\n", (3416, 3460), False, 'import os\n'), ((4461, 4492), 'numpy.absolute', 'np.absolute', (['mc_roi_vals_zscore'], {}), '(mc_roi_vals_zscore)\n', (4472, 4492), True, 'import numpy as np\n'), ((8464, 8493), 'glob.glob', 'glob.glob', (['ab_prob_matrix_dir'], {}), '(ab_prob_matrix_dir)\n', (8473, 8493), False, 'import glob\n'), ((4084, 4104), 'numpy.exp', 'np.exp', (['(-a * (x - b))'], {}), '(-a * (x - b))\n', (4090, 4104), True, 'import numpy as np\n')] |
"""
The model's parameters module
=============================
This module defines the main classes containing the model configuration parameters.
The parameters are typically specified as :class:`~.params.parameter.Parameter` objects.
There are seven types of parameters arranged in classes:
* :class:`ScaleParams` contains the model scale parameters. These parameters are used to scale and
  `nondimensionalize`_ the :class:`~.params.parameter.Parameter` objects of the other parameters classes according to
  their :attr:`~.params.parameter.Parameter.units` attribute.
* :class:`AtmosphericParams` contains the atmospheric dynamical parameters.
* :class:`AtmosphericTemperatureParams` contains the atmosphere's temperature and heat-exchange parameters.
* :class:`OceanicParams` contains the oceanic dynamical parameters.
* :class:`OceanicTemperatureParams` contains the ocean's temperature and heat-exchange parameters.
* :class:`GroundParams` contains the ground dynamical parameters (e.g. orography).
* :class:`GroundTemperatureParams` contains the ground's temperature and heat-exchange parameters.
These parameters classes are regrouped into a global structure :class:`QgParams` which also contains
* spectral modes definition of the model
* physical constants
* parameters derived from the ones provided by the user
* helper functions to initialize and parameterize the model
This global parameters structure is used by the other modules to construct the model's ordinary differential
equations.
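For example, a global parameters structure can be created and configured as follows (a minimal sketch;
the parameter values below are illustrative):
>>> from qgs.params.params import QgParams
>>> model_parameters = QgParams({'n': 1.5})
>>> model_parameters.set_params({'phi0_npi': 0.3e0})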
Warning
-------
If a model's parameter is set to `None`, it is assumed to be disabled.
---------------------
Description of the classes
--------------------------
.. _nondimensionalize: https://en.wikipedia.org/wiki/Nondimensionalization
"""
import numpy as np
import pickle
import warnings
from abc import ABC
from qgs.params.parameter import Parameter
from qgs.basis.fourier import contiguous_channel_basis, contiguous_basin_basis
from qgs.basis.fourier import ChannelFourierBasis, BasinFourierBasis
class Params(ABC):
"""Base class for a model's parameters container.
Parameters
----------
dic: dict(float or Parameter), optional
A dictionary with the parameters names and values to be assigned.
"""
_name = ""
def __init__(self, dic=None):
self.set_params(dic)
def set_params(self, dic):
"""Set the specified parameters values.
Parameters
----------
dic: dict(float or Parameter)
A dictionary with the parameters names and values to be assigned.
"""
if dic is not None:
for key, val in zip(dic.keys(), dic.values()):
if key in self.__dict__.keys():
if isinstance(self.__dict__[key], Parameter):
if isinstance(val, Parameter):
self.__dict__[key] = val
else:
d = self.__dict__[key].__dict__
self.__dict__[key] = Parameter(val, input_dimensional=d['_input_dimensional'],
units=d['_units'],
description=d['_description'],
scale_object=d['_scale_object'],
return_dimensional=d['_return_dimensional'])
else:
self.__dict__[key] = val
def __str__(self):
s = ""
for key, val in zip(self.__dict__.keys(), self.__dict__.values()):
if 'params' not in key and key[0] != '_':
if val is None:
pass
elif isinstance(val, Parameter):
if val.input_dimensional:
units = val.units
efval = val.dimensional_value
else:
efval = val.nondimensional_value
if val.nondimensional_value == val.dimensional_value:
units = ""
else:
units = "[nondim]"
s += "'" + key + "': " + str(efval) + " " + units + " (" + val.description + "),\n"
elif isinstance(val, (np.ndarray, list, tuple)) and isinstance(val[0], Parameter):
for i, v in enumerate(val):
if v.input_dimensional:
units = v.units
efval = v.dimensional_value
else:
efval = v.nondimensional_value
if v.nondimensional_value == v.dimensional_value:
units = ""
else:
units = "[nondim]"
s += "'" + key + "["+str(i+1)+"]': " + str(efval) + " " + units + " (" + v.description + "),\n"
else:
s += "'"+key+"': "+str(val)+",\n"
return s
def _list_params(self):
return self._name+" Parameters:\n"+self.__str__()
def print_params(self):
"""Print the parameters contained in the container."""
print(self._list_params())
@staticmethod
def create_params_array(values, input_dimensional=None, units=None, scale_object=None, description=None,
return_dimensional=None):
if hasattr(values, "__iter__"):
ls = len(values)
if not isinstance(input_dimensional, list):
if input_dimensional is None:
input_dimensional = True
idx = ls * [input_dimensional]
else:
idx = input_dimensional
if not isinstance(units, list):
if units is None:
units = ""
u = ls * [units]
else:
u = units
if not isinstance(description, list):
if description is None:
description = ""
d = ls * [description]
else:
d = description
if not isinstance(scale_object, list):
s = ls * [scale_object]
else:
s = scale_object
if not isinstance(return_dimensional, list):
if return_dimensional is None:
return_dimensional = False
rd = ls * [return_dimensional]
else:
rd = return_dimensional
arr = list()
for i, val in enumerate(values):
arr.append(Parameter(val, input_dimensional=idx[i], units=u[i], scale_object=s[i], description=d[i],
return_dimensional=rd[i]))
else:
arr = values * [Parameter(0.e0, input_dimensional=input_dimensional, units=units, scale_object=scale_object,
description=description, return_dimensional=return_dimensional)]
return np.array(arr, dtype=object)
def __repr__(self):
s = super(Params, self).__repr__()+"\n"+self._list_params()
return s
def load_from_file(self, filename, **kwargs):
"""Function to load previously saved Params object with the method :meth:`save_to_file`.
Parameters
----------
filename: str
The file name where the Params object was saved.
kwargs: dict
Keyword arguments to pass to the :mod:`pickle` module method.
"""
f = open(filename, 'rb')
tmp_dict = pickle.load(f, **kwargs)
f.close()
self.__dict__.clear()
self.__dict__.update(tmp_dict)
def save_to_file(self, filename, **kwargs):
"""Function to save the Params object to a file with the :mod:`pickle` module.
Parameters
----------
filename: str
The file name where to save the Params object.
kwargs: dict
Keyword arguments to pass to the :mod:`pickle` module method.
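        Examples
        --------
        A minimal sketch (``params`` is assumed to be an existing instance; the filename is illustrative):
        >>> params.save_to_file('params.pickle')
        >>> params.load_from_file('params.pickle')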
"""
f = open(filename, 'wb')
pickle.dump(self.__dict__, f, **kwargs)
f.close()
class ScaleParams(Params):
"""Class containing the model scales parameters.
Parameters
----------
dic: dict(float or Parameter), optional
A dictionary with the parameters names and values to be assigned.
Attributes
----------
scale: Parameter
The characteristic meridional space scale, :math:`L_y = \\pi \\, L`, in meters [:math:`m`].
f0: Parameter
Coriolis parameter, in [:math:`s^{-1}`].
n: Parameter
Model domain aspect ratio, :math:`n = 2 L_y/L_x` .
rra: Parameter
Earth radius, in meters [:math:`m`].
phi0_npi: Parameter
Latitude expressed in fraction of :math:`\\pi` .
deltap: Parameter
Difference of pressure between the center of the two atmospheric layers, in [:math:`Pa`].
"""
_name = "Scale"
def __init__(self, dic=None):
Params.__init__(self, dic)
# -----------------------------------------------------------
# Scale parameters for the ocean and the atmosphere
# -----------------------------------------------------------
self.scale = Parameter(5.e6, units='[m]', description="characteristic space scale (L*pi)",
return_dimensional=True)
self.f0 = Parameter(1.032e-4, units='[s^-1]', description="Coriolis parameter at the middle of the domain",
return_dimensional=True)
self.n = Parameter(1.3e0, input_dimensional=False, description="aspect ratio (n = 2 L_y / L_x)")
self.rra = Parameter(6370.e3, units='[m]', description="earth radius", return_dimensional=True)
self.phi0_npi = Parameter(0.25e0, input_dimensional=False, description="latitude expressed in fraction of pi")
self.deltap = Parameter(5.e4, units='[Pa]', description='pressure difference between the two atmospheric layers',
return_dimensional=True)
self.set_params(dic)
# ----------------------------------------
# Some derived parameters (Domain, beta)
# ----------------------------------------
@property
def L(self):
"""Parameter: Typical length scale :math:`L` of the model, in meters [:math:`m`]."""
return Parameter(self.scale / np.pi, units=self.scale.units, description='Typical length scale L',
return_dimensional=True)
@property
def L_y(self):
"""Parameter: The meridional extent :math:`L_y = \\pi \\, L` of the model's domain, in meters [:math:`m`]."""
return Parameter(self.scale, units=self.scale.units, description='The meridional extent of the model domain',
return_dimensional=True)
@property
def L_x(self):
"""Parameter: The zonal extent :math:`L_x = 2 \\pi \\, L / n` of the model's domain, in meters [:math:`m`]."""
return Parameter(2 * self.scale / self.n, units=self.scale.units,
description='The zonal extent of the model domain',
return_dimensional=True)
@property
def phi0(self):
"""Parameter: The reference latitude :math:`\\phi_0` at the center of the domain, expressed in radians [:math:`rad`]."""
return Parameter(self.phi0_npi * np.pi, units='[rad]',
description="The reference latitude of the center of the domain",
return_dimensional=True)
@property
def beta(self):
"""Parameter: The meridional gradient of the Coriolis parameter at :math:`\\phi_0`, expressed in [:math:`m^{-1} s^{-1}`]. """
return Parameter(self.L / self.rra * np.cos(self.phi0) / np.sin(self.phi0), input_dimensional=False,
units='[m^-1][s^-1]', scale_object=self,
description="Meridional gradient of the Coriolis parameter at phi_0")
class AtmosphericParams(Params):
"""Class containing the atmospheric parameters.
Parameters
----------
scale_params: ScaleParams
The scale parameters object of the model.
dic: dict(float or Parameter), optional
A dictionary with the parameters names and values to be assigned.
Attributes
----------
kd: Parameter
Atmosphere bottom friction coefficient [:math:`s^{-1}`].
kdp: Parameter
Atmosphere internal friction coefficient [:math:`s^{-1}`].
sigma: Parameter
        Static stability of the atmosphere [:math:`m^2 s^{-2} Pa^{-2}`].
"""
_name = "Atmospheric"
def __init__(self, scale_params, dic=None):
Params.__init__(self, dic)
self._scale_params = scale_params
# Parameters for the atmosphere
self.kd = Parameter(0.1, input_dimensional=False, scale_object=scale_params, units='[s^-1]',
description="atmosphere bottom friction coefficient")
self.kdp = Parameter(0.01, input_dimensional=False, scale_object=scale_params, units='[s^-1]',
description="atmosphere internal friction coefficient")
self.sigma = Parameter(0.2e0, input_dimensional=False, scale_object=scale_params, units='[m^2][s^-2][Pa^-2]',
description="static stability of the atmosphere")
self.set_params(dic)
@property
def sig0(self):
"""Parameter: Static stability of the atmosphere divided by 2."""
return Parameter(self.sigma / 2, input_dimensional=False, scale_object=self._scale_params, units='[m^2][s^-2][Pa^-2]',
description="0.5 * static stability of the atmosphere")
class AtmosphericTemperatureParams(Params):
"""Class containing the atmospheric temperature parameters.
Parameters
----------
scale_params: ScaleParams
The scale parameters object of the model.
dic: dict(float or Parameter), optional
A dictionary with the parameters names and values to be assigned.
Attributes
----------
hd: None or Parameter
Newtonian cooling coefficient.
Newtonian cooling is disabled if `None`.
thetas: None or ~numpy.ndarray(float)
Coefficients of the Newtonian cooling spectral decomposition (non-dimensional).
Newtonian cooling is disabled if `None`.
gamma: None or Parameter
Specific heat capacity of the atmosphere [:math:`J m^{-2} K^{-1}`].
Heat exchange scheme is disabled if `None`.
C: None or ~numpy.ndarray(Parameter)
Spectral decomposition of the constant short-wave radiation of the atmosphere [:math:`W m^{-2}`].
Heat exchange scheme is disabled if `None`.
eps: None or Parameter
        Emissivity coefficient for the grey-body atmosphere.
Heat exchange scheme is disabled if `None`.
T0: None or Parameter
Stationary solution for the 0-th order atmospheric temperature [:math:`K`].
Heat exchange scheme is disabled if `None`.
sc: None or Parameter
        Ratio of surface to atmosphere temperature.
Heat exchange scheme is disabled if `None`.
hlambda: None or Parameter
Sensible + turbulent heat exchange between ocean and atmosphere [:math:`W m^{-2} K^{-1}`].
Heat exchange scheme is disabled if `None`.
"""
_name = "Atmospheric Temperature"
def __init__(self, scale_params, dic=None):
Params.__init__(self, dic)
self._scale_params = scale_params
        self.hd = Parameter(0.045, input_dimensional=False, units='[s^-1]', scale_object=scale_params,
description="Newtonian cooling coefficient")
self.thetas = None # Radiative equilibrium mean temperature decomposition on the model's modes
self.gamma = None
self.C = None
self.eps = None
self.T0 = None
self.sc = None
self.hlambda = None
self.set_params(dic)
def set_insolation(self, value, pos=None):
"""Function to define the spectral decomposition of the constant short-wave radiation of the atmosphere (insolation)
:math:`C_{{\\rm a}, i}` (:attr:`~.AtmosphericTemperatureParams.C`).
Parameters
----------
value: float, int or iterable
Value to set. If a scalar is given, the `pos` parameter should be provided to indicate which component to set.
If an iterable is provided, create a vector of spectral decomposition parameters corresponding to it.
pos: int, optional
Indicate in which component to set the `value`.
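        Examples
        --------
        A minimal sketch (``atp`` is assumed to be an existing :class:`AtmosphericTemperatureParams` instance):
        >>> atp.set_insolation(10 * [0.e0])  # create a 10-component spectral decomposition
        >>> atp.set_insolation(100.0, 0)  # then set the first component alone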
"""
# TODO: - check for the dimensionality of the arguments
if isinstance(value, (float, int)) and pos is not None and self.C is not None:
self.C[pos] = Parameter(value, units='[W][m^-2]', scale_object=self._scale_params,
description="spectral component "+str(pos+1)+" of the short-wave radiation of the atmosphere",
return_dimensional=True)
elif hasattr(value, "__iter__"):
self._create_insolation(value)
else:
            warnings.warn('A scalar value was provided, but without the `pos` argument indicating in which '
                          'component of the spectral decomposition to put it: spectral decomposition unchanged! '
                          'Please specify it or give a vector as `value`.')
def _create_insolation(self, values):
if hasattr(values, "__iter__"):
dim = len(values)
else:
dim = values
values = dim * [0.]
d = ["spectral component "+str(pos+1)+" of the short-wave radiation of the atmosphere" for pos in range(dim)]
self.C = self.create_params_array(values, units='[W][m^-2]', scale_object=self._scale_params,
description=d, return_dimensional=True)
def set_thetas(self, value, pos=None):
"""Function to define the spectral decomposition of the Newtonian cooling
:math:`\\theta^\\star` (:attr:`~.AtmosphericTemperatureParams.thetas`).
Parameters
----------
value: float, int or iterable
Value to set. If a scalar is given, the `pos` parameter should be provided to indicate which component to set.
If an iterable is provided, create a vector of spectral decomposition parameters corresponding to it.
pos: int, optional
Indicate in which component to set the `value`.
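        Examples
        --------
        A minimal sketch (``atp`` is assumed to be an existing :class:`AtmosphericTemperatureParams` instance;
        the value is illustrative):
        >>> atp.set_thetas(10 * [0.e0])  # create a 10-component spectral decomposition
        >>> atp.set_thetas(0.1, 0)  # then set the first component alone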
"""
# TODO: - check for the dimensionality of the arguments
if isinstance(value, (float, int)) and pos is not None and self.thetas is not None:
self.thetas[pos] = Parameter(value, scale_object=self._scale_params,
description="spectral components "+str(pos+1)+" of the temperature profile",
return_dimensional=False, input_dimensional=False)
elif hasattr(value, "__iter__"):
self._create_thetas(value)
else:
            warnings.warn('A scalar value was provided, but without the `pos` argument indicating in which '
                          'component of the spectral decomposition to put it: spectral decomposition unchanged! '
                          'Please specify it or give a vector as `value`.')
def _create_thetas(self, values):
if hasattr(values, "__iter__"):
dim = len(values)
else:
dim = values
values = dim * [0.]
d = ["spectral component "+str(pos+1)+" of the temperature profile" for pos in range(dim)]
self.thetas = self.create_params_array(values, scale_object=self._scale_params,
description=d, return_dimensional=False, input_dimensional=False)
class OceanicParams(Params):
"""Class containing the oceanic parameters
Parameters
----------
scale_params: ScaleParams
The scale parameters object of the model.
dic: dict(float or Parameter), optional
A dictionary with the parameters names and values to be assigned.
Attributes
----------
gp: Parameter
Reduced gravity in [:math:`m \\, s^{-2}`].
r: Parameter
Friction coefficient at the bottom of the ocean in [:math:`s^{-1}`].
h: Parameter
Depth of the water layer of the ocean, in meters [:math:`m`].
d: Parameter
The strength of the ocean-atmosphere mechanical coupling in [:math:`s^{-1}`].
"""
_name = "Oceanic"
def __init__(self, scale_params, dic=None):
Params.__init__(self, dic)
self._scale_params = scale_params
self.gp = Parameter(3.1e-2, units='[m][s^-2]', return_dimensional=True, scale_object=scale_params,
description='reduced gravity')
self.r = Parameter(1.e-8, units='[s^-1]', scale_object=scale_params,
description="frictional coefficient at the bottom of the ocean")
self.h = Parameter(5.e2, units='[m]', return_dimensional=True, scale_object=scale_params,
description="depth of the water layer of the ocean")
self.d = Parameter(1.e-8, units='[s^-1]', scale_object=scale_params,
description="strength of the ocean-atmosphere mechanical coupling")
self.set_params(dic)
class OceanicTemperatureParams(Params):
"""Class containing the oceanic temperature parameters
Parameters
----------
scale_params: ScaleParams
The scale parameters object of the model.
dic: dict(float or Parameter), optional
A dictionary with the parameters names and values to be assigned.
Attributes
----------
gamma: None or Parameter
Specific heat capacity of the ocean [:math:`J m^{-2} K^{-1}`].
Heat exchange scheme is disabled if `None`.
C: None or ~numpy.ndarray(Parameter)
Spectral Decomposition of the constant short-wave radiation of the ocean [:math:`W m^{-2}`].
Heat exchange scheme is disabled if `None`.
T0: None or Parameter
Stationary solution for the 0-th order oceanic temperature [:math:`K`].
Heat exchange scheme is disabled if `None`.
"""
_name = "Oceanic Temperature"
def __init__(self, scale_params, dic=None):
Params.__init__(self, dic)
self._scale_params = scale_params
self.gamma = Parameter(2.e8, units='[J][m^-2][K^-1]', scale_object=scale_params, return_dimensional=True,
description='specific heat capacity of the ocean')
self.C = None
self.T0 = Parameter(285.0, units='[K]', scale_object=scale_params, return_dimensional=True,
description="stationary solution for the 0-th order oceanic temperature")
self.set_params(dic)
def set_insolation(self, value, pos=None):
"""Function to define the spectral decomposition of the constant short-wave radiation of the ocean (insolation)
:math:`C_{{\\rm o}, i}` (:attr:`~.OceanicTemperatureParams.C`).
Parameters
----------
value: float, int or iterable
Value to set. If a scalar is given, the `pos` parameter should be provided to indicate which component to set.
If an iterable is provided, create a vector of spectral decomposition parameters corresponding to it.
pos: int, optional
Indicate in which component to set the `value`.
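        Examples
        --------
        A minimal sketch (``otp`` is assumed to be an existing :class:`OceanicTemperatureParams` instance):
        >>> otp.set_insolation(10 * [0.e0])  # create a 10-component spectral decomposition
        >>> otp.set_insolation(350.0, 0)  # then set the first component alone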
"""
if isinstance(value, (float, int)) and pos is not None and self.C is not None:
self.C[pos] = Parameter(value, units='[W][m^-2]', scale_object=self._scale_params,
description="spectral component "+str(pos+1)+" of the short-wave radiation of the ocean",
return_dimensional=True)
elif hasattr(value, "__iter__"):
self._create_insolation(value)
else:
            warnings.warn('A scalar value was provided, but without the `pos` argument indicating in which '
                          'component of the spectral decomposition to put it: spectral decomposition unchanged! '
                          'Please specify it or give a vector as `value`.')
def _create_insolation(self, values):
if hasattr(values, "__iter__"):
dim = len(values)
else:
dim = values
values = dim * [0.]
d = ["spectral component "+str(pos+1)+" of the short-wave radiation of the ocean" for pos in range(dim)]
self.C = self.create_params_array(values, units='[W][m^-2]', scale_object=self._scale_params,
description=d, return_dimensional=True)
class GroundParams(Params):
"""Class containing the ground parameters
Parameters
----------
scale_params: ScaleParams
The scale parameters object of the model.
dic: dict(float or Parameter), optional
A dictionary with the parameters names and values to be assigned.
Attributes
----------
hk: None or ~numpy.ndarray(float)
Orography spectral decomposition coefficients (non-dimensional), an array of shape (:attr:`~QgParams.nmod` [0],).
Orography is disabled (flat) if `None`.
orographic_basis: str
String to select which component basis modes to use to develop the orography in series.
Can be either 'atmospheric' or 'ground'. Default to 'atmospheric'.
"""
_name = "Ground"
def __init__(self, scale_params, dic=None):
Params.__init__(self, dic)
self._scale_params = scale_params
self.hk = None # spectral orography coefficients
self.orographic_basis = "atmospheric"
self.set_params(dic)
def set_orography(self, value, pos=None, basis="atmospheric"):
"""Function to define the spectral decomposition of the orography profile
:math:`h_k` (:attr:`~.GroundParams.hk`).
Parameters
----------
value: float, int or iterable
Value to set. If a scalar is given, the `pos` parameter should be provided to indicate which component to set.
If an iterable is provided, create a vector of spectral decomposition parameters corresponding to it.
pos: int, optional
Indicate in which component to set the `value`.
basis: str, optional
Indicate which basis should be used to decompose the orography. Can be either `atmospheric`, `oceanic` or `ground`.
Default to `atmospheric`.
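        Examples
        --------
        A minimal sketch (``gp`` is assumed to be an existing :class:`GroundParams` instance):
        >>> gp.set_orography(10 * [0.e0])  # create a 10-component spectral decomposition
        >>> gp.set_orography(0.1, 1)  # then set the second component alone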
"""
# TODO: - check for the dimensionality of the arguments
# - check that inner products are symbolic if basis is not 'atmospheric'
self.orographic_basis = basis
if isinstance(value, (float, int)) and pos is not None and self.hk is not None:
self.hk[pos] = Parameter(value, scale_object=self._scale_params,
description="spectral components "+str(pos+1)+" of the orography",
return_dimensional=False, input_dimensional=False)
elif hasattr(value, "__iter__"):
self._create_orography(value)
else:
            warnings.warn('A scalar value was provided, but without the `pos` argument indicating in which '
                          'component of the spectral decomposition to put it: spectral decomposition unchanged! '
                          'Please specify it or give a vector as `value`.')
def _create_orography(self, values):
if hasattr(values, "__iter__"):
dim = len(values)
else:
dim = values
values = dim * [0.]
d = ["spectral component "+str(pos+1)+" of the orography" for pos in range(dim)]
self.hk = self.create_params_array(values, scale_object=self._scale_params,
description=d, return_dimensional=False, input_dimensional=False)
class GroundTemperatureParams(Params):
"""Class containing the ground temperature parameters
Parameters
----------
scale_params: ScaleParams
The scale parameters object of the model.
dic: dict(float or Parameter), optional
A dictionary with the parameters names and values to be assigned.
Attributes
----------
gamma: None or Parameter
Specific heat capacity of the ground [:math:`J m^{-2} K^{-1}`].
Heat exchange scheme is disabled if `None`.
C: None or ~numpy.ndarray(Parameter)
Spectral decomposition of the constant short-wave radiation of the ground [:math:`W m^{-2}`].
Heat exchange scheme is disabled if `None`.
T0: None or Parameter
Stationary solution for the 0-th order ground temperature [:math:`K`].
Heat exchange scheme is disabled if `None`.
"""
_name = "Ground Temperature"
def __init__(self, scale_params, dic=None):
Params.__init__(self, dic)
self._scale_params = scale_params
self.gamma = Parameter(2.e8, units='[J][m^-2][K^-1]', scale_object=scale_params, return_dimensional=True,
description='specific heat capacity of the ground')
self.C = None
self.T0 = Parameter(285.0, units='[K]', scale_object=scale_params, return_dimensional=True,
description="stationary solution for the 0-th order ground temperature")
self.set_params(dic)
def set_insolation(self, value, pos=None):
"""Function to define the decomposition of the constant short-wave radiation of the ground (insolation)
:math:`C_{{\\rm g}, i}` (:attr:`~.GroundTemperatureParams.C`).
Parameters
----------
value: float, int or iterable
Value to set. If a scalar is given, the `pos` parameter should be provided to indicate which component to set.
If an iterable is provided, create a vector of spectral decomposition parameters corresponding to it.
pos: int, optional
Indicate in which component to set the `value`.
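        Examples
        --------
        A minimal sketch (``gtp`` is assumed to be an existing :class:`GroundTemperatureParams` instance):
        >>> gtp.set_insolation(10 * [0.e0])  # create a 10-component spectral decomposition
        >>> gtp.set_insolation(350.0, 0)  # then set the first component alone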
"""
# TODO: - check for the dimensionality of the arguments
if isinstance(value, (float, int)) and pos is not None and self.C is not None:
self.C[pos] = Parameter(value, units='[W][m^-2]', scale_object=self._scale_params,
description="spectral component "+str(pos+1)+" of the short-wave radiation of the ground",
return_dimensional=True)
elif hasattr(value, "__iter__"):
self._create_insolation(value)
else:
            warnings.warn('A scalar value was provided, but without the `pos` argument indicating in which '
                          'component of the spectral decomposition to put it: spectral decomposition unchanged! '
                          'Please specify it or give a vector as `value`.')
def _create_insolation(self, values):
if hasattr(values, "__iter__"):
dim = len(values)
else:
dim = values
values = dim * [0.]
d = ["spectral component "+str(pos+1)+" of the short-wave radiation of the ground" for pos in range(dim)]
self.C = self.create_params_array(values, units='[W][m^-2]', scale_object=self._scale_params,
description=d, return_dimensional=True)
class QgParams(Params):
"""General qgs parameters container.
Parameters
----------
dic: dict(float or Parameter), optional
A dictionary with the parameters names and values to be assigned.
scale_params: None or ScaleParams, optional
Scale parameters instance.
If `None`, create a new ScaleParams instance. Default to None.
Default to `None`.
atmospheric_params: bool, None or AtmosphericParams, optional
Atmospheric parameters instance.
        If `True`, create a new AtmosphericParams instance.
If `None`, atmospheric parameters are disabled.
Default to `True`.
atemperature_params: bool, None or AtmosphericTemperatureParams, optional
Atmospheric temperature parameters instance.
        If `True`, create a new AtmosphericTemperatureParams instance.
If `None`, atmospheric temperature parameters are disabled.
Default to `True`.
oceanic_params: bool, None or OceanicParams, optional
Oceanic parameters instance.
        If `True`, create a new OceanicParams instance.
If `None`, oceanic parameters are disabled.
Default to `None`.
otemperature_params: bool, None or OceanicTemperatureParams, optional
Oceanic temperature parameters instance.
        If `True`, create a new OceanicTemperatureParams instance.
If `None`, oceanic temperature parameters are disabled.
Default to `None`.
ground_params: bool, None or GroundParams, optional
Ground parameters instance.
        If `True`, create a new GroundParams instance.
If `None`, ground parameters are disabled.
Default to `True`.
gtemperature_params: bool, None or GroundTemperatureParams, optional
Ground temperature parameters instance.
        If `True`, create a new GroundTemperatureParams instance.
If `None`, ground temperature parameters are disabled.
Default to `None`.
Attributes
----------
scale_params: ScaleParams
Scale parameters instance.
atmospheric_params: None or AtmosphericParams
Atmospheric parameters instance.
If `None`, atmospheric parameters are disabled.
atemperature_params: None or AtmosphericTemperatureParams
Atmospheric temperature parameters instance.
If `None`, atmospheric temperature parameters are disabled.
oceanic_params: None or OceanicParams
Oceanic parameters instance.
If `None`, oceanic parameters are disabled.
ground_params: None or GroundParams
Ground parameters instance
If `None`, ground parameters are disabled.
gotemperature_params: None, OceanicTemperatureParams or GroundTemperatureParams
Ground or Oceanic temperature parameters instance.
If `None`, ground and oceanic temperature parameters are disabled.
    time_unit: str
Dimensional unit of time to be used to represent the data.
rr: Parameter
`Gas constant`_ of `dry air`_ in [:math:`J \\, kg^{-1} \\, K^{-1}`].
    sb: Parameter
`Stefan-Boltzmann constant`_ in [:math:`J \\, m^{-2} \\, s^{-1} \\, K^{-4}`].
.. _Gas constant: https://en.wikipedia.org/wiki/Gas_constant
.. _dry air: https://en.wikipedia.org/wiki/Gas_constant#Specific_gas_constant
.. _Stefan-Boltzmann constant: https://en.wikipedia.org/wiki/Stefan%E2%80%93Boltzmann_constant
"""
_name = "General"
def __init__(self, dic=None, scale_params=None,
atmospheric_params=True, atemperature_params=True,
oceanic_params=None, otemperature_params=None,
ground_params=True, gtemperature_params=None):
Params.__init__(self, dic)
# General scale parameters object (Mandatory param block)
if scale_params is None:
self.scale_params = ScaleParams(dic)
else:
self.scale_params = scale_params
# Atmospheric parameters object
if atmospheric_params is True:
self.atmospheric_params = AtmosphericParams(self.scale_params, dic=dic)
else:
self.atmospheric_params = atmospheric_params
# Atmospheric temperature parameters object
        if atemperature_params is True:
self.atemperature_params = AtmosphericTemperatureParams(self.scale_params, dic=dic)
else:
self.atemperature_params = atemperature_params
if oceanic_params is True:
self.oceanic_params = OceanicParams(self.scale_params, dic)
else:
self.oceanic_params = oceanic_params
if ground_params is True:
self.ground_params = GroundParams(self.scale_params, dic)
else:
self.ground_params = ground_params
if otemperature_params is True:
self.gotemperature_params = OceanicTemperatureParams(self.scale_params, dic)
else:
self.gotemperature_params = otemperature_params
if gtemperature_params is True:
self.gotemperature_params = GroundTemperatureParams(self.scale_params, dic)
else:
self.gotemperature_params = gtemperature_params
self._atmospheric_basis = None
self._oceanic_basis = None
self._ground_basis = None
self._number_of_dimensions = 0
self._number_of_atmospheric_modes = 0
self._number_of_oceanic_modes = 0
self._number_of_ground_modes = 0
self._ams = None
self._oms = None
self._gms = None
self._atmospheric_latex_var_string = list()
self._atmospheric_var_string = list()
self._oceanic_latex_var_string = list()
self._oceanic_var_string = list()
self._ground_latex_var_string = list()
self._ground_var_string = list()
self._components_units = [r'm$^2$s$^{-1}$', r'K', r'm$^2$s$^{-1}$', r'K']
self.time_unit = 'days'
# Physical constants
self.rr = Parameter(287.058e0, return_dimensional=True, units='[J][kg^-1][K^-1]',
scale_object=self.scale_params, description="gas constant of dry air")
self.sb = Parameter(5.67e-8, return_dimensional=True, units='[J][m^-2][s^-1][K^-4]',
scale_object=self.scale_params, description="Stefan-Boltzmann constant")
self.set_params(dic)
# -----------------------------------------------------------
# Derived Quantities (Parameters)
# -----------------------------------------------------------
@property
def LR(self):
"""float: Reduced Rossby deformation radius :math:`L_\\text{R} = \\sqrt{g' \\, h } / f_0` ."""
op = self.oceanic_params
scp = self.scale_params
if op is not None:
try:
return np.sqrt(op.gp * op.h) / scp.f0
except:
return None
else:
return None
@property
def G(self):
"""float: The :math:`G = - L^2/L_R^2` parameter."""
scp = self.scale_params
if self.LR is not None:
try:
return -scp.L**2 / self.LR**2
except:
return None
else:
return None
@property
def Cpgo(self):
"""float: The :math:`C\'_{{\\rm g/\\rm o},i} = R C_{{\\rm g/\\rm o},i} / (\\gamma_{\\rm g/\\rm o} L^2 f_0^3)` parameter."""
gotp = self.gotemperature_params
scp = self.scale_params
if gotp is not None:
try:
return gotp.C / (gotp.gamma * scp.f0) * self.rr / (scp.f0 ** 2 * scp.L ** 2)
except:
return None
else:
return None
@property
def Lpgo(self):
"""float: The :math:`\\lambda\'_{{\\rm g/\\rm o}} = \\lambda/(\\gamma_{\\rm g/\\rm o} f_0)` parameter."""
atp = self.atemperature_params
gotp = self.gotemperature_params
scp = self.scale_params
if atp is not None and gotp is not None:
try:
return atp.hlambda / (gotp.gamma * scp.f0)
except:
return None
else:
return None
@property
def Cpa(self):
"""float: The :math:`C\'_{{\\rm a},i} = R C_{{\\rm a},i} / (2 \\gamma_{\\rm a} L^2 f_0^3)` parameter."""
atp = self.atemperature_params
scp = self.scale_params
if atp is not None:
try:
return atp.C / (atp.gamma * scp.f0) * self.rr / (scp.f0 ** 2 * scp.L ** 2) / 2
except:
return None
else:
return None
@property
def Lpa(self):
"""float: The :math:`\\lambda\'_{\\rm a} = \\lambda / (\\gamma_{\\rm a} f_0)` parameter."""
atp = self.atemperature_params
scp = self.scale_params
if atp is not None:
try:
return atp.hlambda / (atp.gamma * scp.f0)
except:
return None
else:
return None
@property
def sbpgo(self):
"""float: Long wave radiation lost by ground/ocean to the atmosphere :math:`s_{B,{\\rm g/\\rm o}} = 4\\,\\sigma_B \\, T_{{\\rm a},0}^3 / (\\gamma_{\\rm g/\\rm o} f_0)`."""
gotp = self.gotemperature_params
scp = self.scale_params
if gotp is not None:
try:
return 4 * self.sb * gotp.T0 ** 3 / (gotp.gamma * scp.f0)
except:
return None
else:
return None
@property
def sbpa(self):
"""float: Long wave radiation from atmosphere absorbed by ground/ocean :math:`s_{B,{\\rm a}} = 4\\,\\epsilon_{\\rm a}\\, \\sigma_B \\, T_{{\\rm a},0}^3 / (\\gamma_{\\rm g/\\rm o} f_0)`."""
atp = self.atemperature_params
gotp = self.gotemperature_params
scp = self.scale_params
if gotp is not None and atp is not None:
try:
return 8 * atp.eps * self.sb * atp.T0 ** 3 / (gotp.gamma * scp.f0)
except:
return None
else:
return None
@property
def LSBpgo(self):
"""float: Long wave radiation from ground/ocean absorbed by atmosphere :math:`S_{B,{\\rm g/\\rm o}} = 2\\,\\epsilon_{\\rm a}\\, \\sigma_B \\, T_{{\\rm a},0}^3 / (\\gamma_{\\rm a} f_0)`."""
atp = self.atemperature_params
gotp = self.gotemperature_params
scp = self.scale_params
if atp is not None and gotp is not None:
try:
return 2 * atp.eps * self.sb * gotp.T0 ** 3 / (atp.gamma * scp.f0)
except:
return None
else:
return None
@property
def LSBpa(self):
"""float: Long wave radiation lost by atmosphere to space & ground/ocean :math:`S_{B,{\\rm a}} = 8\\,\\epsilon_{\\rm a}\\, \\sigma_B \\, T_{{\\rm a},0}^3 / (\\gamma_{\\rm a} f_0)`."""
atp = self.atemperature_params
scp = self.scale_params
if atp is not None:
try:
return 8 * atp.eps * self.sb * atp.T0 ** 3 / (atp.gamma * scp.f0)
except:
return None
else:
return None
# The following properties might be refactored if the unit system of the model get more widespread across modules.
@property
def streamfunction_scaling(self):
"""float: Dimensional scaling of the streamfunction fields."""
return self.scale_params.L**2 * self.scale_params.f0
@property
def temperature_scaling(self):
"""float: Dimensional scaling of the temperature fields."""
return self.streamfunction_scaling * self.scale_params.f0 / self.rr
@property
def geopotential_scaling(self):
"""float: Dimensional scaling of the geopotential height."""
return self.scale_params.f0 / 9.81
def set_params(self, dic):
"""Set the specified parameters values.
Parameters
----------
dic: dict(float or Parameter)
A dictionary with the parameters names and values to be assigned.
"""
if dic is not None:
for key, val in zip(dic.keys(), dic.values()):
if key in self.__dict__.keys():
self.__dict__[key] = val
if 'scale_params' in self.__dict__.keys():
self.scale_params.set_params(dic)
if 'atmospheric_params' in self.__dict__.keys():
if self.atmospheric_params is not None:
self.atmospheric_params.set_params(dic)
if 'atemperature_params' in self.__dict__.keys():
if self.atemperature_params is not None:
self.atemperature_params.set_params(dic)
if 'oceanic_params' in self.__dict__.keys():
if self.oceanic_params is not None:
self.oceanic_params.set_params(dic)
if 'ground_params' in self.__dict__.keys():
if self.ground_params is not None:
self.ground_params.set_params(dic)
        if 'gotemperature_params' in self.__dict__.keys():
            if self.gotemperature_params is not None:
                self.gotemperature_params.set_params(dic)
def print_params(self):
"""Print all the parameters in the container."""
s = self._list_params()+"\n"
if 'scale_params' in self.__dict__.keys():
s += self.scale_params._list_params()+"\n"
if 'atmospheric_params' in self.__dict__.keys():
if self.atmospheric_params is not None:
s += self.atmospheric_params._list_params()+"\n"
if 'atemperature_params' in self.__dict__.keys():
if self.atemperature_params is not None:
s += self.atemperature_params._list_params()+"\n"
if 'oceanic_params' in self.__dict__.keys():
if self.oceanic_params is not None:
s += self.oceanic_params._list_params()+"\n"
if 'ground_params' in self.__dict__.keys():
if self.ground_params is not None:
s += self.ground_params._list_params()+"\n"
if 'gotemperature_params' in self.__dict__.keys():
if self.gotemperature_params is not None:
s += self.gotemperature_params._list_params() + "\n"
print("Qgs v0.2.5 parameters summary")
print("=============================\n")
print(s)
@property
def ndim(self):
"""int: Total number of variables of the model."""
return self._number_of_dimensions
@property
def nmod(self):
"""(int, int): Atmospheric and ground/oceanic number of modes."""
if self._number_of_oceanic_modes != 0:
return [self._number_of_atmospheric_modes, self._number_of_oceanic_modes]
else:
return [self._number_of_atmospheric_modes, self._number_of_ground_modes]
@property
def var_string(self):
"""list(str): List of model's variable names."""
ls = list()
for var in self._atmospheric_var_string+self._oceanic_var_string+self._ground_var_string:
ls.append(var)
return ls
@property
def latex_var_string(self):
"""list(str): List of model's variable names, ready for use in latex."""
ls = list()
for var in self._atmospheric_latex_var_string+self._oceanic_latex_var_string+self._ground_latex_var_string:
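            # r'{\ '[0:-1] yields the two characters '{\' (a raw string literal cannot end
            # with a backslash), wrapping the variable name in a latex group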
ls.append(r'{\ '[0:-1] + var + r'}')
return ls
@property
def latex_components_units(self):
"""list(str): The units of every model's components variables, as a list of latex strings."""
return self._components_units
def get_variable_units(self, i):
"""Return the units of a model's variable as a string containing latex symbols.
Parameters
----------
i: int
The number of the variable.
Returns
-------
str:
The string with the units of the variable.
"""
if i >= self.ndim:
warnings.warn("Variable " + str(i) + " doesn't exist, cannot return its units.")
return None
else:
natm = self.nmod[0]
ngoc = self.nmod[1]
if i < natm:
return self._components_units[0]
if natm <= i < 2 * natm:
return self._components_units[1]
if self.oceanic_basis is not None:
if 2 * natm <= i < 2 * natm + ngoc:
return self._components_units[2]
if 2 * natm + ngoc <= i < 2 * natm + 2 * ngoc:
return self._components_units[3]
if self.ground_basis is not None:
if 2 * natm <= i < 2 * natm + ngoc:
return self._components_units[3]
@property
def dimensional_time(self):
"""float: Return the conversion factor between the non-dimensional time and the dimensional time unit specified
in :attr:`.time_unit`"""
        if self.time_unit == 'hours':
            c = 3600
        elif self.time_unit == 'years':
            c = 24 * 3600 * 365
        else:  # defaults to days
            c = 24 * 3600
return 1 / (self.scale_params.f0 * c)
# -------------------------------------------------------------------
# Config setters to be used with symbolic inner products
# -------------------------------------------------------------------
@property
def atmospheric_basis(self):
"""Basis: The atmospheric basis of functions used to project the PDEs onto."""
return self._atmospheric_basis
@atmospheric_basis.setter
def atmospheric_basis(self, basis):
self._ams = None
self._oms = None
self._gms = None
self._atmospheric_basis = basis
self._number_of_atmospheric_modes = len(basis.functions)
self._number_of_dimensions = 2 * self._number_of_atmospheric_modes
if self._number_of_oceanic_modes != 0:
self._number_of_dimensions += 2 * self._number_of_oceanic_modes
if self._number_of_ground_modes != 0:
self._number_of_dimensions += self._number_of_ground_modes
        if self.ground_params is not None and self.ground_params.orographic_basis == "atmospheric":
self.ground_params.set_orography(self._number_of_atmospheric_modes * [0.e0])
if self.atemperature_params is not None:
self.atemperature_params.set_thetas(self._number_of_atmospheric_modes * [0.e0])
@property
def oceanic_basis(self):
"""Basis: The oceanic basis of functions used to project the PDEs onto."""
return self._oceanic_basis
@oceanic_basis.setter
def oceanic_basis(self, basis):
self._ams = None
self._oms = None
self._gms = None
self._oceanic_basis = basis
if self.atemperature_params is not None:
# disable the Newtonian cooling
self.atemperature_params.thetas = None
self.atemperature_params.hd = None
self.atemperature_params.gamma = Parameter(1.e7, units='[J][m^-2][K^-1]', scale_object=self.scale_params,
description='specific heat capacity of the atmosphere',
return_dimensional=True)
self.atemperature_params.set_insolation(self.nmod[0] * [0.e0])
self.atemperature_params.set_insolation(100.0, 0)
self.atemperature_params.eps = Parameter(0.76e0, input_dimensional=False,
description="emissivity coefficient for the grey-body atmosphere")
self.atemperature_params.T0 = Parameter(270.0, units='[K]', scale_object=self.scale_params,
return_dimensional=True,
description="stationary solution for the 0-th order atmospheric temperature")
self.atemperature_params.sc = Parameter(1., input_dimensional=False,
description="ratio of surface to atmosphere temperature")
self.atemperature_params.hlambda = Parameter(20.00, units='[W][m^-2][K^-1]', scale_object=self.scale_params,
return_dimensional=True,
description="sensible+turbulent heat exchange between ocean and atmosphere")
if self.gotemperature_params is not None:
self._number_of_ground_modes = 0
self._number_of_oceanic_modes = len(basis)
self._number_of_dimensions = 2 * self._number_of_atmospheric_modes + 2 * self._number_of_oceanic_modes
self.gotemperature_params.set_insolation(self.nmod[0] * [0.e0])
self.gotemperature_params.set_insolation(350.0, 0)
# if setting an ocean, then disable the orography
if self.ground_params is not None:
self.ground_params.hk = None
@property
def ground_basis(self):
"""Basis: The ground basis of functions used to project the PDEs onto."""
return self._ground_basis
@ground_basis.setter
def ground_basis(self, basis):
self._ams = None
self._oms = None
self._gms = None
self._ground_basis = basis
if self.atemperature_params is not None:
# disable the Newtonian cooling
self.atemperature_params.thetas = None
self.atemperature_params.hd = None
self.atemperature_params.gamma = Parameter(1.e7, units='[J][m^-2][K^-1]', scale_object=self.scale_params,
description='specific heat capacity of the atmosphere',
return_dimensional=True)
self.atemperature_params.set_insolation(self.nmod[0] * [0.e0])
self.atemperature_params.set_insolation(100.0, 0)
self.atemperature_params.eps = Parameter(0.76e0, input_dimensional=False,
description="emissivity coefficient for the grey-body atmosphere")
self.atemperature_params.T0 = Parameter(270.0, units='[K]', scale_object=self.scale_params,
return_dimensional=True,
description="stationary solution for the 0-th order atmospheric temperature")
self.atemperature_params.sc = Parameter(1., input_dimensional=False,
description="ratio of surface to atmosphere temperature")
self.atemperature_params.hlambda = Parameter(20.00, units='[W][m^-2][K^-1]', scale_object=self.scale_params,
return_dimensional=True,
description="sensible+turbulent heat exchange between ocean and atmosphere")
if self.gotemperature_params is not None:
self._number_of_ground_modes = len(basis)
self._number_of_oceanic_modes = 0
self._number_of_dimensions = 2 * self._number_of_atmospheric_modes + 2 * self._number_of_oceanic_modes
# if orography is disabled, enable it!
if self.ground_params is not None:
if self.ground_params.hk is None:
if self.ground_params.orographic_basis == 'atmospheric':
self.ground_params.set_orography(self._number_of_atmospheric_modes * [0.e0])
else:
self.ground_params.set_orography(self._number_of_ground_modes * [0.e0])
self.ground_params.set_orography(0.1, 1)
self.gotemperature_params.set_insolation(self.nmod[0] * [0.e0])
self.gotemperature_params.set_insolation(350.0, 0)
def set_atmospheric_modes(self, basis, auto=False):
"""Function to configure the atmospheric modes (basis functions) used to project the PDEs onto.
Parameters
----------
basis: Basis
Basis object containing the definition of the atmospheric modes.
auto: bool, optional
            Automatically instantiate the parameters container needed to describe the atmospheric model's parameters.
Default is False.
Examples
--------
>>> from qgs.params.params import QgParams
>>> from qgs.basis.fourier import contiguous_channel_basis
>>> q = QgParams()
>>> atm_basis = contiguous_channel_basis(2, 2, 1.5)
>>> q.set_atmospheric_modes(atm_basis)
>>> q.atmospheric_basis
[sqrt(2)*cos(y), 2*sin(y)*cos(n*x), 2*sin(y)*sin(n*x), sqrt(2)*cos(2*y), 2*sin(2*y)*cos(n*x), 2*sin(2*y)*sin(n*x), 2*sin(y)*cos(2*n*x), 2*sin(y)*sin(2*n*x), 2*sin(2*y)*cos(2*n*x), 2*sin(2*y)*sin(2*n*x)]
"""
if auto:
if self.atemperature_params is None:
self.atemperature_params = AtmosphericTemperatureParams(self.scale_params)
if self.atmospheric_params is None:
self.atmospheric_params = AtmosphericParams(self.scale_params)
self.atmospheric_basis = basis
self._atmospheric_latex_var_string = list()
self._atmospheric_var_string = list()
for i in range(self.nmod[0]):
self._atmospheric_latex_var_string.append(r'psi_{\rm a,' + str(i + 1) + "}")
self._atmospheric_var_string.append(r'psi_a_' + str(i + 1))
for i in range(self.nmod[0]):
self._atmospheric_latex_var_string.append(r'theta_{\rm a,' + str(i + 1) + "}")
self._atmospheric_var_string.append(r'theta_a_' + str(i + 1))
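    # Editor's note: after this call the state vector is ordered as
    # psi_a_1 .. psi_a_{nmod[0]} followed by theta_a_1 .. theta_a_{nmod[0]},
    # which is why the dimension bookkeeping above counts two variables per
    # atmospheric mode.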
def set_oceanic_modes(self, basis, auto=True):
"""Function to configure the oceanic modes (basis functions) used to project the PDEs onto.
Parameters
----------
basis: Basis
Basis object containing the definition of the oceanic modes.
auto: bool, optional
            Whether to automatically instantiate the parameters container needed to describe the oceanic model's parameters.
Default is True.
Examples
--------
>>> from qgs.params.params import QgParams
>>> from qgs.basis.fourier import contiguous_channel_basis, contiguous_basin_basis
>>> q = QgParams()
>>> atm_basis = contiguous_channel_basis(2, 2, 1.5)
>>> oc_basis = contiguous_basin_basis(2, 4, 1.5)
>>> q.set_atmospheric_modes(atm_basis)
>>> q.set_oceanic_modes(oc_basis)
>>> q.oceanic_basis
[2*sin(y)*sin(0.5*n*x), 2*sin(2*y)*sin(0.5*n*x), 2*sin(3*y)*sin(0.5*n*x), 2*sin(4*y)*sin(0.5*n*x), 2*sin(y)*sin(1.0*n*x), 2*sin(2*y)*sin(1.0*n*x), 2*sin(3*y)*sin(1.0*n*x), 2*sin(4*y)*sin(1.0*n*x)]
"""
        if self._atmospheric_basis is None:  # presently, the ocean cannot yet be set independently of an atmosphere
print('Atmosphere modes not set up. Add an atmosphere before adding an ocean!')
print('Oceanic setup aborted.')
return
if auto:
if self.gotemperature_params is None or isinstance(self.gotemperature_params, GroundTemperatureParams):
self.gotemperature_params = OceanicTemperatureParams(self.scale_params)
if self.oceanic_params is None:
self.oceanic_params = OceanicParams(self.scale_params)
self.ground_params = None
self.oceanic_basis = basis
self._oceanic_latex_var_string = list()
self._oceanic_var_string = list()
self._ground_latex_var_string = list()
self._ground_var_string = list()
for i in range(self.nmod[1]):
self._oceanic_latex_var_string.append(r'psi_{\rm o,' + str(i + 1) + "}")
self._oceanic_var_string.append(r'psi_o_' + str(i + 1))
for i in range(self.nmod[1]):
self._oceanic_latex_var_string.append(r'theta_{\rm o,' + str(i + 1) + "}")
self._oceanic_var_string.append(r'theta_o_' + str(i + 1))
def set_ground_modes(self, basis=None, auto=True):
"""Function to configure the ground modes (basis functions) used to project the PDEs onto.
Parameters
----------
basis: None or Basis, optional
Basis object containing the definition of the ground modes. If `None`, use the basis of the atmosphere.
            Defaults to `None`.
auto: bool, optional
            Whether to automatically instantiate the parameters container needed to describe the ground model's parameters.
Default is True.
Examples
--------
>>> from qgs.params.params import QgParams
>>> from qgs.basis.fourier import contiguous_channel_basis, contiguous_basin_basis
>>> q = QgParams()
>>> atm_basis = contiguous_channel_basis(2, 2, 1.5)
>>> q.set_atmospheric_modes(atm_basis)
>>> q.set_ground_modes()
>>> q.ground_basis
[sqrt(2)*cos(y), 2*sin(y)*cos(n*x), 2*sin(y)*sin(n*x), sqrt(2)*cos(2*y), 2*sin(2*y)*cos(n*x), 2*sin(2*y)*sin(n*x), 2*sin(y)*cos(2*n*x), 2*sin(y)*sin(2*n*x), 2*sin(2*y)*cos(2*n*x), 2*sin(2*y)*sin(2*n*x)]
"""
        if self._atmospheric_basis is None:  # presently, the ground cannot yet be set independently of an atmosphere
print('Atmosphere modes not set up. Add an atmosphere before adding the ground!')
print('Ground setup aborted.')
return
if auto:
if self.gotemperature_params is None or isinstance(self.gotemperature_params, OceanicTemperatureParams):
self.gotemperature_params = GroundTemperatureParams(self.scale_params)
if self.ground_params is None:
self.ground_params = GroundParams(self.scale_params)
self.oceanic_params = None
if basis is not None:
self.ground_basis = basis
else:
self.ground_basis = self._atmospheric_basis
self._oceanic_var_string = list()
self._oceanic_latex_var_string = list()
self._ground_latex_var_string = list()
self._ground_var_string = list()
for i in range(self.nmod[1]):
self._ground_latex_var_string.append(r'theta_{\rm g,' + str(i + 1) + "}")
self._ground_var_string.append(r'theta_g_' + str(i + 1))
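    # Editor's note: the ground carries only temperature modes (theta_g), no
    # streamfunction, so it contributes nmod[1] state variables instead of
    # the 2 * nmod[1] contributed by an ocean.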
# -------------------------------------------------------------------
# Specific basis setters
# -------------------------------------------------------------------
def set_atmospheric_channel_fourier_modes(self, nxmax, nymax, auto=False, mode='analytic'):
"""Function to configure and set the basis for contiguous spectral blocks of atmospheric modes on a channel.
Parameters
----------
nxmax: int
            Maximum :math:`x`-wavenumber to fill the spectral block up to.
nymax: int
Maximum :math:`y`-wavenumber to fill the spectral block up to.
auto: bool, optional
            Automatically instantiate the parameters container needed to describe the atmospheric model's parameters.
Default is `False`.
mode: str, optional
            Mode used to compute the inner products: either `analytic` or `symbolic`.
            `analytic` evaluates the inner products with closed-form formulas, while `symbolic` computes them with `Sympy`_.
            Defaults to `analytic`.
Examples
--------
>>> from qgs.params.params import QgParams
>>> q = QgParams()
>>> q.set_atmospheric_channel_fourier_modes(2, 2)
>>> q.ablocks
array([[1, 1],
[1, 2],
[2, 1],
[2, 2]])
.. _Sympy: https://www.sympy.org/
"""
if mode == 'symbolic':
basis = contiguous_channel_basis(nxmax, nymax, self.scale_params.n)
self.set_atmospheric_modes(basis, auto)
else:
self._set_atmospheric_analytic_fourier_modes(nxmax, nymax, auto)
def set_oceanic_basin_fourier_modes(self, nxmax, nymax, auto=True, mode='analytic'):
"""Function to configure and set the basis for contiguous spectral blocks of oceanic modes on a closed basin.
Parameters
----------
nxmax: int
            Maximum :math:`x`-wavenumber to fill the spectral block up to.
nymax: int
Maximum :math:`y`-wavenumber to fill the spectral block up to.
auto: bool, optional
            Automatically instantiate the parameters container needed to describe the oceanic model's parameters.
Default is `True`.
mode: str, optional
            Mode used to compute the inner products: either `analytic` or `symbolic`.
            `analytic` evaluates the inner products with closed-form formulas, while `symbolic` computes them with `Sympy`_.
            Defaults to `analytic`.
Examples
--------
>>> from qgs.params.params import QgParams
>>> q = QgParams()
>>> q.set_atmospheric_channel_fourier_modes(2, 2)
>>> q.set_oceanic_basin_fourier_modes(2, 4)
>>> q.oblocks
array([[1, 1],
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 2],
[2, 3],
[2, 4]])
.. _Sympy: https://www.sympy.org/
"""
if mode == 'symbolic':
basis = contiguous_basin_basis(nxmax, nymax, self.scale_params.n)
self.set_oceanic_modes(basis, auto)
else:
self._set_oceanic_analytic_fourier_modes(nxmax, nymax, auto)
def set_ground_channel_fourier_modes(self, nxmax=None, nymax=None, auto=True, mode='analytic'):
"""Function to configure and set the basis for contiguous spectral blocks of ground modes on a channel.
Parameters
----------
nxmax: int
            Maximum :math:`x`-wavenumber to fill the spectral block up to.
nymax: int
Maximum :math:`y`-wavenumber to fill the spectral block up to.
auto: bool, optional
            Automatically instantiate the parameters container needed to describe the ground model's parameters.
Default is `True`.
mode: str, optional
            Mode used to compute the inner products: either `analytic` or `symbolic`.
            `analytic` evaluates the inner products with closed-form formulas, while `symbolic` computes them with `Sympy`_.
            Defaults to `analytic`.
Examples
--------
>>> from qgs.params.params import QgParams
>>> q = QgParams()
        >>> q.set_atmospheric_channel_fourier_modes(2, 4)
>>> q.set_ground_channel_fourier_modes()
>>> q.gblocks
array([[1, 1],
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 2],
[2, 3],
[2, 4]])
.. _Sympy: https://www.sympy.org/
"""
if mode == "symbolic":
if nxmax is not None and nymax is not None:
basis = contiguous_channel_basis(nxmax, nymax, self.scale_params.n)
else:
basis = None
self.set_ground_modes(basis, auto)
else:
self._set_ground_analytic_fourier_modes(nxmax, nymax, auto)
# -------------------------------------------------------------------
# Model configs setter to be used with analytic inner products
# -------------------------------------------------------------------
@property
def ablocks(self):
"""~numpy.ndarray(int): Spectral blocks detailing the model's atmospheric modes :math:`x`- and :math:`y`-wavenumber.
Array of shape (:attr:`~QgParams.nmod` [0], 2)."""
return self._ams
@ablocks.setter
def ablocks(self, value):
self._ams = value
basis = ChannelFourierBasis(self._ams, self.scale_params.n)
self._atmospheric_basis = basis
namod = 0
for i in range(self.ablocks.shape[0]):
if self.ablocks[i, 0] == 1:
namod += 3
else:
namod += 2
self._number_of_atmospheric_modes = namod
self._number_of_dimensions = 2 * namod
if self._number_of_oceanic_modes != 0:
            self._number_of_dimensions += 2 * self._number_of_oceanic_modes  # each oceanic mode carries psi_o and theta_o
if self._number_of_ground_modes != 0:
self._number_of_dimensions += self._number_of_ground_modes
if self.ground_params is not None:
self.ground_params.orographic_basis = 'atmospheric'
self.ground_params.set_orography(namod * [0.e0])
self.ground_params.set_orography(0.1, 1)
if self.atemperature_params is not None:
self.atemperature_params.set_thetas(namod * [0.e0])
self.atemperature_params.set_thetas(0.1, 0)
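    # Editor's note: blocks with x-wavenumber 1 contribute three basis
    # functions (one purely meridional mode plus a cos/sin pair in x) while
    # all other blocks contribute two; this is how the 2x2 example in the
    # docstrings above yields 10 atmospheric modes.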
@property
def oblocks(self):
"""~numpy.ndarray(int): Spectral blocks detailing the model's oceanic modes :math:`x`-and :math:`y`-wavenumber.
Array of shape (:attr:`~QgParams.nmod` [1], 2)."""
return self._oms
@oblocks.setter
def oblocks(self, value):
self._oms = value
self._gms = None
basis = BasinFourierBasis(self._oms, self.scale_params.n)
self._oceanic_basis = basis
self._ground_basis = None
if self.atemperature_params is not None:
# disable the Newtonian cooling
self.atemperature_params.thetas = None # np.zeros(self.nmod[0])
self.atemperature_params.hd = None # Parameter(0.0, input_dimensional=False)
self.atemperature_params.gamma = Parameter(1.e7, units='[J][m^-2][K^-1]', scale_object=self.scale_params,
description='specific heat capacity of the atmosphere',
return_dimensional=True)
self.atemperature_params.set_insolation(self.nmod[0] * [0.e0])
self.atemperature_params.set_insolation(100.0, 0)
self.atemperature_params.eps = Parameter(0.76e0, input_dimensional=False,
description="emissivity coefficient for the grey-body atmosphere")
self.atemperature_params.T0 = Parameter(270.0, units='[K]', scale_object=self.scale_params,
return_dimensional=True,
description="stationary solution for the 0-th order atmospheric temperature")
self.atemperature_params.sc = Parameter(1., input_dimensional=False,
description="ratio of surface to atmosphere temperature")
self.atemperature_params.hlambda = Parameter(20.00, units='[W][m^-2][K^-1]', scale_object=self.scale_params,
return_dimensional=True,
description="sensible+turbulent heat exchange between ocean and atmosphere")
if self.gotemperature_params is not None:
self._number_of_ground_modes = 0
self._number_of_oceanic_modes = self.oblocks.shape[0]
self._number_of_dimensions = 2 * (self._number_of_oceanic_modes + self._number_of_atmospheric_modes)
self.gotemperature_params.set_insolation(self.nmod[0] * [0.e0])
self.gotemperature_params.set_insolation(350.0, 0)
# if setting an ocean, then disable the orography
if self.ground_params is not None:
self.ground_params.hk = None
@property
def gblocks(self):
"""~numpy.ndarray(int): Spectral blocks detailing the model's ground modes :math:`x`-and :math:`y`-wavenumber.
Array of shape (:attr:`~QgParams.nmod` [1], 2)."""
return self._gms
@gblocks.setter
def gblocks(self, value):
self._oms = None
self._gms = value
basis = ChannelFourierBasis(self._gms, self.scale_params.n)
self._oceanic_basis = None
self._ground_basis = basis
if self.atemperature_params is not None:
# disable the Newtonian cooling
self.atemperature_params.thetas = None # np.zeros(self.nmod[0])
self.atemperature_params.hd = None # Parameter(0.0, input_dimensional=False)
self.atemperature_params.gamma = Parameter(1.e7, units='[J][m^-2][K^-1]',
scale_object=self.scale_params,
description='specific heat capacity of the atmosphere',
return_dimensional=True)
self.atemperature_params.set_insolation(self.nmod[0] * [0.e0])
self.atemperature_params.set_insolation(100.0, 0)
self.atemperature_params.eps = Parameter(0.76e0, input_dimensional=False,
description="emissivity coefficient for the grey-body atmosphere")
self.atemperature_params.T0 = Parameter(270.0, units='[K]', scale_object=self.scale_params,
return_dimensional=True,
description="stationary solution for the 0-th order atmospheric temperature")
self.atemperature_params.sc = Parameter(1., input_dimensional=False,
description="ratio of surface to atmosphere temperature")
self.atemperature_params.hlambda = Parameter(20.00, units='[W][m^-2][K^-1]',
scale_object=self.scale_params,
return_dimensional=True,
description="sensible+turbulent heat exchange between ocean and atmosphere")
if self.gotemperature_params is not None:
gmod = 0
for i in range(self.gblocks.shape[0]):
                if self.gblocks[i, 0] == 1:
gmod += 3
else:
gmod += 2
self._number_of_ground_modes = gmod
self._number_of_oceanic_modes = 0
self._number_of_dimensions = 2 * self._number_of_atmospheric_modes + self._number_of_ground_modes
# if orography is disabled, enable it!
if self.ground_params is not None:
self.ground_params.orographic_basis = 'atmospheric'
if self.ground_params.hk is None:
self.ground_params.set_orography(self.nmod[0] * [0.e0])
self.ground_params.set_orography(0.1, 1)
self.gotemperature_params.set_insolation(self.nmod[0] * [0.e0])
self.gotemperature_params.set_insolation(350.0, 0)
def _set_atmospheric_analytic_fourier_modes(self, nxmax, nymax, auto=False):
res = np.zeros((nxmax * nymax, 2), dtype=int)
i = 0
for nx in range(1, nxmax + 1):
for ny in range(1, nymax+1):
res[i, 0] = nx
res[i, 1] = ny
i += 1
if auto:
if self.atemperature_params is None:
self.atemperature_params = AtmosphericTemperatureParams(self.scale_params)
if self.atmospheric_params is None:
self.atmospheric_params = AtmosphericParams(self.scale_params)
self.ablocks = res
self._atmospheric_latex_var_string = list()
self._atmospheric_var_string = list()
for i in range(self.nmod[0]):
self._atmospheric_latex_var_string.append(r'psi_{\rm a,' + str(i + 1) + "}")
self._atmospheric_var_string.append(r'psi_a_' + str(i + 1))
for i in range(self.nmod[0]):
self._atmospheric_latex_var_string.append(r'theta_{\rm a,' + str(i + 1) + "}")
self._atmospheric_var_string.append(r'theta_a_' + str(i + 1))
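    # Editor's note: the nested loops above fill `res` with the contiguous
    # (nx, ny) blocks in row-major order, e.g. nxmax = nymax = 2 gives
    # [[1, 1], [1, 2], [2, 1], [2, 2]], matching the `ablocks` docstring
    # example.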
def _set_oceanic_analytic_fourier_modes(self, nxmax, nymax, auto=True):
if self._ams is None:
print('Atmosphere modes not set up. Add an atmosphere before adding an ocean!')
print('Oceanic setup aborted.')
return
res = np.zeros((nxmax * nymax, 2), dtype=int)
i = 0
for nx in range(1, nxmax + 1):
for ny in range(1, nymax+1):
res[i, 0] = nx
res[i, 1] = ny
i += 1
if auto:
if self.gotemperature_params is None or isinstance(self.gotemperature_params, GroundTemperatureParams):
self.gotemperature_params = OceanicTemperatureParams(self.scale_params)
if self.oceanic_params is None:
self.oceanic_params = OceanicParams(self.scale_params)
self.ground_params = None
self.oblocks = res
self._oceanic_latex_var_string = list()
self._oceanic_var_string = list()
self._ground_latex_var_string = list()
self._ground_var_string = list()
for i in range(self.nmod[1]):
self._oceanic_latex_var_string.append(r'psi_{\rm o,' + str(i + 1) + "}")
self._oceanic_var_string.append(r'psi_o_' + str(i + 1))
for i in range(self.nmod[1]):
self._oceanic_latex_var_string.append(r'theta_{\rm o,' + str(i + 1) + "}")
self._oceanic_var_string.append(r'theta_o_' + str(i + 1))
def _set_ground_analytic_fourier_modes(self, nxmax=None, nymax=None, auto=True):
if self._ams is None:
print('Atmosphere modes not set up. Add an atmosphere before adding the ground!')
print('Ground setup aborted.')
return
if nxmax is None or nymax is None:
res = self._ams.copy()
else:
res = np.zeros((nxmax * nymax, 2), dtype=int)
i = 0
for nx in range(1, nxmax + 1):
for ny in range(1, nymax+1):
res[i, 0] = nx
res[i, 1] = ny
i += 1
if auto:
if self.gotemperature_params is None or isinstance(self.gotemperature_params, OceanicTemperatureParams):
self.gotemperature_params = GroundTemperatureParams(self.scale_params)
if self.ground_params is None:
self.ground_params = GroundParams(self.scale_params)
self.oceanic_params = None
self.gblocks = res
self._oceanic_var_string = list()
self._oceanic_latex_var_string = list()
self._ground_latex_var_string = list()
self._ground_var_string = list()
for i in range(self.nmod[1]):
self._ground_latex_var_string.append(r'theta_{\rm g,' + str(i + 1) + "}")
self._ground_var_string.append(r'theta_g_' + str(i + 1))
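# Hedged usage sketch (editor's addition, not part of the original module):
# it exercises only the setters documented above, assuming QgParams is the
# class these methods belong to, as its docstring examples indicate.
if __name__ == '__main__':
    q = QgParams()
    q.set_atmospheric_channel_fourier_modes(2, 2)  # 10 atmospheric modes
    q.set_oceanic_basin_fourier_modes(2, 4)        # 8 oceanic modes
    print(q.ablocks)
    print(q.oblocks)
    print(q.ndim)  # 2 * 10 + 2 * 8 = 36 state variables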
| [
"pickle.dump",
"qgs.basis.fourier.ChannelFourierBasis",
"qgs.params.parameter.Parameter",
"qgs.basis.fourier.BasinFourierBasis",
"numpy.zeros",
"pickle.load",
"numpy.array",
"qgs.basis.fourier.contiguous_basin_basis",
"numpy.sin",
"numpy.cos",
"warnings.warn",
"qgs.basis.fourier.contiguous_cha... | [((7259, 7286), 'numpy.array', 'np.array', (['arr'], {'dtype': 'object'}), '(arr, dtype=object)\n', (7267, 7286), True, 'import numpy as np\n'), ((7826, 7850), 'pickle.load', 'pickle.load', (['f'], {}), '(f, **kwargs)\n', (7837, 7850), False, 'import pickle\n'), ((8343, 8382), 'pickle.dump', 'pickle.dump', (['self.__dict__', 'f'], {}), '(self.__dict__, f, **kwargs)\n', (8354, 8382), False, 'import pickle\n'), ((9514, 9626), 'qgs.params.parameter.Parameter', 'Parameter', (['(5000000.0)'], {'units': '"""[m]"""', 'description': '"""characteristic space scale (L*pi)"""', 'return_dimensional': '(True)'}), "(5000000.0, units='[m]', description=\n 'characteristic space scale (L*pi)', return_dimensional=True)\n", (9523, 9626), False, 'from qgs.params.parameter import Parameter\n'), ((9666, 9794), 'qgs.params.parameter.Parameter', 'Parameter', (['(0.0001032)'], {'units': '"""[s^-1]"""', 'description': '"""Coriolis parameter at the middle of the domain"""', 'return_dimensional': '(True)'}), "(0.0001032, units='[s^-1]', description=\n 'Coriolis parameter at the middle of the domain', return_dimensional=True)\n", (9675, 9794), False, 'from qgs.params.parameter import Parameter\n'), ((9834, 9924), 'qgs.params.parameter.Parameter', 'Parameter', (['(1.3)'], {'input_dimensional': '(False)', 'description': '"""aspect ratio (n = 2 L_y / L_x)"""'}), "(1.3, input_dimensional=False, description=\n 'aspect ratio (n = 2 L_y / L_x)')\n", (9843, 9924), False, 'from qgs.params.parameter import Parameter\n'), ((9941, 10031), 'qgs.params.parameter.Parameter', 'Parameter', (['(6370000.0)'], {'units': '"""[m]"""', 'description': '"""earth radius"""', 'return_dimensional': '(True)'}), "(6370000.0, units='[m]', description='earth radius',\n return_dimensional=True)\n", (9950, 10031), False, 'from qgs.params.parameter import Parameter\n'), ((10050, 10147), 'qgs.params.parameter.Parameter', 'Parameter', (['(0.25)'], {'input_dimensional': '(False)', 'description': '"""latitude expressed in fraction of pi"""'}), "(0.25, input_dimensional=False, description=\n 'latitude expressed in fraction of pi')\n", (10059, 10147), False, 'from qgs.params.parameter import Parameter\n'), ((10167, 10303), 'qgs.params.parameter.Parameter', 'Parameter', (['(50000.0)'], {'units': '"""[Pa]"""', 'description': '"""pressure difference between the two atmospheric layers"""', 'return_dimensional': '(True)'}), "(50000.0, units='[Pa]', description=\n 'pressure difference between the two atmospheric layers',\n return_dimensional=True)\n", (10176, 10303), False, 'from qgs.params.parameter import Parameter\n'), ((10634, 10755), 'qgs.params.parameter.Parameter', 'Parameter', (['(self.scale / np.pi)'], {'units': 'self.scale.units', 'description': '"""Typical length scale L"""', 'return_dimensional': '(True)'}), "(self.scale / np.pi, units=self.scale.units, description=\n 'Typical length scale L', return_dimensional=True)\n", (10643, 10755), False, 'from qgs.params.parameter import Parameter\n'), ((10943, 11075), 'qgs.params.parameter.Parameter', 'Parameter', (['self.scale'], {'units': 'self.scale.units', 'description': '"""The meridional extent of the model domain"""', 'return_dimensional': '(True)'}), "(self.scale, units=self.scale.units, description=\n 'The meridional extent of the model domain', return_dimensional=True)\n", (10952, 11075), False, 'from qgs.params.parameter import Parameter\n'), ((11264, 11404), 'qgs.params.parameter.Parameter', 'Parameter', (['(2 * self.scale / self.n)'], {'units': 
'self.scale.units', 'description': '"""The zonal extent of the model domain"""', 'return_dimensional': '(True)'}), "(2 * self.scale / self.n, units=self.scale.units, description=\n 'The zonal extent of the model domain', return_dimensional=True)\n", (11273, 11404), False, 'from qgs.params.parameter import Parameter\n'), ((11629, 11776), 'qgs.params.parameter.Parameter', 'Parameter', (['(self.phi0_npi * np.pi)'], {'units': '"""[rad]"""', 'description': '"""The reference latitude of the center of the domain"""', 'return_dimensional': '(True)'}), "(self.phi0_npi * np.pi, units='[rad]', description=\n 'The reference latitude of the center of the domain',\n return_dimensional=True)\n", (11638, 11776), False, 'from qgs.params.parameter import Parameter\n'), ((13090, 13231), 'qgs.params.parameter.Parameter', 'Parameter', (['(0.1)'], {'input_dimensional': '(False)', 'scale_object': 'scale_params', 'units': '"""[s^-1]"""', 'description': '"""atmosphere bottom friction coefficient"""'}), "(0.1, input_dimensional=False, scale_object=scale_params, units=\n '[s^-1]', description='atmosphere bottom friction coefficient')\n", (13099, 13231), False, 'from qgs.params.parameter import Parameter\n'), ((13274, 13418), 'qgs.params.parameter.Parameter', 'Parameter', (['(0.01)'], {'input_dimensional': '(False)', 'scale_object': 'scale_params', 'units': '"""[s^-1]"""', 'description': '"""atmosphere internal friction coefficient"""'}), "(0.01, input_dimensional=False, scale_object=scale_params, units=\n '[s^-1]', description='atmosphere internal friction coefficient')\n", (13283, 13418), False, 'from qgs.params.parameter import Parameter\n'), ((13464, 13613), 'qgs.params.parameter.Parameter', 'Parameter', (['(0.2)'], {'input_dimensional': '(False)', 'scale_object': 'scale_params', 'units': '"""[m^2][s^-2][Pa^-2]"""', 'description': '"""static stability of the atmosphere"""'}), "(0.2, input_dimensional=False, scale_object=scale_params, units=\n '[m^2][s^-2][Pa^-2]', description='static stability of the atmosphere')\n", (13473, 13613), False, 'from qgs.params.parameter import Parameter\n'), ((13796, 13973), 'qgs.params.parameter.Parameter', 'Parameter', (['(self.sigma / 2)'], {'input_dimensional': '(False)', 'scale_object': 'self._scale_params', 'units': '"""[m^2][s^-2][Pa^-2]"""', 'description': '"""0.5 * static stability of the atmosphere"""'}), "(self.sigma / 2, input_dimensional=False, scale_object=self.\n _scale_params, units='[m^2][s^-2][Pa^-2]', description=\n '0.5 * static stability of the atmosphere')\n", (13805, 13973), False, 'from qgs.params.parameter import Parameter\n'), ((15813, 15944), 'qgs.params.parameter.Parameter', 'Parameter', (['(0.045)'], {'input_dimensional': '(False)', 'units': '"""[s]"""', 'scale_object': 'scale_params', 'description': '"""Newtonian cooling coefficient"""'}), "(0.045, input_dimensional=False, units='[s]', scale_object=\n scale_params, description='Newtonian cooling coefficient')\n", (15822, 15944), False, 'from qgs.params.parameter import Parameter\n'), ((21057, 21180), 'qgs.params.parameter.Parameter', 'Parameter', (['(0.031)'], {'units': '"""[m][s^-2]"""', 'return_dimensional': '(True)', 'scale_object': 'scale_params', 'description': '"""reduced gravity"""'}), "(0.031, units='[m][s^-2]', return_dimensional=True, scale_object=\n scale_params, description='reduced gravity')\n", (21066, 21180), False, 'from qgs.params.parameter import Parameter\n'), ((21222, 21351), 'qgs.params.parameter.Parameter', 'Parameter', (['(1e-08)'], {'units': '"""[s^-1]"""', 'scale_object': 
'scale_params', 'description': '"""frictional coefficient at the bottom of the ocean"""'}), "(1e-08, units='[s^-1]', scale_object=scale_params, description=\n 'frictional coefficient at the bottom of the ocean')\n", (21231, 21351), False, 'from qgs.params.parameter import Parameter\n'), ((21391, 21530), 'qgs.params.parameter.Parameter', 'Parameter', (['(500.0)'], {'units': '"""[m]"""', 'return_dimensional': '(True)', 'scale_object': 'scale_params', 'description': '"""depth of the water layer of the ocean"""'}), "(500.0, units='[m]', return_dimensional=True, scale_object=\n scale_params, description='depth of the water layer of the ocean')\n", (21400, 21530), False, 'from qgs.params.parameter import Parameter\n'), ((21569, 21701), 'qgs.params.parameter.Parameter', 'Parameter', (['(1e-08)'], {'units': '"""[s^-1]"""', 'scale_object': 'scale_params', 'description': '"""strength of the ocean-atmosphere mechanical coupling"""'}), "(1e-08, units='[s^-1]', scale_object=scale_params, description=\n 'strength of the ocean-atmosphere mechanical coupling')\n", (21578, 21701), False, 'from qgs.params.parameter import Parameter\n'), ((22812, 22966), 'qgs.params.parameter.Parameter', 'Parameter', (['(200000000.0)'], {'units': '"""[J][m^-2][K^-1]"""', 'scale_object': 'scale_params', 'return_dimensional': '(True)', 'description': '"""specific heat capacity of the ocean"""'}), "(200000000.0, units='[J][m^-2][K^-1]', scale_object=scale_params,\n return_dimensional=True, description='specific heat capacity of the ocean')\n", (22821, 22966), False, 'from qgs.params.parameter import Parameter\n'), ((23028, 23193), 'qgs.params.parameter.Parameter', 'Parameter', (['(285.0)'], {'units': '"""[K]"""', 'scale_object': 'scale_params', 'return_dimensional': '(True)', 'description': '"""stationary solution for the 0-th order oceanic temperature"""'}), "(285.0, units='[K]', scale_object=scale_params, return_dimensional\n =True, description=\n 'stationary solution for the 0-th order oceanic temperature')\n", (23037, 23193), False, 'from qgs.params.parameter import Parameter\n'), ((29464, 29624), 'qgs.params.parameter.Parameter', 'Parameter', (['(200000000.0)'], {'units': '"""[J][m^-2][K^-1]"""', 'scale_object': 'scale_params', 'return_dimensional': '(True)', 'description': '"""specific heat capacity of the ground"""'}), "(200000000.0, units='[J][m^-2][K^-1]', scale_object=scale_params,\n return_dimensional=True, description='specific heat capacity of the ground'\n )\n", (29473, 29624), False, 'from qgs.params.parameter import Parameter\n'), ((29681, 29845), 'qgs.params.parameter.Parameter', 'Parameter', (['(285.0)'], {'units': '"""[K]"""', 'scale_object': 'scale_params', 'return_dimensional': '(True)', 'description': '"""stationary solution for the 0-th order ground temperature"""'}), "(285.0, units='[K]', scale_object=scale_params, return_dimensional\n =True, description=\n 'stationary solution for the 0-th order ground temperature')\n", (29690, 29845), False, 'from qgs.params.parameter import Parameter\n'), ((37806, 37950), 'qgs.params.parameter.Parameter', 'Parameter', (['(287.058)'], {'return_dimensional': '(True)', 'units': '"""[J][kg^-1][K^-1]"""', 'scale_object': 'self.scale_params', 'description': '"""gas constant of dry air"""'}), "(287.058, return_dimensional=True, units='[J][kg^-1][K^-1]',\n scale_object=self.scale_params, description='gas constant of dry air')\n", (37815, 37950), False, 'from qgs.params.parameter import Parameter\n'), ((37995, 38147), 'qgs.params.parameter.Parameter', 'Parameter', 
(['(5.67e-08)'], {'return_dimensional': '(True)', 'units': '"""[J][m^-2][s^-1][K^-4]"""', 'scale_object': 'self.scale_params', 'description': '"""Stefan-Boltzmann constant"""'}), "(5.67e-08, return_dimensional=True, units='[J][m^-2][s^-1][K^-4]',\n scale_object=self.scale_params, description='Stefan-Boltzmann constant')\n", (38004, 38147), False, 'from qgs.params.parameter import Parameter\n'), ((68060, 68111), 'qgs.basis.fourier.ChannelFourierBasis', 'ChannelFourierBasis', (['self._ams', 'self.scale_params.n'], {}), '(self._ams, self.scale_params.n)\n', (68079, 68111), False, 'from qgs.basis.fourier import ChannelFourierBasis, BasinFourierBasis\n'), ((69415, 69464), 'qgs.basis.fourier.BasinFourierBasis', 'BasinFourierBasis', (['self._oms', 'self.scale_params.n'], {}), '(self._oms, self.scale_params.n)\n', (69432, 69464), False, 'from qgs.basis.fourier import ChannelFourierBasis, BasinFourierBasis\n'), ((72216, 72267), 'qgs.basis.fourier.ChannelFourierBasis', 'ChannelFourierBasis', (['self._gms', 'self.scale_params.n'], {}), '(self._gms, self.scale_params.n)\n', (72235, 72267), False, 'from qgs.basis.fourier import ChannelFourierBasis, BasinFourierBasis\n'), ((75245, 75284), 'numpy.zeros', 'np.zeros', (['(nxmax * nymax, 2)'], {'dtype': 'int'}), '((nxmax * nymax, 2), dtype=int)\n', (75253, 75284), True, 'import numpy as np\n'), ((76555, 76594), 'numpy.zeros', 'np.zeros', (['(nxmax * nymax, 2)'], {'dtype': 'int'}), '((nxmax * nymax, 2), dtype=int)\n', (76563, 76594), True, 'import numpy as np\n'), ((51174, 51342), 'qgs.params.parameter.Parameter', 'Parameter', (['(10000000.0)'], {'units': '"""[J][m^-2][K^-1]"""', 'scale_object': 'self.scale_params', 'description': '"""specific heat capacity of the atmosphere"""', 'return_dimensional': '(True)'}), "(10000000.0, units='[J][m^-2][K^-1]', scale_object=self.\n scale_params, description='specific heat capacity of the atmosphere',\n return_dimensional=True)\n", (51183, 51342), False, 'from qgs.params.parameter import Parameter\n'), ((51618, 51730), 'qgs.params.parameter.Parameter', 'Parameter', (['(0.76)'], {'input_dimensional': '(False)', 'description': '"""emissivity coefficient for the grey-body atmosphere"""'}), "(0.76, input_dimensional=False, description=\n 'emissivity coefficient for the grey-body atmosphere')\n", (51627, 51730), False, 'from qgs.params.parameter import Parameter\n'), ((51823, 51996), 'qgs.params.parameter.Parameter', 'Parameter', (['(270.0)'], {'units': '"""[K]"""', 'scale_object': 'self.scale_params', 'return_dimensional': '(True)', 'description': '"""stationary solution for the 0-th order atmospheric temperature"""'}), "(270.0, units='[K]', scale_object=self.scale_params,\n return_dimensional=True, description=\n 'stationary solution for the 0-th order atmospheric temperature')\n", (51832, 51996), False, 'from qgs.params.parameter import Parameter\n'), ((52134, 52236), 'qgs.params.parameter.Parameter', 'Parameter', (['(1.0)'], {'input_dimensional': '(False)', 'description': '"""ratio of surface to atmosphere temperature"""'}), "(1.0, input_dimensional=False, description=\n 'ratio of surface to atmosphere temperature')\n", (52143, 52236), False, 'from qgs.params.parameter import Parameter\n'), ((52330, 52513), 'qgs.params.parameter.Parameter', 'Parameter', (['(20.0)'], {'units': '"""[W][m^-2][K^-1]"""', 'scale_object': 'self.scale_params', 'return_dimensional': '(True)', 'description': '"""sensible+turbulent heat exchange between ocean and atmosphere"""'}), "(20.0, units='[W][m^-2][K^-1]', scale_object=self.scale_params,\n 
return_dimensional=True, description=\n 'sensible+turbulent heat exchange between ocean and atmosphere')\n", (52339, 52513), False, 'from qgs.params.parameter import Parameter\n'), ((53748, 53916), 'qgs.params.parameter.Parameter', 'Parameter', (['(10000000.0)'], {'units': '"""[J][m^-2][K^-1]"""', 'scale_object': 'self.scale_params', 'description': '"""specific heat capacity of the atmosphere"""', 'return_dimensional': '(True)'}), "(10000000.0, units='[J][m^-2][K^-1]', scale_object=self.\n scale_params, description='specific heat capacity of the atmosphere',\n return_dimensional=True)\n", (53757, 53916), False, 'from qgs.params.parameter import Parameter\n'), ((54192, 54304), 'qgs.params.parameter.Parameter', 'Parameter', (['(0.76)'], {'input_dimensional': '(False)', 'description': '"""emissivity coefficient for the grey-body atmosphere"""'}), "(0.76, input_dimensional=False, description=\n 'emissivity coefficient for the grey-body atmosphere')\n", (54201, 54304), False, 'from qgs.params.parameter import Parameter\n'), ((54397, 54570), 'qgs.params.parameter.Parameter', 'Parameter', (['(270.0)'], {'units': '"""[K]"""', 'scale_object': 'self.scale_params', 'return_dimensional': '(True)', 'description': '"""stationary solution for the 0-th order atmospheric temperature"""'}), "(270.0, units='[K]', scale_object=self.scale_params,\n return_dimensional=True, description=\n 'stationary solution for the 0-th order atmospheric temperature')\n", (54406, 54570), False, 'from qgs.params.parameter import Parameter\n'), ((54708, 54810), 'qgs.params.parameter.Parameter', 'Parameter', (['(1.0)'], {'input_dimensional': '(False)', 'description': '"""ratio of surface to atmosphere temperature"""'}), "(1.0, input_dimensional=False, description=\n 'ratio of surface to atmosphere temperature')\n", (54717, 54810), False, 'from qgs.params.parameter import Parameter\n'), ((54904, 55087), 'qgs.params.parameter.Parameter', 'Parameter', (['(20.0)'], {'units': '"""[W][m^-2][K^-1]"""', 'scale_object': 'self.scale_params', 'return_dimensional': '(True)', 'description': '"""sensible+turbulent heat exchange between ocean and atmosphere"""'}), "(20.0, units='[W][m^-2][K^-1]', scale_object=self.scale_params,\n return_dimensional=True, description=\n 'sensible+turbulent heat exchange between ocean and atmosphere')\n", (54913, 55087), False, 'from qgs.params.parameter import Parameter\n'), ((64054, 64113), 'qgs.basis.fourier.contiguous_channel_basis', 'contiguous_channel_basis', (['nxmax', 'nymax', 'self.scale_params.n'], {}), '(nxmax, nymax, self.scale_params.n)\n', (64078, 64113), False, 'from qgs.basis.fourier import contiguous_channel_basis, contiguous_basin_basis\n'), ((65633, 65690), 'qgs.basis.fourier.contiguous_basin_basis', 'contiguous_basin_basis', (['nxmax', 'nymax', 'self.scale_params.n'], {}), '(nxmax, nymax, self.scale_params.n)\n', (65655, 65690), False, 'from qgs.basis.fourier import contiguous_channel_basis, contiguous_basin_basis\n'), ((69842, 70010), 'qgs.params.parameter.Parameter', 'Parameter', (['(10000000.0)'], {'units': '"""[J][m^-2][K^-1]"""', 'scale_object': 'self.scale_params', 'description': '"""specific heat capacity of the atmosphere"""', 'return_dimensional': '(True)'}), "(10000000.0, units='[J][m^-2][K^-1]', scale_object=self.\n scale_params, description='specific heat capacity of the atmosphere',\n return_dimensional=True)\n", (69851, 70010), False, 'from qgs.params.parameter import Parameter\n'), ((70286, 70398), 'qgs.params.parameter.Parameter', 'Parameter', (['(0.76)'], 
{'input_dimensional': '(False)', 'description': '"""emissivity coefficient for the grey-body atmosphere"""'}), "(0.76, input_dimensional=False, description=\n 'emissivity coefficient for the grey-body atmosphere')\n", (70295, 70398), False, 'from qgs.params.parameter import Parameter\n'), ((70491, 70664), 'qgs.params.parameter.Parameter', 'Parameter', (['(270.0)'], {'units': '"""[K]"""', 'scale_object': 'self.scale_params', 'return_dimensional': '(True)', 'description': '"""stationary solution for the 0-th order atmospheric temperature"""'}), "(270.0, units='[K]', scale_object=self.scale_params,\n return_dimensional=True, description=\n 'stationary solution for the 0-th order atmospheric temperature')\n", (70500, 70664), False, 'from qgs.params.parameter import Parameter\n'), ((70802, 70904), 'qgs.params.parameter.Parameter', 'Parameter', (['(1.0)'], {'input_dimensional': '(False)', 'description': '"""ratio of surface to atmosphere temperature"""'}), "(1.0, input_dimensional=False, description=\n 'ratio of surface to atmosphere temperature')\n", (70811, 70904), False, 'from qgs.params.parameter import Parameter\n'), ((70998, 71181), 'qgs.params.parameter.Parameter', 'Parameter', (['(20.0)'], {'units': '"""[W][m^-2][K^-1]"""', 'scale_object': 'self.scale_params', 'return_dimensional': '(True)', 'description': '"""sensible+turbulent heat exchange between ocean and atmosphere"""'}), "(20.0, units='[W][m^-2][K^-1]', scale_object=self.scale_params,\n return_dimensional=True, description=\n 'sensible+turbulent heat exchange between ocean and atmosphere')\n", (71007, 71181), False, 'from qgs.params.parameter import Parameter\n'), ((72645, 72813), 'qgs.params.parameter.Parameter', 'Parameter', (['(10000000.0)'], {'units': '"""[J][m^-2][K^-1]"""', 'scale_object': 'self.scale_params', 'description': '"""specific heat capacity of the atmosphere"""', 'return_dimensional': '(True)'}), "(10000000.0, units='[J][m^-2][K^-1]', scale_object=self.\n scale_params, description='specific heat capacity of the atmosphere',\n return_dimensional=True)\n", (72654, 72813), False, 'from qgs.params.parameter import Parameter\n'), ((73144, 73256), 'qgs.params.parameter.Parameter', 'Parameter', (['(0.76)'], {'input_dimensional': '(False)', 'description': '"""emissivity coefficient for the grey-body atmosphere"""'}), "(0.76, input_dimensional=False, description=\n 'emissivity coefficient for the grey-body atmosphere')\n", (73153, 73256), False, 'from qgs.params.parameter import Parameter\n'), ((73349, 73522), 'qgs.params.parameter.Parameter', 'Parameter', (['(270.0)'], {'units': '"""[K]"""', 'scale_object': 'self.scale_params', 'return_dimensional': '(True)', 'description': '"""stationary solution for the 0-th order atmospheric temperature"""'}), "(270.0, units='[K]', scale_object=self.scale_params,\n return_dimensional=True, description=\n 'stationary solution for the 0-th order atmospheric temperature')\n", (73358, 73522), False, 'from qgs.params.parameter import Parameter\n'), ((73660, 73762), 'qgs.params.parameter.Parameter', 'Parameter', (['(1.0)'], {'input_dimensional': '(False)', 'description': '"""ratio of surface to atmosphere temperature"""'}), "(1.0, input_dimensional=False, description=\n 'ratio of surface to atmosphere temperature')\n", (73669, 73762), False, 'from qgs.params.parameter import Parameter\n'), ((73856, 74039), 'qgs.params.parameter.Parameter', 'Parameter', (['(20.0)'], {'units': '"""[W][m^-2][K^-1]"""', 'scale_object': 'self.scale_params', 'return_dimensional': '(True)', 'description': 
'"""sensible+turbulent heat exchange between ocean and atmosphere"""'}), "(20.0, units='[W][m^-2][K^-1]', scale_object=self.scale_params,\n return_dimensional=True, description=\n 'sensible+turbulent heat exchange between ocean and atmosphere')\n", (73865, 74039), False, 'from qgs.params.parameter import Parameter\n'), ((78127, 78166), 'numpy.zeros', 'np.zeros', (['(nxmax * nymax, 2)'], {'dtype': 'int'}), '((nxmax * nymax, 2), dtype=int)\n', (78135, 78166), True, 'import numpy as np\n'), ((12052, 12069), 'numpy.sin', 'np.sin', (['self.phi0'], {}), '(self.phi0)\n', (12058, 12069), True, 'import numpy as np\n'), ((17461, 17718), 'warnings.warn', 'warnings.warn', (["(\n 'A scalar value was provided, but without the `pos` argument indicating in which '\n +\n 'component of the spectral decomposition to put it: Spectral decomposition unchanged !'\n + 'Please specify it or give a vector as `value`.')"], {}), "(\n 'A scalar value was provided, but without the `pos` argument indicating in which '\n +\n 'component of the spectral decomposition to put it: Spectral decomposition unchanged !'\n + 'Please specify it or give a vector as `value`.')\n", (17474, 17718), False, 'import warnings\n'), ((19415, 19672), 'warnings.warn', 'warnings.warn', (["(\n 'A scalar value was provided, but without the `pos` argument indicating in which '\n +\n 'component of the spectral decomposition to put it: Spectral decomposition unchanged !'\n + 'Please specify it or give a vector as `value`.')"], {}), "(\n 'A scalar value was provided, but without the `pos` argument indicating in which '\n +\n 'component of the spectral decomposition to put it: Spectral decomposition unchanged !'\n + 'Please specify it or give a vector as `value`.')\n", (19428, 19672), False, 'import warnings\n'), ((24375, 24632), 'warnings.warn', 'warnings.warn', (["(\n 'A scalar value was provided, but without the `pos` argument indicating in which '\n +\n 'component of the spectral decomposition to put it: Spectral decomposition unchanged !'\n + 'Please specify it or give a vector as `value`.')"], {}), "(\n 'A scalar value was provided, but without the `pos` argument indicating in which '\n +\n 'component of the spectral decomposition to put it: Spectral decomposition unchanged !'\n + 'Please specify it or give a vector as `value`.')\n", (24388, 24632), False, 'import warnings\n'), ((27649, 27906), 'warnings.warn', 'warnings.warn', (["(\n 'A scalar value was provided, but without the `pos` argument indicating in which '\n +\n 'component of the spectral decomposition to put it: Spectral decomposition unchanged !'\n + 'Please specify it or give a vector as `value`.')"], {}), "(\n 'A scalar value was provided, but without the `pos` argument indicating in which '\n +\n 'component of the spectral decomposition to put it: Spectral decomposition unchanged !'\n + 'Please specify it or give a vector as `value`.')\n", (27662, 27906), False, 'import warnings\n'), ((31084, 31341), 'warnings.warn', 'warnings.warn', (["(\n 'A scalar value was provided, but without the `pos` argument indicating in which '\n +\n 'component of the spectral decomposition to put it: Spectral decomposition unchanged !'\n + 'Please specify it or give a vector as `value`.')"], {}), "(\n 'A scalar value was provided, but without the `pos` argument indicating in which '\n +\n 'component of the spectral decomposition to put it: Spectral decomposition unchanged !'\n + 'Please specify it or give a vector as `value`.')\n", (31097, 31341), False, 'import warnings\n'), ((67263, 67322), 
'qgs.basis.fourier.contiguous_channel_basis', 'contiguous_channel_basis', (['nxmax', 'nymax', 'self.scale_params.n'], {}), '(nxmax, nymax, self.scale_params.n)\n', (67287, 67322), False, 'from qgs.basis.fourier import contiguous_channel_basis, contiguous_basin_basis\n'), ((6851, 6970), 'qgs.params.parameter.Parameter', 'Parameter', (['val'], {'input_dimensional': 'idx[i]', 'units': 'u[i]', 'scale_object': 's[i]', 'description': 'd[i]', 'return_dimensional': 'rd[i]'}), '(val, input_dimensional=idx[i], units=u[i], scale_object=s[i],\n description=d[i], return_dimensional=rd[i])\n', (6860, 6970), False, 'from qgs.params.parameter import Parameter\n'), ((7047, 7211), 'qgs.params.parameter.Parameter', 'Parameter', (['(0.0)'], {'input_dimensional': 'input_dimensional', 'units': 'units', 'scale_object': 'scale_object', 'description': 'description', 'return_dimensional': 'return_dimensional'}), '(0.0, input_dimensional=input_dimensional, units=units,\n scale_object=scale_object, description=description, return_dimensional=\n return_dimensional)\n', (7056, 7211), False, 'from qgs.params.parameter import Parameter\n'), ((12032, 12049), 'numpy.cos', 'np.cos', (['self.phi0'], {}), '(self.phi0)\n', (12038, 12049), True, 'import numpy as np\n'), ((38640, 38661), 'numpy.sqrt', 'np.sqrt', (['(op.gp * op.h)'], {}), '(op.gp * op.h)\n', (38647, 38661), True, 'import numpy as np\n'), ((3135, 3328), 'qgs.params.parameter.Parameter', 'Parameter', (['val'], {'input_dimensional': "d['_input_dimensional']", 'units': "d['_units']", 'description': "d['_description']", 'scale_object': "d['_scale_object']", 'return_dimensional': "d['_return_dimensional']"}), "(val, input_dimensional=d['_input_dimensional'], units=d['_units'],\n description=d['_description'], scale_object=d['_scale_object'],\n return_dimensional=d['_return_dimensional'])\n", (3144, 3328), False, 'from qgs.params.parameter import Parameter\n')] |
#!/usr/bin/python
"""
c60mc computes the equilibrium configuration of the anti-ferromagnetic Ising model on the C60 (Buckyball) lattice using the Monte Carlo simulation method.
"""
import numpy as np
def totalE(x, l):
"""
    Parameters:
x The spin configuration on the Buckyball lattice.
l The Buckyball lattice edge labels.
Returns:
E The total energy of the given spin configuration on the Buckyball lattice.
"""
E = 0
for i in range(90):
        E += x[int(l[i, 0])] * x[int(l[i, 1])]
return E
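# Editor's note (worked check): the C60 lattice has 90 edges, so for the
# all-up configuration x = np.ones(60) every bond contributes +1 and
# totalE returns 90 (cf. the commented-out test in the main block below).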
def localE(x, label, l):
"""
    Parameters:
    x The spin configuration on the Buckyball lattice.
    label The chosen site of the Buckyball lattice.
    l The nearest-node labels of the Buckyball lattice sites.
Returns:
E The local energy on one site of the Buckyball lattice.
"""
    E = (x[label] * x[int(l[label, 0])]
         + x[label] * x[int(l[label, 1])]
         + x[label] * x[int(l[label, 2])])
return E
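# Editor's note: every C60 site has exactly 3 neighbours, hence the three
# bond terms above; flipping spin `label` negates all of them, so the
# energy change of a flip is dE = -2 * localE(x, label, l), which is
# exactly what MCMC_Ising uses below.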
def MCMC_Ising(beta, num, l_local, l_total):
"""
Parameters:
beta The temperature $\beta=1/(kT)$
num The number of equilibrium configuration data
Returns:
res The euqilibrium Ising spin configuration data
Z The partition function of the Ising spin model.
"""
    cut = 10000000  # burn-in: iterations discarded before recording configurations
iterations = cut+num
res = np.zeros((num, 60))
    x = np.random.choice([-1, 1], 60)  # random initial +/-1 spin configuration
E_total = totalE(x,l_total)
Energy_chain = []
#M_chain = []
Z_chain = []
labelvalueseri = np.random.randint(0, 60, iterations)
for i in range(iterations):
#print("i=", i)
labelvalue = labelvalueseri[i]
        E_0 = localE(x, labelvalue, l_local)
        E_change = -2 * E_0  # flipping one spin negates its three bond terms
        x[labelvalue] = -x[labelvalue]
        E_total = E_total + E_change
        if E_change > 0:
            # Metropolis criterion: keep an energy-raising flip with
            # probability exp(-beta * dE), otherwise revert it
            probability = np.exp(-beta * E_change)
            if np.random.rand() > probability:
                x[labelvalue] = -x[labelvalue]
                E_total = E_total - E_change
if i>cut-1:
res[i-cut] = x
#M_chain.append((np.sum(x)*(np.sum(x)))/60.) ##Calculate magnetization
Energy_chain.append(E_total) ##Calculate Energy
#Z_chain.append(np.exp(-beta*E_total)) ##Calculate the exp E
return res, np.mean(Energy_chain)
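# Hedged sketch (editor's addition): the acceptance test inside MCMC_Ising is
# the standard Metropolis criterion, isolated here for clarity; this helper
# name is hypothetical and is not called by the original script.
def metropolis_accept(dE, beta):
    """Accept an energy-lowering flip always, otherwise with probability exp(-beta*dE)."""
    return dE <= 0 or np.random.rand() < np.exp(-beta * dE)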
if __name__=="__main__":
    beta = 1.  # inverse temperature 1/(kT)
num = 100000
c60labelEdge = np.load('data/c60labelEdge.npy') #The edge pair labels of c60 lattice
    c60labelNode = np.load('data/c60labelNode.npy') #The nearest-node labels of c60 lattice
#print('c60labelEdge:',c60labelEdge)
#print('c60labelNode:',c60labelNode)
# x = np.ones(60)
# TE = totalE(x, c60labelEdge)
# print('TE',TE)
config_c60, E = MCMC_Ising(beta, num, c60labelNode, c60labelEdge)
print('E=',E)
datafile = 'data/mcdata.dat'
with open(datafile, 'w') as f1:
f1.write("SSH chanllenge 1\n")
f1.write("beta=%.2f\n" % beta)
f1.write("E=%.2f\n" % E)
#for i in range(100):
# print('i=',i,'energypersite',ene[i]/60.)
| [
"numpy.load",
"numpy.zeros",
"numpy.mean",
"numpy.random.randint",
"numpy.exp",
"numpy.random.rand"
] | [((1381, 1400), 'numpy.zeros', 'np.zeros', (['(num, 60)'], {}), '((num, 60))\n', (1389, 1400), True, 'import numpy as np\n'), ((1414, 1442), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(1)', '(60)'], {}), '(-1, 1, 60)\n', (1431, 1442), True, 'import numpy as np\n'), ((1552, 1588), 'numpy.random.randint', 'np.random.randint', (['(0)', '(60)', 'iterations'], {}), '(0, 60, iterations)\n', (1569, 1588), True, 'import numpy as np\n'), ((2454, 2486), 'numpy.load', 'np.load', (['"""data/c60labelEdge.npy"""'], {}), "('data/c60labelEdge.npy')\n", (2461, 2486), True, 'import numpy as np\n'), ((2543, 2575), 'numpy.load', 'np.load', (['"""data/c60labelNode.npy"""'], {}), "('data/c60labelNode.npy')\n", (2550, 2575), True, 'import numpy as np\n'), ((2340, 2361), 'numpy.mean', 'np.mean', (['Energy_chain'], {}), '(Energy_chain)\n', (2347, 2361), True, 'import numpy as np\n'), ((1881, 1905), 'numpy.exp', 'np.exp', (['(-beta * E_change)'], {}), '(-beta * E_change)\n', (1887, 1905), True, 'import numpy as np\n'), ((1919, 1935), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1933, 1935), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from NeuralNetwork import NeuralNetwork
from sklearn.cross_validation import train_test_split  # moved to sklearn.model_selection in scikit-learn >= 0.20
digits = load_digits()
X = digits.data
Y = digits.target
X -= X.min()  # scale the pixel features to [0, 1]
X /= X.max()
nn = NeuralNetwork([64, 100, 10], "logistic")
X_train, X_test, Y_train, Y_test = train_test_split(X, Y)
labels_train = LabelBinarizer().fit_transform(Y_train)
labels_test = LabelBinarizer().fit_transform(Y_test)
print("start fitting")
nn.fit(X_train, labels_train, epochs=3000)
predictions = []
for i in range(X_test.shape[0]):
o = nn.predict(X_test[i])
predictions.append(np.argmax(o))
print(confusion_matrix(Y_test, predictions))
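# Editor's addition (hedged): a single-number summary next to the per-class
# report; accuracy_score is part of sklearn.metrics.
from sklearn.metrics import accuracy_score
print(accuracy_score(Y_test, predictions))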
print(classification_report(Y_test, predictions)) | [
"sklearn.datasets.load_digits",
"sklearn.cross_validation.train_test_split",
"sklearn.preprocessing.LabelBinarizer",
"numpy.argmax",
"sklearn.metrics.classification_report",
"sklearn.metrics.confusion_matrix",
"NeuralNetwork.NeuralNetwork"
] | [((305, 318), 'sklearn.datasets.load_digits', 'load_digits', ([], {}), '()\n', (316, 318), False, 'from sklearn.datasets import load_digits\n'), ((385, 425), 'NeuralNetwork.NeuralNetwork', 'NeuralNetwork', (['[64, 100, 10]', '"""logistic"""'], {}), "([64, 100, 10], 'logistic')\n", (398, 425), False, 'from NeuralNetwork import NeuralNetwork\n'), ((461, 483), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X', 'Y'], {}), '(X, Y)\n', (477, 483), False, 'from sklearn.cross_validation import train_test_split\n'), ((781, 818), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Y_test', 'predictions'], {}), '(Y_test, predictions)\n', (797, 818), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((826, 868), 'sklearn.metrics.classification_report', 'classification_report', (['Y_test', 'predictions'], {}), '(Y_test, predictions)\n', (847, 868), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((499, 515), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (513, 515), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((553, 569), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (567, 569), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((761, 773), 'numpy.argmax', 'np.argmax', (['o'], {}), '(o)\n', (770, 773), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import sys
import numpy as np
import pickle
import matplotlib.pyplot as plt
from collections import Counter
#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from sklearn.manifold import TSNE
import random
import time
from ogm import viz_ogm, grid_index_to_array
device_name = "/gpu:0"
#def cmp(a,b):
# '''
# a: listA
# b: listB
# '''
# return list(set(a).difference(set(b))) == []
#
#def in_unique_words(item, UNIQUE_WORDS):
# for uw in UNIQUE_WORDS:
# if set(uw) == set(item):
# return True
# return False
#def filter_sample(int_words):
# t = 1e-5
# threshold = 0.8
# int_word_counts = Counter(int_words)
# total_count = len(int_words)
# word_freqs = {w: c/total_count for w, c in int_word_counts.items()}
# prob_drop = {w: 1 - np.sqrt(t / word_freqs[w]) for w in int_word_counts}
# train_words = [w for w in int_words if prob_drop[w] < threshold]
# print(len(train_words))
# return train_words
def get_targets(words, idx, window_size=5):
'''
    Get the context (target) words around an input word.
    Parameters
    ---
    words: list of word ids
    idx: index of the input (centre) word
    window_size: maximum radius of the context window
'''
target_window = np.random.randint(1, window_size+1)
start_point = idx - target_window if (idx - target_window) > 0 else 0
end_point = idx + target_window
targets = set(words[start_point: idx] + words[idx+1: end_point+1])
return list(targets)
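# Editor's note (worked example): with words = [0, 1, 2, 3, 4], idx = 2 and
# window_size = 2, a radius of 1 or 2 is drawn at random, so the returned
# targets are either [1, 3] or [0, 1, 3, 4] (set order aside).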
def get_batches(words, batch_size, window_size=5):
'''
    Batch generator: yields paired lists of input words and their context targets.
'''
n_batches = len(words) // batch_size
words = words[:n_batches*batch_size]
for idx in range(0, len(words), batch_size):
x, y = [], []
batch = words[idx: idx+batch_size]
for i in range(len(batch)):
batch_x = batch[i]
batch_y = get_targets(batch, i, window_size)
x.extend([batch_x]*len(batch_y))
y.extend(batch_y)
yield x, y
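# Editor's note: each yielded pair satisfies len(x) == len(y), because every
# centre word in the batch is repeated once per context word; the trailing
# len(words) % batch_size words are dropped.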
if __name__ == '__main__':
print('ogm embedding')
path1=r'TrainedData/'
UNIQUE_WORDS = pickle.load(open(path1+'corpus', 'rb'), encoding='bytes')
vocab_to_int = {k: v for k, v in UNIQUE_WORDS.items()}
int_to_vocab = {v: k for k, v in UNIQUE_WORDS.items()}
int_words = list(UNIQUE_WORDS.values())
#pickle.dump(int_words, open(path1+'int_words', 'wb'), protocol=2)
#train_words = filter_sample(int_words) #option (if Subsampling)
train_words = int_words
with tf.device(device_name):
train_graph = tf.Graph()
with train_graph.as_default():
inputs = tf.placeholder(tf.int32, shape=[None], name='inputs')
labels = tf.placeholder(tf.int32, shape=[None, None], name='labels')
vocab_size = len(int_to_vocab)
embedding_size = 20
with train_graph.as_default():
embedding = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1, 1))
embed = tf.nn.embedding_lookup(embedding, inputs)
n_sampled = 100
with train_graph.as_default():
softmax_w = tf.Variable(tf.truncated_normal([vocab_size, embedding_size], stddev=0.1))
softmax_b = tf.Variable(tf.zeros(vocab_size))
        # sampled-softmax loss (a negative-sampling-style approximation to the full softmax)
loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, labels, embed, n_sampled, vocab_size)
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
with train_graph.as_default():
valid_size = 16
valid_window = 50
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(50,50+valid_window), valid_size//2))
valid_size = len(valid_examples)
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
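        # Rows of normalized_embedding have unit norm, so this matmul yields the
        # cosine similarity between each validation word and the whole vocabulary.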
epochs = 20
batch_size = 10
window_size = 5
with train_graph.as_default():
saver = tf.train.Saver()
xlim = [-47,47]
ylim = [-25,25]
delta = 3.0
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
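    # Cap this process at ~70% of GPU memory so the device can be shared.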
with tf.Session(graph=train_graph, config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 1
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to [%s]:' % valid_word
print('query')
viz_ogm(grid_index_to_array(set(valid_word), xlim, ylim, delta))
print('topk:',top_k)
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
viz_ogm(grid_index_to_array(set(close_word), xlim, ylim, delta))
#print(log)
iteration += 1
save_path = saver.save(sess, path1+"checkpoints/ogm.ckpt")
embed_mat = sess.run(normalized_embedding)
pickle.dump(embed_mat, open(path1+'embed_mat', 'wb'), protocol=2)
viz_words = 100
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(idx, (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7) | [
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.GPUOptions",
"numpy.random.randint",
"tensorflow.compat.v1.truncated_normal",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.square",
"tensorflow.comp... | [((191, 215), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (213, 215), True, 'import tensorflow.compat.v1 as tf\n'), ((1236, 1273), 'numpy.random.randint', 'np.random.randint', (['(1)', '(window_size + 1)'], {}), '(1, window_size + 1)\n', (1253, 1273), True, 'import numpy as np\n'), ((4545, 4595), 'tensorflow.compat.v1.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.7)'}), '(per_process_gpu_memory_fraction=0.7)\n', (4558, 4595), True, 'import tensorflow.compat.v1 as tf\n'), ((6708, 6714), 'sklearn.manifold.TSNE', 'TSNE', ([], {}), '()\n', (6712, 6714), False, 'from sklearn.manifold import TSNE\n'), ((6791, 6821), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(14, 14)'}), '(figsize=(14, 14))\n', (6803, 6821), True, 'import matplotlib.pyplot as plt\n'), ((2492, 2514), 'tensorflow.compat.v1.device', 'tf.device', (['device_name'], {}), '(device_name)\n', (2501, 2514), True, 'import tensorflow.compat.v1 as tf\n'), ((2538, 2548), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (2546, 2548), True, 'import tensorflow.compat.v1 as tf\n'), ((4449, 4465), 'tensorflow.compat.v1.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4463, 4465), True, 'import tensorflow.compat.v1 as tf\n'), ((6863, 6914), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*embed_tsne[idx, :]'], {'color': '"""steelblue"""'}), "(*embed_tsne[idx, :], color='steelblue')\n", (6874, 6914), True, 'import matplotlib.pyplot as plt\n'), ((6923, 6993), 'matplotlib.pyplot.annotate', 'plt.annotate', (['idx', '(embed_tsne[idx, 0], embed_tsne[idx, 1])'], {'alpha': '(0.7)'}), '(idx, (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)\n', (6935, 6993), True, 'import matplotlib.pyplot as plt\n'), ((2618, 2671), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""inputs"""'}), "(tf.int32, shape=[None], name='inputs')\n", (2632, 2671), True, 'import tensorflow.compat.v1 as tf\n'), ((2693, 2752), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""labels"""'}), "(tf.int32, shape=[None, None], name='labels')\n", (2707, 2752), True, 'import tensorflow.compat.v1 as tf\n'), ((2984, 3025), 'tensorflow.compat.v1.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'inputs'], {}), '(embedding, inputs)\n', (3006, 3025), True, 'import tensorflow.compat.v1 as tf\n'), ((3332, 3422), 'tensorflow.compat.v1.nn.sampled_softmax_loss', 'tf.nn.sampled_softmax_loss', (['softmax_w', 'softmax_b', 'labels', 'embed', 'n_sampled', 'vocab_size'], {}), '(softmax_w, softmax_b, labels, embed, n_sampled,\n vocab_size)\n', (3358, 3422), True, 'import tensorflow.compat.v1 as tf\n'), ((3451, 3471), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (3465, 3471), True, 'import tensorflow.compat.v1 as tf\n'), ((3974, 4017), 'tensorflow.compat.v1.constant', 'tf.constant', (['valid_examples'], {'dtype': 'tf.int32'}), '(valid_examples, dtype=tf.int32)\n', (3985, 4017), True, 'import tensorflow.compat.v1 as tf\n'), ((4183, 4242), 'tensorflow.compat.v1.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['normalized_embedding', 'valid_dataset'], {}), '(normalized_embedding, valid_dataset)\n', (4205, 4242), True, 'import tensorflow.compat.v1 as tf\n'), ((4748, 4781), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4779, 4781), True, 'import tensorflow.compat.v1 as 
tf\n'), ((4917, 4928), 'time.time', 'time.time', ([], {}), '()\n', (4926, 4928), False, 'import time\n'), ((2908, 2962), 'tensorflow.compat.v1.random_uniform', 'tf.random_uniform', (['[vocab_size, embedding_size]', '(-1)', '(1)'], {}), '([vocab_size, embedding_size], -1, 1)\n', (2925, 2962), True, 'import tensorflow.compat.v1 as tf\n'), ((3143, 3204), 'tensorflow.compat.v1.truncated_normal', 'tf.truncated_normal', (['[vocab_size, embedding_size]'], {'stddev': '(0.1)'}), '([vocab_size, embedding_size], stddev=0.1)\n', (3162, 3204), True, 'import tensorflow.compat.v1 as tf\n'), ((3242, 3262), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['vocab_size'], {}), '(vocab_size)\n', (3250, 3262), True, 'import tensorflow.compat.v1 as tf\n'), ((4295, 4329), 'tensorflow.compat.v1.transpose', 'tf.transpose', (['normalized_embedding'], {}), '(normalized_embedding)\n', (4307, 4329), True, 'import tensorflow.compat.v1 as tf\n'), ((4642, 4681), 'tensorflow.compat.v1.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (4656, 4681), True, 'import tensorflow.compat.v1 as tf\n'), ((3496, 3520), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (3518, 3520), True, 'import tensorflow.compat.v1 as tf\n'), ((4059, 4079), 'tensorflow.compat.v1.square', 'tf.square', (['embedding'], {}), '(embedding)\n', (4068, 4079), True, 'import tensorflow.compat.v1 as tf\n'), ((5264, 5275), 'time.time', 'time.time', ([], {}), '()\n', (5273, 5275), False, 'import time\n'), ((5596, 5607), 'time.time', 'time.time', ([], {}), '()\n', (5605, 5607), False, 'import time\n'), ((5029, 5040), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5037, 5040), True, 'import numpy as np\n')] |
import sys
import unittest
from dynd import nd, ndt
import numpy as np
from numpy.testing import *
@unittest.skip('Test disabled since callables were reworked')
class TestNumpyDTypeInterop(unittest.TestCase):
def setUp(self):
if sys.byteorder == 'little':
self.nonnative = '>'
else:
self.nonnative = '<'
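        # NumPy dtype strings use '<' for little-endian and '>' for big-endian;
        # self.nonnative is the prefix opposite to the host byte order.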
def test__type_from_numpy_scalar_types(self):
# Tests converting numpy scalar types to dynd types
self.assertEqual(ndt.bool, ndt.type(np.bool))
self.assertEqual(ndt.bool, ndt.type(np.bool_))
self.assertEqual(ndt.int8, ndt.type(np.int8))
self.assertEqual(ndt.int16, ndt.type(np.int16))
self.assertEqual(ndt.int32, ndt.type(np.int32))
self.assertEqual(ndt.int64, ndt.type(np.int64))
self.assertEqual(ndt.uint8, ndt.type(np.uint8))
self.assertEqual(ndt.uint16, ndt.type(np.uint16))
self.assertEqual(ndt.uint32, ndt.type(np.uint32))
self.assertEqual(ndt.uint64, ndt.type(np.uint64))
self.assertEqual(ndt.float32, ndt.type(np.float32))
self.assertEqual(ndt.float64, ndt.type(np.float64))
self.assertEqual(ndt.complex_float32, ndt.type(np.complex64))
self.assertEqual(ndt.complex_float64, ndt.type(np.complex128))
def test__type_from_numpy_dtype(self):
# Tests converting numpy dtypes to dynd types
# native byte order
self.assertEqual(ndt.bool, ndt.type(np.dtype(np.bool)))
self.assertEqual(ndt.int8, ndt.type(np.dtype(np.int8)))
self.assertEqual(ndt.int16, ndt.type(np.dtype(np.int16)))
self.assertEqual(ndt.int32, ndt.type(np.dtype(np.int32)))
self.assertEqual(ndt.int64, ndt.type(np.dtype(np.int64)))
self.assertEqual(ndt.uint8, ndt.type(np.dtype(np.uint8)))
self.assertEqual(ndt.uint16, ndt.type(np.dtype(np.uint16)))
self.assertEqual(ndt.uint32, ndt.type(np.dtype(np.uint32)))
self.assertEqual(ndt.uint64, ndt.type(np.dtype(np.uint64)))
self.assertEqual(ndt.float32, ndt.type(np.dtype(np.float32)))
self.assertEqual(ndt.float64, ndt.type(np.dtype(np.float64)))
self.assertEqual(ndt.complex_float32, ndt.type(np.dtype(np.complex64)))
self.assertEqual(ndt.complex_float64, ndt.type(np.dtype(np.complex128)))
self.assertEqual(ndt.make_fixed_string(10, 'ascii'),
ndt.type(np.dtype('S10')))
self.assertEqual(ndt.make_fixed_string(10, 'utf_32'),
ndt.type(np.dtype('U10')))
# non-native byte order
nonnative = self.nonnative
"""
self.assertEqual(ndt.make_byteswap(ndt.int16),
ndt.type(np.dtype(nonnative + 'i2')))
self.assertEqual(ndt.make_byteswap(ndt.int32),
ndt.type(np.dtype(nonnative + 'i4')))
self.assertEqual(ndt.make_byteswap(ndt.int64),
ndt.type(np.dtype(nonnative + 'i8')))
self.assertEqual(ndt.make_byteswap(ndt.uint16),
ndt.type(np.dtype(nonnative + 'u2')))
self.assertEqual(ndt.make_byteswap(ndt.uint32),
ndt.type(np.dtype(nonnative + 'u4')))
self.assertEqual(ndt.make_byteswap(ndt.uint64),
ndt.type(np.dtype(nonnative + 'u8')))
self.assertEqual(ndt.make_byteswap(ndt.float32),
ndt.type(np.dtype(nonnative + 'f4')))
self.assertEqual(ndt.make_byteswap(ndt.float64),
ndt.type(np.dtype(nonnative + 'f8')))
self.assertEqual(ndt.make_byteswap(ndt.complex_float32),
ndt.type(np.dtype(nonnative + 'c8')))
self.assertEqual(ndt.make_byteswap(ndt.complex_float64),
ndt.type(np.dtype(nonnative + 'c16')))
"""
def test__type_from_numpy_dtype_struct(self):
# aligned struct
tp0 = ndt.type(np.dtype([('x', np.int32), ('y', np.int64)],
align=True))
tp1 = ndt.type('{x : int32, y : int64}')
self.assertEqual(tp0, tp1)
def test__type_from_h5py_special(self):
# h5py 2.3 style "special dtype"
dt = np.dtype(object, metadata={'vlen' : str})
self.assertEqual(ndt.type(dt), ndt.string)
if sys.version_info < (3, 0):
dt = np.dtype(object, metadata={'vlen' : unicode})
self.assertEqual(ndt.type(dt), ndt.string)
# h5py 2.2 style "special dtype"
dt = np.dtype(('O', [( ({'type': str},'vlen'), 'O' )] ))
self.assertEqual(ndt.type(dt), ndt.string)
if sys.version_info < (3, 0):
dt = np.dtype(('O', [( ({'type': unicode},'vlen'), 'O' )] ))
self.assertEqual(ndt.type(dt), ndt.string)
# Should be able to roundtrip dynd -> numpy -> dynd
x = nd.array(['testing', 'one', 'two'])
self.assertEqual(nd.type_of(x), ndt.type('3 * string'))
y = x.to(np.ndarray)
self.assertEqual(y.shape, (3,))
self.assertEqual(y[0], 'testing')
self.assertEqual(y[1], 'one')
self.assertEqual(y[2], 'two')
self.assertEqual(y.dtype.kind, 'O')
if sys.version_info < (3, 0):
self.assertEqual(y.dtype.metadata, {'vlen' : unicode})
else:
self.assertEqual(y.dtype.metadata, {'vlen' : str})
z = nd.array(y)
self.assertEqual(nd.type_of(z), nd.type_of(x))
self.assertEqual(nd.as_py(z), nd.as_py(x))
def test__type_as_numpy(self):
self.assertEqual(ndt.bool.as_numpy(), np.dtype('bool'))
self.assertEqual(ndt.int8.as_numpy(), np.dtype('int8'))
self.assertEqual(ndt.int16.as_numpy(), np.dtype('int16'))
self.assertEqual(ndt.int32.as_numpy(), np.dtype('int32'))
self.assertEqual(ndt.int64.as_numpy(), np.dtype('int64'))
self.assertEqual(ndt.uint8.as_numpy(), np.dtype('uint8'))
self.assertEqual(ndt.uint16.as_numpy(), np.dtype('uint16'))
self.assertEqual(ndt.uint32.as_numpy(), np.dtype('uint32'))
self.assertEqual(ndt.uint64.as_numpy(), np.dtype('uint64'))
self.assertEqual(ndt.float32.as_numpy(), np.dtype('float32'))
self.assertEqual(ndt.float64.as_numpy(), np.dtype('float64'))
self.assertEqual(ndt.complex_float32.as_numpy(), np.dtype('complex64'))
self.assertEqual(ndt.complex_float64.as_numpy(), np.dtype('complex128'))
# nonnative byte order
nonnative = self.nonnative
# self.assertEqual(ndt.make_byteswap(ndt.int16).as_numpy(),
# np.dtype(nonnative + 'i2'))
# self.assertEqual(ndt.make_byteswap(ndt.float64).as_numpy(),
# np.dtype(nonnative + 'f8'))
"""
TODO: This test fails since we changed cstruct -> struct.
# aligned struct
tp0 = ndt.type('{x : int32, y : int64}').as_numpy()
tp1 = np.dtype([('x', np.int32), ('y', np.int64)], align=True)
self.assertEqual(tp0, tp1)
# unaligned struct
tp0 = ndt.make_struct([ndt.make_unaligned(ndt.int32),
ndt.make_unaligned(ndt.int64)],
['x', 'y']).as_numpy()
tp1 = np.dtype([('x', np.int32), ('y', np.int64)])
self.assertEqual(tp0, tp1)
"""
# check some types which can't be converted
self.assertRaises(TypeError, ndt.bytes.as_numpy)
self.assertRaises(TypeError, ndt.string.as_numpy)
@unittest.skip('Test disabled since callables were reworked')
class TestNumpyViewInterop(unittest.TestCase):
def setUp(self):
if sys.byteorder == 'little':
self.nonnative = '>'
else:
self.nonnative = '<'
def test_dynd_scalar_view(self):
a = np.array(3, dtype='int64')
n = nd.view(a)
self.assertEqual(nd.type_of(n), ndt.int64)
self.assertEqual(nd.as_py(n), 3)
self.assertEqual(n.access_flags, 'readwrite')
# Ensure it's a view
n[...] = 4
self.assertEqual(a[()], 4)
def test_dynd_scalar_array(self):
a = np.array(3, dtype='int64')
n = nd.array(a)
self.assertEqual(nd.type_of(n), ndt.int64)
self.assertEqual(nd.as_py(n), 3)
self.assertEqual(n.access_flags, 'readwrite')
# Ensure it's not a view
a[...] = 4
self.assertEqual(nd.as_py(n), 3)
def test_dynd_scalar_asarray(self):
a = np.array(3, dtype='int64')
n = nd.asarray(a)
self.assertEqual(nd.type_of(n), ndt.int64)
self.assertEqual(nd.as_py(n), 3)
self.assertEqual(n.access_flags, 'readwrite')
# Ensure it's a view
n[...] = 4
self.assertEqual(a[()], 4)
def test_dynd_view_of_numpy_array(self):
# Tests viewing a numpy array as a dynd.array
nonnative = self.nonnative
a = np.arange(10, dtype=np.int32)
n = nd.view(a)
self.assertEqual(nd.dtype_of(n), ndt.int32)
self.assertEqual(nd.ndim_of(n), a.ndim)
self.assertEqual(n.shape, a.shape)
self.assertEqual(n.strides, a.strides)
"""
a = np.arange(12, dtype=(nonnative + 'i4')).reshape(3,4)
n = nd.view(a)
self.assertEqual(nd.dtype_of(n), ndt.make_byteswap(ndt.int32))
self.assertEqual(nd.ndim_of(n), a.ndim)
self.assertEqual(n.shape, a.shape)
self.assertEqual(n.strides, a.strides)
"""
"""
a = np.arange(49, dtype='i1')
a = a[1:].view(dtype=(nonnative + 'i4')).reshape(2,2,3)
n = nd.view(a)
self.assertEqual(nd.dtype_of(n),
ndt.make_unaligned(ndt.make_byteswap(ndt.int32)))
self.assertEqual(nd.ndim_of(n), a.ndim)
self.assertEqual(n.shape, a.shape)
self.assertEqual(n.strides, a.strides)
"""
"""
def test_numpy_view_of_dynd_array(self):
# Tests viewing a dynd.array as a numpy array
nonnative = self.nonnative
n = nd.range(10, dtype=ndt.int32)
a = np.asarray(n)
self.assertEqual(a.dtype, np.dtype(np.int32))
self.assertTrue(a.flags.aligned)
self.assertEqual(a.ndim, nd.ndim_of(n))
self.assertEqual(a.shape, n.shape)
self.assertEqual(a.strides, n.strides)
# Make sure it's a view
a[1] = 100
self.assertEqual(nd.as_py(n[1]), 100)
n = nd.view(np.arange(12, dtype=(nonnative + 'i4')).reshape(3,4))
a = np.asarray(n)
self.assertEqual(a.dtype, np.dtype(nonnative + 'i4'))
self.assertTrue(a.flags.aligned)
self.assertEqual(a.ndim, nd.ndim_of(n))
self.assertEqual(a.shape, n.shape)
self.assertEqual(a.strides, n.strides)
# Make sure it's a view
a[1,2] = 100
self.assertEqual(nd.as_py(n[1,2]), 100)
n = nd.view(np.arange(49, dtype='i1')[1:].view(dtype=np.int32).reshape(4,3))
a = np.asarray(n)
self.assertEqual(a.dtype, np.dtype(np.int32))
self.assertFalse(a.flags.aligned)
self.assertEqual(a.ndim, nd.ndim_of(n))
self.assertEqual(a.shape, n.shape)
self.assertEqual(a.strides, n.strides)
# Make sure it's a view
a[1,2] = 100
self.assertEqual(nd.as_py(n[1,2]), 100)
n = nd.view(np.arange(49, dtype='i1')[1:].view(
dtype=(nonnative + 'i4')).reshape(2,2,3))
a = np.asarray(n)
self.assertEqual(a.dtype, np.dtype(nonnative + 'i4'))
self.assertFalse(a.flags.aligned)
self.assertEqual(a.ndim, nd.ndim_of(n))
self.assertEqual(a.shape, n.shape)
self.assertEqual(a.strides, n.strides)
# Make sure it's a view
a[1,1,1] = 100
self.assertEqual(nd.as_py(n[1,1,1]), 100)
"""
def test_numpy_view_of_noncontig_dynd_array(self):
n = nd.range(10)[1::3]
a = np.asarray(n)
self.assertEqual(a.dtype, np.dtype('i4'))
self.assertFalse(a.flags.c_contiguous)
self.assertEqual(a.ndim, nd.ndim_of(n))
self.assertEqual(a.shape, n.shape)
self.assertEqual(a.strides, n.strides)
# Make sure it's a view as needed
a[1] = 100
self.assertEqual(nd.as_py(n[1]), 100)
def test_numpy_view_of_dynd_struct(self):
n = nd.array([(1, 2), (3, 4)], type='2 * {a: int32, b: float64}')
a = np.asarray(n)
self.assertEqual(a.dtype, np.dtype({'names':['a','b'],
'formats':['i4','f8'],
'offsets':[0,8], 'itemsize':16}))
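        # With alignment, the int32 'a' sits at offset 0, 4 bytes of padding
        # follow, and the float64 'b' starts at offset 8, giving itemsize 16.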
assert_array_equal(a['a'], [1, 3])
assert_array_equal(a['b'], [2, 4])
# If the memory of the struct is out of order, PEP 3118 doesn't work
n = n[:, ::-1]
if sys.version_info >= (2, 7):
self.assertRaises(BufferError, lambda: memoryview(n))
def test_numpy_dynd_fixed_string_interop(self):
# Tests converting fixed-size string arrays to/from numpy
# ASCII Numpy -> dynd
a = np.array(['abc', 'testing', 'array'])
b = nd.view(a)
if sys.version_info >= (3, 0):
self.assertEqual(ndt.make_fixed_string(7, 'utf_32'), nd.dtype_of(b))
else:
self.assertEqual(ndt.make_fixed_string(7, 'ascii'), nd.dtype_of(b))
self.assertEqual(nd.dtype_of(b), ndt.type(a.dtype))
# Make sure it's ascii
a = a.astype('S7')
b = nd.view(a)
# ASCII dynd -> Numpy
c = np.asarray(b)
self.assertEqual(a.dtype, c.dtype)
assert_array_equal(a, c)
# verify 'a' and 'c' are looking at the same data
a[1] = 'modify'
assert_array_equal(a, c)
# ASCII dynd -> UTF32 dynd
b_u = b.cast(ndt.make_fixed_dim(3, ndt.make_fixed_string(7, 'utf_32')))
# Evaluate to its value array
b_u = b_u
self.assertEqual(
ndt.make_fixed_string(7, 'utf_32'),
nd.dtype_of(b_u))
# UTF32 dynd -> Numpy
c_u = np.asarray(b_u)
self.assertEqual(nd.dtype_of(b_u), ndt.type(c_u.dtype))
assert_array_equal(a.astype('U'), c_u)
# 'a' and 'c_u' are not looking at the same data
a[1] = 'diff'
self.assertFalse(np.all(a == c_u))
def test_numpy_blockref_string(self):
# Blockref strings don't have a corresponding Numpy construct
# Therefore numpy makes an object array scalar out of them.
a = nd.array("abcdef")
self.assertEqual(nd.dtype_of(a), ndt.string)
# Some versions of NumPy produce an error instead,
# so this assertion is removed
#self.assertEqual(np.asarray(a).dtype, np.dtype(object))
a = nd.array(u"abcdef \uc548\ub155")
self.assertEqual(nd.dtype_of(a), ndt.string)
# Some versions of NumPy produce an error instead,
# so this assertion is removed
#self.assertEqual(np.asarray(a).dtype, np.dtype(object))
def test_readwrite_access_flags(self):
def assign_to(x, y):
x[0] = y
# Tests that read/write access control is preserved to/from numpy
a = np.arange(10.)
# Writeable
b = nd.view(a)
b[0] = 2.0
self.assertEqual(nd.as_py(b[0]), 2.0)
self.assertEqual(a[0], 2.0)
# should still be 2.0
self.assertEqual(nd.as_py(b[0]), 2.0)
self.assertEqual(a[0], 2.0)
# Not writeable
a.flags.writeable = False
b = nd.view(a)
# self.assertRaises(RuntimeError, assign_to, b, 3.0)
# should still be 2.0
self.assertEqual(nd.as_py(b[0]), 2.0)
self.assertEqual(a[0], 2.0)
class TestAsNumpy(unittest.TestCase):
def test_struct_as_numpy(self):
# Aligned struct
a = nd.array([[1, 2], [3, 4]], type='2 * {x : int32, y: int64}')
b = a.to(np.ndarray)
self.assertEqual(b.dtype,
np.dtype([('x', np.int32), ('y', np.int64)], align=True))
self.assertEqual(nd.as_py(a.x), b['x'].tolist())
self.assertEqual(nd.as_py(a.y), b['y'].tolist())
# Unaligned struct
# a = nd.array([[1, 2], [3, 4]],
# type='2 * {x : unaligned[int32], y: unaligned[int64]}')
# b = nd.as_numpy(a)
# self.assertEqual(b.dtype, np.dtype([('x', np.int32), ('y', np.int64)]))
# self.assertEqual(nd.as_py(a.x), b['x'].tolist())
# self.assertEqual(nd.as_py(a.y), b['y'].tolist())
def test_struct_via_pep3118(self):
# Aligned struct
a = nd.array([[1, 2], [3, 4]], type='2 * {x : int32, y: int64}')
b = np.asarray(a)
self.assertEqual(b.dtype,
np.dtype([('x', np.int32), ('y', np.int64)], align=True))
self.assertEqual(nd.as_py(a.x), b['x'].tolist())
self.assertEqual(nd.as_py(a.y), b['y'].tolist())
def test_fixed_dim(self):
a = nd.array([1, 3, 5], type='3 * int32')
b = a.to(np.ndarray)
self.assertEqual(b.dtype, np.dtype('int32'))
self.assertEqual(b.tolist(), [1, 3, 5])
def test_fixed_dim_via_pep3118(self):
a = nd.array([1, 3, 5], type='3 * int32')
b = np.asarray(a)
self.assertEqual(b.dtype, np.dtype('int32'))
self.assertEqual(b.tolist(), [1, 3, 5])
@unittest.skip('Test disabled since callables were reworked')
class TestNumpyScalarInterop(unittest.TestCase):
def test_numpy_scalar_conversion_dtypes(self):
self.assertEqual(nd.dtype_of(nd.array(np.bool_(True))), ndt.bool)
self.assertEqual(nd.dtype_of(nd.array(np.bool(True))), ndt.bool)
self.assertEqual(nd.dtype_of(nd.array(np.int8(100))), ndt.int8)
self.assertEqual(nd.dtype_of(nd.array(np.int16(100))), ndt.int16)
self.assertEqual(nd.dtype_of(nd.array(np.int32(100))), ndt.int32)
self.assertEqual(nd.dtype_of(nd.array(np.int64(100))), ndt.int64)
self.assertEqual(nd.dtype_of(nd.array(np.uint8(100))), ndt.uint8)
self.assertEqual(nd.dtype_of(nd.array(np.uint16(100))), ndt.uint16)
self.assertEqual(nd.dtype_of(nd.array(np.uint32(100))), ndt.uint32)
self.assertEqual(nd.dtype_of(nd.array(np.uint64(100))), ndt.uint64)
self.assertEqual(nd.dtype_of(nd.array(np.float32(100.))), ndt.float32)
self.assertEqual(nd.dtype_of(nd.array(np.float64(100.))), ndt.float64)
self.assertEqual(nd.dtype_of(nd.array(np.complex64(100j))),
ndt.complex_float32)
self.assertEqual(nd.dtype_of(nd.array(np.complex128(100j))),
ndt.complex_float64)
# if np.__version__ >= '1.7':
# self.assertEqual(nd.dtype_of(nd.array(np.datetime64('2000-12-13'))),
# ndt.date)
# self.assertEqual(nd.dtype_of(nd.array(np.datetime64('2000-12-13T12:30'))),
# ndt.type('datetime[tz="UTC"]'))
def test_numpy_scalar_conversion_values(self):
self.assertEqual(nd.as_py(nd.array(np.bool_(True))), True)
self.assertEqual(nd.as_py(nd.array(np.bool_(False))), False)
self.assertEqual(nd.as_py(nd.array(np.int8(100))), 100)
self.assertEqual(nd.as_py(nd.array(np.int8(-100))), -100)
self.assertEqual(nd.as_py(nd.array(np.int16(20000))), 20000)
self.assertEqual(nd.as_py(nd.array(np.int16(-20000))), -20000)
self.assertEqual(nd.as_py(nd.array(np.int32(1000000000))), 1000000000)
self.assertEqual(nd.as_py(nd.array(np.int64(-1000000000000))),
-1000000000000)
self.assertEqual(nd.as_py(nd.array(np.int64(1000000000000))),
1000000000000)
self.assertEqual(nd.as_py(nd.array(np.int32(-1000000000))),
-1000000000)
self.assertEqual(nd.as_py(nd.array(np.uint8(200))), 200)
self.assertEqual(nd.as_py(nd.array(np.uint16(50000))), 50000)
self.assertEqual(nd.as_py(nd.array(np.uint32(3000000000))), 3000000000)
self.assertEqual(nd.as_py(nd.array(np.uint64(10000000000000000000))),
10000000000000000000)
self.assertEqual(nd.as_py(nd.array(np.float32(2.5))), 2.5)
self.assertEqual(nd.as_py(nd.array(np.float64(2.5))), 2.5)
self.assertEqual(nd.as_py(nd.array(np.complex64(2.5-1j))), 2.5-1j)
self.assertEqual(nd.as_py(nd.array(np.complex128(2.5-1j))), 2.5-1j)
# if np.__version__ >= '1.7':
# # Various date units
# self.assertEqual(nd.as_py(nd.array(np.datetime64('2000'))),
# date(2000, 1, 1))
# self.assertEqual(nd.as_py(nd.array(np.datetime64('2000-12'))),
# date(2000, 12, 1))
# self.assertEqual(nd.as_py(nd.array(np.datetime64('2000-12-13'))),
# date(2000, 12, 13))
# # Various datetime units
# self.assertEqual(nd.as_py(nd.array(np.datetime64('2000-12-13T12Z'))),
# datetime(2000, 12, 13, 12, 0))
# self.assertEqual(nd.as_py(nd.array(np.datetime64('2000-12-13T12:30Z'))),
# datetime(2000, 12, 13, 12, 30))
# self.assertEqual(nd.as_py(nd.array(np.datetime64('1823-12-13T12:30Z'))),
# datetime(1823, 12, 13, 12, 30))
# self.assertEqual(nd.as_py(nd.array(np.datetime64('2000-12-13T12:30:24Z'))),
# datetime(2000, 12, 13, 12, 30, 24))
# self.assertEqual(nd.as_py(nd.array(np.datetime64('2000-12-13T12:30:24.123Z'))),
# datetime(2000, 12, 13, 12, 30, 24, 123000))
# self.assertEqual(nd.as_py(nd.array(np.datetime64('2000-12-13T12:30:24.123456Z'))),
# datetime(2000, 12, 13, 12, 30, 24, 123456))
# self.assertEqual(nd.as_py(nd.array(np.datetime64('2000-12-13T12:30:24.123456124Z'))),
# datetime(2000, 12, 13, 12, 30, 24, 123456))
# self.assertEqual(str(nd.array(np.datetime64('2000-12-13T12:30:24.123456124Z'))),
# '2000-12-13T12:30:24.1234561Z')
# self.assertEqual(str(nd.array(np.datetime64('1842-12-13T12:30:24.123456124Z'))),
# '1842-12-13T12:30:24.1234561Z')
"""
TODO: This test fails since we changed cstruct -> struct.
def test_numpy_struct_scalar(self):
# Create a NumPy struct scalar object, by indexing into
# a structured array
a = np.array([(10, 11, 12)], dtype='i4,i8,f8')[0]
aligned_tp = ndt.type('{f0: int32, f1: int64, f2: float64}')
val = {'f0': 10, 'f1': 11, 'f2': 12}
# Construct using nd.array
b = nd.array(a)
self.assertEqual(nd.type_of(b), aligned_tp)
self.assertEqual(nd.as_py(b), val)
self.assertEqual(b.access_flags, 'readwrite')
b = nd.array(a, access='rw')
self.assertEqual(nd.type_of(b), aligned_tp)
self.assertEqual(nd.as_py(b), val)
self.assertEqual(b.access_flags, 'readwrite')
# Construct using nd.asarray
b = nd.asarray(a)
self.assertEqual(nd.type_of(b), aligned_tp)
self.assertEqual(nd.as_py(b), val)
self.assertEqual(b.access_flags, 'readwrite')
b = nd.asarray(a, access='rw')
self.assertEqual(nd.type_of(b), aligned_tp)
self.assertEqual(nd.as_py(b), val)
self.assertEqual(b.access_flags, 'readwrite')
# nd.view should fail
self.assertRaises(RuntimeError, nd.view, a)
"""
def test_var_dim_conversion(self):
# A simple instantiated var_dim array should be
# viewable with numpy without changes
a = nd.array([1, 2, 3, 4, 5], type='var * int32')
b = a.to(np.ndarray)
self.assertTrue(isinstance(b, np.ndarray))
self.assertEqual(b.dtype, np.dtype('int32'))
# Use the NumPy assertions which support arrays
assert_equal(b, [1, 2, 3, 4, 5])
"""
def test_date_from_numpy(self):
a = np.array(['2000-12-13', '1995-05-02'], dtype='M8[D]')
b = nd.array(a)
self.assertEqual(nd.type_of(b), ndt.type('2 * date'))
self.assertEqual(nd.as_py(b), [date(2000, 12, 13), date(1995, 5, 2)])
def test_date_as_numpy(self):
a = nd.array([date(2000, 12, 13), date(1995, 5, 2)])
b = nd.as_numpy(a, allow_copy=True)
assert_equal(b, np.array(['2000-12-13', '1995-05-02'], dtype='M8[D]'))
def test_datetime_from_numpy(self):
# NumPy hours unit
a = np.array(['2000-12-13T12Z', '1955-05-02T02Z'],
dtype='M8[h]')
b = nd.array(a)
self.assertEqual(nd.type_of(b), ndt.type('2 * datetime[tz="UTC"]'))
self.assertEqual(nd.as_py(b), [datetime(2000, 12, 13, 12),
datetime(1955, 5, 2, 2)])
# NumPy minutes unit
a = np.array(['2000-12-13T12:30Z', '1955-05-02T02:23Z'],
dtype='M8[m]')
b = nd.array(a)
self.assertEqual(nd.type_of(b), ndt.type('2 * datetime[tz="UTC"]'))
self.assertEqual(nd.as_py(b), [datetime(2000, 12, 13, 12, 30),
datetime(1955, 5, 2, 2, 23)])
# NumPy seconds unit
a = np.array(['2000-12-13T12:30:51Z', '1955-05-02T02:23:29Z'],
dtype='M8[s]')
b = nd.array(a)
self.assertEqual(nd.type_of(b), ndt.type('2 * datetime[tz="UTC"]'))
self.assertEqual(nd.as_py(b), [datetime(2000, 12, 13, 12, 30, 51),
datetime(1955, 5, 2, 2, 23, 29)])
# NumPy milliseconds unit
a = np.array(['2000-12-13T12:30:51.123Z', '1955-05-02T02:23:29.456Z'],
dtype='M8[ms]')
b = nd.array(a)
self.assertEqual(nd.type_of(b), ndt.type('2 * datetime[tz="UTC"]'))
self.assertEqual(nd.as_py(b), [datetime(2000, 12, 13, 12, 30, 51, 123000),
datetime(1955, 5, 2, 2, 23, 29, 456000)])
# NumPy microseconds unit
a = np.array(['2000-12-13T12:30:51.123456Z', '1955-05-02T02:23:29.456123Z'],
dtype='M8[us]')
b = nd.array(a)
self.assertEqual(nd.type_of(b), ndt.type('2 * datetime[tz="UTC"]'))
self.assertEqual(nd.as_py(b), [datetime(2000, 12, 13, 12, 30, 51, 123456),
datetime(1955, 5, 2, 2, 23, 29, 456123)])
# NumPy nanoseconds unit (truncated to 100 nanosecond ticks)
a = np.array(['2000-12-13T12:30:51.123456987Z',
'1955-05-02T02:23:29.456123798Z'],
dtype='M8[ns]')
b = nd.array(a)
self.assertEqual(nd.type_of(b),
ndt.type('2 * datetime[tz="UTC"]'))
self.assertEqual([str(x) for x in b], ["2000-12-13T12:30:51.1234569Z",
"1955-05-02T02:23:29.4561237Z"])
def test_misaligned_datetime_from_numpy(self):
a = np.array([(1, "2000-12-25T00:00:01Z"),
(2, "2001-12-25T00:00:01Z"),
(3, "2002-12-25T00:00:01Z")],
dtype=[('id', 'int8'), ('ts', 'M8[us]')])
b = nd.view(a)
self.assertEqual(nd.type_of(b),
ndt.type("3 * {id : int8, ts: adapt[(unaligned[int64]) -> datetime[tz='UTC'], 'microseconds since 1970']}"))
self.assertEqual(nd.as_py(b),
[{'id': 1, 'ts': datetime(2000, 12, 25, 0, 0, 1)},
{'id': 2, 'ts': datetime(2001, 12, 25, 0, 0, 1)},
{'id': 3, 'ts': datetime(2002, 12, 25, 0, 0, 1)}])
def test_datetime_as_numpy(self):
a = nd.array(['2000-12-13T12:30',
'1995-05-02T2:15:33'],
dtype='datetime[tz="UTC"]')
b = nd.as_numpy(a, allow_copy=True)
assert_equal(b, np.array(['2000-12-13T12:30Z', '1995-05-02T02:15:33Z'],
dtype='M8[us]'))
"""
def test_string_as_numpy(self):
a = nd.array(["this", "is", "a", "test of varlen strings"])
b = a.to(np.ndarray)
self.assertEqual(b.dtype, np.dtype('O'))
assert_equal(b, np.array(["this", "is", "a", "test of varlen strings"],
dtype='O'))
# Also in a struct
a = nd.array([(1, "testing", 1.5), (10, "abc", 2)],
type="2 * {x: int, y: string, z: real}")
b = a.to(np.ndarray)
self.assertEqual(b.dtype, np.dtype([('x', 'int32'),
('y', 'O'),
('z', 'float64')], align=True))
self.assertEqual(b.tolist(), [(1, "testing", 1.5), (10, "abc", 2)])
if __name__ == '__main__':
unittest.main(verbosity=2)
| [
"numpy.bool_",
"dynd.ndt.float32.as_numpy",
"dynd.ndt.complex_float32.as_numpy",
"numpy.uint32",
"numpy.uint64",
"dynd.ndt.float64.as_numpy",
"dynd.nd.asarray",
"numpy.arange",
"dynd.nd.type_of",
"dynd.ndt.type",
"numpy.float64",
"numpy.complex64",
"numpy.int8",
"unittest.main",
"dynd.nd... | [((90, 150), 'unittest.skip', 'unittest.skip', (['"""Test disabled since callables were reworked"""'], {}), "('Test disabled since callables were reworked')\n", (103, 150), False, 'import unittest\n'), ((7349, 7409), 'unittest.skip', 'unittest.skip', (['"""Test disabled since callables were reworked"""'], {}), "('Test disabled since callables were reworked')\n", (7362, 7409), False, 'import unittest\n'), ((17164, 17224), 'unittest.skip', 'unittest.skip', (['"""Test disabled since callables were reworked"""'], {}), "('Test disabled since callables were reworked')\n", (17177, 17224), False, 'import unittest\n'), ((28673, 28699), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (28686, 28699), False, 'import unittest\n'), ((3921, 3955), 'dynd.ndt.type', 'ndt.type', (['"""{x : int32, y : int64}"""'], {}), "('{x : int32, y : int64}')\n", (3929, 3955), False, 'from dynd import nd, ndt\n'), ((4090, 4130), 'numpy.dtype', 'np.dtype', (['object'], {'metadata': "{'vlen': str}"}), "(object, metadata={'vlen': str})\n", (4098, 4130), True, 'import numpy as np\n'), ((4393, 4442), 'numpy.dtype', 'np.dtype', (["('O', [(({'type': str}, 'vlen'), 'O')])"], {}), "(('O', [(({'type': str}, 'vlen'), 'O')]))\n", (4401, 4442), True, 'import numpy as np\n'), ((4734, 4769), 'dynd.nd.array', 'nd.array', (["['testing', 'one', 'two']"], {}), "(['testing', 'one', 'two'])\n", (4742, 4769), False, 'from dynd import nd, ndt\n'), ((5259, 5270), 'dynd.nd.array', 'nd.array', (['y'], {}), '(y)\n', (5267, 5270), False, 'from dynd import nd, ndt\n'), ((7646, 7672), 'numpy.array', 'np.array', (['(3)'], {'dtype': '"""int64"""'}), "(3, dtype='int64')\n", (7654, 7672), True, 'import numpy as np\n'), ((7685, 7695), 'dynd.nd.view', 'nd.view', (['a'], {}), '(a)\n', (7692, 7695), False, 'from dynd import nd, ndt\n'), ((7976, 8002), 'numpy.array', 'np.array', (['(3)'], {'dtype': '"""int64"""'}), "(3, dtype='int64')\n", (7984, 8002), True, 'import numpy as np\n'), ((8015, 8026), 'dynd.nd.array', 'nd.array', (['a'], {}), '(a)\n', (8023, 8026), False, 'from dynd import nd, ndt\n'), ((8319, 8345), 'numpy.array', 'np.array', (['(3)'], {'dtype': '"""int64"""'}), "(3, dtype='int64')\n", (8327, 8345), True, 'import numpy as np\n'), ((8358, 8371), 'dynd.nd.asarray', 'nd.asarray', (['a'], {}), '(a)\n', (8368, 8371), False, 'from dynd import nd, ndt\n'), ((8749, 8778), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'np.int32'}), '(10, dtype=np.int32)\n', (8758, 8778), True, 'import numpy as np\n'), ((8791, 8801), 'dynd.nd.view', 'nd.view', (['a'], {}), '(a)\n', (8798, 8801), False, 'from dynd import nd, ndt\n'), ((11740, 11753), 'numpy.asarray', 'np.asarray', (['n'], {}), '(n)\n', (11750, 11753), True, 'import numpy as np\n'), ((12155, 12216), 'dynd.nd.array', 'nd.array', (['[(1, 2), (3, 4)]'], {'type': '"""2 * {a: int32, b: float64}"""'}), "([(1, 2), (3, 4)], type='2 * {a: int32, b: float64}')\n", (12163, 12216), False, 'from dynd import nd, ndt\n'), ((12229, 12242), 'numpy.asarray', 'np.asarray', (['n'], {}), '(n)\n', (12239, 12242), True, 'import numpy as np\n'), ((12904, 12941), 'numpy.array', 'np.array', (["['abc', 'testing', 'array']"], {}), "(['abc', 'testing', 'array'])\n", (12912, 12941), True, 'import numpy as np\n'), ((12954, 12964), 'dynd.nd.view', 'nd.view', (['a'], {}), '(a)\n', (12961, 12964), False, 'from dynd import nd, ndt\n'), ((13310, 13320), 'dynd.nd.view', 'nd.view', (['a'], {}), '(a)\n', (13317, 13320), False, 'from dynd import nd, ndt\n'), ((13364, 13377), 'numpy.asarray', 
'np.asarray', (['b'], {}), '(b)\n', (13374, 13377), True, 'import numpy as np\n'), ((13898, 13913), 'numpy.asarray', 'np.asarray', (['b_u'], {}), '(b_u)\n', (13908, 13913), True, 'import numpy as np\n'), ((14340, 14358), 'dynd.nd.array', 'nd.array', (['"""abcdef"""'], {}), "('abcdef')\n", (14348, 14358), False, 'from dynd import nd, ndt\n'), ((14588, 14610), 'dynd.nd.array', 'nd.array', (['u"""abcdef 안녕"""'], {}), "(u'abcdef 안녕')\n", (14596, 14610), False, 'from dynd import nd, ndt\n'), ((15017, 15032), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (15026, 15032), True, 'import numpy as np\n'), ((15065, 15075), 'dynd.nd.view', 'nd.view', (['a'], {}), '(a)\n', (15072, 15075), False, 'from dynd import nd, ndt\n'), ((15361, 15371), 'dynd.nd.view', 'nd.view', (['a'], {}), '(a)\n', (15368, 15371), False, 'from dynd import nd, ndt\n'), ((15656, 15716), 'dynd.nd.array', 'nd.array', (['[[1, 2], [3, 4]]'], {'type': '"""2 * {x : int32, y: int64}"""'}), "([[1, 2], [3, 4]], type='2 * {x : int32, y: int64}')\n", (15664, 15716), False, 'from dynd import nd, ndt\n'), ((16418, 16478), 'dynd.nd.array', 'nd.array', (['[[1, 2], [3, 4]]'], {'type': '"""2 * {x : int32, y: int64}"""'}), "([[1, 2], [3, 4]], type='2 * {x : int32, y: int64}')\n", (16426, 16478), False, 'from dynd import nd, ndt\n'), ((16491, 16504), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (16501, 16504), True, 'import numpy as np\n'), ((16774, 16811), 'dynd.nd.array', 'nd.array', (['[1, 3, 5]'], {'type': '"""3 * int32"""'}), "([1, 3, 5], type='3 * int32')\n", (16782, 16811), False, 'from dynd import nd, ndt\n'), ((16997, 17034), 'dynd.nd.array', 'nd.array', (['[1, 3, 5]'], {'type': '"""3 * int32"""'}), "([1, 3, 5], type='3 * int32')\n", (17005, 17034), False, 'from dynd import nd, ndt\n'), ((17047, 17060), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (17057, 17060), True, 'import numpy as np\n'), ((23573, 23618), 'dynd.nd.array', 'nd.array', (['[1, 2, 3, 4, 5]'], {'type': '"""var * int32"""'}), "([1, 2, 3, 4, 5], type='var * int32')\n", (23581, 23618), False, 'from dynd import nd, ndt\n'), ((27935, 27990), 'dynd.nd.array', 'nd.array', (["['this', 'is', 'a', 'test of varlen strings']"], {}), "(['this', 'is', 'a', 'test of varlen strings'])\n", (27943, 27990), False, 'from dynd import nd, ndt\n'), ((28233, 28326), 'dynd.nd.array', 'nd.array', (["[(1, 'testing', 1.5), (10, 'abc', 2)]"], {'type': '"""2 * {x: int, y: string, z: real}"""'}), "([(1, 'testing', 1.5), (10, 'abc', 2)], type=\n '2 * {x: int, y: string, z: real}')\n", (28241, 28326), False, 'from dynd import nd, ndt\n'), ((484, 501), 'dynd.ndt.type', 'ndt.type', (['np.bool'], {}), '(np.bool)\n', (492, 501), False, 'from dynd import nd, ndt\n'), ((538, 556), 'dynd.ndt.type', 'ndt.type', (['np.bool_'], {}), '(np.bool_)\n', (546, 556), False, 'from dynd import nd, ndt\n'), ((593, 610), 'dynd.ndt.type', 'ndt.type', (['np.int8'], {}), '(np.int8)\n', (601, 610), False, 'from dynd import nd, ndt\n'), ((648, 666), 'dynd.ndt.type', 'ndt.type', (['np.int16'], {}), '(np.int16)\n', (656, 666), False, 'from dynd import nd, ndt\n'), ((704, 722), 'dynd.ndt.type', 'ndt.type', (['np.int32'], {}), '(np.int32)\n', (712, 722), False, 'from dynd import nd, ndt\n'), ((760, 778), 'dynd.ndt.type', 'ndt.type', (['np.int64'], {}), '(np.int64)\n', (768, 778), False, 'from dynd import nd, ndt\n'), ((816, 834), 'dynd.ndt.type', 'ndt.type', (['np.uint8'], {}), '(np.uint8)\n', (824, 834), False, 'from dynd import nd, ndt\n'), ((873, 892), 'dynd.ndt.type', 'ndt.type', (['np.uint16'], {}), 
'(np.uint16)\n', (881, 892), False, 'from dynd import nd, ndt\n'), ((931, 950), 'dynd.ndt.type', 'ndt.type', (['np.uint32'], {}), '(np.uint32)\n', (939, 950), False, 'from dynd import nd, ndt\n'), ((989, 1008), 'dynd.ndt.type', 'ndt.type', (['np.uint64'], {}), '(np.uint64)\n', (997, 1008), False, 'from dynd import nd, ndt\n'), ((1048, 1068), 'dynd.ndt.type', 'ndt.type', (['np.float32'], {}), '(np.float32)\n', (1056, 1068), False, 'from dynd import nd, ndt\n'), ((1108, 1128), 'dynd.ndt.type', 'ndt.type', (['np.float64'], {}), '(np.float64)\n', (1116, 1128), False, 'from dynd import nd, ndt\n'), ((1176, 1198), 'dynd.ndt.type', 'ndt.type', (['np.complex64'], {}), '(np.complex64)\n', (1184, 1198), False, 'from dynd import nd, ndt\n'), ((1246, 1269), 'dynd.ndt.type', 'ndt.type', (['np.complex128'], {}), '(np.complex128)\n', (1254, 1269), False, 'from dynd import nd, ndt\n'), ((2319, 2353), 'dynd.ndt.make_fixed_string', 'ndt.make_fixed_string', (['(10)', '"""ascii"""'], {}), "(10, 'ascii')\n", (2340, 2353), False, 'from dynd import nd, ndt\n'), ((2427, 2462), 'dynd.ndt.make_fixed_string', 'ndt.make_fixed_string', (['(10)', '"""utf_32"""'], {}), "(10, 'utf_32')\n", (2448, 2462), False, 'from dynd import nd, ndt\n'), ((3821, 3877), 'numpy.dtype', 'np.dtype', (["[('x', np.int32), ('y', np.int64)]"], {'align': '(True)'}), "([('x', np.int32), ('y', np.int64)], align=True)\n", (3829, 3877), True, 'import numpy as np\n'), ((4157, 4169), 'dynd.ndt.type', 'ndt.type', (['dt'], {}), '(dt)\n', (4165, 4169), False, 'from dynd import nd, ndt\n'), ((4238, 4282), 'numpy.dtype', 'np.dtype', (['object'], {'metadata': "{'vlen': unicode}"}), "(object, metadata={'vlen': unicode})\n", (4246, 4282), True, 'import numpy as np\n'), ((4470, 4482), 'dynd.ndt.type', 'ndt.type', (['dt'], {}), '(dt)\n', (4478, 4482), False, 'from dynd import nd, ndt\n'), ((4551, 4604), 'numpy.dtype', 'np.dtype', (["('O', [(({'type': unicode}, 'vlen'), 'O')])"], {}), "(('O', [(({'type': unicode}, 'vlen'), 'O')]))\n", (4559, 4604), True, 'import numpy as np\n'), ((4795, 4808), 'dynd.nd.type_of', 'nd.type_of', (['x'], {}), '(x)\n', (4805, 4808), False, 'from dynd import nd, ndt\n'), ((4810, 4832), 'dynd.ndt.type', 'ndt.type', (['"""3 * string"""'], {}), "('3 * string')\n", (4818, 4832), False, 'from dynd import nd, ndt\n'), ((5296, 5309), 'dynd.nd.type_of', 'nd.type_of', (['z'], {}), '(z)\n', (5306, 5309), False, 'from dynd import nd, ndt\n'), ((5311, 5324), 'dynd.nd.type_of', 'nd.type_of', (['x'], {}), '(x)\n', (5321, 5324), False, 'from dynd import nd, ndt\n'), ((5351, 5362), 'dynd.nd.as_py', 'nd.as_py', (['z'], {}), '(z)\n', (5359, 5362), False, 'from dynd import nd, ndt\n'), ((5364, 5375), 'dynd.nd.as_py', 'nd.as_py', (['x'], {}), '(x)\n', (5372, 5375), False, 'from dynd import nd, ndt\n'), ((5438, 5457), 'dynd.ndt.bool.as_numpy', 'ndt.bool.as_numpy', ([], {}), '()\n', (5455, 5457), False, 'from dynd import nd, ndt\n'), ((5459, 5475), 'numpy.dtype', 'np.dtype', (['"""bool"""'], {}), "('bool')\n", (5467, 5475), True, 'import numpy as np\n'), ((5502, 5521), 'dynd.ndt.int8.as_numpy', 'ndt.int8.as_numpy', ([], {}), '()\n', (5519, 5521), False, 'from dynd import nd, ndt\n'), ((5523, 5539), 'numpy.dtype', 'np.dtype', (['"""int8"""'], {}), "('int8')\n", (5531, 5539), True, 'import numpy as np\n'), ((5566, 5586), 'dynd.ndt.int16.as_numpy', 'ndt.int16.as_numpy', ([], {}), '()\n', (5584, 5586), False, 'from dynd import nd, ndt\n'), ((5588, 5605), 'numpy.dtype', 'np.dtype', (['"""int16"""'], {}), "('int16')\n", (5596, 5605), True, 'import numpy as 
np\n'), ((5632, 5652), 'dynd.ndt.int32.as_numpy', 'ndt.int32.as_numpy', ([], {}), '()\n', (5650, 5652), False, 'from dynd import nd, ndt\n'), ((5654, 5671), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (5662, 5671), True, 'import numpy as np\n'), ((5698, 5718), 'dynd.ndt.int64.as_numpy', 'ndt.int64.as_numpy', ([], {}), '()\n', (5716, 5718), False, 'from dynd import nd, ndt\n'), ((5720, 5737), 'numpy.dtype', 'np.dtype', (['"""int64"""'], {}), "('int64')\n", (5728, 5737), True, 'import numpy as np\n'), ((5764, 5784), 'dynd.ndt.uint8.as_numpy', 'ndt.uint8.as_numpy', ([], {}), '()\n', (5782, 5784), False, 'from dynd import nd, ndt\n'), ((5786, 5803), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (5794, 5803), True, 'import numpy as np\n'), ((5830, 5851), 'dynd.ndt.uint16.as_numpy', 'ndt.uint16.as_numpy', ([], {}), '()\n', (5849, 5851), False, 'from dynd import nd, ndt\n'), ((5853, 5871), 'numpy.dtype', 'np.dtype', (['"""uint16"""'], {}), "('uint16')\n", (5861, 5871), True, 'import numpy as np\n'), ((5898, 5919), 'dynd.ndt.uint32.as_numpy', 'ndt.uint32.as_numpy', ([], {}), '()\n', (5917, 5919), False, 'from dynd import nd, ndt\n'), ((5921, 5939), 'numpy.dtype', 'np.dtype', (['"""uint32"""'], {}), "('uint32')\n", (5929, 5939), True, 'import numpy as np\n'), ((5966, 5987), 'dynd.ndt.uint64.as_numpy', 'ndt.uint64.as_numpy', ([], {}), '()\n', (5985, 5987), False, 'from dynd import nd, ndt\n'), ((5989, 6007), 'numpy.dtype', 'np.dtype', (['"""uint64"""'], {}), "('uint64')\n", (5997, 6007), True, 'import numpy as np\n'), ((6034, 6056), 'dynd.ndt.float32.as_numpy', 'ndt.float32.as_numpy', ([], {}), '()\n', (6054, 6056), False, 'from dynd import nd, ndt\n'), ((6058, 6077), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (6066, 6077), True, 'import numpy as np\n'), ((6104, 6126), 'dynd.ndt.float64.as_numpy', 'ndt.float64.as_numpy', ([], {}), '()\n', (6124, 6126), False, 'from dynd import nd, ndt\n'), ((6128, 6147), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (6136, 6147), True, 'import numpy as np\n'), ((6174, 6204), 'dynd.ndt.complex_float32.as_numpy', 'ndt.complex_float32.as_numpy', ([], {}), '()\n', (6202, 6204), False, 'from dynd import nd, ndt\n'), ((6206, 6227), 'numpy.dtype', 'np.dtype', (['"""complex64"""'], {}), "('complex64')\n", (6214, 6227), True, 'import numpy as np\n'), ((6254, 6284), 'dynd.ndt.complex_float64.as_numpy', 'ndt.complex_float64.as_numpy', ([], {}), '()\n', (6282, 6284), False, 'from dynd import nd, ndt\n'), ((6286, 6308), 'numpy.dtype', 'np.dtype', (['"""complex128"""'], {}), "('complex128')\n", (6294, 6308), True, 'import numpy as np\n'), ((7721, 7734), 'dynd.nd.type_of', 'nd.type_of', (['n'], {}), '(n)\n', (7731, 7734), False, 'from dynd import nd, ndt\n'), ((7772, 7783), 'dynd.nd.as_py', 'nd.as_py', (['n'], {}), '(n)\n', (7780, 7783), False, 'from dynd import nd, ndt\n'), ((8052, 8065), 'dynd.nd.type_of', 'nd.type_of', (['n'], {}), '(n)\n', (8062, 8065), False, 'from dynd import nd, ndt\n'), ((8103, 8114), 'dynd.nd.as_py', 'nd.as_py', (['n'], {}), '(n)\n', (8111, 8114), False, 'from dynd import nd, ndt\n'), ((8250, 8261), 'dynd.nd.as_py', 'nd.as_py', (['n'], {}), '(n)\n', (8258, 8261), False, 'from dynd import nd, ndt\n'), ((8397, 8410), 'dynd.nd.type_of', 'nd.type_of', (['n'], {}), '(n)\n', (8407, 8410), False, 'from dynd import nd, ndt\n'), ((8448, 8459), 'dynd.nd.as_py', 'nd.as_py', (['n'], {}), '(n)\n', (8456, 8459), False, 'from dynd import nd, ndt\n'), ((8827, 8841), 
'dynd.nd.dtype_of', 'nd.dtype_of', (['n'], {}), '(n)\n', (8838, 8841), False, 'from dynd import nd, ndt\n'), ((8879, 8892), 'dynd.nd.ndim_of', 'nd.ndim_of', (['n'], {}), '(n)\n', (8889, 8892), False, 'from dynd import nd, ndt\n'), ((11709, 11721), 'dynd.nd.range', 'nd.range', (['(10)'], {}), '(10)\n', (11717, 11721), False, 'from dynd import nd, ndt\n'), ((11788, 11802), 'numpy.dtype', 'np.dtype', (['"""i4"""'], {}), "('i4')\n", (11796, 11802), True, 'import numpy as np\n'), ((11884, 11897), 'dynd.nd.ndim_of', 'nd.ndim_of', (['n'], {}), '(n)\n', (11894, 11897), False, 'from dynd import nd, ndt\n'), ((12075, 12089), 'dynd.nd.as_py', 'nd.as_py', (['n[1]'], {}), '(n[1])\n', (12083, 12089), False, 'from dynd import nd, ndt\n'), ((12277, 12372), 'numpy.dtype', 'np.dtype', (["{'names': ['a', 'b'], 'formats': ['i4', 'f8'], 'offsets': [0, 8],\n 'itemsize': 16}"], {}), "({'names': ['a', 'b'], 'formats': ['i4', 'f8'], 'offsets': [0, 8],\n 'itemsize': 16})\n", (12285, 12372), True, 'import numpy as np\n'), ((13204, 13218), 'dynd.nd.dtype_of', 'nd.dtype_of', (['b'], {}), '(b)\n', (13215, 13218), False, 'from dynd import nd, ndt\n'), ((13220, 13237), 'dynd.ndt.type', 'ndt.type', (['a.dtype'], {}), '(a.dtype)\n', (13228, 13237), False, 'from dynd import nd, ndt\n'), ((13783, 13817), 'dynd.ndt.make_fixed_string', 'ndt.make_fixed_string', (['(7)', '"""utf_32"""'], {}), "(7, 'utf_32')\n", (13804, 13817), False, 'from dynd import nd, ndt\n'), ((13835, 13851), 'dynd.nd.dtype_of', 'nd.dtype_of', (['b_u'], {}), '(b_u)\n', (13846, 13851), False, 'from dynd import nd, ndt\n'), ((13939, 13955), 'dynd.nd.dtype_of', 'nd.dtype_of', (['b_u'], {}), '(b_u)\n', (13950, 13955), False, 'from dynd import nd, ndt\n'), ((13957, 13976), 'dynd.ndt.type', 'ndt.type', (['c_u.dtype'], {}), '(c_u.dtype)\n', (13965, 13976), False, 'from dynd import nd, ndt\n'), ((14129, 14145), 'numpy.all', 'np.all', (['(a == c_u)'], {}), '(a == c_u)\n', (14135, 14145), True, 'import numpy as np\n'), ((14384, 14398), 'dynd.nd.dtype_of', 'nd.dtype_of', (['a'], {}), '(a)\n', (14395, 14398), False, 'from dynd import nd, ndt\n'), ((14646, 14660), 'dynd.nd.dtype_of', 'nd.dtype_of', (['a'], {}), '(a)\n', (14657, 14660), False, 'from dynd import nd, ndt\n'), ((15120, 15134), 'dynd.nd.as_py', 'nd.as_py', (['b[0]'], {}), '(b[0])\n', (15128, 15134), False, 'from dynd import nd, ndt\n'), ((15233, 15247), 'dynd.nd.as_py', 'nd.as_py', (['b[0]'], {}), '(b[0])\n', (15241, 15247), False, 'from dynd import nd, ndt\n'), ((15487, 15501), 'dynd.nd.as_py', 'nd.as_py', (['b[0]'], {}), '(b[0])\n', (15495, 15501), False, 'from dynd import nd, ndt\n'), ((15800, 15856), 'numpy.dtype', 'np.dtype', (["[('x', np.int32), ('y', np.int64)]"], {'align': '(True)'}), "([('x', np.int32), ('y', np.int64)], align=True)\n", (15808, 15856), True, 'import numpy as np\n'), ((15883, 15896), 'dynd.nd.as_py', 'nd.as_py', (['a.x'], {}), '(a.x)\n', (15891, 15896), False, 'from dynd import nd, ndt\n'), ((15940, 15953), 'dynd.nd.as_py', 'nd.as_py', (['a.y'], {}), '(a.y)\n', (15948, 15953), False, 'from dynd import nd, ndt\n'), ((16559, 16615), 'numpy.dtype', 'np.dtype', (["[('x', np.int32), ('y', np.int64)]"], {'align': '(True)'}), "([('x', np.int32), ('y', np.int64)], align=True)\n", (16567, 16615), True, 'import numpy as np\n'), ((16642, 16655), 'dynd.nd.as_py', 'nd.as_py', (['a.x'], {}), '(a.x)\n', (16650, 16655), False, 'from dynd import nd, ndt\n'), ((16699, 16712), 'dynd.nd.as_py', 'nd.as_py', (['a.y'], {}), '(a.y)\n', (16707, 16712), False, 'from dynd import nd, ndt\n'), ((16875, 16892), 
'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (16883, 16892), True, 'import numpy as np\n'), ((17095, 17112), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (17103, 17112), True, 'import numpy as np\n'), ((23733, 23750), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (23741, 23750), True, 'import numpy as np\n'), ((28054, 28067), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (28062, 28067), True, 'import numpy as np\n'), ((28093, 28159), 'numpy.array', 'np.array', (["['this', 'is', 'a', 'test of varlen strings']"], {'dtype': '"""O"""'}), "(['this', 'is', 'a', 'test of varlen strings'], dtype='O')\n", (28101, 28159), True, 'import numpy as np\n'), ((28406, 28474), 'numpy.dtype', 'np.dtype', (["[('x', 'int32'), ('y', 'O'), ('z', 'float64')]"], {'align': '(True)'}), "([('x', 'int32'), ('y', 'O'), ('z', 'float64')], align=True)\n", (28414, 28474), True, 'import numpy as np\n'), ((1441, 1458), 'numpy.dtype', 'np.dtype', (['np.bool'], {}), '(np.bool)\n', (1449, 1458), True, 'import numpy as np\n'), ((1505, 1522), 'numpy.dtype', 'np.dtype', (['np.int8'], {}), '(np.int8)\n', (1513, 1522), True, 'import numpy as np\n'), ((1570, 1588), 'numpy.dtype', 'np.dtype', (['np.int16'], {}), '(np.int16)\n', (1578, 1588), True, 'import numpy as np\n'), ((1636, 1654), 'numpy.dtype', 'np.dtype', (['np.int32'], {}), '(np.int32)\n', (1644, 1654), True, 'import numpy as np\n'), ((1702, 1720), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (1710, 1720), True, 'import numpy as np\n'), ((1768, 1786), 'numpy.dtype', 'np.dtype', (['np.uint8'], {}), '(np.uint8)\n', (1776, 1786), True, 'import numpy as np\n'), ((1835, 1854), 'numpy.dtype', 'np.dtype', (['np.uint16'], {}), '(np.uint16)\n', (1843, 1854), True, 'import numpy as np\n'), ((1903, 1922), 'numpy.dtype', 'np.dtype', (['np.uint32'], {}), '(np.uint32)\n', (1911, 1922), True, 'import numpy as np\n'), ((1971, 1990), 'numpy.dtype', 'np.dtype', (['np.uint64'], {}), '(np.uint64)\n', (1979, 1990), True, 'import numpy as np\n'), ((2040, 2060), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (2048, 2060), True, 'import numpy as np\n'), ((2110, 2130), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (2118, 2130), True, 'import numpy as np\n'), ((2188, 2210), 'numpy.dtype', 'np.dtype', (['np.complex64'], {}), '(np.complex64)\n', (2196, 2210), True, 'import numpy as np\n'), ((2268, 2291), 'numpy.dtype', 'np.dtype', (['np.complex128'], {}), '(np.complex128)\n', (2276, 2291), True, 'import numpy as np\n'), ((2384, 2399), 'numpy.dtype', 'np.dtype', (['"""S10"""'], {}), "('S10')\n", (2392, 2399), True, 'import numpy as np\n'), ((2493, 2508), 'numpy.dtype', 'np.dtype', (['"""U10"""'], {}), "('U10')\n", (2501, 2508), True, 'import numpy as np\n'), ((4313, 4325), 'dynd.ndt.type', 'ndt.type', (['dt'], {}), '(dt)\n', (4321, 4325), False, 'from dynd import nd, ndt\n'), ((4636, 4648), 'dynd.ndt.type', 'ndt.type', (['dt'], {}), '(dt)\n', (4644, 4648), False, 'from dynd import nd, ndt\n'), ((13033, 13067), 'dynd.ndt.make_fixed_string', 'ndt.make_fixed_string', (['(7)', '"""utf_32"""'], {}), "(7, 'utf_32')\n", (13054, 13067), False, 'from dynd import nd, ndt\n'), ((13069, 13083), 'dynd.nd.dtype_of', 'nd.dtype_of', (['b'], {}), '(b)\n', (13080, 13083), False, 'from dynd import nd, ndt\n'), ((13128, 13161), 'dynd.ndt.make_fixed_string', 'ndt.make_fixed_string', (['(7)', '"""ascii"""'], {}), "(7, 'ascii')\n", (13149, 13161), False, 'from dynd import nd, ndt\n'), ((13163, 
13177), 'dynd.nd.dtype_of', 'nd.dtype_of', (['b'], {}), '(b)\n', (13174, 13177), False, 'from dynd import nd, ndt\n'), ((13648, 13682), 'dynd.ndt.make_fixed_string', 'ndt.make_fixed_string', (['(7)', '"""utf_32"""'], {}), "(7, 'utf_32')\n", (13669, 13682), False, 'from dynd import nd, ndt\n'), ((17371, 17385), 'numpy.bool_', 'np.bool_', (['(True)'], {}), '(True)\n', (17379, 17385), True, 'import numpy as np\n'), ((17445, 17458), 'numpy.bool', 'np.bool', (['(True)'], {}), '(True)\n', (17452, 17458), True, 'import numpy as np\n'), ((17518, 17530), 'numpy.int8', 'np.int8', (['(100)'], {}), '(100)\n', (17525, 17530), True, 'import numpy as np\n'), ((17590, 17603), 'numpy.int16', 'np.int16', (['(100)'], {}), '(100)\n', (17598, 17603), True, 'import numpy as np\n'), ((17664, 17677), 'numpy.int32', 'np.int32', (['(100)'], {}), '(100)\n', (17672, 17677), True, 'import numpy as np\n'), ((17738, 17751), 'numpy.int64', 'np.int64', (['(100)'], {}), '(100)\n', (17746, 17751), True, 'import numpy as np\n'), ((17812, 17825), 'numpy.uint8', 'np.uint8', (['(100)'], {}), '(100)\n', (17820, 17825), True, 'import numpy as np\n'), ((17886, 17900), 'numpy.uint16', 'np.uint16', (['(100)'], {}), '(100)\n', (17895, 17900), True, 'import numpy as np\n'), ((17962, 17976), 'numpy.uint32', 'np.uint32', (['(100)'], {}), '(100)\n', (17971, 17976), True, 'import numpy as np\n'), ((18038, 18052), 'numpy.uint64', 'np.uint64', (['(100)'], {}), '(100)\n', (18047, 18052), True, 'import numpy as np\n'), ((18114, 18131), 'numpy.float32', 'np.float32', (['(100.0)'], {}), '(100.0)\n', (18124, 18131), True, 'import numpy as np\n'), ((18193, 18210), 'numpy.float64', 'np.float64', (['(100.0)'], {}), '(100.0)\n', (18203, 18210), True, 'import numpy as np\n'), ((18272, 18292), 'numpy.complex64', 'np.complex64', (['(100.0j)'], {}), '(100.0j)\n', (18284, 18292), True, 'import numpy as np\n'), ((18386, 18407), 'numpy.complex128', 'np.complex128', (['(100.0j)'], {}), '(100.0j)\n', (18399, 18407), True, 'import numpy as np\n'), ((18859, 18873), 'numpy.bool_', 'np.bool_', (['(True)'], {}), '(True)\n', (18867, 18873), True, 'import numpy as np\n'), ((18926, 18941), 'numpy.bool_', 'np.bool_', (['(False)'], {}), '(False)\n', (18934, 18941), True, 'import numpy as np\n'), ((18995, 19007), 'numpy.int8', 'np.int8', (['(100)'], {}), '(100)\n', (19002, 19007), True, 'import numpy as np\n'), ((19059, 19072), 'numpy.int8', 'np.int8', (['(-100)'], {}), '(-100)\n', (19066, 19072), True, 'import numpy as np\n'), ((19125, 19140), 'numpy.int16', 'np.int16', (['(20000)'], {}), '(20000)\n', (19133, 19140), True, 'import numpy as np\n'), ((19194, 19210), 'numpy.int16', 'np.int16', (['(-20000)'], {}), '(-20000)\n', (19202, 19210), True, 'import numpy as np\n'), ((19265, 19285), 'numpy.int32', 'np.int32', (['(1000000000)'], {}), '(1000000000)\n', (19273, 19285), True, 'import numpy as np\n'), ((19344, 19368), 'numpy.int64', 'np.int64', (['(-1000000000000)'], {}), '(-1000000000000)\n', (19352, 19368), True, 'import numpy as np\n'), ((19456, 19479), 'numpy.int64', 'np.int64', (['(1000000000000)'], {}), '(1000000000000)\n', (19464, 19479), True, 'import numpy as np\n'), ((19566, 19587), 'numpy.int32', 'np.int32', (['(-1000000000)'], {}), '(-1000000000)\n', (19574, 19587), True, 'import numpy as np\n'), ((19672, 19685), 'numpy.uint8', 'np.uint8', (['(200)'], {}), '(200)\n', (19680, 19685), True, 'import numpy as np\n'), ((19737, 19753), 'numpy.uint16', 'np.uint16', (['(50000)'], {}), '(50000)\n', (19746, 19753), True, 'import numpy as np\n'), ((19807, 19828), 
'numpy.uint32', 'np.uint32', (['(3000000000)'], {}), '(3000000000)\n', (19816, 19828), True, 'import numpy as np\n'), ((19887, 19918), 'numpy.uint64', 'np.uint64', (['(10000000000000000000)'], {}), '(10000000000000000000)\n', (19896, 19918), True, 'import numpy as np\n'), ((20012, 20027), 'numpy.float32', 'np.float32', (['(2.5)'], {}), '(2.5)\n', (20022, 20027), True, 'import numpy as np\n'), ((20079, 20094), 'numpy.float64', 'np.float64', (['(2.5)'], {}), '(2.5)\n', (20089, 20094), True, 'import numpy as np\n'), ((20146, 20170), 'numpy.complex64', 'np.complex64', (['(2.5 - 1.0j)'], {}), '(2.5 - 1.0j)\n', (20158, 20170), True, 'import numpy as np\n'), ((20221, 20246), 'numpy.complex128', 'np.complex128', (['(2.5 - 1.0j)'], {}), '(2.5 - 1.0j)\n', (20234, 20246), True, 'import numpy as np\n')] |
import os.path
import numpy as np
import librosa
import matplotlib.pyplot as plt
from madmom.audio.signal import *
def featureExtract(FILE_NAME):
try:
        y = Signal(FILE_NAME, sample_rate=16000, dtype=np.float32, num_channels=1)
sr = y.sample_rate
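        # At sr=16000, n_fft=1024 is a 64 ms analysis window and hop_length=160
        # a 10 ms frame shift, yielding an 80-band log-mel spectrogram.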
mel_S = librosa.feature.melspectrogram(y, sr=sr, n_fft=1024, hop_length=160, n_mels=80)
log_mel_S = librosa.power_to_db(mel_S,ref=np.max)
log_mel_S = log_mel_S.astype(np.float32)
return log_mel_S
except Exception as ex:
print('ERROR: ', ex)
def makingTensor(feature,stride):
num_frames = feature.shape[1]
x_data = np.zeros(shape=(num_frames, 75, 80, 1))
total_num = 0
HALF_WIN_LEN = 75 // 2
for j in range(HALF_WIN_LEN, num_frames - HALF_WIN_LEN - 2, stride):
mf_spec = feature[:, range(j - HALF_WIN_LEN, j + HALF_WIN_LEN + 1)]
x_data[total_num, :, :, 0] = mf_spec.T
total_num = total_num + 1
x_data = x_data[:total_num]
x_train_mean = np.load('./x_data_mean_svad_75.npy')
x_train_std = np.load('./x_data_std_svad_75.npy')
x_test = (x_data - x_train_mean) / (x_train_std + 0.0001)
return x_test
if __name__ == '__main__':
    import sys
    # the extractor expects the path to an audio file as its argument
    featureExtract(sys.argv[1])
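# A minimal end-to-end sketch, assuming the two .npy normalization files
# loaded in makingTensor exist; 'speech.wav' and stride=5 are hypothetical
# example values.
#
#     log_mel = featureExtract('speech.wav')    # (80, num_frames) log-mel
#     x_test = makingTensor(log_mel, stride=5)  # (N, 75, 80, 1) windows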
| [
"numpy.load",
"librosa.power_to_db",
"numpy.zeros",
"librosa.feature.melspectrogram"
] | [((641, 680), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_frames, 75, 80, 1)'}), '(shape=(num_frames, 75, 80, 1))\n', (649, 680), True, 'import numpy as np\n'), ((1011, 1047), 'numpy.load', 'np.load', (['"""./x_data_mean_svad_75.npy"""'], {}), "('./x_data_mean_svad_75.npy')\n", (1018, 1047), True, 'import numpy as np\n'), ((1066, 1101), 'numpy.load', 'np.load', (['"""./x_data_std_svad_75.npy"""'], {}), "('./x_data_std_svad_75.npy')\n", (1073, 1101), True, 'import numpy as np\n'), ((281, 360), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['y'], {'sr': 'sr', 'n_fft': '(1024)', 'hop_length': '(160)', 'n_mels': '(80)'}), '(y, sr=sr, n_fft=1024, hop_length=160, n_mels=80)\n', (311, 360), False, 'import librosa\n'), ((381, 419), 'librosa.power_to_db', 'librosa.power_to_db', (['mel_S'], {'ref': 'np.max'}), '(mel_S, ref=np.max)\n', (400, 419), False, 'import librosa\n')] |
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
import numpy as np
from pyannote.database import get_annotated
from abc import ABCMeta, abstractmethod
import chocolate
class Pipeline(metaclass=ABCMeta):
    """Base class for jointly optimized pipelines"""
@abstractmethod
def get_tune_space(self):
pass
@abstractmethod
def get_tune_metric(self):
pass
def with_params(self, **params):
"""Instantiate pipeline with given set of keyword parameters
Must be overridden by sub-classes.
"""
return self
@abstractmethod
def apply(self, current_file):
"""Apply pipeline on current file
Must be overridden by sub-classes.
"""
pass
def objective(self, protocol, subset='development', learning=False):
"""Compute the value of the objective function (the lower, the better)
Parameters
----------
protocol : pyannote.database.Protocol
Protocol on which to compute the value of the objective function.
subset : {'train', 'development', 'test'}, optional
Subset on which to compute the value of the objective function.
Defaults to 'development'.
learning : bool, optional
Set to True to indicate that the pipeline is being tuned and that
the reference can be passed safely to the pipeline. Default
behavior is to remove it from `current_file`. This is useful for
pipelines that may take a looooong time to proceed when the
hypothesis is completely wrong (e.g. too many segments to cluster).
Returns
-------
metric : float
Value of the objective function (the lower, the better).
"""
metric = self.get_tune_metric()
value, duration = [], []
# NOTE -- embarrassingly parallel
# TODO -- parallelize this
for current_file in getattr(protocol, subset)():
uem = get_annotated(current_file)
if learning:
reference = current_file['annotation']
else:
reference = current_file.pop('annotation')
hypothesis = self.apply(current_file)
if hypothesis is None:
return 1.
metric_value = metric(reference, hypothesis, uem=uem)
value.append(metric_value)
duration.append(uem.duration())
# support for pyannote.metrics
if hasattr(metric, '__abs__'):
return abs(metric)
# support for any other metric
else:
return np.average(value, weights=duration)
def best(self, tune_db=None, connection=None):
"""Get (current) best set of hyper-parameters
Parameters
----------
connection : chocolate.SQLiteConnection, optional
Existing connection to SQLite database.
tune_db : str, optional
Path to SQLite database where trial results will be stored. Has no
effect when `connection` is provided.
At least one of `tune_db` or `connection` must be provided.
Returns
-------
status : dict
['loss'] (`float`) best loss so far
['params'] (`dict`) corresponding set of hyper-parameters
['n_trials'] (`int`) total number of trials
"""
if connection is None:
# start connection to SQLite database
# (this is where trials are stored)
connection = chocolate.SQLiteConnection(f'sqlite:///{tune_db}')
# get current best set of hyper-parameter (and its loss)
trials = connection.results_as_dataframe()
best_params = dict(trials.iloc[trials['_loss'].idxmin()])
best_loss = best_params.pop('_loss')
best_params = {name: np.asscalar(value)
for name, value in best_params.items()}
return {'loss': best_loss,
'params': best_params,
'n_trials': len(trials)}
def tune(self, tune_db, protocol, subset='development', n_calls=1,
         sampler=None):
"""Tune pipeline
Parameters
----------
tune_db : str
Path to SQLite database where trial results will be stored.
protocol : pyannote.database.Protocol
Protocol on which to tune the pipeline.
subset : {'train', 'development', 'test'}, optional
Subset on which to tune the pipeline. Defaults to 'development'.
sampler : chocolate sampler, optional
Defaults to chocolate.CMAES
n_calls : int, optional
Number of trials. Defaults to 1.
Set `n_calls` to 0 to obtain best set of params.
Returns
-------
best : dict
['loss'] (`float`) best loss so far
['params'] (`dict`) corresponding set of hyper-parameters
['n_trials'] (`int`) total number of trials
"""
iterations = self.tune_iter(tune_db, protocol, subset=subset,
sampler=sampler)
for i in range(n_calls):
_ = next(iterations)
return self.best(tune_db=tune_db)
def tune_iter(self, tune_db, protocol, subset='development',
sampler=None):
"""Tune pipeline forever
Parameters
----------
tune_db : str
Path to SQLite database where trial results will be stored.
protocol : pyannote.database.Protocol
Protocol on which to tune the pipeline.
subset : {'train', 'development', 'test'}, optional
Subset on which to tune the pipeline. Defaults to 'development'.
sampler : chocolate sampler, optional
Defaults to chocolate.CMAES
Yields
------
status : dict
['latest']['loss'] (`float`) loss obtained by the latest trial
['latest']['params'] (`dict`) corresponding set of hyper-parameters
['latest']['n_trials'] (`int`) total number of trials in this session
['new_best']['loss'] (`float`) best loss so far
['new_best']['params'] (`dict`) corresponding set of hyper-parameters
['new_best']['n_trials'] (`int`) total number of trials
"""
# start connection to SQLite database
# (this is where trials are stored)
connection = chocolate.SQLiteConnection(f'sqlite:///{tune_db}')
# get hyper-parameter space
space = self.get_tune_space()
# instantiate sampler
if sampler is None:
sampler = chocolate.CMAES
sampler = sampler(connection, space)
# TODO add option to use another sampler
i = 0
best = {'loss': np.inf}
while True:
i += 1
# get next set of hyper-parameters to try
token, params = sampler.next()
# instantiate pipeline with this set of parameters
# and compute the objective function
loss = self.with_params(**params).objective(
protocol, subset=subset, learning=True)
latest = {'loss': loss, 'params': params, 'n_trials': i}
# tell the sampler what was the result
sampler.update(token, loss)
if loss < best['loss'] or i == 1:
# if loss is better than previous known best
# check in the database what is the current best
best = self.best(connection=connection)
yield {'latest': latest, 'new_best': best}
else:
yield {'latest': latest}
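# A minimal concrete subclass sketch (hypothetical, not part of pyannote):
# `threshold` is an invented hyper-parameter and the detection error rate is
# one plausible choice of tuning metric.
class ThresholdPipeline(Pipeline):

    def get_tune_space(self):
        # chocolate distributions define the hyper-parameter search space
        return {'threshold': chocolate.uniform(0., 1.)}

    def get_tune_metric(self):
        # any metric(reference, hypothesis, uem=...) callable works here
        from pyannote.metrics.detection import DetectionErrorRate
        return DetectionErrorRate()

    def with_params(self, threshold=0.5, **params):
        self.threshold = threshold
        return self

    def apply(self, current_file):
        # a real pipeline would threshold raw scores here; returning None
        # makes `objective` fall back to the worst possible loss (1.0)
        return None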
| [
"chocolate.SQLiteConnection",
"numpy.average",
"pyannote.database.get_annotated",
"numpy.asscalar"
] | [((7585, 7635), 'chocolate.SQLiteConnection', 'chocolate.SQLiteConnection', (['f"""sqlite:///{tune_db}"""'], {}), "(f'sqlite:///{tune_db}')\n", (7611, 7635), False, 'import chocolate\n'), ((3157, 3184), 'pyannote.database.get_annotated', 'get_annotated', (['current_file'], {}), '(current_file)\n', (3170, 3184), False, 'from pyannote.database import get_annotated\n'), ((3788, 3823), 'numpy.average', 'np.average', (['value'], {'weights': 'duration'}), '(value, weights=duration)\n', (3798, 3823), True, 'import numpy as np\n'), ((4705, 4755), 'chocolate.SQLiteConnection', 'chocolate.SQLiteConnection', (['f"""sqlite:///{tune_db}"""'], {}), "(f'sqlite:///{tune_db}')\n", (4731, 4755), False, 'import chocolate\n'), ((5013, 5031), 'numpy.asscalar', 'np.asscalar', (['value'], {}), '(value)\n', (5024, 5031), True, 'import numpy as np\n')] |
import warnings
from copy import deepcopy
import numpy as np
from pydace import Dace
from scipy.linalg import norm
from ..core.nlp import optimize_nlp
from ..core.options.nlp import DockerNLPOptions, NLPOptions
from ..core.procedures import InfillProcedure
from ..core.procedures.output import Report
from ..core.utils import (get_samples_index, is_row_member,
point_domain_distance)
from .problem import CaballeroOptions, CaballeroReport, is_inside_hypercube
class Caballero(InfillProcedure):
# TODO: (docstring) Add notes and example section. Also the optimization
# problem description
"""Rigorous optimization of nonlinear programming problems (NLP) in which
the objective function and/or some constraints are represented by noisy
implicit black box functions. [1]_
Parameters
----------
x : np.ndarray
Input variables of the Design of Experiments (DOE). Has to be a 2D
array, with no duplicated rows.
g : np.ndarray
Constraint values (:math:`g(x) <= 0`) of the DOE. Has to be a 2D array
with no duplicated rows.
f : np.ndarray
Objective function values of the DOE. Has to be a 1D array with no
duplicated values.
model_function : callable function
Black box function callable object that evaluates the objective and
constraints functions and returns them as dictionary object. See Notes
on how to implement such function.
lb : np.ndarray
Lower bound of the input variables. Has to be 1D array with the number
of elements being the same as the number of columns of `x`.
ub : np.ndarray
Upper bound of the input variables. Has to be 1D array with the number
of elements being the same as the number of columns of `x`.
regression : str
Kriging mean regression model. Valid values are: 'poly0', 'poly1' and
'poly2'.
options : CaballeroOptions, optional
Optimization procedure options. See `CaballeroOptions` for which
parameters can be tweaked. Default is None, where a default instance of
the options class is initialized. (The default values are described in
the class `CaballeroOptions`).
nlp_options : NLOPtions subclass, optional
NLP solver options structure. Default is None, where a default instance
of `DockerNLPOptions` is created with a server url pointing to the
localhost address through port 5000 in order to the optimization
problem to be solved by a flask application inside a WSL (Windows
Subsystem for Linux) environment. This application uses IpOpt solver.
report_options : Report (class or subclass), optional
How to report the optimization procedure progress. Whether to print in
terminal or plot each iteration. Default is set to print in terminal.
References
----------
.. [1] <NAME>, <NAME>. "An Algorithm for the Use of
Surrogate Models in Modular Flowsheet Optimization". AIChE journal,
vol. 54.10 (2008), pp. 2633-2650, 2008.
"""
def __init__(self, x: np.ndarray, g: np.ndarray, f: np.ndarray,
model_function, lb: np.ndarray, ub: np.ndarray,
regression: str, options: CaballeroOptions = None,
nlp_options: NLPOptions = None,
report_options: Report = None):
# kriging regression model
self.regression = regression
# proceed with default options for caballero procedure if none defined
options = CaballeroOptions() if options is None else options
# proceed with default options for NLP solver as docker server
nlp_options = DockerNLPOptions(name='wsl-server',
server_url='http://localhost:5000') \
if nlp_options is None else nlp_options
# proceed with default options for procedure report output
report_options = CaballeroReport(terminal=True, plot=False) \
if report_options is None else report_options
# initialize mother class
super().__init__(x=x, g=g, f=f, model_function=model_function, lb=lb,
ub=ub, options=options, nlp_options=nlp_options,
report_options=report_options)
# perform a setup check
self.check_setup()
# initial surrogate construction
self.build_surrogate(optimize=True)
def check_setup(self):
# perform basic checkup
super().check_setup()
# search for feasible points in the initial sampling
best_feas_idx = self.search_best_feasible_index(g=self.g, f=self.f)
if best_feas_idx is None:
raise IndexError("No initial feasible point found. You need at "
"least one feasible case in the sample.")
else:
self.best_feas_idx = best_feas_idx
def search_best_feasible_index(self, g: np.ndarray, f: np.ndarray) -> int:
# get feasible points indexes for a given set of samples
feas_idx = np.nonzero(np.all(g <= self.options.feasible_tol,
axis=1))[0]
if feas_idx.size == 0:
# no feasible point found, return None
return None
else:
# get feasible objective function values
feas_obj = f[feas_idx]
# get best feasible index
# the np.argmin already handles multiple minima as first occurrence
return feas_idx[np.argmin(feas_obj)].item()
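    # Example of the selection rule above: with feasible_tol = 1e-4 and
    # g = [[0.0, -1.0], [2.0, -3.0]], only row 0 passes np.all(g <= tol,
    # axis=1), so the objective value of row 0 decides the returned index.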
def build_surrogate(self, optimize=False, x=None, f=None, g=None) -> None:
"""Builds the objective and constraints surrogates based on whether or
not to optimize their hyperparameters.
Parameters
----------
optimize : bool, optional
Whether or not optimize the hyperparameters, by default False.
x : np.ndarray, optional
The input data to be used. If None, use the initial values from
construction.
f : np.ndarray, optional
The function objective data to be used. If None, use the initial
values from construction.
g : np.ndarray, optional
The constraint data to be used. If None, use the initial values
from construction.
"""
x = self.x if x is None else x
g = self.g if g is None else g
f = self.f if f is None else f
n = x.shape[1]
q = g.shape[1]
if optimize:
# optimize hyperparameters
# initial theta
one = np.ones((n,))
theta0 = one
lob = 1e-12 * theta0
upb = 1e2 * theta0
surr_obj = Dace(regression=self.regression,
correlation='corrgauss')
surr_obj.fit(S=x, Y=f, theta0=theta0, lob=lob, upb=upb)
# constraints metamodel
surr_con = []
for i in range(q):
obj_ph = Dace(regression=self.regression,
correlation='corrgauss')
obj_ph.fit(S=x, Y=g[:, i], theta0=theta0, lob=lob, upb=upb)
surr_con.append(obj_ph)
# store models
self.surr_obj = surr_obj
self.surr_con = surr_con
else:
# do not optimize hyperparameters, use previous model data
if not hasattr(self, 'surr_obj') or not hasattr(self, 'surr_con'):
# no previous model found, use recursion to build it
self.build_surrogate(optimize=True, x=x, g=g, f=f)
else:
# objective function
self.surr_obj.fit(S=x, Y=f,
theta0=self.surr_obj.theta.flatten())
# constraints
for idx, obj in enumerate(self.surr_con):
obj.fit(S=x, Y=g[:, idx], theta0=obj.theta.flatten())
def optimize(self):
super().optimize()
# initialize counter and domains
self.k, self.j = 1, 1
lb, ub = self.lb, self.ub
self.lbopt, self.hlb, self.dlb = deepcopy(lb), deepcopy(lb), \
deepcopy(lb)
self.ubopt, self.hub, self.dub = deepcopy(ub), deepcopy(ub), \
deepcopy(ub)
self.fun_evals, self.move_num, self.contract_num = 0, 0, 0
# initial NLP solver estimate
x0 = self.x[self.best_feas_idx, :].flatten()
# create internal variables of caballero
self._xlhs = deepcopy(self.x)
self._gobs = deepcopy(self.g)
self._fobs = deepcopy(self.f)
# initialize xstar, gstar and fstar
self._xstar = x0.reshape(1, -1)
self._gstar = self._gobs[self.best_feas_idx, :].reshape(1, -1)
self._fstar = self._fobs[self.best_feas_idx].flatten()
# termination flag
self.terminated = False
# movement flag
self._last_move = 'None'
# print headers if terminal is specified
self.report_options.print_iteration(movement=self._last_move,
                                    iter_count=None,
                                    x=x0.tolist(),
                                    f_pred=None,
                                    f_actual=None,
                                    g_actual=None,
                                    color_font=None,
                                    header=True)
while not self.terminated:
sol = optimize_nlp(procedure=self, x=self._xlhs, g=self._gobs,
f=self._fobs, nlp_options=self.nlp_options,
x0=x0.tolist(), lb=self.lbopt.tolist(),
ub=self.ubopt.tolist())
xjk, fjk, exitflag = sol['x'], sol['fval'], sol['exitflag']
if self.fun_evals >= self.options.max_fun_evals:
warnings.warn("Maximum number of function evaluations "
"achieved!")
# search feasible indexes
feas_idx = self.search_best_feasible_index(self._gobs,
self._fobs)
if feas_idx is not None:
rpt = self.report_options
rpt.get_results_report(index=feas_idx,
r=0.005, x=self._xlhs,
f=self._fobs, lb=self.lb,
ub=self.ub,
fun_evals=self.fun_evals)
# break loop
self.terminated = True
# store results as class variables
self.xopt = self._xlhs[feas_idx, :].flatten()
self.gopt = self._gobs[feas_idx, :].flatten()
self.fopt = self._fobs[feas_idx]
if not is_row_member(xjk, self._xlhs):
sampled_results = self.sample_model(xjk)
# update sample data
self._xlhs = np.vstack((self._xlhs, xjk))
self._gobs = np.vstack((self._gobs, sampled_results['g']))
self._fobs = np.append(self._fobs, sampled_results['f'])
self.fun_evals += 1
# iteration display
if exitflag < 1:
# infeasible
color_font = 'red'
else:
# feasible
color_font = None
max_feas = np.max(sampled_results['g'])
self.report_options.print_iteration(movement=self._last_move,
iter_count=self.j,
x=xjk.tolist(),
f_pred=fjk,
f_actual=sampled_results['f'],
g_actual=max_feas,
color_font=color_font)
self.report_options.plot_iteration()
else:
# couldn't improve from last iteration
xjk = self.refine()
x0 = deepcopy(xjk)
# whether or not to update kriging parameters after refinement phase
optimize_hyp = True if self.j == 1 else False
self.build_surrogate(x=self._xlhs, f=self._fobs, g=self._gobs,
optimize=optimize_hyp)
# Starting from xjk solve the NLP to get xj1k
sol = optimize_nlp(procedure=self, x=self._xlhs, g=self._gobs,
f=self._fobs, nlp_options=self.nlp_options,
x0=xjk.tolist(), lb=self.lbopt.tolist(),
ub=self.ubopt.tolist())
xj1k, fj1k, exitflag = sol['x'], sol['fval'], sol['exitflag']
if point_domain_distance(xjk, xj1k, lb, ub) >= \
self.options.ref_tol:
# there was improvement, keep going
xjk = xj1k
self.j += 1
else:
xjk = self.refine()
x0 = deepcopy(xjk)
def sample_model(self, x: np.ndarray):
sampled_data = self.model_function(x)
f = sampled_data['f']
g = sampled_data['g']
# TODO: implement other parameters capture in 'extras' key
# TODO: output data sanitation (g, f, and extras)
return {'g': g, 'f': f}
def refine(self):
# find best value inserted
x_ins = self._xlhs[self.m:, :]
g_ins = self._gobs[self.m:, :]
f_ins = self._fobs[self.m:]
best_idx = self.search_best_feasible_index(g_ins, f_ins)
if best_idx is not None:
# TODO: check for duplicates. If so, perform contraction on best
self._xstar = np.vstack((self._xstar, x_ins[best_idx, :]))
self._gstar = np.vstack((self._gstar, g_ins[best_idx, :]))
self._fstar = np.append(self._fstar, f_ins[best_idx])
else:
# no best sampled value found in the inserted points, just insert
# the best value in the initial sample (i.e. no improvement).
# This block shouldn't be possible. The else is a "just in case".
# TODO: perform a contraction on the best feasible point instead
best_init = self.search_best_feasible_index(self.g, self.f)
raise NotImplementedError("Perform a contraction move.")
# select the best point to be centered
best_star = self.search_best_feasible_index(self._gstar, self._fstar)
xstark = self._xstar[best_star, :].flatten()
if self.contract_num == 0:
contract_factor = self.options.first_factor
else:
contract_factor = self.options.second_factor
# refine the hypercube limits
self.refine_hypercube(xstark, contract_factor)
# search for best feasible point
feas_idx = self.search_best_feasible_index(self._gobs,
self._fobs)
# check for termination
if point_domain_distance(self._xstar[-1, :],
self._xstar[-2, :], self.lb, self.ub) <= \
self.options.term_tol:
xstark = self._xlhs[feas_idx, :].flatten()
# check if at least a contraction was made
if self.contract_num > 0:
# if so, terminate the algorithm
if feas_idx is not None:
rpt = self.report_options
rpt.get_results_report(index=feas_idx,
r=0.005,
x=self._xlhs,
f=self._fobs,
lb=self.lb,
ub=self.ub,
fun_evals=self.fun_evals)
# break loop
self.terminated = True
# store results as class variables
self.xopt = self._xlhs[feas_idx, :].flatten()
self.gopt = self._gobs[feas_idx, :].flatten()
self.fopt = self._fobs[feas_idx]
else:
# perform a large contraction if no contraction done
self.refine_hypercube(xstark, contract_factor=0.9999)
# update move counter and reset iteration counter
self.k += 1
self.j = 1
return xstark
def refine_hypercube(self, xstark: np.ndarray, contract_factor: float):
d_range = self.hub - self.hlb
# check if xstark is at domain bound
if is_inside_hypercube(xstark, self.dlb, self.dub):
# inside original domain
if is_inside_hypercube(xstark, self.hlb, self.hub) and \
norm(self.hub - self.hlb) / norm(self.dub - self.dlb) >= \
self.options.contraction_tol:
# its inside hypercube, center and contract
self._perform_contraction(xstark, contract_factor, d_range)
else:
# its at hypercube limit, center and move
self._perform_move(xstark, d_range)
else:
# at domain limit
if is_inside_hypercube(xstark, self.hlb, self.hub) and \
norm(self.hub - self.hlb) / norm(self.dub - self.dlb) >= \
self.options.contraction_tol:
# its inside hypercube, center and contract
self._perform_contraction(xstark, contract_factor, d_range)
else:
# its at hypercube limit, center and move
self._perform_move(xstark, d_range)
# update optimization bounds and adjust them if needed
self.lbopt = deepcopy(self.hlb)
self.ubopt = deepcopy(self.hub)
floor_lb = np.less(self.lbopt, self.dlb)
if np.any(floor_lb):
self.lbopt[floor_lb] = self.dlb[floor_lb]
ceil_ub = np.greater(self.ubopt, self.dub)
if np.any(ceil_ub):
self.ubopt[ceil_ub] = self.dub[ceil_ub]
def _perform_contraction(self, xstark, contract_factor, d_range):
red_factor = (1 - contract_factor) * d_range / 2
self.hlb = xstark - red_factor
self.hub = xstark + red_factor
self.contract_num += 1
# inserted points
x_ins = self._xlhs[self.m:, :]
g_ins = self._gobs[self.m:, :]
f_ins = self._fobs[self.m:]
# each contraction discards the points outside new hypercube
idx = get_samples_index(x_ins, self.hlb, self.hub)
self._xlhs = np.vstack((self.x, x_ins[idx, :]))
self._gobs = np.vstack((self.g, g_ins[idx, :]))
self._fobs = np.append(self.f, f_ins[idx])
self._last_move = 'Contraction'
def _perform_move(self, xstark, d_range):
red_factor = d_range / 2
self.hlb = xstark - red_factor
self.hub = xstark + red_factor
self.move_num += 1
self._last_move = 'Movement'
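# A minimal sketch of the black-box callable expected as `model_function`,
# matching the {'f': ..., 'g': ...} contract consumed by `sample_model`
# above; the quadratic toy problem itself is hypothetical.
def toy_model_function(x):
    x = np.asarray(x, dtype=float).flatten()
    f = float(np.sum(x ** 2))            # objective value f(x)
    g = np.atleast_1d(1.0 - np.sum(x))   # constraint g(x) <= 0
    return {'f': f, 'g': g}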
| [
"copy.deepcopy",
"numpy.greater",
"numpy.ones",
"numpy.all",
"numpy.argmin",
"numpy.any",
"numpy.append",
"numpy.max",
"scipy.linalg.norm",
"numpy.less",
"warnings.warn",
"pydace.Dace",
"numpy.vstack"
] | [((8576, 8592), 'copy.deepcopy', 'deepcopy', (['self.x'], {}), '(self.x)\n', (8584, 8592), False, 'from copy import deepcopy\n'), ((8614, 8630), 'copy.deepcopy', 'deepcopy', (['self.g'], {}), '(self.g)\n', (8622, 8630), False, 'from copy import deepcopy\n'), ((8652, 8668), 'copy.deepcopy', 'deepcopy', (['self.f'], {}), '(self.f)\n', (8660, 8668), False, 'from copy import deepcopy\n'), ((18105, 18123), 'copy.deepcopy', 'deepcopy', (['self.hlb'], {}), '(self.hlb)\n', (18113, 18123), False, 'from copy import deepcopy\n'), ((18145, 18163), 'copy.deepcopy', 'deepcopy', (['self.hub'], {}), '(self.hub)\n', (18153, 18163), False, 'from copy import deepcopy\n'), ((18184, 18213), 'numpy.less', 'np.less', (['self.lbopt', 'self.dlb'], {}), '(self.lbopt, self.dlb)\n', (18191, 18213), True, 'import numpy as np\n'), ((18225, 18241), 'numpy.any', 'np.any', (['floor_lb'], {}), '(floor_lb)\n', (18231, 18241), True, 'import numpy as np\n'), ((18316, 18348), 'numpy.greater', 'np.greater', (['self.ubopt', 'self.dub'], {}), '(self.ubopt, self.dub)\n', (18326, 18348), True, 'import numpy as np\n'), ((18360, 18375), 'numpy.any', 'np.any', (['ceil_ub'], {}), '(ceil_ub)\n', (18366, 18375), True, 'import numpy as np\n'), ((18959, 18993), 'numpy.vstack', 'np.vstack', (['(self.x, x_ins[idx, :])'], {}), '((self.x, x_ins[idx, :]))\n', (18968, 18993), True, 'import numpy as np\n'), ((19015, 19049), 'numpy.vstack', 'np.vstack', (['(self.g, g_ins[idx, :])'], {}), '((self.g, g_ins[idx, :]))\n', (19024, 19049), True, 'import numpy as np\n'), ((19071, 19100), 'numpy.append', 'np.append', (['self.f', 'f_ins[idx]'], {}), '(self.f, f_ins[idx])\n', (19080, 19100), True, 'import numpy as np\n'), ((6653, 6666), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (6660, 6666), True, 'import numpy as np\n'), ((6780, 6837), 'pydace.Dace', 'Dace', ([], {'regression': 'self.regression', 'correlation': '"""corrgauss"""'}), "(regression=self.regression, correlation='corrgauss')\n", (6784, 6837), False, 'from pydace import Dace\n'), ((8195, 8207), 'copy.deepcopy', 'deepcopy', (['lb'], {}), '(lb)\n', (8203, 8207), False, 'from copy import deepcopy\n'), ((8209, 8221), 'copy.deepcopy', 'deepcopy', (['lb'], {}), '(lb)\n', (8217, 8221), False, 'from copy import deepcopy\n'), ((8237, 8249), 'copy.deepcopy', 'deepcopy', (['lb'], {}), '(lb)\n', (8245, 8249), False, 'from copy import deepcopy\n'), ((8291, 8303), 'copy.deepcopy', 'deepcopy', (['ub'], {}), '(ub)\n', (8299, 8303), False, 'from copy import deepcopy\n'), ((8305, 8317), 'copy.deepcopy', 'deepcopy', (['ub'], {}), '(ub)\n', (8313, 8317), False, 'from copy import deepcopy\n'), ((8333, 8345), 'copy.deepcopy', 'deepcopy', (['ub'], {}), '(ub)\n', (8341, 8345), False, 'from copy import deepcopy\n'), ((14084, 14128), 'numpy.vstack', 'np.vstack', (['(self._xstar, x_ins[best_idx, :])'], {}), '((self._xstar, x_ins[best_idx, :]))\n', (14093, 14128), True, 'import numpy as np\n'), ((14155, 14199), 'numpy.vstack', 'np.vstack', (['(self._gstar, g_ins[best_idx, :])'], {}), '((self._gstar, g_ins[best_idx, :]))\n', (14164, 14199), True, 'import numpy as np\n'), ((14226, 14265), 'numpy.append', 'np.append', (['self._fstar', 'f_ins[best_idx]'], {}), '(self._fstar, f_ins[best_idx])\n', (14235, 14265), True, 'import numpy as np\n'), ((5118, 5164), 'numpy.all', 'np.all', (['(g <= self.options.feasible_tol)'], {'axis': '(1)'}), '(g <= self.options.feasible_tol, axis=1)\n', (5124, 5164), True, 'import numpy as np\n'), ((7053, 7110), 'pydace.Dace', 'Dace', ([], {'regression': 'self.regression', 
'correlation': '"""corrgauss"""'}), "(regression=self.regression, correlation='corrgauss')\n", (7057, 7110), False, 'from pydace import Dace\n'), ((10049, 10114), 'warnings.warn', 'warnings.warn', (['"""Maximum number of function evaluations achieved!"""'], {}), "('Maximum number of function evaluations achieved!')\n", (10062, 10114), False, 'import warnings\n'), ((11208, 11236), 'numpy.vstack', 'np.vstack', (['(self._xlhs, xjk)'], {}), '((self._xlhs, xjk))\n', (11217, 11236), True, 'import numpy as np\n'), ((11266, 11311), 'numpy.vstack', 'np.vstack', (["(self._gobs, sampled_results['g'])"], {}), "((self._gobs, sampled_results['g']))\n", (11275, 11311), True, 'import numpy as np\n'), ((11341, 11384), 'numpy.append', 'np.append', (['self._fobs', "sampled_results['f']"], {}), "(self._fobs, sampled_results['f'])\n", (11350, 11384), True, 'import numpy as np\n'), ((11683, 11711), 'numpy.max', 'np.max', (["sampled_results['g']"], {}), "(sampled_results['g'])\n", (11689, 11711), True, 'import numpy as np\n'), ((12405, 12418), 'copy.deepcopy', 'deepcopy', (['xjk'], {}), '(xjk)\n', (12413, 12418), False, 'from copy import deepcopy\n'), ((13387, 13400), 'copy.deepcopy', 'deepcopy', (['xjk'], {}), '(xjk)\n', (13395, 13400), False, 'from copy import deepcopy\n'), ((5561, 5580), 'numpy.argmin', 'np.argmin', (['feas_obj'], {}), '(feas_obj)\n', (5570, 5580), True, 'import numpy as np\n'), ((17142, 17167), 'scipy.linalg.norm', 'norm', (['(self.hub - self.hlb)'], {}), '(self.hub - self.hlb)\n', (17146, 17167), False, 'from scipy.linalg import norm\n'), ((17170, 17195), 'scipy.linalg.norm', 'norm', (['(self.dub - self.dlb)'], {}), '(self.dub - self.dlb)\n', (17174, 17195), False, 'from scipy.linalg import norm\n'), ((17646, 17671), 'scipy.linalg.norm', 'norm', (['(self.hub - self.hlb)'], {}), '(self.hub - self.hlb)\n', (17650, 17671), False, 'from scipy.linalg import norm\n'), ((17674, 17699), 'scipy.linalg.norm', 'norm', (['(self.dub - self.dlb)'], {}), '(self.dub - self.dlb)\n', (17678, 17699), False, 'from scipy.linalg import norm\n')] |
from rollerpy.models.curve import Curve, ParametricCurve, NoramlizedCurve
import numpy as np
class HelixCircleParam(Curve, ParametricCurve, NoramlizedCurve):
def __init__(
self, A, B, C=1, tmin=0, tmax=np.pi, n=100, initialPosition=[0, 0, 0]
):
# Helix parameters
self.A = A
self.B = B
self.C = C
self.tmin = tmin
self.tmax = tmax
# Initial positions
self._setInitialPosition(initialPosition)
# Calculations
self.t = np.linspace(self.tmin, self.tmax, n)
self._calcParameters()
self._calcDerivative()
def _setInitialPosition(self, initialPosition):
self.x0 = initialPosition[0]
self.y0 = initialPosition[1]
self.z0 = initialPosition[2]
def _calcParameters(self):
self.x = self.B*np.cos(self.t) + self.x0
self.y = self.C*self.t + self.y0 - self.tmin
self.z = self.A*np.sin(self.t) + self.z0
def _calcDerivative(self):
self.dx = -1*self.B*np.sin(self.t)
self.dy = self.t*0 + self.C
self.dz = self.A*np.cos(self.t)
class InvHelixCircleParam(HelixCircleParam):
def _calcParameters(self):
self.x = self.B*np.cos(self.t) + self.x0
self.y = -self.C*(self.t - self.tmax) + self.y0 - self.tmin
self.z = self.A*np.sin(self.t) + self.z0
def _calcDerivative(self):
self.dx = -1*self.B*np.sin(self.t)
self.dy = self.t*0 - self.C
self.dz = self.A*np.cos(self.t)
class Line(Curve, ParametricCurve, NoramlizedCurve):
def __init__(self, point1, point2, tmin=0, tmax=1, n=100):
self._point1 = point1
self._point2 = point2
self._v = [
(point2[0] - point1[0])/tmax,
(point2[1] - point1[1])/tmax,
(point2[2] - point1[2])/tmax
]
self.t = np.linspace(tmin, tmax, n)
self._calcParameters()
self._calcDerivative()
def _calcParameters(self):
self.x = self._point1[0] + self.t*self._v[0]
self.y = self._point1[1] + self.t*self._v[1]
self.z = self._point1[2] + self.t*self._v[2]
def _calcDerivative(self):
self.dx = 0*self.t + (self._point2[0] - self._point1[0])/self.t[-1]
self.dy = 0*self.t + (self._point2[1] - self._point1[1])/self.t[-1]
self.dz = 0*self.t + (self._point2[2] - self._point1[2])/self.t[-1]
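# A short usage sketch with arbitrary example parameters, assuming the
# imported rollerpy base classes allow direct instantiation.
if __name__ == '__main__':
    helix = HelixCircleParam(A=1.0, B=1.0, C=0.5, n=100)
    # x/y/z hold the sampled curve, dx/dy/dz its (unnormalized) tangent
    print(helix.x.shape, helix.dx[0], helix.dy[0], helix.dz[0])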
| [
"numpy.sin",
"numpy.cos",
"numpy.linspace"
] | [((518, 554), 'numpy.linspace', 'np.linspace', (['self.tmin', 'self.tmax', 'n'], {}), '(self.tmin, self.tmax, n)\n', (529, 554), True, 'import numpy as np\n'), ((1866, 1892), 'numpy.linspace', 'np.linspace', (['tmin', 'tmax', 'n'], {}), '(tmin, tmax, n)\n', (1877, 1892), True, 'import numpy as np\n'), ((1025, 1039), 'numpy.sin', 'np.sin', (['self.t'], {}), '(self.t)\n', (1031, 1039), True, 'import numpy as np\n'), ((1101, 1115), 'numpy.cos', 'np.cos', (['self.t'], {}), '(self.t)\n', (1107, 1115), True, 'import numpy as np\n'), ((1421, 1435), 'numpy.sin', 'np.sin', (['self.t'], {}), '(self.t)\n', (1427, 1435), True, 'import numpy as np\n'), ((1497, 1511), 'numpy.cos', 'np.cos', (['self.t'], {}), '(self.t)\n', (1503, 1511), True, 'import numpy as np\n'), ((838, 852), 'numpy.cos', 'np.cos', (['self.t'], {}), '(self.t)\n', (844, 852), True, 'import numpy as np\n'), ((940, 954), 'numpy.sin', 'np.sin', (['self.t'], {}), '(self.t)\n', (946, 954), True, 'import numpy as np\n'), ((1219, 1233), 'numpy.cos', 'np.cos', (['self.t'], {}), '(self.t)\n', (1225, 1233), True, 'import numpy as np\n'), ((1336, 1350), 'numpy.sin', 'np.sin', (['self.t'], {}), '(self.t)\n', (1342, 1350), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
""" Code for generating plots
"""
import matplotlib.pyplot as plt
import mne
import neo
import nibabel as nib
from nibabel.affines import apply_affine
import nilearn
from nilearn.input_data import NiftiMasker
from nilearn.mass_univariate import permuted_ols
from nilearn.plotting import plot_stat_map
import numpy as np
import numpy.linalg as npl
import pandas as pd
from scipy import stats as sps
from . import gin
def plot_eeg(eeg, depth, label_color='black'):
""" Plots eeg data for a given depth electrode
Arguments:
eeg : MNE raw object
contains EEG data to plot
depth : string
name of depth electrode
Keyword Arguments:
label_color : string
color of axis labels (default: {'black'})
Returns:
matplotlib figure
"""
channels = [channel for channel in eeg.ch_names if depth in channel]
electrode = eeg.copy().pick_channels(channels)
data, times = electrode.get_data(return_times=True)
rows = len(channels)
fig, ax = plt.subplots(rows, 1, sharex=True)
for i in range(rows):
ax[i].plot(times, data[i, :])
ax[i].spines['right'].set_visible(False)
ax[i].spines['top'].set_visible(False)
ax[i].spines['bottom'].set_visible(False)
ax[i].spines['left'].set_visible(False)
ax[i].yaxis.set_major_locator(plt.NullLocator())
ax[i].tick_params(bottom=False, left=False)
ax[i].set_ylabel(channels[i], labelpad=10, rotation=0,
color=label_color)
ax[-1].spines['bottom'].set_visible(True)
ax[-1].tick_params(bottom=True, colors=label_color)
ax[-1].set_xlabel('time (s)', color=label_color)
return fig
def plot_channel_ave_power(seizure, channel='g7-g8'):
""" plot average power for both baseline and seizure eeg data
for a single channel
Parameters
----------
seizure : EEG | dict
eeg data
channel : str, optional
bipolar eeg channel name, by default 'g7-g8'
"""
plt.figure()
b_times = seizure['baseline']['bipolar'].times
s_times = seizure['seizure']['bipolar'].times
index = seizure['baseline']['bipolar'].ch_names.index(channel)
plt.plot(b_times, seizure['baseline']['ave_power'][index, :],
s_times, seizure['seizure']['ave_power'][index, :])
def plot_power(power, ch_names, depth, label_color='black'):
""" plot EEG power
Parameters
----------
power : ndarray
array of EEG power
ch_names : list
list of channel names
depth : string
name of depth electrode whose power is being plotted
label_color : str, optional
color of axis labels, by default 'black'
Returns
-------
matplotlib figure
plot of EEG power
"""
rows = [i for i in range(len(ch_names)) if depth in ch_names[i]]
labels = [ch_names[row] for row in rows]
fig, ax = plt.subplots(len(rows), 1, sharex=True)
for i, row in enumerate(rows):
ax[i].imshow(power[0, row, :, :])
ax[i].set_ylabel(labels[i], labelpad=25, rotation=0, color=label_color)
ax[i].yaxis.set_major_locator(plt.NullLocator())
ax[i].tick_params(bottom=False, left=False)
ax[-1].spines['bottom'].set_visible(True)
ax[-1].tick_params(bottom=True, colors=label_color)
ax[-1].set_xlabel('time (s)', color=label_color)
return fig
def calc_z_scores(baseline, seizure):
""" This function is meant to generate the figures shown in the Brainstorm
demo used to select the 120-200 Hz frequency band. It should also
be similar to panel 2 in figure 1 in David et al 2011.
This function will compute a z-score for each value of the seizure power
spectrum using the mean and sd of the control power spectrum at each
frequency. In the demo, the power spectrum is calculated for the 1st
10 seconds of all three seizures and then averaged. Controls are
similarly averaged.
Parameters
----------
baseline : ndarray
power spectrum of baseline EEG
seizure : ndarray
power spectrum of seizure EEG
Returns
-------
ndarray
seizure power spectrum scaled to a z-score by baseline power spectrum
mean and SD
"""
mean = np.mean(baseline, 1)
sd = np.std(baseline, 1)
z_scores = (seizure - mean)/sd
return z_scores
def plot_z_scores(times, freqs, z_scores, ch_names, depth,
label_color='black'):
""" Plots Z-scores
Parameters
----------
times : ndarray
x-axis of plot
freqs : ndarray
y-axis of plot
z_scores : ndarray
array of Z-scores being plotted by color code
ch_names : list
list of channel names
depth : string
name of depth being plotted
label_color : str, optional
color of axis labels, by default 'black'
Returns
-------
matplotlib figure
color coded Z-score plot
"""
rows = [i for i in range(len(ch_names)) if depth in ch_names[i]]
labels = [ch_names[row] for row in rows]
fig, ax = plt.subplots(len(rows), 1, sharex=True)
for i, row in enumerate(rows):
im = ax[i].pcolormesh(times, freqs, z_scores[0, row, :, :], cmap='hot')
ax[i].set_ylabel(labels[i], labelpad=25, rotation=0, color=label_color)
ax[i].yaxis.set_major_locator(plt.NullLocator())
ax[i].tick_params(bottom=False, left=False)
ax[-1].spines['bottom'].set_visible(True)
ax[-1].tick_params(bottom=True, colors=label_color)
ax[-1].set_xlabel('time (s)', color=label_color)
cb = fig.colorbar(im, ax=ax)
cb.ax.tick_params(axis='y', colors=label_color)
return fig
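if __name__ == '__main__':
    # A self-contained sketch of `calc_z_scores` on synthetic spectra
    # (hypothetical numbers, not real EEG): `baseline` holds ten trial
    # spectra over 50 frequencies, `seizure` a single averaged spectrum.
    rng = np.random.default_rng(0)
    baseline = rng.random((50, 10))
    seizure = rng.random(50) + 0.5
    print(calc_z_scores(baseline, seizure).shape)  # -> (50,)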
| [
"matplotlib.pyplot.NullLocator",
"matplotlib.pyplot.plot",
"numpy.std",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.subplots"
] | [((1059, 1093), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', '(1)'], {'sharex': '(True)'}), '(rows, 1, sharex=True)\n', (1071, 1093), True, 'import matplotlib.pyplot as plt\n'), ((2056, 2068), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2066, 2068), True, 'import matplotlib.pyplot as plt\n'), ((2241, 2358), 'matplotlib.pyplot.plot', 'plt.plot', (['b_times', "seizure['baseline']['ave_power'][index, :]", 's_times', "seizure['seizure']['ave_power'][index, :]"], {}), "(b_times, seizure['baseline']['ave_power'][index, :], s_times,\n seizure['seizure']['ave_power'][index, :])\n", (2249, 2358), True, 'import matplotlib.pyplot as plt\n'), ((4303, 4323), 'numpy.mean', 'np.mean', (['baseline', '(1)'], {}), '(baseline, 1)\n', (4310, 4323), True, 'import numpy as np\n'), ((4333, 4352), 'numpy.std', 'np.std', (['baseline', '(1)'], {}), '(baseline, 1)\n', (4339, 4352), True, 'import numpy as np\n'), ((1390, 1407), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (1405, 1407), True, 'import matplotlib.pyplot as plt\n'), ((3188, 3205), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (3203, 3205), True, 'import matplotlib.pyplot as plt\n'), ((5402, 5419), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (5417, 5419), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 20:01:20 2020
@author: login
"""
import pandas as pd
import matplotlib.pyplot as plt
import pylab as pl
import numpy as np
import sklearn.linear_model as sklm
from sklearn.metrics import r2_score as skmr2
#File Path
file_Address="F:\\KamyabJawan Program\\Machine Learning\\Practice\\Files\\FuelConsumption.csv"
#Reading File
df=pd.read_csv(file_Address)
#Printing Data
print(df.head())
#Describing Data
print(df.describe())
#Selecting Some Feature to Explore Data of that Features
some_features=df[["ENGINESIZE","CYLINDERS","FUELCONSUMPTION_COMB","CO2EMISSIONS"]]
#printing
print(some_features.head())
"""
#plotting all features separately
#Assigning a variable
visualization=some_features[["ENGINESIZE","CYLINDERS","FUELCONSUMPTION_COMB","CO2EMISSIONS"]]
#showing through variable
visualization.hist()
"""
#plot each of these features vs the Emission, to see how linear is their relation
"""
#Relation of Fuel Consumption and CO2 Emission
plt.scatter(some_features.FUELCONSUMPTION_COMB, some_features.CO2EMISSIONS, color='blue')
plt.xlabel("Fuel Consumption")
plt.ylabel("CO2 Emission")
plt.title("Relation of CO2 and Fuel Consumption")
"""
"""
#Relation of Engine Size and CO2 Emission
plt.scatter(some_features.ENGINESIZE,some_features.CO2EMISSIONS,color='red')
plt.xlabel("Engine Size")
plt.ylabel("CO2 Emission")
plt.title("Relation of CO2 and Engine Size")
"""
"""
#Relation of Clynders and CO2 Emission
plt.scatter(some_features.CYLINDERS, some_features.CO2EMISSIONS, color="green")
plt.xlabel("Cylinders")
plt.ylabel("CO2 Emission")
plt.title("Relation of CO2 and Engine Size")
"""
#Creating Training and Test Data Sets
#random mask: True (~80% of rows) selects training data, False selects test data
train_mask = np.random.rand(len(df)) < 0.8
train = some_features[train_mask]
test = some_features[~train_mask]
print("Training Data Set: ",train)
print("Testing Data Set: ",test)
#Making Simple Regression Model
#Modeling our Data
regr=sklm.LinearRegression()
train_x=np.asanyarray(train[["ENGINESIZE"]])
train_y=np.asanyarray(train[["CO2EMISSIONS"]])
regr.fit(train_x,train_y)
#Printing Coefficiets and Intercepts
print("Coefficient: ",regr.coef_)
print("Intercept: ",regr.intercept_)
#Plotting our Model on Graph
plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color="blue")
plt.plot(train_x,regr.coef_[0][0]*train_x + regr.intercept_[0], '-r')
plt.xlabel("Engine Size")
plt.ylabel("CO2 Emission")
plt.title("SRM of Engine vs Emission")
#Evaluation of Our Model
test_x=np.asanyarray(test[["ENGINESIZE"]])
test_y=np.asanyarray(test[["CO2EMISSIONS"]])
test_y_hat=regr.predict(test_x)
plt.plot(test_x,regr.coef_[0][0]*test_x + regr.intercept_[0], 'y')
#printing Values
print("Mean Absolute Error: %.2f"% np.mean(np.absolute(test_y_hat-test_y)))
print("Residual Sum of Squares (MSE): %.2f"% np.mean(test_y_hat-test_y)**2)
print("R2-Score: %.2f"% skmr2(test_y_hat,test_y))
| [
"matplotlib.pyplot.title",
"numpy.absolute",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"numpy.asanyarray",
"sklearn.metrics.r2_score",
"sklearn.linear_model.LinearRegression",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((400, 425), 'pandas.read_csv', 'pd.read_csv', (['file_Address'], {}), '(file_Address)\n', (411, 425), True, 'import pandas as pd\n'), ((1973, 1996), 'sklearn.linear_model.LinearRegression', 'sklm.LinearRegression', ([], {}), '()\n', (1994, 1996), True, 'import sklearn.linear_model as sklm\n'), ((2006, 2042), 'numpy.asanyarray', 'np.asanyarray', (["train[['ENGINESIZE']]"], {}), "(train[['ENGINESIZE']])\n", (2019, 2042), True, 'import numpy as np\n'), ((2052, 2090), 'numpy.asanyarray', 'np.asanyarray', (["train[['CO2EMISSIONS']]"], {}), "(train[['CO2EMISSIONS']])\n", (2065, 2090), True, 'import numpy as np\n'), ((2261, 2324), 'matplotlib.pyplot.scatter', 'plt.scatter', (['train.ENGINESIZE', 'train.CO2EMISSIONS'], {'color': '"""blue"""'}), "(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')\n", (2272, 2324), True, 'import matplotlib.pyplot as plt\n'), ((2326, 2398), 'matplotlib.pyplot.plot', 'plt.plot', (['train_x', '(regr.coef_[0][0] * train_x + regr.intercept_[0])', '"""-r"""'], {}), "(train_x, regr.coef_[0][0] * train_x + regr.intercept_[0], '-r')\n", (2334, 2398), True, 'import matplotlib.pyplot as plt\n'), ((2397, 2422), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Engine Size"""'], {}), "('Engine Size')\n", (2407, 2422), True, 'import matplotlib.pyplot as plt\n'), ((2424, 2450), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CO2 Emission"""'], {}), "('CO2 Emission')\n", (2434, 2450), True, 'import matplotlib.pyplot as plt\n'), ((2452, 2490), 'matplotlib.pyplot.title', 'plt.title', (['"""SRM of Engine vs Emission"""'], {}), "('SRM of Engine vs Emission')\n", (2461, 2490), True, 'import matplotlib.pyplot as plt\n'), ((2527, 2562), 'numpy.asanyarray', 'np.asanyarray', (["test[['ENGINESIZE']]"], {}), "(test[['ENGINESIZE']])\n", (2540, 2562), True, 'import numpy as np\n'), ((2571, 2608), 'numpy.asanyarray', 'np.asanyarray', (["test[['CO2EMISSIONS']]"], {}), "(test[['CO2EMISSIONS']])\n", (2584, 2608), True, 'import numpy as np\n'), ((2643, 2712), 'matplotlib.pyplot.plot', 'plt.plot', (['test_x', '(regr.coef_[0][0] * test_x + regr.intercept_[0])', '"""y"""'], {}), "(test_x, regr.coef_[0][0] * test_x + regr.intercept_[0], 'y')\n", (2651, 2712), True, 'import matplotlib.pyplot as plt\n'), ((2907, 2932), 'sklearn.metrics.r2_score', 'skmr2', (['test_y_hat', 'test_y'], {}), '(test_y_hat, test_y)\n', (2912, 2932), True, 'from sklearn.metrics import r2_score as skmr2\n'), ((2772, 2804), 'numpy.absolute', 'np.absolute', (['(test_y_hat - test_y)'], {}), '(test_y_hat - test_y)\n', (2783, 2804), True, 'import numpy as np\n'), ((2851, 2879), 'numpy.mean', 'np.mean', (['(test_y_hat - test_y)'], {}), '(test_y_hat - test_y)\n', (2858, 2879), True, 'import numpy as np\n')] |
"""
Performance check of AutoGL model + PYG (trainer + dataset)
"""
import os
import random
import numpy as np
from tqdm import tqdm
import pickle
import torch
import torch.nn.functional as F
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
from torch_geometric.nn import GCNConv, GATConv, SAGEConv
import logging
logging.basicConfig(level=logging.ERROR)
class GCN(torch.nn.Module):
def __init__(self, num_features, num_classes):
super(GCN, self).__init__()
self.conv1 = GCNConv(num_features, 16)
self.conv2 = GCNConv(16, num_classes)
def forward(self, data):
x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
x = F.relu(self.conv1(x, edge_index, edge_weight))
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index, edge_weight)
return F.log_softmax(x, dim=1)
class GAT(torch.nn.Module):
def __init__(self, num_features, num_classes):
super(GAT, self).__init__()
self.conv1 = GATConv(num_features, 8, heads=8, dropout=0.6)
self.conv2 = GATConv(8 * 8, num_classes, heads=1, concat=False,
dropout=0.6)
def forward(self, data):
x, edge_index = data.x, data.edge_index
x = F.dropout(x, p=0.6, training=self.training)
x = F.elu(self.conv1(x, edge_index))
x = F.dropout(x, p=0.6, training=self.training)
x = self.conv2(x, edge_index)
return F.log_softmax(x, dim=-1)
class SAGE(torch.nn.Module):
def __init__(self, num_features, hidden_channels, num_layers, num_classes):
super(SAGE, self).__init__()
self.num_layers = num_layers
self.convs = torch.nn.ModuleList()
for i in range(num_layers):
inc = outc = hidden_channels
if i == 0:
inc = num_features
if i == num_layers - 1:
outc = num_classes
self.convs.append(SAGEConv(inc, outc))
def forward(self, data):
x, edge_index = data.x, data.edge_index
for i, conv in enumerate(self.convs):
x = conv(x, edge_index)
if i != self.num_layers - 1:
x = x.relu()
x = F.dropout(x, p=0.5, training=self.training)
return F.log_softmax(x, dim=-1)
def test(model, data, mask):
model.eval()
pred = model(data)[mask].max(1)[1]
acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()
return acc
def train(model, data, args):
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# snapshot the initial weights in the same serialized form used below
parameters = pickle.dumps(model.state_dict())
best_acc = 0.
for epoch in range(args.epoch):
model.train()
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output[data.train_mask], data.y[data.train_mask])
loss.backward()
optimizer.step()
val_acc = test(model, data, data.val_mask)
if val_acc > best_acc:
best_acc = val_acc
parameters = pickle.dumps(model.state_dict())
model.load_state_dict(pickle.loads(parameters))
return model
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser('pyg model')
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--dataset', type=str, choices=['Cora', 'CiteSeer', 'PubMed'], default='Cora')
parser.add_argument('--repeat', type=int, default=50)
parser.add_argument('--model', type=str, choices=['gat', 'gcn', 'sage'], default='gat')
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--weight_decay', type=float, default=0.0)
parser.add_argument('--epoch', type=int, default=200)
args = parser.parse_args()
# seed = 100
dataset = Planetoid(os.path.expanduser('~/.cache-autogl'), args.dataset, transform=T.NormalizeFeatures())
data = dataset[0].to(args.device)
accs = []
for seed in tqdm(range(args.repeat)):
np.random.seed(seed)
torch.manual_seed(seed)
if args.model == 'gat':
model = GAT(dataset.num_node_features, dataset.num_classes)
elif args.model == 'gcn':
model = GCN(dataset.num_node_features, dataset.num_classes)
elif args.model == 'sage':
model = SAGE(dataset.num_node_features, 64, 2, dataset.num_classes)
model.to(args.device)
train(model, data, args)
acc = test(model, data, data.test_mask)
accs.append(acc)
print('{:.4f} ~ {:.4f}'.format(np.mean(accs), np.std(accs)))
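# Example invocation (the script filename is hypothetical; flags as defined
# by the argparse setup above):
#   python pyg_model.py --model gcn --dataset Cora --repeat 10 --device cpu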
| [
"pickle.loads",
"torch_geometric.nn.GCNConv",
"numpy.random.seed",
"argparse.ArgumentParser",
"logging.basicConfig",
"torch.nn.ModuleList",
"numpy.std",
"torch.manual_seed",
"torch_geometric.transforms.NormalizeFeatures",
"torch_geometric.nn.SAGEConv",
"torch.nn.functional.dropout",
"numpy.mea... | [((353, 393), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.ERROR'}), '(level=logging.ERROR)\n', (372, 393), False, 'import logging\n'), ((3246, 3282), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""pyg model"""'], {}), "('pyg model')\n", (3269, 3282), False, 'import argparse\n'), ((531, 556), 'torch_geometric.nn.GCNConv', 'GCNConv', (['num_features', '(16)'], {}), '(num_features, 16)\n', (538, 556), False, 'from torch_geometric.nn import GCNConv, GATConv, SAGEConv\n'), ((578, 602), 'torch_geometric.nn.GCNConv', 'GCNConv', (['(16)', 'num_classes'], {}), '(16, num_classes)\n', (585, 602), False, 'from torch_geometric.nn import GCNConv, GATConv, SAGEConv\n'), ((781, 817), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training'}), '(x, training=self.training)\n', (790, 817), True, 'import torch.nn.functional as F\n'), ((884, 907), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (897, 907), True, 'import torch.nn.functional as F\n'), ((1046, 1092), 'torch_geometric.nn.GATConv', 'GATConv', (['num_features', '(8)'], {'heads': '(8)', 'dropout': '(0.6)'}), '(num_features, 8, heads=8, dropout=0.6)\n', (1053, 1092), False, 'from torch_geometric.nn import GCNConv, GATConv, SAGEConv\n'), ((1114, 1177), 'torch_geometric.nn.GATConv', 'GATConv', (['(8 * 8)', 'num_classes'], {'heads': '(1)', 'concat': '(False)', 'dropout': '(0.6)'}), '(8 * 8, num_classes, heads=1, concat=False, dropout=0.6)\n', (1121, 1177), False, 'from torch_geometric.nn import GCNConv, GATConv, SAGEConv\n'), ((1297, 1340), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.6)', 'training': 'self.training'}), '(x, p=0.6, training=self.training)\n', (1306, 1340), True, 'import torch.nn.functional as F\n'), ((1398, 1441), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.6)', 'training': 'self.training'}), '(x, p=0.6, training=self.training)\n', (1407, 1441), True, 'import torch.nn.functional as F\n'), ((1495, 1519), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(-1)'}), '(x, dim=-1)\n', (1508, 1519), True, 'import torch.nn.functional as F\n'), ((1725, 1746), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (1744, 1746), False, 'import torch\n'), ((2313, 2337), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(-1)'}), '(x, dim=-1)\n', (2326, 2337), True, 'import torch.nn.functional as F\n'), ((2820, 2880), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output[data.train_mask]', 'data.y[data.train_mask]'], {}), '(output[data.train_mask], data.y[data.train_mask])\n', (2830, 2880), True, 'import torch.nn.functional as F\n'), ((3141, 3165), 'pickle.loads', 'pickle.loads', (['parameters'], {}), '(parameters)\n', (3153, 3165), False, 'import pickle\n'), ((3855, 3892), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.cache-autogl"""'], {}), "('~/.cache-autogl')\n", (3873, 3892), False, 'import os\n'), ((4045, 4065), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4059, 4065), True, 'import numpy as np\n'), ((4074, 4097), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (4091, 4097), False, 'import torch\n'), ((3918, 3939), 'torch_geometric.transforms.NormalizeFeatures', 'T.NormalizeFeatures', ([], {}), '()\n', (3937, 3939), True, 'import torch_geometric.transforms as T\n'), ((4605, 4618), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (4612, 4618), True, 'import numpy as np\n'), ((4620, 4632), 
'numpy.std', 'np.std', (['accs'], {}), '(accs)\n', (4626, 4632), True, 'import numpy as np\n'), ((1983, 2002), 'torch_geometric.nn.SAGEConv', 'SAGEConv', (['inc', 'outc'], {}), '(inc, outc)\n', (1991, 2002), False, 'from torch_geometric.nn import GCNConv, GATConv, SAGEConv\n'), ((2254, 2297), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.5)', 'training': 'self.training'}), '(x, p=0.5, training=self.training)\n', (2263, 2297), True, 'import torch.nn.functional as F\n')] |
from .lin_op import LinOp
import numpy as np
import cv2
from proximal.halide.halide import Halide
from proximal.utils.utils import Impl
class warp(LinOp):
"""Warp using a homography.
"""
def __init__(self, arg, H, implem=None):
self.H = H.copy()
# Compute inverse
self.Hinv = np.zeros(H.shape)
if len(H.shape) > 2:
for j in range(self.H.shape[2]):
self.Hinv[:, :, j] = np.linalg.pinv(H[:, :, j])
else:
self.Hinv = np.linalg.pinv(H)
# Check for the shape
if len(H.shape) < 2 or len(H.shape) > 3:
raise Exception(
'Error, warp supports only up to 4d inputs (expects first 3 to be image).')
# Has to have third dimension
#if len(arg.shape) != 3:
# raise Exception('Images must have third dimension')
shape = arg.shape
if len(H.shape) == 3:
shape += (H.shape[2],)
# Temp array for halide
self.tmpfwd = np.zeros((shape[0], shape[1],
shape[2] if (len(shape) > 2) else 1,
H.shape[2] if (len(H.shape) > 2) else 1),
dtype=np.float32, order='F')
self.tmpadj = np.zeros((shape[0], shape[1], shape[2] if (
len(shape) > 2) else 1), dtype=np.float32, order='F')
super(warp, self).__init__([arg], shape, implem)
def forward(self, inputs, outputs):
"""The forward operator.
Reads from inputs and writes to outputs.
"""
if self.implementation == Impl['halide']:
# Halide implementation
Halide('A_warp').A_warp(inputs[0], self.H, self.tmpfwd) # Call
np.copyto(outputs[0], np.reshape(self.tmpfwd, self.shape))
else:
# CV2 version
inimg = inputs[0]
if len(self.H.shape) == 2:
warpedInput = cv2.warpPerspective(np.asfortranarray(inimg), self.H,
inimg.shape[1::-1], flags=cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP,
borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
# Necessary due to array layout in opencv
np.copyto(outputs[0], warpedInput)
else:
for j in range(self.H.shape[2]):
warpedInput = cv2.warpPerspective(np.asfortranarray(inimg),
self.H[:, :, j], inimg.shape[1::-1],
flags=cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP,
borderMode=cv2.BORDER_CONSTANT,
borderValue=0.)
# Necessary due to array layout in opencv
np.copyto(outputs[0][:, :, :, j], warpedInput)
def adjoint(self, inputs, outputs):
"""The adjoint operator.
Reads from inputs and writes to outputs.
"""
if self.implementation == Impl['halide']:
# Halide implementation
Halide('At_warp').At_warp(inputs[0], self.Hinv, self.tmpadj) # Call
if outputs[0].ndim == 2:
np.copyto(outputs[0], self.tmpadj[..., 0])
else:
np.copyto(outputs[0], self.tmpadj)
else:
# CV2 version
inimg = inputs[0]
if len(self.H.shape) == 2:
# + cv2.WARP_INVERSE_MAP
warpedInput = cv2.warpPerspective(np.asfortranarray(inimg), self.H,
inimg.shape[1::-1], flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
np.copyto(outputs[0], warpedInput)
else:
outputs[0][:] = 0.0
for j in range(self.H.shape[2]):
warpedInput = cv2.warpPerspective(np.asfortranarray(inimg[:, :, :, j]),
self.H, inimg.shape[1::-1],
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=0.)
# Necessary due to array layout in opencv
outputs[0] += warpedInput
# TODO what is the spectral norm of a warp?
| [
"numpy.zeros",
"numpy.asfortranarray",
"numpy.reshape",
"proximal.halide.halide.Halide",
"numpy.copyto",
"numpy.linalg.pinv"
] | [((317, 334), 'numpy.zeros', 'np.zeros', (['H.shape'], {}), '(H.shape)\n', (325, 334), True, 'import numpy as np\n'), ((511, 528), 'numpy.linalg.pinv', 'np.linalg.pinv', (['H'], {}), '(H)\n', (525, 528), True, 'import numpy as np\n'), ((446, 472), 'numpy.linalg.pinv', 'np.linalg.pinv', (['H[:, :, j]'], {}), '(H[:, :, j])\n', (460, 472), True, 'import numpy as np\n'), ((1771, 1806), 'numpy.reshape', 'np.reshape', (['self.tmpfwd', 'self.shape'], {}), '(self.tmpfwd, self.shape)\n', (1781, 1806), True, 'import numpy as np\n'), ((2292, 2326), 'numpy.copyto', 'np.copyto', (['outputs[0]', 'warpedInput'], {}), '(outputs[0], warpedInput)\n', (2301, 2326), True, 'import numpy as np\n'), ((3311, 3353), 'numpy.copyto', 'np.copyto', (['outputs[0]', 'self.tmpadj[..., 0]'], {}), '(outputs[0], self.tmpadj[..., 0])\n', (3320, 3353), True, 'import numpy as np\n'), ((3388, 3422), 'numpy.copyto', 'np.copyto', (['outputs[0]', 'self.tmpadj'], {}), '(outputs[0], self.tmpadj)\n', (3397, 3422), True, 'import numpy as np\n'), ((3867, 3901), 'numpy.copyto', 'np.copyto', (['outputs[0]', 'warpedInput'], {}), '(outputs[0], warpedInput)\n', (3876, 3901), True, 'import numpy as np\n'), ((1673, 1689), 'proximal.halide.halide.Halide', 'Halide', (['"""A_warp"""'], {}), "('A_warp')\n", (1679, 1689), False, 'from proximal.halide.halide import Halide\n'), ((1969, 1993), 'numpy.asfortranarray', 'np.asfortranarray', (['inimg'], {}), '(inimg)\n', (1986, 1993), True, 'import numpy as np\n'), ((2906, 2952), 'numpy.copyto', 'np.copyto', (['outputs[0][:, :, :, j]', 'warpedInput'], {}), '(outputs[0][:, :, :, j], warpedInput)\n', (2915, 2952), True, 'import numpy as np\n'), ((3189, 3206), 'proximal.halide.halide.Halide', 'Halide', (['"""At_warp"""'], {}), "('At_warp')\n", (3195, 3206), False, 'from proximal.halide.halide import Halide\n'), ((3625, 3649), 'numpy.asfortranarray', 'np.asfortranarray', (['inimg'], {}), '(inimg)\n', (3642, 3649), True, 'import numpy as np\n'), ((2449, 2473), 'numpy.asfortranarray', 'np.asfortranarray', (['inimg'], {}), '(inimg)\n', (2466, 2473), True, 'import numpy as np\n'), ((4060, 4096), 'numpy.asfortranarray', 'np.asfortranarray', (['inimg[:, :, :, j]'], {}), '(inimg[:, :, :, j])\n', (4077, 4096), True, 'import numpy as np\n')] |
import numpy as np
import scipy.special as sp
# import matplotlib.pyplot as plt
import math
import pyvista as pv
# from pyvistaqt import BackgroundPlotter
# import time
#
# Purpose: Check out the kernel function for imaginary frequency
# Parameters
# L = 30.0 # 2.0 * np.pi / 1.4 # 10.0 # larmor radii
# k = 2.0 * np.pi / L # 1.4 # 1.11 # 0.628
# k = 1.4 # 1.5 # 0.7 # 0.7 # 0.4 # 0.2 # 1.11 # 1.4
# L = 2.0 * np.pi / k
k = 0.05 # 1.4 # 1.3
L = 2.0 * np.pi / k
print('Wave-number is ' + str(k))
v = np.linspace(1.0e-6, 8.5, num=75)
phi = np.linspace(0, 2.0*np.pi, num=75)
space = np.linspace(0, L, num=75)
b = - k * v # 0.0 + 0.3110j
om_p = 1.414 # 3 + 4.0e-8 # 2.00093 # 1.414 # 1.2 - 0.1j # 1.0524 # 4.912e-1j # 0.3110j # 1.414 # 1.2 + 0.2j
om_n = -om_p # -2.00093 # -1.414 # -1.2 - 0.1j # -4.912e-1j # -1.414 # -0.3110j # -1.0e-5 # -2.05 # -3.1 # -1.414
# Distribution
a = 1
ring_j = 0
x = 0.5 * (v/a) ** 2.0
f0 = 1/(2.0 * np.pi * (a ** 2.0) * math.factorial(ring_j)) * np.multiply(x ** ring_j, np.exp(-x))
dfdv = np.multiply(f0, (ring_j/x - 1.0)) / (a ** 2.0)
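# Sanity check (an added sketch): the ring distribution is normalized over the
# 2D velocity plane,
#   int f0 d^2v = int_0^inf f0 * 2*pi*v dv = int_0^inf x**ring_j * exp(-x) / factorial(ring_j) dx = 1,
# using v dv = a**2 dx; the quadrature below should print ~1 up to grid truncation.
print('f0 normalization check:', np.trapz(f0 * 2.0 * np.pi * v, v))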
# Fourier series components
def inner_series(n, om):
return n / (n - om) * np.tensordot(sp.jv(n, b), np.exp(-1j * n * phi), axes=0)
# Sum harmonics n = -terms_n .. terms_n in the angular part
terms_n = 20
factor = np.tensordot(b, np.sin(phi), axes=0)
# Positive frequency part
series = np.array([inner_series(n, om_p) for n in range(-terms_n, terms_n+1)]).sum(axis=0)
Gam_p = -1j * np.tensordot(np.exp(1j * k * space), np.multiply(np.exp(1j * factor), series), axes=0)
# Negative frequency part
series = np.array([inner_series(n, om_n) for n in range(-terms_n, terms_n+1)]).sum(axis=0)
Gam_n = -1j * np.tensordot(np.exp(1j * k * space), np.multiply(np.exp(1j * factor), series), axes=0)
# Convert from cylindrical to cartesian
vx = np.tensordot(v, np.cos(phi), axes=0)
vy = np.tensordot(v, np.sin(phi), axes=0)
scale = 0.1
x3 = np.tensordot(scale * space, np.ones_like(vx), axes=0)
vx3 = np.tensordot(np.ones_like(space), vx, axes=0)
vy3 = np.tensordot(np.ones_like(space), vy, axes=0)
grid = pv.StructuredGrid(x3, vx3, vy3)
Gam = np.real(Gam_p + Gam_n)
perturbation = np.multiply(Gam, dfdv[None, :, None])
low = np.amin(perturbation)
high = np.amax(perturbation)
contour_array = np.linspace(0.9 * low, 0.9 * high, num=6)
grid["vol"] = perturbation.transpose().flatten()
# slice0 = grid.slice_orthogonal(x=scale * L/4, y=0, z=0)
# slice1 = grid.slice_orthogonal(x=scale * 3*L/4, y=0, z=0)
contour = grid.contour(contour_array)
# contour = grid.contour([0.8*high])
clim = [low, high]
p = pv.Plotter()
# actor0 = p.add_mesh(slice0, clim=clim)
# actor1 = p.add_mesh(slice1, clim=clim)
actor = p.add_mesh(contour, clim=clim, opacity='linear')
p.show_grid()
p.show(auto_close=False)
p.open_movie('test_bernstein4.mp4', framerate=12)
# Real part and contour plot
t = np.linspace(0, 20, num=100)
for idx_t in range(t.shape[0]):
idx = 10
Gamr = np.real(Gam_p * np.exp(-1j * om_p * t[idx_t]) + Gam_n * np.exp(-1j * om_n * t[idx_t]))
cb = np.linspace(np.amin(Gamr), np.amax(Gamr), num=100)
perturbation = -np.multiply(Gamr, dfdv[None, :, None])
cbp = np.linspace(np.amin(perturbation), np.amax(perturbation), num=100)
# plt.figure()
# plt.contourf(vx, vy, Gamr[idx, :, :], cb)
# plt.xlabel(r'Velocity $v_x$')
# plt.ylabel(r'Velocity $v_y$')
# plt.colorbar()
# plt.title(str(terms_n) + r' term approximation of $\Omega(k_0v, \varphi)$')
# plt.tight_layout()
# plt.figure()
# plt.contourf(vx, vy, perturbation[idx, :, :], cbp)
# plt.title(str(terms_n) + r' term approximation of $f_1(\omega = $' + str(om) + r'$\omega_{c})$ and $\lambda = $ ' + str(L) + r'$r_L$')
# plt.xlabel(r'Velocity $v_x$')
# plt.ylabel(r'Velocity $v_y$')
# plt.colorbar()
# plt.tight_layout()
# plt.show()
grid["vol"] = perturbation.transpose().flatten()
contours = grid.contour(contour_array)
# slice0 = grid.slice_orthogonal(x= scale * L/2, y=0, z=0)
# 3D plotter
p.remove_actor(actor)
# actor = p.add_mesh(slice0, clim=clim)
actor = p.add_mesh(contours, clim=clim, opacity='linear')
p.write_frame()
p.close()
quit()
| [
"numpy.multiply",
"pyvista.StructuredGrid",
"numpy.amin",
"numpy.ones_like",
"pyvista.Plotter",
"numpy.amax",
"numpy.sin",
"numpy.exp",
"numpy.real",
"numpy.linspace",
"numpy.cos",
"math.factorial",
"scipy.special.jv"
] | [((504, 535), 'numpy.linspace', 'np.linspace', (['(1e-06)', '(8.5)'], {'num': '(75)'}), '(1e-06, 8.5, num=75)\n', (515, 535), True, 'import numpy as np\n'), ((543, 578), 'numpy.linspace', 'np.linspace', (['(0)', '(2.0 * np.pi)'], {'num': '(75)'}), '(0, 2.0 * np.pi, num=75)\n', (554, 578), True, 'import numpy as np\n'), ((585, 610), 'numpy.linspace', 'np.linspace', (['(0)', 'L'], {'num': '(75)'}), '(0, L, num=75)\n', (596, 610), True, 'import numpy as np\n'), ((2063, 2094), 'pyvista.StructuredGrid', 'pv.StructuredGrid', (['x3', 'vx3', 'vy3'], {}), '(x3, vx3, vy3)\n', (2080, 2094), True, 'import pyvista as pv\n'), ((2102, 2124), 'numpy.real', 'np.real', (['(Gam_p + Gam_n)'], {}), '(Gam_p + Gam_n)\n', (2109, 2124), True, 'import numpy as np\n'), ((2140, 2177), 'numpy.multiply', 'np.multiply', (['Gam', 'dfdv[None, :, None]'], {}), '(Gam, dfdv[None, :, None])\n', (2151, 2177), True, 'import numpy as np\n'), ((2184, 2205), 'numpy.amin', 'np.amin', (['perturbation'], {}), '(perturbation)\n', (2191, 2205), True, 'import numpy as np\n'), ((2213, 2234), 'numpy.amax', 'np.amax', (['perturbation'], {}), '(perturbation)\n', (2220, 2234), True, 'import numpy as np\n'), ((2251, 2292), 'numpy.linspace', 'np.linspace', (['(0.9 * low)', '(0.9 * high)'], {'num': '(6)'}), '(0.9 * low, 0.9 * high, num=6)\n', (2262, 2292), True, 'import numpy as np\n'), ((2560, 2572), 'pyvista.Plotter', 'pv.Plotter', ([], {}), '()\n', (2570, 2572), True, 'import pyvista as pv\n'), ((2835, 2862), 'numpy.linspace', 'np.linspace', (['(0)', '(20)'], {'num': '(100)'}), '(0, 20, num=100)\n', (2846, 2862), True, 'import numpy as np\n'), ((1036, 1069), 'numpy.multiply', 'np.multiply', (['f0', '(ring_j / x - 1.0)'], {}), '(f0, ring_j / x - 1.0)\n', (1047, 1069), True, 'import numpy as np\n'), ((1297, 1308), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1303, 1308), True, 'import numpy as np\n'), ((1817, 1828), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1823, 1828), True, 'import numpy as np\n'), ((1859, 1870), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1865, 1870), True, 'import numpy as np\n'), ((1926, 1942), 'numpy.ones_like', 'np.ones_like', (['vx'], {}), '(vx)\n', (1938, 1942), True, 'import numpy as np\n'), ((1971, 1990), 'numpy.ones_like', 'np.ones_like', (['space'], {}), '(space)\n', (1983, 1990), True, 'import numpy as np\n'), ((2023, 2042), 'numpy.ones_like', 'np.ones_like', (['space'], {}), '(space)\n', (2035, 2042), True, 'import numpy as np\n'), ((1017, 1027), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1023, 1027), True, 'import numpy as np\n'), ((1462, 1486), 'numpy.exp', 'np.exp', (['(1.0j * k * space)'], {}), '(1.0j * k * space)\n', (1468, 1486), True, 'import numpy as np\n'), ((1680, 1704), 'numpy.exp', 'np.exp', (['(1.0j * k * space)'], {}), '(1.0j * k * space)\n', (1686, 1704), True, 'import numpy as np\n'), ((3027, 3040), 'numpy.amin', 'np.amin', (['Gamr'], {}), '(Gamr)\n', (3034, 3040), True, 'import numpy as np\n'), ((3042, 3055), 'numpy.amax', 'np.amax', (['Gamr'], {}), '(Gamr)\n', (3049, 3055), True, 'import numpy as np\n'), ((3091, 3129), 'numpy.multiply', 'np.multiply', (['Gamr', 'dfdv[None, :, None]'], {}), '(Gamr, dfdv[None, :, None])\n', (3102, 3129), True, 'import numpy as np\n'), ((3152, 3173), 'numpy.amin', 'np.amin', (['perturbation'], {}), '(perturbation)\n', (3159, 3173), True, 'import numpy as np\n'), ((3175, 3196), 'numpy.amax', 'np.amax', (['perturbation'], {}), '(perturbation)\n', (3182, 3196), True, 'import numpy as np\n'), ((966, 988), 'math.factorial', 
'math.factorial', (['ring_j'], {}), '(ring_j)\n', (980, 988), False, 'import math\n'), ((1177, 1188), 'scipy.special.jv', 'sp.jv', (['n', 'b'], {}), '(n, b)\n', (1182, 1188), True, 'import scipy.special as sp\n'), ((1190, 1213), 'numpy.exp', 'np.exp', (['(-1.0j * n * phi)'], {}), '(-1.0j * n * phi)\n', (1196, 1213), True, 'import numpy as np\n'), ((1498, 1519), 'numpy.exp', 'np.exp', (['(1.0j * factor)'], {}), '(1.0j * factor)\n', (1504, 1519), True, 'import numpy as np\n'), ((1716, 1737), 'numpy.exp', 'np.exp', (['(1.0j * factor)'], {}), '(1.0j * factor)\n', (1722, 1737), True, 'import numpy as np\n'), ((2935, 2966), 'numpy.exp', 'np.exp', (['(-1.0j * om_p * t[idx_t])'], {}), '(-1.0j * om_p * t[idx_t])\n', (2941, 2966), True, 'import numpy as np\n'), ((2975, 3006), 'numpy.exp', 'np.exp', (['(-1.0j * om_n * t[idx_t])'], {}), '(-1.0j * om_n * t[idx_t])\n', (2981, 3006), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.manifold import TSNE
from sklearn.preprocessing import scale
import os
import sys
from collections import Counter
sys.path.append('../../../../PlotUtils')
from Population import Population
def get_y_est(path_to_experiment, I):
path = [f'{path_to_experiment}/img/txt/y{i}_mean.csv'
for i in range(1, I + 1)]
y = [np.loadtxt(p, delimiter=',') for p in path]
return y
def compute_combined_tsne(y, seed=0):
# Stack y for each sample into one matrix
Y = np.vstack(y)
# Compute tsne for big matrix (scaled)
tsne = TSNE(verbose=2, random_state=seed).fit(scale(Y))
# indices
idx = [np.full(y[i].shape[0], i + 1) for i in range(len(y))]
idx = np.concatenate(idx, axis=0)
N = idx.shape[0]
    # The first ncol columns are Y, the next two hold the tsne embedding, and
    # the last column is the sample index.
output = np.concatenate((Y, tsne.embedding_, idx.reshape(-1, 1)),
axis=-1)
# Create column header
ncol = Y.shape[1]
columns = [f'marker{j}' for j in np.arange(1, ncol + 1)]
columns += ['tsne1', 'tsne2', 'sample_idx']
return pd.DataFrame(output, columns=columns)
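# Usage sketch (hypothetical data, not part of the pipeline below): two samples
# with 5 markers and different cell counts, embedded jointly.
#   y = [np.random.rand(100, 5), np.random.rand(80, 5)]
#   df = compute_combined_tsne(y, seed=0)
#   # df has columns marker1..marker5, tsne1, tsne2, sample_idx (1.0 or 2.0)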
def relabel_lam(lams, Zs):
assert len(lams) == len(Zs)
I = len(lams)
# Create a population dict.
population = Population()
# First, label most predominant subpopulations.
for i in range(I):
c = Counter(lams[i])
for ci in c.most_common():
k = ci[0] - 1
population.label(Zs[i][:, k])
    # Now relabel according to the most predominant subpopulations
new_labels = [[population.label(Zs[i][:, lin - 1]) for lin in lams[i]]
for i in range(I)]
return new_labels
# NOTE: Hardcoded...
def make_cluster_df(tsne_df, path_to_experiment):
lam1 = np.loadtxt(f'{path_to_experiment}/img/txt/lam1_best.txt').astype(int)
lam2 = np.loadtxt(f'{path_to_experiment}/img/txt/lam2_best.txt').astype(int)
Z1 = np.loadtxt(f'{path_to_experiment}/img/txt/Z1_best.txt')
Z2 = np.loadtxt(f'{path_to_experiment}/img/txt/Z2_best.txt')
new_labels = relabel_lam(lams=[lam1, lam2], Zs=[Z1, Z2])
new_labels = np.concatenate(new_labels)
cluster_df = tsne_df.copy()
cluster_df['cluster'] = new_labels
return cluster_df
if __name__ == '__main__':
nsamples = 2
path_to_default_experiment = 'results/phi25-pi0.2'
y = get_y_est(path_to_default_experiment, I=nsamples)
tsne_df = compute_combined_tsne(y, seed=0)
# Save results.
os.makedirs('results/tsne', exist_ok=True)
tsne_df.round(3).to_csv('results/tsne/tsne.csv')
| [
"sys.path.append",
"pandas.DataFrame",
"numpy.full",
"os.makedirs",
"sklearn.manifold.TSNE",
"sklearn.preprocessing.scale",
"collections.Counter",
"numpy.vstack",
"numpy.arange",
"numpy.loadtxt",
"Population.Population",
"numpy.concatenate"
] | [((167, 207), 'sys.path.append', 'sys.path.append', (['"""../../../../PlotUtils"""'], {}), "('../../../../PlotUtils')\n", (182, 207), False, 'import sys\n'), ((536, 548), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (545, 548), True, 'import numpy as np\n'), ((743, 770), 'numpy.concatenate', 'np.concatenate', (['idx'], {'axis': '(0)'}), '(idx, axis=0)\n', (757, 770), True, 'import numpy as np\n'), ((1174, 1211), 'pandas.DataFrame', 'pd.DataFrame', (['output'], {'columns': 'columns'}), '(output, columns=columns)\n', (1186, 1211), True, 'import pandas as pd\n'), ((1340, 1352), 'Population.Population', 'Population', ([], {}), '()\n', (1350, 1352), False, 'from Population import Population\n'), ((2001, 2056), 'numpy.loadtxt', 'np.loadtxt', (['f"""{path_to_experiment}/img/txt/Z1_best.txt"""'], {}), "(f'{path_to_experiment}/img/txt/Z1_best.txt')\n", (2011, 2056), True, 'import numpy as np\n'), ((2066, 2121), 'numpy.loadtxt', 'np.loadtxt', (['f"""{path_to_experiment}/img/txt/Z2_best.txt"""'], {}), "(f'{path_to_experiment}/img/txt/Z2_best.txt')\n", (2076, 2121), True, 'import numpy as np\n'), ((2201, 2227), 'numpy.concatenate', 'np.concatenate', (['new_labels'], {}), '(new_labels)\n', (2215, 2227), True, 'import numpy as np\n'), ((2554, 2596), 'os.makedirs', 'os.makedirs', (['"""results/tsne"""'], {'exist_ok': '(True)'}), "('results/tsne', exist_ok=True)\n", (2565, 2596), False, 'import os\n'), ((386, 414), 'numpy.loadtxt', 'np.loadtxt', (['p'], {'delimiter': '""","""'}), "(p, delimiter=',')\n", (396, 414), True, 'import numpy as np\n'), ((643, 651), 'sklearn.preprocessing.scale', 'scale', (['Y'], {}), '(Y)\n', (648, 651), False, 'from sklearn.preprocessing import scale\n'), ((679, 708), 'numpy.full', 'np.full', (['y[i].shape[0]', '(i + 1)'], {}), '(y[i].shape[0], i + 1)\n', (686, 708), True, 'import numpy as np\n'), ((1441, 1457), 'collections.Counter', 'Counter', (['lams[i]'], {}), '(lams[i])\n', (1448, 1457), False, 'from collections import Counter\n'), ((604, 638), 'sklearn.manifold.TSNE', 'TSNE', ([], {'verbose': '(2)', 'random_state': 'seed'}), '(verbose=2, random_state=seed)\n', (608, 638), False, 'from sklearn.manifold import TSNE\n'), ((1090, 1112), 'numpy.arange', 'np.arange', (['(1)', '(ncol + 1)'], {}), '(1, ncol + 1)\n', (1099, 1112), True, 'import numpy as np\n'), ((1841, 1898), 'numpy.loadtxt', 'np.loadtxt', (['f"""{path_to_experiment}/img/txt/lam1_best.txt"""'], {}), "(f'{path_to_experiment}/img/txt/lam1_best.txt')\n", (1851, 1898), True, 'import numpy as np\n'), ((1922, 1979), 'numpy.loadtxt', 'np.loadtxt', (['f"""{path_to_experiment}/img/txt/lam2_best.txt"""'], {}), "(f'{path_to_experiment}/img/txt/lam2_best.txt')\n", (1932, 1979), True, 'import numpy as np\n')] |
from unittest import TestCase
import numpy.testing as npt
import dlpy.maths as dlm
import numpy as np
class Test(TestCase):
def test_linear_interp(self):
npt.assert_almost_equal(dlm.linear_interp(0.5, 0, 0, 1, 1), 0.5)
npt.assert_almost_equal(dlm.linear_interp(400, 100, 0, 500, 1), 0.75)
npt.assert_almost_equal(dlm.linear_interp(0, 1, 0, 2, 1), -1.0)
def test_decay_linear(self):
value = dlm.decay_linear(100, 1, 1, -1, 0)
npt.assert_almost_equal(value, 1/100)
params = dlm.decay_linear(100, 1, 1, -1, 0, return_params=True)
npt.assert_array_equal(params, (1,0,0))
def test_decay_exponential(self):
value = dlm.decay_exponential(2, 1, 1, -1, 0)
npt.assert_almost_equal(value, 1/np.e)
params = dlm.decay_exponential(100, 1, 1, -1, 0, return_params=True)
npt.assert_array_equal(params, (np.e,-1,0))
def test_pw_linear(self):
pts = [(0, 0), (1, 2), (3, 4)]
fx = lambda x: dlm.pw_linear(x, pts, 0, 6)
fxExp = lambda x: dlm.pw_linear(x, pts, 0, 6, dlm.DecayType.EXPONENTIAL)
npt.assert_almost_equal(fx(0), 0)
npt.assert_almost_equal(fx(1), 2)
npt.assert_almost_equal(fx(0.5), 1)
npt.assert_almost_equal(fx(0.75), 1.5)
npt.assert_almost_equal(fx(-2), 0)
npt.assert_almost_equal(fx(2), 3)
npt.assert_almost_equal(fx(3), 4)
npt.assert_almost_equal(fx(4), 14/3)
npt.assert_almost_equal(fx(1001), 5.996)
npt.assert_almost_equal(fxExp(3.000001), 4, 6)
npt.assert_almost_equal(fxExp(31), 6.0, 5)
npt.assert_almost_equal(fxExp(-0.000001), 0, 5)
npt.assert_almost_equal(fxExp(-30), 0)
        x = 0
        (LHS, RHS) = dlm.pw_linear(x, pts, 0, 6, return_params=True)
npt.assert_array_almost_equal(LHS, (0,0,0))
npt.assert_array_almost_equal(RHS, (-2,2,6))
| [
"numpy.testing.assert_almost_equal",
"dlpy.maths.decay_exponential",
"numpy.testing.assert_array_equal",
"dlpy.maths.pw_linear",
"dlpy.maths.decay_linear",
"dlpy.maths.linear_interp",
"numpy.testing.assert_array_almost_equal"
] | [((433, 467), 'dlpy.maths.decay_linear', 'dlm.decay_linear', (['(100)', '(1)', '(1)', '(-1)', '(0)'], {}), '(100, 1, 1, -1, 0)\n', (449, 467), True, 'import dlpy.maths as dlm\n'), ((476, 515), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['value', '(1 / 100)'], {}), '(value, 1 / 100)\n', (499, 515), True, 'import numpy.testing as npt\n'), ((531, 585), 'dlpy.maths.decay_linear', 'dlm.decay_linear', (['(100)', '(1)', '(1)', '(-1)', '(0)'], {'return_params': '(True)'}), '(100, 1, 1, -1, 0, return_params=True)\n', (547, 585), True, 'import dlpy.maths as dlm\n'), ((594, 635), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['params', '(1, 0, 0)'], {}), '(params, (1, 0, 0))\n', (616, 635), True, 'import numpy.testing as npt\n'), ((689, 726), 'dlpy.maths.decay_exponential', 'dlm.decay_exponential', (['(2)', '(1)', '(1)', '(-1)', '(0)'], {}), '(2, 1, 1, -1, 0)\n', (710, 726), True, 'import dlpy.maths as dlm\n'), ((735, 775), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['value', '(1 / np.e)'], {}), '(value, 1 / np.e)\n', (758, 775), True, 'import numpy.testing as npt\n'), ((791, 850), 'dlpy.maths.decay_exponential', 'dlm.decay_exponential', (['(100)', '(1)', '(1)', '(-1)', '(0)'], {'return_params': '(True)'}), '(100, 1, 1, -1, 0, return_params=True)\n', (812, 850), True, 'import dlpy.maths as dlm\n'), ((859, 904), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['params', '(np.e, -1, 0)'], {}), '(params, (np.e, -1, 0))\n', (881, 904), True, 'import numpy.testing as npt\n'), ((1742, 1789), 'dlpy.maths.pw_linear', 'dlm.pw_linear', (['x', 'pts', '(0)', '(6)'], {'return_params': '(True)'}), '(x, pts, 0, 6, return_params=True)\n', (1755, 1789), True, 'import dlpy.maths as dlm\n'), ((1798, 1843), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['LHS', '(0, 0, 0)'], {}), '(LHS, (0, 0, 0))\n', (1827, 1843), True, 'import numpy.testing as npt\n'), ((1850, 1896), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['RHS', '(-2, 2, 6)'], {}), '(RHS, (-2, 2, 6))\n', (1879, 1896), True, 'import numpy.testing as npt\n'), ((192, 226), 'dlpy.maths.linear_interp', 'dlm.linear_interp', (['(0.5)', '(0)', '(0)', '(1)', '(1)'], {}), '(0.5, 0, 0, 1, 1)\n', (209, 226), True, 'import dlpy.maths as dlm\n'), ((265, 303), 'dlpy.maths.linear_interp', 'dlm.linear_interp', (['(400)', '(100)', '(0)', '(500)', '(1)'], {}), '(400, 100, 0, 500, 1)\n', (282, 303), True, 'import dlpy.maths as dlm\n'), ((343, 375), 'dlpy.maths.linear_interp', 'dlm.linear_interp', (['(0)', '(1)', '(0)', '(2)', '(1)'], {}), '(0, 1, 0, 2, 1)\n', (360, 375), True, 'import dlpy.maths as dlm\n'), ((996, 1023), 'dlpy.maths.pw_linear', 'dlm.pw_linear', (['x', 'pts', '(0)', '(6)'], {}), '(x, pts, 0, 6)\n', (1009, 1023), True, 'import dlpy.maths as dlm\n'), ((1050, 1104), 'dlpy.maths.pw_linear', 'dlm.pw_linear', (['x', 'pts', '(0)', '(6)', 'dlm.DecayType.EXPONENTIAL'], {}), '(x, pts, 0, 6, dlm.DecayType.EXPONENTIAL)\n', (1063, 1104), True, 'import dlpy.maths as dlm\n')] |
# @verbatim
# This file contains functions that can be used to build circuit models
# representing the electrical behaviour of neural electrodes.
# The models used are
# @endverbatim
import numpy as np
import matplotlib.pyplot as plt
## Impedances
def imp_cap(cap, f):
# Equivalent impedance of a capacitor
# cap -> capacitance
# f -> frequency
return 1/(1j*2*np.pi*f*cap)
def imp_series(z1, z2):
# Equivalent series impedance of two elements z1 & z2
return (z1 + z2)
def imp_parallel(z1, z2):
# Equivalent parallel impedance of two elements z1 & z2
return (z1*z2)/(z1 + z2)
def res_elyte(k, l, cross_a):
# Electrolyte Resistance (depends of electrolyte properties (k) and geometry)
# k -> conductivity of the solution
# l -> length of electrolyte
    # cross_a -> cross-sectional area
return l/(k*cross_a)
def imp_warburg(sigma, freq):
# Impedance due to Diffusion of infinite thickness
# sigma -> Warburg coefficient
# freq -> frequency
return (sigma / np.sqrt(2*np.pi*freq)) * (1-1j)
def imp_warburg_nonideal(sigma, freq, thck, diff_coef):
# Impedance due to Diffusion for a finite thickness
# sigma -> Warburg coefficient
# freq -> frequency
# thck -> Nerst Diffusion layer thickness
# diff_coef -> some avg value of diffusion coefficients (anode & cathode)
return (sigma / np.sqrt(2*np.pi*freq)) * (1 - 1j) \
* np.tanh(thck*np.sqrt(1j*2*np.pi*freq / diff_coef))
def warburg_coeff(R, T, n, F, surf_a, Cob, Do, Crb, Dr):
# Warburg coefficient
# R -> gas constant
# T -> temperature
# n -> number of electrons involved
# F -> Faraday's constant
# surf_a -> Surface area of electrode
# Cob -> Bulk Concentration of oxidant
# Do -> Diffusion coefficient of oxidant
# Crb -> Bulk Concentration of reductant
# Dr -> Diffusion coefficient of reductant
return (R*T/(np.sqrt(2)*n**2*F**2*surf_a)) * (1/(Cob*np.sqrt(Do)) + 1/(Crb*np.sqrt(Dr)))
def cap_double_plate(eps_r, eps_o, surf_a, d):
# Capacitance of a double plate capacitor
    # eps_r -> relative permittivity of the electrolyte (and/or other materials)
# eps_o -> permittivity of free space
# surf_a-> surface area of plates
# d -> thickness between plates
return eps_r*eps_o*surf_a/d
def imp_cpe(f, n, c_cpe):
# Impedance of a constant phase element (relevant for double layer capacitors)
# f -> frequency
    # n -> non-ideality constant representing inhomogeneities; n <= 1, typically 0.9 - 1.0 (n = 1 -> ideal capacitor)
# c_cpe -> Interface Capacitance
return 1/((1j*2*np.pi*f * c_cpe)**n)
def cap_double_layer(cap_stern, cap_diff):
    # Capacitance of a constant phase element based on the double layer (diffuse + Stern layers in series)
# cap_stern --> capacitance of stern layer
# cap_diff --> capacitance of diffuse layer
return 1/( 1/cap_stern + 1/cap_diff )
def cap_stern_layer(d_OHP, eps_o, eps_r):
# Capacitance of the stern layer (part of the double layer)
# d_OHP --> thickness of layer
    # eps_o --> permittivity of free space
    # eps_r --> permittivity of the double layer
return eps_o*eps_r/d_OHP
def cap_diff_layer(len_deb, eps_o, eps_r, z, Vo, v_t):
# Capacitance of the diffusive layer (part of the double layer)
    # eps_o --> permittivity of free space
    # eps_r --> permittivity of the double layer
# len_deb --> Debye length (diffusion length)
# z --> charge on the ion in solution
# Vo --> applied electrode potential
# v_t --> thermal voltage
return (eps_o*eps_r*np.cosh(0.5*z*Vo/v_t))/len_deb
def length_debye(eps_0, eps_r, v_t, n0, z, q):
# Debye Length, representing the length of diffusion concentration gradient
    # eps_o --> permittivity of free space
    # eps_r --> permittivity of the double layer
# v_t --> thermal voltage
# n0 --> bulk number concentration
# z --> charge on the ion in solution
# q --> elementary charge
return np.sqrt(eps_0*eps_r*v_t/(2*n0*z**2*q))
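# Worked example (a sketch with values not taken from this file): for
# physiological saline (~150 mM NaCl, z = 1) at room temperature, with
# eps_r ~ 78.5, v_t = kT/q ~ 0.0257 V, q = 1.602e-19 C and
# n0 = 0.15 * 6.022e23 * 1e3 ions/m^3:
#   length_debye(8.854e-12, 78.5, 0.0257, 0.15 * 6.022e23 * 1e3, 1, 1.602e-19)
# gives ~7.9e-10 m, i.e. the commonly quoted ~0.8 nm Debye length.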
def jo_eq(F, k_c, C_a, beta, V_eq, R, T):
    # At equilibrium, equal and opposite reduction and oxidation currents flow
    # across the electrode-electrolyte interface; the magnitude of this current is Jo_eq.
    # The equilibrium current is a measure of the electrode's ability to participate
    # in exchange-current reactions.
    # For an ideally polarizable electrode, Jo_eq = 0
    # For an ideally unpolarizable electrode, Jo_eq = infinity
# F --> Faraday's constant
# k_c --> reduction reaction rate constant
    # C_a --> concentration of electron-acceptor ions "a" at the solution plane of the interface
# beta --> symetry factor (between oxidation and reduction?)
# V_eq --> equilibrium potential
# R --> gas constant
# T --> temperature
return F*k_c*C_a*np.exp(-beta*F*V_eq/(R*T))
def j_low_field_approx(J_o, F, eta, R, T):
    # Current density at small overpotentials, using the low-field approximation
    # to the Butler-Volmer equation (eta < 0.005/z)
# J_o --> equilibrium exchange current density
# F --> Faraday's constant
# eta --> applied overpotential
# R --> gas constant
# T --> temperature
return (J_o*F*eta)/(R*T)
def res_charge_transf(R, T, z, F, J_o):
# Charge Transfer Resistance (based on speed of reaction) at equilibrium
# exchange current
# R --> gas constant
# T --> temperature
# z --> number of electrons involved
# F --> Faraday's constant
# J_o --> equilibrium exchange current density
return (R*T)/(z*F*J_o)
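# Example (an illustrative sketch with hypothetical component values): the
# elements above combine into the classic Randles cell - solution resistance
# in series with a double-layer CPE that sits in parallel with the faradaic
# branch (charge-transfer resistance + Warburg diffusion). The result can be
# passed to bode_plot/nyquist_plot below.
#   def imp_randles(f, r_s=100.0, r_ct=1e4, c_dl=1e-7, n=0.9, sigma=1e4):
#       z_far = imp_series(r_ct, imp_warburg(sigma, f))    # faradaic branch
#       z_int = imp_parallel(imp_cpe(f, n, c_dl), z_far)   # interface
#       return imp_series(r_s, z_int)
#   f = np.logspace(0, 5, num=200)
#   bode_plot(imp_randles(f), f, title='Randles cell')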
## Plots
def lissajous_fig(x, y, xlabel='', ylabel='', tle=''):
# Plot Lissajous Graph
# x -> x values
# y -> y values
# xlabel -> x-axis label
# ylabel -> y-axis label
# tle -> title of plot
plt.figure()
plt.plot(x, y)
plt.xlabel(xlabel)
plt.xlim(xmin=0)
plt.ylabel(ylabel)
plt.ylim(ymin=0)
plt.title(tle)
plt.grid(True, which='both')
plt.show(block = False)
return
def bode_plot(Z, f, title=''):
# Bode Plot for magnitude and phase in same plot
# Z -> impedance
# f -> frequency
# title -> title of plot
mag = np.log10(np.abs(Z))
phase = 180 / np.pi * np.angle(Z)
fig, ax1 = plt.subplots()
ax1.plot(np.log10(f), mag, color='tab:red')
ax1.set_xlabel(r'$log_{10}$ of Freq (Hz)')
ax1.set_ylabel(r'$log_{10}$ of |Z|', color='tab:red')
ax1.set_yticks(np.linspace(ax1.get_yticks()[0], ax1.get_yticks()[-1], \
len(ax1.get_yticks())))
ax1.tick_params(axis='y', labelcolor='tab:red')
ax1.grid()
ax2 = ax1.twinx() # second y-axis with same x-axis values
ax2.plot(np.log10(f), phase, color='tab:blue')
ax2.set_ylabel(r'Phase ($\phi$)', color='tab:blue')
ax2.set_yticks(np.linspace(ax2.get_yticks()[0], ax2.get_yticks()[-1], \
len(ax1.get_yticks())))
ax2.tick_params(axis='y', labelcolor='tab:blue')
ax2.grid()
plt.title(title)
#plt.grid(True, which='both')
plt.show(block = False)
return
def nyquist_plot(Z, xlabel='', ylabel='', tle=''):
# Nyquist Plot for given impedance
# Z -> impedance
# xlabel -> x-axis label
# ylabel -> y-axis label
# tle -> title of plot
Re = Z.real
negIm = -Z.imag
plt.figure()
plt.plot(Re, negIm, color='tab:red')
plt.xlabel(xlabel)
plt.xlim(xmin=0)
plt.ylabel(ylabel)
plt.ylim(ymin=0)
plt.title(tle)
plt.grid(True, which='both')
plt.show(block = False)
return | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.angle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"numpy.exp",
"numpy.cosh",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"ma... | [((4067, 4119), 'numpy.sqrt', 'np.sqrt', (['(eps_0 * eps_r * v_t / (2 * n0 * z ** 2 * q))'], {}), '(eps_0 * eps_r * v_t / (2 * n0 * z ** 2 * q))\n', (4074, 4119), True, 'import numpy as np\n'), ((5887, 5899), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5897, 5899), True, 'import matplotlib.pyplot as plt\n'), ((5904, 5918), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (5912, 5918), True, 'import matplotlib.pyplot as plt\n'), ((5923, 5941), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (5933, 5941), True, 'import matplotlib.pyplot as plt\n'), ((5946, 5962), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'xmin': '(0)'}), '(xmin=0)\n', (5954, 5962), True, 'import matplotlib.pyplot as plt\n'), ((5967, 5985), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (5977, 5985), True, 'import matplotlib.pyplot as plt\n'), ((5990, 6006), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)'}), '(ymin=0)\n', (5998, 6006), True, 'import matplotlib.pyplot as plt\n'), ((6011, 6025), 'matplotlib.pyplot.title', 'plt.title', (['tle'], {}), '(tle)\n', (6020, 6025), True, 'import matplotlib.pyplot as plt\n'), ((6030, 6058), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'which': '"""both"""'}), "(True, which='both')\n", (6038, 6058), True, 'import matplotlib.pyplot as plt\n'), ((6063, 6084), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (6071, 6084), True, 'import matplotlib.pyplot as plt\n'), ((6337, 6351), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6349, 6351), True, 'import matplotlib.pyplot as plt\n'), ((7084, 7100), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7093, 7100), True, 'import matplotlib.pyplot as plt\n'), ((7139, 7160), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (7147, 7160), True, 'import matplotlib.pyplot as plt\n'), ((7417, 7429), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7427, 7429), True, 'import matplotlib.pyplot as plt\n'), ((7434, 7470), 'matplotlib.pyplot.plot', 'plt.plot', (['Re', 'negIm'], {'color': '"""tab:red"""'}), "(Re, negIm, color='tab:red')\n", (7442, 7470), True, 'import matplotlib.pyplot as plt\n'), ((7475, 7493), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (7485, 7493), True, 'import matplotlib.pyplot as plt\n'), ((7498, 7514), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'xmin': '(0)'}), '(xmin=0)\n', (7506, 7514), True, 'import matplotlib.pyplot as plt\n'), ((7519, 7537), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (7529, 7537), True, 'import matplotlib.pyplot as plt\n'), ((7542, 7558), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)'}), '(ymin=0)\n', (7550, 7558), True, 'import matplotlib.pyplot as plt\n'), ((7563, 7577), 'matplotlib.pyplot.title', 'plt.title', (['tle'], {}), '(tle)\n', (7572, 7577), True, 'import matplotlib.pyplot as plt\n'), ((7582, 7610), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'which': '"""both"""'}), "(True, which='both')\n", (7590, 7610), True, 'import matplotlib.pyplot as plt\n'), ((7615, 7636), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (7623, 7636), True, 'import matplotlib.pyplot as plt\n'), ((4899, 4933), 'numpy.exp', 'np.exp', (['(-beta * F * V_eq / (R * T))'], {}), '(-beta * F * V_eq / (R * T))\n', (4905, 4933), True, 'import numpy as np\n'), ((6273, 6282), 'numpy.abs', 
'np.abs', (['Z'], {}), '(Z)\n', (6279, 6282), True, 'import numpy as np\n'), ((6310, 6321), 'numpy.angle', 'np.angle', (['Z'], {}), '(Z)\n', (6318, 6321), True, 'import numpy as np\n'), ((6366, 6377), 'numpy.log10', 'np.log10', (['f'], {}), '(f)\n', (6374, 6377), True, 'import numpy as np\n'), ((6782, 6793), 'numpy.log10', 'np.log10', (['f'], {}), '(f)\n', (6790, 6793), True, 'import numpy as np\n'), ((1035, 1060), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * freq)'], {}), '(2 * np.pi * freq)\n', (1042, 1060), True, 'import numpy as np\n'), ((3650, 3677), 'numpy.cosh', 'np.cosh', (['(0.5 * z * Vo / v_t)'], {}), '(0.5 * z * Vo / v_t)\n', (3657, 3677), True, 'import numpy as np\n'), ((1397, 1422), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * freq)'], {}), '(2 * np.pi * freq)\n', (1404, 1422), True, 'import numpy as np\n'), ((1461, 1505), 'numpy.sqrt', 'np.sqrt', (['(1.0j * 2 * np.pi * freq / diff_coef)'], {}), '(1.0j * 2 * np.pi * freq / diff_coef)\n', (1468, 1505), True, 'import numpy as np\n'), ((2004, 2015), 'numpy.sqrt', 'np.sqrt', (['Do'], {}), '(Do)\n', (2011, 2015), True, 'import numpy as np\n'), ((2026, 2037), 'numpy.sqrt', 'np.sqrt', (['Dr'], {}), '(Dr)\n', (2033, 2037), True, 'import numpy as np\n'), ((1964, 1974), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1971, 1974), True, 'import numpy as np\n')] |
import numpy as np
import sys
import math
import pickle
import os
import matplotlib
matplotlib.use('Agg')
import skimage.io as skio
from skimage.transform import resize
from bell2014.params import IntrinsicParameters
from bell2014.solver import IntrinsicSolver
from bell2014.input import IntrinsicInput
from bell2014 import image_util
from bell2014.krahenbuhl2013.krahenbuhl2013 import DenseCRF
channel_mean = np.array([104, 117, 123])
def get_context_feat(im, net):
""" Extract the context feature of the whole image """
ct_size = net.blobs['context'].data.shape[2]
ct_im = resize(im, (ct_size, ct_size))
ct_im = ct_im.transpose([2, 0, 1])[::-1] - channel_mean[:, None, None]
net.blobs['context'].data[0] = ct_im
net._forward(list(net._layer_names).index('Convolution9'),
list(net._layer_names).index('Convolution12'))
context_feat = np.copy(net.blobs['Convolution12'].data[0])
return context_feat
def get_local_feat(im, net):
""" Extract dense conv4 features """
h = im.shape[0]
w = im.shape[1]
    # Half of the local patch size, (63 - 1)/2; needed to pad the image
    # boundaries
hps = 31
im = np.lib.pad(im, ((hps, hps), (hps, hps), (0,0)), 'symmetric')
rem_y = (im.shape[0]) % 8
rem_x = (im.shape[1]) % 8
if rem_y != 0:
pad_y = 8 - rem_y
else:
pad_y = 0
if rem_x != 0:
pad_x = 8 - rem_x
else:
pad_x = 0
padim = np.lib.pad(im, ((0, pad_y), (0, pad_x), (0,0)), 'symmetric')
padim = padim.transpose([2,0,1])[::-1] - channel_mean[:,None,None]
net.blobs['input'].reshape(1, 3, padim.shape[1], padim.shape[2])
net.blobs['input'].data[...] = np.reshape(padim, (1, 3, padim.shape[1], padim.shape[2]))
net._forward(1, len(net.layers)-1)
feat = np.copy(net.blobs['DenseFeat'].data)
assert feat.shape[2] == padim.shape[1] and feat.shape[3] == padim.shape[2], \
'ERROR! REPACK FEATURE SHAPE DOES NOT MATCH!'
feat = feat[0, :, hps:h+hps, hps:w+hps]
return feat
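# Note: the rem/pad logic above pads each spatial dimension up to a multiple
# of 8 (assuming the net's total stride is 8); an equivalent compact form is
#   pad_y = (-im.shape[0]) % 8
#   pad_x = (-im.shape[1]) % 8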
def sample_points(im, ns_sample):
""" Divide the image into grids and sample pixels at the grid points.
The number of returned samples might be less than what is specified by
ns_sample """
h = im.shape[0]
w = im.shape[1]
asp_ratio = float(h)/w
col_grids = math.floor(math.sqrt(ns_sample/asp_ratio))
row_grids = math.floor(ns_sample/col_grids)
col_space = w/col_grids
row_space = h/row_grids
sx = range(int(col_space/2), w, int(col_space))
sy = range(int(row_space/2), h, int(row_space))
sx, sy = np.meshgrid(sx, sy,sparse=False, indexing='xy')
sx = sx.flatten()
sy = sy.flatten()
return sx, sy
def nystrom(C, si):
"""
Input:
C - sampled rows of the full comparison matrix
si - indices of the sampled rows
Output:
W_ns - nystrom approximation of the full comparison matrix.
The full matrix W = W_ns' * W_ns
"""
D = C[:,si]
E,V = np.linalg.eigh(D)
L = (V[:,E>1e-3]/(np.sqrt(E[None,E>1e-3])))
W_ns = L.T.dot(C)
return W_ns
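# Sanity-check sketch for the factorization above (not called by the pipeline):
# for a PSD matrix W = X X^T whose rank is at most the number of sampled rows,
# the factor should satisfy W ~= W_ns.T.dot(W_ns) up to rounding.
def nystrom_self_test(n=20, r=3, m=6, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n, r)
    W = X.dot(X.T)
    si = np.arange(m)
    W_ns = nystrom(W[si, :], si)
    # Maximum reconstruction error; should be ~1e-12 for these sizes
    return np.abs(W - W_ns.T.dot(W_ns)).max()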
def nystrom_single_image(image_file, feat_net, rref_net, ns_sample=64, sp_sigma=1e-5):
"""
Input:
image_file - file path to the input image
feat_net - caffe net for extracting dense local features
rref_net - caffe net for relative reflectance judgment
ns_sample - number of rows to sample for nystrom approximation
sp_sigma - standard deviation for distance weighting of nystrom
approximation
Output:
W_ns - nystrom approximation of the pairwise comparison matrix.
"""
im = skio.imread(image_file)
h = im.shape[0]
w = im.shape[1]
npix = h * w
sx, sy = sample_points(im/255.0, ns_sample)
ns_sample = len(sx)
# Fill in the blobs with extracted features
local_feat = get_local_feat(im, feat_net)
rref_net.blobs['Convolution4'].reshape(w*h, local_feat.shape[0], 1, 1)
rref_net.blobs['Convolution8'].reshape(w*h, local_feat.shape[0], 1, 1)
rref_net.blobs['Convolution8'].data[...] = local_feat.reshape((local_feat.shape[0], -1)).T[:,:,None,None]
gx, gy = np.meshgrid(range(w), range(h), sparse=False, indexing='xy')
pix_coords = np.zeros((npix, 2))
pix_coords[:, 0] = gx.flatten()
pix_coords[:, 1] = gy.flatten()
rref_net.blobs['coords'].reshape(w*h, 4, 1, 1)
rref_net.blobs['coords'].data[:,2,0,0] = gx.flatten()/w
rref_net.blobs['coords'].data[:,3,0,0] = gy.flatten()/h
context_feat = get_context_feat(im, rref_net)
rref_net.blobs['Convolution12'].reshape(w*h, context_feat.shape[0], 1, 1)
rref_net.blobs['Convolution12'].data[None,:,:,:] = context_feat
# Sampled rows of the full comparison matrix
sample_mat = np.zeros((2*ns_sample, h*w*2))
# Distance weighting matrix
dist_mat = np.zeros((2*ns_sample, h*w*2))
# For each sampled pixel, predict its relative reflectance against all other pixels
for p in range(ns_sample):
x = sx[p]
y = sy[p]
rref_net.blobs['Convolution4'].data[...] = local_feat[:,y,x][None,:,None,None]
rref_net.blobs['coords'].data[:,0,0,0] = float(x)/w
rref_net.blobs['coords'].data[:,1,0,0] = float(y)/h
rref_net._forward(list(rref_net._layer_names).index('ConcatAll'), len(rref_net.layers)-1)
scores = rref_net.blobs['pred_sm'].data[:,:].reshape((h*w, 3)).T
# Block ordering: [w=, w>; w<, w=]
sample_mat[2*p, ::2] = scores[0,:]
sample_mat[2*p+1, 1::2] = scores[0,:]
sample_mat[2*p, 1::2] = scores[2,:]
sample_mat[2*p+1, ::2] = scores[1,:]
xy = np.array([x, y])
# Spatial distance weights that are later applied to the sampled comparison matrix.
# The classifier is less reliable in judging pairs that are spatially far from each
# other, since the distant pairwise judgment is augmented instead of being labeled
# when training the network. Thus, less weight is enforced on distant pairs for
# nystrom approximation.
dist = np.sum((xy[None,:] - pix_coords)**2, axis=1)
dist_mat[2*p, ::2] = dist
dist_mat[2*p+1, 1::2] = dist
dist_mat[2*p, 1::2] = dist
dist_mat[2*p+1, ::2] = dist
# Do Nystrom
sample_inds = np.zeros((2*ns_sample))
sample_inds[::2] = 2*(sx + sy*w)
sample_inds[1::2] = 2*(sx + sy*w)+1
sample_inds = sample_inds.astype(int)
# Symmetrize the sample matrix
sample_mat[:,sample_inds] = 0.5 * sample_mat[:,sample_inds] + \
0.5 * sample_mat[:,sample_inds].T
    # Empirically we found that multiplying the sample matrix by a
    # small value (e.g. 0.01) makes the Nystrom approximation slightly more stable.
sample_mat = 0.01 * sample_mat * (np.exp(-sp_sigma*dist_mat))
W_ns = nystrom(sample_mat, sample_inds)
return W_ns
def decompose_single_image(image_file, feat_net, rref_net, nystrom_file=None, srgb=True,
save_reflectance_file=None, save_shading_file=None):
"""
Input:
image_file - file path to the input image
feat_net - caffe net for extracting dense local features
rref_net - caffe net for relative reflectance judgment
Output:
Estimated reflectance and shading layers of the input image
"""
if nystrom_file is not None:
W_ns = pickle.load(open(nystrom_file, 'rb'))
else:
W_ns = nystrom_single_image(image_file, feat_net, rref_net)
input = IntrinsicInput.from_file(image_file, image_is_srgb=srgb)
params = IntrinsicParameters()
solver = IntrinsicSolver(input, params, W_ns)
reflectance, shading, decomposition = solver.solve()
if save_reflectance_file is not None:
image_util.save(save_reflectance_file, reflectance, rescale=True, srgb=srgb)
if save_shading_file is not None:
image_util.save(save_shading_file, shading, rescale=True, srgb=srgb)
return reflectance, shading | [
"numpy.sum",
"bell2014.input.IntrinsicInput.from_file",
"skimage.transform.resize",
"numpy.exp",
"numpy.lib.pad",
"numpy.meshgrid",
"numpy.copy",
"numpy.reshape",
"bell2014.solver.IntrinsicSolver",
"skimage.io.imread",
"math.sqrt",
"matplotlib.use",
"bell2014.image_util.save",
"math.floor"... | [((85, 106), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (99, 106), False, 'import matplotlib\n'), ((413, 438), 'numpy.array', 'np.array', (['[104, 117, 123]'], {}), '([104, 117, 123])\n', (421, 438), True, 'import numpy as np\n'), ((591, 621), 'skimage.transform.resize', 'resize', (['im', '(ct_size, ct_size)'], {}), '(im, (ct_size, ct_size))\n', (597, 621), False, 'from skimage.transform import resize\n'), ((890, 933), 'numpy.copy', 'np.copy', (["net.blobs['Convolution12'].data[0]"], {}), "(net.blobs['Convolution12'].data[0])\n", (897, 933), True, 'import numpy as np\n'), ((1182, 1243), 'numpy.lib.pad', 'np.lib.pad', (['im', '((hps, hps), (hps, hps), (0, 0))', '"""symmetric"""'], {}), "(im, ((hps, hps), (hps, hps), (0, 0)), 'symmetric')\n", (1192, 1243), True, 'import numpy as np\n'), ((1461, 1522), 'numpy.lib.pad', 'np.lib.pad', (['im', '((0, pad_y), (0, pad_x), (0, 0))', '"""symmetric"""'], {}), "(im, ((0, pad_y), (0, pad_x), (0, 0)), 'symmetric')\n", (1471, 1522), True, 'import numpy as np\n'), ((1697, 1754), 'numpy.reshape', 'np.reshape', (['padim', '(1, 3, padim.shape[1], padim.shape[2])'], {}), '(padim, (1, 3, padim.shape[1], padim.shape[2]))\n', (1707, 1754), True, 'import numpy as np\n'), ((1805, 1841), 'numpy.copy', 'np.copy', (["net.blobs['DenseFeat'].data"], {}), "(net.blobs['DenseFeat'].data)\n", (1812, 1841), True, 'import numpy as np\n'), ((2379, 2412), 'math.floor', 'math.floor', (['(ns_sample / col_grids)'], {}), '(ns_sample / col_grids)\n', (2389, 2412), False, 'import math\n'), ((2584, 2632), 'numpy.meshgrid', 'np.meshgrid', (['sx', 'sy'], {'sparse': '(False)', 'indexing': '"""xy"""'}), "(sx, sy, sparse=False, indexing='xy')\n", (2595, 2632), True, 'import numpy as np\n'), ((3015, 3032), 'numpy.linalg.eigh', 'np.linalg.eigh', (['D'], {}), '(D)\n', (3029, 3032), True, 'import numpy as np\n'), ((3712, 3735), 'skimage.io.imread', 'skio.imread', (['image_file'], {}), '(image_file)\n', (3723, 3735), True, 'import skimage.io as skio\n'), ((4311, 4330), 'numpy.zeros', 'np.zeros', (['(npix, 2)'], {}), '((npix, 2))\n', (4319, 4330), True, 'import numpy as np\n'), ((4836, 4872), 'numpy.zeros', 'np.zeros', (['(2 * ns_sample, h * w * 2)'], {}), '((2 * ns_sample, h * w * 2))\n', (4844, 4872), True, 'import numpy as np\n'), ((4914, 4950), 'numpy.zeros', 'np.zeros', (['(2 * ns_sample, h * w * 2)'], {}), '((2 * ns_sample, h * w * 2))\n', (4922, 4950), True, 'import numpy as np\n'), ((6363, 6386), 'numpy.zeros', 'np.zeros', (['(2 * ns_sample)'], {}), '(2 * ns_sample)\n', (6371, 6386), True, 'import numpy as np\n'), ((7552, 7608), 'bell2014.input.IntrinsicInput.from_file', 'IntrinsicInput.from_file', (['image_file'], {'image_is_srgb': 'srgb'}), '(image_file, image_is_srgb=srgb)\n', (7576, 7608), False, 'from bell2014.input import IntrinsicInput\n'), ((7622, 7643), 'bell2014.params.IntrinsicParameters', 'IntrinsicParameters', ([], {}), '()\n', (7641, 7643), False, 'from bell2014.params import IntrinsicParameters\n'), ((7657, 7693), 'bell2014.solver.IntrinsicSolver', 'IntrinsicSolver', (['input', 'params', 'W_ns'], {}), '(input, params, W_ns)\n', (7672, 7693), False, 'from bell2014.solver import IntrinsicSolver\n'), ((2331, 2363), 'math.sqrt', 'math.sqrt', (['(ns_sample / asp_ratio)'], {}), '(ns_sample / asp_ratio)\n', (2340, 2363), False, 'import math\n'), ((3055, 3082), 'numpy.sqrt', 'np.sqrt', (['E[None, E > 0.001]'], {}), '(E[None, E > 0.001])\n', (3062, 3082), True, 'import numpy as np\n'), ((5712, 5728), 'numpy.array', 'np.array', (['[x, 
y]'], {}), '([x, y])\n', (5720, 5728), True, 'import numpy as np\n'), ((6141, 6188), 'numpy.sum', 'np.sum', (['((xy[None, :] - pix_coords) ** 2)'], {'axis': '(1)'}), '((xy[None, :] - pix_coords) ** 2, axis=1)\n', (6147, 6188), True, 'import numpy as np\n'), ((6830, 6858), 'numpy.exp', 'np.exp', (['(-sp_sigma * dist_mat)'], {}), '(-sp_sigma * dist_mat)\n', (6836, 6858), True, 'import numpy as np\n'), ((7801, 7877), 'bell2014.image_util.save', 'image_util.save', (['save_reflectance_file', 'reflectance'], {'rescale': '(True)', 'srgb': 'srgb'}), '(save_reflectance_file, reflectance, rescale=True, srgb=srgb)\n', (7816, 7877), False, 'from bell2014 import image_util\n'), ((7924, 7992), 'bell2014.image_util.save', 'image_util.save', (['save_shading_file', 'shading'], {'rescale': '(True)', 'srgb': 'srgb'}), '(save_shading_file, shading, rescale=True, srgb=srgb)\n', (7939, 7992), False, 'from bell2014 import image_util\n')] |
"""Covid analysis utility functions"""
import yaml
import numpy as np
import pandas as pd
import h5py
import xarray
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import dtype_util
tfd = tfp.distributions
tfs = tfp.stats
def copy_nc_attrs(src, dest):
"""Copies dataset attributes between two NetCDF datasets"""
with xarray.open_dataset(src) as s:
attrs = s.attrs
# Write empty root dataset with attributes
ds = xarray.Dataset(attrs=attrs)
ds.to_netcdf(dest, mode="a")
def load_config(config_filename):
with open(config_filename, "r") as f:
return yaml.load(f, Loader=yaml.FullLoader)
def sanitise_parameter(par_dict):
"""Sanitises a dictionary of parameters"""
d = {key: np.float64(val) for key, val in par_dict.items()}
return d
def sanitise_settings(par_dict):
d = {
"inference_period": np.array(
par_dict["inference_period"], dtype=np.datetime64
),
"prediction_period": np.array(
par_dict["prediction_period"], dtype=np.datetime64
),
"time_step": float(par_dict["time_step"]),
"holiday": np.array(
[np.datetime64(date) for date in par_dict["holiday"]]
),
"lockdown": np.array(
[np.datetime64(date) for date in par_dict["lockdown"]]
),
}
return d
@tf.function
def generate_case_numbers(n, rate):
dtype = dtype_util.common_dtype([n, rate], dtype_hint=tf.float64)
n = tf.convert_to_tensor(n, dtype=dtype)
rate = tf.convert_to_tensor(rate, dtype=dtype)
def cond(n_, i_, accum_):
return tf.greater(tf.reduce_sum(n_), tf.constant(0.0, dtype=dtype))
def body(n_, i_, accum_):
new_n = tfd.Binomial(
n_, probs=tf.constant(1.0, dtype=dtype) - tf.math.exp(-rate)
).sample()
accum_ = accum_.write(i_, new_n)
return n_ - new_n, i_ + 1, accum_
accum = tf.TensorArray(dtype=n.dtype, size=20, dynamic_size=True)
n, i, accum = tf.while_loop(cond, body, (n, 0, accum))
return accum.gather(tf.range(i))
def squared_jumping_distance(chain):
diff = chain[1:] - chain[:-1]
cumdiff = np.cumsum(diff, axis=-1)
sqjumpdist = np.sum(cumdiff, axis=-1) ** 2
return sqjumpdist
def p_null(results):
accepted = results[:, 1] == 1.0
pnull = np.mean(results[accepted, 2:].sum(axis=-1) == 0)
return pnull
def jump_summary(posterior_file):
f = h5py.File(posterior_file, "r")
# SJD
sjd_se = squared_jumping_distance(f["samples/events"][..., 0])
sjd_ei = squared_jumping_distance(f["samples/events"][..., 1])
# Acceptance
accept_se = np.mean(f["acceptance/S->E"][:, 1])
accept_ei = np.mean(f["acceptance/E->I"][:, 1])
# Pr(null move | accepted)
p_null_se = p_null(f["acceptance/S->E"])
p_null_ei = p_null(f["acceptance/E->I"])
f.close()
return {
"S->E": {
"sjd": np.mean(sjd_se),
"accept": accept_se,
"p_null": p_null_se,
},
"E->I": {
"sjd": np.mean(sjd_ei),
"accept": accept_ei,
"p_null": p_null_ei,
},
}
def distribute_geom(events, rate, delta_t=1.0):
"""Given a tensor `events`, returns a tensor of shape `events.shape + [t]`
representing the events distributed over a number of days given geometric
waiting times with rate `1-exp(-rate*delta_t)`"""
events = tf.convert_to_tensor(events)
rate = tf.convert_to_tensor(rate, dtype=events.dtype)
accum = tf.TensorArray(events.dtype, size=0, dynamic_size=True)
prob = 1.0 - tf.exp(-rate * delta_t)
def body(i, events_, accum_):
rv = tfd.Binomial(total_count=events_, probs=prob)
failures = rv.sample()
accum_ = accum_.write(i, failures)
i += 1
return i, events_ - failures, accum_
def cond(_1, events_, _2):
res = tf.reduce_sum(events_) > tf.constant(0, dtype=events.dtype)
return res
_1, _2, accum = tf.while_loop(cond, body, loop_vars=[1, events, accum])
accum = accum.stack()
return tf.transpose(accum, perm=(1, 0, 2))
def reduce_diagonals(m):
def fn(m_):
idx = (
tf.range(m_.shape[-1])
- tf.range(m_.shape[-2])[:, tf.newaxis]
+ m_.shape[-2]
- 1
)
idx = tf.expand_dims(idx, axis=-1)
return tf.scatter_nd(idx, m_, [m_.shape[-2] + m_.shape[-1] - 1])
return tf.vectorized_map(fn, m)
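# Cross-check (a sketch): the scatter index above is j - i + rows - 1, so each
# output entry sums one diagonal (constant j - i) of the input. For a single
# [rows, cols] matrix M this matches
#   np.array([np.trace(M, offset=k) for k in range(-(rows - 1), cols)])
# e.g. reduce_diagonals(tf.ones((1, 2, 3))) -> [[1., 2., 2., 1.]]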
def impute_previous_cases(events, rate, delta_t=1.0):
"""Imputes previous numbers of cases by using a geometric distribution
:param events: a [M, T] tensor
:param rate: the failure rate per `delta_t`
:param delta_t: the size of the time step
:returns: a tuple containing the matrix of events and the maximum
number of timesteps into the past to allow padding of `events`.
"""
prev_case_distn = distribute_geom(events, rate, delta_t)
prev_cases = reduce_diagonals(prev_case_distn)
# Trim preceding zero days
total_events = tf.reduce_sum(prev_cases, axis=-2)
num_zero_days = total_events.shape[-1] - tf.math.count_nonzero(
tf.cumsum(total_events, axis=-1)
)
return (
prev_cases[..., num_zero_days:],
prev_case_distn.shape[-2] - num_zero_days,
)
def mean_sojourn(in_events, out_events, init_state):
"""Calculated the mean sojourn time for individuals in a state
within `in_events` and `out_events` given initial state `init_state`"""
# state.shape = [..., M, T]
state = (
tf.cumsum(in_events - out_events, axis=-1, exclusive=True) + init_state
)
state = tf.reduce_sum(state, axis=(-2, -1))
events = tf.reduce_sum(out_events, axis=(-2, -1))
return 1.0 + state / events
def regularize_occults(events, occults, init_state, stoichiometry):
"""Regularizes an occult matrix such that counting
processes are valid
:param events: a [M, T, X] events tensor
:param occults: a [M, T, X] occults tensor
:param init_state: a [M, S] initial state tensor
:param stoichiometry: a [X, S] stoichiometry matrix
    :returns: a tuple containing updated (state, occults) tensors
"""
from gemlib.util import compute_state
def body(state_, occults_):
state_t1 = tf.roll(state_, shift=-1, axis=-2)
neg_state_idx = tf.where(state_t1 < 0)
first_neg_state_idx = tf.gather(
neg_state_idx,
tf.concat(
[
[[0]],
tf.where(neg_state_idx[:-1, 0] - neg_state_idx[1:, 0]) + 1,
],
axis=0,
),
)
mask = tf.scatter_nd(
first_neg_state_idx,
tf.ones([first_neg_state_idx.shape[0], 1], dtype=state_t1.dtype),
state_t1.shape,
)
delta_occults = tf.einsum("mts,xs->mtx", state_t1 * mask, stoichiometry)
new_occults = tf.clip_by_value(
occults_ - delta_occults, clip_value_min=0.0, clip_value_max=1.0e6
)
new_state = compute_state(
init_state, events + new_occults, stoichiometry
)
return new_state, new_occults
def cond(state_, _):
return tf.reduce_any(state_ < 0)
state = compute_state(init_state, events + occults, stoichiometry)
new_state, new_occults = tf.while_loop(cond, body, (state, occults))
return new_state, new_occults
| [
"tensorflow.einsum",
"yaml.load",
"tensorflow.reduce_sum",
"numpy.sum",
"tensorflow.clip_by_value",
"tensorflow.cumsum",
"numpy.mean",
"gemlib.util.compute_state",
"numpy.float64",
"tensorflow.scatter_nd",
"tensorflow_probability.python.internal.dtype_util.common_dtype",
"tensorflow.roll",
"... | [((497, 524), 'xarray.Dataset', 'xarray.Dataset', ([], {'attrs': 'attrs'}), '(attrs=attrs)\n', (511, 524), False, 'import xarray\n'), ((1465, 1522), 'tensorflow_probability.python.internal.dtype_util.common_dtype', 'dtype_util.common_dtype', (['[n, rate]'], {'dtype_hint': 'tf.float64'}), '([n, rate], dtype_hint=tf.float64)\n', (1488, 1522), False, 'from tensorflow_probability.python.internal import dtype_util\n'), ((1531, 1567), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['n'], {'dtype': 'dtype'}), '(n, dtype=dtype)\n', (1551, 1567), True, 'import tensorflow as tf\n'), ((1579, 1618), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['rate'], {'dtype': 'dtype'}), '(rate, dtype=dtype)\n', (1599, 1618), True, 'import tensorflow as tf\n'), ((1975, 2032), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'n.dtype', 'size': '(20)', 'dynamic_size': '(True)'}), '(dtype=n.dtype, size=20, dynamic_size=True)\n', (1989, 2032), True, 'import tensorflow as tf\n'), ((2051, 2091), 'tensorflow.while_loop', 'tf.while_loop', (['cond', 'body', '(n, 0, accum)'], {}), '(cond, body, (n, 0, accum))\n', (2064, 2091), True, 'import tensorflow as tf\n'), ((2216, 2240), 'numpy.cumsum', 'np.cumsum', (['diff'], {'axis': '(-1)'}), '(diff, axis=-1)\n', (2225, 2240), True, 'import numpy as np\n'), ((2491, 2521), 'h5py.File', 'h5py.File', (['posterior_file', '"""r"""'], {}), "(posterior_file, 'r')\n", (2500, 2521), False, 'import h5py\n'), ((2701, 2736), 'numpy.mean', 'np.mean', (["f['acceptance/S->E'][:, 1]"], {}), "(f['acceptance/S->E'][:, 1])\n", (2708, 2736), True, 'import numpy as np\n'), ((2753, 2788), 'numpy.mean', 'np.mean', (["f['acceptance/E->I'][:, 1]"], {}), "(f['acceptance/E->I'][:, 1])\n", (2760, 2788), True, 'import numpy as np\n'), ((3482, 3510), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['events'], {}), '(events)\n', (3502, 3510), True, 'import tensorflow as tf\n'), ((3522, 3568), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['rate'], {'dtype': 'events.dtype'}), '(rate, dtype=events.dtype)\n', (3542, 3568), True, 'import tensorflow as tf\n'), ((3582, 3637), 'tensorflow.TensorArray', 'tf.TensorArray', (['events.dtype'], {'size': '(0)', 'dynamic_size': '(True)'}), '(events.dtype, size=0, dynamic_size=True)\n', (3596, 3637), True, 'import tensorflow as tf\n'), ((4053, 4108), 'tensorflow.while_loop', 'tf.while_loop', (['cond', 'body'], {'loop_vars': '[1, events, accum]'}), '(cond, body, loop_vars=[1, events, accum])\n', (4066, 4108), True, 'import tensorflow as tf\n'), ((4147, 4182), 'tensorflow.transpose', 'tf.transpose', (['accum'], {'perm': '(1, 0, 2)'}), '(accum, perm=(1, 0, 2))\n', (4159, 4182), True, 'import tensorflow as tf\n'), ((4510, 4534), 'tensorflow.vectorized_map', 'tf.vectorized_map', (['fn', 'm'], {}), '(fn, m)\n', (4527, 4534), True, 'import tensorflow as tf\n'), ((5115, 5149), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['prev_cases'], {'axis': '(-2)'}), '(prev_cases, axis=-2)\n', (5128, 5149), True, 'import tensorflow as tf\n'), ((5719, 5754), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['state'], {'axis': '(-2, -1)'}), '(state, axis=(-2, -1))\n', (5732, 5754), True, 'import tensorflow as tf\n'), ((5768, 5808), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['out_events'], {'axis': '(-2, -1)'}), '(out_events, axis=(-2, -1))\n', (5781, 5808), True, 'import tensorflow as tf\n'), ((7343, 7401), 'gemlib.util.compute_state', 'compute_state', (['init_state', '(events + occults)', 'stoichiometry'], {}), '(init_state, events + occults, 
stoichiometry)\n', (7356, 7401), False, 'from gemlib.util import compute_state\n'), ((7431, 7474), 'tensorflow.while_loop', 'tf.while_loop', (['cond', 'body', '(state, occults)'], {}), '(cond, body, (state, occults))\n', (7444, 7474), True, 'import tensorflow as tf\n'), ((386, 410), 'xarray.open_dataset', 'xarray.open_dataset', (['src'], {}), '(src)\n', (405, 410), False, 'import xarray\n'), ((651, 687), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (660, 687), False, 'import yaml\n'), ((785, 800), 'numpy.float64', 'np.float64', (['val'], {}), '(val)\n', (795, 800), True, 'import numpy as np\n'), ((921, 980), 'numpy.array', 'np.array', (["par_dict['inference_period']"], {'dtype': 'np.datetime64'}), "(par_dict['inference_period'], dtype=np.datetime64)\n", (929, 980), True, 'import numpy as np\n'), ((1033, 1093), 'numpy.array', 'np.array', (["par_dict['prediction_period']"], {'dtype': 'np.datetime64'}), "(par_dict['prediction_period'], dtype=np.datetime64)\n", (1041, 1093), True, 'import numpy as np\n'), ((2116, 2127), 'tensorflow.range', 'tf.range', (['i'], {}), '(i)\n', (2124, 2127), True, 'import tensorflow as tf\n'), ((2258, 2282), 'numpy.sum', 'np.sum', (['cumdiff'], {'axis': '(-1)'}), '(cumdiff, axis=-1)\n', (2264, 2282), True, 'import numpy as np\n'), ((3655, 3678), 'tensorflow.exp', 'tf.exp', (['(-rate * delta_t)'], {}), '(-rate * delta_t)\n', (3661, 3678), True, 'import tensorflow as tf\n'), ((4396, 4424), 'tensorflow.expand_dims', 'tf.expand_dims', (['idx'], {'axis': '(-1)'}), '(idx, axis=-1)\n', (4410, 4424), True, 'import tensorflow as tf\n'), ((4440, 4497), 'tensorflow.scatter_nd', 'tf.scatter_nd', (['idx', 'm_', '[m_.shape[-2] + m_.shape[-1] - 1]'], {}), '(idx, m_, [m_.shape[-2] + m_.shape[-1] - 1])\n', (4453, 4497), True, 'import tensorflow as tf\n'), ((5629, 5687), 'tensorflow.cumsum', 'tf.cumsum', (['(in_events - out_events)'], {'axis': '(-1)', 'exclusive': '(True)'}), '(in_events - out_events, axis=-1, exclusive=True)\n', (5638, 5687), True, 'import tensorflow as tf\n'), ((6363, 6397), 'tensorflow.roll', 'tf.roll', (['state_'], {'shift': '(-1)', 'axis': '(-2)'}), '(state_, shift=-1, axis=-2)\n', (6370, 6397), True, 'import tensorflow as tf\n'), ((6422, 6444), 'tensorflow.where', 'tf.where', (['(state_t1 < 0)'], {}), '(state_t1 < 0)\n', (6430, 6444), True, 'import tensorflow as tf\n'), ((6934, 6990), 'tensorflow.einsum', 'tf.einsum', (['"""mts,xs->mtx"""', '(state_t1 * mask)', 'stoichiometry'], {}), "('mts,xs->mtx', state_t1 * mask, stoichiometry)\n", (6943, 6990), True, 'import tensorflow as tf\n'), ((7013, 7105), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(occults_ - delta_occults)'], {'clip_value_min': '(0.0)', 'clip_value_max': '(1000000.0)'}), '(occults_ - delta_occults, clip_value_min=0.0,\n clip_value_max=1000000.0)\n', (7029, 7105), True, 'import tensorflow as tf\n'), ((7140, 7202), 'gemlib.util.compute_state', 'compute_state', (['init_state', '(events + new_occults)', 'stoichiometry'], {}), '(init_state, events + new_occults, stoichiometry)\n', (7153, 7202), False, 'from gemlib.util import compute_state\n'), ((7304, 7329), 'tensorflow.reduce_any', 'tf.reduce_any', (['(state_ < 0)'], {}), '(state_ < 0)\n', (7317, 7329), True, 'import tensorflow as tf\n'), ((1676, 1693), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['n_'], {}), '(n_)\n', (1689, 1693), True, 'import tensorflow as tf\n'), ((1695, 1724), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'dtype'}), '(0.0, dtype=dtype)\n', (1706, 1724), 
True, 'import tensorflow as tf\n'), ((2976, 2991), 'numpy.mean', 'np.mean', (['sjd_se'], {}), '(sjd_se)\n', (2983, 2991), True, 'import numpy as np\n'), ((3107, 3122), 'numpy.mean', 'np.mean', (['sjd_ei'], {}), '(sjd_ei)\n', (3114, 3122), True, 'import numpy as np\n'), ((3953, 3975), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['events_'], {}), '(events_)\n', (3966, 3975), True, 'import tensorflow as tf\n'), ((3978, 4012), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'events.dtype'}), '(0, dtype=events.dtype)\n', (3989, 4012), True, 'import tensorflow as tf\n'), ((5226, 5258), 'tensorflow.cumsum', 'tf.cumsum', (['total_events'], {'axis': '(-1)'}), '(total_events, axis=-1)\n', (5235, 5258), True, 'import tensorflow as tf\n'), ((6806, 6870), 'tensorflow.ones', 'tf.ones', (['[first_neg_state_idx.shape[0], 1]'], {'dtype': 'state_t1.dtype'}), '([first_neg_state_idx.shape[0], 1], dtype=state_t1.dtype)\n', (6813, 6870), True, 'import tensorflow as tf\n'), ((1210, 1229), 'numpy.datetime64', 'np.datetime64', (['date'], {}), '(date)\n', (1223, 1229), True, 'import numpy as np\n'), ((1317, 1336), 'numpy.datetime64', 'np.datetime64', (['date'], {}), '(date)\n', (1330, 1336), True, 'import numpy as np\n'), ((4254, 4276), 'tensorflow.range', 'tf.range', (['m_.shape[-1]'], {}), '(m_.shape[-1])\n', (4262, 4276), True, 'import tensorflow as tf\n'), ((4291, 4313), 'tensorflow.range', 'tf.range', (['m_.shape[-2]'], {}), '(m_.shape[-2])\n', (4299, 4313), True, 'import tensorflow as tf\n'), ((6602, 6656), 'tensorflow.where', 'tf.where', (['(neg_state_idx[:-1, 0] - neg_state_idx[1:, 0])'], {}), '(neg_state_idx[:-1, 0] - neg_state_idx[1:, 0])\n', (6610, 6656), True, 'import tensorflow as tf\n'), ((1809, 1838), 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'dtype': 'dtype'}), '(1.0, dtype=dtype)\n', (1820, 1838), True, 'import tensorflow as tf\n'), ((1841, 1859), 'tensorflow.math.exp', 'tf.math.exp', (['(-rate)'], {}), '(-rate)\n', (1852, 1859), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
import os
import warnings
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.patches import Ellipse
import rosparam
plt.style.use("seaborn-talk")
matplotlib.rcParams["pdf.fonttype"] = 42
matplotlib.rcParams["ps.fonttype"] = 42
"""
This script loads logs from the ROS LOG_DIR that are produced by
em_exploration_server, then plots the occupancy map with virtual landmarks.
"""
LOG_DIR = os.path.expanduser("~/.ros/")
params = rosparam.load_file(LOG_DIR + "em_server.yaml")
# params = params[0][0]["bruce"]
pose_nstd = 1
# sigma0 = params["exploration"]["server"]["virtual_map"]["sigma0"]
sigma0 = 3.0
# resolution = params["exploration"]["server"]["virtual_map"]["resolution"]
resolution = 2.0
# Ellipse will fill the entire cell.
vm_nstd = resolution / 2.0 / sigma0
# icp noise model (sigma x when p(z) = 1.0)
# sx0 = params["slam"]["icp_odom_sigmas"][0]
sx0 = 0.1
def plot_cov_ellipse(pos, cov, nstd=2, ax=None, **kwargs):
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:, order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
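# The ellipse is the nstd-sigma contour of a 2x2 covariance: the eigenvalues give
# the squared semi-axes and the leading eigenvector the rotation angle. For example
# (hypothetical values), plot_cov_ellipse((0, 0), np.diag([4.0, 1.0]), nstd=2)
# draws an axis-aligned ellipse of width 8 and height 4.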
def load_states(step):
bs = []
vm = []
graph = []
for i in range(100000):
if not os.path.exists(LOG_DIR + "em-step-{}-vm{}.csv".format(step, i)):
break
bs_i = np.loadtxt(LOG_DIR + "em-step-{}-bs{}.csv".format(step, i))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
graph_i = np.loadtxt(
LOG_DIR + "em-step-{}-graph{}.csv".format(step, i), ndmin=2
)
f_vm = open(LOG_DIR + "em-step-{}-vm{}.csv".format(step, i))
header = np.loadtxt(f_vm, max_rows=1)
vm_i = {"extent": header[:4]}
n_rows = int(header[4])
while True:
if f_vm.tell() == os.fstat(f_vm.fileno()).st_size:
break
            layer = str(np.loadtxt(f_vm, max_rows=1, dtype=str))  # .tostring() is deprecated and yields bytes on Python 3
vm_i[layer] = np.loadtxt(f_vm, max_rows=n_rows)
bs.append(bs_i)
vm.append(vm_i)
graph.append(graph_i)
return bs, vm, graph
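# load_states returns parallel lists indexed by candidate plan i:
#   bs[i]    - array of poses, one row per pose: x, y, theta, ..., flattened 3x3 covariance
#   vm[i]    - dict with 'extent' plus per-layer grids ('occupancy', 'cov11', 'cov12', 'cov22')
#   graph[i] - Mx9 array of non-sequential constraints (x1, y1, _, x2, y2, _, sx, _, _)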
def rot(mat):
return np.fliplr(np.rot90(mat))
if __name__ == "__main__":
import sys
step = int(sys.argv[1])
print("Read logs at step {}".format(step))
bs, vm, graph = load_states(step)
bs0, vm0, graph0 = bs[0], vm[0], graph[0]
occ0 = vm0["occupancy"]
occ0[np.isnan(occ0)] = 50
occ0 = rot(occ0)
extent = np.array(vm0["extent"])
extent[2], extent[3] = extent[3], extent[2]
res = (extent[1] - extent[0]) / occ0.shape[1]
cov011 = rot(vm0["cov11"])
cov012 = rot(vm0["cov12"])
cov022 = rot(vm0["cov22"])
for i in range(len(bs)):
        fig, ax = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(10, 10))
bs0, vm0, graph0 = bs[0], vm[0], graph[0]
bsi, vmi, graphi = bs[i], vm[i], graph[i]
# plot occupancy grid map
occi = vmi["occupancy"]
occi[np.isnan(occi)] = 50
occi = rot(occi)
extent = np.array(vmi["extent"])
extent[2], extent[3] = extent[3], extent[2]
ax.imshow(
occi, extent=extent, origin="upper", cmap="gray_r", vmax=100, alpha=0.5
)
ax.plot(bs0[:, 0], bs0[:, 1], "k", lw=1)
if i > 0:
ax.plot(bsi[len(bs0) - 1 :, 0], bsi[len(bs0) - 1 :, 1], "r", lw=1)
for j in range(len(bs0)):
c, s = np.cos(bs0[j, 2]), np.sin(bs0[j, 2])
cov = bs0[j, -9:].reshape(3, 3)
plot_cov_ellipse(
bs0[j, :2], cov[:2, :2], nstd=pose_nstd, ax=ax, fill=False, color="k"
)
if i > 0:
# Propagate cov using odom model
c, s = np.cos(bs0[-1, 2]), np.sin(bs0[-1, 2])
R1 = np.array([[c, -s], [s, c]])
t1 = bs0[-1, :2]
# make a copy!
cov1 = np.array(bs0[-1, -9:].reshape(3, 3))
# global -> local cov
cov1[:2, :] = R1.T.dot(cov1[:2, :])
cov1[:, :2] = cov1[:, :2].dot(R1)
for j in range(len(bs0), len(bsi)):
c, s = np.cos(bsi[j, 2]), np.sin(bsi[j, 2])
R2 = np.array([[c, -s], [s, c]])
t2 = bsi[j, :2]
# jacobian in local coordinates
H = np.identity(3, np.float32)
H[:2, :2] = R2.T.dot(R1)
H[:2, 2] = np.array([[0, 1], [-1, 0]]).dot(R2.T).dot(t1 - t2)
cov2 = H.dot(cov1).dot(H.T) + np.diag([0.2, 0.2, 0.02]) ** 2
R1, t1, cov1 = R2, t2, cov2
# local -> global cov
gcov2 = R2.dot(cov2[:2, :2]).dot(R2.T)
plot_cov_ellipse(
t2, gcov2, nstd=pose_nstd, ax=ax, fill=False, color="k"
)
for j in range(len(bsi)):
c, s = np.cos(bsi[j, 2]), np.sin(bsi[j, 2])
cov = bsi[j, -9:].reshape(3, 3)
plot_cov_ellipse(
bsi[j, :2],
cov[:2, :2],
nstd=pose_nstd,
ax=ax,
fill=False,
color="r",
)
# plot non-sequential constraints
if graph0.size:
for x1, y1, _, x2, y2, _, sx, _, _ in graph0:
ax.plot((x1, x2), (y1, y2), "k--", lw=0.5, alpha=(sx0 / sx) ** 2)
if i > 0:
if graphi.size:
for x1, y1, _, x2, y2, _, sx, _, _ in graphi[len(graph0) :]:
ax.plot((x1, x2), (y1, y2), "r--", lw=0.5, alpha=(sx0 / sx) ** 2)
covi11 = rot(vmi["cov11"])
covi12 = rot(vmi["cov12"])
covi22 = rot(vmi["cov22"])
for r, c in np.ndindex(occi.shape):
if occi[r, c] == 0:
continue
pos = extent[0] + res * (c + 0.5), extent[3] + res * (r + 0.5)
cov = np.array([[covi11[r, c], covi12[r, c]], [covi12[r, c], covi22[r, c]]])
plot_cov_ellipse(
pos, cov, nstd=vm_nstd, ax=ax, color="k", alpha=0.5, fill=False
)
ax.set_xlabel("x (m)")
ax.set_ylabel("y (m)")
plt.tight_layout()
plt.savefig(
LOG_DIR + "step-{}-path-{}.png".format(step, i),
dpi=200,
bbox_inches="tight",
)
plt.close("all")
print("Finished {}/{}".format(i, len(bs) - 1))
| [
"rosparam.load_file",
"numpy.arctan2",
"numpy.isnan",
"matplotlib.pyplot.style.use",
"numpy.rot90",
"numpy.sin",
"matplotlib.pyplot.gca",
"numpy.diag",
"matplotlib.pyplot.tight_layout",
"warnings.simplefilter",
"matplotlib.pyplot.close",
"numpy.identity",
"warnings.catch_warnings",
"numpy.... | [((173, 202), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-talk"""'], {}), "('seaborn-talk')\n", (186, 202), True, 'import matplotlib.pyplot as plt\n'), ((445, 474), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.ros/"""'], {}), "('~/.ros/')\n", (463, 474), False, 'import os\n'), ((484, 530), 'rosparam.load_file', 'rosparam.load_file', (["(LOG_DIR + 'em_server.yaml')"], {}), "(LOG_DIR + 'em_server.yaml')\n", (502, 530), False, 'import rosparam\n'), ((1318, 1384), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'pos', 'width': 'width', 'height': 'height', 'angle': 'theta'}), '(xy=pos, width=width, height=height, angle=theta, **kwargs)\n', (1325, 1384), False, 'from matplotlib.patches import Ellipse\n'), ((2776, 2799), 'numpy.array', 'np.array', (["vm0['extent']"], {}), "(vm0['extent'])\n", (2784, 2799), True, 'import numpy as np\n'), ((1030, 1049), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), '(cov)\n', (1044, 1049), True, 'import numpy as np\n'), ((1163, 1172), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1170, 1172), True, 'import matplotlib.pyplot as plt\n'), ((1229, 1258), 'numpy.arctan2', 'np.arctan2', (['*vecs[:, 0][::-1]'], {}), '(*vecs[:, 0][::-1])\n', (1239, 1258), True, 'import numpy as np\n'), ((1292, 1305), 'numpy.sqrt', 'np.sqrt', (['vals'], {}), '(vals)\n', (1299, 1305), True, 'import numpy as np\n'), ((1987, 2015), 'numpy.loadtxt', 'np.loadtxt', (['f_vm'], {'max_rows': '(1)'}), '(f_vm, max_rows=1)\n', (1997, 2015), True, 'import numpy as np\n'), ((2464, 2477), 'numpy.rot90', 'np.rot90', (['mat'], {}), '(mat)\n', (2472, 2477), True, 'import numpy as np\n'), ((2721, 2735), 'numpy.isnan', 'np.isnan', (['occ0'], {}), '(occ0)\n', (2729, 2735), True, 'import numpy as np\n'), ((3040, 3088), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)', '(True)', '(True)'], {'figsize': '(10, 10)'}), '(1, 1, True, True, figsize=(10, 10))\n', (3052, 3088), True, 'import matplotlib.pyplot as plt\n'), ((3333, 3356), 'numpy.array', 'np.array', (["vmi['extent']"], {}), "(vmi['extent'])\n", (3341, 3356), True, 'import numpy as np\n'), ((6016, 6038), 'numpy.ndindex', 'np.ndindex', (['occi.shape'], {}), '(occi.shape)\n', (6026, 6038), True, 'import numpy as np\n'), ((6457, 6475), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6473, 6475), True, 'import matplotlib.pyplot as plt\n'), ((6630, 6646), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6639, 6646), True, 'import matplotlib.pyplot as plt\n'), ((1706, 1731), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1729, 1731), False, 'import warnings\n'), ((1745, 1776), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1766, 1776), False, 'import warnings\n'), ((2288, 2321), 'numpy.loadtxt', 'np.loadtxt', (['f_vm'], {'max_rows': 'n_rows'}), '(f_vm, max_rows=n_rows)\n', (2298, 2321), True, 'import numpy as np\n'), ((3270, 3284), 'numpy.isnan', 'np.isnan', (['occi'], {}), '(occi)\n', (3278, 3284), True, 'import numpy as np\n'), ((4072, 4099), 'numpy.array', 'np.array', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (4080, 4099), True, 'import numpy as np\n'), ((6191, 6261), 'numpy.array', 'np.array', (['[[covi11[r, c], covi12[r, c]], [covi12[r, c], covi22[r, c]]]'], {}), '([[covi11[r, c], covi12[r, c]], [covi12[r, c], covi22[r, c]]])\n', (6199, 6261), True, 'import numpy as np\n'), ((3723, 3740), 'numpy.cos', 'np.cos', (['bs0[j, 2]'], {}), '(bs0[j, 2])\n', (3729, 
3740), True, 'import numpy as np\n'), ((3742, 3759), 'numpy.sin', 'np.sin', (['bs0[j, 2]'], {}), '(bs0[j, 2])\n', (3748, 3759), True, 'import numpy as np\n'), ((4016, 4034), 'numpy.cos', 'np.cos', (['bs0[-1, 2]'], {}), '(bs0[-1, 2])\n', (4022, 4034), True, 'import numpy as np\n'), ((4036, 4054), 'numpy.sin', 'np.sin', (['bs0[-1, 2]'], {}), '(bs0[-1, 2])\n', (4042, 4054), True, 'import numpy as np\n'), ((4469, 4496), 'numpy.array', 'np.array', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (4477, 4496), True, 'import numpy as np\n'), ((4597, 4623), 'numpy.identity', 'np.identity', (['(3)', 'np.float32'], {}), '(3, np.float32)\n', (4608, 4623), True, 'import numpy as np\n'), ((2211, 2250), 'numpy.loadtxt', 'np.loadtxt', (['f_vm'], {'max_rows': '(1)', 'dtype': 'str'}), '(f_vm, max_rows=1, dtype=str)\n', (2221, 2250), True, 'import numpy as np\n'), ((4411, 4428), 'numpy.cos', 'np.cos', (['bsi[j, 2]'], {}), '(bsi[j, 2])\n', (4417, 4428), True, 'import numpy as np\n'), ((4430, 4447), 'numpy.sin', 'np.sin', (['bsi[j, 2]'], {}), '(bsi[j, 2])\n', (4436, 4447), True, 'import numpy as np\n'), ((5146, 5163), 'numpy.cos', 'np.cos', (['bsi[j, 2]'], {}), '(bsi[j, 2])\n', (5152, 5163), True, 'import numpy as np\n'), ((5165, 5182), 'numpy.sin', 'np.sin', (['bsi[j, 2]'], {}), '(bsi[j, 2])\n', (5171, 5182), True, 'import numpy as np\n'), ((4789, 4814), 'numpy.diag', 'np.diag', (['[0.2, 0.2, 0.02]'], {}), '([0.2, 0.2, 0.02])\n', (4796, 4814), True, 'import numpy as np\n'), ((4692, 4719), 'numpy.array', 'np.array', (['[[0, 1], [-1, 0]]'], {}), '([[0, 1], [-1, 0]])\n', (4700, 4719), True, 'import numpy as np\n')] |
"""
Take the results from generate2.py, and bucket by caption.
From looking at how jda's code works, negative samples are
drawn approximately uniformly from all examples.
This latest version is heavily based on jda's code now, just using the already-saved
image-caption pairs rather than calling into spatial_jda. No matter what I do, his code is about 5
times more concise and readable though :P
In this latest version, we treat the presaved files as a stream, rather than first sorting into buckets.
This script just stores the indexes; 'ex_json_to_tensors.py' then fetches the images
and writes them out.
"""
import argparse
import os
from os import path
from os.path import join
import random
from collections import defaultdict
import json
import time
import numpy as np
import h5py
import torch
from shapeworld.dataset import Dataset
from ulfs import h5_utils
from ulfs import git_info
from ulfs.utils import expand, die
def parse_object(o):
o_split = o.split()
if len(o_split) == 1:
if o == 'shape':
return '*', '*'
else:
return '*', o
else:
if o_split[1] == 'shape':
return o_split[0], '*'
else:
return o_split
def parse_caption(capt):
"""
split into noun phrase, prep, noun phrase; thence into
color shape prep color shape
"""
c = capt
assert ' not ' not in c
try:
if c.startswith('an '):
c = 'a ' + c[3:]
c = c.replace(' an ', ' a ')
c = ' ' + c
first_obj = c.split(' a ')[1].split(' is ')[0]
second_obj = c.split(' a ')[2].split(' .')[0]
s0, c0 = parse_object(first_obj)
s1, c1 = parse_object(second_obj)
prep = c.split(' is ')[1].split(' a ')[0].replace(' ', '-')
parsed = [s0, c0, prep, s1, c1]
except Exception as e:
print('e', e)
print('c', c)
raise e
return parsed
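# Worked example (caption format as produced by the generator):
#   parse_caption('a red square is above a circle .')
#   -> ['red', 'square', 'above', '*', 'circle']
# i.e. [color0, shape0, preposition, color1, shape1], with '*' as a wildcard.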
def normalize(parsed):
if parsed[2] == 'to-the-right-of':
parsed[2] = 'to-the-left-of'
elif parsed[2] == 'below':
parsed[2] = 'above'
else:
return parsed
l_c = parsed[0]
l_s = parsed[1]
parsed[0] = parsed[3]
parsed[1] = parsed[4]
parsed[3] = l_c
parsed[4] = l_s
return parsed
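# Worked example: right-of/below relations are canonicalized by swapping objects:
#   normalize(['red', 'square', 'to-the-right-of', '*', 'circle'])
#   -> ['*', 'circle', 'to-the-left-of', 'red', 'square']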
# class Vectorizer(object):
# def __init__(self):
# self.w2i = {}
# self.i2w = []
# def __getitem__(self, word):
# if word in self.w2i:
# return self.w2i[word]
# else:
# self.w2i[word] = len(self.i2w)
# self.i2w.append(word)
# return self.w2i[word]
# def vectorize(self, tokens):
class FileSegmentStreamer(object):
def __init__(self, filepath, segment_id):
self.filepath = filepath
self.segment_id = segment_id
self.pos = 0
        try:
            self.in_h5 = h5py.File(filepath, 'r')
        except Exception:
            print("couldn't open file", filepath, '=> skipping')
            self.N = 0
            return  # bail out here: self.in_h5 was never opened
        self.images_h5 = self.in_h5['images']
self.meta = json.loads(h5_utils.get_value(self.in_h5, 'meta'))
print('meta', json.dumps(self.meta, indent=2))
self.vocab_h5 = self.in_h5['vocab']
self.caption_wordids_h5 = self.in_h5['caption_wordids']
# print('vocab', self.vocab_h5)
self.vocab = list(self.vocab_h5)
# print('vocab', self.vocab)
self.N = self.images_h5.shape[0]
print('N', self.N)
def __iter__(self):
for n in range(self.N):
caption_ids = self.caption_wordids_h5[n]
caption = ' '.join([self.vocab[i] for i in list(caption_ids)])
yield (self.segment_id, n), caption
class FileSeriesStreamer(object):
"""
yields: (segment_id, intra_segment_index), caption
"""
def __init__(self, ref, in_filepath_templ, max_segments, max_in_samples):
self.ref = ref
self.in_filepath_templ = in_filepath_templ
def __iter__(self):
segment_id = 0
while True:
in_filepath = expand(self.in_filepath_templ.format(ref=self.ref, i=segment_id))
if not path.isfile(in_filepath):
if segment_id == 0:
in_filepath = in_filepath.replace('_0.h5', '.h5')
assert path.isfile(in_filepath)
else:
print('all files read')
break
print(in_filepath)
for s_n, caption in FileSegmentStreamer(filepath=in_filepath, segment_id=segment_id):
yield s_n, caption
segment_id += 1
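# Usage sketch (ref and path template are illustrative; the argparse default below
# is '~/data/shapeworld/rawstream_{ref}_{i}.h5'):
#   feed = FileSeriesStreamer(ref='myref', in_filepath_templ=templ,
#                             max_segments=None, max_in_samples=None)
#   for (segment_id, n), caption in feed:
#       ...
# note: max_segments / max_in_samples are accepted but currently unused.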
class CaptionsSet(object):
"""
this just ends up wrapping a list... :P
"""
def __init__(self, name, captions_l):
self.name = name
self.captions_l = captions_l
def __iter__(self):
return self.captions_l.__iter__()
def __repr__(self):
return f'CaptionsSet({self.name}, {len(self.captions_l)})'
def __len__(self):
return len(self.captions_l)
def __getitem__(self, i):
return self.captions_l[i]
class CaptionHarvester(object):
"""
    takes a dict of caption counts by name, and drinks from a feed
    to create the appropriate CaptionsSet's
"""
def __init__(self, captions_count_by_name):
self.captions_count_by_name = captions_count_by_name
self.total_captions = np.sum([size for size in captions_count_by_name.values()]).item()
print('total_captions', self.total_captions)
def drink(self, feed):
i = 0
self.captions = set()
for s_n, caption in feed:
self.captions.add(caption)
if len(self.captions) >= self.total_captions:
break
if i % 10000 == 0:
print(i, s_n, caption, 'len(self.captions)', len(self.captions))
i += 1
print('read', len(self.captions), 'captions')
self.captions = list(self.captions)
np.random.shuffle(self.captions)
print('self.captions[:20]', self.captions[:20])
def __iter__(self):
"""
return CaptionsSet objects
"""
for name, count in self.captions_count_by_name.items():
_captions = self.captions[-count:]
self.captions = self.captions[:len(self.captions) - count]
yield name, CaptionsSet(name=name, captions_l=_captions)
def parse_caption_set_sizes(caption_set_sizes_str):
captions_count_by_name = {}
for name_size in caption_set_sizes_str.split(','):
name, size = name_size.split('=')
captions_count_by_name[name] = int(size)
return captions_count_by_name
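# e.g. parse_caption_set_sizes('train=2000,val=500,test=500')
#      -> {'train': 2000, 'val': 500, 'test': 500}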
def parse_ds_splits(ds_splits):
ds_splits_str = ds_splits
split_def_by_name = {}
for dsplit_csplit_size in ds_splits_str.split(','):
ds_split, csplit_size = dsplit_csplit_size.split('=')
caption_set, size = csplit_size.split(':')
split_def_by_name[ds_split] = {'caption_set': caption_set, 'size': int(size)}
return split_def_by_name
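# e.g. parse_ds_splits('train=train:9000,val=val:500')
#      -> {'train': {'caption_set': 'train', 'size': 9000},
#          'val': {'caption_set': 'val', 'size': 500}}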
class NotEnoughData(Exception):
pass
class Full(Exception):
pass
class ExamplesSet(object):
def __init__(self, name, captions_set_name, captions_l, size, num_sender_pos, num_sender_neg):
self.name = name
self.captions_set_name = captions_set_name
self.captions_l = captions_l
self.size = size
self.examples = []
self.idxes_by_caption = defaultdict(list)
self.num_sender_pos = num_sender_pos
self.num_sender_neg = num_sender_neg
def draw_negative_sample(self, not_c):
"""
avoid choosing samples from not_c caption
(mostly to simplify counting...)
"""
idxes_by_caption = self.idxes_by_caption
at_least_one_available = False
for c, l in idxes_by_caption.items():
if c != not_c and len(l) >= 1:
at_least_one_available = True
break
if not at_least_one_available:
print([(c, len(l)) for c, l in idxes_by_caption.items()])
raise NotEnoughData()
while True:
caption = self.captions_l[np.random.randint(len(self.captions_l))]
if caption == not_c:
continue
if len(idxes_by_caption[caption]) == 0:
continue
s_n = idxes_by_caption[caption].pop()
return s_n
def drink(self, s, n, caption):
self.idxes_by_caption[caption].append((s, n))
if len(self.idxes_by_caption[caption]) >= self.num_sender_pos + 1:
pos_idxes = self.idxes_by_caption[caption]
sender_pos_exs = []
for j in range(self.num_sender_pos):
sender_pos_exs.append(pos_idxes.pop())
sender_neg_exs = []
for j in range(self.num_sender_neg):
sender_neg_exs.append(self.draw_negative_sample(not_c=caption))
            is_neg = int(np.random.randint(2))  # plain int keeps the example JSON-serializable
if is_neg == 1:
receiver_ex = self.draw_negative_sample(not_c=caption)
else:
receiver_ex = pos_idxes.pop()
ex = {
'sender_pos': sender_pos_exs,
'sender_neg': sender_neg_exs,
'receiver_ex': receiver_ex,
'receiver_label': 1 - is_neg,
'c': caption
}
# print('ex', ex)
self.examples.append(ex)
if len(self.examples) >= self.size:
raise Full()
def __iter__(self):
return self.examples.__iter__()
class ExamplesHarvester(object):
def __init__(self, captions_set_by_name, split_def_by_name, num_sender_pos, num_sender_neg):
self.captions_set_by_name = captions_set_by_name
self.split_def_by_name = split_def_by_name
self.examples_set_by_name = {}
self.num_sender_pos = num_sender_pos
self.num_sender_neg = num_sender_neg
for name, split_def in split_def_by_name.items():
size = split_def['size']
captions_set_name = split_def['caption_set']
captions_l = self.captions_set_by_name[captions_set_name]
self.examples_set_by_name[name] = ExamplesSet(
name=name,
captions_l=captions_l,
captions_set_name=captions_set_name,
size=size,
num_sender_pos=num_sender_pos,
num_sender_neg=num_sender_neg
)
self.pending_examples_sets_by_caption_set_name = defaultdict(list)
for name, examples_set in self.examples_set_by_name.items():
self.pending_examples_sets_by_caption_set_name[examples_set.captions_set_name].append(examples_set)
self.captions_set_name_by_caption = {}
for name, captions_set in captions_set_by_name.items():
for caption in captions_set:
self.captions_set_name_by_caption[caption] = name
def drink(self, feed):
for (s, n), caption in feed:
captions_set_name = self.captions_set_name_by_caption.get(caption, None)
if captions_set_name is None:
continue
examples_set_l = self.pending_examples_sets_by_caption_set_name[captions_set_name]
if len(examples_set_l) == 0:
continue
examples_set = examples_set_l[-1]
try:
examples_set.drink(s, n, caption)
except Full as e:
examples_set_l.pop()
print('completed', examples_set.name)
sets_left = np.sum([len(l) for l in self.pending_examples_sets_by_caption_set_name.values()]).item()
print('sets_left', sets_left)
if sets_left == 0:
print('all examples created :)')
return
def __iter__(self):
return self.examples_set_by_name.items().__iter__()
def run(
ref, pos_ref, seed, in_filepath, max_in_samples, max_segments, caption_set_sizes, ds_splits,
num_sender_pos, num_sender_neg, out_examples_json
):
random.seed(seed)
np.random.seed(seed)
r = np.random.RandomState(seed)
meta = {
'ref': ref,
'caption_set_sizes': caption_set_sizes,
'ds_splits': ds_splits,
'pos_ref': pos_ref,
'num_sender_pos': num_sender_pos,
'num_sender_neg': num_sender_neg,
'seed': seed,
'pos_filepath_templ': in_filepath,
'num_segments': max_segments,
'max_in_samples': max_in_samples,
'gitlog': git_info.get_git_log(),
'gitdiff': git_info.get_git_diff()
}
image_caption_stream = FileSeriesStreamer(ref=pos_ref, in_filepath_templ=in_filepath, max_segments=max_segments, max_in_samples=max_in_samples)
captions_count_by_name = parse_caption_set_sizes(caption_set_sizes)
caption_harvester = CaptionHarvester(captions_count_by_name=captions_count_by_name)
caption_harvester.drink(image_caption_stream)
captions_set_by_name = dict(caption_harvester)
print('captions_set_by_name', captions_set_by_name)
split_def_by_name = parse_ds_splits(ds_splits)
print('split_def_by_name', split_def_by_name)
examples_harvester = ExamplesHarvester(
captions_set_by_name=captions_set_by_name,
split_def_by_name=split_def_by_name,
num_sender_pos=num_sender_pos,
num_sender_neg=num_sender_neg
)
examples_harvester.drink(image_caption_stream)
last_print = time.time()
for name, examples_set in examples_harvester:
filepath = expand(out_examples_json.format(ref=ref, split=name))
print('writing', filepath)
with open(filepath, 'w') as f:
meta['split'] = name
f.write(json.dumps(meta) + '\n')
for ex in examples_set:
f.write(json.dumps(ex) + '\n')
print('all done')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--ref', type=str, required=True)
parser.add_argument('--pos-ref', type=str, required=True)
parser.add_argument('--max-in-samples', type=int)
parser.add_argument('--max-segments', type=int)
parser.add_argument('--caption-set-sizes', type=str, default='train=2000,val=500,test=500')
parser.add_argument('--ds-splits', type=str, default='train=train:9000,val=val:500,test=test:500,val_same=train:500,test_same=train:500')
# parser.add_argument('--caption-set-sizes', type=str, default='train=50,val=20,test=20')
# parser.add_argument('--ds-splits', type=str, default='train=train:90,val=val:5,test=test:5,val_same=train:5,val_test=train:5')
parser.add_argument('--num-sender-pos', type=int, default=6)
parser.add_argument('--num-sender-neg', type=int, default=6)
parser.add_argument('--seed', type=int, default=123)
parser.add_argument('--in-filepath', type=str, default='~/data/shapeworld/rawstream_{ref}_{i}.h5')
parser.add_argument('--out-examples-json', type=str, default='~/data/shapeworld/examples_{ref}_{split}.txt')
args = parser.parse_args()
run(**args.__dict__)
| [
"h5py.File",
"numpy.random.seed",
"argparse.ArgumentParser",
"ulfs.git_info.get_git_diff",
"numpy.random.RandomState",
"time.time",
"collections.defaultdict",
"ulfs.git_info.get_git_log",
"json.dumps",
"numpy.random.randint",
"random.seed",
"os.path.isfile",
"numpy.random.shuffle",
"ulfs.h... | [((12130, 12147), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (12141, 12147), False, 'import random\n'), ((12152, 12172), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (12166, 12172), True, 'import numpy as np\n'), ((12181, 12208), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (12202, 12208), True, 'import numpy as np\n'), ((13533, 13544), 'time.time', 'time.time', ([], {}), '()\n', (13542, 13544), False, 'import time\n'), ((13968, 13993), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (13991, 13993), False, 'import argparse\n'), ((5985, 6017), 'numpy.random.shuffle', 'np.random.shuffle', (['self.captions'], {}), '(self.captions)\n', (6002, 6017), True, 'import numpy as np\n'), ((7452, 7469), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7463, 7469), False, 'from collections import defaultdict\n'), ((10554, 10571), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10565, 10571), False, 'from collections import defaultdict\n'), ((12598, 12620), 'ulfs.git_info.get_git_log', 'git_info.get_git_log', ([], {}), '()\n', (12618, 12620), False, 'from ulfs import git_info\n'), ((12641, 12664), 'ulfs.git_info.get_git_diff', 'git_info.get_git_diff', ([], {}), '()\n', (12662, 12664), False, 'from ulfs import git_info\n'), ((2885, 2909), 'h5py.File', 'h5py.File', (['filepath', '"""r"""'], {}), "(filepath, 'r')\n", (2894, 2909), False, 'import h5py\n'), ((3104, 3142), 'ulfs.h5_utils.get_value', 'h5_utils.get_value', (['self.in_h5', '"""meta"""'], {}), "(self.in_h5, 'meta')\n", (3122, 3142), False, 'from ulfs import h5_utils\n'), ((3166, 3197), 'json.dumps', 'json.dumps', (['self.meta'], {'indent': '(2)'}), '(self.meta, indent=2)\n', (3176, 3197), False, 'import json\n'), ((8955, 8975), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (8972, 8975), True, 'import numpy as np\n'), ((4164, 4188), 'os.path.isfile', 'path.isfile', (['in_filepath'], {}), '(in_filepath)\n', (4175, 4188), False, 'from os import path\n'), ((4323, 4347), 'os.path.isfile', 'path.isfile', (['in_filepath'], {}), '(in_filepath)\n', (4334, 4347), False, 'from os import path\n'), ((13795, 13811), 'json.dumps', 'json.dumps', (['meta'], {}), '(meta)\n', (13805, 13811), False, 'import json\n'), ((13881, 13895), 'json.dumps', 'json.dumps', (['ex'], {}), '(ex)\n', (13891, 13895), False, 'import json\n')] |
import pyzbar.pyzbar as pyzbar
from pyzbar.pyzbar import decode, ZBarSymbol
import cv2
import numpy as np
image = cv2.imread('C:\\Users\\GEFORCE\\Documents\\img-qr2.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
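# The pyzbar imports above are never used below; a decode pass with the standard
# pyzbar API would look like this sketch:
#   for sym in decode(gray, symbols=[ZBarSymbol.QRCODE]):
#       print(sym.data.decode('utf-8'), sym.rect)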
ret, thresh = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# calculate points for each contour
hull = []
for i in range(len(contours)):
# creating convex hull object for each contour
hull.append(cv2.convexHull(contours[i], False))
drawing = np.zeros((thresh.shape[0], thresh.shape[1], 3), np.uint8)
# draw contours and hull points
for i in range(len(contours)):
color_contours = (0, 255, 0) # green - color for contours
color = (255, 0, 0) # blue - color for convex hull
# draw ith contour
cv2.drawContours(drawing, contours, i, color_contours, 1, 8, hierarchy)
# draw ith convex hull object
#cv2.drawContours(drawing, hull, i, color, 1, 8)
cv2.imshow("Thresh", drawing)
cv2.waitKey() | [
"cv2.cvtColor",
"cv2.waitKey",
"cv2.threshold",
"numpy.zeros",
"cv2.imread",
"cv2.convexHull",
"cv2.drawContours",
"cv2.imshow",
"cv2.findContours"
] | [((116, 172), 'cv2.imread', 'cv2.imread', (['"""C:\\\\Users\\\\GEFORCE\\\\Documents\\\\img-qr2.png"""'], {}), "('C:\\\\Users\\\\GEFORCE\\\\Documents\\\\img-qr2.png')\n", (126, 172), False, 'import cv2\n'), ((182, 221), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (194, 221), False, 'import cv2\n'), ((237, 285), 'cv2.threshold', 'cv2.threshold', (['gray', '(220)', '(255)', 'cv2.THRESH_BINARY'], {}), '(gray, 220, 255, cv2.THRESH_BINARY)\n', (250, 285), False, 'import cv2\n'), ((308, 372), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (324, 372), False, 'import cv2\n'), ((564, 621), 'numpy.zeros', 'np.zeros', (['(thresh.shape[0], thresh.shape[1], 3)', 'np.uint8'], {}), '((thresh.shape[0], thresh.shape[1], 3), np.uint8)\n', (572, 621), True, 'import numpy as np\n'), ((991, 1020), 'cv2.imshow', 'cv2.imshow', (['"""Thresh"""', 'drawing'], {}), "('Thresh', drawing)\n", (1001, 1020), False, 'import cv2\n'), ((1021, 1034), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1032, 1034), False, 'import cv2\n'), ((831, 902), 'cv2.drawContours', 'cv2.drawContours', (['drawing', 'contours', 'i', 'color_contours', '(1)', '(8)', 'hierarchy'], {}), '(drawing, contours, i, color_contours, 1, 8, hierarchy)\n', (847, 902), False, 'import cv2\n'), ((517, 551), 'cv2.convexHull', 'cv2.convexHull', (['contours[i]', '(False)'], {}), '(contours[i], False)\n', (531, 551), False, 'import cv2\n')] |
import os,sys
import json
import tensorflow as tf
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
import numpy as np
import string
import gzip
from nltk.stem import PorterStemmer
class MsMarcoData:
settings = None
vocabulary = None
vocab_idxs = None
max_list_length = 0
max_doc_length = 0
max_query_length = 0
example_pid_feature_map = None
context_qid_feature_map = None
smoothing = 10e-9
padding_number = -10e9
mu = 200
jm_lambda = 0.5
b = 0.75
k = 0.6
theta = 0.5
vocab_stat_map = {}
corpus_length = 0.0
doc_count = 0.0
q_term_map = {}
numerical_feature_scale_map = None
ps = PorterStemmer()
SET_LIST = ['train', 'dev', 'eval']
def create_feature_columns(self):
"""Returns the example feature columns."""
# build embedding features
vocabulary_size = len(self.vocabulary.keys())
doc_unigram_column = tf.feature_column.categorical_column_with_identity(
key="doc_unigrams", num_buckets=vocabulary_size)
query_unigram_column = tf.feature_column.categorical_column_with_identity(
key="query_unigrams", num_buckets=vocabulary_size)
doc_embed_column, query_embed_column = tf.feature_column.shared_embedding_columns(
[doc_unigram_column, query_unigram_column],
dimension=self.settings["embedding_size"],
shared_embedding_collection_name="words")
context_feature_columns = {"query_unigrams" : query_embed_column}
example_feature_columns = {"doc_unigrams" : doc_embed_column}
# build example numerical features
for name in ['query_len', 'idfs']:
context_feature_columns[name] = tf.feature_column.numeric_column(
name, shape=(1,), default_value=0.0)
# build example numerical features
for name in ['doc_len', 'tfs', 'tfidfs', 'bm25s', 'lmabs', 'lmdirs', 'lmjrs']:
example_feature_columns[name] = tf.feature_column.numeric_column(
name, shape=(1,), default_value=0.0)
return context_feature_columns, example_feature_columns
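    # Design note: doc and query unigrams share a single embedding table via
    # shared_embedding_columns, so both views are mapped into the same word space.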
def _tokenize(self, line):
text = line.strip()
for ch in string.punctuation:
text = text.replace(ch, '')
return [self.ps.stem(w.lower()) for w in text.split(' ')]
def __init__(self, data_json_file_path, list_size):
self.settings = json.load(open(data_json_file_path))
self.vocabulary = {}
self.vocab_info = []
self.vocab_idxs = {}
self.list_size = list_size
vocab_file = self.settings["WORKING_PATH"] + '/vocab_min_count_%d.txt' % self.settings["word_min_count"]
if os.path.isfile(vocab_file):
print('Load vocabulary')
with open(vocab_file) as fin:
idx = 0
for line in fin:
arr = line.strip().split(' ')
self.vocabulary[arr[0]] = [float(x) for x in arr[1:]]
self.vocab_idxs[arr[0]] = idx
self.vocab_info.append(self.vocabulary[arr[0]])
idx += 1
if idx != len(self.vocabulary):
print(idx)
else:
print('Build vocabulary')
if not os.path.exists(self.settings["WORKING_PATH"]):
os.makedirs(self.settings["WORKING_PATH"])
def _add_words_to_vocab(raw_word_list, in_corpus):
word_list = [w.strip() for w in raw_word_list]
word_list = [w.lower() for w in word_list if len(w) > 0]
for word in word_list:
if word not in self.vocabulary:
self.vocabulary[word] = [0,0] #cf, df, cf in both corpus and queries
if in_corpus:
self.vocabulary[word][0] += 1 #cf
if in_corpus:
for word in set(word_list):
self.vocabulary[word][1] += 1 #df
# add collection words
with open(self.settings["COLLECTION_PATH"] + '/corpus_text.txt') as text_fin:
for line in text_fin:
words = self._tokenize(line)
_add_words_to_vocab(words, True)
# add query words
query_words = set()
query_files = { x: self.settings['QUERY_PATH'] + '/queries.%s.json' %x for x in self.SET_LIST}
for set_name in self.SET_LIST:
if not os.path.exists(query_files[set_name]):
continue
with open(query_files[set_name]) as fin:
data = json.load(fin)
for query in data['queries']:
qid = int(query['number'])
query_text = query['text'].replace('#combine( ', '').replace(' )', '')
words = self._tokenize(query_text)
for w in words:
query_words.add(w)
_add_words_to_vocab(words, False)
# remove words with low frequency
words = self.vocabulary.keys()
del_words = set()
for w in words:
# remove words that has not appeared in the queries and not reach the min_count in the corpus
if self.vocabulary[w][0] < self.settings['word_min_count'] and w not in query_words:
del_words.add(w)
for w in del_words:
self.vocabulary.pop(w, None)
with open(vocab_file, 'w') as fout:
idx = 0
for w in self.vocabulary:
self.vocab_idxs[w] = idx
self.vocab_info.append(self.vocabulary[w])
fout.write('%s %d %d\n' % (w, self.vocabulary[w][0], self.vocabulary[w][1]))
idx += 1
print('Vocabulary loading finished, size %d' % (len(self.vocabulary)))
# load basic corpus information
statistic_file = self.settings["WORKING_PATH"] + '/stats_min_count_%d.json' % self.settings["word_min_count"]
if os.path.isfile(statistic_file):
print('Load Statistic Information')
stats = json.load(open(statistic_file))
self.max_doc_length = stats['max_doc_length']
self.max_query_length = stats['max_query_length']
self.corpus_length = stats['corpus_length']
self.doc_count = stats['doc_count']
self.avg_doc_len = stats['avg_doc_len']
self.numerical_feature_scale_map = stats['numerical_feature_scale_map']
else:
# load corpus to build stats
self.load_corpus_data()
stats = {
'max_doc_length' : self.max_doc_length,
'max_query_length' : self.max_query_length,
'corpus_length' : self.corpus_length,
'doc_count' : self.doc_count,
'avg_doc_len' : float(self.corpus_length)/float(self.doc_count)
}
# compute dense feature scale
self.compute_dense_feature_scale('train', self.list_size)
stats['numerical_feature_scale_map'] = self.numerical_feature_scale_map
# write to file
with open(statistic_file, 'w') as fout:
json.dump(stats, fout, sort_keys = True, indent = 4)
def load_corpus_data(self):
# Build vocabulary
print('Start loading data')
# Build doc_term stats
def _build_term_stats(raw_terms):
doc_terms = {}
for t in raw_terms:
if t not in doc_terms:
doc_terms[t] = 0.0
doc_terms[t] += 1
return doc_terms
# Load passages and build example features
doc_file = self.settings["WORKING_PATH"] + '/collection_min_count_%d.txt.gz' % self.settings["word_min_count"]
self.example_pid_feature_map = {}
self.max_doc_length = 0
self.context_qid_feature_map = {}
self.max_query_length = 0
self.corpus_length = 0
def _get_word_idxs(words):
word_idxs = []
for i in range(len(words)):
if words[i] in self.vocab_idxs:
word_idxs.append(self.vocab_idxs[words[i]])
return word_idxs
if os.path.isfile(doc_file): # if the processed collection exists, read it
print('Load passage features...')
with gzip.open(doc_file, 'rt') as fin:
for line in fin:
arr = line.strip().split('\t')
pid = int(arr[0])
words = []
if len(arr) > 1:
words = [int(x) for x in arr[1].split(' ')]
if pid not in self.example_pid_feature_map:
self.example_pid_feature_map[pid] = {}
self.example_pid_feature_map[pid]["doc_unigrams"] = words
self.example_pid_feature_map[pid]['doc_len'] = len(words)
# add term distribution
self.example_pid_feature_map[pid]['term_stats'] = _build_term_stats(words)
if len(words) > self.max_doc_length:
self.max_doc_length = len(words)
if len(self.example_pid_feature_map) % 10000 == 0:
print('Read %d docs' % len(self.example_pid_feature_map))
self.corpus_length += self.example_pid_feature_map[pid]['doc_len']
else: # if the collection hasn't been processed yet, process it and store the file.
# load raw collection
print('Create passage features...')
pid_list = []
with open(self.settings["COLLECTION_PATH"] + '/corpus_text.txt') as text_fin:
with open(self.settings["COLLECTION_PATH"] + 'corpus_id.txt') as id_fin:
for line in id_fin:
text = text_fin.readline().strip()
pid = int(line.strip())
words = self._tokenize(text)
pid_list.append(pid)
if pid not in self.example_pid_feature_map:
self.example_pid_feature_map[pid] = {}
word_idxs = _get_word_idxs(words)
self.example_pid_feature_map[pid]["doc_unigrams"] = word_idxs
self.example_pid_feature_map[pid]['doc_len'] = len(word_idxs)
# add term distribution
self.example_pid_feature_map[pid]['term_stats'] = _build_term_stats(word_idxs)
if len(word_idxs) > self.max_doc_length:
self.max_doc_length = len(word_idxs)
if len(self.example_pid_feature_map) % 10000 == 0:
print('Read %d docs' % len(self.example_pid_feature_map))
self.corpus_length += self.example_pid_feature_map[pid]['doc_len']
# write processed collection
with gzip.open(doc_file, 'wt') as fout:
for pid in pid_list:
word_idxs = self.example_pid_feature_map[pid]["doc_unigrams"]
fout.write('%d\t%s\n' % (pid, ' '.join([str(x) for x in word_idxs])))
self.doc_count = len(self.example_pid_feature_map)
self.avg_doc_len = float(self.corpus_length) / self.doc_count
for set_name in self.SET_LIST:
print('Read %s queries' % set_name)
query_file = self.settings["WORKING_PATH"] + '/%s_QUERY_min_count_%d.txt.gz' % (set_name, self.settings["word_min_count"])
if os.path.isfile(query_file):
with gzip.open(query_file, 'rt') as fin:
for line in fin:
arr = line.strip().split('\t')
qid = int(arr[0])
words = []
if len(arr) > 1:
words = [int(x) for x in arr[1].split(' ')]
if qid not in self.context_qid_feature_map:
self.context_qid_feature_map[qid] = {}
self.context_qid_feature_map[qid]["query_unigrams"] = words
self.context_qid_feature_map[qid]["query_len"] = len(words)
idf_list = [self.doc_count/(self.vocab_info[w][1]+0.5) for w in words]
self.context_qid_feature_map[qid]["idfs"] = sum(idf_list) / float(len(idf_list))
if len(words) > self.max_query_length:
self.max_query_length = len(words)
else:
# Load queries and build context features
print('Create %s query files' % set_name)
qid_list = []
with open(self.settings['QUERY_PATH'] + '/queries.%s.json' % set_name) as fin:
data = json.load(fin)
for query in data['queries']:
qid = int(query['number'])
query_text = query['text'].replace('#combine( ', '').replace(' )', '')
words = self._tokenize(query_text)
qid_list.append(qid)
if qid not in self.context_qid_feature_map:
self.context_qid_feature_map[qid] = {}
word_idxs = _get_word_idxs(words)
self.context_qid_feature_map[qid]["query_unigrams"] = word_idxs
self.context_qid_feature_map[qid]["query_len"] = len(word_idxs)
idf_list = [self.doc_count/(self.vocab_info[w][1]+0.5) for w in word_idxs]
self.context_qid_feature_map[qid]["idfs"] = sum(idf_list) / float(len(idf_list))
if len(word_idxs) > self.max_query_length:
self.max_query_length = len(word_idxs)
# write processed collection
with gzip.open(query_file, 'wt') as fout:
for qid in qid_list:
word_idxs = self.context_qid_feature_map[qid]["query_unigrams"]
fout.write('%d\t%s\n' % (qid, " ".join([str(x) for x in word_idxs])))
tf.logging.info("Collection size {}".format(str(self.corpus_length)))
tf.logging.info("Collection doc count {}".format(str(self.doc_count)))
tf.logging.info("Max doc length {}".format(str(self.max_doc_length)))
tf.logging.info("Max query length {}".format(str(self.max_query_length)))
print('Load finish')
def compute_dense_feature_scale(self, set_name, list_size):
tf.logging.info("Computing numerical feature scales for {}".format(set_name))
# if raw data are not loaded, load them
if self.example_pid_feature_map is None:
self.load_corpus_data()
# Read data
qid_to_doc = {} # The list of docs seen so far for a query.
max_list_length = 0
def _create_line_parser(data_type):
if data_type == 'pair':
def pair_line_parser(line):
arr = line.strip().split('\t')
qid = int(arr[0])
pid = int(arr[1])
return qid, pid
return pair_line_parser
elif data_type == 'trec_ranklist':
def trec_ranklist_line_parser(line):
arr = line.strip().split(' ')
qid = int(arr[0])
pid = int(arr[2])
return qid, pid
return trec_ranklist_line_parser
line_parser = _create_line_parser(self.settings["%s_data_path" % set_name]['type'])
with open(self.settings["%s_data_path" % set_name]['path']) as fin:
for line in fin:
qid, pid = line_parser(line)
if qid not in self.context_qid_feature_map or pid not in self.example_pid_feature_map:
continue
label = 0
if qid not in qid_to_doc:
qid_to_doc[qid] = []
qid_to_doc[qid].append((pid, label))
if len(qid_to_doc[qid]) > max_list_length:
max_list_length = len(qid_to_doc[qid])
list_size = list_size if list_size > 0 else max_list_length
if max_list_length > self.max_list_length:
self.max_list_length = max_list_length
# Build feature map
context_feature_columns, example_feature_columns = self.create_feature_columns()
numerical_feature_scale_map = {}
total_docs = 0
discarded_docs = 0
for qid in qid_to_doc:
feature_map = {}
# create context features
for k in context_feature_columns:
if not k.endswith('unigrams'): # this is a numerical feature
feature_map[k] = self.context_qid_feature_map[qid][k]
if k not in numerical_feature_scale_map:
numerical_feature_scale_map[k] = [feature_map[k], feature_map[k]]
else:
if feature_map[k] < numerical_feature_scale_map[k][0]:
numerical_feature_scale_map[k][0] = feature_map[k]
if feature_map[k] > numerical_feature_scale_map[k][1]:
numerical_feature_scale_map[k][1] = feature_map[k]
# compute dense example features
for i in range(len(qid_to_doc[qid])):
if i < list_size:
pid = qid_to_doc[qid][i][0]
feature_list, name_list = self.get_example_dense_features(
self.context_qid_feature_map[qid]['query_unigrams'],
self.example_pid_feature_map[pid]['term_stats'],
self.example_pid_feature_map[pid]['doc_len'])
for k in range(len(name_list)):
key = name_list[k]
value = feature_list[k]
if key not in numerical_feature_scale_map:
numerical_feature_scale_map[key] = [value, value]
else:
if value < numerical_feature_scale_map[key][0]:
numerical_feature_scale_map[key][0] = value
if value > numerical_feature_scale_map[key][1]:
numerical_feature_scale_map[key][1] = value
# print feature scale information
for key in numerical_feature_scale_map:
tf.logging.info("%s feature scale: [%.3f, %.3f]" % (
key, numerical_feature_scale_map[key][0], numerical_feature_scale_map[key][1]))
self.numerical_feature_scale_map = numerical_feature_scale_map
def get_example_dense_features(self, query_terms, doc_terms, doc_len):
doc_len = max(doc_len, 0.1)
tfs, tfidfs, bm25s, lmabs, lmdirs, lmjrs = [],[],[],[],[],[]
        for t in query_terms:
            # use term_freq rather than tf to avoid shadowing the tensorflow import
            term_freq = 0.0
            cf, df = self.vocab_info[t]
            cf += 0.5
            df += 0.5
            idf = self.doc_count / df
            if t in doc_terms:
                term_freq = doc_terms[t]
            # TF, IDF, TF-IDF
            tfs.append(term_freq)
            tfidfs.append(term_freq * idf)
            # BM25
            numerator = term_freq * (1 + self.k)
            denominator = term_freq + self.k * (1 - self.b + self.b * doc_len / self.avg_doc_len)
            bm25 = idf * numerator / denominator
            bm25s.append(bm25)
            # LMABS, LMDIR, LMJR
            background = cf / self.corpus_length
            gamma = self.theta * len(doc_terms) / float(doc_len)
            lmab = max(term_freq - self.theta, 0.0) / doc_len + gamma * background
            lmab = max(lmab, self.theta * background)
            lmdir = (term_freq + self.mu * background) / (doc_len + self.mu)
            lmjr = (1 - self.jm_lambda) * term_freq / doc_len + self.jm_lambda * background
            lmabs.append(np.log(lmab))
            lmdirs.append(np.log(lmdir))
            lmjrs.append(np.log(lmjr))
feature = [
sum(x) / len(x) for x in [tfs, tfidfs, bm25s, lmabs, lmdirs, lmjrs]
]
feature.append(doc_len)
feature_names = ['tfs', 'tfidfs', 'bm25s', 'lmabs', 'lmdirs', 'lmjrs', 'doc_len']
return feature, feature_names
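    # Reference formulas implemented above (tf = term frequency, P(t|C) = cf/|C|):
    #   BM25   = idf * tf*(1+k) / (tf + k*(1 - b + b*|d|/avgdl))
    #   LM-Abs = max(tf - theta, 0)/|d| + gamma*P(t|C),  gamma = theta*|uniq(d)|/|d|
    #   LM-Dir = (tf + mu*P(t|C)) / (|d| + mu)
    #   LM-JM  = (1 - lambda)*tf/|d| + lambda*P(t|C)
    # Each score is averaged over the query terms; the LM scores are log-scaled.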
def get_TFReord_parser(self):
''' Create the parser used to parse data read from TFRecord'''
context_feature_columns, example_feature_columns = self.create_feature_columns()
# build feature map
feature_map = {}
feature_map['label'] = tf.FixedLenFeature([self.list_size], tf.float32)
for k in context_feature_columns:
if k.endswith('unigrams'):
feature_map[k] = tf.SparseFeature(index_key=['%s_idx' % k],
value_key='%s_int_value' % k,
dtype=tf.int64,
size=[self.max_query_length])
else:
feature_map[k] = tf.FixedLenFeature([1], tf.float32)
for k in example_feature_columns:
if k.endswith('unigrams'):
feature_map[k] = tf.SparseFeature(index_key=['%s_list_idx' % k, '%s_idx' % k],
value_key='%s_int_value' % k,
dtype=tf.int64,
size=[self.list_size, self.max_doc_length])
else:
feature_map[k] = tf.FixedLenFeature([self.list_size], tf.float32)
def parser(serialized_example):
"""Parses a single tf.Example into image and label tensors."""
features = tf.parse_single_example(serialized_example,
features=feature_map)
label = features.pop('label')
print(features['bm25s'])
return features, label
return parser
def get_file_paths(self, set_name, list_size):
# return a list of file paths for TFRecord dataset
# check if corresponding files exists
file_paths = []
root_path = self.settings["WORKING_PATH"] + '/list_size_%d/%s/' % (list_size, set_name)
if not os.path.exists(root_path):
os.makedirs(root_path)
data_info_file = root_path + '/info.json'
if os.path.isfile(data_info_file):
data_info = json.load(open(data_info_file))
file_paths = data_info['file_paths']
max_list_length = data_info['max_list_length']
#list_size = list_size if list_size > 0 else max_list_length
if max_list_length > self.max_list_length:
self.max_list_length = max_list_length
# TODO: load feature scales
else:
if self.example_pid_feature_map is None:
self.load_corpus_data()
tf.logging.info("Creating TFRecord data for {}".format(set_name))
# if raw data are not loaded, load them
#if self.max_doc_length < 1:
# self.load_corpus_data()
# Read labels
qrel_map = {}
with open(self.settings['QRELS_PATH'] + '%s.qrels' % set_name) as fin:
for line in fin:
arr = line.strip().split(' ')
qid = int(arr[0])
pid = int(arr[2])
label = int(arr[3])
if qid not in qrel_map:
qrel_map[qid] = set()
if label > 0:
qrel_map[qid].add(pid)
# Read data
qid_to_doc = {} # The list of docs seen so far for a query.
max_list_length = 0
def _create_line_parser(data_type):
if data_type == 'pair':
def pair_line_parser(line):
arr = line.strip().split('\t')
qid = int(arr[0])
pid = int(arr[1])
return qid, pid
return pair_line_parser
elif data_type == 'trec_ranklist':
def trec_ranklist_line_parser(line):
arr = line.strip().split(' ')
qid = int(arr[0])
pid = int(arr[2])
return qid, pid
return trec_ranklist_line_parser
line_parser = _create_line_parser(self.settings["%s_data_path" % set_name]['type'])
with open(self.settings["%s_data_path" % set_name]['path']) as fin:
for line in fin:
qid, pid = line_parser(line)
if qid not in self.context_qid_feature_map or pid not in self.example_pid_feature_map:
continue
label = 0
if qid in qrel_map and pid in qrel_map[qid]:
label = 1
if qid not in qid_to_doc:
qid_to_doc[qid] = []
qid_to_doc[qid].append((pid, label))
if len(qid_to_doc[qid]) > max_list_length:
max_list_length = len(qid_to_doc[qid])
list_size = list_size if list_size > 0 else max_list_length
if max_list_length > self.max_list_length:
self.max_list_length = max_list_length
# Build feature map
context_feature_columns, example_feature_columns = self.create_feature_columns()
total_docs = 0
discarded_docs = 0
count_record = 0
record_file_path = root_path + '%d.tfrecord' % count_record
fout = tf.python_io.TFRecordWriter(record_file_path)
file_paths.append(record_file_path)
example_id_fout = gzip.open(root_path + 'qid_pid_list.txt.gz', 'wb')
for qid in qid_to_doc:
feature_map = {}
id_list = [str(qid)]
no_rel_doc = True
all_empty_doc = True
# create label
feature_map['label'] = [-1.0 for _ in range(list_size)]
for i in range(len(qid_to_doc[qid])):
if i < list_size:
pid = qid_to_doc[qid][i][0]
label = qid_to_doc[qid][i][1]
feature_map['label'][i] = label
if label > 0:
no_rel_doc = False
id_list.append(str(pid))
else:
discarded_docs += 1
total_docs += 1
if no_rel_doc: # if there are no relevant document, discard the query
continue
# create context features
for k in context_feature_columns:
if k.endswith('unigrams'): # use sparse feature
idx_key = '%s_idx' % k
value_key = '%s_int_value' % k
feature_map[idx_key] = []
feature_map[value_key] = []
context_feature_vector = self.context_qid_feature_map[qid][k]
for i in range(len(context_feature_vector)):
feature_map[idx_key].append(i)
feature_map[value_key].append(context_feature_vector[i])
else: # use dense features
feature_map[k] = [self.context_qid_feature_map[qid][k]]
# compute dense example features
dense_features = {}
for i in range(len(qid_to_doc[qid])):
if i < list_size:
pid = qid_to_doc[qid][i][0]
feature_list, name_list = self.get_example_dense_features(
self.context_qid_feature_map[qid]['query_unigrams'],
self.example_pid_feature_map[pid]['term_stats'],
self.example_pid_feature_map[pid]['doc_len'])
for k in range(len(name_list)):
key = name_list[k]
value = feature_list[k]
if key not in dense_features:
dense_features[key] = [self.padding_number for _ in range(list_size)]
                            dense_features[key][i] = value  # slot i = this doc's position in the list
# create example features
for k in example_feature_columns:
if k.endswith('unigrams'): # use sparse feature
list_idx_key = '%s_list_idx' % k
idx_key = '%s_idx' % k
value_key = '%s_int_value' % k
feature_map[list_idx_key] = []
feature_map[idx_key] = []
feature_map[value_key] = []
for i in range(len(qid_to_doc[qid])):
if i < list_size:
pid = qid_to_doc[qid][i][0]
label = qid_to_doc[qid][i][1]
example_feature_vector = self.example_pid_feature_map[pid][k]
for j in range(len(example_feature_vector)):
feature_map[list_idx_key].append(i)
feature_map[idx_key].append(j)
feature_map[value_key].append(example_feature_vector[j])
if len(example_feature_vector) > 0:
all_empty_doc = False
else:
feature_map[k] = dense_features[k]
if all_empty_doc: # if all docs are empty, discard the query
continue
# convert feature map to example
for key in feature_map:
if key.endswith('idx') or key.endswith('int_value'): # key for sparse features
feature_map[key] = tf.train.Feature(int64_list=tf.train.Int64List(value=feature_map[key]))
elif key != 'label': # key for numerical features
for i in range(len(feature_map[key])):
if feature_map[key][i] == self.padding_number: # its a padding, just set as 0
feature_map[key][i] = 0.0
else:
feature_map[key][i] = (
feature_map[key][i] - self.numerical_feature_scale_map[key][0])/(
self.numerical_feature_scale_map[key][1] - self.numerical_feature_scale_map[key][0] + self.smoothing
)
feature_map[key] = tf.train.Feature(float_list=tf.train.FloatList(value=feature_map[key]))
else: # this is a key for 'label'
feature_map[key] = tf.train.Feature(float_list=tf.train.FloatList(value=feature_map[key]))
feature_example = tf.train.Example(features=tf.train.Features(feature=feature_map))
# write to TFRecord file
fout.write(feature_example.SerializeToString())
example_id_fout.write(bytes('%s\n' % '\t'.join(id_list), 'UTF-8')) # output qid and corresponding pid list
count_record += 1
if count_record % 10000 == 0:
fout.close()
record_file_path = root_path + '%d.tfrecord' % count_record
fout = tf.python_io.TFRecordWriter(record_file_path)
file_paths.append(record_file_path)
example_id_fout.close()
fout.close()
# write data info
data_info = {
'file_paths' : file_paths,
'example_id_file' : root_path + 'qid_pid_list.txt.gz',
'max_list_length' : max_list_length,
'query_number' : len(qid_to_doc),
'total_document' : total_docs,
'discarded_document' : discarded_docs,
}
with open(data_info_file, 'w') as fout:
json.dump(data_info, fout, sort_keys = True, indent = 4)
tf.logging.info("Number of queries: {}".format(len(qid_to_doc)))
tf.logging.info("Number of documents in total: {}".format(total_docs))
tf.logging.info("Number of documents discarded: {}".format(discarded_docs))
self.list_size = list_size
return file_paths
def generate_trec_ranklist_with_result_generator(self, set_name, list_size, model_name,
result_generator, file_writer, rank_cut):
''' Generate TREC format ranklist with result generator (estimator.predict())'''
root_path = self.settings["WORKING_PATH"] + '/list_size_%d/%s/' % (list_size, set_name)
data_info = json.load(open(root_path + '/info.json'))
with gzip.open(data_info['example_id_file']) as fin:
for x in result_generator:
arr = fin.readline().decode('UTF-8').strip().split('\t')
qid = arr[0]
pid_list = arr[1:]
actual_list_size = len(pid_list)
sorted_id_list = sorted(range(len(x[:actual_list_size])), key=lambda k: x[k], reverse=True)
rank = 1
for idx in sorted_id_list[:rank_cut]:
file_writer.write('%s Q0 %s %d %.4f %s\n' % (qid, pid_list[idx], rank, x[idx], model_name))
rank += 1
| [
"nltk.stem.PorterStemmer",
"tensorflow.logging.info",
"tensorflow.train.Int64List",
"os.path.isfile",
"tensorflow.train.FloatList",
"tensorflow.feature_column.categorical_column_with_identity",
"os.path.exists",
"tensorflow.parse_single_example",
"tensorflow.SparseFeature",
"tensorflow.feature_col... | [((630, 645), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (643, 645), False, 'from nltk.stem import PorterStemmer\n'), ((867, 970), 'tensorflow.feature_column.categorical_column_with_identity', 'tf.feature_column.categorical_column_with_identity', ([], {'key': '"""doc_unigrams"""', 'num_buckets': 'vocabulary_size'}), "(key='doc_unigrams',\n num_buckets=vocabulary_size)\n", (917, 970), True, 'import tensorflow as tf\n'), ((1001, 1106), 'tensorflow.feature_column.categorical_column_with_identity', 'tf.feature_column.categorical_column_with_identity', ([], {'key': '"""query_unigrams"""', 'num_buckets': 'vocabulary_size'}), "(key='query_unigrams',\n num_buckets=vocabulary_size)\n", (1051, 1106), True, 'import tensorflow as tf\n'), ((1153, 1332), 'tensorflow.feature_column.shared_embedding_columns', 'tf.feature_column.shared_embedding_columns', (['[doc_unigram_column, query_unigram_column]'], {'dimension': "self.settings['embedding_size']", 'shared_embedding_collection_name': '"""words"""'}), "([doc_unigram_column,\n query_unigram_column], dimension=self.settings['embedding_size'],\n shared_embedding_collection_name='words')\n", (1195, 1332), True, 'import tensorflow as tf\n'), ((2450, 2476), 'os.path.isfile', 'os.path.isfile', (['vocab_file'], {}), '(vocab_file)\n', (2464, 2476), False, 'import os, sys\n'), ((5073, 5103), 'os.path.isfile', 'os.path.isfile', (['statistic_file'], {}), '(statistic_file)\n', (5087, 5103), False, 'import os, sys\n'), ((6880, 6904), 'os.path.isfile', 'os.path.isfile', (['doc_file'], {}), '(doc_file)\n', (6894, 6904), False, 'import os, sys\n'), ((16671, 16719), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[self.list_size]', 'tf.float32'], {}), '([self.list_size], tf.float32)\n', (16689, 16719), True, 'import tensorflow as tf\n'), ((18064, 18094), 'os.path.isfile', 'os.path.isfile', (['data_info_file'], {}), '(data_info_file)\n', (18078, 18094), False, 'import os, sys\n'), ((1580, 1649), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['name'], {'shape': '(1,)', 'default_value': '(0.0)'}), '(name, shape=(1,), default_value=0.0)\n', (1612, 1649), True, 'import tensorflow as tf\n'), ((1816, 1885), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['name'], {'shape': '(1,)', 'default_value': '(0.0)'}), '(name, shape=(1,), default_value=0.0)\n', (1848, 1885), True, 'import tensorflow as tf\n'), ((9529, 9555), 'os.path.isfile', 'os.path.isfile', (['query_file'], {}), '(query_file)\n', (9543, 9555), False, 'import os, sys\n'), ((14951, 15086), 'tensorflow.logging.info', 'tf.logging.info', (["('%s feature scale: [%.3f, %.3f]' % (key, numerical_feature_scale_map[key][\n 0], numerical_feature_scale_map[key][1]))"], {}), "('%s feature scale: [%.3f, %.3f]' % (key,\n numerical_feature_scale_map[key][0], numerical_feature_scale_map[key][1]))\n", (14966, 15086), True, 'import tensorflow as tf\n'), ((17511, 17576), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['serialized_example'], {'features': 'feature_map'}), '(serialized_example, features=feature_map)\n', (17534, 17576), True, 'import tensorflow as tf\n'), ((17962, 17987), 'os.path.exists', 'os.path.exists', (['root_path'], {}), '(root_path)\n', (17976, 17987), False, 'import os, sys\n'), ((17992, 18014), 'os.makedirs', 'os.makedirs', (['root_path'], {}), '(root_path)\n', (18003, 18014), False, 'import os, sys\n'), ((20573, 20618), 'tensorflow.python_io.TFRecordWriter', 
'tf.python_io.TFRecordWriter', (['record_file_path'], {}), '(record_file_path)\n', (20600, 20618), True, 'import tensorflow as tf\n'), ((20679, 20729), 'gzip.open', 'gzip.open', (["(root_path + 'qid_pid_list.txt.gz')", '"""wb"""'], {}), "(root_path + 'qid_pid_list.txt.gz', 'wb')\n", (20688, 20729), False, 'import gzip\n'), ((26078, 26117), 'gzip.open', 'gzip.open', (["data_info['example_id_file']"], {}), "(data_info['example_id_file'])\n", (26087, 26117), False, 'import gzip\n'), ((2869, 2914), 'os.path.exists', 'os.path.exists', (["self.settings['WORKING_PATH']"], {}), "(self.settings['WORKING_PATH'])\n", (2883, 2914), False, 'import os, sys\n'), ((2920, 2962), 'os.makedirs', 'os.makedirs', (["self.settings['WORKING_PATH']"], {}), "(self.settings['WORKING_PATH'])\n", (2931, 2962), False, 'import os, sys\n'), ((6049, 6097), 'json.dump', 'json.dump', (['stats', 'fout'], {'sort_keys': '(True)', 'indent': '(4)'}), '(stats, fout, sort_keys=True, indent=4)\n', (6058, 6097), False, 'import json\n'), ((6997, 7022), 'gzip.open', 'gzip.open', (['doc_file', '"""rt"""'], {}), "(doc_file, 'rt')\n", (7006, 7022), False, 'import gzip\n'), ((9002, 9027), 'gzip.open', 'gzip.open', (['doc_file', '"""wt"""'], {}), "(doc_file, 'wt')\n", (9011, 9027), False, 'import gzip\n'), ((16117, 16129), 'numpy.log', 'np.log', (['lmab'], {}), '(lmab)\n', (16123, 16129), True, 'import numpy as np\n'), ((16148, 16161), 'numpy.log', 'np.log', (['lmdir'], {}), '(lmdir)\n', (16154, 16161), True, 'import numpy as np\n'), ((16179, 16191), 'numpy.log', 'np.log', (['lmjr'], {}), '(lmjr)\n', (16185, 16191), True, 'import numpy as np\n'), ((16807, 16929), 'tensorflow.SparseFeature', 'tf.SparseFeature', ([], {'index_key': "['%s_idx' % k]", 'value_key': "('%s_int_value' % k)", 'dtype': 'tf.int64', 'size': '[self.max_query_length]'}), "(index_key=['%s_idx' % k], value_key='%s_int_value' % k,\n dtype=tf.int64, size=[self.max_query_length])\n", (16823, 16929), True, 'import tensorflow as tf\n'), ((16992, 17027), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[1]', 'tf.float32'], {}), '([1], tf.float32)\n', (17010, 17027), True, 'import tensorflow as tf\n'), ((17115, 17276), 'tensorflow.SparseFeature', 'tf.SparseFeature', ([], {'index_key': "['%s_list_idx' % k, '%s_idx' % k]", 'value_key': "('%s_int_value' % k)", 'dtype': 'tf.int64', 'size': '[self.list_size, self.max_doc_length]'}), "(index_key=['%s_list_idx' % k, '%s_idx' % k], value_key=\n '%s_int_value' % k, dtype=tf.int64, size=[self.list_size, self.\n max_doc_length])\n", (17131, 17276), True, 'import tensorflow as tf\n'), ((17333, 17381), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[self.list_size]', 'tf.float32'], {}), '([self.list_size], tf.float32)\n', (17351, 17381), True, 'import tensorflow as tf\n'), ((25362, 25414), 'json.dump', 'json.dump', (['data_info', 'fout'], {'sort_keys': '(True)', 'indent': '(4)'}), '(data_info, fout, sort_keys=True, indent=4)\n', (25371, 25414), False, 'import json\n'), ((3817, 3854), 'os.path.exists', 'os.path.exists', (['query_files[set_name]'], {}), '(query_files[set_name])\n', (3831, 3854), False, 'import os, sys\n'), ((3927, 3941), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (3936, 3941), False, 'import json\n'), ((9566, 9593), 'gzip.open', 'gzip.open', (['query_file', '"""rt"""'], {}), "(query_file, 'rt')\n", (9575, 9593), False, 'import gzip\n'), ((10469, 10483), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (10478, 10483), False, 'import json\n'), ((11278, 11305), 'gzip.open', 'gzip.open', (['query_file', 
'"""wt"""'], {}), "(query_file, 'wt')\n", (11287, 11305), False, 'import gzip\n'), ((24890, 24935), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['record_file_path'], {}), '(record_file_path)\n', (24917, 24935), True, 'import tensorflow as tf\n'), ((24506, 24544), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature_map'}), '(feature=feature_map)\n', (24523, 24544), True, 'import tensorflow as tf\n'), ((23721, 23763), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'feature_map[key]'}), '(value=feature_map[key])\n', (23739, 23763), True, 'import tensorflow as tf\n'), ((24278, 24320), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'feature_map[key]'}), '(value=feature_map[key])\n', (24296, 24320), True, 'import tensorflow as tf\n'), ((24414, 24456), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'feature_map[key]'}), '(value=feature_map[key])\n', (24432, 24456), True, 'import tensorflow as tf\n')] |
import torch
import torch.nn as nn
import numpy as np
import torch.optim as optim
from tqdm import trange
X = np.random.randint(1,9,(1000,2))
y = np.prod(X,axis=1).reshape(1000,1)
X,y = torch.from_numpy(X)*.1, torch.from_numpy(y)*1.
class mul(nn.Module):
def __init__(self):
super(mul,self).__init__()
self.l1 = nn.Linear(2,4)
self.act = nn.ReLU()
self.l2 = nn.Linear(4,1)
def forward(self, x):
x = self.l1(x)
x = self.act(x)
x = self.l2(x)
return x
net = mul().to('cuda')
optimizer = optim.Adam(net.parameters())
loss_function = nn.MSELoss()  # the target is a real-valued product, so use MSE; the original nn.BCELoss() requires inputs in [0, 1] and raises here
net.train()
for epoch in (n:=trange(100)):
X = X.to('cuda')
y = y.to('cuda')
output = net(X)
loss = loss_function(output, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
n.set_description(f'{epoch}, loss = {loss.item()}')
with torch.no_grad():
    X = torch.tensor([2., 2.]).to('cuda') * .1  # scale by 0.1 to match the training inputs; this probes the product 2 * 2
output = net(X)
print(output)
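# Editorial sketch (not in the original record): the network was trained on
# inputs scaled by 0.1, so probing the product 3 * 4 means feeding [.3, .4].
with torch.no_grad():
    check = torch.tensor([3., 4.]).to('cuda') * .1
    print(check, net(check).item())  # should approach 12.0 as the loss falls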
| [
"torch.nn.ReLU",
"torch.nn.BCELoss",
"tqdm.trange",
"numpy.random.randint",
"torch.nn.Linear",
"torch.tensor",
"torch.no_grad",
"numpy.prod",
"torch.from_numpy"
] | [((112, 146), 'numpy.random.randint', 'np.random.randint', (['(1)', '(9)', '(1000, 2)'], {}), '(1, 9, (1000, 2))\n', (129, 146), True, 'import numpy as np\n'), ((609, 621), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (619, 621), True, 'import torch.nn as nn\n'), ((653, 664), 'tqdm.trange', 'trange', (['(100)'], {}), '(100)\n', (659, 664), False, 'from tqdm import trange\n'), ((900, 915), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (913, 915), False, 'import torch\n'), ((148, 166), 'numpy.prod', 'np.prod', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (155, 166), True, 'import numpy as np\n'), ((189, 208), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (205, 208), False, 'import torch\n'), ((213, 232), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (229, 232), False, 'import torch\n'), ((336, 351), 'torch.nn.Linear', 'nn.Linear', (['(2)', '(4)'], {}), '(2, 4)\n', (345, 351), True, 'import torch.nn as nn\n'), ((370, 379), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (377, 379), True, 'import torch.nn as nn\n'), ((398, 413), 'torch.nn.Linear', 'nn.Linear', (['(4)', '(1)'], {}), '(4, 1)\n', (407, 413), True, 'import torch.nn as nn\n'), ((925, 949), 'torch.tensor', 'torch.tensor', (['[2.0, 2.0]'], {}), '([2.0, 2.0])\n', (937, 949), False, 'import torch\n')] |
# @Author: <NAME>
# @Date: Tue, March 31st 2020, 12:34 am
# @Email: <EMAIL>
# @Filename: base_dataset.py
'''
Defines the base class BaseDataset, which manages information about a particular subset or collection of datasets while it is being prepared for an experiment.
'''
from boltons.dictutils import OneToOne
from collections import OrderedDict
import dataset
import json
import numpy as np
import os
import pandas as pd
import random
from stuf import stuf
from toolz.itertoolz import frequencies
from pyleaves import leavesdb
import pyleaves
from pyleaves.tests.test_utils import MetaData
from typing import List
import tensorflow_datasets as tfds  # needed by BaseDataset.load_from_tfds; missing from the original imports
class BaseDataset(object):
__version__ = '0.1'
def __init__(self, name='', src_db=pyleaves.DATABASE_PATH, columns = ['path','family','catalog_number'], id_col=None):
"""
Base class meant to be subclassed for unique named datasets. Implements some property setters/getters for maintaining consistency
of data and filters (like min class count threshold).
Examples
-------
Examples should be written in doctest format, and
should illustrate how to use the function/class.
>>> dataset = BaseDataset()
>>> leaves_dataset = LeavesDataset()
... fossil_dataset = FossilDataset()
... pnas_dataset = PNASDataset()
... pnas_fossil_data = pnas_data+fossil_data
... pnas_leaves_data = pnas_data+leaves_data
>>>
"""
self.name = name
self.columns = columns
self.x_col = 'path'
self.y_col = 'family'
self.id_col = id_col
if src_db:
self.local_db = leavesdb.init_local_db(src_db = src_db, verbose=False)
self._threshold = 0
self._data = pd.DataFrame(columns=self.columns)
def load_from_db(self, x_col='path', y_col='family', all_cols=False):
"""
Load a dataframe from the SQLite db with 2 columns, paths and labels.
Subclasses should use this function in their __init__ method to instantiate self._data
-set all_cols=True in order to ignore x_col and y_col and instead load all columns in table
Returns
-------
pd.DataFrame
Description of returned object.
Examples
-------
Examples should be written in doctest format, and
should illustrate how to use the function/class.
>>>
"""
db = dataset.connect(f"sqlite:///{self.local_db}", row_type=stuf)
if all_cols:
data = pd.DataFrame(db['dataset'].all())
if self.name in data.dataset.values:
data = data[data.dataset == self.name]
else:
data = pd.DataFrame(leavesdb.db_query.load_data(db=db, x_col=x_col, y_col=y_col, dataset=self.name))
return data
def load_from_csv(self, filepath):
"""Load a dataframe from a CSV file with 2 columns, paths and labels.
Returns
-------
pd.DataFrame
Description of returned object.
Examples
-------
Examples should be written in doctest format, and
should illustrate how to use the function/class.
>>>
"""
        data = pd.read_csv(filepath)  # drop_index is not a read_csv keyword; the original call raised TypeError
return data
def load_from_tfds(self):
self.tfds_builder = tfds.builder(self.name)
# Download the dataset
self.tfds_builder.download_and_prepare()
def load_from_lists(self, x: List, y: List, columns: List[str]=None):
columns = columns or self.columns
data = pd.DataFrame({columns[0]:np.array(x),
columns[1]:np.array(y)})
return data
@classmethod
def from_dataframe(cls, df: pd.DataFrame, name='', threshold=0):
new_dataset = cls(name=name, src_db=None, columns=df.columns.tolist())
new_dataset._threshold = threshold
new_dataset.data = df
return new_dataset
def exclude_rare_classes(self, threshold):
"""
Uses helper function filter_low_count_labels to keep only classes with the number of samples equal to or greater than threshold.
Updates the self._data dataframe in place
Parameters
----------
threshold : int
Keep classes with num_samples >= threshold
Examples
-------
Examples should be written in doctest format, and
should illustrate how to use the function/class.
>>>
"""
self._data = pyleaves.data_pipeline.preprocessing.filter_low_count_labels(self.data,
threshold,
self.y_col,
verbose=False)
self._threshold = threshold
def merge_with(self, other):
#TODO: combine this with __add__ method, since it's just a wrapper
assert issubclass(type(other), BaseDataset)
merged_dataset = BaseDataset()
#Keep highest threshold between the 2 instances
merged_dataset._threshold = max([self.threshold,other.threshold])
        #Use the Base class setter method for self.data to concatenate the 2 instances' dataframes,
        #then perform a round of filtering out duplicates and class thresholding
merged_dataset.data = pd.concat({self.name:self.data,
other.name:other.data})
merged_dataset.name = '+'.join([self.name, other.name])
return merged_dataset
def __add__(self, other):
return self.merge_with(other)
def __eq__(self, other):
if self.name != other.name:
return False
elif self.threshold != other.threshold:
return False
elif not np.all(self.data == other.data):
return False
elif not np.all(self.classes == other.classes):
return False
return True
@property
def data(self):
return self._data
@data.setter
def data(self, new_data: pd.DataFrame):
# import pdb; pdb.set_trace()
self._data = new_data.drop_duplicates(subset='path')
self.exclude_rare_classes(self.threshold)
self.columns = self._data.columns.tolist()
if len(self._data) != len(new_data):
print(f'dropped {len(new_data)-len(self._data)} duplicate rows')
@property
def class_counts(self):
'''
Returns
-------
dict
mapping {class_name:class_count} values
'''
# y_col = self.columns[1]
return frequencies(self.data[self.y_col])
@property
def classes(self):
'''
Returns
-------
list
Sorted list of class names
'''
return sorted(self.class_counts.keys())
# return pyleaves.data_pipeline.preprocessing.get_class_counts(self.data,'family', verbose=False)[0]
@property
def num_samples(self):
return len(self.data)
@property
def num_classes(self):
return len(self.classes)
@property
def threshold(self):
return self._threshold
def __repr__(self):
return f'''{self.name}:
num_samples: {self.num_samples}
num_classes: {self.num_classes}
class_count_threshold: {self.threshold}
'''
def get_instance_metadata(self):
return MetaData.from_Dataset(self)
# (name=self.name,
# num_samples=self.num_samples,
# num_classes=self.num_classes,
# threshold=threshold,
# class_distribution=self.
def select_data_by_source_dataset(self, source_name):
"""
Returns a pd.DataFrame containing rows from data that originate from the dataset indicated by source_name.
data must be the result of at least one addition of 2 or more datasets.
e.g. pnas_dataset + leaves_dataset.
The output of this addition results in a new dataframe with a multiIndex, where index level 0 is the
dataset source name and index level 1 is the row number within the original dataset
Parameters
----------
data : pd.DataFrame
Should be extracted from a subclass of BaseDataset, by accessing the .data property, after combining
2 or more datasets
source_name : str
Should refer to one of the 2 or more datasets used to construct data
Returns
-------
pd.DataFrame
Contains only rows belonging to source_name's dataset, same columns and index levels as self.data previously
Examples
-------
Examples should be written in doctest format, and
should illustrate how to use the function/class.
>>>
"""
idx = np.where(self.data.index.get_level_values(0)==source_name)[0]
return self.data.iloc[idx,:]
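    # Editorial sketch: after e.g. ``merged = pnas_dataset + leaves_dataset``,
    # merge_with() builds merged.data with pd.concat({name: df, ...}), so index
    # level 0 holds the source name and
    #     merged.select_data_by_source_dataset(pnas_dataset.name)
    # returns only the rows contributed by the PNAS dataframe.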
def leave_one_class_out(self, class_name: str):
"""
LEAVE-ONE-OUT EXPERIMENT helper function
Returns a tuple with length==2. The first item is a DataFrame where every row comes from self.data, but does not
belong to the class indicated by class_name. The second item is a DataFrame containing all the rows that do
belong to class_name.
Parameters
----------
class_name : str
The class to be separated out
Returns
-------
tuple(pd.DataFrame, pd.DataFrame)
tuple corresponding to (included classes, excluded class)
Examples
-------
Examples should be written in doctest format, and
should illustrate how to use the function/class.
>>>
"""
label_col = self.columns[1]
include = self.data[self.data[label_col]!=class_name]
exclude = self.data[self.data[label_col]==class_name]
return (BaseDataset.from_dataframe(include, threshold=self.threshold),
BaseDataset.from_dataframe(exclude, threshold=self.threshold))
# assert include.shape[0]+exclude.shape[0]==self.data.shape[0]
# return (include, exclude)
def enforce_class_whitelist(self, class_names: list):
"""
Similar task as leave_one_class_out, but opposite approach. User provides a list of classes to include, while
the rest are excluded.
Useful for limiting a dataset to only classes that exist in another
Parameters
----------
class_names : list
The classes to be kept
Returns
-------
tuple(pd.DataFrame, pd.DataFrame)
tuple corresponding to (included classes, excluded class)
Examples
-------
Examples should be written in doctest format, and
should illustrate how to use the function/class.
>>>
"""
label_col = self.columns[1]
idx = self.data[label_col].isin(class_names)
include = self.data[idx]
exclude = self.data[~idx]
return (BaseDataset.from_dataframe(include, name=self.name, threshold=self.threshold),
BaseDataset.from_dataframe(exclude, name=self.name, threshold=self.threshold))
# assert include.shape[0]+exclude.shape[0]==self.data.shape[0]
# return (include, exclude)
#############################################################################################
class LabelEncoder:
fname = 'label_encoder.json'
def __init__(self, labels):
self.classes = tuple(np.unique(sorted(labels)))
self._encoder = OneToOne(enumerate(self.classes)).inv
self.fname = 'label_encoder.json'
@property
def num_classes(self):
return len(self.classes)
@property
def encoder(self):
return self._encoder
@encoder.setter
def encoder(self, data):
if type(data)==list:
self._encoder = OneToOne(enumerate(data)).inv
elif type(data) in [dict, OrderedDict]:
self._encoder = OneToOne(data)
else:
assert False
@property
def decoder(self):
return self.encoder.inv
def encode(self, labels):
'''str->int'''
return [self.encoder[l] for l in list(labels)]
def decode(self, labels):
'''int->str'''
return [self.decoder[l] for l in labels]
@classmethod
def load_config(cls, label_dir):
with open(os.path.join(label_dir, cls.fname), 'r') as file:
data = json.load(file)
# cls(**data)
loaded = cls(list(data.keys()))
loaded.encoder = data
return loaded
def save_config(self, out_dir):
with open(os.path.join(out_dir, self.fname), 'w') as file:
json.dump(self.encoder, file)
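# Editorial usage sketch for the class above:
#   enc = LabelEncoder(['b', 'a', 'b'])  # classes -> ('a', 'b')
#   enc.encode(['a', 'b'])               # -> [0, 1]
#   enc.decode([1, 0])                   # -> ['b', 'a']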
def partition_data(data, partitions=OrderedDict({'train':0.5,'test':0.5})):
'''
Split data into named partitions by fraction
Example:
--------
#split_data will be a dict with the same keys as partitions, and the values will be the corresponding samples from data.
#i.e. 'train' will get the first 40% of samples, 'val' the next 10%, and 'test' the last 50%
    >>> split_data = partition_data(data, partitions=OrderedDict({'train':0.4,'val':0.1,'test':0.5}))
'''
num_rows = len(data)
output={}
taken = 0.0
for k,v in partitions.items():
idx = (int(taken*num_rows),int((taken+v)*num_rows))
print(k, v, idx)
output.update({k:data[idx[0]:idx[1]]})
taken+=v
assert taken <= 1.0
return output
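# Editorial worked example: with ten samples and the default 50/50 split,
#   partition_data(list(range(10)))
# returns {'train': [0, 1, 2, 3, 4], 'test': [5, 6, 7, 8, 9]} -- fractions are
# mapped to contiguous index ranges, so shuffle the data first if order matters.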
def preprocess_data(dataset, encoder, config):
"""
Function to perform 4 preprocessing steps:
1. Exclude classes below minimum threshold defined in config.threshold
2. Exclude all classes that are not referenced in encoder.classes
3. Encode and normalize data into (path: str, label: int) tuples
4. Partition data samples into fractional splits defined in config.data_splits_meta
Parameters
----------
dataset : BaseDataset
Any instance of BaseDataset or its subclasses
encoder : LabelEncoder
Description of parameter `encoder`.
config : Namespace or stuf.stuf
Config object containing the attributes/properties:
config.threshold
config.data_splits_meta
Returns
-------
dict
Dictionary mapping from keys defined in config.data_splits_meta.keys(), to lists of tuples representing each sample.
Examples
-------
Examples should be written in doctest format, and
should illustrate how to use the function/class.
>>> dataset = LeavesDataset()
... encoder = LabelEncoder(dataset.data.family)
... data_splits = preprocess_data(dataset, encoder, config)
"""
dataset.exclude_rare_classes(threshold=config.threshold)
encoder.encoder = dataset.classes
dataset, _ = dataset.enforce_class_whitelist(class_names=encoder.classes)
x = list(dataset.data['path'].values)#.reshape((-1,1))
y = np.array(encoder.encode(dataset.data['family']))
# import pdb;pdb.set_trace()
shuffled_data = list(zip(x,y))
random.shuffle(shuffled_data)
partitioned_data = partition_data(data=shuffled_data,
partitions=OrderedDict(config.data_splits_meta)
)
return {k:v for k,v in partitioned_data.items() if len(v)>0}
def calculate_class_counts(y_data : list):
labels, label_counts = np.unique(y_data, return_counts=True)
if type(labels[0])!=str:
labels = [int(label) for label in labels]
label_counts = [int(count) for count in label_counts]
return {label: count for label,count in zip(labels, label_counts)}
def calculate_class_weights(y_data : list):
"""
Calculate class weights as w[i] = <total # of samples>/(<total # of classes>*<class[i] count>)
Parameters
----------
y_data : list
List of y labels to be counted per class for calculating class weights
Returns
-------
dict
Contains key:value pairs corresponding to unique class labels: corresponding weights
e.g. {0:1.0,
1:2.344,
2:5.456}
"""
# labels, label_counts = np.unique(y_data, return_counts=True)
class_counts_dict = calculate_class_counts(y_data)
total = sum(class_counts_dict.values())
num_classes = len(class_counts_dict)
calc_weight = lambda count: total / (num_classes * count)
class_weights = {label:calc_weight(count) for label, count in class_counts_dict.items()}
# class_weights = {k: v/np.min(list(class_weights.values())) for k,v in class_weights.items()}
# class_weights = {k: v/np.max(list(class_weights.values())) for k,v in class_weights.items()}
return class_weights
# total = sum(label_counts)
# num_classes = len(labels)
# class_weights = {}
# for label, c in zip(labels,label_counts):
# if type(label) != str:
# label = int(label)
# class_weights[label] = total / (num_classes * c)
# return class_weights
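# Editorial worked example: for class counts {0: 2, 1: 8}, total = 10 and
# num_classes = 2, so the weights come out as
#   {0: 10 / (2 * 2), 1: 10 / (2 * 8)} == {0: 2.5, 1: 0.625}
# i.e. the rarer class is up-weighted.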
| [
"pyleaves.leavesdb.init_local_db",
"pandas.read_csv",
"random.shuffle",
"dataset.enforce_class_whitelist",
"pyleaves.tests.test_utils.MetaData.from_Dataset",
"os.path.join",
"numpy.unique",
"pandas.DataFrame",
"dataset.exclude_rare_classes",
"pandas.concat",
"json.dump",
"boltons.dictutils.One... | [((12977, 13017), 'collections.OrderedDict', 'OrderedDict', (["{'train': 0.5, 'test': 0.5}"], {}), "({'train': 0.5, 'test': 0.5})\n", (12988, 13017), False, 'from collections import OrderedDict\n'), ((14934, 14990), 'dataset.exclude_rare_classes', 'dataset.exclude_rare_classes', ([], {'threshold': 'config.threshold'}), '(threshold=config.threshold)\n', (14962, 14990), False, 'import dataset\n'), ((15046, 15106), 'dataset.enforce_class_whitelist', 'dataset.enforce_class_whitelist', ([], {'class_names': 'encoder.classes'}), '(class_names=encoder.classes)\n', (15077, 15106), False, 'import dataset\n'), ((15297, 15326), 'random.shuffle', 'random.shuffle', (['shuffled_data'], {}), '(shuffled_data)\n', (15311, 15326), False, 'import random\n'), ((15648, 15685), 'numpy.unique', 'np.unique', (['y_data'], {'return_counts': '(True)'}), '(y_data, return_counts=True)\n', (15657, 15685), True, 'import numpy as np\n'), ((1766, 1800), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.columns'}), '(columns=self.columns)\n', (1778, 1800), True, 'import pandas as pd\n'), ((2447, 2507), 'dataset.connect', 'dataset.connect', (['f"""sqlite:///{self.local_db}"""'], {'row_type': 'stuf'}), "(f'sqlite:///{self.local_db}', row_type=stuf)\n", (2462, 2507), False, 'import dataset\n'), ((3238, 3276), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'drop_index': '(True)'}), '(filepath, drop_index=True)\n', (3249, 3276), True, 'import pandas as pd\n'), ((4531, 4644), 'pyleaves.data_pipeline.preprocessing.filter_low_count_labels', 'pyleaves.data_pipeline.preprocessing.filter_low_count_labels', (['self.data', 'threshold', 'self.y_col'], {'verbose': '(False)'}), '(self.data,\n threshold, self.y_col, verbose=False)\n', (4591, 4644), False, 'import pyleaves\n'), ((5467, 5524), 'pandas.concat', 'pd.concat', (['{self.name: self.data, other.name: other.data}'], {}), '({self.name: self.data, other.name: other.data})\n', (5476, 5524), True, 'import pandas as pd\n'), ((6717, 6751), 'toolz.itertoolz.frequencies', 'frequencies', (['self.data[self.y_col]'], {}), '(self.data[self.y_col])\n', (6728, 6751), False, 'from toolz.itertoolz import frequencies\n'), ((7511, 7538), 'pyleaves.tests.test_utils.MetaData.from_Dataset', 'MetaData.from_Dataset', (['self'], {}), '(self)\n', (7532, 7538), False, 'from pyleaves.tests.test_utils import MetaData\n'), ((1662, 1714), 'pyleaves.leavesdb.init_local_db', 'leavesdb.init_local_db', ([], {'src_db': 'src_db', 'verbose': '(False)'}), '(src_db=src_db, verbose=False)\n', (1684, 1714), False, 'from pyleaves import leavesdb\n'), ((12659, 12674), 'json.load', 'json.load', (['file'], {}), '(file)\n', (12668, 12674), False, 'import json\n'), ((12905, 12934), 'json.dump', 'json.dump', (['self.encoder', 'file'], {}), '(self.encoder, file)\n', (12914, 12934), False, 'import json\n'), ((15435, 15471), 'collections.OrderedDict', 'OrderedDict', (['config.data_splits_meta'], {}), '(config.data_splits_meta)\n', (15446, 15471), False, 'from collections import OrderedDict\n'), ((2732, 2811), 'pyleaves.leavesdb.db_query.load_data', 'leavesdb.db_query.load_data', ([], {'db': 'db', 'x_col': 'x_col', 'y_col': 'y_col', 'dataset': 'self.name'}), '(db=db, x_col=x_col, y_col=y_col, dataset=self.name)\n', (2759, 2811), False, 'from pyleaves import leavesdb\n'), ((3617, 3628), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3625, 3628), True, 'import numpy as np\n'), ((3670, 3681), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3678, 3681), True, 'import numpy as np\n'), ((12178, 
12192), 'boltons.dictutils.OneToOne', 'OneToOne', (['data'], {}), '(data)\n', (12186, 12192), False, 'from boltons.dictutils import OneToOne\n'), ((12590, 12624), 'os.path.join', 'os.path.join', (['label_dir', 'cls.fname'], {}), '(label_dir, cls.fname)\n', (12602, 12624), False, 'import os\n'), ((12844, 12877), 'os.path.join', 'os.path.join', (['out_dir', 'self.fname'], {}), '(out_dir, self.fname)\n', (12856, 12877), False, 'import os\n'), ((5900, 5931), 'numpy.all', 'np.all', (['(self.data == other.data)'], {}), '(self.data == other.data)\n', (5906, 5931), True, 'import numpy as np\n'), ((5975, 6012), 'numpy.all', 'np.all', (['(self.classes == other.classes)'], {}), '(self.classes == other.classes)\n', (5981, 6012), True, 'import numpy as np\n')] |
from PIL import Image
import astropy.io.fits as pyfits
from astropy.coordinates import SkyCoord
import glob
import numpy as np
import sys
import os
FLIPUD = True # Important: PNGs are inverted from fits
hdu = 0
bands_des = "griz"
magzps_des = [30, 30, 30, 30]
bands_cfhtls = "ugriz"
pixel_scale = .263 # DES
scale = 4
bands = bands_des
def main(args):
indir = args[0]
doboth = False
mags = {}
outputfile = "mags.csv"
if not os.path.isdir(indir):
print("Directory %s not found.")
sys.exit(0)
if os.path.exists(indir + "/masks_lens") and os.path.exists(indir + "/masks_source"):
doboth = True
else:
if not os.path.exists(indir + "/masks"):
print("Masks directory not found.")
sys.exit(0)
if doboth:
columns = ["lens", "source"]
mags['lens'] = getmags(indir, masksdir="masks_lens")
mags['source'] = getmags(indir, masksdir="masks_source")
else:
columns = ["mag"]
mags['mag'] = getmags(indir, masksdir="masks")
# output
with open(indir + "/" + outputfile, "w") as f:
f.write("objid,")
for column in columns:
f.write(",".join(
[b + "_mag_" + column for b in bands])
)
f.write(",")
f.write(",".join(
[b + "_sb_" + column for b in bands])
)
if column != columns[-1]:
f.write(",")
f.write("\n")
objids = list(mags[columns[0]].keys())
for obj in objids:
line = ""
f.write("%s," % obj)
for column in columns:
for band in bands:
line += "%.2f," % mags[column][obj][0][band]
for band in bands:
line += "%.2f," % mags[column][obj][1][band]
f.write(line[:-1] + "\n")
def getmags(indir, bands=bands, masksdir="masks"):
output_mags = {}
mask_files = glob.glob(indir + "/" + masksdir + "/*.bmp")
for mask in mask_files:
objid = mask.replace("-mask", "").split(".")[0].split("/")[-1]
mask = np.array(Image.open(mask))
mask = mask / 255.
if FLIPUD:
mask = np.flipud(mask)
fits_files = glob.glob(indir + "/fits/*" + objid + "*.fits")
mags = {}
sb = {}
for band in bands:
mags[band] = 99
ff = [f for f in fits_files if "_" + band + "." in f]
if len(ff) != 1:
print("No FITS files found for " + objid)
break
hdulist = pyfits.open(ff[0])
data = hdulist[hdu].data
try:
masked = data * mask
            except Exception as e:  # 'Error' is undefined here and would itself raise NameError
# print(e)
continue
magzp = None
header = hdulist[hdu].header
if "MAGZP" in header:
magzp = float(hdulist[hdu].header['MAGZP'])
elif "MAGZERO" in header:
magzp = float(hdulist[hdu].header['MAGZERO'])
if magzp is None:
magzp = 30
print("Warning: No zero point found, using default")
mag = -2.5 * np.log10(masked.sum()) + magzp
mags[band] = mag
A = mask.sum() * pixel_scale**2
surf_bri = mag + 2.5 * np.log10(A) # flux_pixel * (1./pixel_scale)**2
sb[band] = surf_bri
output_mags[objid] = (mags, sb)
return output_mags
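# Editorial worked example: a masked flux sum of 1000 counts at magzp = 30 gives
# mag = -2.5 * log10(1000) + 30 = 22.5; a 100-pixel mask covers
# A = 100 * 0.263**2 ~= 6.92 arcsec^2, so the surface brightness is
# 22.5 + 2.5 * log10(6.92) ~= 24.6 mag / arcsec^2.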
if __name__ == "__main__":
main(sys.argv[1:])
| [
"os.path.isdir",
"os.path.exists",
"numpy.flipud",
"PIL.Image.open",
"astropy.io.fits.open",
"glob.glob",
"numpy.log10",
"sys.exit"
] | [((1972, 2016), 'glob.glob', 'glob.glob', (["(indir + '/' + masksdir + '/*.bmp')"], {}), "(indir + '/' + masksdir + '/*.bmp')\n", (1981, 2016), False, 'import glob\n'), ((449, 469), 'os.path.isdir', 'os.path.isdir', (['indir'], {}), '(indir)\n', (462, 469), False, 'import os\n'), ((520, 531), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (528, 531), False, 'import sys\n'), ((539, 576), 'os.path.exists', 'os.path.exists', (["(indir + '/masks_lens')"], {}), "(indir + '/masks_lens')\n", (553, 576), False, 'import os\n'), ((581, 620), 'os.path.exists', 'os.path.exists', (["(indir + '/masks_source')"], {}), "(indir + '/masks_source')\n", (595, 620), False, 'import os\n'), ((2260, 2307), 'glob.glob', 'glob.glob', (["(indir + '/fits/*' + objid + '*.fits')"], {}), "(indir + '/fits/*' + objid + '*.fits')\n", (2269, 2307), False, 'import glob\n'), ((669, 701), 'os.path.exists', 'os.path.exists', (["(indir + '/masks')"], {}), "(indir + '/masks')\n", (683, 701), False, 'import os\n'), ((763, 774), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (771, 774), False, 'import sys\n'), ((2140, 2156), 'PIL.Image.open', 'Image.open', (['mask'], {}), '(mask)\n', (2150, 2156), False, 'from PIL import Image\n'), ((2223, 2238), 'numpy.flipud', 'np.flipud', (['mask'], {}), '(mask)\n', (2232, 2238), True, 'import numpy as np\n'), ((2594, 2612), 'astropy.io.fits.open', 'pyfits.open', (['ff[0]'], {}), '(ff[0])\n', (2605, 2612), True, 'import astropy.io.fits as pyfits\n'), ((3339, 3350), 'numpy.log10', 'np.log10', (['A'], {}), '(A)\n', (3347, 3350), True, 'import numpy as np\n')] |
import os
import struct
from array import array
import numpy as np
import png
from PIL import Image
from tqdm import tqdm
# Begin 'raw_to_png'
def read(dataset="train", path="."):
if dataset is "train":
fname_img = os.path.join(path, 'train-images-idx3-ubyte')
fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')
elif dataset is "test":
fname_img = os.path.join(path, 't10k-images-idx3-ubyte')
fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')
else:
raise ValueError("dataset must be 'train' or 'test'")
flbl = open(fname_lbl, 'rb')
magic_nr, size = struct.unpack(">II", flbl.read(8))
lbl = array("b", flbl.read())
flbl.close()
fimg = open(fname_img, 'rb')
magic_nr, size, rows, cols = struct.unpack(">IIII", fimg.read(16))
img = array("B", fimg.read())
fimg.close()
return lbl, img, size, rows, cols
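# Editorial note: the idx files use the MNIST binary layout -- a big-endian
# header (magic number and item count; images add row and column counts, hence
# the ">II" / ">IIII" struct formats above) followed by the raw byte payload.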
def write_dataset(labels, data, size, rows, cols, output_path, dataset):
output_dir = os.path.join(output_path, dataset)
# create output directories
output_dirs = [os.path.join(output_dir, str(i)) for i in range(10)]
for dir in output_dirs:
if not os.path.exists(dir):
os.makedirs(dir)
txt_path = os.path.join(output_path, dataset + ".txt")
# erase .txt file if it exists
open(txt_path, "w").close()
# write data
i = 0
for label in tqdm(labels):
output_filename = os.path.join(output_dirs[label], str(i) + ".png")
with open(output_filename, "wb") as h:
w = png.Writer(cols, rows, greyscale=True)
data_i = [data[(i*rows*cols + j*cols): (i*rows*cols + (j+1)*cols)] for j in range(rows)]
w.write(h, data_i)
with open(txt_path, "a") as f:
f.write(output_filename + "\n")
i += 1
def raw_to_png(raw_dir, png_dir):
raw_dir = os.path.abspath(raw_dir)
png_dir = os.path.abspath(png_dir)
if not os.path.exists(png_dir):
os.makedirs(png_dir)
for dataset in ["train", "test"]:
print("Writing {} dataset".format(dataset))
labels, data, size, rows, cols = read(dataset, raw_dir)
write_dataset(labels, data, size, rows, cols, png_dir, dataset)
# Begin 'png_to_npy'
def mnist_png_to_npy(split, png_dir, npy_dir):
with open(os.path.join(png_dir, split + '.txt'), 'r') as f:
paths = f.read().splitlines()
# number of labels
labels = 0
# doesn't matter what path we use to get the split directory name
split_dir = os.path.dirname(os.path.dirname(paths[0]))
for _, dirnames, _ in os.walk(split_dir):
labels += len(dirnames)
np_images = []
np_labels = []
for path in tqdm(paths):
# np_image shape: [1 x 784]
np_image = np.array(Image.open(path)).flatten()
# remember to divide by 256
np_image = np_image / 256.0
np_images.append(np_image)
# get label from folder name of file
label = int(os.path.basename(os.path.dirname(path)))
# np_label shape: [1 x #labels]
np_label = np.zeros(shape=labels)
np_label[label] = 1.0
np_labels.append(np_label)
np.save(os.path.join(npy_dir, split + '-images.npy'), np_images)
np.save(os.path.join(npy_dir, split + '-labels.npy'), np_labels)
def png_to_npy(png_dir, npy_dir):
png_dir = os.path.realpath(png_dir)
npy_dir = os.path.realpath(npy_dir)
if not os.path.exists(npy_dir):
os.makedirs(npy_dir)
splits = ['train', 'test']
for split in splits:
print('Current split: ' + split)
mnist_png_to_npy(split, png_dir, npy_dir)
| [
"tqdm.tqdm",
"os.path.abspath",
"os.makedirs",
"os.path.realpath",
"os.walk",
"os.path.exists",
"os.path.dirname",
"numpy.zeros",
"PIL.Image.open",
"png.Writer",
"os.path.join"
] | [((1003, 1037), 'os.path.join', 'os.path.join', (['output_path', 'dataset'], {}), '(output_path, dataset)\n', (1015, 1037), False, 'import os\n'), ((1251, 1294), 'os.path.join', 'os.path.join', (['output_path', "(dataset + '.txt')"], {}), "(output_path, dataset + '.txt')\n", (1263, 1294), False, 'import os\n'), ((1408, 1420), 'tqdm.tqdm', 'tqdm', (['labels'], {}), '(labels)\n', (1412, 1420), False, 'from tqdm import tqdm\n'), ((1880, 1904), 'os.path.abspath', 'os.path.abspath', (['raw_dir'], {}), '(raw_dir)\n', (1895, 1904), False, 'import os\n'), ((1919, 1943), 'os.path.abspath', 'os.path.abspath', (['png_dir'], {}), '(png_dir)\n', (1934, 1943), False, 'import os\n'), ((2604, 2622), 'os.walk', 'os.walk', (['split_dir'], {}), '(split_dir)\n', (2611, 2622), False, 'import os\n'), ((2711, 2722), 'tqdm.tqdm', 'tqdm', (['paths'], {}), '(paths)\n', (2715, 2722), False, 'from tqdm import tqdm\n'), ((3366, 3391), 'os.path.realpath', 'os.path.realpath', (['png_dir'], {}), '(png_dir)\n', (3382, 3391), False, 'import os\n'), ((3406, 3431), 'os.path.realpath', 'os.path.realpath', (['npy_dir'], {}), '(npy_dir)\n', (3422, 3431), False, 'import os\n'), ((233, 278), 'os.path.join', 'os.path.join', (['path', '"""train-images-idx3-ubyte"""'], {}), "(path, 'train-images-idx3-ubyte')\n", (245, 278), False, 'import os\n'), ((299, 344), 'os.path.join', 'os.path.join', (['path', '"""train-labels-idx1-ubyte"""'], {}), "(path, 'train-labels-idx1-ubyte')\n", (311, 344), False, 'import os\n'), ((1956, 1979), 'os.path.exists', 'os.path.exists', (['png_dir'], {}), '(png_dir)\n', (1970, 1979), False, 'import os\n'), ((1989, 2009), 'os.makedirs', 'os.makedirs', (['png_dir'], {}), '(png_dir)\n', (2000, 2009), False, 'import os\n'), ((2551, 2576), 'os.path.dirname', 'os.path.dirname', (['paths[0]'], {}), '(paths[0])\n', (2566, 2576), False, 'import os\n'), ((3090, 3112), 'numpy.zeros', 'np.zeros', ([], {'shape': 'labels'}), '(shape=labels)\n', (3098, 3112), True, 'import numpy as np\n'), ((3190, 3234), 'os.path.join', 'os.path.join', (['npy_dir', "(split + '-images.npy')"], {}), "(npy_dir, split + '-images.npy')\n", (3202, 3234), False, 'import os\n'), ((3259, 3303), 'os.path.join', 'os.path.join', (['npy_dir', "(split + '-labels.npy')"], {}), "(npy_dir, split + '-labels.npy')\n", (3271, 3303), False, 'import os\n'), ((3443, 3466), 'os.path.exists', 'os.path.exists', (['npy_dir'], {}), '(npy_dir)\n', (3457, 3466), False, 'import os\n'), ((3476, 3496), 'os.makedirs', 'os.makedirs', (['npy_dir'], {}), '(npy_dir)\n', (3487, 3496), False, 'import os\n'), ((393, 437), 'os.path.join', 'os.path.join', (['path', '"""t10k-images-idx3-ubyte"""'], {}), "(path, 't10k-images-idx3-ubyte')\n", (405, 437), False, 'import os\n'), ((458, 502), 'os.path.join', 'os.path.join', (['path', '"""t10k-labels-idx1-ubyte"""'], {}), "(path, 't10k-labels-idx1-ubyte')\n", (470, 502), False, 'import os\n'), ((1185, 1204), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (1199, 1204), False, 'import os\n'), ((1218, 1234), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (1229, 1234), False, 'import os\n'), ((1561, 1599), 'png.Writer', 'png.Writer', (['cols', 'rows'], {'greyscale': '(True)'}), '(cols, rows, greyscale=True)\n', (1571, 1599), False, 'import png\n'), ((2323, 2360), 'os.path.join', 'os.path.join', (['png_dir', "(split + '.txt')"], {}), "(png_dir, split + '.txt')\n", (2335, 2360), False, 'import os\n'), ((3006, 3027), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (3021, 3027), False, 'import os\n'), 
((2788, 2804), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (2798, 2804), False, 'from PIL import Image\n')] |
from plato.backend.units import Units
from plato.backend.datasources.pevent_trace.datasource import PeventDataSource
from ..general_trace.adapter import GeneralTraceAdapter
import numpy as np
# Example adapter for a random pseudo-data-source
class PeventTraceAdapter(GeneralTraceAdapter):
statsForUnits = {Units.CYCLES: PeventDataSource.cyclesStat}
def __init__(self, data_source: PeventDataSource):
super().__init__(data_source)
# Override: We know this trace has trn_idx and cycles columns
def get_stat_for_units(self, units):
if units in PeventTraceAdapter.statsForUnits:
return PeventTraceAdapter.statsForUnits[units]
return super().get_stat_for_units(units)
def get_points(self, first, last, units, stat_cols, max_points_per_series):
'''
just get all the points for these stat_cols since they're traces
'''
# TODO no downsampling yet
# TODO no maximum points yet, this is to be used for histograms
returnArrayList = []
for stat in stat_cols:
currentColumn = self.pdf[stat]
firstIndex = np.searchsorted(currentColumn, first, side="left")
lastIndex = max(np.searchsorted(currentColumn, last, side="left") - 1, firstIndex)
returnArrayList.append(currentColumn[firstIndex:lastIndex])
return returnArrayList
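    # Editorial note: because the stat column is sorted, np.searchsorted maps the
    # [first, last) window onto index bounds, e.g.
    #     np.searchsorted([0, 10, 20, 30], 10, side='left')  # -> 1
    # so currentColumn[firstIndex:lastIndex] above returns the points inside that window.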
| [
"numpy.searchsorted"
] | [((1136, 1186), 'numpy.searchsorted', 'np.searchsorted', (['currentColumn', 'first'], {'side': '"""left"""'}), "(currentColumn, first, side='left')\n", (1151, 1186), True, 'import numpy as np\n'), ((1215, 1264), 'numpy.searchsorted', 'np.searchsorted', (['currentColumn', 'last'], {'side': '"""left"""'}), "(currentColumn, last, side='left')\n", (1230, 1264), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Interstellar Technologies Inc. All Rights Reserved.
# Authors : <NAME>
#
# License : MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import sys
import os
import io
import numpy as np
import pandas as pd
import json
from jinja2 import Template
from bokeh.embed import components
from bokeh.models import Range1d
from bokeh.plotting import figure
from bokeh.resources import INLINE
from bokeh.util.browser import view
from bokeh.palettes import d3
from bokeh.layouts import gridplot
# from bokeh.io import output_file, show
from bokeh.models import PrintfTickFormatter, HoverTool
g = 9.80665
if (len(sys.argv) != 1):
input_json = sys.argv[1]
else:
input_json = "param_sample_01.json"
# ==== Read input file ====
with open(input_json, 'r') as f:
json_dict = json.load(f)
# rocket_name = json_dict['name']
rocket_name = json_dict['name(str)']
# ==== Bokeh setup ====
TOOLS="pan,wheel_zoom,box_zoom,reset,save,hover"
PLOT_OPTIONS = dict(tools=TOOLS, plot_width=550, plot_height=300)
HOVER_SET = [("date (x,y)", "($x{0,0}, $y{0,0})")]
HOVER_SET_F = [("date (x,y)", "($x{0,0}, $y{0,0.00})")]
C = d3["Category10"][10]
# ==== Plot each stage ====
# Default empty components so the template render below does not hit a
# NameError when a stage's dynamics CSV is missing.
script1, div1 = "", ""
script2, div2 = "", ""
script3, div3 = "", ""
for stage_str in ['1', '2', '3']:
st = stage_str + ' stage: ' # stage string for title
file_name = "output/" + rocket_name + "_dynamics_" + stage_str + ".csv"
if not os.path.exists(file_name): continue
df1 = pd.read_csv(file_name, index_col=False)
    # ==== Comment these lines on/off to plot only up to burnout or apogee ====
time_burnout = df1[df1["thrust(N)"] == 0]["time(s)"][1:].min()
time_apogee = df1[df1["altitude(m)"] == df1["altitude(m)"].max()]["time(s)"]
# import pdb; pdb.set_trace()
if not isinstance(time_apogee, float):
time_apogee = time_apogee.iloc[0]
# df1 = df1[df1["time(s)"] < float(time_burnout)]
# df1 = df1[df1["time(s)"] < float(time_apogee)]
    # ==== Comment these lines on/off to plot only up to burnout or apogee ====
xr1 = Range1d(start=0, end=df1["time(s)"].max())
p_mass = figure(title=st+"質量", x_axis_label="時刻 [sec]", y_axis_label="質量 [kg]",
x_range=xr1, **PLOT_OPTIONS)
p_mass.line(df1["time(s)"], df1["mass(kg)"], color=C[0])
p_mass.select_one(HoverTool).tooltips = HOVER_SET
p_thrust = figure(title=st+"推力", x_axis_label="時刻 [sec]", y_axis_label="推力 [N]",
x_range=xr1, **PLOT_OPTIONS)
p_thrust.line(df1["time(s)"], df1["thrust(N)"], color=C[1])
p_thrust.yaxis[0].formatter = PrintfTickFormatter(format="%f")
p_thrust.select_one(HoverTool).tooltips = HOVER_SET
p_alt = figure(title=st+"高度", x_axis_label="時刻 [sec]", y_axis_label="高度 [m]",
x_range=xr1, **PLOT_OPTIONS)
p_alt.line(df1["time(s)"], df1["altitude(m)"], color=C[2])
p_alt.yaxis[0].formatter = PrintfTickFormatter(format="%f")
p_alt.select_one(HoverTool).tooltips = HOVER_SET
p_Isp = figure(title=st+"比推力", x_axis_label="時刻 [sec]", y_axis_label="比推力 [秒]",
x_range=xr1, **PLOT_OPTIONS)
p_Isp.line(df1["time(s)"], df1["Isp(s)"], color=C[3])
p_Isp.select_one(HoverTool).tooltips = HOVER_SET
p_downrange = figure(title=st+"ダウンレンジ", x_axis_label="時刻 [sec]", y_axis_label="ダウンレンジ [m]",
x_range=xr1, **PLOT_OPTIONS)
p_downrange.line(df1["time(s)"], df1["downrange(m)"], color=C[4])
p_downrange.yaxis[0].formatter = PrintfTickFormatter(format="%f")
p_downrange.select_one(HoverTool).tooltips = HOVER_SET
p_profile = figure(title=st+"飛翔プロファイル", x_axis_label="ダウンレンジ [m]", y_axis_label="高度 [m]",
**PLOT_OPTIONS)
p_profile.line(df1["downrange(m)"], df1["altitude(m)"], color=C[5])
p_profile.xaxis[0].formatter = PrintfTickFormatter(format="%f")
p_profile.yaxis[0].formatter = PrintfTickFormatter(format="%f")
p_profile.select_one(HoverTool).tooltips = HOVER_SET
p_velh = figure(title=st+"水平面速度", x_axis_label="時刻 [sec]", y_axis_label="速度 [m/s]",
x_range=xr1, **PLOT_OPTIONS)
vel_horizontal = np.sqrt(df1["vel_NED_X(m/s)"] ** 2 + df1["vel_NED_Y(m/s)"] ** 2)
p_velh.line(df1["time(s)"], df1["vel_NED_X(m/s)"], legend_label="North", color=C[6])
p_velh.line(df1["time(s)"], df1["vel_NED_Y(m/s)"], legend_label="East", color=C[7])
p_velh.line(df1["time(s)"], vel_horizontal, legend_label="Horizon", color=C[8])
p_velh.select_one(HoverTool).tooltips = HOVER_SET
p_velv = figure(title=st+"垂直速度", x_axis_label="時刻 [sec]", y_axis_label="速度 [m/s]",
x_range=xr1, **PLOT_OPTIONS)
p_velv.line(df1["time(s)"], -df1["vel_NED_Z(m/s)"], legend_label="Up", color=C[8])
p_velv.select_one(HoverTool).tooltips = HOVER_SET
p_q = figure(title=st+"動圧", x_axis_label="時刻 [sec]", y_axis_label="動圧 [kPa]",
x_range=xr1, **PLOT_OPTIONS)
p_q.line(df1["time(s)"], df1["dynamic pressure(Pa)"] / 1000, color=C[9])
p_q.select_one(HoverTool).tooltips = HOVER_SET
p_mach = figure(title=st+"機体のその場高度でのマッハ数", x_axis_label="時刻 [sec]", y_axis_label="マッハ数 [-]",
x_range=xr1, **PLOT_OPTIONS)
p_mach.line(df1["time(s)"], df1["Mach number"], color=C[0])
p_mach.select_one(HoverTool).tooltips = HOVER_SET_F
p_acc = figure(title=st+"加速度", x_axis_label="時刻 [sec]", y_axis_label="加速度 [G]",
x_range=xr1, **PLOT_OPTIONS)
acc = np.sqrt(df1["acc_Body_X(m/s2)"] **2 + df1["acc_Body_X(m/s2)"] ** 2 + df1["acc_Body_X(m/s2)"] ** 2)
p_acc.line(df1["time(s)"], acc / g, color=C[1])
p_acc.select_one(HoverTool).tooltips = HOVER_SET_F
p_acc3 = figure(title=st+"機体の各軸にかかる加速度", x_axis_label="時刻 [sec]", y_axis_label="加速度 [G]",
x_range=xr1, **PLOT_OPTIONS)
p_acc3.line(df1["time(s)"], df1["acc_Body_X(m/s2)"] / g, legend_label="X", color=C[2])
p_acc3.line(df1["time(s)"], df1["acc_Body_Y(m/s2)"] / g, legend_label="Y", color=C[3])
p_acc3.line(df1["time(s)"], df1["acc_Body_Z(m/s2)"] / g, legend_label="Z", color=C[4])
p_acc3.select_one(HoverTool).tooltips = HOVER_SET_F
p_az = figure(title=st+"姿勢:方位角", x_axis_label="時刻 [sec]", y_axis_label="角度 [deg]",
x_range=xr1, **PLOT_OPTIONS)
if 'attitude_azimuth(deg)' in df1:
p_az.line(df1["time(s)"], df1["attitude_azimuth(deg)"], color=C[5])
else:
p_az.line(df1["time(s)"], df1["attitude_azimth(deg)"], color=C[5])
p_az.select_one(HoverTool).tooltips = HOVER_SET
p_el = figure(title=st+"姿勢:仰角", x_axis_label="時刻 [sec]", y_axis_label="角度 [deg]",
x_range=xr1, **PLOT_OPTIONS)
p_el.line(df1["time(s)"], df1["attitude_elevation(deg)"], color=C[6])
p_el.select_one(HoverTool).tooltips = HOVER_SET
if 'attitude_roll(deg)' in df1:
p_ro = figure(title=st+"姿勢:ロール角", x_axis_label="時刻 [sec]", y_axis_label="角度 [deg]",
x_range=xr1, **PLOT_OPTIONS)
p_ro.line(df1["time(s)"], df1["attitude_roll(deg)"], color=C[7])
p_ro.select_one(HoverTool).tooltips = HOVER_SET
else:
p_ro = None
p_AoA = figure(title=st+"迎角", x_axis_label="時刻 [sec]", y_axis_label="迎角 [deg]",
x_range=xr1, **PLOT_OPTIONS)
p_AoA.line(df1["time(s)"], df1["angle of attack alpha(deg)"], legend_label="alpha", color=C[7])
p_AoA.line(df1["time(s)"], df1["angle of attack beta(deg)"], legend_label="beta", color=C[8])
p_AoA.select_one(HoverTool).tooltips = HOVER_SET_F
p_AoAg = figure(title=st+"全迎角:γ", x_axis_label="時刻 [sec]", y_axis_label="迎角 [deg]",
x_range=xr1, **PLOT_OPTIONS)
p_AoAg.line(df1["time(s)"], df1["all angle of attack gamma(deg)"], color=C[7])
p_AoAg.select_one(HoverTool).tooltips = HOVER_SET_F
p_Qa = figure(title=st+"Qγ", x_axis_label="時刻 [sec]", y_axis_label="Qγ [kPa.rad]",
x_range=xr1, **PLOT_OPTIONS)
p_Qa.line(df1["time(s)"], df1["dynamic pressure(Pa)"].values * 1e-3 * df1["all angle of attack gamma(deg)"].values * np.deg2rad(1.), color=C[7])
p_Qa.select_one(HoverTool).tooltips = HOVER_SET_F
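    # Editorial note: Q-gamma -- dynamic pressure times total angle of attack -- is a
    # common aerodynamic load indicator for launch vehicles, hence the 1e-3 Pa->kPa
    # scaling and the np.deg2rad(1.) degree->radian factor in the line above.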
# p_drag = figure(title=st+"抗力", x_axis_label="時刻 [sec]", y_axis_label="抗力 [N]",
# x_range=xr1, **PLOT_OPTIONS)
# p_drag.line(df1["time(s)"], df1["aero Drag(N)"], color=C[9])
# p_drag.select_one(HoverTool).tooltips = HOVER_SET
#
#
# p_lift = figure(title=st+"揚力", x_axis_label="時刻 [sec]", y_axis_label="揚力 [N]",
# x_range=xr1, **PLOT_OPTIONS)
# p_lift.line(df1["time(s)"], df1["aero Lift(N)"], color=C[0])
# p_lift.select_one(HoverTool).tooltips = HOVER_SET
p_aeroforce3 = figure(title=st+"機体の各軸にかかる空気力", x_axis_label="時刻 [sec]", y_axis_label="空気力 [N]",
x_range=xr1, **PLOT_OPTIONS)
p_aeroforce3.line(df1["time(s)"], df1["aeroforce_Body_X[N]"], legend_label="X", color=C[2])
p_aeroforce3.line(df1["time(s)"], df1["aeroforce_Body_Y[N]"], legend_label="Y", color=C[3])
p_aeroforce3.line(df1["time(s)"], df1["aeroforce_Body_Z[N]"], legend_label="Z", color=C[4])
p_aeroforce3.select_one(HoverTool).tooltips = HOVER_SET_F
p_gimbal = figure(title=st+"ジンバル角", x_axis_label="時刻 [sec]", y_axis_label="ジンバル角 [deg]",
x_range=xr1, **PLOT_OPTIONS)
p_gimbal.line(df1["time(s)"], df1["gimbal_angle_pitch(deg)"], legend_label="pitch", color=C[0])
p_gimbal.line(df1["time(s)"], df1["gimbal_angle_yaw(deg)"], legend_label="yaw", color=C[1])
p_gimbal.select_one(HoverTool).tooltips = HOVER_SET_F
# plots can be a single Bokeh model, a list/tuple, or even a dictionary
# plots = {'mass': p_mass, 'thrust': p_thrust, 'Blue': p2, 'Green': p3}
plots = gridplot([
[p_mass, p_thrust],
[p_alt, p_Isp],
[p_downrange, p_profile],
[p_velh, p_velv],
[p_q, p_mach],
[p_acc, p_acc3],
[p_az, p_el] if p_ro is None else [p_az, p_el, p_ro],
[p_AoA, p_AoAg, p_Qa],
[p_gimbal, p_aeroforce3],
])
if stage_str == '1':
script1, div1 = components(plots)
script2, div2 = "", ""
script3, div3 = "", ""
elif stage_str == '2':
script2, div2 = components(plots)
elif stage_str == '3':
script3, div3 = components(plots)
# ==== Output HTML ====
filename = "output/" + rocket_name + "_output.html"
template = Template('''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Bokeh Scatter Plots</title>
{{ js_resources }}
{{ css_resources }}
{{ script1 }}
{{ script2 }}
{{ script3 }}
<style>
.embed-wrapper {
width: 95%;
height: 2800px;
margin: auto;
}
</style>
</head>
<body>
<H1>1st stage</H1>
<div class="embed-wrapper">
{{ div1 }}
</div>
<HR>
<H1>2nd stage</H1>
<div class="embed-wrapper">
{{ div2 }}
</div>
<HR>
<H1>3rd stage</H1>
<div class="embed-wrapper">
{{ div3 }}
</div>
</body>
</html>
''')
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
html = template.render(js_resources=js_resources,
css_resources=css_resources,
script1=script1,
script2=script2,
script3=script3,
div1=div1,
div2=div2,
div3=div3)
with io.open(filename, mode='w', encoding='utf-8') as f:
f.write(html)
view(filename)
| [
"jinja2.Template",
"json.load",
"bokeh.plotting.figure",
"bokeh.util.browser.view",
"pandas.read_csv",
"numpy.deg2rad",
"os.path.exists",
"bokeh.resources.INLINE.render_css",
"bokeh.models.PrintfTickFormatter",
"bokeh.layouts.gridplot",
"bokeh.resources.INLINE.render_js",
"io.open",
"bokeh.e... | [((11253, 12056), 'jinja2.Template', 'Template', (['"""<!DOCTYPE html>\n<html lang="en">\n <head>\n <meta charset="utf-8">\n <title>Bokeh Scatter Plots</title>\n {{ js_resources }}\n {{ css_resources }}\n {{ script1 }}\n {{ script2 }}\n {{ script3 }}\n <style>\n .embed-wrapper {\n width: 95%;\n height: 2800px;\n margin: auto;\n }\n </style>\n </head>\n <body>\n <H1>1st stage</H1>\n <div class="embed-wrapper">\n {{ div1 }}\n </div>\n <HR>\n <H1>2nd stage</H1>\n <div class="embed-wrapper">\n {{ div2 }}\n </div>\n <HR>\n <H1>3rd stage</H1>\n <div class="embed-wrapper">\n {{ div3 }}\n </div>\n </body>\n</html>\n"""'], {}), '(\n """<!DOCTYPE html>\n<html lang="en">\n <head>\n <meta charset="utf-8">\n <title>Bokeh Scatter Plots</title>\n {{ js_resources }}\n {{ css_resources }}\n {{ script1 }}\n {{ script2 }}\n {{ script3 }}\n <style>\n .embed-wrapper {\n width: 95%;\n height: 2800px;\n margin: auto;\n }\n </style>\n </head>\n <body>\n <H1>1st stage</H1>\n <div class="embed-wrapper">\n {{ div1 }}\n </div>\n <HR>\n <H1>2nd stage</H1>\n <div class="embed-wrapper">\n {{ div2 }}\n </div>\n <HR>\n <H1>3rd stage</H1>\n <div class="embed-wrapper">\n {{ div3 }}\n </div>\n </body>\n</html>\n"""\n )\n', (11261, 12056), False, 'from jinja2 import Template\n'), ((12063, 12081), 'bokeh.resources.INLINE.render_js', 'INLINE.render_js', ([], {}), '()\n', (12079, 12081), False, 'from bokeh.resources import INLINE\n'), ((12098, 12117), 'bokeh.resources.INLINE.render_css', 'INLINE.render_css', ([], {}), '()\n', (12115, 12117), False, 'from bokeh.resources import INLINE\n'), ((12414, 12428), 'bokeh.util.browser.view', 'view', (['filename'], {}), '(filename)\n', (12418, 12428), False, 'from bokeh.util.browser import view\n'), ((1919, 1931), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1928, 1931), False, 'import json\n'), ((2529, 2568), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'index_col': '(False)'}), '(file_name, index_col=False)\n', (2540, 2568), True, 'import pandas as pd\n'), ((3103, 3208), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '質量')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""質量 [kg]"""', 'x_range': 'xr1'}), "(title=st + '質量', x_axis_label='時刻 [sec]', y_axis_label='質量 [kg]',\n x_range=xr1, **PLOT_OPTIONS)\n", (3109, 3208), False, 'from bokeh.plotting import figure\n'), ((3347, 3451), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '推力')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""推力 [N]"""', 'x_range': 'xr1'}), "(title=st + '推力', x_axis_label='時刻 [sec]', y_axis_label='推力 [N]',\n x_range=xr1, **PLOT_OPTIONS)\n", (3353, 3451), False, 'from bokeh.plotting import figure\n'), ((3556, 3588), 'bokeh.models.PrintfTickFormatter', 'PrintfTickFormatter', ([], {'format': '"""%f"""'}), "(format='%f')\n", (3575, 3588), False, 'from bokeh.models import PrintfTickFormatter, HoverTool\n'), ((3659, 3763), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '高度')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""高度 [m]"""', 'x_range': 'xr1'}), "(title=st + '高度', x_axis_label='時刻 [sec]', y_axis_label='高度 [m]',\n x_range=xr1, **PLOT_OPTIONS)\n", (3665, 3763), False, 'from bokeh.plotting import figure\n'), ((3864, 3896), 'bokeh.models.PrintfTickFormatter', 'PrintfTickFormatter', ([], {'format': '"""%f"""'}), "(format='%f')\n", (3883, 3896), False, 'from bokeh.models import PrintfTickFormatter, HoverTool\n'), ((3964, 4070), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '比推力')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': 
'"""比推力 [秒]"""', 'x_range': 'xr1'}), "(title=st + '比推力', x_axis_label='時刻 [sec]', y_axis_label='比推力 [秒]',\n x_range=xr1, **PLOT_OPTIONS)\n", (3970, 4070), False, 'from bokeh.plotting import figure\n'), ((4208, 4321), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + 'ダウンレンジ')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""ダウンレンジ [m]"""', 'x_range': 'xr1'}), "(title=st + 'ダウンレンジ', x_axis_label='時刻 [sec]', y_axis_label=\n 'ダウンレンジ [m]', x_range=xr1, **PLOT_OPTIONS)\n", (4214, 4321), False, 'from bokeh.plotting import figure\n'), ((4434, 4466), 'bokeh.models.PrintfTickFormatter', 'PrintfTickFormatter', ([], {'format': '"""%f"""'}), "(format='%f')\n", (4453, 4466), False, 'from bokeh.models import PrintfTickFormatter, HoverTool\n'), ((4544, 4644), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '飛翔プロファイル')", 'x_axis_label': '"""ダウンレンジ [m]"""', 'y_axis_label': '"""高度 [m]"""'}), "(title=st + '飛翔プロファイル', x_axis_label='ダウンレンジ [m]', y_axis_label=\n '高度 [m]', **PLOT_OPTIONS)\n", (4550, 4644), False, 'from bokeh.plotting import figure\n'), ((4757, 4789), 'bokeh.models.PrintfTickFormatter', 'PrintfTickFormatter', ([], {'format': '"""%f"""'}), "(format='%f')\n", (4776, 4789), False, 'from bokeh.models import PrintfTickFormatter, HoverTool\n'), ((4825, 4857), 'bokeh.models.PrintfTickFormatter', 'PrintfTickFormatter', ([], {'format': '"""%f"""'}), "(format='%f')\n", (4844, 4857), False, 'from bokeh.models import PrintfTickFormatter, HoverTool\n'), ((4930, 5039), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '水平面速度')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""速度 [m/s]"""', 'x_range': 'xr1'}), "(title=st + '水平面速度', x_axis_label='時刻 [sec]', y_axis_label='速度 [m/s]',\n x_range=xr1, **PLOT_OPTIONS)\n", (4936, 5039), False, 'from bokeh.plotting import figure\n'), ((5067, 5131), 'numpy.sqrt', 'np.sqrt', (["(df1['vel_NED_X(m/s)'] ** 2 + df1['vel_NED_Y(m/s)'] ** 2)"], {}), "(df1['vel_NED_X(m/s)'] ** 2 + df1['vel_NED_Y(m/s)'] ** 2)\n", (5074, 5131), True, 'import numpy as np\n'), ((5462, 5570), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '垂直速度')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""速度 [m/s]"""', 'x_range': 'xr1'}), "(title=st + '垂直速度', x_axis_label='時刻 [sec]', y_axis_label='速度 [m/s]',\n x_range=xr1, **PLOT_OPTIONS)\n", (5468, 5570), False, 'from bokeh.plotting import figure\n'), ((5730, 5836), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '動圧')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""動圧 [kPa]"""', 'x_range': 'xr1'}), "(title=st + '動圧', x_axis_label='時刻 [sec]', y_axis_label='動圧 [kPa]',\n x_range=xr1, **PLOT_OPTIONS)\n", (5736, 5836), False, 'from bokeh.plotting import figure\n'), ((5986, 6105), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '機体のその場高度でのマッハ数')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""マッハ数 [-]"""', 'x_range': 'xr1'}), "(title=st + '機体のその場高度でのマッハ数', x_axis_label='時刻 [sec]', y_axis_label=\n 'マッハ数 [-]', x_range=xr1, **PLOT_OPTIONS)\n", (5992, 6105), False, 'from bokeh.plotting import figure\n'), ((6245, 6351), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '加速度')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""加速度 [G]"""', 'x_range': 'xr1'}), "(title=st + '加速度', x_axis_label='時刻 [sec]', y_axis_label='加速度 [G]',\n x_range=xr1, **PLOT_OPTIONS)\n", (6251, 6351), False, 'from bokeh.plotting import figure\n'), ((6368, 6472), 'numpy.sqrt', 'np.sqrt', (["(df1['acc_Body_X(m/s2)'] ** 2 + df1['acc_Body_X(m/s2)'] ** 2 + df1[\n 'acc_Body_X(m/s2)'] ** 2)"], {}), 
"(df1['acc_Body_X(m/s2)'] ** 2 + df1['acc_Body_X(m/s2)'] ** 2 + df1[\n 'acc_Body_X(m/s2)'] ** 2)\n", (6375, 6472), True, 'import numpy as np\n'), ((6589, 6705), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '機体の各軸にかかる加速度')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""加速度 [G]"""', 'x_range': 'xr1'}), "(title=st + '機体の各軸にかかる加速度', x_axis_label='時刻 [sec]', y_axis_label=\n '加速度 [G]', x_range=xr1, **PLOT_OPTIONS)\n", (6595, 6705), False, 'from bokeh.plotting import figure\n'), ((7053, 7164), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '姿勢:方位角')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""角度 [deg]"""', 'x_range': 'xr1'}), "(title=st + '姿勢:方位角', x_axis_label='時刻 [sec]', y_axis_label=\n '角度 [deg]', x_range=xr1, **PLOT_OPTIONS)\n", (7059, 7164), False, 'from bokeh.plotting import figure\n'), ((7435, 7544), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '姿勢:仰角')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""角度 [deg]"""', 'x_range': 'xr1'}), "(title=st + '姿勢:仰角', x_axis_label='時刻 [sec]', y_axis_label='角度 [deg]',\n x_range=xr1, **PLOT_OPTIONS)\n", (7441, 7544), False, 'from bokeh.plotting import figure\n'), ((8024, 8130), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '迎角')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""迎角 [deg]"""', 'x_range': 'xr1'}), "(title=st + '迎角', x_axis_label='時刻 [sec]', y_axis_label='迎角 [deg]',\n x_range=xr1, **PLOT_OPTIONS)\n", (8030, 8130), False, 'from bokeh.plotting import figure\n'), ((8412, 8521), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '全迎角:γ')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""迎角 [deg]"""', 'x_range': 'xr1'}), "(title=st + '全迎角:γ', x_axis_label='時刻 [sec]', y_axis_label='迎角 [deg]',\n x_range=xr1, **PLOT_OPTIONS)\n", (8418, 8521), False, 'from bokeh.plotting import figure\n'), ((8679, 8790), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + 'Qγ')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""Qγ [kPa.rad]"""', 'x_range': 'xr1'}), "(title=st + 'Qγ', x_axis_label='時刻 [sec]', y_axis_label=\n 'Qγ [kPa.rad]', x_range=xr1, **PLOT_OPTIONS)\n", (8685, 8790), False, 'from bokeh.plotting import figure\n'), ((9544, 9660), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '機体の各軸にかかる空気力')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""空気力 [N]"""', 'x_range': 'xr1'}), "(title=st + '機体の各軸にかかる空気力', x_axis_label='時刻 [sec]', y_axis_label=\n '空気力 [N]', x_range=xr1, **PLOT_OPTIONS)\n", (9550, 9660), False, 'from bokeh.plotting import figure\n'), ((10041, 10154), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + 'ジンバル角')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""ジンバル角 [deg]"""', 'x_range': 'xr1'}), "(title=st + 'ジンバル角', x_axis_label='時刻 [sec]', y_axis_label=\n 'ジンバル角 [deg]', x_range=xr1, **PLOT_OPTIONS)\n", (10047, 10154), False, 'from bokeh.plotting import figure\n'), ((10587, 10825), 'bokeh.layouts.gridplot', 'gridplot', (['[[p_mass, p_thrust], [p_alt, p_Isp], [p_downrange, p_profile], [p_velh,\n p_velv], [p_q, p_mach], [p_acc, p_acc3], [p_az, p_el] if p_ro is None else\n [p_az, p_el, p_ro], [p_AoA, p_AoAg, p_Qa], [p_gimbal, p_aeroforce3]]'], {}), '([[p_mass, p_thrust], [p_alt, p_Isp], [p_downrange, p_profile], [\n p_velh, p_velv], [p_q, p_mach], [p_acc, p_acc3], [p_az, p_el] if p_ro is\n None else [p_az, p_el, p_ro], [p_AoA, p_AoAg, p_Qa], [p_gimbal,\n p_aeroforce3]])\n', (10595, 10825), False, 'from bokeh.layouts import gridplot\n'), ((12343, 12388), 'io.open', 'io.open', (['filename'], {'mode': '"""w"""', 
'encoding': '"""utf-8"""'}), "(filename, mode='w', encoding='utf-8')\n", (12350, 12388), False, 'import io\n'), ((2483, 2508), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (2497, 2508), False, 'import os\n'), ((7729, 7841), 'bokeh.plotting.figure', 'figure', ([], {'title': "(st + '姿勢:ロール角')", 'x_axis_label': '"""時刻 [sec]"""', 'y_axis_label': '"""角度 [deg]"""', 'x_range': 'xr1'}), "(title=st + '姿勢:ロール角', x_axis_label='時刻 [sec]', y_axis_label=\n '角度 [deg]', x_range=xr1, **PLOT_OPTIONS)\n", (7735, 7841), False, 'from bokeh.plotting import figure\n'), ((10946, 10963), 'bokeh.embed.components', 'components', (['plots'], {}), '(plots)\n', (10956, 10963), False, 'from bokeh.embed import components\n'), ((8917, 8932), 'numpy.deg2rad', 'np.deg2rad', (['(1.0)'], {}), '(1.0)\n', (8927, 8932), True, 'import numpy as np\n'), ((11077, 11094), 'bokeh.embed.components', 'components', (['plots'], {}), '(plots)\n', (11087, 11094), False, 'from bokeh.embed import components\n'), ((11146, 11163), 'bokeh.embed.components', 'components', (['plots'], {}), '(plots)\n', (11156, 11163), False, 'from bokeh.embed import components\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 3 18:26:32 2017
@author: TT
"""
import numpy as np
import tensorflow as tf
from params import MLP_model_params as hp
def softmax_layers(inputs,
num_units,
activation=tf.nn.softmax):
length, width = inputs.get_shape().as_list()
with tf.variable_scope("softmax_layers"):
W = tf.Variable(tf.zeros([width, num_units]))
b = tf.Variable(tf.zeros([num_units]))
inputs_ = tf.matmul(inputs, W) + b
if activation:
outputs = activation(inputs_)
tf.summary.histogram('activations', outputs)
return outputs
def conv2d(x, W):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
"""max_pool_2x2 downsamples a feature map by 2X."""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
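# Usage sketch (added for illustration, not part of the original file): wiring the
# helpers above into a single conv block. The 5x5x1x32 filter shape is hypothetical.
def _demo_conv_block(x):
    """x: a [batch, height, width, 1] image tensor."""
    W = weight_variable([5, 5, 1, 32])
    b = bias_variable([32])
    return max_pool_2x2(tf.nn.relu(conv2d(x, W) + b))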
def time_to_batch(inputs, rate):
'''If necessary zero-pads inputs and reshape by rate.
Used to perform 1D dilated convolution.
Args:
inputs: (tensor)
rate: (int)
Outputs:
outputs: (tensor)
pad_left: (int)
'''
_, width, num_channels = inputs.get_shape().as_list()
width_pad = int(rate * np.ceil((width + rate) * 1.0 / rate))
pad_left = width_pad - width
perm = (1, 0, 2)
shape = (int(width_pad / rate), -1, num_channels) # missing dim: batch_size * rate
padded = tf.pad(inputs, [[0, 0], [pad_left, 0], [0, 0]])
transposed = tf.transpose(padded, perm)
reshaped = tf.reshape(transposed, shape)
outputs = tf.transpose(reshaped, perm)
return outputs
def batch_to_time(inputs, rate, crop_left=0):
''' Reshape to 1d signal, and remove excess zero-padding.
Used to perform 1D dilated convolution.
Args:
inputs: (tensor)
crop_left: (int)
rate: (int)
    Outputs:
outputs: (tensor)
'''
shape = tf.shape(inputs)
batch_size = shape[0] / rate
width = shape[1]
out_width = tf.to_int32(width * rate)
_, _, num_channels = inputs.get_shape().as_list()
perm = (1, 0, 2)
new_shape = (out_width, -1, num_channels) # missing dim: batch_size
transposed = tf.transpose(inputs, perm)
reshaped = tf.reshape(transposed, new_shape)
outputs = tf.transpose(reshaped, perm)
cropped = tf.slice(outputs, [0, crop_left, 0], [-1, -1, -1])
return cropped
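# Round-trip sketch (added for illustration; the shapes are hypothetical): time_to_batch
# followed by batch_to_time recovers the original tensor when crop_left equals the
# zero-padding that time_to_batch introduced.
def _demo_time_batch_roundtrip():
    x = tf.zeros((2, 9, 4))  # (batch, width, channels)
    rate = 4
    y = time_to_batch(x, rate)  # -> (batch * rate, width_pad / rate, channels)
    _, new_width, _ = y.get_shape().as_list()
    z = batch_to_time(y, rate, crop_left=new_width * rate - 9)
    return z  # same shape as x: (2, 9, 4)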
def conv1d(inputs,
out_channels,
filter_width=2,
stride=1,
padding='VALID',
data_format='NHWC',
gain=np.sqrt(2),
activation=tf.nn.relu,
bias=False):
'''One dimension convolution helper function.
Sets variables with good defaults.
Args:
inputs:
out_channels:
filter_width:
stride:
        padding:
data_format:
gain:
activation:
bias:
Outputs:
outputs:
'''
in_channels = inputs.get_shape().as_list()[-1]
stddev = gain / np.sqrt(filter_width**2 * in_channels)
w_init = tf.random_normal_initializer(stddev=stddev)
w = tf.get_variable(name='w',
shape=(filter_width, in_channels, out_channels),
initializer=w_init)
outputs = tf.nn.conv1d(inputs,
w,
stride=stride,
padding=padding,
data_format=data_format)
if bias:
b_init = tf.constant_initializer(0.0)
b = tf.get_variable(name='b',
shape=(out_channels, ),
initializer=b_init)
outputs = outputs + tf.expand_dims(tf.expand_dims(b, 0), 0)
if activation:
outputs = activation(outputs)
return outputs
def dilated_conv1d(inputs,
out_channels,
name=None,
filter_width=2,
rate=1,
padding='VALID',
gain=np.sqrt(2),
activation=tf.nn.relu):
    '''A good example of how to build a dilated convolution layer.
Args:
inputs: (tensor)
        out_channels:
filter_width:
rate:
padding:
name:
gain:
activation:
Outputs:
outputs: (tensor)
'''
    # Check the name and allocate a tensorboard variable_scope
assert name
with tf.variable_scope(name):
        # Before using a tensor, get its shape
_, width, _ = inputs.get_shape().as_list()
# Do something to connect tensor
inputs_ = time_to_batch(inputs, rate=rate)
outputs_ = conv1d(inputs_,
out_channels=out_channels,
filter_width=filter_width,
padding=padding,
gain=gain,
activation=activation)
        # Again, before using a tensor, get its shape
_, conv_out_width, _ = outputs_.get_shape().as_list()
new_width = conv_out_width * rate
diff = new_width - width
outputs = batch_to_time(outputs_, rate=rate, crop_left=diff)
# Add additional shape information.
tensor_shape = [tf.Dimension(None),
tf.Dimension(width),
tf.Dimension(out_channels)]
outputs.set_shape(tf.TensorShape(tensor_shape))
return outputs
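# Usage sketch (added for illustration): a WaveNet-style stack in which each layer
# doubles the dilation rate, so the receptive field grows exponentially with depth.
# The layer names and channel count below are hypothetical.
def _demo_dilation_stack(inputs, out_channels=32):
    h = inputs
    for i, rate in enumerate([1, 2, 4, 8]):
        h = dilated_conv1d(h, out_channels, name='dilated_{}'.format(i), rate=rate)
    return h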
def _causal_linear(inputs, state, name=None, activation=None):
    '''Single-step causal linear op for fast generation: inputs @ w_e + state @ w_r.'''
    assert name
with tf.variable_scope(name, reuse=True) as scope:
w = tf.get_variable('w')
w_r = w[0, :, :]
w_e = w[1, :, :]
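        # (Added note: for the width-2 causal filter, w[0] weights the cached past
        # sample (the state) and w[1] the current input, which enables fast
        # sample-by-sample generation without re-running the full convolution.)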
output = tf.matmul(inputs, w_e) + tf.matmul(state, w_r)
if activation:
output = activation(output)
return output
def _output_linear(h, name=''):
with tf.variable_scope(name, reuse=True):
w = tf.get_variable('w')[0, :, :]
b = tf.get_variable('b')
output = tf.matmul(h, w) + tf.expand_dims(b, 0)
return output | [
"tensorflow.constant_initializer",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"tensorflow.nn.conv1d",
"tensorflow.truncated_normal",
"tensorflow.get_variable",
"tensorflow.pad",
"tensorflow.TensorShape",
"tensorflow.variable_scope",
"tensorflow.to... | [((773, 829), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (785, 829), True, 'import tensorflow as tf\n'), ((920, 995), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (934, 995), True, 'import tensorflow as tf\n'), ((1140, 1178), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (1159, 1178), True, 'import tensorflow as tf\n'), ((1190, 1210), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (1201, 1210), True, 'import tensorflow as tf\n'), ((1321, 1350), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (1332, 1350), True, 'import tensorflow as tf\n'), ((1362, 1382), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (1373, 1382), True, 'import tensorflow as tf\n'), ((1927, 1974), 'tensorflow.pad', 'tf.pad', (['inputs', '[[0, 0], [pad_left, 0], [0, 0]]'], {}), '(inputs, [[0, 0], [pad_left, 0], [0, 0]])\n', (1933, 1974), True, 'import tensorflow as tf\n'), ((1992, 2018), 'tensorflow.transpose', 'tf.transpose', (['padded', 'perm'], {}), '(padded, perm)\n', (2004, 2018), True, 'import tensorflow as tf\n'), ((2034, 2063), 'tensorflow.reshape', 'tf.reshape', (['transposed', 'shape'], {}), '(transposed, shape)\n', (2044, 2063), True, 'import tensorflow as tf\n'), ((2078, 2106), 'tensorflow.transpose', 'tf.transpose', (['reshaped', 'perm'], {}), '(reshaped, perm)\n', (2090, 2106), True, 'import tensorflow as tf\n'), ((2419, 2435), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (2427, 2435), True, 'import tensorflow as tf\n'), ((2511, 2536), 'tensorflow.to_int32', 'tf.to_int32', (['(width * rate)'], {}), '(width * rate)\n', (2522, 2536), True, 'import tensorflow as tf\n'), ((2706, 2732), 'tensorflow.transpose', 'tf.transpose', (['inputs', 'perm'], {}), '(inputs, perm)\n', (2718, 2732), True, 'import tensorflow as tf\n'), ((2752, 2785), 'tensorflow.reshape', 'tf.reshape', (['transposed', 'new_shape'], {}), '(transposed, new_shape)\n', (2762, 2785), True, 'import tensorflow as tf\n'), ((2800, 2828), 'tensorflow.transpose', 'tf.transpose', (['reshaped', 'perm'], {}), '(reshaped, perm)\n', (2812, 2828), True, 'import tensorflow as tf\n'), ((2843, 2893), 'tensorflow.slice', 'tf.slice', (['outputs', '[0, crop_left, 0]', '[-1, -1, -1]'], {}), '(outputs, [0, crop_left, 0], [-1, -1, -1])\n', (2851, 2893), True, 'import tensorflow as tf\n'), ((3081, 3091), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3088, 3091), True, 'import numpy as np\n'), ((3572, 3615), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev'}), '(stddev=stddev)\n', (3600, 3615), True, 'import tensorflow as tf\n'), ((3625, 3723), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""w"""', 'shape': '(filter_width, in_channels, out_channels)', 'initializer': 'w_init'}), "(name='w', shape=(filter_width, in_channels, out_channels),\n initializer=w_init)\n", (3640, 3723), True, 'import tensorflow as tf\n'), ((3783, 3868), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['inputs', 'w'], {'stride': 'stride', 'padding': 'padding', 'data_format': 'data_format'}), '(inputs, w, stride=stride, padding=padding, data_format=data_format\n )\n', (3795, 
3868), True, 'import tensorflow as tf\n'), ((4530, 4540), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4537, 4540), True, 'import numpy as np\n'), ((333, 368), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""softmax_layers"""'], {}), "('softmax_layers')\n", (350, 368), True, 'import tensorflow as tf\n'), ((3520, 3560), 'numpy.sqrt', 'np.sqrt', (['(filter_width ** 2 * in_channels)'], {}), '(filter_width ** 2 * in_channels)\n', (3527, 3560), True, 'import numpy as np\n'), ((4003, 4031), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (4026, 4031), True, 'import tensorflow as tf\n'), ((4044, 4112), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""b"""', 'shape': '(out_channels,)', 'initializer': 'b_init'}), "(name='b', shape=(out_channels,), initializer=b_init)\n", (4059, 4112), True, 'import tensorflow as tf\n'), ((4897, 4920), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (4914, 4920), True, 'import tensorflow as tf\n'), ((6038, 6073), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': '(True)'}), '(name, reuse=True)\n', (6055, 6073), True, 'import tensorflow as tf\n'), ((6096, 6116), 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""'], {}), "('w')\n", (6111, 6116), True, 'import tensorflow as tf\n'), ((6356, 6391), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': '(True)'}), '(name, reuse=True)\n', (6373, 6391), True, 'import tensorflow as tf\n'), ((6447, 6467), 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""'], {}), "('b')\n", (6462, 6467), True, 'import tensorflow as tf\n'), ((394, 422), 'tensorflow.zeros', 'tf.zeros', (['[width, num_units]'], {}), '([width, num_units])\n', (402, 422), True, 'import tensorflow as tf\n'), ((448, 469), 'tensorflow.zeros', 'tf.zeros', (['[num_units]'], {}), '([num_units])\n', (456, 469), True, 'import tensorflow as tf\n'), ((489, 509), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'W'], {}), '(inputs, W)\n', (498, 509), True, 'import tensorflow as tf\n'), ((600, 644), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""activations"""', 'outputs'], {}), "('activations', outputs)\n", (620, 644), True, 'import tensorflow as tf\n'), ((1734, 1770), 'numpy.ceil', 'np.ceil', (['((width + rate) * 1.0 / rate)'], {}), '((width + rate) * 1.0 / rate)\n', (1741, 1770), True, 'import numpy as np\n'), ((5740, 5758), 'tensorflow.Dimension', 'tf.Dimension', (['None'], {}), '(None)\n', (5752, 5758), True, 'import tensorflow as tf\n'), ((5784, 5803), 'tensorflow.Dimension', 'tf.Dimension', (['width'], {}), '(width)\n', (5796, 5803), True, 'import tensorflow as tf\n'), ((5829, 5855), 'tensorflow.Dimension', 'tf.Dimension', (['out_channels'], {}), '(out_channels)\n', (5841, 5855), True, 'import tensorflow as tf\n'), ((5883, 5911), 'tensorflow.TensorShape', 'tf.TensorShape', (['tensor_shape'], {}), '(tensor_shape)\n', (5897, 5911), True, 'import tensorflow as tf\n'), ((6185, 6207), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'w_e'], {}), '(inputs, w_e)\n', (6194, 6207), True, 'import tensorflow as tf\n'), ((6210, 6231), 'tensorflow.matmul', 'tf.matmul', (['state', 'w_r'], {}), '(state, w_r)\n', (6219, 6231), True, 'import tensorflow as tf\n'), ((6405, 6425), 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""'], {}), "('w')\n", (6420, 6425), True, 'import tensorflow as tf\n'), ((6486, 6501), 'tensorflow.matmul', 'tf.matmul', (['h', 'w'], {}), '(h, w)\n', (6495, 6501), True, 'import tensorflow as tf\n'), 
((6504, 6524), 'tensorflow.expand_dims', 'tf.expand_dims', (['b', '(0)'], {}), '(b, 0)\n', (6518, 6524), True, 'import tensorflow as tf\n'), ((4214, 4234), 'tensorflow.expand_dims', 'tf.expand_dims', (['b', '(0)'], {}), '(b, 0)\n', (4228, 4234), True, 'import tensorflow as tf\n')] |
"""
Created on Mon 20, 2020
"""
import numpy as np
import modern_robotics as mr
import yaml
import csv
from math import cos,sin, acos, atan2, copysign, fabs
class Odometry:
def __init__(self,bot_params):
# Vb = F*Δθ
l = bot_params["chasis"]["l"]
w = bot_params["chasis"]["w"]
self.rad = bot_params["chasis"]["wheel_rad"]
self.height = bot_params["chasis"]["height"]
row_val = 1.0/(l+w)
F = (self.rad/4) * np.array([[-row_val, row_val, row_val, -row_val],[1,1,1,1],[-1,1,-1,1]])
self.F6 = np.array([[0,0,0,0],[0,0,0,0],F[0],F[1],F[2],[0,0,0,0]]) # this maps change in wheel angle to body twist
self.limit_arm = bot_params["velocity_limits"]["arm"]
self.limit_wheel = bot_params["velocity_limits"]["wheel"]
def NextState(self, config, controls, timestep):
'''
Input : Chasis configuration, arm and wheel controls, timestep and dimensions of the robot
config : { chasis: [phi,x,y], arm: [0,0,0,0,0], wheel : [0,0,0,0] }
controls : {arm : [joint_1_speed, 2,3,4], wheel : [wheel 1 speed, 2,3,4]}
timestep : delta time
Output : New configuration after the controls for timestep
new_config : { chasis: [phi,x,y], arm: [0,0,0,0,0], wheel : [0,0,0,0] }
'''
new_config = {}
new_config["chasis"] = [0,0,0]
new_config["arm"] = [0,0,0,0,0]
new_config["wheel"] = [0,0,0,0]
# New wheel configuration
del_theta_wheel = [0]*len(new_config["wheel"])
del_theta_joint = [0]*len(new_config["arm"])
for wheel_no in range(len(new_config["wheel"])):
# Checking if wheel speed is off limits
if fabs(controls["wheel"][wheel_no]) > fabs(self.limit_wheel[wheel_no]):
print ("Wheel velocity exceeded for wheel ",wheel_no + 1, controls["wheel"][wheel_no])
controls["wheel"][wheel_no] = copysign(self.limit_wheel[wheel_no],controls["wheel"][wheel_no])
del_theta_wheel[wheel_no] = controls["wheel"][wheel_no] * timestep # delta theta to be multiplied with F6 to find Vb6
new_config["wheel"][wheel_no] = config["wheel"][wheel_no] + del_theta_wheel[wheel_no]
# New joint angles
for joint_no in range(len(new_config["arm"])):
# Checking if arm joint speed is off limits
if fabs(controls["arm"][joint_no]) > fabs(self.limit_arm[joint_no]):
print ("Joint velocity exceeded for Joint ",joint_no + 1, controls["arm"][joint_no])
controls["arm"][joint_no] = copysign(self.limit_arm[joint_no],controls["arm"][joint_no])
del_theta_joint[joint_no] = controls["arm"][joint_no] * timestep
new_config["arm"][joint_no] = config["arm"][joint_no] + del_theta_joint[joint_no]
phi = config["chasis"][0]
x = config["chasis"][1]
y = config["chasis"][2]
Tsb = np.array([[cos(phi),sin(phi),0,0],[-sin(phi),cos(phi),0,0],[0,0,1,0],[x,y,self.height,1]]).T
# Finding skew symmetric matrix and integrate to find the delta transformation matrix
    Vb6 = self.F6.dot(del_theta_wheel) # del_theta_wheel already includes the timestep, so this is the body twist integrated over the step
se3mat = mr.VecTose3(Vb6)
Tbk_b1k = mr.MatrixExp6(se3mat)
# New Position of the bot wrt space frame
Tsb1k = Tsb.dot(Tbk_b1k)
# theta = acos(Tsb1k[0,0]) # 0th element is cos(phi)..so inverse gives phi
theta = atan2(Tsb1k[1,0],Tsb1k[0,0]) # phi in range of -pi to pi
new_config["chasis"][0] = theta
new_config["chasis"][1] = Tsb1k[0,-1]
new_config["chasis"][2] = Tsb1k[1,-1]
# print config, controls, limits
return new_config # chasis, arm, wheel
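# Sanity sketch (added for illustration; the wheel radius and chassis dimensions here
# are hypothetical, not read from bot_params.yaml): with the mecanum F matrix built in
# __init__, equal speeds u on all four wheels give a pure forward body twist
# Vb = (omega_z, v_x, v_y) = (0, r*u, 0).
def _demo_forward_twist(r=0.05, l=0.25, w=0.15, u=10.0):
    row_val = 1.0 / (l + w)
    F = (r / 4) * np.array([[-row_val, row_val, row_val, -row_val],
                            [1, 1, 1, 1],
                            [-1, 1, -1, 1]])
    return F.dot([u, u, u, u])  # -> approximately [0.0, r * u, 0.0]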
if __name__ == "__main__":
params_filename = 'config/test_odom.yaml'
bot_params_filename = 'config/bot_params.yaml'
csv_filename = '../results/Debug/test_odom.csv'
# Reading params from the params file
with open(params_filename) as file:
        params = yaml.load(file, Loader=yaml.SafeLoader)
# Reading params from the params file
with open(bot_params_filename) as file:
        bot_params = yaml.load(file, Loader=yaml.SafeLoader)
# new_config = NextState(params["config"], params["controls"], params["limits"], params["timestep"],bot_params)
iterations = 1000
test_odom = Odometry(bot_params)
configs = list()
for iter in range(iterations):
new_config = test_odom.NextState(params["config"], params["controls"], params["timestep"])
params["config"] = new_config
configs.append(new_config["chasis"] + new_config["arm"] + new_config["wheel"] +[0]) # chasis, arm, wheel, gripper state
with open(csv_filename, "w") as csv_file:
writer = csv.writer(csv_file, delimiter=',')
for config in configs:
writer.writerow(config)
print ("\nControls : ",params["controls"])
# print (params["controls"])
print ("\nTimestep(s) : ",params["timestep"]*iterations)
for key,val in new_config.items():
new_config[key] = [round(ele,3) for ele in val]
print ("\nNew configuration : \n",new_config)
print ("\n")
| [
"yaml.load",
"modern_robotics.VecTose3",
"csv.writer",
"math.atan2",
"math.fabs",
"math.sin",
"math.copysign",
"numpy.array",
"math.cos",
"modern_robotics.MatrixExp6"
] | [((572, 642), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [0, 0, 0, 0], F[0], F[1], F[2], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [0, 0, 0, 0], F[0], F[1], F[2], [0, 0, 0, 0]])\n', (580, 642), True, 'import numpy as np\n'), ((3333, 3349), 'modern_robotics.VecTose3', 'mr.VecTose3', (['Vb6'], {}), '(Vb6)\n', (3344, 3349), True, 'import modern_robotics as mr\n'), ((3368, 3389), 'modern_robotics.MatrixExp6', 'mr.MatrixExp6', (['se3mat'], {}), '(se3mat)\n', (3381, 3389), True, 'import modern_robotics as mr\n'), ((3573, 3604), 'math.atan2', 'atan2', (['Tsb1k[1, 0]', 'Tsb1k[0, 0]'], {}), '(Tsb1k[1, 0], Tsb1k[0, 0])\n', (3578, 3604), False, 'from math import cos, sin, acos, atan2, copysign, fabs\n'), ((4136, 4151), 'yaml.load', 'yaml.load', (['file'], {}), '(file)\n', (4145, 4151), False, 'import yaml\n'), ((4266, 4281), 'yaml.load', 'yaml.load', (['file'], {}), '(file)\n', (4275, 4281), False, 'import yaml\n'), ((4854, 4889), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (4864, 4889), False, 'import csv\n'), ((481, 566), 'numpy.array', 'np.array', (['[[-row_val, row_val, row_val, -row_val], [1, 1, 1, 1], [-1, 1, -1, 1]]'], {}), '([[-row_val, row_val, row_val, -row_val], [1, 1, 1, 1], [-1, 1, -1, 1]]\n )\n', (489, 566), True, 'import numpy as np\n'), ((1749, 1782), 'math.fabs', 'fabs', (["controls['wheel'][wheel_no]"], {}), "(controls['wheel'][wheel_no])\n", (1753, 1782), False, 'from math import cos, sin, acos, atan2, copysign, fabs\n'), ((1785, 1817), 'math.fabs', 'fabs', (['self.limit_wheel[wheel_no]'], {}), '(self.limit_wheel[wheel_no])\n', (1789, 1817), False, 'from math import cos, sin, acos, atan2, copysign, fabs\n'), ((1968, 2033), 'math.copysign', 'copysign', (['self.limit_wheel[wheel_no]', "controls['wheel'][wheel_no]"], {}), "(self.limit_wheel[wheel_no], controls['wheel'][wheel_no])\n", (1976, 2033), False, 'from math import cos, sin, acos, atan2, copysign, fabs\n'), ((2441, 2472), 'math.fabs', 'fabs', (["controls['arm'][joint_no]"], {}), "(controls['arm'][joint_no])\n", (2445, 2472), False, 'from math import cos, sin, acos, atan2, copysign, fabs\n'), ((2475, 2505), 'math.fabs', 'fabs', (['self.limit_arm[joint_no]'], {}), '(self.limit_arm[joint_no])\n', (2479, 2505), False, 'from math import cos, sin, acos, atan2, copysign, fabs\n'), ((2652, 2713), 'math.copysign', 'copysign', (['self.limit_arm[joint_no]', "controls['arm'][joint_no]"], {}), "(self.limit_arm[joint_no], controls['arm'][joint_no])\n", (2660, 2713), False, 'from math import cos, sin, acos, atan2, copysign, fabs\n'), ((3035, 3043), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (3038, 3043), False, 'from math import cos, sin, acos, atan2, copysign, fabs\n'), ((3044, 3052), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (3047, 3052), False, 'from math import cos, sin, acos, atan2, copysign, fabs\n'), ((3069, 3077), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (3072, 3077), False, 'from math import cos, sin, acos, atan2, copysign, fabs\n'), ((3060, 3068), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (3063, 3068), False, 'from math import cos, sin, acos, atan2, copysign, fabs\n')] |
#!/usr/bin/env python3
"""Inference the predictions of the clinical datasets using the supervised model."""
import argparse
from pathlib import Path
import joblib
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tqdm import tqdm
from utils import COLUMNS_NAME, load_dataset
PROJECT_ROOT = Path.cwd()
def main(dataset_name):
"""Make predictions using trained normative models."""
# ----------------------------------------------------------------------------
n_bootstrap = 1000
model_name = 'supervised_aae'
participants_path = PROJECT_ROOT / 'data' / dataset_name / 'participants.tsv'
freesurfer_path = PROJECT_ROOT / 'data' / dataset_name / 'freesurferData.csv'
# ----------------------------------------------------------------------------
# Create directories structure
outputs_dir = PROJECT_ROOT / 'outputs'
bootstrap_dir = outputs_dir / 'bootstrap_analysis'
model_dir = bootstrap_dir / model_name
ids_path = outputs_dir / (dataset_name + '_homogeneous_ids.csv')
# ----------------------------------------------------------------------------
# Set random seed
random_seed = 42
tf.random.set_seed(random_seed)
np.random.seed(random_seed)
# ----------------------------------------------------------------------------
for i_bootstrap in tqdm(range(n_bootstrap)):
bootstrap_model_dir = model_dir / '{:03d}'.format(i_bootstrap)
output_dataset_dir = bootstrap_model_dir / dataset_name
output_dataset_dir.mkdir(exist_ok=True)
# ----------------------------------------------------------------------------
# Loading data
clinical_df = load_dataset(participants_path, ids_path, freesurfer_path)
x_dataset = clinical_df[COLUMNS_NAME].values
tiv = clinical_df['EstimatedTotalIntraCranialVol'].values
tiv = tiv[:, np.newaxis]
x_dataset = (np.true_divide(x_dataset, tiv)).astype('float32')
# ----------------------------------------------------------------------------
encoder = keras.models.load_model(bootstrap_model_dir / 'encoder.h5', compile=False)
decoder = keras.models.load_model(bootstrap_model_dir / 'decoder.h5', compile=False)
scaler = joblib.load(bootstrap_model_dir / 'scaler.joblib')
enc_age = joblib.load(bootstrap_model_dir / 'age_encoder.joblib')
enc_gender = joblib.load(bootstrap_model_dir / 'gender_encoder.joblib')
# ----------------------------------------------------------------------------
x_normalized = scaler.transform(x_dataset)
normalized_df = pd.DataFrame(columns=['participant_id'] + COLUMNS_NAME)
normalized_df['participant_id'] = clinical_df['participant_id']
normalized_df[COLUMNS_NAME] = x_normalized
normalized_df.to_csv(output_dataset_dir / 'normalized.csv', index=False)
# ----------------------------------------------------------------------------
age = clinical_df['Age'].values[:, np.newaxis].astype('float32')
one_hot_age = enc_age.transform(age)
gender = clinical_df['Gender'].values[:, np.newaxis].astype('float32')
one_hot_gender = enc_gender.transform(gender)
y_data = np.concatenate((one_hot_age, one_hot_gender), axis=1).astype('float32')
# ----------------------------------------------------------------------------
encoded = encoder(x_normalized, training=False)
reconstruction = decoder(tf.concat([encoded, y_data], axis=1), training=False)
reconstruction_df = pd.DataFrame(columns=['participant_id'] + COLUMNS_NAME)
reconstruction_df['participant_id'] = clinical_df['participant_id']
reconstruction_df[COLUMNS_NAME] = reconstruction.numpy()
reconstruction_df.to_csv(output_dataset_dir / 'reconstruction.csv', index=False)
encoded_df = pd.DataFrame(columns=['participant_id'] + list(range(encoded.shape[1])))
encoded_df['participant_id'] = clinical_df['participant_id']
encoded_df[list(range(encoded.shape[1]))] = encoded.numpy()
encoded_df.to_csv(output_dataset_dir / 'encoded.csv', index=False)
# ----------------------------------------------------------------------------
reconstruction_error = np.mean((x_normalized - reconstruction) ** 2, axis=1)
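        # (Added note: the per-subject mean squared reconstruction error across the
        # regional volumes is what serves as the deviation score downstream.)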
reconstruction_error_df = pd.DataFrame(columns=['participant_id', 'Reconstruction error'])
reconstruction_error_df['participant_id'] = clinical_df['participant_id']
reconstruction_error_df['Reconstruction error'] = reconstruction_error
reconstruction_error_df.to_csv(output_dataset_dir / 'reconstruction_error.csv', index=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-D', '--dataset_name',
dest='dataset_name',
help='Dataset name to calculate deviations.')
args = parser.parse_args()
main(args.dataset_name) | [
"tensorflow.random.set_seed",
"pandas.DataFrame",
"numpy.random.seed",
"argparse.ArgumentParser",
"tensorflow.keras.models.load_model",
"numpy.true_divide",
"joblib.load",
"tensorflow.concat",
"utils.load_dataset",
"numpy.mean",
"pathlib.Path.cwd",
"numpy.concatenate"
] | [((340, 350), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (348, 350), False, 'from pathlib import Path\n'), ((1201, 1232), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['random_seed'], {}), '(random_seed)\n', (1219, 1232), True, 'import tensorflow as tf\n'), ((1237, 1264), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1251, 1264), True, 'import numpy as np\n'), ((4781, 4806), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4804, 4806), False, 'import argparse\n'), ((1715, 1773), 'utils.load_dataset', 'load_dataset', (['participants_path', 'ids_path', 'freesurfer_path'], {}), '(participants_path, ids_path, freesurfer_path)\n', (1727, 1773), False, 'from utils import COLUMNS_NAME, load_dataset\n'), ((2105, 2179), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (["(bootstrap_model_dir / 'encoder.h5')"], {'compile': '(False)'}), "(bootstrap_model_dir / 'encoder.h5', compile=False)\n", (2128, 2179), False, 'from tensorflow import keras\n'), ((2198, 2272), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (["(bootstrap_model_dir / 'decoder.h5')"], {'compile': '(False)'}), "(bootstrap_model_dir / 'decoder.h5', compile=False)\n", (2221, 2272), False, 'from tensorflow import keras\n'), ((2291, 2341), 'joblib.load', 'joblib.load', (["(bootstrap_model_dir / 'scaler.joblib')"], {}), "(bootstrap_model_dir / 'scaler.joblib')\n", (2302, 2341), False, 'import joblib\n'), ((2361, 2416), 'joblib.load', 'joblib.load', (["(bootstrap_model_dir / 'age_encoder.joblib')"], {}), "(bootstrap_model_dir / 'age_encoder.joblib')\n", (2372, 2416), False, 'import joblib\n'), ((2438, 2496), 'joblib.load', 'joblib.load', (["(bootstrap_model_dir / 'gender_encoder.joblib')"], {}), "(bootstrap_model_dir / 'gender_encoder.joblib')\n", (2449, 2496), False, 'import joblib\n'), ((2661, 2716), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "(['participant_id'] + COLUMNS_NAME)"}), "(columns=['participant_id'] + COLUMNS_NAME)\n", (2673, 2716), True, 'import pandas as pd\n'), ((3611, 3666), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "(['participant_id'] + COLUMNS_NAME)"}), "(columns=['participant_id'] + COLUMNS_NAME)\n", (3623, 3666), True, 'import pandas as pd\n'), ((4323, 4376), 'numpy.mean', 'np.mean', (['((x_normalized - reconstruction) ** 2)'], {'axis': '(1)'}), '((x_normalized - reconstruction) ** 2, axis=1)\n', (4330, 4376), True, 'import numpy as np\n'), ((4412, 4476), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['participant_id', 'Reconstruction error']"}), "(columns=['participant_id', 'Reconstruction error'])\n", (4424, 4476), True, 'import pandas as pd\n'), ((3528, 3564), 'tensorflow.concat', 'tf.concat', (['[encoded, y_data]'], {'axis': '(1)'}), '([encoded, y_data], axis=1)\n', (3537, 3564), True, 'import tensorflow as tf\n'), ((1950, 1980), 'numpy.true_divide', 'np.true_divide', (['x_dataset', 'tiv'], {}), '(x_dataset, tiv)\n', (1964, 1980), True, 'import numpy as np\n'), ((3279, 3332), 'numpy.concatenate', 'np.concatenate', (['(one_hot_age, one_hot_gender)'], {'axis': '(1)'}), '((one_hot_age, one_hot_gender), axis=1)\n', (3293, 3332), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import collections.abc as cl  # Iterable lives in collections.abc on Python 3.3+
import json
from .util import *
class Waterbank():
def __init__(self, df, name, key):
self.T = len(df)
self.index = df.index
self.number_years = self.index.year[self.T - 1] - self.index.year[0]
self.key = key
self.name = name
for k,v in json.load(open('cord/banks/%s_properties.json' % key)).items():
setattr(self,k,v)
self.recharge_rate = self.initial_recharge*cfs_tafd
self.tot_current_storage = 0.0#total above-ground storage being used in water bank
    self.loss_rate = 0.06#how much of banked deliveries is lost during spreading
#dictionaries for individual member use of the bank
self.storage = {}#how much water delivered to bank this time step
    self.recovery_use = {}#how much recovery capacity is being used by a member this time step
self.banked = {} #how much water is stored in the groundwater banking account of the member
#timeseries for export to csv
self.bank_timeseries = {}#daily
self.annual_timeseries = {}#annual
self.recharge_rate_series = np.zeros(self.T)#daily recharge rate
for x in self.participant_list:
self.storage[x] = 0.0
self.bank_timeseries[x] = np.zeros(self.T)
self.annual_timeseries[x] = np.zeros(self.number_years)
self.recovery_use[x] = 0.0
self.banked[x] = 0.0
    #counters to keep track of the duration of waterbank use (recharge rate declines after continuous use)
self.thismonthuse = 0
self.monthusecounter = 0
self.monthemptycounter = 0
def object_equals(self, other):
##This function compares two instances of an object, returns True if all attributes are identical.
equality = {}
if (self.__dict__.keys() != other.__dict__.keys()):
return ('Different Attributes')
else:
differences = 0
for i in self.__dict__.keys():
if type(self.__getattribute__(i)) is dict:
equality[i] = True
for j in self.__getattribute__(i).keys():
if (type(self.__getattribute__(i)[j] == other.__getattribute__(i)[j]) is bool):
if ((self.__getattribute__(i)[j] == other.__getattribute__(i)[j]) == False):
equality[i] = False
differences += 1
else:
if ((self.__getattribute__(i)[j] == other.__getattribute__(i)[j]).all() == False):
equality[i] = False
differences += 1
else:
if (type(self.__getattribute__(i) == other.__getattribute__(i)) is bool):
equality[i] = (self.__getattribute__(i) == other.__getattribute__(i))
if equality[i] == False:
differences += 1
else:
equality[i] = (self.__getattribute__(i) == other.__getattribute__(i)).all()
if equality[i] == False:
differences += 1
return (differences == 0)
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################DETERMINE DELIVERIES ON CANAL######################################################
#####################################################################################################################
def find_node_demand(self,contract_list,xx,num_members, search_type):
#this function finds the maximum 'demand' available at each node - if
#in recovery mode, max demand is the available recovery capacity
#all other modes, max demand is the available recharge space
if search_type == "recovery":
#recovery mode - sum the (pumping) capacity use of each wb member
current_recovery_use = 0.0
for x in self.recovery_use:
current_recovery_use += self.recovery_use[x]
demand_constraint = max(self.recovery - current_recovery_use, 0.0)
else:
#recharge mode - sum the (spreading basin) capacity use of each wb member
current_storage = 0.0
for xx in self.participant_list:
current_storage += self.storage[xx]
demand_constraint = max(self.tot_storage - current_storage, 0.0)
return demand_constraint
def find_priority_space(self, num_members, xx, search_type):
    #this function finds how much 'priority' space in the recharge/recovery capacity is owned by a member (xx) in a given bank
if search_type == "recovery":
initial_capacity = max(self.recovery*self.ownership[xx]/num_members - self.recovery_use[xx], 0.0)
available_banked = self.banked[xx]
return min(initial_capacity, available_banked)
else:
initial_capacity = max(self.tot_storage*self.ownership[xx]/num_members - self.storage[xx], 0.0)
return initial_capacity
def set_demand_priority(self, priority_list, contract_list, demand, delivery, demand_constraint, search_type, contract_canal, current_canal, member_contracts):
#this function creates a dictionary (demand_dict) that has a key for each 'priority type' associated with the flow
#different types of flow (flood, delivery, banking, recovery) have different priority types
demand_dict = {}
#for flood flows, determine if the wb members have contracts w/ the flooding reservoir - 1st priority
#if not, do they have turnouts on the 'priority' canals - 2nd priority
#if not, the demand is 'excess' - 3rd priority (so that flood waters only use certain canals unless the flood releases are big enough)
if search_type == 'flood':
priority_toggle = 0
contractor_toggle = 0
canal_toggle = 0
for yy in priority_list:
if yy.name == contract_canal:
priority_toggle = 1
if priority_toggle == 1:
for y in contract_list:
for yx in member_contracts:
if y.name == yx:
contractor_toggle = 1
for yy in self.get_iterable(self.canal_rights):
if yy == current_canal:
canal_toggle = 1
if contractor_toggle == 1 and canal_toggle == 1:
demand_dict['contractor'] = demand
demand_dict['alternate'] = 0.0
demand_dict['turnout'] = 0.0
demand_dict['excess'] = 0.0
elif contractor_toggle == 1:
demand_dict['contractor'] = 0.0
demand_dict['alternate'] = demand
demand_dict['turnout'] = 0.0
demand_dict['excess'] = 0.0
else:
demand_dict['contractor'] = 0.0
demand_dict['alternate'] = 0.0
demand_dict['turnout'] = demand
demand_dict['excess'] = 0.0
else:
demand_dict['contractor'] = 0.0
demand_dict['alternate'] = 0.0
demand_dict['turnout'] = 0.0
demand_dict['excess'] = demand
    #if the flows are for delivery, they don't come to a water bank
elif search_type == 'delivery':
demand_dict[contract_canal] = 0.0
    #banking flows get first priority when they can be taken by a wb member under their 'owned' capacity
    #secondary priority is assigned to districts that are using 'excess' space in the wb that they do not own (but the owner does not want to use)
elif search_type == 'banking':
canal_toggle = 0
for yy in self.get_iterable(self.canal_rights):
if yy == current_canal:
canal_toggle = 1
if canal_toggle == 1:
demand_dict['priority'] = min(max(min(demand,delivery), 0.0), demand_constraint)
demand_dict['secondary'] = min(delivery - max(min(demand,delivery), 0.0), demand_constraint - demand_dict['priority'])
else:
demand_dict['priority'] = 0.0
demand_dict['secondary'] = min(max(delivery, 0.0), demand_constraint)
#recovery flows are similar to banking flows - first priority for wb members that are using capacity they own, second priority for wb members using 'excess' capacity
elif search_type == 'recovery':
demand_dict['initial'] = min(max(min(demand,delivery), 0.0), demand_constraint)
demand_dict['supplemental'] = min(delivery - max(min(demand,delivery), 0.0), demand_constraint - demand_dict['initial'])
return demand_dict
def set_deliveries(self, priorities,type_fractions,type_list,member_name):
final_deliveries = 0.0
for zz in type_list:
#deliveries at this priority level
total_deliveries = priorities[zz]*type_fractions[zz]
#running total of all deliveries at this node
final_deliveries += total_deliveries
#deliveries first go to direct irrigation, if demand remains
#adjust demand/recharge space
self.storage[member_name] += total_deliveries
return final_deliveries
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
###################### UPDATE/SAVE STATE VARIABLES (BANK ACCOUT BALANCES) ################################
#####################################################################################################################
def adjust_recovery(self, deliveries, member_name, wateryear):
#this function adjusts the waterbank accounts & capacity usage after
#a wb member uses recovery
self.banked[member_name] -= deliveries#bank account
self.recovery_use[member_name] += deliveries#capacity use
def sum_storage(self):
#this function calculates the total capacity use in a recharge basin
self.tot_current_storage = 0.0
for x in self.participant_list:
self.tot_current_storage += self.storage[x]
def absorb_storage(self):
#this function takes water applied to a recharge basin and 'absorbs' it into the
#ground, clearing up capacity in the recharge basin and adding to the 'bank' accounts
#of the wb member that applied it
if self.tot_current_storage > self.recharge_rate*0.75:
self.thismonthuse = 1
if self.tot_current_storage > 0.0:
absorb_fraction = min(self.recharge_rate/self.tot_current_storage,1.0)
self.tot_current_storage -= self.tot_current_storage*absorb_fraction
for x in self.participant_list:
self.banked[x] += self.storage[x]*absorb_fraction*(1.0-self.loss_rate)#bank account (only credit a portion of the recharge to the bank acct)
self.storage[x] -= self.storage[x]*absorb_fraction#capacity use
def accounting(self, t, m, da, wateryear):
    #this stores bank account balances in a daily dictionary (for export to csv)
stacked_amount = 0.0
self.recharge_rate_series[t] = self.recharge_rate
for x in self.participant_list:
self.bank_timeseries[x][t] = self.banked[x] + stacked_amount
stacked_amount += self.banked[x]
if m == 9 and da == 29:
#annual dictionary stores the annual change in gw bank balances
for x in self.participant_list:
sum_total = 0.0
for year_counter in range(0, wateryear):
sum_total += self.annual_timeseries[x][year_counter]
self.annual_timeseries[x][wateryear] = self.banked[x] - sum_total
def bank_as_df(self, index):
#take daily bank account balances (w/running recharge capacities) and save them as a data frame (for export to csv)
df = pd.DataFrame()
for n in self.participant_list:
df['%s_%s' % (self.key,n)] = pd.Series(self.bank_timeseries[n], index = index)
df['%s_rate' % self.key] = pd.Series(self.recharge_rate_series, index = index)
return df
def annual_bank_as_df(self):
#save annual bank changes as data frame (for export to csv)
df = pd.DataFrame()
for n in self.participant_list:
df['%s_%s_leiu' % (self.key,n)] = pd.Series(self.annual_timeseries[n])
return df
def get_iterable(self, x):
if isinstance(x, cl.Iterable):
return x
else:
return (x,)
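# Worked sketch (added for illustration; all numbers here are hypothetical, not taken
# from the bank property files): find_priority_space for recharge grants each member
# tot_storage * ownership_share / num_members, net of space it is already using.
def _demo_priority_space(tot_storage=100.0, ownership=0.3, num_members=2, used=5.0):
    return max(tot_storage * ownership / num_members - used, 0.0)  # -> 10.0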
| [
"pandas.DataFrame",
"numpy.zeros",
"pandas.Series"
] | [((1170, 1186), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (1178, 1186), True, 'import numpy as np\n'), ((11616, 11630), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11628, 11630), True, 'import pandas as pd\n'), ((11783, 11832), 'pandas.Series', 'pd.Series', (['self.recharge_rate_series'], {'index': 'index'}), '(self.recharge_rate_series, index=index)\n', (11792, 11832), True, 'import pandas as pd\n'), ((11955, 11969), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11967, 11969), True, 'import pandas as pd\n'), ((1303, 1319), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (1311, 1319), True, 'import numpy as np\n'), ((1354, 1381), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (1362, 1381), True, 'import numpy as np\n'), ((11702, 11749), 'pandas.Series', 'pd.Series', (['self.bank_timeseries[n]'], {'index': 'index'}), '(self.bank_timeseries[n], index=index)\n', (11711, 11749), True, 'import pandas as pd\n'), ((12046, 12082), 'pandas.Series', 'pd.Series', (['self.annual_timeseries[n]'], {}), '(self.annual_timeseries[n])\n', (12055, 12082), True, 'import pandas as pd\n')] |
import numpy as np
from PIL import Image, ImageFilter
import matplotlib.pyplot as plt
import glob, os
import pandas as pd
# Rewrite the paths below to match your environment
coordspath = 'data/coords.csv'
train_folder = 'H:/KaggleNOAASeaLions/Train/'
save_folder = 'H:/KaggleNOAASeaLions/classified_images/'
data = pd.read_csv(coordspath)
print(data)
coord = np.asarray(data.to_numpy())  # as_matrix() was removed in pandas 1.0
print(coord.shape)
crop_size = 512
num_opened_image = 'XX'
for i in range(coord.shape[0]):
num_image = coord[i, 0]
x = coord[i, 3]
y = coord[i, 2]
cls = coord[i,1]
if num_image != num_opened_image:
filepath = train_folder + str(coord[i,0]) + '.jpg'
image_pil = Image.open(filepath)
image = np.asarray(image_pil)
width = image.shape[1]
height = image.shape[0]
close2edge = (x-crop_size//2 < 0) or (x+crop_size//2 > width) or (y-crop_size//2 < 0) or (y+crop_size//2 > height)
# print(close2edge)
if not close2edge:
# print(image.shape)
# print(coord[i])
crop_image = image[y-crop_size//2:y+crop_size//2, x-crop_size//2:x+crop_size//2]
# plt.imshow(crop_image)
# plt.show()
crop_image_pil = Image.fromarray(crop_image)
savepath = save_folder + str(cls+1) +'/' + str(i) + '.png'
crop_image_pil.save(savepath)
print(savepath, ' saved')
num_opened_image = num_image | [
"pandas.read_csv",
"numpy.asarray",
"PIL.Image.fromarray",
"PIL.Image.open"
] | [((284, 307), 'pandas.read_csv', 'pd.read_csv', (['coordspath'], {}), '(coordspath)\n', (295, 307), True, 'import pandas as pd\n'), ((657, 677), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (667, 677), False, 'from PIL import Image, ImageFilter\n'), ((694, 715), 'numpy.asarray', 'np.asarray', (['image_pil'], {}), '(image_pil)\n', (704, 715), True, 'import numpy as np\n'), ((1168, 1195), 'PIL.Image.fromarray', 'Image.fromarray', (['crop_image'], {}), '(crop_image)\n', (1183, 1195), False, 'from PIL import Image, ImageFilter\n')] |
import numpy as np
import cv2
import socket
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = ('0.0.0.0', 7777)
print('starting up on %s port %s' % server_address)
sock.bind(server_address)
# Listen for incoming connections
sock.listen(1)
def applyImage(data):
decoded = np.frombuffer(data, dtype=np.uint8)
decoded = decoded.reshape((270, 480,3))
    return decoded
while True:
# Wait for a connection
print( 'waiting for a connection')
connection, client_address = sock.accept()
try:
print( 'connection from', client_address)
        # Receive one full frame per call
while True:
            #Image size is 480x270 with 3 color channels: 480 * 270 * 3 = 388800 bytes
data = connection.recv(388800)
if data:
#Visualize the received data
cv2.imshow('IMG',applyImage(data))
                if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
                print('no more data from', client_address)
                break
finally:
# Clean up the connection
connection.close()
cv2.waitKey(0)
cv2.destroyAllWindows()
connection.close()
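# Robustness sketch (added for illustration, not part of the original protocol): TCP is
# a byte stream, so a single recv(388800) may legally return fewer bytes than one full
# frame. A receiver can accumulate chunks until the expected size arrives:
def recv_exact(conn, size):
    buf = b''
    while len(buf) < size:
        chunk = conn.recv(size - len(buf))
        if not chunk:
            break  # peer closed the connection early
        buf += chunk
    return buf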
| [
"cv2.waitKey",
"numpy.frombuffer",
"socket.socket",
"cv2.destroyAllWindows"
] | [((84, 133), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (97, 133), False, 'import socket\n'), ((376, 411), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'np.uint8'}), '(data, dtype=np.uint8)\n', (389, 411), True, 'import numpy as np\n'), ((1301, 1315), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1312, 1315), False, 'import cv2\n'), ((1325, 1348), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1346, 1348), False, 'import cv2\n'), ((1005, 1019), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1016, 1019), False, 'import cv2\n')] |
import numpy, os
def load_data(filepath):
    return numpy.load(filepath)
| [
"numpy.load"
] | [((54, 74), 'numpy.load', 'numpy.load', (['filepath'], {}), '(filepath)\n', (64, 74), False, 'import numpy, os\n')] |
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import f1_score
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import SMOTE
# Settings
scaler = 'minmax' # or standard
baseline_step_count = 100 # how many thresholds are evaluated to find the optimum
# Abstract classifier class; it also performs the preprocessing
class Classifier:
    # Train the machines on the training data and predict labels for the test data
def apply(self, X_train, y_train, X_test, idx_baseline):
# Resample training data
sm = SMOTE(random_state=42, sampling_strategy='not majority')
X_res, y_res = sm.fit_resample(X_train, y_train)
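        # (Added note: SMOTE synthesizes new minority-class samples by interpolating
        # between existing minority samples and their nearest neighbors, so the
        # classes in X_res/y_res are balanced before scaling and fitting.)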
# Print infos about training data
# print("X_train" + str(X_train.shape) + " y_train" + str(y_train.shape) + " Visual Changes: " + str(np.count_nonzero(y_train)))
# print("X_res" + str(X_res.shape) + " y_res" + str(y_res.shape) + " Visual Changes: " + str(np.count_nonzero(y_res)))
# Normalize dataset on the basis of the training data
if scaler == 'standard':
self.scaler = StandardScaler()
else:
self.scaler = MinMaxScaler()
self.scaler.fit(X_res)
X_res = self.scaler.transform(X_res)
# Logistic Regression
self.clf_logreg = LogisticRegression(
random_state=42,
solver='liblinear',
multi_class='ovr', # binary data
class_weight=None) # 'balanced'
self.clf_logreg.fit(X_res, y_res)
# Support Vector Machine
self.clf_svc = svm.SVC(
random_state=42,
kernel='rbf',
gamma='scale',
decision_function_shape='ovr',
class_weight=None) # 'balanced'
self.clf_svc.fit(X_res, y_res)
# Random Forest
self.clf_forest = RandomForestClassifier(
random_state=42,
n_estimators=100,
class_weight=None, # 'balanced', 'balanced_subsample'
criterion='entropy', # 'gini'
max_depth=None,
min_samples_split=2,
min_samples_leaf=1)
self.clf_forest.fit(X_res, y_res)
# Baseline
base_values = X_res[:,idx_baseline]
min_base_value = np.min(base_values)
max_base_value = np.max(base_values)
step_base = (max_base_value - min_base_value) / baseline_step_count
        # Iterate over possible thresholds and find the optimal one
best_thresh = min_base_value
best_score = 0.0
for i in range(0,baseline_step_count):
pred_thresh = min_base_value + (step_base * i)
pred_base = [int(v > pred_thresh) for v in base_values]
pred_score = f1_score(y_res, pred_base, average='weighted')
if pred_score > best_score:
best_thresh = pred_thresh
best_score = pred_score
# Store predictions
X_test = self.scaler.transform(X_test)
pred = {
'logreg' : self.clf_logreg.predict(X_test).astype(int),
'svc' : self.clf_svc.predict(X_test).astype(int),
'forest' : self.clf_forest.predict(X_test).astype(int),
'baseline' : [int(v > best_thresh) for v in X_test[:,idx_baseline]],
'importance': self.clf_forest.feature_importances_}
# Return predictions
return pred | [
"sklearn.ensemble.RandomForestClassifier",
"sklearn.preprocessing.StandardScaler",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.linear_model.LogisticRegression",
"numpy.min",
"imblearn.over_sampling.SMOTE",
"numpy.max",
"sklearn.metrics.f1_score",
"sklearn.svm.SVC"
] | [((695, 751), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {'random_state': '(42)', 'sampling_strategy': '"""not majority"""'}), "(random_state=42, sampling_strategy='not majority')\n", (700, 751), False, 'from imblearn.over_sampling import SMOTE\n'), ((1459, 1556), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(42)', 'solver': '"""liblinear"""', 'multi_class': '"""ovr"""', 'class_weight': 'None'}), "(random_state=42, solver='liblinear', multi_class='ovr',\n class_weight=None)\n", (1477, 1556), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1736, 1843), 'sklearn.svm.SVC', 'svm.SVC', ([], {'random_state': '(42)', 'kernel': '"""rbf"""', 'gamma': '"""scale"""', 'decision_function_shape': '"""ovr"""', 'class_weight': 'None'}), "(random_state=42, kernel='rbf', gamma='scale',\n decision_function_shape='ovr', class_weight=None)\n", (1743, 1843), False, 'from sklearn import svm\n'), ((2013, 2175), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(42)', 'n_estimators': '(100)', 'class_weight': 'None', 'criterion': '"""entropy"""', 'max_depth': 'None', 'min_samples_split': '(2)', 'min_samples_leaf': '(1)'}), "(random_state=42, n_estimators=100, class_weight=None,\n criterion='entropy', max_depth=None, min_samples_split=2,\n min_samples_leaf=1)\n", (2035, 2175), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2436, 2455), 'numpy.min', 'np.min', (['base_values'], {}), '(base_values)\n', (2442, 2455), True, 'import numpy as np\n'), ((2481, 2500), 'numpy.max', 'np.max', (['base_values'], {}), '(base_values)\n', (2487, 2500), True, 'import numpy as np\n'), ((1246, 1262), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1260, 1262), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1303, 1317), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (1315, 1317), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2917, 2963), 'sklearn.metrics.f1_score', 'f1_score', (['y_res', 'pred_base'], {'average': '"""weighted"""'}), "(y_res, pred_base, average='weighted')\n", (2925, 2963), False, 'from sklearn.metrics import f1_score\n')] |
# -*- coding: utf-8 -*-
# CIELAB Chroma Enhancement
import numpy as np
import cv2
ep=1e-06
def rgb_gamma(rgb):
rgb2=np.zeros((rgb.shape[0],rgb.shape[1]),dtype=np.float)
rgb2[rgb[:,0]<=0.03928,0] = rgb[rgb[:,0]<=0.03928,0]/12.92
rgb2[rgb[:,1]<=0.03928,1] = rgb[rgb[:,1]<=0.03928,1]/12.92
rgb2[rgb[:,2]<=0.03928,2] = rgb[rgb[:,2]<=0.03928,2]/12.92
rgb2[rgb[:,0]>0.03928,0] = ((rgb[rgb[:,0]>0.03928,0]+0.055)/1.055)**2.4
rgb2[rgb[:,1]>0.03928,1] = ((rgb[rgb[:,1]>0.03928,1]+0.055)/1.055)**2.4
rgb2[rgb[:,2]>0.03928,2] = ((rgb[rgb[:,2]>0.03928,2]+0.055)/1.055)**2.4
return rgb2
def rgb_inv_gamma(rgb):
rgb2=np.zeros((rgb.shape[0],rgb.shape[1]),dtype=np.float)
rgb2[rgb[:,0]<=0.00304,0] = 12.92*rgb[rgb[:,0]<=0.00304,0]
rgb2[rgb[:,1]<=0.00304,1] = 12.92*rgb[rgb[:,1]<=0.00304,1]
rgb2[rgb[:,2]<=0.00304,2] = 12.92*rgb[rgb[:,2]<=0.00304,2]
rgb2[rgb[:,0]>0.00304,0] = 1.055*rgb[rgb[:,0]>0.00304,0]**(1/2.4)-0.055
rgb2[rgb[:,1]>0.00304,1] = 1.055*rgb[rgb[:,1]>0.00304,1]**(1/2.4)-0.055
rgb2[rgb[:,2]>0.00304,2] = 1.055*rgb[rgb[:,2]>0.00304,2]**(1/2.4)-0.055
return rgb2
def rgb2xyz_2(rgb):
A=np.array([[0.4124,0.3576,0.1805],[0.2126,0.7152,0.0722],[0.0193,0.1192,0.9505]])
return rgb@A.T
def xyz2rgb_2(xyz):
B=np.array([[3.2410,-1.5374,-0.4986],[-0.9692,1.8760,0.0416],[0.0556,-0.2040,1.0570]])
return xyz@B.T
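# Sanity sketch (added for illustration): the two 3x3 matrices above are the standard
# sRGB <-> XYZ pair and are approximately inverses, so a linear-RGB value should
# round-trip through XYZ up to the matrices' four-digit precision.
def _demo_rgb_xyz_roundtrip():
    rgb = np.array([[0.2, 0.5, 0.8]])
    return xyz2rgb_2(rgb2xyz_2(rgb))  # ~ [[0.2, 0.5, 0.8]]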
def lsasbs_f(x,xn):
f=np.zeros((x.shape[0]),dtype=np.float)
x=x/xn
a=0.008856
f[x>a]=x[x>a]**(1/3)
f[x<=a]=7.787*x[x<=a]+16/116
return f
def lsasbs_invf(f):
x=np.zeros((f.shape[0]),dtype=np.float)
b=0.20689
x[f>b]=f[f>b]**(3)
x[f<=b]=(f[f<=b]-16/116)/7.787
return x
def xyz2lsasbs_2(xyz):
lsasbs=np.zeros((xyz.shape[0],xyz.shape[1]),dtype=np.float)
xn=0.9505;yn=1.000;zn=1.089
fx=lsasbs_f(xyz[:,0],xn)
fy=lsasbs_f(xyz[:,1],yn)
fz=lsasbs_f(xyz[:,2],zn)
lsasbs[:,0]=116*fy-16
lsasbs[:,1]=500*(fx-fy)
lsasbs[:,2]=200*(fy-fz)
return lsasbs
def lsasbs2xyz_2(lsasbs):
xyz_out=np.zeros((lsasbs.shape[0],lsasbs.shape[1]),dtype=np.float)
xn=0.9505;yn=1.000;zn=1.089
fy=(lsasbs[:,0]+16)/116
fx=lsasbs[:,1]/500+fy
fz=-lsasbs[:,2]/200+fy
xyz_out[:,0]=xn*lsasbs_invf(fx)
xyz_out[:,1]=yn*lsasbs_invf(fy)
xyz_out[:,2]=zn*lsasbs_invf(fz)
return xyz_out
def gamut_descript(data):
if data[0] <-0.001 or data[0] > 1.001:
r=0
else:
r=1
if data[1] <-0.001 or data[1] > 1.001:
g=0
else:
g=1
if data[2] <-0.001 or data[2] > 1.001:
b=0
else:
b=1
return r*g*b
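# Usage sketch (added for illustration): the D65 white point maps to linear RGB of
# about (1, 1, 1), which is inside the sRGB gamut, so gamut_descript returns 1; values
# pushed outside [0, 1] by more than the 0.001 tolerance return 0.
def _demo_gamut_check():
    return gamut_descript(xyz2rgb_2(np.array([0.9505, 1.0, 1.089])))  # -> 1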
file_inp='cat.jpg'
file_out='cat_out.jpg'
file_out_correct='cat_out_correct.jpg'
rgb_in=cv2.imread(file_inp,1)
cstar_gmax = np.loadtxt(fname="cmax.csv",dtype="float",delimiter=",")
rgb=cv2.cvtColor(rgb_in,cv2.COLOR_BGR2RGB)
rgb=rgb/255
cx, cy, cc=rgb.shape[:3]
rgb=rgb.reshape((cx*cy,3),order="F")
rgb=rgb_gamma(rgb)
xyz=rgb2xyz_2(rgb)
lsasbs=xyz2lsasbs_2(xyz)
lsasbs=lsasbs.reshape((cx,cy,cc),order="F")
#Chroma enhancement
k1=5
########################################
las_out=k1*lsasbs[:,:,1]
lbs_out=k1*lsasbs[:,:,2]
########################################
#Lightness enhancement
k2=1.5
########################################
ls_out=100*(lsasbs[:,:,0]/100)**(1/k2)
########################################
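# (Added note: with k2 > 1 this inverse-gamma curve brightens mid-tones; e.g. for
# k2 = 1.5, L* = 50 maps to 100 * 0.5**(1/1.5) ≈ 63.)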
cs_out=np.sqrt(las_out**2+lbs_out**2)
las_out[np.abs(las_out)<ep]=ep
h_out=lbs_out/las_out
h_out[cs_out<0.1]=ep
hangle_out=np.arctan2(lbs_out,las_out)*180/np.pi +360*(lbs_out<0)
fY=(ls_out+16)/116
fX=np.sign(las_out)*cs_out/(500*np.sqrt(1+h_out**2))+fY
fZ=-np.sign(lbs_out)*cs_out/(200*np.sqrt(1+(1./h_out**2)))+fY
X=np.zeros((fX.shape[0],fX.shape[1]),dtype=np.float)
X[fX>0.20689]=0.9505*fX[fX>0.20689]**3
X[fX<=0.20689]=(fX[fX<=0.20689]-16/116)*(0.9505/7.78)
Z=np.zeros((fZ.shape[0],fZ.shape[1]),dtype=np.float)
Z[fZ>0.20689]=1.089*fZ[fZ>0.20689]**3
Z[fZ<=0.20689]=(fZ[fZ<=0.20689]-16/116)*(1.089/7.78)
Y=np.zeros((fY.shape[0],fY.shape[1]),dtype=np.float)
Y[fY>0.20689]=1*fY[fY>0.20689]**3
Y[fY<=0.20689]=(fY[fY<=0.20689]-16/116)*(1/7.78)
indexY=np.round(100*Y).astype(int)-1
indexh=np.round(hangle_out).astype(int)-1
X=X.reshape((cx*cy,1),order="F")
Y=Y.reshape((cx*cy,1),order="F")
Z=Z.reshape((cx*cy,1),order="F")
xyz_out=np.hstack([X,Y,Z])
rgb_out=xyz2rgb_2(xyz_out)
rgb_out=rgb_inv_gamma(rgb_out)
rgb_out=255*rgb_out
rgb_out[rgb_out>255]=255
rgb_out[rgb_out<0]=0
rgb_out=rgb_out.astype(np.uint8)
rgb_out=rgb_out.reshape((cx,cy,cc),order="F")
rgb_out=cv2.cvtColor(rgb_out,cv2.COLOR_RGB2BGR)
X=X.reshape((cx,cy),order="F")
Y=Y.reshape((cx,cy),order="F")
Z=Z.reshape((cx,cy),order="F")
Xn=0.9505
Zn=1.089
cs_out_max=np.zeros((cs_out.shape[0],cs_out.shape[1]),dtype=np.float)
for i in range(cx):
for j in range(cy):
if gamut_descript(xyz2rgb_2([Xn*fX[i,j]**3,1*fY[i,j]**3,Zn*fZ[i,j]**3])) == 1:
cs_out_max[i,j]=cs_out[i,j]
else:
if indexY[i,j]==-1:
if indexh[i,j]==-1 or indexh[i,j]==359:
cs_out_max[i,j]=np.min([cstar_gmax[indexY[i,j]+1,0],cstar_gmax[indexY[i,j]+1,359]])
else:
cs_out_max[i,j]=np.min([cstar_gmax[indexY[i,j]+1,indexh[i,j]],cstar_gmax[indexY[i,j]+1,indexh[i,j]+1]])
elif indexY[i,j]==99:
if indexh[i,j]==-1 or indexh[i,j]==359:
cs_out_max[i,j]=np.min([cstar_gmax[indexY[i,j],0],cstar_gmax[indexY[i,j],359]])
else:
cs_out_max[i,j]=np.min([cstar_gmax[indexY[i,j],indexh[i,j]],cstar_gmax[indexY[i,j],indexh[i,j]+1]])
elif indexh[i,j]==-1 or indexh[i,j]==359:
cs_out_max[i,j]=np.min([cstar_gmax[indexY[i,j],0],cstar_gmax[indexY[i,j]+1,0],cstar_gmax[indexY[i,j],359],cstar_gmax[indexY[i,j]+1,359]])
else:
cs_out_max[i,j]=np.min([cstar_gmax[indexY[i,j],indexh[i,j]],cstar_gmax[indexY[i,j]+1,indexh[i,j]],cstar_gmax[indexY[i,j],indexh[i,j]+1],cstar_gmax[indexY[i,j]+1,indexh[i,j]+1]])
fX=np.sign(las_out)*cs_out_max/(500*np.sqrt(1+h_out**2))+fY
fZ=-np.sign(lbs_out)*cs_out_max/(200*np.sqrt(1+(1/h_out**2)))+fY
X[fX>0.20689]=0.9505*fX[fX>0.20689]**3
X[fX<=0.20689]=(fX[fX<=0.20689]-16/116)*(0.9505/7.78)
Z[fZ>0.20689]=1.089*fZ[fZ>0.20689]**3
Z[fZ<=0.20689]=(fZ[fZ<=0.20689]-16/116)*(1.089/7.78)
Y[fY>0.20689]=1*fY[fY>0.20689]**3
Y[fY<=0.20689]=(fY[fY<=0.20689]-16/116)*(1/7.78)
X=X.reshape((cx*cy,1),order="F")
Y=Y.reshape((cx*cy,1),order="F")
Z=Z.reshape((cx*cy,1),order="F")
XYZ_out=np.hstack([X,Y,Z])
RGB_correct=xyz2rgb_2(XYZ_out)
RGB_correct=rgb_inv_gamma(RGB_correct)
RGB_correct=255*RGB_correct
RGB_correct[RGB_correct>255]=255
RGB_correct[RGB_correct<0]=0
RGB_correct=RGB_correct.astype(np.uint8)
RGB_correct=RGB_correct.reshape((cx,cy,cc),order="F")
RGB_correct=cv2.cvtColor(RGB_correct,cv2.COLOR_RGB2BGR)
cv2.imwrite(file_out,rgb_out)
cv2.imwrite(file_out_correct,RGB_correct)
cv2.namedWindow('img', cv2.WINDOW_NORMAL)
cv2.imshow('img',rgb_in)
cv2.namedWindow('out', cv2.WINDOW_NORMAL)
cv2.imshow('out',rgb_out)
cv2.namedWindow('out_correct', cv2.WINDOW_NORMAL)
cv2.imshow('out_correct',RGB_correct)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"numpy.abs",
"numpy.arctan2",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"numpy.hstack",
"cv2.imread",
"numpy.min",
"numpy.array",
"numpy.loadtxt",
"numpy.sign",
"cv2.imshow",
"numpy.round",
"cv2.namedWindow",
"numpy.sqrt"
] | [((2727, 2750), 'cv2.imread', 'cv2.imread', (['file_inp', '(1)'], {}), '(file_inp, 1)\n', (2737, 2750), False, 'import cv2\n'), ((2763, 2821), 'numpy.loadtxt', 'np.loadtxt', ([], {'fname': '"""cmax.csv"""', 'dtype': '"""float"""', 'delimiter': '""","""'}), "(fname='cmax.csv', dtype='float', delimiter=',')\n", (2773, 2821), True, 'import numpy as np\n'), ((2824, 2863), 'cv2.cvtColor', 'cv2.cvtColor', (['rgb_in', 'cv2.COLOR_BGR2RGB'], {}), '(rgb_in, cv2.COLOR_BGR2RGB)\n', (2836, 2863), False, 'import cv2\n'), ((3362, 3398), 'numpy.sqrt', 'np.sqrt', (['(las_out ** 2 + lbs_out ** 2)'], {}), '(las_out ** 2 + lbs_out ** 2)\n', (3369, 3398), True, 'import numpy as np\n'), ((3677, 3729), 'numpy.zeros', 'np.zeros', (['(fX.shape[0], fX.shape[1])'], {'dtype': 'np.float'}), '((fX.shape[0], fX.shape[1]), dtype=np.float)\n', (3685, 3729), True, 'import numpy as np\n'), ((3824, 3876), 'numpy.zeros', 'np.zeros', (['(fZ.shape[0], fZ.shape[1])'], {'dtype': 'np.float'}), '((fZ.shape[0], fZ.shape[1]), dtype=np.float)\n', (3832, 3876), True, 'import numpy as np\n'), ((3969, 4021), 'numpy.zeros', 'np.zeros', (['(fY.shape[0], fY.shape[1])'], {'dtype': 'np.float'}), '((fY.shape[0], fY.shape[1]), dtype=np.float)\n', (3977, 4021), True, 'import numpy as np\n'), ((4292, 4312), 'numpy.hstack', 'np.hstack', (['[X, Y, Z]'], {}), '([X, Y, Z])\n', (4301, 4312), True, 'import numpy as np\n'), ((4523, 4563), 'cv2.cvtColor', 'cv2.cvtColor', (['rgb_out', 'cv2.COLOR_RGB2BGR'], {}), '(rgb_out, cv2.COLOR_RGB2BGR)\n', (4535, 4563), False, 'import cv2\n'), ((4688, 4748), 'numpy.zeros', 'np.zeros', (['(cs_out.shape[0], cs_out.shape[1])'], {'dtype': 'np.float'}), '((cs_out.shape[0], cs_out.shape[1]), dtype=np.float)\n', (4696, 4748), True, 'import numpy as np\n'), ((6530, 6550), 'numpy.hstack', 'np.hstack', (['[X, Y, Z]'], {}), '([X, Y, Z])\n', (6539, 6550), True, 'import numpy as np\n'), ((6816, 6860), 'cv2.cvtColor', 'cv2.cvtColor', (['RGB_correct', 'cv2.COLOR_RGB2BGR'], {}), '(RGB_correct, cv2.COLOR_RGB2BGR)\n', (6828, 6860), False, 'import cv2\n'), ((6862, 6892), 'cv2.imwrite', 'cv2.imwrite', (['file_out', 'rgb_out'], {}), '(file_out, rgb_out)\n', (6873, 6892), False, 'import cv2\n'), ((6892, 6934), 'cv2.imwrite', 'cv2.imwrite', (['file_out_correct', 'RGB_correct'], {}), '(file_out_correct, RGB_correct)\n', (6903, 6934), False, 'import cv2\n'), ((6934, 6975), 'cv2.namedWindow', 'cv2.namedWindow', (['"""img"""', 'cv2.WINDOW_NORMAL'], {}), "('img', cv2.WINDOW_NORMAL)\n", (6949, 6975), False, 'import cv2\n'), ((6976, 7001), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'rgb_in'], {}), "('img', rgb_in)\n", (6986, 7001), False, 'import cv2\n'), ((7001, 7042), 'cv2.namedWindow', 'cv2.namedWindow', (['"""out"""', 'cv2.WINDOW_NORMAL'], {}), "('out', cv2.WINDOW_NORMAL)\n", (7016, 7042), False, 'import cv2\n'), ((7043, 7069), 'cv2.imshow', 'cv2.imshow', (['"""out"""', 'rgb_out'], {}), "('out', rgb_out)\n", (7053, 7069), False, 'import cv2\n'), ((7069, 7118), 'cv2.namedWindow', 'cv2.namedWindow', (['"""out_correct"""', 'cv2.WINDOW_NORMAL'], {}), "('out_correct', cv2.WINDOW_NORMAL)\n", (7084, 7118), False, 'import cv2\n'), ((7119, 7157), 'cv2.imshow', 'cv2.imshow', (['"""out_correct"""', 'RGB_correct'], {}), "('out_correct', RGB_correct)\n", (7129, 7157), False, 'import cv2\n'), ((7158, 7172), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (7169, 7172), False, 'import cv2\n'), ((7173, 7196), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7194, 7196), False, 'import cv2\n'), ((121, 175), 'numpy.zeros', 
'np.zeros', (['(rgb.shape[0], rgb.shape[1])'], {'dtype': 'np.float'}), '((rgb.shape[0], rgb.shape[1]), dtype=np.float)\n', (129, 175), True, 'import numpy as np\n'), ((644, 698), 'numpy.zeros', 'np.zeros', (['(rgb.shape[0], rgb.shape[1])'], {'dtype': 'np.float'}), '((rgb.shape[0], rgb.shape[1]), dtype=np.float)\n', (652, 698), True, 'import numpy as np\n'), ((1159, 1252), 'numpy.array', 'np.array', (['[[0.4124, 0.3576, 0.1805], [0.2126, 0.7152, 0.0722], [0.0193, 0.1192, 0.9505]]'], {}), '([[0.4124, 0.3576, 0.1805], [0.2126, 0.7152, 0.0722], [0.0193, \n 0.1192, 0.9505]])\n', (1167, 1252), True, 'import numpy as np\n'), ((1286, 1379), 'numpy.array', 'np.array', (['[[3.241, -1.5374, -0.4986], [-0.9692, 1.876, 0.0416], [0.0556, -0.204, 1.057]]'], {}), '([[3.241, -1.5374, -0.4986], [-0.9692, 1.876, 0.0416], [0.0556, -\n 0.204, 1.057]])\n', (1294, 1379), True, 'import numpy as np\n'), ((1417, 1453), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {'dtype': 'np.float'}), '(x.shape[0], dtype=np.float)\n', (1425, 1453), True, 'import numpy as np\n'), ((1579, 1615), 'numpy.zeros', 'np.zeros', (['f.shape[0]'], {'dtype': 'np.float'}), '(f.shape[0], dtype=np.float)\n', (1587, 1615), True, 'import numpy as np\n'), ((1737, 1791), 'numpy.zeros', 'np.zeros', (['(xyz.shape[0], xyz.shape[1])'], {'dtype': 'np.float'}), '((xyz.shape[0], xyz.shape[1]), dtype=np.float)\n', (1745, 1791), True, 'import numpy as np\n'), ((2048, 2108), 'numpy.zeros', 'np.zeros', (['(lsasbs.shape[0], lsasbs.shape[1])'], {'dtype': 'np.float'}), '((lsasbs.shape[0], lsasbs.shape[1]), dtype=np.float)\n', (2056, 2108), True, 'import numpy as np\n'), ((3401, 3416), 'numpy.abs', 'np.abs', (['las_out'], {}), '(las_out)\n', (3407, 3416), True, 'import numpy as np\n'), ((3479, 3507), 'numpy.arctan2', 'np.arctan2', (['lbs_out', 'las_out'], {}), '(lbs_out, las_out)\n', (3489, 3507), True, 'import numpy as np\n'), ((3558, 3574), 'numpy.sign', 'np.sign', (['las_out'], {}), '(las_out)\n', (3565, 3574), True, 'import numpy as np\n'), ((3587, 3610), 'numpy.sqrt', 'np.sqrt', (['(1 + h_out ** 2)'], {}), '(1 + h_out ** 2)\n', (3594, 3610), True, 'import numpy as np\n'), ((3645, 3674), 'numpy.sqrt', 'np.sqrt', (['(1 + 1.0 / h_out ** 2)'], {}), '(1 + 1.0 / h_out ** 2)\n', (3652, 3674), True, 'import numpy as np\n'), ((4111, 4128), 'numpy.round', 'np.round', (['(100 * Y)'], {}), '(100 * Y)\n', (4119, 4128), True, 'import numpy as np\n'), ((4148, 4168), 'numpy.round', 'np.round', (['hangle_out'], {}), '(hangle_out)\n', (4156, 4168), True, 'import numpy as np\n'), ((6029, 6045), 'numpy.sign', 'np.sign', (['las_out'], {}), '(las_out)\n', (6036, 6045), True, 'import numpy as np\n'), ((6062, 6085), 'numpy.sqrt', 'np.sqrt', (['(1 + h_out ** 2)'], {}), '(1 + h_out ** 2)\n', (6069, 6085), True, 'import numpy as np\n'), ((6123, 6150), 'numpy.sqrt', 'np.sqrt', (['(1 + 1 / h_out ** 2)'], {}), '(1 + 1 / h_out ** 2)\n', (6130, 6150), True, 'import numpy as np\n'), ((3616, 3632), 'numpy.sign', 'np.sign', (['lbs_out'], {}), '(lbs_out)\n', (3623, 3632), True, 'import numpy as np\n'), ((6090, 6106), 'numpy.sign', 'np.sign', (['lbs_out'], {}), '(lbs_out)\n', (6097, 6106), True, 'import numpy as np\n'), ((5059, 5135), 'numpy.min', 'np.min', (['[cstar_gmax[indexY[i, j] + 1, 0], cstar_gmax[indexY[i, j] + 1, 359]]'], {}), '([cstar_gmax[indexY[i, j] + 1, 0], cstar_gmax[indexY[i, j] + 1, 359]])\n', (5065, 5135), True, 'import numpy as np\n'), ((5185, 5289), 'numpy.min', 'np.min', (['[cstar_gmax[indexY[i, j] + 1, indexh[i, j]], cstar_gmax[indexY[i, j] + 1, \n indexh[i, j] + 1]]'], 
{}), '([cstar_gmax[indexY[i, j] + 1, indexh[i, j]], cstar_gmax[indexY[i, j] +\n 1, indexh[i, j] + 1]])\n', (5191, 5289), True, 'import numpy as np\n'), ((5399, 5467), 'numpy.min', 'np.min', (['[cstar_gmax[indexY[i, j], 0], cstar_gmax[indexY[i, j], 359]]'], {}), '([cstar_gmax[indexY[i, j], 0], cstar_gmax[indexY[i, j], 359]])\n', (5405, 5467), True, 'import numpy as np\n'), ((5521, 5618), 'numpy.min', 'np.min', (['[cstar_gmax[indexY[i, j], indexh[i, j]], cstar_gmax[indexY[i, j], indexh[i,\n j] + 1]]'], {}), '([cstar_gmax[indexY[i, j], indexh[i, j]], cstar_gmax[indexY[i, j], \n indexh[i, j] + 1]])\n', (5527, 5618), True, 'import numpy as np\n'), ((5691, 5831), 'numpy.min', 'np.min', (['[cstar_gmax[indexY[i, j], 0], cstar_gmax[indexY[i, j] + 1, 0], cstar_gmax[\n indexY[i, j], 359], cstar_gmax[indexY[i, j] + 1, 359]]'], {}), '([cstar_gmax[indexY[i, j], 0], cstar_gmax[indexY[i, j] + 1, 0],\n cstar_gmax[indexY[i, j], 359], cstar_gmax[indexY[i, j] + 1, 359]])\n', (5697, 5831), True, 'import numpy as np\n'), ((5863, 6056), 'numpy.min', 'np.min', (['[cstar_gmax[indexY[i, j], indexh[i, j]], cstar_gmax[indexY[i, j] + 1,\n indexh[i, j]], cstar_gmax[indexY[i, j], indexh[i, j] + 1], cstar_gmax[\n indexY[i, j] + 1, indexh[i, j] + 1]]'], {}), '([cstar_gmax[indexY[i, j], indexh[i, j]], cstar_gmax[indexY[i, j] + 1,\n indexh[i, j]], cstar_gmax[indexY[i, j], indexh[i, j] + 1], cstar_gmax[\n indexY[i, j] + 1, indexh[i, j] + 1]])\n', (5869, 6056), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
import argparse
import math
import numpy as np
import unireedsolomon as urs
from lib import *
parser = argparse.ArgumentParser(
description='Listen to a pyaudio device, or read data from a file, and try to decode messages.'
)
parser.add_argument(
'-x', '--hex',
default=False, const=True, action='store_const',
help='Print the message in hexadecimal instead of ASCII text.'
)
parser.add_argument(
'-f', '--file',
metavar='FILE', type=str,
help='Read from this file.'
)
parser.add_argument(
'-d', '--device',
metavar='DEVICE', default=-1, type=int,
help='PyAudio device index. Use devices.py to get a list of devices. The default is -1, the system-wide default device.'
)
parser.add_argument(
'-e', '--exit-after-success',
default=False, const=True, action='store_const',
help='Exit after the first successfully decoded message'
)
args = parser.parse_args()
datalen = SYMBOL_LENGTH*len(syncBits)
if args.file:
generator = s16_read(datalen, args.file)
else:
generator = pyaudio_read(datalen, args.device)
data = np.zeros(datalen*2, dtype=np.float32)
datapos = 0
while True:
data[:datalen] = data[datalen:]
datapos = datalen
chunk = generator.send(None)
if len(chunk) == 0:
print("End of data stream reached, terminating")
exit(1)
data[datapos:datapos+len(chunk)] = chunk
datapos = datapos+len(chunk)
corr = np.correlate(normalize(data[:datapos]), syncSig)
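    # Matched filter: correlate the normalized buffer against the known sync
    # waveform; the correlation peak marks where a frame starts.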
signal_strength = np.max(corr)
best_pos = np.argmax(corr)
if math.isnan(signal_strength) or signal_strength < 2000:
continue
print("Got sync signal, decoding... ")
ns, erasures = receive_frame(normalize(data[best_pos:best_pos+len(syncSig)]))
try:
bs = nibbles2bytes(ns, erasures)
if args.hex:
            msg = ''.join("{:02x}".format(b) for b in bs)
else:
msg = str(bs, 'ASCII').strip()
print("DECODED: >>> " + msg + " <<<")
if args.exit_after_success:
exit(0)
except urs.rs.RSCodecError:
print("NO DECODE.")
| [
"math.isnan",
"argparse.ArgumentParser",
"numpy.argmax",
"numpy.zeros",
"numpy.max"
] | [((151, 281), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Listen to a pyaudio device, or read data from a file, and try to decode messages."""'}), "(description=\n 'Listen to a pyaudio device, or read data from a file, and try to decode messages.'\n )\n", (174, 281), False, 'import argparse\n'), ((1122, 1161), 'numpy.zeros', 'np.zeros', (['(datalen * 2)'], {'dtype': 'np.float32'}), '(datalen * 2, dtype=np.float32)\n', (1130, 1161), True, 'import numpy as np\n'), ((1536, 1548), 'numpy.max', 'np.max', (['corr'], {}), '(corr)\n', (1542, 1548), True, 'import numpy as np\n'), ((1564, 1579), 'numpy.argmax', 'np.argmax', (['corr'], {}), '(corr)\n', (1573, 1579), True, 'import numpy as np\n'), ((1588, 1615), 'math.isnan', 'math.isnan', (['signal_strength'], {}), '(signal_strength)\n', (1598, 1615), False, 'import math\n')] |
# Copyright 2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""Unit tests for GKP-specific functions in the GKP module."""
import numpy as np
from numpy import sqrt, pi
from numpy.random import default_rng as rng
import pytest
from flamingpy.cv.gkp import integer_fractional, GKP_binner, Z_err, Z_err_cond
N = 50
# Construct random numbers from an integer and fractional part.
alpha_vals = np.append(np.random.rand(5) * 5, np.sqrt(np.pi))
class TestGKPBinning:
"""Tests for GKP binning functions."""
@pytest.mark.parametrize("alpha", alpha_vals)
def test_integer_fractional(self, alpha):
"""Test that the integer and fractional part as obtained by
integer_fractional matches that of constructed numbers."""
integers = rng().integers(-N // 2, N // 2, N)
fractions = (rng().random(N) - 0.5) * alpha
numbers = integers * alpha + fractions
int_part, frac_part = integer_fractional(numbers, alpha)
assert np.all(int_part == integers)
assert np.allclose(frac_part, fractions)
def test_gkp_binner(self):
"""Tests that GKP_binner gives the integer part mod 2, and returns the
fractional part if asked."""
alpha = np.sqrt(np.pi)
integers = rng().integers(-N // 2, N // 2, N)
fractions = rng().random(N) * (alpha / 2)
numbers = integers * alpha + fractions
bits = integers % 2
int_part, frac_part = integer_fractional(numbers, alpha)
assert np.all(GKP_binner(numbers) == bits)
assert np.allclose(GKP_binner(numbers, return_fraction=True)[1], fractions)
# Even and odd homodyne outcomes mod sqrt(pi), and outcomes in the middle.
even_homs = np.array([2 * i * sqrt(pi) for i in range(-N // 2, N // 2)])
odd_homs = np.array([(2 * i + 1) * sqrt(pi) for i in range(-N // 2, N // 2)])
middle_homs = np.array([(2 * i + 1) * sqrt(pi) / 2 for i in range(-N // 2, N // 2)])
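# even_homs and odd_homs sit exactly on even/odd multiples of sqrt(pi) (the
# two ideal GKP bit outcomes), while middle_homs sit halfway between bins,
# the maximally ambiguous case.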
# Limit for summations.
lim = int(2 * N * np.sqrt(np.pi))
# Random low and high delta values
low_delta = rng().uniform(0.0001, 0.001, N)
high_delta = rng().uniform(10, 15, N)
class TestPhaseProbs:
"""Test the phase error proability functions."""
def test_Z_err(self):
"""Ensure phase errors are 0 for low deltas and 0.5 for high deltas."""
low_probs = Z_err(low_delta)
high_probs = Z_err(high_delta)
assert np.allclose(low_probs, 0)
assert np.allclose(high_probs, 0.5)
@pytest.mark.parametrize("use_hom_val", [False, True])
def test_Z_err_cond(self, use_hom_val):
"""Test high-squeezing (low delta) regime."""
for delta in (high_delta, low_delta):
even_probs = Z_err_cond(delta, even_homs, var_num=lim, use_hom_val=use_hom_val)
odd_probs = Z_err_cond(delta, odd_homs, var_num=lim, use_hom_val=use_hom_val)
mid_probs = Z_err_cond(delta, middle_homs, var_num=lim, use_hom_val=use_hom_val)
# Ensure that conditional phase error probabilities are close
# to 0 for low delta values and 0.5 for high delta values.
if np.array_equal(delta, high_delta):
ref_prob = 0.5
else:
ref_prob = 0
assert np.allclose(even_probs, ref_prob, rtol=0.001)
assert np.allclose(odd_probs, ref_prob, rtol=0.001)
assert np.allclose(mid_probs, ref_prob, rtol=0.001)
# TODO: Test use_hom_val argument, changing summation limit,
# 0-denominator behaviour
| [
"flamingpy.cv.gkp.Z_err",
"numpy.array_equal",
"numpy.allclose",
"flamingpy.cv.gkp.Z_err_cond",
"flamingpy.cv.gkp.integer_fractional",
"numpy.random.default_rng",
"numpy.random.rand",
"pytest.mark.parametrize",
"flamingpy.cv.gkp.GKP_binner",
"numpy.all",
"numpy.sqrt"
] | [((963, 977), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (970, 977), True, 'import numpy as np\n'), ((1052, 1096), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""alpha"""', 'alpha_vals'], {}), "('alpha', alpha_vals)\n", (1075, 1096), False, 'import pytest\n'), ((2988, 3041), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_hom_val"""', '[False, True]'], {}), "('use_hom_val', [False, True])\n", (3011, 3041), False, 'import pytest\n'), ((940, 957), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (954, 957), True, 'import numpy as np\n'), ((1461, 1495), 'flamingpy.cv.gkp.integer_fractional', 'integer_fractional', (['numbers', 'alpha'], {}), '(numbers, alpha)\n', (1479, 1495), False, 'from flamingpy.cv.gkp import integer_fractional, GKP_binner, Z_err, Z_err_cond\n'), ((1511, 1539), 'numpy.all', 'np.all', (['(int_part == integers)'], {}), '(int_part == integers)\n', (1517, 1539), True, 'import numpy as np\n'), ((1555, 1588), 'numpy.allclose', 'np.allclose', (['frac_part', 'fractions'], {}), '(frac_part, fractions)\n', (1566, 1588), True, 'import numpy as np\n'), ((1753, 1767), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (1760, 1767), True, 'import numpy as np\n'), ((1978, 2012), 'flamingpy.cv.gkp.integer_fractional', 'integer_fractional', (['numbers', 'alpha'], {}), '(numbers, alpha)\n', (1996, 2012), False, 'from flamingpy.cv.gkp import integer_fractional, GKP_binner, Z_err, Z_err_cond\n'), ((2503, 2517), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (2510, 2517), True, 'import numpy as np\n'), ((2567, 2572), 'numpy.random.default_rng', 'rng', ([], {}), '()\n', (2570, 2572), True, 'from numpy.random import default_rng as rng\n'), ((2612, 2617), 'numpy.random.default_rng', 'rng', ([], {}), '()\n', (2615, 2617), True, 'from numpy.random import default_rng as rng\n'), ((2841, 2857), 'flamingpy.cv.gkp.Z_err', 'Z_err', (['low_delta'], {}), '(low_delta)\n', (2846, 2857), False, 'from flamingpy.cv.gkp import integer_fractional, GKP_binner, Z_err, Z_err_cond\n'), ((2879, 2896), 'flamingpy.cv.gkp.Z_err', 'Z_err', (['high_delta'], {}), '(high_delta)\n', (2884, 2896), False, 'from flamingpy.cv.gkp import integer_fractional, GKP_binner, Z_err, Z_err_cond\n'), ((2912, 2937), 'numpy.allclose', 'np.allclose', (['low_probs', '(0)'], {}), '(low_probs, 0)\n', (2923, 2937), True, 'import numpy as np\n'), ((2953, 2981), 'numpy.allclose', 'np.allclose', (['high_probs', '(0.5)'], {}), '(high_probs, 0.5)\n', (2964, 2981), True, 'import numpy as np\n'), ((2255, 2263), 'numpy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (2259, 2263), False, 'from numpy import sqrt, pi\n'), ((2333, 2341), 'numpy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (2337, 2341), False, 'from numpy import sqrt, pi\n'), ((3211, 3277), 'flamingpy.cv.gkp.Z_err_cond', 'Z_err_cond', (['delta', 'even_homs'], {'var_num': 'lim', 'use_hom_val': 'use_hom_val'}), '(delta, even_homs, var_num=lim, use_hom_val=use_hom_val)\n', (3221, 3277), False, 'from flamingpy.cv.gkp import integer_fractional, GKP_binner, Z_err, Z_err_cond\n'), ((3302, 3367), 'flamingpy.cv.gkp.Z_err_cond', 'Z_err_cond', (['delta', 'odd_homs'], {'var_num': 'lim', 'use_hom_val': 'use_hom_val'}), '(delta, odd_homs, var_num=lim, use_hom_val=use_hom_val)\n', (3312, 3367), False, 'from flamingpy.cv.gkp import integer_fractional, GKP_binner, Z_err, Z_err_cond\n'), ((3392, 3460), 'flamingpy.cv.gkp.Z_err_cond', 'Z_err_cond', (['delta', 'middle_homs'], {'var_num': 'lim', 'use_hom_val': 'use_hom_val'}), '(delta, middle_homs, 
var_num=lim, use_hom_val=use_hom_val)\n', (3402, 3460), False, 'from flamingpy.cv.gkp import integer_fractional, GKP_binner, Z_err, Z_err_cond\n'), ((3621, 3654), 'numpy.array_equal', 'np.array_equal', (['delta', 'high_delta'], {}), '(delta, high_delta)\n', (3635, 3654), True, 'import numpy as np\n'), ((3753, 3798), 'numpy.allclose', 'np.allclose', (['even_probs', 'ref_prob'], {'rtol': '(0.001)'}), '(even_probs, ref_prob, rtol=0.001)\n', (3764, 3798), True, 'import numpy as np\n'), ((3818, 3862), 'numpy.allclose', 'np.allclose', (['odd_probs', 'ref_prob'], {'rtol': '(0.001)'}), '(odd_probs, ref_prob, rtol=0.001)\n', (3829, 3862), True, 'import numpy as np\n'), ((3882, 3926), 'numpy.allclose', 'np.allclose', (['mid_probs', 'ref_prob'], {'rtol': '(0.001)'}), '(mid_probs, ref_prob, rtol=0.001)\n', (3893, 3926), True, 'import numpy as np\n'), ((1297, 1302), 'numpy.random.default_rng', 'rng', ([], {}), '()\n', (1300, 1302), True, 'from numpy.random import default_rng as rng\n'), ((1787, 1792), 'numpy.random.default_rng', 'rng', ([], {}), '()\n', (1790, 1792), True, 'from numpy.random import default_rng as rng\n'), ((2035, 2054), 'flamingpy.cv.gkp.GKP_binner', 'GKP_binner', (['numbers'], {}), '(numbers)\n', (2045, 2054), False, 'from flamingpy.cv.gkp import integer_fractional, GKP_binner, Z_err, Z_err_cond\n'), ((2091, 2132), 'flamingpy.cv.gkp.GKP_binner', 'GKP_binner', (['numbers'], {'return_fraction': '(True)'}), '(numbers, return_fraction=True)\n', (2101, 2132), False, 'from flamingpy.cv.gkp import integer_fractional, GKP_binner, Z_err, Z_err_cond\n'), ((2414, 2422), 'numpy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (2418, 2422), False, 'from numpy import sqrt, pi\n'), ((1842, 1847), 'numpy.random.default_rng', 'rng', ([], {}), '()\n', (1845, 1847), True, 'from numpy.random import default_rng as rng\n'), ((1353, 1358), 'numpy.random.default_rng', 'rng', ([], {}), '()\n', (1356, 1358), True, 'from numpy.random import default_rng as rng\n')] |
import os
import numpy as np
from matplotlib import pyplot as pp
from matplotlib.backends.backend_pdf import PdfPages
SPINE_COLOR = 'gray'
class Plot:
def __init__(self, title, for_print: bool = False, small: bool = False):
if small:
self.setsize(fig_width=8, fig_height=6)
else:
self.setsize()
title_string = title
if for_print:
self.latexify()
title_string = r"\textbf{%s}" % title
self.for_print = for_print
self.ax = pp.axes()
pp.gcf().patch.set_alpha(0.5)
self.ax.set_xlabel('Training episodes', fontsize=14)
self.ax.set_ylabel('Grade', fontsize=14)
self.markers = {"o", "D", "^", "8", "h", "s"}
self.ax.set_title(title_string, fontsize=18, fontweight='bold', y=1.05)
self.ax = self.format_axes(self.ax)
def setsize(self, fig_width=None, fig_height=None, columns=1):
assert (columns in [1, 2])
if fig_width is None:
fig_width = 3.39 if columns == 1 else 6.9 # width in inches
if fig_height is None:
            golden_mean = (np.sqrt(5) - 1.0) / 2.0  # Aesthetic ratio
            fig_height = fig_width * golden_mean  # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
print("WARNING: fig_height too large:" + str(fig_height) +
"so will reduce to" + str(MAX_HEIGHT_INCHES) + "inches.")
fig_height = MAX_HEIGHT_INCHES
params = {
'figure.figsize': [fig_width, fig_height],
}
pp.rcParams.update(params)
def latexify(self):
"""Set up matplotlib's RC params for LaTeX plotting.
        Call this before plotting a figure. The figure dimensions themselves
        are controlled by setsize().
        """
# code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
# Width and max height in inches for IEEE journals taken from
# computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
params = {'backend': 'ps',
                  'text.latex.preamble': r'\usepackage{gensymb}',  # newer Matplotlib expects a string here
'axes.labelsize': 2,
'font.size': 14,
'font.weight': "bold",
'legend.fontsize': 6,
'xtick.labelsize': 6,
'ytick.labelsize': 6,
'text.usetex': True,
'figure.titleweight': "bold",
'font.family': 'serif'
}
# We change the fontsize of minor ticks label
pp.tick_params(axis='both', which='major', labelsize=12, pad=-2)
pp.rcParams.update(params)
pp.rc('font', family="serif", serif="palatino")
def format_axes(self, ax):
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
for spine in ['left', 'bottom']:
ax.spines[spine].set_color(SPINE_COLOR)
ax.spines[spine].set_linewidth(0.5)
ax.spines[spine].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(which="both", color="white", linestyle="-", linewidth=1)
ax.set_axisbelow(True)
        ax.set_facecolor("#EEEEEE")  # set_axis_bgcolor was removed in newer Matplotlib
for axis in [ax.xaxis, ax.yaxis]:
axis.set_tick_params(direction='out', color="white")
return ax
def plot_evaluations(self, series, means, confidences,
label):
pp.errorbar(series, means, yerr=confidences, label=label)
def save(self, name: str, path: str):
pp.legend(loc="lower right")
pp.tight_layout()
full_out = os.path.join(path, name + ".pdf")
pdf = PdfPages(full_out)
pdf.savefig()
pdf.close()
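# Minimal usage sketch (the data values and file names are hypothetical):
# p = Plot("Demo run")
# p.plot_evaluations([0, 100, 200], [0.2, 0.5, 0.7], [0.05, 0.04, 0.03], label="agent")
# p.save("demo", ".")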
| [
"matplotlib.backends.backend_pdf.PdfPages",
"os.path.join",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.errorbar",
... | [((527, 536), 'matplotlib.pyplot.axes', 'pp.axes', ([], {}), '()\n', (534, 536), True, 'from matplotlib import pyplot as pp\n'), ((1609, 1635), 'matplotlib.pyplot.rcParams.update', 'pp.rcParams.update', (['params'], {}), '(params)\n', (1627, 1635), True, 'from matplotlib import pyplot as pp\n'), ((2724, 2788), 'matplotlib.pyplot.tick_params', 'pp.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'labelsize': '(12)', 'pad': '(-2)'}), "(axis='both', which='major', labelsize=12, pad=-2)\n", (2738, 2788), True, 'from matplotlib import pyplot as pp\n'), ((2798, 2824), 'matplotlib.pyplot.rcParams.update', 'pp.rcParams.update', (['params'], {}), '(params)\n', (2816, 2824), True, 'from matplotlib import pyplot as pp\n'), ((2833, 2880), 'matplotlib.pyplot.rc', 'pp.rc', (['"""font"""'], {'family': '"""serif"""', 'serif': '"""palatino"""'}), "('font', family='serif', serif='palatino')\n", (2838, 2880), True, 'from matplotlib import pyplot as pp\n'), ((3653, 3710), 'matplotlib.pyplot.errorbar', 'pp.errorbar', (['series', 'means'], {'yerr': 'confidences', 'label': 'label'}), '(series, means, yerr=confidences, label=label)\n', (3664, 3710), True, 'from matplotlib import pyplot as pp\n'), ((3762, 3790), 'matplotlib.pyplot.legend', 'pp.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (3771, 3790), True, 'from matplotlib import pyplot as pp\n'), ((3799, 3816), 'matplotlib.pyplot.tight_layout', 'pp.tight_layout', ([], {}), '()\n', (3814, 3816), True, 'from matplotlib import pyplot as pp\n'), ((3836, 3869), 'os.path.join', 'os.path.join', (['path', "(name + '.pdf')"], {}), "(path, name + '.pdf')\n", (3848, 3869), False, 'import os\n'), ((3884, 3902), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['full_out'], {}), '(full_out)\n', (3892, 3902), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((545, 553), 'matplotlib.pyplot.gcf', 'pp.gcf', ([], {}), '()\n', (551, 553), True, 'from matplotlib import pyplot as pp\n'), ((1132, 1142), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (1139, 1142), True, 'import numpy as np\n')] |
# coding:UTF-8
import numpy as np
# d_2 = {'9244.png': '108', '3293.png': '108', '6532.png': '108', '2661.png': '108', '9715.png': '108', '3310.png': '108', '7264.png': '108', '9406.png': '108', '5155.png': '108', '5521.png': '108'}
# d_1 = {'6777.png': '92', '7049.png': '92', '9510.png': '92', '15189.png': '92', '8806.png': '92', '12840.png': '92'}
# print(d_1 + d_2)
# labels = []
# labels.extend([0] * 5)
# labels.extend([1] * 5)
# print(len(labels))
# ret = np.linspace(0, 20, 4 + 1).astype(np.int)
# print(ret)
# ret = np.arange(1,4)
# print(ret)
dst = np.zeros((2,2,3),np.uint8)
print(dst[0])
import os
print(os.path.join("11", "ewe", "ede")) | [
"numpy.zeros",
"os.path.join"
] | [((566, 595), 'numpy.zeros', 'np.zeros', (['(2, 2, 3)', 'np.uint8'], {}), '((2, 2, 3), np.uint8)\n', (574, 595), True, 'import numpy as np\n'), ((625, 657), 'os.path.join', 'os.path.join', (['"""11"""', '"""ewe"""', '"""ede"""'], {}), "('11', 'ewe', 'ede')\n", (637, 657), False, 'import os\n')] |
__author__ = 'fnaiser'
import pickle
import cv2
import numpy as np
from core.region import cyMser
from core.region.mser_operations import children_filter
from core.region.region import Region
from core.config import config
from .mser_operations import get_region_groups_dict_, margin_filter_dict_, min_intensity_filter_dict_
from utils.video_manager import get_auto_video_manager
class Mser():
def __init__(self, max_area=0.005, min_margin=5, min_area=5):
self.mser = cyMser.PyMser()
self.mser.set_min_margin(min_margin)
self.mser.set_max_area(max_area)
self.mser.set_min_size(min_area)
def process_image(self, img, frame=-1, intensity_threshold=256, prefiltered=False,
region_min_intensity=None, intensity_percentile=-1, use_margin_filter=True,
use_children_filter=True):
if len(img.shape) > 2:
if img.shape[2] > 1:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray = img[:, :, 0]
else:
gray = img
if intensity_threshold > 256:
intensity_threshold = 256
self.mser.process_image(gray, intensity_threshold)
mser_regions = self.mser.get_regions()
if prefiltered:
groups = get_region_groups_dict_(mser_regions)
if use_margin_filter:
ids = margin_filter_dict_(mser_regions, groups)
else:
ids = list(range(len(mser_regions)))
if region_min_intensity is not None and region_min_intensity < 256:
# fix minI:
for r_id in ids:
r = mser_regions[r_id]
min_i_ = 255
if intensity_percentile > 0:
dd = []
for it in r['rle']:
d = img[it['line'], it['col1']:it['col2'] + 1]
if intensity_percentile > 0:
dd.extend(d)
m_ = d.min()
min_i_ = min(min_i_, m_)
r['minI'] = min_i_
if intensity_percentile > 0:
r['intensity_percentile'] = np.percentile(dd, intensity_percentile)
ids = min_intensity_filter_dict_(mser_regions, ids, region_min_intensity, intensity_percentile > 0)
regions = [Region(mser_regions[i], frame) for i in ids]
else:
regions = [Region(dr, frame) for i, dr in enumerate(mser_regions)]
if use_children_filter:
ids = children_filter(regions, list(range(len(regions))))
return [regions[i] for i in ids]
else:
return regions
def set_max_area_relative(self, max_area_relative):
self.mser.set_max_area(max_area_relative)
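# Minimal usage sketch (the image path and parameter values are hypothetical):
# mser = Mser(max_area=0.005, min_margin=5, min_area=5)
# regions = mser.process_image(cv2.imread('frame.png'), frame=0)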
def get_mser(frame_number, id, project):
"""
Tries to use cached MSERs, if cache is empty, MSERs are computed and if caching is allowed, then stored.
Returns region based on id
"""
    return get_all_msers(frame_number, project)[id]
def get_all_msers(frame_number, project):
"""
Tries to use cached MSERs, if cache is empty, MSERs are computed and if caching is allowed, then stored.
Returns all regions
"""
if config['cache']['mser']:
try:
with open(project.working_directory+'/mser/'+str(frame_number)+'.pkl', 'rb') as f:
msers = pickle.load(f)
return msers
except IOError:
vid = get_auto_video_manager(project)
            msers = get_regions_in_img(vid.seek_frame(frame_number), project, frame_number)
try:
with open(project.working_directory+'/mser/'+str(frame_number)+'.pkl', 'wb') as f:
pickle.dump(msers, f)
except IOError:
pass
return msers
else:
vid = get_auto_video_manager(project)
        return get_regions_in_img(vid.seek_frame(frame_number), project, frame_number)
def get_regions_in_img(img, project, frame=-1, prefiltered=False):
"""
Returns msers as list of Region objects using MSER algorithm with default settings.
"""
max_area = project.mser_parameters.max_area
min_area = project.mser_parameters.min_area
min_margin = project.mser_parameters.min_margin
max_area_relative = max_area / float(img.shape[0]*img.shape[1])
region_min_intensity = project.mser_parameters.region_min_intensity
intensity_percentile = -1
try:
if project.mser_parameters.use_intensity_percentile_threshold:
intensity_percentile = project.mser_parameters.intensity_percentile
    except AttributeError:
pass
use_margin_filter = False
if not hasattr(project.mser_parameters, 'use_min_margin_filter') or project.mser_parameters.use_min_margin_filter:
use_margin_filter = True
use_children_filter = False
if project.mser_parameters.use_children_filter:
use_children_filter = True
mser = Mser(max_area=max_area_relative, min_margin=min_margin, min_area=min_area)
return mser.process_image(img,
frame,
intensity_threshold=project.mser_parameters.intensity_threshold,
prefiltered=prefiltered,
region_min_intensity=region_min_intensity,
intensity_percentile=intensity_percentile,
use_margin_filter=use_margin_filter,
use_children_filter=use_children_filter
)
def get_filtered_regions(img, project, frame=-1):
"""
Extracts maximally stable extremal regions from an image and return filtered results.
:param img: input image
:param project:
:param frame:
:return: list of Region() objects
"""
# if project.mser_parameters.use_children_filter:
# m = get_msers_(img, project, frame, prefiltered=True)
# groups = get_region_groups(m)
# ids = range(len(m))
# if not hasattr(project.mser_parameters, 'use_min_margin_filter') or project.mser_parameters.use_min_margin_filter:
# ids = margin_filter(m, groups)
# # min_area = project.stats.area_median * 0.2
# # ids = area_filter(m, ids, min_area)
# if project.mser_parameters.use_children_filter:
# ids = children_filter(m, ids)
# if project.stats:
# num_before = len(ids)
# ids = antlikeness_filter(project.stats.antlikeness_svm, project.solver_parameters.antlikeness_threshold, m, ids)
# if len(ids) == 0 and num_before > 0:
# warnings.warn("There is something fishy with antlikeness filter. After filtering, there is 0 regions")
# return [m[id] for id in ids]
# else:
regions = get_regions_in_img(img, project, frame, prefiltered=True)
    ratio_th = project.mser_parameters.area_roi_ratio_threshold
    if ratio_th > 0:  # apply the filter only when a positive threshold is configured
regions = [r for r in regions if r.area() / float(r.roi().width() * r.roi().height()) > ratio_th]
return regions
| [
"pickle.dump",
"cv2.cvtColor",
"core.region.cyMser.PyMser",
"numpy.percentile",
"pickle.load",
"core.region.region.Region",
"utils.video_manager.get_auto_video_manager"
] | [((484, 499), 'core.region.cyMser.PyMser', 'cyMser.PyMser', ([], {}), '()\n', (497, 499), False, 'from core.region import cyMser\n'), ((3968, 3999), 'utils.video_manager.get_auto_video_manager', 'get_auto_video_manager', (['project'], {}), '(project)\n', (3990, 3999), False, 'from utils.video_manager import get_auto_video_manager\n'), ((950, 987), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (962, 987), False, 'import cv2\n'), ((2433, 2463), 'core.region.region.Region', 'Region', (['mser_regions[i]', 'frame'], {}), '(mser_regions[i], frame)\n', (2439, 2463), False, 'from core.region.region import Region\n'), ((2515, 2532), 'core.region.region.Region', 'Region', (['dr', 'frame'], {}), '(dr, frame)\n', (2521, 2532), False, 'from core.region.region import Region\n'), ((3511, 3525), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3522, 3525), False, 'import pickle\n'), ((3594, 3625), 'utils.video_manager.get_auto_video_manager', 'get_auto_video_manager', (['project'], {}), '(project)\n', (3616, 3625), False, 'from utils.video_manager import get_auto_video_manager\n'), ((2252, 2291), 'numpy.percentile', 'np.percentile', (['dd', 'intensity_percentile'], {}), '(dd, intensity_percentile)\n', (2265, 2291), True, 'import numpy as np\n'), ((3846, 3867), 'pickle.dump', 'pickle.dump', (['msers', 'f'], {}), '(msers, f)\n', (3857, 3867), False, 'import pickle\n')] |
#!/usr/bin/env python3
from matplotlib import pylab as plt
from mpl_toolkits.mplot3d import axes3d
import sys
import numpy as np
from numpy.fft import rfftn, fftshift
import flash as FLASH
from shellavg import shell_avg_3d
import ulz
sys.argv.reverse()
progpath = sys.argv.pop()
flsfp = sys.argv.pop()
flsfp2 = sys.argv.pop()
sinkfp = sys.argv.pop()
flash = FLASH.File(flsfp)
time = flash.realscalars['time']
#step = flash.integerscalars['nstep']
c_s = flash.realruntime['c_ambient']
rho0 = flash.realruntime['rho_ambient']
# c_s = flash.realruntime['sim_cambient']
# rho0 = flash.realruntime['sim_rhoambient']
Vgrid = np.prod(flash.gridsize)
Vcell = np.prod(flash.cellsize)
Vdomain = np.prod(flash.domainsize)
density = flash.data('dens')
velocity = tuple(flash.data('vel'+dim) for dim in 'x y z'.split())
#ekin = 0.5*Vcell*density * ulz.norm(*velocity)
#fekin = fftshift(np.abs(rfftn(ekin)))
#speed = density**(1/3) * np.sqrt(ulz.norm(*velocity))
speed = np.sqrt(ulz.norm(*velocity))
#vels = ulz.norm(*velocity)
fvels = fftshift(np.abs(rfftn(speed)))
nsamples = 200
radii,totals = shell_avg_3d(fvels**2, nsamples)
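# Assuming shell_avg_3d returns the mean of |F|^2 over spherical shells in
# k-space, multiplying by rs**2 (~ shell area) below approximates the
# shell-integrated 1D power spectrum.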
rs = radii[1:]
ps = rs**2 * totals[1:]
plt.grid()
plt.xlim(1,1000)
xs = rs
ys = ps/rs**(-5/3)
plt.loglog(xs,ys, '-', label='b3')
#########################
flash = FLASH.File(flsfp2)
time = flash.realscalars['time']
#step = flash.integerscalars['nstep']
c_s = flash.realruntime['c_ambient']
rho0 = flash.realruntime['rho_ambient']
# c_s = flash.realruntime['sim_cambient']
# rho0 = flash.realruntime['sim_rhoambient']
Vgrid = np.prod(flash.gridsize)
Vcell = np.prod(flash.cellsize)
Vdomain = np.prod(flash.domainsize)
density = flash.data('dens')
velocity = tuple(flash.data('vel'+dim) for dim in 'x y z'.split())
#ekin = 0.5*Vcell*density * ulz.norm(*velocity)
#fekin = fftshift(np.abs(rfftn(ekin)))
#speed = density**(1/3) * np.sqrt(ulz.norm(*velocity))
speed = np.sqrt(ulz.norm(*velocity))
#vels = ulz.norm(*velocity)
fvels = fftshift(np.abs(rfftn(speed)))
nsamples = 200
radii,totals = shell_avg_3d(fvels**2, nsamples)
rs = radii[1:]
ps = rs**2 * totals[1:]
#########################
xs = rs
ys = ps/rs**(-2)
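# Compensated spectra: dividing by k^(-5/3) above ('b3') flattens a
# Kolmogorov-type spectrum, and dividing by k^(-2) here ('b5') flattens a
# Burgers-like spectrum, so departures from the scaling show up as tilt.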
plt.loglog(xs,ys, '-', label='b5')
plt.legend(loc='upper right')
plt.savefig(sinkfp,bbox_inches='tight')
| [
"matplotlib.pylab.savefig",
"matplotlib.pylab.legend",
"sys.argv.pop",
"ulz.norm",
"flash.File",
"sys.argv.reverse",
"numpy.fft.rfftn",
"matplotlib.pylab.xlim",
"matplotlib.pylab.grid",
"matplotlib.pylab.loglog",
"numpy.prod",
"shellavg.shell_avg_3d"
] | [((239, 257), 'sys.argv.reverse', 'sys.argv.reverse', ([], {}), '()\n', (255, 257), False, 'import sys\n'), ((269, 283), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (281, 283), False, 'import sys\n'), ((292, 306), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (304, 306), False, 'import sys\n'), ((316, 330), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (328, 330), False, 'import sys\n'), ((340, 354), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (352, 354), False, 'import sys\n'), ((364, 381), 'flash.File', 'FLASH.File', (['flsfp'], {}), '(flsfp)\n', (374, 381), True, 'import flash as FLASH\n'), ((633, 656), 'numpy.prod', 'np.prod', (['flash.gridsize'], {}), '(flash.gridsize)\n', (640, 656), True, 'import numpy as np\n'), ((667, 690), 'numpy.prod', 'np.prod', (['flash.cellsize'], {}), '(flash.cellsize)\n', (674, 690), True, 'import numpy as np\n'), ((702, 727), 'numpy.prod', 'np.prod', (['flash.domainsize'], {}), '(flash.domainsize)\n', (709, 727), True, 'import numpy as np\n'), ((1112, 1146), 'shellavg.shell_avg_3d', 'shell_avg_3d', (['(fvels ** 2)', 'nsamples'], {}), '(fvels ** 2, nsamples)\n', (1124, 1146), False, 'from shellavg import shell_avg_3d\n'), ((1187, 1197), 'matplotlib.pylab.grid', 'plt.grid', ([], {}), '()\n', (1195, 1197), True, 'from matplotlib import pylab as plt\n'), ((1198, 1215), 'matplotlib.pylab.xlim', 'plt.xlim', (['(1)', '(1000)'], {}), '(1, 1000)\n', (1206, 1215), True, 'from matplotlib import pylab as plt\n'), ((1243, 1278), 'matplotlib.pylab.loglog', 'plt.loglog', (['xs', 'ys', '"""-"""'], {'label': '"""b3"""'}), "(xs, ys, '-', label='b3')\n", (1253, 1278), True, 'from matplotlib import pylab as plt\n'), ((1315, 1333), 'flash.File', 'FLASH.File', (['flsfp2'], {}), '(flsfp2)\n', (1325, 1333), True, 'import flash as FLASH\n'), ((1585, 1608), 'numpy.prod', 'np.prod', (['flash.gridsize'], {}), '(flash.gridsize)\n', (1592, 1608), True, 'import numpy as np\n'), ((1619, 1642), 'numpy.prod', 'np.prod', (['flash.cellsize'], {}), '(flash.cellsize)\n', (1626, 1642), True, 'import numpy as np\n'), ((1654, 1679), 'numpy.prod', 'np.prod', (['flash.domainsize'], {}), '(flash.domainsize)\n', (1661, 1679), True, 'import numpy as np\n'), ((2064, 2098), 'shellavg.shell_avg_3d', 'shell_avg_3d', (['(fvels ** 2)', 'nsamples'], {}), '(fvels ** 2, nsamples)\n', (2076, 2098), False, 'from shellavg import shell_avg_3d\n'), ((2190, 2225), 'matplotlib.pylab.loglog', 'plt.loglog', (['xs', 'ys', '"""-"""'], {'label': '"""b5"""'}), "(xs, ys, '-', label='b5')\n", (2200, 2225), True, 'from matplotlib import pylab as plt\n'), ((2226, 2255), 'matplotlib.pylab.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (2236, 2255), True, 'from matplotlib import pylab as plt\n'), ((2256, 2296), 'matplotlib.pylab.savefig', 'plt.savefig', (['sinkfp'], {'bbox_inches': '"""tight"""'}), "(sinkfp, bbox_inches='tight')\n", (2267, 2296), True, 'from matplotlib import pylab as plt\n'), ((992, 1011), 'ulz.norm', 'ulz.norm', (['*velocity'], {}), '(*velocity)\n', (1000, 1011), False, 'import ulz\n'), ((1944, 1963), 'ulz.norm', 'ulz.norm', (['*velocity'], {}), '(*velocity)\n', (1952, 1963), False, 'import ulz\n'), ((1066, 1078), 'numpy.fft.rfftn', 'rfftn', (['input'], {}), '(input)\n', (1071, 1078), False, 'from numpy.fft import rfftn, fftshift\n'), ((2018, 2030), 'numpy.fft.rfftn', 'rfftn', (['input'], {}), '(input)\n', (2023, 2030), False, 'from numpy.fft import rfftn, fftshift\n')] |
import cv2
import numpy as np
# mouse callback function
def pick_color(event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDOWN:
pixel = image[y,x]
#you might want to adjust the ranges(+-10, etc):
upper = np.array([pixel[0] + 20, pixel[1] + 50, pixel[2] + 50])
lower = np.array([pixel[0] - 20, pixel[1] - 50, pixel[2] - 50])
print(pixel, lower, upper)
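        # The bounds are shaped for a cv2.inRange mask on the raw frame; a
        # sketch of that use ('mask' is illustrative, not in the original):
        # mask = cv2.inRange(image, lower, upper)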
cap = cv2.VideoCapture(0)
# Create the window and attach the mouse callback once, before the loop
# (setMouseCallback requires an existing window)
cv2.namedWindow('image')
cv2.setMouseCallback('image', pick_color)
while(True):
    # Capture one frame from the camera
    ret, image = cap.read()
    # Display the image
    cv2.imshow('image', image)
cv2.waitKey(1)
if cv2.getWindowProperty('image', cv2.WND_PROP_AUTOSIZE) == -1:
break
# Release the camera
cap.release()
# Close all OpenCV windows
cv2.destroyAllWindows()
| [
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.setMouseCallback",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.getWindowProperty"
] | [((418, 437), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (434, 437), False, 'import cv2\n'), ((715, 738), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (736, 738), False, 'import cv2\n'), ((494, 535), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'pick_color'], {}), "('image', pick_color)\n", (514, 535), False, 'import cv2\n'), ((547, 573), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (557, 573), False, 'import cv2\n'), ((576, 590), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (587, 590), False, 'import cv2\n'), ((237, 292), 'numpy.array', 'np.array', (['[pixel[0] + 20, pixel[1] + 50, pixel[2] + 50]'], {}), '([pixel[0] + 20, pixel[1] + 50, pixel[2] + 50])\n', (245, 292), True, 'import numpy as np\n'), ((310, 365), 'numpy.array', 'np.array', (['[pixel[0] - 20, pixel[1] - 50, pixel[2] - 50]'], {}), '([pixel[0] - 20, pixel[1] - 50, pixel[2] - 50])\n', (318, 365), True, 'import numpy as np\n'), ((596, 649), 'cv2.getWindowProperty', 'cv2.getWindowProperty', (['"""image"""', 'cv2.WND_PROP_AUTOSIZE'], {}), "('image', cv2.WND_PROP_AUTOSIZE)\n", (617, 649), False, 'import cv2\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
def calc_normal_matrix(data, k, mean_vec, cov_mat):
'''
Given data, integer k denoting number of mixtures, and
the corresponding means and covariance, returns the matrix of
multiv normal probabilities
Inputs:
- data: (np array - N x d) of input data
- k: (int) number of Gaussian mixtures
- mean_vec: (np array - k x d) of Gaussian centers
- cov_mat: (np array - k x d x d) of Gaussian var-covar matrices
Returns:
- pdf_matrix: (np array - N x k) of multiv normal pdf for data
'''
N = data.shape[0]
pdf_matrix = np.empty( (N, k) )
for cur_cluster in range(k):
cluster_pdf = multivariate_normal.pdf(data, mean=mean_vec[cur_cluster], cov=cov_mat[cur_cluster])
pdf_matrix[:, cur_cluster] = cluster_pdf
return pdf_matrix
def calc_cluster_prob(data, k, mean_vec, cov_mat, pi_vec):
'''
Given data, k mixtures with mean and covariance, and mixtures with
priors of pi_vec, calculates the probability that each observation
is in each respective k mixture
Inputs:
- data: (np array - N x d) of input data
- k: (int) number of Gaussian mixtures
- mean_vec: (np array - k x d) of Gaussian centers
- cov_mat: (np array - k x d x d) of Gaussian var-covar matrices
- pi_vec: (np array - k x 1) prior probability of each mixture
Returns:
- prob_mat: (np array - N x k) prob obs i is in cluster j
'''
N = data.shape[0]
normal_pdfs = calc_normal_matrix(data, k, mean_vec, cov_mat)
prob_mat = normal_pdfs * pi_vec.T
prob_mat = prob_mat / prob_mat.sum(axis=1).reshape( (N,1) )
return prob_mat
def calc_expected_likelihood(data, k, mean_vec, cov_mat, pi_vec):
'''
Calculates the expected log likelihood given parameters. Needed
to test convergence of Gaussian Mixture algorithm
Inputs:
- data: (np array - N x d) of input data
- k: (int) number of Gaussian mixtures
- mean_vec: (np array - k x d) of Gaussian centers
- cov_mat: (np array - k x d x d) of Gaussian var-covar matrices
- pi_vec: (np array - k x 1) prior probability of each mixture
Returns:
- exp_ll: (float) expected log likelihood
'''
log_like_ij = np.log(pi_vec).T + np.log(calc_normal_matrix(data, k, mean_vec, cov_mat))
prob_in_cluster = calc_cluster_prob(data, k, mean_vec, cov_mat, pi_vec)
exp_ll_matrix = log_like_ij * prob_in_cluster
exp_ll = np.sum(exp_ll_matrix)
return exp_ll
def calc_m_step(data, k, prob_mat):
'''
Given data, integer k, and updated probability assignments,
update the pi_vec, mean_vec, and cov_mat
Inputs:
- data: (np array - N x d) of input data
- k: (int) number of Gaussian mixtures
- prob_mat: (np array - N x k) prob obs i is in cluster j
Returns: a tuple containing (in order)...
- mean_vec: (np array - k x d) of Gaussian centers
- cov_mat: (np array - k x d x d) of Gaussian var-covar matrices
- pi_vec: (np array - k x 1) prior probability of each mixture
'''
N = data.shape[0]
d = data.shape[1]
    pi_vec = prob_mat.mean(axis=0).reshape( (k,1) )  # keep the (k,1) shape produced at initialization
    weights = prob_mat / prob_mat.sum(axis=0).reshape( (1,k) )
assert np.abs(weights.sum() - k) < 0.01
mean_vec = np.empty( (k,d) )
for cluster_num in range(k):
product = data * (weights[:, cluster_num].reshape( (N,1) ))
center = product.sum(axis = 0)
mean_vec[cluster_num] = center
cov_mat = np.empty( (k, d, d) )
for cluster_num in range(k):
covar = np.zeros( (d,d) )
centered_mean = data - mean_vec[cluster_num]
for i in range(N):
obs = centered_mean[i].reshape( (1,d) )
covar += (obs.T @ obs) * weights[i,cluster_num]
cov_mat[cluster_num] = covar
return mean_vec, cov_mat, pi_vec
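# Note: the per-observation covariance loop above can be vectorized; an
# equivalent sketch under the same definitions:
#   covar = (centered_mean * weights[:, cluster_num:cluster_num + 1]).T @ centered_mean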
def initialize_k_mixture(data, k):
'''
Given data and k desired mixtures, returns initial
values for the target parameters
Inputs:
- data: (np array - N x d) of input data
- k: (int) number of Gaussian mixtures
Returns: a tuple containing (in order)...
- mean_vec: (np array - k x d) of Gaussian centers
- cov_mat: (np array - k x d x d) of Gaussian var-covar matrices
- pi_vec: (np array - k x 1) prior probability of each mixture
'''
N = data.shape[0]
rand_indeces = np.random.randint(0, N, size=k)
mean_vec = data[rand_indeces,:]
pi_vec = np.full( (k,1), 1/k )
centered = data - data.mean(axis=0)
cov_mat = np.array( [(centered.T @ centered) / N]*k )
return mean_vec, cov_mat, pi_vec
def k_gaussian_mixture(data, k, conv_tolerance, mean_vec=None, cov_mat=None, pi_vec=None, conv_ts=None):
'''
Given data and desired k gaussian mixtures, finds MLE for parameters
Inputs:
- data: (np array - N x d) of input data
- k: (int) number of Gaussian mixtures
- conv_tolerance: (float) algorithm converges once exp LL changes by less than this amount
- mean_vec: (np array - k x d) of Gaussian centers
- cov_mat: (np array - k x d x d) of Gaussian var-covar matrices
- pi_vec: (np array - k x 1) prior probability of each mixture
    Output: a tuple containing (in order)...
        - mean_vec, cov_mat, pi_vec: the fitted parameters (shapes as above)
        - conv_ts: (list) expected log likelihood after each iteration
'''
if mean_vec is None:
mean_vec, cov_mat, pi_vec = initialize_k_mixture(data, k)
conv_ts = []
cur_exp_ll = calc_expected_likelihood(data, k, mean_vec, cov_mat, pi_vec)
conv_ts.append(cur_exp_ll)
prob_mat = calc_cluster_prob(data, k, mean_vec, cov_mat, pi_vec)
new_mean_vec, new_cov_mat, new_pi_vec = calc_m_step(data, k, prob_mat)
new_exp_ll = calc_expected_likelihood(data, k, new_mean_vec, new_cov_mat, new_pi_vec)
if np.abs(cur_exp_ll - new_exp_ll) < conv_tolerance:
return (new_mean_vec, new_cov_mat, new_pi_vec, conv_ts)
else:
return k_gaussian_mixture(data, k, conv_tolerance, new_mean_vec, new_cov_mat, new_pi_vec, conv_ts)
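# Note: each EM iteration recurses into k_gaussian_mixture, so a
# slow-converging run can hit Python's default recursion limit; an
# iterative while loop is the usual alternative.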
def plot_dist_graph(data, k, iterations, conv, file_name):
'''
Creates plot of exp log likelihood graph through iteration
'''
for _ in range(iterations):
dist_series = k_gaussian_mixture(data, k, conv)[3]
plt.plot(dist_series, color='black', alpha=.7)
plt.xlabel("Iteration")
plt.ylabel("Exp log likelihood")
title = "Toy data k-means Gaussian mixture"
plt.title(title)
plt.savefig(file_name+".png", format='png')
# 3.g - Create graph of 2D assignment and compare convergence to k-means
toy_data = np.loadtxt('toydata.txt')
N = toy_data.shape[0]
k = 3
conv = 0.1
mean, cov, pi, ts = k_gaussian_mixture(toy_data, k, conv)
# 2D assignment graph, assign to largest p_(i,j)
prob_matrix = calc_cluster_prob(toy_data, k, mean, cov, pi)
assignments = np.argmax(prob_matrix, axis = 1).reshape( (N,1) )
plt.clf()
colors = ['red', 'orange', 'green']
for cluster in range(k):
    b = (assignments == cluster).reshape(N)
    plt.scatter(toy_data[b,0], toy_data[b,1], color=colors[cluster], alpha=.3)
plt.scatter(mean[:,0], mean[:,1], color='black')
plt.title('Toy data gaussian mixture k=3')
plt.savefig('2D_gaussian_mixture.png', format='png')
# Time series of convergence thru 20 runs
plt.clf()
plot_dist_graph(toy_data, k, 20, conv, "distortion_gaussian")
| [
"matplotlib.pyplot.title",
"numpy.full",
"numpy.sum",
"numpy.abs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"numpy.log",
"matplotlib.pyplot.scatter",
"numpy.empty",
"numpy.zeros",
"numpy.argmin",
"numpy.random.randint",
"numpy.array",
"numpy.loadtxt",
"scipy.stats.multivariate_... | [((6142, 6167), 'numpy.loadtxt', 'np.loadtxt', (['"""toydata.txt"""'], {}), "('toydata.txt')\n", (6152, 6167), True, 'import numpy as np\n'), ((6441, 6450), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6448, 6450), True, 'import matplotlib.pyplot as plt\n'), ((6654, 6704), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mean[:, 0]', 'mean[:, 1]'], {'color': '"""black"""'}), "(mean[:, 0], mean[:, 1], color='black')\n", (6665, 6704), True, 'import matplotlib.pyplot as plt\n'), ((6703, 6745), 'matplotlib.pyplot.title', 'plt.title', (['"""Toy data gaussian mixture k=3"""'], {}), "('Toy data gaussian mixture k=3')\n", (6712, 6745), True, 'import matplotlib.pyplot as plt\n'), ((6746, 6798), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""2D_gaussian_mixture.png"""'], {'format': '"""png"""'}), "('2D_gaussian_mixture.png', format='png')\n", (6757, 6798), True, 'import matplotlib.pyplot as plt\n'), ((6843, 6852), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6850, 6852), True, 'import matplotlib.pyplot as plt\n'), ((633, 649), 'numpy.empty', 'np.empty', (['(N, k)'], {}), '((N, k))\n', (641, 649), True, 'import numpy as np\n'), ((1469, 1485), 'numpy.empty', 'np.empty', (['(N, k)'], {}), '((N, k))\n', (1477, 1485), True, 'import numpy as np\n'), ((2418, 2439), 'numpy.sum', 'np.sum', (['exp_ll_matrix'], {}), '(exp_ll_matrix)\n', (2424, 2439), True, 'import numpy as np\n'), ((3176, 3192), 'numpy.empty', 'np.empty', (['(k, d)'], {}), '((k, d))\n', (3184, 3192), True, 'import numpy as np\n'), ((3364, 3383), 'numpy.empty', 'np.empty', (['(k, d, d)'], {}), '((k, d, d))\n', (3372, 3383), True, 'import numpy as np\n'), ((4167, 4198), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N'], {'size': 'k'}), '(0, N, size=k)\n', (4184, 4198), True, 'import numpy as np\n'), ((4243, 4265), 'numpy.full', 'np.full', (['(k, 1)', '(1 / k)'], {}), '((k, 1), 1 / k)\n', (4250, 4265), True, 'import numpy as np\n'), ((4314, 4355), 'numpy.array', 'np.array', (['([centered.T @ centered / N] * k)'], {}), '([centered.T @ centered / N] * k)\n', (4322, 4355), True, 'import numpy as np\n'), ((5890, 5913), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (5900, 5913), True, 'import matplotlib.pyplot as plt\n'), ((5915, 5947), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Exp log likelihood"""'], {}), "('Exp log likelihood')\n", (5925, 5947), True, 'import matplotlib.pyplot as plt\n'), ((5994, 6010), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6003, 6010), True, 'import matplotlib.pyplot as plt\n'), ((6012, 6057), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(file_name + '.png')"], {'format': '"""png"""'}), "(file_name + '.png', format='png')\n", (6023, 6057), True, 'import matplotlib.pyplot as plt\n'), ((699, 787), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['data'], {'mean': 'mean_vec[cur_cluster]', 'cov': 'cov_mat[cur_cluster]'}), '(data, mean=mean_vec[cur_cluster], cov=cov_mat[\n cur_cluster])\n', (722, 787), False, 'from scipy.stats import multivariate_normal\n'), ((3427, 3443), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (3435, 3443), True, 'import numpy as np\n'), ((5408, 5439), 'numpy.abs', 'np.abs', (['(cur_exp_ll - new_exp_ll)'], {}), '(cur_exp_ll - new_exp_ll)\n', (5414, 5439), True, 'import numpy as np\n'), ((5841, 5888), 'matplotlib.pyplot.plot', 'plt.plot', (['dist_series'], {'color': '"""black"""', 'alpha': '(0.7)'}), "(dist_series, color='black', 
alpha=0.7)\n", (5849, 5888), True, 'import matplotlib.pyplot as plt\n'), ((6390, 6420), 'numpy.argmin', 'np.argmin', (['prob_matrix'], {'axis': '(1)'}), '(prob_matrix, axis=1)\n', (6399, 6420), True, 'import numpy as np\n'), ((2212, 2226), 'numpy.log', 'np.log', (['pi_vec'], {}), '(pi_vec)\n', (2218, 2226), True, 'import numpy as np\n')] |
from .custom_json import *
import json
import pytest
import numpy as np
from numpy import testing as npt
from . import test_utils
class TestJSONSerializerDeserializer(object):
def test_add_codec(self):
# without bytes codec, can't serialize numpy
serialization = JSONSerializerDeserializer([numpy_codec])
obj = np.array([[1.0, 0.0], [2.0, 3.2]])
with pytest.raises(TypeError):
serialization.serializer(obj)
# add the codec and it will work
serialization.add_codec(bytes_codec)
serialized = serialization.serializer(obj)
assert len(serialization.codecs) == 2
reconstructed = serialization.deserializer(serialized)
npt.assert_equal(obj, reconstructed)
class CustomJSONCodingTest(object):
def test_default(self):
for (obj, dct) in zip(self.objs, self.dcts):
assert self.codec.default(obj) == dct
def test_object_hook(self):
for (obj, dct) in zip(self.objs, self.dcts):
assert self.codec.object_hook(dct) == obj
def _test_round_trip(self, encoder, decoder):
for (obj, dct) in zip(self.objs, self.dcts):
json_str = json.dumps(obj, cls=encoder)
reconstructed = json.loads(json_str, cls=decoder)
assert reconstructed == obj
json_str_2 = json.dumps(obj, cls=encoder)
assert json_str == json_str_2
def test_round_trip(self):
encoder, decoder = custom_json_factory([self.codec])
self._test_round_trip(encoder, decoder)
def test_not_mine(self):
# test that the default behavior is obeyed
obj = {'test': 5}
json_str = '{"test": 5}'
encoder, decoder = custom_json_factory([self.codec])
assert json.dumps(obj, cls=encoder) == json_str
assert json.loads(json_str, cls=decoder) == obj
class TestNumpyCoding(CustomJSONCodingTest):
def setup(self):
self.codec = numpy_codec
self.objs = [np.array([[1.0, 0.0], [2.0, 3.2]]),
np.array([1, 0])]
shapes = [(2, 2), (2,)]
dtypes = [str(arr.dtype) for arr in self.objs] # may change by system?
string_reps = [arr.tobytes() for arr in self.objs]
self.dcts = [
{
'__class__': 'ndarray',
'__module__': 'numpy',
'shape': shape,
'dtype': dtype,
'string': string_rep
}
for shape, dtype, string_rep in zip(shapes, dtypes, string_reps)
]
def test_object_hook(self):
# to get custom equality testing for numpy
for (obj, dct) in zip(self.objs, self.dcts):
reconstructed = self.codec.object_hook(dct)
npt.assert_array_equal(reconstructed, obj)
def test_round_trip(self):
encoder, decoder = custom_json_factory([self.codec, bytes_codec])
for (obj, dct) in zip(self.objs, self.dcts):
json_str = json.dumps(obj, cls=encoder)
reconstructed = json.loads(json_str, cls=decoder)
npt.assert_array_equal(reconstructed, obj)
json_str_2 = json.dumps(obj, cls=encoder)
assert json_str == json_str_2
class TestUUIDCoding(object):
def setup(self):
self.codec = uuid_object_codec
all_objs = test_utils.all_objects
self.objs = [all_objs['int'], all_objs['str']]
updates = [{'normal_attr': 5, 'name': 'int'},
{'normal_attr': 'foo', 'name': 'str'}]
module = str(test_utils)
self.dcts = [
{
'__class__': 'MockUUIDObject',
'__module__': test_utils.__name__,
'normal_attr': None,
'obj_attr': None,
'list_attr': None,
'dict_attr': None,
'lazy_attr': None,
}
for _ in self.objs
]
for dct, update in zip(self.dcts, updates):
dct.update(update)
test_default = CustomJSONCodingTest.test_default
test_not_mine = CustomJSONCodingTest.test_not_mine
def test_object_hook(self):
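# for the UUID codec, object_hook is expected to return the dict
# unchanged; full object reconstruction happens in a later pass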
for (obj, dct) in zip(self.objs, self.dcts):
assert self.codec.object_hook(dct) == dct
| [
"json.loads",
"numpy.testing.assert_array_equal",
"json.dumps",
"pytest.raises",
"numpy.array",
"numpy.testing.assert_equal"
] | [((342, 376), 'numpy.array', 'np.array', (['[[1.0, 0.0], [2.0, 3.2]]'], {}), '([[1.0, 0.0], [2.0, 3.2]])\n', (350, 376), True, 'import numpy as np\n'), ((712, 748), 'numpy.testing.assert_equal', 'npt.assert_equal', (['obj', 'reconstructed'], {}), '(obj, reconstructed)\n', (728, 748), True, 'from numpy import testing as npt\n'), ((390, 414), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (403, 414), False, 'import pytest\n'), ((1185, 1213), 'json.dumps', 'json.dumps', (['obj'], {'cls': 'encoder'}), '(obj, cls=encoder)\n', (1195, 1213), False, 'import json\n'), ((1242, 1275), 'json.loads', 'json.loads', (['json_str'], {'cls': 'decoder'}), '(json_str, cls=decoder)\n', (1252, 1275), False, 'import json\n'), ((1341, 1369), 'json.dumps', 'json.dumps', (['obj'], {'cls': 'encoder'}), '(obj, cls=encoder)\n', (1351, 1369), False, 'import json\n'), ((1769, 1797), 'json.dumps', 'json.dumps', (['obj'], {'cls': 'encoder'}), '(obj, cls=encoder)\n', (1779, 1797), False, 'import json\n'), ((1825, 1858), 'json.loads', 'json.loads', (['json_str'], {'cls': 'decoder'}), '(json_str, cls=decoder)\n', (1835, 1858), False, 'import json\n'), ((1989, 2023), 'numpy.array', 'np.array', (['[[1.0, 0.0], [2.0, 3.2]]'], {}), '([[1.0, 0.0], [2.0, 3.2]])\n', (1997, 2023), True, 'import numpy as np\n'), ((2046, 2062), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2054, 2062), True, 'import numpy as np\n'), ((2760, 2802), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['reconstructed', 'obj'], {}), '(reconstructed, obj)\n', (2782, 2802), True, 'from numpy import testing as npt\n'), ((2985, 3013), 'json.dumps', 'json.dumps', (['obj'], {'cls': 'encoder'}), '(obj, cls=encoder)\n', (2995, 3013), False, 'import json\n'), ((3042, 3075), 'json.loads', 'json.loads', (['json_str'], {'cls': 'decoder'}), '(json_str, cls=decoder)\n', (3052, 3075), False, 'import json\n'), ((3088, 3130), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['reconstructed', 'obj'], {}), '(reconstructed, obj)\n', (3110, 3130), True, 'from numpy import testing as npt\n'), ((3156, 3184), 'json.dumps', 'json.dumps', (['obj'], {'cls': 'encoder'}), '(obj, cls=encoder)\n', (3166, 3184), False, 'import json\n')] |
import pickle
import random
import math
import numpy as np
from nltk.stem import WordNetLemmatizer
import string
random.seed(a=101)
wordnet_lemmatizer = WordNetLemmatizer()
window_size = 3
with open('Processed_Data/vocab_and_embd.pkl', 'rb') as fp:
data = pickle.load(fp)
vocab2idx = data[0]
def vectorize(tweets):
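# map each token to its vocabulary id, falling back to the <unk> id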
vec_tweets = []
for tweet in tweets:
vec_tweet = [vocab2idx.get(word, vocab2idx['<unk>']) for word in tweet]
vec_tweets.append(vec_tweet)
return vec_tweets
def lemmatize(word):
word = word.strip("").strip(" ").strip("\t").strip("\n").strip("\b").strip("\n\n")
if word in disaster_vocab:
return word
else:
word = word.split("'s")[0]
if word == "sos" or word == "stuck":
return word
elif word in string.punctuation:
return "<NOT_IN_LIST>"
else:
l1 = wordnet_lemmatizer.lemmatize(word, 'v')
l2 = wordnet_lemmatizer.lemmatize(l1, 'a')
l3 = wordnet_lemmatizer.lemmatize(l2, 'r')
l4 = wordnet_lemmatizer.lemmatize(l3, 'n')
return l4
max_char_len = 20
with open('Processed_Data/word_to_ipa_vec.pkl', 'rb') as fp:
data = pickle.load(fp)
word2ipa_vec = data[0]
word2phono_vec = data[1]
phono_dim = data[2]
tweet_pos_vocab = ['N', 'O', 'S', '^', 'Z', 'V', 'L', 'M', 'A', 'R', '!',
'D', 'P', '&', 'T', 'X', 'Y', '~', 'U', 'E', '$', ',', 'G']
pos2idx = {}
for pos in tweet_pos_vocab:
pos2idx[pos] = len(pos2idx)
def ipafy(word):
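# look up the IPA vector of a word; out-of-vocabulary words get zeros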
pad = np.zeros((max_char_len), np.float32)
return word2ipa_vec.get(word, pad)
def phonofy(word):
pad = np.zeros((max_char_len, phono_dim), np.float32)
return word2phono_vec.get(word, pad)
def vectorize_pos(pos_tweets):
vec_pos_tweets = []
for pos_tweet in pos_tweets:
vec_pos_tweet = [pos2idx.get(pos, pos2idx[","]) for pos in pos_tweet]
vec_pos_tweets.append(vec_pos_tweet)
return vec_pos_tweets
with open('Processed_Data/Intermediate_Data.pkl', 'rb') as fp:
data = pickle.load(fp)
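# collapse the five fine-grained token labels to binary: class 0 stays 0,
# every other class becomes 1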
label2idx2label1idx = {0: 0, 1: 1, 2: 1, 3: 1, 4: 1}
train_tweets = data[0]
train_labels_2 = data[1]
train_labels_1 = []
for tweet in train_labels_2:
label_1 = [label2idx2label1idx[label] for label in tweet]
train_labels_1.append(label_1)
train_pos = data[2]
val_tweets = data[3]
val_labels_2 = data[4]
val_labels_1 = []
for tweet in val_labels_2:
val_labels_1.append([label2idx2label1idx[label] for label in tweet])
val_pos = data[5]
test_tweets = data[6]
test_labels_2 = data[7]
test_labels_1 = {}
for disaster_type in test_labels_2:
test_labels_1[disaster_type] = []
for tweet in test_labels_2[disaster_type]:
test_labels_1[disaster_type].append([label2idx2label1idx[label] for label in tweet])
test_pos = data[8]
x = [i for i in range(0, len(train_tweets))]
random.shuffle(x)
train_tweets = [train_tweets[x[i]]
for i in range(0, len(train_tweets))]
train_labels_1 = [train_labels_1[x[i]]
for i in range(0, len(train_tweets))]
train_labels_2 = [train_labels_2[x[i]]
for i in range(0, len(train_tweets))]
train_pos = [train_pos[x[i]]
for i in range(0, len(train_tweets))]
print("\nSome sample training data\n")
for i in range(0, 5):
print("Sentence: {}".format(train_tweets[i]))
print("Label_1: {}".format(train_labels_1[i]))
print("Label_2: {}".format(train_labels_2[i]))
print("POS tags: {}".format(train_pos[i]))
print("\n\n")
print("\nSome sample testing data:\n")
# for disaster in test_tweets:
# print(len(test_tweets[disaster]))
for disaster_type in test_tweets:
print("FROM {}\n".format(disaster_type))
for i in range(0, 5):
print("Sentence: {}".format(test_tweets[disaster_type][i]))
print("Label_1: {}".format(test_labels_1[disaster_type][i]))
print("Label_2: {}".format(test_labels_2[disaster_type][i]))
print("POS tags: {}".format(test_pos[disaster_type][i]))
print("\n\n")
train_ipa = [list(map(ipafy, tweet)) for tweet in train_tweets]
test_ipa = {}
for disaster_type in test_tweets:
test_ipa[disaster_type] = [list(map(ipafy, tweet)) for tweet in test_tweets[disaster_type]]
val_ipa = [list(map(ipafy, tweet)) for tweet in val_tweets]
train_phono = [list(map(phonofy, tweet)) for tweet in train_tweets]
val_phono = [list(map(phonofy, tweet)) for tweet in val_tweets]
test_phono = {}
for disaster_type in test_tweets:
test_phono[disaster_type] = [list(map(phonofy, tweet)) for tweet in test_tweets[disaster_type]]
train_pos = vectorize_pos(train_pos)
val_pos = vectorize_pos(val_pos)
test_pos_vec = {}
for disaster_type in test_tweets:
test_pos_vec[disaster_type] = vectorize_pos(test_pos[disaster_type])
test_pos = test_pos_vec
train_tweets_vec = vectorize(train_tweets)
val_tweets_vec = vectorize(val_tweets)
test_tweets_vec = {}
for disaster_type in test_tweets:
test_tweets_vec[disaster_type] = vectorize(test_tweets[disaster_type])
print("AFTER VECTORIZATION:\n\n")
print("\nSome sample training data\n")
for i in range(0, 5):
print("Sentence: {}".format(train_tweets[i]))
print("Sentence: {}".format(train_tweets_vec[i]))
print("Label_1: {}".format(train_labels_1[i]))
print("Label_2: {}".format(train_labels_2[i]))
print("POS: {}".format(train_pos[i]))
print("IPA: {}".format(train_ipa[i]))
print("Phono: {}".format(train_phono[i][0]))
print("\n\n")
print("\nSome sample testing data:\n")
for disaster_type in test_tweets:
print("FROM {}\n".format(disaster_type))
for i in range(0, 5):
print("Sentence: {}".format(test_tweets[disaster_type][i]))
print("Sentence: {}".format(test_tweets_vec[disaster_type][i]))
print("Label_1: {}".format(test_labels_1[disaster_type][i]))
print("Label_2: {}".format(test_labels_2[disaster_type][i]))
print("POS tags: {}".format(test_pos[disaster_type][i]))
print("IPA: {}".format(test_ipa[disaster_type][i]))
print("Phono: {}".format(test_phono[disaster_type][i][0]))
print("\n\n")
def set_window(tweets):
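# For every token, build a context window of window_size token ids centred
# on that token; the position just before the tweet is <sos>, the position
# just after it is <eos>, and anything further out is <pad>.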
windowed_tweets = []
for tweet in tweets:
windowed_tweet = []
for i in range(0, len(tweet)):
window_word = []
j = 0
d = window_size // 2
while j < window_size:
if j <= window_size//2-1:
if i-d == -1:
window_word.append(vocab2idx['<sos>'])
elif i-d < -1:
window_word.append(vocab2idx['<pad>'])
else:
window_word.append(tweet[i-d])
else:
# d should become non-positive here
if i-d == len(tweet):
window_word.append(vocab2idx['<eos>'])
elif i-d > len(tweet):
window_word.append(vocab2idx['<pad>'])
else:
window_word.append(tweet[i-d])
d -= 1
j += 1
windowed_tweet.append(window_word)
windowed_tweets.append(windowed_tweet)
return windowed_tweets
train_tweets_window = set_window(train_tweets_vec)
val_tweets_window = set_window(val_tweets_vec)
test_tweets_window = {}
for disaster_type in test_tweets:
test_tweets_window[disaster_type] = set_window(test_tweets_vec[disaster_type])
print("\nAFTER WINDOWING: \n")
print("\nSome sample training data\n")
for i in range(0, 5):
print("Sentence: {}".format(train_tweets[i]))
print("Sentence: {}".format(train_tweets_vec[i]))
print("Windowed Sentence: {}".format(train_tweets_window[i]))
print("Label_1: {}".format(train_labels_1[i]))
print("Label_2: {}".format(train_labels_2[i]))
print("POS: {}".format(train_pos[i]))
print("IPA: {}".format(train_ipa[i]))
print("Phono: {}".format(train_phono[i][0]))
print("\n\n")
print("\nSome sample testing data:\n")
for disaster_type in test_tweets:
print("FROM {}\n".format(disaster_type))
for i in range(0, 5):
print("Sentence: {}".format(test_tweets[disaster_type][i]))
print("Sentence: {}".format(test_tweets_vec[disaster_type][i]))
print("Sentence: {}".format(test_tweets_window[disaster_type][i]))
print("Label_1: {}".format(test_labels_1[disaster_type][i]))
print("Label_2: {}".format(test_labels_2[disaster_type][i]))
print("POS tags: {}".format(test_pos[disaster_type][i]))
print("IPA: {}".format(test_ipa[disaster_type][i]))
print("Phono: {}".format(test_phono[disaster_type][i][0]))
print("\n\n")
pickle_list = [train_tweets,
train_tweets_vec,
train_tweets_window,
train_labels_1,
train_labels_2,
train_pos,
train_ipa,
train_phono,
[],
val_tweets,
val_tweets_vec,
val_tweets_window,
val_labels_1,
val_labels_2,
val_pos,
val_ipa,
val_phono,
[],
test_tweets,
test_tweets_vec,
test_tweets_window,
test_labels_1,
test_labels_2,
test_pos,
test_ipa,
test_phono,
[]]
with open('Processed_Data/Processed_Data.pkl', 'wb') as fp:
pickle.dump(pickle_list, fp)
| [
"pickle.dump",
"nltk.stem.WordNetLemmatizer",
"random.shuffle",
"numpy.zeros",
"pickle.load",
"random.seed"
] | [((114, 132), 'random.seed', 'random.seed', ([], {'a': '(101)'}), '(a=101)\n', (125, 132), False, 'import random\n'), ((154, 173), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (171, 173), False, 'from nltk.stem import WordNetLemmatizer\n'), ((2875, 2892), 'random.shuffle', 'random.shuffle', (['x'], {}), '(x)\n', (2889, 2892), False, 'import random\n'), ((262, 277), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (273, 277), False, 'import pickle\n'), ((1210, 1225), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1221, 1225), False, 'import pickle\n'), ((1553, 1587), 'numpy.zeros', 'np.zeros', (['max_char_len', 'np.float32'], {}), '(max_char_len, np.float32)\n', (1561, 1587), True, 'import numpy as np\n'), ((1660, 1707), 'numpy.zeros', 'np.zeros', (['(max_char_len, phono_dim)', 'np.float32'], {}), '((max_char_len, phono_dim), np.float32)\n', (1668, 1707), True, 'import numpy as np\n'), ((2064, 2079), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (2075, 2079), False, 'import pickle\n'), ((9524, 9552), 'pickle.dump', 'pickle.dump', (['pickle_list', 'fp'], {}), '(pickle_list, fp)\n', (9535, 9552), False, 'import pickle\n')] |
from typing import Tuple, Union, Iterable, List, Callable, Dict, Optional
import os
import json
import copy
import numpy as np
import scipy.stats as spstats
import scipy.interpolate as spinterpol
from nnuncert.models._pred_base import BasePred, PredConditionalGaussian
from nnuncert.models.nlm import NLM
from nnuncert.models.pnn import PNN
class Ensemble():
def __init__(self, N: int):
self.num_models = N
self._model_paras = {"num_models" : N}
def compile(self, *args, **kwargs):
"""Compile all ensemble members."""
# compile all ensemble members
[m.compile(*args, **kwargs) for m in self.models]
def save(self, path: str, *args, **kwargs):
"""Save model to path."""
# save all ensemble members individually
for i, m in enumerate(self.models):
m.save(os.path.join(path, str(i)), *args, **kwargs)
def save_model_parameters(self, path: str, **kw):
"""Save model paramaters as .json to path.
Further key-value pairs can be added with kw.
"""
# create folder
os.makedirs(path, exist_ok=True)
# add kw
d = copy.deepcopy(self._model_paras)
d.update(kw)
# dump settings to path
path = os.path.join(path, "settings.json")
with open(path, "w") as f:
json.dump({k: d[k] for k in sorted(d)}, f, indent=2)
def fit(self,
x_train: np.ndarray,
y_train: np.ndarray,
path: Optional[str] = None,
*args, **kwargs):
"""Fit all ensebmle members to data.
Parameters
----------
x_train : np.ndarray
y_train : np.ndarray
path : Optional[str]
If 'path' is given, ensemble members will be loaded from 'path'.
"""
# load/fit all ensemble members
for i, m in enumerate(self.models):
# 'path' given -> load ensemble member
if path is not None:
pathi = os.path.join(path, str(i))
# 'path' is None -> fit ensemble member
else:
pathi = None
m.fit(x_train, y_train, path=pathi, *args, **kwargs)
def pred(self, x: np.ndarray):
"""Get predictive means and variance for all ensemble members."""
# get predictions for all ensemble members for x
pred = [m.make_prediction(x) for m in self.models]
# extract means and variances
means = np.vstack(([p.pred_mean for p in pred])).T
vars = np.vstack(([p.var_total for p in pred])).T
return means, vars
def make_prediction(self, x:np.ndarray, method="gauss", *args, **kw):
"""Get prediction object for features 'x'."""
if method == "gauss":
return EnsPredGauss(self, x, *args, **kw)
elif method == "gmm":
return PredEnsGMM(self, x, *args, **kw)
raise ValueError()
class PNNEnsemble(Ensemble):
# NOTE: footnote 9 of the deep-ensembles paper measures member
# disagreement via KL divergence between the members' predictive
# distributions
def __init__(self, net, N: int = 5, *args, **kwargs):
super(PNNEnsemble, self).__init__(N)
def make_dnn(net):
return PNN(net, *args, **kwargs)
# make 'N' PNNs
self.models = [make_dnn(net.clone_with_prefix(str(i)))
for i in range(N)]
class NLMEnsemble(Ensemble):
def __init__(self, net, N: int = 5, *args, **kwargs):
super(NLMEnsemble, self).__init__(N)
def make_nlm(net):
return NLM(net, *args, **kwargs)
# make 'N' NLMs
self.models = [make_nlm(net.clone_with_prefix(str(i)))
for i in range(N)]
def fit(self,
x_train: np.ndarray,
y_train: np.ndarray,
path: Optional[str] = None,
*args, **kwargs):
"""Fit all ensebmle members to data.
Parameters
----------
x_train : np.ndarray
y_train : np.ndarray
path : Optional[str]
If 'path' is given, ensemble members will be loaded from 'path'.
"""
# load/fit all ensemble members
for i, m in enumerate(self.models):
# 'path' given -> load ensemble member
if path is not None:
pathi = os.path.join(path, str(i))
# 'path' is None -> fit ensemble member
else:
pathi = None
m.fit(x_train, y_train, path=pathi, *args, **kwargs)
self._model_paras["tau2"] = copy.copy(self.models[0]._model_paras["tau2"])
self._model_paras["val_ratio"] = copy.copy(self.models[0]._model_paras["val_ratio"])
class EnsPredGauss(PredConditionalGaussian):
def __init__(self, ens, x):
self.xlen = x.shape[0]
# make Gaussians from means and variances
self.means, self.vars = ens.pred(x)
self._make_gaussians()
@property
def var_epistemic(self):
return None
@property
def var_aleatoric(self):
return None
@property
def var_total(self):
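# law of total variance for a uniform mixture:
# Var[y] = mean_i(sigma_i^2 + mu_i^2) - (mean_i(mu_i))^2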
return (self.vars + self.means**2).mean(axis=1) - (self.pred_mean**2)
@property
def pred_mean(self):
return self.means.mean(axis=1).ravel()
# ███ ███ █████ ██ ██ ██████ ███████
# ████ ████ ██ ██ ██ ██ ██ ██ ██
# ██ ████ ██ ███████ ████ ██████ █████
# ██ ██ ██ ██ ██ ██ ██ ██ ██
# ██ ██ ██ ██ ██ ██████ ███████
class GMM():
def __init__(self, means, vars, weights=None):
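# a mixture of at most 10 univariate Gaussians, equally weighted by default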
assert len(means) == len(vars) <= 10
self.means = np.array(means)
self.vars = np.array(vars)
if weights is None:
self.weights = np.array([1/self.n]*self.n)
else:
self.weights = weights
@property
def components(self):
return list(zip(self.weights, self.means, self.vars**0.5))
@property
def bounds(self):
left = min([spstats.norm.ppf(0.001, m, v) for (_, m, v) in self.components])
right = max([spstats.norm.ppf(0.999, m, v) for (_, m, v) in self.components])
return (left, right)
@property
def E(self):
return np.mean(self.means)
@property
def Var(self):
return np.mean(self.means**2 + self.vars) - self.E**2
@property
def n(self):
return len(self.means)
def pdf(self, x):
return np.sum([w*spstats.norm.pdf(x, m, s) for (w, m, s) in self.components], axis=0)
def cdf(self, x):
return np.sum([w*spstats.norm.cdf(x, m, s) for (w, m, s) in self.components], axis=0)
def ppf(self, q):
assert q <= 0.05 or q >= 0.95
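# invert the CDF numerically: evaluate it on a grid covering the relevant
# tail, then interpolate the quantile back to x (hence the tail-only assert)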
if q < 0.5:
left = min([spstats.norm.ppf(0.001, m, v) for (_, m, v) in self.components])
x0 = np.linspace(left, min(self.means), 100)
else:
right = max([spstats.norm.ppf(0.999, m, v) for (_, m, v) in self.components])
x0 = np.linspace(max(self.means), right, 100)
Fx0 = self.cdf(x0)
return spinterpol.interp1d(Fx0, x0)(q)
class PredEnsGMM(BasePred):
def __init__(self, ens, x):
self.xlen = x.shape[0]
self.means, self.vars = ens.pred(x)
self.gmms = [GMM(m, v) for (m, v) in list(zip(self.means, self.vars))]
@property
def var_total(self):
return (self.vars + self.means**2).mean(axis=1) - (self.pred_mean**2)
@property
def pred_mean(self):
return self.means.mean(axis=1)
def pdf(self, y):
return np.array([gmm.pdf(y_) for (gmm, y_) in list(zip(self.gmms, y))])
def logpdf(self, y):
return np.log(self.pdf(y))
def cdf(self, y):
return np.array([gmm.cdf(y_) for (gmm, y_) in list(zip(self.gmms, y))])
def ppf(self, q, *args, **kwds):
return np.array([gmm.ppf(q) for gmm in self.gmms])
def pdfi(self, i, y):
return self.gmms[i].pdf(y)
def cdfi(self, i, y):
return self.gmms[i].cdf(y)
| [
"nnuncert.models.pnn.PNN",
"scipy.stats.norm.ppf",
"copy.deepcopy",
"os.makedirs",
"nnuncert.models.nlm.NLM",
"copy.copy",
"scipy.stats.norm.pdf",
"scipy.stats.norm.cdf",
"numpy.mean",
"numpy.array",
"os.path.join",
"numpy.vstack"
] | [((1059, 1091), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (1070, 1091), False, 'import os\n'), ((1122, 1154), 'copy.deepcopy', 'copy.deepcopy', (['self._model_paras'], {}), '(self._model_paras)\n', (1135, 1154), False, 'import copy\n'), ((1224, 1259), 'os.path.join', 'os.path.join', (['path', '"""settings.json"""'], {}), "(path, 'settings.json')\n", (1236, 1259), False, 'import os\n'), ((4517, 4563), 'copy.copy', 'copy.copy', (["self.models[0]._model_paras['tau2']"], {}), "(self.models[0]._model_paras['tau2'])\n", (4526, 4563), False, 'import copy\n'), ((4605, 4656), 'copy.copy', 'copy.copy', (["self.models[0]._model_paras['val_ratio']"], {}), "(self.models[0]._model_paras['val_ratio'])\n", (4614, 4656), False, 'import copy\n'), ((5589, 5604), 'numpy.array', 'np.array', (['means'], {}), '(means)\n', (5597, 5604), True, 'import numpy as np\n'), ((5625, 5639), 'numpy.array', 'np.array', (['vars'], {}), '(vars)\n', (5633, 5639), True, 'import numpy as np\n'), ((6164, 6183), 'numpy.mean', 'np.mean', (['self.means'], {}), '(self.means)\n', (6171, 6183), True, 'import numpy as np\n'), ((2442, 2480), 'numpy.vstack', 'np.vstack', (['[p.pred_mean for p in pred]'], {}), '([p.pred_mean for p in pred])\n', (2451, 2480), True, 'import numpy as np\n'), ((2500, 2538), 'numpy.vstack', 'np.vstack', (['[p.var_total for p in pred]'], {}), '([p.var_total for p in pred])\n', (2509, 2538), True, 'import numpy as np\n'), ((3188, 3213), 'nnuncert.models.pnn.PNN', 'PNN', (['net', '*args'], {}), '(net, *args, **kwargs)\n', (3191, 3213), False, 'from nnuncert.models.pnn import PNN\n'), ((3524, 3549), 'nnuncert.models.nlm.NLM', 'NLM', (['net', '*args'], {}), '(net, *args, **kwargs)\n', (3527, 3549), False, 'from nnuncert.models.nlm import NLM\n'), ((5695, 5726), 'numpy.array', 'np.array', (['([1 / self.n] * self.n)'], {}), '([1 / self.n] * self.n)\n', (5703, 5726), True, 'import numpy as np\n'), ((6233, 6269), 'numpy.mean', 'np.mean', (['(self.means ** 2 + self.vars)'], {}), '(self.means ** 2 + self.vars)\n', (6240, 6269), True, 'import numpy as np\n'), ((5937, 5966), 'scipy.stats.norm.ppf', 'spstats.norm.ppf', (['(0.001)', 'm', 'v'], {}), '(0.001, m, v)\n', (5953, 5966), True, 'import scipy.stats as spstats\n'), ((6023, 6052), 'scipy.stats.norm.ppf', 'spstats.norm.ppf', (['(0.999)', 'm', 'v'], {}), '(0.999, m, v)\n', (6039, 6052), True, 'import scipy.stats as spstats\n'), ((6391, 6416), 'scipy.stats.norm.pdf', 'spstats.norm.pdf', (['x', 'm', 's'], {}), '(x, m, s)\n', (6407, 6416), True, 'import scipy.stats as spstats\n'), ((6508, 6533), 'scipy.stats.norm.cdf', 'spstats.norm.cdf', (['x', 'm', 's'], {}), '(x, m, s)\n', (6524, 6533), True, 'import scipy.stats as spstats\n'), ((6682, 6711), 'scipy.stats.norm.ppf', 'spstats.norm.ppf', (['(0.001)', 'm', 'v'], {}), '(0.001, m, v)\n', (6698, 6711), True, 'import scipy.stats as spstats\n'), ((6843, 6872), 'scipy.stats.norm.ppf', 'spstats.norm.ppf', (['(0.999)', 'm', 'v'], {}), '(0.999, m, v)\n', (6859, 6872), True, 'import scipy.stats as spstats\n')] |
#-*- coding:utf-8 -*-
# &Author AnFany
# supports multi-dimensional outputs
from BPNN_DATA_Reg import model_data as R_data
import numpy as np
import tensorflow as tf
'''Part 1: data preparation'''
train_x_data = R_data[0] # training inputs
train_y_data = R_data[1] # training outputs
predict_x_data = R_data[2] # test inputs
predict_y_data = R_data[3] # test outputs
'''Part 2: build the training function on top of TensorFlow'''
# apply the chosen activation function to one layer
def activate(input_layer, weights, biases, actfunc):
layer = tf.add(tf.matmul(input_layer, weights), biases)
if actfunc == 'relu':
return tf.nn.relu(layer)
elif actfunc == 'tanh':
return tf.nn.tanh(layer)
elif actfunc == 'sigmoid':
return tf.nn.sigmoid(layer)
# The weight-initialization scheme depends heavily on the activation function:
# sigmoid: xavier   tanh: xavier   relu: he
# build the training function
def Ten_train(xdata, ydata, prexdata, hiddenlayers=3, hiddennodes=100, \
learn_rate=0.05, itertimes=100000, batch_size=200, activate_func='sigmoid', break_error=0.0043):
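# Train a fully connected network with mini-batch Adam and return
# (training-set outputs, test-set outputs, per-iteration loss history).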
# start assembling the network
Input_Dimen = len(xdata[0])
Unit_Layers = [Input_Dimen] + [hiddennodes] * hiddenlayers + [len(ydata[0])] # input dim, hidden-layer widths, output dim
# create placeholders
x_data = tf.placeholder(shape=[None, Input_Dimen], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, len(ydata[0])], dtype=tf.float32)
# dynamically named variables (stored via locals())
VAR_NAME = locals()
for jj in range(hiddenlayers + 1):
VAR_NAME['weight%s' % jj] = tf.Variable(np.random.rand(Unit_Layers[jj], Unit_Layers[jj + 1]), dtype=tf.float32,\
name='weight%s' % jj) / np.sqrt(Unit_Layers[jj]) # sigmoid tanh
# VAR_NAME['weight%s'%jj] = tf.Variable(np.random.rand(Unit_Layers[jj], Unit_Layers[jj + 1]), dtype=tf.float32,name='weight%s' % jj) / np.sqrt(Unit_Layers[jj] / 2) # relu
VAR_NAME['bias%s' % jj] = tf.Variable(tf.random_normal([Unit_Layers[jj + 1]], stddev=10, name='bias%s' % jj),
dtype=tf.float32)
if jj == 0:
VAR_NAME['ooutda%s' % jj] = activate(x_data, eval('weight%s' % jj), eval('bias%s' % jj), actfunc=activate_func)
else:
VAR_NAME['ooutda%s' % jj] = activate(eval('ooutda%s' % (jj - 1)), eval('weight%s' % jj), \
eval('bias%s' % jj), actfunc=activate_func)
# mean squared error
loss = tf.reduce_mean(tf.reduce_sum(tf.square(y_target - eval('ooutda%s' % (hiddenlayers))), reduction_indices=[1]))
# optimization method
my_opt = tf.train.AdamOptimizer(learn_rate)
train_step = my_opt.minimize(loss)
# variable initialization
init = tf.global_variables_initializer()
loss_vec = [] # training-loss history
with tf.Session() as sess:
saver = tf.train.Saver()
sess.run(init)
for i in range(itertimes):
rand_index = np.random.choice(len(xdata), size=batch_size, replace=False)
rand_x = xdata[rand_index]
rand_y = ydata[rand_index]
sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
temp_loss = sess.run(loss, feed_dict={x_data: xdata, y_target: ydata})
loss_vec.append(temp_loss)
# print the loss periodically to monitor training
if (i + 1) % 25 == 0:
print('Generation: ' + str(i + 1) + '. 归一误差:Loss = ' + str(temp_loss))
# early-stopping check
if temp_loss < break_error: # threshold chosen empirically; with stochastic descent the loss can fluctuate early on
break
# compute the network outputs for the test data
pre_in_data0 = np.array(prexdata, dtype=np.float32)
for ipre in range(hiddenlayers + 1):
VAR_NAME['pre_in_data%s' % (ipre + 1)] = activate(eval('pre_in_data%s' % ipre), eval('weight%s' % ipre).eval(),\
eval('bias%s' % ipre).eval(), actfunc=activate_func)
# compute the network outputs for the training data
train_in_data0 = np.array(xdata, dtype=np.float32)
for ipre in range(hiddenlayers + 1):
VAR_NAME['train_in_data%s' % (ipre + 1)] = activate(eval('train_in_data%s' % ipre), eval('weight%s' % ipre).eval(),\
eval('bias%s' % ipre).eval(), actfunc=activate_func)
return eval('train_in_data%s'%(hiddenlayers+1)).eval(), eval('pre_in_data%s'%(hiddenlayers+1)).eval(), loss_vec
'''Part 3: plotting functions'''
import matplotlib.pyplot as plt
from pylab import mpl # lets matplotlib display Chinese text in plots
mpl.rcParams['font.sans-serif'] = ['FangSong'] # set the FangSong Chinese font
# plot real values against network outputs
def figure(real, net, le='训练', real_line='ko-', net_line='r.-', width=3):
length = len(real[0])
# draw a comparison plot for every output dimension
for iwe in range(length):
plt.subplot(length, 1, iwe+1)
plt.plot(list(range(len(real.T[iwe]))), real.T[iwe], real_line, linewidth=width)
plt.plot(list(range(len(net.T[iwe]))), net.T[iwe], net_line, linewidth=width - 1)
plt.legend(['%s真实值'%le, '网络输出值'])
if length == 1:
plt.title('%s结果对比'%le)
else:
if iwe == 0:
plt.title('%s结果: %s维度对比'%(le, iwe))
else:
plt.title('%s维度对比'%iwe)
plt.show()
# plot the loss curve
def costfig(errlist, le='成本函数曲线图'):
plt.plot(list(range(len(errlist))), errlist, linewidth=3)
plt.title(le)
plt.xlabel('迭代次数')
plt.ylabel('成本函数值')
plt.show()
# the training set is large, so randomly sample a fixed number of points to keep the plots readable
def select(datax, datay, count=200):
sign = list(range(len(datax)))
selectr_sign = np.random.choice(sign, count, replace=False)
return datax[selectr_sign], datay[selectr_sign]
# rescale normalized outputs back to the range of the original data
def trans(ydata, minumber=R_data[4][0], maxumber=R_data[4][1]):
return ydata * (maxumber - minumber) + minumber
if __name__ == '__main__':
# train
tfrelu = Ten_train(train_x_data, train_y_data, predict_x_data)
# rescale the true targets
train_y_data_tran = trans(train_y_data)
predict_y_data_tran = trans(predict_y_data)
# rescale the network predictions
train_output = trans(tfrelu[0])
predict_output = trans(tfrelu[1])
# too many points to plot; randomly sample a subset
random_train_x_data = select(train_output, train_y_data_tran, 200)
random_predict_x_data = select(predict_output, predict_y_data_tran, 100)
figure(random_train_x_data[1], random_train_x_data[0], le='训练')
figure(random_predict_x_data[1], random_predict_x_data[0], le='预测')
costfig(tfrelu[2])
| [
"matplotlib.pyplot.title",
"tensorflow.nn.tanh",
"tensorflow.train.AdamOptimizer",
"tensorflow.matmul",
"matplotlib.pyplot.xlabel",
"tensorflow.nn.relu",
"tensorflow.placeholder",
"numpy.random.choice",
"matplotlib.pyplot.show",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer"... | [((1111, 1170), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, Input_Dimen]', 'dtype': 'tf.float32'}), '(shape=[None, Input_Dimen], dtype=tf.float32)\n', (1125, 1170), True, 'import tensorflow as tf\n'), ((2461, 2495), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learn_rate'], {}), '(learn_rate)\n', (2483, 2495), True, 'import tensorflow as tf\n'), ((2561, 2594), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2592, 2594), True, 'import tensorflow as tf\n'), ((5082, 5092), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5090, 5092), True, 'import matplotlib.pyplot as plt\n'), ((5213, 5226), 'matplotlib.pyplot.title', 'plt.title', (['le'], {}), '(le)\n', (5222, 5226), True, 'import matplotlib.pyplot as plt\n'), ((5232, 5250), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""迭代次数"""'], {}), "('迭代次数')\n", (5242, 5250), True, 'import matplotlib.pyplot as plt\n'), ((5256, 5275), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""成本函数值"""'], {}), "('成本函数值')\n", (5266, 5275), True, 'import matplotlib.pyplot as plt\n'), ((5281, 5291), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5289, 5291), True, 'import matplotlib.pyplot as plt\n'), ((5430, 5474), 'numpy.random.choice', 'np.random.choice', (['sign', 'count'], {'replace': '(False)'}), '(sign, count, replace=False)\n', (5446, 5474), True, 'import numpy as np\n'), ((428, 459), 'tensorflow.matmul', 'tf.matmul', (['input_layer', 'weights'], {}), '(input_layer, weights)\n', (437, 459), True, 'import tensorflow as tf\n'), ((512, 529), 'tensorflow.nn.relu', 'tf.nn.relu', (['layer'], {}), '(layer)\n', (522, 529), True, 'import tensorflow as tf\n'), ((2636, 2648), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2646, 2648), True, 'import tensorflow as tf\n'), ((2675, 2691), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2689, 2691), True, 'import tensorflow as tf\n'), ((3460, 3496), 'numpy.array', 'np.array', (['prexdata'], {'dtype': 'np.float32'}), '(prexdata, dtype=np.float32)\n', (3468, 3496), True, 'import numpy as np\n'), ((3837, 3870), 'numpy.array', 'np.array', (['xdata'], {'dtype': 'np.float32'}), '(xdata, dtype=np.float32)\n', (3845, 3870), True, 'import numpy as np\n'), ((4608, 4639), 'matplotlib.pyplot.subplot', 'plt.subplot', (['length', '(1)', '(iwe + 1)'], {}), '(length, 1, iwe + 1)\n', (4619, 4639), True, 'import matplotlib.pyplot as plt\n'), ((4828, 4863), 'matplotlib.pyplot.legend', 'plt.legend', (["['%s真实值' % le, '网络输出值']"], {}), "(['%s真实值' % le, '网络输出值'])\n", (4838, 4863), True, 'import matplotlib.pyplot as plt\n'), ((575, 592), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['layer'], {}), '(layer)\n', (585, 592), True, 'import tensorflow as tf\n'), ((1529, 1553), 'numpy.sqrt', 'np.sqrt', (['Unit_Layers[jj]'], {}), '(Unit_Layers[jj])\n', (1536, 1553), True, 'import numpy as np\n'), ((1800, 1870), 'tensorflow.random_normal', 'tf.random_normal', (['[Unit_Layers[jj + 1]]'], {'stddev': '(10)', 'name': "('bias%s' % jj)"}), "([Unit_Layers[jj + 1]], stddev=10, name='bias%s' % jj)\n", (1816, 1870), True, 'import tensorflow as tf\n'), ((4900, 4924), 'matplotlib.pyplot.title', 'plt.title', (["('%s结果对比' % le)"], {}), "('%s结果对比' % le)\n", (4909, 4924), True, 'import matplotlib.pyplot as plt\n'), ((641, 661), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['layer'], {}), '(layer)\n', (654, 661), True, 'import tensorflow as tf\n'), ((1383, 1435), 'numpy.random.rand', 'np.random.rand', 
(['Unit_Layers[jj]', 'Unit_Layers[jj + 1]'], {}), '(Unit_Layers[jj], Unit_Layers[jj + 1])\n', (1397, 1435), True, 'import numpy as np\n'), ((4981, 5018), 'matplotlib.pyplot.title', 'plt.title', (["('%s结果: %s维度对比' % (le, iwe))"], {}), "('%s结果: %s维度对比' % (le, iwe))\n", (4990, 5018), True, 'import matplotlib.pyplot as plt\n'), ((5053, 5078), 'matplotlib.pyplot.title', 'plt.title', (["('%s维度对比' % iwe)"], {}), "('%s维度对比' % iwe)\n", (5062, 5078), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
INST_SIZE = 4
instructions = np.loadtxt('input.csv', delimiter=',', dtype=np.int)
def run_program(instructions, noun=None, verb=None):
# write the noun and verb parameters into addresses 1 and 2 before running
instructions[1] = noun
instructions[2] = verb
ip = 0
while True:
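# each instruction occupies INST_SIZE cells: an opcode, two operand
# addresses and a destination address (opcode 1 = add, 2 = multiply,
# 99 = halt)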
opcode, param1, param2, dst = instructions[ip:ip + INST_SIZE]
if opcode == 1:
instructions[dst] = instructions[param1] + instructions[param2]
elif opcode == 2:
instructions[dst] = instructions[param1] * instructions[param2]
elif opcode == 99:
return instructions[0]
else:
raise ValueError(f"Unknown opcode: {opcode}")
ip += INST_SIZE
if __name__ == "__main__":
print(run_program(instructions.copy(), noun=12, verb=2))
| [
"numpy.loadtxt"
] | [((50, 102), 'numpy.loadtxt', 'np.loadtxt', (['"""input.csv"""'], {'delimiter': '""","""', 'dtype': 'np.int'}), "('input.csv', delimiter=',', dtype=np.int)\n", (60, 102), True, 'import numpy as np\n')] |