code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Copyright 2017- IBM Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .attention import Attention
from .beam_search import Beam
class AttendSpellRNN(nn.Module):
    r"""
    Provides functionality for decoding in a seq2seq framework, with an option for attention.
    Args:
        vocab_size (int): size of the vocabulary
        max_len (int): a maximum allowed length for the sequence to be processed
        hidden_size (int): the number of features in the hidden state `h`
        sos_id (int): index of the start of sentence symbol
        eos_id (int): index of the end of sentence symbol
        n_layers (int, optional): number of recurrent layers (default: 1)
        rnn_cell (str, optional): type of RNN cell (default: gru)
        input_dropout_p (float, optional): dropout probability for the input sequence (default: 0)
        dropout_p (float, optional): dropout probability for the output sequence (default: 0)
    Attributes:
        KEY_ATTN_SCORE (str): key used to indicate attention weights in `ret_dict`
        KEY_LENGTH (str): key used to indicate a list representing lengths of output sequences in `ret_dict`
        KEY_SEQUENCE (str): key used to indicate a list of sequences in `ret_dict`
    Inputs: inputs, encoder_hidden, encoder_outputs, function, teacher_forcing_ratio
        - **inputs** (batch, seq_len, input_size): list of sequences, whose length is the batch size and within which
          each sequence is a list of token IDs. It is used for teacher forcing when provided. (default `None`)
        - **encoder_hidden** (num_layers * num_directions, batch_size, hidden_size): tensor containing the features in the
          hidden state `h` of encoder. Used as the initial hidden state of the decoder. (default `None`)
        - **encoder_outputs** (batch, seq_len, hidden_size): tensor with containing the outputs of the encoder.
          Used for attention mechanism (default is `None`).
        - **teacher_forcing_ratio** (float): The probability that teacher forcing will be used. A random number is
          drawn uniformly from 0-1 for every decoding token, and if the sample is smaller than the given value,
          teacher forcing would be used (default is 0).
    Outputs: decoder_outputs, decoder_hidden, ret_dict
        - **decoder_outputs** (batch, seq_len, vocab_size): list of tensors with size (batch_size, vocab_size) containing
          the outputs of the decoding function.
        - **decoder_hidden** (num_layers * num_directions, batch, hidden_size): tensor containing the last hidden
          state of the decoder.
        - **ret_dict**: dictionary containing additional information as follows {*KEY_LENGTH* : list of integers
          representing lengths of output sequences, *KEY_SEQUENCE* : list of sequences, where each sequence is a list of
          predicted token IDs }.
    """
    # Keys for the auxiliary-information dictionary returned by forward().
    KEY_ATTN_SCORE = 'attention_score'
    KEY_LENGTH = 'length'
    KEY_SEQUENCE = 'sequence'
    BEAM_INDEX = 'beam_index'
    PROBABILITY = 'probability'

    def __init__(self, vocab_size, max_len, hidden_size, sos_id, eos_id, n_layers=2, rnn_cell='gru',
                 embedding_size=512, input_dropout_p=0, dropout_p=0, beam_width=1, device='cpu'):
        super().__init__()
        self.device = device
        self.hidden_size = hidden_size
        self.max_length = max_len
        self.eos_id = eos_id
        self.sos_id = sos_id
        self.init_input = None
        self.n_layers = n_layers
        self.beam_width = beam_width
        if rnn_cell.lower() == 'lstm':
            self.rnn_cell = nn.LSTM
        elif rnn_cell.lower() == 'gru':
            self.rnn_cell = nn.GRU
        else:
            raise ValueError("Unsupported RNN Cell: {0}".format(rnn_cell))
        # The decoder stack is split into a 1-layer "bottom" RNN (which consumes
        # the attention context) and an (n_layers-1)-layer "upper" RNN, so at
        # least 2 layers are required.
        assert n_layers > 1
        # Inter-layer dropout is meaningless with a single upper layer.
        dropout_p = 0 if n_layers == 2 else dropout_p
        self.bottom_rnn = self.rnn_cell(hidden_size + embedding_size, hidden_size, batch_first=True)
        self.upper_rnn = self.rnn_cell(hidden_size, hidden_size, n_layers-1, batch_first=True, dropout=dropout_p)
        # TODO: expose the word-embedding dimension as a constructor parameter and use it here
        self.embedding = nn.Embedding(vocab_size, embedding_size)
        self.input_dropout = nn.Dropout(p=input_dropout_p)
        self.attention = Attention(self.hidden_size)
        self.out = nn.Linear(self.hidden_size, vocab_size)

    def forward_step(self, input_var, last_bottom_hidden, last_upper_hidden, encoder_outputs, function):
        """Run a single (greedy / teacher-forced) decoding step.

        Returns (vocabulary scores, new bottom hidden, new upper hidden, attention weights).
        """
        # input_var = [list of int] = [B]
        # last_~~~_hidden = [layer x B x hidden_size]
        # encoder_outputs = [B x max_len x hidden_dim]
        embedded = self.embedding(input_var)
        embedded = self.input_dropout(embedded).unsqueeze(1) # B x 1 x H
        # if self.training:
        self.bottom_rnn.flatten_parameters()
        self.upper_rnn.flatten_parameters()
        attn = self.attention(encoder_outputs, last_bottom_hidden) # (batch, max_len)
        context = attn.unsqueeze(1).bmm(encoder_outputs) # B x 1 x H
        # Feed the embedding concatenated with the attention context to the bottom RNN.
        x = torch.cat([embedded, context], 2) # B x 1 x (2 * H)
        x, bottom_hidden = self.bottom_rnn(x, last_bottom_hidden)
        x, upper_hidden = self.upper_rnn(x, last_upper_hidden) # B x 1 x H
        predicted_prob = function(self.out(x.squeeze(1)), dim=-1) # B x vocab_size
        return predicted_prob, bottom_hidden, upper_hidden, attn

    def forward_step_beam(self, input_var, last_bottom_hidden, last_upper_hidden, encoder_outputs, function):
        """Run a single decoding step with the batch tiled beam_width times.

        Identical to forward_step() except that encoder_outputs is repeated so
        that row layout matches the (batch * beam_width) hypothesis ordering.
        """
        # input_var = [list of int] = [B]
        # last_~~~_hidden = [layer x B x hidden_size]
        # encoder_outputs = [B x max_len x hidden_dim]
        embedded = self.embedding(input_var)
        embedded = self.input_dropout(embedded).unsqueeze(1) # B x 1 x H
        batch_size, encoder_length, encoder_hidden_dim = encoder_outputs.size()
        # Tile encoder outputs to (beam_width * batch) rows, grouped per sample.
        encoder_outputs = encoder_outputs.repeat(self.beam_width, 1, 1)
        encoder_outputs = encoder_outputs.view(self.beam_width, batch_size, encoder_length, encoder_hidden_dim)
        encoder_outputs = encoder_outputs.transpose(0, 1)
        encoder_outputs = encoder_outputs.reshape(self.beam_width * batch_size, encoder_length, encoder_hidden_dim)
        # if self.training:
        self.bottom_rnn.flatten_parameters()
        self.upper_rnn.flatten_parameters()
        attn = self.attention(encoder_outputs, last_bottom_hidden) # (batch, max_len)
        context = attn.unsqueeze(1).bmm(encoder_outputs) # B x 1 x H
        x = torch.cat([embedded, context], 2) # B x 1 x (2 * H)
        x, bottom_hidden = self.bottom_rnn(x, last_bottom_hidden)
        x, upper_hidden = self.upper_rnn(x, last_upper_hidden) # B x 1 x H
        predicted_prob = function(self.out(x.squeeze(1)), dim=-1) # B x vocab_size
        return predicted_prob, bottom_hidden, upper_hidden, attn

    def forward(self, inputs=None, encoder_hidden=None, encoder_outputs=None, function=F.log_softmax, teacher_forcing_ratio=0):
        """Decode a whole sequence.

        Training uses greedy / teacher-forced stepping; evaluation uses beam
        search. Returns (decoder_outputs, hyps, bottom_hidden, upper_hidden).
        """
        ret_dict = dict()
        ret_dict[AttendSpellRNN.KEY_ATTN_SCORE] = list()
        ret_dict[AttendSpellRNN.BEAM_INDEX] = list()
        ret_dict[AttendSpellRNN.KEY_SEQUENCE] = list()
        ret_dict[AttendSpellRNN.PROBABILITY] = list()
        inputs, batch_size, max_length = self._validate_args(inputs, encoder_outputs, teacher_forcing_ratio)
        use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
        decoder_outputs = []
        sequence_symbols = []
        lengths = np.array([max_length] * batch_size)

        def decode(step, step_output, step_attn):
            # Record one step's output and pick the argmax symbol per sample.
            decoder_outputs.append(step_output)
            ret_dict[AttendSpellRNN.KEY_ATTN_SCORE].append(step_attn)
            # TODO: add beam search here as well
            symbols = step_output.topk(1)[1] # topk(n): [0] is the values, [1] is the indices
            sequence_symbols.append(symbols)
            eos_batches = symbols.data.eq(self.eos_id)
            if eos_batches.dim() > 0:
                eos_batches = eos_batches.cpu().view(-1).numpy()
                update_idx = ((lengths > step) & eos_batches) != 0
                lengths[update_idx] = len(sequence_symbols) # update to this length where eos first appears
            return symbols

        if self.training:
            bottom_hidden, upper_hidden = self._init_state_zero(batch_size)
            if use_teacher_forcing:
                # Feed the ground-truth previous token at every step.
                decoder_input = inputs[:, :-1]
                for di in range(max_length):
                    decoder_output, bottom_hidden, upper_hidden, attn \
                        = self.forward_step(decoder_input[:, di], bottom_hidden, upper_hidden, encoder_outputs,
                                            function)
                    decode(di, decoder_output, attn)
            else:
                # Feed back the model's own prediction from the previous step.
                decoder_input = inputs[:, 0]
                for di in range(max_length):
                    decoder_output, bottom_hidden, upper_hidden, step_attn \
                        = self.forward_step(decoder_input, bottom_hidden, upper_hidden, encoder_outputs, function)
                    symbols = decode(di, decoder_output, step_attn) # batch x 1
                    decoder_input = symbols.squeeze(1)
            ret_dict[AttendSpellRNN.KEY_SEQUENCE] = sequence_symbols
            ret_dict[AttendSpellRNN.KEY_LENGTH] = lengths.tolist()
            decoder_outputs_temp = torch.stack(decoder_outputs, dim=1) # batch x seq_len x vocab_size
            hyps = decoder_outputs_temp.max(-1)[1]
        else:
            # Inference path: batched beam search, one Beam object per sample.
            bottom_hidden, upper_hidden = self._init_state_zero_beam(batch_size, self.beam_width)
            beam = [
                Beam(self.beam_width, self.sos_id, self.eos_id, cuda=True)
                for _ in range(batch_size)
            ]
            for di in range(max_length):
                # if all((b.done for b in beam)):
                #     break
                # (a) Construct batch x beam_size nxt words.
                # Get all the pending current beam words and arrange for forward.
                decoder_input = torch.stack([b.current_predictions for b in beam]).to(self.device)
                decoder_input = decoder_input.view(-1)
                decoder_output, bottom_hidden, upper_hidden, step_attn \
                    = self.forward_step_beam(decoder_input, bottom_hidden, upper_hidden, encoder_outputs, function)
                decoder_output = decoder_output.view(batch_size, self.beam_width, -1)
                step_attn = step_attn.view(batch_size, self.beam_width, -1)
                select_indices_array = []
                # Loop over the batch_size number of beam
                for j, b in enumerate(beam):
                    b.advance(decoder_output[j, :], step_attn.data[j, :, :])
                    # Offset each beam's parent indices into the flat (batch*beam) layout.
                    select_indices_array.append(
                        list(map(lambda x: x + j * self.beam_width, b.current_origin))
                    )
                select_indices = torch.tensor(select_indices_array, dtype=torch.int64).view(-1).to(self.device)
                # Reorder hidden states to follow the surviving hypotheses.
                bottom_hidden, upper_hidden = self._select_indices_hidden(select_indices, bottom_hidden, upper_hidden)
            for b in beam:
                # Take the single best finished hypothesis of each beam.
                _, ks = b.sort_finished()
                times, k = ks[0]
                hyp, beam_index, prob = b.get_hyp(times, k)
                prob = torch.stack(prob)
                prob = b.fill_empty_sequence(prob, max_length) # pad the sequence out to max_length
                hyp = torch.stack(hyp)
                hyp = b.fill_empty_sequence(hyp, max_length)
                ret_dict[AttendSpellRNN.PROBABILITY].append(prob)
                ret_dict[AttendSpellRNN.KEY_SEQUENCE].append(hyp)
            hyps = torch.stack(ret_dict[AttendSpellRNN.KEY_SEQUENCE])
            probs = torch.stack(ret_dict[AttendSpellRNN.PROBABILITY])
            probs = torch.transpose(probs, 0, 1)
            for i in range(probs.size(0)):
                decoder_outputs.append(probs[i])
            # decoder_outputs = [seq_len, batch, vocab_size]
        return decoder_outputs, hyps, bottom_hidden, upper_hidden

    def _select_indices_hidden(self, select_indices, bottom_hidden, upper_hidden):
        # Gather hidden-state columns (dim 1 = batch*beam) for the chosen hypotheses.
        return torch.index_select(bottom_hidden, 1, select_indices), torch.index_select(upper_hidden, 1, select_indices)

    def _init_state(self, encoder_hidden):
        """ Initialize the encoder hidden state. """
        if encoder_hidden is None:
            return None
        if isinstance(encoder_hidden, tuple):
            encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])
        else:
            encoder_hidden = self._cat_directions(encoder_hidden)
        # Split the stacked hidden state into bottom (1 layer) and upper (rest).
        bottom_hidden = encoder_hidden[-self.n_layers, :, :].unsqueeze(0)
        upper_hidden = encoder_hidden[(-self.n_layers + 1):, :, :]
        return bottom_hidden, upper_hidden

    def _init_state_beam(self, encoder_hidden):
        """
        Initialize the encoder hidden state for beam search.
        encoder_hidden : [layer x B x hidden_size]
        """
        if encoder_hidden is None:
            return None
        if isinstance(encoder_hidden, tuple):
            encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])
        else:
            encoder_hidden = self._cat_directions(encoder_hidden)
        _, batch_size, hidden_size = encoder_hidden.size()
        # Tile each sample's hidden state beam_width times, ordered batch-major.
        bottom_hidden = encoder_hidden[-self.n_layers, :, :].unsqueeze(0) # make beam * batch to batch * beam
        bottom_hidden = bottom_hidden.repeat(1, self.beam_width,1)
        bottom_hidden = bottom_hidden.view(1, self.beam_width, batch_size, hidden_size)
        bottom_hidden = torch.transpose(bottom_hidden, 1, 2).reshape(1, self.beam_width * batch_size, hidden_size)
        upper_hidden = encoder_hidden[(-self.n_layers + 1):, :, :]
        upper_hidden = upper_hidden.repeat(1, self.beam_width, 1)
        upper_hidden = upper_hidden.view(self.n_layers - 1, self.beam_width, batch_size, hidden_size)
        upper_hidden = torch.transpose(upper_hidden, 1, 2).reshape(self.n_layers - 1, self.beam_width * batch_size, hidden_size)
        return bottom_hidden, upper_hidden

    def _init_state_zero(self, batch_size):
        # Zero initial hidden states for the two decoder RNN stacks.
        bottom_init = torch.zeros(1, batch_size, self.hidden_size).to(self.device)
        upper_init = torch.zeros(self.n_layers - 1, batch_size, self.hidden_size).to(self.device)
        return bottom_init, upper_init

    def _init_state_zero_beam(self, batch_size, beam_width):
        # Zero initial hidden states sized for the tiled (batch*beam) layout.
        bottom_init = torch.zeros(1, batch_size*beam_width, self.hidden_size).to(self.device)
        upper_init = torch.zeros(self.n_layers - 1, batch_size*beam_width, self.hidden_size).to(self.device)
        return bottom_init, upper_init

    def _cat_directions(self, h):
        """
        (#directions * #layers, #batch, hidden_size) -> (#layers, #batch, #directions * hidden_size)
        """
        h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
        return h

    def _validate_args(self, inputs, encoder_outputs, teacher_forcing_ratio):
        """Resolve default inputs and the decoding length; raise on bad combos."""
        if encoder_outputs is None:
            raise ValueError("Argument encoder_outputs cannot be None.")
        batch_size = encoder_outputs.size(0)
        # set default input and max decoding length
        if inputs is None:
            if teacher_forcing_ratio > 0:
                raise ValueError("Teacher forcing has to be disabled (set 0) when no inputs is provided.")
            inputs = torch.LongTensor([self.sos_id] * batch_size).view(batch_size, 1)
            if torch.cuda.is_available():
                inputs = inputs.cuda()
            max_length = self.max_length
        else:
            max_length = inputs.size(1) - 1 # minus the start of sequence symbol
        return inputs, batch_size, max_length
| [
"torch.index_select",
"torch.nn.Dropout",
"torch.nn.Embedding",
"torch.LongTensor",
"torch.stack",
"torch.transpose",
"numpy.array",
"torch.tensor",
"torch.cuda.is_available",
"torch.nn.Linear",
"random.random",
"torch.zeros",
"torch.cat"
] | [((4746, 4786), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'embedding_size'], {}), '(vocab_size, embedding_size)\n', (4758, 4786), True, 'import torch.nn as nn\n'), ((4817, 4846), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'input_dropout_p'}), '(p=input_dropout_p)\n', (4827, 4846), True, 'import torch.nn as nn\n'), ((4919, 4958), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_size', 'vocab_size'], {}), '(self.hidden_size, vocab_size)\n', (4928, 4958), True, 'import torch.nn as nn\n'), ((5625, 5658), 'torch.cat', 'torch.cat', (['[embedded, context]', '(2)'], {}), '([embedded, context], 2)\n', (5634, 5658), False, 'import torch\n'), ((7081, 7114), 'torch.cat', 'torch.cat', (['[embedded, context]', '(2)'], {}), '([embedded, context], 2)\n', (7090, 7114), False, 'import torch\n'), ((8079, 8114), 'numpy.array', 'np.array', (['([max_length] * batch_size)'], {}), '([max_length] * batch_size)\n', (8087, 8114), True, 'import numpy as np\n'), ((9917, 9952), 'torch.stack', 'torch.stack', (['decoder_outputs'], {'dim': '(1)'}), '(decoder_outputs, dim=1)\n', (9928, 9952), False, 'import torch\n'), ((12243, 12293), 'torch.stack', 'torch.stack', (['ret_dict[AttendSpellRNN.KEY_SEQUENCE]'], {}), '(ret_dict[AttendSpellRNN.KEY_SEQUENCE])\n', (12254, 12293), False, 'import torch\n'), ((12314, 12363), 'torch.stack', 'torch.stack', (['ret_dict[AttendSpellRNN.PROBABILITY]'], {}), '(ret_dict[AttendSpellRNN.PROBABILITY])\n', (12325, 12363), False, 'import torch\n'), ((12384, 12412), 'torch.transpose', 'torch.transpose', (['probs', '(0)', '(1)'], {}), '(probs, 0, 1)\n', (12399, 12412), False, 'import torch\n'), ((12728, 12780), 'torch.index_select', 'torch.index_select', (['bottom_hidden', '(1)', 'select_indices'], {}), '(bottom_hidden, 1, select_indices)\n', (12746, 12780), False, 'import torch\n'), ((12782, 12833), 'torch.index_select', 'torch.index_select', (['upper_hidden', '(1)', 'select_indices'], {}), '(upper_hidden, 1, select_indices)\n', (12800, 12833), False, 
'import torch\n'), ((16060, 16085), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16083, 16085), False, 'import torch\n'), ((7950, 7965), 'random.random', 'random.random', ([], {}), '()\n', (7963, 7965), False, 'import random\n'), ((11871, 11888), 'torch.stack', 'torch.stack', (['prob'], {}), '(prob)\n', (11882, 11888), False, 'import torch\n'), ((12012, 12028), 'torch.stack', 'torch.stack', (['hyp'], {}), '(hyp)\n', (12023, 12028), False, 'import torch\n'), ((14184, 14220), 'torch.transpose', 'torch.transpose', (['bottom_hidden', '(1)', '(2)'], {}), '(bottom_hidden, 1, 2)\n', (14199, 14220), False, 'import torch\n'), ((14534, 14569), 'torch.transpose', 'torch.transpose', (['upper_hidden', '(1)', '(2)'], {}), '(upper_hidden, 1, 2)\n', (14549, 14569), False, 'import torch\n'), ((14751, 14795), 'torch.zeros', 'torch.zeros', (['(1)', 'batch_size', 'self.hidden_size'], {}), '(1, batch_size, self.hidden_size)\n', (14762, 14795), False, 'import torch\n'), ((14833, 14893), 'torch.zeros', 'torch.zeros', (['(self.n_layers - 1)', 'batch_size', 'self.hidden_size'], {}), '(self.n_layers - 1, batch_size, self.hidden_size)\n', (14844, 14893), False, 'import torch\n'), ((15033, 15090), 'torch.zeros', 'torch.zeros', (['(1)', '(batch_size * beam_width)', 'self.hidden_size'], {}), '(1, batch_size * beam_width, self.hidden_size)\n', (15044, 15090), False, 'import torch\n'), ((15126, 15199), 'torch.zeros', 'torch.zeros', (['(self.n_layers - 1)', '(batch_size * beam_width)', 'self.hidden_size'], {}), '(self.n_layers - 1, batch_size * beam_width, self.hidden_size)\n', (15137, 15199), False, 'import torch\n'), ((15980, 16024), 'torch.LongTensor', 'torch.LongTensor', (['([self.sos_id] * batch_size)'], {}), '([self.sos_id] * batch_size)\n', (15996, 16024), False, 'import torch\n'), ((10599, 10649), 'torch.stack', 'torch.stack', (['[b.current_predictions for b in beam]'], {}), '([b.current_predictions for b in beam])\n', (10610, 10649), False, 'import torch\n'), 
((11486, 11539), 'torch.tensor', 'torch.tensor', (['select_indices_array'], {'dtype': 'torch.int64'}), '(select_indices_array, dtype=torch.int64)\n', (11498, 11539), False, 'import torch\n')] |
#!/usr/bin/python3
from binance.exceptions import BinanceAPIException, BinanceRequestException
from pyti.smoothed_moving_average import smoothed_moving_average as sma
from pyti.bollinger_bands import upper_bollinger_band as ubb # Examples of indicators/strategies
from pyti.bollinger_bands import lower_bollinger_band as lbb # Examples of indicators/strategies
from decimal import Decimal as D, ROUND_DOWN, ROUND_UP
from itertools import tee, islice, chain
from datetime import time, datetime
from binance.client import Client
from plotly.offline import plot
import plotly.graph_objs as go
from binance.enums import *
import pandas_ta as ta # Examples of indicators/strategies
from time import sleep
import pandas as pd
import numpy as np
import traceback
import datetime
import decimal
import time
import bikeys
# Sink for tracebacks/errors; truncated on every start of the bot.
log = open("BinanceMT.txt", "w")
loline = '____________________________________________________________________'  # console separator
sleepsec = 2.4  # default pause between API polls, in seconds
trend_count = 0  # recursion guard used by Trend() when the trend is sideways
# Authenticated Binance REST client; keys are kept out of source in bikeys.py.
client = Client(api_key=bikeys.Pass, api_secret=bikeys.Sec)
global trend  # NOTE(review): a no-op at module scope; `trend` is assigned elsewhere
# Margin pairs the bot scans (base asset is derived via pair[:-4]).
pairs = ['BTCUSDT', 'BCHABCUSDT', 'LTCUSDT', 'ETHUSDT', 'ETCUSDT', 'DASHUSDT',
         'EOSUSDT', 'LINKUSDT', 'BNBUSDT', 'ZILUSDT', 'VETUSDT', 'ADAUSDT', 'XRPUSDT',
         'RVNUSDT']
def Trend(pair, interval=None):
    """Classify the medium-term trend of *pair* as 'UP', 'DWN' or 'SIDEWYS'.

    Compares the live price against 14/30/50-period smoothed moving averages
    on 2-hour candles and uses a VWAP/price ratio as an overbought filter.
    If the result is sideways, retries once on the 4-hour chart.

    Args:
        pair (str): Binance symbol, e.g. 'BTCUSDT'.
        interval (str, optional): kline interval to analyse. Defaults to the
            2-hour chart (backward compatible with the old zero-arg usage).

    Returns:
        str: 'UP', 'DWN' or 'SIDEWYS'.

    Side effects:
        Mutates module globals ``trend_count`` (recursion guard) and
        ``pairsmas`` (30-period SMA series, read by other routines).
    """
    global trend_count
    global pairsmas
    if interval is None:
        interval = Client.KLINE_INTERVAL_2HOUR
    altc = pair[:-4]
    ticker = client.get_symbol_ticker(symbol=pair)
    price = ticker['price']
    print('\n')
    print(f'Start________Gathering Trend for {altc}______Price:{float(price)}______\n')
    candle_no = 480
    candles = client.get_klines(symbol=pair, interval=interval, limit=candle_no)
    df = pd.DataFrame(data=candles)
    # Typed series of interest (the REST API returns everything as strings).
    close = df.iloc[:, 4].astype(float)
    volume = df.iloc[:, 5].astype(float)
    # Drop the columns not used here:
    df.pop(6)   # Close time
    df.pop(7)   # Quote asset volume
    df.pop(9)   # Taker buy base asset volume
    df.pop(10)  # Taker buy quote asset volume
    df.pop(11)  # Can be ignored
    df.columns = ['Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Trades']
    df['Time'] = pd.to_datetime(df['Time'] * 1000000, infer_datetime_format=True)
    # Smoothed moving averages; keep only the latest value of each, rounded
    # via Decimal to a stable 5-digit representation.
    fastsma = sma(close, 14)
    pairsmas = sma(close, 30)
    slowsma = sma(close, 50)
    fastsma = float(D("{0:.5f}".format(fastsma[-1])))
    pairsma = float(D("{0:.5f}".format(pairsmas[-1])))
    slowsma = float(D("{0:.5f}".format(slowsma[-1])))
    volma = sma(volume, 11)
    vol = volma[-2]   # last fully closed candle's smoothed volume
    lvol = volma[-1]  # current (incomplete) candle — excluded from the average
    candle_no = candle_no - 1
    avg_vol = (sum(volma) - lvol) / candle_no
    vol_perc = vol / avg_vol
    vol_perc_txt = D("{0:.3f}".format(vol_perc))
    print(f'The volume avg is: {vol_perc_txt} times the norm. Approx ~ {vol_perc_txt*100}%')
    # Volume Weighted Average Price and its ratio to the live price; a ratio
    # above 100% means the price trades below its volume-weighted average.
    vwap = float(sum(pairsmas)) * float(vol) / sum(volma)
    vwap_dec = D("{0:.4f}".format(vwap))
    vwap_ratio = (vwap / float(price)) * 100
    vwap_ratio = D("{0:.2f}".format(vwap_ratio))
    oversold = vwap_ratio > 100
    trend = 'SIDEWYS'
    print(f' the VWAP is : {vwap_dec} and that is {vwap_ratio}% of the price')
    # Uptrend: price above the fast SMA with the SMAs fanned out in order.
    if float(price) > fastsma and fastsma > pairsma and pairsma > slowsma:
        print(f'Classic TREND UP for {altc}')
        trend = 'UP'
    elif float(price) > fastsma and fastsma > pairsma:
        if oversold is False and float(price) > slowsma:
            trend = 'UP'
    # __________________________________________________________________
    # Downtrend: the mirror image of the uptrend conditions.
    if slowsma > pairsma and pairsma > fastsma and fastsma > float(price):
        print(f'Classic TREND DWN for {altc}')
        trend = 'DWN'
    elif pairsma > fastsma and fastsma > float(price):
        if float(price) < slowsma and oversold:
            trend = 'DWN'
    if trend == 'SIDEWYS':
        print(f'Trend is Sideways for {altc} on 2h, going for 4h chart')
        if trend_count < 1:
            # BUG FIX: the original set a local `interval` and recursed with
            # Trend(pair), which re-fetched the 2h chart and discarded the
            # result — the 4h fallback never took effect. Pass the interval
            # down and keep the recursive result.
            trend_count += 1
            trend = Trend(pair, Client.KLINE_INTERVAL_4HOUR)
        else:
            trend = 'SIDEWYS'
            print(f'Trend is Sidewys for {altc} on Daily charts')
    return trend
def Strategy(pair): # Gets precise Precise data and act from it.
    """Decide whether to open a long or short on *pair* from intraday candles.

    Uses Bollinger bands plus fast/50 SMAs over the candles elapsed since UTC
    midnight (5-minute chart, 3-minute early in the day) and the global
    ``trend`` computed by Trend().

    Args:
        pair (str): Binance symbol, e.g. 'BTCUSDT'.

    Returns:
        bool: the ``long`` flag. The original had three consecutive ``return``
        statements of which only the first executed; callers needing ``short``
        or ``tme_critical`` must read the module globals set here.

    Side effects:
        Sets globals df, altc, price, profit, long, short, up_bb, low_bb,
        tme_critical.
    """
    global df
    global altc
    global price
    global profit
    global long
    global short
    global up_bb
    global low_bb
    global tme_critical
    altc = pair[:-4]
    ticker = client.get_symbol_ticker(symbol=pair)
    price = ticker['price']
    # Candle count since UTC midnight so the lookback pivots on the daily open.
    utc = datetime.datetime.utcnow()  # time now
    mid_utc = utc.replace(hour=0, minute=0, second=0, microsecond=0)
    mins_utc = int((mid_utc - utc).total_seconds() / 60.0) * -1  # minutes since UTC midnight
    candls_utc = int(mins_utc / 5)
    interval = Client.KLINE_INTERVAL_5MINUTE
    if candls_utc < 24:
        # Too few 5m candles early in the day: fall back to the 3m chart,
        # with at least 20 minutes of data.
        if mins_utc < 20:
            mins_utc = 20
        candls_utc = int(mins_utc / 3)
        interval = Client.KLINE_INTERVAL_3MINUTE
    candle_no = candls_utc
    print('\n')
    print(f'Start________Gathering Strategy for {altc}__________Trend:{trend}______\n')
    print(f'From utc--- {candls_utc} :{interval} Candles')
    candles = client.get_klines(symbol=pair, interval=interval, limit=candle_no)
    df = pd.DataFrame(data=candles)
    # Typed series (the REST API returns strings). `open` shadows the builtin
    # inside this function only.
    open = df.iloc[:,1].astype(float)
    high = df.iloc[:,2].astype(float)
    low = df.iloc[:,3].astype(float)
    close = df.iloc[:,4].astype(float)
    # Drop the columns not used here:
    df.pop(6)   # Close time
    df.pop(7)   # Quote asset volume
    df.pop(9)   # Taker buy base asset volume
    df.pop(10)  # Taker buy quote asset volume
    df.pop(11)  # Can be ignored
    df.columns = ['time','open','high','low','close','volume','trades']
    df['time'] = pd.to_datetime(df['time'] * 1000000, infer_datetime_format=True)
    open = np.array(open)
    l_open = float(open[-1])
    # Fast SMA window shrinks when very few candles exist yet.
    if candls_utc < 7:
        fastsma = sma(close, int(candls_utc))
    else:
        fastsma = sma(close, 7)
    fastsma = float(fastsma[-1])
    fiftysma = sma(close, 50)
    fiftysma = float(fiftysma[-1])
    highest = max(high)
    # BUG FIX: the originals read `sum(high)+highest/int(len(high)+1)`, which
    # by precedence is ~= the SUM of all highs, so `price >= avg_high` could
    # never trigger (and `price <= avg_low` always did). Parenthesised to the
    # intended weighted mean.
    avg_high = float((sum(high) + highest) / int(len(high) + 1))
    lowest = min(low)
    avg_low = float((sum(low) + lowest) / int(len(low) + 1))
    up_bb = ubb(close, 7, 3.0)
    lup_bb = up_bb[-1]
    low_bb = lbb(close, 7, 3.0)
    llow_bb = low_bb[-1]
    print(f'The current Upper BB value: {lup_bb}')
    print(f'The current Lower BB value: {llow_bb}')
    # Band width relative to price = the potential round-trip profit factor.
    diff = (float(lup_bb) - float(llow_bb))
    profit = float(diff/float(price)) + 1
    print(f'\n The trading profit for {altc} is potentially {profit} or {profit*float(price)}')
    price = float(price)
    print(loline)
    long = False
    short = False
    tme_critical = False
    # NOTE(review): `lvwap` below is not defined in this function or anywhere
    # visible at module level — presumably a global set elsewhere (a VWAP
    # routine); these branches raise NameError if it is unset. TODO confirm.
    if profit > 1.007 and profit < 1.033:  # Lateral market
        scale = profit*0.009
        profit = 1.0116 + scale
        if price <= float(llow_bb)*0.9984 and l_open < fastsma:
            tme_critical = True
        if price <= float(llow_bb)*1.0033 and float(llow_bb) < fastsma and fastsma*1.002 < lvwap:
            if trend == 'UP' or trend == 'SIDEWYS':
                long = True
                print(f'\n Very cheap state vs VWAP and smma,. looking to long {altc} for a {profit} prof')
            else:
                print('Almost there')
        if price >= float(lup_bb)*1.0016 and l_open > fastsma:
            tme_critical = True
        if price > float(lup_bb)*0.9967 and float(lup_bb) > fastsma and fastsma*0.998 > lvwap:
            if trend == 'DWN' or trend == 'SIDEWYS':
                short = True
                print(f'\n Price is a very high vs VWAP and smma,. looking to short {altc} for a {profit} prof')
            else:
                print('Almost there')
    elif profit >= 1.033:  # Pumping market
        if price >= fiftysma and l_open > fastsma:
            tme_critical = True
        if price >= avg_high:
            if trend == 'UP' or trend == 'SIDEWYS':  # If trend is UP or SIDEWYS
                long = True
                print(f'\n Pumping but cheap state vs VWAP and smma,. looking to long {altc} for a {profit} prof')
            else:
                print('Almost there')
        if price <= fiftysma and l_open < fastsma:
            tme_critical = True
        if price <= avg_low:
            if trend == 'DWN' or trend == 'SIDEWYS':  # If trend is DWN or SIDEWYS
                short = True
                print(f'\n Price is falling vs VWAP and smma,. looking to short {altc} for a {profit} prof')
            else:
                print('Almost there')
    else:
        print(f'\n Not there yet')
    # BUG FIX: the original ended with `return long` / `return short` /
    # `return tme_critical`; the last two were unreachable dead code. Keep the
    # original effective return value; short/tme_critical remain available as
    # the module globals set above.
    return long
def OpenOrder(price):
    """Inspect open margin orders for the global ``pair`` and close bad positions.

    For the first SELL (take-profit of a long) or BUY (take-profit of a short)
    order found, decides whether to keep it or cancel it and exit at market,
    then repays the loan. Returns early after handling one order.

    Args:
        price: live price of the pair (compared against the order's price).

    Returns:
        bool or None: noLongPosition after a SELL order, noShortPosition after
        a BUY order, None when there are no open orders.

    Side effects:
        Sets the module globals noLongPosition / noShortPosition; may cancel
        and place market orders and call RepayUSD()/RepayAltc().
    """
    global noLongPosition
    global noShortPosition
    # NOTE(review): `pair`, `lup_vwap_b` and `llow_vwap_b` are read as globals
    # that are not assigned anywhere in this function — presumably set by the
    # caller / another routine. TODO confirm.
    altc = pair[:-4]
    print(f'Checking open order on {altc}')
    open_order = client.get_open_margin_orders(symbol= pair)
    has_data = float(len(open_order))
    noShortPosition= True
    noLongPosition = True
    if has_data > 0:
        for i in range(len(open_order)):
            orig_quant = float(open_order[i]['origQty'])
            # exec_quant = float(open_order[i]['executedQty'])
            orderId = int(open_order[i]['orderId'])
            type = str((open_order[i]['type']))   # shadows builtins.type locally
            side = str((open_order[i]['side']))
            takeprofit = str((open_order[i]['price']))
            takeprof = float(takeprofit)
            # NOTE(review): this parses the order's 'price' field as a
            # timestamp — probably the 'time' field was intended. TODO confirm.
            time = pd.to_datetime(float(open_order[i]['price']), infer_datetime_format=True)
            print(f'!!!!!\n ___Order of {orig_quant} units of {altc} at price of {takeprof} time{time}!!\n ')
            if side == 'SELL': # -----------------------This is a long position
                print(open_order)
                print('\n There s an open TP Sell order here,.. \n')
                noLongPosition = False
                if price >= lup_vwap_b and float(price/takeprof) >= 0.9916: #Price is up + bad position
                    # Re-read the symbol filters, re-quote the price and round
                    # the quantity to the exchange's lot size before closing.
                    info = client.get_symbol_info(symbol=pair)
                    price_filter = float(info['filters'][0]['tickSize'])
                    ticker = client.get_symbol_ticker(symbol=pair)
                    price = float(ticker['price'])
                    price = D.from_float(price).quantize(D(str(price_filter)))
                    minimum = float(info['filters'][2]['minQty']) # 'minQty'
                    quant = D.from_float(orig_quant).quantize(D(str(minimum)))
                    result = client.cancel_margin_order(symbol= pair, orderId= orderId)
                    print('Price is up now + bad position!, Order cancelled')
                    try:
                        order = client.create_margin_order(symbol=pair,
                            side=SIDE_SELL,
                            type=ORDER_TYPE_MARKET,
                            quantity= quant)
                        print(f'Market sold {pair}')
                    except Exception as e:
                        traceback.print_exc(file=log)
                        print(e)
                        # One retry after a short pause.
                        try:
                            sleep(2)
                            order = client.create_margin_order(
                                symbol=pair,
                                side=SIDE_SELL,
                                type=ORDER_TYPE_MARKET,
                                quantity=quant)
                            print(f'Market sold {pair}')
                        except Exception as e:
                            traceback.print_exc(file=log)
                            print(e)
                    RepayUSD()
                    noLongPosition = True
                    print(f'Sell order for {altc} cleared')
                elif trend == 'DWN' or float(price/takeprof) >= 0.9916:
                    # Trend turned down or position gone bad: same market exit.
                    info = client.get_symbol_info(symbol=pair)
                    price_filter = float(info['filters'][0]['tickSize'])
                    ticker = client.get_symbol_ticker(symbol=pair)
                    price = float(ticker['price'])
                    price = D.from_float(price).quantize(D(str(price_filter)))
                    minimum = float(info['filters'][2]['minQty']) # 'minQty'
                    quant = D.from_float(orig_quant).quantize(D(str(minimum)))
                    result = client.cancel_margin_order(symbol= pair, orderId= orderId)
                    print('Price is up now + bad position!, Order cancelled')
                    try:
                        order = client.create_margin_order(symbol=pair,
                            side=SIDE_SELL,
                            type=ORDER_TYPE_MARKET,
                            quantity= quant)
                        print(f'Market sold {pair}')
                    except Exception as e:
                        traceback.print_exc(file=log)
                        print(e)
                        try:
                            sleep(2)
                            order = client.create_margin_order(
                                symbol=pair,
                                side=SIDE_SELL,
                                type=ORDER_TYPE_MARKET,
                                quantity=quant)
                            print(f'Market sold {pair}')
                        except Exception as e:
                            traceback.print_exc(file=log)
                            print(e)
                    RepayUSD()
                    noLongPosition = True
                    print(f'Sell order for {altc} cleared')
                else:
                    print(f'Sell order for {altc} stays')
                # Only the first SELL order is handled per call.
                return noLongPosition
            if side == 'BUY': # -----------------------This is a Short position
                print(open_order)
                print('\n There is an open TP Buy lower order here already \n')
                noShortPosition = False
                if price <= llow_vwap_b and float(price/takeprof) <= 1.0084: #price dwn + bad position
                    info = client.get_symbol_info(symbol=pair)
                    price_filter = float(info['filters'][0]['tickSize'])
                    ticker = client.get_symbol_ticker(symbol=pair)
                    price = float(ticker['price'])
                    price = D.from_float(price).quantize(D(str(price_filter)))
                    minimum = float(info['filters'][2]['minQty']) # 'minQty'
                    # ROUND_UP so the buy-back fully covers the borrowed amount.
                    quant = D.from_float(orig_quant).quantize(D(str(minimum)), rounding=ROUND_UP)
                    result = client.cancel_margin_order(symbol= pair, orderId= orderId)
                    print('This is a loosing position, StopLoss: Order cancelled')
                    try:
                        order = client.create_margin_order(symbol=pair,
                            side=SIDE_BUY,
                            type=ORDER_TYPE_MARKET,
                            quantity= quant)
                        print(f'Market bought {pair} to repay')
                    except Exception as e:
                        traceback.print_exc(file=log)
                        print(e)
                        try:
                            order = client.create_margin_order(
                                symbol=pair,
                                side=SIDE_BUY,
                                type=ORDER_TYPE_MARKET,
                                quantity=quant)
                            print(f'Market bought {pair} to repay')
                        except Exception as e:
                            traceback.print_exc(file=log)
                            print(e)
                    RepayAltc()
                    noShortPosition= True
                    print(f'Buy order for {altc} cleared')
                elif trend == 'UP' or float(price/takeprof) <= 1.0084:
                    info = client.get_symbol_info(symbol=pair)
                    price_filter = float(info['filters'][0]['tickSize'])
                    ticker = client.get_symbol_ticker(symbol=pair)
                    price = float(ticker['price'])
                    price = D.from_float(price).quantize(D(str(price_filter)))
                    minimum = float(info['filters'][2]['minQty']) # 'minQty'
                    quant = D.from_float(orig_quant).quantize(D(str(minimum)), rounding=ROUND_UP)
                    result = client.cancel_margin_order(symbol= pair, orderId= orderId)
                    print('This is a loosing position, StopLoss: Order cancelled')
                    try:
                        order = client.create_margin_order(symbol=pair,
                            side=SIDE_BUY,
                            type=ORDER_TYPE_MARKET,
                            quantity= quant)
                        print(f'Market bought {pair} to repay')
                    except Exception as e:
                        traceback.print_exc(file=log)
                        print(e)
                        try:
                            order = client.create_margin_order(
                                symbol=pair,
                                side=SIDE_BUY,
                                type=ORDER_TYPE_MARKET,
                                quantity=quant)
                            print(f'Market bought {pair} to repay')
                        except Exception as e:
                            traceback.print_exc(file=log)
                            print(e)
                    RepayAltc()
                    noShortPosition= True
                    print(f'Buy order for {altc} cleared')
                else:
                    print(f'Buy order for {altc} stays for now')
                # Only the first BUY order is handled per call.
                return noShortPosition
    else:
        print(f'There are no open orders for {altc}')
def RepayUSD():
    """Best-effort repayment of the USDT margin loan from free USDT balance.

    Reads the margin account and, when the USDT entry shows both free funds
    (> 0.00001) and a borrowed amount (> 10), waits, re-reads the balance,
    and only repays when the free amount is unchanged (no fill happened in
    between).  Amounts are quantized to the exchange lot size.

    Relies on module-level globals: `client`, `pair`, `log`, `D`,
    `ROUND_DOWN`, `sleep`, `traceback`.
    """
    print(f'^ Checking free balances on USDT')
    # NOTE(review): lot size is read from ADAUSDT regardless of the traded
    # pair -- presumably used as a generic USDT lot filter; confirm.
    info = client.get_symbol_info(symbol='ADAUSDT')
    minimum = float(info['filters'][2]['minQty']) # 'minQty'
    dict_balanc = client.get_margin_account()
    balances = (dict_balanc['userAssets'])
    for i in balances:
        if str('USDT') == i['asset'] and float(i['free']) > 0.00001 and float(i['borrowed']) > 10:
            loaned = float(i['borrowed'])
            quant = float(i['free'])
            print(f'There are {quant} USD free, waiting')
            quant1 = D.from_float(quant).quantize(D(str(minimum)), rounding=ROUND_DOWN)
            print(f'The balance of USDT wallet is {quant1}')
            # Wait, then re-read the account; repay only if the free amount
            # is stable (quant1 == quant2 below).
            sleep(5)
            dict_balanc = client.get_margin_account()
            balances = (dict_balanc['userAssets'])
            for i in balances:
                if str('USDT') == i['asset'] and float(i['free']) > 0.00001:
                    quant2 = float(i['free'])
                    print(f'There are {quant2} USD free, comparing')
                    quant2 = D.from_float(quant2).quantize(D(str(minimum)), rounding=ROUND_DOWN)
                    # Case 1: balance stable and a sizable loan -> repay the
                    # whole free amount (note: default rounding here, not
                    # ROUND_DOWN as above), with one retry.
                    if float(quant) > 10 and quant1 == quant2 and loaned > 10:
                        print(f'Checking USDT for a repay of the free amount')
                        try:
                            quant = D.from_float(quant).quantize(D(str(minimum)))
                            repay = client.repay_margin_loan(asset='USDT', amount= quant)
                            print(f'Repayed the collateral for {pair} 1st try')
                        except Exception as e:
                            traceback.print_exc(file=log)
                            print(e)
                            try:
                                repay = client.repay_margin_loan(asset='USDT', amount= quant)
                                print(f'Repayed the collateral for {pair} 2nd try')
                            except Exception as e:
                                traceback.print_exc(file=log)
                                print(e)
                    # Case 2: more is free than was borrowed -> repay only the
                    # borrowed amount.
                    if float(quant) > 10 and quant1 == quant2 and float(quant) > loaned:
                        print(f'Checking USDT for a repay of the free amount')
                        try:
                            loaned = D.from_float(loaned).quantize(D(str(minimum)), rounding=ROUND_DOWN)
                            repay = client.repay_margin_loan(asset='USDT', amount= loaned)
                            print(f'Repayed the collateral for USDT 1st try')
                        except Exception as e:
                            traceback.print_exc(file=log)
                            print(e)
                    # NOTE(review): this third branch can fire even after one
                    # of the branches above already repaid, issuing a second
                    # repay of the full free amount -- confirm intended.
                    if float(quant) > 10 and loaned > 10:
                        repay = client.repay_margin_loan(asset='USDT', amount= quant)
                        print(f'Repayed the collateral for USDT 1st try')
        elif str('USDT') == i['asset'] and float(i['borrowed']) < 10:
            print('No borrowed amount')
def RepayAltc():
    """Best-effort repayment of the borrowed alt-coin (`altc`) margin loan.

    Scans the margin account for the `altc` asset and walks its fields;
    when the borrowed amount is worth more than ~10.1 USDT it quantizes the
    free amount to lot size and runs a chain of repay fallbacks, market-buying
    the shortfall when a straight repay fails.

    Relies on module-level globals: `client`, `altc`, `pair`, `log`, `D`,
    `decimal`, `sleep`, `traceback`, `SIDE_BUY`, `ORDER_TYPE_MARKET`.
    """
    print(f'^ Checking free balances on {altc}')
    ticker = client.get_symbol_ticker(symbol=pair)
    price = ticker['price']
    info = client.get_symbol_info(symbol=pair)
    minimum = float(info['filters'][2]['minQty']) # 'minQty'
    dict_balanc = client.get_margin_account()
    balances = (dict_balanc['userAssets'])
    for i in balances:
        if str(altc) == i['asset']:
            asset = i
            for key, value in asset.items():
                if key == 'free' and float(value) >= 0.00001:
                    quant = float(value)
                # Borrowed more than ~10.1 USDT worth of the coin: repay.
                if key == 'borrowed' and float(value) >= 10.1/float(price):
                    loan = float(value)
                    # NOTE(review): `quant` is only bound if the 'free' key was
                    # iterated first with a large-enough value; otherwise this
                    # raises NameError (REST payload field order is assumed).
                    print(f'There are {quant} {altc} free, waiting')
                    quant1 = D.from_float(quant).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
                    print(f'The balance of {altc} wallet is {quant1}')
                    sleep(3)
                    try:
                        # Attempt 1: repay the full free amount as-is.
                        quant = D.from_float(quant).quantize(D(str(minimum)))
                        repay = client.repay_margin_loan(asset=altc, amount= quant)
                        print(f'Repayed the {altc} debt')
                    except Exception as e:
                        traceback.print_exc(file=log)
                        print(traceback.format_exc())
                        sleep(3.3)
                        try:
                            # Attempt 2: buy the shortfall at market, then repay.
                            # NOTE(review): `loaned` is not bound yet on this path
                            # (only `loan` is) -- the NameError raised here is what
                            # pushes control into the next fallback; verify whether
                            # `loan` was meant.
                            loaned = quant - loaned
                            loaned = D.from_float(loaned).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
                            order = client.create_margin_order(
                                symbol= pair,
                                side=SIDE_BUY,
                                type=ORDER_TYPE_MARKET,
                                quantity=loaned)
                            print(f'Market bought {altc} to repay borrowed debt')
                            sleep(16)
                            repay = client.repay_margin_loan(asset=altc, amount= quant)
                            print(f'Repayed the {altc} debt')
                        except Exception as e:
                            traceback.print_exc(file=log)
                            print(traceback.format_exc())
                            try:
                                # Attempt 3: buy back the whole borrowed amount.
                                loaned = D.from_float(loan).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
                                order = client.create_margin_order(
                                    symbol= pair,
                                    side=SIDE_BUY,
                                    type=ORDER_TYPE_MARKET,
                                    quantity=loaned)
                                print(f'Market bought {altc} to repay borrowed debt')
                                sleep(16)
                                repay = client.repay_margin_loan(asset=altc, amount= quant)
                                print(f'Market bought {altc} to repay borrowed debt')
                            except Exception as e:
                                traceback.print_exc(file=log)
                                print(traceback.format_exc())
                                try:
                                    # Attempt 4: same buy with default rounding.
                                    loaned = D.from_float(loan).quantize(D(str(minimum)))
                                    order = client.create_margin_order(
                                        symbol= pair,
                                        side=SIDE_BUY,
                                        type=ORDER_TYPE_MARKET,
                                        quantity=loaned)
                                    print(f'Market bought {altc} to repay borrowed debt')
                                    sleep(16)
                                    # NOTE(review): `dollars` is not defined anywhere
                                    # in view -- if reached this raises NameError;
                                    # confirm the intended repay amount.
                                    repay = client.repay_margin_loan(asset=altc, amount= dollars)
                                    print(f'Market bought {altc} to repay borrowed debt')
                                except Exception as e:
                                    traceback.print_exc(file=log)
                                    print(traceback.format_exc())
                                    print(e)
                elif key == 'borrowed' and float(value) >= minimum:
                    loaned = float(value)
                    # NOTE(review): `key` cannot be both 'borrowed' (elif above)
                    # and 'free' here, so this branch is unreachable as written
                    # -- verify the intended condition.
                    if key == 'free' and float(value) > float(loaned):
                        free = float(value)
                        loaned = D.from_float(free).quantize(D(str(minimum)))
                        if free >= 0.0001:
                            try:
                                print(f'there is {free} amount of {altc} here')
                                repay = client.repay_margin_loan(asset=altc, amount= loaned)
                                print(f'Repayed the {altc} debt in the 1st try')
                            except Exception as e:
                                print(f'there is {free} amount of {altc} here')
                                traceback.print_exc(file=log)
                                print(traceback.format_exc())
                                print(e)
def Long(pair):
    """Open a leveraged long on *pair*: borrow USDT, buy the alt-coin, then
    place a limit take-profit sell at roughly `profit` times the entry price.

    Borrows 1/6 of the maximum USDT margin loan.  Trades only when at least
    130 USDT of margin headroom exists and the global `profit` multiple
    exceeds 1.00933.  Both the entry and the take-profit have layered retry
    fallbacks (market -> limit at ask -> re-read balance and retry).

    Relies on module-level globals: `client`, `altc`, `profit`, `log`,
    `loline`, `sleepsec`, `decimal`, `D`, order side/type constants, `Plot`.
    """
    try:
        print(loline)
        ticker = client.get_symbol_ticker(symbol=pair)
        price = ticker['price']
        price = float(price)
        price = float("{0:.5f}".format(price))
        max_loan = client.get_max_margin_loan(asset='USDT') # Whats the max margin I get?
        max_loan = float(max_loan['amount'])
        loan = max_loan/6
        loan = float(loan)
        loan = float("{0:.5f}".format(loan))
        print(f' the loan amnt is {loan} out of the max of: {max_loan}')
        if max_loan >= 130 and profit > 1.00933:
            transaction = client.create_margin_loan(asset='USDT', amount=loan) # Borrows longing asset prepares to Buy> Sale Higher > Repay USDT
            print(transaction)
            asset = 'USDT'
            info = client.get_symbol_info(symbol=pair)
            minimum = float(info['filters'][2]['minQty']) # 'minQty'
            price_filter = float(info['filters'][0]['tickSize'])
            # Quantize price to the tick size and quantity to the lot size.
            price = D.from_float(price).quantize(D(str(price_filter)))
            quant = loan/float(price)
            quant = D.from_float(quant).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
            # --- Phase 1: enter the long (market buy, with fallbacks). ---
            try:
                print(f'Borrowed USDT and Market buying {altc}')
                order = client.create_margin_order(
                    symbol= pair,
                    side=SIDE_BUY,
                    type=ORDER_TYPE_MARKET,
                    quantity=quant)
                sleep(21)
                print(f'Borrowed USDT and Market bought {altc}')
            except Exception as e:
                traceback.print_exc(file=log)
                print(e)
                try:
                    # Fallback: limit buy at the current ask price.
                    price = float(client.get_orderbook_ticker(symbol=str(pair))['askPrice'])
                    price = D.from_float(price).quantize(D(str(price_filter)))
                    print('failed to market buy, going for limit on ask price')
                    order = client.create_margin_order(
                        symbol= pair,
                        side=SIDE_BUY,
                        type=ORDER_TYPE_LIMIT,
                        timeInForce=TIME_IN_FORCE_GTC,
                        quantity=quant,
                        price=price)
                    print(f'Borrowed USDT and bought {altc}, waiting on order to go trw')
                    sleep(16)
                except Exception as e:
                    # Last resort: re-read the wallet and limit-buy with the
                    # actual free balance.
                    dict_balanc = client.get_margin_account()
                    balances = (dict_balanc['userAssets'])
                    for i in balances:
                        if str(altc) == i['asset'] and float(i['free']) > 0.00:
                            quant = float(i['free'])
                            print(quant)
                            quant = D.from_float(quant).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
                            print(f'The balance of {altc} wallet is {quant}')
                            order = client.create_margin_order(
                                symbol= pair,
                                side=SIDE_BUY,
                                type=ORDER_TYPE_LIMIT,
                                timeInForce=TIME_IN_FORCE_GTC,
                                quantity=quant,
                                price=price)
                            print(f'Borrowed USDT and bought {altc}, waiting on order to go trw')
                    traceback.print_exc(file=log)
                    print(e)
            # --- Phase 2: place the limit take-profit sell. ---
            try:
                print(f'Attempting TP planned at {profit} parts of {price} for {altc} !****')
                price = float(price)
                dict_balanc = client.get_margin_account()
                balances = (dict_balanc['userAssets'])
                for i in balances:
                    if str(altc) == i['asset'] and float(i['free']) > 0.00000001:
                        quant = float(i['free'])
                        print(quant)
                        quant = D.from_float(quant).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
                        print(f'The balance of {altc}s wallet is {quant}')
                # TP price = entry price scaled by the profit multiple.
                profitL = float(profit-1)
                profitLong = profitL + 1
                price = price*profitLong
                price_filter = float(info['filters'][0]['tickSize'])
                price = D.from_float(price).quantize(D(str(price_filter)))
                order = client.create_margin_order(
                    symbol= pair,
                    side=SIDE_SELL,
                    type=ORDER_TYPE_LIMIT,
                    timeInForce=TIME_IN_FORCE_GTC,
                    quantity=quant,
                    price=price)
                print(f' *******Limit SELL order made: {quant} of {altc} * @ * {price} *****')
                print(f'Borrowed USDT and bought {altc}, set TP at {price} for {altc}')
                Plot(pair, profit)
            except Exception as e:
                traceback.print_exc(file=log)
                print(e)
                sleep(16)
                print(f'Error with TP trying again')
                try:
                    # NOTE(review): dividing the asset quantity by price again
                    # shrinks the retry size drastically -- confirm intended.
                    quant = (float(quant)/float(price))*0.9925 #Lesser amount left after fees
                    quant = D.from_float(quant).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
                    order = client.create_margin_order(
                        symbol= pair,
                        side=SIDE_SELL,
                        type=ORDER_TYPE_LIMIT,
                        timeInForce=TIME_IN_FORCE_GTC,
                        quantity=quant,
                        price=price)
                    print(f' *******Limit SELL order made: {quant} of {altc} * @ * {price} *****')
                    print(f'Borrowed USDT and bought {altc}, set TP at {price} for {altc}')
                    Plot(pair, profit)
                except Exception as e:
                    traceback.print_exc(file=log)
                    print(e)
                    print(f'Error with TP trying again:')
                    try:
                        # Retry with a fee-haircut quantity, default rounding.
                        quant = float(quant)*0.9925 #amount before fees
                        quant = D.from_float(quant).quantize(D(str(minimum)))
                        order = client.create_margin_order(
                            symbol= pair,
                            side=SIDE_SELL,
                            type=ORDER_TYPE_LIMIT,
                            timeInForce=TIME_IN_FORCE_GTC,
                            quantity=quant,
                            price=price)
                        print(f' *******Limit SELL order made: {quant} of {altc} * @ * {price} *****')
                        print(f'Borrowed USDT and bought {altc}, set TP at {price} for {altc}')
                        Plot(pair, profit)
                    except Exception as e:
                        traceback.print_exc(file=log)
                        print(e)
                        try:
                            # Final retry: haircut the quantity once more.
                            quant = float(quant)*0.9925 #amount before fees
                            quant = D.from_float(quant).quantize(D(str(minimum)))
                            order = client.create_margin_order(
                                symbol= pair,
                                side=SIDE_SELL,
                                type=ORDER_TYPE_LIMIT,
                                timeInForce=TIME_IN_FORCE_GTC,
                                quantity=quant,
                                price=price)
                            print(f' *******Limit SELL order made: {quant} of {altc} * @ * {price} *****')
                            print(f'Borrowed USDT and bought {altc}, set TP at {price} for {altc}')
                            Plot(pair, profit)
                        except Exception as e:
                            traceback.print_exc(file=log)
                            print(e)
        else:
            print('******** Not enough margin left, or profit opportunity is low *****')
            sleep(sleepsec)
    except Exception as e:
        traceback.print_exc(file=log)
        print(e)
        sleep(1)
def Short(pair):
    """Open a leveraged short on *pair*: borrow the alt-coin, sell it, then
    place a limit buy-back (take-profit) below the entry price.

    Borrows 1/6 of the maximum `altc` margin loan.  Trades only when margin
    headroom exceeds 130 USDT worth of the coin and the global `profit`
    multiple exceeds 1.00933.  The entry sell has a deep chain of fallbacks
    (market -> limit at bid -> market again -> balance re-read -> revert and
    repay); the TP buy has one fee-haircut retry.

    Relies on module-level globals: `client`, `altc`, `profit`, `log`,
    `loline`, `decimal`, `D`, order side/type constants,
    `BinanceAPIException`, `ShortPlot`.
    """
    try:
        print(loline)
        ticker = client.get_symbol_ticker(symbol=pair)
        price = ticker['price']
        price = float(price)
        price = float("{0:.5f}".format(price))
        max_loan = client.get_max_margin_loan(asset=altc) # Whats the max margin I get?
        max_loan = float(max_loan['amount'])
        loan = max_loan/6
        loan = float(loan)
        loan = float("{0:.5f}".format(loan))
        print(f' the loan amnt is {loan} out of the max of: {max_loan}')
        if max_loan >= 130/price and float(profit) > 1.00933:
            transaction = client.create_margin_loan(asset=altc, amount=loan) # Borrows shorting asset prepares to SELL> Rebuy lower > Repay altc
            print(transaction)
            asset = altc
            info = client.get_symbol_info(symbol=pair)
            price_filter = float(info['filters'][0]['tickSize'])
            # Quantize price to the tick size and quantity to the lot size.
            price = D.from_float(price).quantize(D(str(price_filter)))
            quant = loan
            minimum = float(info['filters'][2]['minQty']) # 'minQty'
            quant = D.from_float(quant).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
            # --- Phase 1: enter the short (market sell, with fallbacks). ---
            try:
                print(f'Borrowing {altc} and market selling to short')
                order = client.create_margin_order(
                    symbol= pair,
                    side=SIDE_SELL,
                    type=ORDER_TYPE_MARKET,
                    quantity=quant)
                sleep(20)
                print(f'Borrowed {altc} and market sold FOR USDT')
            except Exception as e:
                print(f'Error with Order trying again ***************')
                traceback.print_exc(file=log)
                print(e)
                try:
                    # Fallback: limit sell at the current bid price.
                    price_filter = float(info['filters'][0]['tickSize'])
                    price = float(client.get_orderbook_ticker(symbol=str(pair))['bidPrice'])
                    price = D.from_float(price).quantize(D(str(price_filter)))
                    order = client.create_margin_order(
                        symbol= pair,
                        side=SIDE_SELL,
                        type=ORDER_TYPE_LIMIT,
                        timeInForce=TIME_IN_FORCE_GTC,
                        quantity=quant,
                        price=price)
                    print(f'Borrowed {altc} and set a buy for bidPrice {price}, waiting on order to go trw')
                    sleep(21)
                except Exception as e:
                    try:
                        # Fallback: plain market sell again.
                        print(f'Error with {altc} sell, market selling to short')
                        order = client.create_margin_order(
                            symbol= pair,
                            side=SIDE_SELL,
                            type=ORDER_TYPE_MARKET,
                            quantity=quant)
                        sleep(16)
                        print(f'Sold {altc} at market FOR USDT')
                    except Exception as e:
                        try:
                            # Fallback: re-read the wallet and sell the actual
                            # free balance.
                            # NOTE(review): D(price_filter) here receives a float,
                            # not str(...) as elsewhere -- exponent may differ.
                            info = client.get_symbol_info(symbol=pair)
                            price_filter = float(info['filters'][0]['tickSize'])
                            price = D.from_float(price).quantize(D(price_filter))
                            minimum = float(info['filters'][2]['minQty']) # 'minQty'
                            quant = loan
                            dict_balanc = client.get_margin_account()
                            balances = (dict_balanc['userAssets'])
                            for i in balances:
                                if str(altc) == i['asset'] and float(i['free']) > 0.00:
                                    quant1 = float(i['free'])
                                    print(f'There are {quant1} {altc} free')
                                    sleep(2)
                                    print(f'The balance of {altc} wallet is {quant}')
                                    quant = D.from_float(quant1).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
                                    order = client.create_margin_order(
                                        symbol= pair,
                                        side=SIDE_SELL,
                                        type=ORDER_TYPE_MARKET,
                                        quantity=quant)
                        except BinanceAPIException as e:
                            traceback.print_exc(file=log)
                            try:
                                # Fallback: same balance re-read with str() tick size.
                                info = client.get_symbol_info(symbol=pair)
                                price_filter = float(info['filters'][0]['tickSize'])
                                price = D.from_float(price).quantize(D(str(price_filter)))
                                minimum = float(info['filters'][2]['minQty']) # 'minQty'
                                dict_balanc = client.get_margin_account()
                                balances = (dict_balanc['userAssets'])
                                for i in balances:
                                    if str(altc) == i['asset'] and float(i['free']) > 0.00:
                                        quant = float(i['free'])
                                        print(quant)
                                        quant = D.from_float(quant).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
                                        print(f'The balance of {altc} wallet is {quant}')
                                        order = client.create_margin_order(
                                            symbol= pair,
                                            side=SIDE_SELL,
                                            type=ORDER_TYPE_MARKET,
                                            quantity=quant)
                                        print(f'Sold {altc} at market for ~{quant*price} USDT')
                            except BinanceAPIException as e:
                                # Give up shorting: buy back and repay the loan.
                                traceback.print_exc(file=log)
                                print(f'***** Error with the order reverted and repyaing margin ************!!!!')
                                order = client.create_margin_order(
                                    symbol= pair,
                                    side=SIDE_BUY,
                                    type=ORDER_TYPE_MARKET,
                                    quantity=quant)
                                sleep(16)
                                print(f' Bought {altc} back, repyaing {altc},..')
                                quant = float(quant)*0.9995
                                quant = D.from_float(quant).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
                                repay = client.repay_margin_loan(asset=altc, amount= quant)
                                print(f'Margin of {altc} of {quant} repayed')
                                print(e)
            # --- Phase 2: place the limit buy-back (TP below entry). ---
            # TP price = entry price scaled by (2 - profit), i.e. mirrored
            # below 1.0 for the short side.
            profitS = float(profit-1)
            profitShort = profitS+1
            short_prof = 2-profitShort
            info = client.get_symbol_info(symbol=pair)
            price = float(price)*float(short_prof)
            price_filter = float(info['filters'][0]['tickSize'])
            price = D.from_float(price).quantize(D(str(price_filter)))
            minimum = float(info['filters'][2]['minQty']) # 'minQty'
            quant = loan #*0.9995?
            minimum = float(info['filters'][2]['minQty']) # 'minQty'
            quant = D.from_float(quant).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
            dict_balanc = client.get_margin_account()
            balances = (dict_balanc['userAssets'])
            for i in balances:
                if str('USDT') == i['asset'] and float(i['free']) > 0.00:
                    quant1 = float(i['free'])
                    print(quant)
                    quant1 = D.from_float(quant1).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
                    print(f'The balance of USDT wallet is {quant1}')
            print(f'TP planned at {2-profit} parts of {quant} at price of: {price} for {altc}')
            try:
                order = client.create_margin_order(
                    symbol= pair,
                    side=SIDE_BUY,
                    type=ORDER_TYPE_LIMIT,
                    timeInForce=TIME_IN_FORCE_GTC,
                    quantity=quant,
                    price=price)
                print(f' *******Limit BUY order made: {quant} of {altc} * @ * {price} *****')
                print(f'Borrowed USDT and bought {altc}, setting TP at {profit}at: {price} for {altc}')
                ShortPlot(pair, profit)
            except Exception as e:
                traceback.print_exc(file=log)
                try:
                    # Retry with a fee-haircut quantity.
                    quant = float(quant)*0.9995
                    quant = D.from_float(quant).quantize(D(str(minimum)), rounding= decimal.ROUND_DOWN)
                    order = client.create_margin_order(
                        symbol= pair,
                        side=SIDE_BUY,
                        type=ORDER_TYPE_LIMIT,
                        timeInForce=TIME_IN_FORCE_GTC,
                        quantity=quant,
                        price=price)
                    print(f' *******Limit BUY order made: {quant} of {altc} * @ * {price} *****')
                    print(f'Borrowed USDT and bought {altc}, setting TP at {profit}at: {price} for {altc}')
                    ShortPlot(pair, profit)
                except Exception as e:
                    traceback.print_exc(file=log)
                    print(e)
        else:
            print('******** Not enough margin left, or profit opportunity *****')
    except Exception as e:
        traceback.print_exc(file=log)
        print(e)
def ShortPlot(pair, profit):
    """Render the 5-minute shorts chart for *pair* and return the plot result.

    Gathers short-entry signal points (SMA 0.55% above close), mirrors the
    `profit` multiple to the short side, then draws candles, SMA, Bollinger
    bands, signal/TP/stop markers and writes the figure to '<pair>_5m.html'.

    Reads module-level globals: `df`, `pairsmas`, `close`, `up_bb`, `low_bb`,
    `sell_signals`, `i`, `altc`, `price`, `go` (plotly.graph_objs), `plot`.
    """
    signal_points = []
    try:
        for avg, prc in zip(pairsmas, close):
            if avg >= 1.0055 * prc:
                signal_points.append([df['time'][i], close[prc]])
            if avg == avg[-1]:
                signal_points.append([df['time'][i], close[prc]])
    except Exception as err:
        # Signal collection is best-effort; report and carry on.
        print(err)
    # Amount target to be gained from Buy2sell (mirrored for the short side).
    profit = 2 - profit  # BTC -s fees = 0.0015*quant traded
    # Stop loss target for Stop out sell limit orders
    stop_out = 1.06  # 0.06 loss At Sell TP

    def Ploting(frame, smas, upper, lower, sigs):
        """Build the candlestick figure with overlays and write it to html."""
        candle = go.Candlestick(
            x=frame['time'],
            open=frame['open'],
            close=frame['close'],
            high=frame['high'],
            low=frame['low'],
            name=str(altc))
        # Moving average and Bollinger band overlays.
        ssma = go.Scatter(
            x=frame['time'],
            y=smas,
            name="SMA",
            line=dict(color=('rgba(102, 207, 255, 50)'), width=1))
        upbb = go.Scatter(
            x=frame['time'],
            y=upper,
            name="Upper BB",
            line=dict(color=('rgba(202, 107, 255)'), dash='solid',
                      shape='spline', smoothing=1, width=2))
        lwbb = go.Scatter(
            x=frame['time'],
            y=lower,
            name="Lower BB",
            line=dict(color=('rgba(202, 107, 255)'), dash='solid',
                      shape='spline', smoothing=1, width=2))
        # Marker layers share the same x positions.
        xs = [pt[0] for pt in sigs]
        shorts = go.Scatter(
            x=xs,
            y=[pt[1] for pt in sigs],
            name="Short Signals",
            mode="markers",
            )
        buyTPs = go.Scatter(
            x=xs,
            y=[pt[1] * profit for pt in sigs],
            name="TP Point",
            mode="markers",
            )
        stops = go.Scatter(
            x=xs,
            y=[pt[1] * stop_out for pt in sigs],
            name="Stops",
            mode="markers",
            )
        traces = go.Data([candle, ssma, upbb, lwbb, shorts, buyTPs, stops])
        # Style and write the html file.
        layout = go.Layout(title=f'{pair}_{price}_Shorts 5m')
        fig = go.Figure(data=traces, layout=layout)
        plot(fig, filename=str(f'{pair}_5m' + '.html'))

    print('Sleeping 25secs')
    sleep(1)
    return Ploting(df, pairsmas, up_bb, low_bb, sell_signals)
def Plot(pair, profit):
    """Render the 5-minute long-side chart for *pair*.

    Collects buy-signal points (SMA 0.55% below close), then draws a
    candlestick chart with VWAP, Bollinger bands, the buy signals, their
    take-profit levels (scaled by `profit`) and stop levels (`stop_out`),
    writing the figure to '<pair>_5m.html'.

    Reads module-level globals: `df`, `pairsmas`, `close`, `up_bb`, `low_bb`,
    `i`, `altc`, `price`, `go` (plotly.graph_objs), `plot`.

    Fix: the inner `Ploting` helper was defined but never invoked, so this
    function silently produced no chart; it is now called, mirroring the
    sibling `ShortPlot`, which ends with `return Ploting(...)`.
    """
    buy_signals = []
    try:
        for item, prce in zip (pairsmas, close):
            if item <= 0.9945*prce:
                buy_signals.append([df['time'][i], close[prce]])
            # NOTE(review): `item` is a scalar, so `item[-1]` raises and is
            # swallowed by the handler below -- the comparison looks like it
            # was meant against `pairsmas[-1]`; left as-is pending confirmation.
            if item == item[-1]:
                buy_signals.append([df['time'][i], close[prce]])
    except Exception as e:
        print(e)
        pass
    # Amount target to be gained from Buy2sell:
    profit = profit # BTC -s fees = 0.0015*quant traded
    # Stop loss target for Stop out sell limit orders
    stop_out = float(0.94) # 0.05 loss At Sell TP
    # plot candlestick chart
    def Ploting(df, pairsmas, up_bb, low_bb, buy_signals):
        # Candles for the raw OHLC series.
        candle = go.Candlestick(
            x = df['time'],
            open = df['open'],
            close = df['close'],
            high = df['high'],
            low = df['low'],
            name = str(altc))
        # plot MAs
        ssma = go.Scatter(
            x = df['time'],
            y = df['VWAP'],
            name = "VWAP",
            line = dict(color = ('rgba(102, 207, 255, 50)'), width = 1))
        upbb = go.Scatter(
            x = df['time'],
            y = up_bb,
            name = "Upper BB",
            line = dict(color = ('rgba(202, 107, 255)'),dash = 'solid',
                shape = 'spline',
                smoothing = 1,
                width = 2))
        lwbb = go.Scatter(
            x = df['time'],
            y = low_bb,
            name = "Lower BB",
            line = dict(color = ('rgba(202, 107, 255)'),dash = 'solid',
                shape = 'spline',
                smoothing = 1,
                width = 2))
        buys = go.Scatter(
            x = [item[0] for item in buy_signals],
            y = [item[1] for item in buy_signals],
            name = "Buy Signals",
            mode = "markers",
            )
        sells = go.Scatter(
            x = [item[0] for item in buy_signals],
            y = [item[1]*profit for item in buy_signals],
            name = "Sell Signals",
            mode = "markers",
            )
        stops = go.Scatter(
            x = [item[0] for item in buy_signals],
            y = [item[1]*stop_out for item in buy_signals],
            name = "Stop Signals",
            mode = "markers",
            )
        data = go.Data([candle, ssma, upbb, lwbb, buys ,sells, stops])
        # style and display
        layout = go.Layout(title = f'{pair}_{price}_ 5m')
        fig = go.Figure(data = data, layout = layout)
        plot(fig, filename = str(f'{pair}_5m' + '.html'))
    # Actually render the chart (this call was missing; ShortPlot does the same).
    return Ploting(df, pairsmas, up_bb, low_bb, buy_signals)
# --- Main trading loop --------------------------------------------------
# For every configured pair: refresh the trend and strategy state, reconcile
# any open order, then act on the direction/position flags.  When neither
# direction is signalled and nothing is time-critical, do housekeeping:
# repay outstanding margin debt and idle for `sleepsec`.
# NOTE(review): `long`, `short`, `noLongPosition`, `noShortPosition`,
# `tme_critical` and `price` are presumably module globals mutated by
# Trend/Strategy/OpenOrder -- none are assigned here; verify.
try:
    while True:
        try:
            for pair in pairs:
                trend = Trend(pair)
                Strategy(pair)
                # NOTE(review): OpenOrder receives the module-level `price`,
                # not a per-pair quote -- confirm this is intended.
                OpenOrder(price)
                if long:
                    if noLongPosition:
                        Long(pair)
                elif short:
                    if noShortPosition:
                        Short(pair)
                elif not tme_critical:
                    print(f'Taking our time')
                    RepayAltc()
                    RepayUSD()
                    sleep(sleepsec)
        except Exception as e:
            # Keep the bot alive on any per-cycle failure; log and continue.
            print(traceback.format_exc())
            traceback.print_exc(file=log)
            print(e)
            pass
except KeyboardInterrupt:
    # Ctrl-C: exit quietly.
    pass
| [
"binance.client.Client",
"traceback.format_exc",
"pyti.bollinger_bands.upper_bollinger_band",
"datetime.datetime.utcnow",
"plotly.graph_objs.Data",
"decimal.Decimal",
"pyti.smoothed_moving_average.smoothed_moving_average",
"decimal.Decimal.from_float",
"plotly.graph_objs.Scatter",
"time.sleep",
... | [((993, 1043), 'binance.client.Client', 'Client', ([], {'api_key': 'bikeys.Pass', 'api_secret': 'bikeys.Sec'}), '(api_key=bikeys.Pass, api_secret=bikeys.Sec)\n', (999, 1043), False, 'from binance.client import Client\n'), ((1663, 1689), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'candles'}), '(data=candles)\n', (1675, 1689), True, 'import pandas as pd\n'), ((2428, 2492), 'pandas.to_datetime', 'pd.to_datetime', (["(df['Time'] * 1000000)"], {'infer_datetime_format': '(True)'}), "(df['Time'] * 1000000, infer_datetime_format=True)\n", (2442, 2492), True, 'import pandas as pd\n'), ((2549, 2563), 'pyti.smoothed_moving_average.smoothed_moving_average', 'sma', (['close', '(14)'], {}), '(close, 14)\n', (2552, 2563), True, 'from pyti.smoothed_moving_average import smoothed_moving_average as sma\n'), ((2579, 2593), 'pyti.smoothed_moving_average.smoothed_moving_average', 'sma', (['close', '(30)'], {}), '(close, 30)\n', (2582, 2593), True, 'from pyti.smoothed_moving_average import smoothed_moving_average as sma\n'), ((2608, 2622), 'pyti.smoothed_moving_average.smoothed_moving_average', 'sma', (['close', '(50)'], {}), '(close, 50)\n', (2611, 2622), True, 'from pyti.smoothed_moving_average import smoothed_moving_average as sma\n'), ((2809, 2824), 'pyti.smoothed_moving_average.smoothed_moving_average', 'sma', (['volume', '(11)'], {}), '(volume, 11)\n', (2812, 2824), True, 'from pyti.smoothed_moving_average import smoothed_moving_average as sma\n'), ((5625, 5651), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5649, 5651), False, 'import datetime\n'), ((6477, 6503), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'candles'}), '(data=candles)\n', (6489, 6503), True, 'import pandas as pd\n'), ((7241, 7305), 'pandas.to_datetime', 'pd.to_datetime', (["(df['time'] * 1000000)"], {'infer_datetime_format': '(True)'}), "(df['time'] * 1000000, infer_datetime_format=True)\n", (7255, 7305), True, 'import pandas as pd\n'), ((7320, 7334), 'numpy.array', 
'np.array', (['open'], {}), '(open)\n', (7328, 7334), True, 'import numpy as np\n'), ((7532, 7546), 'pyti.smoothed_moving_average.smoothed_moving_average', 'sma', (['close', '(50)'], {}), '(close, 50)\n', (7535, 7546), True, 'from pyti.smoothed_moving_average import smoothed_moving_average as sma\n'), ((7756, 7774), 'pyti.bollinger_bands.upper_bollinger_band', 'ubb', (['close', '(7)', '(3.0)'], {}), '(close, 7, 3.0)\n', (7759, 7774), True, 'from pyti.bollinger_bands import upper_bollinger_band as ubb\n'), ((7815, 7833), 'pyti.bollinger_bands.lower_bollinger_band', 'lbb', (['close', '(7)', '(3.0)'], {}), '(close, 7, 3.0)\n', (7818, 7833), True, 'from pyti.bollinger_bands import lower_bollinger_band as lbb\n'), ((48735, 48743), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (48740, 48743), False, 'from time import sleep\n'), ((7468, 7481), 'pyti.smoothed_moving_average.smoothed_moving_average', 'sma', (['close', '(7)'], {}), '(close, 7)\n', (7471, 7481), True, 'from pyti.smoothed_moving_average import smoothed_moving_average as sma\n'), ((47717, 47845), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': '[item[0] for item in sell_signals]', 'y': '[item[1] for item in sell_signals]', 'name': '"""Short Signals"""', 'mode': '"""markers"""'}), "(x=[item[0] for item in sell_signals], y=[item[1] for item in\n sell_signals], name='Short Signals', mode='markers')\n", (47727, 47845), True, 'import plotly.graph_objs as go\n'), ((47936, 48070), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': '[item[0] for item in sell_signals]', 'y': '[(item[1] * profit) for item in sell_signals]', 'name': '"""TP Point"""', 'mode': '"""markers"""'}), "(x=[item[0] for item in sell_signals], y=[(item[1] * profit) for\n item in sell_signals], name='TP Point', mode='markers')\n", (47946, 48070), True, 'import plotly.graph_objs as go\n'), ((48156, 48289), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': '[item[0] for item in sell_signals]', 'y': '[(item[1] * stop_out) for item in 
sell_signals]', 'name': '"""Stops"""', 'mode': '"""markers"""'}), "(x=[item[0] for item in sell_signals], y=[(item[1] * stop_out) for\n item in sell_signals], name='Stops', mode='markers')\n", (48166, 48289), True, 'import plotly.graph_objs as go\n'), ((48374, 48432), 'plotly.graph_objs.Data', 'go.Data', (['[candle, ssma, upbb, lwbb, shorts, buyTPs, stops]'], {}), '([candle, ssma, upbb, lwbb, shorts, buyTPs, stops])\n', (48381, 48432), True, 'import plotly.graph_objs as go\n'), ((48480, 48524), 'plotly.graph_objs.Layout', 'go.Layout', ([], {'title': 'f"""{pair}_{price}_Shorts 5m"""'}), "(title=f'{pair}_{price}_Shorts 5m')\n", (48489, 48524), True, 'import plotly.graph_objs as go\n'), ((48542, 48577), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': 'data', 'layout': 'layout'}), '(data=data, layout=layout)\n', (48551, 48577), True, 'import plotly.graph_objs as go\n'), ((50474, 50598), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': '[item[0] for item in buy_signals]', 'y': '[item[1] for item in buy_signals]', 'name': '"""Buy Signals"""', 'mode': '"""markers"""'}), "(x=[item[0] for item in buy_signals], y=[item[1] for item in\n buy_signals], name='Buy Signals', mode='markers')\n", (50484, 50598), True, 'import plotly.graph_objs as go\n'), ((50690, 50826), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': '[item[0] for item in buy_signals]', 'y': '[(item[1] * profit) for item in buy_signals]', 'name': '"""Sell Signals"""', 'mode': '"""markers"""'}), "(x=[item[0] for item in buy_signals], y=[(item[1] * profit) for\n item in buy_signals], name='Sell Signals', mode='markers')\n", (50700, 50826), True, 'import plotly.graph_objs as go\n'), ((50912, 51050), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': '[item[0] for item in buy_signals]', 'y': '[(item[1] * stop_out) for item in buy_signals]', 'name': '"""Stop Signals"""', 'mode': '"""markers"""'}), "(x=[item[0] for item in buy_signals], y=[(item[1] * stop_out) for\n item in buy_signals], name='Stop 
Signals', mode='markers')\n", (50922, 51050), True, 'import plotly.graph_objs as go\n'), ((51135, 51190), 'plotly.graph_objs.Data', 'go.Data', (['[candle, ssma, upbb, lwbb, buys, sells, stops]'], {}), '([candle, ssma, upbb, lwbb, buys, sells, stops])\n', (51142, 51190), True, 'import plotly.graph_objs as go\n'), ((51238, 51276), 'plotly.graph_objs.Layout', 'go.Layout', ([], {'title': 'f"""{pair}_{price}_ 5m"""'}), "(title=f'{pair}_{price}_ 5m')\n", (51247, 51276), True, 'import plotly.graph_objs as go\n'), ((51294, 51329), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': 'data', 'layout': 'layout'}), '(data=data, layout=layout)\n', (51303, 51329), True, 'import plotly.graph_objs as go\n'), ((20168, 20176), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (20173, 20176), False, 'from time import sleep\n'), ((36061, 36076), 'time.sleep', 'sleep', (['sleepsec'], {}), '(sleepsec)\n', (36066, 36076), False, 'from time import sleep\n'), ((36114, 36143), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (36133, 36143), False, 'import traceback\n'), ((36171, 36179), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (36176, 36179), False, 'from time import sleep\n'), ((45972, 46001), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (45991, 46001), False, 'import traceback\n'), ((29381, 29390), 'time.sleep', 'sleep', (['(21)'], {}), '(21)\n', (29386, 29390), False, 'from time import sleep\n'), ((37679, 37688), 'time.sleep', 'sleep', (['(20)'], {}), '(20)\n', (37684, 37688), False, 'from time import sleep\n'), ((52053, 52082), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (52072, 52082), False, 'import traceback\n'), ((20018, 20037), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (20030, 20037), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((28887, 28906), 'decimal.Decimal.from_float', 'D.from_float', (['price'], 
{}), '(price)\n', (28899, 28906), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((28998, 29017), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (29010, 29017), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((29510, 29539), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (29529, 29539), False, 'import traceback\n'), ((32972, 33001), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (32991, 33001), False, 'import traceback\n'), ((33045, 33054), 'time.sleep', 'sleep', (['(16)'], {}), '(16)\n', (33050, 33054), False, 'from time import sleep\n'), ((37121, 37140), 'decimal.Decimal.from_float', 'D.from_float', (['price'], {}), '(price)\n', (37133, 37140), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((37289, 37308), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (37301, 37308), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((37883, 37912), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (37902, 37912), False, 'import traceback\n'), ((43426, 43445), 'decimal.Decimal.from_float', 'D.from_float', (['price'], {}), '(price)\n', (43438, 43445), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((43674, 43693), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (43686, 43693), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((44938, 44967), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (44957, 44967), False, 'import traceback\n'), ((52016, 52038), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (52036, 52038), False, 'import traceback\n'), ((21139, 21168), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (21158, 21168), False, 'import traceback\n'), ((22072, 22101), 
'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (22091, 22101), False, 'import traceback\n'), ((23455, 23463), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (23460, 23463), False, 'from time import sleep\n'), ((30274, 30283), 'time.sleep', 'sleep', (['(16)'], {}), '(16)\n', (30279, 30283), False, 'from time import sleep\n'), ((32355, 32374), 'decimal.Decimal.from_float', 'D.from_float', (['price'], {}), '(price)\n', (32367, 32374), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((38660, 38669), 'time.sleep', 'sleep', (['(21)'], {}), '(21)\n', (38665, 38669), False, 'from time import sleep\n'), ((11846, 11865), 'decimal.Decimal.from_float', 'D.from_float', (['price'], {}), '(price)\n', (11858, 11865), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((12004, 12028), 'decimal.Decimal.from_float', 'D.from_float', (['orig_quant'], {}), '(orig_quant)\n', (12016, 12028), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((12589, 12618), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (12608, 12618), False, 'import traceback\n'), ((15917, 15936), 'decimal.Decimal.from_float', 'D.from_float', (['price'], {}), '(price)\n', (15929, 15936), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((16075, 16099), 'decimal.Decimal.from_float', 'D.from_float', (['orig_quant'], {}), '(orig_quant)\n', (16087, 16099), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((16694, 16723), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (16713, 16723), False, 'import traceback\n'), ((20581, 20601), 'decimal.Decimal.from_float', 'D.from_float', (['quant2'], {}), '(quant2)\n', (20593, 20601), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((20860, 20879), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (20872, 20879), True, 'from decimal import 
Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((21772, 21792), 'decimal.Decimal.from_float', 'D.from_float', (['loaned'], {}), '(loaned)\n', (21784, 21792), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((31399, 31428), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (31418, 31428), False, 'import traceback\n'), ((33948, 33977), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (33967, 33977), False, 'import traceback\n'), ((44087, 44107), 'decimal.Decimal.from_float', 'D.from_float', (['quant1'], {}), '(quant1)\n', (44099, 44107), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((45777, 45806), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (45796, 45806), False, 'import traceback\n'), ((51949, 51964), 'time.sleep', 'sleep', (['sleepsec'], {}), '(sleepsec)\n', (51954, 51964), False, 'from time import sleep\n'), ((12712, 12720), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (12717, 12720), False, 'from time import sleep\n'), ((13686, 13705), 'decimal.Decimal.from_float', 'D.from_float', (['price'], {}), '(price)\n', (13698, 13705), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((13844, 13868), 'decimal.Decimal.from_float', 'D.from_float', (['orig_quant'], {}), '(orig_quant)\n', (13856, 13868), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((14429, 14458), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (14448, 14458), False, 'import traceback\n'), ((17762, 17781), 'decimal.Decimal.from_float', 'D.from_float', (['price'], {}), '(price)\n', (17774, 17781), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((17920, 17944), 'decimal.Decimal.from_float', 'D.from_float', (['orig_quant'], {}), '(orig_quant)\n', (17932, 17944), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((18539, 18568), 'traceback.print_exc', 
'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (18558, 18568), False, 'import traceback\n'), ((21482, 21511), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (21501, 21511), False, 'import traceback\n'), ((23278, 23297), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (23290, 23297), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((23806, 23835), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (23825, 23835), False, 'import traceback\n'), ((23924, 23934), 'time.sleep', 'sleep', (['(3.3)'], {}), '(3.3)\n', (23929, 23934), False, 'from time import sleep\n'), ((29711, 29730), 'decimal.Decimal.from_float', 'D.from_float', (['price'], {}), '(price)\n', (29723, 29730), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((31977, 31996), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (31989, 31996), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((33258, 33277), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (33270, 33277), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((38158, 38177), 'decimal.Decimal.from_float', 'D.from_float', (['price'], {}), '(price)\n', (38170, 38177), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((39091, 39100), 'time.sleep', 'sleep', (['(16)'], {}), '(16)\n', (39096, 39100), False, 'from time import sleep\n'), ((45068, 45087), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (45080, 45087), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((13122, 13151), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (13141, 13151), False, 'import traceback\n'), ((14552, 14560), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (14557, 14560), False, 'from time import sleep\n'), ((17199, 17228), 
'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (17218, 17228), False, 'import traceback\n'), ((23531, 23550), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (23543, 23550), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((23871, 23893), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (23891, 23893), False, 'import traceback\n'), ((24552, 24561), 'time.sleep', 'sleep', (['(16)'], {}), '(16)\n', (24557, 24561), False, 'from time import sleep\n'), ((27205, 27223), 'decimal.Decimal.from_float', 'D.from_float', (['free'], {}), '(free)\n', (27217, 27223), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((34909, 34938), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (34928, 34938), False, 'import traceback\n'), ((14962, 14991), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (14981, 14991), False, 'import traceback\n'), ((19044, 19073), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (19063, 19073), False, 'import traceback\n'), ((24807, 24836), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (24826, 24836), False, 'import traceback\n'), ((27751, 27780), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (27770, 27780), False, 'import traceback\n'), ((34201, 34220), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (34213, 34220), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((24068, 24088), 'decimal.Decimal.from_float', 'D.from_float', (['loaned'], {}), '(loaned)\n', (24080, 24088), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((24876, 24898), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (24896, 24898), False, 'import traceback\n'), ((25494, 25503), 'time.sleep', 'sleep', 
(['(16)'], {}), '(16)\n', (25499, 25503), False, 'from time import sleep\n'), ((27820, 27842), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (27840, 27842), False, 'import traceback\n'), ((30713, 30732), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (30725, 30732), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((35875, 35904), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (35894, 35904), False, 'import traceback\n'), ((39461, 39476), 'decimal.Decimal', 'D', (['price_filter'], {}), '(price_filter)\n', (39462, 39476), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((40640, 40669), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (40659, 40669), False, 'import traceback\n'), ((25785, 25814), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (25804, 25814), False, 'import traceback\n'), ((35119, 35138), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (35131, 35138), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((39432, 39451), 'decimal.Decimal.from_float', 'D.from_float', (['price'], {}), '(price)\n', (39444, 39451), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((40072, 40080), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (40077, 40080), False, 'from time import sleep\n'), ((40209, 40229), 'decimal.Decimal.from_float', 'D.from_float', (['quant1'], {}), '(quant1)\n', (40221, 40229), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((24984, 25002), 'decimal.Decimal.from_float', 'D.from_float', (['loan'], {}), '(loan)\n', (24996, 25002), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((25858, 25880), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (25878, 25880), False, 'import traceback\n'), ((26482, 26491), 'time.sleep', 'sleep', (['(16)'], {}), 
'(16)\n', (26487, 26491), False, 'from time import sleep\n'), ((42163, 42192), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (42182, 42192), False, 'import traceback\n'), ((42630, 42639), 'time.sleep', 'sleep', (['(16)'], {}), '(16)\n', (42635, 42639), False, 'from time import sleep\n'), ((26791, 26820), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'log'}), '(file=log)\n', (26810, 26820), False, 'import traceback\n'), ((40907, 40926), 'decimal.Decimal.from_float', 'D.from_float', (['price'], {}), '(price)\n', (40919, 40926), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((25974, 25992), 'decimal.Decimal.from_float', 'D.from_float', (['loan'], {}), '(loan)\n', (25986, 25992), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((26868, 26890), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (26888, 26890), False, 'import traceback\n'), ((42825, 42844), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (42837, 42844), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n'), ((41521, 41540), 'decimal.Decimal.from_float', 'D.from_float', (['quant'], {}), '(quant)\n', (41533, 41540), True, 'from decimal import Decimal as D, ROUND_DOWN, ROUND_UP\n')] |
import sys
import os
import numpy as np
input_file = sys.argv[1]
output_file = sys.argv[2]
def _dataset_info_standard(txt_labels):
with open(txt_labels, 'r') as f:
images_list = f.readlines()
file_names = []
labels = []
for row in images_list:
row = row.split(' ')
file_names.append(row[0])
labels.append(int(row[1]))
return file_names, labels
# --- Oversample so every class reaches the size of the largest class ---
names, labels = _dataset_info_standard(input_file)
print(f"There are {len(names)} images in the input file")
np_labels = np.array(labels)
np_names = np.array(names)
labels_set = set(labels)
# Per-class sample counts.
# NOTE(review): indexing count_classes by the raw label value assumes the
# labels are exactly 0..num_classes-1 (contiguous); a sparse label set
# would raise an IndexError here -- confirm against the label files used.
count_classes = np.zeros(len(labels_set),dtype=np.uint32)
for lbl in labels_set:
    count_classes[lbl]= len(np_labels[np_labels==lbl])
max_count = count_classes.max()
balanced = True
for lbl in labels_set:
    if count_classes[lbl] < max_count:
        balanced = False
print("Classes counts:", count_classes)
if balanced:
    print("Classes are already balanced!!")
    sys.exit(0)  # nothing to do; no output file is written in this case
# Write every original sample, then pad each class by sampling (with
# replacement) from its own images until it matches the majority class.
with open(output_file, "w") as out_f:
    for lbl in labels_set:
        names_this_class = np_names[np_labels==lbl]
        for n in names_this_class:
            out_f.write(f"{n} {lbl}\n")
        while count_classes[lbl] < max_count:
            random_n = np.random.choice(names_this_class)
            out_f.write(f"{random_n} {lbl}\n")
            count_classes[lbl] += 1
print("Final classes counts:", count_classes)
print("Done")
| [
"numpy.random.choice",
"numpy.array",
"sys.exit"
] | [((526, 542), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (534, 542), True, 'import numpy as np\n'), ((554, 569), 'numpy.array', 'np.array', (['names'], {}), '(names)\n', (562, 569), True, 'import numpy as np\n'), ((971, 982), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (979, 982), False, 'import sys\n'), ((1246, 1280), 'numpy.random.choice', 'np.random.choice', (['names_this_class'], {}), '(names_this_class)\n', (1262, 1280), True, 'import numpy as np\n')] |
import gym
import robo_gym
import math
import numpy as np
import pytest
# One pytest param per UR model; only ur5 runs in the per-commit suite,
# the remaining models are exercised nightly.
ur_models = [
    pytest.param('ur3', marks=pytest.mark.nightly),
    pytest.param('ur3e', marks=pytest.mark.nightly),
    pytest.param('ur5', marks=pytest.mark.commit),
    pytest.param('ur5e', marks=pytest.mark.nightly),
    pytest.param('ur10', marks=pytest.mark.nightly),
    pytest.param('ur10e', marks=pytest.mark.nightly),
    pytest.param('ur16e', marks=pytest.mark.nightly),
]
@pytest.fixture(scope='module', params=ur_models)
def env(request):
    """Module-scoped simulated UR environment, parametrized over ur_models.

    The spawned simulation is killed once every test in the module has run.
    """
    sim_env = gym.make('EndEffectorPositioningURSim-v0', ip='robot-servers', ur_model=request.param)
    sim_env.request_param = request.param
    yield sim_env
    sim_env.kill_sim()
@pytest.mark.commit
def test_initialization(env):
    """Smoke test: env reports the right UR model and emits valid observations."""
    assert env.ur.model == env.request_param
    env.reset()
    env.step([0,0,0,0,0])
    done = False
    steps_left = 10
    while steps_left > 0 and not done:
        observation, _, done, _ = env.step(env.action_space.sample())
        assert env.observation_space.contains(observation)
        steps_left -= 1
@pytest.mark.nightly
@pytest.mark.flaky(reruns=3)
def test_self_collision(env):
    """Driving the arm into itself must end the episode with a collision."""
    # Per-model joint targets (first five joints, radians) known to self-collide.
    self_colliding_targets = {
        'ur3': [0.0, 0.0, -3.14, -1.77, 1.0],
        'ur3e': [0.0, -1.88, 2.8, -0.75, -1.88],
        'ur5': [0.0, -1.26, -3.14, 0.0, 0.0],
        'ur5e': [0.0, -0.50, -3.14, 3.14, 0.0],
        'ur10': [0.0, -1.5, 3.14, 0.0, 0.0],
        'ur10e': [0.0, -0.15, -2.83, -2.51, 1.63],
        'ur16e': [0.0, -1.15, 2.9, -0.19, 0.42],
    }
    env.reset()
    action = env.ur.normalize_joint_values(self_colliding_targets[env.ur.model])
    done = False
    while not done:
        _, _, done, info = env.step(action)
    assert info['final_status'] == 'collision'
@pytest.mark.nightly
@pytest.mark.flaky(reruns=3)
def test_collision_with_ground(env):
    """Driving the arm into the floor must end the episode with a collision."""
    # Per-model joint targets (first five joints, radians) that hit the ground.
    ground_hitting_targets = {
        'ur3': [0.0, 2.64, -1.95, -2.98, 0.41],
        'ur3e': [1.13, 1.88, -2.19, -3.43, 2.43],
        'ur5': [0.0, 1.0, 1.8, 0.0, 0.0],
        'ur5e': [0.0, 3.52, -2.58, 0.0, 0.0],
        'ur10': [0.0, 1.0, 1.15, 0.0, 0.0],
        'ur10e': [-2.14, -0.13, 0.63, -1.13, 1.63],
        'ur16e': [0.0, -0.15, 1.32, 0.0, 1.63],
    }
    env.reset()
    action = env.ur.normalize_joint_values(ground_hitting_targets[env.ur.model])
    done = False
    while not done:
        _, _, done, info = env.step(action)
    assert info['final_status'] == 'collision'
@pytest.mark.nightly
def test_reset_joint_positions(env):
    """reset() must place the six joints at the requested configuration."""
    requested = [0.2, -2.5, 1.1, -2.0, -1.2, 1.2]
    state = env.reset(joint_positions=requested)
    expected = env.ur.normalize_joint_values(requested)
    assert np.isclose(expected, state[3:9], atol=0.1).all()
@pytest.mark.commit
def test_object_coordinates(env):
    """Check the polar target coordinates reported in the observation.

    With the arm in the up-right configuration and the target placed at a
    fixed offset from the end effector, the observed (r, theta, phi) must
    be the same for every UR model.
    """
    params = {
        #? robot up-right, target_coord_in_ee_frame 0.0, -0.3, 0.2, coordinates of target calculated using official dimensions from DH parameters.
        #? first value is d4+d6
        #? second value is: d1+a2+a3+d5
        'ur3': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.194 +0.2), (0.692 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},
        'ur3e': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.223 +0.2), (0.694 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},
        'ur5': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.191 +0.2), (1.001 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},
        'ur5e': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.233 +0.2), (1.079 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},
        'ur10': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.256 +0.2), (1.428 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},
        'ur10e': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.291 +0.2), (1.485 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},
        'ur16e': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.291 +0.2), (1.139 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}}
    }
    state = env.reset(joint_positions=params[env.ur.model]['joint_positions'], ee_target_pose=params[env.ur.model]['object_coords'])
    # Observation layout: state[0] is r, state[1:3] is (theta, phi).
    assert np.isclose([params[env.ur.model]['polar_coords']['r']], state[0], atol=0.05).all()
    assert np.isclose([params[env.ur.model]['polar_coords']['theta'], params[env.ur.model]['polar_coords']['phi']], state[1:3], atol=0.2).all()
# Parameter rows for test_fixed_joints:
# (env id, fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3, ur_model)
test_ur_fixed_joints = [
    ('EndEffectorPositioningURSim-v0', True, False, False, False, False, False, 'ur3'),    # fixed shoulder_pan
    ('EndEffectorPositioningURSim-v0', False, True, False, False, False, False, 'ur3e'),   # fixed shoulder_lift
    ('EndEffectorPositioningURSim-v0', False, False, False, False, False, True, 'ur5'),    # fixed wrist_3
    ('EndEffectorPositioningURSim-v0', True, False, True, False, False, False, 'ur5e'),    # fixed Base and Elbow
    ('EndEffectorPositioningURSim-v0', False, False, True, False, False, False, 'ur10'),   # fixed elbow
    ('EndEffectorPositioningURSim-v0', False, False, False, True, False, False, 'ur10e'),  # fixed wrist_1
    ('EndEffectorPositioningURSim-v0', False, False, False, False, True, False, 'ur16e'),  # fixed wrist_2
]
@pytest.mark.nightly
@pytest.mark.parametrize('env_name, fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3, ur_model', test_ur_fixed_joints)
@pytest.mark.flaky(reruns=3)
def test_fixed_joints(env_name, fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3, ur_model):
    """Joints flagged as fixed must not move while random actions are applied.

    Replaces six copy-pasted `if flag: assert isclose(...)` blocks with a
    single loop over the flags (same checks, same tolerances).
    """
    env = gym.make(env_name, ip='robot-servers', fix_base=fix_base, fix_shoulder=fix_shoulder, fix_elbow=fix_elbow,
                fix_wrist_1=fix_wrist_1, fix_wrist_2=fix_wrist_2, fix_wrist_3=fix_wrist_3, ur_model=ur_model)
    state = env.reset()
    initial_joint_positions = state[3:9]
    # Apply the same random action 20 times.
    action = env.action_space.sample()
    for _ in range(20):
        state, _, _, _ = env.step(action)
    joint_positions = state[3:9]
    # Flag order matches the joint order in the observation:
    # base, shoulder, elbow, wrist_1, wrist_2, wrist_3.
    fixed_flags = (fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3)
    for joint_idx, is_fixed in enumerate(fixed_flags):
        if is_fixed:
            assert math.isclose(initial_joint_positions[joint_idx], joint_positions[joint_idx], abs_tol=0.05)
    env.kill_sim()
@pytest.mark.commit
def test_success(env):
    """Sending the EE exactly onto the target must end the episode with success."""
    # Target pose equal to the EE position reached by the up-right configuration.
    target_pose_per_model = {
        'ur3': [0.0, 0.194, 0.692, 0.0, 0.0, 0.0],
        'ur3e': [0.0, 0.223, 0.694, 0.0, 0.0, 0.0],
        'ur5': [0.0, 0.191, 1.001, 0.0, 0.0, 0.0],
        'ur5e': [0.0, 0.233, 1.079, 0.0, 0.0, 0.0],
        'ur10': [0.0, 0.256, 1.428, 0.0, 0.0, 0.0],
        'ur10e': [0.0, 0.291, 1.485, 0.0, 0.0, 0.0],
        'ur16e': [0.0, 0.291, 1.139, 0.0, 0.0, 0.0],
    }
    env.reset(joint_positions=[0.0, -1.3, 0.0, -1.3, 0.0, 0.0],
              ee_target_pose=target_pose_per_model[env.ur.model])
    action = env.ur.normalize_joint_values([0.0, -1.57, 0.0, -1.57, 0.0])
    done = False
    while not done:
        _, _, done, info = env.step(action)
    assert info['final_status'] == 'success'
@pytest.mark.commit
def test_continue_on_success(env):
    """After a success, reset(continue_on_success=True) must keep the arm pose."""
    # Target pose equal to the EE position reached by the up-right configuration.
    target_pose_per_model = {
        'ur3': [0.0, 0.194, 0.692, 0.0, 0.0, 0.0],
        'ur3e': [0.0, 0.223, 0.694, 0.0, 0.0, 0.0],
        'ur5': [0.0, 0.191, 1.001, 0.0, 0.0, 0.0],
        'ur5e': [0.0, 0.233, 1.079, 0.0, 0.0, 0.0],
        'ur10': [0.0, 0.256, 1.428, 0.0, 0.0, 0.0],
        'ur10e': [0.0, 0.291, 1.485, 0.0, 0.0, 0.0],
        'ur16e': [0.0, 0.291, 1.139, 0.0, 0.0, 0.0],
    }
    env.reset(joint_positions=[0.0, -1.3, 0.0, -1.3, 0.0, 0.0],
              ee_target_pose=target_pose_per_model[env.ur.model])
    action = env.ur.normalize_joint_values([0.0, -1.57, 0.0, -1.57, 0.0])
    done = False
    while not done:
        state, _, done, info = env.step(action)
    assert info['final_status'] == 'success'
    pose_at_success = state[3:9]
    state = env.reset(continue_on_success=True)
    assert np.isclose(pose_at_success, state[3:9], atol=0.05).all()
| [
"pytest.mark.flaky",
"numpy.isclose",
"math.isclose",
"pytest.param",
"pytest.mark.parametrize",
"pytest.fixture",
"gym.make"
] | [((525, 573), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'params': 'ur_models'}), "(scope='module', params=ur_models)\n", (539, 573), False, 'import pytest\n'), ((1142, 1169), 'pytest.mark.flaky', 'pytest.mark.flaky', ([], {'reruns': '(3)'}), '(reruns=3)\n', (1159, 1169), False, 'import pytest\n'), ((1952, 1979), 'pytest.mark.flaky', 'pytest.mark.flaky', ([], {'reruns': '(3)'}), '(reruns=3)\n', (1969, 1979), False, 'import pytest\n'), ((5832, 5983), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""env_name, fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3, ur_model"""', 'test_ur_fixed_joints'], {}), "(\n 'env_name, fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3, ur_model'\n , test_ur_fixed_joints)\n", (5855, 5983), False, 'import pytest\n'), ((5975, 6002), 'pytest.mark.flaky', 'pytest.mark.flaky', ([], {'reruns': '(3)'}), '(reruns=3)\n', (5992, 6002), False, 'import pytest\n'), ((87, 133), 'pytest.param', 'pytest.param', (['"""ur3"""'], {'marks': 'pytest.mark.nightly'}), "('ur3', marks=pytest.mark.nightly)\n", (99, 133), False, 'import pytest\n'), ((150, 197), 'pytest.param', 'pytest.param', (['"""ur3e"""'], {'marks': 'pytest.mark.nightly'}), "('ur3e', marks=pytest.mark.nightly)\n", (162, 197), False, 'import pytest\n'), ((214, 259), 'pytest.param', 'pytest.param', (['"""ur5"""'], {'marks': 'pytest.mark.commit'}), "('ur5', marks=pytest.mark.commit)\n", (226, 259), False, 'import pytest\n'), ((276, 323), 'pytest.param', 'pytest.param', (['"""ur5e"""'], {'marks': 'pytest.mark.nightly'}), "('ur5e', marks=pytest.mark.nightly)\n", (288, 323), False, 'import pytest\n'), ((340, 387), 'pytest.param', 'pytest.param', (['"""ur10"""'], {'marks': 'pytest.mark.nightly'}), "('ur10', marks=pytest.mark.nightly)\n", (352, 387), False, 'import pytest\n'), ((404, 452), 'pytest.param', 'pytest.param', (['"""ur10e"""'], {'marks': 'pytest.mark.nightly'}), "('ur10e', marks=pytest.mark.nightly)\n", 
(416, 452), False, 'import pytest\n'), ((469, 517), 'pytest.param', 'pytest.param', (['"""ur16e"""'], {'marks': 'pytest.mark.nightly'}), "('ur16e', marks=pytest.mark.nightly)\n", (481, 517), False, 'import pytest\n'), ((602, 693), 'gym.make', 'gym.make', (['"""EndEffectorPositioningURSim-v0"""'], {'ip': '"""robot-servers"""', 'ur_model': 'request.param'}), "('EndEffectorPositioningURSim-v0', ip='robot-servers', ur_model=\n request.param)\n", (610, 693), False, 'import gym\n'), ((6130, 6339), 'gym.make', 'gym.make', (['env_name'], {'ip': '"""robot-servers"""', 'fix_base': 'fix_base', 'fix_shoulder': 'fix_shoulder', 'fix_elbow': 'fix_elbow', 'fix_wrist_1': 'fix_wrist_1', 'fix_wrist_2': 'fix_wrist_2', 'fix_wrist_3': 'fix_wrist_3', 'ur_model': 'ur_model'}), "(env_name, ip='robot-servers', fix_base=fix_base, fix_shoulder=\n fix_shoulder, fix_elbow=fix_elbow, fix_wrist_1=fix_wrist_1, fix_wrist_2\n =fix_wrist_2, fix_wrist_3=fix_wrist_3, ur_model=ur_model)\n", (6138, 6339), False, 'import gym\n'), ((6637, 6711), 'math.isclose', 'math.isclose', (['initial_joint_positions[0]', 'joint_positions[0]'], {'abs_tol': '(0.05)'}), '(initial_joint_positions[0], joint_positions[0], abs_tol=0.05)\n', (6649, 6711), False, 'import math\n'), ((6748, 6822), 'math.isclose', 'math.isclose', (['initial_joint_positions[1]', 'joint_positions[1]'], {'abs_tol': '(0.05)'}), '(initial_joint_positions[1], joint_positions[1], abs_tol=0.05)\n', (6760, 6822), False, 'import math\n'), ((6856, 6930), 'math.isclose', 'math.isclose', (['initial_joint_positions[2]', 'joint_positions[2]'], {'abs_tol': '(0.05)'}), '(initial_joint_positions[2], joint_positions[2], abs_tol=0.05)\n', (6868, 6930), False, 'import math\n'), ((6966, 7040), 'math.isclose', 'math.isclose', (['initial_joint_positions[3]', 'joint_positions[3]'], {'abs_tol': '(0.05)'}), '(initial_joint_positions[3], joint_positions[3], abs_tol=0.05)\n', (6978, 7040), False, 'import math\n'), ((7076, 7150), 'math.isclose', 'math.isclose', 
(['initial_joint_positions[4]', 'joint_positions[4]'], {'abs_tol': '(0.05)'}), '(initial_joint_positions[4], joint_positions[4], abs_tol=0.05)\n', (7088, 7150), False, 'import math\n'), ((7186, 7260), 'math.isclose', 'math.isclose', (['initial_joint_positions[5]', 'joint_positions[5]'], {'abs_tol': '(0.05)'}), '(initial_joint_positions[5], joint_positions[5], abs_tol=0.05)\n', (7198, 7260), False, 'import math\n'), ((4801, 4877), 'numpy.isclose', 'np.isclose', (["[params[env.ur.model]['polar_coords']['r']]", 'state[0]'], {'atol': '(0.05)'}), "([params[env.ur.model]['polar_coords']['r']], state[0], atol=0.05)\n", (4811, 4877), True, 'import numpy as np\n'), ((4894, 5025), 'numpy.isclose', 'np.isclose', (["[params[env.ur.model]['polar_coords']['theta'], params[env.ur.model][\n 'polar_coords']['phi']]", 'state[1:3]'], {'atol': '(0.2)'}), "([params[env.ur.model]['polar_coords']['theta'], params[env.ur.\n model]['polar_coords']['phi']], state[1:3], atol=0.2)\n", (4904, 5025), True, 'import numpy as np\n'), ((9109, 9159), 'numpy.isclose', 'np.isclose', (['joint_positions', 'state[3:9]'], {'atol': '(0.05)'}), '(joint_positions, state[3:9], atol=0.05)\n', (9119, 9159), True, 'import numpy as np\n')] |
# coding=utf-8
# 20160510
# __author__ = 'xhcao'
import numpy as np
import scipy.spatial as sp
# A=self.method.distance_correction_for_one_matrix(X, dimension)\
# B=self.method.distance_correction_for_one_matrix(Y, dimension)\
# corr_matrix[i,j]=self.method.distance_correlation(A,B) "
def distance_correction_for_one_matrix(x, dimension):
    """Return the double-centered pairwise-distance matrix of one sample.

    x: 2-D array with one row per observation and one column per feature.
    dimension: order p of the Minkowski distance used for the pairwise
        distances (p=2 gives the Euclidean distance).

    Computes A_kl = a_kl - a_k* - a_*l + a_**, i.e. the centered distance
    matrix used by the distance-correlation statistic, where a_k* / a_*l
    are row / column means and a_** is the grand mean.
    """
    akl = sp.distance.cdist(x, x, 'minkowski', p=dimension)
    row_means = akl.mean(axis=1, keepdims=True)   # a_k*
    col_means = akl.mean(axis=0, keepdims=True)   # a_*l
    grand_mean = akl.mean()                       # a_**
    # Vectorized double centering: replaces the original per-row/per-column
    # Python loops and the np.ones((n, n)) broadcasting workaround with a
    # single broadcast expression (same result, O(n^2) NumPy work only).
    return akl - row_means - col_means + grand_mean
def distance_correlation(A, B):
    # Distance correlation between two (double-centered) distance matrices:
    # dCov(A, B) / sqrt(dVar(A) * dVar(B)); returns 0 when either variance
    # vanishes, so the ratio is always well defined.
    cov_ab = np.mean(A * B)
    var_a = np.mean(A * A)
    var_b = np.mean(B * B)
    if var_a * var_b <= 0:
        return 0
    return cov_ab / np.sqrt(var_a * var_b)
| [
"numpy.mean",
"numpy.sqrt",
"numpy.ones",
"scipy.spatial.distance.cdist",
"numpy.sum",
"numpy.zeros"
] | [((438, 487), 'scipy.spatial.distance.cdist', 'sp.distance.cdist', (['x', 'x', '"""minkowski"""'], {'p': 'dimension'}), "(x, x, 'minkowski', p=dimension)\n", (455, 487), True, 'import scipy.spatial as sp\n'), ((539, 550), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (547, 550), True, 'import numpy as np\n'), ((632, 643), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (640, 643), True, 'import numpy as np\n'), ((725, 737), 'numpy.mean', 'np.mean', (['akl'], {}), '(akl)\n', (732, 737), True, 'import numpy as np\n'), ((926, 940), 'numpy.mean', 'np.mean', (['(A * B)'], {}), '(A * B)\n', (933, 940), True, 'import numpy as np\n'), ((949, 963), 'numpy.mean', 'np.mean', (['(A * A)'], {}), '(A * A)\n', (956, 963), True, 'import numpy as np\n'), ((972, 986), 'numpy.mean', 'np.mean', (['(B * B)'], {}), '(B * B)\n', (979, 986), True, 'import numpy as np\n'), ((593, 610), 'numpy.sum', 'np.sum', (['akl[i, :]'], {}), '(akl[i, :])\n', (599, 610), True, 'import numpy as np\n'), ((686, 703), 'numpy.sum', 'np.sum', (['akl[:, i]'], {}), '(akl[:, i])\n', (692, 703), True, 'import numpy as np\n'), ((794, 809), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (801, 809), True, 'import numpy as np\n'), ((829, 844), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (836, 844), True, 'import numpy as np\n'), ((1023, 1041), 'numpy.sqrt', 'np.sqrt', (['(A_A * B_B)'], {}), '(A_A * B_B)\n', (1030, 1041), True, 'import numpy as np\n'), ((756, 771), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (763, 771), True, 'import numpy as np\n')] |
'''
Created on Aug 29, 2016
@author: <NAME> <<EMAIL>>
'''
from __future__ import division
import unittest
import numpy as np
from .. import random
class TestRandom(unittest.TestCase):
    """Unit tests for the helpers in the package-local ``random`` module."""
    _multiprocess_can_split_ = True # let nose know that tests can run parallel
    def test_random_log_uniform(self):
        """ tests the random_log_uniform function """
        # 1000 draws must all fall inside the requested interval [1, 10]
        data = random.log_uniform(v_min=1, v_max=10, size=1000)
        self.assertEqual(data.size, 1000)
        self.assertTrue(data.min() >= 1)
        self.assertTrue(data.max() <= 10)
    def test_take_combinations(self):
        """ tests the take_combinations function """
        num = 10
        data = np.arange(num)
        # test length of the result
        res = list(random.take_combinations(data, r=1, num=5))
        self.assertEqual(len(res), 5)
        # asking for more draws than exist must cap at the available count
        res = list(random.take_combinations(data, r=1, num=2*num))
        self.assertEqual(len(res), num)
        # num='all' enumerates every combination
        res = list(random.take_combinations(data, r=1, num='all'))
        self.assertEqual(len(res), num)
        res = list(random.take_combinations(data, r=2, num='all'))
        self.assertEqual(len(res), num*(num - 1)//2)
        # test larger case where a different method is used
        res = list(random.take_combinations(data, r=3, num=10))
        self.assertEqual(len(res), 10)
        # test content of the result
        res = list(random.take_combinations(data, r=1, num=5))
        for value in res:
            self.assertIn(value, data)
    def test_take_product(self):
        """ test the take_product function """
        num = 5
        data = np.arange(num)
        res = list(random.take_product(data, r=1, num=5))
        self.assertEqual(len(res), 5)
        # asking for more draws than exist must cap at the available count
        res = list(random.take_product(data, r=1, num=2*num))
        self.assertEqual(len(res), num)
        # num='all' enumerates the full Cartesian product (num**r tuples)
        res = list(random.take_product(data, r=1, num='all'))
        self.assertEqual(len(res), num)
        res = list(random.take_product(data, r=2, num='all'))
        self.assertEqual(len(res), num**2)
        # test larger case where a different method is used
        res = list(random.take_product(data, r=3, num=10))
        self.assertEqual(len(res), 10)
        # test content of the result
        res = list(random.take_product(data, r=1, num=5))
        for value in res:
            self.assertIn(value, data)
# Allow running this test module directly, outside the nose/pytest runner.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"numpy.arange"
] | [((2401, 2416), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2414, 2416), False, 'import unittest\n'), ((683, 697), 'numpy.arange', 'np.arange', (['num'], {}), '(num)\n', (692, 697), True, 'import numpy as np\n'), ((1628, 1642), 'numpy.arange', 'np.arange', (['num'], {}), '(num)\n', (1637, 1642), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Light each LED in sequence, and repeat.
import fastopc, time
import numpy as np
numLEDs = 512
# Client for what is presumably an Open Pixel Control server on port 7890.
client = fastopc.FastOPC('localhost:7890')
# NOTE(review): this flat buffer is immediately shadowed by the Pixels
# instance created further down -- it looks like dead code.
pixels = np.zeros([numLEDs, 3])
class Pixels():
    """Per-LED RGB state with asymmetric exponential smoothing.

    update() blends a new target frame into the current state, using
    alphaRise for channel values that are increasing and alphaDecay for
    values that are decreasing. getArrayForDisplay() returns the state
    with values below the display floor blanked to zero.
    """
    def __init__(self, numLEDs, floor):
        self.numLEDs = numLEDs
        self.floor = floor  # channel values below this are displayed as off
        self.array = np.zeros([self.numLEDs, 3])
    def update(self, arrayNew, alphaRise, alphaDecay):
        """Blend arrayNew into the internal state with per-element alpha."""
        alpha = arrayNew - self.array
        alpha[alpha > 0.0] = alphaRise
        alpha[alpha <= 0.0] = alphaDecay
        self.array = alpha*arrayNew + (1.0-alpha)*self.array
    def getArrayForDisplay(self):
        """Return a display copy of the state with sub-floor values zeroed.

        Bug fix: operate on a copy. The original assigned
        ``returnArray = self.array`` (an alias), so zeroing sub-floor
        values corrupted the internal smoothing state as a side effect of
        merely displaying it.
        """
        returnArray = self.array.copy()
        returnArray[returnArray < self.floor] = 0
        return returnArray
pixels = Pixels(numLEDs, 20)
arrayTheo = np.zeros_like(pixels.array)
# Build the pattern: for each of the 8 strips of 64 LEDs, light the first
# i+1 pixels of strip i in blue.
for i in range(0,8):
    base=64*i
    arrayTheo[base:base+i+1] = [0,0,255]
# Blink the pattern forever: one second on, one second off.
while True:
    pixels.update(arrayTheo, 1.0, 0.0)
    client.putPixels(0, pixels.getArrayForDisplay())
    time.sleep(1)
    client.putPixels(0, np.zeros_like(pixels.getArrayForDisplay()))
    time.sleep(1)
| [
"fastopc.FastOPC",
"numpy.zeros",
"numpy.zeros_like",
"time.sleep"
] | [((130, 163), 'fastopc.FastOPC', 'fastopc.FastOPC', (['"""localhost:7890"""'], {}), "('localhost:7890')\n", (145, 163), False, 'import fastopc, time\n'), ((174, 196), 'numpy.zeros', 'np.zeros', (['[numLEDs, 3]'], {}), '([numLEDs, 3])\n', (182, 196), True, 'import numpy as np\n'), ((715, 742), 'numpy.zeros_like', 'np.zeros_like', (['pixels.array'], {}), '(pixels.array)\n', (728, 742), True, 'import numpy as np\n'), ((915, 928), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (925, 928), False, 'import fastopc, time\n'), ((995, 1008), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1005, 1008), False, 'import fastopc, time\n'), ((312, 339), 'numpy.zeros', 'np.zeros', (['[self.numLEDs, 3]'], {}), '([self.numLEDs, 3])\n', (320, 339), True, 'import numpy as np\n')] |
import numpy as np
# Demo: boolean masks vs. np.nonzero index arrays on a 3x3 matrix.
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(a>3) #Where is A>3?
'''
[[False False False]
 [ True True True]
 [ True True True]]
gives the above.
So in 0th list, none are true. Then you have 1st list, 0th is true. 1st list 1th is true. 1st list 2nd is true
so you have 1-0, 1-1, 1-2, where 1st # is the list # and 2nd # is the index in that list
In the 2nd list (3rd one), 0th is true, 1st etc.
'''
# np.nonzero returns one index array per axis (rows, then columns).
print(np.nonzero(a>3))
'''
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
gives above. So the first array is the list #, and 2nd array is the index within that list
so list 1 number 0, list 1 number 1, list 1 number 2
list 2 number 0, list 2 number 1, list 2 number 2
'''
"numpy.array",
"numpy.nonzero"
] | [((23, 66), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (31, 66), True, 'import numpy as np\n'), ((443, 460), 'numpy.nonzero', 'np.nonzero', (['(a > 3)'], {}), '(a > 3)\n', (453, 460), True, 'import numpy as np\n')] |
import numpy as np
def unit_vector(k, N):
    """Return the length-N standard basis vector with a 1.0 at index k."""
    basis = np.zeros(N)
    basis[k] = 1.0
    return basis
| [
"numpy.zeros"
] | [((53, 64), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (61, 64), True, 'import numpy as np\n')] |
from __future__ import print_function
from fenics import *
from mshr import *
import numpy as np
from scipy import integrate
set_log_level(LogLevel.INFO)
T = 500.0 # final time
num_steps = 1000 # number of time steps
dt = T / num_steps # time step size
mu = 16 # dynamic viscosity
rho = 1 # density
save_int = 5
inflowVel=8
# mesh parameters
degree = 2
Lx = 5000
Ly = 5000
nx = 72
ny = 72
RD=126
#WTG parameters
numturbs = 9
#number of inflow direction bins
bins = 1
WTGexp = 6.
radius = RD/2.
thickness = RD/10.
numRefine = 1
A=RD # weird for 2D
HH=80
initExtent=1.
mlDenom=5
restart = False
randStart = False
gridStart = True
optimize = False
loadnuT = False
mesh = RectangleMesh(Point(-Lx/2., -Ly/2.), Point(Lx/2., Ly/2.), nx, ny)
site_x = 1000
site_y = 1000
refine_x = 1100
refine_y = 1100
def refine_mesh(mesh, refine_x, refine_y):
    """Refine cells in a rectangular band around the turbine site.

    A cell is marked when its midpoint has x > -refine_x and |y| < refine_y.

    Parameters:
        mesh: FEniCS mesh to refine.
        refine_x, refine_y: half-extents of the refinement region.

    Returns:
        The refined mesh (a new mesh object; the input is not modified).
    """
    # (removed unused local `h = mesh.hmin()`)
    cell_markers = MeshFunction('bool', mesh, mesh.topology().dim())
    cell_markers.set_all(False)
    for cell in cells(mesh):
        midpoint = cell.midpoint()
        # midpoint[0] is x, midpoint[1] is y
        if midpoint[0] > -refine_x and abs(midpoint[1]) < refine_y:
            cell_markers[cell] = True
    return refine(mesh, cell_markers)
def refine_mesh2(mesh, refine_x, refine_y):
    """Refine cells inside a disc centred at the origin.

    A cell is marked when its midpoint satisfies
    x**2 + y**2 < refine_x**2 + refine_y**2.

    Returns:
        The refined mesh (a new mesh object; the input is not modified).
    """
    # (removed unused local `h = mesh.hmin()`)
    cell_markers = MeshFunction('bool', mesh, mesh.topology().dim())
    cell_markers.set_all(False)
    limit_sq = refine_x**2 + refine_y**2  # hoisted loop invariant
    for cell in cells(mesh):
        midpoint = cell.midpoint()
        if midpoint[0]**2 + midpoint[1]**2 < limit_sq:
            cell_markers[cell] = True
    return refine(mesh, cell_markers)
for nums in range(numRefine):
print('refining mesh')
mesh=refine_mesh2(mesh, refine_x, refine_y)
h = mesh.hmin()
Re = Lx*8/mu
print(Re)
print(mesh.hmin())
print(inflowVel*dt/mesh.hmin())
alpha = 7*pi/64
# Define function spaces
V = VectorFunctionSpace(mesh, 'P', 2)
Q = FunctionSpace(mesh, 'P', 1)
print(V.dim())
def WTGdist(x, y):
    """Smoothed super-Gaussian turbine footprint centred at the origin.

    Uses the module-level constants `thickness`, `radius` and `WTGexp`.
    """
    shape = (x / thickness) ** WTGexp + (y / radius) ** WTGexp
    return np.exp(-shape)
def createLayout(numturbs):
    """Build the initial turbine layout.

    Depending on the module-level flags `randStart` / `gridStart`, positions
    are drawn uniformly inside the site or placed on a fixed grid/line.
    Hub height is always the module constant HH.

    Parameters:
        numturbs (int): number of turbines; the gridStart branch only
            handles 1, 2, 3, 4, 9 and 16 (other counts return empty lists).

    Returns:
        (mx, my, mz): lists of FEniCS Constants for x, y and hub height.
    """
    mx=[]
    my=[]
    mz=[]
    if randStart == True:
        # Uniform random placement, kept `radius` away from the site edge.
        for i in range(numturbs):
            mx.append(Constant(np.random.uniform(low=-(site_x - radius),high=(site_x - radius))))
            my.append(Constant(np.random.uniform(low=-(site_y - radius), high=(site_y - radius))))
            mz.append(Constant(HH))
    elif gridStart ==True:
        if numturbs == 16:
            # 4x4 grid scaled by initExtent.
            rows = 4
            cols = 4
            xpos = np.linspace(-initExtent*(site_x - radius),initExtent*(site_x - radius),cols)
            ypos = np.linspace(-initExtent*(site_y - radius),initExtent*(site_y - radius),rows)
            for i in range(rows):
                for j in range(cols):
                    mx.append(Constant(xpos[j]))
                    my.append(Constant(ypos[i]))
                    # # some starting noise sometimes helps
                    # mx.append(Constant(xpos[j]+5.*np.random.randn()))
                    # my.append(Constant(ypos[i]+5.*np.random.randn()))
                    mz.append(Constant(HH))
        if numturbs == 9:
            # 3x3 grid spanning the full site extent.
            rows = 3
            cols = 3
            xpos = np.linspace(-site_x,site_x,cols)
            ypos = np.linspace(-site_y,site_y,rows)
            for i in range(rows):
                for j in range(cols):
                    mx.append(Constant(xpos[j]))
                    my.append(Constant(ypos[i]))
                    # # some starting noise sometimes helps
                    # mx.append(Constant(xpos[j]+5.*np.random.randn()))
                    # my.append(Constant(ypos[i]+5.*np.random.randn()))
                    mz.append(Constant(HH))
        if numturbs == 1:
            mx.append(Constant(-1500))
            my.append(Constant(0))
            mz.append(Constant(HH))
        if numturbs == 2:
            # Two turbines in a row, 7 rotor diameters apart.
            mx.append(Constant(-1500))
            mx.append(Constant(-1500 + 7*RD))
            my.append(Constant(0))
            my.append(Constant(0))
            mz.append(Constant(HH))
            mz.append(Constant(HH))
        if numturbs == 3:
            mx.append(Constant(-1000))
            mx.append(Constant(0))
            mx.append(Constant(1000))
            my.append(Constant(0))
            my.append(Constant(0))
            my.append(Constant(0))
            mz.append(Constant(HH))
            mz.append(Constant(HH))
            mz.append(Constant(HH))
        if numturbs == 4:
            mx.append(Constant(-1200))
            mx.append(Constant(-400))
            mx.append(Constant(400))
            mx.append(Constant(1200))
            my.append(Constant(0))
            my.append(Constant(0))
            my.append(Constant(0))
            my.append(Constant(0))
            mz.append(Constant(HH))
            mz.append(Constant(HH))
            mz.append(Constant(HH))
            mz.append(Constant(HH))
    return mx, my, mz
def createRotatedTurbineForce(mx,my,ma,A,beta,numturbs,alpha,V):
    """Assemble the actuator-disc turbine forcing field on function space V.

    Each turbine contributes two shifted super-Gaussian lobes (one per rotor
    half), oriented along its yaw angle, scaled by the classic actuator-disc
    factor 4*a/(1-a) and normalised by `beta`.

    Parameters:
        mx, my: lists of turbine x/y positions (Constants).
        ma: list of per-turbine axial induction factors.
        A: rotor cross-section scale.
        beta: normalisation integral of the smoothed turbine footprint.
        numturbs (int): number of turbines.
        alpha: inflow-direction rotation applied to turbine positions.
        V: vector function space used to project the rotor-normal direction.

    NOTE(review): reads the module globals `mesh` and `myaw` (per-turbine
    yaw) instead of taking them as parameters — confirm before reusing.

    Returns:
        UFL expression for the turbine force field.
    """
    x=SpatialCoordinate(mesh)
    tf = Function(V)
    for i in range(numturbs):
        # Unit direction of the rotor thrust for turbine i (yawed frame).
        WTGbase = project(Expression(("cos(yaw)","-sin(yaw)"),yaw=myaw[i],degree=2),V)
        # WTGbase = project(Expression(("0","1"),yaw=myaw[i],degree=2),V)
        #rotation
        # Rotate the turbine position by the inflow direction alpha.
        mxrot = cos(alpha)*mx[i] - sin(alpha)*my[i]
        myrot = sin(alpha)*mx[i] + cos(alpha)*my[i]
        # mxrot=mx[i]
        # myrot=my[i]
        x_centered=x[0]-mxrot
        y_centered=x[1]-myrot
        # Coordinates in the turbine's yawed frame.
        x_centered_rotated = x_centered*cos(myaw[i]) + y_centered*sin(myaw[i])
        y_centered_rotated = -x_centered*sin(myaw[i]) + y_centered*cos(myaw[i])
        # tf = tf+ 0.0001*exp(-(((x[0] - mx[i])/thickness)**WTGexp +(((x[1] - my[i])**2)/radius**2)**WTGexp))*WTGbase
        # tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-((x_centered/thickness)**WTGexp + ((y_centered-radius/2.)/(radius/2.))**WTGexp))*WTGbase
        # tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-((x_centered/thickness)**WTGexp + ((y_centered+radius/2.)/(radius/2.))**WTGexp))*WTGbase
        # Two lobes offset by +/- radius/2 along the rotor plane.
        tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-((x_centered_rotated/thickness)**WTGexp + ((y_centered_rotated-radius/2.)/(radius/2.))**WTGexp))*WTGbase
        tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-((x_centered_rotated/thickness)**WTGexp + ((y_centered_rotated+radius/2.)/(radius/2.))**WTGexp))*WTGbase
        # tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-(((x[0]*cos(myaw[i]) - x - mxrot)/thickness)**WTGexp + ((x[1] - myrot-radius/2.)/(radius/2.))**WTGexp))*WTGbase
        # tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-(((x[0]*cos(myaw[i]) - mxrot)/thickness)**WTGexp + ((x[1] - myrot+radius/2.)/(radius/2.))**WTGexp))*WTGbase
    return tf
#boundary conditions
class walls(SubDomain):
    """Lateral (top/bottom) walls of the rectangular domain, y = +/- Ly/2."""
    def inside(self, x, on_boundary):
        # Squared form matches both signs of y at once.
        return near(x[1]**2 - (Ly/2.)**2, 0.) and on_boundary
class inflow(SubDomain):
    """Inflow boundary: the left edge of the domain, x = -Lx/2."""
    def inside(self, x, on_boundary):
        return near(x[0],-(Lx/2.)) and on_boundary
class outflow(SubDomain):
    """Outflow boundary: the right edge of the domain, x = Lx/2."""
    def inside(self, x, on_boundary):
        return near(x[0],Lx/2.) and on_boundary
wavenum=2*pi/(Ly/4.)
wavenum2=2*pi/(Ly/4.)
freq=2*pi/200.
wavenummod=wavenum
wavenum2mod=wavenum2
freqmod=freq
inflowExpr=Expression(("inflowVel + 0.1*sin(freq*t + wavenum*x[1])","0. + 0.1*sin(freq*t + wavenum2*x[1]) "), inflowVel=inflowVel,t=0,wavenum=wavenum,wavenum2=wavenum2,freq=freq,degree=2)
# inflowExpr=Expression(("inflowVel + 0.05*sin(2*pi*t/100. + wavenum*x[1]) + perturbx*0.2*sin(2*pi*t/100. + wavenum2*x[1]+pi/2.)","0. + 0.01*sin(2*pi*t/100. + wavenum*x[1])+ perturby*0.2*sin(2*pi*t/100. + wavenum2*x[1])"), inflowVel=inflowVel,t=0,perturbx=0,perturby=0,wavenum=wavenum,wavenum2=wavenum2,degree=2)
# inflowExpr=Expression(("inflowVel + 0.5*sin(2*pi*t/100. + wavenum*x[1])","0. + 0.25*sin(2*pi*t/100.)"), inflowVel=inflowVel,t=0,wavenum=wavenum,degree=2)
# inflowExpr=Expression(("inflowVel","0."), inflowVel=inflowVel,degree=2)
# lateral BC
bcu_inflow = DirichletBC(V, inflowExpr, inflow())
# bcu_walls = DirichletBC(V, Expression(("0","0."), inflowVel=inflowVel,degree=2), walls())
bcp_outflow = DirichletBC(Q, Constant(0), outflow())
# bc1a = DirichletBC(V.sub(1), Constant(0.0), NoSlipBoundary())
# inflow BC
# bc2 = DirichletBC(V, Constant((inflowVel,0.0)), InflowBoundary())
# bc2a = DirichletBC(VQ.sub(0).sub(0), Constant(8.), InflowBoundary())
# bcp = [DirichletBC(Q, Constant(0), OutflowBoundary())]
bcp=[bcp_outflow]
# bcu = [bcu_inflow,bcu_walls]
bcu = [bcu_inflow]
# Define trial and test functions
u = TrialFunction(V)
v = TestFunction(V)
p = TrialFunction(Q)
q = TestFunction(Q)
# Define functions for solutions at previous and current time steps
u_n = Function(V)
u_ = Function(V)
p_n = Function(Q)
p_ = Function(Q)
# Define expressions used in variational forms
U = 0.5*(u_n + u)
n = FacetNormal(mesh)
f = Constant((0, 0))
k = Constant(dt)
mu = Constant(mu)
rho = Constant(rho)
mx,my,mz = createLayout(numturbs)
ma=[Constant(mm) for mm in 0.33*np.ones(numturbs)]
# right hand rule from above
# myaw=[Constant(pi/8.),Constant(0),Constant(0)]
yaw=0
myaw = [Constant(mm) for mm in (yaw*pi/180.)*np.ones(numturbs)]
beta = integrate.dblquad(WTGdist,-3*radius,3*radius,lambda x: -3*radius,lambda x: 3*radius)
B=beta[0]
f = createRotatedTurbineForce(mx,my,ma,A,B,numturbs,alpha,V)
# Define symmetric gradient
def epsilon(u):
    """Symmetric gradient (strain-rate tensor) of velocity u."""
    return sym(nabla_grad(u))
# Define stress tensor
def sigma(u, p):
    """Newtonian stress tensor 2*mu*eps(u) - p*I (mu from module scope)."""
    return 2*mu*epsilon(u) - p*Identity(len(u))
# Define variational problem for step 1
F1 = rho*dot((u - u_n) / k, v)*dx \
+ rho*dot(dot(u_n, nabla_grad(u_n)), v)*dx \
+ inner(sigma(U, p_n), epsilon(v))*dx \
+ dot(p_n*n, v)*ds - dot(mu*nabla_grad(U)*n, v)*ds \
+ dot(f*(cos(myaw[0])**2*u_n[0]*u_n[0]+sin(myaw[0])**2*u_n[1]*u_n[1]), v)*dx # inner? other form of vel?
a1 = lhs(F1)
L1 = rhs(F1)
# Define variational problem for step 2
a2 = dot(nabla_grad(p), nabla_grad(q))*dx
L2 = dot(nabla_grad(p_n), nabla_grad(q))*dx - (1/k)*div(u_)*q*dx
# Define variational problem for step 3
a3 = dot(u, v)*dx
L3 = dot(u_, v)*dx - k*dot(nabla_grad(p_ - p_n), v)*dx
# Assemble matrices
A1 = assemble(a1)
A2 = assemble(a2)
A3 = assemble(a3)
# Apply boundary conditions to matrices
[bc.apply(A1) for bc in bcu]
[bc.apply(A2) for bc in bcp]
# Create XDMF files for visualization output
# ufile = File('output/fields/velocity_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.pvd')
# pfile = File('output/fields/pressure_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.pvd')
xdmffile_u = XDMFFile('output/velocity_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
xdmffile_p = XDMFFile('output/pressure_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
# # xdmffile_tf = XDMFFile('2DDynamic/turbine_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
# # Create time series (for use in reaction_system.py)
# timeseries_u = TimeSeries('output/velocity_series_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
# timeseries_p = TimeSeries('output/pressure_series_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
# # Save mesh to file (for use in reaction_system.py)
# File('navier_stokes_cylinder/cylinder.xml.gz') << mesh
# Create progress bar
# progress = Progress('Time-stepping')
# set_log_level(PROGRESS)
# ufile = File('output/u_'+str(float(mu))+'.pvd')
# pfile = File('output/p_'+str(float(mu))+'.pvd')
# DoF=len(u_.vector()[:])
# snapshots = np.zeros((DoF,int(num_steps/save_int)))
# uInterp = Function(V)
# uInterp=project(Expression(("x[0]","x[1]"),degree=2),V)
# basePositions=uInterp.vector()[:]
# np.save('output/basePositions_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha),basePositions)
# Time-stepping
t = 0
count=0
for n in range(num_steps):
# Update current time
t += dt
# bcu_inflow.perturbx=.1*np.random.rand()
# bcu_inflow.perturby=.1*np.random.rand()
inflowExpr.t=t
# wavenummod = wavenummod + .01*np.random.randn()*wavenum
# wavenum2mod = wavenum2mod+ .01*np.random.randn()*wavenum2
# freqmod = freqmod+ .01*np.random.randn()*wavenum2
# inflowExpr.wavenum=wavenummod
# inflowExpr.wavenum2=wavenum2mod
# inflowExpr.freq=freqmod
bcu_inflow = DirichletBC(V, inflowExpr, inflow())
bcu=[bcu_inflow]
# Step 1: Tentative velocity step
b1 = assemble(L1)
[bc.apply(b1) for bc in bcu]
solve(A1, u_.vector(), b1, 'bicgstab', 'hypre_amg')
# Step 2: Pressure correction step
b2 = assemble(L2)
[bc.apply(b2) for bc in bcp]
solve(A2, p_.vector(), b2, 'bicgstab', 'hypre_amg')
# Step 3: Velocity correction step
b3 = assemble(L3)
solve(A3, u_.vector(), b3, 'cg', 'sor')
# Update previous solution
u_n.assign(u_)
p_n.assign(p_)
if n % save_int ==0:
# Save solution to file (XDMF/HDF5)
# ufile << u_
# pfile << p_
xdmffile_u.write(u_, t)
xdmffile_p.write(p_, t)
# xdmffile_tf.write(project(f,V),t)
# # Save nodal values to file
# timeseries_u.store(u_.vector(), t)
# timeseries_p.store(p_.vector(), t)
# snapshots[:,count]=u_.vector()[:]
print(t)
# print(wavenummod/wavenum)
# print(wavenum2mod/wavenum2)
# print(freqmod/freq)
count+=1
# # Update progress bar
# progress.update(t / T)
# print('u max:', u_.vector().array().max())
# Hold plot
# interactive()
# np.save('output/snapshots'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha),snapshots) | [
"numpy.ones",
"numpy.exp",
"numpy.linspace",
"numpy.random.uniform",
"scipy.integrate.dblquad",
"numpy.round"
] | [((9214, 9315), 'scipy.integrate.dblquad', 'integrate.dblquad', (['WTGdist', '(-3 * radius)', '(3 * radius)', '(lambda x: -3 * radius)', '(lambda x: 3 * radius)'], {}), '(WTGdist, -3 * radius, 3 * radius, lambda x: -3 * radius, \n lambda x: 3 * radius)\n', (9231, 9315), False, 'from scipy import integrate\n'), ((2061, 2122), 'numpy.exp', 'np.exp', (['(-((x / thickness) ** WTGexp + (y / radius) ** WTGexp))'], {}), '(-((x / thickness) ** WTGexp + (y / radius) ** WTGexp))\n', (2067, 2122), True, 'import numpy as np\n'), ((9037, 9054), 'numpy.ones', 'np.ones', (['numturbs'], {}), '(numturbs)\n', (9044, 9054), True, 'import numpy as np\n'), ((9187, 9204), 'numpy.ones', 'np.ones', (['numturbs'], {}), '(numturbs)\n', (9194, 9204), True, 'import numpy as np\n'), ((2586, 2672), 'numpy.linspace', 'np.linspace', (['(-initExtent * (site_x - radius))', '(initExtent * (site_x - radius))', 'cols'], {}), '(-initExtent * (site_x - radius), initExtent * (site_x - radius),\n cols)\n', (2597, 2672), True, 'import numpy as np\n'), ((2682, 2768), 'numpy.linspace', 'np.linspace', (['(-initExtent * (site_y - radius))', '(initExtent * (site_y - radius))', 'rows'], {}), '(-initExtent * (site_y - radius), initExtent * (site_y - radius),\n rows)\n', (2693, 2768), True, 'import numpy as np\n'), ((3265, 3299), 'numpy.linspace', 'np.linspace', (['(-site_x)', 'site_x', 'cols'], {}), '(-site_x, site_x, cols)\n', (3276, 3299), True, 'import numpy as np\n'), ((3317, 3351), 'numpy.linspace', 'np.linspace', (['(-site_y)', 'site_y', 'rows'], {}), '(-site_y, site_y, rows)\n', (3328, 3351), True, 'import numpy as np\n'), ((2266, 2329), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-(site_x - radius))', 'high': '(site_x - radius)'}), '(low=-(site_x - radius), high=site_x - radius)\n', (2283, 2329), True, 'import numpy as np\n'), ((2364, 2427), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-(site_y - radius))', 'high': '(site_y - radius)'}), '(low=-(site_y - radius), 
high=site_y - radius)\n', (2381, 2427), True, 'import numpy as np\n'), ((10710, 10722), 'numpy.round', 'np.round', (['Re'], {}), '(Re)\n', (10718, 10722), True, 'import numpy as np\n'), ((10841, 10853), 'numpy.round', 'np.round', (['Re'], {}), '(Re)\n', (10849, 10853), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import sys
import os
import pdb
# Cosine similarity
import numpy as np
from numpy import dot
from numpy.linalg import norm
from PIL import Image
import torch
from facenet_pytorch import MTCNN, InceptionResnetV1
#******************** Constants
K_MODEL_IMAGE_SIZE = (160, 160) # (Width, Height) # for now it is same as true but should check (256, 256)
get_model_img_size = lambda: K_MODEL_IMAGE_SIZE
K_PAIR_ENCODING = {'same': 0, 'diff': 1}
get_pair_encoding = lambda: K_PAIR_ENCODING
# Cosine similarity
cosine_similarity = lambda a, b: dot(a, b) / ((norm(a) * norm(b)))
#----------------------------------------------------------------------------
# Resize image
def load_and_resize_image(img_path, model_image_size):
    """Load an image from disk and resize it for the model.

    Parameters:
    -----------
    img_path (str): full path of the image file.
    model_image_size (tuple): target (width, height) of the model input.
        Note: PIL sizes are (width, height); the returned array is
        (height, width, channel).

    Returns:
    --------
    numpy.ndarray: float32 array of shape (H x W x C).
    """
    img = Image.open(img_path)
    if img.mode == 'RGBA':
        # Flatten the alpha channel onto a white background.
        # https://stackoverflow.com/questions/9166400/convert-rgba-png-to-rgb-with-pil
        rgb_canvas = Image.new("RGB", img.size, (255, 255, 255))
        rgb_canvas.paste(img, mask=img.split()[3])  # channel 3 is alpha
        img = rgb_canvas
    resized = img.resize(model_image_size, Image.BICUBIC)
    return np.array(resized, dtype='float32')
#----------------------------------------------------------------------------
# Load the pretrained model
def get_pretrained_face_cropper():
    """Return a pre-trained MTCNN face detector/cropper."""
    return MTCNN()
#----------------------------------------------------------------------------
def get_pretrained_feature_extractor():
    """Download a pre-trained InceptionResnetV1 (vggface2 weights) in eval mode.

    `classify` is disabled so the network returns embedding features rather
    than the output of the logit layer.
    """
    extractor = InceptionResnetV1(pretrained='vggface2').eval()
    extractor.classify = False
    return extractor
#----------------------------------------------------------------------------
def get_feature_extractor_info():
    """Return a tuple of (pretrained feature extractor, its expected input
    image size as a (width, height) tuple)."""
    return get_pretrained_feature_extractor(), K_MODEL_IMAGE_SIZE
#----------------------------------------------------------------------------
def extract_features_with_nomalization(img_path, model_size, data_normalizer, features_extractor_model, type_tensor=False):
    """Resize the image to `model_size`, normalize it, and run the feature
    extractor (InceptionResnetV1) on the result.

    Returns the feature vector as a detached tensor when `type_tensor` is
    True, otherwise as a numpy array.
    """
    resized = load_and_resize_image(img_path, model_size)
    normalized = data_normalizer(resized)
    # (H, W, C) -> (C, H, W), then add a leading batch dimension.
    batch = torch.tensor(normalized).permute(2, 0, 1).unsqueeze(0).float()
    feature = features_extractor_model(batch).detach()
    return feature if type_tensor else feature.numpy()
def extract_features_after_cropping_face(img_path, face_cropper_model, features_extractor_model, type_tensor=False):
    """Crop the face with MTCNN, then extract features with InceptionResnetV1.

    Parameters:
    -----------
    img_path (str): full path of the image file.
    face_cropper_model: MTCNN-style callable returning a cropped face tensor,
        or None when no face is found.
    features_extractor_model: embedding network applied to the cropped face.
    type_tensor (bool): return a detached tensor when True, else a numpy array.

    Raises:
    -------
    ValueError: if no face could be detected in the image.
    """
    img = Image.open(img_path)
    img_cropped = face_cropper_model(img)
    if img_cropped is None:
        # Previously dropped into pdb.set_trace() here (a leftover debugger
        # trap that hangs non-interactive runs); fail loudly instead.
        raise ValueError(f'No face detected in image: {img_path}')
    img_feature = features_extractor_model(img_cropped.unsqueeze(0))
    if type_tensor:
        return img_feature.detach()
    return img_feature.detach().numpy()
#----------------------------------------------------------------------------
def extract_features(data_tensor, feature_extractor_model, type_tensor=True):
    """Run the feature extractor on `data_tensor` and return its output.

    The input is detached and squeezed to drop size-1 dimensions before the
    model is applied (a 1-D return for a 1-D input).
    """
    prepared = data_tensor.unsqueeze(0).detach().squeeze()
    return feature_extractor_model(prepared)
#----------------------------------------------------------------------------
def compute_diff_vector(img1_feature, img2_feature, type_chi=True):
    """Pairwise difference vector between two feature vectors.

    With `type_chi` (default) the chi-square form (a - b)**2 / (a + b) is
    used, following "Deep Face Recognition: A Survey"
    (https://arxiv.org/abs/1804.06655); otherwise a plain subtraction.
    """
    diff = img1_feature - img2_feature
    if not type_chi:
        return diff
    return diff ** 2 / (img1_feature + img2_feature)
| [
"PIL.Image.open",
"facenet_pytorch.InceptionResnetV1",
"PIL.Image.new",
"facenet_pytorch.MTCNN",
"numpy.array",
"numpy.dot",
"torch.tensor",
"pdb.set_trace",
"numpy.linalg.norm"
] | [((1167, 1187), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1177, 1187), False, 'from PIL import Image\n'), ((1601, 1641), 'numpy.array', 'np.array', (['resized_image'], {'dtype': '"""float32"""'}), "(resized_image, dtype='float32')\n", (1609, 1641), True, 'import numpy as np\n'), ((1893, 1900), 'facenet_pytorch.MTCNN', 'MTCNN', ([], {}), '()\n', (1898, 1900), False, 'from facenet_pytorch import MTCNN, InceptionResnetV1\n'), ((2977, 3000), 'torch.tensor', 'torch.tensor', (['img_norml'], {}), '(img_norml)\n', (2989, 3000), False, 'import torch\n'), ((3484, 3504), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (3494, 3504), False, 'from PIL import Image\n'), ((573, 582), 'numpy.dot', 'dot', (['a', 'b'], {}), '(a, b)\n', (576, 582), False, 'from numpy import dot\n'), ((1310, 1355), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'image.size', '(255, 255, 255)'], {}), "('RGB', image.size, (255, 255, 255))\n", (1319, 1355), False, 'from PIL import Image\n'), ((3572, 3587), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3585, 3587), False, 'import pdb\n'), ((587, 594), 'numpy.linalg.norm', 'norm', (['a'], {}), '(a)\n', (591, 594), False, 'from numpy.linalg import norm\n'), ((597, 604), 'numpy.linalg.norm', 'norm', (['b'], {}), '(b)\n', (601, 604), False, 'from numpy.linalg import norm\n'), ((2110, 2150), 'facenet_pytorch.InceptionResnetV1', 'InceptionResnetV1', ([], {'pretrained': '"""vggface2"""'}), "(pretrained='vggface2')\n", (2127, 2150), False, 'from facenet_pytorch import MTCNN, InceptionResnetV1\n')] |
from typing import List, Any, Dict
import numpy as np
from swd.bonuses import BONUSES, ImmediateBonus, SCIENTIFIC_SYMBOLS_RANGE
from swd.cards_board import AGES, CardsBoard
from swd.entity_manager import EntityManager
from swd.game import Game, GameState
from swd.player import Player
class StateFeatures:
    """Static helpers that flatten an swd GameState into feature vectors."""

    @staticmethod
    def extract_state_features(state: GameState) -> List[int]:
        """Flatten the game state into a flat list of numeric features:
        age, current player, progress-token indicators, per-player coins /
        unbuilt-wonder indicators / bonuses, military state, game status,
        and the (flipped) card board for the current age.
        """
        features = [
            state.age,
            state.current_player_index
        ]
        features.extend([int(x in state.progress_tokens) for x in EntityManager.progress_token_names()])
        # features.extend([int(x in state.discard_pile) for x in range(EntityManager.cards_count())])
        # features.append(int(state.is_double_turn))
        for player_state in state.players_state:
            features.append(player_state.coins)
            # wonders look like (id, built) pairs; keep ids of the unbuilt ones
            unbuilt_wonders = [x[0] for x in player_state.wonders if x[1] is None]
            features.extend([int(x in unbuilt_wonders) for x in range(EntityManager.wonders_count())])
            features.extend(list(player_state.bonuses))
        features.append(state.military_track_state.conflict_pawn)
        features.extend(list(state.military_track_state.military_tokens))
        features.append(state.game_status.value)
        # features.extend(list(state.cards_board_state.card_places.flat))
        # Only board positions used by the current age, flipped vertically.
        indices = np.flip(AGES[state.age] > 0, axis=0)
        features.extend(list(np.flip(state.cards_board_state.card_places, axis=0)[indices]))
        return features

    @staticmethod
    def extract_state_features_dict(state: GameState) -> Dict[str, Any]:
        """Like extract_state_features, but returns a structured dict
        (named fields instead of one flat vector), plus the discard pile
        and the list of currently available card ids.
        """
        features = {
            "age": state.age,
            "current_player": state.current_player_index,
            "tokens": [int(x in state.progress_tokens) for x in EntityManager.progress_token_names()],
            "discard_pile": state.discard_pile,
            "military_pawn": state.military_track_state.conflict_pawn,
            "military_tokens": list(state.military_track_state.military_tokens),
            "game_status": state.game_status.value,
            "players": []
        }
        for player_state in state.players_state:
            unbuilt_wonders = [x[0] for x in player_state.wonders if x[1] is None]
            player = {
                "coins": player_state.coins,
                "unbuilt_wonders": [int(x in unbuilt_wonders) for x in range(EntityManager.wonders_count())],
                "bonuses": list(player_state.bonuses)
            }
            features["players"].append(player)
        indices = np.flip(AGES[state.age] > 0, axis=0)
        available_cards = CardsBoard.available_cards(state.cards_board_state)
        features["cards_board"] = list(np.flip(state.cards_board_state.card_places, axis=0)[indices])
        features["available_cards"] = list(map(lambda x: x[0], available_cards))
        return features

    @staticmethod
    def extract_manual_state_features(state: GameState) -> List[int]:
        """Hand-crafted feature set: progress tokens, per-player coins,
        points, wonder counts (doubled for theology / double-turn wonders),
        assets, scientific-symbol count, the conflict pawn, and indicators
        for the currently available cards.
        """
        features = []
        features.extend([int(x in state.progress_tokens) for x in EntityManager.progress_token_names()])
        for i, player_state in enumerate(state.players_state):
            features.append(player_state.coins)
            features.extend(list(Game.points(state, i)))
            unbuilt_wonders = [x[0] for x in player_state.wonders if x[1] is None]
            features.append(len(unbuilt_wonders))
            # With theology every wonder grants a double turn; otherwise
            # count only wonders with an explicit DOUBLE_TURN bonus.
            if player_state.bonuses[BONUSES.index("theology")] > 0:
                features.append(len(unbuilt_wonders))
            else:
                features.append(len([x for x in unbuilt_wonders
                                     if ImmediateBonus.DOUBLE_TURN in EntityManager.wonder(x).immediate_bonus]))
            assets = Player.assets(player_state, Player.resources(state.players_state[1 - i]), None)
            features.extend(list(assets.resources))
            features.extend(list(assets.resources_cost))
            features.append(np.count_nonzero(player_state.bonuses[SCIENTIFIC_SYMBOLS_RANGE]))
        features.append(state.military_track_state.conflict_pawn)
        available_cards = [x[0] for x in CardsBoard.available_cards(state.cards_board_state)]
        features.extend([int(card_id in available_cards) for card_id in range(EntityManager.cards_count())])
        return features
| [
"numpy.flip",
"swd.cards_board.CardsBoard.available_cards",
"swd.entity_manager.EntityManager.wonder",
"swd.entity_manager.EntityManager.wonders_count",
"numpy.count_nonzero",
"swd.entity_manager.EntityManager.cards_count",
"swd.entity_manager.EntityManager.progress_token_names",
"swd.game.Game.points... | [((1370, 1406), 'numpy.flip', 'np.flip', (['(AGES[state.age] > 0)'], {'axis': '(0)'}), '(AGES[state.age] > 0, axis=0)\n', (1377, 1406), True, 'import numpy as np\n'), ((2562, 2598), 'numpy.flip', 'np.flip', (['(AGES[state.age] > 0)'], {'axis': '(0)'}), '(AGES[state.age] > 0, axis=0)\n', (2569, 2598), True, 'import numpy as np\n'), ((2625, 2676), 'swd.cards_board.CardsBoard.available_cards', 'CardsBoard.available_cards', (['state.cards_board_state'], {}), '(state.cards_board_state)\n', (2651, 2676), False, 'from swd.cards_board import AGES, CardsBoard\n'), ((2716, 2768), 'numpy.flip', 'np.flip', (['state.cards_board_state.card_places'], {'axis': '(0)'}), '(state.cards_board_state.card_places, axis=0)\n', (2723, 2768), True, 'import numpy as np\n'), ((3770, 3814), 'swd.player.Player.resources', 'Player.resources', (['state.players_state[1 - i]'], {}), '(state.players_state[1 - i])\n', (3786, 3814), False, 'from swd.player import Player\n'), ((3959, 4023), 'numpy.count_nonzero', 'np.count_nonzero', (['player_state.bonuses[SCIENTIFIC_SYMBOLS_RANGE]'], {}), '(player_state.bonuses[SCIENTIFIC_SYMBOLS_RANGE])\n', (3975, 4023), True, 'import numpy as np\n'), ((4134, 4185), 'swd.cards_board.CardsBoard.available_cards', 'CardsBoard.available_cards', (['state.cards_board_state'], {}), '(state.cards_board_state)\n', (4160, 4185), False, 'from swd.cards_board import AGES, CardsBoard\n'), ((551, 587), 'swd.entity_manager.EntityManager.progress_token_names', 'EntityManager.progress_token_names', ([], {}), '()\n', (585, 587), False, 'from swd.entity_manager import EntityManager\n'), ((1436, 1488), 'numpy.flip', 'np.flip', (['state.cards_board_state.card_places'], {'axis': '(0)'}), '(state.cards_board_state.card_places, axis=0)\n', (1443, 1488), True, 'import numpy as np\n'), ((1790, 1826), 'swd.entity_manager.EntityManager.progress_token_names', 'EntityManager.progress_token_names', ([], {}), '()\n', (1824, 1826), False, 'from swd.entity_manager import 
EntityManager\n'), ((3063, 3099), 'swd.entity_manager.EntityManager.progress_token_names', 'EntityManager.progress_token_names', ([], {}), '()\n', (3097, 3099), False, 'from swd.entity_manager import EntityManager\n'), ((3247, 3268), 'swd.game.Game.points', 'Game.points', (['state', 'i'], {}), '(state, i)\n', (3258, 3268), False, 'from swd.game import Game, GameState\n'), ((3440, 3465), 'swd.bonuses.BONUSES.index', 'BONUSES.index', (['"""theology"""'], {}), "('theology')\n", (3453, 3465), False, 'from swd.bonuses import BONUSES, ImmediateBonus, SCIENTIFIC_SYMBOLS_RANGE\n'), ((4265, 4292), 'swd.entity_manager.EntityManager.cards_count', 'EntityManager.cards_count', ([], {}), '()\n', (4290, 4292), False, 'from swd.entity_manager import EntityManager\n'), ((997, 1026), 'swd.entity_manager.EntityManager.wonders_count', 'EntityManager.wonders_count', ([], {}), '()\n', (1024, 1026), False, 'from swd.entity_manager import EntityManager\n'), ((2395, 2424), 'swd.entity_manager.EntityManager.wonders_count', 'EntityManager.wonders_count', ([], {}), '()\n', (2422, 2424), False, 'from swd.entity_manager import EntityManager\n'), ((3678, 3701), 'swd.entity_manager.EntityManager.wonder', 'EntityManager.wonder', (['x'], {}), '(x)\n', (3698, 3701), False, 'from swd.entity_manager import EntityManager\n')] |
import functools
import gc
import json
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import bmm
def read_data(path, chunksize=None):
    """Read a trips CSV, JSON-decoding every column whose name contains
    'POLYLINE'.

    Returns a DataFrame, or a chunked reader when `chunksize` is given.
    """
    # Peek at the first rows to learn which columns hold encoded polylines.
    header_chunk = pd.read_csv(path, chunksize=10).get_chunk()
    converters = {name: json.loads
                  for name in header_chunk.columns if 'POLYLINE' in name}
    return pd.read_csv(path, converters=converters, chunksize=chunksize)
def clear_cache():
    """Clear every functools lru_cache wrapper alive in the process.

    Runs a garbage collection first, then walks all tracked objects and
    calls cache_clear() on each lru_cache wrapper found.
    """
    gc.collect()
    wrappers = (obj for obj in gc.get_objects()
                if isinstance(obj, functools._lru_cache_wrapper))
    for wrapper in wrappers:
        wrapper.cache_clear()
def total_variation_dists(dists_one,
                          dists_two,
                          bin_width=3):
    """Total-variation distance between the empirical distributions of two
    samples of distances.

    With `bin_width=None` the comparison is over the exact observed values.
    Otherwise values are binned in intervals of `bin_width` spanning the
    range of `dists_one`; mass of `dists_two` falling below that range, or
    at/above its maximum, is counted in full.
    """
    n1 = len(dists_one)
    n2 = len(dists_two)
    if bin_width is None:
        support = np.unique(np.concatenate([dists_one, dists_two]))
        tv = sum(np.abs(np.sum(dists_one == d) / n1 - np.sum(dists_two == d) / n2)
                 for d in support)
    else:
        lo = np.min(dists_one)
        hi = np.max(dists_one)
        # Mass of dists_two outside [lo, hi) counts entirely.
        tv = np.sum(dists_two < lo) / n2 + np.sum(dists_two >= hi) / n2
        for left in np.arange(lo, hi, bin_width):
            right = left + bin_width
            p_1 = np.sum((dists_one >= left) * (dists_one < right)) / n1
            p_2 = np.sum((dists_two >= left) * (dists_two < right)) / n2
            tv += np.abs(p_1 - p_2)
    return tv / 2
def obs_rows_trim(particles, trail_zero_lim=3):
    """Extract observation-time rows for each particle, trimming trailing
    zero-distance rows.

    If the last `trail_zero_lim` rows of a particle all have zero distance
    (last column), the whole trailing run of zero-distance rows is cut off
    (keeping the first row of the run). Returns one row-array per particle.

    Fixes vs the original: the run-length scan started from the hard-coded
    value 3 instead of `trail_zero_lim`, and a stray unconditional append
    duplicated every particle's untrimmed rows, doubling the list length.
    """
    particles_obs_rows = []
    for p in particles:
        obs_rows = bmm.observation_time_rows(p)
        zero_dist_bools = obs_rows[:, -1] == 0
        if np.all(zero_dist_bools[-trail_zero_lim:]):
            # Walk backwards to find where the trailing zero run starts.
            count = trail_zero_lim
            is_zero = True
            while is_zero and count < len(obs_rows):
                count += 1
                is_zero = zero_dist_bools[-count]
            particles_obs_rows.append(obs_rows[:-(count - 1)])
        else:
            particles_obs_rows.append(obs_rows)
    return particles_obs_rows
def interval_tv_dists(particles_one,
                      particles_two,
                      interval=60,
                      speeds=False,
                      bins=None,
                      trim_zeros=3):
    """Per-interval total-variation distance between two particle sets.

    For each window of `interval` seconds the distance travelled (or mean
    speed if `speeds` is True) is collected across the particles of each
    set, and the TV distance between the two empirical distributions is
    computed with `total_variation_dists`.

    Parameters:
        particles_one, particles_two: bmm particle collections sharing the
            same observation times.
        interval: window length; must be a multiple of the inter-observation
            time.
        speeds: compare speeds (distance / interval) instead of distances.
        bins: bin width forwarded to `total_variation_dists`.
        trim_zeros: trailing-zero trim limit forwarded to `obs_rows_trim`.

    Returns:
        numpy.ndarray with one TV distance per interval.
    """
    observation_times = particles_one.observation_times
    obs_int = observation_times[1]
    if interval % obs_int != 0:
        raise ValueError('interval must be a multiple of inter-observation times')
    obs_per_int = int(interval / obs_int)
    num_ints = int(observation_times[-1] / interval)
    tv_each_time = np.zeros(num_ints)
    particles_one_obs_rows = obs_rows_trim(particles_one, trim_zeros)
    particles_two_obs_rows = obs_rows_trim(particles_two, trim_zeros)
    for i in range(1, num_ints + 1):
        start_time = observation_times[(i - 1) * obs_per_int]
        end_time = observation_times[i * obs_per_int]
        # Distinct sentinels (-2 vs -3) mark particles whose trimmed
        # trajectory does not reach end_time, so missing mass from the two
        # sets never counts as matching.
        p1_dists = -np.ones(particles_one.n) * 2
        for j in range(particles_one.n):
            obs_rows = particles_one_obs_rows[j]
            if end_time in obs_rows[:, 0]:
                p1_dists[j] = np.sum(
                    obs_rows[np.logical_and(obs_rows[:, 0] >= start_time, obs_rows[:, 0] <= end_time), -1])
        p2_dists = -np.ones(particles_two.n) * 3
        for k in range(particles_two.n):
            obs_rows = particles_two_obs_rows[k]
            # Fix: restrict the membership test to the time column; the bare
            # `end_time in obs_rows` matched ANY cell of the 2-D array.
            if end_time in obs_rows[:, 0]:
                p2_dists[k] = np.sum(
                    obs_rows[np.logical_and(obs_rows[:, 0] >= start_time, obs_rows[:, 0] <= end_time), -1])
        if speeds:
            p1_dists /= interval
            p2_dists /= interval
        tv_each_time[i - 1] = total_variation_dists(p1_dists, p2_dists, bins)
    return tv_each_time
def plot_metric_over_time(setup_dict, fl_pf_metric, fl_bsi_metric, save_dir=None,
                          t_linspace=None, x_lab='t', x_ticks=None):
    """Plot a metric through time, one row of subplots per sample size, for
    both the particle-filter and backward-simulation variants.

    Parameters
    ----------
    setup_dict : dict
        Must contain 'lags' and 'fl_n_samps'.
    fl_pf_metric, fl_bsi_metric : ndarray
        Metric arrays indexed [n_samps, lag, time].
    save_dir : str, optional
        If given, figures are written to save_dir + '_pf' / '_bs'.
    t_linspace : ndarray, optional
        X-axis values; defaults to 0..T-1.
    x_lab : str, optional
        X-axis label.
    x_ticks : array-like, optional
        Explicit x-tick positions.

    Returns
    -------
    tuple
        ((fig_pf, axes_pf), (fig_bs, axes_bs)).
    """
    lags = setup_dict['lags']
    num_times = fl_pf_metric.shape[-1]
    if t_linspace is None:
        t_linspace = np.arange(num_times)
    lines = [None] * (len(lags) + 1)
    n_rows = len(setup_dict['fl_n_samps'])
    fig_pf, axes_pf = plt.subplots(n_rows, sharex='all', sharey='all', figsize=(8, 6))
    fig_bs, axes_bs = plt.subplots(n_rows, sharex='all', sharey='all', figsize=(8, 6))
    for row, n_samps in enumerate(setup_dict['fl_n_samps']):
        for lag_idx, lag in enumerate(lags):
            axes_pf[row].plot(t_linspace, fl_pf_metric[row, lag_idx], label=f'Lag: {lag}')
            lines[lag_idx], = axes_bs[row].plot(t_linspace, fl_bsi_metric[row, lag_idx], label=f'Lag: {lag}')
        axes_pf[row].set_ylabel(f'N={n_samps}', fontsize=18)
        axes_bs[row].set_ylabel(f'N={n_samps}', fontsize=18)
    axes_pf[-1].set_xlabel(x_lab, fontsize=16)
    axes_bs[-1].set_xlabel(x_lab, fontsize=16)
    if x_ticks is not None:
        axes_pf[-1].set_xticks(x_ticks)
        axes_bs[-1].set_xticks(x_ticks)
    plt.legend(loc='upper right', bbox_to_anchor=(0.8, 0.99))
    for fig in (fig_pf, fig_bs):
        fig.set_figwidth(5)
        fig.set_figheight(11)
        fig.tight_layout()
    if save_dir is not None:
        fig_pf.savefig(save_dir + '_pf', dpi=400)
        fig_bs.savefig(save_dir + '_bs', dpi=400)
    return (fig_pf, axes_pf), (fig_bs, axes_bs)
| [
"numpy.abs",
"numpy.all",
"numpy.unique",
"pandas.read_csv",
"numpy.arange",
"numpy.ones",
"numpy.logical_and",
"numpy.min",
"numpy.max",
"numpy.sum",
"numpy.zeros",
"numpy.concatenate",
"gc.collect",
"gc.get_objects",
"bmm.observation_time_rows",
"matplotlib.pyplot.legend"
] | [((180, 211), 'pandas.read_csv', 'pd.read_csv', (['path'], {'chunksize': '(10)'}), '(path, chunksize=10)\n', (191, 211), True, 'import pandas as pd\n'), ((406, 476), 'pandas.read_csv', 'pd.read_csv', (['path'], {'converters': 'polyline_converters', 'chunksize': 'chunksize'}), '(path, converters=polyline_converters, chunksize=chunksize)\n', (417, 476), True, 'import pandas as pd\n'), ((502, 514), 'gc.collect', 'gc.collect', ([], {}), '()\n', (512, 514), False, 'import gc\n'), ((528, 544), 'gc.get_objects', 'gc.get_objects', ([], {}), '()\n', (542, 544), False, 'import gc\n'), ((811, 849), 'numpy.concatenate', 'np.concatenate', (['[dists_one, dists_two]'], {}), '([dists_one, dists_two])\n', (825, 849), True, 'import numpy as np\n'), ((866, 886), 'numpy.unique', 'np.unique', (['all_dists'], {}), '(all_dists)\n', (875, 886), True, 'import numpy as np\n'), ((2916, 2934), 'numpy.zeros', 'np.zeros', (['num_ints'], {}), '(num_ints)\n', (2924, 2934), True, 'import numpy as np\n'), ((5358, 5415), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'bbox_to_anchor': '(0.8, 0.99)'}), "(loc='upper right', bbox_to_anchor=(0.8, 0.99))\n", (5368, 5415), True, 'import matplotlib.pyplot as plt\n'), ((1128, 1145), 'numpy.min', 'np.min', (['dists_one'], {}), '(dists_one)\n', (1134, 1145), True, 'import numpy as np\n'), ((1165, 1182), 'numpy.max', 'np.max', (['dists_one'], {}), '(dists_one)\n', (1171, 1182), True, 'import numpy as np\n'), ((1218, 1258), 'numpy.arange', 'np.arange', (['min_dist', 'max_dist', 'bin_width'], {}), '(min_dist, max_dist, bin_width)\n', (1227, 1258), True, 'import numpy as np\n'), ((1847, 1875), 'bmm.observation_time_rows', 'bmm.observation_time_rows', (['p'], {}), '(p)\n', (1872, 1875), False, 'import bmm\n'), ((1934, 1975), 'numpy.all', 'np.all', (['zero_dist_bools[-trail_zero_lim:]'], {}), '(zero_dist_bools[-trail_zero_lim:])\n', (1940, 1975), True, 'import numpy as np\n'), ((4336, 4348), 'numpy.arange', 'np.arange', (['m'], {}), 
'(m)\n', (4345, 4348), True, 'import numpy as np\n'), ((1294, 1322), 'numpy.sum', 'np.sum', (['(dists_two < min_dist)'], {}), '(dists_two < min_dist)\n', (1300, 1322), True, 'import numpy as np\n'), ((1363, 1392), 'numpy.sum', 'np.sum', (['(dists_two >= max_dist)'], {}), '(dists_two >= max_dist)\n', (1369, 1392), True, 'import numpy as np\n'), ((1690, 1707), 'numpy.abs', 'np.abs', (['(p_1 - p_2)'], {}), '(p_1 - p_2)\n', (1696, 1707), True, 'import numpy as np\n'), ((2316, 2344), 'bmm.observation_time_rows', 'bmm.observation_time_rows', (['p'], {}), '(p)\n', (2341, 2344), False, 'import bmm\n'), ((979, 1004), 'numpy.sum', 'np.sum', (['(dists_one == dist)'], {}), '(dists_one == dist)\n', (985, 1004), True, 'import numpy as np\n'), ((1028, 1053), 'numpy.sum', 'np.sum', (['(dists_two == dist)'], {}), '(dists_two == dist)\n', (1034, 1053), True, 'import numpy as np\n'), ((1081, 1098), 'numpy.abs', 'np.abs', (['(p_1 - p_2)'], {}), '(p_1 - p_2)\n', (1087, 1098), True, 'import numpy as np\n'), ((1534, 1588), 'numpy.sum', 'np.sum', (['((dists_one >= int_min) * (dists_one < int_max))'], {}), '((dists_one >= int_min) * (dists_one < int_max))\n', (1540, 1588), True, 'import numpy as np\n'), ((1612, 1666), 'numpy.sum', 'np.sum', (['((dists_two >= int_min) * (dists_two < int_max))'], {}), '((dists_two >= int_min) * (dists_two < int_max))\n', (1618, 1666), True, 'import numpy as np\n'), ((3251, 3275), 'numpy.ones', 'np.ones', (['particles_one.n'], {}), '(particles_one.n)\n', (3258, 3275), True, 'import numpy as np\n'), ((3580, 3604), 'numpy.ones', 'np.ones', (['particles_two.n'], {}), '(particles_two.n)\n', (3587, 3604), True, 'import numpy as np\n'), ((3480, 3552), 'numpy.logical_and', 'np.logical_and', (['(obs_rows[:, 0] >= start_time)', '(obs_rows[:, 0] <= end_time)'], {}), '(obs_rows[:, 0] >= start_time, obs_rows[:, 0] <= end_time)\n', (3494, 3552), True, 'import numpy as np\n'), ((3803, 3875), 'numpy.logical_and', 'np.logical_and', (['(obs_rows[:, 0] >= start_time)', 
'(obs_rows[:, 0] <= end_time)'], {}), '(obs_rows[:, 0] >= start_time, obs_rows[:, 0] <= end_time)\n', (3817, 3875), True, 'import numpy as np\n')] |
#from __future__ import absolute_import, division, print_function, unicode_literals
from neural_networks.NeuralLDAanalysisMethods import *
from dataset_loader.dataset_helper import Dataset_Helper
from results_saver import LogWriter
from neural_networks.aliaser import *
import os
import sys
import numpy as np
from neural_networks.lda_impl import Lda
import tkinter as tk
# Make the script's own directory importable regardless of the CWD it is run from.
file_dir = os.path.dirname(__file__)
sys.path.append(file_dir)
# Hidden Tk root: presumably needed by a helper that opens file dialogs
# (TODO confirm); the window itself is never shown.
root = tk.Tk()
root.withdraw()
from numpy.random import seed
# Fix numpy and tensorflow seeds for reproducibility.
seed(42)
tf.random.set_seed(42)
# Experiments to run: [log-name prefix, dataset index for Dataset_Helper].
params = [['LDA-CSFD-simple',14]]
"""params = [['LDA-Yelp',6],
          ['LDA-Reuters',0],
          ['LDA-dbpedia',1],
          ['LDA-20News',3],
          ['LDA-CSFD',12]]"""
for param in params:
    # Re-seed per experiment so each run is independent of list order.
    seed(42)
    tf.random.set_seed(42)
    test_name = param[0]
    results = []
    # Vocabulary size for the tokenizer.
    num_of_words = 10000
    dataset_helper = Dataset_Helper(True)
    dataset_helper.set_wanted_datasets([param[1]])
    dataset_helper.next_dataset()
    num_of_topics = dataset_helper.get_num_of_topics()
    documents = dataset_helper.get_texts_as_list()
    labels = dataset_helper.get_labels(dataset_helper.get_train_file_path())
    tokenizer = Tokenizer(num_words=num_of_words)
    tokenizer.fit_on_texts(documents)
    #items= tokenizer.word_index
    # Map word index -> word, for inspecting topic words.
    reverse_word_map = dict(map(reversed, tokenizer.word_index.items()))
    # Binary document-term matrix (word presence/absence per document).
    matrix = tokenizer.texts_to_matrix(documents, mode='binary')
    num_of_important_words = 20
    log_writer = LogWriter(log_file_desc='{}{}'.format(test_name,""),result_desc="NeuralTopicModel")
    model = Lda(num_of_topics,num_of_important_words,
                passes=25,
                iterations=25)
    """gensim.models.LdaModel(
            doc_term_matrix,
            num_topics=num_of_topics,
            id2word=dictionary,
            passes=2,
            iterations=2)"""
    # LDA section: train, extract top words, evaluate and log.
    model.train(documents)
    # Second argument True: presumably return words rather than ids — TODO confirm.
    topic_words_lda = extract_important_words(model.get_topics(), True)
    print(topic_words_lda)
    log_writer.write_2D_list('topic_words_lda', topic_words_lda, 'w+')
    test_model(documents, labels, model, log_writer, 'standard_lda')
    #plot_clustering_chart(model,True,documents,log_writer,'lda',dataset_helper.get_dataset_name(),dataset_helper.get_num_of_topics())
    measureCoherence(topic_words_lda, log_writer, model.dictionary, documents, 'lda', dataset_helper.get_dataset_name())
    log_writer.end_logging()
| [
"os.path.dirname",
"dataset_loader.dataset_helper.Dataset_Helper",
"tkinter.Tk",
"neural_networks.lda_impl.Lda",
"numpy.random.seed",
"sys.path.append"
] | [((385, 410), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (400, 410), False, 'import os\n'), ((411, 436), 'sys.path.append', 'sys.path.append', (['file_dir'], {}), '(file_dir)\n', (426, 436), False, 'import sys\n'), ((444, 451), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (449, 451), True, 'import tkinter as tk\n'), ((498, 506), 'numpy.random.seed', 'seed', (['(42)'], {}), '(42)\n', (502, 506), False, 'from numpy.random import seed\n'), ((729, 737), 'numpy.random.seed', 'seed', (['(42)'], {}), '(42)\n', (733, 737), False, 'from numpy.random import seed\n'), ((854, 874), 'dataset_loader.dataset_helper.Dataset_Helper', 'Dataset_Helper', (['(True)'], {}), '(True)\n', (868, 874), False, 'from dataset_loader.dataset_helper import Dataset_Helper\n'), ((1550, 1618), 'neural_networks.lda_impl.Lda', 'Lda', (['num_of_topics', 'num_of_important_words'], {'passes': '(25)', 'iterations': '(25)'}), '(num_of_topics, num_of_important_words, passes=25, iterations=25)\n', (1553, 1618), False, 'from neural_networks.lda_impl import Lda\n')] |
"""
Choice functions.
We focus upon the cumulative normal distribution as the choice function, but you
could impliment your own, eg Logistic, SoftMax, etc.
"""
from scipy.stats import norm
import numpy as np
def StandardCumulativeNormalChoiceFunc(decision_variable, θ, θ_fixed):
    """Cumulative normal choice function without the comparison-acuity (α)
    scaling.

    The decision variable is passed through Φ and the result is squashed into
    [ϵ, 1 - ϵ] to allow for lapses. θ is accepted for interface parity but
    unused here.
    """
    lapse = θ_fixed["ϵ"]
    return lapse + (1 - 2 * lapse) * _Phi(decision_variable)
def CumulativeNormalChoiceFunc(decision_variable, θ, θ_fixed):
    """Our default choice function: cumulative normal over the decision
    variable scaled by the comparison acuity α, squashed into [ϵ, 1 - ϵ].

    A small constant (1e-3) keeps α strictly positive to avoid division by
    zero.
    """
    lapse = θ_fixed["ϵ"]
    acuity = 1e-3 + θ["α"].values
    scaled = np.divide(decision_variable, acuity)
    return lapse + (1 - 2 * lapse) * _Phi(scaled)
def _Phi(x):
"""Cumulative normal distribution, provided here as a helper function"""
# NOTE: because some of the data was from a pandas dataframe, the numpy
# arrays are coming out as dtype = object. So we need to cooerce into
# floats.
return norm.cdf(x.astype("float"), loc=0, scale=1)
| [
"numpy.divide"
] | [((655, 686), 'numpy.divide', 'np.divide', (['decision_variable', 'α'], {}), '(decision_variable, α)\n', (664, 686), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import torch
from collections import namedtuple
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward', 'done'))
class DeepRLTrainer:
    """Runs an episodic deep-RL training loop and periodically logs, plots
    and checkpoints progress.

    Parameters
    ----------
    environment : env with reset() and step(action) -> (state, reward, done, info);
        info is expected to carry the keys 'full_cc', 'total_reward',
        'nb_steps', 'total_covered_tiles' and 'total_pos_terr_diff'
        (TODO confirm against the environment implementation).
    agent : agent exposing update_epsilon, select_action, observe_transition,
        save and an `epsilon` attribute.
    save_path : str
        Prefix for every plot, numpy array and checkpoint written.
    """

    NB_EPISODES = 3000   # total training episodes
    SAVE_EVERY = 500     # plot/checkpoint period; also the moving-average window
    INFO_EVERY = 50      # console progress period
    SOFT_MAX = False     # sample actions from a softmax instead of epsilon-greedy
    DEVICE = 'cpu'

    def __init__(self, environment, agent, save_path):
        self.env = environment
        self.agent = agent
        self.save_path = save_path
        # Per-episode metrics and their windowed running averages.
        self.total_rewards = []
        self.avg_rewards = []
        self.tiles_visited = []
        self.avg_tiles_visited = []
        self.nb_steps = []
        self.avg_nb_steps = []
        self.cc_counter = 0          # complete-coverage episodes seen so far
        self.nb_complete_cov = []
        self.terrain_diffs = []
        self.avg_terrain_diffs = []

    def train(self):
        """Main loop: run NB_EPISODES episodes, logging every INFO_EVERY and
        saving plots/arrays/checkpoints every SAVE_EVERY episodes."""
        for i in range(DeepRLTrainer.NB_EPISODES):
            info = self._run_episode(i)
            self._record_metrics(info, i)
            episode_nb = i + 1
            if episode_nb % DeepRLTrainer.INFO_EVERY == 0:
                self._print_progress(episode_nb)
            if episode_nb % DeepRLTrainer.SAVE_EVERY == 0:
                self._save_snapshot(episode_nb)

    def _run_episode(self, episode_idx):
        """Play one full episode, feeding every transition to the agent.
        Returns the environment's final `info` dict."""
        current_state = torch.tensor(self.env.reset(), dtype=torch.float,
                                     device=DeepRLTrainer.DEVICE)
        done = False
        info = {}
        self.agent.update_epsilon(episode_idx)
        while not done:
            action = self.agent.select_action(
                current_state, soft_max=DeepRLTrainer.SOFT_MAX
            )
            n_state, reward, done, info = self.env.step(action)
            action = torch.tensor(action, dtype=torch.int64,
                                  device=DeepRLTrainer.DEVICE)
            n_state = torch.tensor(n_state, dtype=torch.float,
                                   device=DeepRLTrainer.DEVICE)
            reward = torch.tensor(reward, dtype=torch.float,
                                  device=DeepRLTrainer.DEVICE)
            done = torch.tensor(done, dtype=torch.bool,
                                device=DeepRLTrainer.DEVICE)
            self.agent.observe_transition(Transition(
                current_state, action, n_state, reward, done
            ), device=DeepRLTrainer.DEVICE)
            current_state = n_state
        return info

    def _record_metrics(self, info, episode_idx):
        """Append this episode's metrics and their windowed averages."""
        if info["full_cc"]:
            self.cc_counter += 1
            print(f"COMPLETE COVERAGE: {self.cc_counter}")
        self.total_rewards.append(info["total_reward"])
        self.nb_steps.append(info["nb_steps"])
        self.tiles_visited.append(info["total_covered_tiles"])
        self.nb_complete_cov.append(self.cc_counter)
        self.terrain_diffs.append(info["total_pos_terr_diff"])
        # Average over everything until SAVE_EVERY episodes exist, then over
        # the trailing SAVE_EVERY-episode window.
        avg_start = 0 if episode_idx < DeepRLTrainer.SAVE_EVERY else -DeepRLTrainer.SAVE_EVERY
        self.avg_rewards.append(np.average(self.total_rewards[avg_start:]))
        self.avg_tiles_visited.append(np.average(self.tiles_visited[avg_start:]))
        self.avg_nb_steps.append(np.average(self.nb_steps[avg_start:]))
        self.avg_terrain_diffs.append(np.average(self.terrain_diffs[avg_start:]))

    def _print_progress(self, episode_nb):
        """Console summary of the latest windowed averages."""
        print(f"Episode {episode_nb}")
        print(f"average total reward: {self.avg_rewards[-1]}")
        print(f"average nb steps: {self.avg_nb_steps[-1]}")
        print(f"average nb tiles visited: {self.avg_tiles_visited[-1]}")
        print(f"average positive terrain diff: {self.avg_terrain_diffs[-1]}")
        print(f"epsilon: {self.agent.epsilon}")
        print()

    def _plot_series(self, x, curves, legend, title, file_stub):
        """Render one metric figure: each series in `curves` plotted against
        x; saved as <save_path><file_stub>.png."""
        plt.clf()
        args = []
        for y in curves:
            args.extend((x, y))
        plt.plot(*args)
        plt.legend(legend)
        plt.title(title)
        plt.savefig(self.save_path + file_stub + ".png")

    def _save_snapshot(self, episode_nb):
        """Write every metric plot, the raw metric arrays and an agent
        checkpoint for the first `episode_nb` episodes."""
        x = range(episode_nb)
        self._plot_series(x, [self.total_rewards, self.avg_rewards],
                          ['total rewards', 'average total rewards'],
                          'Total reward for every episode', "rewards")
        np.save(self.save_path + "rewards.npy", self.total_rewards)
        np.save(self.save_path + "avg_rewards.npy", self.avg_rewards)

        self._plot_series(x, [self.tiles_visited, self.avg_tiles_visited],
                          ['nb tiles visited', 'average nb tile visited'],
                          'Number of tiles visited for every episode', "tiles_visited")
        np.save(self.save_path + "tiles_visited.npy", self.tiles_visited)
        np.save(self.save_path + "avg_tiles_visited.npy", self.avg_tiles_visited)

        self._plot_series(x, [self.nb_steps, self.avg_nb_steps],
                          ['nb steps', 'average nb steps'],
                          'Number of steps for every episode', "nb_steps")
        np.save(self.save_path + "nb_steps.npy", self.nb_steps)
        np.save(self.save_path + "avg_nb_steps.npy", self.avg_nb_steps)

        self._plot_series(x, [self.nb_complete_cov],
                          ['nb complete coverage runs'],
                          'Nb of complete coverage runs', "nb_complete_covs")
        np.save(self.save_path + "nb_complete_covs.npy", self.nb_complete_cov)

        self._plot_series(x, [self.terrain_diffs, self.avg_terrain_diffs],
                          ['terrain differences', 'average terrain differences'],
                          'Total terrain differences for every episode', "terrain_diffs")
        np.save(self.save_path + "terrain_diffs.npy", self.terrain_diffs)
        np.save(self.save_path + "avg_terrain_diffs.npy", self.avg_terrain_diffs)

        self.agent.save(self.save_path, episode_nb)
| [
"collections.namedtuple",
"matplotlib.pyplot.savefig",
"numpy.average",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"torch.tensor",
"matplotlib.pyplot.title",
"numpy.save",
"matplotlib.pyplot.legend"
] | [((113, 190), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', "('state', 'action', 'next_state', 'reward', 'done')"], {}), "('Transition', ('state', 'action', 'next_state', 'reward', 'done'))\n", (123, 190), False, 'from collections import namedtuple\n'), ((1364, 1432), 'torch.tensor', 'torch.tensor', (['action'], {'dtype': 'torch.int64', 'device': 'DeepRLTrainer.DEVICE'}), '(action, dtype=torch.int64, device=DeepRLTrainer.DEVICE)\n', (1376, 1432), False, 'import torch\n'), ((1497, 1566), 'torch.tensor', 'torch.tensor', (['n_state'], {'dtype': 'torch.float', 'device': 'DeepRLTrainer.DEVICE'}), '(n_state, dtype=torch.float, device=DeepRLTrainer.DEVICE)\n', (1509, 1566), False, 'import torch\n'), ((1631, 1699), 'torch.tensor', 'torch.tensor', (['reward'], {'dtype': 'torch.float', 'device': 'DeepRLTrainer.DEVICE'}), '(reward, dtype=torch.float, device=DeepRLTrainer.DEVICE)\n', (1643, 1699), False, 'import torch\n'), ((1761, 1826), 'torch.tensor', 'torch.tensor', (['done'], {'dtype': 'torch.bool', 'device': 'DeepRLTrainer.DEVICE'}), '(done, dtype=torch.bool, device=DeepRLTrainer.DEVICE)\n', (1773, 1826), False, 'import torch\n'), ((2639, 2681), 'numpy.average', 'np.average', (['self.total_rewards[avg_start:]'], {}), '(self.total_rewards[avg_start:])\n', (2649, 2681), True, 'import numpy as np\n'), ((2725, 2767), 'numpy.average', 'np.average', (['self.tiles_visited[avg_start:]'], {}), '(self.tiles_visited[avg_start:])\n', (2735, 2767), True, 'import numpy as np\n'), ((2806, 2843), 'numpy.average', 'np.average', (['self.nb_steps[avg_start:]'], {}), '(self.nb_steps[avg_start:])\n', (2816, 2843), True, 'import numpy as np\n'), ((2887, 2929), 'numpy.average', 'np.average', (['self.terrain_diffs[avg_start:]'], {}), '(self.terrain_diffs[avg_start:])\n', (2897, 2929), True, 'import numpy as np\n'), ((3570, 3579), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3577, 3579), True, 'import matplotlib.pyplot as plt\n'), ((3596, 3648), 
'matplotlib.pyplot.plot', 'plt.plot', (['x', 'self.total_rewards', 'x', 'self.avg_rewards'], {}), '(x, self.total_rewards, x, self.avg_rewards)\n', (3604, 3648), True, 'import matplotlib.pyplot as plt\n'), ((3665, 3719), 'matplotlib.pyplot.legend', 'plt.legend', (["['total rewards', 'average total rewards']"], {}), "(['total rewards', 'average total rewards'])\n", (3675, 3719), True, 'import matplotlib.pyplot as plt\n'), ((3736, 3779), 'matplotlib.pyplot.title', 'plt.title', (['"""Total reward for every episode"""'], {}), "('Total reward for every episode')\n", (3745, 3779), True, 'import matplotlib.pyplot as plt\n'), ((3796, 3840), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.save_path + f'rewards.png')"], {}), "(self.save_path + f'rewards.png')\n", (3807, 3840), True, 'import matplotlib.pyplot as plt\n'), ((3857, 3917), 'numpy.save', 'np.save', (["(self.save_path + f'rewards.npy')", 'self.total_rewards'], {}), "(self.save_path + f'rewards.npy', self.total_rewards)\n", (3864, 3917), True, 'import numpy as np\n'), ((3934, 3996), 'numpy.save', 'np.save', (["(self.save_path + f'avg_rewards.npy')", 'self.avg_rewards'], {}), "(self.save_path + f'avg_rewards.npy', self.avg_rewards)\n", (3941, 3996), True, 'import numpy as np\n'), ((4014, 4023), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4021, 4023), True, 'import matplotlib.pyplot as plt\n'), ((4040, 4098), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'self.tiles_visited', 'x', 'self.avg_tiles_visited'], {}), '(x, self.tiles_visited, x, self.avg_tiles_visited)\n', (4048, 4098), True, 'import matplotlib.pyplot as plt\n'), ((4115, 4174), 'matplotlib.pyplot.legend', 'plt.legend', (["['nb tiles visited', 'average nb tile visited']"], {}), "(['nb tiles visited', 'average nb tile visited'])\n", (4125, 4174), True, 'import matplotlib.pyplot as plt\n'), ((4191, 4245), 'matplotlib.pyplot.title', 'plt.title', (['"""Number of tiles visited for every episode"""'], {}), "('Number of tiles visited for every 
episode')\n", (4200, 4245), True, 'import matplotlib.pyplot as plt\n'), ((4262, 4312), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.save_path + f'tiles_visited.png')"], {}), "(self.save_path + f'tiles_visited.png')\n", (4273, 4312), True, 'import matplotlib.pyplot as plt\n'), ((4329, 4395), 'numpy.save', 'np.save', (["(self.save_path + f'tiles_visited.npy')", 'self.tiles_visited'], {}), "(self.save_path + f'tiles_visited.npy', self.tiles_visited)\n", (4336, 4395), True, 'import numpy as np\n'), ((4412, 4486), 'numpy.save', 'np.save', (["(self.save_path + f'avg_tiles_visited.npy')", 'self.avg_tiles_visited'], {}), "(self.save_path + f'avg_tiles_visited.npy', self.avg_tiles_visited)\n", (4419, 4486), True, 'import numpy as np\n'), ((4504, 4513), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4511, 4513), True, 'import matplotlib.pyplot as plt\n'), ((4530, 4578), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'self.nb_steps', 'x', 'self.avg_nb_steps'], {}), '(x, self.nb_steps, x, self.avg_nb_steps)\n', (4538, 4578), True, 'import matplotlib.pyplot as plt\n'), ((4595, 4639), 'matplotlib.pyplot.legend', 'plt.legend', (["['nb steps', 'average nb steps']"], {}), "(['nb steps', 'average nb steps'])\n", (4605, 4639), True, 'import matplotlib.pyplot as plt\n'), ((4656, 4702), 'matplotlib.pyplot.title', 'plt.title', (['"""Number of steps for every episode"""'], {}), "('Number of steps for every episode')\n", (4665, 4702), True, 'import matplotlib.pyplot as plt\n'), ((4719, 4764), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.save_path + f'nb_steps.png')"], {}), "(self.save_path + f'nb_steps.png')\n", (4730, 4764), True, 'import matplotlib.pyplot as plt\n'), ((4781, 4837), 'numpy.save', 'np.save', (["(self.save_path + f'nb_steps.npy')", 'self.nb_steps'], {}), "(self.save_path + f'nb_steps.npy', self.nb_steps)\n", (4788, 4837), True, 'import numpy as np\n'), ((4854, 4918), 'numpy.save', 'np.save', (["(self.save_path + f'avg_nb_steps.npy')", 
'self.avg_nb_steps'], {}), "(self.save_path + f'avg_nb_steps.npy', self.avg_nb_steps)\n", (4861, 4918), True, 'import numpy as np\n'), ((4936, 4945), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4943, 4945), True, 'import matplotlib.pyplot as plt\n'), ((4962, 4995), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'self.nb_complete_cov'], {}), '(x, self.nb_complete_cov)\n', (4970, 4995), True, 'import matplotlib.pyplot as plt\n'), ((5012, 5053), 'matplotlib.pyplot.legend', 'plt.legend', (["['nb complete coverage runs']"], {}), "(['nb complete coverage runs'])\n", (5022, 5053), True, 'import matplotlib.pyplot as plt\n'), ((5070, 5111), 'matplotlib.pyplot.title', 'plt.title', (['"""Nb of complete coverage runs"""'], {}), "('Nb of complete coverage runs')\n", (5079, 5111), True, 'import matplotlib.pyplot as plt\n'), ((5128, 5181), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.save_path + f'nb_complete_covs.png')"], {}), "(self.save_path + f'nb_complete_covs.png')\n", (5139, 5181), True, 'import matplotlib.pyplot as plt\n'), ((5198, 5269), 'numpy.save', 'np.save', (["(self.save_path + f'nb_complete_covs.npy')", 'self.nb_complete_cov'], {}), "(self.save_path + f'nb_complete_covs.npy', self.nb_complete_cov)\n", (5205, 5269), True, 'import numpy as np\n'), ((5287, 5296), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5294, 5296), True, 'import matplotlib.pyplot as plt\n'), ((5313, 5371), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'self.terrain_diffs', 'x', 'self.avg_terrain_diffs'], {}), '(x, self.terrain_diffs, x, self.avg_terrain_diffs)\n', (5321, 5371), True, 'import matplotlib.pyplot as plt\n'), ((5388, 5454), 'matplotlib.pyplot.legend', 'plt.legend', (["['terrain differences', 'average terrain differences']"], {}), "(['terrain differences', 'average terrain differences'])\n", (5398, 5454), True, 'import matplotlib.pyplot as plt\n'), ((5471, 5527), 'matplotlib.pyplot.title', 'plt.title', (['"""Total terrain differences for every episode"""'], 
{}), "('Total terrain differences for every episode')\n", (5480, 5527), True, 'import matplotlib.pyplot as plt\n'), ((5544, 5594), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.save_path + f'terrain_diffs.png')"], {}), "(self.save_path + f'terrain_diffs.png')\n", (5555, 5594), True, 'import matplotlib.pyplot as plt\n'), ((5611, 5677), 'numpy.save', 'np.save', (["(self.save_path + f'terrain_diffs.npy')", 'self.terrain_diffs'], {}), "(self.save_path + f'terrain_diffs.npy', self.terrain_diffs)\n", (5618, 5677), True, 'import numpy as np\n'), ((5694, 5768), 'numpy.save', 'np.save', (["(self.save_path + f'avg_terrain_diffs.npy')", 'self.avg_terrain_diffs'], {}), "(self.save_path + f'avg_terrain_diffs.npy', self.avg_terrain_diffs)\n", (5701, 5768), True, 'import numpy as np\n')] |
'''
Spectra plottings
'''
import numpy as np
import matplotlib.pyplot as plt
def singles(wn, spec):
    '''
    Individual spectra plot.

    Parameters
    ----------
    wn : ndarray
        Wavenumber of shape [n_points].
    spec : ndarray
        Spectra of shape [n_spectra, n_points].

    Returns
    -------
    None.

    '''
    fig, ax = plt.subplots()
    # Raw string: '\m' is an invalid escape sequence in a plain literal
    # (SyntaxWarning on recent Pythons); the rendered label is unchanged.
    plt.xlabel(r'Wavenumber ($\mathrm{cm^{-1}}$)')
    plt.ylabel('Absorbance')
    # IR convention: wavenumber axis decreasing left to right.
    plt.xlim(np.ceil(wn.max()), np.floor(wn.min()))
    plt.plot(wn, spec.T)
    plt.grid(False)
def means(wn, spec, label=False, std=False):
    '''
    Mean spectra plot.

    If label is passed, one mean per group.
    If std, plot mean + standard deviation.

    Parameters
    ----------
    wn : ndarray
        Wavenumber of shape [n_points].
    spec : ndarray
        Spectra of shape [n_spectra, n_points].
    label : list of str, optional
        Labels of shape [n_spectra]. The default is False.
    std : boolean, optional
        If True, plot mean + standard deviation. The default is False.

    Returns
    -------
    None.

    '''
    if label:
        label_set = list(set(label))
        specmean = []
        specstd = []
        for group in label_set:
            # Boolean mask selecting the spectra belonging to this group.
            mask = [lab == group for lab in label]
            specmean += [np.mean(spec[mask, :], axis=0)]
            specstd += [np.std(spec[mask, :], axis=0)]
        specmean = np.array(specmean)
        specstd = np.array(specstd)
    else:
        specmean = np.mean(spec, axis=0)
        specstd = np.std(spec, axis=0)
    fig, ax = plt.subplots()
    plt.grid()
    # Raw string: avoids the invalid '\m' escape-sequence warning.
    plt.xlabel(r'Wavenumber ($\mathrm{cm^{-1}}$)')
    plt.ylabel('Absorbance')
    plt.xlim(np.ceil(wn.max()), np.floor(wn.min()))
    plt.plot(wn, specmean.T)
    if std:
        # 1-D specstd means a single (ungrouped) mean spectrum.
        if len(specstd.shape) == 1:
            plt.fill_between(wn, (specmean-1*specstd), (specmean+1*specstd), alpha=0.25)
        else:
            for i in range(specstd.shape[0]):
                plt.fill_between(wn, (specmean[i,:]-1*specstd[i,:]), (specmean[i,:]+1*specstd[i,:]), alpha=0.25)
    if label:
        plt.legend(label_set)
    plt.grid(False)
def img(values, n_sample, fpa_size):
    '''
    Plot images given values such as area, intensities.

    Parameters
    ----------
    values : ndarray
        Values to be plotted as image
    n_sample : int
        Sample to be plotted
    fpa_size : int
        Size of the FPA.

    Returns
    -------
    None.

    '''
    # Slice out this sample's pixels and arrange them as an FPA-sized image.
    n_px = fpa_size * fpa_size
    start = n_px * n_sample
    values_img = np.reshape(values[start:start + n_px], (fpa_size, fpa_size))
    # Render zeros (outliers) in black by pushing them under the colour scale.
    cmap = plt.cm.jet
    cmap.set_under(color='black')
    # Scale minimum: smallest non-zero value, nudged down slightly.
    vmin = np.min(values_img[np.nonzero(values_img)]) - 0.000001
    fig, ax = plt.subplots()
    plt.imshow(values_img, cmap=cmap, vmin=vmin)
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    plt.colorbar()
| [
"matplotlib.pyplot.imshow",
"numpy.mean",
"matplotlib.pyplot.grid",
"numpy.reshape",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.fill_between",
"numpy.array",
"numpy.nonzero",
"numpy.std",
"matplotlib.pypl... | [((360, 374), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (372, 374), True, 'import matplotlib.pyplot as plt\n'), ((379, 425), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavenumber ($\\\\mathrm{cm^{-1}}$)"""'], {}), "('Wavenumber ($\\\\mathrm{cm^{-1}}$)')\n", (389, 425), True, 'import matplotlib.pyplot as plt\n'), ((429, 453), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Absorbance"""'], {}), "('Absorbance')\n", (439, 453), True, 'import matplotlib.pyplot as plt\n'), ((510, 530), 'matplotlib.pyplot.plot', 'plt.plot', (['wn', 'spec.T'], {}), '(wn, spec.T)\n', (518, 530), True, 'import matplotlib.pyplot as plt\n'), ((535, 550), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (543, 550), True, 'import matplotlib.pyplot as plt\n'), ((1696, 1710), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1708, 1710), True, 'import matplotlib.pyplot as plt\n'), ((1715, 1725), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1723, 1725), True, 'import matplotlib.pyplot as plt\n'), ((1730, 1776), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavenumber ($\\\\mathrm{cm^{-1}}$)"""'], {}), "('Wavenumber ($\\\\mathrm{cm^{-1}}$)')\n", (1740, 1776), True, 'import matplotlib.pyplot as plt\n'), ((1780, 1804), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Absorbance"""'], {}), "('Absorbance')\n", (1790, 1804), True, 'import matplotlib.pyplot as plt\n'), ((1861, 1885), 'matplotlib.pyplot.plot', 'plt.plot', (['wn', 'specmean.T'], {}), '(wn, specmean.T)\n', (1869, 1885), True, 'import matplotlib.pyplot as plt\n'), ((2244, 2259), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (2252, 2259), True, 'import matplotlib.pyplot as plt\n'), ((2673, 2807), 'numpy.reshape', 'np.reshape', (['values[0 + fpa_size * fpa_size * n_sample:fpa_size * fpa_size + fpa_size *\n fpa_size * n_sample]', '(fpa_size, fpa_size)'], {}), '(values[0 + fpa_size * fpa_size * n_sample:fpa_size * fpa_size + 
\n fpa_size * fpa_size * n_sample], (fpa_size, fpa_size))\n', (2683, 2807), True, 'import numpy as np\n'), ((3088, 3102), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3100, 3102), True, 'import matplotlib.pyplot as plt\n'), ((3107, 3151), 'matplotlib.pyplot.imshow', 'plt.imshow', (['values_img'], {'cmap': 'cmap', 'vmin': 'vmin'}), '(values_img, cmap=cmap, vmin=vmin)\n', (3117, 3151), True, 'import matplotlib.pyplot as plt\n'), ((3242, 3256), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3254, 3256), True, 'import matplotlib.pyplot as plt\n'), ((1505, 1523), 'numpy.array', 'np.array', (['specmean'], {}), '(specmean)\n', (1513, 1523), True, 'import numpy as np\n'), ((1547, 1564), 'numpy.array', 'np.array', (['specstd'], {}), '(specstd)\n', (1555, 1564), True, 'import numpy as np\n'), ((1597, 1618), 'numpy.mean', 'np.mean', (['spec'], {'axis': '(0)'}), '(spec, axis=0)\n', (1604, 1618), True, 'import numpy as np\n'), ((1637, 1657), 'numpy.std', 'np.std', (['spec'], {'axis': '(0)'}), '(spec, axis=0)\n', (1643, 1657), True, 'import numpy as np\n'), ((2218, 2239), 'matplotlib.pyplot.legend', 'plt.legend', (['label_set'], {}), '(label_set)\n', (2228, 2239), True, 'import matplotlib.pyplot as plt\n'), ((1946, 2031), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['wn', '(specmean - 1 * specstd)', '(specmean + 1 * specstd)'], {'alpha': '(0.25)'}), '(wn, specmean - 1 * specstd, specmean + 1 * specstd, alpha=0.25\n )\n', (1962, 2031), True, 'import matplotlib.pyplot as plt\n'), ((1401, 1431), 'numpy.mean', 'np.mean', (['spec[mask, :]'], {'axis': '(0)'}), '(spec[mask, :], axis=0)\n', (1408, 1431), True, 'import numpy as np\n'), ((1456, 1485), 'numpy.std', 'np.std', (['spec[mask, :]'], {'axis': '(0)'}), '(spec[mask, :], axis=0)\n', (1462, 1485), True, 'import numpy as np\n'), ((2099, 2207), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['wn', '(specmean[i, :] - 1 * specstd[i, :])', '(specmean[i, :] + 1 * specstd[i, :])'], 
{'alpha': '(0.25)'}), '(wn, specmean[i, :] - 1 * specstd[i, :], specmean[i, :] + 1 *\n specstd[i, :], alpha=0.25)\n', (2115, 2207), True, 'import matplotlib.pyplot as plt\n'), ((3018, 3040), 'numpy.nonzero', 'np.nonzero', (['values_img'], {}), '(values_img)\n', (3028, 3040), True, 'import numpy as np\n')] |
import numpy as np
import os
from display import Display
import logging
from matplotlib import pyplot as plt
import cv2 as cv
class VisualOdometry:
    # FLANN LSH index algorithm id (cv2's FLANN_INDEX_LSH), suited to binary
    # descriptors such as ORB's.
    FLANN_INDEX_LSH = 6
    # Threshold for the ratio test applied to kNN descriptor matches
    # (lower = stricter).
    TSH_ORB_MATCHING = 0.5
    # If True, __init__ also allocates a per-point colour buffer (self.colors).
    COLORS = False
    def __init__(self, data_path, n_features=3000, flann_precision=100, debug=False):
        """Load the dataset and set up the ORB detector and FLANN matcher.

        Parameters
        ----------
        data_path : str
            Directory containing poses.txt, calib.txt and the image_l /
            image_r folders (KITTI-style layout — TODO confirm).
        n_features : int, optional
            Maximum number of ORB features per frame.
        flann_precision : int, optional
            FLANN search `checks` parameter (higher = more precise, slower).
        debug : bool, optional
            If True, intermediate visualisations are shown.
        """
        self.gt_poses = self._load_poses(os.path.join(data_path, "poses.txt"))
        self.left_cam, self.right_cam = self._load_cam(os.path.join(data_path, "calib.txt"))
        self.left_imgs = self._load_imgs(os.path.join(data_path, "image_l"))
        self.right_imgs = self._load_imgs(os.path.join(data_path, "image_r"))
        self.debug = debug
        # Current pose estimate (4x4 homogeneous) and accumulated trajectory.
        self.pose = np.eye(4, 4)
        self.traj = [self.pose]
        self.it = 1
        # Triangulated 3-D points (and, when COLORS is True, their colours).
        self.Q_3d = []
        if self.COLORS is True:
            self.colors = []
        # init orb, could include more params
        self.n_features = n_features
        self.orb = cv.ORB_create(self.n_features)
        # Per-frame feature cache and per-pair match cache.
        self.features = dict()
        self.matches = dict()
        # utilizes a kd-tree | trees, leaf_max_size, more information found here: https://docs.opencv.org/4.x/dc/dc3/tutorial_py_matcher.html
        index_params= dict(algorithm = self.FLANN_INDEX_LSH, table_number = 6, key_size = 12, multi_probe_level = 2)
        search_params = dict(checks = flann_precision)
        self.flann = cv.FlannBasedMatcher(index_params, search_params)
@staticmethod
def _load_poses(file_path: str):
poses = np.loadtxt(file_path, delimiter=" ")
dummy_row = np.ones((len(poses), 4)) * np.array([0, 0, 0, 1])
poses = np.hstack((poses, dummy_row)).reshape((-1, 4, 4))
return poses
@staticmethod
def _load_cam(file_path: str):
cam_param = np.loadtxt(file_path, delimiter=" ")
l_cam = cam_param[0].reshape((3, 4))
r_cam = cam_param[1].reshape((3, 4))
return l_cam, r_cam
@staticmethod
def _load_imgs(cam_dir: str):
list_dir = np.sort(os.listdir(cam_dir))
imgs = []
for i in range(len(list_dir)):
imgs.append(cv.imread(cam_dir + "/" + list_dir[i], 0))
return imgs
@staticmethod
def __H(R: np.array, t: np.array):
H = np.eye(4, dtype=np.float32)
H[:3, :3] = R
H[:3, 3] = t.reshape(-1)
return H
def key(self, i: int, j: int):
if i > j:
return str(j) + "_" + str(i)
else:
return str(i) + "_" + str(j)
def _find_orb_features(self, i: int):
kp, des = self.orb.detectAndCompute(self.left_imgs[i], None)
if self.debug:
img = cv.drawKeypoints(self.left_imgs[i], kp, None);
plt.imshow(img);
plt.show()
kp_ = np.zeros((len(kp), 2))
for idx, ele in enumerate(kp):
kp_[idx] = ele.pt
return {"idx": i, "keypoint": kp_, "descriptor": des}
def _match_two_features(self, i: int, j: int):
des_i = self.features[str(i)]["descriptor"]
des_j = self.features[str(j)]["descriptor"]
matches = self.flann.knnMatch(des_i, des_j, k=2)
filtered_matches = []
for k in range(len(matches)):
if matches[k][0].distance < self.TSH_ORB_MATCHING * matches[i][1].distance: # smaller means better
filtered_matches.append(np.r_[matches[k][0].queryIdx, matches[k][0].trainIdx])
return {"i": i, "j": j, "matches": np.stack(filtered_matches)}
def _find_orb_features_and_match(self, i: int, j: int):
keys = [i, j]
for _, key in enumerate(keys):
if str(key) not in self.features:
self.features[str(key)] = self._find_orb_features(key)
key = self.key(i, j)
if key not in self.matches:
self.matches[key] = self._match_two_features(i, j)
def estimate_essential_matrix(self, i: int, j: int):
self._find_orb_features_and_match(i, j)
kp_i, kp_j = self.features[str(i)]["keypoint"], self.features[str(j)]["keypoint"]
_, _, matches = self.matches[self.key(i, j)].values()
q_i, q_j = kp_i[matches[:, 0]], kp_j[matches[:, 1]]
E, mask = cv.findEssentialMat(q_i, q_j, self.left_cam[:3, :3], prob=0.9999, threshold=1, method=cv.RANSAC)
mask = (mask == 1).reshape(-1)
return q_i[mask, :], q_j[mask, :], E
def decompose_essential_matrix(self, q_i, q_j, E):
R1, R2, t = cv.decomposeEssentialMat(E)
KA = self.left_cam
homogenous_transformations = [self.__H(R1, t), self.__H(R1, -t), self.__H(R2, t), self.__H(R2, -t)]
feasible_poses = np.zeros(len(homogenous_transformations))
Q_ = []
for idx, H in enumerate(homogenous_transformations):
Pi = KA @ np.eye(4, 4)
Pj = KA @ H
Q = cv.triangulatePoints(Pi, Pj, q_i.T, q_j.T)
Q = Q / Q[3, :]
Q_.append(Q)
z = np.array([0, 0, 1]).reshape((1, 3))
z_validation_cam1 = (z @ np.eye(3, 4) @ Q) > 0
z_validation_cam2 = (z @ H[:3, :] @ Q) > 0
z_validation = np.sum(np.logical_and(z_validation_cam1, z_validation_cam2))
feasible_poses[idx] = z_validation
idx = np.argmax(feasible_poses)
return homogenous_transformations[idx][:3, :3], homogenous_transformations[idx][:3, 3], Q_[idx]
def __colors(self, i: int, q_i: np.array):
indices = np.floor(q_i).astype(np.uint32)
colors = (self.left_imgs[i])[indices[:, 1], indices[:, 0]]
return colors
def step(self):
assert self.it >= 1
if self.it >= len(self.gt_poses):
return False
q_i, q_j, E = self.estimate_essential_matrix(self.it-1, self.it)
R, t, Q = self.decompose_essential_matrix(q_i, q_j, E)
if self.COLORS is True:
self.colors.append(self.__colors(self.it, q_i))
w_T_cam = np.linalg.inv(self.__H(R, t))
self.Q_3d.append(((self.pose @ Q)[:3, ...].T))
self.pose = self.pose @ w_T_cam
self.it += 1
return True
if __name__ == "__main__":
    import time
    disp = Display()
    # Dataset directory, resolved relative to this source file.
    p_data = os.path.dirname(os.path.abspath(__file__)) + "/../data/KITTI_sequence_2"
    vo = VisualOdometry(p_data, n_features=3000)
    # Playback rate of the visualization loop.
    fps = 10
    dt = 1/fps
    gt_poses, est_pose = [], []
    i = 0
    while vo.step() is True:
        # Package the estimated trajectory, ground truth and point cloud
        # for the display process (consumed through disp.q).
        pts = {"est_pose": None, "gt_poses": None, "cam_pts": None}
        est_pose += [np.r_[vo.pose[:3, 3]]]
        gt_poses += [vo.gt_poses[i][:3, 3]]
        pts["est_pose"] = est_pose
        pts["gt_poses"] = gt_poses
        pts["cam_pts"] = np.vstack(vo.Q_3d).reshape((-1, 3)).tolist()
        # pts["colors"] = np.hstack(vo.colors).reshape((-1)).tolist()
        disp.q.put(pts)
        time.sleep(dt)
        i += 1
    # Keep re-sending the last frame so the viewer stays alive.
    # NOTE(review): `pts` is undefined here if vo.step() never returned True
    # (empty/one-frame sequence) -- confirm the dataset always has >= 2 frames.
    while True:
        disp.q.put(pts)
        time.sleep(dt)
"numpy.hstack",
"time.sleep",
"cv2.triangulatePoints",
"numpy.array",
"matplotlib.pyplot.imshow",
"os.listdir",
"numpy.stack",
"cv2.ORB_create",
"numpy.vstack",
"numpy.eye",
"numpy.floor",
"numpy.argmax",
"cv2.imread",
"matplotlib.pyplot.show",
"cv2.drawKeypoints",
"numpy.logical_and",... | [((6223, 6232), 'display.Display', 'Display', ([], {}), '()\n', (6230, 6232), False, 'from display import Display\n'), ((719, 731), 'numpy.eye', 'np.eye', (['(4)', '(4)'], {}), '(4, 4)\n', (725, 731), True, 'import numpy as np\n'), ((1002, 1032), 'cv2.ORB_create', 'cv.ORB_create', (['self.n_features'], {}), '(self.n_features)\n', (1015, 1032), True, 'import cv2 as cv\n'), ((1430, 1479), 'cv2.FlannBasedMatcher', 'cv.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (1450, 1479), True, 'import cv2 as cv\n'), ((1552, 1588), 'numpy.loadtxt', 'np.loadtxt', (['file_path'], {'delimiter': '""" """'}), "(file_path, delimiter=' ')\n", (1562, 1588), True, 'import numpy as np\n'), ((1820, 1856), 'numpy.loadtxt', 'np.loadtxt', (['file_path'], {'delimiter': '""" """'}), "(file_path, delimiter=' ')\n", (1830, 1856), True, 'import numpy as np\n'), ((2294, 2321), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.float32'}), '(4, dtype=np.float32)\n', (2300, 2321), True, 'import numpy as np\n'), ((4250, 4351), 'cv2.findEssentialMat', 'cv.findEssentialMat', (['q_i', 'q_j', 'self.left_cam[:3, :3]'], {'prob': '(0.9999)', 'threshold': '(1)', 'method': 'cv.RANSAC'}), '(q_i, q_j, self.left_cam[:3, :3], prob=0.9999, threshold\n =1, method=cv.RANSAC)\n', (4269, 4351), True, 'import cv2 as cv\n'), ((4515, 4542), 'cv2.decomposeEssentialMat', 'cv.decomposeEssentialMat', (['E'], {}), '(E)\n', (4539, 4542), True, 'import cv2 as cv\n'), ((5308, 5333), 'numpy.argmax', 'np.argmax', (['feasible_poses'], {}), '(feasible_poses)\n', (5317, 5333), True, 'import numpy as np\n'), ((6868, 6882), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (6878, 6882), False, 'import time\n'), ((6947, 6961), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (6957, 6961), False, 'import time\n'), ((358, 394), 'os.path.join', 'os.path.join', (['data_path', '"""poses.txt"""'], {}), "(data_path, 'poses.txt')\n", (370, 394), False, 'import os\n'), 
((451, 487), 'os.path.join', 'os.path.join', (['data_path', '"""calib.txt"""'], {}), "(data_path, 'calib.txt')\n", (463, 487), False, 'import os\n'), ((530, 564), 'os.path.join', 'os.path.join', (['data_path', '"""image_l"""'], {}), "(data_path, 'image_l')\n", (542, 564), False, 'import os\n'), ((608, 642), 'os.path.join', 'os.path.join', (['data_path', '"""image_r"""'], {}), "(data_path, 'image_r')\n", (620, 642), False, 'import os\n'), ((1636, 1658), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (1644, 1658), True, 'import numpy as np\n'), ((2055, 2074), 'os.listdir', 'os.listdir', (['cam_dir'], {}), '(cam_dir)\n', (2065, 2074), False, 'import os\n'), ((2705, 2750), 'cv2.drawKeypoints', 'cv.drawKeypoints', (['self.left_imgs[i]', 'kp', 'None'], {}), '(self.left_imgs[i], kp, None)\n', (2721, 2750), True, 'import cv2 as cv\n'), ((2765, 2780), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2775, 2780), True, 'from matplotlib import pyplot as plt\n'), ((2795, 2805), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2803, 2805), True, 'from matplotlib import pyplot as plt\n'), ((3508, 3534), 'numpy.stack', 'np.stack', (['filtered_matches'], {}), '(filtered_matches)\n', (3516, 3534), True, 'import numpy as np\n'), ((4897, 4939), 'cv2.triangulatePoints', 'cv.triangulatePoints', (['Pi', 'Pj', 'q_i.T', 'q_j.T'], {}), '(Pi, Pj, q_i.T, q_j.T)\n', (4917, 4939), True, 'import cv2 as cv\n'), ((6263, 6288), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (6278, 6288), False, 'import os\n'), ((1675, 1704), 'numpy.hstack', 'np.hstack', (['(poses, dummy_row)'], {}), '((poses, dummy_row))\n', (1684, 1704), True, 'import numpy as np\n'), ((2157, 2198), 'cv2.imread', 'cv.imread', (["(cam_dir + '/' + list_dir[i])", '(0)'], {}), "(cam_dir + '/' + list_dir[i], 0)\n", (2166, 2198), True, 'import cv2 as cv\n'), ((4844, 4856), 'numpy.eye', 'np.eye', (['(4)', '(4)'], {}), '(4, 4)\n', (4850, 4856), True, 'import 
numpy as np\n'), ((5193, 5245), 'numpy.logical_and', 'np.logical_and', (['z_validation_cam1', 'z_validation_cam2'], {}), '(z_validation_cam1, z_validation_cam2)\n', (5207, 5245), True, 'import numpy as np\n'), ((5508, 5521), 'numpy.floor', 'np.floor', (['q_i'], {}), '(q_i)\n', (5516, 5521), True, 'import numpy as np\n'), ((5009, 5028), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (5017, 5028), True, 'import numpy as np\n'), ((5082, 5094), 'numpy.eye', 'np.eye', (['(3)', '(4)'], {}), '(3, 4)\n', (5088, 5094), True, 'import numpy as np\n'), ((6721, 6739), 'numpy.vstack', 'np.vstack', (['vo.Q_3d'], {}), '(vo.Q_3d)\n', (6730, 6739), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import cv2
##############################################################
# Pixel offset of each 48x48 grid cell: offset_np[row][col] == [row*48, col*48]
# for a 5-row by 6-column grid.
offset_np = [[[row * 48, col * 48] for col in range(6)] for row in range(5)]
def grid_to_pix(box):
    """Convert grid-relative box parameters to pixel coordinates, in place.

    box[..., 0:2] hold cell-relative centres (scaled by the 48 px cell size
    and shifted by the cell offset); box[..., 2] and box[..., 3] hold square
    roots of the height/width normalised by 240 and 288 px respectively.
    Mutates and returns `box`.
    """
    box[..., 0:2] = 48. * box[..., 0:2] + offset_np
    box[..., 2] = 240. * np.square(box[..., 2])
    box[..., 3] = 288. * np.square(box[..., 3])
    return box
##############################################################
def calc_iou(label, pred1, pred2):
    """Return the IoU of `label` against two predicted boxes, stacked on axis 3."""
    iou_first = calc_iou_help(label, pred1)
    iou_second = calc_iou_help(label, pred2)
    return np.stack([iou_first, iou_second], 3)
def calc_iou_help(boxA, boxB):
    """Elementwise IoU of boxes given as (y_center, x_center, height, width)."""
    # Intersection rectangle bounds along each axis.
    top = np.maximum(boxA[..., 0] - 0.5 * boxA[..., 2], boxB[..., 0] - 0.5 * boxB[..., 2])
    bottom = np.minimum(boxA[..., 0] + 0.5 * boxA[..., 2], boxB[..., 0] + 0.5 * boxB[..., 2])
    left = np.maximum(boxA[..., 1] - 0.5 * boxA[..., 3], boxB[..., 1] - 0.5 * boxB[..., 3])
    right = np.minimum(boxA[..., 1] + 0.5 * boxA[..., 3], boxB[..., 1] + 0.5 * boxB[..., 3])
    # Negative extents mean the boxes do not overlap; clamp to zero.
    extent_y = bottom - top
    extent_x = right - left
    interArea = np.maximum(np.zeros_like(extent_y), extent_y) * np.maximum(np.zeros_like(extent_x), extent_x)
    # Box areas (absolute value guards against negative h/w inputs).
    areaA = np.absolute(boxA[..., 2] * boxA[..., 3])
    areaB = np.absolute(boxB[..., 2] * boxB[..., 3])
    # IoU = intersection / union.
    return interArea / (areaA + areaB - interArea)
def draw_box(name, image, truth, pred):
    """Render ground-truth boxes (left) and predicted boxes (right) side by
    side onto copies of `image` and save the composite to `name`.

    `truth` carries per-cell boxes at channels 0:4 and an objectness flag at
    channel 4; `pred` carries two boxes per cell (channels 0:4 with
    confidence 4, and channels 5:9 with confidence 9). Predicted boxes are
    drawn when their confidence exceeds 0.25.
    (Cleanup: removed unused locals -- objs, no_objs, cats, vld, cat,
    cat1, cat2 and duplicated argmax -- behavior unchanged.)
    """
    true_image = np.copy(image)
    pred_image = np.copy(image)
    # Work on copies: grid_to_pix converts coordinates in place.
    truth = np.copy(truth)
    pred = np.copy(pred)
    ############################################
    # Ground-truth boxes: cells whose objectness flag is set.
    boxes = grid_to_pix(truth[:, :, :, 0:4])
    obj = np.where(truth[:, :, :, 4] == 1)
    box = boxes[obj]
    for d in range(len(box)):
        draw_box_help(true_image, box[d], None)
    ############################################
    # First predicted box per cell.
    box1 = grid_to_pix(pred[:, :, 0:4])
    obj1 = np.where(pred[:, :, 4] > 0.25)
    boxes1 = box1[obj1]
    for d in range(len(boxes1)):
        draw_box_help(pred_image, boxes1[d], None)
    ############################################
    # Second predicted box per cell.
    box2 = grid_to_pix(pred[:, :, 5:9])
    obj2 = np.where(pred[:, :, 9] > 0.25)
    boxes2 = box2[obj2]
    for d in range(len(boxes2)):
        draw_box_help(pred_image, boxes2[d], None)
    ############################################
    concat = np.concatenate((true_image, pred_image), axis=1)
    plt.imsave(name, concat)
def draw_box_help(image, box, color):
    """Draw one (y_center, x_center, height, width) box onto `image` as a
    1 px black rectangle outline (the `color` argument is unused)."""
    y, x, h, w = box
    top_left = (int(x - 0.5 * w), int(y - 0.5 * h))
    bottom_right = (int(x + 0.5 * w), int(y + 0.5 * h))
    cv2.rectangle(image, top_left, bottom_right, 0, 1)
| [
"cv2.rectangle",
"numpy.copy",
"numpy.minimum",
"numpy.where",
"numpy.absolute",
"matplotlib.pyplot.imsave",
"numpy.argmax",
"numpy.square",
"numpy.stack",
"numpy.concatenate",
"numpy.maximum",
"numpy.zeros_like"
] | [((877, 902), 'numpy.stack', 'np.stack', (['[iou1, iou2]', '(3)'], {}), '([iou1, iou2], 3)\n', (885, 902), True, 'import numpy as np\n'), ((1013, 1098), 'numpy.maximum', 'np.maximum', (['(boxA[..., 0] - 0.5 * boxA[..., 2])', '(boxB[..., 0] - 0.5 * boxB[..., 2])'], {}), '(boxA[..., 0] - 0.5 * boxA[..., 2], boxB[..., 0] - 0.5 * boxB[..., 2]\n )\n', (1023, 1098), True, 'import numpy as np\n'), ((1099, 1184), 'numpy.minimum', 'np.minimum', (['(boxA[..., 0] + 0.5 * boxA[..., 2])', '(boxB[..., 0] + 0.5 * boxB[..., 2])'], {}), '(boxA[..., 0] + 0.5 * boxA[..., 2], boxB[..., 0] + 0.5 * boxB[..., 2]\n )\n', (1109, 1184), True, 'import numpy as np\n'), ((1186, 1271), 'numpy.maximum', 'np.maximum', (['(boxA[..., 1] - 0.5 * boxA[..., 3])', '(boxB[..., 1] - 0.5 * boxB[..., 3])'], {}), '(boxA[..., 1] - 0.5 * boxA[..., 3], boxB[..., 1] - 0.5 * boxB[..., 3]\n )\n', (1196, 1271), True, 'import numpy as np\n'), ((1272, 1357), 'numpy.minimum', 'np.minimum', (['(boxA[..., 1] + 0.5 * boxA[..., 3])', '(boxB[..., 1] + 0.5 * boxB[..., 3])'], {}), '(boxA[..., 1] + 0.5 * boxA[..., 3], boxB[..., 1] + 0.5 * boxB[..., 3]\n )\n', (1282, 1357), True, 'import numpy as np\n'), ((1609, 1649), 'numpy.absolute', 'np.absolute', (['(boxA[..., 2] * boxA[..., 3])'], {}), '(boxA[..., 2] * boxA[..., 3])\n', (1620, 1649), True, 'import numpy as np\n'), ((1663, 1703), 'numpy.absolute', 'np.absolute', (['(boxB[..., 2] * boxB[..., 3])'], {}), '(boxB[..., 2] * boxB[..., 3])\n', (1674, 1703), True, 'import numpy as np\n'), ((2053, 2067), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (2060, 2067), True, 'import numpy as np\n'), ((2085, 2099), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (2092, 2099), True, 'import numpy as np\n'), ((2112, 2126), 'numpy.copy', 'np.copy', (['truth'], {}), '(truth)\n', (2119, 2126), True, 'import numpy as np\n'), ((2138, 2151), 'numpy.copy', 'np.copy', (['pred'], {}), '(pred)\n', (2145, 2151), True, 'import numpy as np\n'), ((2393, 2425), 'numpy.where', 
'np.where', (['(truth[:, :, :, 4] == 1)'], {}), '(truth[:, :, :, 4] == 1)\n', (2401, 2425), True, 'import numpy as np\n'), ((2631, 2668), 'numpy.argmax', 'np.argmax', (['pred[:, :, 10:12]'], {'axis': '(-1)'}), '(pred[:, :, 10:12], axis=-1)\n', (2640, 2668), True, 'import numpy as np\n'), ((2752, 2774), 'numpy.where', 'np.where', (['(conf1 > 0.25)'], {}), '(conf1 > 0.25)\n', (2760, 2774), True, 'import numpy as np\n'), ((3013, 3050), 'numpy.argmax', 'np.argmax', (['pred[:, :, 10:12]'], {'axis': '(-1)'}), '(pred[:, :, 10:12], axis=-1)\n', (3022, 3050), True, 'import numpy as np\n'), ((3130, 3152), 'numpy.where', 'np.where', (['(conf2 > 0.25)'], {}), '(conf2 > 0.25)\n', (3138, 3152), True, 'import numpy as np\n'), ((3398, 3446), 'numpy.concatenate', 'np.concatenate', (['(true_image, pred_image)'], {'axis': '(1)'}), '((true_image, pred_image), axis=1)\n', (3412, 3446), True, 'import numpy as np\n'), ((3451, 3475), 'matplotlib.pyplot.imsave', 'plt.imsave', (['name', 'concat'], {}), '(name, concat)\n', (3461, 3475), True, 'import matplotlib.pyplot as plt\n'), ((3620, 3656), 'cv2.rectangle', 'cv2.rectangle', (['image', 'pt1', 'pt2', '(0)', '(1)'], {}), '(image, pt1, pt2, 0, 1)\n', (3633, 3656), False, 'import cv2\n'), ((593, 615), 'numpy.square', 'np.square', (['box[..., 2]'], {}), '(box[..., 2])\n', (602, 615), True, 'import numpy as np\n'), ((643, 665), 'numpy.square', 'np.square', (['box[..., 3]'], {}), '(box[..., 3])\n', (652, 665), True, 'import numpy as np\n'), ((1460, 1477), 'numpy.zeros_like', 'np.zeros_like', (['iy'], {}), '(iy)\n', (1473, 1477), True, 'import numpy as np\n'), ((1496, 1513), 'numpy.zeros_like', 'np.zeros_like', (['ix'], {}), '(ix)\n', (1509, 1513), True, 'import numpy as np\n')] |
import math
import numpy as np
from sklearn.neighbors import KernelDensity
from scipy.signal import argrelextrema
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
# --------------- Filter line segments with major orientation ---------------
def get_orientation(line):
    """Return the orientation of `line` in degrees, in (-90, 90].

    `line` is [x1, y1, x2, y2]. If the first endpoint lies to the right of
    the second, the endpoints are swapped *in place* so the line always
    points in the +x direction before measuring the angle.
    """
    if line[0] > line[2]:
        line[0], line[2] = line[2], line[0]
        line[1], line[3] = line[3], line[1]
    angle = math.atan2(line[3] - line[1], line[2] - line[0])
    return math.degrees(angle)
def lineMagnitude(line):
    """Return the Euclidean length of a segment given as [x1, y1, x2, y2]."""
    dx = line[2] - line[0]
    dy = line[3] - line[1]
    return math.hypot(dx, dy)
def perp_distance(a_line, b_line):
    """Perpendicular distance between two (assumed parallel) segments.

    Measured from the first endpoint of `a_line` to the infinite line
    through `b_line`.
    """
    px, py = a_line[:2]
    x1, y1, x2, y2 = b_line
    mag = lineMagnitude(b_line)
    # Parametric position of the foot of the perpendicular on b_line.
    u = ((px - x1) * (x2 - x1) + (py - y1) * (y2 - y1)) / (mag * mag)
    foot_x = x1 + u * (x2 - x1)
    foot_y = y1 + u * (y2 - y1)
    return lineMagnitude([px, py, foot_x, foot_y])
def is_aligned(a_line, b_line, dist, tol_angle):
    """Return True when two near-parallel segments roughly lie on one line.

    `dist` is the shortest distance between the segments; `tol_angle` is
    the angular tolerance in degrees.
    """
    if dist < 10:
        # Close enough to count as aligned regardless of lateral offset.
        return True
    return perp_distance(a_line, b_line) / dist < math.sin(math.pi / 180 * tol_angle)
def find_major_orientations(lines):
    """Keep only the line segments belonging to the dominant orientations.

    Orientations are clustered with a 1D Gaussian KDE; clusters are ranked
    by the total length of their segments, and the clusters matching the
    top layer's two perpendicular directions (plus, if present, a sub-top
    layer's pair) are retained. Returns the filtered lines as a list.
    """
    orit = []   # orientation (degrees) of each line
    l_mag = []  # length of each line
    for l in lines:
        line_mag = lineMagnitude(l)
        orientation = get_orientation(l)
        l_mag.append(line_mag)
        orit.append(orientation)
    # 1D clustering of orientations via kernel density estimation
    kde = KernelDensity(bandwidth=2,
                        kernel='gaussian').fit(np.array(orit).reshape(-1, 1))
    # make the range a little wider than [-90, 90]
    s = np.linspace(-91, 91, 183)
    e = kde.score_samples(s.reshape(-1, 1))
    #plt.plot(s, e)
    # mi: density minima (cluster boundaries), ma: maxima (cluster centers)
    mi, ma = argrelextrema(e, np.less)[0], argrelextrema(e, np.greater)[0]
    # special case:
    # nearly vertical lines categorized into two clusters (around -90 and +90)
    if s[ma][0] < -85 and s[ma][-1] > 85:
        # treat the two clusters as one; thus the number of groups is len(mi)
        groups = [[] for x in range(len(mi))]
        line_idx = [[] for x in range(len(mi))]
        for i in range(len(orit)):
            if orit[i] > s[mi][-1]:
                # past the last boundary: wraps around into the first group
                groups[0].append(l_mag[i])
                line_idx[0].append(i)
            else:
                for j in range(len(mi)):
                    if orit[i] < s[mi][j]:
                        groups[j].append(l_mag[i])
                        line_idx[j].append(i)
                        break
    else:
        # common case: len(mi) boundaries separate len(mi) + 1 groups
        groups = [[] for x in range(len(mi) + 1)]
        line_idx = [[] for x in range(len(mi) + 1)]
        if len(mi) > 0:
            for i in range(len(orit)):
                if orit[i] > s[mi][-1]:
                    groups[-1].append(l_mag[i])
                    line_idx[-1].append(i)
                else:
                    for j in range(len(mi)):
                        if orit[i] < s[mi][j]:
                            groups[j].append(l_mag[i])
                            line_idx[j].append(i)
                            break
        else:
            # single cluster: everything falls into one group
            for i in range(len(orit)):
                groups[0].append(l_mag[i])
                line_idx[0].append(i)
    # determine the major orientations based on the total length of line
    # segments from nearly the same orientation
    line_mag_sum = np.zeros(len(groups))
    for i in range(len(groups)):
        line_mag_sum[i] = np.sum(np.array(groups[i]))
    # the total length of line segments should not be too small
    # (1/6 is just an estimate)
    group_idx = np.where(
        line_mag_sum > np.sum(line_mag_sum) * 1 / 6)[0].tolist()
    group_idx_all = []
    # find the orientation of tow ends and tow edges along the boundary of
    # the top layer (or sub-top layer, because the total length of tow ends
    # or tow edges that consists of the boundary of the top layer is not
    # necessarily the largest)
    top_group_idx = []
    top_1_group_idx = np.argsort(line_mag_sum)[-1].tolist()
    top_group_idx.append(top_1_group_idx)
    if len(group_idx) > 1:
        # pick the (at most one) group roughly perpendicular to the top group
        for i in group_idx:
            if abs(abs(s[ma][i] - s[ma][top_1_group_idx]) - 90) < 5:
                top_group_idx.append(i)
                break
    group_idx_all = top_group_idx
    # just in case a sub-top layer exists, find the orientation of tow ends
    # and tow edges along the boundary of the sub-top layer
    sub_group_idx = list(set(group_idx) - set(top_group_idx))
    if len(sub_group_idx) > 0:
        sub_top_group_idx = []
        sub_top_1_group_idx = sub_group_idx[np.argsort(
            line_mag_sum[sub_group_idx])[-1]]
        sub_top_group_idx.append(sub_top_1_group_idx)
        if len(sub_group_idx) > 1:
            for i in sub_group_idx:
                if abs(abs(s[ma][i] - s[ma][sub_top_1_group_idx]) - 90) < 5:
                    sub_top_group_idx.append(i)
                    break
        group_idx_all += sub_top_group_idx
    # collect the original line indices of all retained groups
    idx_all = []
    for i in group_idx_all:
        idx_all += line_idx[i]
    filter_lines = np.array(lines)[idx_all]
    return filter_lines.tolist()
# --------------------------- Merge line segments ---------------------------
# modified from https://stackoverflow.com/questions/45531074/how-to-merge-lines-after-houghlinesp
def group_lines(lines):
    """Clusterize `lines` into groups of nearby, similarly oriented segments.

    The first line seeds the first group; each subsequent line either joins
    an existing group (done inside check_similaririty, which mutates the
    group) or starts a new one.
    """
    # Parameters to play with
    distance_tol = 25
    angle_tol = 22.5
    # the first line always creates the initial group
    groups = [[lines[0]]]
    for candidate in lines[1:]:
        if check_similaririty(candidate, groups, distance_tol, angle_tol):
            groups.append([candidate])
    return groups
def check_similaririty(line_new, groups, tol_distance_to_merge,
                       tol_angle_to_merge):
    '''Check whether `line_new` is similar to any line in `groups`.

    Side effect: when a similar line is found, `line_new` is appended to
    that group and False is returned (no new group needed). Returns True
    only when `line_new` matches no existing group.
    '''
    for group in groups:
        # walk through existing line groups
        for line_old in group:
            orit_new = get_orientation(line_new)
            orit_old = get_orientation(line_old)
            l_dist = get_distance(line_new, line_old)
            # special case:
            # if two line segements are nearly parallel
            # for merging, since we often deal with short lines, we allow comparatively
            # larger angles to check parallel and alignment conditions
            if abs(orit_new - orit_old) < 10:
                # if two line segements almost align, we allow a larger distance (40 pix) to merge
                if is_aligned(line_new, line_old, l_dist, 20) and l_dist < 40:
                    group.append(line_new)
                    return False
                elif l_dist > 15 and perp_distance(
                        line_new, line_old) / l_dist > math.sin(
                            math.pi / 180 * 60):
                    # parallel but laterally far apart: not the same line,
                    # skip to the next candidate in the group
                    continue
            # common case
            # check the angle between lines
            if abs(orit_new - orit_old) < tol_angle_to_merge:
                # if all is ok -- line is similar to others in group
                if l_dist < tol_distance_to_merge:
                    group.append(line_new)
                    return False
    # if it is totally different line
    return True
def get_distance(a_line, b_line):
    """Shortest distance between two segments.

    Computed as the minimum of the four endpoint-to-segment distances.
    """
    return min(
        DistancePointLine(a_line[:2], b_line),
        DistancePointLine(a_line[2:], b_line),
        DistancePointLine(b_line[:2], a_line),
        DistancePointLine(b_line[2:], a_line),
    )
def DistancePointLine(point, line):
    """Distance between `point` (x, y) and the segment `line` [x1, y1, x2, y2].

    Based on http://local.wasp.uwa.edu.au/~pbourke/geometry/pointline/source.vba
    """
    px, py = point
    x1, y1, x2, y2 = line
    mag = lineMagnitude(line)
    # Parametric position of the orthogonal projection of the point.
    u = ((px - x1) * (x2 - x1) + (py - y1) * (y2 - y1)) / (mag * mag)
    if (u < 1e-5) or (u > 1):
        # Projection falls outside the segment: use the nearer endpoint.
        to_start = lineMagnitude([px, py, x1, y1])
        to_end = lineMagnitude([px, py, x2, y2])
        return np.amin([to_start, to_end])
    # Projection lies on the segment: distance to the foot point.
    foot_x = x1 + u * (x2 - x1)
    foot_y = y1 + u * (y2 - y1)
    return lineMagnitude([px, py, foot_x, foot_y])
def sort_lines(lines):
    """Return the two extreme endpoints of a line cluster as [[x, y], [x, y]].

    Endpoints are ordered along x for near-horizontal clusters
    (orientation < 22.5 deg) and along y otherwise.
    """
    orientation = get_orientation(lines[0])
    # special case: a single segment already is its own extreme pair
    if len(lines) == 1:
        return [lines[0][:2], lines[0][2:]]
    # flatten [[x1, y1, x2, y2], ...] into a list of [x, y] endpoints
    points = []
    for seg in lines:
        points.append(seg[:2])
        points.append(seg[2:])
    axis = 0 if orientation < 22.5 else 1
    points.sort(key=lambda point: point[axis])
    # first and last point of the sorted cluster
    return [points[0], points[-1]]
def merge_lines(lines):
    """Merge nearby/collinear segments and return them as [x1, y1, x2, y2] lists.

    Lines are first split into two orientation groups (tow ends vs tow
    edges), each group is clustered, and every cluster is collapsed to a
    single segment spanning its extreme endpoints.
    """
    group_one = []
    group_two = []
    for line in lines:
        orientation = get_orientation(line)
        if orientation > 85:
            # let the nearly vertical lines be in the same group as -90 deg ones
            orientation -= 180
        # Since the range of line orientations is [-90, 90], and the
        # orientations of tow ends and tow edges come as a pair roughly
        # 90 deg apart (either -45/45 or 0/90), a threshold of 22.5 safely
        # separates the two groups.
        if orientation < 22.5:
            group_one.append(line)
        else:
            group_two.append(line)
    group_one.sort(key=lambda line: line[0])
    group_two.sort(key=lambda line: line[1])
    merged_lines_all = []
    # for each cluster in either group keep only one spanning segment
    for bucket in (group_one, group_two):
        if len(bucket) > 0:
            for group in group_lines(bucket):
                merged_lines_all.append(sort_lines(group))
    # [[x1, y1], [x2, y2]] -> [x1, y1, x2, y2]
    return [endpoints[0] + endpoints[1] for endpoints in merged_lines_all]
# ---------------------------------------------------------------------------
def remove_isolated_lines(lines):
    'Remove lines not "connected" to others'
    # Two lines count as connected when they are (1) close and roughly
    # perpendicular, or (2) near-vertical/near-horizontal and almost
    # aligned. Connected components with more than one member survive;
    # if everything is isolated, only the longest line is kept.
    n_l = len(lines)
    if n_l > 1:
        dist_m = np.zeros((n_l, n_l))        # pairwise shortest distances
        orit_diff_m = np.zeros((n_l, n_l))   # pairwise orientation differences
        # adjacency matrices to find connected components (one per case)
        adj_m_1 = np.zeros((n_l, n_l))
        adj_m_2 = np.zeros((n_l, n_l))
        for i in range(n_l):
            for j in range(i + 1, n_l):
                dist_m[i, j] = get_distance(lines[i], lines[j])
                orit_i = get_orientation(lines[i])
                orit_j = get_orientation(lines[j])
                orit_diff_m[i, j] = abs(orit_i - orit_j)
                # Case 1: find line segments within a distance and forming about 90 deg
                # condition for connection
                if dist_m[i, j] < 50 and abs(orit_diff_m[i, j] - 90) < 10:
                    adj_m_1[i, j] = 1
                # Case 2: find vertical or horizontal line segments that almost align
                # condition for connection
                if orit_diff_m[i, j] < 5 and (abs(orit_i) > 85
                                              or abs(orit_i) < 5):
                    if is_aligned(lines[i], lines[j], dist_m[i, j], 5):
                        adj_m_2[i, j] = 1
        # find line segements that are "connnected" (components of each graph)
        adj_1_n_components, adj_1_labels = connected_components(
            csgraph=csr_matrix(adj_m_1), directed=False, return_labels=True)
        adj_2_n_components, adj_2_labels = connected_components(
            csgraph=csr_matrix(adj_m_2), directed=False, return_labels=True)
        counts_1 = np.unique(adj_1_labels, return_counts=True)[1]
        counts_2 = np.unique(adj_2_labels, return_counts=True)[1]
        # keep polylines that are composed of more than one line segment
        polylines_1 = np.where(counts_1 > 1)[0]
        polylines_2 = np.where(counts_2 > 1)[0]
        connected_lines_idx_all = []
        if len(polylines_1) > 0:
            for idx_1 in polylines_1:
                connected_lines_idx_all.append(
                    np.where(adj_1_labels == idx_1)[0])
        if len(polylines_2) > 0:
            for idx_2 in polylines_2:
                connected_lines_idx_all.append(
                    np.where(adj_2_labels == idx_2)[0])
        if len(connected_lines_idx_all) > 0:
            filter_lines = np.array(lines)[np.concatenate(
                connected_lines_idx_all).ravel()].tolist()
        else:
            # if all the line segments are isolated from each other, then we keep the longest line segment
            filter_lines = lines[0]
            for line in lines[1:]:
                if lineMagnitude(line) > lineMagnitude(filter_lines):
                    filter_lines = line
            filter_lines = [filter_lines]
        return filter_lines
    else:
        # zero or one line: nothing can be isolated, return unchanged
        return lines
| [
"numpy.amin",
"numpy.unique",
"scipy.signal.argrelextrema",
"numpy.where",
"math.degrees",
"sklearn.neighbors.KernelDensity",
"numpy.argsort",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.sum",
"math.atan2",
"numpy.concatenate",
"math.hypot",
"scipy.sparse.csr_matrix",
"math.... | [((501, 549), 'math.atan2', 'math.atan2', (['(line[3] - line[1])', '(line[2] - line[0])'], {}), '(line[3] - line[1], line[2] - line[0])\n', (511, 549), False, 'import math\n'), ((568, 593), 'math.degrees', 'math.degrees', (['orientation'], {}), '(orientation)\n', (580, 593), False, 'import math\n'), ((681, 729), 'math.hypot', 'math.hypot', (['(line[2] - line[0])', '(line[3] - line[1])'], {}), '(line[2] - line[0], line[3] - line[1])\n', (691, 729), False, 'import math\n'), ((1956, 1981), 'numpy.linspace', 'np.linspace', (['(-91)', '(91)', '(183)'], {}), '(-91, 91, 183)\n', (1967, 1981), True, 'import numpy as np\n'), ((5435, 5450), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (5443, 5450), True, 'import numpy as np\n'), ((8862, 8879), 'numpy.amin', 'np.amin', (['[ix, iy]'], {}), '([ix, iy])\n', (8869, 8879), True, 'import numpy as np\n'), ((11278, 11298), 'numpy.zeros', 'np.zeros', (['(n_l, n_l)'], {}), '((n_l, n_l))\n', (11286, 11298), True, 'import numpy as np\n'), ((11322, 11342), 'numpy.zeros', 'np.zeros', (['(n_l, n_l)'], {}), '((n_l, n_l))\n', (11330, 11342), True, 'import numpy as np\n'), ((11419, 11439), 'numpy.zeros', 'np.zeros', (['(n_l, n_l)'], {}), '((n_l, n_l))\n', (11427, 11439), True, 'import numpy as np\n'), ((11459, 11479), 'numpy.zeros', 'np.zeros', (['(n_l, n_l)'], {}), '((n_l, n_l))\n', (11467, 11479), True, 'import numpy as np\n'), ((1415, 1450), 'math.sin', 'math.sin', (['(math.pi / 180 * tol_angle)'], {}), '(math.pi / 180 * tol_angle)\n', (1423, 1450), False, 'import math\n'), ((1790, 1835), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'bandwidth': '(2)', 'kernel': '"""gaussian"""'}), "(bandwidth=2, kernel='gaussian')\n", (1803, 1835), False, 'from sklearn.neighbors import KernelDensity\n'), ((2062, 2087), 'scipy.signal.argrelextrema', 'argrelextrema', (['e', 'np.less'], {}), '(e, np.less)\n', (2075, 2087), False, 'from scipy.signal import argrelextrema\n'), ((2092, 2120), 'scipy.signal.argrelextrema', 
'argrelextrema', (['e', 'np.greater'], {}), '(e, np.greater)\n', (2105, 2120), False, 'from scipy.signal import argrelextrema\n'), ((3798, 3817), 'numpy.array', 'np.array', (['groups[i]'], {}), '(groups[i])\n', (3806, 3817), True, 'import numpy as np\n'), ((12762, 12805), 'numpy.unique', 'np.unique', (['adj_1_labels'], {'return_counts': '(True)'}), '(adj_1_labels, return_counts=True)\n', (12771, 12805), True, 'import numpy as np\n'), ((12829, 12872), 'numpy.unique', 'np.unique', (['adj_2_labels'], {'return_counts': '(True)'}), '(adj_2_labels, return_counts=True)\n', (12838, 12872), True, 'import numpy as np\n'), ((12975, 12997), 'numpy.where', 'np.where', (['(counts_1 > 1)'], {}), '(counts_1 > 1)\n', (12983, 12997), True, 'import numpy as np\n'), ((13024, 13046), 'numpy.where', 'np.where', (['(counts_2 > 1)'], {}), '(counts_2 > 1)\n', (13032, 13046), True, 'import numpy as np\n'), ((1865, 1879), 'numpy.array', 'np.array', (['orit'], {}), '(orit)\n', (1873, 1879), True, 'import numpy as np\n'), ((4326, 4350), 'numpy.argsort', 'np.argsort', (['line_mag_sum'], {}), '(line_mag_sum)\n', (4336, 4350), True, 'import numpy as np\n'), ((4947, 4986), 'numpy.argsort', 'np.argsort', (['line_mag_sum[sub_group_idx]'], {}), '(line_mag_sum[sub_group_idx])\n', (4957, 4986), True, 'import numpy as np\n'), ((12541, 12560), 'scipy.sparse.csr_matrix', 'csr_matrix', (['adj_m_1'], {}), '(adj_m_1)\n', (12551, 12560), False, 'from scipy.sparse import csr_matrix\n'), ((12685, 12704), 'scipy.sparse.csr_matrix', 'csr_matrix', (['adj_m_2'], {}), '(adj_m_2)\n', (12695, 12704), False, 'from scipy.sparse import csr_matrix\n'), ((13233, 13264), 'numpy.where', 'np.where', (['(adj_1_labels == idx_1)'], {}), '(adj_1_labels == idx_1)\n', (13241, 13264), True, 'import numpy as np\n'), ((13412, 13443), 'numpy.where', 'np.where', (['(adj_2_labels == idx_2)'], {}), '(adj_2_labels == idx_2)\n', (13420, 13443), True, 'import numpy as np\n'), ((13524, 13539), 'numpy.array', 'np.array', (['lines'], {}), 
'(lines)\n', (13532, 13539), True, 'import numpy as np\n'), ((7366, 7394), 'math.sin', 'math.sin', (['(math.pi / 180 * 60)'], {}), '(math.pi / 180 * 60)\n', (7374, 7394), False, 'import math\n'), ((3963, 3983), 'numpy.sum', 'np.sum', (['line_mag_sum'], {}), '(line_mag_sum)\n', (3969, 3983), True, 'import numpy as np\n'), ((13540, 13579), 'numpy.concatenate', 'np.concatenate', (['connected_lines_idx_all'], {}), '(connected_lines_idx_all)\n', (13554, 13579), True, 'import numpy as np\n')] |
"""Sets of metrics to look at general sky coverage - nvisits/coadded depth/Teff.
"""
import numpy as np
import rubin_sim.maf.metrics as metrics
import rubin_sim.maf.slicers as slicers
import rubin_sim.maf.plots as plots
import rubin_sim.maf.metricBundles as mb
import rubin_sim.maf.utils as mafUtils
from .colMapDict import ColMapDict, getColMap
from .common import standardSummary, filterList, radecCols, combineMetadata
__all__ = ['nvisitsM5Maps', 'tEffMetrics', 'nvisitsPerNight', 'nvisitsPerProp']
def nvisitsM5Maps(colmap=None, runName='opsim',
                  extraSql=None, extraMetadata=None,
                  nside=64, runLength=10.,
                  ditherStacker=None, ditherkwargs=None):
    """Generate number of visits and Coadded depth per RA/Dec point in all and per filters.

    Builds one HealpixSlicer-based MetricBundle per filter (plus 'all' for the
    visit counts) and returns them as a single bundle dictionary.

    Parameters
    ----------
    colmap : dict, optional
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, optional
        The name of the simulated survey. Default is "opsim".
    extraSql : str, optional
        Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522').
        Default None, for no additional constraints.
    extraMetadata : str, optional
        Additional metadata to add before any below (i.e. "WFD"). Default is None.
    nside : int, optional
        Nside value for healpix slicer. Default 64.
        If "None" is passed, the healpixslicer-based metrics will be skipped.
    runLength : float, optional
        Length of the simulated survey, for scaling values for the plot limits.
        Default 10.
    ditherStacker: str or rubin_sim.maf.stackers.BaseDitherStacker
        Optional dither stacker to use to define ra/dec columns.
    ditherkwargs: dict, optional
        Optional dictionary of kwargs for the dither stacker.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []
    # The displayDict subgroup defaults to the extra metadata label, if any.
    subgroup = extraMetadata
    if subgroup is None:
        subgroup = 'All visits'
    raCol, decCol, degrees, ditherStacker, ditherMeta = radecCols(ditherStacker, colmap, ditherkwargs)
    extraMetadata = combineMetadata(extraMetadata, ditherMeta)
    # Set up basic all and per filter sql constraints.
    filterlist, colors, orders, sqls, metadata = filterList(all=True,
                                                            extraSql=extraSql,
                                                            extraMetadata=extraMetadata)
    # Set up some values to make nicer looking plots.
    benchmarkVals = mafUtils.scaleBenchmarks(runLength, benchmark='design')
    # Check that nvisits is not set to zero (for very short run length).
    for f in benchmarkVals['nvisits']:
        if benchmarkVals['nvisits'][f] == 0:
            print('Updating benchmark nvisits value in %s to be nonzero' % (f))
            benchmarkVals['nvisits'][f] = 1
    benchmarkVals['coaddedDepth'] = mafUtils.calcCoaddedDepth(benchmarkVals['nvisits'],
                                                              benchmarkVals['singleVisitDepth'])
    # Scale the nvisit ranges for the runLength.
    # These ranges set the histogram/color limits for the per-filter maps.
    nvisitsRange = {'u': [20, 80], 'g': [50, 150], 'r': [100, 250],
                    'i': [100, 250], 'z': [100, 300], 'y': [100, 300], 'all': [700, 1200]}
    scale = runLength / 10.0
    for f in nvisitsRange:
        for i in [0, 1]:
            nvisitsRange[f][i] = int(np.floor(nvisitsRange[f][i] * scale))
    # Generate Nvisit maps in all and per filters
    displayDict = {'group': 'Nvisits Maps', 'subgroup': subgroup}
    metric = metrics.CountMetric(colmap['mjd'], metricName='NVisits', units='')
    slicer = slicers.HealpixSlicer(nside=nside, latCol=decCol, lonCol=raCol,
                                   latLonDeg=degrees)
    for f in filterlist:
        sql = sqls[f]
        displayDict['caption'] = 'Number of visits per healpix in %s.' % metadata[f]
        displayDict['order'] = orders[f]
        # Use a coarser histogram binsize for the combined 'all' visits map.
        binsize = 2
        if f == 'all':
            binsize = 5
        plotDict = {'xMin': nvisitsRange[f][0], 'xMax': nvisitsRange[f][1],
                    'colorMin': nvisitsRange[f][0], 'colorMax': nvisitsRange[f][1],
                    'binsize': binsize, 'color': colors[f]}
        bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata[f],
                                  stackerList=ditherStacker,
                                  displayDict=displayDict, plotDict=plotDict,
                                  summaryMetrics=standardSummary())
        bundleList.append(bundle)
    # Generate Coadded depth maps per filter
    displayDict = {'group': 'Coadded M5 Maps', 'subgroup': subgroup}
    metric = metrics.Coaddm5Metric(m5Col=colmap['fiveSigmaDepth'], metricName='CoaddM5')
    slicer = slicers.HealpixSlicer(nside=nside, latCol=decCol, lonCol=raCol,
                                   latLonDeg=degrees)
    for f in filterlist:
        # Skip "all" for coadded depth.
        if f == 'all':
            continue
        # mag_zp: design-benchmark coadded depth, used as the plot zeropoint.
        mag_zp = benchmarkVals['coaddedDepth'][f]
        sql = sqls[f]
        displayDict['caption'] = 'Coadded depth per healpix, with %s benchmark value subtracted (%.1f) ' \
                                 'in %s.' % (f, mag_zp, metadata[f])
        displayDict['caption'] += ' More positive numbers indicate fainter limiting magnitudes.'
        displayDict['order'] = orders[f]
        plotDict = {'zp': mag_zp, 'xMin': -0.6, 'xMax': 0.6,
                    'xlabel': 'coadded m5 - %.1f' % mag_zp,
                    'colorMin': -0.6, 'colorMax': 0.6, 'color': colors[f]}
        bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata[f],
                                  stackerList=ditherStacker,
                                  displayDict=displayDict, plotDict=plotDict,
                                  summaryMetrics=standardSummary())
        bundleList.append(bundle)
    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def tEffMetrics(colmap=None, runName='opsim',
                extraSql=None, extraMetadata=None, nside=64,
                ditherStacker=None, ditherkwargs=None):
    """Generate a series of Teff metrics. Teff total, per night, and sky maps (all and per filter).

    Parameters
    ----------
    colmap : dict, optional
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, optional
        The name of the simulated survey. Default is "opsim".
    extraSql : str, optional
        Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522').
        Default None, for no additional constraints.
    extraMetadata : str, optional
        Additional metadata to add before any below (i.e. "WFD"). Default is None.
    nside : int, optional
        Nside value for healpix slicer. Default 64.
        If "None" is passed, the healpixslicer-based metrics will be skipped.
    ditherStacker: str or rubin_sim.maf.stackers.BaseDitherStacker
        Optional dither stacker to use to define ra/dec columns.
    ditherkwargs: dict, optional
        Optional dictionary of kwargs for the dither stacker.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []
    subgroup = extraMetadata
    if subgroup is None:
        subgroup = 'All visits'
    raCol, decCol, degrees, ditherStacker, ditherMeta = radecCols(ditherStacker, colmap, ditherkwargs)
    extraMetadata = combineMetadata(extraMetadata, ditherMeta)
    # Set up basic all and per filter sql constraints.
    filterlist, colors, orders, sqls, metadata = filterList(all=True,
                                                            extraSql=extraSql,
                                                            extraMetadata=extraMetadata)
    if metadata['all'] is None:
        metadata['all'] = 'All visits'
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
    # Total Teff and normalized Teff.
    # These two are scalar summaries over all visits (UniSlicer).
    displayDict = {'group': 'T_eff Summary', 'subgroup': subgroup}
    displayDict['caption'] = 'Total effective time of the survey (see Teff metric).'
    displayDict['order'] = 0
    metric = metrics.TeffMetric(m5Col=colmap['fiveSigmaDepth'], filterCol=colmap['filter'],
                                normed=False, metricName='Total Teff')
    slicer = slicers.UniSlicer()
    bundle = mb.MetricBundle(metric, slicer, constraint=sqls['all'], displayDict=displayDict,
                             metadata=metadata['all'])
    bundleList.append(bundle)
    displayDict['caption'] = 'Normalized total effective time of the survey (see Teff metric).'
    displayDict['order'] = 1
    metric = metrics.TeffMetric(m5Col=colmap['fiveSigmaDepth'], filterCol=colmap['filter'],
                                normed=True, metricName='Normalized Teff')
    slicer = slicers.UniSlicer()
    bundle = mb.MetricBundle(metric, slicer, constraint=sqls['all'], displayDict=displayDict,
                             metadata=metadata['all'])
    bundleList.append(bundle)
    # Generate Teff maps in all and per filters
    displayDict = {'group': 'T_eff Maps', 'subgroup': subgroup}
    if ditherMeta is not None:
        for m in metadata:
            metadata[m] = combineMetadata(metadata[m], ditherMeta)
    metric = metrics.TeffMetric(m5Col=colmap['fiveSigmaDepth'], filterCol=colmap['filter'],
                                normed=True, metricName='Normalized Teff')
    slicer = slicers.HealpixSlicer(nside=nside, latCol=decCol, lonCol=raCol,
                                   latLonDeg=degrees)
    for f in filterlist:
        displayDict['caption'] = 'Normalized effective time of the survey, for %s' % metadata[f]
        displayDict['order'] = orders[f]
        plotDict = {'color': colors[f]}
        bundle = mb.MetricBundle(metric, slicer, sqls[f], metadata=metadata[f],
                                  stackerList=ditherStacker,
                                  displayDict=displayDict, plotFuncs=subsetPlots, plotDict=plotDict,
                                  summaryMetrics=standardSummary())
        bundleList.append(bundle)
    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def nvisitsPerNight(colmap=None, runName='opsim', binNights=1,
                    extraSql=None, extraMetadata=None, subgroup=None):
    """Count the number of visits per night through the survey.

    Parameters
    ----------
    colmap : dict or None, optional
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, optional
        The name of the simulated survey. Default is "opsim".
    binNights : int, optional
        Number of nights to count in each bin. Default = 1, count number of visits in each night.
    extraSql : str or None, optional
        Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522').
        Default None, for no additional constraints.
    extraMetadata : str or None, optional
        Additional metadata to add before any below (i.e. "WFD"). Default is None.
    subgroup : str or None, optional
        Use this for the 'subgroup' in the displayDict, instead of metadata. Default is None.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    # Display subgroup fallback chain: explicit arg -> extraMetadata -> generic label.
    # (Removed a redundant no-op 'subgroup = subgroup' assignment.)
    if subgroup is None:
        subgroup = extraMetadata
        if subgroup is None:
            subgroup = 'All visits'
    # Caption label fallback chain: extraMetadata -> extraSql -> 'all visits'.
    metadataCaption = extraMetadata
    if extraMetadata is None:
        if extraSql is not None:
            metadataCaption = extraSql
        else:
            metadataCaption = 'all visits'
    bundleList = []
    displayDict = {'group': 'Nvisits Per Night', 'subgroup': subgroup}
    displayDict['caption'] = 'Number of visits per night for %s.' % (metadataCaption)
    displayDict['order'] = 0
    metric = metrics.CountMetric(colmap['mjd'], metricName='Nvisits')
    # One-dimensional slicer over night, binning 'binNights' nights together.
    slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=binNights)
    bundle = mb.MetricBundle(metric, slicer, extraSql, metadata=metadataCaption,
                             displayDict=displayDict, summaryMetrics=standardSummary())
    bundleList.append(bundle)
    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def nvisitsPerProp(opsdb, colmap=None, runName='opsim', binNights=1, extraSql=None):
    """Set up a group of all and per-proposal nvisits metrics.

    Parameters
    ----------
    opsdb : rubin_sim.maf.db.Database or rubin_sim.maf.db.OpsimDatabase* object
    colmap : dict or None, optional
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, optional
        The name of the simulated survey. Default is "opsim".
    binNights : int, optional
        Number of nights to count in each bin. Default = 1, count number of visits in each night.
    sqlConstraint : str or None, optional
        SQL constraint to add to all metrics.

    Returns
    -------
    metricBundle
    """
    if colmap is None:
        colmap = getColMap(opsdb)
    propids, proptags = opsdb.fetchPropInfo()
    bdict = {}
    bundleList = []
    # Total visit count, used to normalize per-proposal fractions below.
    totvisits = opsdb.fetchNVisits()
    metadata = 'All props'
    if extraSql is not None and len(extraSql) > 0:
        metadata += ' %s' % extraSql
    # Nvisits per night, all proposals.
    bdict.update(nvisitsPerNight(colmap=colmap, runName=runName, binNights=binNights,
                                 extraSql=extraSql, extraMetadata=metadata, subgroup='All proposals'))
    # Nvisits total, all proposals.
    metric = metrics.CountMetric(colmap['mjd'], metricName='Nvisits')
    slicer = slicers.UniSlicer()
    summaryMetrics = [metrics.IdentityMetric(metricName='Count'),
                      metrics.NormalizeMetric(normVal=totvisits, metricName='Fraction of total')]
    # 'order' starts at -1 so the all-proposal bundle sorts ahead of the
    # per-proposal bundles, which increment it as they are created.
    displayDict = {'group': 'Nvisit Summary', 'subgroup': 'Proposal distribution', 'order': -1}
    displayDict['caption'] = 'Total number of visits for all proposals.'
    if extraSql is not None and len(extraSql) > 0:
        displayDict['caption'] += ' (with constraint %s.)' % extraSql
    bundle = mb.MetricBundle(metric, slicer, extraSql, metadata=metadata,
                             displayDict=displayDict, summaryMetrics=summaryMetrics)
    bundleList.append(bundle)
    # Look for any multi-proposal groups that we should include.
    for tag in proptags:
        if len(proptags[tag]) > 1:
            pids = proptags[tag]
            # Build an OR'd constraint over all proposal ids in the group.
            sql = '('
            for pid in pids[:-1]:
                sql += '%s=%d or ' % (colmap['proposalId'], pid)
            sql += ' %s=%d)' % (colmap['proposalId'], pids[-1])
            metadata = '%s' % tag
            if extraSql is not None:
                sql = '(%s) and (%s)' % (sql, extraSql)
                metadata += ' %s' % (extraSql)
            bdict.update(nvisitsPerNight(colmap=colmap, runName=runName, binNights=binNights,
                                         extraSql=sql, extraMetadata=metadata, subgroup=tag))
            displayDict['order'] += 1
            displayDict['caption'] = 'Number of visits and fraction of total visits, for %s.' % metadata
            bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata,
                                     summaryMetrics=summaryMetrics, displayDict=displayDict)
            bundleList.append(bundle)
    # And each proposal separately.
    for propid in propids:
        sql = '%s=%d' % (colmap['proposalId'], propid)
        metadata = '%s' % (propids[propid])
        if extraSql is not None:
            sql += ' and (%s)' % (extraSql)
            metadata += ' %s' % extraSql
        bdict.update(nvisitsPerNight(colmap=colmap, runName=runName, binNights=binNights,
                                     extraSql=sql, extraMetadata=metadata, subgroup='Per proposal'))
        displayDict['order'] += 1
        displayDict['caption'] = 'Number of visits and fraction of total visits, for %s.' % metadata
        bundle = mb.MetricBundle(metric, slicer, constraint=sql, metadata=metadata,
                                 summaryMetrics=summaryMetrics, displayDict=displayDict)
        bundleList.append(bundle)
    for b in bundleList:
        b.setRunName(runName)
    bdict.update(mb.makeBundlesDictFromList(bundleList))
    return bdict
| [
"rubin_sim.maf.metricBundles.MetricBundle",
"rubin_sim.maf.slicers.OneDSlicer",
"rubin_sim.maf.plots.HealpixHistogram",
"rubin_sim.maf.metrics.CountMetric",
"numpy.floor",
"rubin_sim.maf.utils.calcCoaddedDepth",
"rubin_sim.maf.plots.HealpixSkyMap",
"rubin_sim.maf.metricBundles.makeBundlesDictFromList"... | [((2608, 2663), 'rubin_sim.maf.utils.scaleBenchmarks', 'mafUtils.scaleBenchmarks', (['runLength'], {'benchmark': '"""design"""'}), "(runLength, benchmark='design')\n", (2632, 2663), True, 'import rubin_sim.maf.utils as mafUtils\n'), ((2981, 3072), 'rubin_sim.maf.utils.calcCoaddedDepth', 'mafUtils.calcCoaddedDepth', (["benchmarkVals['nvisits']", "benchmarkVals['singleVisitDepth']"], {}), "(benchmarkVals['nvisits'], benchmarkVals[\n 'singleVisitDepth'])\n", (3006, 3072), True, 'import rubin_sim.maf.utils as mafUtils\n'), ((3624, 3690), 'rubin_sim.maf.metrics.CountMetric', 'metrics.CountMetric', (["colmap['mjd']"], {'metricName': '"""NVisits"""', 'units': '""""""'}), "(colmap['mjd'], metricName='NVisits', units='')\n", (3643, 3690), True, 'import rubin_sim.maf.metrics as metrics\n'), ((3704, 3791), 'rubin_sim.maf.slicers.HealpixSlicer', 'slicers.HealpixSlicer', ([], {'nside': 'nside', 'latCol': 'decCol', 'lonCol': 'raCol', 'latLonDeg': 'degrees'}), '(nside=nside, latCol=decCol, lonCol=raCol, latLonDeg=\n degrees)\n', (3725, 3791), True, 'import rubin_sim.maf.slicers as slicers\n'), ((4724, 4799), 'rubin_sim.maf.metrics.Coaddm5Metric', 'metrics.Coaddm5Metric', ([], {'m5Col': "colmap['fiveSigmaDepth']", 'metricName': '"""CoaddM5"""'}), "(m5Col=colmap['fiveSigmaDepth'], metricName='CoaddM5')\n", (4745, 4799), True, 'import rubin_sim.maf.metrics as metrics\n'), ((4813, 4900), 'rubin_sim.maf.slicers.HealpixSlicer', 'slicers.HealpixSlicer', ([], {'nside': 'nside', 'latCol': 'decCol', 'lonCol': 'raCol', 'latLonDeg': 'degrees'}), '(nside=nside, latCol=decCol, lonCol=raCol, latLonDeg=\n degrees)\n', (4834, 4900), True, 'import rubin_sim.maf.slicers as slicers\n'), ((6068, 6106), 'rubin_sim.maf.metricBundles.makeBundlesDictFromList', 'mb.makeBundlesDictFromList', (['bundleList'], {}), '(bundleList)\n', (6094, 6106), True, 'import rubin_sim.maf.metricBundles as mb\n'), ((8349, 8471), 'rubin_sim.maf.metrics.TeffMetric', 
'metrics.TeffMetric', ([], {'m5Col': "colmap['fiveSigmaDepth']", 'filterCol': "colmap['filter']", 'normed': '(False)', 'metricName': '"""Total Teff"""'}), "(m5Col=colmap['fiveSigmaDepth'], filterCol=colmap[\n 'filter'], normed=False, metricName='Total Teff')\n", (8367, 8471), True, 'import rubin_sim.maf.metrics as metrics\n'), ((8512, 8531), 'rubin_sim.maf.slicers.UniSlicer', 'slicers.UniSlicer', ([], {}), '()\n', (8529, 8531), True, 'import rubin_sim.maf.slicers as slicers\n'), ((8545, 8656), 'rubin_sim.maf.metricBundles.MetricBundle', 'mb.MetricBundle', (['metric', 'slicer'], {'constraint': "sqls['all']", 'displayDict': 'displayDict', 'metadata': "metadata['all']"}), "(metric, slicer, constraint=sqls['all'], displayDict=\n displayDict, metadata=metadata['all'])\n", (8560, 8656), True, 'import rubin_sim.maf.metricBundles as mb\n'), ((8850, 8976), 'rubin_sim.maf.metrics.TeffMetric', 'metrics.TeffMetric', ([], {'m5Col': "colmap['fiveSigmaDepth']", 'filterCol': "colmap['filter']", 'normed': '(True)', 'metricName': '"""Normalized Teff"""'}), "(m5Col=colmap['fiveSigmaDepth'], filterCol=colmap[\n 'filter'], normed=True, metricName='Normalized Teff')\n", (8868, 8976), True, 'import rubin_sim.maf.metrics as metrics\n'), ((9017, 9036), 'rubin_sim.maf.slicers.UniSlicer', 'slicers.UniSlicer', ([], {}), '()\n', (9034, 9036), True, 'import rubin_sim.maf.slicers as slicers\n'), ((9050, 9161), 'rubin_sim.maf.metricBundles.MetricBundle', 'mb.MetricBundle', (['metric', 'slicer'], {'constraint': "sqls['all']", 'displayDict': 'displayDict', 'metadata': "metadata['all']"}), "(metric, slicer, constraint=sqls['all'], displayDict=\n displayDict, metadata=metadata['all'])\n", (9065, 9161), True, 'import rubin_sim.maf.metricBundles as mb\n'), ((9468, 9594), 'rubin_sim.maf.metrics.TeffMetric', 'metrics.TeffMetric', ([], {'m5Col': "colmap['fiveSigmaDepth']", 'filterCol': "colmap['filter']", 'normed': '(True)', 'metricName': '"""Normalized Teff"""'}), "(m5Col=colmap['fiveSigmaDepth'], 
filterCol=colmap[\n 'filter'], normed=True, metricName='Normalized Teff')\n", (9486, 9594), True, 'import rubin_sim.maf.metrics as metrics\n'), ((9635, 9722), 'rubin_sim.maf.slicers.HealpixSlicer', 'slicers.HealpixSlicer', ([], {'nside': 'nside', 'latCol': 'decCol', 'lonCol': 'raCol', 'latLonDeg': 'degrees'}), '(nside=nside, latCol=decCol, lonCol=raCol, latLonDeg=\n degrees)\n', (9656, 9722), True, 'import rubin_sim.maf.slicers as slicers\n'), ((10429, 10467), 'rubin_sim.maf.metricBundles.makeBundlesDictFromList', 'mb.makeBundlesDictFromList', (['bundleList'], {}), '(bundleList)\n', (10455, 10467), True, 'import rubin_sim.maf.metricBundles as mb\n'), ((12169, 12225), 'rubin_sim.maf.metrics.CountMetric', 'metrics.CountMetric', (["colmap['mjd']"], {'metricName': '"""Nvisits"""'}), "(colmap['mjd'], metricName='Nvisits')\n", (12188, 12225), True, 'import rubin_sim.maf.metrics as metrics\n'), ((12239, 12306), 'rubin_sim.maf.slicers.OneDSlicer', 'slicers.OneDSlicer', ([], {'sliceColName': "colmap['night']", 'binsize': 'binNights'}), "(sliceColName=colmap['night'], binsize=binNights)\n", (12257, 12306), True, 'import rubin_sim.maf.slicers as slicers\n'), ((12638, 12676), 'rubin_sim.maf.metricBundles.makeBundlesDictFromList', 'mb.makeBundlesDictFromList', (['bundleList'], {}), '(bundleList)\n', (12664, 12676), True, 'import rubin_sim.maf.metricBundles as mb\n'), ((13994, 14050), 'rubin_sim.maf.metrics.CountMetric', 'metrics.CountMetric', (["colmap['mjd']"], {'metricName': '"""Nvisits"""'}), "(colmap['mjd'], metricName='Nvisits')\n", (14013, 14050), True, 'import rubin_sim.maf.metrics as metrics\n'), ((14064, 14083), 'rubin_sim.maf.slicers.UniSlicer', 'slicers.UniSlicer', ([], {}), '()\n', (14081, 14083), True, 'import rubin_sim.maf.slicers as slicers\n'), ((14551, 14672), 'rubin_sim.maf.metricBundles.MetricBundle', 'mb.MetricBundle', (['metric', 'slicer', 'extraSql'], {'metadata': 'metadata', 'displayDict': 'displayDict', 'summaryMetrics': 'summaryMetrics'}), '(metric, 
slicer, extraSql, metadata=metadata, displayDict=\n displayDict, summaryMetrics=summaryMetrics)\n', (14566, 14672), True, 'import rubin_sim.maf.metricBundles as mb\n'), ((8067, 8088), 'rubin_sim.maf.plots.HealpixSkyMap', 'plots.HealpixSkyMap', ([], {}), '()\n', (8086, 8088), True, 'import rubin_sim.maf.plots as plots\n'), ((8090, 8114), 'rubin_sim.maf.plots.HealpixHistogram', 'plots.HealpixHistogram', ([], {}), '()\n', (8112, 8114), True, 'import rubin_sim.maf.plots as plots\n'), ((14106, 14148), 'rubin_sim.maf.metrics.IdentityMetric', 'metrics.IdentityMetric', ([], {'metricName': '"""Count"""'}), "(metricName='Count')\n", (14128, 14148), True, 'import rubin_sim.maf.metrics as metrics\n'), ((14172, 14246), 'rubin_sim.maf.metrics.NormalizeMetric', 'metrics.NormalizeMetric', ([], {'normVal': 'totvisits', 'metricName': '"""Fraction of total"""'}), "(normVal=totvisits, metricName='Fraction of total')\n", (14195, 14246), True, 'import rubin_sim.maf.metrics as metrics\n'), ((16408, 16534), 'rubin_sim.maf.metricBundles.MetricBundle', 'mb.MetricBundle', (['metric', 'slicer'], {'constraint': 'sql', 'metadata': 'metadata', 'summaryMetrics': 'summaryMetrics', 'displayDict': 'displayDict'}), '(metric, slicer, constraint=sql, metadata=metadata,\n summaryMetrics=summaryMetrics, displayDict=displayDict)\n', (16423, 16534), True, 'import rubin_sim.maf.metricBundles as mb\n'), ((16671, 16709), 'rubin_sim.maf.metricBundles.makeBundlesDictFromList', 'mb.makeBundlesDictFromList', (['bundleList'], {}), '(bundleList)\n', (16697, 16709), True, 'import rubin_sim.maf.metricBundles as mb\n'), ((15597, 15713), 'rubin_sim.maf.metricBundles.MetricBundle', 'mb.MetricBundle', (['metric', 'slicer', 'sql'], {'metadata': 'metadata', 'summaryMetrics': 'summaryMetrics', 'displayDict': 'displayDict'}), '(metric, slicer, sql, metadata=metadata, summaryMetrics=\n summaryMetrics, displayDict=displayDict)\n', (15612, 15713), True, 'import rubin_sim.maf.metricBundles as mb\n'), ((3456, 3492), 
'numpy.floor', 'np.floor', (['(nvisitsRange[f][i] * scale)'], {}), '(nvisitsRange[f][i] * scale)\n', (3464, 3492), True, 'import numpy as np\n')] |
## @ingroup Components
# Mass_Properties.py
#
# Created:
# Modified: Feb 2016, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Data
import numpy as np
# ----------------------------------------------------------------------
# Mass Properties
# ----------------------------------------------------------------------
## @ingroup Components
class Mass_Properties(Data):
    """Container for the mass properties of a physical component.

    Assumptions:
        None

    Source:
        None
    """

    def __defaults__(self):
        """Initialize all mass properties to zero.

        Assumptions:
            None

        Source:
            N/A

        Inputs:
            None

        Outputs:
            None

        Properties Used:
            None
        """
        # Scalar properties.
        self.mass = 0.0
        self.volume = 0.0
        # Center of gravity as a single (1, 3) row vector.
        self.center_of_gravity = np.zeros((1, 3))
        # Moments of inertia: reference point plus 3x3 inertia tensor.
        self.moments_of_inertia = Data()
        self.moments_of_inertia.center = np.zeros(3)
        self.moments_of_inertia.tensor = np.zeros((3, 3))
"SUAVE.Core.Data",
"numpy.array"
] | [((1061, 1088), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0]])\n', (1069, 1088), True, 'import numpy as np\n'), ((1130, 1136), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (1134, 1136), False, 'from SUAVE.Core import Data\n'), ((1178, 1203), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1186, 1203), True, 'import numpy as np\n'), ((1243, 1304), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n', (1251, 1304), True, 'import numpy as np\n')] |
"""
Routine to add Moster et al. 2013 stellar masses
python3 lc_add_Ms_Mo13.py 115 MD10
"""
import sys
ii = int(sys.argv[1])
env = sys.argv[2] # 'MD10'
status = sys.argv[3]
import h5py # HDF5 support
import os
import glob
import numpy as n
h5_dir = os.path.join(os.environ[env], 'cluster_h5/' )
input_list = n.array(glob.glob(os.path.join(h5_dir, "hlist_?.?????.hdf5")))
input_list.sort()
from scipy.stats import lognorm
from scipy.stats import norm
import astropy.units as u
import astropy.constants as cc
from astropy.cosmology import FlatLambdaCDM
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115)
from scipy.interpolate import interp1d
"""
definitions
-----------
- Planck flat LCDM cosmoMDlogy
- :math:`m = ln(M_{500} / (10^{15} M_\odot))`
- :math:`m_{gas} = ln(M_{gas, 500} / (10^{15} M_\odot))` is the gas mass within r500
- :math:`m_{lens} = ln(M_{lens, 500} / (10^{15} M_\odot))` is the spherical mass estimate from lensing corresponding to an
idealized shear profile without statistical noise
- :math:`l = ln(L_{500} / (E(z) 10^{44} erg/s))` where L is defined as the cluster rest-frame luminosity in the 0.1 - 2.4 keV band.
- :math:`t = ln(kT_{500} / keV)` where kT is the emission weighted temperature measured in annulus from 0.15 to 1.0 r500
- :math:`E(z) = H(z)/H_0`
- :math:`\epsilon = ln(E(z))`
Parameters
----------
* normalization for parameter X, :math:`N_X`
* slope for E(z) for parameter X, :math:`slope_E_X`
* slope for M500 for parameter X, :math:`slope_{M500}_X`
Workflow
--------
- Select relaxed clusters from the DM point of view. to be defined how ... with T/U ? Look at the publications from Sembolini, Yepes, Knebe ...
- using M15, add gas density profile and temperature using scaling relations
"""
# DIETRICH 2017
N_Mgas = 31.92 # Dietrich 17
N_kT = 2.18
N_L = 103.7
N_Lce = 102.66
slope_E_Mgas = 0.05 # Dietrich 17
slope_E_kT = 0.61
slope_E_L = 1.20
slope_E_Lce = 1.82
slope_M500_Mgas= 1.398 # Dietrich 17
slope_M500_kT = 0.66
slope_M500_L = 1.43 # 1.26*(1.+0.33*0.43)
slope_M500_Lce = 1.36 # 1.06*(1.+0.33*0.88)
scatter_Mgas = 0.106 # Dietrich 17
scatter_kT = 0.18
scatter_L = 0.24
scatter_Lce = 0.17
# MANTZ 2016
#N_Mgas = 31.98
#N_kT = 2.18
#N_L = 103.7
#N_Lce = 102.66
#slope_E_Mgas = -0.11
#slope_E_kT = 0.61
#slope_E_L = 1.20
#slope_E_Lce = 1.82
#slope_M500_Mgas= 1.04
#slope_M500_kT = 0.66
#slope_M500_L = 1.26
#slope_M500_Lce = 1.06
#scatter_Mgas = 0.086
#scatter_kT = 0.18
#scatter_L = 0.24
#scatter_Lce = 0.17
E035 = cosmoMD.efunc(0.35)
# converts logM500 to clusters observables
m500_to_qty = lambda logM500, z, slope_efunc, slope_m500, normalization : n.e**normalization * (cosmoMD.efunc(z)/E035)**(slope_efunc) * (10**(logM500-n.log10(6)-14))**(slope_m500)
logM500_to_logMgas = lambda logM500, z : m500_to_qty( logM500, z, slope_E_Mgas, slope_M500_Mgas, N_Mgas)
logM500_to_kT = lambda logM500, z : m500_to_qty( logM500, z, slope_E_kT, slope_M500_kT, N_kT)
logM500_to_L = lambda logM500, z : m500_to_qty( logM500, z, slope_E_L, slope_M500_L, N_L)
logM500_to_Lce = lambda logM500, z : m500_to_qty( logM500, z, slope_E_Lce, slope_M500_Lce, N_Lce)
# --- read halo masses and draw cluster observables from the scaling laws ---
file_1 = input_list[ii]
print(file_1)
f1 = h5py.File(file_1, "r+")
z = f1.attrs['redshift']
# Dataset.value was removed in h5py >= 3.0; read the full array with [:].
log_m500c = n.log10(f1['/halo_properties/M500c'][:])
nCluster = len(log_m500c)
# Gas mass: Gaussian scatter added to log10 of the mean relation.
Mean_Mgas = n.log10(logM500_to_logMgas(log_m500c, z))
V_scatter_Mgas = norm.rvs(loc=0, scale=scatter_Mgas, size=nCluster)
VAL_Mgas = Mean_Mgas + V_scatter_Mgas
# Temperature: scatter is added to the *linear* keV value here, unlike the
# other observables. NOTE(review): this asymmetry looks intentional in the
# original but may be a bug (log-scatter applied linearly) -- confirm.
Mean_kT = logM500_to_kT(log_m500c, z)
V_scatter_kT = norm.rvs(loc=0, scale=scatter_kT, size=nCluster)
VAL_kT = Mean_kT + V_scatter_kT
# X-ray luminosity (0.5-2.4 keV band), log10 + scatter.
Mean_L = n.log10(logM500_to_L(log_m500c, z))
V_scatter_L = norm.rvs(loc=0, scale=scatter_L, size=nCluster)
VAL_L = Mean_L + V_scatter_L
# Core-excised X-ray luminosity, log10 + scatter.
Mean_Lce = n.log10(logM500_to_Lce(log_m500c, z))
V_scatter_Lce = norm.rvs(loc=0, scale=scatter_Lce, size=nCluster)
VAL_Lce = Mean_Lce + V_scatter_Lce
if status == 'create':
    # First pass: create the datasets and record their units.
    ds = f1['/cluster_data'].create_dataset('log_Mgas', data = VAL_Mgas )
    ds.attrs['units'] = 'log10(Msun)'
    ds = f1['/cluster_data'].create_dataset('kT', data = VAL_kT )
    ds.attrs['units'] = 'keV'
    ds = f1['/cluster_data'].create_dataset('log_LX_05_24', data = VAL_L )
    ds.attrs['units'] = 'log10(L 0.5-2.4 keV/[erg/s])'
    ds = f1['/cluster_data'].create_dataset('log_LceX_05_24', data = VAL_Lce )
    ds.attrs['units'] = 'log10(Lce 0.5-2.4 keV/[erg/s])'
if status == 'update':
    # Later passes: overwrite the existing datasets in place.
    # (Replaced the original chained assignments 'ds = dset[:] = VAL'; the
    # leading 'ds =' binding was never used.)
    f1['/cluster_data/log_Mgas'][:] = VAL_Mgas
    f1['/cluster_data/kT'][:] = VAL_kT
    f1['/cluster_data/log_LX_05_24'][:] = VAL_L
    f1['/cluster_data/log_LceX_05_24'][:] = VAL_Lce
f1.close()
| [
"numpy.log10",
"os.path.join",
"astropy.cosmology.FlatLambdaCDM",
"scipy.stats.norm.rvs",
"h5py.File"
] | [((255, 299), 'os.path.join', 'os.path.join', (['os.environ[env]', '"""cluster_h5/"""'], {}), "(os.environ[env], 'cluster_h5/')\n", (267, 299), False, 'import os\n'), ((569, 627), 'astropy.cosmology.FlatLambdaCDM', 'FlatLambdaCDM', ([], {'H0': '(67.77 * u.km / u.s / u.Mpc)', 'Om0': '(0.307115)'}), '(H0=67.77 * u.km / u.s / u.Mpc, Om0=0.307115)\n', (582, 627), False, 'from astropy.cosmology import FlatLambdaCDM\n'), ((3218, 3241), 'h5py.File', 'h5py.File', (['file_1', '"""r+"""'], {}), "(file_1, 'r+')\n", (3227, 3241), False, 'import h5py\n'), ((3281, 3324), 'numpy.log10', 'n.log10', (["f1['/halo_properties/M500c'].value"], {}), "(f1['/halo_properties/M500c'].value)\n", (3288, 3324), True, 'import numpy as n\n'), ((3489, 3539), 'scipy.stats.norm.rvs', 'norm.rvs', ([], {'loc': '(0)', 'scale': 'scatter_Mgas', 'size': 'nCluster'}), '(loc=0, scale=scatter_Mgas, size=nCluster)\n', (3497, 3539), False, 'from scipy.stats import norm\n'), ((3630, 3678), 'scipy.stats.norm.rvs', 'norm.rvs', ([], {'loc': '(0)', 'scale': 'scatter_kT', 'size': 'nCluster'}), '(loc=0, scale=scatter_kT, size=nCluster)\n', (3638, 3678), False, 'from scipy.stats import norm\n'), ((3769, 3816), 'scipy.stats.norm.rvs', 'norm.rvs', ([], {'loc': '(0)', 'scale': 'scatter_L', 'size': 'nCluster'}), '(loc=0, scale=scatter_L, size=nCluster)\n', (3777, 3816), False, 'from scipy.stats import norm\n'), ((3910, 3959), 'scipy.stats.norm.rvs', 'norm.rvs', ([], {'loc': '(0)', 'scale': 'scatter_Lce', 'size': 'nCluster'}), '(loc=0, scale=scatter_Lce, size=nCluster)\n', (3918, 3959), False, 'from scipy.stats import norm\n'), ((332, 374), 'os.path.join', 'os.path.join', (['h5_dir', '"""hlist_?.?????.hdf5"""'], {}), "(h5_dir, 'hlist_?.?????.hdf5')\n", (344, 374), False, 'import os\n'), ((2749, 2759), 'numpy.log10', 'n.log10', (['(6)'], {}), '(6)\n', (2756, 2759), True, 'import numpy as n\n')] |
"""
==========
Polar plot
==========
Demo of a line plot on a polar axis.
"""
import numpy as np
import matplotlib.pyplot as plt
r = np.arange(0, 2, 0.01)
theta = 2 * np.pi * r
fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
ax.plot(theta, r)
ax.set_rmax(2)
ax.set_rticks([0.5, 1, 1.5, 2]) # Less radial ticks
ax.set_rlabel_position(-22.5) # Move radial labels away from plotted line
ax.grid(True)
ax.set_title("A line plot on a polar axis", va='bottom')
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.axes.Axes.plot` / `matplotlib.pyplot.plot`
# - `matplotlib.projections.polar`
# - `matplotlib.projections.polar.PolarAxes`
# - `matplotlib.projections.polar.PolarAxes.set_rticks`
# - `matplotlib.projections.polar.PolarAxes.set_rmax`
# - `matplotlib.projections.polar.PolarAxes.set_rlabel_position`
| [
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((136, 157), 'numpy.arange', 'np.arange', (['(0)', '(2)', '(0.01)'], {}), '(0, 2, 0.01)\n', (145, 157), True, 'import numpy as np\n'), ((191, 239), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': 'polar'}"}), "(subplot_kw={'projection': 'polar'})\n", (203, 239), True, 'import matplotlib.pyplot as plt\n'), ((473, 483), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (481, 483), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import tensorflow as tf
from scipy.special import loggamma
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Concatenate, Dense, Lambda, Multiply
class TFWeibullDistribution:
    """TensorFlow helpers for the two-parameter Weibull distribution
    (scale ``alpha``/``l``, shape ``beta``/``k``)."""

    @staticmethod
    def log_likelihood(x: tf.Tensor, alpha: tf.Tensor, beta: tf.Tensor):
        """Mean Weibull log-likelihood of ``x`` under (alpha, beta), up to an
        additive term that does not depend on the parameters.

        A tiny epsilon keeps ``log`` finite when ``x`` contains zeros.
        """
        ya = (x + 0.00000000001) / alpha
        return tf.reduce_mean(K.log(beta) + (beta * K.log(ya)) - K.pow(ya, beta))

    @staticmethod
    def kl_divergence(l1: tf.Tensor, k1: tf.Tensor, l2: tf.Tensor, k2: tf.Tensor):
        """Mean closed-form KL(Weibull(l1, k1) || Weibull(l2, k2)).

        Gamma(k2/k1 + 1) is evaluated as exp(lgamma(...)).
        """
        term_1 = K.log(k1 / K.pow(l1, k1))
        term_2 = K.log(k2 / K.pow(l2, k2))
        # 0.5772 approximates the Euler-Mascheroni constant.
        term_3 = (k1 - k2) * (K.log(l1) - 0.5772 / k1)
        term_4 = K.pow((l1 / l2), k2) * K.exp(tf.math.lgamma((k2 / k1) + 1))
        # Removed a leftover debug tf.print of the four intermediate terms.
        return K.mean(term_1 - term_2 + term_3 + term_4 - 1)
class WeibullDistribution:
    """Closed-form statistics of the Weibull(alpha, beta) distribution.

    ``alpha`` is the scale and ``beta`` the shape parameter; values may be
    scalars or NumPy arrays (``mode`` requires array inputs because it writes
    into the result).
    """
    @staticmethod
    def mean(alpha, beta):
        """E[X] = alpha * Gamma(1 + 1/beta), computed via exp(loggamma)."""
        gamma_term = np.exp(loggamma(1 + 1 / beta))
        return alpha * gamma_term
    @staticmethod
    def mode(alpha, beta):
        """Most likely value; defined as 0 wherever beta <= 1."""
        result = alpha * np.power((beta - 1) / beta, 1 / beta)
        result[beta <= 1] = 0
        return result
    @staticmethod
    def median(alpha, beta):
        """Median = alpha * ln(2)^(1/beta)."""
        return alpha * np.log(2.0) ** (1 / beta)
    @staticmethod
    def variance(alpha, beta):
        """Var[X] = alpha^2 * (Gamma(1 + 2/beta) - Gamma(1 + 1/beta)^2)."""
        g2 = np.exp(loggamma(1 + 2 / beta))
        g1 = np.exp(loggamma(1 + 1 / beta))
        return alpha ** 2 * (g2 - g1 ** 2)
    @staticmethod
    def quantile(q, alpha, beta):
        """Inverse CDF: the x with P(X <= x) = q."""
        return alpha * (-np.log(1 - q)) ** (1 / beta)
class NotCensoredWeibull(tf.keras.losses.Loss):
    """Joint loss for uncensored RUL targets: MAE on the point prediction
    plus the (negative) Weibull log-likelihood of the observed value.

    ``y_pred`` is expected to pack three columns: [predicted RUL, alpha, beta].
    """
    def __init__(self, regression_weight: float = 5, likelihood_weight:float = 1):
        super().__init__()
        self.regression_weight = regression_weight  # weight of the MAE term
        self.likelihood_weight = likelihood_weight  # weight of the likelihood term
    def call(self, y_true, y_pred):
        # Unpack the three heads emitted by the model.
        pRUL = y_pred[:, 0]
        alpha = y_pred[:, 1]
        beta = y_pred[:, 2]
        y_true = tf.squeeze(y_true)
        reg_loss = tf.keras.losses.MeanAbsoluteError()(pRUL, y_true)
        log_liks = TFWeibullDistribution.log_likelihood(y_true, alpha, beta)
        # log_liks = K.clip(log_liks, K.log(0.0000000001), K.log(1 - 0.0000000001))
        # + kl_weibull(alpha, beta, alpha, 2.0 )
        # Maximise the likelihood (hence the minus sign) while keeping the
        # point estimate close to the target.
        loss = -self.likelihood_weight*log_liks + self.regression_weight * reg_loss
        # + K.pow(ya,beta)
        return loss
class WeibullLayer(tf.keras.layers.Layer):
    """Base layer turning Weibull (alpha, beta) tensors into a RUL estimate.

    ``regression`` selects which point statistic of the distribution is used
    ("mode", "mean" or "median"); subclasses call ``_result`` to emit either
    the concatenated raw parameters or the point estimate.
    """
    def __init__(
        self,
        return_params=True,
        regression="mode",
        name="WeibullParams",
        *args,
        **kwargs
    ):
        super().__init__(name=name, *args, **kwargs)
        self.return_params = return_params
        if self.return_params:
            self.params = Concatenate(name="Weibullparams")
        # Pick the point statistic used as the RUL prediction.
        if regression == "mode":
            self.fun = self.mode
        elif regression == "mean":
            self.fun = self.mean
        elif regression == "median":
            self.fun = self.median
    def mean(self, lambda_pipe, k_pipe):
        # E[X] = lambda * Gamma(1 + 1/k), via exp(lgamma) for stability.
        inner_gamma = Lambda(lambda x: tf.math.exp(tf.math.lgamma(1 + (1 / x))))(k_pipe)
        return Multiply(name="RUL")([lambda_pipe, inner_gamma])
    def median(self, lambda_pipe, k_pipe):
        # median = lambda * ln(2)^(1/k)
        return lambda_pipe * (tf.math.pow(tf.math.log(2.0), tf.math.reciprocal(k_pipe)))
    def mode(self, alpha, beta):
        # The mode is 0 for beta <= 1: the mask zeroes those entries and the
        # clip keeps the power expression finite.
        mask = K.cast(K.greater(beta, 1), tf.float32)
        beta = tf.clip_by_value(beta, 1 + 0.00000000001, np.inf)
        return mask * alpha * tf.math.pow((beta - 1) / beta, (1 / beta))
    def _result(self, alpha, beta):
        # Emit either the concatenated raw parameters or the point estimate.
        RUL = self.fun(alpha, beta)
        if self.return_params:
            return self.params([alpha, beta])
        else:
            return RUL
class WeibullParameters(WeibullLayer):
    """Head mapping features to Weibull (alpha, beta) parameters.

    ``call`` returns ``[RUL, alpha, beta]`` concatenated along the feature
    axis, where RUL is the mode of the predicted distribution.
    """
    def __init__(self, hidden, regression="mode", return_params=True, *args, **kwargs):
        # NOTE(review): return_params is hard-coded to True below, so the
        # constructor argument is ignored — confirm whether that is intended.
        super(WeibullParameters, self).__init__(
            return_params=True, regression=regression, name="", *args, **kwargs
        )
        self.W = Dense(hidden, activation="relu")
        # Separate two-layer heads per parameter; softplus keeps alpha and
        # beta strictly positive.
        self.xalpha1 = Dense(hidden, activation="relu")
        self.xalpha2 = Dense(1, name="w_alpha", activation='softplus')
        self.xbeta1 = Dense(hidden, activation="relu")
        self.xbeta2 = Dense(1, name="w_beta", activation='softplus')
    def call(self, input_tensor, training=False):
        x = self.W(input_tensor)
        alpha = self.xalpha1(x)
        alpha = self.xalpha2(alpha)
        beta = self.xbeta1(x)
        beta = self.xbeta2(beta)
        RUL = self.mode(alpha, beta)
        x = Concatenate(axis=1)([RUL, alpha, beta])
        return x
| [
"tensorflow.keras.losses.MeanAbsoluteError",
"tensorflow.math.pow",
"tensorflow.keras.backend.log",
"tensorflow.keras.backend.mean",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.Multiply",
"numpy.power",
"tensorflow.keras.backend.greater",
"tensorflow.math.log",
"tensorflow.math... | [((766, 806), 'tensorflow.print', 'tf.print', (['term_1', 'term_2', 'term_3', 'term_4'], {}), '(term_1, term_2, term_3, term_4)\n', (774, 806), True, 'import tensorflow as tf\n'), ((822, 867), 'tensorflow.keras.backend.mean', 'K.mean', (['(term_1 - term_2 + term_3 + term_4 - 1)'], {}), '(term_1 - term_2 + term_3 + term_4 - 1)\n', (828, 867), True, 'from tensorflow.keras import backend as K\n'), ((1952, 1970), 'tensorflow.squeeze', 'tf.squeeze', (['y_true'], {}), '(y_true)\n', (1962, 1970), True, 'import tensorflow as tf\n'), ((3409, 3450), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['beta', '(1 + 1e-11)', 'np.inf'], {}), '(beta, 1 + 1e-11, np.inf)\n', (3425, 3450), True, 'import tensorflow as tf\n'), ((4005, 4037), 'tensorflow.keras.layers.Dense', 'Dense', (['hidden'], {'activation': '"""relu"""'}), "(hidden, activation='relu')\n", (4010, 4037), False, 'from tensorflow.keras.layers import Concatenate, Dense, Lambda, Multiply\n'), ((4062, 4094), 'tensorflow.keras.layers.Dense', 'Dense', (['hidden'], {'activation': '"""relu"""'}), "(hidden, activation='relu')\n", (4067, 4094), False, 'from tensorflow.keras.layers import Concatenate, Dense, Lambda, Multiply\n'), ((4118, 4165), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'name': '"""w_alpha"""', 'activation': '"""softplus"""'}), "(1, name='w_alpha', activation='softplus')\n", (4123, 4165), False, 'from tensorflow.keras.layers import Concatenate, Dense, Lambda, Multiply\n'), ((4189, 4221), 'tensorflow.keras.layers.Dense', 'Dense', (['hidden'], {'activation': '"""relu"""'}), "(hidden, activation='relu')\n", (4194, 4221), False, 'from tensorflow.keras.layers import Concatenate, Dense, Lambda, Multiply\n'), ((4244, 4290), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'name': '"""w_beta"""', 'activation': '"""softplus"""'}), "(1, name='w_beta', activation='softplus')\n", (4249, 4290), False, 'from tensorflow.keras.layers import Concatenate, Dense, Lambda, Multiply\n'), ((698, 
716), 'tensorflow.keras.backend.pow', 'K.pow', (['(l1 / l2)', 'k2'], {}), '(l1 / l2, k2)\n', (703, 716), True, 'from tensorflow.keras import backend as K\n'), ((1068, 1105), 'numpy.power', 'np.power', (['((beta - 1) / beta)', '(1 / beta)'], {}), '((beta - 1) / beta, 1 / beta)\n', (1076, 1105), True, 'import numpy as np\n'), ((1991, 2026), 'tensorflow.keras.losses.MeanAbsoluteError', 'tf.keras.losses.MeanAbsoluteError', ([], {}), '()\n', (2024, 2026), True, 'import tensorflow as tf\n'), ((2738, 2771), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'name': '"""Weibullparams"""'}), "(name='Weibullparams')\n", (2749, 2771), False, 'from tensorflow.keras.layers import Concatenate, Dense, Lambda, Multiply\n'), ((3124, 3144), 'tensorflow.keras.layers.Multiply', 'Multiply', ([], {'name': '"""RUL"""'}), "(name='RUL')\n", (3132, 3144), False, 'from tensorflow.keras.layers import Concatenate, Dense, Lambda, Multiply\n'), ((3362, 3380), 'tensorflow.keras.backend.greater', 'K.greater', (['beta', '(1)'], {}), '(beta, 1)\n', (3371, 3380), True, 'from tensorflow.keras import backend as K\n'), ((3489, 3529), 'tensorflow.math.pow', 'tf.math.pow', (['((beta - 1) / beta)', '(1 / beta)'], {}), '((beta - 1) / beta, 1 / beta)\n', (3500, 3529), True, 'import tensorflow as tf\n'), ((4560, 4579), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (4571, 4579), False, 'from tensorflow.keras.layers import Concatenate, Dense, Lambda, Multiply\n'), ((421, 436), 'tensorflow.keras.backend.pow', 'K.pow', (['ya', 'beta'], {}), '(ya, beta)\n', (426, 436), True, 'from tensorflow.keras import backend as K\n'), ((568, 581), 'tensorflow.keras.backend.pow', 'K.pow', (['l1', 'k1'], {}), '(l1, k1)\n', (573, 581), True, 'from tensorflow.keras import backend as K\n'), ((611, 624), 'tensorflow.keras.backend.pow', 'K.pow', (['l2', 'k2'], {}), '(l2, k2)\n', (616, 624), True, 'from tensorflow.keras import backend as K\n'), ((656, 665), 
'tensorflow.keras.backend.log', 'K.log', (['l1'], {}), '(l1)\n', (661, 665), True, 'from tensorflow.keras import backend as K\n'), ((727, 754), 'tensorflow.math.lgamma', 'tf.math.lgamma', (['(k2 / k1 + 1)'], {}), '(k2 / k1 + 1)\n', (741, 754), True, 'import tensorflow as tf\n'), ((972, 994), 'scipy.special.loggamma', 'loggamma', (['(1 + 1 / beta)'], {}), '(1 + 1 / beta)\n', (980, 994), False, 'from scipy.special import loggamma\n'), ((1237, 1248), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (1243, 1248), True, 'import numpy as np\n'), ((3259, 3275), 'tensorflow.math.log', 'tf.math.log', (['(2.0)'], {}), '(2.0)\n', (3270, 3275), True, 'import tensorflow as tf\n'), ((3277, 3303), 'tensorflow.math.reciprocal', 'tf.math.reciprocal', (['k_pipe'], {}), '(k_pipe)\n', (3295, 3303), True, 'import tensorflow as tf\n'), ((386, 397), 'tensorflow.keras.backend.log', 'K.log', (['beta'], {}), '(beta)\n', (391, 397), True, 'from tensorflow.keras import backend as K\n'), ((1362, 1384), 'scipy.special.loggamma', 'loggamma', (['(1 + 2 / beta)'], {}), '(1 + 2 / beta)\n', (1370, 1384), False, 'from scipy.special import loggamma\n'), ((1526, 1539), 'numpy.log', 'np.log', (['(1 - q)'], {}), '(1 - q)\n', (1532, 1539), True, 'import numpy as np\n'), ((408, 417), 'tensorflow.keras.backend.log', 'K.log', (['ya'], {}), '(ya)\n', (413, 417), True, 'from tensorflow.keras import backend as K\n'), ((1398, 1420), 'scipy.special.loggamma', 'loggamma', (['(1 + 1 / beta)'], {}), '(1 + 1 / beta)\n', (1406, 1420), False, 'from scipy.special import loggamma\n'), ((3071, 3096), 'tensorflow.math.lgamma', 'tf.math.lgamma', (['(1 + 1 / x)'], {}), '(1 + 1 / x)\n', (3085, 3096), True, 'import tensorflow as tf\n')] |
# -*- encoding:utf-8 -*-
import torch.nn.functional as F
import torch.optim.lr_scheduler
import numpy as np
from uer.models.model import Model
from uer.model_builder import build_model
from uer.layers.layer_norm import LayerNorm
from uer.utils.act_fun import gelu
import torch.nn as nn
from torch.autograd import Variable
from matplotlib.pylab import *
def orthonormal_initializer(output_size, input_size):
    """
    adopted from <NAME> https://github.com/tdozat/Parser/blob/master/lib/linalg.py

    Iteratively orthogonalises a Gaussian random matrix by gradient descent on
    ||Q.T Q - I||^2 / 2.  On divergence the attempt is abandoned, the step
    size is halved and a fresh matrix drawn (up to 10 tries); if all attempts
    fail, a plain scaled Gaussian matrix is returned instead.

    Returns:
        float32 array of shape (output_size, input_size).
    """
    print(output_size, input_size)
    I = np.eye(output_size)
    lr = .1  # descent step size, halved on every failed attempt
    eps = .05 / (output_size + input_size)
    success = False
    tries = 0
    while not success and tries < 10:
        Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
        for i in range(100):
            QTQmI = Q.T.dot(Q) - I
            loss = np.sum(QTQmI ** 2 / 2)
            Q2 = Q ** 2
            Q -= lr * Q.dot(QTQmI) / (
                np.abs(Q2 + Q2.sum(axis=0, keepdims=True) + Q2.sum(axis=1, keepdims=True) - 1) + eps)
            if np.max(Q) > 1e6 or loss > 1e6 or not np.isfinite(loss):
                tries += 1
                lr /= 2
                break
        else:
            # BUGFIX: the original set ``success = True`` unconditionally
            # after the inner loop, even when a diverged attempt had broken
            # out, which made the retry machinery above dead code.  The
            # for-else only marks success when all 100 steps completed.
            success = True
    if success:
        print('Orthogonal pretrainer loss: %.2e' % loss)
    else:
        print('Orthogonal pretrainer failed, using non-orthogonal random matrix')
        Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
    return np.transpose(Q.astype(np.float32))
def pad_sequence(xs, length=None, padding=-1, dtype=np.float64):
    """Right-pad a list of 1-D arrays to a common length and stack them.

    Args:
        xs: iterable of 1-D numpy arrays (possibly of different lengths).
        length: target length; defaults to the longest entry in ``xs``.
        padding: fill value appended after each sequence.
        dtype: numpy dtype the sequences are cast to before padding.

    Returns:
        torch.Tensor of shape ``(len(xs), length)``.
    """
    target = length
    if target is None:
        target = max(len(x) for x in xs)
    rows = []
    for seq in xs:
        padded = np.pad(seq.astype(dtype), (0, target - len(seq)),
                        mode="constant", constant_values=padding)
        rows.append(padded)
    return torch.from_numpy(np.array(rows))
def _model_var(model, x):
p = next(filter(lambda p: p.requires_grad, model.parameters()))
if p.is_cuda:
x = x.cuda()
return torch.autograd.Variable(x)
def generate_perm_inv(perm):
    """Return the inverse permutation, i.e. out[perm[i]] == i."""
    inverse = np.zeros(len(perm), dtype=np.int32)
    for position, target in enumerate(perm):
        inverse[int(target)] = position
    return inverse
class NonLinear(nn.Module):
    """Affine layer with an optional activation, initialised with an
    orthonormal weight matrix and a zero bias."""
    def __init__(self, input_size, hidden_size, activation=None):
        super(NonLinear, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.linear = nn.Linear(in_features=input_size, out_features=hidden_size)
        if activation is None:
            # Identity: behave like a plain linear layer.
            self._activate = lambda x: x
        else:
            if not callable(activation):
                raise ValueError("activation must be callable: type={}".format(type(activation)))
            self._activate = activation
        self.reset_parameters()
    def forward(self, x):
        y = self.linear(x)
        return self._activate(y)
    def reset_parameters(self):
        # Orthonormal weight matrix (see orthonormal_initializer) + zero bias.
        W = orthonormal_initializer(self.hidden_size, self.input_size)
        self.linear.weight.data.copy_(torch.from_numpy(W))
        b = np.zeros(self.hidden_size, dtype=np.float32)
        self.linear.bias.data.copy_(torch.from_numpy(b))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def encode(lstm, wemb_l, l, return_hidden=False, hc0=None, last_only=False, U=None, V=None, ctx=None, l_hs=None):
    """ [batch_size, max token length, dim_emb]

    Run ``lstm`` over a padded batch of embeddings.

    Args:
        lstm: the recurrent module to apply.
        wemb_l: padded embeddings, shape [batch_size, max_token_len, dim_emb].
        l: per-example true token lengths (used for packing).
        return_hidden: also return the final (h, c) states.
        hc0: optional initial hidden state.
        last_only: keep only the final time step of each sequence.
        U, V, ctx, l_hs: when ``ctx`` is given, the final outputs are
            re-weighted by an attention between U(ctx) and V(outputs);
            ``l_hs`` holds per-question segment sizes used to slice the batch.
    """
    bS, mL, eS = wemb_l.shape
    # sort before packking
    l = array(l)
    perm_idx = argsort(-l)  # longest-first order required by pack_padded_sequence
    perm_idx_inv = generate_perm_inv(perm_idx)
    # pack sequence
    packed_wemb_l = nn.utils.rnn.pack_padded_sequence(wemb_l[perm_idx, :, :],
                                                      l[perm_idx],
                                                      batch_first=True)
    # Time to encode
    if hc0 is not None:
        # Re-order the initial states to match the sorted batch.
        hc0 = (hc0[0][:, perm_idx], hc0[1][:, perm_idx])
    # ipdb.set_trace()
    packed_wemb_l = packed_wemb_l.float() # I don't know why..
    packed_wenc, hc_out = lstm(packed_wemb_l, hc0)
    hout, cout = hc_out
    # unpack
    wenc, _l = nn.utils.rnn.pad_packed_sequence(packed_wenc, batch_first=True)
    if last_only:
        if ctx is None:
            # Take only final outputs for each columns.
            wenc = wenc[tuple(range(bS)), l[perm_idx] - 1]  # [batch_size, dim_emb]
            wenc.unsqueeze_(1)  # [batch_size, 1, dim_emb]
        else:
            ctx = ctx.unsqueeze(1)
            # [batch_size, 1, dim_emb] -> [batch_size, 1, hS]
            wenc_u = U(ctx)
            # [batch_size, seq_len, dim_emb] -> [batch_size, seq_len, hS]
            wenc_v = V(wenc)
            start = 0
            # [batch_size, 1, dim_emb]
            wenc2 = torch.zeros(wenc.shape[0], 1, wenc.shape[2])
            for b in range(ctx.shape[0]):
                # [1, hS] * [batch_size, seq_len, hS] -> [batch_size, seq_len, hS]
                attn = torch.mul(wenc_u[b], wenc_v[start:start + l_hs[b]])
                # attn, _ = nn.utils.rnn.pad_packed_sequence(attn, batch_first=True)
                # [batch_size, seq_len]
                attn = F.softmax(attn.sum(2), dim=1)
                # Attention-weighted sum over this question's segment.
                wenc1 = torch.bmm(attn.unsqueeze(1), wenc[start:start + l_hs[b]])
                wenc1 += ctx[b]
                wenc2[start:start + l_hs[b]] = wenc1
                start += l_hs[b]
            wenc = wenc2
    # Undo the longest-first sort so outputs align with the caller's order.
    wenc = wenc[perm_idx_inv]
    if return_hidden:
        # hout.shape = [number_of_directoin * num_of_layer, seq_len(=batch size), dim * number_of_direction ] w/ batch_first.. w/o batch_first? I need to see.
        hout = hout[:, perm_idx_inv].to(device)
        cout = cout[:, perm_idx_inv].to(device)  # Is this correct operation?
        return wenc, hout, cout
    else:
        return wenc
def encode_hpu(lstm, wemb_hpu, l_hpu, l_hs, U=None, V=None, ctx=None):
    """Encode header tokens and re-pack them per question.

    Currently averages the token embeddings of each header (the commented
    LSTM path is kept for reference); the flat [sum(l_hs), dim] result is
    scattered into a padded [n_questions, max(l_hs), dim] tensor on the
    module-level ``device``.  ``lstm``, ``l_hpu``, ``U``, ``V`` and ``ctx``
    are unused on this code path.
    """
    # wenc_hpu, hout, cout = encode(lstm,
    #                               wemb_hpu,
    #                               l_hpu,
    #                               return_hidden=True,
    #                               hc0=None,
    #                               last_only=True,
    #                               U=U,
    #                               V=V,
    #                               ctx=ctx,
    #                               l_hs=l_hs)
    # print("wemb_hpu:", wemb_hpu.shape)
    emb_hs_mean = torch.mean(wemb_hpu, dim=1)
    # print('mean:', emb_hs_mean.shape)
    wenc_hpu = emb_hs_mean
    bS_hpu, mL_hpu, eS = wemb_hpu.shape
    hS = wenc_hpu.size(-1)
    # print('l heasers:', l_hs)
    wenc_hs = wenc_hpu.new_zeros(len(l_hs), max(l_hs), hS)
    wenc_hs = wenc_hs.to(device)
    # Re-pack according to batch.
    # ret = [B_NLq, max_len_headers_all, dim_lstm]
    st = 0
    for i, l_hs1 in enumerate(l_hs):
        wenc_hs[i, :l_hs1] = wenc_hpu[st:(st + l_hs1)]
        st += l_hs1
    # print('w enc hs:', wenc_hs.shape)
    return wenc_hs
class Biaffine(nn.Module):
    """Biaffine scorer over all pairs drawn from two sequences.

    ``bias`` optionally appends a constant 1-feature to either input, turning
    the bilinear form into a full biaffine one.  The shared weight matrix is
    zero-initialised, so the layer scores everything 0 until trained.
    """
    def __init__(self, in1_features, in2_features, out_features,
                 bias=(True, True)):
        super(Biaffine, self).__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.bias = bias
        # One affine map per output label, realised as a single Linear.
        self.linear_input_size = in1_features + int(bias[0])
        self.linear_output_size = out_features * (in2_features + int(bias[1]))
        self.linear = nn.Linear(in_features=self.linear_input_size,
                                out_features=self.linear_output_size,
                                bias=False)
        self.reset_parameters()
    def reset_parameters(self):
        # Start from an all-zero weight matrix.
        zero_weights = np.zeros((self.linear_output_size, self.linear_input_size), dtype=np.float32)
        self.linear.weight.data.copy_(torch.from_numpy(zero_weights))
    def forward(self, input1, input2):
        """input1: (B, len1, in1), input2: (B, len2, in2) ->
        scores of shape (B, len2, len1, out_features)."""
        batch_size, len1, dim1 = input1.size()
        batch_size, len2, dim2 = input2.size()
        if self.bias[0]:
            ones_col = input1.data.new(batch_size, len1, 1).zero_().fill_(1)
            input1 = torch.cat((input1, Variable(ones_col)), dim=2)
            dim1 += 1
        if self.bias[1]:
            ones_col = input2.data.new(batch_size, len2, 1).zero_().fill_(1)
            input2 = torch.cat((input2, Variable(ones_col)), dim=2)
            dim2 += 1
        # (B, len1 * out_features, dim2): per-label affine images of input1.
        affine = self.linear(input1).view(batch_size, len1 * self.out_features, dim2)
        # Batched bilinear product against the transposed second input.
        scores = torch.bmm(affine, torch.transpose(input2, 1, 2))
        scores = torch.transpose(scores, 1, 2).contiguous()
        return scores.view(batch_size, len2, len1, self.out_features)
    def __repr__(self):
        return '{} (in1_features={}, in2_features={}, out_features={})'.format(
            self.__class__.__name__, self.in1_features,
            self.in2_features, self.out_features)
def drop_sequence_sharedmask(inputs, dropout, batch_first=True):
    """Variational (locked) dropout: one Bernoulli mask per (batch, feature)
    pair is shared across all time steps and rescaled by 1/(1 - dropout).

    Args:
        inputs: (batch, seq, hidden) when ``batch_first``, else
            (seq, batch, hidden).
        dropout: drop probability in [0, 1).

    NOTE(review): the final ``transpose(1, 0)`` yields (batch, seq, hidden)
    only when ``batch_first`` was True; with ``batch_first=False`` the layout
    comes back flipped — confirm how callers use it.
    """
    if batch_first:
        inputs = inputs.transpose(0, 1)
    seq_length, batch_size, hidden_size = inputs.size()
    drop_masks = inputs.data.new(batch_size, hidden_size).fill_(1 - dropout)
    drop_masks = Variable(torch.bernoulli(drop_masks), requires_grad=False)
    drop_masks = drop_masks / (1 - dropout)  # inverted-dropout rescaling
    # Broadcast the (batch, hidden) mask over every time step.
    drop_masks = torch.unsqueeze(drop_masks, dim=2).expand(-1, -1, seq_length).permute(2, 0, 1)
    inputs = inputs * drop_masks
    return inputs.transpose(1, 0)
class AutomaticWeightedLoss(nn.Module):
    """Learnable multi-task loss weighting.

    Each task loss ``L_i`` contributes ``0.5 / s_i**2 * L_i + log(1 + s_i**2)``
    where the ``s_i`` are trainable parameters initialised to 1.

    Params:
        num: int, the number of losses to combine.
        x: the individual task losses.

    Examples:
        awl = AutomaticWeightedLoss(2)
        loss_sum = awl(loss1, loss2)
    """
    def __init__(self, num=2):
        super(AutomaticWeightedLoss, self).__init__()
        self.params = torch.nn.Parameter(torch.ones(num, requires_grad=True))
    def forward(self, *x):
        total = 0
        for idx, task_loss in enumerate(x):
            sigma = self.params[idx]
            # Weighted loss plus a regulariser that keeps sigma from blowing up.
            total = total + 0.5 / (sigma ** 2) * task_loss + torch.log(1 + sigma ** 2)
        return total
class TableTextPretraining(nn.Module):
    """Wrapper around a pretrained table-text encoder plus the heads used
    during pretraining (masked-LM projection, header/question LSTMs, and a
    schema-dependency MLP).

    Encoder weights are loaded from ``args.pretrained_model_path`` at
    construction time (CPU ``map_location`` when CUDA is unavailable).
    """
    def __init__(self, args):
        super(TableTextPretraining, self).__init__()
        # self.word_embed = nn.Embedding(vocab.vocab_size, config.word_dims, padding_idx=0)
        # self.extword_embed = nn.Embedding(vocab.extvocab_size, config.word_dims, padding_idx=0)
        self.config = args
        self.hidden_size = args.hidden_size
        self.vocab_size = args.vocab_size
        self.pre_encoder = build_model(args)
        if args.use_cuda:
            pretrained_model = torch.load(args.pretrained_model_path)
        else:
            pretrained_model = torch.load(args.pretrained_model_path, map_location='cpu')
        print('loading model from table Text model:', args.pretrained_model_path)
        # strict=False: tolerate heads present here but absent from the checkpoint.
        self.pre_encoder.load_state_dict(pretrained_model, strict=False)
        self.pre_encoder.to(args.device)
        # MLM.
        self.mlm_linear_1 = nn.Linear(args.hidden_size, args.hidden_size)
        self.layer_norm = LayerNorm(args.hidden_size)
        self.mlm_linear_2 = nn.Linear(args.hidden_size, self.vocab_size)
        self.softmax = nn.LogSoftmax(dim=-1)
        self.device = args.device
        self.use_cuda = args.use_cuda
        hidden_size = 128 # int(args.iS / 2)
        self.lstm_hiddens = hidden_size
        self.lstm_layers = args.lS
        self.dropout_lstm_input = args.dr
        self.dropout_lstm_hidden = args.dr
        self.dropout_mlp = args.dr
        self.input_dims = 768
        # Bidirectional LSTMs for headers (enc_h) and question tokens (enc_n);
        # the hidden size is split across the two directions.
        self.enc_h = nn.LSTM(input_size=self.input_dims, hidden_size=int(self.lstm_hiddens / 2),
                             num_layers=self.lstm_layers, batch_first=True,
                             dropout=self.dropout_lstm_hidden, bidirectional=True)
        self.enc_n = nn.LSTM(input_size=self.input_dims, hidden_size=int(self.lstm_hiddens / 2),
                             num_layers=self.lstm_layers, batch_first=True,
                             dropout=self.dropout_lstm_hidden, bidirectional=True)
        # Schema Dependency
        self.mlp_all = NonLinear(
            input_size=768,
            hidden_size=400,
            activation=nn.LeakyReLU(0.1))
        # self.mlp_arc_dep1 = NonLinear(
        #     input_size=400,
        #     hidden_size=args.mlp_arc_size + args.mlp_rel_size,
        #     activation=nn.LeakyReLU(0.1))
        # self.mlp_arc_head1 = NonLinear(
        #     input_size=400,
        #     hidden_size=args.mlp_arc_size + args.mlp_rel_size,
        #     activation=nn.LeakyReLU(0.1))
        # self.total_num = int((args.mlp_arc_size+args.mlp_rel_size) / 100)
        # self.arc_num = int(args.mlp_arc_size / 100)
        # self.rel_num = int(args.mlp_rel_size / 100)
        #
        # self.arc_biaffine1 = Biaffine(args.mlp_arc_size, args.mlp_arc_size, 1, bias=(True, False))
        # self.rel_biaffine1 = Biaffine(args.mlp_rel_size, args.mlp_rel_size, 9, bias=(True, True))
        #
        # # self.relation_weights = torch.FloatTensor(args.rel_weights).to(args.device)
        # self.auto_loss = AutomaticWeightedLoss(2)
# No script entry point; this module is meant to be imported.
if __name__ == '__main__':
    pass
| [
"numpy.eye",
"numpy.sqrt",
"torch.autograd.Variable",
"torch.nn.LeakyReLU",
"numpy.max",
"uer.model_builder.build_model",
"numpy.sum",
"numpy.zeros",
"numpy.isfinite",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Linear",
"uer.layers.layer_norm.LayerNorm",
"torch.nn.LogSoftmax",
"t... | [((552, 571), 'numpy.eye', 'np.eye', (['output_size'], {}), '(output_size)\n', (558, 571), True, 'import numpy as np\n'), ((3614, 3706), 'torch.nn.utils.rnn.pack_padded_sequence', 'nn.utils.rnn.pack_padded_sequence', (['wemb_l[perm_idx, :, :]', 'l[perm_idx]'], {'batch_first': '(True)'}), '(wemb_l[perm_idx, :, :], l[perm_idx],\n batch_first=True)\n', (3647, 3706), True, 'import torch.nn as nn\n'), ((4106, 4169), 'torch.nn.utils.rnn.pad_packed_sequence', 'nn.utils.rnn.pad_packed_sequence', (['packed_wenc'], {'batch_first': '(True)'}), '(packed_wenc, batch_first=True)\n', (4138, 4169), True, 'import torch.nn as nn\n'), ((2456, 2515), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'input_size', 'out_features': 'hidden_size'}), '(in_features=input_size, out_features=hidden_size)\n', (2465, 2515), True, 'import torch.nn as nn\n'), ((3077, 3121), 'numpy.zeros', 'np.zeros', (['self.hidden_size'], {'dtype': 'np.float32'}), '(self.hidden_size, dtype=np.float32)\n', (3085, 3121), True, 'import numpy as np\n'), ((7416, 7516), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.linear_input_size', 'out_features': 'self.linear_output_size', 'bias': '(False)'}), '(in_features=self.linear_input_size, out_features=self.\n linear_output_size, bias=False)\n', (7425, 7516), True, 'import torch.nn as nn\n'), ((7654, 7731), 'numpy.zeros', 'np.zeros', (['(self.linear_output_size, self.linear_input_size)'], {'dtype': 'np.float32'}), '((self.linear_output_size, self.linear_input_size), dtype=np.float32)\n', (7662, 7731), True, 'import numpy as np\n'), ((10572, 10589), 'uer.model_builder.build_model', 'build_model', (['args'], {}), '(args)\n', (10583, 10589), False, 'from uer.model_builder import build_model\n'), ((11030, 11075), 'torch.nn.Linear', 'nn.Linear', (['args.hidden_size', 'args.hidden_size'], {}), '(args.hidden_size, args.hidden_size)\n', (11039, 11075), True, 'import torch.nn as nn\n'), ((11102, 11129), 'uer.layers.layer_norm.LayerNorm', 'LayerNorm', 
(['args.hidden_size'], {}), '(args.hidden_size)\n', (11111, 11129), False, 'from uer.layers.layer_norm import LayerNorm\n'), ((11158, 11202), 'torch.nn.Linear', 'nn.Linear', (['args.hidden_size', 'self.vocab_size'], {}), '(args.hidden_size, self.vocab_size)\n', (11167, 11202), True, 'import torch.nn as nn\n'), ((11227, 11248), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (11240, 11248), True, 'import torch.nn as nn\n'), ((711, 751), 'numpy.random.randn', 'np.random.randn', (['input_size', 'output_size'], {}), '(input_size, output_size)\n', (726, 751), True, 'import numpy as np\n'), ((754, 774), 'numpy.sqrt', 'np.sqrt', (['output_size'], {}), '(output_size)\n', (761, 774), True, 'import numpy as np\n'), ((858, 880), 'numpy.sum', 'np.sum', (['(QTQmI ** 2 / 2)'], {}), '(QTQmI ** 2 / 2)\n', (864, 880), True, 'import numpy as np\n'), ((1394, 1434), 'numpy.random.randn', 'np.random.randn', (['input_size', 'output_size'], {}), '(input_size, output_size)\n', (1409, 1434), True, 'import numpy as np\n'), ((1437, 1457), 'numpy.sqrt', 'np.sqrt', (['output_size'], {}), '(output_size)\n', (1444, 1457), True, 'import numpy as np\n'), ((12250, 12267), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (12262, 12267), True, 'import torch.nn as nn\n'), ((1065, 1074), 'numpy.max', 'np.max', (['Q'], {}), '(Q)\n', (1071, 1074), True, 'import numpy as np\n'), ((1102, 1119), 'numpy.isfinite', 'np.isfinite', (['loss'], {}), '(loss)\n', (1113, 1119), True, 'import numpy as np\n'), ((8063, 8077), 'torch.autograd.Variable', 'Variable', (['ones'], {}), '(ones)\n', (8071, 8077), False, 'from torch.autograd import Variable\n'), ((8247, 8261), 'torch.autograd.Variable', 'Variable', (['ones'], {}), '(ones)\n', (8255, 8261), False, 'from torch.autograd import Variable\n')] |
from math import ceil, floor
import numpy as np
from BoundingBox import BoundingBox
def fibonacci_sphere_section(num_points_whole_sphere: int, bbox: 'BoundingBox'):
    """Return the points of a Fibonacci sphere that fall inside ``bbox``.

    Instead of generating all ``num_points_whole_sphere`` points and filtering
    them, the index ranges whose latitude/longitude land inside the bounding
    box are computed directly, one range per spiral circulation.

    Args:
        num_points_whole_sphere: number of points the full sphere would carry.
        bbox: object exposing lat_min/lat_max/lon_min/lon_max in radians
            (longitudes in [0, 2*pi)).

    Returns:
        Tuple (x, y, z) of numpy arrays holding the selected unit-sphere points.
    """
    lat_min, lat_max, lon_min, lon_max = bbox.lat_min, bbox.lat_max, bbox.lon_min, bbox.lon_max
    #print("lat_min: {}, lat_max: {}, lon_min: {}, lon_max: {}".format(lat_min, lat_max, lon_min, lon_max))
    ga = (3 - np.sqrt(5)) * np.pi  # golden angle
    repeat = np.pi*2/ga  # after how many indices the 2 pi is reached
    z_step = 2.0/num_points_whole_sphere
    z_min_bound = z_step-1.0  # minimum z
    z_max_bound = 1.0 - z_step  # maximum z
    z_min = np.sin(lat_min)
    z_max = np.sin(lat_max)
    if z_min < z_min_bound:
        z_min = z_min_bound
    if z_max > z_max_bound:
        z_max = z_max_bound
    # Linear interpolation.  BUGFIX: the original omitted the ``+ y1`` offset,
    # so the helper was not a true lerp; every existing call passes y1 = 0.0,
    # hence results are unchanged, but the helper is now correct for any y1.
    linInterp = lambda x1, x2, y1, y2, x: y1 + (y2-y1)/(x2-x1)*(x-x1)
    i_min = linInterp(z_min_bound, z_max_bound, 0.0, float(num_points_whole_sphere), z_min)  # smallest i where z is within bounding box latitude
    i_max = linInterp(z_min_bound, z_max_bound, 0.0, float(num_points_whole_sphere), z_max)  # biggest i where z is within bounding box latitude
    i_min_0 = np.floor(i_min / repeat) * repeat  # smallest i at 0 degrees longitude
    circulations = ceil((i_max - i_min_0)/repeat)  # number of circulations
    relative_begin = linInterp(0.0, np.pi*2, 0.0, repeat, lon_min)  # relative index to start of bounding box longitude
    relative_end = linInterp(0.0, np.pi*2, 0.0, repeat, lon_max)  # relative index to end of bounding box longitude
    theta = []
    z = []
    for r in range(circulations):
        offset = repeat*r + i_min_0
        i_start = ceil(offset + relative_begin)
        i_end = int(offset + relative_end)
        if i_end >= num_points_whole_sphere:  # prevent overflow
            i_end = num_points_whole_sphere - 1
        indices = range(i_start, i_end+1)  # all indices within the bounding box for the current circulation. can be empty
        for i in indices:
            theta.append(ga*i)
            z.append(z_step-1.0 + z_step*i)
    theta = np.array(theta)
    z = np.array(z)
    # a list of the radii at each height step of the unit circle
    radius = np.sqrt(1 - z * z)
    # Determine where xy fall on the sphere, given the azimuthal and polar angles
    y = radius * np.sin(theta)
    x = radius * np.cos(theta)
    return x, y, z
return x, y, z | [
"numpy.sqrt",
"math.ceil",
"numpy.floor",
"numpy.array",
"numpy.cos",
"numpy.sin"
] | [((627, 642), 'numpy.sin', 'np.sin', (['lat_min'], {}), '(lat_min)\n', (633, 642), True, 'import numpy as np\n'), ((656, 671), 'numpy.sin', 'np.sin', (['lat_max'], {}), '(lat_max)\n', (662, 671), True, 'import numpy as np\n'), ((1266, 1298), 'math.ceil', 'ceil', (['((i_max - i_min_0) / repeat)'], {}), '((i_max - i_min_0) / repeat)\n', (1270, 1298), False, 'from math import ceil, floor\n'), ((2271, 2286), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (2279, 2286), True, 'import numpy as np\n'), ((2295, 2306), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (2303, 2306), True, 'import numpy as np\n'), ((2387, 2405), 'numpy.sqrt', 'np.sqrt', (['(1 - z * z)'], {}), '(1 - z * z)\n', (2394, 2405), True, 'import numpy as np\n'), ((1186, 1210), 'numpy.floor', 'np.floor', (['(i_min / repeat)'], {}), '(i_min / repeat)\n', (1194, 1210), True, 'import numpy as np\n'), ((1677, 1706), 'math.ceil', 'ceil', (['(offset + relative_begin)'], {}), '(offset + relative_begin)\n', (1681, 1706), False, 'from math import ceil, floor\n'), ((2507, 2520), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2513, 2520), True, 'import numpy as np\n'), ((2538, 2551), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2544, 2551), True, 'import numpy as np\n'), ((383, 393), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (390, 393), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 23:52:06 2018
@author: malti
"""
#music notic is a collecion of fundamental tones and harmonics or overtones.
import numpy as np
import soundfile as sf
import sounddevice as sd
# Example presets: amp_a[0] is a pure tone, amp_a[1] a 1/n harmonic rolloff.
amp = [1,1]
amp_a = [[1,0,0,0],[1,1/2,1/3,1/4]]
freq = [1000,440]
def tone(duration=1, freq=1000, amp=1, sample_rate=20000, rtimes=False):
    """Generate a sampled cosine tone.

    With no parameters it generates an array representing a 1 kHz tone.

    Args:
        duration: tone length in seconds (may be fractional).
        freq: frequency in Hz.
        amp: peak amplitude.
        sample_rate: samples per second.
        rtimes: when True, also return the array of sample times.

    Returns:
        The sample array, or ``(samples, times)`` when ``rtimes`` is True.

    Raises:
        ValueError: if ``rtimes`` is not a boolean.
    """
    # BUGFIX: np.linspace requires an integer sample count; the original
    # passed ``sample_rate * duration`` directly, which is a float for
    # fractional durations and raises a TypeError on modern NumPy.
    num_samples = int(sample_rate * duration)
    times = np.linspace(0, duration, num_samples)
    overtone = np.cos(2 * np.pi * freq * times) * amp
    if rtimes == True:
        return overtone, times
    elif rtimes == False:
        return overtone
    # Previously this branch only printed an error and returned None;
    # fail loudly instead of silently producing no data.
    raise ValueError('rtimes must be True or False')
# tone synthesis
def note(freq, lent, amp=1, rate=20000):
    """Synthesise ``lent`` seconds of a cosine note at ``freq`` Hz and
    quantise it to 16-bit integers (scale with ``amp`` before truncation)."""
    sample_times = np.linspace(0, lent, lent * rate)
    waveform = amp * np.cos(2 * np.pi * freq * sample_times)
    # Truncate to two-byte integers, the usual raw PCM sample format.
    return waveform.astype('int16')
def complex_tone(freq, amp_array, duration=0.5, rate=20000):
    """Build a complex tone as a sum of harmonics of ``freq``.

    The i-th entry of ``amp_array`` is the amplitude of the (i+1)-th
    multiple of ``freq``.

    BUGFIX: the original allocated ``np.zeros(np.size(rate * duration))``;
    ``np.size`` of a scalar is 1, so the accumulator was a single zero that
    only worked through broadcasting.  Allocate the real buffer length.
    """
    data = np.zeros(int(rate * duration))
    for i, harmonic_amp in enumerate(amp_array):
        data = data + tone(duration, freq=(i + 1) * freq, amp=harmonic_amp, sample_rate=rate)
    return data
def complex_sound(freq_array, amp_array, duration, rate=20000):
    """Mix several complex tones: ``amp_array[i]`` lists the harmonic
    amplitudes for fundamental ``freq_array[i]``.

    Returns None when the two arrays disagree in length (kept for backward
    compatibility with existing callers).
    """
    if len(amp_array) != len(freq_array):
        return None
    # BUGFIX: np.size(scalar) is 1, so the original accumulator relied on
    # broadcasting; allocate the actual number of samples instead.
    data = np.zeros(int(rate * duration))
    for index_freq, n_freq in enumerate(freq_array):
        for index_amp, n_amp in enumerate(amp_array[index_freq]):
            # BUGFIX: the harmonic multiple must follow the amplitude index
            # (as in complex_tone); the original used ``index_freq``, which
            # gave every overtone of a fundamental the same frequency.
            data = data + tone(duration, freq=(index_amp + 1) * n_freq, amp=n_amp, sample_rate=rate)
    return data
def blank_sound(duration, rate=20000):
    """Return ``duration`` seconds of silence at the given sample rate.

    BUGFIX: the original ``np.zeros(np.size(rate * duration))`` always
    produced a single sample, because ``np.size`` of a scalar is 1 — so every
    "pause" played for one sample regardless of the requested duration.
    """
    return np.zeros(int(rate * duration))
def tick1(d,f,p,n):
    """Play ``n`` ticks of frequency ``f`` and duration ``d``, each followed
    by a silence of the same duration.

    NOTE(review): parameter ``p`` is unused, and sd.play() is non-blocking,
    so each call cuts off the previous one (there is no sd.wait() here).
    """
    for i in range(0,n):
        sd.play(tone(d,f,2))
        sd.play(blank_sound(d))
def tick(d,f,p,n):
    """Play a rhythm pattern: for each positive entry i of ``p`` play i
    ticks of frequency ``f`` / duration ``d`` (waiting for each to finish);
    non-positive entries insert a one-second pause.

    NOTE(review): parameter ``n`` is unused, and ``j = 0`` before the inner
    loop is dead code.
    """
    for i in p:
        if i > 0:
            j = 0
            for j in range(0,i):
                sd.play(tone(d,f,2))
                sd.wait()
            #sd.play(blank_sound(d))
        else:
            sd.play(blank_sound(1))
# A tone, 2 seconds, 44100 samples per second
#tonep = note(440,2,amp=1)
#tonex = tone(duration = 2,freq = 440,amp = 1)
| [
"numpy.linspace",
"sounddevice.wait",
"numpy.size",
"numpy.cos"
] | [((530, 578), 'numpy.linspace', 'np.linspace', (['(0)', 'duration', '(sample_rate * duration)'], {}), '(0, duration, sample_rate * duration)\n', (541, 578), True, 'import numpy as np\n'), ((918, 951), 'numpy.linspace', 'np.linspace', (['(0)', 'lent', '(lent * rate)'], {}), '(0, lent, lent * rate)\n', (929, 951), True, 'import numpy as np\n'), ((647, 679), 'numpy.cos', 'np.cos', (['(2 * np.pi * freq * times)'], {}), '(2 * np.pi * freq * times)\n', (653, 679), True, 'import numpy as np\n'), ((957, 985), 'numpy.cos', 'np.cos', (['(2 * np.pi * freq * t)'], {}), '(2 * np.pi * freq * t)\n', (963, 985), True, 'import numpy as np\n'), ((1120, 1144), 'numpy.size', 'np.size', (['(rate * duration)'], {}), '(rate * duration)\n', (1127, 1144), True, 'import numpy as np\n'), ((1440, 1464), 'numpy.size', 'np.size', (['(rate * duration)'], {}), '(rate * duration)\n', (1447, 1464), True, 'import numpy as np\n'), ((1785, 1809), 'numpy.size', 'np.size', (['(rate * duration)'], {}), '(rate * duration)\n', (1792, 1809), True, 'import numpy as np\n'), ((2118, 2127), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (2125, 2127), True, 'import sounddevice as sd\n')] |
from functools import partial
import numpy as np
import torch
from torch import nn
from counterfactualms.arch.layers import Conv2d, ConvTranspose2d
class HierarchicalEncoder(nn.Module):
    """Convolutional encoder that emits feature maps at several scales.

    Each of the ``len(filters)`` stages applies ``num_convolutions - 1``
    stride-1 conv blocks followed by one stride-2 downsampling block.
    For every stage index listed in ``hierarchical_layers`` the
    pre-downsampling activation is projected by a 1x1 conv (down to
    ``channels // div_factor`` channels, at least 1) and appended to the
    output list; if ``len(filters)`` itself is listed, the final
    activation is additionally flattened through a linear head into a
    ``latent_dim`` vector.  ``self.intermediate_shapes`` records the
    (C, H, W) shape of every emitted tensor (batch dimension excluded).
    """
    def __init__(self, num_convolutions=3, filters=(16,32,64,128,256), latent_dim=100,
                 input_size=(1,128,128), use_weight_norm=False, use_spectral_norm=False,
                 hierarchical_layers=(1,3,5), div_factor=8):
        super().__init__()
        self.num_convolutions = num_convolutions
        self.filters = filters
        # The two normalization schemes are mutually exclusive.
        if use_weight_norm and use_spectral_norm:
            raise ValueError('Cannot use both weight norm and spectral norm.')
        self.use_weight_norm = use_weight_norm
        self.use_spectral_norm = use_spectral_norm
        self.hierarchical_layers = hierarchical_layers
        self.div_factor = div_factor
        self.down_layers = nn.ModuleList([])        # stride-2 downsampling blocks
        self.resolution_layers = nn.ModuleList([])  # stride-1 conv stacks per stage
        self.intermediate_shapes = []                # (C, H, W) of each emitted output
        self.out_layers = nn.ModuleList([])         # 1x1 projections for hierarchical outputs
        cur_channels = input_size[0]
        for i, c in enumerate(filters):
            resolution_layer = []
            for _ in range(0, num_convolutions - 1):
                resolution_layer += self._conv_layer(cur_channels, c)
                cur_channels = c
            self.resolution_layers.append(nn.Sequential(*resolution_layer))
            if i in self.hierarchical_layers:
                # Project to a slimmer map before emitting it as a skip output.
                out_channels = max(cur_channels // div_factor, 1)
                self.out_layers.append(self._conv(cur_channels, out_channels, 1, bias=True))
                # Spatial size has been halved once per preceding stage (2**i).
                self.intermediate_shapes.append(np.array(input_size) // (2 ** i))
                self.intermediate_shapes[-1][0] = out_channels
            self.down_layers.append(nn.Sequential(*self._down_conv_layer(cur_channels, c)))
            cur_channels = c
        if len(filters) in self.hierarchical_layers:
            # One extra, fully-downsampled output fed through the linear head.
            self.intermediate_shapes.append(np.array(input_size) // (2 ** len(filters)))
            self.intermediate_shapes[-1][0] = cur_channels
        # NOTE(review): self.fc is built unconditionally from the last recorded
        # intermediate shape, but forward() only uses it when len(filters) is
        # listed in hierarchical_layers.
        self.fc = nn.Sequential(
            nn.Linear(np.prod(self.intermediate_shapes[-1]), latent_dim, bias=False),
            nn.BatchNorm1d(latent_dim),
            nn.LeakyReLU(.1, inplace=True)
        )
    @property
    def _conv(self):
        # Conv2d constructor pre-bound with the chosen normalization flags.
        return partial(Conv2d, use_weight_norm=self.use_weight_norm, use_spectral_norm=self.use_spectral_norm)
    def _conv_layer(self, ci, co):
        # 3x3 stride-1 conv block: conv -> batch norm -> leaky ReLU.
        return [self._conv(ci, co, 3, 1, 1, bias=False),
                nn.BatchNorm2d(co, momentum=0.05),
                nn.LeakyReLU(.1, inplace=True)]
    def _down_conv_layer(self, ci, co):
        # 4x4 stride-2 conv block that halves the spatial resolution.
        return [self._conv(ci, co, 4, 2, 1, bias=False),
                nn.BatchNorm2d(co, momentum=0.05),
                nn.LeakyReLU(.1, inplace=True)]
    def forward(self, x):
        """Return the list of hierarchical outputs, shallowest first."""
        out = []
        c = 0  # index into self.out_layers (one entry per emitted stage)
        for i, (conv, down) in enumerate(zip(self.resolution_layers, self.down_layers)):
            x = conv(x)
            if i in self.hierarchical_layers:
                out.append(self.out_layers[c](x))
                c += 1
            x = down(x)
        if len(self.filters) in self.hierarchical_layers:
            # Flatten the final map and append the flat latent vector.
            x = x.view(-1, np.prod(self.intermediate_shapes[-1]))
            out.append(self.fc(x))
        return out
class HierarchicalDecoder(nn.Module):
    """Convolutional decoder mirroring :class:`HierarchicalEncoder`.

    ``forward`` consumes the encoder's output list from the deep end: the
    last element is a flat latent vector expanded through a linear head,
    and earlier elements are concatenated back in at the matching
    resolutions.  At every stage a small MLP ("context attention") maps
    the conditioning vector ``ctx`` to per-channel sigmoid gates in (0, 1)
    that multiply the feature map.
    """
    def __init__(self, num_convolutions=3, filters=(256,128,64,32,16), latent_dim=100, output_size=(1,128,128),
                 upconv=False, use_weight_norm=False, use_spectral_norm=False, hierarchical_layers=(1,3,5),
                 context_dim=4, div_factor=8):
        super().__init__()
        self.num_convolutions = num_convolutions
        self.filters = filters
        self.upconv = upconv
        # The two normalization schemes are mutually exclusive.
        if use_weight_norm and use_spectral_norm:
            raise ValueError('Cannot use both weight norm and spectral norm.')
        self.use_weight_norm = use_weight_norm
        self.use_spectral_norm = use_spectral_norm
        self.hierarchical_layers = hierarchical_layers
        # The deepest entry (== len(filters)) is handled by the fc head, not
        # by a concatenated skip input, so exclude it here.
        hierarchical_layers_ = [h for h in hierarchical_layers if h != len(filters)]
        self.context_dim = context_dim
        self.div_factor = div_factor
        self.resolution_layers = nn.ModuleList([])
        self.up_layers = nn.ModuleList([])
        self.intermediate_shapes = []  # expected (C, H, W) of each consumed input
        self.context_attention = nn.ModuleList([])
        cur_channels = filters[0]
        self.start_context_attention = self._attn(cur_channels)
        self.start_up_layer = nn.Sequential(*self._upsample_layer(cur_channels, cur_channels))
        if len(filters) in hierarchical_layers:
            self.intermediate_shapes.append(np.array(output_size) // (2 ** (len(filters))))
            self.intermediate_shapes[-1][0] = cur_channels
        for i, c in enumerate(filters[1:], 1):
            resolution_layer = []
            # Re-index so i counts down from the deepest stage, matching the
            # encoder's stage numbering.
            i = (len(filters) - i)
            input_layer = i in hierarchical_layers_
            in_channels = max(cur_channels // div_factor, 1)
            for j in range(0, num_convolutions - 1):
                # The first conv of a skip stage also receives the
                # concatenated encoder output (in_channels extra channels).
                ci = (in_channels+cur_channels) if j == 0 and input_layer else cur_channels
                resolution_layer += self._conv_layer(ci, cur_channels)
            self.resolution_layers.append(nn.Sequential(*resolution_layer))
            self.context_attention.append(self._attn(cur_channels))
            self.up_layers.append(nn.Sequential(*self._upsample_layer(cur_channels, c)))
            if input_layer:
                self.intermediate_shapes.append(np.array(output_size) // (2 ** i))
                self.intermediate_shapes[-1][0] = in_channels
            cur_channels = c
        # Final stage: conv stack then a 1x1 conv to the output channel count.
        final_layer = self._conv_layer(cur_channels, cur_channels)
        final_layer.append(self._conv(cur_channels, output_size[0], 1, 1, bias=True))
        self.final_layer = nn.Sequential(*final_layer)
        # Linear head that expands the flat latent vector to the deepest map.
        self.fc = nn.Sequential(
            nn.Linear(latent_dim, np.prod(self.intermediate_shapes[0]), bias=False),
            nn.BatchNorm1d(np.prod(self.intermediate_shapes[0])),
            nn.LeakyReLU(.1, inplace=True)
        )
    @property
    def _conv(self):
        # Conv2d constructor pre-bound with the chosen normalization flags.
        return partial(Conv2d, use_weight_norm=self.use_weight_norm, use_spectral_norm=self.use_spectral_norm)
    @property
    def _conv_transpose(self):
        # ConvTranspose2d constructor pre-bound with the normalization flags.
        return partial(ConvTranspose2d, use_weight_norm=self.use_weight_norm, use_spectral_norm=self.use_spectral_norm)
    def _conv_layer(self, ci, co):
        # 3x3 stride-1 conv block: conv -> batch norm -> leaky ReLU.
        return [self._conv(ci, co, 3, 1, 1, bias=False),
                nn.BatchNorm2d(co, momentum=0.05),
                nn.LeakyReLU(.1, inplace=True)]
    def _upsample_layer(self, ci, co):
        # Doubles the spatial resolution, either by nearest-neighbour
        # upsampling + conv (upconv=True) or by a transposed conv.
        if self.upconv:
            layer = [nn.Upsample(scale_factor=2, mode='nearest'),
                     self._conv(ci, co, kernel_size=5, stride=1, padding=2, bias=False)]
        else:
            layer = [self._conv_transpose(ci, co, kernel_size=4, stride=2, padding=1, bias=False)]
        layer += [nn.BatchNorm2d(co, momentum=0.05),
                  nn.LeakyReLU(.1, inplace=True)]
        return layer
    def _attn(self, co):
        # Context MLP producing co sigmoid gates in (0, 1) from ctx.
        hidden_dim = max(co // 4, self.context_dim)
        return nn.Sequential(nn.Linear(self.context_dim, hidden_dim),
                             nn.LeakyReLU(0.1, inplace=True),
                             nn.Linear(hidden_dim, co),
                             nn.Sigmoid())
    def forward(self, x, ctx):
        """Decode the encoder output list ``x`` conditioned on ``ctx``.

        NOTE: ``x`` is consumed destructively via ``x.pop()`` (deepest
        element first).
        """
        assert x[0].size(0) == ctx.size(0)
        batch_size = ctx.size(0)
        layers = zip(self.resolution_layers, self.up_layers, self.context_attention)
        ctx_attn = self.start_context_attention(ctx).view(batch_size, -1, 1, 1)
        # Expand the flat latent vector into the deepest feature map.
        y = self.fc(x.pop()).view(-1, *self.intermediate_shapes[0])
        y = self.start_up_layer(y) * ctx_attn
        for i, (conv, up, attn) in enumerate(layers, 1):
            i = len(self.filters) - i  # encoder-style stage index
            output_layer = i in self.hierarchical_layers
            ctx_attn = attn(ctx).view(batch_size, -1, 1, 1)
            if output_layer:
                # Concatenate the matching encoder skip output channel-wise.
                y = torch.cat([y, x.pop()], 1)
            y = conv(y) * ctx_attn
            y = up(y)
        y = self.final_layer(y)
        return y
if __name__ == "__main__":
    # Smoke test: build an encoder/decoder pair and round-trip a batch.
    layer_ids = (1, 2, 3, 4, 5)
    channel_widths = [20, 40, 80, 160, 320]
    shrink = 80
    image_size = (3, 128, 128)
    encoder = HierarchicalEncoder(hierarchical_layers=layer_ids,
                                  filters=channel_widths,
                                  div_factor=shrink,
                                  input_size=image_size)
    decoder = HierarchicalDecoder(hierarchical_layers=layer_ids,
                                  filters=channel_widths[::-1],
                                  div_factor=shrink,
                                  output_size=image_size)
    print(encoder.intermediate_shapes)
    print(decoder.intermediate_shapes)
    context = torch.randn(2, 4)
    batch = torch.randn(2, *image_size)
    latents = encoder(batch)
    reconstruction = decoder(latents, context)
    # The decoder must reproduce the input tensor shape exactly.
    assert reconstruction.shape == batch.shape
    print(encoder)
    print(decoder)
| [
"torch.nn.BatchNorm2d",
"torch.nn.Sigmoid",
"numpy.prod",
"torch.nn.LeakyReLU",
"torch.nn.ModuleList",
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"numpy.array",
"functools.partial",
"torch.nn.Upsample",
"torch.nn.Linear",
"torch.randn"
] | [((8633, 8650), 'torch.randn', 'torch.randn', (['(2)', '(4)'], {}), '(2, 4)\n', (8644, 8650), False, 'import torch\n'), ((8659, 8685), 'torch.randn', 'torch.randn', (['(2)', '*img_shape'], {}), '(2, *img_shape)\n', (8670, 8685), False, 'import torch\n'), ((882, 899), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (895, 899), False, 'from torch import nn\n'), ((933, 950), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (946, 950), False, 'from torch import nn\n'), ((1015, 1032), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (1028, 1032), False, 'from torch import nn\n'), ((2312, 2412), 'functools.partial', 'partial', (['Conv2d'], {'use_weight_norm': 'self.use_weight_norm', 'use_spectral_norm': 'self.use_spectral_norm'}), '(Conv2d, use_weight_norm=self.use_weight_norm, use_spectral_norm=\n self.use_spectral_norm)\n', (2319, 2412), False, 'from functools import partial\n'), ((4210, 4227), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (4223, 4227), False, 'from torch import nn\n'), ((4253, 4270), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (4266, 4270), False, 'from torch import nn\n'), ((4342, 4359), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (4355, 4359), False, 'from torch import nn\n'), ((5814, 5841), 'torch.nn.Sequential', 'nn.Sequential', (['*final_layer'], {}), '(*final_layer)\n', (5827, 5841), False, 'from torch import nn\n'), ((6131, 6231), 'functools.partial', 'partial', (['Conv2d'], {'use_weight_norm': 'self.use_weight_norm', 'use_spectral_norm': 'self.use_spectral_norm'}), '(Conv2d, use_weight_norm=self.use_weight_norm, use_spectral_norm=\n self.use_spectral_norm)\n', (6138, 6231), False, 'from functools import partial\n'), ((6288, 6396), 'functools.partial', 'partial', (['ConvTranspose2d'], {'use_weight_norm': 'self.use_weight_norm', 'use_spectral_norm': 'self.use_spectral_norm'}), '(ConvTranspose2d, use_weight_norm=self.use_weight_norm,\n 
use_spectral_norm=self.use_spectral_norm)\n', (6295, 6396), False, 'from functools import partial\n'), ((2180, 2206), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['latent_dim'], {}), '(latent_dim)\n', (2194, 2206), False, 'from torch import nn\n'), ((2220, 2251), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (2232, 2251), False, 'from torch import nn\n'), ((2517, 2550), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['co'], {'momentum': '(0.05)'}), '(co, momentum=0.05)\n', (2531, 2550), False, 'from torch import nn\n'), ((2568, 2599), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (2580, 2599), False, 'from torch import nn\n'), ((2714, 2747), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['co'], {'momentum': '(0.05)'}), '(co, momentum=0.05)\n', (2728, 2747), False, 'from torch import nn\n'), ((2765, 2796), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (2777, 2796), False, 'from torch import nn\n'), ((6039, 6070), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (6051, 6070), False, 'from torch import nn\n'), ((6502, 6535), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['co'], {'momentum': '(0.05)'}), '(co, momentum=0.05)\n', (6516, 6535), False, 'from torch import nn\n'), ((6553, 6584), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (6565, 6584), False, 'from torch import nn\n'), ((6935, 6968), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['co'], {'momentum': '(0.05)'}), '(co, momentum=0.05)\n', (6949, 6968), False, 'from torch import nn\n'), ((6988, 7019), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (7000, 7019), False, 'from torch import nn\n'), ((7148, 7187), 'torch.nn.Linear', 'nn.Linear', (['self.context_dim', 'hidden_dim'], {}), '(self.context_dim, 
hidden_dim)\n', (7157, 7187), False, 'from torch import nn\n'), ((7218, 7249), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (7230, 7249), False, 'from torch import nn\n'), ((7280, 7305), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'co'], {}), '(hidden_dim, co)\n', (7289, 7305), False, 'from torch import nn\n'), ((7336, 7348), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (7346, 7348), False, 'from torch import nn\n'), ((1342, 1374), 'torch.nn.Sequential', 'nn.Sequential', (['*resolution_layer'], {}), '(*resolution_layer)\n', (1355, 1374), False, 'from torch import nn\n'), ((2104, 2141), 'numpy.prod', 'np.prod', (['self.intermediate_shapes[-1]'], {}), '(self.intermediate_shapes[-1])\n', (2111, 2141), True, 'import numpy as np\n'), ((3196, 3233), 'numpy.prod', 'np.prod', (['self.intermediate_shapes[-1]'], {}), '(self.intermediate_shapes[-1])\n', (3203, 3233), True, 'import numpy as np\n'), ((5240, 5272), 'torch.nn.Sequential', 'nn.Sequential', (['*resolution_layer'], {}), '(*resolution_layer)\n', (5253, 5272), False, 'from torch import nn\n'), ((5910, 5946), 'numpy.prod', 'np.prod', (['self.intermediate_shapes[0]'], {}), '(self.intermediate_shapes[0])\n', (5917, 5946), True, 'import numpy as np\n'), ((5988, 6024), 'numpy.prod', 'np.prod', (['self.intermediate_shapes[0]'], {}), '(self.intermediate_shapes[0])\n', (5995, 6024), True, 'import numpy as np\n'), ((6670, 6713), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (6681, 6713), False, 'from torch import nn\n'), ((1944, 1964), 'numpy.array', 'np.array', (['input_size'], {}), '(input_size)\n', (1952, 1964), True, 'import numpy as np\n'), ((4646, 4667), 'numpy.array', 'np.array', (['output_size'], {}), '(output_size)\n', (4654, 4667), True, 'import numpy as np\n'), ((1629, 1649), 'numpy.array', 'np.array', (['input_size'], {}), '(input_size)\n', (1637, 1649), True, 'import 
numpy as np\n'), ((5507, 5528), 'numpy.array', 'np.array', (['output_size'], {}), '(output_size)\n', (5515, 5528), True, 'import numpy as np\n')] |
import numpy as np
from nose.tools import raises
from chainer import Variable
from chainer.testing import assert_allclose
from chainer.gradient_check import check_backward
from chainer.utils.type_check import InvalidType
from chainerltr.functions.logcumsumexp import logcumsumexp
def test_logcumsumexp_forward_2d():
    """Forward pass on a 2-d batch matches precomputed reference values."""
    inputs = np.array([[-3.2, 1.9, 0.01], [0.5, 1.2, 3.5]])
    reference = np.array([[2.04597612, 2.04069352, 0.01],
                          [3.63980187, 3.59554546, 3.5]])
    output = logcumsumexp(Variable(inputs))
    assert_allclose(output.data, reference)
def test_logcumsumexp_backward_2d():
    """Numerical gradient check on a small 2-d input."""
    inputs = np.array([[-3.2, 1.9, 0.01], [0.5, 1.2, 3.5]])
    check_backward(logcumsumexp, inputs, np.ones(inputs.shape))
def test_logcumsumexp_backward_2d_2():
    """Numerical gradient check on a wider 3x4 input."""
    inputs = np.array([[5.6, 6.6, 7.6, 0.1],
                       [0.1, 0.5, 0.8, 1.2],
                       [0.9, 0.9, 0.9, 0.9]])
    check_backward(logcumsumexp, inputs, np.ones(inputs.shape))
@raises(InvalidType)
def test_logcumsumexp_typeerror_0d():
    """A 0-d scalar input must be rejected by the type checker."""
    logcumsumexp(np.array(0.2718))
@raises(InvalidType)
def test_logcumsumexp_typeerror_3d():
    """A 3-d input must be rejected by the type checker."""
    logcumsumexp(np.array([[[0.1, 0.2, 0.3]]]))
| [
"numpy.ones",
"chainer.Variable",
"chainerltr.functions.logcumsumexp.logcumsumexp",
"numpy.array",
"nose.tools.raises",
"chainer.testing.assert_allclose"
] | [((966, 985), 'nose.tools.raises', 'raises', (['InvalidType'], {}), '(InvalidType)\n', (972, 985), False, 'from nose.tools import raises\n'), ((1072, 1091), 'nose.tools.raises', 'raises', (['InvalidType'], {}), '(InvalidType)\n', (1078, 1091), False, 'from nose.tools import raises\n'), ((326, 372), 'numpy.array', 'np.array', (['[[-3.2, 1.9, 0.01], [0.5, 1.2, 3.5]]'], {}), '([[-3.2, 1.9, 0.01], [0.5, 1.2, 3.5]])\n', (334, 372), True, 'import numpy as np\n'), ((406, 479), 'numpy.array', 'np.array', (['[[2.04597612, 2.04069352, 0.01], [3.63980187, 3.59554546, 3.5]]'], {}), '([[2.04597612, 2.04069352, 0.01], [3.63980187, 3.59554546, 3.5]])\n', (414, 479), True, 'import numpy as np\n'), ((545, 580), 'chainer.testing.assert_allclose', 'assert_allclose', (['res.data', 'expected'], {}), '(res.data, expected)\n', (560, 580), False, 'from chainer.testing import assert_allclose\n'), ((628, 674), 'numpy.array', 'np.array', (['[[-3.2, 1.9, 0.01], [0.5, 1.2, 3.5]]'], {}), '([[-3.2, 1.9, 0.01], [0.5, 1.2, 3.5]])\n', (636, 674), True, 'import numpy as np\n'), ((796, 872), 'numpy.array', 'np.array', (['[[5.6, 6.6, 7.6, 0.1], [0.1, 0.5, 0.8, 1.2], [0.9, 0.9, 0.9, 0.9]]'], {}), '([[5.6, 6.6, 7.6, 0.1], [0.1, 0.5, 0.8, 1.2], [0.9, 0.9, 0.9, 0.9]])\n', (804, 872), True, 'import numpy as np\n'), ((1032, 1048), 'numpy.array', 'np.array', (['(0.2718)'], {}), '(0.2718)\n', (1040, 1048), True, 'import numpy as np\n'), ((1053, 1068), 'chainerltr.functions.logcumsumexp.logcumsumexp', 'logcumsumexp', (['x'], {}), '(x)\n', (1065, 1068), False, 'from chainerltr.functions.logcumsumexp import logcumsumexp\n'), ((1138, 1167), 'numpy.array', 'np.array', (['[[[0.1, 0.2, 0.3]]]'], {}), '([[[0.1, 0.2, 0.3]]])\n', (1146, 1167), True, 'import numpy as np\n'), ((1172, 1187), 'chainerltr.functions.logcumsumexp.logcumsumexp', 'logcumsumexp', (['x'], {}), '(x)\n', (1184, 1187), False, 'from chainerltr.functions.logcumsumexp import logcumsumexp\n'), ((528, 539), 'chainer.Variable', 'Variable', (['x'], 
{}), '(x)\n', (536, 539), False, 'from chainer import Variable\n'), ((729, 745), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (736, 745), True, 'import numpy as np\n'), ((945, 961), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (952, 961), True, 'import numpy as np\n')] |
import pytest
import os,shutil,sys
import numpy as np
from mpi4py import MPI
from pypospack.pyposmat.data import PyposmatConfigurationFile
from pypospack.pyposmat.engines import PyposmatIterativeSampler
# Location of the fixture configuration file shared by all tests below.
pyposmat_data_dir = 'data'
config_fn = os.path.join(pyposmat_data_dir,'pyposmat.config.in')
def test__determine_rv_seeds__no_args__attribute_not_none():
    """determine_rv_seeds() fills the per-rank seed table, leaving rv_seed intact."""
    sampler = PyposmatIterativeSampler(configuration_filename=config_fn)
    sampler.read_configuration_file()
    sampler.setup_mpi_environment()
    sampler.rv_seed = 1
    # Precondition: a scalar seed is set but the seed table is absent.
    assert type(sampler.rv_seed) is int
    assert type(sampler.rv_seeds) is type(None)
    assert sampler.rv_seed == 1
    sampler.determine_rv_seeds()
    # Postcondition: one seed per (rank, iteration); the scalar seed is untouched.
    assert type(sampler.rv_seed) is int
    assert type(sampler.rv_seeds) is np.ndarray
    assert sampler.rv_seed == 1
    assert sampler.rv_seeds.shape[0] == sampler.mpi_size
    assert sampler.rv_seeds.shape[1] == sampler.n_iterations
def test__determine_rv_seeds__seeds_do_not_change_when_run_again():
    """Calling determine_rv_seeds() a second time must be idempotent."""
    sampler = PyposmatIterativeSampler(configuration_filename=config_fn)
    sampler.read_configuration_file()
    sampler.setup_mpi_environment()
    sampler.determine_rv_seeds()
    # Snapshot, re-run, and verify nothing moved.
    first_seed = sampler.rv_seed
    first_seeds = sampler.rv_seeds
    sampler.determine_rv_seeds()
    assert sampler.rv_seed == first_seed
    assert np.array_equal(sampler.rv_seeds, first_seeds)
def dev__determine_rv_seeds():
    """Manual driver: print the type of rv_seed and the shape of rv_seeds."""
    sampler = PyposmatIterativeSampler(configuration_filename=config_fn)
    sampler.read_configuration_file()
    sampler.setup_mpi_environment()
    sampler.determine_rv_seeds()
    print('pyposmat_app.rv_seed:')
    print('\ttype:{}.'.format(str(type(sampler.rv_seed))))
    print('pyposmat_app.rv_seeds:')
    print(sampler.rv_seeds.shape)
| [
"numpy.array_equal",
"os.path.join",
"pypospack.pyposmat.engines.PyposmatIterativeSampler"
] | [((245, 298), 'os.path.join', 'os.path.join', (['pyposmat_data_dir', '"""pyposmat.config.in"""'], {}), "(pyposmat_data_dir, 'pyposmat.config.in')\n", (257, 298), False, 'import os, shutil, sys\n'), ((380, 438), 'pypospack.pyposmat.engines.PyposmatIterativeSampler', 'PyposmatIterativeSampler', ([], {'configuration_filename': 'config_fn'}), '(configuration_filename=config_fn)\n', (404, 438), False, 'from pypospack.pyposmat.engines import PyposmatIterativeSampler\n'), ((1096, 1154), 'pypospack.pyposmat.engines.PyposmatIterativeSampler', 'PyposmatIterativeSampler', ([], {'configuration_filename': 'config_fn'}), '(configuration_filename=config_fn)\n', (1120, 1154), False, 'from pypospack.pyposmat.engines import PyposmatIterativeSampler\n'), ((1516, 1564), 'numpy.array_equal', 'np.array_equal', (['pyposmat_app.rv_seeds', 'old_seeds'], {}), '(pyposmat_app.rv_seeds, old_seeds)\n', (1530, 1564), True, 'import numpy as np\n'), ((1616, 1674), 'pypospack.pyposmat.engines.PyposmatIterativeSampler', 'PyposmatIterativeSampler', ([], {'configuration_filename': 'config_fn'}), '(configuration_filename=config_fn)\n', (1640, 1674), False, 'from pypospack.pyposmat.engines import PyposmatIterativeSampler\n')] |
#! /usr/bin/env python
# 'http://stackoverflow.com/questions/31588584/
# pyqt-qtableview-prohibitibily-slow-when-scrolling-with-large-data-sets
# /31591015#31591015'
# TreeGraphModel modeified from:
# http://www.yasinuludag.com/blog/?p=98
from PyQt4 import QtCore
import numpy as np
import pandas as pd
class PandasTableModel(QtCore.QAbstractTableModel):
    '''
    Read-only Qt table model backed by a pandas DataFrame.

    The DataFrame values are copied into a numpy array for fast cell
    access; the column labels feed the horizontal header.  Passing
    ``QtCore.Qt.UserRole`` to ``data`` returns the whole table as a
    DataFrame again.

    To Do: Nothing
    Last edit: Fixed AttributeError when constructed with ``data=None``
    (column labels were read from ``data`` unconditionally).
    '''
    def __init__(self, data, parent=None):
        QtCore.QAbstractTableModel.__init__(self, parent)
        if data is not None:
            self.__data = np.array(data.values)
            self.__cols = data.columns
        else:
            # Empty model: np.shape(...) below yields (0, 0), so attached
            # views simply display no rows or columns.
            self.__data = pd.DataFrame()
            self.__cols = []
        self.r, self.c = np.shape(self.__data)

    def rowCount(self, parent=None):
        return self.r

    def columnCount(self, parent=None):
        return self.c

    def headerData(self, section, orientation, role):
        # Column labels across the top, plain row numbers down the side.
        if role == QtCore.Qt.DisplayRole:
            if orientation == QtCore.Qt.Horizontal:
                return self.__cols[section]
            elif orientation == QtCore.Qt.Vertical:
                return section

    def data(self, index, role):
        if role == QtCore.Qt.UserRole:
            # Special role: hand back the full table as a DataFrame.
            index = None
            return pd.DataFrame(self.__data, columns=self.__cols)
        if index.isValid():
            # Default role used to render each cell.
            if role == QtCore.Qt.DisplayRole:
                return self.__data[index.row(), index.column()]

    def flags(self, index):
        # Read-only model: selectable and enabled, but not editable.
        return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
class PandasTableModelEdit(QtCore.QAbstractTableModel):
    '''
    Editable Qt table model backed by a pandas DataFrame.

    Cell edits are written into the underlying numpy array and a
    ``log_change`` signal is emitted describing the old/new value.

    To Do: Nothing
    Last edit: Fixed typo ``QAbStractTableModel`` in ``event``, which made
    every event dispatch raise AttributeError.
    '''
    # Emitted after a successful edit with {'cell_changes': {old: new}}.
    log_change = QtCore.pyqtSignal(object)

    def __init__(self, data, parent=None):
        QtCore.QAbstractTableModel.__init__(self, parent)
        if data is not None:
            self.__data = np.array(data.values)
            self.__cols = data.columns
            self.r, self.c = np.shape(self.__data)
        else:
            self.__data = None
            self.__cols = None
            self.r, self.c = [None, None]

    def set_data(self, data):
        """Replace the model contents with a new DataFrame."""
        self.__data = np.array(data.values)
        self.__cols = data.columns
        self.r, self.c = np.shape(self.__data)

    def rowCount(self, parent=None):
        return self.r

    def columnCount(self, parent=None):
        return self.c

    def headerData(self, section, orientation, role):
        # Column labels across the top, plain row numbers down the side.
        if role == QtCore.Qt.DisplayRole:
            if orientation == QtCore.Qt.Horizontal:
                return self.__cols[section]
            elif orientation == QtCore.Qt.Vertical:
                return section

    def data(self, index, role):
        if role == QtCore.Qt.UserRole:
            # Special role: hand back the full table as a DataFrame.
            index = None
            return pd.DataFrame(self.__data, columns=self.__cols)
        if index.isValid():
            if role == QtCore.Qt.DisplayRole:
                return self.__data[index.row(), index.column()]

    def flags(self, index):
        # Editable model: cells are enabled, selectable, and editable.
        return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable |\
            QtCore.Qt.ItemIsEditable

    def setData(self, index, value, role=QtCore.Qt.EditRole):
        """Write an edited value back and notify listeners; returns success."""
        if role == QtCore.Qt.EditRole:
            og_value = self.data(index, QtCore.Qt.DisplayRole)
            self.__data[index.row(), index.column()] = value
            self.dataChanged.emit(index, index)
            print('in model class: ', og_value, value)
            self.log_change.emit(
                {'cell_changes':{og_value: value}})
            return True
        return False

    def event(self, event):
        if (event.key() == QtCore.Qt.Key_Return):
            print('Presed Enter')
            raise KeyError
        # BUG FIX: was QtCore.QAbStractTableModel (capital 'S'), a name that
        # does not exist, so every dispatch raised AttributeError.
        return QtCore.QAbstractTableModel.event(self, event)
| [
"PyQt4.QtCore.pyqtSignal",
"PyQt4.QtCore.QAbStractTableModel.event",
"numpy.array",
"PyQt4.QtCore.QAbstractTableModel.__init__",
"pandas.DataFrame",
"numpy.shape"
] | [((2260, 2285), 'PyQt4.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['object'], {}), '(object)\n', (2277, 2285), False, 'from PyQt4 import QtCore\n'), ((689, 738), 'PyQt4.QtCore.QAbstractTableModel.__init__', 'QtCore.QAbstractTableModel.__init__', (['self', 'parent'], {}), '(self, parent)\n', (724, 738), False, 'from PyQt4 import QtCore\n'), ((937, 958), 'numpy.shape', 'np.shape', (['self.__data'], {}), '(self.__data)\n', (945, 958), True, 'import numpy as np\n'), ((2341, 2390), 'PyQt4.QtCore.QAbstractTableModel.__init__', 'QtCore.QAbstractTableModel.__init__', (['self', 'parent'], {}), '(self, parent)\n', (2376, 2390), False, 'from PyQt4 import QtCore\n'), ((2740, 2761), 'numpy.array', 'np.array', (['data.values'], {}), '(data.values)\n', (2748, 2761), True, 'import numpy as np\n'), ((2824, 2845), 'numpy.shape', 'np.shape', (['self.__data'], {}), '(self.__data)\n', (2832, 2845), True, 'import numpy as np\n'), ((4390, 4435), 'PyQt4.QtCore.QAbStractTableModel.event', 'QtCore.QAbStractTableModel.event', (['self', 'event'], {}), '(self, event)\n', (4422, 4435), False, 'from PyQt4 import QtCore\n'), ((796, 817), 'numpy.array', 'np.array', (['data.values'], {}), '(data.values)\n', (804, 817), True, 'import numpy as np\n'), ((860, 874), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (872, 874), True, 'import pandas as pd\n'), ((1493, 1539), 'pandas.DataFrame', 'pd.DataFrame', (['self.__data'], {'columns': 'self.__cols'}), '(self.__data, columns=self.__cols)\n', (1505, 1539), True, 'import pandas as pd\n'), ((2448, 2469), 'numpy.array', 'np.array', (['data.values'], {}), '(data.values)\n', (2456, 2469), True, 'import numpy as np\n'), ((2540, 2561), 'numpy.shape', 'np.shape', (['self.__data'], {}), '(self.__data)\n', (2548, 2561), True, 'import numpy as np\n'), ((3388, 3434), 'pandas.DataFrame', 'pd.DataFrame', (['self.__data'], {'columns': 'self.__cols'}), '(self.__data, columns=self.__cols)\n', (3400, 3434), True, 'import pandas as pd\n')] |
import numpy as np
class MiniBatch:
    """Iterator yielding (X, y) mini-batches, optionally shuffled."""

    def __init__(self, X: np.ndarray, y: np.ndarray, n, batch_size=1, shuffle=True):
        """
        Creates iterator throw given data
        :param X: features array
        :param y: marks array, aligned with ``X``
        :param n: number of elements
        :param batch_size: mini-batch size
        :param shuffle: check whether data needed to be shuffled
        """
        self.X = X
        self.y = y
        self.n = n
        self.k = 0  # index of the next batch to emit
        self.batch_size = batch_size
        self.shuffle = shuffle
        if self.shuffle:
            self.X, self.y = self.__shuffle__(X=self.X, y=self.y, n=self.n)

    def __iter__(self):
        return self

    def __next__(self):
        # Stop once all n elements have been emitted (last batch may be short).
        if self.n <= self.batch_size * self.k:
            raise StopIteration
        start = self.k * self.batch_size
        end = start + self.batch_size
        self.k += 1
        return self.X[start:end], self.y[start:end]

    @staticmethod
    def __shuffle__(X, y, n):
        # BUG FIX: the original called np.random.seed(indices) (seeding is
        # not shuffling) and then copied the rows in their original order,
        # so shuffle=True never actually permuted the data.
        indices = np.arange(n)
        np.random.shuffle(indices)
        # Apply the same permutation to X and y so pairs stay aligned.
        X_ = [X[i] for i in indices]
        y_ = [y[i] for i in indices]
        return np.array(X_), np.array(y_)

    def __reset_index__(self):
        """Rewind to the first batch, re-shuffling if shuffle is enabled."""
        self.k = 0
        if self.shuffle:
            self.X, self.y = self.__shuffle__(self.X, self.y, self.n)
| [
"numpy.array",
"numpy.random.seed",
"numpy.arange"
] | [((1010, 1022), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1019, 1022), True, 'import numpy as np\n'), ((1031, 1054), 'numpy.random.seed', 'np.random.seed', (['indices'], {}), '(indices)\n', (1045, 1054), True, 'import numpy as np\n'), ((1179, 1191), 'numpy.array', 'np.array', (['X_'], {}), '(X_)\n', (1187, 1191), True, 'import numpy as np\n'), ((1193, 1205), 'numpy.array', 'np.array', (['y_'], {}), '(y_)\n', (1201, 1205), True, 'import numpy as np\n')] |
import argparse
import warnings
from typing import Tuple, Callable
import numpy as np
from PIL import Image
from sklearn.utils.extmath import randomized_svd
# Silence all warnings globally to keep the CLI output clean.
warnings.filterwarnings("ignore")
# Aliases: a Matrix is a 2-d numpy array; both decompositions are
# returned as triples of matrices (U, Sigma, V^T).
Matrix = np.ndarray
SVD = Tuple[Matrix, Matrix, Matrix]
EVD = Tuple[Matrix, Matrix, Matrix]
def sklearn_svd(matrix: Matrix, n_components: int = None, **kwargs) -> SVD:
    """Thin wrapper over scikit-learn's randomized SVD returning (U, Sigma, Vt)
    with the singular values expanded into a diagonal matrix."""
    left, singular_values, right = randomized_svd(matrix, n_components=n_components, **kwargs)
    return left, np.diag(singular_values), right
def custom_svd(matrix: Matrix, n_components: int = None, **kwargs) -> SVD:
    """Hand-rolled SVD via the eigendecomposition of A^T A.

    Returns (U, Sigma, V^T) truncated to ``n_components`` columns/rows
    (all components when None).  NOTE(review): np.linalg.eig may return
    complex eigenpairs for nearly-degenerate inputs — confirm inputs are
    well-conditioned real matrices.
    """
    def _pad_matrix(matrix: Matrix, rows: int, cols: int) -> Matrix:
        # Zero-pad or crop so the result is exactly (rows, cols).
        cr, cc = matrix.shape
        matrix = np.copy(matrix)
        # Add rows of zeros
        if cr < rows:
            padding = np.zeros((rows - cr, matrix.shape[1]))
            matrix = np.concatenate((matrix, padding), axis=0)
        # Delete rows
        if cr > rows:
            matrix = matrix[:rows, :]
        # Add columns of zeros
        if cc < cols:
            padding = np.zeros((matrix.shape[0], cols - cc))
            matrix = np.concatenate((matrix, padding), axis=1)
        # Delete columns (the original comment wrongly said "rows")
        if cc > cols:
            matrix = matrix[:, :cols]
        return matrix
    def _calc_inv(matrix: Matrix) -> Matrix:
        # Element-wise reciprocal with 0 mapped to 0: the pseudo-inverse
        # of a diagonal matrix.
        with np.errstate(divide='ignore'):
            result = 1. / matrix
        result[matrix == 0] = 0
        return result
    def _evd(a: Matrix) -> Tuple[Matrix, Matrix]:  # annotation fixed: returns a pair, not EVD
        eigval, eigvec = np.linalg.eig(a)
        return eigvec, eigval
    A = np.copy(matrix)
    n, m = A.shape
    C = A.T @ A  # Gram matrix; its eigenvalues are the squared singular values
    eigvec, eigval = _evd(C)
    # SIGM: sort eigenpairs by decreasing eigenvalue, drop exact zeros,
    # and take square roots to obtain the singular values.
    eigvec = eigvec[:, np.argsort(-eigval)]
    eigval = eigval[np.argsort(-eigval)]
    eigval = eigval[eigval != 0]
    eigval = np.sqrt(eigval)
    SIGM = np.diag(eigval)
    # V: eigenvectors of A^T A are the right singular vectors.
    V = eigvec
    V_SIGM = _pad_matrix(V, V.shape[0], SIGM.shape[1])
    # U: recovered as A V Sigma^{-1}, then padded to (n, n).
    U = (A @ V_SIGM) @ _calc_inv(SIGM)
    U = _pad_matrix(U, n, n)
    # OUTPUT: pad Sigma to the full (n, m) shape, then truncate everything
    # to the requested component count ([:None] keeps all).
    SIGM = _pad_matrix(SIGM, n, m)
    return U[:, :n_components], SIGM[:n_components, :n_components], \
           V.T[:n_components]
def compress_layer(matrix: Matrix, method: Callable, n_components: int
                   ) -> Matrix:
    """ Compress single layer of a photo """
    # Factorize with the supplied SVD method and recompose the truncated
    # approximation U @ Sigma @ Vt.
    left, sigma, right = method(matrix, n_components)
    return left @ sigma @ right
def compress_image(args: argparse.Namespace) -> None:
    """ Compresses an image by applying SVD decomposition channel-by-channel
    and optionally writes the result to ``args.output``. """
    def rescale(x: Matrix) -> Matrix:
        # Map values linearly onto [0, 1] for conversion back to uint8.
        return (x - x.min()) / (x.max() - x.min())
    # Load as float in [0, 1]; grayscale images arrive as 2-d arrays.
    img = np.array(Image.open(args.file)) / 255.
    # No -k given: keep every component (the image width).
    n_components = args.k if args.k is not None else img.shape[1]
    svd_method = custom_svd if args.svd_method_type == 'custom' \
        else sklearn_svd
    colormap = 'RGB'
    if len(img.shape) == 2:
        # Treat grayscale as a single-channel image so the loop below works.
        colormap = 'L'
        img = img.reshape(img.shape[0], img.shape[1], 1)
    # Compress each channel independently and re-stack along axis 2.
    compressed_img = []
    for ch in range(img.shape[2]):
        data = compress_layer(img[:, :, ch], svd_method, n_components)
        compressed_img.append(np.expand_dims(data, 2))
    compressed_img = np.concatenate(compressed_img, axis=2)
    compressed_img = (rescale(compressed_img) * 255).astype('uint8')
    if colormap == 'L':
        # Drop the synthetic channel axis added above.
        compressed_img = compressed_img[:, :, 0]
    # Only write the result when an output path was supplied.
    if args.output is not None:
        Image.fromarray(compressed_img).save(args.output)
def parse_arguments() -> argparse.Namespace:
    """Build and evaluate the command-line interface of the compressor."""
    cli = argparse.ArgumentParser()
    cli.add_argument('-f', '--file', required=True, dest='file',
                     help='Input file path.')
    cli.add_argument('-out', '--output', default=None, dest='output',
                     help='Output file path.')
    cli.add_argument('-svd', '--svd_method_type', default='custom',
                     dest='svd_method_type', choices=['custom', 'scikit'],
                     help='Method type.')
    cli.add_argument('-k', '--k', default=None, type=int, dest='k',
                     help='Compression strength')
    return cli.parse_args()
if __name__ == '__main__':
    # CLI entry point: parse arguments and run the compression pipeline.
    args = parse_arguments()
    compress_image(args)
| [
"sklearn.utils.extmath.randomized_svd",
"numpy.copy",
"PIL.Image.open",
"numpy.sqrt",
"numpy.linalg.eig",
"argparse.ArgumentParser",
"PIL.Image.fromarray",
"numpy.diag",
"numpy.argsort",
"numpy.errstate",
"numpy.zeros",
"numpy.concatenate",
"numpy.expand_dims",
"warnings.filterwarnings"
] | [((159, 192), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (182, 192), False, 'import warnings\n'), ((392, 451), 'sklearn.utils.extmath.randomized_svd', 'randomized_svd', (['matrix'], {'n_components': 'n_components'}), '(matrix, n_components=n_components, **kwargs)\n', (406, 451), False, 'from sklearn.utils.extmath import randomized_svd\n'), ((463, 476), 'numpy.diag', 'np.diag', (['sigm'], {}), '(sigm)\n', (470, 476), True, 'import numpy as np\n'), ((1529, 1544), 'numpy.copy', 'np.copy', (['matrix'], {}), '(matrix)\n', (1536, 1544), True, 'import numpy as np\n'), ((1752, 1767), 'numpy.sqrt', 'np.sqrt', (['eigval'], {}), '(eigval)\n', (1759, 1767), True, 'import numpy as np\n'), ((1779, 1794), 'numpy.diag', 'np.diag', (['eigval'], {}), '(eigval)\n', (1786, 1794), True, 'import numpy as np\n'), ((3064, 3102), 'numpy.concatenate', 'np.concatenate', (['compressed_img'], {'axis': '(2)'}), '(compressed_img, axis=2)\n', (3078, 3102), True, 'import numpy as np\n'), ((3398, 3423), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3421, 3423), False, 'import argparse\n'), ((694, 709), 'numpy.copy', 'np.copy', (['matrix'], {}), '(matrix)\n', (701, 709), True, 'import numpy as np\n'), ((1473, 1489), 'numpy.linalg.eig', 'np.linalg.eig', (['a'], {}), '(a)\n', (1486, 1489), True, 'import numpy as np\n'), ((1685, 1704), 'numpy.argsort', 'np.argsort', (['(-eigval)'], {}), '(-eigval)\n', (1695, 1704), True, 'import numpy as np\n'), ((775, 813), 'numpy.zeros', 'np.zeros', (['(rows - cr, matrix.shape[1])'], {}), '((rows - cr, matrix.shape[1]))\n', (783, 813), True, 'import numpy as np\n'), ((835, 876), 'numpy.concatenate', 'np.concatenate', (['(matrix, padding)'], {'axis': '(0)'}), '((matrix, padding), axis=0)\n', (849, 876), True, 'import numpy as np\n'), ((1027, 1065), 'numpy.zeros', 'np.zeros', (['(matrix.shape[0], cols - cc)'], {}), '((matrix.shape[0], cols - cc))\n', (1035, 1065), True, 'import numpy as 
np\n'), ((1087, 1128), 'numpy.concatenate', 'np.concatenate', (['(matrix, padding)'], {'axis': '(1)'}), '((matrix, padding), axis=1)\n', (1101, 1128), True, 'import numpy as np\n'), ((1294, 1322), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (1305, 1322), True, 'import numpy as np\n'), ((1644, 1663), 'numpy.argsort', 'np.argsort', (['(-eigval)'], {}), '(-eigval)\n', (1654, 1663), True, 'import numpy as np\n'), ((2539, 2560), 'PIL.Image.open', 'Image.open', (['args.file'], {}), '(args.file)\n', (2549, 2560), False, 'from PIL import Image\n'), ((3017, 3040), 'numpy.expand_dims', 'np.expand_dims', (['data', '(2)'], {}), '(data, 2)\n', (3031, 3040), True, 'import numpy as np\n'), ((3287, 3318), 'PIL.Image.fromarray', 'Image.fromarray', (['compressed_img'], {}), '(compressed_img)\n', (3302, 3318), False, 'from PIL import Image\n')] |
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
import pickle
from image_thresholding import *
from plotting_helpers import *
from line_fit import *
# *** PIPELINE ***
# Get image
# img_name = " - No parallel lanes (1.5)_2.jpg"
name = "straight_lines2"
img_name = "test_images/" + name + ".jpg"
img = mpimg.imread(img_name)
# 1. Correct distorsion
# open distorsion matrix
try:
saved_dist = pickle.load(open('calibrate_camera.p', 'rb'), encoding='latin1')
mtx = saved_dist['mtx']
dist = saved_dist['dist']
except (OSError, IOError): # No progress file yet available
print("No saved distorsion data. Run camera_calibration.py")
# get undistorted image
undist = cv2.undistort(img, mtx, dist, None, mtx)
# plot_calibration(img, undist)
# 2. Apply filters to get binary map
ksize = 3
gradx = abs_sobel_thresh(undist, orient='x', sobel_kernel=ksize, thresh=(10, 100))
grady = abs_sobel_thresh(undist, orient='y', sobel_kernel=ksize, thresh=(5, 100))
mag_bin = mag_thresh(undist, sobel_kernel=ksize, mag_thresh=(10, 200))
dir_bin = dir_threshold(undist, sobel_kernel=15, thresh=(0.9, 1.2))
hls_bin = hls_select(img, thresh=(50, 255))
white_bin = white_select(img, thresh=195)
yellow_bin = yellow_select(img)
# combine filters to a final output
combined = np.zeros_like(dir_bin)
combined[((mag_bin == 1) & (dir_bin == 1) & (hls_bin == 1)) | ((white_bin == 1) | (yellow_bin == 1))] = 1
# Plot the thresholding step
# plot_thresholds(undist, mag_bin, dir_bin,
# hls_bin, white_bin, yellow_bin,
# ((mag_bin == 1) & (dir_bin == 1) & (hls_bin == 1)), combined,
# ((white_bin == 1) | (yellow_bin == 1)))
# 3. Define trapezoid points on the road and transform perspective
X = combined.shape[1]
Y = combined.shape[0]
src = np.float32(
[[205, 720],
[1075, 720],
[700, 460],
[580, 460]])
dst = np.float32(
[[300, 720],
[980, 720],
[980, 0],
[300, 0]])
# Get perspective transformation matrix
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
# Warp the result of binary thresholds
warped = cv2.warpPerspective(combined, M, (X,Y), flags=cv2.INTER_LINEAR)
# Plot warping step
# for i in range(len(src)):
# cv2.line(undist, (src[i][0], src[i][1]), (src[(i + 1) % 4][0], src[(i + 1) % 4][1]), (255, 0, 0), 2)
# img_warped = cv2.warpPerspective(undist, M, (X,Y), flags=cv2.INTER_LINEAR)
# plot_warping(undist, img_warped, src)
# 4. Get polinomial fit of lines
out_img, left_fit_cf, right_fit_cf, left_fitx, right_fitx, ploty = fit_polynomial(warped)
# leftx, lefty, rightx, righty, out_img = find_lane_pixels(warped)
ok, err = sanity_chk(ploty, left_fitx, right_fitx)
print(ok, err)
# Plot polynomial result
# plot_img(out_img)
# 5. Calculate curvature
curv_left, curv_right = find_curv(ploty, left_fit_cf, right_fit_cf)
road_curv = (curv_left + curv_right) / 2
lane_w = (right_fitx[-1] - left_fitx[-1]) * 3.7/700
offset = (((right_fitx[-1] + left_fitx[-1]) - img.shape[1]) / 2) * 3.7/700
print("base dist: ", right_fitx[-1] - left_fitx[-1])
print("upper dist: ", right_fitx[0] - left_fitx[0])
print("Curvature left: ", curv_left)
print("Curvature right: ", curv_right)
print("Lane width: ", lane_w)
print("Offset: ", offset)
# 6. Plot fitted lanes into original image
# create an image to draw the lines on
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
# print("dst: ", dst.shape)
# print("newwarp: ", newwarp.shape)
# Combine the result with the original image
result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
# add text
curv_txt = "Radius of curvature: {0:.0f}m".format(road_curv)
side = {True: "left", False: "right"}
offset_txt = "Car is {0:.2f}m {1:s} of center".format(offset, side[offset>0])
cv2.putText(result, curv_txt, (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)
cv2.putText(result, offset_txt, (75, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)
plot_img(result)
mpimg.imsave("output_images/"+ name + "_output.jpg", result) | [
"numpy.dstack",
"cv2.getPerspectiveTransform",
"numpy.hstack",
"matplotlib.image.imread",
"cv2.undistort",
"matplotlib.image.imsave",
"cv2.putText",
"cv2.addWeighted",
"cv2.warpPerspective",
"numpy.vstack",
"numpy.int_",
"numpy.zeros_like",
"numpy.float32"
] | [((362, 384), 'matplotlib.image.imread', 'mpimg.imread', (['img_name'], {}), '(img_name)\n', (374, 384), True, 'import matplotlib.image as mpimg\n'), ((739, 779), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist', 'None', 'mtx'], {}), '(img, mtx, dist, None, mtx)\n', (752, 779), False, 'import cv2\n'), ((1329, 1351), 'numpy.zeros_like', 'np.zeros_like', (['dir_bin'], {}), '(dir_bin)\n', (1342, 1351), True, 'import numpy as np\n'), ((1838, 1899), 'numpy.float32', 'np.float32', (['[[205, 720], [1075, 720], [700, 460], [580, 460]]'], {}), '([[205, 720], [1075, 720], [700, 460], [580, 460]])\n', (1848, 1899), True, 'import numpy as np\n'), ((1942, 1998), 'numpy.float32', 'np.float32', (['[[300, 720], [980, 720], [980, 0], [300, 0]]'], {}), '([[300, 720], [980, 720], [980, 0], [300, 0]])\n', (1952, 1998), True, 'import numpy as np\n'), ((2079, 2116), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (2106, 2116), False, 'import cv2\n'), ((2124, 2161), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['dst', 'src'], {}), '(dst, src)\n', (2151, 2161), False, 'import cv2\n'), ((2210, 2274), 'cv2.warpPerspective', 'cv2.warpPerspective', (['combined', 'M', '(X, Y)'], {'flags': 'cv2.INTER_LINEAR'}), '(combined, M, (X, Y), flags=cv2.INTER_LINEAR)\n', (2229, 2274), False, 'import cv2\n'), ((3497, 3541), 'numpy.dstack', 'np.dstack', (['(warp_zero, warp_zero, warp_zero)'], {}), '((warp_zero, warp_zero, warp_zero))\n', (3506, 3541), True, 'import numpy as np\n'), ((3762, 3794), 'numpy.hstack', 'np.hstack', (['(pts_left, pts_right)'], {}), '((pts_left, pts_right))\n', (3771, 3794), True, 'import numpy as np\n'), ((3990, 4057), 'cv2.warpPerspective', 'cv2.warpPerspective', (['color_warp', 'Minv', '(img.shape[1], img.shape[0])'], {}), '(color_warp, Minv, (img.shape[1], img.shape[0]))\n', (4009, 4057), False, 'import cv2\n'), ((4176, 4219), 'cv2.addWeighted', 'cv2.addWeighted', (['undist', '(1)', 'newwarp', 
'(0.3)', '(0)'], {}), '(undist, 1, newwarp, 0.3, 0)\n', (4191, 4219), False, 'import cv2\n'), ((4410, 4503), 'cv2.putText', 'cv2.putText', (['result', 'curv_txt', '(75, 75)', 'cv2.FONT_HERSHEY_SIMPLEX', '(2)', '(255, 255, 255)', '(3)'], {}), '(result, curv_txt, (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, \n 255, 255), 3)\n', (4421, 4503), False, 'import cv2\n'), ((4499, 4595), 'cv2.putText', 'cv2.putText', (['result', 'offset_txt', '(75, 150)', 'cv2.FONT_HERSHEY_SIMPLEX', '(2)', '(255, 255, 255)', '(3)'], {}), '(result, offset_txt, (75, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (\n 255, 255, 255), 3)\n', (4510, 4595), False, 'import cv2\n'), ((4610, 4671), 'matplotlib.image.imsave', 'mpimg.imsave', (["('output_images/' + name + '_output.jpg')", 'result'], {}), "('output_images/' + name + '_output.jpg', result)\n", (4622, 4671), True, 'import matplotlib.image as mpimg\n'), ((3865, 3879), 'numpy.int_', 'np.int_', (['[pts]'], {}), '([pts])\n', (3872, 3879), True, 'import numpy as np\n'), ((3445, 3466), 'numpy.zeros_like', 'np.zeros_like', (['warped'], {}), '(warped)\n', (3458, 3466), True, 'import numpy as np\n'), ((3643, 3672), 'numpy.vstack', 'np.vstack', (['[left_fitx, ploty]'], {}), '([left_fitx, ploty])\n', (3652, 3672), True, 'import numpy as np\n'), ((3721, 3751), 'numpy.vstack', 'np.vstack', (['[right_fitx, ploty]'], {}), '([right_fitx, ploty])\n', (3730, 3751), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["setup", "update_list", "update_database", "write_tweet"]
import os
import json
import nltk
import tweepy
import string
import numpy as np
import cPickle as pickle
from collections import defaultdict
PROJECTNAME = "parrotization"
DATABASE_FILE = "{0}.pkl".format(PROJECTNAME)
SETTINGS_FILE = "{0}.json".format(PROJECTNAME)
START = "<S>"
STOP = "</S>"
def load_settings():
if os.path.exists(SETTINGS_FILE):
with open(SETTINGS_FILE, "r") as f:
settings = json.load(f)
else:
settings = {}
return settings
def save_settings(settings):
with open(SETTINGS_FILE, "w") as f:
json.dump(settings, f, indent=2)
def get_api():
settings = load_settings()
auth = tweepy.OAuthHandler(settings["consumer_key"],
settings["consumer_secret"])
auth.secure = True
auth.set_access_token(settings["user_key"], settings["user_secret"])
return tweepy.API(auth,
wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
def _default():
return defaultdict(int)
def load_db():
if not os.path.exists(DATABASE_FILE):
bigrams = defaultdict(_default)
trigrams = defaultdict(_default)
return (bigrams, trigrams)
with open(DATABASE_FILE, "r") as f:
return pickle.load(f)
def save_db(db):
with open(DATABASE_FILE, "wb") as f:
return pickle.dump(db, f, -1)
def setup(clobber=False):
settings = load_settings()
# Get the app information.
if (clobber or "consumer_key" not in settings or
"consumer_secret" not in settings):
print("Enter some info about your app")
settings["consumer_key"] = raw_input("Consumer key: ")
settings["consumer_secret"] = raw_input("Consumer secret: ")
# Authorize the user.
if clobber or "user_key" not in settings or "user_secret" not in settings:
auth = tweepy.OAuthHandler(settings["consumer_key"],
settings["consumer_secret"],
"oob")
url = auth.get_authorization_url()
print("Go to this URL:\n{0}".format(url))
pin = raw_input("Enter the PIN: ")
auth.get_access_token(pin)
settings["user_key"] = auth.access_token
settings["user_secret"] = auth.access_token_secret
save_settings(settings)
def update_list():
# Get the initial settings.
api = get_api()
settings = load_settings()
if "list_slug" not in settings:
settings["list_slug"] = api.create_list("cast").slug
save_settings(settings)
if "screen_name" not in settings:
settings["screen_name"] = api.me().screen_name
save_settings(settings)
# Add all the followers to the list.
owner, list_slug = settings["screen_name"], settings["list_slug"]
api.add_list_members(user_id=api.followers_ids(),
owner_screen_name=owner, slug=list_slug)
def update_database():
# Get all of the recent tweets in the timeline.
api = get_api()
settings = load_settings()
bigrams, trigrams = load_db()
owner, list_slug = api.me().screen_name, settings["list_slug"]
for tweet in tweepy.Cursor(api.list_timeline, owner_screen_name=owner,
since_id=settings.get("since_id", None),
include_rts=False,
slug=list_slug).items(1000):
# Tokenize the tweet.
text = tweet.text
a, b = "://", "URLURLURL"
text = text.replace(a, b)
tokens = [w.replace(b, a) for w in nltk.word_tokenize(text)]
tokens = [START, START]+tokens+[STOP, STOP]
# Update the id of the most recently seen tweet.
settings["since_id"] = max(tweet.id, settings.get("since_id", 0))
# Update the bigram and trigram dictionaries.
for i in range(2, len(tokens)):
bigrams[tokens[i-1]][tokens[i]] += 1
trigrams[tokens[i-2]+" "+tokens[i-1]][tokens[i]] += 1
# Save the database and the settings file.
save_db((bigrams, trigrams))
save_settings(settings)
def build_tweet(words, api, settings):
s = " "
for i, w in enumerate(words):
if i > 0 and words[i-1] == "@":
try:
f, _ = api.show_friendship(
source_screen_name=settings["screen_name"],
target_screen_name=w)
except tweepy.error.TweepError:
is_follower = False
else:
is_follower = f.followed_by
if is_follower:
s += w + " "
else:
s = s[:-1] + "." + w + " "
elif w.startswith("'") or w in ["n't"]:
s = s[:-1] + w + " "
elif not len(w.strip(string.punctuation)):
if w in ["(", "{", "@", "#", "&", "``"]:
s += w
else:
s = s[:-1] + w + " "
else:
s += w + " "
s = s.strip()
# Finally match any missing parens.
if "(" in s and ")" not in s:
s += ")"
if ")" in s and "(" not in s:
s = "(" + s
s = s.replace("``", "\"").replace("''", "\"")
return s
def write_tweet(alpha=0.6):
api = get_api()
settings = load_settings()
bigrams, trigrams = load_db()
tweet = [START, START]
while True:
b_prob = bigrams[tweet[-1]]
t_prob = trigrams[tweet[-2]+" "+tweet[-1]]
b_norm = sum(b_prob.values())
t_norm = sum(t_prob.values())
if b_norm < 1 or t_norm < 1:
continue
words, probs = [], []
for w in set(b_prob.keys()) | set(t_prob.keys()):
words.append(w)
probs.append(alpha * t_prob.get(w, 0.0)/t_norm
+ (1-alpha) * b_prob.get(w, 0.0)/b_norm)
word = np.random.choice(words, p=probs)
if word == STOP:
if len(tweet) > 6:
break
# Too short.
tweet = [START, START]
continue
tweet.append(word)
sent = build_tweet(tweet[2:], api, settings)
if len(sent) > 140:
# Too long.
tweet = [START, START]
return sent
if __name__ == "__main__":
import sys
if "setup" in sys.argv:
setup()
elif "update" in sys.argv:
update_list()
update_database()
elif "print" in sys.argv:
print(write_tweet())
elif "tweet" in sys.argv:
tweet = write_tweet()
print(tweet)
api = get_api()
api.update_status(tweet)
| [
"os.path.exists",
"cPickle.dump",
"nltk.word_tokenize",
"numpy.random.choice",
"tweepy.API",
"collections.defaultdict",
"json.load",
"cPickle.load",
"json.dump",
"tweepy.OAuthHandler"
] | [((490, 519), 'os.path.exists', 'os.path.exists', (['SETTINGS_FILE'], {}), '(SETTINGS_FILE)\n', (504, 519), False, 'import os\n'), ((824, 898), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (["settings['consumer_key']", "settings['consumer_secret']"], {}), "(settings['consumer_key'], settings['consumer_secret'])\n", (843, 898), False, 'import tweepy\n'), ((1037, 1110), 'tweepy.API', 'tweepy.API', (['auth'], {'wait_on_rate_limit': '(True)', 'wait_on_rate_limit_notify': '(True)'}), '(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n', (1047, 1110), False, 'import tweepy\n'), ((1184, 1200), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1195, 1200), False, 'from collections import defaultdict\n'), ((732, 764), 'json.dump', 'json.dump', (['settings', 'f'], {'indent': '(2)'}), '(settings, f, indent=2)\n', (741, 764), False, 'import json\n'), ((1229, 1258), 'os.path.exists', 'os.path.exists', (['DATABASE_FILE'], {}), '(DATABASE_FILE)\n', (1243, 1258), False, 'import os\n'), ((1278, 1299), 'collections.defaultdict', 'defaultdict', (['_default'], {}), '(_default)\n', (1289, 1299), False, 'from collections import defaultdict\n'), ((1319, 1340), 'collections.defaultdict', 'defaultdict', (['_default'], {}), '(_default)\n', (1330, 1340), False, 'from collections import defaultdict\n'), ((1431, 1445), 'cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1442, 1445), True, 'import cPickle as pickle\n'), ((1521, 1543), 'cPickle.dump', 'pickle.dump', (['db', 'f', '(-1)'], {}), '(db, f, -1)\n', (1532, 1543), True, 'import cPickle as pickle\n'), ((2037, 2122), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (["settings['consumer_key']", "settings['consumer_secret']", '"""oob"""'], {}), "(settings['consumer_key'], settings['consumer_secret'],\n 'oob')\n", (2056, 2122), False, 'import tweepy\n'), ((5993, 6025), 'numpy.random.choice', 'np.random.choice', (['words'], {'p': 'probs'}), '(words, p=probs)\n', (6009, 6025), True, 'import numpy as 
np\n'), ((588, 600), 'json.load', 'json.load', (['f'], {}), '(f)\n', (597, 600), False, 'import json\n'), ((3740, 3764), 'nltk.word_tokenize', 'nltk.word_tokenize', (['text'], {}), '(text)\n', (3758, 3764), False, 'import nltk\n')] |
'''
Authors: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
If this code is useful to you, please cite the following paper:
<NAME>, <NAME>, and <NAME>. Learning topology from synthetic data for unsupervised depth completion.
In the Robotics and Automation Letters (RA-L) 2021 and Proceedings of International Conference on Robotics and Automation (ICRA) 2021
@article{wong2021learning,
title={Learning topology from synthetic data for unsupervised depth completion},
author={<NAME> and <NAME> and <NAME>},
journal={IEEE Robotics and Automation Letters},
volume={6},
number={2},
pages={1495--1502},
year={2021},
publisher={IEEE}
}
'''
import sys, os, glob
import numpy as np
import cv2
import multiprocessing as mp
from skimage import morphology as skmorph
sys.path.insert(0, 'src')
import data_utils
'''
Paths for KITTI dataset
'''
KITTI_ROOT_DIRPATH = os.path.join('data', 'kitti_depth_completion')
KITTI_TRAIN_SPARSE_DEPTH_DIRPATH = os.path.join(
KITTI_ROOT_DIRPATH, 'train_val_split', 'sparse_depth', 'train')
# To be concatenated to sequence path
KITTI_SPARSE_DEPTH_REFPATH = os.path.join('proj_depth', 'velodyne_raw')
'''
Paths for Virtual KITTI dataset
'''
VKITTI_ROOT_DIRPATH = os.path.join('data', 'virtual_kitti')
VKITTI_TRAIN_DEPTH_REFPATH = 'vkitti_1.3.1_depthgt'
# Note: we only need to use the clone directory since lighting change only affects RGB
VKITTI_TRAIN_DENSE_DEPTH_DIRPATH = \
os.path.join(VKITTI_ROOT_DIRPATH, VKITTI_TRAIN_DEPTH_REFPATH)
'''
Output directory
'''
OUTPUT_ROOT_DIRPATH = os.path.join('data', 'virtual_kitti_learning_topology')
OUTPUT_REF_DIRPATH = os.path.join('training', 'vkitti')
OUTPUT_SPARSE_DEPTH_FILEPATH = os.path.join(
OUTPUT_REF_DIRPATH, 'vkitti_train_sparse_depth.txt')
OUTPUT_VALIDITY_MAP_FILEPATH = os.path.join(
OUTPUT_REF_DIRPATH, 'vkitti_train_validity_map.txt')
OUTPUT_SEMI_DENSE_DEPTH_FILEPATH = os.path.join(
OUTPUT_REF_DIRPATH, 'vkitti_train_semi_dense_depth.txt')
OUTPUT_DENSE_DEPTH_FILEPATH = os.path.join(
OUTPUT_REF_DIRPATH, 'vkitti_train_dense_depth.txt')
OUTPUT_GROUND_TRUTH_FILEPATH = os.path.join(
OUTPUT_REF_DIRPATH, 'vkitti_train_ground_truth.txt')
def process_frame(inputs):
'''
Processes a single depth frame
Args:
inputs : tuple
KITTI sparse depth path,
Virtual KITTI ground truth path,
output directory paths in order of:
sparse depth, validity map, semi-dense depth, dense depth, groundtruth
Returns:
str : Virtual KITTI output sparse depth path
str : Virtual KITTI output validity map path
str : Virtual KITTI output semi-dense depth (convex hull of sparse points) path
str : Virtual KITTI output dense depth path (ground truth without sky)
str : Virtual KITTI output ground truth path
'''
# Separate arguments into individual variables
kitti_sparse_depth_path, vkitti_ground_truth_path, output_dirpaths = inputs
# Extract validity map from KITTI sparse depth
_, kitti_validity_map = data_utils.load_depth_with_validity_map(kitti_sparse_depth_path)
# Load Virtual KITTI ground truth
vkitti_ground_truth = \
cv2.imread(vkitti_ground_truth_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
# Convert Virtual KITTI ground truth to meters
vkitti_ground_truth = vkitti_ground_truth / 100.0
if kitti_validity_map.shape != vkitti_ground_truth.shape:
# Resize KITTI validity map to VKITTI size
kitti_validity_map = cv2.resize(
kitti_validity_map,
dsize=(vkitti_ground_truth.shape[1], vkitti_ground_truth.shape[0]),
interpolation=cv2.INTER_NEAREST)
assert(np.all(np.unique(kitti_validity_map) == [0, 1]))
# Get Virtual KITTI dense depth without sky
vkitti_validity_map = np.ones(vkitti_ground_truth.shape)
vkitti_validity_map[vkitti_ground_truth > 600.0] = 0.0
vkitti_dense_depth = vkitti_validity_map * vkitti_ground_truth
# Get Virtual KITTI sparse depth
vkitti_sparse_depth = kitti_validity_map * vkitti_dense_depth
# Get Virtual KITTI semi-dense depth (convex hull of sparse points)
vkitti_semi_dense_depth = \
np.where(skmorph.convex_hull_image(kitti_validity_map), 1, 0) * vkitti_dense_depth
# Create output filepaths
filename = os.path.basename(vkitti_ground_truth_path)
output_sparse_depth_dirpath, \
output_validity_map_dirpath, \
output_semi_dense_depth_dirpath, \
output_dense_depth_dirpath, \
output_ground_truth_dirpath = output_dirpaths
output_sparse_depth_path = os.path.join(output_sparse_depth_dirpath, filename)
output_validity_map_path = os.path.join(output_validity_map_dirpath, filename)
output_semi_dense_depth_path = os.path.join(output_semi_dense_depth_dirpath, filename)
output_dense_depth_path = os.path.join(output_dense_depth_dirpath, filename)
output_ground_truth_path = os.path.join(output_ground_truth_dirpath, filename)
# Write to disk
data_utils.save_depth(vkitti_sparse_depth, output_sparse_depth_path)
data_utils.save_validity_map(kitti_validity_map, output_validity_map_path)
data_utils.save_depth(vkitti_semi_dense_depth, output_semi_dense_depth_path)
data_utils.save_depth(vkitti_dense_depth, output_dense_depth_path)
data_utils.save_depth(vkitti_ground_truth, output_ground_truth_path)
return (output_sparse_depth_path,
output_validity_map_path,
output_semi_dense_depth_path,
output_dense_depth_path,
output_ground_truth_path)
'''
Select KITTI and Virtual KITTI paths
'''
# Obtain the set of sequence dirpaths
kitti_sequence_dirpaths = glob.glob(os.path.join(KITTI_TRAIN_SPARSE_DEPTH_DIRPATH, '*/'))
vkitti_sequence_dirpaths = glob.glob(os.path.join(VKITTI_TRAIN_DENSE_DEPTH_DIRPATH, '*/'))
# Get the longest sequence from VKITTI
max_vkitti_filepaths = 0
for vkitti_sequence_dirpath in vkitti_sequence_dirpaths:
# Select filepaths in Virtual KITTI sequence
vkitti_sequence_dirpath = os.path.join(vkitti_sequence_dirpath, 'clone')
vkitti_sequence_filepaths = glob.glob(os.path.join(vkitti_sequence_dirpath, '*.png'))
n_vkitti_filepaths = len(vkitti_sequence_filepaths)
if n_vkitti_filepaths > max_vkitti_filepaths:
max_vkitti_filepaths = n_vkitti_filepaths
# Select from the KITTI sequences that have at least the number of files as VKITTI
kitti_sequence_dirpath_pool = []
for kitti_sequence_dirpath in kitti_sequence_dirpaths:
# Select filepaths in KITTI sequence
kitti_sequence_filepaths = glob.glob(
os.path.join(kitti_sequence_dirpath, KITTI_SPARSE_DEPTH_REFPATH, 'image_02', '*.png'))
n_kitti_filepaths = len(kitti_sequence_filepaths)
if n_kitti_filepaths >= max_vkitti_filepaths:
kitti_sequence_dirpath_pool.append(kitti_sequence_dirpath)
'''
Process data to generate sparse depth for Virtual KITTI
'''
if not os.path.exists(OUTPUT_REF_DIRPATH):
os.makedirs(OUTPUT_REF_DIRPATH)
output_sparse_depth_paths = []
output_validity_map_paths = []
output_semi_dense_depth_paths = []
output_dense_depth_paths = []
output_ground_truth_paths = []
for vkitti_sequence_dirpath in vkitti_sequence_dirpaths:
print('Processing Virtual KITTI sequence: {}'.format(vkitti_sequence_dirpath))
# Select filepath in Virtual KITTI sequence
vkitti_sequence_dirpath = os.path.join(vkitti_sequence_dirpath, 'clone')
vkitti_sequence = vkitti_sequence_dirpath.split(os.sep)[-2]
vkitti_sequence_depth_filepaths = sorted(glob.glob(os.path.join(vkitti_sequence_dirpath, '*.png')))
n_vkitti_filepaths = len(vkitti_sequence_depth_filepaths)
output_sequence_dirpath = os.path.join(
OUTPUT_ROOT_DIRPATH, VKITTI_TRAIN_DEPTH_REFPATH, vkitti_sequence)
for kitti_sequence_dirpath in kitti_sequence_dirpath_pool:
# Select KITTI sequence, since it is a directory last element is empty so grab the second til last
kitti_sequence = kitti_sequence_dirpath.split(os.sep)[-2]
kitti_sequence_dirpath = os.path.join(kitti_sequence_dirpath, KITTI_SPARSE_DEPTH_REFPATH)
for camera_dirpath in ['image_02', 'image_03']:
kitti_sequence_filepaths = sorted(glob.glob(
os.path.join(kitti_sequence_dirpath, camera_dirpath, '*.png')))
kitti_sequence_filepaths = kitti_sequence_filepaths[0:n_vkitti_filepaths]
output_sparse_depth_dirpath = os.path.join(
output_sequence_dirpath, kitti_sequence, camera_dirpath, 'sparse_depth')
output_validity_map_dirpath = os.path.join(
output_sequence_dirpath, kitti_sequence, camera_dirpath, 'validity_map')
output_semi_dense_depth_dirpath = os.path.join(
output_sequence_dirpath, kitti_sequence, camera_dirpath, 'semi_dense_depth')
output_dense_depth_dirpath = os.path.join(
output_sequence_dirpath, kitti_sequence, camera_dirpath, 'dense_depth')
output_ground_truth_dirpath = os.path.join(
output_sequence_dirpath, kitti_sequence, camera_dirpath, 'ground_truth')
output_dirpaths = [
output_sparse_depth_dirpath,
output_validity_map_dirpath,
output_semi_dense_depth_dirpath,
output_dense_depth_dirpath,
output_ground_truth_dirpath
]
for output_dirpath in output_dirpaths:
if not os.path.exists(output_dirpath):
os.makedirs(output_dirpath)
pool_input = [
(kitti_sequence_filepaths[idx], vkitti_sequence_depth_filepaths[idx], output_dirpaths)
for idx in range(n_vkitti_filepaths)
]
with mp.Pool() as pool:
pool_results = pool.map(process_frame, pool_input)
for result in pool_results:
output_sparse_depth_path, \
output_validity_map_path, \
output_semi_dense_depth_path, \
output_dense_depth_path, \
output_ground_truth_path = result
# Collect filepaths
output_sparse_depth_paths.append(output_sparse_depth_path)
output_validity_map_paths.append(output_validity_map_path)
output_semi_dense_depth_paths.append(output_semi_dense_depth_path)
output_dense_depth_paths.append(output_dense_depth_path)
output_ground_truth_paths.append(output_ground_truth_path)
print('Completed generating {} depth samples for using KITTI sequence={} camera={}'.format(
n_vkitti_filepaths, kitti_sequence, camera_dirpath))
print('Storing sparse depth file paths into: %s' % OUTPUT_SPARSE_DEPTH_FILEPATH)
data_utils.write_paths(
OUTPUT_SPARSE_DEPTH_FILEPATH, output_sparse_depth_paths)
print('Storing validity map file paths into: %s' % OUTPUT_VALIDITY_MAP_FILEPATH)
data_utils.write_paths(
OUTPUT_VALIDITY_MAP_FILEPATH, output_validity_map_paths)
print('Storing semi dense depth file paths into: %s' % OUTPUT_SEMI_DENSE_DEPTH_FILEPATH)
data_utils.write_paths(
OUTPUT_SEMI_DENSE_DEPTH_FILEPATH, output_semi_dense_depth_paths)
print('Storing dense depth file paths into: %s' % OUTPUT_DENSE_DEPTH_FILEPATH)
data_utils.write_paths(
OUTPUT_DENSE_DEPTH_FILEPATH, output_dense_depth_paths)
print('Storing ground-truth depth file paths into: %s' % OUTPUT_GROUND_TRUTH_FILEPATH)
data_utils.write_paths(
OUTPUT_GROUND_TRUTH_FILEPATH, output_ground_truth_paths)
| [
"os.path.exists",
"data_utils.write_paths",
"sys.path.insert",
"numpy.ones",
"os.makedirs",
"numpy.unique",
"os.path.join",
"skimage.morphology.convex_hull_image",
"os.path.basename",
"multiprocessing.Pool",
"data_utils.save_validity_map",
"cv2.resize",
"data_utils.save_depth",
"cv2.imread... | [((778, 803), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""src"""'], {}), "(0, 'src')\n", (793, 803), False, 'import sys, os, glob\n'), ((877, 923), 'os.path.join', 'os.path.join', (['"""data"""', '"""kitti_depth_completion"""'], {}), "('data', 'kitti_depth_completion')\n", (889, 923), False, 'import sys, os, glob\n'), ((959, 1035), 'os.path.join', 'os.path.join', (['KITTI_ROOT_DIRPATH', '"""train_val_split"""', '"""sparse_depth"""', '"""train"""'], {}), "(KITTI_ROOT_DIRPATH, 'train_val_split', 'sparse_depth', 'train')\n", (971, 1035), False, 'import sys, os, glob\n'), ((1109, 1151), 'os.path.join', 'os.path.join', (['"""proj_depth"""', '"""velodyne_raw"""'], {}), "('proj_depth', 'velodyne_raw')\n", (1121, 1151), False, 'import sys, os, glob\n'), ((1215, 1252), 'os.path.join', 'os.path.join', (['"""data"""', '"""virtual_kitti"""'], {}), "('data', 'virtual_kitti')\n", (1227, 1252), False, 'import sys, os, glob\n'), ((1434, 1495), 'os.path.join', 'os.path.join', (['VKITTI_ROOT_DIRPATH', 'VKITTI_TRAIN_DEPTH_REFPATH'], {}), '(VKITTI_ROOT_DIRPATH, VKITTI_TRAIN_DEPTH_REFPATH)\n', (1446, 1495), False, 'import sys, os, glob\n'), ((1544, 1599), 'os.path.join', 'os.path.join', (['"""data"""', '"""virtual_kitti_learning_topology"""'], {}), "('data', 'virtual_kitti_learning_topology')\n", (1556, 1599), False, 'import sys, os, glob\n'), ((1621, 1655), 'os.path.join', 'os.path.join', (['"""training"""', '"""vkitti"""'], {}), "('training', 'vkitti')\n", (1633, 1655), False, 'import sys, os, glob\n'), ((1688, 1753), 'os.path.join', 'os.path.join', (['OUTPUT_REF_DIRPATH', '"""vkitti_train_sparse_depth.txt"""'], {}), "(OUTPUT_REF_DIRPATH, 'vkitti_train_sparse_depth.txt')\n", (1700, 1753), False, 'import sys, os, glob\n'), ((1790, 1855), 'os.path.join', 'os.path.join', (['OUTPUT_REF_DIRPATH', '"""vkitti_train_validity_map.txt"""'], {}), "(OUTPUT_REF_DIRPATH, 'vkitti_train_validity_map.txt')\n", (1802, 1855), False, 'import sys, os, glob\n'), ((1896, 1965), 
'os.path.join', 'os.path.join', (['OUTPUT_REF_DIRPATH', '"""vkitti_train_semi_dense_depth.txt"""'], {}), "(OUTPUT_REF_DIRPATH, 'vkitti_train_semi_dense_depth.txt')\n", (1908, 1965), False, 'import sys, os, glob\n'), ((2001, 2065), 'os.path.join', 'os.path.join', (['OUTPUT_REF_DIRPATH', '"""vkitti_train_dense_depth.txt"""'], {}), "(OUTPUT_REF_DIRPATH, 'vkitti_train_dense_depth.txt')\n", (2013, 2065), False, 'import sys, os, glob\n'), ((2102, 2167), 'os.path.join', 'os.path.join', (['OUTPUT_REF_DIRPATH', '"""vkitti_train_ground_truth.txt"""'], {}), "(OUTPUT_REF_DIRPATH, 'vkitti_train_ground_truth.txt')\n", (2114, 2167), False, 'import sys, os, glob\n'), ((10895, 10974), 'data_utils.write_paths', 'data_utils.write_paths', (['OUTPUT_SPARSE_DEPTH_FILEPATH', 'output_sparse_depth_paths'], {}), '(OUTPUT_SPARSE_DEPTH_FILEPATH, output_sparse_depth_paths)\n', (10917, 10974), False, 'import data_utils\n'), ((11062, 11141), 'data_utils.write_paths', 'data_utils.write_paths', (['OUTPUT_VALIDITY_MAP_FILEPATH', 'output_validity_map_paths'], {}), '(OUTPUT_VALIDITY_MAP_FILEPATH, output_validity_map_paths)\n', (11084, 11141), False, 'import data_utils\n'), ((11237, 11328), 'data_utils.write_paths', 'data_utils.write_paths', (['OUTPUT_SEMI_DENSE_DEPTH_FILEPATH', 'output_semi_dense_depth_paths'], {}), '(OUTPUT_SEMI_DENSE_DEPTH_FILEPATH,\n output_semi_dense_depth_paths)\n', (11259, 11328), False, 'import data_utils\n'), ((11410, 11487), 'data_utils.write_paths', 'data_utils.write_paths', (['OUTPUT_DENSE_DEPTH_FILEPATH', 'output_dense_depth_paths'], {}), '(OUTPUT_DENSE_DEPTH_FILEPATH, output_dense_depth_paths)\n', (11432, 11487), False, 'import data_utils\n'), ((11581, 11660), 'data_utils.write_paths', 'data_utils.write_paths', (['OUTPUT_GROUND_TRUTH_FILEPATH', 'output_ground_truth_paths'], {}), '(OUTPUT_GROUND_TRUTH_FILEPATH, output_ground_truth_paths)\n', (11603, 11660), False, 'import data_utils\n'), ((3055, 3119), 'data_utils.load_depth_with_validity_map', 
'data_utils.load_depth_with_validity_map', (['kitti_sparse_depth_path'], {}), '(kitti_sparse_depth_path)\n', (3094, 3119), False, 'import data_utils\n'), ((3195, 3274), 'cv2.imread', 'cv2.imread', (['vkitti_ground_truth_path', '(cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)'], {}), '(vkitti_ground_truth_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)\n', (3205, 3274), False, 'import cv2\n'), ((3833, 3867), 'numpy.ones', 'np.ones', (['vkitti_ground_truth.shape'], {}), '(vkitti_ground_truth.shape)\n', (3840, 3867), True, 'import numpy as np\n'), ((4340, 4382), 'os.path.basename', 'os.path.basename', (['vkitti_ground_truth_path'], {}), '(vkitti_ground_truth_path)\n', (4356, 4382), False, 'import sys, os, glob\n'), ((4625, 4676), 'os.path.join', 'os.path.join', (['output_sparse_depth_dirpath', 'filename'], {}), '(output_sparse_depth_dirpath, filename)\n', (4637, 4676), False, 'import sys, os, glob\n'), ((4708, 4759), 'os.path.join', 'os.path.join', (['output_validity_map_dirpath', 'filename'], {}), '(output_validity_map_dirpath, filename)\n', (4720, 4759), False, 'import sys, os, glob\n'), ((4795, 4850), 'os.path.join', 'os.path.join', (['output_semi_dense_depth_dirpath', 'filename'], {}), '(output_semi_dense_depth_dirpath, filename)\n', (4807, 4850), False, 'import sys, os, glob\n'), ((4881, 4931), 'os.path.join', 'os.path.join', (['output_dense_depth_dirpath', 'filename'], {}), '(output_dense_depth_dirpath, filename)\n', (4893, 4931), False, 'import sys, os, glob\n'), ((4963, 5014), 'os.path.join', 'os.path.join', (['output_ground_truth_dirpath', 'filename'], {}), '(output_ground_truth_dirpath, filename)\n', (4975, 5014), False, 'import sys, os, glob\n'), ((5040, 5108), 'data_utils.save_depth', 'data_utils.save_depth', (['vkitti_sparse_depth', 'output_sparse_depth_path'], {}), '(vkitti_sparse_depth, output_sparse_depth_path)\n', (5061, 5108), False, 'import data_utils\n'), ((5113, 5187), 'data_utils.save_validity_map', 'data_utils.save_validity_map', 
(['kitti_validity_map', 'output_validity_map_path'], {}), '(kitti_validity_map, output_validity_map_path)\n', (5141, 5187), False, 'import data_utils\n'), ((5192, 5268), 'data_utils.save_depth', 'data_utils.save_depth', (['vkitti_semi_dense_depth', 'output_semi_dense_depth_path'], {}), '(vkitti_semi_dense_depth, output_semi_dense_depth_path)\n', (5213, 5268), False, 'import data_utils\n'), ((5273, 5339), 'data_utils.save_depth', 'data_utils.save_depth', (['vkitti_dense_depth', 'output_dense_depth_path'], {}), '(vkitti_dense_depth, output_dense_depth_path)\n', (5294, 5339), False, 'import data_utils\n'), ((5344, 5412), 'data_utils.save_depth', 'data_utils.save_depth', (['vkitti_ground_truth', 'output_ground_truth_path'], {}), '(vkitti_ground_truth, output_ground_truth_path)\n', (5365, 5412), False, 'import data_utils\n'), ((5728, 5780), 'os.path.join', 'os.path.join', (['KITTI_TRAIN_SPARSE_DEPTH_DIRPATH', '"""*/"""'], {}), "(KITTI_TRAIN_SPARSE_DEPTH_DIRPATH, '*/')\n", (5740, 5780), False, 'import sys, os, glob\n'), ((5819, 5871), 'os.path.join', 'os.path.join', (['VKITTI_TRAIN_DENSE_DEPTH_DIRPATH', '"""*/"""'], {}), "(VKITTI_TRAIN_DENSE_DEPTH_DIRPATH, '*/')\n", (5831, 5871), False, 'import sys, os, glob\n'), ((6074, 6120), 'os.path.join', 'os.path.join', (['vkitti_sequence_dirpath', '"""clone"""'], {}), "(vkitti_sequence_dirpath, 'clone')\n", (6086, 6120), False, 'import sys, os, glob\n'), ((6963, 6997), 'os.path.exists', 'os.path.exists', (['OUTPUT_REF_DIRPATH'], {}), '(OUTPUT_REF_DIRPATH)\n', (6977, 6997), False, 'import sys, os, glob\n'), ((7003, 7034), 'os.makedirs', 'os.makedirs', (['OUTPUT_REF_DIRPATH'], {}), '(OUTPUT_REF_DIRPATH)\n', (7014, 7034), False, 'import sys, os, glob\n'), ((7413, 7459), 'os.path.join', 'os.path.join', (['vkitti_sequence_dirpath', '"""clone"""'], {}), "(vkitti_sequence_dirpath, 'clone')\n", (7425, 7459), False, 'import sys, os, glob\n'), ((7721, 7799), 'os.path.join', 'os.path.join', (['OUTPUT_ROOT_DIRPATH', 
'VKITTI_TRAIN_DEPTH_REFPATH', 'vkitti_sequence'], {}), '(OUTPUT_ROOT_DIRPATH, VKITTI_TRAIN_DEPTH_REFPATH, vkitti_sequence)\n', (7733, 7799), False, 'import sys, os, glob\n'), ((3524, 3659), 'cv2.resize', 'cv2.resize', (['kitti_validity_map'], {'dsize': '(vkitti_ground_truth.shape[1], vkitti_ground_truth.shape[0])', 'interpolation': 'cv2.INTER_NEAREST'}), '(kitti_validity_map, dsize=(vkitti_ground_truth.shape[1],\n vkitti_ground_truth.shape[0]), interpolation=cv2.INTER_NEAREST)\n', (3534, 3659), False, 'import cv2\n'), ((6163, 6209), 'os.path.join', 'os.path.join', (['vkitti_sequence_dirpath', '"""*.png"""'], {}), "(vkitti_sequence_dirpath, '*.png')\n", (6175, 6209), False, 'import sys, os, glob\n'), ((6631, 6720), 'os.path.join', 'os.path.join', (['kitti_sequence_dirpath', 'KITTI_SPARSE_DEPTH_REFPATH', '"""image_02"""', '"""*.png"""'], {}), "(kitti_sequence_dirpath, KITTI_SPARSE_DEPTH_REFPATH, 'image_02',\n '*.png')\n", (6643, 6720), False, 'import sys, os, glob\n'), ((8079, 8143), 'os.path.join', 'os.path.join', (['kitti_sequence_dirpath', 'KITTI_SPARSE_DEPTH_REFPATH'], {}), '(kitti_sequence_dirpath, KITTI_SPARSE_DEPTH_REFPATH)\n', (8091, 8143), False, 'import sys, os, glob\n'), ((4220, 4265), 'skimage.morphology.convex_hull_image', 'skmorph.convex_hull_image', (['kitti_validity_map'], {}), '(kitti_validity_map)\n', (4245, 4265), True, 'from skimage import morphology as skmorph\n'), ((7579, 7625), 'os.path.join', 'os.path.join', (['vkitti_sequence_dirpath', '"""*.png"""'], {}), "(vkitti_sequence_dirpath, '*.png')\n", (7591, 7625), False, 'import sys, os, glob\n'), ((8467, 8556), 'os.path.join', 'os.path.join', (['output_sequence_dirpath', 'kitti_sequence', 'camera_dirpath', '"""sparse_depth"""'], {}), "(output_sequence_dirpath, kitti_sequence, camera_dirpath,\n 'sparse_depth')\n", (8479, 8556), False, 'import sys, os, glob\n'), ((8612, 8701), 'os.path.join', 'os.path.join', (['output_sequence_dirpath', 'kitti_sequence', 'camera_dirpath', '"""validity_map"""'], 
{}), "(output_sequence_dirpath, kitti_sequence, camera_dirpath,\n 'validity_map')\n", (8624, 8701), False, 'import sys, os, glob\n'), ((8761, 8854), 'os.path.join', 'os.path.join', (['output_sequence_dirpath', 'kitti_sequence', 'camera_dirpath', '"""semi_dense_depth"""'], {}), "(output_sequence_dirpath, kitti_sequence, camera_dirpath,\n 'semi_dense_depth')\n", (8773, 8854), False, 'import sys, os, glob\n'), ((8909, 8997), 'os.path.join', 'os.path.join', (['output_sequence_dirpath', 'kitti_sequence', 'camera_dirpath', '"""dense_depth"""'], {}), "(output_sequence_dirpath, kitti_sequence, camera_dirpath,\n 'dense_depth')\n", (8921, 8997), False, 'import sys, os, glob\n'), ((9053, 9142), 'os.path.join', 'os.path.join', (['output_sequence_dirpath', 'kitti_sequence', 'camera_dirpath', '"""ground_truth"""'], {}), "(output_sequence_dirpath, kitti_sequence, camera_dirpath,\n 'ground_truth')\n", (9065, 9142), False, 'import sys, os, glob\n'), ((3716, 3745), 'numpy.unique', 'np.unique', (['kitti_validity_map'], {}), '(kitti_validity_map)\n', (3725, 3745), True, 'import numpy as np\n'), ((9801, 9810), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (9808, 9810), True, 'import multiprocessing as mp\n'), ((8274, 8335), 'os.path.join', 'os.path.join', (['kitti_sequence_dirpath', 'camera_dirpath', '"""*.png"""'], {}), "(kitti_sequence_dirpath, camera_dirpath, '*.png')\n", (8286, 8335), False, 'import sys, os, glob\n'), ((9505, 9535), 'os.path.exists', 'os.path.exists', (['output_dirpath'], {}), '(output_dirpath)\n', (9519, 9535), False, 'import sys, os, glob\n'), ((9557, 9584), 'os.makedirs', 'os.makedirs', (['output_dirpath'], {}), '(output_dirpath)\n', (9568, 9584), False, 'import sys, os, glob\n')] |
from pepnet.encoder import Encoder
from nose.tools import eq_
import numpy as np
def test_encoder_index_lists():
    """Encoding several peptides yields one list of residue indices each."""
    enc = Encoder()
    s, a = enc.index_dict["S"], enc.index_dict["A"]
    encoded = enc.encode_index_lists(["SSS", "AAA", "SAS"])
    eq_(encoded, [[s, s, s], [a, a, a], [s, a, s]])
def test_encoder_prepare_sequences_padding():
    """Short sequences are right-padded with '-' up to the target length."""
    enc = Encoder()
    eq_(enc.prepare_sequences(["SISI"], 5), ["SISI-"])
def test_encoder_prepare_sequences_start_token():
    """With add_start_tokens, a '^' marker is prepended before padding."""
    enc = Encoder(add_start_tokens=True)
    eq_(enc.prepare_sequences(["SISI"], 5), ["^SISI-"])
def test_encoder_prepare_sequences_stop_token():
    """With add_stop_tokens, a '$' marker is appended before padding."""
    enc = Encoder(add_stop_tokens=True)
    eq_(enc.prepare_sequences(["SISI"], 5), ["SISI$-"])
def test_encoder_index_array():
    """encode_index_array zero-pads shorter peptides on the right."""
    enc = Encoder()
    s, a = enc.index_dict["S"], enc.index_dict["A"]
    assert s > 0
    assert a > 0
    X = enc.encode_index_array(["SSS", "AAA", "SASA"], max_peptide_length=4)
    expected = np.array([
        [s, s, s, 0],
        [a, a, a, 0],
        [s, a, s, a],
    ])
    assert (X == expected).all()
def test_encoder_FOFE():
    """FOFE encoding maps each peptide to a single 20-dim vector."""
    # Fixed-length mode disables the '-' gap character for short sequences.
    enc = Encoder(variable_length_sequences=False)
    encoded = enc.encode_FOFE(["AAA", "SSS", "SASA"])
    eq_(encoded.shape, (3, 20))
def test_encoder_FOFE_bidirectional():
    """Bidirectional FOFE concatenates both directions into 40 dims."""
    # Fixed-length mode disables the '-' gap character for short sequences.
    enc = Encoder(variable_length_sequences=False)
    encoded = enc.encode_FOFE(["AAA", "SSS", "SASA"], bidirectional=True)
    eq_(encoded.shape, (3, 40))
def test_encoder_blosum():
    """BLOSUM encoding yields one 20-dim row per residue."""
    enc = Encoder(variable_length_sequences=False)
    encoded = enc.encode_blosum(["AAA", "SSS", "EEE"])
    eq_(encoded.shape, (3, 3, 20))
def test_encoder_pmbec():
    """PMBEC encoding yields one 20-dim row per residue."""
    enc = Encoder(variable_length_sequences=False)
    encoded = enc.encode_pmbec(["AAA", "SSS", "EEE"])
    eq_(encoded.shape, (3, 3, 20))
def test_encoder_onehot():
    """One-hot encoding yields one 20-dim row per residue."""
    enc = Encoder(variable_length_sequences=False)
    encoded = enc.encode_onehot(["AAA", "SSS", "EEE"])
    eq_(encoded.shape, (3, 3, 20))
def test_encoder_blosum_with_positional_features():
    """Position and centrality features add two columns to BLOSUM rows."""
    enc = Encoder(
        variable_length_sequences=False,
        add_normalized_position=True,
        add_normalized_centrality=True)
    encoded = enc.encode_blosum(["AAA", "SSS", "EEE"])
    eq_(encoded.shape, (3, 3, 22))
def test_encoder_pmbec_with_positional_features():
    """Position and centrality features add two columns to PMBEC rows."""
    enc = Encoder(
        variable_length_sequences=False,
        add_normalized_position=True,
        add_normalized_centrality=True)
    encoded = enc.encode_pmbec(["AAA", "SSS", "EEE"])
    eq_(encoded.shape, (3, 3, 22))
def test_encoder_onehot_with_positional_features():
    """Position and centrality features add two columns to one-hot rows."""
    enc = Encoder(
        variable_length_sequences=False,
        add_normalized_position=True,
        add_normalized_centrality=True)
    encoded = enc.encode_onehot(["AAA", "SSS", "EEE"])
    eq_(encoded.shape, (3, 3, 22))
| [
"numpy.array",
"nose.tools.eq_",
"pepnet.encoder.Encoder"
] | [((128, 137), 'pepnet.encoder.Encoder', 'Encoder', ([], {}), '()\n', (135, 137), False, 'from pepnet.encoder import Encoder\n'), ((282, 373), 'nose.tools.eq_', 'eq_', (['index_lists', '[[S_idx, S_idx, S_idx], [A_idx, A_idx, A_idx], [S_idx, A_idx, S_idx]]'], {}), '(index_lists, [[S_idx, S_idx, S_idx], [A_idx, A_idx, A_idx], [S_idx,\n A_idx, S_idx]])\n', (285, 373), False, 'from nose.tools import eq_\n'), ((461, 470), 'pepnet.encoder.Encoder', 'Encoder', ([], {}), '()\n', (468, 470), False, 'from pepnet.encoder import Encoder\n'), ((595, 625), 'pepnet.encoder.Encoder', 'Encoder', ([], {'add_start_tokens': '(True)'}), '(add_start_tokens=True)\n', (602, 625), False, 'from pepnet.encoder import Encoder\n'), ((751, 780), 'pepnet.encoder.Encoder', 'Encoder', ([], {'add_stop_tokens': '(True)'}), '(add_stop_tokens=True)\n', (758, 780), False, 'from pepnet.encoder import Encoder\n'), ((889, 898), 'pepnet.encoder.Encoder', 'Encoder', ([], {}), '()\n', (896, 898), False, 'from pepnet.encoder import Encoder\n'), ((1109, 1205), 'numpy.array', 'np.array', (['[[S_idx, S_idx, S_idx, 0], [A_idx, A_idx, A_idx, 0], [S_idx, A_idx, S_idx,\n A_idx]]'], {}), '([[S_idx, S_idx, S_idx, 0], [A_idx, A_idx, A_idx, 0], [S_idx, A_idx,\n S_idx, A_idx]])\n', (1117, 1205), True, 'import numpy as np\n'), ((1378, 1418), 'pepnet.encoder.Encoder', 'Encoder', ([], {'variable_length_sequences': '(False)'}), '(variable_length_sequences=False)\n', (1385, 1418), False, 'from pepnet.encoder import Encoder\n'), ((1475, 1496), 'nose.tools.eq_', 'eq_', (['x.shape', '(3, 20)'], {}), '(x.shape, (3, 20))\n', (1478, 1496), False, 'from nose.tools import eq_\n'), ((1623, 1663), 'pepnet.encoder.Encoder', 'Encoder', ([], {'variable_length_sequences': '(False)'}), '(variable_length_sequences=False)\n', (1630, 1663), False, 'from pepnet.encoder import Encoder\n'), ((1740, 1761), 'nose.tools.eq_', 'eq_', (['x.shape', '(3, 40)'], {}), '(x.shape, (3, 40))\n', (1743, 1761), False, 'from nose.tools import eq_\n'), ((1804, 
1844), 'pepnet.encoder.Encoder', 'Encoder', ([], {'variable_length_sequences': '(False)'}), '(variable_length_sequences=False)\n', (1811, 1844), False, 'from pepnet.encoder import Encoder\n'), ((1902, 1926), 'nose.tools.eq_', 'eq_', (['x.shape', '(3, 3, 20)'], {}), '(x.shape, (3, 3, 20))\n', (1905, 1926), False, 'from nose.tools import eq_\n'), ((1968, 2008), 'pepnet.encoder.Encoder', 'Encoder', ([], {'variable_length_sequences': '(False)'}), '(variable_length_sequences=False)\n', (1975, 2008), False, 'from pepnet.encoder import Encoder\n'), ((2065, 2089), 'nose.tools.eq_', 'eq_', (['x.shape', '(3, 3, 20)'], {}), '(x.shape, (3, 3, 20))\n', (2068, 2089), False, 'from nose.tools import eq_\n'), ((2132, 2172), 'pepnet.encoder.Encoder', 'Encoder', ([], {'variable_length_sequences': '(False)'}), '(variable_length_sequences=False)\n', (2139, 2172), False, 'from pepnet.encoder import Encoder\n'), ((2230, 2254), 'nose.tools.eq_', 'eq_', (['x.shape', '(3, 3, 20)'], {}), '(x.shape, (3, 3, 20))\n', (2233, 2254), False, 'from nose.tools import eq_\n'), ((2322, 2428), 'pepnet.encoder.Encoder', 'Encoder', ([], {'variable_length_sequences': '(False)', 'add_normalized_position': '(True)', 'add_normalized_centrality': '(True)'}), '(variable_length_sequences=False, add_normalized_position=True,\n add_normalized_centrality=True)\n', (2329, 2428), False, 'from pepnet.encoder import Encoder\n'), ((2507, 2531), 'nose.tools.eq_', 'eq_', (['x.shape', '(3, 3, 22)'], {}), '(x.shape, (3, 3, 22))\n', (2510, 2531), False, 'from nose.tools import eq_\n'), ((2598, 2704), 'pepnet.encoder.Encoder', 'Encoder', ([], {'variable_length_sequences': '(False)', 'add_normalized_position': '(True)', 'add_normalized_centrality': '(True)'}), '(variable_length_sequences=False, add_normalized_position=True,\n add_normalized_centrality=True)\n', (2605, 2704), False, 'from pepnet.encoder import Encoder\n'), ((2782, 2806), 'nose.tools.eq_', 'eq_', (['x.shape', '(3, 3, 22)'], {}), '(x.shape, (3, 3, 22))\n', (2785, 
2806), False, 'from nose.tools import eq_\n'), ((2874, 2980), 'pepnet.encoder.Encoder', 'Encoder', ([], {'variable_length_sequences': '(False)', 'add_normalized_position': '(True)', 'add_normalized_centrality': '(True)'}), '(variable_length_sequences=False, add_normalized_position=True,\n add_normalized_centrality=True)\n', (2881, 2980), False, 'from pepnet.encoder import Encoder\n'), ((3059, 3083), 'nose.tools.eq_', 'eq_', (['x.shape', '(3, 3, 22)'], {}), '(x.shape, (3, 3, 22))\n', (3062, 3083), False, 'from nose.tools import eq_\n')] |
import numpy as np
import logging
class Tracker():
    """Greedy single-object tracker that gates detections by center distance.

    Each detection is a box-like array ``[x1, y1, x2, y2, ...]``.  ``update``
    is called once per frame with ``result`` where ``result[0]`` is the list
    of detections for that frame.  A candidate track must survive
    ``temp_track_len`` consecutive frames (with centers closer than
    ``thresh_distance`` to one another) before it is promoted into
    ``current_tracks``; frames without an accepted detection are recorded
    as ``None``.
    """

    def __init__(self):
        self.current_tracks = []   # one entry per processed frame: accepted box or None
        self.temp_tracks = []      # candidate boxes awaiting confirmation
        self.temp_track_len = 3    # frames needed to confirm a candidate track
        self.thresh_distance = 13  # max Euclidean center distance to associate
        self.avg_dist = []         # recorded association distances (diagnostics)

    def convert_to_center(self, result):
        """Return the integer (x, y) center of a box [x1, y1, x2, y2, ...]."""
        return np.array([int((result[2] + result[0]) / 2), int((result[3] + result[1]) / 2)])

    def update(self, result):
        """Consume one frame's detections and extend the track history."""
        # No detections: break any candidate track and record a miss.
        if len(result[0]) == 0:
            self.current_tracks.append(None)
            self.temp_tracks = []
            self.zero_res = result
        elif len(result[0]) == 1:
            self.single_res = result[0][0]
            logging.debug("SINGLE RES")
            # No confirmed track (empty history or last frame was a miss).
            if len(self.current_tracks) == 0 or self.current_tracks[-1] is None:
                logging.debug("NONE TRACK CONTINUED")
                # Grow/validate the candidate track, or start a new one.
                if len(self.temp_tracks) == 0:
                    self.temp_tracks.append(result[0][0])
                    self.current_tracks.append(None)
                else:
                    curr_center = self.convert_to_center(result[0][0])
                    last_center = self.convert_to_center(self.temp_tracks[-1])
                    self.avg_dist.append(np.linalg.norm(curr_center - last_center))
                    if np.linalg.norm(curr_center - last_center) < self.thresh_distance:
                        logging.debug("DIST CRITERIA SATISFIED")
                        self.temp_tracks.append(result[0][0])
                        if len(self.temp_tracks) < self.temp_track_len:
                            self.current_tracks.append(None)
                        else:
                            # Candidate confirmed: back-fill the placeholder
                            # history with the candidate boxes.
                            logging.debug("CURRENT_TRACK UPDATED-------------------")
                            self.current_tracks.append(None)
                            self.current_tracks[-len(self.temp_tracks):] = self.temp_tracks.copy()
                            self.temp_tracks = []
                    else:
                        # Candidate broken: erase its placeholder history.
                        self.current_tracks.append(None)
                        self.current_tracks[-(len(self.temp_tracks) + 1):] = [None for _ in
                                                                              range(len(self.temp_tracks) + 1)]
                        self.temp_tracks = []
            else:
                # Confirmed track exists: accept the detection only if close.
                logging.debug("CURRENT TRACK CONTINUED")
                curr_center = self.convert_to_center(result[0][0])
                last_center = self.convert_to_center(self.current_tracks[-1])
                if np.linalg.norm(curr_center - last_center) < self.thresh_distance:
                    logging.debug("GOOD TRACK APPENDED")
                    self.avg_dist.append(np.linalg.norm(curr_center - last_center))
                    self.current_tracks.append(result[0][0])
                else:
                    logging.debug("BAD TRACK APPENDED")
                    self.current_tracks.append(None)
        elif len(result[0]) > 1:
            self.multi_res = result
            logging.debug("MULTI RES")
            if len(self.current_tracks) == 0 or self.current_tracks[-1] is None:
                if len(self.temp_tracks) > 0:
                    appended = False
                    for i in range(len(result[0])):
                        curr_res = result[0][i]
                        curr_center = self.convert_to_center(curr_res)
                        last_center = self.convert_to_center(self.temp_tracks[-1])
                        if np.linalg.norm(curr_center - last_center) < self.thresh_distance:
                            # BUG FIX: append the detection that actually
                            # matched, not unconditionally result[0][0].
                            self.temp_tracks.append(curr_res)
                            if len(self.temp_tracks) < self.temp_track_len:
                                self.current_tracks.append(None)
                            else:
                                logging.debug("CURRENT_TRACK UPDATED-------------------")
                                self.current_tracks.append(None)
                                self.current_tracks[-len(self.temp_tracks):] = self.temp_tracks.copy()
                                self.temp_tracks = []
                            appended = True
                            break
                    if not appended:
                        # No detection matched: drop the candidate history.
                        self.current_tracks.append(None)
                        self.current_tracks[-(len(self.temp_tracks) + 1):] = [None for _ in
                                                                              range(len(self.temp_tracks) + 1)]
                        self.temp_tracks = []
                else:
                    self.current_tracks.append(None)
            else:
                # Confirmed track: pick the first detection close enough to it.
                self.temp_tracks = []
                appended = False
                for i in range(len(result[0])):
                    curr_res = result[0][i]
                    curr_center = self.convert_to_center(curr_res)
                    last_center = self.convert_to_center(self.current_tracks[-1])
                    if np.linalg.norm(curr_center - last_center) < self.thresh_distance:
                        appended = True
                        self.current_tracks.append(curr_res)
                        break
                if not appended:
                    self.current_tracks.append(None)

    def calc_missing_intervals(self, length=2):
        """Fill gaps of at most *length* missed frames by interpolation.

        Scans ``current_tracks`` with two cursors (``i`` at the start of a
        ``None`` run, ``j`` at its end); short runs flanked by detections
        are replaced in place via :meth:`interpolate_bboxes`.

        Returns:
            Number of gaps that were filled.
        """
        i = 0
        j = 0
        misses = 0
        while i < len(self.current_tracks) and j < len(self.current_tracks):
            if self.current_tracks[i] is not None:
                i += 1
                j = i
            elif self.current_tracks[i] is None and self.current_tracks[j] is None:
                j += 1
            elif self.current_tracks[i] is None and self.current_tracks[j] is not None and 0 < (j - i) <= length and i == 0:
                # Leading gap has no left anchor, so it cannot be interpolated.
                i = j
            elif self.current_tracks[i] is None and self.current_tracks[j] is not None and 0 < (j - i) <= length:
                interp_tracks = self.interpolate_bboxes(self.current_tracks[i - 1:j + 1])
                self.current_tracks[i - 1:j + 1] = interp_tracks
                i = j
                misses += 1
            elif self.current_tracks[i] is None and self.current_tracks[j] is not None and (j - i) > length:
                i = j
                # NOTE(review): after ``i = j`` the condition below is always
                # false (j - i == 0); kept as-is to preserve behavior.
                if 0 < (j - i) <= length:
                    misses += 1
        return misses

    def interpolate_bboxes(self, tracks):
        """Linearly interpolate interior boxes between tracks[0] and tracks[-1].

        Interior entries (the ``None`` gap) are replaced by boxes whose
        centers move linearly between the two anchor boxes and whose size
        is the average of the anchors' sizes.  The trailing field (score or
        class id) is copied from the first anchor.
        """
        first = tracks[0]
        last = tracks[-1]
        first_center = [(first[2] + first[0]) / 2, (first[3] + first[1]) / 2]
        last_center = [(last[2] + last[0]) / 2, (last[3] + last[1]) / 2]
        # Half extents averaged over both anchor boxes.
        half_width = ((abs(first[2] - first[0]) + abs(last[2] - last[0])) / 2) / 2
        half_height = ((abs(first[3] - first[1]) + abs(last[3] - last[1])) / 2) / 2
        i = 1
        while i < len(tracks) - 1:
            new_x = first_center[0] + ((i + 1) / len(tracks)) * (last_center[0] - first_center[0])
            new_y = first_center[1] + ((i + 1) / len(tracks)) * (last_center[1] - first_center[1])
            new_bbox = np.array(
                [new_x - half_width, new_y - half_height, new_x + half_width, new_y + half_height, first[-1]])
            tracks[i] = new_bbox
            i += 1
        return tracks
| [
"numpy.array",
"logging.debug",
"numpy.linalg.norm"
] | [((7587, 7695), 'numpy.array', 'np.array', (['[new_x - half_width, new_y - half_height, new_x + half_width, new_y +\n half_height, first[-1]]'], {}), '([new_x - half_width, new_y - half_height, new_x + half_width, \n new_y + half_height, first[-1]])\n', (7595, 7695), True, 'import numpy as np\n'), ((836, 863), 'logging.debug', 'logging.debug', (['"""SINGLE RES"""'], {}), "('SINGLE RES')\n", (849, 863), False, 'import logging\n'), ((1030, 1067), 'logging.debug', 'logging.debug', (['"""NONE TRACK CONTINUED"""'], {}), "('NONE TRACK CONTINUED')\n", (1043, 1067), False, 'import logging\n'), ((2806, 2846), 'logging.debug', 'logging.debug', (['"""CURRENT TRACK CONTINUED"""'], {}), "('CURRENT TRACK CONTINUED')\n", (2819, 2846), False, 'import logging\n'), ((3494, 3520), 'logging.debug', 'logging.debug', (['"""MULTI RES"""'], {}), "('MULTI RES')\n", (3507, 3520), False, 'import logging\n'), ((3012, 3053), 'numpy.linalg.norm', 'np.linalg.norm', (['(curr_center - last_canter)'], {}), '(curr_center - last_canter)\n', (3026, 3053), True, 'import numpy as np\n'), ((3098, 3134), 'logging.debug', 'logging.debug', (['"""GOOD TRACK APPENDED"""'], {}), "('GOOD TRACK APPENDED')\n", (3111, 3134), False, 'import logging\n'), ((3322, 3357), 'logging.debug', 'logging.debug', (['"""BAD TRACK APPENDED"""'], {}), "('BAD TRACK APPENDED')\n", (3335, 3357), False, 'import logging\n'), ((1583, 1624), 'numpy.linalg.norm', 'np.linalg.norm', (['(curr_center - last_center)'], {}), '(curr_center - last_center)\n', (1597, 1624), True, 'import numpy as np\n'), ((1650, 1691), 'numpy.linalg.norm', 'np.linalg.norm', (['(curr_center - last_center)'], {}), '(curr_center - last_center)\n', (1664, 1691), True, 'import numpy as np\n'), ((1740, 1780), 'logging.debug', 'logging.debug', (['"""DIST CRITERIA SATISFIED"""'], {}), "('DIST CRITERIA SATISFIED')\n", (1753, 1780), False, 'import logging\n'), ((3176, 3217), 'numpy.linalg.norm', 'np.linalg.norm', (['(curr_center - last_canter)'], {}), '(curr_center - 
last_canter)\n', (3190, 3217), True, 'import numpy as np\n'), ((2035, 2092), 'logging.debug', 'logging.debug', (['"""CURRENT_TRACK UPDATED-------------------"""'], {}), "('CURRENT_TRACK UPDATED-------------------')\n", (2048, 2092), False, 'import logging\n'), ((5560, 5601), 'numpy.linalg.norm', 'np.linalg.norm', (['(curr_center - last_center)'], {}), '(curr_center - last_center)\n', (5574, 5601), True, 'import numpy as np\n'), ((4018, 4059), 'numpy.linalg.norm', 'np.linalg.norm', (['(curr_center - last_center)'], {}), '(curr_center - last_center)\n', (4032, 4059), True, 'import numpy as np\n'), ((4358, 4415), 'logging.debug', 'logging.debug', (['"""CURRENT_TRACK UPDATED-------------------"""'], {}), "('CURRENT_TRACK UPDATED-------------------')\n", (4371, 4415), False, 'import logging\n')] |
"""
假设有一个带有标签的样本数据集(训练样本集),其中包含每条数据与所属分类的对应关系。
输入没有标签的新数据后,将新数据的每个特征与样本集中数据对应的特征进行比较。
计算新数据与样本数据集中每条数据的距离。
对求得的所有距离进行排序(从小到大,越小表示越相似)。
取前 k (k 一般小于等于 20 )个样本数据对应的分类标签。
求 k 个数据中出现次数最多的分类标签作为新数据的分类。
收集数据:提供文本文件
准备数据:使用 Python 解析文本文件
分析数据:使用 Matplotlib 画二维散点图
训练算法:此步骤不适用于 k-近邻算法
测试算法:使用海伦提供的部分数据作为测试样本。
测试样本和非测试样本的区别在于:
测试样本是已经完成分类的数据,如果预测分类与实际类别不同,则标记为一个错误。
使用算法:产生简单的命令行程序,然后海伦可以输入一些特征数据以判断对方是否为自己喜欢的类型。
"""
import matplotlib.pyplot as plt
import numpy as np
def file2matrix():
    """Parse 'dating.txt' into a feature matrix and a label vector.

    Each line holds three tab-separated features
    (flight miles per year, % of time playing games, litres of ice cream
    per week) followed by an integer class label.

    Returns:
        (features, labels) where features is an (n, 3) masked array and
        labels is a list of ints.
    """
    with open('dating.txt') as f:
        lines = f.readlines()
    features = np.ma.zeros((len(lines), 3))
    labels = []
    for row, line in enumerate(lines):
        fields = line.strip().split('\t')
        features[row] = fields[0:3]
        labels.append(int(fields[-1]))
    return features, labels
def draw():
    """Scatter-plot the first two dating features, colored by class label."""
    figure = plt.figure()
    # Single subplot on a 1x1 grid.
    axis = figure.add_subplot(111)
    features, labels = file2matrix()
    label_arr = np.ma.array(labels)
    # Marker size scales with the class label; color is the label itself.
    axis.scatter(features[:, 0], features[:, 1], 15.0 * label_arr, label_arr)
    plt.show()
# Script entry point: render the scatter plot of the dating data.
if __name__ == '__main__':
    draw()
| [
"matplotlib.pyplot.figure",
"numpy.ma.array",
"matplotlib.pyplot.show"
] | [((892, 904), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (902, 904), True, 'import matplotlib.pyplot as plt\n'), ((1114, 1124), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1122, 1124), True, 'import matplotlib.pyplot as plt\n'), ((1083, 1102), 'numpy.ma.array', 'np.ma.array', (['vector'], {}), '(vector)\n', (1094, 1102), True, 'import numpy as np\n'), ((1054, 1073), 'numpy.ma.array', 'np.ma.array', (['vector'], {}), '(vector)\n', (1065, 1073), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 29 11:33:00 2019
@author: Peter
"""
import os, h5py
from uclahedp.tools import hdf as hdftools
from uclahedp.tools import dataset
import numpy as np
import matplotlib.pyplot as plt
def abs_phase(sig, ref):
    """Compute the unwrapped phase difference between *sig* and *ref*.

    Both inputs are real 1-D arrays sampled identically; *ref* is assumed
    to be dominated by a single carrier frequency.  The carrier bin is
    located from the reference magnitude spectrum, both signals are
    band-passed around it (all negative frequencies are zeroed, yielding
    analytic signals), and the phase of ``sig * conj(ref)`` is returned,
    unwrapped over time.

    Fixes relative to the original implementation:
    - the dominant bin is found from ``np.abs`` of the spectrum (argmax on
      a complex array compares real parts only);
    - the argmax over ``ref_fft[1:nyquist]`` is offset by +1 to recover
      the absolute bin index;
    - the upper zeroing slice runs to the end of the array (``[x:-1]``
      left the last bin untouched).

    Args:
        sig: signal array.
        ref: reference array (same length as sig).

    Returns:
        ndarray of unwrapped phase differences in radians.
    """
    # Calculate the ffts
    ref_fft = np.fft.fft(ref)
    sig_fft = np.fft.fft(sig)

    nfreq = ref_fft.shape[0]

    # Determine the dominant positive frequency from the reference signal.
    nyquist = nfreq // 2
    ref_freq = np.argmax(np.abs(ref_fft[1:nyquist])) + 1

    # Apply bandpass: keep a small window around the carrier and zero
    # everything else, intentionally including all negative frequencies.
    delta = int(nfreq * 0.01)  # arbitrary small half-width of the pass band
    lo = max(ref_freq - delta, 0)  # guard against a negative slice start
    for spectrum in (ref_fft, sig_fft):
        spectrum[0:lo] = 0
        spectrum[ref_freq + delta:] = 0

    # Reverse FFT back to (now complex, analytic) time-domain signals.
    sig = np.fft.ifft(sig_fft)
    ref = np.fft.ifft(ref_fft)

    # Phase difference via multiplication with the conjugate reference.
    sig = sig * np.conj(ref)
    phase = np.angle(sig)
    # Unwrap the phase (correct jumps of more than pi).
    phase = np.unwrap(phase)

    return phase
if __name__ == "__main__":
sig_file = hdftools.hdfPath(os.path.join("F:","LAPD_Mar2018","RAW", "run83_interferometer_signal.hdf5"))
ref_file = hdftools.hdfPath(os.path.join("F:","LAPD_Mar2018","RAW", "run83_interferometer_reference.hdf5"))
dest_file = hdftools.hdfPath(os.path.join("F:","LAPD_Mar2018","FULL", "run83_interferometer.hdf5"))
with h5py.File(sig_file.file) as f:
sig = f['data'][:,0]
time = f['time'][:]
nti = time.shape[0]
with h5py.File(ref_file.file) as f:
ref = f['data'][0:nti,0]
phase = abs_phase(sig, ref)
axes = [{'ax':time, 'name':'time', 'unit':'s'}]
dataset.createDataset(phase/3.14, axes, dest_file, dataunit='rad', attrs=None)
| [
"uclahedp.tools.dataset.createDataset",
"numpy.conj",
"numpy.unwrap",
"numpy.fft.fft",
"numpy.argmax",
"os.path.join",
"numpy.angle",
"h5py.File",
"numpy.fft.ifft"
] | [((298, 313), 'numpy.fft.fft', 'np.fft.fft', (['ref'], {}), '(ref)\n', (308, 313), True, 'import numpy as np\n'), ((329, 344), 'numpy.fft.fft', 'np.fft.fft', (['sig'], {}), '(sig)\n', (339, 344), True, 'import numpy as np\n'), ((490, 519), 'numpy.argmax', 'np.argmax', (['ref_fft[1:nyquist]'], {}), '(ref_fft[1:nyquist])\n', (499, 519), True, 'import numpy as np\n'), ((856, 876), 'numpy.fft.ifft', 'np.fft.ifft', (['sig_fft'], {}), '(sig_fft)\n', (867, 876), True, 'import numpy as np\n'), ((888, 908), 'numpy.fft.ifft', 'np.fft.ifft', (['ref_fft'], {}), '(ref_fft)\n', (899, 908), True, 'import numpy as np\n'), ((1052, 1065), 'numpy.angle', 'np.angle', (['sig'], {}), '(sig)\n', (1060, 1065), True, 'import numpy as np\n'), ((1134, 1150), 'numpy.unwrap', 'np.unwrap', (['phase'], {}), '(phase)\n', (1143, 1150), True, 'import numpy as np\n'), ((1895, 1980), 'uclahedp.tools.dataset.createDataset', 'dataset.createDataset', (['(phase / 3.14)', 'axes', 'dest_file'], {'dataunit': '"""rad"""', 'attrs': 'None'}), "(phase / 3.14, axes, dest_file, dataunit='rad', attrs=None\n )\n", (1916, 1980), False, 'from uclahedp.tools import dataset\n'), ((997, 1009), 'numpy.conj', 'np.conj', (['ref'], {}), '(ref)\n', (1004, 1009), True, 'import numpy as np\n'), ((1272, 1349), 'os.path.join', 'os.path.join', (['"""F:"""', '"""LAPD_Mar2018"""', '"""RAW"""', '"""run83_interferometer_signal.hdf5"""'], {}), "('F:', 'LAPD_Mar2018', 'RAW', 'run83_interferometer_signal.hdf5')\n", (1284, 1349), False, 'import os, h5py\n'), ((1382, 1467), 'os.path.join', 'os.path.join', (['"""F:"""', '"""LAPD_Mar2018"""', '"""RAW"""', '"""run83_interferometer_reference.hdf5"""'], {}), "('F:', 'LAPD_Mar2018', 'RAW', 'run83_interferometer_reference.hdf5'\n )\n", (1394, 1467), False, 'import os, h5py\n'), ((1496, 1567), 'os.path.join', 'os.path.join', (['"""F:"""', '"""LAPD_Mar2018"""', '"""FULL"""', '"""run83_interferometer.hdf5"""'], {}), "('F:', 'LAPD_Mar2018', 'FULL', 'run83_interferometer.hdf5')\n", (1508, 1567), 
False, 'import os, h5py\n'), ((1583, 1607), 'h5py.File', 'h5py.File', (['sig_file.file'], {}), '(sig_file.file)\n', (1592, 1607), False, 'import os, h5py\n'), ((1726, 1750), 'h5py.File', 'h5py.File', (['ref_file.file'], {}), '(ref_file.file)\n', (1735, 1750), False, 'import os, h5py\n')] |
"""Module for Bresenham kernel"""
import numpy as np
from copa_map.util.occ_grid import OccGrid
import cv2
class KernelGrid(OccGrid):
    """Class for creating an occupation map with widened walls"""

    def __init__(self, base_occ_map: OccGrid, digitize_size=0.2, num_of_borders=2):
        """
        Constructor

        Args:
            base_occ_map: Occupancy grid map to use as basis of the kernel. The Kernel grid will have the same
                          dimension and origin as the map
            digitize_size: Discretization size for grid bins
            num_of_borders: Number of cells around occupied cells, from which covariance factor increases linearly
                            from 0 to 1
        """
        # We do not need the full map resolution, so we resize the image based on the given parameter
        assert digitize_size >= base_occ_map.resolution,\
            "Kernel map discretization should be larger than Occupancy grid map resolution"
        # Rescale the occupancy map
        # New pixel count derives from the physical extent (pixels * resolution) divided by the new bin size.
        new_img_size = (np.array(base_occ_map.img.shape) * base_occ_map.resolution / digitize_size).astype(int)
        # cv2.resize expects (width, height), i.e. the shape tuple reversed; nearest-neighbor keeps values binary.
        new_img = cv2.resize(base_occ_map.img, dsize=(new_img_size[1], new_img_size[0]),
                             interpolation=cv2.INTER_NEAREST_EXACT)
        super(KernelGrid, self).__init__(img=new_img,
                                         width=base_occ_map.width,
                                         height=base_occ_map.height,
                                         resolution=digitize_size,
                                         origin=base_occ_map.orig,
                                         rotation=base_occ_map.rotation,
                                         )
        self.digitize_size = digitize_size
        self.num_of_borders = num_of_borders
        self._create_map()

    def _create_map(self):
        """
        Creates a grid array characterizing walls and cells near walls

        Reads the map and creates cells with the defined digitize_size, where walls are classified with 0
        and free cells with 1. The values of surrounding cells increase linearly to 1 depending on the
        number of neighboring cells num_of_borders
        """
        # Create kernel for dilation. Every pixels 8-neighbors should be extended
        kernel = np.ones((3, 3), np.uint8)
        # Get factor between extension border which determines the occupancy
        # Interpolates linearly so that every border increases occupancy by same amount
        increment = 1 / (self.num_of_borders + 1)
        # NOTE: adj_img and dil_img both alias self.img here; the masked assignment
        # below therefore mutates the image in place while dil_img is rebound each iteration.
        adj_img = dil_img = self.img
        # Extend the wall pixels by dilating the image, then multiplying with the respective factor for occupancy
        # reduction
        for i in np.arange(0, 1, increment):
            if i == 0:
                continue
            # Dilate the image from last iteration by one more border
            # Our map has zeros where we want to extend, so we need to use the inverse
            # (~ flips uint8 values: 0 <-> 255, so dilation grows the wall pixels).
            dil_img = cv2.dilate(~dil_img, kernel)
            dil_img = ~dil_img
            # Change the pixels of the new border, where the old image was still white (255) and the new
            # is now black (0)
            adj_img[np.logical_and(dil_img == 0, adj_img == 255)] = i * 255
        self.img = adj_img
        # Normalized (0..1) occupancy grid, flipped so row 0 is the map's bottom.
        self.map = np.flipud(adj_img.astype(float) / 255)
| [
"numpy.ones",
"numpy.logical_and",
"cv2.dilate",
"numpy.array",
"cv2.resize",
"numpy.arange"
] | [((1139, 1252), 'cv2.resize', 'cv2.resize', (['base_occ_map.img'], {'dsize': '(new_img_size[1], new_img_size[0])', 'interpolation': 'cv2.INTER_NEAREST_EXACT'}), '(base_occ_map.img, dsize=(new_img_size[1], new_img_size[0]),\n interpolation=cv2.INTER_NEAREST_EXACT)\n', (1149, 1252), False, 'import cv2\n'), ((2317, 2342), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (2324, 2342), True, 'import numpy as np\n'), ((2746, 2772), 'numpy.arange', 'np.arange', (['(0)', '(1)', 'increment'], {}), '(0, 1, increment)\n', (2755, 2772), True, 'import numpy as np\n'), ((3001, 3029), 'cv2.dilate', 'cv2.dilate', (['(~dil_img)', 'kernel'], {}), '(~dil_img, kernel)\n', (3011, 3029), False, 'import cv2\n'), ((3217, 3261), 'numpy.logical_and', 'np.logical_and', (['(dil_img == 0)', '(adj_img == 255)'], {}), '(dil_img == 0, adj_img == 255)\n', (3231, 3261), True, 'import numpy as np\n'), ((1033, 1065), 'numpy.array', 'np.array', (['base_occ_map.img.shape'], {}), '(base_occ_map.img.shape)\n', (1041, 1065), True, 'import numpy as np\n')] |
"""Test file to visualize detected trail lines from videos"""
# Usage --> python Trackviz.py 3
# 0 --> Amtala
# 1 --> Bamoner
# 2 --> Diamond
# 3 --> Fotepore
# 4 --> Gangasagar
import cv2
import json
import math
import time
import sys
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
from sklearn.cluster import KMeans
from os import listdir
from os.path import isfile, join
from numba import jit, float32, cuda
print("This script plots your data points on its respective image")
print("and additionally outputs a histogram with plot of frequency")
print("of vehicle object.")

# matplotlib axis/bg settings: image coordinates are normalised to [0, 1]
# with the y-axis inverted so pixel space plots naturally.
style.use("ggplot")
plt.title("Tracking distances")
plt.xlabel("Plot Number")
plt.ylabel("Plot points")
plt.xlim(0, 1)
plt.ylim(1, 0)

inputdir = "./out_traildetection_alt"
outputdir = "./out_trackviz"

# One background image and one tracked-coordinates JSON per site,
# selected together by the CLI index (0 Amtala .. 4 Gangasagar).
images = np.asarray(["../images/Sample_Amtala.jpg",
                    "../images/Sample_Bamoner.jpg",
                    "../images/Sample_Diamond.jpg",
                    "../images/Sample_Fotepore.jpg",
                    "../images/Sample_Gangasagar.jpg"])
json_files_track = np.asarray([inputdir + "/veh_A_c.json", inputdir + "/veh_B_c.json",
                             inputdir + "/veh_D_c.json", inputdir + "/veh_F_c.json", inputdir + "/veh_G_c.json"])
json_files_frames = np.asarray([inputdir + "/veh_A.json", inputdir + "/veh_B.json",
                              inputdir + "/veh_D.json", inputdir + "/veh_F.json", inputdir + "/veh_G.json"])

# Primary variables: dataset chosen via first CLI argument
image_to_open = images[int(sys.argv[1])]
file_to_open = json_files_track[int(sys.argv[1])]
# ----------------------------------------------
img = cv2.imread(image_to_open)
# Histogram bin edges: 0, 10, ..., 990 frames
bins = np.fromiter((i * 10 for i in range(100)), dtype="float32")

# Setup sub-plot
fig, ax = plt.subplots()
plt.imshow(img, extent=[0, 1, 1, 0])

# BUG FIX: `np.float` was removed in NumPy 1.24; use the builtin `float`.
FRAME_COUNTERS = np.zeros((0, 1), dtype=float)
with open(file_to_open, "r") as f:
    data = json.load(f)
    for tracked_vehicle in data:
        # Stores "list of co-ordinates" from json file
        COORD_LIST = np.zeros((0, 2), dtype=float)
        FRAME_COUNTERS = np.append(
            FRAME_COUNTERS, [[tracked_vehicle["frame_count"]]], axis=0)
        for coordinates in tracked_vehicle["objects"]:
            COORD_LIST = np.append(COORD_LIST,
                                   [[coordinates["center_x"],
                                     coordinates["center_y"]]], axis=0)
        # Scatter one vehicle's track on top of the site image
        ax.scatter(COORD_LIST[:, 0:1], COORD_LIST[:, 1:2])

plt.savefig(join(outputdir, "output.png"))
plt.show()
plt.clf()
# Histogram of track lengths (frames per tracked vehicle)
plt.hist(FRAME_COUNTERS, bins, histtype="bar", rwidth=0.75)
plt.savefig(join(outputdir, "track_length.png"))
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"numpy.asarray",
"os.path.join",
"numpy.append",
"numpy.zeros",
"matplotlib.style.use",
"json.load",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"... | [((633, 652), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (642, 652), False, 'from matplotlib import style\n'), ((653, 684), 'matplotlib.pyplot.title', 'plt.title', (['"""Tracking distances"""'], {}), "('Tracking distances')\n", (662, 684), True, 'import matplotlib.pyplot as plt\n'), ((685, 710), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Plot Number"""'], {}), "('Plot Number')\n", (695, 710), True, 'import matplotlib.pyplot as plt\n'), ((711, 736), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Plot points"""'], {}), "('Plot points')\n", (721, 736), True, 'import matplotlib.pyplot as plt\n'), ((737, 751), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (745, 751), True, 'import matplotlib.pyplot as plt\n'), ((752, 766), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1)', '(0)'], {}), '(1, 0)\n', (760, 766), True, 'import matplotlib.pyplot as plt\n'), ((902, 1085), 'numpy.asarray', 'np.asarray', (["['../images/Sample_Amtala.jpg', '../images/Sample_Bamoner.jpg',\n '../images/Sample_Diamond.jpg', '../images/Sample_Fotepore.jpg',\n '../images/Sample_Gangasagar.jpg']"], {}), "(['../images/Sample_Amtala.jpg', '../images/Sample_Bamoner.jpg',\n '../images/Sample_Diamond.jpg', '../images/Sample_Fotepore.jpg',\n '../images/Sample_Gangasagar.jpg'])\n", (912, 1085), True, 'import numpy as np\n'), ((1182, 1343), 'numpy.asarray', 'np.asarray', (["[inputdir + '/veh_A_c.json', inputdir + '/veh_B_c.json', inputdir +\n '/veh_D_c.json', inputdir + '/veh_F_c.json', inputdir + '/veh_G_c.json']"], {}), "([inputdir + '/veh_A_c.json', inputdir + '/veh_B_c.json', \n inputdir + '/veh_D_c.json', inputdir + '/veh_F_c.json', inputdir +\n '/veh_G_c.json'])\n", (1192, 1343), True, 'import numpy as np\n'), ((1387, 1533), 'numpy.asarray', 'np.asarray', (["[inputdir + '/veh_A.json', inputdir + '/veh_B.json', inputdir +\n '/veh_D.json', inputdir + '/veh_F.json', inputdir + '/veh_G.json']"], {}), "([inputdir + '/veh_A.json', inputdir + 
'/veh_B.json', inputdir +\n '/veh_D.json', inputdir + '/veh_F.json', inputdir + '/veh_G.json'])\n", (1397, 1533), True, 'import numpy as np\n'), ((1756, 1781), 'cv2.imread', 'cv2.imread', (['image_to_open'], {}), '(image_to_open)\n', (1766, 1781), False, 'import cv2\n'), ((1874, 1888), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1886, 1888), True, 'import matplotlib.pyplot as plt\n'), ((1889, 1925), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'extent': '[0, 1, 1, 0]'}), '(img, extent=[0, 1, 1, 0])\n', (1899, 1925), True, 'import matplotlib.pyplot as plt\n'), ((1944, 1976), 'numpy.zeros', 'np.zeros', (['(0, 1)'], {'dtype': 'np.float'}), '((0, 1), dtype=np.float)\n', (1952, 1976), True, 'import numpy as np\n'), ((2766, 2776), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2774, 2776), True, 'import matplotlib.pyplot as plt\n'), ((2777, 2786), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2784, 2786), True, 'import matplotlib.pyplot as plt\n'), ((2787, 2846), 'matplotlib.pyplot.hist', 'plt.hist', (['FRAME_COUNTERS', 'bins'], {'histtype': '"""bar"""', 'rwidth': '(0.75)'}), "(FRAME_COUNTERS, bins, histtype='bar', rwidth=0.75)\n", (2795, 2846), True, 'import matplotlib.pyplot as plt\n'), ((2024, 2036), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2033, 2036), False, 'import json\n'), ((2734, 2763), 'os.path.join', 'join', (['outputdir', '"""output.png"""'], {}), "(outputdir, 'output.png')\n", (2738, 2763), False, 'from os.path import isfile, join\n'), ((2859, 2894), 'os.path.join', 'join', (['outputdir', '"""track_length.png"""'], {}), "(outputdir, 'track_length.png')\n", (2863, 2894), False, 'from os.path import isfile, join\n'), ((2147, 2179), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {'dtype': 'np.float'}), '((0, 2), dtype=np.float)\n', (2155, 2179), True, 'import numpy as np\n'), ((2205, 2274), 'numpy.append', 'np.append', (['FRAME_COUNTERS', "[[tracked_vehicle['frame_count']]]"], {'axis': '(0)'}), "(FRAME_COUNTERS, 
[[tracked_vehicle['frame_count']]], axis=0)\n", (2214, 2274), True, 'import numpy as np\n'), ((2368, 2455), 'numpy.append', 'np.append', (['COORD_LIST', "[[coordinates['center_x'], coordinates['center_y']]]"], {'axis': '(0)'}), "(COORD_LIST, [[coordinates['center_x'], coordinates['center_y']]],\n axis=0)\n", (2377, 2455), True, 'import numpy as np\n')] |
import numpy as np
from imread import ijrois
from . import file_path
def test_rois_smoke():
    """Smoke test: the bundled zip holds four ROIs, one matching the lone .roi file."""
    zipped = ijrois.read_roi_zip(file_path('rois.zip'))
    assert len(zipped) == 4
    with open(file_path('0186-0099.roi'), 'rb') as fh:
        single = ijrois.read_roi(fh)
    assert any(np.array_equal(candidate, single) for candidate in zipped)
| [
"numpy.array_equal"
] | [((255, 276), 'numpy.array_equal', 'np.array_equal', (['ri', 'r'], {}), '(ri, r)\n', (269, 276), True, 'import numpy as np\n')] |
from pathlib import Path
from sklearn.model_selection import train_test_split
import numpy as np
from livelossplot.keras import PlotLossesCallback
import matplotlib.pyplot as plt
import seaborn as sns
from hyperas.distributions import uniform, choice
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Activation, Flatten, Dropout, BatchNormalization, Conv2D, MaxPooling2D, Input
from keras.datasets import cifar10
from keras import regularizers
from keras.callbacks import LearningRateScheduler, Callback
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
import seaborn as sns
import keras
## For reproducibility
from numpy.random import seed
seed(9251996)


def data():
    """Load the heart-disease training CSVs and return a 90/10 train/validation split.

    Returns
    -------
    X_train, Y_train, X_val, Y_val
        Feature frames and binary `heart_disease_present` targets.
    """
    feature_columns = ['slope_of_peak_exercise_st_segment',
                       'resting_blood_pressure',
                       'chest_pain_type',
                       'num_major_vessels',
                       'fasting_blood_sugar_gt_120_mg_per_dl',
                       'resting_ekg_results',
                       'serum_cholesterol_mg_per_dl',
                       'oldpeak_eq_st_depression',
                       'sex',
                       'age',
                       'max_heart_rate_achieved',
                       'exercise_induced_angina']
    values = pd.read_csv('train_values.csv', index_col='patient_id')
    labels = pd.read_csv('train_labels.csv', index_col='patient_id')
    predictors = values[feature_columns]
    target = labels.heart_disease_present
    # Fixed random_state keeps the split reproducible across runs.
    X_train, X_val, Y_train, Y_val = train_test_split(
        predictors, target, test_size=0.10, random_state=0)
    return X_train, Y_train, X_val, Y_val
def create_model(X_train, Y_train,X_val,Y_val):
    """Build, train and score a candidate MLP for hyperas.

    NOTE: the `{{ choice(...) }}` / `{{ uniform(...) }}` markers are hyperas
    template syntax, not valid plain Python -- hyperas extracts this function's
    source and substitutes sampled hyperparameters before executing it.
    Returns the dict format hyperopt expects: negative accuracy as loss.
    """
    inshape = 12   # number of selected input features
    outshape = 1   # single sigmoid/relu output for binary classification
    min_hlayers=3  # fixed number of hidden layers
    model = Sequential()
    for i in range(min_hlayers):
        if i==0:
            # First layer needs the input shape
            model.add(Dense({{ choice(range(1024)) }},input_shape=(inshape,)))
            model.add(Activation({{ choice(['relu','sigmoid']) }})) ## Choose between relu or sigmoid activation
            model.add(Dropout({{ uniform(0,1) }})) ## Choose dropout value using uniform distribution of values from 0 to 1
        else:
            model.add(Dense({{ choice(range(1024)) }}))
            model.add(Activation({{ choice(['relu','sigmoid']) }}))
            model.add(Dropout({{ uniform(0,1) }}))
    model.add(Dense(outshape))
    model.add(Activation({{choice(['relu','sigmoid']) }}))
    ## Hyperparameterization of optimizers and learning rate
    _adam = keras.optimizers.Adam(lr={{choice([10**-3, 10**-2, 10**-1])}})
    _rmsprop = keras.optimizers.RMSprop(lr={{choice([10**-3, 10**-2, 10**-1])}})
    _sgd = keras.optimizers.SGD(lr={{choice([10**-3, 10**-2, 10**-1])}})
    opt_choiceval = {{ choice( ['_adam', '_rmsprop', '_sgd'] ) }}
    if opt_choiceval == '_adam':
        optim = _adam
    elif opt_choiceval == '_rmsprop':
        optim = _rmsprop
    else:
        optim = _sgd
    model.summary()
    model.compile(loss='binary_crossentropy', metrics=['accuracy'],optimizer=optim)
    model.fit(X_train, Y_train,
              batch_size=256,
              epochs=125,
              verbose=2,
              validation_data=(X_val, Y_val))
    score, acc = model.evaluate(X_val, Y_val)
    predicted = model.predict(X_val)
    print('Test accuracy:', acc)
    # hyperopt minimises `loss`, so return negative validation accuracy
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def main():
    """Run the hyperas hyperparameter search, persist the best model, and
    write predictions for the test set to ``submission5.csv``."""
    # BUG FIX: model_from_json is used below but was never imported anywhere
    # in this file, so the original crashed with NameError at reload time.
    from keras.models import model_from_json

    trials = Trials()
    best_run, best_model = optim.minimize(model=create_model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=10,
                                          trials=trials)
    X_train, Y_train, X_val, Y_val = data()
    print("\n >> Hyperparameters ")
    for t in best_run.items():
        print("[**] ", t[0], ": ", t[1])

    # Persist the winning architecture and its weights.
    print("\nSaving model...")
    model_json = best_model.to_json()
    with open("model_num.json", "w") as json_file:
        json_file.write(model_json)
    best_model.save_weights("model_num.h5")

    # Same feature subset used in data() -- keep the two lists in sync.
    selected_features = ['slope_of_peak_exercise_st_segment',
                         'resting_blood_pressure',
                         'chest_pain_type',
                         'num_major_vessels',
                         'fasting_blood_sugar_gt_120_mg_per_dl',
                         'resting_ekg_results',
                         'serum_cholesterol_mg_per_dl',
                         'oldpeak_eq_st_depression',
                         'sex',
                         'age',
                         'max_heart_rate_achieved',
                         'exercise_induced_angina']
    test_values = pd.read_csv('test_values.csv', index_col='patient_id')
    X_test = test_values[selected_features]

    # Reload the saved model (round-trips the persisted artefact) and predict.
    # The original leaked the file handle; `with` closes it deterministically.
    with open("model_num.json", 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights("model_num.h5")
    print("Loaded model...")
    predictions = loaded_model.predict(X_test, batch_size=128)

    submission_format = pd.read_csv('submission_format.csv', index_col='patient_id')
    my_submission = pd.DataFrame(data=predictions,
                                 columns=submission_format.columns,
                                 index=submission_format.index)
    my_submission.head()
    my_submission.to_csv('submission5.csv')
# Script entry point: run the full search + submission pipeline.
if __name__ == "__main__":
    main()
    print("done..")
| [
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"hyperas.optim.minimize",
"keras.models.Sequential",
"keras.layers.Dense",
"hyperas.distributions.uniform",
"numpy.random.seed",
"hyperas.distributions.choice",
"pandas.DataFrame",
"hyperopt.Trials"
] | [((962, 975), 'numpy.random.seed', 'seed', (['(9251996)'], {}), '(9251996)\n', (966, 975), False, 'from numpy.random import seed\n'), ((1011, 1066), 'pandas.read_csv', 'pd.read_csv', (['"""train_values.csv"""'], {'index_col': '"""patient_id"""'}), "('train_values.csv', index_col='patient_id')\n", (1022, 1066), True, 'import pandas as pd\n'), ((1087, 1142), 'pandas.read_csv', 'pd.read_csv', (['"""train_labels.csv"""'], {'index_col': '"""patient_id"""'}), "('train_labels.csv', index_col='patient_id')\n", (1098, 1142), True, 'import pandas as pd\n'), ((1974, 2041), 'sklearn.model_selection.train_test_split', 'train_test_split', (['predictors', 'target'], {'test_size': '(0.1)', 'random_state': '(0)'}), '(predictors, target, test_size=0.1, random_state=0)\n', (1990, 2041), False, 'from sklearn.model_selection import train_test_split\n'), ((2195, 2207), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2205, 2207), False, 'from keras.models import Sequential\n'), ((3703, 3711), 'hyperopt.Trials', 'Trials', ([], {}), '()\n', (3709, 3711), False, 'from hyperopt import Trials, STATUS_OK, tpe\n'), ((3737, 3834), 'hyperas.optim.minimize', 'optim.minimize', ([], {'model': 'create_model', 'data': 'data', 'algo': 'tpe.suggest', 'max_evals': '(10)', 'trials': 'trials'}), '(model=create_model, data=data, algo=tpe.suggest, max_evals=\n 10, trials=trials)\n', (3751, 3834), False, 'from hyperas import optim\n'), ((4563, 4617), 'pandas.read_csv', 'pd.read_csv', (['"""test_values.csv"""'], {'index_col': '"""patient_id"""'}), "('test_values.csv', index_col='patient_id')\n", (4574, 4617), True, 'import pandas as pd\n'), ((5064, 5124), 'pandas.read_csv', 'pd.read_csv', (['"""submission_format.csv"""'], {'index_col': '"""patient_id"""'}), "('submission_format.csv', index_col='patient_id')\n", (5075, 5124), True, 'import pandas as pd\n'), ((5143, 5244), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'predictions', 'columns': 'submission_format.columns', 'index': 
'submission_format.index'}), '(data=predictions, columns=submission_format.columns, index=\n submission_format.index)\n', (5155, 5244), True, 'import pandas as pd\n'), ((2718, 2733), 'keras.layers.Dense', 'Dense', (['outshape'], {}), '(outshape)\n', (2723, 2733), False, 'from keras.layers import Dense, Activation, Flatten, Dropout, BatchNormalization, Conv2D, MaxPooling2D, Input\n'), ((3099, 3136), 'hyperas.distributions.choice', 'choice', (["['_adam', '_rmsprop', '_sgd']"], {}), "(['_adam', '_rmsprop', '_sgd'])\n", (3105, 3136), False, 'from hyperas.distributions import uniform, choice\n'), ((2760, 2787), 'hyperas.distributions.choice', 'choice', (["['relu', 'sigmoid']"], {}), "(['relu', 'sigmoid'])\n", (2766, 2787), False, 'from hyperas.distributions import uniform, choice\n'), ((2890, 2928), 'hyperas.distributions.choice', 'choice', (['[10 ** -3, 10 ** -2, 10 ** -1]'], {}), '([10 ** -3, 10 ** -2, 10 ** -1])\n', (2896, 2928), False, 'from hyperas.distributions import uniform, choice\n'), ((2969, 3007), 'hyperas.distributions.choice', 'choice', (['[10 ** -3, 10 ** -2, 10 ** -1]'], {}), '([10 ** -3, 10 ** -2, 10 ** -1])\n', (2975, 3007), False, 'from hyperas.distributions import uniform, choice\n'), ((3040, 3078), 'hyperas.distributions.choice', 'choice', (['[10 ** -3, 10 ** -2, 10 ** -1]'], {}), '([10 ** -3, 10 ** -2, 10 ** -1])\n', (3046, 3078), False, 'from hyperas.distributions import uniform, choice\n'), ((2350, 2377), 'hyperas.distributions.choice', 'choice', (["['relu', 'sigmoid']"], {}), "(['relu', 'sigmoid'])\n", (2356, 2377), False, 'from hyperas.distributions import uniform, choice\n'), ((2453, 2466), 'hyperas.distributions.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2460, 2466), False, 'from hyperas.distributions import uniform, choice\n'), ((2629, 2656), 'hyperas.distributions.choice', 'choice', (["['relu', 'sigmoid']"], {}), "(['relu', 'sigmoid'])\n", (2635, 2656), False, 'from hyperas.distributions import uniform, choice\n'), ((2686, 
2699), 'hyperas.distributions.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2693, 2699), False, 'from hyperas.distributions import uniform, choice\n')] |
#!/usr/bin/python
import numpy as np
from mnist import load_mnist
import matplotlib.pyplot as plt
# Global matplotlib defaults: small square figures, crisp grayscale images,
# and interactive mode so plt.show() does not block the script.
plt.rcParams['figure.figsize'] = (3,3)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
plt.ion()
def binaryze_dataset(data, threshold=0.5):
    """Binarize `data`: entries strictly above `threshold` become 1.0, the rest 0.0."""
    return np.where(data > threshold, 1.0, 0.0)
# Adds salt-and-pepper noise: for each sample, a Bernoulli(proportion) draw
# per feature selects which entries to invert (values assumed in [0, 1]).
def add_salt_and_pepper(data, proportion):
    """Flip a random Bernoulli(`proportion`) subset of entries per sample.

    `data` has shape (n_samples, H, W); the output keeps that shape.
    Uses the global NumPy RNG (np.random.binomial).
    """
    n_samples = np.shape(data)[0]
    sample_shape = np.shape(data[0])
    flat = np.reshape(data, (n_samples, -1))
    n_features = np.shape(flat)[1]
    flip_mask = np.random.binomial(1, proportion,
                                  size=(n_samples, n_features))
    # Where flip_mask is 1 take the inverted value, otherwise keep the original.
    noisy = (1 - flat) * flip_mask + flat * (1 - flip_mask)
    return np.reshape(noisy, (n_samples, sample_shape[0],
                               sample_shape[1]))
# load data
ret = load_mnist(path='../datasets/mnist/downloads/', digits=[0, 1, 2])
X = ret[0]
Y = ret[1]


def _show_first_three(samples, fig_name):
    """Plot the first three samples side by side in a figure named `fig_name`."""
    fig = plt.figure(fig_name)
    for i in range(3):
        # BUG FIX: subplot indices are 1-based; passing 0 raises ValueError
        # in matplotlib (the original used plt.subplot(1, 3, i)).
        plt.subplot(1, 3, i + 1)
        sample = samples[i]
        plt.imshow(sample)
        plt.show()


# show one example
_show_first_three(X, 'mnist_example')

sap_X = add_salt_and_pepper(X, 0.25)
_show_first_three(sap_X, 'mnist_salt_and_pepper')

bin_X = binaryze_dataset(X, 0.5)
_show_first_three(bin_X, 'mnist_binarized')

sap_bin_X = add_salt_and_pepper(bin_X, 0.25)
_show_first_three(sap_bin_X, 'binarized_salt_and_pepper')
| [
"matplotlib.pyplot.imshow",
"numpy.reshape",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ion",
"mnist.load_mnist",
"numpy.shape",
"matplotlib.pyplot.subplot",
"numpy.random.binomial",
"matplotlib.pyplot.show"
] | [((221, 230), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (228, 230), True, 'import matplotlib.pyplot as plt\n'), ((1053, 1118), 'mnist.load_mnist', 'load_mnist', ([], {'path': '"""../datasets/mnist/downloads/"""', 'digits': '[0, 1, 2]'}), "(path='../datasets/mnist/downloads/', digits=[0, 1, 2])\n", (1063, 1118), False, 'from mnist import load_mnist\n'), ((1166, 1193), 'matplotlib.pyplot.figure', 'plt.figure', (['"""mnist_example"""'], {}), "('mnist_example')\n", (1176, 1193), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1371), 'matplotlib.pyplot.figure', 'plt.figure', (['"""mnist_salt_and_pepper"""'], {}), "('mnist_salt_and_pepper')\n", (1346, 1371), True, 'import matplotlib.pyplot as plt\n'), ((1513, 1542), 'matplotlib.pyplot.figure', 'plt.figure', (['"""mnist_binarized"""'], {}), "('mnist_binarized')\n", (1523, 1542), True, 'import matplotlib.pyplot as plt\n'), ((1697, 1736), 'matplotlib.pyplot.figure', 'plt.figure', (['"""binarized_salt_and_pepper"""'], {}), "('binarized_salt_and_pepper')\n", (1707, 1736), True, 'import matplotlib.pyplot as plt\n'), ((624, 641), 'numpy.shape', 'np.shape', (['data[0]'], {}), '(data[0])\n', (632, 641), True, 'import numpy as np\n'), ((658, 693), 'numpy.reshape', 'np.reshape', (['data', '(num_samples, -1)'], {}), '(data, (num_samples, -1))\n', (668, 693), True, 'import numpy as np\n'), ((757, 824), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'proportion'], {'size': '(num_samples, num_features)'}), '(1, proportion, size=(num_samples, num_features))\n', (775, 824), True, 'import numpy as np\n'), ((952, 1025), 'numpy.reshape', 'np.reshape', (['new_data', '(num_samples, original_shape[0], original_shape[1])'], {}), '(new_data, (num_samples, original_shape[0], original_shape[1]))\n', (962, 1025), True, 'import numpy as np\n'), ((1217, 1237), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', 'i'], {}), '(1, 3, i)\n', (1228, 1237), True, 'import matplotlib.pyplot as plt\n'), ((1258, 1276), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['sample'], {}), '(sample)\n', (1268, 1276), True, 'import matplotlib.pyplot as plt\n'), ((1281, 1291), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1289, 1291), True, 'import matplotlib.pyplot as plt\n'), ((1395, 1415), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', 'i'], {}), '(1, 3, i)\n', (1406, 1415), True, 'import matplotlib.pyplot as plt\n'), ((1440, 1458), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sample'], {}), '(sample)\n', (1450, 1458), True, 'import matplotlib.pyplot as plt\n'), ((1463, 1473), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1471, 1473), True, 'import matplotlib.pyplot as plt\n'), ((1566, 1586), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', 'i'], {}), '(1, 3, i)\n', (1577, 1586), True, 'import matplotlib.pyplot as plt\n'), ((1611, 1629), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sample'], {}), '(sample)\n', (1621, 1629), True, 'import matplotlib.pyplot as plt\n'), ((1634, 1644), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1642, 1644), True, 'import matplotlib.pyplot as plt\n'), ((1760, 1780), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', 'i'], {}), '(1, 3, i)\n', (1771, 1780), True, 'import matplotlib.pyplot as plt\n'), ((1809, 1827), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sample'], {}), '(sample)\n', (1819, 1827), True, 'import matplotlib.pyplot as plt\n'), ((1832, 1842), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1840, 1842), True, 'import matplotlib.pyplot as plt\n'), ((299, 313), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (307, 313), True, 'import numpy as np\n'), ((585, 599), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (593, 599), True, 'import numpy as np\n'), ((712, 730), 'numpy.shape', 'np.shape', (['new_data'], {}), '(new_data)\n', (720, 730), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
def funcion(x, y=2):
    """Return x + 2 + y (y defaults to 2)."""
    result = x + 2 + y
    return result


print('El resultado de la funcion basica es: ', funcion(2))


def funcion_basic(x):
    """Return the sine of x (radians)."""
    return np.sin(x)


print('El resultado operacion de la funcion lamda es: ', funcion_basic(np.pi/2))

# Lambda with an unused default second argument; always returns x + 2.
f = lambda x, y=3: x + 2
print('El resultado de la funcion lambda ', f(2))
| [
"numpy.sin"
] | [((241, 250), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (247, 250), True, 'import numpy as np\n')] |
# std libs
import warnings
# third-party libs
import numpy as np
from numpy.lib.stride_tricks import as_strided
def _checks(wsize, overlap, n, axis):
# checks
if n < wsize < 0:
raise ValueError(f'Window size ({wsize}) should be greater than 0 and '
f'smaller than array size ({n}) along axis {axis}')
if wsize <= overlap < 0:
raise ValueError(f'Overlap ({overlap}) should be greater equal 0 and '
f'smaller than window size ({wsize})')
# FIXME: does not always pad out to the correct length!
def fold(a, wsize, overlap=0, axis=0, pad='masked', **kws):
    """
    Fold (window) an array along a given `axis` at given `size`, with successive
    windows overlapping each previous by `overlap` elements. This method
    works on masked arrays as well and will fold the mask identically to the
    data. By default the array is padded out with masked elements so that the
    step size evenly divides the array along the given axis.

    Parameters
    ----------
    a : array-like
        Input array (plain or masked).
    wsize : int
        Window (segment) length along `axis`.
    overlap : int
        Number of elements shared by consecutive windows.
    axis : int
        Axis along which to fold.
    pad : str or falsy
        If truthy, pad the array out via `padder` so windows tile evenly;
        'masked' pads with masked zeros.
    kws :
        Keywords are passed to `np.pad` which pads up the array to the required
        length.

    Returns
    -------
    Folded array with a new segments axis inserted at `axis`.

    Notes
    -----
    When overlap is nonzero, the array returned by this function will have
    multiple entries **with the same memory location**. Beware of this when
    doing inplace arithmetic operations on the returned array.
    eg.:
    >>> n, size, overlap = 100, 10, 5
    >>> q = fold(np.arange(n), size, overlap)
    >>> k = 0
    >>> q[0, overlap + k] *= 10
    >>> q[1, k] == q[0, overlap + k]
    True
    """
    a = np.asanyarray(a)
    shape = a.shape
    n = shape[axis]

    _checks(wsize, overlap, n, axis)

    # short circuits: already exactly one window -> just add a length-1 axis
    if (n == wsize) and (overlap == 0):
        return a.reshape(np.insert(shape, axis, 1))

    if n < wsize:
        warnings.warn(
            f'Window size larger than array size along dimension {axis}')
        return a.reshape(np.insert(shape, axis, 1))

    # pad out so the step evenly divides the axis length
    if pad:
        a, n_seg = padder(a, wsize, overlap, axis, **kws)
    #
    sa = get_strided_array(a, wsize, overlap, axis)

    # deal with masked data: fold the mask with identical strides so masked
    # elements stay aligned with their data
    if np.ma.isMA(a):
        mask = a.mask
        if mask is not False:
            mask = get_strided_array(mask, wsize, overlap, axis)
        sa = np.ma.array(sa.data, mask=mask)

    return sa
def is_null(x):
    """True when `x` is exactly the None or False singleton (identity, not truthiness)."""
    return x is None or x is False
def padder(a, wsize, overlap=0, axis=0, pad_mode='masked', **kws):
    """Pad `a` at the end of `axis` so windows of `wsize` (with `overlap`)
    tile it evenly.

    With pad_mode='masked' the padding is zeros carrying a True mask;
    otherwise `pad_mode`/`kws` are forwarded to `np.pad`. Returns the
    (possibly masked) padded array and the number of full step segments.
    """
    a = np.asanyarray(a)  # convert to (un-masked) array
    n = a.shape[axis]

    # checks
    _checks(wsize, overlap, n, axis)
    #
    mask = a.mask if np.ma.is_masked(a) else None
    step = wsize - overlap
    n_seg, leftover = divmod(n, step)  # segments of size `step`, plus remainder
    if step == 1:
        # special-case: every element starts a window; always pad a full
        # window minus one.  NOTE(review): this unconditionally overrides
        # `leftover` -- related to the FIXME on `fold`; confirm intent.
        leftover = wsize - 1
    if leftover:
        # default is to mask the "out of array" values
        # pad_mode = kws.pop('pad', 'mask')
        if (pad_mode == 'masked') and is_null(mask):
            mask = np.zeros(a.shape, bool)
        # pad the array at the end with `pad_end` number of values
        pad_end = wsize - leftover
        pad_width = np.zeros((a.ndim, 2), int)  # initialise pad width indicator
        pad_width[axis, -1] = pad_end  # only pad the trailing edge of `axis`
        pad_width = list(map(tuple, pad_width))  # map to list of tuples
        # pad (apodise) the input signal (and mask)
        if pad_mode == 'masked':
            a = np.pad(a, pad_width, 'constant', constant_values=0)
            mask = np.pad(mask, pad_width, 'constant', constant_values=True)
        else:
            a = np.pad(a, pad_width, pad_mode, **kws)
            if not is_null(mask):
                mask = np.pad(mask, pad_width, pad_mode, **kws)

    # convert back to masked array
    if not is_null(mask):
        a = np.ma.array(a, mask=mask)
    return a, int(n_seg)
def get_strided_array(a, size, overlap, axis=0):
    """Return a zero-copy view of `a` folded along `axis` into overlapping windows.

    Each window holds `size` elements and shares `overlap` elements with its
    predecessor; the new (segments) axis is inserted before the window axis.
    Windows may alias the same memory -- no data is copied.
    """
    if axis < 0:
        axis += a.ndim
    step = size - overlap

    # Number of complete windows.  NOTE: relies on the array already being
    # padded out so the step divides the axis evenly.
    n_segs = (a.shape[axis] - overlap) // step

    out_shape = np.insert(a.shape, axis + 1, size)
    out_shape[axis] = n_segs
    # new shape is (..., n_segs, size, ...)

    # Consecutive windows start `step` elements apart along `axis` (byte steps).
    out_strides = np.insert(a.strides, axis, step * a.strides[axis])
    return as_strided(a, out_shape, out_strides)
def gen(a, size, overlap=0, axis=0, **kw):
    """
    Generator version of fold: yield consecutive (possibly overlapping)
    windows of length `size` along `axis`, after padding via `padder`.
    """
    a, n_seg = padder(a, size, overlap, **kw)
    step = size - overlap
    for i in range(int(n_seg)):
        start = i * step
        index = [slice(None)] * a.ndim
        index[axis] = slice(start, start + size)
        # BUG FIX: index with a tuple -- indexing an ndarray with a *list*
        # of slices was deprecated and now raises an error in modern NumPy.
        yield a[tuple(index)]
def rebin(x, binsize, t=None, e=None):
    """
    Rebin time series data. Assumes data are evenly sampled in time (constant
    time steps).

    Returns the binned flux, optionally followed by binned times (median per
    bin) and binned uncertainties (quadrature mean per bin).
    """
    out = (fold(x, binsize).mean(1),)
    if t is not None:
        out += (np.median(fold(t, binsize), 1),)
    if e is not None:
        out += (np.sqrt(np.square(fold(e, binsize)).mean(1)),)
    return out[0] if len(out) == 1 else out
def get_nocc(n, wsize, overlap):
    """
    Return an array of length N, with elements representing the number of
    times that the index corresponding to that element would be repeated in
    the strided array.
    """
    from recipes.lists import tally, cosort

    folded = fold(np.arange(n), wsize, overlap).ravel()
    if np.ma.is_masked(folded):
        # drop the padded (masked) indices before counting
        folded = folded[~folded.mask]
    counts = tally(folded)
    _, noc = cosort(*zip(*counts.items()))
    return noc
| [
"numpy.insert",
"numpy.ma.is_masked",
"numpy.ma.isMA",
"numpy.ma.array",
"recipes.lists.tally",
"numpy.lib.stride_tricks.as_strided",
"numpy.asanyarray",
"numpy.zeros",
"warnings.warn",
"numpy.pad",
"numpy.arange"
] | [((1658, 1674), 'numpy.asanyarray', 'np.asanyarray', (['a'], {}), '(a)\n', (1671, 1674), True, 'import numpy as np\n'), ((2214, 2227), 'numpy.ma.isMA', 'np.ma.isMA', (['a'], {}), '(a)\n', (2224, 2227), True, 'import numpy as np\n'), ((2552, 2568), 'numpy.asanyarray', 'np.asanyarray', (['a'], {}), '(a)\n', (2565, 2568), True, 'import numpy as np\n'), ((4530, 4564), 'numpy.insert', 'np.insert', (['a.shape', '(axis + 1)', 'size'], {}), '(a.shape, axis + 1, size)\n', (4539, 4564), True, 'import numpy as np\n'), ((4673, 4723), 'numpy.insert', 'np.insert', (['a.strides', 'axis', '(step * a.strides[axis])'], {}), '(a.strides, axis, step * a.strides[axis])\n', (4682, 4723), True, 'import numpy as np\n'), ((4735, 4772), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['a', 'new_shape', 'new_strides'], {}), '(a, new_shape, new_strides)\n', (4745, 4772), False, 'from numpy.lib.stride_tricks import as_strided\n'), ((5946, 5970), 'numpy.ma.is_masked', 'np.ma.is_masked', (['indices'], {}), '(indices)\n', (5961, 5970), True, 'import numpy as np\n'), ((1894, 1969), 'warnings.warn', 'warnings.warn', (['f"""Window size larger than array size along dimension {axis}"""'], {}), "(f'Window size larger than array size along dimension {axis}')\n", (1907, 1969), False, 'import warnings\n'), ((2359, 2390), 'numpy.ma.array', 'np.ma.array', (['sa.data'], {'mask': 'mask'}), '(sa.data, mask=mask)\n', (2370, 2390), True, 'import numpy as np\n'), ((2702, 2720), 'numpy.ma.is_masked', 'np.ma.is_masked', (['a'], {}), '(a)\n', (2717, 2720), True, 'import numpy as np\n'), ((3182, 3208), 'numpy.zeros', 'np.zeros', (['(a.ndim, 2)', 'int'], {}), '((a.ndim, 2), int)\n', (3190, 3208), True, 'import numpy as np\n'), ((3825, 3850), 'numpy.ma.array', 'np.ma.array', (['a'], {'mask': 'mask'}), '(a, mask=mask)\n', (3836, 3850), True, 'import numpy as np\n'), ((1840, 1865), 'numpy.insert', 'np.insert', (['shape', 'axis', '(1)'], {}), '(shape, axis, 1)\n', (1849, 1865), True, 'import numpy as np\n'), 
((2008, 2033), 'numpy.insert', 'np.insert', (['shape', 'axis', '(1)'], {}), '(shape, axis, 1)\n', (2017, 2033), True, 'import numpy as np\n'), ((3035, 3058), 'numpy.zeros', 'np.zeros', (['a.shape', 'bool'], {}), '(a.shape, bool)\n', (3043, 3058), True, 'import numpy as np\n'), ((3456, 3507), 'numpy.pad', 'np.pad', (['a', 'pad_width', '"""constant"""'], {'constant_values': '(0)'}), "(a, pad_width, 'constant', constant_values=0)\n", (3462, 3507), True, 'import numpy as np\n'), ((3527, 3584), 'numpy.pad', 'np.pad', (['mask', 'pad_width', '"""constant"""'], {'constant_values': '(True)'}), "(mask, pad_width, 'constant', constant_values=True)\n", (3533, 3584), True, 'import numpy as np\n'), ((3615, 3652), 'numpy.pad', 'np.pad', (['a', 'pad_width', 'pad_mode'], {}), '(a, pad_width, pad_mode, **kws)\n', (3621, 3652), True, 'import numpy as np\n'), ((3710, 3750), 'numpy.pad', 'np.pad', (['mask', 'pad_width', 'pad_mode'], {}), '(mask, pad_width, pad_mode, **kws)\n', (3716, 3750), True, 'import numpy as np\n'), ((5901, 5913), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (5910, 5913), True, 'import numpy as np\n'), ((6040, 6054), 'recipes.lists.tally', 'tally', (['indices'], {}), '(indices)\n', (6045, 6054), False, 'from recipes.lists import tally, cosort\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 19 14:58:38 2021
@author: TD
"""
import numpy as np
class GLM:
    def __init__(self, basis_funcs=([lambda x: np.ones_like(x), lambda x:x],)):
        """
        Parameters
        ----------
        basis_funcs : list or tuple
            List of lists of functions, where each list in the list is
            the basis functions corresponding to a single dimension.
            For example, if passed
            ([lambda x: np.ones_like(x), lambda x: x], [lambda x: x])
            this is equivalent to the basis {1, x, y} for the equation
            z = Ax + By + C; an equation for a plane.
            The default is the basis for an equation of a line
            in 1-dimension ([lambda x: 1, lambda x:x],).
        """
        # Stored per input dimension; evaluated column-wise against X by
        # create_design_matrix during fit/predict.
        self.basis_funcs = basis_funcs
def fit(self, X, y, sample_weights=None):
A = GLM.create_design_matrix(self.basis_funcs, X)
W = GLM.get_sample_weight_matrix(sample_weights, y)
B = GLM.compute_b_matrix(A, W)
self.beta = np.dot(B, y)
### for diagnotics and stats
# trace(W.M)
self.dof = np.trace(
np.dot(
W,
GLM.compute_m_matrix(
GLM.compute_projection_matrix(A, B)
)
)
)
self.sigma_sqrd = GLM.compute_sigma_sqrd(
y - GLM._func_glm(A, self.beta),
W,
self.dof
)
self.var_beta = GLM.compute_var_beta(self.sigma_sqrd, B)
return self
def predict(self, X):
### TODO address trade off here:
# must recompute design matrix each predict call
# not saving design matrix during fit reduces memory footprint
return GLM.func_glm(self.basis_funcs, self.beta, X)
@staticmethod
def create_design_matrix(basis_funcs, X):
A = np.concatenate(
[
np.array(
[f(X[:, i]) for f in bf_coord]
).T
for i, bf_coord in enumerate(basis_funcs)
],
axis=1
)
return A
@staticmethod
def _func_glm(A, beta):
return np.dot(A, beta)
@staticmethod
def func_glm(basis_funcs, beta, X):
A = GLM.create_design_matrix(basis_funcs, X)
return GLM._func_glm(A, beta)
@staticmethod
def jac_glm(basis_funcs, X):
return GLM.create_design_matrix(basis_funcs, X)
@staticmethod
def jac_objective(basis_funcs, beta, X, y, sample_weights=None):
A = GLM.create_design_matrix(basis_funcs, X)
e = y - np.dot(A, beta) # faster not to call func_glm
W = GLM.get_sample_weight_matrix(sample_weights, y)
return 2 * np.dot(np.dot(e.T, W), A)
@staticmethod
def get_sample_weight_matrix(sample_weights, y):
if sample_weights is None:
sample_weights = np.identity(y.shape[0])
if not isinstance(sample_weights, np.ndarray):
sample_weights = np.array(sample_weights)
W = sample_weights
if len(W.shape) < 2:
W = np.diag(W)
if len(W.shape) != 2:
raise ValueError(
f'{W.shape} -- weights matrix shape. 2-d matrix required!'
)
if (W.shape[0] != W.shape[1]):
raise ValueError(
f'{W.shape} -- weights matrix shape. Matrix is not square!'
)
if (W.shape[0] != len(y)):
raise ValueError(
f'{W.shape} -- weights matrix shape.\n'
f'{len(y)} -- number of samples.\n'
'Weight matrix should have shape nsamples x nsamples!'
)
return W
@staticmethod
def compute_b_matrix(A, W):
"""
beta = B.y
"""
ATW = np.dot(A.T, W)
V = np.dot(ATW, A)
if np.linalg.det(V) == 0:
raise ValueError(
'NO SOLUTION: det(A^T.W.A)=0\n'
'Check design matrix or sample weight matrix!'
)
B = np.dot(np.linalg.inv(V), ATW)
del V, ATW
return B
@staticmethod
def compute_projection_matrix(A, B):
"""
projection matrix is idempotent P^2 = P
y_fit = P.y
"""
return np.dot(A, B)
@staticmethod
def compute_m_matrix(PA):
"""
residual operator matrix
e = M.y
"""
return np.identity(PA.shape[0]) - PA
@staticmethod
def compute_sigma_sqrd(e, W, dof):
return np.dot(np.dot(e.T, W), e) / dof
    @staticmethod
    def compute_var_beta(sigma_sqrd, B):
        # Coefficient covariance estimate: Var(beta) = sigma^2 * (B . B^T).
        return sigma_sqrd * np.dot(B, B.T) | [
"numpy.identity",
"numpy.ones_like",
"numpy.linalg.det",
"numpy.diag",
"numpy.array",
"numpy.dot",
"numpy.linalg.inv"
] | [((1104, 1116), 'numpy.dot', 'np.dot', (['B', 'y'], {}), '(B, y)\n', (1110, 1116), True, 'import numpy as np\n'), ((2284, 2299), 'numpy.dot', 'np.dot', (['A', 'beta'], {}), '(A, beta)\n', (2290, 2299), True, 'import numpy as np\n'), ((3985, 3999), 'numpy.dot', 'np.dot', (['A.T', 'W'], {}), '(A.T, W)\n', (3991, 3999), True, 'import numpy as np\n'), ((4021, 4035), 'numpy.dot', 'np.dot', (['ATW', 'A'], {}), '(ATW, A)\n', (4027, 4035), True, 'import numpy as np\n'), ((4488, 4500), 'numpy.dot', 'np.dot', (['A', 'B'], {}), '(A, B)\n', (4494, 4500), True, 'import numpy as np\n'), ((2742, 2757), 'numpy.dot', 'np.dot', (['A', 'beta'], {}), '(A, beta)\n', (2748, 2757), True, 'import numpy as np\n'), ((3038, 3061), 'numpy.identity', 'np.identity', (['y.shape[0]'], {}), '(y.shape[0])\n', (3049, 3061), True, 'import numpy as np\n'), ((3155, 3179), 'numpy.array', 'np.array', (['sample_weights'], {}), '(sample_weights)\n', (3163, 3179), True, 'import numpy as np\n'), ((3261, 3271), 'numpy.diag', 'np.diag', (['W'], {}), '(W)\n', (3268, 3271), True, 'import numpy as np\n'), ((4047, 4063), 'numpy.linalg.det', 'np.linalg.det', (['V'], {}), '(V)\n', (4060, 4063), True, 'import numpy as np\n'), ((4244, 4260), 'numpy.linalg.inv', 'np.linalg.inv', (['V'], {}), '(V)\n', (4257, 4260), True, 'import numpy as np\n'), ((4656, 4680), 'numpy.identity', 'np.identity', (['PA.shape[0]'], {}), '(PA.shape[0])\n', (4667, 4680), True, 'import numpy as np\n'), ((4898, 4912), 'numpy.dot', 'np.dot', (['B', 'B.T'], {}), '(B, B.T)\n', (4904, 4912), True, 'import numpy as np\n'), ((2874, 2888), 'numpy.dot', 'np.dot', (['e.T', 'W'], {}), '(e.T, W)\n', (2880, 2888), True, 'import numpy as np\n'), ((4776, 4790), 'numpy.dot', 'np.dot', (['e.T', 'W'], {}), '(e.T, W)\n', (4782, 4790), True, 'import numpy as np\n'), ((165, 180), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (177, 180), True, 'import numpy as np\n')] |
# Demo script: real-time evolution of a domain-wall product state under a
# spin chain with zz coupling plus transverse (x) and parallel (z) fields,
# using QuSpin's exp_op matrix-exponential evolution operator.
from __future__ import print_function, division
#
import sys,os
# add the QuSpin checkout two directories up to the import path
quspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,quspin_path)
#
from quspin.operators import hamiltonian, exp_op # Hamiltonians, operators and exp_op
from quspin.basis import spin_basis_1d # Hilbert space spin basis
import numpy as np # generic math functions
#
##### define model parameters #####
L=4 # system size
J=1.0 # spin interaction
g=0.809 # transverse field
h=0.9045 # parallel field
#
##### construct basis
basis=spin_basis_1d(L=L)
# define PBC site-coupling lists for operators
x_field=[[g,i] for i in range(L)]
z_field=[[h,i] for i in range(L)]
J_nn=[[J,i,(i+1)%L] for i in range(L)] # PBC
# static and dynamic lists
static=[["zz",J_nn],["z",z_field],["x",x_field]]
dynamic=[]
###### construct Hamiltonian
H=hamiltonian(static,dynamic,dtype=np.float64,basis=basis)
#
###### compute evolution operator as matrix exponential
start, stop, N_t = 0.0, 4.0, 21 # time vector parameters
# define evolution operator U = exp(-1j*H*t) over the time grid (iterate=True)
U=exp_op(H,a=-1j,start=start,stop=stop,num=N_t,endpoint=True,iterate=True)
print(U)
#
# compute domain wall initial state: "1" spins on the left half, "0" on the right
dw_str = "".join("1" for i in range(L//2)) + "".join("0" for i in range(L-L//2))
i_0 = basis.index(dw_str) # find index of product state in basis
psi = np.zeros(basis.Ns) # allocate space for state
psi[i_0] = 1.0 # set MB state to be the given product state
#
##### calculate time-evolved state by successive application of matrix exponential
psi_t=U.dot(psi) # create generator object to apply matrix exponential on the initial state
print(psi_t)
for psi_i in psi_t:
print("evolved state:", psi_i) | [
"sys.path.insert",
"quspin.operators.exp_op",
"quspin.operators.hamiltonian",
"os.getcwd",
"quspin.basis.spin_basis_1d",
"numpy.zeros"
] | [((113, 144), 'sys.path.insert', 'sys.path.insert', (['(0)', 'quspin_path'], {}), '(0, quspin_path)\n', (128, 144), False, 'import sys, os\n'), ((506, 524), 'quspin.basis.spin_basis_1d', 'spin_basis_1d', ([], {'L': 'L'}), '(L=L)\n', (519, 524), False, 'from quspin.basis import spin_basis_1d\n'), ((803, 862), 'quspin.operators.hamiltonian', 'hamiltonian', (['static', 'dynamic'], {'dtype': 'np.float64', 'basis': 'basis'}), '(static, dynamic, dtype=np.float64, basis=basis)\n', (814, 862), False, 'from quspin.operators import hamiltonian, exp_op\n'), ((1005, 1090), 'quspin.operators.exp_op', 'exp_op', (['H'], {'a': '(-1.0j)', 'start': 'start', 'stop': 'stop', 'num': 'N_t', 'endpoint': '(True)', 'iterate': '(True)'}), '(H, a=-1.0j, start=start, stop=stop, num=N_t, endpoint=True, iterate=True\n )\n', (1011, 1090), False, 'from quspin.operators import hamiltonian, exp_op\n'), ((1277, 1295), 'numpy.zeros', 'np.zeros', (['basis.Ns'], {}), '(basis.Ns)\n', (1285, 1295), True, 'import numpy as np\n'), ((91, 102), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (100, 102), False, 'import sys, os\n')] |
# Run the labyrinth navigation experiment.
# %%
from Environments.unity_labyrinth import build_unity_labyrinth_env
import numpy as np
from Controllers.unity_labyrinth_controller import UnityLabyrinthController
from Controllers.unity_meta_controller import MetaController
import pickle
import os, sys
from datetime import datetime
from MDP.general_high_level_mdp import HLMDP
from utils.results_saver import Results
# %% Setup and create the environment
env_settings = {
'time_scale' : 99.0,
}
env, side_channels = build_unity_labyrinth_env()
side_channels['engine_config_channel'].set_configuration_parameters(
time_scale=env_settings['time_scale'])
prob_threshold = 0.95 # Desired probability of reaching the final goal
training_iters = 5e4 # 5e4
num_rollouts = 1000 # 100
n_steps_per_rollout = 50
meta_controller_n_steps_per_rollout = 5 * n_steps_per_rollout
max_timesteps_per_component = 2e5
# %% Set the load directory (if loading pre-trained sub-systems)
# or create a new directory in which to save results
load_folder_name = ''
save_learned_controllers = True
experiment_name = 'unity_labyrinth'
base_path = os.path.abspath(os.path.curdir)
string_ind = base_path.find('src')
assert(string_ind >= 0)
base_path = base_path[0:string_ind + 4]
base_path = os.path.join(base_path, 'data', 'saved_controllers')
load_dir = os.path.join(base_path, load_folder_name)
if load_folder_name == '':
now = datetime.now()
dt_string = now.strftime("%Y-%m-%d_%H-%M-%S")
rseed = int(now.time().strftime('%H%M%S'))
save_path = os.path.join(base_path, dt_string + '_' + experiment_name)
else:
save_path = os.path.join(base_path, load_folder_name)
if save_learned_controllers and not os.path.isdir(save_path):
os.mkdir(save_path)
# %% Create the list of partially instantiated sub-systems
controller_list = []
if load_folder_name == '':
for i in range(12):
controller_list.append(UnityLabyrinthController(i, env, env_settings=env_settings))
else:
for controller_dir in os.listdir(load_dir):
controller_load_path = os.path.join(load_dir, controller_dir)
if os.path.isdir(controller_load_path):
controller = UnityLabyrinthController(0, env, load_dir=controller_load_path)
controller_list.append(controller)
# re-order the controllers by index
reordered_list = []
for i in range(len(controller_list)):
for controller in controller_list:
if controller.controller_ind == i:
reordered_list.append(controller)
controller_list = reordered_list
# %% Create or load object to store the results
if load_folder_name == '':
results = Results(controller_list,
env_settings,
prob_threshold,
training_iters,
num_rollouts,
n_steps_per_rollout,
meta_controller_n_steps_per_rollout,
random_seed=rseed)
else:
results = Results(load_dir=load_dir)
rseed = results.data['random_seed']
# %%
import torch
import random
torch.manual_seed(rseed)
random.seed(rseed)
np.random.seed(rseed)
print('Random seed: {}'.format(results.data['random_seed']))
# %%
for controller_ind in range(len(controller_list)):
controller = controller_list[controller_ind]
# Evaluate initial performance of controllers (they haven't learned
# anything yet so they will likely have no chance of success.)
controller.eval_performance(env,
side_channels['custom_side_channel'],
n_episodes=10,
n_steps=n_steps_per_rollout)
print('Controller {} achieved prob succes: {}'.format(controller_ind,
controller.get_success_prob()))
# Save learned controller
if save_learned_controllers:
controller_save_path = \
os.path.join(save_path, 'controller_{}'.format(controller_ind))
controller.save(controller_save_path)
results.update_training_steps(0)
results.update_controllers(controller_list)
results.save(save_path)
# %%
# Construct high-level MDP and solve for the max reach probability
S = np.arange(-1, 11)  # high-level states -1..10 (-1 is the failure state)
A = np.arange(len(controller_list))  # one abstract action per sub-controller
s_i = 0      # initial high-level state
s_goal = 8   # goal state
s_fail = -1  # failure state
# (state, action) -> successor state; presumably encodes the labyrinth's
# room-to-room topology -- TODO confirm against the Unity scene layout.
successor_map = {
    (0,0) : 2,
    (0,1) : 1,
    (1,2) : 3,
    (1,3) : 5,
    (2,4) : 9,
    (9,5) : 10,
    (3,6) : 4,
    (4,7) : 3,
    (5,8) : 6,
    (10,9): 8,
    (6,10): 7,
    (7,11): 8,
}
hlmdp = HLMDP(S, A, s_i, s_goal, s_fail, controller_list, successor_map)
policy, reach_prob, feasible_flag = hlmdp.solve_max_reach_prob_policy()
# Construct a meta-controller and emprirically evaluate it.
meta_controller = MetaController(policy, hlmdp, side_channels)
meta_success_rate = meta_controller.eval_performance(env,
side_channels,
n_episodes=num_rollouts,
n_steps=meta_controller_n_steps_per_rollout)
meta_controller.unsubscribe_meta_controller(side_channels)
# Save the results
results.update_composition_data(meta_success_rate, num_rollouts, policy, reach_prob)
results.save(save_path)
# %% Main loop of iterative compositional reinforcement learning
total_timesteps = training_iters
while reach_prob < prob_threshold:
# Solve the HLM biliniear program to obtain sub-task specifications.
optimistic_policy, \
required_reach_probs, \
optimistic_reach_prob, \
feasible_flag = \
hlmdp.solve_low_level_requirements_action(prob_threshold,
max_timesteps_per_component=max_timesteps_per_component)
if not feasible_flag:
print(required_reach_probs)
# Print the empirical sub-system estimates and the sub-system
# specifications to terminal
for controller_ind in range(len(hlmdp.controller_list)):
controller = hlmdp.controller_list[controller_ind]
print('Sub-task: {}, \
Achieved success prob: {}, Required success prob: {}'\
.format(controller_ind,
controller.get_success_prob(),
controller.data['required_success_prob']))
# Decide which sub-system to train next.
performance_gaps = []
for controller_ind in range(len(hlmdp.controller_list)):
controller = hlmdp.controller_list[controller_ind]
performance_gaps.append(controller.data['required_success_prob'] - \
controller.get_success_prob())
largest_gap_ind = np.argmax(performance_gaps)
controller_to_train = hlmdp.controller_list[largest_gap_ind]
# Train the sub-system and empirically evaluate its performance
print('Training controller {}'.format(largest_gap_ind))
controller_to_train.learn(side_channels['custom_side_channel'],
total_timesteps=total_timesteps)
print('Completed training controller {}'.format(largest_gap_ind))
controller_to_train.eval_performance(env,
side_channels['custom_side_channel'],
n_episodes=num_rollouts,
n_steps=n_steps_per_rollout)
# Save learned controller
if save_learned_controllers:
controller_save_path = os.path.join(save_path,
'controller_{}'.format(largest_gap_ind))
if not os.path.isdir(controller_save_path):
os.mkdir(controller_save_path)
controller_to_train.save(controller_save_path)
# Solve the HLM for the meta-policy maximizing reach probability
policy, reach_prob, feasible_flag = hlmdp.solve_max_reach_prob_policy()
# Construct a meta-controller with this policy and empirically evaluate its performance
meta_controller = MetaController(policy, hlmdp, side_channels)
meta_success_rate = meta_controller.eval_performance(env,
side_channels,
n_episodes=num_rollouts,
n_steps=meta_controller_n_steps_per_rollout)
meta_controller.unsubscribe_meta_controller(side_channels)
# Save results
results.update_training_steps(total_timesteps)
results.update_controllers(hlmdp.controller_list)
results.update_composition_data(meta_success_rate, num_rollouts, policy, reach_prob)
results.save(save_path)
print('Predicted success prob: {}, Empirical success prob: {}'.format(reach_prob, meta_success_rate))
# %% Once the loop has been completed, construct a meta-controller and visualize its performance
meta_controller = MetaController(policy, hlmdp, side_channels)
print('evaluating performance of meta controller')
meta_success_rate = meta_controller.eval_performance(env,
side_channels,
n_episodes=num_rollouts,
n_steps=meta_controller_n_steps_per_rollout)
meta_controller.unsubscribe_meta_controller(side_channels)
print('Predicted success prob: {}, \
empirically measured success prob: {}'.format(reach_prob, meta_success_rate))
# %%
n_episodes = 5
render = True
meta_controller = MetaController(policy, hlmdp, side_channels)
meta_controller.demonstrate_capabilities(env,
side_channels,
n_episodes=n_episodes,
n_steps=meta_controller_n_steps_per_rollout,
render=render)
meta_controller.unsubscribe_meta_controller(side_channels)
# %%
| [
"Controllers.unity_labyrinth_controller.UnityLabyrinthController",
"torch.manual_seed",
"os.listdir",
"Environments.unity_labyrinth.build_unity_labyrinth_env",
"utils.results_saver.Results",
"os.path.join",
"Controllers.unity_meta_controller.MetaController",
"random.seed",
"numpy.argmax",
"datetim... | [((523, 550), 'Environments.unity_labyrinth.build_unity_labyrinth_env', 'build_unity_labyrinth_env', ([], {}), '()\n', (548, 550), False, 'from Environments.unity_labyrinth import build_unity_labyrinth_env\n'), ((1169, 1200), 'os.path.abspath', 'os.path.abspath', (['os.path.curdir'], {}), '(os.path.curdir)\n', (1184, 1200), False, 'import os, sys\n'), ((1312, 1364), 'os.path.join', 'os.path.join', (['base_path', '"""data"""', '"""saved_controllers"""'], {}), "(base_path, 'data', 'saved_controllers')\n", (1324, 1364), False, 'import os, sys\n'), ((1377, 1418), 'os.path.join', 'os.path.join', (['base_path', 'load_folder_name'], {}), '(base_path, load_folder_name)\n', (1389, 1418), False, 'import os, sys\n'), ((3158, 3182), 'torch.manual_seed', 'torch.manual_seed', (['rseed'], {}), '(rseed)\n', (3175, 3182), False, 'import torch\n'), ((3183, 3201), 'random.seed', 'random.seed', (['rseed'], {}), '(rseed)\n', (3194, 3201), False, 'import random\n'), ((3202, 3223), 'numpy.random.seed', 'np.random.seed', (['rseed'], {}), '(rseed)\n', (3216, 3223), True, 'import numpy as np\n'), ((4304, 4321), 'numpy.arange', 'np.arange', (['(-1)', '(11)'], {}), '(-1, 11)\n', (4313, 4321), True, 'import numpy as np\n'), ((4600, 4664), 'MDP.general_high_level_mdp.HLMDP', 'HLMDP', (['S', 'A', 's_i', 's_goal', 's_fail', 'controller_list', 'successor_map'], {}), '(S, A, s_i, s_goal, s_fail, controller_list, successor_map)\n', (4605, 4664), False, 'from MDP.general_high_level_mdp import HLMDP\n'), ((4816, 4860), 'Controllers.unity_meta_controller.MetaController', 'MetaController', (['policy', 'hlmdp', 'side_channels'], {}), '(policy, hlmdp, side_channels)\n', (4830, 4860), False, 'from Controllers.unity_meta_controller import MetaController\n'), ((8948, 8992), 'Controllers.unity_meta_controller.MetaController', 'MetaController', (['policy', 'hlmdp', 'side_channels'], {}), '(policy, hlmdp, side_channels)\n', (8962, 8992), False, 'from Controllers.unity_meta_controller import 
MetaController\n'), ((9575, 9619), 'Controllers.unity_meta_controller.MetaController', 'MetaController', (['policy', 'hlmdp', 'side_channels'], {}), '(policy, hlmdp, side_channels)\n', (9589, 9619), False, 'from Controllers.unity_meta_controller import MetaController\n'), ((1457, 1471), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1469, 1471), False, 'from datetime import datetime\n'), ((1585, 1643), 'os.path.join', 'os.path.join', (['base_path', "(dt_string + '_' + experiment_name)"], {}), "(base_path, dt_string + '_' + experiment_name)\n", (1597, 1643), False, 'import os, sys\n'), ((1666, 1707), 'os.path.join', 'os.path.join', (['base_path', 'load_folder_name'], {}), '(base_path, load_folder_name)\n', (1678, 1707), False, 'import os, sys\n'), ((1775, 1794), 'os.mkdir', 'os.mkdir', (['save_path'], {}), '(save_path)\n', (1783, 1794), False, 'import os, sys\n'), ((2052, 2072), 'os.listdir', 'os.listdir', (['load_dir'], {}), '(load_dir)\n', (2062, 2072), False, 'import os, sys\n'), ((2702, 2871), 'utils.results_saver.Results', 'Results', (['controller_list', 'env_settings', 'prob_threshold', 'training_iters', 'num_rollouts', 'n_steps_per_rollout', 'meta_controller_n_steps_per_rollout'], {'random_seed': 'rseed'}), '(controller_list, env_settings, prob_threshold, training_iters,\n num_rollouts, n_steps_per_rollout, meta_controller_n_steps_per_rollout,\n random_seed=rseed)\n', (2709, 2871), False, 'from utils.results_saver import Results\n'), ((3057, 3083), 'utils.results_saver.Results', 'Results', ([], {'load_dir': 'load_dir'}), '(load_dir=load_dir)\n', (3064, 3083), False, 'from utils.results_saver import Results\n'), ((6760, 6787), 'numpy.argmax', 'np.argmax', (['performance_gaps'], {}), '(performance_gaps)\n', (6769, 6787), True, 'import numpy as np\n'), ((8054, 8098), 'Controllers.unity_meta_controller.MetaController', 'MetaController', (['policy', 'hlmdp', 'side_channels'], {}), '(policy, hlmdp, side_channels)\n', (8068, 8098), False, 'from 
Controllers.unity_meta_controller import MetaController\n'), ((1745, 1769), 'os.path.isdir', 'os.path.isdir', (['save_path'], {}), '(save_path)\n', (1758, 1769), False, 'import os, sys\n'), ((2105, 2143), 'os.path.join', 'os.path.join', (['load_dir', 'controller_dir'], {}), '(load_dir, controller_dir)\n', (2117, 2143), False, 'import os, sys\n'), ((2155, 2190), 'os.path.isdir', 'os.path.isdir', (['controller_load_path'], {}), '(controller_load_path)\n', (2168, 2190), False, 'import os, sys\n'), ((1959, 2018), 'Controllers.unity_labyrinth_controller.UnityLabyrinthController', 'UnityLabyrinthController', (['i', 'env'], {'env_settings': 'env_settings'}), '(i, env, env_settings=env_settings)\n', (1983, 2018), False, 'from Controllers.unity_labyrinth_controller import UnityLabyrinthController\n'), ((2217, 2280), 'Controllers.unity_labyrinth_controller.UnityLabyrinthController', 'UnityLabyrinthController', (['(0)', 'env'], {'load_dir': 'controller_load_path'}), '(0, env, load_dir=controller_load_path)\n', (2241, 2280), False, 'from Controllers.unity_labyrinth_controller import UnityLabyrinthController\n'), ((7658, 7693), 'os.path.isdir', 'os.path.isdir', (['controller_save_path'], {}), '(controller_save_path)\n', (7671, 7693), False, 'import os, sys\n'), ((7707, 7737), 'os.mkdir', 'os.mkdir', (['controller_save_path'], {}), '(controller_save_path)\n', (7715, 7737), False, 'import os, sys\n')] |
import numpy as np
import uncertainties.unumpy as unp
def center():
    """Return the positions (argument numbers) of the model's centre params."""
    return [2, 3]
def args():
    # NOTE(review): lists 7 names including 'theta', but f() below takes only
    # 6 fit parameters (no theta) -- confirm which signature is authoritative.
    return 'amp', 'x0', 'y0', 'sig_x', 'sig_y', 'theta', 'offset'
def f(coordinates, amplitude, xo, yo, sigma_x, sigma_y, offset):
    """
    Guarded wrapper around f_raw: parameters outside the allowed ranges get a
    huge constant residual vector (to steer fitters away), otherwise the raw
    model is evaluated and flattened.
    """
    out_of_range = (
        sigma_x > 50 or sigma_y > 50
        or xo < 0 or yo < 0
        or amplitude < 0 or sigma_x < 0 or sigma_y < 0
    )
    if out_of_range:
        n_points = len(coordinates[0]) * len(coordinates[0][0])
        return 1e10 * np.ones(n_points)
    return f_raw(coordinates, amplitude, xo, yo, sigma_x, sigma_y, offset)
def f_noravel(coordinates, amplitude, xo, yo, sigma_x, sigma_y, offset):
    """
    Evaluate the 2-D model on a coordinate grid without flattening:
    offset + amp * Norm * (dx^2/sx^2 - 1)^2 * exp(-(dx^2/(2 sx^2) + dy^2/(2 sy^2)))
    with Norm = 1 / (4 * sx * sy * pi).
    """
    x, y = coordinates[0], coordinates[1]
    dx = x - float(xo)
    dy = y - float(yo)
    a = 1 / (2 * sigma_x ** 2)
    c = 1 / (2 * sigma_y ** 2)
    norm = 1 / (4 * sigma_x * sigma_y * np.pi)
    shape_term = (dx ** 2 / sigma_x ** 2 - 1) ** 2
    return offset + amplitude * norm * shape_term * np.exp(-(a * dx ** 2 + c * dy ** 2))
def f_raw(coordinates, amplitude, xo, yo, sigma_x, sigma_y, offset):
    """Evaluate the model and flatten to 1-D; performs no parameter checks."""
    surface = f_noravel(coordinates, amplitude, xo, yo, sigma_x, sigma_y, offset)
    return surface.ravel()
def guess(key, values):
    """
    Returns guess values for the parameters of this function class based on the input. Used for fitting using this
    class.

    NOTE(review): body is empty -- the function implicitly returns None.
    Confirm whether a real initial-guess implementation is expected here.
    :param key:
    :param values:
    :return: None (not implemented)
    """
| [
"numpy.exp"
] | [((998, 1040), 'numpy.exp', 'np.exp', (['(-(a * xp ** 2 + c * (y - yo) ** 2))'], {}), '(-(a * xp ** 2 + c * (y - yo) ** 2))\n', (1004, 1040), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
import logging
logger = logging.getLogger(__name__)
__all__ = ['tmvtnorm_sample', 'tmvtnorm_rpy2', 'tmvtnorm_emcee']
def importr_tryhard(packname):
    """
    Load an R package via rpy2, installing it from CRAN if the initial
    import fails.

    :param packname: name of the R package to load/install
    :return: the imported rpy2 package object
    :raises RuntimeError: if the package can neither be loaded nor installed

    TODO: R libraries currently get re-loaded every time, need to change so
    it is done once on initial imports.
    """
    logger.debug(f'Loading R package {packname}')
    import rpy2.robjects.packages as rpackages
    utils = rpackages.importr('utils')
    utils.chooseCRANmirror(ind=2)  # select first mirror in the list
    from rpy2.robjects.vectors import StrVector
    try:
        return rpackages.importr(packname)
    except Exception:  # was a bare except: keep broad, but let SystemExit/KeyboardInterrupt through
        try:
            utils.install_packages(StrVector(packname), repos='http://cran.us.r-project.org')
            return rpackages.importr(packname)
        except Exception as err:
            # Chain the cause so the underlying R failure is not lost.
            raise RuntimeError(f'Unable to install {packname}') from err
def tmvtnorm_sample(samples, mean, sigma, lower, upper, algorithm='gibbs', position=None):
    """
    Sample from the truncated multivariate normal distribution.

    Preferred backend is a Gibbs sampler in R via rpy2 + tmvtnorm; the emcee
    rejection-sampling fallback is currently disabled (see tmvtnorm_emcee).

    :param samples: Number of samples to obtain (after burnin)
    :param mean: Truncated normal mean vector
    :param sigma: Truncated normal covariance matrix
    :param lower: Lower bounds of truncation
    :param upper: Upper bounds of truncation
    :param algorithm: Which rpy2+tmvtnorm algorithm to use
    :param position: Starting position for the Markov chain
    :return: (samples x dim) matrix of draws
    :raises ImportError: if the rpy2/tmvtnorm backend cannot be used
    """
    assert (sigma.shape[0] == sigma.shape[1] == len(mean))
    assert (len(lower) == len(upper) == len(mean))
    try:
        # Load the rpy2 package and load/install the tmvtnorm library.
        return tmvtnorm_rpy2(samples, mean, np.ndarray.flatten(sigma), lower, upper,
                             algorithm=algorithm, pos=position)
    except Exception as err:
        logging.critical('Not attempting to use emcee...')
        # Fallback rejection sampler (tmvtnorm_emcee) is disabled; surface the
        # failure, chaining the original cause instead of swallowing it.
        raise ImportError('rpy2/emcee must be installed') from err
def tmvtnorm_rpy2(samples, mean, sigma_vec, lower, upper, algorithm='gibbs', pos=None):
    """
    Sample the truncated multivariate normal via the R 'tmvtnorm' package.

    :param samples: number of samples to draw
    :param mean: mean vector
    :param sigma_vec: covariance matrix flattened to a vector
    :param lower: lower truncation bounds
    :param upper: upper truncation bounds
    :param algorithm: sampling algorithm passed to rtmvnorm (e.g. 'gibbs')
    :param pos: optional Markov-chain starting position
    :return: (samples x dim) numpy matrix of draws
    :raises RuntimeError: if the R libraries cannot be loaded
    """
    import rpy2.robjects.numpy2ri
    try:  # load the R libraries, installing on demand
        _ = importr_tryhard('mvtnorm')
        tmvtnorm = importr_tryhard('tmvtnorm')
    except Exception as err:  # was a bare except; chain the underlying cause
        raise RuntimeError('Failed to import tmvtnorm and dependencies. Ensure R version > 3.') from err
    rpy2.robjects.numpy2ri.activate()  # let numpy arrays flow into R calls
    # Convert the arguments into R objects.
    rmean = rpy2.robjects.FloatVector(mean)  # mean vector
    v = rpy2.robjects.FloatVector(sigma_vec)  # vectorised sigma
    rsigma = rpy2.robjects.r['matrix'](v, nrow=len(mean))  # sigma matrix
    rlower = rpy2.robjects.FloatVector(lower)  # lower bound vector
    rupper = rpy2.robjects.FloatVector(upper)  # upper bound vector
    if pos is not None:
        rpos0 = rpy2.robjects.FloatVector(pos)  # chain start position
        # 'start.value' is not a valid Python keyword name, so translate it.
        from rpy2.robjects.functions import SignatureTranslatedFunction
        STM = SignatureTranslatedFunction  # TODO: check intricacies here
        tmvtnorm.rtmvnorm = STM(tmvtnorm.rtmvnorm,
                                init_prm_translate={'start_value': 'start.value'})
        return np.matrix(tmvtnorm.rtmvnorm(n=samples, mean=rmean, sigma=rsigma, lower=rlower, upper=rupper,
                                            algorithm=algorithm, start_value=rpos0))
    else:
        return np.matrix(tmvtnorm.rtmvnorm(n=samples, mean=rmean, sigma=rsigma, lower=rlower, upper=rupper,
                                            algorithm=algorithm))
def tmvtnorm_emcee(samples, mean, sigma, lower, upper, pos=None, burnin=10000):
    """
    Sampling the truncated multivariate normal distribution using rejection sampling with an ensemble sampler.
    see: Affine Invariant Markov chain Monte Carlo (MCMC) Ensemble sampler.

    :param samples: number of samples to return (after burn-in)
    :param mean: mean vector of the untruncated normal
    :param sigma: covariance matrix
    :param lower: lower truncation bounds
    :param upper: upper truncation bounds
    :param pos: optional chain starting position (length == len(mean))
    :param burnin: number of initial steps discarded from every walker
    :return: (samples x dim) array of draws
    """
    from numpy.linalg import inv
    import emcee
    # Rejection sampling wastes most proposals in high dimensions.
    if len(mean) > 10:
        logging.warning('Sampling in {0} dimensional space, install rpy2 for Gibb\'s sampling!'.format(len(mean)))
        logging.critical('Not attempting to use rejection sampling...')
    # Log-probability: -inf outside the truncation box (rejection), otherwise
    # the usual multivariate-normal quadratic form.
    def lnprob_trunc_norm(x, mu, lower, upper, icov):
        if np.any(x < lower) or np.any(x > upper):
            return -np.inf
        else:
            diff = x - mu
            return -np.dot(diff, np.dot(icov, diff)) / 2.0
    # create ensemble sampler
    Nwalkers = 10 * len(mean)
    S = emcee.EnsembleSampler(Nwalkers, len(mean), lnprob_trunc_norm, a=20, args=(mean, lower, upper, inv(sigma)))
    # initial position for each walker, sample uniformly.
    # If one bound is +/- inf then we create a small box around other bound.
    # If both bounds are inf, then we set upper bin to mean and lower to mean - 0.1
    if pos is None:
        pos = np.ones((Nwalkers,len(mean)))
        for dim in range(len(mean)):
            low = lower[dim]
            upp = upper[dim]
            if lower[dim] == -np.inf:
                if upper[dim] == np.inf:
                    low = mean[dim]
                else:
                    low = upper[dim] - 1
            if upper[dim] == np.inf:
                upp = low + 1
            pos[:, dim] = np.random.uniform(low, upp, Nwalkers)
    else:
        assert len(pos) == len(mean)
        pos = np.tile(pos, (Nwalkers)).T
    walkerSamples = np.ceil(samples/Nwalkers)
    S.run_mcmc(pos, walkerSamples+burnin)
    # fill chain with values after burnin from each walker
    # NOTE(review): walkerSamples = ceil(samples/Nwalkers), so the last
    # walker's slice can extend past `samples`; confirm the intended
    # truncation behaviour of the chain assembly below.
    chain = np.zeros((samples, len(mean)))
    for walker in range(Nwalkers):
        bl = int(walker*walkerSamples)
        bu = int((walker+1)*walkerSamples)
        chain[bl:bu, :] = S.chain[walker, burnin:, :]
    # report acceptance rate
    if np.mean(S.acceptance_fraction) < 1:
        logger.warning(
            "Low mean acceptance rate: {0:.3f}. Too many dimensions?".format(np.mean(S.acceptance_fraction)))
    # Debug plotting blocks below are deliberately disabled ('and False' / 'False').
    if len(mean) < 10 and False:
        import matplotlib.pyplot as plt
        for walker in range(S.chain.shape[0]):
            print(S.acceptance_fraction[walker])
            for latenty in range(S.chain.shape[2]):
                if np.var(S.chain[walker, : ,latenty]) > 0:
                    print(np.var(S.chain[walker,:,latenty]))
                    plt.plot(range(S.chain.shape[1]), S.chain[walker, :, latenty])
            plt.show()
    if False:
        import matplotlib.pyplot as plt
        print(chain[0, :])
        plt.plot(range(chain.shape[1]), chain[0, :])
        plt.show()
return chain[:samples, :] | [
"logging.getLogger",
"numpy.mean",
"numpy.ceil",
"numpy.tile",
"rpy2.robjects.vectors.StrVector",
"numpy.any",
"rpy2.robjects.packages.importr",
"numpy.ndarray.flatten",
"numpy.dot",
"numpy.linalg.inv",
"logging.critical",
"numpy.random.uniform",
"numpy.var",
"matplotlib.pyplot.show"
] | [((75, 102), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (92, 102), False, 'import logging\n'), ((678, 704), 'rpy2.robjects.packages.importr', 'rpackages.importr', (['"""utils"""'], {}), "('utils')\n", (695, 704), True, 'import rpy2.robjects.packages as rpackages\n'), ((6868, 6895), 'numpy.ceil', 'np.ceil', (['(samples / Nwalkers)'], {}), '(samples / Nwalkers)\n', (6875, 6895), True, 'import numpy as np\n'), ((874, 901), 'rpy2.robjects.packages.importr', 'rpackages.importr', (['packname'], {}), '(packname)\n', (891, 901), True, 'import rpy2.robjects.packages as rpackages\n'), ((5594, 5657), 'logging.critical', 'logging.critical', (['"""Not attempting to use rejection sampling..."""'], {}), "('Not attempting to use rejection sampling...')\n", (5610, 5657), False, 'import logging\n'), ((7247, 7277), 'numpy.mean', 'np.mean', (['S.acceptance_fraction'], {}), '(S.acceptance_fraction)\n', (7254, 7277), True, 'import numpy as np\n'), ((2329, 2354), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['sigma'], {}), '(sigma)\n', (2347, 2354), True, 'import numpy as np\n'), ((2511, 2561), 'logging.critical', 'logging.critical', (['"""Not attempting to use emcee..."""'], {}), "('Not attempting to use emcee...')\n", (2527, 2561), False, 'import logging\n'), ((5724, 5741), 'numpy.any', 'np.any', (['(x < lower)'], {}), '(x < lower)\n', (5730, 5741), True, 'import numpy as np\n'), ((5745, 5762), 'numpy.any', 'np.any', (['(x > upper)'], {}), '(x > upper)\n', (5751, 5762), True, 'import numpy as np\n'), ((6721, 6758), 'numpy.random.uniform', 'np.random.uniform', (['low', 'upp', 'Nwalkers'], {}), '(low, upp, Nwalkers)\n', (6738, 6758), True, 'import numpy as np\n'), ((6820, 6842), 'numpy.tile', 'np.tile', (['pos', 'Nwalkers'], {}), '(pos, Nwalkers)\n', (6827, 6842), True, 'import numpy as np\n'), ((8066, 8076), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8074, 8076), True, 'import matplotlib.pyplot as plt\n'), ((1085, 1112), 
'rpy2.robjects.packages.importr', 'rpackages.importr', (['packname'], {}), '(packname)\n', (1102, 1112), True, 'import rpy2.robjects.packages as rpackages\n'), ((6053, 6063), 'numpy.linalg.inv', 'inv', (['sigma'], {}), '(sigma)\n', (6056, 6063), False, 'from numpy.linalg import inv\n'), ((7384, 7414), 'numpy.mean', 'np.mean', (['S.acceptance_fraction'], {}), '(S.acceptance_fraction)\n', (7391, 7414), True, 'import numpy as np\n'), ((7892, 7902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7900, 7902), True, 'import matplotlib.pyplot as plt\n'), ((1006, 1025), 'rpy2.robjects.vectors.StrVector', 'StrVector', (['packname'], {}), '(packname)\n', (1015, 1025), False, 'from rpy2.robjects.vectors import StrVector\n'), ((5864, 5882), 'numpy.dot', 'np.dot', (['icov', 'diff'], {}), '(icov, diff)\n', (5870, 5882), True, 'import numpy as np\n'), ((7683, 7718), 'numpy.var', 'np.var', (['S.chain[walker, :, latenty]'], {}), '(S.chain[walker, :, latenty])\n', (7689, 7718), True, 'import numpy as np\n'), ((7754, 7789), 'numpy.var', 'np.var', (['S.chain[walker, :, latenty]'], {}), '(S.chain[walker, :, latenty])\n', (7760, 7789), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 24 15:53:51 2016
@author: jdorvinen
"""
import numpy as np
def kriebel_dean(w_cm, B, D, W, m, S, T_d, H_b, gamma=0.78):
    '''Calculate storm-induced beach erosion with the convolution method.

    Kriebel, D. L., and Dean, R. G., 'Convolution method for time-dependent
    beach-profile response', J. Waterway, Port, Coastal, Ocean Eng., 1993,
    119(2): 204-226.

    Inputs:
        REQUIRED \n
        w_cm  = sediment fall velocity (cm/s) \n
        B     = Berm height above mean sea-level (meters) \n
        D     = Dune height (meters) \n
        W     = Width of the back-shore (meters) \n
        m     = Linear beach face slope (m/m) \n
        S     = Water-level rise ('storm-surge') (meters) \n
        T_d   = Storm duration (hours) \n
        H_b   = Breaking wave height (meters) \n
        OPTIONAL \n
        gamma = Breaker index, usually taken to be 0.78-1.0 \n

    Returns:
        (V_max, R_max, V_inf, R_inf) where \n
        V_max = Maximum shoreline erosion volume (m**3) \n
        R_max = Maximum shoreline erosion distance (m) \n
        V_inf = Maximum *potential* erosion volume (m**3) \n
        R_inf = Maximum *potential* erosion distance (m)
    '''
    # Local import keeps scipy optional for callers that only use recovery().
    import scipy.optimize as opt

    # Constants
    g = 9.8066  # gravitational acceleration (m/s/s)

    # Sediment: convert fall velocity from cm/s to m/s.
    w = w_cm / 100

    # Equilibrium profile of the form 'x=(h/A)**(3/2)', where h = water depth
    # at a distance x offshore of the still-water level.
    # Eq. 15: 'A' governs profile steepness; valid for 0.1mm < d_50 < 0.4mm.
    A = 2.25 * ((w**2) / g)**(1 / 3)

    # Breaking depth, assumed to remain constant during the storm (meters).
    h_b = H_b / gamma

    # Active profile width 'x_b'. x_0 = distance from the still-water
    # shoreline to the virtual origin of the concave equilibrium profile,
    # x_0 = h_T/3m, where h_T = depth at which the linear slope is tangent
    # to the concave profile (= 4A**3/9m**2).
    h_T = (4 / 9) * (A**3 / m**2)  # Eq. 16b_1
    x_0 = h_T / (3 * m)            # Eq. 16b_2
    x_b = x_0 + (h_b / A)**(3 / 2) # Eq. 23

    # Maximum erosion potential for an equilibrium profile with a dune and a
    # wide back-shore (Eq. 26); other profile variants from the paper
    # (Eqs. 22, 25) are not used here.
    R_inf = (S * (x_b - (h_b / m)) - (W * (B + h_b - (S / 2)))) / (B + D + h_b - (S / 2))

    # Volume eroded (Eq. 27 --> used in K&D examples).
    V_inf = R_inf * D + (R_inf + W) * (B - S)

    # Time scale of profile response.
    C_1 = 320  # Empirical coefficient from Kriebel and Dean 1993
    # Time scale parameter, Eq. 31 (seconds).
    T_sec = ((H_b**(3 / 2)) / (g**(1 / 2) * A**3)) / (1 + (h_b / B) + (m * x_b) / h_b)
    T_s = C_1 * T_sec / 3600  # convert seconds to hours

    # Combine erosion potential and timescale (beach response to an
    # idealized storm surge, Eq. 10).
    alpha = 1 / T_s
    sigma = np.pi / T_d
    beta = 2 * sigma / alpha  # equivalently 2*pi*(T_s/T_d)

    def find_t_max(t_max):
        """Zero of Eq. 12: the time derivative of Eq. 10 vanishes at t_max."""
        return np.cos(2 * sigma * t_max) - \
               (1 / beta) * np.sin(2 * sigma * t_max) - \
               np.exp(-(2 * sigma * t_max) / beta)

    # Solve Eq. 12 for the time of maximum erosion. The bracket [T_d/2, T_d]
    # always contains a sign change: find_t_max(T_d/2) < 0 < find_t_max(T_d).
    t_max = opt.brentq(find_t_max, a=T_d / 2, b=T_d)

    # Maximum shoreline recession (Eq. 13) and the associated eroded volume.
    R_max = R_inf * 0.5 * (1 - np.cos(2 * sigma * t_max))
    V_max = V_inf * (R_max / R_inf)

    return (V_max, R_max, V_inf, R_inf)
def recovery(V_max, interim, T_a=400):
    '''Return the eroded volume that remains after post-storm recovery.

    The beach recovers exponentially during the calm period between storms
    (Eq. 28, Callaghan et al. 2008).

    Inputs:
        V_max   = Initially eroded volume (m**3)
        interim = Period of calm between storms (h)
        T_a     = Characteristic accretive timescale (h)
    Outputs:
        V_recovered = Eroded volume remaining after recovery (m**3)
    '''
    return V_max * np.exp(-interim / T_a)
| [
"numpy.exp",
"numpy.sin",
"numpy.cos",
"scipy.optimize.brentq"
] | [((4637, 4677), 'scipy.optimize.brentq', 'opt.brentq', (['find_t_max'], {'a': '(T_d / 2)', 'b': 'T_d'}), '(find_t_max, a=T_d / 2, b=T_d)\n', (4647, 4677), True, 'import scipy.optimize as opt\n'), ((5996, 6019), 'numpy.exp', 'exp', (['(-1 * interim / T_a)'], {}), '(-1 * interim / T_a)\n', (5999, 6019), False, 'from numpy import exp\n'), ((4421, 4456), 'numpy.exp', 'np.exp', (['(-(2 * sigma * t_max) / beta)'], {}), '(-(2 * sigma * t_max) / beta)\n', (4427, 4456), True, 'import numpy as np\n'), ((4861, 4886), 'numpy.cos', 'np.cos', (['(2 * sigma * t_max)'], {}), '(2 * sigma * t_max)\n', (4867, 4886), True, 'import numpy as np\n'), ((4330, 4355), 'numpy.cos', 'np.cos', (['(2 * sigma * t_max)'], {}), '(2 * sigma * t_max)\n', (4336, 4355), True, 'import numpy as np\n'), ((4380, 4405), 'numpy.sin', 'np.sin', (['(2 * sigma * t_max)'], {}), '(2 * sigma * t_max)\n', (4386, 4405), True, 'import numpy as np\n')] |
import math
import numpy as np
from openmdao.api import ScipyOptimizeDriver
from ema.core.problems.benchmark_problem import BenchmarkProblem
from ema.core.models.mdf_models import Actuator, ActuatorBlackBox
import matplotlib.pyplot as plt
class MDFProblem(BenchmarkProblem):
    """MDF formulation of the EMA actuator sizing benchmark.

    Builds either the analytic ``Actuator`` model or its black-box wrapper,
    registers design variables/constraints/objective and configures a SciPy
    optimizer driver, then provides bookkeeping helpers (evaluation counts,
    post-optimization success checks).
    """

    def __init__(self, **kwargs):
        """Build the OpenMDAO model and initialize the optimization problem.

        Keyword arguments are forwarded to ``BenchmarkProblem``; the base
        class is expected to set ``self.blackbox``, ``self.optimizer``,
        ``self.derivative_method``, ``self.N``, ``self.t`` and ``self.problem``.
        """
        super(MDFProblem, self).__init__(**kwargs)
        if not self.blackbox:
            self.problem.model = Actuator(optimizer=self.optimizer, derivative_method=self.derivative_method,
                                          N=self.N, t=self.t)
        else:
            self.problem.model = ActuatorBlackBox(optimizer=self.optimizer,
                                                  derivative_method=self.derivative_method, N=self.N,
                                                  t=self.t)
        self.initialize_problem()

    def initialize_problem(self):
        """Declare design variables, constraints, objective and the driver."""
        model = self.problem.model
        problem = self.problem
        # Trajectory parameterization coefficients.
        model.add_design_var('a_n', lower=-1e0, upper=1e0)
        model.add_design_var('b_n', lower=-1e0, upper=1e0)
        # Final position/velocity equality constraints expressed as bounds.
        model.add_constraint('X_final', lower=0.15, upper=0.15)
        model.add_constraint('V_final', lower=0.0, upper=0.0)
        if self.prob_type == 'MDO':
            # Adding design variables
            model.add_design_var('N_red', lower=1., upper=8.)
            # Adding constraints
            model.add_constraint('W_mot_constr', upper=0.)
            # Adding objective
            model.add_objective('M_mot')
        # Setting approx_totals if monolythic finite difference requested
        if self.derivative_method == 'monolythic_fd' and not self.blackbox:
            model.approx_totals(method='fd')
        # Setting optimizer
        problem.driver = ScipyOptimizeDriver()
        if self.optimizer == 'COBYLA':
            problem.driver.options['optimizer'] = 'COBYLA'
            problem.driver.options['maxiter'] = 50000
        elif self.optimizer == 'SLSQP':
            problem.driver.options['optimizer'] = 'SLSQP'
            problem.driver.options['maxiter'] = 10000
        else:
            # Fixed: the original `raise('Unknown optimizer' + ...)` raised a
            # plain string, which is itself a TypeError in Python 3.
            raise ValueError('Unknown optimizer ' + self.optimizer)
        problem.driver.options['tol'] = self.tol
        # More design variables than functions but the adjoint fails
        problem.setup(mode='fwd')

    def number_of_evaluations(self):
        """Return ``(num_compute, num_compute_partials)`` and log them.

        Counts are read from the motor-torque component; ``-1`` compensates
        for the evaluation performed during ``setup``.
        """
        if not self.blackbox:
            num_compute = self.problem.model.MDA.motor_torque.num_compute - 1
            num_compute_partials = self.problem.model.MDA.motor_torque.num_compute_partials - 1
        else:
            num_compute = self.problem.model.problem.model.MDA.motor_torque.num_compute - 1
            num_compute_partials = self.problem.model.problem.model.MDA.motor_torque.num_compute_partials - 1
        s = '------- Number of evaluations ------- \n' + \
            'Number of function evaluations: ' + str(num_compute) + '\n' + \
            'Number of derivative evaluations: ' + str(num_compute_partials) + '\n' + \
            '------------------------------------- \n'
        if self.print:
            print(s)
        # Context manager guarantees the log file is closed even on error.
        with open(self.log_file, 'a+') as log_file:
            log_file.writelines(s)
        return num_compute, num_compute_partials

    def check_success(self):
        """Verify system consistency of the optimization result.

        Recomputes the motor torque and inertia from their defining
        relations and checks them (plus the terminal constraints and, for
        MDO, the objective and speed constraint) against the solver output
        within ``self.tol``. Returns True when every check passes.
        """
        tol = self.tol
        # RMS electromagnetic torque recomputed from the coupling relation.
        T_em_real = (np.mean((self.problem['J_mot'] * self.problem['A_rms'] *
                              self.problem['N_red'] / self.problem['p'] + self.problem['F_ema'] *
                              self.problem['p'] / self.problem['N_red']) ** 2)) ** (1 / 2)
        # Motor inertia recomputed from the scaling law.
        J_mot_real = self.problem['J_mot_ref'] * (abs(self.problem['T_em']) /
                                                  self.problem['T_em_ref']) ** (5.0 / 3.5)
        # Reject NaN/inf torque outright.
        success = (str(self.problem['T_em'][0]) != 'nan') and (str(self.problem['T_em'][0]) != 'inf')
        relative_error = abs(abs(self.problem['T_em'][0]) - abs(T_em_real)) / self.problem['T_em']
        success = success and math.isclose(relative_error, 0., abs_tol=tol)
        relative_error = abs(abs(self.problem['J_mot'][0]) - abs(J_mot_real)) / self.problem['J_mot']
        success = success and math.isclose(relative_error, 0., abs_tol=tol)
        relative_error = abs(self.problem['V_final'])
        success = success and math.isclose(relative_error, 0., abs_tol=tol)
        relative_error = (abs(self.problem['X_final']) - 0.15) / 0.15
        success = success and math.isclose(relative_error, 0., abs_tol=tol)
        if self.prob_type == 'MDO':
            if self.scale == 1.0:
                relative_error = abs(abs(self.problem['M_mot']) - \
                                     abs(self.optimal_value)) / self.optimal_value
                # * 1000 due to scaler
                success = success and math.isclose(relative_error, 0., abs_tol=tol * 1000)
            relative_error = abs(self.problem['W_mot_constr']) / self.problem['W_mot']
            success = success and math.isclose(relative_error, 0., abs_tol=tol)
        s = 'Success in solving system consistency: ' + str(success) + '\n' \
            'Motor mass: ' + str(self.problem['M_mot'][0]) + '\n' \
            'Motor torque value: ' + str(self.problem['T_em'][0]) + '\n' \
            'Motor inertia value: ' + str(self.problem['J_mot'][0]) + '\n' \
            'A_rms: ' + str(self.problem['A_rms'][0]) + '\n' \
            'T_em: ' + str(self.problem['T_em'][0]) + '\n' \
            'X_final: ' + str(self.problem['X_final'][0]) + '\n' \
            'V_final: ' + str(self.problem['V_final'][0]) + '\n' \
            'V_max: ' + str(np.max(self.problem['V_ema'])) + '\n' \
            'N_red: ' + str(self.problem['N_red'][0]) + '\n' \
            'Motor speed constraint: ' + str(np.max(self.problem['W_mot_constr'])) + '\n'
        if self.plot:
            # Position / velocity / acceleration traces over the time grid.
            t = self.t
            X_ema = self.problem['X_ema']
            V_ema = self.problem['V_ema']
            A_ema = self.problem['A_ema']
            fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
            ax1.plot(t, X_ema)
            ax2.plot(t, V_ema)
            ax3.plot(t, A_ema)
            plt.show()
        if self.print:
            with open(self.log_file, 'a+') as log_file:
                log_file.writelines(s)
            print(s)
        return success
| [
"numpy.mean",
"math.isclose",
"ema.core.models.mdf_models.ActuatorBlackBox",
"numpy.max",
"matplotlib.pyplot.subplots",
"openmdao.api.ScipyOptimizeDriver",
"ema.core.models.mdf_models.Actuator",
"matplotlib.pyplot.show"
] | [((427, 527), 'ema.core.models.mdf_models.Actuator', 'Actuator', ([], {'optimizer': 'self.optimizer', 'derivative_method': 'self.derivative_method', 'N': 'self.N', 't': 'self.t'}), '(optimizer=self.optimizer, derivative_method=self.derivative_method,\n N=self.N, t=self.t)\n', (435, 527), False, 'from ema.core.models.mdf_models import Actuator, ActuatorBlackBox\n'), ((613, 722), 'ema.core.models.mdf_models.ActuatorBlackBox', 'ActuatorBlackBox', ([], {'optimizer': 'self.optimizer', 'derivative_method': 'self.derivative_method', 'N': 'self.N', 't': 'self.t'}), '(optimizer=self.optimizer, derivative_method=self.\n derivative_method, N=self.N, t=self.t)\n', (629, 722), False, 'from ema.core.models.mdf_models import Actuator, ActuatorBlackBox\n'), ((1770, 1791), 'openmdao.api.ScipyOptimizeDriver', 'ScipyOptimizeDriver', ([], {}), '()\n', (1789, 1791), False, 'from openmdao.api import ScipyOptimizeDriver\n'), ((3478, 3661), 'numpy.mean', 'np.mean', (["((self.problem['J_mot'] * self.problem['A_rms'] * self.problem['N_red'] /\n self.problem['p'] + self.problem['F_ema'] * self.problem['p'] / self.\n problem['N_red']) ** 2)"], {}), "((self.problem['J_mot'] * self.problem['A_rms'] * self.problem[\n 'N_red'] / self.problem['p'] + self.problem['F_ema'] * self.problem['p'\n ] / self.problem['N_red']) ** 2)\n", (3485, 3661), True, 'import numpy as np\n'), ((4126, 4172), 'math.isclose', 'math.isclose', (['relative_error', '(0.0)'], {'abs_tol': 'tol'}), '(relative_error, 0.0, abs_tol=tol)\n', (4138, 4172), False, 'import math\n'), ((4304, 4350), 'math.isclose', 'math.isclose', (['relative_error', '(0.0)'], {'abs_tol': 'tol'}), '(relative_error, 0.0, abs_tol=tol)\n', (4316, 4350), False, 'import math\n'), ((4435, 4481), 'math.isclose', 'math.isclose', (['relative_error', '(0.0)'], {'abs_tol': 'tol'}), '(relative_error, 0.0, abs_tol=tol)\n', (4447, 4481), False, 'import math\n'), ((4581, 4627), 'math.isclose', 'math.isclose', (['relative_error', '(0.0)'], {'abs_tol': 'tol'}), 
'(relative_error, 0.0, abs_tol=tol)\n', (4593, 4627), False, 'import math\n'), ((6129, 6147), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (6141, 6147), True, 'import matplotlib.pyplot as plt\n'), ((6255, 6265), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6263, 6265), True, 'import matplotlib.pyplot as plt\n'), ((5100, 5146), 'math.isclose', 'math.isclose', (['relative_error', '(0.0)'], {'abs_tol': 'tol'}), '(relative_error, 0.0, abs_tol=tol)\n', (5112, 5146), False, 'import math\n'), ((4926, 4979), 'math.isclose', 'math.isclose', (['relative_error', '(0.0)'], {'abs_tol': '(tol * 1000)'}), '(relative_error, 0.0, abs_tol=tol * 1000)\n', (4938, 4979), False, 'import math\n'), ((5879, 5915), 'numpy.max', 'np.max', (["self.problem['W_mot_constr']"], {}), "(self.problem['W_mot_constr'])\n", (5885, 5915), True, 'import numpy as np\n'), ((5731, 5760), 'numpy.max', 'np.max', (["self.problem['V_ema']"], {}), "(self.problem['V_ema'])\n", (5737, 5760), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image
from feature_extractor import FeatureExtractor
from datetime import datetime
from flask import Flask, request, render_template ,jsonify
from pathlib import Path
from flask import jsonify
from io import BytesIO
import base64
import re
from urllib.request import urlopen
import json
app = Flask(__name__)
# test data
# NOTE(review): the three city dicts below (and `cities`) are sample data;
# nothing else in this module reads them.
tpe = {
    "id": 0,
    "city_name": "Taipei",
    "country_name": "Taiwan",
    "is_capital": True,
    "location": {
        "longitude": 121.569649,
        "latitude": 25.036786
    }
}
nyc = {
    "id": 1,
    "city_name": "New York",
    "country_name": "United States",
    "is_capital": False,
    "location": {
        "longitude": -74.004364,
        "latitude": 40.710405
    }
}
ldn = {
    "id": 2,
    "city_name": "London",
    "country_name": "United Kingdom",
    "is_capital": True,
    "location": {
        "longitude": -0.114089,
        "latitude": 51.507497
    }
}
cities = [tpe, nyc, ldn]
# Read image features
# Load every pre-computed .npy feature vector and build, in parallel order:
#   features   - feature matrix used by the routes for similarity search
#   img_paths  - remote card-image URL derived from the file stem
#   cardNumber - raw file stem, later split on '_' to rebuild the card id
fe = FeatureExtractor()
features = []
img_paths = []
cardNumber = []
for feature_path in Path("./static/feature").glob("*.npy"):
    features.append(np.load(feature_path))
    filenamestr = feature_path.stem+".jpg"
    cardfirst = filenamestr[0:1]  # first character: top-level image folder
    cardsecond = filenamestr[0:filenamestr.rfind('_')]  # stem up to last '_'
    cardthird = filenamestr[0:filenamestr.rfind('.')]  # stem without ".jpg"
    img_paths.append("https://ws-tcg.com/wordpress/wp-content/images/cardlist/"+cardfirst+"/"+cardsecond+"/"+cardthird+".png")
    cardNumber.append(feature_path.stem)
features = np.array(features)  # stack into one 2-D array for vectorized distances
def _run_search():
    """Decode the uploaded image, run the similarity search, collect results.

    Reads the POSTed form fields ``query_img`` (file) and ``imgimg``
    (base64-encoded JPEG data URI), extracts a feature vector, ranks the
    precomputed ``features`` by L2 distance and resolves the top-5 matches
    against the remote card-mapping JSON.

    Returns:
        (uploaded_img_path, scores, cardPrice, cardNumberList) where
        ``scores`` is a list of (distance, image_url) pairs.
    """
    file = request.files['query_img']
    b64_full = request.values.get('imgimg')
    # Raw string: the original 'data:image\/jpeg;base64,' used an invalid
    # string escape; the regex match ('/' after \) is unchanged.
    b64_data = re.sub(r'data:image/jpeg;base64,', '', b64_full)
    img = Image.open(BytesIO(base64.b64decode(b64_data)))
    # Timestamped path (':' is not filesystem-safe); the file itself is not
    # saved — the original img.save call was commented out as well.
    uploaded_img_path = "static/uploaded/" + datetime.now().isoformat().replace(":", ".") + "_" + file.filename
    url = "https://storage.googleapis.com/divine-vehicle-292507.appspot.com/resource/cardMappingList.json"
    response = urlopen(url)
    data_json = json.loads(response.read())
    # Run search: L2 distances to every stored feature vector, keep top 5.
    query = fe.extract(img)
    dists = np.linalg.norm(features - query, axis=1)
    top_ids = np.argsort(dists)[:5]
    # NOTE(review): dists entries are numpy floats; jsonify-ing them relies
    # on the JSON encoder accepting numpy scalars — behavior kept as-is.
    scores = [(dists[i], img_paths[i]) for i in top_ids]
    cardNumberList = []
    cardPrice = []
    for i in top_ids:
        stem = cardNumber[i]
        first_us = stem.find("_")
        last_us = stem.rfind("_")
        # "SET_123_X" -> "SET/123-X"
        card_no = stem[:first_us] + "/" + stem[first_us + 1:last_us] + "-" + stem[last_us + 1:]
        cardNumberList.append(card_no)
        try:
            price = data_json[card_no.upper()]["VER"] + "," + data_json[card_no.upper()]["CID"]
        except (KeyError, TypeError):
            # Card missing from the mapping (or non-string fields):
            # fall back to the original sentinel value.
            price = [0]
        cardPrice.append(price)
    return uploaded_img_path, scores, cardPrice, cardNumberList


@app.route('/', methods=['GET', 'POST'])
def index():
    """HTML search page; on POST renders the top-5 similar cards."""
    if request.method == 'POST':
        uploaded_img_path, scores, cardPrice, cardNumberList = _run_search()
        return render_template('index.html',
                               query_path=uploaded_img_path,
                               scores=scores,
                               cardPrice=cardPrice,
                               cardNumberList=cardNumberList)
    else:
        return render_template('index.html')


@app.route('/api', methods=['GET', 'POST'])
def api():
    """JSON variant of index(): same search, machine-readable response."""
    if request.method == 'POST':
        _, scores, cardPrice, cardNumberList = _run_search()
        return jsonify({'scores': scores, 'cardPrice': cardPrice, 'cardNumberList': cardNumberList})
    else:
        return render_template('index.html')
if __name__ == "__main__":
    # Bind on all interfaces so the dev server is reachable from outside.
    app.run("0.0.0.0")
| [
"flask.render_template",
"pathlib.Path",
"flask.Flask",
"flask.jsonify",
"base64.b64decode",
"numpy.argsort",
"numpy.array",
"datetime.datetime.now",
"flask.request.values.get",
"numpy.linalg.norm",
"re.sub",
"numpy.load",
"urllib.request.urlopen",
"feature_extractor.FeatureExtractor"
] | [((329, 344), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (334, 344), False, 'from flask import Flask, request, render_template, jsonify\n'), ((1001, 1019), 'feature_extractor.FeatureExtractor', 'FeatureExtractor', ([], {}), '()\n', (1017, 1019), False, 'from feature_extractor import FeatureExtractor\n'), ((1537, 1555), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1545, 1555), True, 'import numpy as np\n'), ((1085, 1109), 'pathlib.Path', 'Path', (['"""./static/feature"""'], {}), "('./static/feature')\n", (1089, 1109), False, 'from pathlib import Path\n'), ((1145, 1166), 'numpy.load', 'np.load', (['feature_path'], {}), '(feature_path)\n', (1152, 1166), True, 'import numpy as np\n'), ((1705, 1733), 'flask.request.values.get', 'request.values.get', (['"""imgimg"""'], {}), "('imgimg')\n", (1723, 1733), False, 'from flask import Flask, request, render_template, jsonify\n'), ((1748, 1796), 're.sub', 're.sub', (['"""data:image\\\\/jpeg;base64,"""', '""""""', 'b64Full'], {}), "('data:image\\\\/jpeg;base64,', '', b64Full)\n", (1754, 1796), False, 'import re\n'), ((2396, 2408), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (2403, 2408), False, 'from urllib.request import urlopen\n'), ((2561, 2601), 'numpy.linalg.norm', 'np.linalg.norm', (['(features - query)'], {'axis': '(1)'}), '(features - query, axis=1)\n', (2575, 2601), True, 'import numpy as np\n'), ((3443, 3573), 'flask.render_template', 'render_template', (['"""index.html"""'], {'query_path': 'uploaded_img_path', 'scores': 'scores', 'cardPrice': 'cardPrice', 'cardNumberList': 'cardNumberList'}), "('index.html', query_path=uploaded_img_path, scores=scores,\n cardPrice=cardPrice, cardNumberList=cardNumberList)\n", (3458, 3573), False, 'from flask import Flask, request, render_template, jsonify\n'), ((3719, 3748), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (3734, 3748), False, 'from flask import Flask, request, 
render_template, jsonify\n'), ((3900, 3928), 'flask.request.values.get', 'request.values.get', (['"""imgimg"""'], {}), "('imgimg')\n", (3918, 3928), False, 'from flask import Flask, request, render_template, jsonify\n'), ((3943, 3991), 're.sub', 're.sub', (['"""data:image\\\\/jpeg;base64,"""', '""""""', 'b64Full'], {}), "('data:image\\\\/jpeg;base64,', '', b64Full)\n", (3949, 3991), False, 'import re\n'), ((4591, 4603), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (4598, 4603), False, 'from urllib.request import urlopen\n'), ((4756, 4796), 'numpy.linalg.norm', 'np.linalg.norm', (['(features - query)'], {'axis': '(1)'}), '(features - query, axis=1)\n', (4770, 4796), True, 'import numpy as np\n'), ((5638, 5727), 'flask.jsonify', 'jsonify', (["{'scores': scores, 'cardPrice': cardPrice, 'cardNumberList': cardNumberList}"], {}), "({'scores': scores, 'cardPrice': cardPrice, 'cardNumberList':\n cardNumberList})\n", (5645, 5727), False, 'from flask import jsonify\n'), ((5749, 5778), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (5764, 5778), False, 'from flask import Flask, request, render_template, jsonify\n'), ((2642, 2659), 'numpy.argsort', 'np.argsort', (['dists'], {}), '(dists)\n', (2652, 2659), True, 'import numpy as np\n'), ((4837, 4854), 'numpy.argsort', 'np.argsort', (['dists'], {}), '(dists)\n', (4847, 4854), True, 'import numpy as np\n'), ((1847, 1868), 'base64.b64decode', 'base64.b64decode', (['b64'], {}), '(b64)\n', (1863, 1868), False, 'import base64\n'), ((4042, 4063), 'base64.b64decode', 'base64.b64decode', (['b64'], {}), '(b64)\n', (4058, 4063), False, 'import base64\n'), ((2043, 2057), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2055, 2057), False, 'from datetime import datetime\n'), ((4238, 4252), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4250, 4252), False, 'from datetime import datetime\n')] |
"""
"""
import numpy
import os
import pickle
import shutil
import tarfile
from six.moves import range, urllib
import datasets
class SourceCifar10(object):
    """Data source for the CIFAR-10 dataset (python-pickle distribution).

    Downloads the archive on demand, loads the requested subset (training
    or test batches) fully into memory and serves mini-batches of images
    scaled to the range [-1.0, 1.0].
    """

    @staticmethod
    def default_data_path(dataset):
        """Return the default local directory for the CIFAR-10 batch files.

        The ``dataset`` argument is unused; it is kept for interface
        symmetry with the other source classes.
        """
        path_home = os.path.expanduser('~')

        return os.path.join(path_home, 'datasets', 'cifar-10-batches-py')

    @staticmethod
    def subsets():
        """Return the dataset identifiers this source can serve."""
        return [
            datasets.DATASET_CIFAR_10_TRAINING,
            datasets.DATASET_CIFAR_10_TEST]

    @staticmethod
    def include(dataset):
        """Return True if ``dataset`` is one of the CIFAR-10 subsets."""
        return dataset in SourceCifar10.subsets()

    @staticmethod
    def download(dataset, data_path):
        """Download and unpack CIFAR-10 into ``data_path`` (idempotent).

        Fetches https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz,
        extracts it and flattens the archive so that the result is:

            data_path/data_batch_1 ... data_path/data_batch_5
            data_path/test_batch

        Returns early without touching the network when all six batch
        files are already present.
        """
        if data_path is None:
            data_path = SourceCifar10.default_data_path(dataset)

        all_there = True

        # check if all training batches are ready.
        for i in range(1, 6):
            file_name = 'data_batch_{}'.format(i)
            file_path = os.path.join(data_path, file_name)

            if not os.path.isfile(file_path):
                all_there = False
                break

        # check if the test batch is ready.
        file_path = os.path.join(data_path, 'test_batch')

        if not os.path.isfile(file_path):
            all_there = False

        # return if all batches are downloaded, unzipped and moved.
        if all_there:
            return

        if not os.path.isdir(data_path):
            os.makedirs(data_path)

        # download destination: data_path/cifar-10-python.tar.gz
        gzip_path = os.path.join(data_path, 'cifar-10-python.tar.gz')

        url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'

        print('downloading {}'.format(url))

        urllib.request.urlretrieve(url, gzip_path)

        # the archive expands to data_path/cifar-10-batches-py/*
        temp_source_path = os.path.join(data_path, 'cifar-10-batches-py')

        # NOTE(review): extractall on an untrusted archive can write outside
        # data_path on old Python versions; the URL here is a fixed, trusted
        # host, so behavior is kept as-is.
        with tarfile.open(gzip_path) as tar:
            tar.extractall(data_path)

        # flatten: move data_path/cifar-10-batches-py/* into data_path
        for name in os.listdir(temp_source_path):
            source_path = os.path.join(temp_source_path, name)
            target_path = os.path.join(data_path, name)

            shutil.move(source_path, target_path)

        # clean up the now-empty directory and the archive.
        shutil.rmtree(temp_source_path)
        os.remove(gzip_path)

    @staticmethod
    def pre_process(dataset, data_path):
        """Hook for optional preprocessing; CIFAR-10 needs none."""

    @staticmethod
    def default_map_fn(img):
        """Remap an image; the default mapping is the identity."""
        return img

    def __init__(self, dataset, range_percentage=(0, 100), data_path=None):
        """Load a subset of CIFAR-10 into memory.

        dataset: one of the identifiers returned by ``subsets()``.
        range_percentage: (head, tail) percentages selecting a slice of
            the loaded samples; raises when the slice is empty.
        data_path: batch-file directory; defaults to
            ``default_data_path(dataset)``.
        """
        if data_path is None:
            data_path = SourceCifar10.default_data_path(dataset)

        SourceCifar10.download(dataset, data_path)
        SourceCifar10.pre_process(dataset, data_path)

        if dataset == datasets.DATASET_CIFAR_10_TRAINING:
            names = ['data_batch_{}'.format(i) for i in range(1, 6)]
        else:
            names = ['test_batch']

        self._labels = []
        self._images = []

        for name in names:
            file_path = os.path.join(data_path, name)

            if not os.path.isfile(file_path):
                raise Exception('can not find {}'.format(file_path))

            with open(file_path, 'rb') as f:
                batch = pickle.load(f)

                images = batch[b'data']
                labels = batch[b'labels']

                labels = numpy.array(labels)

                # raw layout is (N, 3072) channel-first; reshape to NHWC
                # and scale pixel values from [0, 255] to [-1.0, 1.0].
                images = images.reshape(10000, 3, 32, 32)
                images = images.transpose(0, 2, 3, 1)
                images = images.astype(numpy.float32)
                images = images / 127.5 - 1.0

                self._images.append(images)
                self._labels.append(labels)

        self._images = numpy.concatenate(self._images)
        self._labels = numpy.concatenate(self._labels)

        # NOTE: range must be dealt within each source due to the layout of
        # sources may be different.
        head, tail = range_percentage
        size = self._labels.shape[0]

        head = head * size // 100
        tail = tail * size // 100

        if head >= tail:
            raise Exception('the range is too narrow')

        self._images = self._images[head:tail]
        self._labels = self._labels[head:tail]

    @property
    def cite(self):
        """Citation for the dataset."""
        return """
            Learning Multiple Layers of Features from Tiny Images,
            <NAME>, 2009.
        """

    @property
    def info(self):
        """Free-form description of this source."""
        return 'haha'

    @property
    def size(self):
        """Number of samples currently loaded (after range slicing)."""
        return self._labels.shape[0]

    def batch(self, idx_list=(), map_fn=default_map_fn.__func__,
              one_hot=False, **options):
        """Fetch a mini-batch of images and labels.

        idx_list: iterable of sample indices. (Default changed from a
            shared mutable list ``[]`` to an immutable tuple; the argument
            is only read, so callers see no behavior change.)
        map_fn: map_fn(source_numpy_array) -> target_numpy_array applied
            to each image before batching.
        one_hot: return one-hot encoded labels if True.

        Returns (images, labels); both are None when idx_list is empty.
        """
        cnt = len(idx_list)
        ims = None
        idx = None

        for i, j in enumerate(idx_list):
            if j >= self._labels.shape[0]:
                raise Exception('invalid index {}'.format(j))

            img = self._images[j]
            img = map_fn(img)

            # allocate the output arrays lazily, once the mapped image
            # shape is known.
            if ims is None:
                ims = numpy.zeros((cnt,) + img.shape)
                idx = numpy.zeros((cnt,), dtype=numpy.int32)

            ims[i] = img
            idx[i] = self._labels[j]

        if one_hot:
            tmp = idx
            idx = numpy.zeros((cnt, 10), dtype=numpy.float32)
            idx[numpy.arange(cnt), tmp] = 1.0

        return ims, idx
| [
"os.listdir",
"six.moves.range",
"tarfile.open",
"os.makedirs",
"shutil.move",
"numpy.arange",
"os.path.join",
"pickle.load",
"os.path.isfile",
"numpy.array",
"numpy.zeros",
"os.path.isdir",
"six.moves.urllib.request.urlretrieve",
"numpy.concatenate",
"shutil.rmtree",
"os.path.expandus... | [((273, 296), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (291, 296), False, 'import os\n'), ((313, 371), 'os.path.join', 'os.path.join', (['path_home', '"""datasets"""', '"""cifar-10-batches-py"""'], {}), "(path_home, 'datasets', 'cifar-10-batches-py')\n", (325, 371), False, 'import os\n'), ((1189, 1200), 'six.moves.range', 'range', (['(1)', '(6)'], {}), '(1, 6)\n', (1194, 1200), False, 'from six.moves import range, urllib\n'), ((1475, 1512), 'os.path.join', 'os.path.join', (['data_path', '"""test_batch"""'], {}), "(data_path, 'test_batch')\n", (1487, 1512), False, 'import os\n'), ((1877, 1926), 'os.path.join', 'os.path.join', (['data_path', '"""cifar-10-python.tar.gz"""'], {}), "(data_path, 'cifar-10-python.tar.gz')\n", (1889, 1926), False, 'import os\n'), ((2073, 2115), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'gzip_path'], {}), '(url, gzip_path)\n', (2099, 2115), False, 'from six.moves import range, urllib\n'), ((2166, 2212), 'os.path.join', 'os.path.join', (['data_path', '"""cifar-10-batches-py"""'], {}), "(data_path, 'cifar-10-batches-py')\n", (2178, 2212), False, 'import os\n'), ((2489, 2517), 'os.listdir', 'os.listdir', (['temp_source_path'], {}), '(temp_source_path)\n', (2499, 2517), False, 'import os\n'), ((2745, 2776), 'shutil.rmtree', 'shutil.rmtree', (['temp_source_path'], {}), '(temp_source_path)\n', (2758, 2776), False, 'import shutil\n'), ((2836, 2856), 'os.remove', 'os.remove', (['gzip_path'], {}), '(gzip_path)\n', (2845, 2856), False, 'import os\n'), ((4363, 4394), 'numpy.concatenate', 'numpy.concatenate', (['self._images'], {}), '(self._images)\n', (4380, 4394), False, 'import numpy\n'), ((4418, 4449), 'numpy.concatenate', 'numpy.concatenate', (['self._labels'], {}), '(self._labels)\n', (4435, 4449), False, 'import numpy\n'), ((1276, 1310), 'os.path.join', 'os.path.join', (['data_path', 'file_name'], {}), '(data_path, file_name)\n', (1288, 1310), False, 
'import os\n'), ((1529, 1554), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1543, 1554), False, 'import os\n'), ((1712, 1736), 'os.path.isdir', 'os.path.isdir', (['data_path'], {}), '(data_path)\n', (1725, 1736), False, 'import os\n'), ((1750, 1772), 'os.makedirs', 'os.makedirs', (['data_path'], {}), '(data_path)\n', (1761, 1772), False, 'import os\n'), ((2338, 2361), 'tarfile.open', 'tarfile.open', (['gzip_path'], {}), '(gzip_path)\n', (2350, 2361), False, 'import tarfile\n'), ((2545, 2581), 'os.path.join', 'os.path.join', (['temp_source_path', 'name'], {}), '(temp_source_path, name)\n', (2557, 2581), False, 'import os\n'), ((2608, 2637), 'os.path.join', 'os.path.join', (['data_path', 'name'], {}), '(data_path, name)\n', (2620, 2637), False, 'import os\n'), ((2651, 2688), 'shutil.move', 'shutil.move', (['source_path', 'target_path'], {}), '(source_path, target_path)\n', (2662, 2688), False, 'import shutil\n'), ((3679, 3708), 'os.path.join', 'os.path.join', (['data_path', 'name'], {}), '(data_path, name)\n', (3691, 3708), False, 'import os\n'), ((6085, 6128), 'numpy.zeros', 'numpy.zeros', (['(cnt, 10)'], {'dtype': 'numpy.float32'}), '((cnt, 10), dtype=numpy.float32)\n', (6096, 6128), False, 'import numpy\n'), ((1331, 1356), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1345, 1356), False, 'import os\n'), ((3729, 3754), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (3743, 3754), False, 'import os\n'), ((3895, 3909), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3906, 3909), False, 'import pickle\n'), ((4018, 4037), 'numpy.array', 'numpy.array', (['labels'], {}), '(labels)\n', (4029, 4037), False, 'import numpy\n'), ((5868, 5899), 'numpy.zeros', 'numpy.zeros', (['((cnt,) + img.shape)'], {}), '((cnt,) + img.shape)\n', (5879, 5899), False, 'import numpy\n'), ((5922, 5960), 'numpy.zeros', 'numpy.zeros', (['(cnt,)'], {'dtype': 'numpy.int32'}), '((cnt,), dtype=numpy.int32)\n', 
(5933, 5960), False, 'import numpy\n'), ((3512, 3523), 'six.moves.range', 'range', (['(1)', '(6)'], {}), '(1, 6)\n', (3517, 3523), False, 'from six.moves import range, urllib\n'), ((6145, 6162), 'numpy.arange', 'numpy.arange', (['cnt'], {}), '(cnt)\n', (6157, 6162), False, 'import numpy\n')] |
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
# Demo 1: two line plots of y = x^2 + 1 (red) and y = x (green).
x = np.linspace(0, 10, 100)
y1 = x**2 + 1
y2 = x
plt.figure('LearnPLT')
plt.plot(x, y1, c='r')
plt.plot(x, y2, c='g')
plt.show()
# Demo 2: custom ticks, LaTeX tick labels, axis limits, and axis labels.
plt.figure('Continue learning PLT')
new_ticks = np.linspace(0, 100, 11)
print(new_ticks)
plt.xticks(new_ticks)
plt.yticks([1, 2, 3, 4, 5], [r'$\alpha num_1$', r'$\beta num_2$', r'$\gamma num_3$', r'$\zeta num_4$', r'$\phi num_5$'])
# NOTE(review): this plots y2 against y1 (not against x) — presumably
# intentional for the demo, but worth confirming.
plt.plot(y1, y2, c='b')
plt.xlim(0, 101)
plt.ylim(-1, 10)
plt.xlabel('this is x')
plt.ylabel('this is y')
plt.show()
# Demo 3: recolor the top spine and move bottom/left spines to the origin.
plt.figure()
# get current axis
ax = plt.gca()
print(ax)
ax.spines['top'].set_color('red')
ax.spines['bottom'].set_position(('data', 0))
ax.spines['left'].set_position(('data', 0))
plt.show()
# Demo 4: build a legend from the Line2D handles returned by plt.plot.
plt.figure(num=2)
l1, = plt.plot(x, y1, label='line 1') # trailing comma unpacks the single-element list plot() returns
l2, = plt.plot(x, y2, label='line 2')
print(l1, l2)
# plt.legend(loc='upper left')
plt.legend(handles=[l1, l2], labels=['l1', 'l2'], loc='best')
plt.show()
def fun(input_x):
    """Return the line y = 2*x + 1 evaluated at input_x."""
    doubled = 2 * input_x
    return doubled + 1
# some basic plotting
# Demo: plot y = 2x + 1 and annotate the point (1, 3) with an arrow and text.
x = np.linspace(-3, 3, 100)
y = fun  # alias the function so y(x0) below reads like math
plt.figure('Annotations')
plt.plot(x, y(x), label=r'$y = 2 x + 1$')
plt.legend(loc='best')
ax = plt.gca()
# Hide the top/right spines and move the remaining two through the origin.
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['bottom'].set_position(('data', 0))
ax.spines['left'].set_position(('data', 0))
# Mark the point (x0, y(x0)) with a dashed drop line and a dot.
x0 = 1
y0 = y(x0)
plt.plot([x0, x0], [0, y0], 'k--', linewidth=2.5)
plt.scatter([x0], [y0], s=50, c='b')
# make annotations
plt.annotate(r'$2x+1=%s$' % y0, xy=(x0, y0), xycoords='data', xytext=(+30, -30),
             textcoords='offset points', fontsize=16,
             arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=.2'))
# xy: position of point to annotate
# xycoords='data': choose the position based on the data
# xytext: the position relative to the point
# put text
plt.text(-4, 3, r'$This\ is\ a\ special\ point$', fontdict={'size': 16, 'color': 'r'})
plt.show()
# Scatter
# Demo: 1024 random points colored by their angle, with both axes hidden.
plt.figure('Scatter')
n = 1024 # data size
x = np.random.rand(n)
y = np.random.rand(n)
T = np.arctan2(x, y) # for color value
plt.scatter(x, y, c=T, alpha=0.5)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xticks(()) # ignore x ticks
plt.yticks(()) # ignore y ticks
plt.show()
# Bar
# Demo: mirrored bar chart with a value label above/below each bar.
plt.figure('Bar')
n = 12
x = np.arange(n)
# Random heights that shrink toward the right edge of the chart.
y1 = (1 - x / float(n)) * np.random.uniform(0.5, 1.0, n)
y2 = (1 - x / float(n)) * np.random.uniform(0.5, 1.0, n)
plt.bar(x, +y1, facecolor='#9999ff', edgecolor='white')
plt.bar(x, -y2, facecolor='#ff9999', edgecolor='white')
# ha: horizontal alignment
# va: vertical alignment
for xa, ya in zip(x, y1):
    plt.text(xa, ya + 0.05, '%.2f' % ya, ha='center', va='bottom')
for xa, ya in zip(x, y2):
    plt.text(xa, -ya - 0.05, '%.2f' % ya, ha='center', va='top')
plt.xlim(-1, n)
plt.xticks(())
plt.ylim(-1.25, 1.25)
plt.yticks(())
plt.show()
# Contour
def f(x, y):
    """Height field for the contour demo (vectorized over numpy arrays)."""
    amplitude = 1 - x / 2 + x ** 5 + y ** 3
    envelope = np.exp(-x ** 2 - y ** 2)
    return amplitude * envelope
# Demo: filled contours of f on a 256x256 grid, overlaid with labeled lines.
plt.figure('Contour')
n = 256
x = np.linspace(-3, 3, n)
y = np.linspace(-3, 3, n)
X, Y = np.meshgrid(x, y)
# contour filling
plt.contourf(X, Y, f(X, Y), 10, alpha=0.75, cmap=plt.cm.hot)
# contour
C = plt.contour(X, Y, f(X, Y), 10, colors='black')
# Label the contour lines in place with their level values.
plt.clabel(C, inline=True, fontsize=10)
plt.xticks(())
plt.yticks(())
plt.show()
# Image
# Demo: show a random 3x3 matrix as an image with a shrunken colorbar.
x = np.random.rand(3, 3)
print(x)
plt.imshow(x, cmap='bone', interpolation='nearest', origin='lower')
plt.colorbar(shrink=0.92)
plt.xticks(())
plt.yticks(())
plt.show()
# plot 3D
# Demo: surface of z = sin(sqrt(x^2 + y^2)) plus a contour projection below.
fig = plt.figure()
# NOTE(review): Axes3D(fig) is deprecated in newer matplotlib; prefer
# fig.add_subplot(projection='3d') — confirm before upgrading.
ax = Axes3D(fig)
x = np.arange(-4, 4, 0.25)
y = np.arange(-4, 4, 0.25)
x, y = np.meshgrid(x, y)
r = np.sqrt(x**2 + y**2)
z = np.sin(r)
# rstride: row
# cstride: column
ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'))
# Project filled contours of z onto the plane z = -2.
ax.contourf(x, y, z, zdir='z', offset=-2, cmap=plt.get_cmap('rainbow'))
ax.set_zlim(-2, 2)
plt.show()
| [
"numpy.sqrt",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"numpy.arctan2",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.scatter",
"matplotlib... | [((96, 119), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (107, 119), True, 'import numpy as np\n'), ((141, 163), 'matplotlib.pyplot.figure', 'plt.figure', (['"""LearnPLT"""'], {}), "('LearnPLT')\n", (151, 163), True, 'import matplotlib.pyplot as plt\n'), ((164, 186), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1'], {'c': '"""r"""'}), "(x, y1, c='r')\n", (172, 186), True, 'import matplotlib.pyplot as plt\n'), ((187, 209), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {'c': '"""g"""'}), "(x, y2, c='g')\n", (195, 209), True, 'import matplotlib.pyplot as plt\n'), ((210, 220), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (218, 220), True, 'import matplotlib.pyplot as plt\n'), ((222, 257), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Continue learning PLT"""'], {}), "('Continue learning PLT')\n", (232, 257), True, 'import matplotlib.pyplot as plt\n'), ((270, 293), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(11)'], {}), '(0, 100, 11)\n', (281, 293), True, 'import numpy as np\n'), ((311, 332), 'matplotlib.pyplot.xticks', 'plt.xticks', (['new_ticks'], {}), '(new_ticks)\n', (321, 332), True, 'import matplotlib.pyplot as plt\n'), ((333, 457), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[1, 2, 3, 4, 5]', "['$\\\\alpha num_1$', '$\\\\beta num_2$', '$\\\\gamma num_3$', '$\\\\zeta num_4$',\n '$\\\\phi num_5$']"], {}), "([1, 2, 3, 4, 5], ['$\\\\alpha num_1$', '$\\\\beta num_2$',\n '$\\\\gamma num_3$', '$\\\\zeta num_4$', '$\\\\phi num_5$'])\n", (343, 457), True, 'import matplotlib.pyplot as plt\n'), ((454, 477), 'matplotlib.pyplot.plot', 'plt.plot', (['y1', 'y2'], {'c': '"""b"""'}), "(y1, y2, c='b')\n", (462, 477), True, 'import matplotlib.pyplot as plt\n'), ((478, 494), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(101)'], {}), '(0, 101)\n', (486, 494), True, 'import matplotlib.pyplot as plt\n'), ((495, 511), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(10)'], {}), '(-1, 10)\n', (503, 
511), True, 'import matplotlib.pyplot as plt\n'), ((512, 535), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""this is x"""'], {}), "('this is x')\n", (522, 535), True, 'import matplotlib.pyplot as plt\n'), ((536, 559), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""this is y"""'], {}), "('this is y')\n", (546, 559), True, 'import matplotlib.pyplot as plt\n'), ((560, 570), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (568, 570), True, 'import matplotlib.pyplot as plt\n'), ((572, 584), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (582, 584), True, 'import matplotlib.pyplot as plt\n'), ((609, 618), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (616, 618), True, 'import matplotlib.pyplot as plt\n'), ((753, 763), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (761, 763), True, 'import matplotlib.pyplot as plt\n'), ((765, 782), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(2)'}), '(num=2)\n', (775, 782), True, 'import matplotlib.pyplot as plt\n'), ((789, 820), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1'], {'label': '"""line 1"""'}), "(x, y1, label='line 1')\n", (797, 820), True, 'import matplotlib.pyplot as plt\n'), ((894, 925), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {'label': '"""line 2"""'}), "(x, y2, label='line 2')\n", (902, 925), True, 'import matplotlib.pyplot as plt\n'), ((971, 1032), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[l1, l2]', 'labels': "['l1', 'l2']", 'loc': '"""best"""'}), "(handles=[l1, l2], labels=['l1', 'l2'], loc='best')\n", (981, 1032), True, 'import matplotlib.pyplot as plt\n'), ((1033, 1043), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1041, 1043), True, 'import matplotlib.pyplot as plt\n'), ((1119, 1142), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(100)'], {}), '(-3, 3, 100)\n', (1130, 1142), True, 'import numpy as np\n'), ((1151, 1176), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Annotations"""'], {}), "('Annotations')\n", 
(1161, 1176), True, 'import matplotlib.pyplot as plt\n'), ((1219, 1241), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1229, 1241), True, 'import matplotlib.pyplot as plt\n'), ((1247, 1256), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1254, 1256), True, 'import matplotlib.pyplot as plt\n'), ((1511, 1560), 'matplotlib.pyplot.plot', 'plt.plot', (['[x0, x0]', '[0, y0]', '"""k--"""'], {'linewidth': '(2.5)'}), "([x0, x0], [0, y0], 'k--', linewidth=2.5)\n", (1519, 1560), True, 'import matplotlib.pyplot as plt\n'), ((1561, 1597), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[x0]', '[y0]'], {'s': '(50)', 'c': '"""b"""'}), "([x0], [y0], s=50, c='b')\n", (1572, 1597), True, 'import matplotlib.pyplot as plt\n'), ((1980, 2073), 'matplotlib.pyplot.text', 'plt.text', (['(-4)', '(3)', '"""$This\\\\ is\\\\ a\\\\ special\\\\ point$"""'], {'fontdict': "{'size': 16, 'color': 'r'}"}), "(-4, 3, '$This\\\\ is\\\\ a\\\\ special\\\\ point$', fontdict={'size': 16,\n 'color': 'r'})\n", (1988, 2073), True, 'import matplotlib.pyplot as plt\n'), ((2067, 2077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2075, 2077), True, 'import matplotlib.pyplot as plt\n'), ((2089, 2110), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Scatter"""'], {}), "('Scatter')\n", (2099, 2110), True, 'import matplotlib.pyplot as plt\n'), ((2139, 2156), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (2153, 2156), True, 'import numpy as np\n'), ((2161, 2178), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (2175, 2178), True, 'import numpy as np\n'), ((2183, 2199), 'numpy.arctan2', 'np.arctan2', (['x', 'y'], {}), '(x, y)\n', (2193, 2199), True, 'import numpy as np\n'), ((2221, 2254), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'T', 'alpha': '(0.5)'}), '(x, y, c=T, alpha=0.5)\n', (2232, 2254), True, 'import matplotlib.pyplot as plt\n'), ((2255, 2269), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], 
{}), '(0, 1)\n', (2263, 2269), True, 'import matplotlib.pyplot as plt\n'), ((2270, 2284), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (2278, 2284), True, 'import matplotlib.pyplot as plt\n'), ((2285, 2299), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (2295, 2299), True, 'import matplotlib.pyplot as plt\n'), ((2318, 2332), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (2328, 2332), True, 'import matplotlib.pyplot as plt\n'), ((2351, 2361), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2359, 2361), True, 'import matplotlib.pyplot as plt\n'), ((2369, 2386), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Bar"""'], {}), "('Bar')\n", (2379, 2386), True, 'import matplotlib.pyplot as plt\n'), ((2398, 2410), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2407, 2410), True, 'import numpy as np\n'), ((2526, 2581), 'matplotlib.pyplot.bar', 'plt.bar', (['x', '(+y1)'], {'facecolor': '"""#9999ff"""', 'edgecolor': '"""white"""'}), "(x, +y1, facecolor='#9999ff', edgecolor='white')\n", (2533, 2581), True, 'import matplotlib.pyplot as plt\n'), ((2582, 2637), 'matplotlib.pyplot.bar', 'plt.bar', (['x', '(-y2)'], {'facecolor': '"""#ff9999"""', 'edgecolor': '"""white"""'}), "(x, -y2, facecolor='#ff9999', edgecolor='white')\n", (2589, 2637), True, 'import matplotlib.pyplot as plt\n'), ((2877, 2892), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1)', 'n'], {}), '(-1, n)\n', (2885, 2892), True, 'import matplotlib.pyplot as plt\n'), ((2893, 2907), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (2903, 2907), True, 'import matplotlib.pyplot as plt\n'), ((2908, 2929), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.25)', '(1.25)'], {}), '(-1.25, 1.25)\n', (2916, 2929), True, 'import matplotlib.pyplot as plt\n'), ((2930, 2944), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (2940, 2944), True, 'import matplotlib.pyplot as plt\n'), ((2946, 2956), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (2954, 2956), True, 'import matplotlib.pyplot as plt\n'), ((3044, 3065), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Contour"""'], {}), "('Contour')\n", (3054, 3065), True, 'import matplotlib.pyplot as plt\n'), ((3078, 3099), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', 'n'], {}), '(-3, 3, n)\n', (3089, 3099), True, 'import numpy as np\n'), ((3104, 3125), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', 'n'], {}), '(-3, 3, n)\n', (3115, 3125), True, 'import numpy as np\n'), ((3133, 3150), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (3144, 3150), True, 'import numpy as np\n'), ((3291, 3330), 'matplotlib.pyplot.clabel', 'plt.clabel', (['C'], {'inline': '(True)', 'fontsize': '(10)'}), '(C, inline=True, fontsize=10)\n', (3301, 3330), True, 'import matplotlib.pyplot as plt\n'), ((3331, 3345), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (3341, 3345), True, 'import matplotlib.pyplot as plt\n'), ((3346, 3360), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (3356, 3360), True, 'import matplotlib.pyplot as plt\n'), ((3361, 3371), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3369, 3371), True, 'import matplotlib.pyplot as plt\n'), ((3385, 3405), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (3399, 3405), True, 'import numpy as np\n'), ((3415, 3482), 'matplotlib.pyplot.imshow', 'plt.imshow', (['x'], {'cmap': '"""bone"""', 'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "(x, cmap='bone', interpolation='nearest', origin='lower')\n", (3425, 3482), True, 'import matplotlib.pyplot as plt\n'), ((3483, 3508), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'shrink': '(0.92)'}), '(shrink=0.92)\n', (3495, 3508), True, 'import matplotlib.pyplot as plt\n'), ((3509, 3523), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (3519, 3523), True, 'import matplotlib.pyplot as plt\n'), ((3524, 3538), 
'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (3534, 3538), True, 'import matplotlib.pyplot as plt\n'), ((3539, 3549), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3547, 3549), True, 'import matplotlib.pyplot as plt\n'), ((3567, 3579), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3577, 3579), True, 'import matplotlib.pyplot as plt\n'), ((3585, 3596), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (3591, 3596), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((3601, 3623), 'numpy.arange', 'np.arange', (['(-4)', '(4)', '(0.25)'], {}), '(-4, 4, 0.25)\n', (3610, 3623), True, 'import numpy as np\n'), ((3628, 3650), 'numpy.arange', 'np.arange', (['(-4)', '(4)', '(0.25)'], {}), '(-4, 4, 0.25)\n', (3637, 3650), True, 'import numpy as np\n'), ((3658, 3675), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (3669, 3675), True, 'import numpy as np\n'), ((3680, 3704), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (3687, 3704), True, 'import numpy as np\n'), ((3705, 3714), 'numpy.sin', 'np.sin', (['r'], {}), '(r)\n', (3711, 3714), True, 'import numpy as np\n'), ((3916, 3926), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3924, 3926), True, 'import matplotlib.pyplot as plt\n'), ((2437, 2467), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1.0)', 'n'], {}), '(0.5, 1.0, n)\n', (2454, 2467), True, 'import numpy as np\n'), ((2494, 2524), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1.0)', 'n'], {}), '(0.5, 1.0, n)\n', (2511, 2524), True, 'import numpy as np\n'), ((2721, 2783), 'matplotlib.pyplot.text', 'plt.text', (['xa', '(ya + 0.05)', "('%.2f' % ya)"], {'ha': '"""center"""', 'va': '"""bottom"""'}), "(xa, ya + 0.05, '%.2f' % ya, ha='center', va='bottom')\n", (2729, 2783), True, 'import matplotlib.pyplot as plt\n'), ((2815, 2875), 'matplotlib.pyplot.text', 'plt.text', (['xa', '(-ya - 0.05)', "('%.2f' % ya)"], {'ha': '"""center"""', 
'va': '"""top"""'}), "(xa, -ya - 0.05, '%.2f' % ya, ha='center', va='top')\n", (2823, 2875), True, 'import matplotlib.pyplot as plt\n'), ((3021, 3045), 'numpy.exp', 'np.exp', (['(-x ** 2 - y ** 2)'], {}), '(-x ** 2 - y ** 2)\n', (3027, 3045), True, 'import numpy as np\n'), ((3800, 3823), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""rainbow"""'], {}), "('rainbow')\n", (3812, 3823), True, 'import matplotlib.pyplot as plt\n'), ((3872, 3895), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""rainbow"""'], {}), "('rainbow')\n", (3884, 3895), True, 'import matplotlib.pyplot as plt\n')] |
"""Roughness helper functions"""
import os
import contextlib
from pathlib import Path
import numpy as np
import numpy.f2py
import xarray as xr
import jupytext
from . import config as cfg
from . import __version__
# Line of sight helpers
def lookup2xarray(lookups):
    """
    Convert list of default lookups to an xarray.Dataset.

    Parameters
    ----------
    lookups (array): Lookup tables (e.g. from make_los_table).

    Returns
    -------
    xarray.Dataset: Dataset with one labeled DataArray per lookup table.
    """
    names = cfg.LUT_NAMES
    longnames = cfg.LUT_LONGNAMES
    for i, lut in enumerate(lookups):
        data_arr = np2xr(lut, name=names[i])
        data_arr.attrs["long_name"] = longnames[i]
        if i == 0:
            # First table seeds the Dataset; the rest are added to it.
            ds = data_arr.to_dataset()
        else:
            ds[names[i]] = data_arr
    return ds
def np2xr(arr, dims=None, coords=None, name=None, cnames=None, cunits=None):
    """
    Convert a numpy array to an xarray.DataArray.

    Parameters
    ----------
    arr (np.array): Array to convert.
    dims (list of str): Dimension names (default: cfg.LUT_DIMS order).
    coords (list of arr): Coordinate arrays for each dim.
    name (str): Name of the DataArray (default: None).
    cnames (list of str): Long names for each coordinate.
    cunits (list of str): Units of each coordinate (default: deg).

    Returns
    -------
    xarray.DataArray
    """
    ndims = len(arr.shape)
    # Fill any unspecified metadata from the configured lookup-table defaults.
    if dims is None:
        dims = cfg.LUT_DIMS[:ndims]
    if coords is None:
        coords = get_lookup_coords(*arr.shape)[:ndims]
    if cnames is None:
        cnames = cfg.LUT_DIMS_LONGNAMES[:ndims]
    if cunits is None:
        cunits = ["deg"] * ndims
    # xarray expects (dim, coord) pairs.
    dim_coord_pairs = list(zip(dims, coords))
    da = xr.DataArray(arr, coords=dim_coord_pairs, name=name)
    for i, coord in enumerate(da.coords):
        da.coords[coord].attrs["long_name"] = cnames[i]
        da.coords[coord].attrs["units"] = cunits[i]
    return da
def wl2xr(arr, units="microns"):
    """Return wavelength numpy array as xarray."""
    da = xr.DataArray(arr, coords=[("wavelength", arr)])
    wl_attrs = da.coords["wavelength"].attrs
    wl_attrs["long_name"] = "Wavelength"
    wl_attrs["units"] = units
    return da
def wn2xr(arr, units="cm^-1"):
    """
    Return wavenumber numpy array as xarray.

    Parameters
    ----------
    arr (np.array): Wavenumber values.
    units (str): Units attribute for the coordinate (default: cm^-1).

    Returns
    -------
    xarray.DataArray: Array indexed by a "wavenumber" coordinate.
    """
    da = xr.DataArray(arr, coords=[("wavenumber", arr)])
    # Bug fix: attrs were previously set on a nonexistent "wavelength" coord,
    # which raises KeyError — this DataArray's only coordinate is "wavenumber".
    da.coords["wavenumber"].attrs["long_name"] = "Wavenumber"
    da.coords["wavenumber"].attrs["units"] = units
    return da
def get_lookup_coords(nrms=10, ninc=10, naz=36, ntheta=45):
    """
    Return coordinate arrays matching the number of elements in each axis.

    Axes are returned in order (rms, inc, az, theta) with ranges:
        rms: [0, 50) degrees
        inc: [0, 90] degrees (sampled uniformly in cos(inc))
        az: [0, 360) degrees
        theta: [0, 90) degrees

    Parameters
    ----------
    nrms (int): Number of RMS slopes.
    ninc (int): Number of incidence angles.
    naz (int): Number of facet azimuth bins.
    ntheta (int): Number of facet slope bins.

    Returns
    -------
    tuple of array: (rms, inc, az, theta) coordinate arrays.
    """
    rms = np.linspace(0, 50, nrms, endpoint=False)
    # Incidence is uniform in cos(inc), then converted to degrees below.
    cos_inc = np.linspace(1, 0, ninc, endpoint=True)
    azim = np.linspace(0, 360, naz, endpoint=False)
    slope = np.linspace(0, 90, ntheta, endpoint=False)
    inc = np.rad2deg(np.arccos(cos_inc))
    return (rms, inc, azim, slope)
def facet_grids(los_table, units="degrees"):
    """
    Return 2D grids of surface facet slope and azimuth angles of los_table.

    Assumes los_table axes are (az, theta) with az in [0, 360] and theta in
    [0, 90] degrees.

    Parameters
    ----------
    los_table (arr or xr.DataArray): Line of sight table (dims: az, theta).
    units (str): Units of the returned grids ('degrees' or 'radians').

    Returns
    -------
    (theta_grid, az_grid): Coordinate grids of facet slope and azimuth.
    """
    is_xarr = isinstance(los_table, xr.DataArray)
    if is_xarr:
        azimuths = los_table.az.values
        slopes = los_table.theta.values
    else:
        naz, ntheta = los_table.shape
        _, _, azimuths, slopes = get_lookup_coords(naz=naz, ntheta=ntheta)
    if units == "radians":
        azimuths = np.radians(azimuths)
        slopes = np.radians(slopes)
    # Build the coordinate grids.
    theta_grid, az_grid = np.meshgrid(slopes, azimuths)
    if is_xarr:
        # Broadcast into labeled DataArrays matching los_table's coords.
        theta_grid = xr.ones_like(los_table) * theta_grid
        theta_grid.name = f"theta [{units}]"
        az_grid = xr.ones_like(los_table) * az_grid
        az_grid.name = f"azimuth [{units}]"
    return theta_grid, az_grid
def get_facet_bins(naz=36, ntheta=45):
    """
    Return az, theta bin edge arrays for a line of sight table.

    Assumes los_table axes are (az, theta) with az in [0, 360] and theta in
    [0, 90] degrees.

    Parameters
    ----------
    naz (int): Number of facet azimuth bins.
    ntheta (int): Number of facet slope bins.

    Returns
    -------
    tuple of array: Bin edges of facet azimuth and slope (naz+1, ntheta+1).
    """
    az_edges = np.linspace(0, 360, naz + 1)
    theta_edges = np.linspace(0, 90, ntheta + 1)
    return (az_edges, theta_edges)
# File I/O helpers
def rm_regex(dirpath, regex):
    """Delete every file under dirpath whose name matches the glob pattern."""
    for match in Path(dirpath).rglob(regex):
        match.unlink()
def fname_with_demsize(filename, demsize):
    """
    Return filename with demsize appended before the extension.

    Parameters
    ----------
    filename (str or Path): Filename to modify.
    demsize (int): Length of dem in pixels.

    Returns
    -------
    Path: New path of the form <stem>_s<demsize><suffix>.
    """
    fpath = Path(filename)
    new_name = f"{fpath.stem}_s{demsize}{fpath.suffix}"
    return fpath.with_name(new_name)
def versions_match(version_a, version_b, precision=2):
    """
    Check whether two semantic version strings agree to `precision` fields.

    Examples
    --------
    >>> versions_match('1.0.0', '1.2.3', precision=1)
    True
    >>> versions_match('1.2.0', '1.2.3', precision=2)
    True
    >>> versions_match('1.2.3', '1.2.3', precision=3)
    True
    """
    fields_a = version_a.split(".")
    fields_b = version_b.split(".")
    return all(fields_a[i] == fields_b[i] for i in range(precision))
def check_data_updated():
    """Return True if the data folder version matches the package version."""
    data_version = get_data_version()
    current = data_version is not None and versions_match(
        data_version, __version__
    )
    if not current:
        print("WARNING: The roughness/data folder is not up to date!")
        print("Update to the newest lookup tables with -d flag.")
        return False
    return True
def get_data_version(data_version_file=cfg.FDATA_VERSION):
    """Return the version string in data/data_version.txt, or None if absent."""
    try:
        with open(data_version_file, "r") as f:
            return f.readline().strip()
    except FileNotFoundError:
        # Data folder never initialized — caller treats None as outdated.
        return None
def set_data_version(data_version_file=cfg.FDATA_VERSION):
    """Write the current package version to data/data_version.txt."""
    with open(data_version_file, "w") as version_file:
        version_file.write(__version__)
@contextlib.contextmanager
def change_working_directory(path):
    """Temporarily change the working directory, restoring it on exit."""
    original_cwd = Path.cwd()
    os.chdir(path)
    try:
        yield
    finally:
        # Always restore, even if the body raised.
        os.chdir(original_cwd)
# Fortran helpers
def compile_lineofsight(los_f90=cfg.FLOS_F90, verbose=False):
    """
    Compile the lineofsight Fortran module with numpy.f2py.

    Parameters
    ----------
    los_f90 (str or Path): Path to the lineofsight F90 source file.
    verbose (bool): Pass the verbose flag through to f2py.

    Raises
    ------
    ImportError: If f2py reports a compilation failure (e.g. no compiler).
    """
    with open(los_f90) as src_file:
        source = src_file.read()
    # numpy.f2py.compile returns a nonzero status on failure.
    status = numpy.f2py.compile(
        source, "lineofsight", verbose=verbose, source_fn=los_f90
    )
    if status:
        msg = (
            "Cannot compile Fortran raytracing code. Please ensure you have "
            "a F90 compatible Fortran compiler (e.g. gfortran) installed."
        )
        raise ImportError(msg)
def import_lineofsight(fortran_dir=cfg.FORTRAN_DIR):
    """
    Import lineofsight module. Compile first if not found.

    Parameters
    ----------
    fortran_dir (Path): Directory containing the Fortran sources (default:
        cfg.FORTRAN_DIR); the import is attempted from inside this directory.

    Returns
    -------
    module or None: The lineofsight extension module, or None if it could
        neither be imported nor compiled.
    """
    # pragma pylint: disable=import-outside-toplevel
    lineofsight = None
    with change_working_directory(fortran_dir):
        try:
            # Fast path: a previously compiled extension already exists.
            from .fortran import lineofsight
        except (ImportError, ModuleNotFoundError):
            try:
                # Slow path: compile with f2py, then retry the import.
                compile_lineofsight()
                from .fortran import lineofsight
            except (ImportError, ModuleNotFoundError):
                msg = "Cannot compile lineofsight FORTRAN module. Please \
ensure you have a F90 compatible Fortran compiler (e.g.\
gfortran) installed."
                print(msg)
    return lineofsight
def build_jupyter_notebooks(nbpath=cfg.EXAMPLES_DIR):
    """Build Jupyter notebooks from paired .py scripts using Jupytext."""
    print("Setting up Jupyter notebooks")
    for script in nbpath.rglob("*.py"):
        notebook = script.with_suffix(".ipynb")
        if notebook.exists():
            # Never clobber a notebook the user may have edited.
            print(f"Skipping existing notebook {notebook}")
        else:
            jupytext.write(jupytext.read(script), notebook)
            print(f"Wrote {notebook}")
# Geometry helpers
def get_surf_geometry(ground_zen, ground_az, sun_zen, sun_az, sc_zen, sc_az):
    """
    Return local i, e, g, azimuth given input viewing geometry assuming all
    input zeniths and azimuths are in radians.

    Returns
    -------
    tuple: (inc, em, phase, az) angles in degrees.
    """
    # This function was a line-for-line duplicate of get_ieg; delegate to it
    # so the computation lives in one place.
    return get_ieg(ground_zen, ground_az, sun_zen, sun_az, sc_zen, sc_az)
def get_ieg(ground_zen, ground_az, sun_zen, sun_az, sc_zen, sc_az):
    """
    Return local solar incidence, spacecraft emission, phase angle (i, e, g)
    and azimuth given input viewing geometry.

    Input zeniths and azimuths in radians; returned angles are in degrees.
    """
    # Convert each (zenith, azimuth) pair to a Cartesian unit vector.
    ground_vec = sph2cart(ground_zen, ground_az)
    sun_vec = sph2cart(sun_zen, sun_az)
    sc_vec = sph2cart(sc_zen, sc_az)
    return (
        get_local_inc(ground_vec, sun_vec),
        get_local_em(ground_vec, sc_vec),
        get_local_phase(sun_vec, sc_vec),
        get_local_az(ground_vec, sun_vec, sc_vec),
    )
def safe_arccos(arr):
    """
    Return arccos of arr with inputs clipped to the valid domain [-1, 1].

    Out-of-range inputs (e.g. from float error in dot products) are clipped
    rather than producing NaN; a warning is printed when any are found.
    The input array is no longer mutated in place (the original clamped the
    caller's array as a side effect).
    """
    arr = np.asarray(arr)
    if ((arr < -1) | (arr > 1)).any():
        print("Invalid cosine input; restrict to [-1,1]")
    return np.arccos(np.clip(arr, -1, 1))
def get_azim(ground_sc_ground, ground_sun_ground):
    """
    Return azimuth [deg] between projected spacecraft and sun vectors.

    Computed as arccos of the element-wise dot product of the two vector
    arrays; values outside (0, 180) are reset to 0 with a warning.
    """
    dot_azim = np.degrees(
        np.arccos(np.sum(ground_sc_ground * ground_sun_ground, axis=2))
    )
    # Bug fix: the original mask was `(dot_azim < 0) * (dot_azim > 180)`,
    # a logical AND no value can satisfy; OR is the intended check.
    oob_mask = (dot_azim < 0) | (dot_azim > 180)
    if oob_mask.any():
        w = f"Azimuth {dot_azim[oob_mask]} outside (0, 180); Setting to 0"
        print(w)
        dot_azim[oob_mask] = 0
    return dot_azim
def get_local_inc(ground, sun):
    """
    Return solar incidence angle [deg] of each pixel given local topography.

    Assumes ground and sun are same-shape 3D Cartesian vector arrays (ixjxk).
    """
    cos_inc = element_dot(ground, sun)
    local_inc = np.degrees(safe_arccos(cos_inc))
    # Cap just below 90 deg; presumably avoids singularities downstream
    # (e.g. division by cos(inc)) — TODO confirm.
    local_inc[local_inc > 89.999] = 89.999
    return local_inc
def get_local_em(ground, sc):
    """
    Return emergence angle [deg] of each pixel given local topography.

    Assumes ground and sc are same-shape 3D Cartesian vector arrays (ixjxk).
    """
    cos_em = element_dot(ground, sc)
    return np.degrees(safe_arccos(cos_em))
def get_local_phase(sun, sc):
    """
    Return phase angle [deg] of each pixel given local topography.

    Assumes sun and sc are same-shape 3D Cartesian vector arrays (ixjxk).
    """
    cos_phase = element_dot(sc, sun)
    return np.degrees(safe_arccos(cos_phase))
def get_local_az(ground, sun, sc):
    """
    Return azimuth angle [deg] of the spacecraft with respect to the sun and
    local slope. Assumes inputs are same-shape 3D Cartesian vector arrays
    (ixjxk).
    """
    # Project sc and sun into the plane perpendicular to the ground normal.
    sc_rel_ground = element_norm(element_triple_cross(ground, sc, ground))
    sun_rel_ground = element_norm(element_triple_cross(ground, sun, ground))
    cos_az = element_dot(sc_rel_ground, sun_rel_ground)
    az = np.degrees(safe_arccos(cos_az))
    # Bug fix: the original mask was `(az < 0) * (az > 180)`, a logical AND
    # no value can satisfy; OR actually zeroes out-of-range azimuths.
    az[(az < 0) | (az > 180)] = 0
    return az
def inc_to_tloc(inc, az):
    """
    Convert solar incidence and azimuth to decimal local time (6-18h).

    Parameters
    ----------
    inc (float or array): Solar incidence in degrees (0, 90).
    az (float or array): Solar azimuth in degrees (0, 360).

    Returns
    -------
    tloc (array): Decimal local time in hours (6, 18).
    """
    # Copy via np.array so scalar floats work too: the original called
    # inc.copy(), which crashes on a plain float despite the docstring
    # documenting float input. The caller's array is never mutated.
    inc = np.array(inc, dtype=float)
    if isinstance(az, np.ndarray):
        inc[az < 180] *= -1  # morning side gets negative incidence
    elif az < 180:
        inc = -inc
    coinc = 90 + inc  # (-90, 90) -> (0, 180)
    tloc = 6 * coinc / 90 + 6  # (0, 180) -> (6, 18)
    return tloc
def tloc_to_inc(tloc):
    """
    Convert decimal local time (6-18h) to solar incidence angle (equator only).

    Times before noon map to negative incidence, noon to 0, afternoon to
    positive incidence.
    """
    colat = (tloc - 6) * 90 / 6  # (6, 18) h -> (0, 180) deg
    return colat - 90  # (0, 180) -> (-90, 90) deg
# Linear algebra
# def element_az_elev(v1, v2):
# """
# Return azimuth and elevation of v2 relative to v1.
#
# untested
# """
# v = v2 - v1
# az = np.degrees(np.arctan2(v[:, :, 0], v[:, :, 1]))
# elev = np.degrees(np.arctan2(v[:, :, 2], np.sqrt(v[:, :, 0]** 2 + v[:, :, 1]**2)))
# return az, elev
def element_cross(A, B):
    """
    Return element-wise cross product of two 3D arrays in Cartesian coords.

    The last axis holds the (x, y, z) components; output dtype follows A.
    """
    a_x, a_y, a_z = A[:, :, 0], A[:, :, 1], A[:, :, 2]
    b_x, b_y, b_z = B[:, :, 0], B[:, :, 1], B[:, :, 2]
    result = np.zeros_like(A)
    result[:, :, 0] = a_y * b_z - a_z * b_y
    result[:, :, 1] = a_z * b_x - a_x * b_z
    result[:, :, 2] = a_x * b_y - a_y * b_x
    return result
def element_dot(A, B):
    """Return element-wise dot product of two 3D arr in Cartesian coords."""
    return (A * B).sum(axis=2)
def element_norm(A):
    """Return input array of vectors normalized to length 1."""
    lengths = np.sqrt((A ** 2).sum(axis=2))
    # Broadcast the per-vector length back over the component axis.
    return A / lengths[:, :, np.newaxis]
def element_triple_cross(A, B, C):
    """
    Return element-wise vector triple product A x (B x C) in Cartesian coords,
    via the BAC-CAB identity: B(A.C) - C(A.B).
    """
    a_dot_c = np.sum(A * C, axis=2)[:, :, np.newaxis]
    a_dot_b = np.sum(A * B, axis=2)[:, :, np.newaxis]
    return B * a_dot_c - C * a_dot_b
# Coordinate transformations
def cart2pol(x, y):
    """
    Convert ordered coordinate pairs from Cartesian (X, Y) to polar (r, theta).

    Parameters
    ----------
    x: X component of ordered Cartesian coordinate pair.
    y: Y component of ordered Cartesian coordinate pair.

    Returns
    -------
    (r, theta): Distance from origin and angle in radians.
    """
    distance = np.sqrt(x ** 2 + y ** 2)
    angle = np.arctan2(y, x)
    return (distance, angle)
def pol2cart(rho, phi):
    """
    Convert ordered coordinate pairs from polar (r, theta) to Cartesian (X, Y).

    Parameters
    ----------
    rho: Distance from origin.
    phi: Angle, in radians.

    Returns
    -------
    (x, y): Cartesian coordinate pair.
    """
    return (rho * np.cos(phi), rho * np.sin(phi))
def sph2cart(theta, phi, radius=1):
    """
    Convert spherical (theta, phi, r) to Cartesian (x, y, z) coordinates.

    Theta and phi must be in radians; output units follow radius (default 1,
    i.e. unit vectors). Components are stacked along the last axis: scalar
    inputs give a length-3 vector, MxN inputs give an MxNx3 array.

    Parameters
    ----------
    theta (num or array): Polar angle [rad].
    phi (num or array): Azimuthal angle [rad].
    radius (num or array): Radius (default=1).

    Returns
    -------
    cartesian (array): Cartesian vector(s), same shape as theta and phi.
    """
    x_comp = radius * np.sin(theta) * np.cos(phi)
    y_comp = radius * np.sin(theta) * np.sin(phi)
    z_comp = radius * np.cos(theta)
    return np.dstack([x_comp, y_comp, z_comp]).squeeze()
def xy2lonlat_coords(x, y, extent):
    """
    Convert x, y coordinate arrays to lon, lat coordinate arrays.

    extent is assumed to be (lon_min, lon_max, lat_min, lat_max); latitude
    runs from extent[3] down to extent[2] (top-to-bottom image order).
    """
    lon = np.linspace(extent[0], extent[1], len(x))
    lat = np.linspace(extent[3], extent[2], len(y))
    return lon, lat
| [
"numpy.radians",
"numpy.arccos",
"numpy.sqrt",
"pathlib.Path",
"pathlib.Path.cwd",
"xarray.ones_like",
"jupytext.read",
"os.chdir",
"numpy.sum",
"numpy.linspace",
"numpy.arctan2",
"numpy.cos",
"xarray.DataArray",
"numpy.sin",
"numpy.meshgrid",
"numpy.zeros_like"
] | [((1891, 1937), 'xarray.DataArray', 'xr.DataArray', (['arr'], {'coords': 'coords_xr', 'name': 'name'}), '(arr, coords=coords_xr, name=name)\n', (1903, 1937), True, 'import xarray as xr\n'), ((2197, 2244), 'xarray.DataArray', 'xr.DataArray', (['arr'], {'coords': "[('wavelength', arr)]"}), "(arr, coords=[('wavelength', arr)])\n", (2209, 2244), True, 'import xarray as xr\n'), ((2465, 2512), 'xarray.DataArray', 'xr.DataArray', (['arr'], {'coords': "[('wavenumber', arr)]"}), "(arr, coords=[('wavenumber', arr)])\n", (2477, 2512), True, 'import xarray as xr\n'), ((3373, 3413), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', 'nrms'], {'endpoint': '(False)'}), '(0, 50, nrms, endpoint=False)\n', (3384, 3413), True, 'import numpy as np\n'), ((3432, 3470), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', 'ninc'], {'endpoint': '(True)'}), '(1, 0, ninc, endpoint=True)\n', (3443, 3470), True, 'import numpy as np\n'), ((3508, 3548), 'numpy.linspace', 'np.linspace', (['(0)', '(360)', 'naz'], {'endpoint': '(False)'}), '(0, 360, naz, endpoint=False)\n', (3519, 3548), True, 'import numpy as np\n'), ((3568, 3610), 'numpy.linspace', 'np.linspace', (['(0)', '(90)', 'ntheta'], {'endpoint': '(False)'}), '(0, 90, ntheta, endpoint=False)\n', (3579, 3610), True, 'import numpy as np\n'), ((4654, 4684), 'numpy.meshgrid', 'np.meshgrid', (['theta_arr', 'az_arr'], {}), '(theta_arr, az_arr)\n', (4665, 4684), True, 'import numpy as np\n'), ((5455, 5483), 'numpy.linspace', 'np.linspace', (['(0)', '(360)', '(naz + 1)'], {}), '(0, 360, naz + 1)\n', (5466, 5483), True, 'import numpy as np\n'), ((5503, 5533), 'numpy.linspace', 'np.linspace', (['(0)', '(90)', '(ntheta + 1)'], {}), '(0, 90, ntheta + 1)\n', (5514, 5533), True, 'import numpy as np\n'), ((5692, 5705), 'pathlib.Path', 'Path', (['dirpath'], {}), '(dirpath)\n', (5696, 5705), False, 'from pathlib import Path\n'), ((6108, 6122), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (6112, 6122), False, 'from pathlib import Path\n'), 
((7745, 7755), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (7753, 7755), False, 'from pathlib import Path\n'), ((7760, 7774), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (7768, 7774), False, 'import os\n'), ((10881, 10895), 'numpy.arccos', 'np.arccos', (['arr'], {}), '(arr)\n', (10890, 10895), True, 'import numpy as np\n'), ((13984, 14000), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (13997, 14000), True, 'import numpy as np\n'), ((14336, 14357), 'numpy.sum', 'np.sum', (['(A * B)'], {'axis': '(2)'}), '(A * B, axis=2)\n', (14342, 14357), True, 'import numpy as np\n'), ((15144, 15168), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (15151, 15168), True, 'import numpy as np\n'), ((15181, 15197), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (15191, 15197), True, 'import numpy as np\n'), ((3639, 3661), 'numpy.arccos', 'np.arccos', (['cinc_coords'], {}), '(cinc_coords)\n', (3648, 3661), True, 'import numpy as np\n'), ((4538, 4556), 'numpy.radians', 'np.radians', (['az_arr'], {}), '(az_arr)\n', (4548, 4556), True, 'import numpy as np\n'), ((4577, 4598), 'numpy.radians', 'np.radians', (['theta_arr'], {}), '(theta_arr)\n', (4587, 4598), True, 'import numpy as np\n'), ((7819, 7837), 'os.chdir', 'os.chdir', (['prev_cwd'], {}), '(prev_cwd)\n', (7827, 7837), False, 'import os\n'), ((14463, 14485), 'numpy.sum', 'np.sum', (['(A ** 2)'], {'axis': '(2)'}), '(A ** 2, axis=2)\n', (14469, 14485), True, 'import numpy as np\n'), ((15583, 15594), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (15589, 15594), True, 'import numpy as np\n'), ((15609, 15620), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (15615, 15620), True, 'import numpy as np\n'), ((4750, 4773), 'xarray.ones_like', 'xr.ones_like', (['los_table'], {}), '(los_table)\n', (4762, 4773), True, 'import xarray as xr\n'), ((4850, 4873), 'xarray.ones_like', 'xr.ones_like', (['los_table'], {}), '(los_table)\n', (4862, 4873), True, 'import xarray as 
xr\n'), ((11087, 11139), 'numpy.sum', 'np.sum', (['(ground_sc_ground * ground_sun_ground)'], {'axis': '(2)'}), '(ground_sc_ground * ground_sun_ground, axis=2)\n', (11093, 11139), True, 'import numpy as np\n'), ((9468, 9488), 'jupytext.read', 'jupytext.read', (['nb_py'], {}), '(nb_py)\n', (9481, 9488), False, 'import jupytext\n'), ((16411, 16422), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (16417, 16422), True, 'import numpy as np\n'), ((16461, 16472), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (16467, 16472), True, 'import numpy as np\n'), ((16495, 16508), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (16501, 16508), True, 'import numpy as np\n'), ((16395, 16408), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (16401, 16408), True, 'import numpy as np\n'), ((16445, 16458), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (16451, 16458), True, 'import numpy as np\n')] |
import PIL as PIL
import pytesseract as tess
import matplotlib.pyplot as plt
import numpy as np
class Img2Str:
def __init__(self, img_path=None):
self.img_path = img_path
self.image = None
def load_image(self, img_path):
self.img_path = img_path
self.image = PIL.Image.open(img_path)
return self.image
def run_ocr(self):
if not self.img_path:
raise AttributeError('self.image is not defined, load an image first.')
self.load_image(self.img_path)
text = tess.image_to_string(self.image)
print(text)
return text
def show_image(self, figsize=(8,8)):
"""
To show a PIL image you can use Image.show() class method, but this
opens the file with default OS program through generation of a temp file.
In order to show the image in Jupyter notebooks, one strategy is to
convert the image to a numpy array, and then use matplotlib.imshow()
"""
if not self.img_path:
raise AttributeError('self.image is not defined, load an image first.')
self.load_image(self.img_path)
as_np = np.asarray(self.image)
plt.figure(figsize=figsize)
plt.imshow(as_np)
plt.axis('off')
plt.show()
def __call__(self, img_path):
self.load_image(img_path)
return self.run_ocr()
ocr = Img2Str('./ocr_img_test.png')
ocr.run_ocr()
| [
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"numpy.asarray",
"matplotlib.pyplot.figure",
"pytesseract.image_to_string",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
] | [((302, 326), 'PIL.Image.open', 'PIL.Image.open', (['img_path'], {}), '(img_path)\n', (316, 326), True, 'import PIL as PIL\n'), ((556, 588), 'pytesseract.image_to_string', 'tess.image_to_string', (['self.image'], {}), '(self.image)\n', (576, 588), True, 'import pytesseract as tess\n'), ((1176, 1198), 'numpy.asarray', 'np.asarray', (['self.image'], {}), '(self.image)\n', (1186, 1198), True, 'import numpy as np\n'), ((1207, 1234), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1217, 1234), True, 'import matplotlib.pyplot as plt\n'), ((1243, 1260), 'matplotlib.pyplot.imshow', 'plt.imshow', (['as_np'], {}), '(as_np)\n', (1253, 1260), True, 'import matplotlib.pyplot as plt\n'), ((1269, 1284), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1277, 1284), True, 'import matplotlib.pyplot as plt\n'), ((1293, 1303), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1301, 1303), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from math import factorial
from numpy.ma import masked_array
import scipy
from scipy.special import factorial
import scipy.stats as stat
import pdb
#Define Zernike radial polynomials
def rnm(n,m,rho):
"""
Return an array with the zernike Rnm polynomial calculated at rho points.
**ARGUMENTS:**
=== ==========================================
n n order of the Zernike polynomial
m m order of the Zernike polynomial
rho Matrix containing the radial coordinates.
=== ==========================================
.. note:: For rho>1 the returned value is 0
.. note:: Values for rho<0 are silently returned as rho=0
"""
if(type(n) is not int):
raise Exception("n must be integer")
if(type(m) is not int):
raise Exception("m must be integer")
if (n-m)%2!=0:
raise Exception("n-m must be even")
if abs(m)>n:
raise Exception("The following must be true |m|<=n")
mask=np.where(rho<=1,False,True)
if(n==0 and m==0):
return masked_array(data=np.ones(np.shape(rho)), mask=mask)
rho=np.where(rho<0,0,rho)
Rnm=np.zeros(rho.shape)
S=(n-abs(m))/2
for s in range (0,S+1):
CR=pow(-1,s)*factorial(n-s)/ \
(factorial(s)*factorial(-s+(n+abs(m))/2)* \
factorial(-s+(n-abs(m))/2))
p=CR*pow(rho,n-2*s)
Rnm=Rnm+p
return masked_array(data=Rnm, mask=mask)
def zernike(n,m,rho,theta):
"""
Returns the an array with the Zernike polynomial evaluated in the rho and
theta.
**ARGUMENTS:**
===== ==========================================
n n order of the Zernike polynomial
m m order of the Zernike polynomial
rho Matrix containing the radial coordinates.
theta Matrix containing the angular coordinates.
===== ==========================================
.. note:: For rho>1 the returned value is 0
.. note:: Values for rho<0 are silently returned as rho=0
"""
Rnm=rnm(n,m,rho)
NC=np.sqrt(2*(n+1))
S=(n-abs(m))/2
if m>0:
Zmn=NC*Rnm*np.cos(m*theta)
#las funciones cos() y sin() de scipy tienen problemas cuando la grilla
# tiene dimension cero
elif m<0:
Zmn=NC*Rnm*np.sin(m*theta)
else:
Zmn=np.sqrt(0.5)*NC*Rnm
return Zmn
def zmodes(N):
"""
Construct Zernike mode vectors in standard ordering
Includes all modes up to radial order N
"""
r = 0 #Starting radial mode
radial = []
azimuthal = []
z = []
while np.size(radial) < N:
if r % 2 == 0:
m = 0 #Set starting azimuthal mode to 0
else:
m = 1 #Set starting azimuthal mode to 1
while m <= r and np.size(radial) < N:
#Get current z number
z = np.size(azimuthal) + 1
#If z is odd, append sine first
#Append negative and positive m
if z % 2 == 1:
azimuthal.append(-m)
else:
azimuthal.append(m)
radial.append(r)
if m > 0:
if z % 2 == 1:
azimuthal.append(m)
else:
azimuthal.append(-m)
radial.append(r)
m = m + 2
r = r + 1 #Increment radial order
return np.array(radial[:N],order='F').astype('int'),\
np.array(azimuthal[:N],order='F').astype('int')
def zmatrix(rho,theta,N,r=None,m=None):
"""
Formulate Zernike least squares fitting matrix
Requires rho and theta vectors, normalized to rhomax=1
"""
#Create mode vectors
if r is None:
r,m = zmodes(N)
#Form matrix
A = np.zeros((np.size(rho),np.size(r)))
#Populate matrix columns
for i in range(np.size(r)):
A[:,i] = zernike(int(r[i]),int(m[i]),rho,theta)
return A
def carttopolar(x,y,cx,cy,rad):
#Convert to polar
r = (sqrt((x-cx)**2+(y-cy)**2))/rad
#Remove invalid points
mask = r <= 1
r = r[mask]
x = x[mask]
y = y[mask]
theta = arctan2((y-cy),(x-cx))
return r, theta, mask
#Reconstruct surface on unique x and y vectors for zernike coefficients
def zernsurf(x,y,cx,cy,rad,coeff,r=None,m=None):
if np.size(np.unique(x)) == np.size(x):
x,y = np.meshgrid(x,y)
rho = np.sqrt((x-cx)**2+(y-cy)**2)/rad
theta = np.arctan2((y-cy),(x-cx))
heights = np.zeros(np.shape(x))
if r is None:
r,m = zmodes(np.size(coeff))
for i in range(np.size(r)):
heights = heights + coeff[i]*zernike(int(r[i]),int(m[i]),rho,theta)
#Set outside pixels to NaN
heights[np.where(rho>1.)] = np.NaN
return heights.data
def fitimg(img,N=20,r=None,m=None):
"""
Perform Zernike fit on an image.
Zernike domain is defined over the full image array.
"""
#Construct rho and theta vectors
x,y = np.meshgrid(np.linspace(-1.,1.,np.shape(img)[0]),\
np.linspace(-1.,1.,np.shape(img)[1]))
rho = np.sqrt(x**2+y**2)
theta = np.arctan2(y,x)
#Flatten and remove NaNs
rho = rho.flatten()
theta = theta.flatten()
#Get b vector
b = img.flatten()
#Get A matrix
A = zmatrix(rho[~np.isnan(b)],theta[~np.isnan(b)],N,r=r,m=m)
#Solve for coefficients
c = scipy.linalg.lstsq(A,b[~np.isnan(b)])
#Reconstruct fit image
coeff = c[0]
A = zmatrix(rho,theta,N,r=r,m=m)
fit = np.dot(A,coeff)
fit = fit.reshape(np.shape(x))
fit[np.isnan(img)] = np.nan
return c,fit.reshape(np.shape(x))
def fitvec(x,y,z,N=20,r=None,m=None):
"""
Perform Zernike fit on an image.
Zernike domain is defined over the full image array.
"""
#Construct rho and theta vectors
rho = np.sqrt(x**2+y**2)
theta = np.arctan2(y,x)
rho = rho/rho.max()
#Get A matrix
A = zmatrix(rho,theta,N,r=r,m=m)
#Solve for coefficients
c = scipy.linalg.lstsq(A,z)
return c
#Function to output Zernike coefficients and RMS fit error for x,y,z txt file
def zcoeff(filename,save=False,cx=0.,cy=0.,rad=1.,order=20,r=None,m=None,**kwags):
#Read in surface data
if type(filename) is str:
d = genfromtxt(filename,**kwags)
else:
d = filename
if shape(d)[0]==3:
sagx, sagy, sagz = d[0],d[1],d[2]
else:
## #Strip NaN rows/columns
## while sum(isnan(d[0]))==shape(d)[1]:
## d = d[1:]
## while sum(isnan(d[-1]))==shape(d)[1]:
## d = d[:-1]
## newsize = shape(d)[0]
## while sum(isnan(d[:,0]))==newsize:
## d = d[:,1:]
## while sum(isnan(d[:,-1]))==newsize:
## d = d[:,:-1]
x,y=meshgrid(arange(shape(d)[0],dtype='float'),\
arange(shape(d)[1],dtype='float'))
## ind = invert(isnan(d))
## d2 = d[ind]
## x = x[ind]
## y = y[ind]
x = x.flatten()
y = y.flatten()
d2 = d.flatten()
sagx = []
sagy = []
sagz = []
for i in range(size(d2)):
if invert(isnan(d2[i])):
sagx.append(x[i])
sagy.append(y[i])
sagz.append(d2[i])
sagx = array(sagx)
sagy = array(sagy)
sagz = array(sagz)
#Convert to normalized polar coordinates for Zernike fitting
rho, theta, mask = carttopolar(sagx,sagy,cx,cy,rad)
#Create Zernike polynomial matrix for least squares fit
#Using all Zernike polynomials up to radial order 20
pdb.set_trace()
A = zmatrix(rho,theta,order,r=r,m=m)
#Perform least squares fit
#0th element is the coefficient matrix
#1st element is sum of squared residuals
#2nd element is rank of matrix A
#3rd element is singular values of A
fit = scipy.linalg.lstsq(A,sagz[mask])
#Compute fitted surface from Zernike coefficients
if shape(d)[0] != 3:
y,x=meshgrid(arange(shape(d)[0],dtype='float'),\
arange(shape(d)[1],dtype='float'))
else:
x,y = d[0],d[1]
fitsurf = zernsurf(x.flatten(),y.flatten(),cx,cy,rad,fit[0],r=r,m=m)
##
## #Do residuals match up with those from fit?
## print sum((fitsurf-sagz)**2)
## print fit[1]
rms = sqrt(fit[1]/size(sagz))
#If save=True, save coefficients to a txt file
#First line is number of coefficients
if save==True:
savetxt(filename.split('.')[0]+'Coeff.txt'\
,insert(fit[0],0,size(fit[0])))
return fit[0],fit[1],rms,fitsurf
| [
"scipy.linalg.lstsq",
"numpy.sqrt",
"numpy.unique",
"numpy.where",
"scipy.special.factorial",
"numpy.size",
"numpy.sin",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.arctan2",
"numpy.meshgrid",
"pdb.set_trace",
"numpy.isnan",
"numpy.cos",
"numpy.ma.masked_array",
"numpy.shape"
] | [((1006, 1037), 'numpy.where', 'np.where', (['(rho <= 1)', '(False)', '(True)'], {}), '(rho <= 1, False, True)\n', (1014, 1037), True, 'import numpy as np\n'), ((1135, 1160), 'numpy.where', 'np.where', (['(rho < 0)', '(0)', 'rho'], {}), '(rho < 0, 0, rho)\n', (1143, 1160), True, 'import numpy as np\n'), ((1165, 1184), 'numpy.zeros', 'np.zeros', (['rho.shape'], {}), '(rho.shape)\n', (1173, 1184), True, 'import numpy as np\n'), ((1424, 1457), 'numpy.ma.masked_array', 'masked_array', ([], {'data': 'Rnm', 'mask': 'mask'}), '(data=Rnm, mask=mask)\n', (1436, 1457), False, 'from numpy.ma import masked_array\n'), ((2056, 2076), 'numpy.sqrt', 'np.sqrt', (['(2 * (n + 1))'], {}), '(2 * (n + 1))\n', (2063, 2076), True, 'import numpy as np\n'), ((4386, 4412), 'numpy.arctan2', 'np.arctan2', (['(y - cy)', '(x - cx)'], {}), '(y - cy, x - cx)\n', (4396, 4412), True, 'import numpy as np\n'), ((5024, 5048), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (5031, 5048), True, 'import numpy as np\n'), ((5055, 5071), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (5065, 5071), True, 'import numpy as np\n'), ((5445, 5461), 'numpy.dot', 'np.dot', (['A', 'coeff'], {}), '(A, coeff)\n', (5451, 5461), True, 'import numpy as np\n'), ((5763, 5787), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (5770, 5787), True, 'import numpy as np\n'), ((5794, 5810), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (5804, 5810), True, 'import numpy as np\n'), ((5927, 5951), 'scipy.linalg.lstsq', 'scipy.linalg.lstsq', (['A', 'z'], {}), '(A, z)\n', (5945, 5951), False, 'import scipy\n'), ((7523, 7538), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (7536, 7538), False, 'import pdb\n'), ((7788, 7821), 'scipy.linalg.lstsq', 'scipy.linalg.lstsq', (['A', 'sagz[mask]'], {}), '(A, sagz[mask])\n', (7806, 7821), False, 'import scipy\n'), ((2570, 2585), 'numpy.size', 'np.size', (['radial'], {}), '(radial)\n', (2577, 2585), 
True, 'import numpy as np\n'), ((3801, 3811), 'numpy.size', 'np.size', (['r'], {}), '(r)\n', (3808, 3811), True, 'import numpy as np\n'), ((4288, 4298), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (4295, 4298), True, 'import numpy as np\n'), ((4314, 4331), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (4325, 4331), True, 'import numpy as np\n'), ((4341, 4379), 'numpy.sqrt', 'np.sqrt', (['((x - cx) ** 2 + (y - cy) ** 2)'], {}), '((x - cx) ** 2 + (y - cy) ** 2)\n', (4348, 4379), True, 'import numpy as np\n'), ((4435, 4446), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (4443, 4446), True, 'import numpy as np\n'), ((4524, 4534), 'numpy.size', 'np.size', (['r'], {}), '(r)\n', (4531, 4534), True, 'import numpy as np\n'), ((4657, 4676), 'numpy.where', 'np.where', (['(rho > 1.0)'], {}), '(rho > 1.0)\n', (4665, 4676), True, 'import numpy as np\n'), ((5483, 5494), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (5491, 5494), True, 'import numpy as np\n'), ((5504, 5517), 'numpy.isnan', 'np.isnan', (['img'], {}), '(img)\n', (5512, 5517), True, 'import numpy as np\n'), ((2124, 2141), 'numpy.cos', 'np.cos', (['(m * theta)'], {}), '(m * theta)\n', (2130, 2141), True, 'import numpy as np\n'), ((3726, 3738), 'numpy.size', 'np.size', (['rho'], {}), '(rho)\n', (3733, 3738), True, 'import numpy as np\n'), ((3739, 3749), 'numpy.size', 'np.size', (['r'], {}), '(r)\n', (3746, 3749), True, 'import numpy as np\n'), ((4271, 4283), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (4280, 4283), True, 'import numpy as np\n'), ((4488, 4502), 'numpy.size', 'np.size', (['coeff'], {}), '(coeff)\n', (4495, 4502), True, 'import numpy as np\n'), ((5554, 5565), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (5562, 5565), True, 'import numpy as np\n'), ((1253, 1269), 'scipy.special.factorial', 'factorial', (['(n - s)'], {}), '(n - s)\n', (1262, 1269), False, 'from scipy.special import factorial\n'), ((2277, 2294), 'numpy.sin', 'np.sin', (['(m * theta)'], {}), '(m * 
theta)\n', (2283, 2294), True, 'import numpy as np\n'), ((2757, 2772), 'numpy.size', 'np.size', (['radial'], {}), '(radial)\n', (2764, 2772), True, 'import numpy as np\n'), ((2828, 2846), 'numpy.size', 'np.size', (['azimuthal'], {}), '(azimuthal)\n', (2835, 2846), True, 'import numpy as np\n'), ((3350, 3381), 'numpy.array', 'np.array', (['radial[:N]'], {'order': '"""F"""'}), "(radial[:N], order='F')\n", (3358, 3381), True, 'import numpy as np\n'), ((3408, 3442), 'numpy.array', 'np.array', (['azimuthal[:N]'], {'order': '"""F"""'}), "(azimuthal[:N], order='F')\n", (3416, 3442), True, 'import numpy as np\n'), ((4934, 4947), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (4942, 4947), True, 'import numpy as np\n'), ((4995, 5008), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (5003, 5008), True, 'import numpy as np\n'), ((5234, 5245), 'numpy.isnan', 'np.isnan', (['b'], {}), '(b)\n', (5242, 5245), True, 'import numpy as np\n'), ((5254, 5265), 'numpy.isnan', 'np.isnan', (['b'], {}), '(b)\n', (5262, 5265), True, 'import numpy as np\n'), ((5339, 5350), 'numpy.isnan', 'np.isnan', (['b'], {}), '(b)\n', (5347, 5350), True, 'import numpy as np\n'), ((1100, 1113), 'numpy.shape', 'np.shape', (['rho'], {}), '(rho)\n', (1108, 1113), True, 'import numpy as np\n'), ((1284, 1296), 'scipy.special.factorial', 'factorial', (['s'], {}), '(s)\n', (1293, 1296), False, 'from scipy.special import factorial\n'), ((2315, 2327), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2322, 2327), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import os
import torch
from skimage import io, transform
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms#, utils
import pickle
from pdb import set_trace as stop
from PIL import Image
import json, string, sys
import torchvision.transforms.functional as TF
import random
import hashlib
import time
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import csv
from dataloaders.data_utils import get_unk_mask_indices
from dataloaders.data_utils import image_loader,pil_loader
def get_vocab(objData):
spunctuation = set(string.punctuation)
swords = set(stopwords.words('english'))
print('Building vocabulary of words...')
lem = WordNetLemmatizer()
word_counts = dict()
for (i, entry) in enumerate(objData['annotations']):
if i % 10000 == 0: print('.'),
caption = entry['caption']
for word in word_tokenize(caption.lower()):
word = lem.lemmatize(word)
if word not in swords and word not in spunctuation:
word_counts[word] = 1 + word_counts.get(word, 0)
sword_counts = sorted(word_counts.items(), key = lambda x: -x[1])
id2word = {idx: word for (idx, (word, count)) in enumerate(sword_counts[:1000])}
id2count = {idx: count for (idx, (word, count)) in enumerate(sword_counts[:1000])}
word2id = {word: idx for (idx, word) in id2word.items()}
vocabulary = (id2word, word2id, id2count)
pickle.dump(vocabulary, open('data/coco/coco_words_vocabulary.p', 'wb'))
return vocabulary
class Coco1000Dataset(torch.utils.data.Dataset):
def __init__(self, annotation_dir,image_dir,split='train',transform = None,known_labels=0,testing=False):
# Load training data.
self.split = split
self.image_dir = image_dir
self.transform = transform
self.testing=testing
self.num_labels = 1000#num_labels
self.epoch = 1
self.known_labels = known_labels
# Load annotations.
print(('\nLoading %s object annotations...') % self.split)
self.objData = json.load(open(os.path.join(annotation_dir, 'captions_' + self.split + '2014.json')))
self.imageIds = [entry['id'] for entry in self.objData['images']]
self.imageNames = [entry['file_name'] for entry in self.objData['images']]
self.imageId2index = {image_id: idx for (idx, image_id) in enumerate(self.imageIds)}
if os.path.exists("data/coco/coco_words_vocabulary.p"):
self.vocabulary = pickle.load(open('data/coco/coco_words_vocabulary.p', 'rb'))
else:
self.vocabulary = get_vocab(self.objData)
label_file_path = os.path.join(annotation_dir, '1000_labels_' + self.split + '2014.npy')
if os.path.exists(label_file_path):
print('Loading labels')
self.labels = np.load(label_file_path)
else:
print('Preparing label space')
lem = WordNetLemmatizer()
self.labels = np.zeros((len(self.objData['images']), len(self.vocabulary[0])))
for (i, entry) in enumerate(self.objData['annotations']):
# if i % 10000 == 0: print('.'),
image_id = entry['image_id']
caption = entry['caption']
for word in word_tokenize(caption.lower()):
word = lem.lemmatize(word)
if word in self.vocabulary[1].keys():
self.labels[self.imageId2index[image_id], self.word2id(word)] = 1
np.save(label_file_path, self.labels)
def getLabelWeights(self):
return (self.labels == 0).sum(axis = 0) / self.labels.sum(axis = 0)
def decodeCategories(self, labelVector):
return [self.id2word(idx) for idx in np.nonzero(labelVector)[0]]
def id2word(self, idx):
return self.vocabulary[0][idx]
def word2id(self, word):
return self.vocabulary[1][word]
def imageName(self, index):
return self.split + '2014/' + self.imageNames[index]
def __getitem__(self, index):
split_str = self.split if (self.split != 'test') else 'val'
imageName_ = split_str + '2014/' + self.imageNames[index]
image = pil_loader(os.path.join(self.image_dir, imageName_))
if self.transform is not None:
image = self.transform(image)
sample = {'image': image,'labels':torch.Tensor(self.labels[index, :])}
mask = sample['labels'].clone()
unk_mask_indices = get_unk_mask_indices(image,self.testing,self.num_labels,self.known_labels)
mask.scatter_(0,torch.Tensor(unk_mask_indices).long() , -1)
sample['mask'] = mask
sample['imageIDs'] = imageName_
return sample
def __len__(self):
return len(self.imageIds)
def numCategories(self):
return len(self.vocabulary[0])
| [
"os.path.exists",
"nltk.corpus.stopwords.words",
"os.path.join",
"dataloaders.data_utils.get_unk_mask_indices",
"nltk.stem.WordNetLemmatizer",
"torch.Tensor",
"numpy.nonzero",
"numpy.load",
"numpy.save"
] | [((808, 827), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (825, 827), False, 'from nltk.stem import WordNetLemmatizer\n'), ((725, 751), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (740, 751), False, 'from nltk.corpus import stopwords\n'), ((2544, 2595), 'os.path.exists', 'os.path.exists', (['"""data/coco/coco_words_vocabulary.p"""'], {}), "('data/coco/coco_words_vocabulary.p')\n", (2558, 2595), False, 'import os\n'), ((2792, 2862), 'os.path.join', 'os.path.join', (['annotation_dir', "('1000_labels_' + self.split + '2014.npy')"], {}), "(annotation_dir, '1000_labels_' + self.split + '2014.npy')\n", (2804, 2862), False, 'import os\n'), ((2874, 2905), 'os.path.exists', 'os.path.exists', (['label_file_path'], {}), '(label_file_path)\n', (2888, 2905), False, 'import os\n'), ((4655, 4732), 'dataloaders.data_utils.get_unk_mask_indices', 'get_unk_mask_indices', (['image', 'self.testing', 'self.num_labels', 'self.known_labels'], {}), '(image, self.testing, self.num_labels, self.known_labels)\n', (4675, 4732), False, 'from dataloaders.data_utils import get_unk_mask_indices\n'), ((2969, 2993), 'numpy.load', 'np.load', (['label_file_path'], {}), '(label_file_path)\n', (2976, 2993), True, 'import numpy as np\n'), ((3069, 3088), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (3086, 3088), False, 'from nltk.stem import WordNetLemmatizer\n'), ((3654, 3691), 'numpy.save', 'np.save', (['label_file_path', 'self.labels'], {}), '(label_file_path, self.labels)\n', (3661, 3691), True, 'import numpy as np\n'), ((4356, 4396), 'os.path.join', 'os.path.join', (['self.image_dir', 'imageName_'], {}), '(self.image_dir, imageName_)\n', (4368, 4396), False, 'import os\n'), ((4530, 4565), 'torch.Tensor', 'torch.Tensor', (['self.labels[index, :]'], {}), '(self.labels[index, :])\n', (4542, 4565), False, 'import torch\n'), ((2211, 2279), 'os.path.join', 'os.path.join', (['annotation_dir', "('captions_' 
+ self.split + '2014.json')"], {}), "(annotation_dir, 'captions_' + self.split + '2014.json')\n", (2223, 2279), False, 'import os\n'), ((3891, 3914), 'numpy.nonzero', 'np.nonzero', (['labelVector'], {}), '(labelVector)\n', (3901, 3914), True, 'import numpy as np\n'), ((4756, 4786), 'torch.Tensor', 'torch.Tensor', (['unk_mask_indices'], {}), '(unk_mask_indices)\n', (4768, 4786), False, 'import torch\n')] |
import numpy as np
from numba import njit, jit, prange
@njit(parallel=True,nogil=True)
def generateColumnForward(gdimg):
forward = forwardEnergy(gdimg)
x = gdimg.shape[0]
y = gdimg.shape[1]
lastDir = np.zeros((x,y),np.int8)
energy = np.zeros((x,y),np.uint32)
for i in range(y):
energy[0,i] = gdimg[0,i]
for i in range(1,x):
for j in range(y):
idx = 0
tmp = energy[i-1,j] + forward[i-1,j,1]
if j != 0:
a1 = energy[i-1,j-1] + forward[i-1,j-1,0]
if a1 < tmp:
tmp = a1
idx = -1
if j != y - 1:
a2 = energy[i-1,j+1] + forward[i-1,j+1,2]
if a2 < tmp:
tmp = a2
idx = 1
lastDir[i,j] = idx
energy[i,j] = tmp + gdimg[i,j]
return energy, lastDir
@jit(nopython=True, parallel=True)
def forwardEnergy(I):
n = I.shape[0]
m = I.shape[1]
ret = np.zeros((n, m, 3))
for i in prange(n):
for j in prange(m):
if j < m-1:
x = I[i, j+1]
else:
x = I[i, j]
if j > 0:
y = I[i, j-1]
else:
y = I[i, j]
if i > 0:
z = I[i-1, j]
else:
z = I[i, j]
ret[i, j, 0] = np.abs(x - y) + np.abs(z - y)
ret[i, j, 1] = np.abs(x - y)
ret[i, j, 2] = np.abs(x - y) + np.abs(z - x)
return ret | [
"numpy.abs",
"numba.njit",
"numpy.zeros",
"numba.jit",
"numba.prange"
] | [((58, 89), 'numba.njit', 'njit', ([], {'parallel': '(True)', 'nogil': '(True)'}), '(parallel=True, nogil=True)\n', (62, 89), False, 'from numba import njit, jit, prange\n'), ((903, 936), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'parallel': '(True)'}), '(nopython=True, parallel=True)\n', (906, 936), False, 'from numba import njit, jit, prange\n'), ((218, 243), 'numpy.zeros', 'np.zeros', (['(x, y)', 'np.int8'], {}), '((x, y), np.int8)\n', (226, 243), True, 'import numpy as np\n'), ((255, 282), 'numpy.zeros', 'np.zeros', (['(x, y)', 'np.uint32'], {}), '((x, y), np.uint32)\n', (263, 282), True, 'import numpy as np\n'), ((1007, 1026), 'numpy.zeros', 'np.zeros', (['(n, m, 3)'], {}), '((n, m, 3))\n', (1015, 1026), True, 'import numpy as np\n'), ((1040, 1049), 'numba.prange', 'prange', (['n'], {}), '(n)\n', (1046, 1049), False, 'from numba import njit, jit, prange\n'), ((1068, 1077), 'numba.prange', 'prange', (['m'], {}), '(m)\n', (1074, 1077), False, 'from numba import njit, jit, prange\n'), ((1459, 1472), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (1465, 1472), True, 'import numpy as np\n'), ((1402, 1415), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (1408, 1415), True, 'import numpy as np\n'), ((1418, 1431), 'numpy.abs', 'np.abs', (['(z - y)'], {}), '(z - y)\n', (1424, 1431), True, 'import numpy as np\n'), ((1500, 1513), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (1506, 1513), True, 'import numpy as np\n'), ((1516, 1529), 'numpy.abs', 'np.abs', (['(z - x)'], {}), '(z - x)\n', (1522, 1529), True, 'import numpy as np\n')] |
import numpy as np
from matplotlib import pyplot as plt
class TreSlice:
def __init__(self, X, Y, Z, C):
self.data = C
self.axes = np.array([X, Y, Z])
self.labels = np.array(['X', 'Y', 'Z'])
self.views = np.array([[0, 1, 2], [1, 0, 2], [2, 0, 1]])
self.curAxis = 2
self.curSlice = 0
self.fig = plt.figure()
self.dataMin = np.amin(self.data)
self.dataMax = np.amax(self.data)
def keypress(self, event):
if event.key == ' ':
self.curAxis += 1
self.curSlice = 0
if self.curAxis > 2:
self.curAxis = 0
self.refresh()
elif event.key == 'up':
self.move(1)
elif event.key == 'down':
self.move(-1)
elif event.key == 'right':
self.move(5)
elif event.key == 'left':
self.move(-5)
def move(self, i):
self.curSlice = max(min(self.curSlice + i,
len(self.axes[self.curAxis]) - 2), 0)
self.refresh()
def show(self):
self.fig.canvas.mpl_connect('key_press_event', self.keypress)
self.makePlot()
plt.show()
def refresh(self):
self.fig.clear()
self.makePlot()
self.fig.canvas.draw()
def makePlot(self):
ax = self.fig.gca()
axes = np.take(self.axes, self.views[self.curAxis], axis=0)
labels = np.take(self.labels, self.views[self.curAxis])
heatmap = ax.pcolormesh(
axes[1], axes[2],
np.rollaxis(self.data, self.curAxis)[self.curSlice, :, :].T,
vmin=self.dataMin, vmax=self.dataMax)
ax.set_aspect('equal')
ax.set_xlabel(labels[1])
ax.set_ylabel(labels[2])
ax.set_title('{0} = {1}'.format(labels[0], axes[0][self.curSlice]))
self.fig.colorbar(heatmap)
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print('Usage: python treslice.py filename')
sys.exit(0)
data = np.load(sys.argv[1])
plot = TreSlice(data['X'], data['Y'], data['Z'], data['C'])
plot.show()
| [
"numpy.amin",
"numpy.rollaxis",
"numpy.take",
"numpy.array",
"matplotlib.pyplot.figure",
"sys.exit",
"numpy.load",
"numpy.amax",
"matplotlib.pyplot.show"
] | [((2041, 2061), 'numpy.load', 'np.load', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (2048, 2061), True, 'import numpy as np\n'), ((152, 171), 'numpy.array', 'np.array', (['[X, Y, Z]'], {}), '([X, Y, Z])\n', (160, 171), True, 'import numpy as np\n'), ((194, 219), 'numpy.array', 'np.array', (["['X', 'Y', 'Z']"], {}), "(['X', 'Y', 'Z'])\n", (202, 219), True, 'import numpy as np\n'), ((241, 284), 'numpy.array', 'np.array', (['[[0, 1, 2], [1, 0, 2], [2, 0, 1]]'], {}), '([[0, 1, 2], [1, 0, 2], [2, 0, 1]])\n', (249, 284), True, 'import numpy as np\n'), ((355, 367), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (365, 367), True, 'from matplotlib import pyplot as plt\n'), ((391, 409), 'numpy.amin', 'np.amin', (['self.data'], {}), '(self.data)\n', (398, 409), True, 'import numpy as np\n'), ((433, 451), 'numpy.amax', 'np.amax', (['self.data'], {}), '(self.data)\n', (440, 451), True, 'import numpy as np\n'), ((1194, 1204), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1202, 1204), True, 'from matplotlib import pyplot as plt\n'), ((1377, 1429), 'numpy.take', 'np.take', (['self.axes', 'self.views[self.curAxis]'], {'axis': '(0)'}), '(self.axes, self.views[self.curAxis], axis=0)\n', (1384, 1429), True, 'import numpy as np\n'), ((1447, 1493), 'numpy.take', 'np.take', (['self.labels', 'self.views[self.curAxis]'], {}), '(self.labels, self.views[self.curAxis])\n', (1454, 1493), True, 'import numpy as np\n'), ((2018, 2029), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2026, 2029), False, 'import sys\n'), ((1569, 1605), 'numpy.rollaxis', 'np.rollaxis', (['self.data', 'self.curAxis'], {}), '(self.data, self.curAxis)\n', (1580, 1605), True, 'import numpy as np\n')] |
import numpy as np
from math import inf as infinity
import itertools
import random
import pdb
import matplotlib.pyplot as plt
class TicTacToe(object):
    """Tic-tac-toe environment for RL experiments.

    The agent plays 'X'; a built-in exhaustive minimax opponent plays 'O'.
    Board cells are addressed 1-9 (row-major, top-left = 1). Two board
    encodings are kept in sync:
      * game_state  -- human-readable 3x3 list of ' ', 'X', 'O'
      * agent_state -- numeric 3x3 numpy array (0 empty, 1 = X, 2 = O)
    """
    def __init__(self):
        self.game_state = [[' ',' ',' '],
                        [' ',' ',' '],
                        [' ',' ',' ']]
        self.agent_state = np.zeros((3,3)) #actual agent observation
    def print_board(self):
        # Pretty-print the 3x3 board to stdout.
        print('----------------')
        print('| ' + str(self.game_state[0][0]) + ' || ' + str(self.game_state[0][1]) + ' || ' + str(self.game_state[0][2]) + ' |')
        print('----------------')
        print('| ' + str(self.game_state[1][0]) + ' || ' + str(self.game_state[1][1]) + ' || ' + str(self.game_state[1][2]) + ' |')
        print('----------------')
        print('| ' + str(self.game_state[2][0]) + ' || ' + str(self.game_state[2][1]) + ' || ' + str(self.game_state[2][2]) + ' |')
        print('----------------')
    def convert_state(self):
        #agent plays X. Env plays O. If cell is empty, denoted by zero
        #if it has X it is denoted by 1. if it has O it is denoted by 2.
        # Re-encodes game_state into agent_state and returns it.
        for i in range(3):
            for j in range(3):
                if self.game_state[i][j] == ' ':
                    self.agent_state[i][j] = 0
                elif self.game_state[i][j] == 'X':
                    self.agent_state[i][j] = 1
                else:
                    self.agent_state[i][j] = 2
        return self.agent_state
    def reset(self):
        """Clear the board, print it, and return the initial numeric observation."""
        # NOTE(review): current_state / winner / current_player_idx below are
        # local and unused -- they have no effect outside this method.
        self.game_state = [[' ',' ',' '],[' ',' ',' '],[' ',' ',' ']]
        current_state = "Not Done"
        self.print_board()
        winner = None
        current_player_idx = 0
        return self.convert_state()
    def step(self,action):
        """Apply the agent's move, then the minimax opponent's reply.

        Returns (observation, reward, done). `action` must be a valid cell
        number 1-9 referring to an empty cell; otherwise play_move raises.
        """
        #action must be valid and must be numbered 1 through 9.
        #convert accordingly. Additionally write script in the
        #training/inference loop to make sure action is valid, i.e
        #do not output an action in a cell that is already occupied.
        #This will raise an exception and break your training loop.
        self.play_move('X', action)
        #Opponent plays action
        # getOpponentMove returns sentinel scores (+/-10, 0) when the game is
        # already terminal; only a real cell index triggers an 'O' move.
        block_choice = self.getOpponentMove(self.game_state,"O")
        if block_choice != -10 and block_choice != 10 and block_choice != 0:
            self.play_move('O', block_choice)
        self.print_board()
        rew, done = self.rew_calc()
        winner, current_state = self.check_current_state(self.game_state)
        if current_state == "Draw":
            print("Draw")
        elif winner == 'X':
            print("Win")
        elif winner == 'O':
            print("Lost")
        return self.convert_state(), rew, done
    def rew_calc(self):
        """Return (reward, done): +1 agent win, -1 loss, 0.5 draw, 0 otherwise."""
        reward = 0
        done = False
        current_state = "Not Done"
        winner, current_state = self.check_current_state(self.game_state)
        #While game is being played return done = False
        #Design the reward to be returned
        if current_state == "Not Done":
            reward = 0
            return reward, done
        if current_state == "Draw":
            reward = 0.5
            done = True
            return reward, done
        elif winner == 'X':
            reward = 1.0
            done = True
        elif winner == 'O':
            reward = -1.0
            done = True
        return reward, done
    def play_move(self, player, block_num):
        # Place `player` in cell block_num (1-9); raises on occupied cells.
        if self.game_state[int((block_num-1)/3)][(block_num-1)%3] == ' ':
            self.game_state[int((block_num-1)/3)][(block_num-1)%3] = player
        else:
            raise Exception('Invalid Action!')
    def play_move_hallucinate(self,state, player, block_num):
        # Same as play_move but on an arbitrary (copied) board, for search.
        if state[int((block_num-1)/3)][(block_num-1)%3] == ' ':
            state[int((block_num-1)/3)][(block_num-1)%3] = player
        else:
            raise Exception('Invalid Action!')
    def getOpponentMove(self, state,player):
        """Minimax search from `state` for `player`.

        At terminal states returns a score sentinel (10 opponent win,
        -10 agent win, 0 draw); otherwise returns the best cell index 1-9.
        NOTE(review): recursive calls mix scores and cell indices -- this is
        the original behaviour, kept as-is.
        """
        winner_loser , done = self.check_current_state(state)
        if done == "Done" and winner_loser == 'O': # If Opponent won
            return 10
        elif done == "Done" and winner_loser == 'X': # If Human won
            return -10
        elif done == "Draw":    # Draw condition
            return 0
        moves = []
        empty_cells = []
        for i in range(3):
            for j in range(3):
                if state[i][j] == ' ':
                    empty_cells.append(i*3 + (j+1))
        for empty_cell in empty_cells:
            move = {}
            move['index'] = empty_cell
            new_state = self.copy_game_state(state) #hallucinate through states
            self.play_move_hallucinate(new_state, player, empty_cell)
            if player == 'O':    # If Opponent
                result = self.getOpponentMove(new_state, 'X')    # make more depth tree for human
                move['score'] = result
            else:
                result = self.getOpponentMove(new_state, 'O')    # make more depth tree for Opponent
                move['score'] = result
            moves.append(move)
        # Find best move
        best_move = None
        if player == 'O':   # If Opponent player
            best = -infinity
            for move in moves:
                if move['score'] > best:
                    best = move['score']
                    best_move = move['index']
        else:
            best = infinity
            for move in moves:
                if move['score'] < best:
                    best = move['score']
                    best_move = move['index']
        return best_move
    def copy_game_state(self,state):
        # Deep-copy a 3x3 board (element-wise, so rows are independent).
        new_state = [[' ',' ',' '],[' ',' ',' '],[' ',' ',' ']]
        for i in range(3):
            for j in range(3):
                new_state[i][j] = state[i][j]
        return new_state
    def check_current_state(self,game_state):
        """Return (winner, status): ('X'/'O', "Done"), (None, "Draw"),
        or (None, "Not Done") while the game is still in progress."""
        # Check horizontals
        if (game_state[0][0] == game_state[0][1] and game_state[0][1] == game_state[0][2] and game_state[0][0] != ' '):
            return game_state[0][0], "Done"
        if (game_state[1][0] == game_state[1][1] and game_state[1][1] == game_state[1][2] and game_state[1][0] != ' '):
            return game_state[1][0], "Done"
        if (game_state[2][0] == game_state[2][1] and game_state[2][1] == game_state[2][2] and game_state[2][0] != ' '):
            return game_state[2][0], "Done"
        # Check verticals
        if (game_state[0][0] == game_state[1][0] and game_state[1][0] == game_state[2][0] and game_state[0][0] != ' '):
            return game_state[0][0], "Done"
        if (game_state[0][1] == game_state[1][1] and game_state[1][1] == game_state[2][1] and game_state[0][1] != ' '):
            return game_state[0][1], "Done"
        if (game_state[0][2] == game_state[1][2] and game_state[1][2] == game_state[2][2] and game_state[0][2] != ' '):
            return game_state[0][2], "Done"
        # Check diagonals
        if (game_state[0][0] == game_state[1][1] and game_state[1][1] == game_state[2][2] and game_state[0][0] != ' '):
            return game_state[1][1], "Done"
        if (game_state[2][0] == game_state[1][1] and game_state[1][1] == game_state[0][2] and game_state[2][0] != ' '):
            return game_state[1][1], "Done"
        # Check if draw
        draw_flag = 0
        for i in range(3):
            for j in range(3):
                if game_state[i][j] == ' ':
                    draw_flag = 1
        if draw_flag == 0:
            return None, "Draw"
        return None, "Not Done"
if __name__ == "__main__":
    # Tabular Q-learning on TicTacToe: agent 'X' (Q_values_agent) and a
    # learned opponent 'O' (Q_values_agent2), with epsilon-greedy exploration.
    env = TicTacToe()
    num_episodes = 1000
    learning_rate = 0.2
    epsilon = 0.2
    number_of_actions = 9
    player_matrix = ['O', 'X', ' ']
    player = ['X', 'O']
    # Enumerate every 3x3 board over {O, X, blank}: 3**9 = 19683 states.
    total_states_possible = [[list(i[0:3]),list(i[3:6]),list(i[6:10])] for i in itertools.product(player_matrix, repeat = 9)]
    number_of_states = len(total_states_possible)
    Q_values_agent = np.zeros((number_of_states, number_of_actions)) ##initialization of Q-values
    Q_values_agent2 = np.zeros((number_of_states, number_of_actions))
    states_dictionary = {}
    for i in range(number_of_states):
        states_dictionary[i] = total_states_possible[i]
    # Update the Q values
    def update(state_index, new_state_index, lr, rewardd, done, actionn):
        # Standard one-step Q-learning update for the 'X' table.
        discount_factor = 0.99
        q_current = Q_values_agent[state_index][actionn-1]
        if not done:
            q_new = rewardd + discount_factor * np.max(Q_values_agent[new_state_index])
        else:
            q_new = rewardd
        Q_values_agent[state_index][actionn-1] = q_current + lr * (q_new - q_current)
    def update2(state_index, new_state_index, lr, rewardd, done, actionn):
        # Same update rule, applied to the opponent's ('O') Q table.
        discount_factor = 0.99
        q_current = Q_values_agent2[state_index][actionn-1]
        if not done:
            q_new = rewardd + discount_factor * np.max(Q_values_agent2[new_state_index])
        else:
            q_new = rewardd
        Q_values_agent2[state_index][actionn-1] = q_current + lr * (q_new - q_current)
    ## Fetching action using epsilon greedy algorithm
    def fetch_action(player, state, epsilon):
        # Evaluate every legal move by the Q-values of the resulting state.
        Q_values_current = []
        moves = []
        empty_cells = []
        for i in range(3):
            for j in range(3):
                if state[i][j] == ' ': empty_cells.append(i*3 + (j + 1))
        for empty_cell in empty_cells:
            moves.append(empty_cell)
            next_state = env.copy_game_state(state)
            env.play_move_hallucinate(next_state, player, empty_cell)
            next_state_index = list(states_dictionary.keys())[list(states_dictionary.values()).index(next_state)]
            if player == 'X':
                Q_values_current.append(Q_values_agent[next_state_index])
            elif player == 'O':
                Q_values_current.append(Q_values_agent2[next_state_index])
        # NOTE(review): each appended entry is a 9-vector, so argmax runs over
        # the flattened len(moves)*9 values; moves[best_action_index] can go
        # out of range -- verify against observed runtime behaviour.
        best_action_index = np.argmax(Q_values_current)
        if np.random.rand() < epsilon:
            best_action = random.choice(empty_cells)
            epsilon = epsilon/1.01 ##decrease epsilon
        else:
            best_action = moves[best_action_index]
        return best_action
    def convertstate(astate):
        # Decode a numeric 3x3 observation back to the ' '/'X'/'O' board.
        gstate = [[' ',' ',' '],[' ',' ',' '],[' ',' ',' ']]
        for i in range(3):
            for j in range(3):
                if astate[i][j] == 0:
                    gstate[i][j] = ' '
                elif astate[i][j] == 1:
                    gstate[i][j] = 'X'
                else:
                    gstate[i][j] = 'O'
        return gstate
    epsilon = 0.2
    ## Actual training
    final_reward = np.zeros(num_episodes)
    for i in range(num_episodes):
        rewards = []
        total_reward = 0
        agent_state = env.reset()
        gamestate = convertstate(agent_state)
        done = False
        while not done:
            current_state_index = list(states_dictionary.keys())[list(states_dictionary.values()).index(gamestate)]
            if 0 in agent_state:
                #if current_player_index == 0:
                action = fetch_action('X', gamestate, epsilon)
                env.play_move("X", action)
                agent_state_new_a = env.convert_state()
                gamestate_new_a = convertstate(agent_state_new_a)
                gamestate_new_b = gamestate_new_a
                next_state_index_a = list(states_dictionary.keys())[list(states_dictionary.values()).index(gamestate_new_a)]
                winner_loser , done = env.check_current_state(gamestate_new_a)
                # else:
                # NOTE(review): if the game ended on X's move, action2 /
                # gamestate_new / next_state_index below are never assigned,
                # so the later update/assignment lines would raise
                # NameError on first iteration -- confirm intended flow.
                if 0 in agent_state_new_a:
                    action2 = fetch_action('O', gamestate_new_a, epsilon)
                    winner_loser , done = env.check_current_state(gamestate_new_a)
                    temp = 1
                    if done == "Done" and winner_loser == 'O': # If Opponent won
                        temp = 10
                    elif done == "Done" and winner_loser == 'X': # If Human won
                        temp = -10
                    elif done == "Draw":    # Draw condition
                        temp = 0
                    if temp != -10 and temp != 10 and temp != 0:
                        env.play_move('O', action2)
                        agent_state_new = env.convert_state()
                        gamestate_new = convertstate(agent_state_new)
                        next_state_index = list(states_dictionary.keys())[list(states_dictionary.values()).index(gamestate_new)]
                        gamestate_new_b = gamestate_new
                        env.print_board()
                        reward, done = env.rew_calc()
                        winner, current_state = env.check_current_state(gamestate_new_b)
                        if current_state == "Draw":
                            print("draw")
                        elif winner == 'X':
                            print("Win")
                        elif winner == 'O':
                            print("Lost")
                        rewards.append(reward)
                        # Symmetric (zero-sum) updates: 'O' learns from -reward.
                        update(current_state_index, next_state_index_a, learning_rate, reward, done, action)
                        update2(next_state_index_a, next_state_index, learning_rate, -reward, done, action2)
                        gamestate = gamestate_new
                        agent_state = agent_state_new
                        current_state_index = next_state_index
                        # if winner == None and current_state != "Draw":
                        #     if current_player_index == 0:
                        #         current_player_index = 1
                        #     elif current_player_index == 1:
                        #         current_player_index = 0
                        if done:
                            for j in range(len(rewards)):
                                total_reward += rewards[j]
                            final_reward[i] = total_reward
                            print(final_reward[i])
    plt.scatter(np.arange(0, num_episodes, 1), final_reward)
    plt.xlabel("Episode")
    plt.ylabel("Total Reward")
    plt.title("Total Reward for Each Episode")
    plt.show()
| [
"random.choice",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"itertools.product",
"numpy.argmax",
"numpy.max",
"numpy.zeros",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((7042, 7089), 'numpy.zeros', 'np.zeros', (['(number_of_states, number_of_actions)'], {}), '((number_of_states, number_of_actions))\n', (7050, 7089), True, 'import numpy as np\n'), ((7142, 7189), 'numpy.zeros', 'np.zeros', (['(number_of_states, number_of_actions)'], {}), '((number_of_states, number_of_actions))\n', (7150, 7189), True, 'import numpy as np\n'), ((9967, 9989), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (9975, 9989), True, 'import numpy as np\n'), ((13280, 13301), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (13290, 13301), True, 'import matplotlib.pyplot as plt\n'), ((13307, 13333), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total Reward"""'], {}), "('Total Reward')\n", (13317, 13333), True, 'import matplotlib.pyplot as plt\n'), ((13339, 13381), 'matplotlib.pyplot.title', 'plt.title', (['"""Total Reward for Each Episode"""'], {}), "('Total Reward for Each Episode')\n", (13348, 13381), True, 'import matplotlib.pyplot as plt\n'), ((13387, 13397), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13395, 13397), True, 'import matplotlib.pyplot as plt\n'), ((287, 303), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (295, 303), True, 'import numpy as np\n'), ((9185, 9212), 'numpy.argmax', 'np.argmax', (['Q_values_current'], {}), '(Q_values_current)\n', (9194, 9212), True, 'import numpy as np\n'), ((13230, 13259), 'numpy.arange', 'np.arange', (['(0)', 'num_episodes', '(1)'], {}), '(0, num_episodes, 1)\n', (13239, 13259), True, 'import numpy as np\n'), ((6923, 6965), 'itertools.product', 'itertools.product', (['player_matrix'], {'repeat': '(9)'}), '(player_matrix, repeat=9)\n', (6940, 6965), False, 'import itertools\n'), ((9235, 9251), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9249, 9251), True, 'import numpy as np\n'), ((9290, 9316), 'random.choice', 'random.choice', (['empty_cells'], {}), '(empty_cells)\n', (9303, 9316), False, 'import random\n'), 
((7615, 7654), 'numpy.max', 'np.max', (['Q_values_agent[new_state_index]'], {}), '(Q_values_agent[new_state_index])\n', (7621, 7654), True, 'import numpy as np\n'), ((8062, 8102), 'numpy.max', 'np.max', (['Q_values_agent2[new_state_index]'], {}), '(Q_values_agent2[new_state_index])\n', (8068, 8102), True, 'import numpy as np\n')] |
import numpy as np
import cv2
def triangulate(point1, point2, cam1, cam2):
    """Triangulate a single 3D point from two camera observations.

    :param point1: 2d observation [x, y] seen by cam1
    :param point2: 2d observation [x, y] seen by cam2
    :param cam1: camera object exposing its projection matrix as ``P``
    :param cam2: camera object exposing its projection matrix as ``P``
    :return: euclidean 3d point, shape (3,)
    """
    obs1 = np.asarray(point1, 'float64').reshape(2, 1)
    obs2 = np.asarray(point2, 'float64').reshape(2, 1)
    homog = np.squeeze(cv2.triangulatePoints(cam1.P, cam2.P, obs1, obs2))
    # De-homogenise: divide through by the last coordinate.
    return homog[0:3] / homog[3]
def triangulate_multiple(points, cams):
    """Triangulate a 3d point from every camera pair and average the results.

    :param points: list([ (x,y), (x,y), ... ]), one observation per camera
    :param cams: list of cameras, same length and order as points
    :return: mean 3d point over all camera-pair estimates
    """
    assert len(points) == len(cams), 'number of points and cameras must agree!'
    n_cameras = len(points)
    assert n_cameras >= 2, 'number of cameras must be 1 < but was ' + str(n_cameras)
    estimates = []
    # Every unordered camera pair (a < b) contributes one estimate.
    for a in range(n_cameras - 1):
        for b in range(a + 1, n_cameras):
            estimates.append(triangulate(points[a], points[b], cams[a], cams[b]))
    return np.mean(estimates, axis=0)
def compute_epiline(point1, cam1, cam2):
    """Compute the epipolar line a*x + b*y + c = 0 of point1 in cam2's image.

    :param point1: [x, y] image point in cam1
    :param cam1: source camera
    :param cam2: target camera
    :return: line coefficients (a, b, c)
    """
    pts = np.array(point1, np.float64)
    if pts.ndim == 1:
        # OpenCV expects an array of points, so promote a lone point.
        pts = pts[np.newaxis, :]
    F = get_fundamental_matrix(cam1, cam2)
    return np.squeeze(cv2.computeCorrespondEpilines(pts, 1, F))
def get_fundamental_matrix(cam1, cam2):
    """Estimate the fundamental matrix between two calibrated views.

    A fixed set of synthetic 3d points is projected into both cameras and
    the 8-point algorithm is run on the resulting correspondences.

    :param cam1: camera exposing a {3x4} projection matrix as ``P``
    :param cam2: camera exposing a {3x4} projection matrix as ``P``
    :return: {3x3} fundamental matrix
    """
    points3d = np.array([
        [0, 0, 0],
        [1505, 1493, 1501],
        [300, 300, 0],
        [1200, 0, 1200],
        [0, 0, 1355],
        [1355, 0, 1],
        [999, 999, 1001],
        [1005, 1001, 1000],
        [551, 5, 333],
        [-100, -100, 1005],
        [1004, -100, 531],
        [-999, 5, 33],
        [-1500, -1000, -503],
        [99, -99, 99],
        [-99, 99, 99],
        [99, 99, -99],
        [5, 5, 5],
        [-5, -5, 5],
        [0.5, 0.5, 0.5],
        [0.1, 0.9, 0.8],
        [-0.1, -0.8, -.9]
    ], 'float64')
    n_points = len(points3d)
    points1 = np.zeros((n_points, 2))
    points2 = np.zeros((n_points, 2))
    for idx, (x, y, z) in enumerate(points3d):
        homog = np.array([x, y, z, 1])
        u1, v1, w1 = cam1.P @ homog
        u2, v2, w2 = cam2.P @ homog
        assert w1 != 0 and w2 != 0
        points1[idx, 0] = u1 / w1
        points1[idx, 1] = v1 / w1
        points2[idx, 0] = u2 / w2
        points2[idx, 1] = v2 / w2
    F, _mask = cv2.findFundamentalMat(
        points1, points2, cv2.FM_8POINT
    )
    return F
| [
"numpy.mean",
"cv2.triangulatePoints",
"numpy.array",
"numpy.zeros",
"cv2.findFundamentalMat",
"numpy.expand_dims",
"cv2.computeCorrespondEpilines"
] | [((1238, 1260), 'numpy.mean', 'np.mean', (['pts3d'], {'axis': '(0)'}), '(pts3d, axis=0)\n', (1245, 1260), True, 'import numpy as np\n'), ((1474, 1502), 'numpy.array', 'np.array', (['point1', 'np.float64'], {}), '(point1, np.float64)\n', (1482, 1502), True, 'import numpy as np\n'), ((1963, 2351), 'numpy.array', 'np.array', (['[[0, 0, 0], [1505, 1493, 1501], [300, 300, 0], [1200, 0, 1200], [0, 0, 1355\n ], [1355, 0, 1], [999, 999, 1001], [1005, 1001, 1000], [551, 5, 333], [\n -100, -100, 1005], [1004, -100, 531], [-999, 5, 33], [-1500, -1000, -\n 503], [99, -99, 99], [-99, 99, 99], [99, 99, -99], [5, 5, 5], [-5, -5, \n 5], [0.5, 0.5, 0.5], [0.1, 0.9, 0.8], [-0.1, -0.8, -0.9]]', '"""float64"""'], {}), "([[0, 0, 0], [1505, 1493, 1501], [300, 300, 0], [1200, 0, 1200], [0,\n 0, 1355], [1355, 0, 1], [999, 999, 1001], [1005, 1001, 1000], [551, 5, \n 333], [-100, -100, 1005], [1004, -100, 531], [-999, 5, 33], [-1500, -\n 1000, -503], [99, -99, 99], [-99, 99, 99], [99, 99, -99], [5, 5, 5], [-\n 5, -5, 5], [0.5, 0.5, 0.5], [0.1, 0.9, 0.8], [-0.1, -0.8, -0.9]], 'float64'\n )\n", (1971, 2351), True, 'import numpy as np\n'), ((2515, 2532), 'numpy.zeros', 'np.zeros', (['(21, 2)'], {}), '((21, 2))\n', (2523, 2532), True, 'import numpy as np\n'), ((2547, 2564), 'numpy.zeros', 'np.zeros', (['(21, 2)'], {}), '((21, 2))\n', (2555, 2564), True, 'import numpy as np\n'), ((2885, 2940), 'cv2.findFundamentalMat', 'cv2.findFundamentalMat', (['points1', 'points2', 'cv2.FM_8POINT'], {}), '(points1, points2, cv2.FM_8POINT)\n', (2907, 2940), False, 'import cv2\n'), ((378, 423), 'cv2.triangulatePoints', 'cv2.triangulatePoints', (['P1', 'P2', 'point1', 'point2'], {}), '(P1, P2, point1, point2)\n', (399, 423), False, 'import cv2\n'), ((1551, 1581), 'numpy.expand_dims', 'np.expand_dims', (['point1'], {'axis': '(0)'}), '(point1, axis=0)\n', (1565, 1581), True, 'import numpy as np\n'), ((1651, 1694), 'cv2.computeCorrespondEpilines', 'cv2.computeCorrespondEpilines', (['point1', '(1)', 'F'], {}), 
'(point1, 1, F)\n', (1680, 1694), False, 'import cv2\n'), ((2624, 2646), 'numpy.array', 'np.array', (['[x, y, z, 1]'], {}), '([x, y, z, 1])\n', (2632, 2646), True, 'import numpy as np\n'), ((201, 231), 'numpy.expand_dims', 'np.expand_dims', (['point1'], {'axis': '(1)'}), '(point1, axis=1)\n', (215, 231), True, 'import numpy as np\n'), ((272, 302), 'numpy.expand_dims', 'np.expand_dims', (['point2'], {'axis': '(1)'}), '(point2, axis=1)\n', (286, 302), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Most of this code is taken from https://github.com/matthewearl/faceswap
"""
import argparse
import cv2
import dlib
import json
import numpy
import sys
import os
import urllib.request
import dropbox
import hashlib
import sys
from parsy import generate, string, regex, eof, alt, Parser, ParseError
from tempfile import NamedTemporaryFile
# Parse command line arguments.
# CLI: positional bot metadata plus paths/tokens for the face-swap pipeline.
parser = argparse.ArgumentParser(description='None')
parser.add_argument('botnick', metavar='N', type=str,
                    help='Nickname of the bot')
parser.add_argument('channel', metavar='N', type=str,
                    help='Channel we run on')
parser.add_argument('plugins', metavar='N', type=str,
                    help='Plugins running')
parser.add_argument('--predictor-path', dest='pred_path',
                    help='Path to predictor')
parser.add_argument('--mark-image-path', dest='mark_path',
                    help='Path to image of The Mark')
parser.add_argument('--dropbox-token', dest='dropbox_token',
                    help='Token to connect to dropbox')
# parse_known_args: unknown extra arguments are tolerated and ignored.
args, unknown = parser.parse_known_args()
# Setup connection to dropbox.
dbx = dropbox.Dropbox(args.dropbox_token)
dbx.users_get_current_account()
# Request type tags used by the Request class / parsers below.
HELP_REQUEST = 0
FIND_MARK_REQUEST = 1
PREDICTOR_PATH = args.pred_path
SCALE_FACTOR = 1
FEATHER_AMOUNT = 11
# Index groups into the predictor's landmark layout.
FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))
# Points used to line up the images.
ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS +
                               RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)
# Points from the second image to overlay on the first. The convex hull of each
# element will be overlaid.
OVERLAY_POINTS = [
    LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS + RIGHT_BROW_POINTS,
    NOSE_POINTS + MOUTH_POINTS,
]
# Amount of blur to use during colour correction, as a fraction of the
# pupillary distance.
COLOUR_CORRECT_BLUR_FRAC = 0.6
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)
# Represents a request comming from a user. A request can either be a
# HELP_REQUEST or a FIND_MARK_REQUEST with an associated URL.
class Request:
    """A user request parsed from an IRC message.

    A request is either a HELP_REQUEST or a FIND_MARK_REQUEST; only the
    latter carries an associated image URL.
    """
    def __init__(self, request_type, url=None):
        self.request_type, self.url = request_type, url
# Handle a Request. None is an allowed value and will be returned.
def handle_request(req):
    """Dispatch a parsed Request.

    ``None`` is an allowed value (an unparseable message) and is ignored.
    HELP_REQUESTs print usage; FIND_MARK_REQUESTs run the face swap.
    """
    if req is None:  # fixed: identity check instead of `req == None`
        pass
    elif req.request_type == HELP_REQUEST:
        give_help(args.botnick)
    elif req.request_type == FIND_MARK_REQUEST:
        find_mark(args.botnick, req.url)
# Print help message to stdout.
def give_help(botnick):
    """Print the bot's usage instructions to stdout."""
    print(f'{botnick}: mark help - Display this message')
    print(f'{botnick}: find mark <url> - Locate The Mark in the picture given')
# Download the file pointed to by the URL and locate The Mark in the image. If
# it is not an image or the file is to large an error message will be printed.
def find_mark(botnick, url):
    """Download ``url``, swap The Mark's face into it, and print the result link.

    Progress and error messages (in Danish) go to stdout. Downloads whose
    Content-Length exceeds 1000000000 bytes are rejected. On success the
    swapped image is uploaded to Dropbox and the shared link printed.
    """
    # f1 holds the downloaded source; f2 receives the swapped .png output.
    with NamedTemporaryFile() as f1, NamedTemporaryFile(suffix='.png') as f2:
        try:
            print('Jeg leder efter The Mark')
            response = urllib.request.urlopen(url)
            if int(response.getheader("Content-Length")) > 1000000000:
                raise Exception
            f1.write(response.read())
            replace_face(f1.name, args.mark_path, f2.name)
            file_url = upload_file(f2.name)
            print('The Mark er fundet ' + file_url)
        except TooManyFaces:
            print('Der er for mange ansigter i dit billede')
        except NoFaces:
            print('Jeg kunne ikke finde The Mark')
        except Exception as e:
            # Catch-all: download failures, size limit, cv2/dlib errors.
            print('Filen er for stor eller kan ikke hentes')
            print(e)
# Upload file to dropbox and return the URL to that file.
def upload_file(fname):
    """Upload ``fname`` to Dropbox and return a public shared-link URL.

    The remote path is "/<md5 of content>.png", so identical images map to
    the same Dropbox file.
    """
    with open(fname, 'rb') as f:
        content = f.read()
        dbname = "/{}.png".format(hashlib.md5(content).hexdigest())
        dbx.files_upload(content, dbname)
        dbx.sharing_create_shared_link(dbname, True)
        return dbx.sharing_get_shared_links(dbname).links[0].url
# Takes a message from the IRC channel and parse it to the Request class.
def parse_request(message):
    """Parse an IRC channel message into a Request, or None if it matches neither grammar.

    ``request`` is already a parsy ``Parser`` (produced by ``@generate``),
    so it is used directly instead of being wrapped in another ``Parser``.
    """
    try:
        return request.parse(message)
    except ParseError:
        return None
# Parser generator that parses a request.
# Parser generator that parses a request.
@generate
def request():
    # Try each request form in turn; `alt` yields the first grammar that matches.
    req = yield alt(help_request, find_mark_request)
    return req
# Parser generator that parses a help request.
# Parser generator that parses a help request.
# Grammar: [spaces] "<botnick>:" spaces "mark" spaces "help" [spaces] EOF
@generate
def help_request():
    yield regex(r' ').many()
    yield string(args.botnick + ":")
    yield regex(r' ').at_least(1)
    yield string('mark')
    yield regex(r' ').at_least(1)
    yield string('help')
    yield regex(r' ').many()
    yield eof
    return Request(HELP_REQUEST)
# Parser generator that parses a "find mark" request.
# Parser generator that parses a "find mark" request.
# Grammar: [spaces] "<botnick>:" spaces "find" spaces "mark" spaces URL [spaces] EOF
@generate
def find_mark_request():
    yield regex(r' ').many()
    yield string(args.botnick + ":")
    yield regex(r' ').at_least(1)
    yield string('find')
    yield regex(r' ').at_least(1)
    yield string('mark')
    yield regex(r' ').at_least(1)
    # The URL is any run of non-space characters, collected char by char.
    url = yield regex(r"[^ \t]").at_least(1)
    yield regex(r' ').many()
    yield eof
    return Request(FIND_MARK_REQUEST, "".join(url))
class TooManyFaces(Exception):
    """Raised when more than one face is found in an image."""
    pass
class NoFaces(Exception):
    """Raised when no face can be detected in an image."""
    pass
def get_landmarks(im):
    """Detect a face in ``im`` and return its landmark coordinates.

    Returns a numpy matrix with one (x, y) row per predictor point.
    :raises NoFaces: if the detector finds no face.
    """
    rects = detector(im, 1)
    # if len(rects) > 1:
    #     raise TooManyFaces
    # NOTE(review): the TooManyFaces check above is disabled, so when several
    # faces are present only the first detection is used.
    if len(rects) == 0:
        raise NoFaces
    return numpy.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])
def annotate_landmarks(im, landmarks):
    """Return a copy of ``im`` with each landmark's index drawn at its position."""
    annotated = im.copy()
    for idx, point in enumerate(landmarks):
        pos = (point[0, 0], point[0, 1])
        cv2.putText(annotated, str(idx), pos,
                    fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                    fontScale=0.4,
                    color=(0, 0, 255))
        cv2.circle(annotated, pos, 3, color=(0, 255, 255))
    return annotated
def draw_convex_hull(im, points, color):
    """Fill the convex hull of ``points`` into ``im`` (modifies ``im`` in place)."""
    hull = cv2.convexHull(points)
    cv2.fillConvexPoly(im, hull, color=color)
def get_face_mask(im, landmarks):
    """Build a soft (feathered) 3-channel mask over the eyes/brows/nose/mouth.

    Returns a float64 array of ``im``'s height/width with values near 1.0
    inside the facial regions and 0.0 outside.
    """
    im = numpy.zeros(im.shape[:2], dtype=numpy.float64)
    for group in OVERLAY_POINTS:
        draw_convex_hull(im, landmarks[group], color=1)
    # Replicate the single-channel mask across 3 colour channels (H, W, 3).
    im = numpy.array([im, im, im]).transpose((1, 2, 0))
    # Blur, binarise (which grows the region outward), then blur again to
    # feather the edge of the mask.
    im = (cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0
    im = cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)
    return im
def transformation_from_points(points1, points2):
    """
    Return an affine transformation [s * R | T] such that:
        sum ||s*R*p1,i + T - p2,i||^2
    is minimized.
    """
    # Orthogonal Procrustes: remove centroids, normalise by the standard
    # deviation, then recover the rotation from an SVD. See
    # https://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem
    p1 = points1.astype(numpy.float64)
    p2 = points2.astype(numpy.float64)
    c1 = numpy.mean(p1, axis=0)
    c2 = numpy.mean(p2, axis=0)
    p1 = p1 - c1
    p2 = p2 - c2
    s1 = numpy.std(p1)
    s2 = numpy.std(p2)
    p1 = p1 / s1
    p2 = p2 / s2
    U, S, Vt = numpy.linalg.svd(p1.T * p2)
    # The rotation we need is the transpose of U * Vt, because the classic
    # formulation puts the matrix on the right (row vectors) while ours
    # multiplies column vectors from the left.
    R = (U * Vt).T
    scale = s2 / s1
    translation = c2.T - scale * R * c1.T
    return numpy.vstack([numpy.hstack((scale * R, translation)),
                        numpy.matrix([0., 0., 1.])])
def read_im_and_landmarks(fname):
    """Load an image from disk, rescale it, and detect its face landmarks.

    Returns the tuple (image, landmarks).
    """
    image = cv2.imread(fname, cv2.IMREAD_COLOR)
    image = cv2.resize(image, (image.shape[1] * SCALE_FACTOR,
                               image.shape[0] * SCALE_FACTOR))
    return image, get_landmarks(image)
def warp_im(im, M, dshape):
    """Warp ``im`` by the affine transform ``M`` into an image of shape ``dshape``."""
    warped = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=warped,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return warped
def correct_colours(im1, im2, landmarks1):
    """Match ``im2``'s colouring to ``im1`` via a Gaussian-blur ratio.

    The blur kernel is sized as COLOUR_CORRECT_BLUR_FRAC of the pupillary
    distance measured from ``landmarks1``, so the correction scales with the
    face. Returns a float64 image whose values may fall outside [0, 255].
    """
    blur_amount = COLOUR_CORRECT_BLUR_FRAC * numpy.linalg.norm(
                              numpy.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
                              numpy.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
    blur_amount = int(blur_amount)
    if blur_amount % 2 == 0:
        # Gaussian kernel sizes must be odd.
        blur_amount += 1
    im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
    im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)
    # Avoid divide-by-zero errors.
    im2_blur += (128 * (im2_blur <= 1.0)).astype(im2_blur.dtype)
    return (im2.astype(numpy.float64) * im1_blur.astype(numpy.float64) /
                                                im2_blur.astype(numpy.float64))
# Performs face replacement. Loads the mark and source files and replaces the
# face in the source with the face of The Mark. The result is written to dest.
def replace_face(mark_file, source, dest):
    """Swap the face from ``source`` onto the face in ``mark_file``.

    Aligns the two faces with a Procrustes transform over ALIGN_POINTS,
    blends the facial regions with a feathered mask, colour-corrects the
    warped face, and writes the composite image to ``dest``.
    """
    im1, landmarks1 = read_im_and_landmarks(mark_file)
    im2, landmarks2 = read_im_and_landmarks(source)
    M = transformation_from_points(landmarks1[ALIGN_POINTS],
                               landmarks2[ALIGN_POINTS])
    mask = get_face_mask(im2, landmarks2)
    warped_mask = warp_im(mask, M, im1.shape)
    # Union of both face masks so neither face's features leak through.
    combined_mask = numpy.max([get_face_mask(im1, landmarks1), warped_mask],
                              axis=0)
    warped_im2 = warp_im(im2, M, im1.shape)
    warped_corrected_im2 = correct_colours(im1, warped_im2, landmarks1)
    output_im = im1 * (1.0 - combined_mask) + warped_corrected_im2 *\
            combined_mask
    cv2.imwrite(dest, output_im)
# Main: loop through lines in stdin, parse requests and handle those requests.
# Each stdin line is expected to be a JSON-encoded IRC event; only channel
# messages (PRIVMSG) can contain requests addressed to this bot.
for line in sys.stdin:
    message = json.loads(line)
    if message['command'] == 'PRIVMSG':
        req = parse_request(message['message'])
        handle_request(req)
| [
"numpy.hstack",
"numpy.array",
"parsy.Parser",
"parsy.alt",
"numpy.mean",
"argparse.ArgumentParser",
"dlib.shape_predictor",
"dlib.get_frontal_face_detector",
"parsy.string",
"tempfile.NamedTemporaryFile",
"json.loads",
"dropbox.Dropbox",
"cv2.warpAffine",
"hashlib.md5",
"parsy.regex",
... | [((1527, 1570), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""None"""'}), "(description='None')\n", (1550, 1570), False, 'import argparse\n'), ((2214, 2249), 'dropbox.Dropbox', 'dropbox.Dropbox', (['args.dropbox_token'], {}), '(args.dropbox_token)\n', (2229, 2249), False, 'import dropbox\n'), ((3251, 3283), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (3281, 3283), False, 'import dlib\n'), ((3296, 3332), 'dlib.shape_predictor', 'dlib.shape_predictor', (['PREDICTOR_PATH'], {}), '(PREDICTOR_PATH)\n', (3316, 3332), False, 'import dlib\n'), ((7301, 7323), 'cv2.convexHull', 'cv2.convexHull', (['points'], {}), '(points)\n', (7315, 7323), False, 'import cv2\n'), ((7328, 7371), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['im', 'points'], {'color': 'color'}), '(im, points, color=color)\n', (7346, 7371), False, 'import cv2\n'), ((7416, 7462), 'numpy.zeros', 'numpy.zeros', (['im.shape[:2]'], {'dtype': 'numpy.float64'}), '(im.shape[:2], dtype=numpy.float64)\n', (7427, 7462), False, 'import numpy\n'), ((7699, 7756), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['im', '(FEATHER_AMOUNT, FEATHER_AMOUNT)', '(0)'], {}), '(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)\n', (7715, 7756), False, 'import cv2\n'), ((8318, 8345), 'numpy.mean', 'numpy.mean', (['points1'], {'axis': '(0)'}), '(points1, axis=0)\n', (8328, 8345), False, 'import numpy\n'), ((8355, 8382), 'numpy.mean', 'numpy.mean', (['points2'], {'axis': '(0)'}), '(points2, axis=0)\n', (8365, 8382), False, 'import numpy\n'), ((8429, 8447), 'numpy.std', 'numpy.std', (['points1'], {}), '(points1)\n', (8438, 8447), False, 'import numpy\n'), ((8457, 8475), 'numpy.std', 'numpy.std', (['points2'], {}), '(points2)\n', (8466, 8475), False, 'import numpy\n'), ((8528, 8565), 'numpy.linalg.svd', 'numpy.linalg.svd', (['(points1.T * points2)'], {}), '(points1.T * points2)\n', (8544, 8565), False, 'import numpy\n'), ((9042, 9077), 'cv2.imread', 'cv2.imread', (['fname', 
'cv2.IMREAD_COLOR'], {}), '(fname, cv2.IMREAD_COLOR)\n', (9052, 9077), False, 'import cv2\n'), ((9087, 9159), 'cv2.resize', 'cv2.resize', (['im', '(im.shape[1] * SCALE_FACTOR, im.shape[0] * SCALE_FACTOR)'], {}), '(im, (im.shape[1] * SCALE_FACTOR, im.shape[0] * SCALE_FACTOR))\n', (9097, 9159), False, 'import cv2\n'), ((9257, 9292), 'numpy.zeros', 'numpy.zeros', (['dshape'], {'dtype': 'im.dtype'}), '(dshape, dtype=im.dtype)\n', (9268, 9292), False, 'import numpy\n'), ((9297, 9429), 'cv2.warpAffine', 'cv2.warpAffine', (['im', 'M[:2]', '(dshape[1], dshape[0])'], {'dst': 'output_im', 'borderMode': 'cv2.BORDER_TRANSPARENT', 'flags': 'cv2.WARP_INVERSE_MAP'}), '(im, M[:2], (dshape[1], dshape[0]), dst=output_im, borderMode\n =cv2.BORDER_TRANSPARENT, flags=cv2.WARP_INVERSE_MAP)\n', (9311, 9429), False, 'import cv2\n'), ((9794, 9846), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['im1', '(blur_amount, blur_amount)', '(0)'], {}), '(im1, (blur_amount, blur_amount), 0)\n', (9810, 9846), False, 'import cv2\n'), ((9862, 9914), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['im2', '(blur_amount, blur_amount)', '(0)'], {}), '(im2, (blur_amount, blur_amount), 0)\n', (9878, 9914), False, 'import cv2\n'), ((10947, 10975), 'cv2.imwrite', 'cv2.imwrite', (['dest', 'output_im'], {}), '(dest, output_im)\n', (10958, 10975), False, 'import cv2\n'), ((11093, 11109), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (11103, 11109), False, 'import json\n'), ((4275, 4295), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {}), '()\n', (4293, 4295), False, 'from tempfile import NamedTemporaryFile\n'), ((4303, 4336), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': '""".png"""'}), "(suffix='.png')\n", (4321, 4336), False, 'from tempfile import NamedTemporaryFile\n'), ((5531, 5546), 'parsy.Parser', 'Parser', (['request'], {}), '(request)\n', (5537, 5546), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((5711, 5747), 'parsy.alt', 'alt', 
(['help_request', 'find_mark_request'], {}), '(help_request, find_mark_request)\n', (5714, 5747), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((5880, 5906), 'parsy.string', 'string', (["(args.botnick + ':')"], {}), "(args.botnick + ':')\n", (5886, 5906), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((5951, 5965), 'parsy.string', 'string', (['"""mark"""'], {}), "('mark')\n", (5957, 5965), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((6010, 6024), 'parsy.string', 'string', (['"""help"""'], {}), "('help')\n", (6016, 6024), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((6231, 6257), 'parsy.string', 'string', (["(args.botnick + ':')"], {}), "(args.botnick + ':')\n", (6237, 6257), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((6302, 6316), 'parsy.string', 'string', (['"""find"""'], {}), "('find')\n", (6308, 6316), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((6361, 6375), 'parsy.string', 'string', (['"""mark"""'], {}), "('mark')\n", (6367, 6375), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((7188, 7231), 'cv2.circle', 'cv2.circle', (['im', 'pos', '(3)'], {'color': '(0, 255, 255)'}), '(im, pos, 3, color=(0, 255, 255))\n', (7198, 7231), False, 'import cv2\n'), ((7563, 7588), 'numpy.array', 'numpy.array', (['[im, im, im]'], {}), '([im, im, im])\n', (7574, 7588), False, 'import numpy\n'), ((7621, 7678), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['im', '(FEATHER_AMOUNT, FEATHER_AMOUNT)', '(0)'], {}), '(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)\n', (7637, 7678), False, 'import cv2\n'), ((8893, 8947), 'numpy.hstack', 'numpy.hstack', (['(s2 / s1 * R, c2.T - s2 / s1 * R * c1.T)'], {}), '((s2 / s1 * R, c2.T - s2 / s1 * R * c1.T))\n', (8905, 8947), False, 'import numpy\n'), ((8969, 8998), 'numpy.matrix', 
'numpy.matrix', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (8981, 8998), False, 'import numpy\n'), ((5851, 5861), 'parsy.regex', 'regex', (['""" """'], {}), "(' ')\n", (5856, 5861), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((5917, 5927), 'parsy.regex', 'regex', (['""" """'], {}), "(' ')\n", (5922, 5927), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((5976, 5986), 'parsy.regex', 'regex', (['""" """'], {}), "(' ')\n", (5981, 5986), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((6035, 6045), 'parsy.regex', 'regex', (['""" """'], {}), "(' ')\n", (6040, 6045), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((6202, 6212), 'parsy.regex', 'regex', (['""" """'], {}), "(' ')\n", (6207, 6212), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((6268, 6278), 'parsy.regex', 'regex', (['""" """'], {}), "(' ')\n", (6273, 6278), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((6327, 6337), 'parsy.regex', 'regex', (['""" """'], {}), "(' ')\n", (6332, 6337), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((6386, 6396), 'parsy.regex', 'regex', (['""" """'], {}), "(' ')\n", (6391, 6396), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((6426, 6442), 'parsy.regex', 'regex', (['"""[^ \\\\t]"""'], {}), "('[^ \\\\t]')\n", (6431, 6442), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((6465, 6475), 'parsy.regex', 'regex', (['""" """'], {}), "(' ')\n", (6470, 6475), False, 'from parsy import generate, string, regex, eof, alt, Parser, ParseError\n'), ((9578, 9625), 'numpy.mean', 'numpy.mean', (['landmarks1[LEFT_EYE_POINTS]'], {'axis': '(0)'}), '(landmarks1[LEFT_EYE_POINTS], axis=0)\n', (9588, 9625), False, 'import numpy\n'), ((9640, 9688), 
'numpy.mean', 'numpy.mean', (['landmarks1[RIGHT_EYE_POINTS]'], {'axis': '(0)'}), '(landmarks1[RIGHT_EYE_POINTS], axis=0)\n', (9650, 9688), False, 'import numpy\n'), ((5206, 5226), 'hashlib.md5', 'hashlib.md5', (['content'], {}), '(content)\n', (5217, 5226), False, 'import hashlib\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import random, copy
import matplotlib.cm as cm
import itertools
import scipy.stats
import math
import statistics
import pysan.multisequence as pysan_ms
from itertools import combinations
random.seed('1<PASSWORD>')
def generate_sequence(length, alphabet):
    """
    Creates a random sequence of `length` elements, each drawn uniformly (with
    replacement) from `alphabet`.
    This is useful for benchmarking function performance, and creating examples in the docs.
    Example
    --------
    >>> ps.generate_sequence(12, [1,2,3])
    [2, 3, 3, 3, 2, 2, 2, 1, 3, 3, 2, 2]
    """
    drawn = []
    for _ in range(length):
        drawn.append(random.choice(alphabet))
    return drawn
def full_analysis(sequence):
    """
    Convenience entry point: computes descriptive statistics for a sequence,
    prints them to the console, and shows a collection of summary plots
    (sequence plot, transition matrix, element prevalence, bi- and tri-gram
    counts). Always returns None; all output is via print/plot side effects.
    """
    # descriptive statistics dictionary (length, entropy, complexity, ...)
    details = describe(sequence)
    # build each figure up front, then display them all at the end
    sequence_plot = plot_sequence(sequence)
    tm = plot_transition_matrix(sequence)
    element_counts = get_element_counts(sequence)
    element_prevalence = plot_element_counts(sequence)
    bigrams = plot_ngram_counts(sequence, 2)
    trigrams = plot_ngram_counts(sequence, 3)
    print(details)
    print(element_counts, element_prevalence)
    sequence_plot.show()
    tm.show()
    bigrams.show()
    trigrams.show()
    return None
def describe(sequence) -> dict:
    """
    Computes descriptive properties of a given sequence, returning a dictionary containing the keys:
    {'length', 'alphabet', 'sequence_universe', 'unique_bigrams', 'bigram_universe', 'entropy'}.
    Example
    ---------
    >>> sequence = [1,1,2,1,2,2,3,4,2]
    >>> ps.describe(sequence) #doctest: +NORMALIZE_WHITESPACE
    {'length': 9,
    'alphabet': {1, 2, 3, 4},
    'is_recurrent': True,
    'entropy': 0.8763576394898526,
    'complexity': 0.6885628567541515,
    'turbulence': 7.868228975239414,
    'element_counts': {1: 3, 2: 4, 3: 1, 4: 1},
    'first_positions': {1: 0, 2: 2, 3: 6, 4: 7},
    'ntransitions': 6,
    'sequence_universe': 262144,
    'distinct_subsequences': 175,
    'unique_bigrams': 7,
    'bigram_universe': 16,
    'longest_spell': {'element': 1, 'count': 2, 'start': 0}}
    """
    # Every entry delegates to the corresponding module-level helper; see each
    # helper's docstring for the definition of that statistic.
    details = {
    'length': len(sequence),
    'alphabet': get_alphabet(sequence),
    'is_recurrent': is_recurrent(sequence),
    'entropy' : get_entropy(sequence),
    'complexity': get_complexity(sequence),
    'turbulence': get_turbulence(sequence),
    'element_counts': get_element_counts(sequence),
    'first_positions': get_first_positions(sequence),
    'ntransitions' : get_ntransitions(sequence),
    'sequence_universe': get_ngram_universe(sequence, len(sequence)),
    'distinct_subsequences': get_ndistinct_subsequences(sequence),
    'unique_bigrams': len(get_unique_ngrams(sequence, 2)),
    'bigram_universe' : get_ngram_universe(sequence, 2),
    'longest_spell': get_longest_spell(sequence)
    # TODO: spell durations could be added here (see get_spell_durations)
    }
    return details
# ====================================================================================
# SUMMARY STATISTICS
# ====================================================================================
def is_recurrent(sequence):
    """
    Returns True if the given sequence is recurrent (at least one element
    occurs more than once), otherwise returns False.
    An empty or single-element sequence is never recurrent.
    Example
    ---------
    >>> sequence = [1,2,3,4,5]
    >>> ps.is_recurrent(sequence)
    False
    >>> sequence = [1,1,2,2,3]
    >>> ps.is_recurrent(sequence)
    True
    """
    # A sequence contains a repeat iff deduplicating it shrinks it; this is a
    # single O(n) pass instead of counting every element individually.
    return len(set(sequence)) < len(sequence)
def get_entropy(sequence):
    """
    Computes the normalised `Shannon entropy <https://en.wikipedia.org/wiki/Entropy_(information_theory)>`_ of a given sequence.
    Note that this measure is insensitive to transition frequency or event order, so should be used in conjunction with other measures.
    Returns 0 for an empty sequence or one whose alphabet has a single element,
    since no disorder is possible in either case.
    Example
    --------
    >>> low_entropy_sequence = [1,1,1,1,1,1,1,2]
    >>> ps.get_entropy(low_entropy_sequence)
    0.543...
    >>> high_entropy_sequence = [1,2,2,3,4,3]
    >>> ps.get_entropy(high_entropy_sequence)
    0.959...
    """
    alphabet = set(sequence)
    # Fewer than two distinct elements carry no information; returning 0 here
    # also avoids dividing by a zero (or undefined) maximal entropy below.
    if len(alphabet) < 2:
        return 0
    entropy = 0
    for state in alphabet:
        proportion_occurances = sequence.count(state) / len(sequence)
        entropy += proportion_occurances * math.log(proportion_occurances)
    # maximal entropy is attained when every symbol is equally likely
    maximal_occurances = 1 / len(alphabet)
    alphabet_entropy = len(alphabet) * maximal_occurances * math.log(maximal_occurances)
    return -entropy / -alphabet_entropy
def get_turbulence(sequence):
    """
    Computes turbulence for a given sequence, based on `Elzinga & Liefbroer's 2007 definition <https://www.researchgate.net/publication/225402919_De-standardization_of_Family-Life_Trajectories_of_Young_Adults_A_Cross-National_Comparison_Using_Sequence_Analysis>`_ which is also implemented in the `TraMineR <http://traminer.unige.ch/doc/seqST.html>`_ sequence analysis library.
    Example
    --------
    >>> sequence = [1,1,2,2,3]
    >>> ps.get_turbulence(sequence)
    5.228...
    """
    # phi: number of distinct subsequences of the sequence
    phi = get_ndistinct_subsequences(sequence)
    # duration of each spell (maximal run of identical elements)
    state_durations = [value for key, value in get_spells(sequence)]
    # NOTE(review): statistics.variance requires at least two data points, so a
    # sequence consisting of a single spell raises StatisticsError here —
    # confirm whether callers can reach this with such input.
    variance_of_state_durations = statistics.variance(state_durations)
    tbar = statistics.mean(state_durations)
    # maximum attainable variance of spell durations for this sequence length
    maximum_state_duration_variance = (len(sequence) - 1) * (1 - tbar) ** 2
    top_right = maximum_state_duration_variance + 1
    bot_right = variance_of_state_durations + 1
    turbulence = math.log2(phi * (top_right / bot_right))
    return turbulence
def get_complexity(sequence):
    """
    Computes the complexity of a given sequence, based on TraMineR's `seqici <http://traminer.unige.ch/doc/seqici.html>`_ method.
    Complexity is the geometric mean of the normalised entropy and the
    normalised number of transitions; it is 0 for sequences of identical
    elements.
    """
    alphabet = get_alphabet(sequence)
    # maximal entropy for this alphabet (every element equally likely)
    pre_log = 1 / len(alphabet)
    hmax = -math.log(pre_log)
    if hmax == 0:
        return 0 # all identical elements, no complexity
    # normalised Shannon entropy of the sequence
    hs = get_entropy(sequence)
    # observed transitions, normalised by the maximum possible for this length
    qs = get_ntransitions(sequence)
    qmax = len(sequence) - 1
    norm_transitions = qs / qmax
    norm_entropy = hs / hmax
    # geometric mean of the two normalised quantities
    complexity = math.sqrt(norm_transitions * norm_entropy)
    return complexity
def get_routine(sequence, duration):
    """
    Computes a normalised measure of routine within a sequence for a given
    duration within that sequence.
    E.g. with a sequence where each element is one day, calling get_routine()
    with a duration of 7 would look at weekly routines. This is the same
    computation as the multisequence synchrony measure, applied to the
    within-sequence chunks of `duration` elements.
    Example
    ---------
    >>> sequence = [1,1,2,2,3,1,1,2,3,2,1,1,3,2,2]
    >>> ps.get_routine(sequence, 5)
    0.4
    """
    if len(sequence) % duration != 0:
        raise Exception('sequence not divisible by interval, check data input')
    # slice the sequence into consecutive, non-overlapping cycles of equal length
    cycles = [sequence[start:start + duration] for start in range(0, len(sequence), duration)]
    return pysan_ms.get_synchrony(cycles)
# ====================================================================================
# ELEMENTS
# ====================================================================================
def get_alphabet(sequence):
    """
    Computes the alphabet of a given sequence (the set of its unique elements).
    Parameters
    ----------
    sequence : int
        A sequence of elements, encoded as integers e.g. [1,3,2,1].
    Example
    ----------
    >>> sequence = [1,1,2,1,2,2,3,4,2]
    >>> ps.get_alphabet(sequence)
    {1, 2, 3, 4}
    """
    distinct_elements = set()
    distinct_elements.update(sequence)
    return distinct_elements
def get_first_positions(sequence):
    """
    Reports the first occurance of each element in the sequence in a
    dictionary, mapping each distinct element to the index at which it first
    appears.
    Example
    ---------
    >>> sequence = [1,1,2,3,4]
    >>> ps.get_first_positions(sequence)
    {1: 0, 2: 2, 3: 3, 4: 4}
    """
    # list.index returns the earliest position of each element
    return {element: sequence.index(element) for element in set(sequence)}
def get_element_counts(sequence):
    """
    Counts the number of occurances for each element in a sequence, returning a dictionary containing the elements as keys and their counts as values.
    Example
    ---------
    >>> sequence = [1,1,2,1,2,2,3,4,2]
    >>> ps.get_element_counts(sequence)
    {1: 3, 2: 4, 3: 1, 4: 1}
    """
    # single O(n) pass, instead of one full sequence.count() scan per distinct
    # element (which was O(|alphabet| * n))
    counts = {}
    for element in sequence:
        counts[element] = counts.get(element, 0) + 1
    return counts
def get_element_frequency(sequence):
    """
    Computes the relative frequency (aka prevalence or unconditional
    probability) of each element in a sequence, returning a dictionary where
    each key is an element and each value is that element's relative frequency.
    Example
    ---------
    >>> sequence = [1,1,2,1,2,2,3,4,2,1]
    >>> ps.get_element_frequency(sequence)
    {1: 0.4, 2: 0.4, 3: 0.1, 4: 0.1}
    """
    total = len(sequence)
    return {element: sequence.count(element) / total for element in set(sequence)}
# ====================================================================================
# SUBSEQUENCES
# ====================================================================================
# NGRAMS
def get_subsequences(sequence):
    """
    Computes the actual possible subsequences in a given sequence, returning
    them as a list of lists, ordered by length then position.
    Note that this does not include the empty list as a subsequence.
    Example
    --------
    >>> sequence = [1,2,3]
    >>> ps.get_subsequences(sequence)
    [[1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]
    """
    found = []
    # combinations() preserves element order, so each pick is a subsequence
    for size in range(1, len(sequence) + 1):
        found.extend(list(combo) for combo in combinations(sequence, size))
    return found
def get_ndistinct_subsequences(sequence):
    """
    Computes the number of distinct subsequences of a given sequence (the
    count includes the empty subsequence), using the classic dynamic
    programming recurrence described at
    `geeksforgeeks <https://www.geeksforgeeks.org/count-distinct-subsequences/>`_.
    Elements may be any hashable values; they are no longer required to map
    onto single ASCII characters.
    Example
    --------
    >>> sequence = [1,2,1,3]
    >>> ps.get_ndistinct_subsequences(sequence)
    14
    """
    # dp[i] holds the number of distinct subsequences of sequence[:i];
    # the empty prefix has exactly one (the empty subsequence).
    dp = [0] * (len(sequence) + 1)
    dp[0] = 1
    # last_occurrence maps an element to the index of its previous appearance.
    # This replaces the original fixed 256-slot ord() table, which crashed for
    # any element whose str() form was longer than one character (e.g. 10).
    last_occurrence = {}
    for i in range(1, len(sequence) + 1):
        element = sequence[i - 1]
        # appending an element can at most double the number of subsequences...
        dp[i] = 2 * dp[i - 1]
        # ...but subsequences already counted at its previous occurrence are
        # duplicates and must be removed
        if element in last_occurrence:
            dp[i] -= dp[last_occurrence[element]]
        last_occurrence[element] = i - 1
    return dp[len(sequence)]
def get_unique_ngrams(sequence, n):
    """
    Creates a list of all unique ngrams found in a given sequence, in order of
    first appearance.
    Example
    ---------
    >>> sequence = [2,1,1,4,2,2,3,4,2,1,1]
    >>> ps.get_unique_ngrams(sequence, 3) #doctest: +NORMALIZE_WHITESPACE
    [[2, 1, 1],
    [1, 1, 4],
    [1, 4, 2],
    [4, 2, 2],
    [2, 2, 3],
    [2, 3, 4],
    [3, 4, 2],
    [4, 2, 1]]
    """
    # A set of tuples gives O(1) membership tests and removes the original
    # str()/eval() round-trip, which was slow and unsafe for arbitrary elements.
    seen = set()
    unique_ngrams = []
    for start in range(len(sequence) - n + 1):
        this_ngram = sequence[start:start + n]
        key = tuple(this_ngram)
        if key not in seen:
            seen.add(key)
            unique_ngrams.append(this_ngram)
    return unique_ngrams
def get_all_ngrams(sequence, n):
    """
    Creates a list of every ngram in a given sequence (including repeats), in
    order of appearance. A sequence shorter than n yields an empty list.
    Example
    ---------
    >>> sequence = [2,1,1,4,2,2,3,4,2,1,1]
    >>> ps.get_all_ngrams(sequence, 3) #doctest: +NORMALIZE_WHITESPACE
    [[2, 1, 1],
    [1, 1, 4],
    [1, 4, 2],
    [4, 2, 2],
    [2, 2, 3],
    [2, 3, 4],
    [3, 4, 2],
    [4, 2, 1],
    [2, 1, 1]]
    """
    # (The previous docstring showed get_unique_ngrams output by mistake; this
    # function keeps duplicates.) One slice per starting position:
    return [sequence[start:start + n] for start in range(len(sequence) - n + 1)]
def get_ngram_universe(sequence, n):
    """
    Computes the universe of possible ngrams given a sequence. Where n is
    equal to the length of the sequence, the resulting number represents the
    sequence universe.
    Returns the string 'really big' instead of computing the value when both
    the alphabet size and n exceed 10, to avoid enormous exponentiations.
    Example
    --------
    >>> sequence = [2,1,1,4,2,2,3,4,2,1,1]
    >>> ps.get_ngram_universe(sequence, 3)
    64
    """
    # with recurrence allowed, the universe is alphabet_size ** n (SSA pg 68)
    alphabet_size = len(set(sequence))
    if alphabet_size > 10 and n > 10:
        return 'really big'
    return alphabet_size ** n
def get_ngram_counts(sequence, n):
    """
    Computes the prevalence of ngrams in a sequence, returning a dictionary where each key is an ngram, and each value is the number of times that ngram appears in the sequence.
    Parameters
    -------------
    sequence : list(int)
        A sequence of elements, encoded as integers e.g. [1,3,2,1].
    n: int
        The number of elements in the ngrams to extract.
    Example
    ---------
    >>> sequence = [2,1,1,4,2,2,3,4,2,1,1]
    >>> ps.get_ngram_counts(sequence, 3) #doctest: +NORMALIZE_WHITESPACE
    {'[2, 1, 1]': 2,
    '[1, 1, 4]': 1,
    '[1, 4, 2]': 1,
    '[4, 2, 2]': 1,
    '[2, 2, 3]': 1,
    '[2, 3, 4]': 1,
    '[3, 4, 2]': 1,
    '[4, 2, 1]': 1}
    """
    # Single pass over the sequence instead of enumerating unique ngrams first
    # and rescanning. Keys remain the str() form of each ngram (backward
    # compatible), inserted in order of first appearance.
    ngram_counts = {}
    for start in range(len(sequence) - n + 1):
        key = str(sequence[start:start + n])
        ngram_counts[key] = ngram_counts.get(key, 0) + 1
    return ngram_counts
# TRANSITIONS
def get_transitions(sequence):
    """
    Extracts a list of transitions from a sequence, returning a list of lists
    containing each transition (adjacent pair of differing elements).
    Example
    --------
    >>> sequence = [1,2,2,1,2,3,2,3,1]
    >>> ps.get_transitions(sequence)
    [[1, 2], [2, 1], [1, 2], [2, 3], [3, 2], [2, 3], [3, 1]]
    """
    # zip the sequence against itself shifted by one to walk adjacent pairs
    return [[current, following]
            for current, following in zip(sequence, sequence[1:])
            if current != following]
def get_ntransitions(sequence):
    """
    Computes the number of transitions in a sequence (adjacent pairs whose
    elements differ).
    Example
    --------
    >>> sequence = [1,1,1,2,2,3,3,3,4,4]
    >>> ps.get_ntransitions(sequence)
    3
    """
    # count differing adjacent pairs directly, without materialising the
    # transition list
    return sum(1 for current, following in zip(sequence, sequence[1:])
               if current != following)
def get_transition_matrix(sequence, alphabet=None, verbose=False):
    """
    Computes a transition matrix for each bigram in a sequence.
    The resulting matrix can be interpreted by reading along the side first, then across the top, indicating from the element in down the side to the element along the top.
    For example, to find the number of transitions from element 2 to element 3, find element 2 down the side, then follow that row across until it reaches element 3 across the top.
    Parameters
    -----------
    sequence : list
        The sequence to analyse.
    alphabet : iterable, optional
        Elements to use as matrix rows/columns; defaults to the sequence's alphabet.
    verbose : bool, optional
        If True, also prints a matrix describing which bigram each cell counts.
    Examples
    ----------
    >>> sequence = [1,2,2,1,2,3,2,3,1]
    >>> ps.get_transition_matrix(sequence) #doctest: +NORMALIZE_WHITESPACE
         ->1  ->2  ->3
    1->  0.0  2.0  0.0
    2->  1.0  1.0  2.0
    3->  1.0  1.0  0.0
    """
    if alphabet is None:
        alphabet = get_alphabet(sequence)
    # Count each observed bigram once (O(n)) instead of rescanning the full
    # bigram list for every cell of the matrix (O(n * |alphabet|^2)).
    bigram_counts = {}
    for position in range(len(sequence) - 1):
        pair = (sequence[position], sequence[position + 1])
        bigram_counts[pair] = bigram_counts.get(pair, 0) + 1
    transition_matrix = np.zeros((len(alphabet), len(alphabet)))
    descriptive_matrix = [['-' for _ in alphabet] for _ in alphabet]
    for x, element_row in enumerate(alphabet):
        for y, element_column in enumerate(alphabet):
            descriptive_matrix[x][y] = 'n' + str([element_row, element_column])
            transition_matrix[x, y] = bigram_counts.get((element_row, element_column), 0)
    # add column & index labelling in TraMineR style (rows 'from', columns 'to')
    pre_alphabet = [str(a) + '->' for a in alphabet]
    post_alphabet = ['->' + str(a) for a in alphabet]
    if verbose:
        de_df = pd.DataFrame(descriptive_matrix, columns=post_alphabet, index=pre_alphabet)
        print(de_df)
    tm_df = pd.DataFrame(transition_matrix, columns=post_alphabet, index=pre_alphabet)
    return tm_df
# SPELLS
def get_spells(sequence):
    """
    Returns a list of tuples where each tuple holds the element and the length
    of the spell (also known as run or episode) for each spell in the sequence.
    Example
    ---------
    >>> sequence = [1,1,2,1,2,2,3]
    >>> ps.get_spells(sequence)
    [(1, 2), (2, 1), (1, 1), (2, 2), (3, 1)]
    """
    # itertools.groupby yields one (element, run-iterator) pair per maximal run
    return [(element, len(list(run))) for element, run in itertools.groupby(sequence)]
def get_longest_spell(sequence):
    """
    Returns a dict containing the element, count, and starting position of the
    longest spell in the sequence. The keys of this dict are 'element',
    'count', and 'start'. Where several spells tie for longest, the first one
    is reported.
    Example
    --------
    >>> sequence = [1,1,1,4,2,2,3,4,2]
    >>> ps.get_longest_spell(sequence)
    {'element': 1, 'count': 3, 'start': 0}
    """
    # one (element, length) pair per maximal run of identical elements
    spells = [(element, len(list(run))) for element, run in itertools.groupby(sequence)]
    longest = max(length for _, length in spells)
    # walk the spells accumulating the start offset; report the first spell
    # whose length matches the maximum
    start = 0
    for element, length in spells:
        if length == longest:
            return {'element': element, 'count': length, 'start': start}
        start += length
def get_spell_durations(sequence):
    """
    Computes the durations of each spell in the sequence, returning a list.
    Example
    ---------
    >>> sequence = [1,1,2,1,2,2,3]
    >>> ps.get_spell_durations(sequence)
    [2, 1, 1, 2, 1]
    """
    # the length of each maximal run of identical elements, in order
    return [len(list(run)) for _, run in itertools.groupby(sequence)]
# ====================================================================================
# PLOTTING FUNCTIONS
# ====================================================================================
def plot_sequence(sequence, highlighted_ngrams=[]):
    """
    Creates a standard sequence plot where each element corresponds to a position on the y-axis.
    The optional highlighted_ngrams parameter can be one or more n-grams to be outlined in a red box.
    Returns the matplotlib.pyplot module so the caller can show or save the figure.
    Example
    ----------
    .. plot::
        >>> sequence = [1,1,2,1,2,2,3,1,1,2,2,1,2,2,3,1,1,2]
        >>> ps.plot_sequence(sequence) #doctest: +SKIP
    .. plot::
        >>> sequence = [1,1,2,1,2,2,3,1,1,2,2,1,2,2,3,1,1,2]
        >>> ps.plot_sequence(sequence, [1,2]) #doctest: +SKIP
    .. plot::
        >>> sequence = [1,2,3,2,3,4,4,3,2,3,1,3,1,2,3,1,3,4,2,3,2,2]
        >>> ps.plot_sequence(sequence, [[1,2,3], [3,4]]) #doctest: +SKIP
    """
    # NOTE: the mutable default [] is safe here because highlighted_ngrams is
    # only read (compared and iterated), never mutated.
    np_sequence = np.array(sequence)
    alphabet_len = len(get_alphabet(sequence))
    # scale the figure with the sequence length and alphabet size
    plt.figure(figsize=[len(sequence)*0.3,alphabet_len * 0.3])
    unique_values = list(set(sequence))
    # one scatter series per distinct element, plotted on its own y level
    for i, value in enumerate(unique_values):
        points = np.where(np_sequence == value, i, np.nan)
        plt.scatter(x=range(len(np_sequence)), y=points, marker='s', label=value, s=35)
    plt.yticks(range(len(unique_values)), unique_values)
    plt.ylim(-1, len(unique_values))
    # highlight any of the n-grams given
    if highlighted_ngrams != []:
        def highlight_ngram(ngram):
            # find every position where this exact n-gram occurs
            n = len(ngram)
            match_positions = []
            for x in range(len(sequence) - n + 1):
                this_ngram = sequence[x:x + n]
                if str(this_ngram) == str(ngram):
                    match_positions.append(x)
            # draw a red rectangle (four line segments) around each match.
            # NOTE(review): the box's vertical extent is derived from the
            # n-gram's element values, which assumes elements are 1..k and
            # match their y levels — confirm for non-contiguous alphabets.
            for position in match_positions:
                bot = min(ngram) - 1.5
                top = max(ngram) - 0.5
                left = position - 0.5
                right = left + n
                line_width = 1
                plt.plot([left,right], [bot,bot], color='red', linewidth=line_width)
                plt.plot([left,right], [top,top], color='red', linewidth=line_width)
                plt.plot([left,left], [bot,top], color='red', linewidth=line_width)
                plt.plot([right,right], [bot,top], color='red', linewidth=line_width)
        # check if only one n-gram has been supplied
        if type(highlighted_ngrams[0]) is int:
            highlight_ngram(highlighted_ngrams)
        else: # multiple n-gram's found
            for ngram in highlighted_ngrams:
                highlight_ngram(ngram)
    return plt
def plot_sequence_1d(sequence, flat=False):
    """
    Plots a sequence in one dimension - useful for stacking multiple sequences
    above one another. Returns the matplotlib.pyplot module.
    Note: the `flat` parameter is currently unused.
    Example
    ---------
    .. plot::
        >>> sequence = [1,1,1,2,2,2,3,1,3,2,2,2,4,4,1,1,1,1,2,1,1]
        >>> ps.plot_sequence_1d(sequence) #doctest: +SKIP
    """
    np_sequence = np.array(sequence)
    alphabet_len = len(get_alphabet(sequence))
    # short wide figure, scaled with the sequence length
    plt.figure(figsize=[len(sequence)*0.4, 0.5])
    unique_values = list(set(sequence))
    # one bar series per distinct element; bars of different series overlap
    # horizontally, producing a single coloured strip
    for i, value in enumerate(unique_values):
        points = np.where(np_sequence == value, 1, np.nan)
        plt.bar(range(len(points)), points, width=1, align='edge', label=i)
    plt.ylim(-0.3, 1.3)
    # hide the (meaningless) y axis ticks and labels
    plt.tick_params(
        axis='y',
        which='both',
        left=False,
        labelleft=False)
    plt.xlabel('Position, p')
    plt.legend(bbox_to_anchor=(1, 1.2), loc='upper left')
    return plt
def plot_element_counts(sequence):
    """
    Plots the number of occurances of each unique element in a given sequence
    as a horizontal bar chart. Returns the matplotlib.pyplot module.
    Example
    ---------
    .. plot::
        >>> sequence = [1,1,2,1,2,2,3,1,1,2,2,1,2,2,3,1,1,2]
        >>> ps.plot_element_counts(sequence) #doctest: +SKIP
    """
    prev = get_element_counts(sequence)
    # sort ascending by count so the most frequent element ends up at the top
    prev = {k: prev[k] for k in sorted(prev, key=prev.get)}
    xdata = [str(key) for key,value in prev.items()]
    ydata = [value for key,value in prev.items()]
    plt.figure()
    plt.barh(xdata, ydata, label='element count')
    # horizontal grid lines add noise on a barh plot, so disable them
    plt.gca().yaxis.grid(False)
    plt.legend()
    return plt
def plot_ngram_counts(sequence, n):
    """
    Plots the number of occurances of ngrams in a given sequence as a
    horizontal bar chart. Returns the matplotlib.pyplot module.
    Example
    ---------
    .. plot::
        >>> sequence = [1,1,2,1,2,2,3,1,1,2,2,1,2,2,3,1,1,2]
        >>> ps.plot_ngram_counts(sequence, 3) #doctest: +SKIP
    """
    ngram_counts = get_ngram_counts(sequence, n)
    # sort ascending by count so the most frequent ngram ends up at the top
    ngram_counts = {k: ngram_counts[k] for k in sorted(ngram_counts, key=ngram_counts.get)}
    # strip the surrounding brackets from the str() form of each ngram.
    # NOTE(review): the replace(', ', ', ') call is a no-op — possibly a
    # leftover from an intended separator change.
    xdata = [key[1:len(key)-1].replace(', ', ', ') for key,value in ngram_counts.items()]
    ydata = [value for key,value in ngram_counts.items()]
    plt.figure()
    plt.barh(xdata, ydata, label=str(n) +'-gram')
    plt.gca().yaxis.grid(False)
    plt.legend()
    return plt
def plot_transition_matrix(sequence, cmap='summer'):
    """
    Computes and plots a transition matrix, returning a colored matrix with
    elements at position n up the y axis, and elements at position n+1 along
    the x axis.
    Example
    ---------
    .. plot::
        >>> sequence = [1,1,2,1,4,2,3,1,1,2,2,1,2,2,3,1,1,2]
        >>> ps.plot_transition_matrix(sequence) #doctest: +SKIP
    """
    # delegate: build the matrix, then shade it
    return color_matrix(get_transition_matrix(sequence), cmap=cmap)
def color_matrix(matrix, cmap='summer'):
    """
    Creates a shaded matrix based on the values in that matrix. This is most useful when given a transition matrix as it intuitively plots the prevalence of transitions between states. The y axis represents the elements at position n, and the x axis represents the elements at position n+1.
    Returns the matplotlib.pyplot module.
    Parameters
    -----------
    matrix: DataFrame
        A 2D matrix of values in the form of a `pandas dataframe <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_. Column names are used as axis ticks.
    cmap: string
        The name of a `matplotlib color map <https://matplotlib.org/3.3.1/tutorials/colors/colormaps.html>`_.
    """
    results_size = len(matrix.columns)
    values = np.empty((results_size, results_size), dtype=object)
    # normalise cell contents: '-' is treated as the sentinel 100, '' as
    # missing (rendered white via set_bad), and starred entries like '0.5*'
    # have the marker stripped before casting to float
    for r, row in enumerate(matrix.values):
        for e, element in enumerate(row):
            if element == "-":
                values[r, e] = 100
                continue
            if element == "":
                values[r, e] = np.nan
                continue
            if "*" in str(element):
                value = element.replace("*", "")
                values[r, e] = float(value)
            else:
                values[r, e] = element
    # cm.get_cmap was removed in Matplotlib 3.9; plt.get_cmap is the portable
    # accessor. Copy before mutating so the registered colormap is untouched.
    current_cmap = copy.copy(plt.get_cmap(cmap))
    current_cmap.set_bad(color="white")
    plt.figure()
    # this one-liner sets the x axis to appear at the top of this plot only
    with plt.rc_context({'xtick.bottom':False, 'xtick.labelbottom':False, 'xtick.top':True, 'xtick.labeltop':True}):
        ax = plt.gca()
        ax.xaxis.set_label_position('top')
        # np.float was removed in NumPy 1.24; the builtin float is equivalent
        plt.imshow(np.array(values).astype(float), cmap=current_cmap)
        plt.yticks(range(len(matrix.index)), list(matrix.index))
        plt.xticks(range(len(matrix.columns)), list(matrix.columns))
        cbar = plt.colorbar()
        #cbar.set_ticks([-100, -80, -60, -40, -20, 0, 20, 40, 60, 80, 100])
        #cbar.set_ticklabels([-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1])
    plt.grid(False)
    return plt
# announce on the console that the module imported and defined everything successfully
print('pysan ready')
"matplotlib.pyplot.rc_context",
"matplotlib.pyplot.grid",
"math.sqrt",
"math.log2",
"math.log",
"numpy.array",
"pysan.multisequence.get_synchrony",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.plot",
"numpy.empty",
"statistics.variance",
"pandas.D... | [((284, 310), 'random.seed', 'random.seed', (['"""1<PASSWORD>"""'], {}), "('1<PASSWORD>')\n", (295, 310), False, 'import random, copy\n'), ((5296, 5332), 'statistics.variance', 'statistics.variance', (['state_durations'], {}), '(state_durations)\n', (5315, 5332), False, 'import statistics\n'), ((5392, 5424), 'statistics.mean', 'statistics.mean', (['state_durations'], {}), '(state_durations)\n', (5407, 5424), False, 'import statistics\n'), ((5659, 5699), 'math.log2', 'math.log2', (['(phi * (top_right / bot_right))'], {}), '(phi * (top_right / bot_right))\n', (5668, 5699), False, 'import math\n'), ((6392, 6434), 'math.sqrt', 'math.sqrt', (['(norm_transitions * norm_entropy)'], {}), '(norm_transitions * norm_entropy)\n', (6401, 6434), False, 'import math\n'), ((7274, 7304), 'pysan.multisequence.get_synchrony', 'pysan_ms.get_synchrony', (['cycles'], {}), '(cycles)\n', (7296, 7304), True, 'import pysan.multisequence as pysan_ms\n'), ((16269, 16343), 'pandas.DataFrame', 'pd.DataFrame', (['transition_matrix'], {'columns': 'post_alphabet', 'index': 'pre_alphabet'}), '(transition_matrix, columns=post_alphabet, index=pre_alphabet)\n', (16281, 16343), True, 'import pandas as pd\n'), ((18856, 18874), 'numpy.array', 'np.array', (['sequence'], {}), '(sequence)\n', (18864, 18874), True, 'import numpy as np\n'), ((20608, 20626), 'numpy.array', 'np.array', (['sequence'], {}), '(sequence)\n', (20616, 20626), True, 'import numpy as np\n'), ((20929, 20948), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.3)', '(1.3)'], {}), '(-0.3, 1.3)\n', (20937, 20948), True, 'import matplotlib.pyplot as plt\n'), ((20950, 21018), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'which': '"""both"""', 'left': '(False)', 'labelleft': '(False)'}), "(axis='y', which='both', left=False, labelleft=False)\n", (20965, 21018), True, 'import matplotlib.pyplot as plt\n'), ((21029, 21054), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position, p"""'], {}), 
"('Position, p')\n", (21039, 21054), True, 'import matplotlib.pyplot as plt\n'), ((21056, 21109), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1, 1.2)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1, 1.2), loc='upper left')\n", (21066, 21109), True, 'import matplotlib.pyplot as plt\n'), ((21587, 21599), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21597, 21599), True, 'import matplotlib.pyplot as plt\n'), ((21601, 21646), 'matplotlib.pyplot.barh', 'plt.barh', (['xdata', 'ydata'], {'label': '"""element count"""'}), "(xdata, ydata, label='element count')\n", (21609, 21646), True, 'import matplotlib.pyplot as plt\n'), ((21677, 21689), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21687, 21689), True, 'import matplotlib.pyplot as plt\n'), ((22241, 22253), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22251, 22253), True, 'import matplotlib.pyplot as plt\n'), ((22331, 22343), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (22341, 22343), True, 'import matplotlib.pyplot as plt\n'), ((23550, 23602), 'numpy.empty', 'np.empty', (['(results_size, results_size)'], {'dtype': 'object'}), '((results_size, results_size), dtype=object)\n', (23558, 23602), True, 'import numpy as np\n'), ((24015, 24027), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24025, 24027), True, 'import matplotlib.pyplot as plt\n'), ((639, 662), 'random.choice', 'random.choice', (['alphabet'], {}), '(alphabet)\n', (652, 662), False, 'import random, copy\n'), ((6005, 6022), 'math.log', 'math.log', (['pre_log'], {}), '(pre_log)\n', (6013, 6022), False, 'import math\n'), ((16169, 16244), 'pandas.DataFrame', 'pd.DataFrame', (['descriptive_matrix'], {'columns': 'post_alphabet', 'index': 'pre_alphabet'}), '(descriptive_matrix, columns=post_alphabet, index=pre_alphabet)\n', (16181, 16244), True, 'import pandas as pd\n'), ((19073, 19114), 'numpy.where', 'np.where', (['(np_sequence == value)', 'i', 'np.nan'], {}), 
'(np_sequence == value, i, np.nan)\n', (19081, 19114), True, 'import numpy as np\n'), ((20811, 20852), 'numpy.where', 'np.where', (['(np_sequence == value)', '(1)', 'np.nan'], {}), '(np_sequence == value, 1, np.nan)\n', (20819, 20852), True, 'import numpy as np\n'), ((23957, 23974), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cmap'], {}), '(cmap)\n', (23968, 23974), True, 'import matplotlib.cm as cm\n'), ((24108, 24222), 'matplotlib.pyplot.rc_context', 'plt.rc_context', (["{'xtick.bottom': False, 'xtick.labelbottom': False, 'xtick.top': True,\n 'xtick.labeltop': True}"], {}), "({'xtick.bottom': False, 'xtick.labelbottom': False,\n 'xtick.top': True, 'xtick.labeltop': True})\n", (24122, 24222), True, 'import matplotlib.pyplot as plt\n'), ((24223, 24232), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24230, 24232), True, 'import matplotlib.pyplot as plt\n'), ((24469, 24483), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (24481, 24483), True, 'import matplotlib.pyplot as plt\n'), ((24635, 24650), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (24643, 24650), True, 'import matplotlib.pyplot as plt\n'), ((4272, 4303), 'math.log', 'math.log', (['proportion_occurances'], {}), '(proportion_occurances)\n', (4280, 4303), False, 'import math\n'), ((16763, 16790), 'itertools.groupby', 'itertools.groupby', (['sequence'], {}), '(sequence)\n', (16780, 16790), False, 'import itertools\n'), ((4393, 4421), 'math.log', 'math.log', (['maximal_occurances'], {}), '(maximal_occurances)\n', (4401, 4421), False, 'import math\n'), ((10125, 10150), 'itertools.combinations', 'combinations', (['sequence', 'i'], {}), '(sequence, i)\n', (10137, 10150), False, 'from itertools import combinations\n'), ((19747, 19817), 'matplotlib.pyplot.plot', 'plt.plot', (['[left, right]', '[bot, bot]'], {'color': '"""red"""', 'linewidth': 'line_width'}), "([left, right], [bot, bot], color='red', linewidth=line_width)\n", (19755, 19817), True, 'import 
matplotlib.pyplot as plt\n'), ((19820, 19890), 'matplotlib.pyplot.plot', 'plt.plot', (['[left, right]', '[top, top]'], {'color': '"""red"""', 'linewidth': 'line_width'}), "([left, right], [top, top], color='red', linewidth=line_width)\n", (19828, 19890), True, 'import matplotlib.pyplot as plt\n'), ((19893, 19962), 'matplotlib.pyplot.plot', 'plt.plot', (['[left, left]', '[bot, top]'], {'color': '"""red"""', 'linewidth': 'line_width'}), "([left, left], [bot, top], color='red', linewidth=line_width)\n", (19901, 19962), True, 'import matplotlib.pyplot as plt\n'), ((19965, 20036), 'matplotlib.pyplot.plot', 'plt.plot', (['[right, right]', '[bot, top]'], {'color': '"""red"""', 'linewidth': 'line_width'}), "([right, right], [bot, top], color='red', linewidth=line_width)\n", (19973, 20036), True, 'import matplotlib.pyplot as plt\n'), ((21648, 21657), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (21655, 21657), True, 'import matplotlib.pyplot as plt\n'), ((22302, 22311), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22309, 22311), True, 'import matplotlib.pyplot as plt\n'), ((24283, 24299), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (24291, 24299), True, 'import numpy as np\n')] |
from __future__ import annotations
import numpy as np
from dask_awkward.data import load_array
from dask_awkward.utils import assert_eq
def test_ufunc_sin():
    """Applying np.sin to the lazy array matches applying it after compute()."""
    lazy = load_array()
    assert_eq(np.sin(lazy), np.sin(lazy.compute()))
def test_ufunc_add():
    """Adding a scalar to the lazy array matches the computed result."""
    lazy = load_array()
    shifted = lazy + 2
    assert_eq(lazy + 2, shifted.compute())
| [
"numpy.sin",
"dask_awkward.data.load_array",
"dask_awkward.utils.assert_eq"
] | [((172, 184), 'dask_awkward.data.load_array', 'load_array', ([], {}), '()\n', (182, 184), False, 'from dask_awkward.data import load_array\n'), ((194, 205), 'numpy.sin', 'np.sin', (['daa'], {}), '(daa)\n', (200, 205), True, 'import numpy as np\n'), ((241, 258), 'dask_awkward.utils.assert_eq', 'assert_eq', (['a1', 'a2'], {}), '(a1, a2)\n', (250, 258), False, 'from dask_awkward.utils import assert_eq\n'), ((293, 305), 'dask_awkward.data.load_array', 'load_array', ([], {}), '()\n', (303, 305), False, 'from dask_awkward.data import load_array\n'), ((378, 395), 'dask_awkward.utils.assert_eq', 'assert_eq', (['a3', 'a4'], {}), '(a3, a4)\n', (387, 395), False, 'from dask_awkward.utils import assert_eq\n')] |
import numpy as np
from scipy.fft import dct, idct
import math
def idct_basis_2d(len_basis, num_basis):
    '''
    Build an overcomplete 2D DCT dictionary for dictionary learning.

    Each atom is the Kronecker product of two 1D orthonormal DCT atoms;
    all atoms except the DC one are demeaned, and every column is rescaled
    to unit Euclidean norm.

    Inputs:
        len_basis: length of the flattened atom, e.g. 36 for a 6x6 basis
        num_basis: number of atoms; usually overcomplete (>= len_basis)
    Returns:
        DCT basis of shape [len_basis, num_basis]
    '''
    assert len_basis <= num_basis, 'should be over-complete dictionary'
    # 1D DCT atoms: one per side of the (square) 2D atom grid.
    side = math.ceil(num_basis ** 0.5)
    atoms_1d = idct(np.identity(side), norm='ortho', axis=0)
    atoms_1d = atoms_1d[:math.ceil(len_basis ** 0.5), :]
    # 2D atoms as a Kronecker (separable) product of the 1D atoms.
    basis = np.kron(atoms_1d, atoms_1d)
    # Demean every column except the DC atom, then normalise all columns.
    dc_atom = basis[:, 0]
    ac_atoms = basis[:, 1:] - np.mean(basis[:, 1:], axis=0)
    basis = np.column_stack((dc_atom, ac_atoms))
    basis = basis / np.linalg.norm(basis, axis=0)
    return basis[:, :num_basis]
def idct_basis_3d(len_basis, num_basis):
    '''
    Build an overcomplete 3D DCT dictionary for dictionary learning.

    Each atom is the triple Kronecker product of 1D orthonormal DCT atoms;
    all atoms except the DC one are demeaned, and every column is rescaled
    to unit Euclidean norm.

    Inputs:
        len_basis: length of the flattened atom, e.g. 216 for a 6x6x6 basis
        num_basis: number of atoms; usually overcomplete (>= len_basis)
    Returns:
        DCT basis of shape [len_basis, num_basis]
    '''
    assert len_basis <= num_basis, 'should be over-complete dictionary'
    # 1D DCT atoms: one per side of the (cubic) 3D atom grid.
    side = math.ceil(num_basis ** (1 / 3))
    atoms_1d = idct(np.identity(side), norm='ortho', axis=0)
    atoms_1d = atoms_1d[:math.ceil(len_basis ** (1 / 3)), :]
    # 3D atoms as a triple Kronecker (separable) product of the 1D atoms.
    basis = np.kron(atoms_1d, np.kron(atoms_1d, atoms_1d))
    # Demean every column except the DC atom, then normalise all columns.
    dc_atom = basis[:, 0]
    ac_atoms = basis[:, 1:] - np.mean(basis[:, 1:], axis=0)
    basis = np.column_stack((dc_atom, ac_atoms))
    basis = basis / np.linalg.norm(basis, axis=0)
    return basis[:, :num_basis]
| [
"numpy.kron",
"numpy.mean",
"math.ceil",
"numpy.linalg.norm"
] | [((641, 660), 'numpy.kron', 'np.kron', (['ODCT', 'ODCT'], {}), '(ODCT, ODCT)\n', (648, 660), True, 'import numpy as np\n'), ((764, 792), 'numpy.linalg.norm', 'np.linalg.norm', (['ODCT'], {'axis': '(0)'}), '(ODCT, axis=0)\n', (778, 792), True, 'import numpy as np\n'), ((1443, 1462), 'numpy.kron', 'np.kron', (['ODCT', 'ODCT'], {}), '(ODCT, ODCT)\n', (1450, 1462), True, 'import numpy as np\n'), ((1567, 1595), 'numpy.linalg.norm', 'np.linalg.norm', (['ODCT'], {'axis': '(0)'}), '(ODCT, axis=0)\n', (1581, 1595), True, 'import numpy as np\n'), ((529, 556), 'math.ceil', 'math.ceil', (['(num_basis ** 0.5)'], {}), '(num_basis ** 0.5)\n', (538, 556), False, 'import math\n'), ((1309, 1340), 'math.ceil', 'math.ceil', (['(num_basis ** (1 / 3))'], {}), '(num_basis ** (1 / 3))\n', (1318, 1340), False, 'import math\n'), ((598, 625), 'math.ceil', 'math.ceil', (['(len_basis ** 0.5)'], {}), '(len_basis ** 0.5)\n', (607, 625), False, 'import math\n'), ((715, 743), 'numpy.mean', 'np.mean', (['ODCT[:, 1:]'], {'axis': '(0)'}), '(ODCT[:, 1:], axis=0)\n', (722, 743), True, 'import numpy as np\n'), ((1382, 1413), 'math.ceil', 'math.ceil', (['(len_basis ** (1 / 3))'], {}), '(len_basis ** (1 / 3))\n', (1391, 1413), False, 'import math\n'), ((1518, 1546), 'numpy.mean', 'np.mean', (['ODCT[:, 1:]'], {'axis': '(0)'}), '(ODCT[:, 1:], axis=0)\n', (1525, 1546), True, 'import numpy as np\n')] |
import os
import numpy as np
from tqdm import tqdm
import pandas as pd
import recordlinkage
from recordlinkage.index import SortedNeighbourhood
import lib.utils as utils
class ApproxLinkage:
    """
    Approximate (probabilistic) record linkage between the 'Vacine Ja'
    vaccination registry and Covid-19 / registry-office death records,
    built on the `recordlinkage` toolkit.
    """
    def __init__(self, storage_folder, parquet_folder):
        '''
        Store I/O locations and define the columns used for linkage.

        Args:
            storage_folder: folder where per-chunk feature CSVs are written.
            parquet_folder: folder holding the input parquet files.
        '''
        self.storage = storage_folder
        self.parquet_folder = parquet_folder
        # Comparison columns: mother's name parts, birth-date parts, sex,
        # person's name parts, neighbourhood (bairro) and CPF id number.
        self.colsforlink = ["primeiro_nome_mae", "segundo_nome_mae", "complemento_nome_mae",
                            "dia_nascimento", "mes_nascimento", "ano_nascimento", "sexo", "primeiro_nome",
                            "segundo_nome", "complemento_nome", "bairro", "cpf"]
        # Populated by load_data().
        self.vacineja_df = None
        self.covid_obito_df = None
        self.cart_obito_df = None
    def load_data(self):
        '''
        Load the three parquet sources and attach the registry-office CPF to
        the Covid-19 death records, matching on the death-certificate number
        (`numerodo` on the Covid side, `do_8` on the registry side).
        '''
        self.vacineja_df = pd.read_parquet(os.path.join(self.parquet_folder, "VACINEJA.parquet"))
        self.covid_obito_df = pd.read_parquet(os.path.join(self.parquet_folder, "OBITO_COVID.parquet"))
        self.cart_obito_df = pd.read_parquet(os.path.join(self.parquet_folder, "OBITO_CARTORIO.parquet"))
        # Left-join CPF from registry-office deaths onto Covid deaths.
        self.covid_obito_df = self.covid_obito_df.merge(self.cart_obito_df[["do_8", "cpf"]], left_on="numerodo", right_on="do_8", how="left").drop("do_8", axis=1)
    def format_data(self):
        '''
        Derive the comparison columns on both sides of the linkage: split the
        full names (person and mother) into first / second / remainder parts
        and the birth date into day / month / year. Missing pieces become NaN.
        '''
        # --> 'Vacine Ja' registry side.
        # Normalise the raw name strings (assumes utils.replace_string cleans
        # separators/characters -- confirm against lib.utils).
        self.vacineja_df["nome"] = self.vacineja_df["nome"].apply(lambda x: utils.replace_string(x, sep=" "))
        self.vacineja_df["nome_mae"] = self.vacineja_df["nome_mae"].apply(lambda x: utils.replace_string(x, sep=" "))
        # Split "first second rest..." name structure; missing parts -> NaN.
        self.vacineja_df["primeiro_nome"] = self.vacineja_df["nome"].apply(lambda x: x.split(" ")[0] if pd.notna(x) and len(x.split(" "))>0 else np.nan)
        self.vacineja_df["segundo_nome"] = self.vacineja_df["nome"].apply(lambda x: x.split(" ")[1] if pd.notna(x) and len(x.split(" "))>1 else np.nan)
        self.vacineja_df["complemento_nome"] = self.vacineja_df["nome"].apply(lambda x: ' '.join(x.split(" ")[2:]) if pd.notna(x) and len(x.split(" "))>2 else np.nan)
        self.vacineja_df["primeiro_nome_mae"] = self.vacineja_df["nome_mae"].apply(lambda x: x.split(" ")[0] if pd.notna(x) and len(x.split(" "))>0 else np.nan)
        self.vacineja_df["segundo_nome_mae"] = self.vacineja_df["nome_mae"].apply(lambda x: x.split(" ")[1] if pd.notna(x) and len(x.split(" "))>1 else np.nan)
        self.vacineja_df["complemento_nome_mae"] = self.vacineja_df["nome_mae"].apply(lambda x: ' '.join(x.split(" ")[2:]) if pd.notna(x) and len(x.split(" "))>2 else np.nan)
        #self.vacineja_df["timestamp"] = self.vacineja_df["data_nascimento"].apply(lambda x: x.timestamp() if pd.notna(x) else np.nan)
        # Birth date split into parts so each can be compared independently.
        self.vacineja_df["dia_nascimento"] = self.vacineja_df["data_nascimento"].apply(lambda x: x.day if pd.notna(x) else np.nan)
        self.vacineja_df["mes_nascimento"] = self.vacineja_df["data_nascimento"].apply(lambda x: x.month if pd.notna(x) else np.nan)
        self.vacineja_df["ano_nascimento"] = self.vacineja_df["data_nascimento"].apply(lambda x: x.year if pd.notna(x) else np.nan)
        self.vacineja_df["cpf"] = self.vacineja_df["cpf"].copy()
        self.vacineja_df["bairro"] = self.vacineja_df["bairro"].copy()
        self.vacineja_df["sexo"] = self.vacineja_df["sexo"].copy()
        # --> Death by Covid-19 side: same derived columns, taken from the
        # "...(OBITO COVID)" source columns.
        self.covid_obito_df["primeiro_nome"] = self.covid_obito_df["NOME(OBITO COVID)"].apply(lambda x: x.split(" ")[0] if pd.notna(x) and len(x.split(" "))>0 else np.nan)
        self.covid_obito_df["segundo_nome"] = self.covid_obito_df["NOME(OBITO COVID)"].apply(lambda x: x.split(" ")[1] if pd.notna(x) and len(x.split(" "))>1 else np.nan)
        self.covid_obito_df["complemento_nome"] = self.covid_obito_df["NOME(OBITO COVID)"].apply(lambda x: ' '.join(x.split(" ")[2:]) if pd.notna(x) and len(x.split(" "))>2 else np.nan)
        self.covid_obito_df["primeiro_nome_mae"] = self.covid_obito_df["NOME_MAE(OBITO COVID)"].apply(lambda x: x.split(" ")[0] if pd.notna(x) and len(x.split(" "))>0 else np.nan)
        self.covid_obito_df["segundo_nome_mae"] = self.covid_obito_df["NOME_MAE(OBITO COVID)"].apply(lambda x: x.split(" ")[1] if pd.notna(x) and len(x.split(" "))>1 else np.nan)
        self.covid_obito_df["complemento_nome_mae"] = self.covid_obito_df["NOME_MAE(OBITO COVID)"].apply(lambda x: ' '.join(x.split(" ")[2:]) if pd.notna(x) and len(x.split(" "))>2 else np.nan)
        # Normalise sex labels (handles trailing-space variants in the data).
        self.covid_obito_df["sexo"] = self.covid_obito_df["SEXO(OBITO COVID)"].map({"MASC": "M", "FEM": "F", "FEM ": "F", "MAS": "M", "MASC ": "M"})
        self.covid_obito_df["bairro"] = self.covid_obito_df["BAIRRO_RESIDENCIA(OBITO COVID)"].apply(lambda x: utils.replace_string(x, sep=" ") if pd.notna(x) else np.nan)
        self.covid_obito_df["data_nascimento"] = self.covid_obito_df["DATA_NASCIMENTO(OBITO COVID)"].copy()
        self.covid_obito_df["dia_nascimento"] = self.covid_obito_df["data_nascimento"].apply(lambda x: x.day if pd.notna(x) else np.nan)
        self.covid_obito_df["mes_nascimento"] = self.covid_obito_df["data_nascimento"].apply(lambda x: x.month if pd.notna(x) else np.nan)
        self.covid_obito_df["ano_nascimento"] = self.covid_obito_df["data_nascimento"].apply(lambda x: x.year if pd.notna(x) else np.nan)
        self.covid_obito_df["cpf"] = self.covid_obito_df["cpf"].copy()
    def create_total_pairs(self, chunksize=40000):
        '''
        Score candidate record pairs in chunks and persist likely matches.

        Candidate pairs come from a sorted-neighbourhood block on the first
        name (window=3). Each pair is compared on the linkage columns; a pair
        is kept when at least 6 fields agree (the three birth-date parts
        collapse into a single point) or when the CPF matches exactly.
        Kept pairs are written to `feature_<chunk>.csv` in the storage folder.

        Args:
            chunksize: number of 'Vacine Ja' rows processed per chunk.
        '''
        vacineja_link = self.vacineja_df[self.colsforlink].reset_index(drop=True)
        covid_link = self.covid_obito_df[self.colsforlink].reset_index(drop=True)
        # Blocking: only compare records whose first names are neighbours
        # in sorted order (window=3) to avoid the full cross product.
        indexer_local = recordlinkage.Index()
        indexer_local.add(SortedNeighbourhood("primeiro_nome", "primeiro_nome", window=3))
        # Fuzzy string comparisons (Jaro-Winkler >= threshold counts as 1.0);
        # exact comparisons for categorical / numeric fields.
        compare_cl = recordlinkage.Compare()
        compare_cl.string("primeiro_nome_mae", "primeiro_nome_mae", method="jarowinkler", threshold=0.8, label="primeiro_nome_mae")
        compare_cl.string("segundo_nome_mae", "segundo_nome_mae", method="jarowinkler", threshold=0.8, label="segundo_nome_mae")
        compare_cl.string("complemento_nome_mae", "complemento_nome_mae", method="jarowinkler", threshold=0.8, label="complemento_nome_mae")
        #compare_cl.string("primeiro_nome", "primeiro_nome", method="jarowinkler", threshold=0.8, label="primeiro_nome")
        compare_cl.string("segundo_nome", "segundo_nome", method="jarowinkler", threshold=0.8, label="segundo_nome")
        compare_cl.string("complemento_nome", "complemento_nome", method="jarowinkler", threshold=0.8, label="complemento_nome")
        compare_cl.string("bairro", "bairro", method="jarowinkler", threshold=0.70, label="bairro")
        compare_cl.exact("sexo", "sexo", label="sexo")
        compare_cl.exact("cpf", "cpf", label="cpf")
        compare_cl.exact("dia_nascimento", "dia_nascimento", label="dia_nascimento")
        compare_cl.exact("mes_nascimento", "mes_nascimento", label="mes_nascimento")
        compare_cl.exact("ano_nascimento", "ano_nascimento", label="ano_nascimento")
        #compare_cl.date("data_nascimento", "data_nascimento", label="data_nascimento", swap_month_day=0.8)
        # Process the vaccination registry in chunks to bound memory use.
        chunks = np.split(vacineja_link, indices_or_sections=np.arange(chunksize, vacineja_link.shape[0], chunksize))
        for index, chunk in tqdm(enumerate(chunks)):
            candidate_links = indexer_local.index(chunk, covid_link)
            features = compare_cl.compute(candidate_links, chunk, covid_link)
            # Collapse the three birth-date agreements into one point, awarded
            # only when day, month AND year all match.
            features["SOMA NASCIMENTO"] = features[["dia_nascimento", "mes_nascimento", "ano_nascimento"]].sum(axis=1)
            features["SOMA NASCIMENTO"] = features["SOMA NASCIMENTO"].apply(lambda x: 1 if x==3 else 0)
            features["SOMA"] = features[["primeiro_nome_mae", "segundo_nome_mae", "complemento_nome_mae",
                                "segundo_nome", "complemento_nome", "cpf", "sexo", "bairro", "dia_nascimento",
                                "mes_nascimento", "ano_nascimento"]].sum(axis=1)
            features["SOMA"] = features["SOMA"]+features["SOMA NASCIMENTO"]
            # Keep pairs with at least 6 agreeing fields or an exact CPF match.
            features = features[(features["SOMA"]>=6) | (features["cpf"]==1.0)]
            print(features.shape, candidate_links.shape)
            features.to_csv(os.path.join(self.storage, f"feature_{index}.csv"))
| [
"lib.utils.replace_string",
"recordlinkage.Compare",
"recordlinkage.Index",
"os.path.join",
"pandas.notna",
"recordlinkage.index.SortedNeighbourhood",
"numpy.arange"
] | [((5614, 5635), 'recordlinkage.Index', 'recordlinkage.Index', ([], {}), '()\n', (5633, 5635), False, 'import recordlinkage\n'), ((5749, 5772), 'recordlinkage.Compare', 'recordlinkage.Compare', ([], {}), '()\n', (5770, 5772), False, 'import recordlinkage\n'), ((853, 906), 'os.path.join', 'os.path.join', (['self.parquet_folder', '"""VACINEJA.parquet"""'], {}), "(self.parquet_folder, 'VACINEJA.parquet')\n", (865, 906), False, 'import os\n'), ((954, 1010), 'os.path.join', 'os.path.join', (['self.parquet_folder', '"""OBITO_COVID.parquet"""'], {}), "(self.parquet_folder, 'OBITO_COVID.parquet')\n", (966, 1010), False, 'import os\n'), ((1057, 1116), 'os.path.join', 'os.path.join', (['self.parquet_folder', '"""OBITO_CARTORIO.parquet"""'], {}), "(self.parquet_folder, 'OBITO_CARTORIO.parquet')\n", (1069, 1116), False, 'import os\n'), ((5662, 5725), 'recordlinkage.index.SortedNeighbourhood', 'SortedNeighbourhood', (['"""primeiro_nome"""', '"""primeiro_nome"""'], {'window': '(3)'}), "('primeiro_nome', 'primeiro_nome', window=3)\n", (5681, 5725), False, 'from recordlinkage.index import SortedNeighbourhood\n'), ((1448, 1480), 'lib.utils.replace_string', 'utils.replace_string', (['x'], {'sep': '""" """'}), "(x, sep=' ')\n", (1468, 1480), True, 'import lib.utils as utils\n'), ((1566, 1598), 'lib.utils.replace_string', 'utils.replace_string', (['x'], {'sep': '""" """'}), "(x, sep=' ')\n", (1586, 1598), True, 'import lib.utils as utils\n'), ((7182, 7237), 'numpy.arange', 'np.arange', (['chunksize', 'vacineja_link.shape[0]', 'chunksize'], {}), '(chunksize, vacineja_link.shape[0], chunksize)\n', (7191, 7237), True, 'import numpy as np\n'), ((8219, 8269), 'os.path.join', 'os.path.join', (['self.storage', 'f"""feature_{index}.csv"""'], {}), "(self.storage, f'feature_{index}.csv')\n", (8231, 8269), False, 'import os\n'), ((2810, 2821), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (2818, 2821), True, 'import pandas as pd\n'), ((2943, 2954), 'pandas.notna', 'pd.notna', (['x'], {}), 
'(x)\n', (2951, 2954), True, 'import pandas as pd\n'), ((3075, 3086), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (3083, 3086), True, 'import pandas as pd\n'), ((4714, 4725), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (4722, 4725), True, 'import pandas as pd\n'), ((4678, 4710), 'lib.utils.replace_string', 'utils.replace_string', (['x'], {'sep': '""" """'}), "(x, sep=' ')\n", (4698, 4710), True, 'import lib.utils as utils\n'), ((4959, 4970), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (4967, 4970), True, 'import pandas as pd\n'), ((5098, 5109), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (5106, 5109), True, 'import pandas as pd\n'), ((5236, 5247), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (5244, 5247), True, 'import pandas as pd\n'), ((1705, 1716), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (1713, 1716), True, 'import pandas as pd\n'), ((1857, 1868), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (1865, 1868), True, 'import pandas as pd\n'), ((2024, 2035), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (2032, 2035), True, 'import pandas as pd\n'), ((2185, 2196), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (2193, 2196), True, 'import pandas as pd\n'), ((2345, 2356), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (2353, 2356), True, 'import pandas as pd\n'), ((2520, 2531), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (2528, 2531), True, 'import pandas as pd\n'), ((3459, 3470), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (3467, 3470), True, 'import pandas as pd\n'), ((3630, 3641), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (3638, 3641), True, 'import pandas as pd\n'), ((3816, 3827), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (3824, 3827), True, 'import pandas as pd\n'), ((3996, 4007), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (4004, 4007), True, 'import pandas as pd\n'), ((4175, 4186), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (4183, 4186), True, 'import pandas as pd\n'), 
((4369, 4380), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (4377, 4380), True, 'import pandas as pd\n')] |
# Author: <NAME>
# 2010 - 2011
"""Tools for spectral analysis of unequally sampled signals."""
import numpy as np
#pythran export lombscargle(float64[], float64[], float64[])
#runas import numpy; x = numpy.arange(2., 12.); y = numpy.arange(1., 11.); z = numpy.arange(3., 13.); lombscargle(x, y, z)
def lombscargle(x, y, freqs):
    """
    Compute the Lomb-Scargle periodogram of an unequally sampled signal.

    Parameters
    ----------
    x : array_like
        Sample times.
    y : array_like
        Measurement values (must be registered so the mean is zero).
    freqs : array_like
        Angular frequencies for output periodogram.

    Returns
    -------
    pgram : array_like
        Lomb-Scargle periodogram, one value per frequency.

    Raises
    ------
    ValueError
        If the input arrays `x` and `y` do not have the same shape.
    """
    # Check input sizes
    if x.shape != y.shape:
        raise ValueError("Input arrays do not have the same size.")

    # Phase of every (frequency, sample) pair; shape (len(freqs), len(x)).
    phase = freqs[:, None] * x
    cosine = np.cos(phase)
    sine = np.sin(phase)

    # Cross- and auto-sums over the samples, one value per frequency.
    xc = np.sum(y * cosine, axis=1)
    xs = np.sum(y * sine, axis=1)
    cc = np.sum(cosine ** 2, axis=1)
    ss = np.sum(sine * sine, axis=1)
    cs = np.sum(cosine * sine, axis=1)

    # Time offset tau that orthogonalises the sine/cosine basis per frequency.
    tau = np.arctan2(2 * cs, cc - ss) / (2 * freqs)
    c_tau = np.cos(freqs * tau)
    s_tau = np.sin(freqs * tau)
    c_tau2 = c_tau * c_tau
    s_tau2 = s_tau * s_tau
    cs_tau = 2 * c_tau * s_tau

    pgram = 0.5 * (
        (c_tau * xc + s_tau * xs) ** 2
        / (c_tau2 * cc + cs_tau * cs + s_tau2 * ss)
        + (c_tau * xs - s_tau * xc) ** 2
        / (c_tau2 * ss - cs_tau * cs + s_tau2 * cc)
    )
    return pgram
| [
"numpy.sin",
"numpy.sum",
"numpy.arctan2",
"numpy.cos"
] | [((1043, 1069), 'numpy.cos', 'np.cos', (['(freqs[:, None] * x)'], {}), '(freqs[:, None] * x)\n', (1049, 1069), True, 'import numpy as np\n'), ((1078, 1104), 'numpy.sin', 'np.sin', (['(freqs[:, None] * x)'], {}), '(freqs[:, None] * x)\n', (1084, 1104), True, 'import numpy as np\n'), ((1114, 1135), 'numpy.sum', 'np.sum', (['(y * c)'], {'axis': '(1)'}), '(y * c, axis=1)\n', (1120, 1135), True, 'import numpy as np\n'), ((1145, 1166), 'numpy.sum', 'np.sum', (['(y * s)'], {'axis': '(1)'}), '(y * s, axis=1)\n', (1151, 1166), True, 'import numpy as np\n'), ((1176, 1198), 'numpy.sum', 'np.sum', (['(c ** 2)'], {'axis': '(1)'}), '(c ** 2, axis=1)\n', (1182, 1198), True, 'import numpy as np\n'), ((1208, 1229), 'numpy.sum', 'np.sum', (['(s * s)'], {'axis': '(1)'}), '(s * s, axis=1)\n', (1214, 1229), True, 'import numpy as np\n'), ((1239, 1260), 'numpy.sum', 'np.sum', (['(c * s)'], {'axis': '(1)'}), '(c * s, axis=1)\n', (1245, 1260), True, 'import numpy as np\n'), ((1325, 1344), 'numpy.cos', 'np.cos', (['(freqs * tau)'], {}), '(freqs * tau)\n', (1331, 1344), True, 'import numpy as np\n'), ((1357, 1376), 'numpy.sin', 'np.sin', (['(freqs * tau)'], {}), '(freqs * tau)\n', (1363, 1376), True, 'import numpy as np\n'), ((1271, 1298), 'numpy.arctan2', 'np.arctan2', (['(2 * cs)', '(cc - ss)'], {}), '(2 * cs, cc - ss)\n', (1281, 1298), True, 'import numpy as np\n')] |
"""
Some convenience classes/functions for querying Horizons
Helps with tests of accuracy in various functions
"""
# import standard packages
# -----------------------------------------------------------------------------
import numpy as np
# import third-party packages
# -----------------------------------------------------------------------------
from astroquery.jplhorizons import Horizons
def nice_Horizons(target, centre, epochs, id_type, refplane='earth'):
    """
    Convenience wrapper around astroquery's Horizons vectors query.

    Exposes only the inputs that actually vary and returns the state as a
    flat numpy array [x, y, z, vx, vy, vz] instead of an astropy table.
    """
    query = Horizons(target, centre, epochs=epochs, id_type=id_type)
    vectors = query.vectors(refplane=refplane)
    xyzuvw = vectors['x', 'y', 'z', 'vx', 'vy', 'vz']
    return np.array(list(xyzuvw.as_array()[0]))
def nice_Horizons_radec(target, centre, epochs, id_type, refplane='earth'):
    """
    Convenience wrapper around astroquery's Horizons ephemerides query.

    Exposes only the inputs that actually vary and returns RA/Dec pairs as a
    plain numpy array of shape (N_epochs, 2) instead of an astropy table.
    """
    query = Horizons(target, centre, epochs=epochs, id_type=id_type)
    eph = query.ephemerides(extra_precision=True)
    ra = list(eph['RA'].data)
    dec = list(eph['DEC'].data)
    return np.array([ra, dec]).T
def read_Horizons_state_from_text( two_lines):
    """
    Parse a Horizons position line and velocity line into a state vector.

    Expects two lines of the form::

        X =-2.590350154796811E+00 Y =-7.949342693459856E-02 Z = 1.245107691757731E-01
        VX=-1.454708370733871E-03 VY=-9.503445860627428E-03 VZ=-3.846514535533382E-03

    Returns numpy array [x, y, z, u, v, w].
    """
    pos_line, vel_line = two_lines[0], two_lines[1]
    # Splitting on '=' leaves the next field's label stuck to each number;
    # strip() with the label characters removes it.
    pos_fields = pos_line.split('=')
    vel_fields = vel_line.split('=')
    state = [
        float(pos_fields[1].strip('Y ')),
        float(pos_fields[2].strip('Z ')),
        float(pos_fields[3].strip()),
        float(vel_fields[1].strip('VY ')),
        float(vel_fields[2].strip('VZ ')),
        float(vel_fields[3].strip()),
    ]
    return np.array(state)
def extract_first_state_from_text( text_block ):
    """
    Extract the first cartesian state vector from a Horizons text response.

    The ephemeris data section of a Horizons response is delimited by the
    "$$SOE" / "$$EOE" markers and contains per-epoch entries such as::

        2458850.000000000 = A.D. 2020-Jan-01 12:00:00.0000 TDB [del_T= 69.18 s]
         X =-2.590350154796811E+00 Y =-7.949342693459856E-02 Z = 1.245107691757731E-01
         VX=-1.454708370733871E-03 VY=-9.503445860627428E-03 VZ=-3.846514535533382E-03
         LT= 1.498492268422344E-02 RG= 2.594558933811760E+00 RR= 1.558928955626413E-03

    Parameters
    ----------
    text_block : sequence of str
        Lines of a Horizons text response.

    Returns
    -------
    numpy.ndarray of shape (6,) holding [x, y, z, vx, vy, vz], or
    False when no "$$SOE" marker / state entry is present.
    """
    # Find the "$$SOE" data-section marker.
    # NB: the previous implementation indexed [0] unconditionally, which
    # raised IndexError when the marker was absent (making the not-found
    # guard unreachable) and treated a marker on line 0 as missing because
    # 0 is falsy. Both issues are fixed here.
    soe_matches = [n for n, line in enumerate(text_block) if "$$SOE" in line]
    if not soe_matches:
        return False
    soe_index = soe_matches[0]

    # Locate the position line (first line starting with 'X') after the
    # marker; the velocity line immediately follows it.
    for xyz_offset, line in enumerate(text_block[soe_index:]):
        if line.strip().startswith('X'):
            break
    else:
        # No state entry between the marker and the end of the text.
        return False

    state_lines = text_block[soe_index:][xyz_offset:xyz_offset + 2]
    # Parse the two lines into [x, y, z, u, v, w].
    xyzuvw = read_Horizons_state_from_text( state_lines )
    return xyzuvw
| [
"numpy.array",
"astroquery.jplhorizons.Horizons"
] | [((698, 754), 'astroquery.jplhorizons.Horizons', 'Horizons', (['target', 'centre'], {'epochs': 'epochs', 'id_type': 'id_type'}), '(target, centre, epochs=epochs, id_type=id_type)\n', (706, 754), False, 'from astroquery.jplhorizons import Horizons\n'), ((1280, 1336), 'astroquery.jplhorizons.Horizons', 'Horizons', (['target', 'centre'], {'epochs': 'epochs', 'id_type': 'id_type'}), '(target, centre, epochs=epochs, id_type=id_type)\n', (1288, 1336), False, 'from astroquery.jplhorizons import Horizons\n'), ((2155, 2183), 'numpy.array', 'np.array', (['[x, y, z, u, v, w]'], {}), '([x, y, z, u, v, w])\n', (2163, 2183), True, 'import numpy as np\n')] |
import abc
import numpy as np
import homog as hm
from numpy.linalg import inv
from worms.util import jit
# Homogeneous (4-component) unit vectors along the x, y and z axes.
Ux, Uy, Uz = (
    np.array(axis) for axis in ([1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0])
)
class WormCriteria(abc.ABC):
    """Abstract base class for worm scoring criteria."""

    @abc.abstractmethod
    def score(self, **kw):
        """Return a score for the supplied keyword data."""

    # Attribute names that composite criteria are allowed to look up on
    # their children.
    allowed_attributes = (
        "last_body_same_as",
        "symname",
        "is_cyclic",
        "alignment",
        "from_seg",
        "to_seg",
        "origin_seg",
        "symfile_modifiers",
        "crystinfo",
    )
class CriteriaList(WormCriteria):
    """Composite criterion: the score is the sum of its children's scores."""

    def __init__(self, children):
        # Accept a bare criterion and wrap it as a singleton list.
        if isinstance(children, WormCriteria):
            children = [children]
        self.children = children

    def score(self, **kw):
        return sum(child.score(**kw) for child in self.children)

    def __getattr__(self, name):
        # Proxy only whitelisted attributes; at most one child may define
        # a non-None value for any of them.
        if name not in WormCriteria.allowed_attributes:
            raise AttributeError("CriteriaList has no attribute: " + name)
        found = [getattr(child, name) for child in self.children if hasattr(child, name)]
        found = [value for value in found if value is not None]
        assert len(found) < 2
        return found[0] if len(found) else None

    def __getitem__(self, index):
        assert isinstance(index, int)
        return self.children[index]

    def __len__(self):
        return len(self.children)

    def __iter__(self):
        return iter(self.children)
class NullCriteria(WormCriteria):
    """Criterion that accepts everything: zero loss, identity alignment."""

    def __init__(self, from_seg=0, to_seg=-1, origin_seg=None):
        self.from_seg = from_seg
        self.to_seg = to_seg
        # NOTE(review): the origin_seg argument is accepted but always stored
        # as None -- possibly intentional for a null criterion; confirm.
        self.origin_seg = None
        self.is_cyclic = False
        self.tolerance = 9e8
        self.symname = None

    def merge_segment(self, **kw):
        """No merging for the null criterion."""
        return None

    def score(self, segpos, **kw):
        """Zero loss for every candidate in the batch."""
        return np.zeros(segpos[-1].shape[:-2])

    def alignment(self, segpos, **kw):
        """Identity 4x4 transform for every candidate."""
        frames = np.empty_like(segpos[-1])
        frames[..., :, :] = np.eye(4)
        return frames

    def jit_lossfunc(self):
        """Return a jit-compiled loss function that always yields 0.0."""
        @jit
        def null_lossfunc(pos, idx, verts):
            return 0.0

        return null_lossfunc

    def iface_rms(self, pose0, prov0, **kw):
        """Sentinel value: interface RMS is not computed."""
        return -1
| [
"numpy.array",
"numpy.zeros",
"numpy.empty_like",
"numpy.eye"
] | [((111, 133), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (119, 133), True, 'import numpy as np\n'), ((139, 161), 'numpy.array', 'np.array', (['[0, 1, 0, 0]'], {}), '([0, 1, 0, 0])\n', (147, 161), True, 'import numpy as np\n'), ((167, 189), 'numpy.array', 'np.array', (['[0, 0, 1, 0]'], {}), '([0, 0, 1, 0])\n', (175, 189), True, 'import numpy as np\n'), ((1670, 1701), 'numpy.zeros', 'np.zeros', (['segpos[-1].shape[:-2]'], {}), '(segpos[-1].shape[:-2])\n', (1678, 1701), True, 'import numpy as np\n'), ((1751, 1776), 'numpy.empty_like', 'np.empty_like', (['segpos[-1]'], {}), '(segpos[-1])\n', (1764, 1776), True, 'import numpy as np\n'), ((1798, 1807), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1804, 1807), True, 'import numpy as np\n')] |
__author__ = "<NAME>"
"""
One should note, that R is required to use this module, as original Clifford's
program is written in R. On my Windows 10, I am using anaconda and I had to add
R_HOME env variable and R_path\bin, R_path\bin\x64 to the path.
https://cran.r-project.org/web/packages/BosonSampling/index.html
"""
from numpy import arange, array, array_split, int64, ndarray, isclose
from scipy.special import binom
from collections import defaultdict
from typing import List, Dict, Tuple, DefaultDict
from .simulation_strategy_interface import SimulationStrategyInterface
from rpy2 import robjects
from rpy2.robjects import packages
from ..boson_sampling_utilities.boson_sampling_utilities import (
particle_state_to_modes_state,
)
class CliffordsRSimulationStrategy(SimulationStrategyInterface):
    def __init__(self, interferometer_matrix: ndarray) -> None:
        """
        Store the interferometer matrix and bind the R boson sampler.

        Imports the R "BosonSampling" package via rpy2 (the package must be
        installed in the local R environment) and keeps a reference to its
        `bosonSampler` function for later sampling calls.

        :param interferometer_matrix: unitary matrix describing the
            interferometer used in the boson-sampling experiment.
        """
        self.interferometer_matrix = interferometer_matrix
        boson_sampling_package = packages.importr("BosonSampling")
        self.cliffords_r_sampler = boson_sampling_package.bosonSampler
    def set_matrix(self, interferometer_matrix: ndarray) -> None:
        """Replace the interferometer matrix used for subsequent simulations."""
        self.interferometer_matrix = interferometer_matrix
    @staticmethod
    def _numpy_array_to_r_matrix(numpy_array: ndarray) -> robjects.r.matrix:
        """
        Convert a 2D complex numpy array into an R matrix via rpy2.

        :param numpy_array: 2D array to convert.
        :return: rpy2 R matrix with the same shape and values.
        """
        rows_number, columns_number = numpy_array.shape
        # Transposition is required as R inserts columns, not rows.
        r_values = robjects.ComplexVector(
            [val for val in numpy_array.transpose().reshape(numpy_array.size)]
        )
        return robjects.r.matrix(r_values, nrow=rows_number, ncol=columns_number)
    def simulate(
        self, initial_state: ndarray, samples_number: int = 1
    ) -> List[ndarray]:
        """
        Simulate BS experiment for given input.

        Note: The results of Clifford & Clifford method are given in the first
        quantization description (mode assignment)!

        :param initial_state: Input state in the modes occupation description.
        :param samples_number: Number of samples to sample.
        :return: List of samples in the first quantization description (mode
        assignment)
        """
        number_of_bosons = int(sum(initial_state))
        # Only the first n columns of the interferometer enter the sampler
        # (n = number of photons).
        boson_sampler_input_matrix = self._numpy_array_to_r_matrix(
            self.interferometer_matrix[:, arange(number_of_bosons)]
        )
        # perm=False: skip permanent computation, we only need the samples.
        result, permanent, probability_mass_function = self.cliffords_r_sampler(
            boson_sampler_input_matrix, sampleSize=samples_number, perm=False
        )
        # Add -1 to R indexation of modes (they start from 1).
        python_result = array([mode_value - 1 for mode_value in result], dtype=int64)
        # R returns all samples concatenated; split back into one row per sample.
        samples_in_particle_states = array_split(python_result, samples_number)
        # There are some problems with the actual and theoretical runtimes. The
        # reason for that could be parsing the result to a second quantization
        # description.
        # return samples_in_particle_states
        samples_in_occupation_description = []
        for sample in samples_in_particle_states:
            samples_in_occupation_description.append(
                particle_state_to_modes_state(sample, len(initial_state))
            )
        return samples_in_occupation_description
def find_probabilities(
self, initial_state: ndarray, outcomes_of_interest: List[ndarray]
) -> Dict[Tuple[int, ...], float]:
number_of_bosons = int(sum(initial_state))
outcomes_of_interest = [tuple(o) for o in outcomes_of_interest]
outcomes_probabilities: dict = {}
boson_sampler_input_matrix = self._numpy_array_to_r_matrix(
self.interferometer_matrix[:, arange(number_of_bosons)]
)
number_of_samplings = 0
while len(outcomes_probabilities) != len(outcomes_of_interest):
result, permanent, pmf = self.cliffords_r_sampler(
boson_sampler_input_matrix, sampleSize=1, perm=True
)
number_of_samplings += 1
# Add -1 to R indexation of modes (they start from 1).
python_result = array(
[mode_value - 1 for mode_value in result], dtype=int64
)
sample_in_particle_states = array_split(python_result, 1)[0]
sample = tuple(
particle_state_to_modes_state(
sample_in_particle_states, len(initial_state)
)
)
if sample in outcomes_of_interest:
outcomes_probabilities[sample] = pmf[0]
if number_of_samplings % int(1e4) == 0:
print(f"\tNumber of samplings: {number_of_samplings}")
return outcomes_probabilities
def find_probabilities_of_n_random_states(
self, initial_state: ndarray, number_of_random_states: int
) -> DefaultDict[Tuple[int, ...], float]:
n = int(sum(initial_state))
m = len(initial_state)
boson_sampler_input_matrix = self._numpy_array_to_r_matrix(
self.interferometer_matrix[:, arange(n)]
)
if int(binom(n + m - 1, m - 1)) < number_of_random_states:
number_of_random_states = int(binom(n + m - 1, m - 1))
probabilities_of_random_states = defaultdict(lambda: 0)
probabilities_sum = 0.0
number_of_samplings = 0
while len(
probabilities_of_random_states
) < number_of_random_states or not isclose(probabilities_sum, 1):
result, permanent, pmf = self.cliffords_r_sampler(
boson_sampler_input_matrix, sampleSize=1, perm=True
)
number_of_samplings += 1
# Add -1 to R indexation of modes (they start from 1).
python_result = array(
[mode_value - 1 for mode_value in result], dtype=int64
)
sample_in_particle_states = array_split(python_result, 1)[0]
sample = tuple(
particle_state_to_modes_state(
sample_in_particle_states, len(initial_state)
)
)
probabilities_of_random_states[sample] = pmf[0]
if number_of_samplings % int(1e4) == 0:
print(f"\tNumber of samplings: {number_of_samplings}")
probabilities_sum = 0.0
for state in probabilities_of_random_states:
probabilities_sum += probabilities_of_random_states[state]
return probabilities_of_random_states
| [
"numpy.isclose",
"scipy.special.binom",
"rpy2.robjects.packages.importr",
"rpy2.robjects.r.matrix",
"numpy.array",
"numpy.array_split",
"collections.defaultdict",
"numpy.arange"
] | [((987, 1020), 'rpy2.robjects.packages.importr', 'packages.importr', (['"""BosonSampling"""'], {}), "('BosonSampling')\n", (1003, 1020), False, 'from rpy2.robjects import packages\n'), ((1585, 1651), 'rpy2.robjects.r.matrix', 'robjects.r.matrix', (['r_values'], {'nrow': 'rows_number', 'ncol': 'columns_number'}), '(r_values, nrow=rows_number, ncol=columns_number)\n', (1602, 1651), False, 'from rpy2 import robjects\n'), ((2711, 2774), 'numpy.array', 'array', (['[(mode_value - 1) for mode_value in result]'], {'dtype': 'int64'}), '([(mode_value - 1) for mode_value in result], dtype=int64)\n', (2716, 2774), False, 'from numpy import arange, array, array_split, int64, ndarray, isclose\n'), ((2810, 2852), 'numpy.array_split', 'array_split', (['python_result', 'samples_number'], {}), '(python_result, samples_number)\n', (2821, 2852), False, 'from numpy import arange, array, array_split, int64, ndarray, isclose\n'), ((5357, 5380), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (5368, 5380), False, 'from collections import defaultdict\n'), ((4213, 4276), 'numpy.array', 'array', (['[(mode_value - 1) for mode_value in result]'], {'dtype': 'int64'}), '([(mode_value - 1) for mode_value in result], dtype=int64)\n', (4218, 4276), False, 'from numpy import arange, array, array_split, int64, ndarray, isclose\n'), ((5861, 5924), 'numpy.array', 'array', (['[(mode_value - 1) for mode_value in result]'], {'dtype': 'int64'}), '([(mode_value - 1) for mode_value in result], dtype=int64)\n', (5866, 5924), False, 'from numpy import arange, array, array_split, int64, ndarray, isclose\n'), ((4345, 4374), 'numpy.array_split', 'array_split', (['python_result', '(1)'], {}), '(python_result, 1)\n', (4356, 4374), False, 'from numpy import arange, array, array_split, int64, ndarray, isclose\n'), ((5196, 5219), 'scipy.special.binom', 'binom', (['(n + m - 1)', '(m - 1)'], {}), '(n + m - 1, m - 1)\n', (5201, 5219), False, 'from scipy.special import binom\n'), 
((5290, 5313), 'scipy.special.binom', 'binom', (['(n + m - 1)', '(m - 1)'], {}), '(n + m - 1, m - 1)\n', (5295, 5313), False, 'from scipy.special import binom\n'), ((5550, 5579), 'numpy.isclose', 'isclose', (['probabilities_sum', '(1)'], {}), '(probabilities_sum, 1)\n', (5557, 5579), False, 'from numpy import arange, array, array_split, int64, ndarray, isclose\n'), ((5993, 6022), 'numpy.array_split', 'array_split', (['python_result', '(1)'], {}), '(python_result, 1)\n', (6004, 6022), False, 'from numpy import arange, array, array_split, int64, ndarray, isclose\n'), ((2417, 2441), 'numpy.arange', 'arange', (['number_of_bosons'], {}), '(number_of_bosons)\n', (2423, 2441), False, 'from numpy import arange, array, array_split, int64, ndarray, isclose\n'), ((3791, 3815), 'numpy.arange', 'arange', (['number_of_bosons'], {}), '(number_of_bosons)\n', (3797, 3815), False, 'from numpy import arange, array, array_split, int64, ndarray, isclose\n'), ((5159, 5168), 'numpy.arange', 'arange', (['n'], {}), '(n)\n', (5165, 5168), False, 'from numpy import arange, array, array_split, int64, ndarray, isclose\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import argparse
import csv
import numpy as np
from keras.models import Model
from keras.layers import Flatten, Input, Dense, Dropout
from keras.layers import Conv1D, MaxPooling1D
from keras.models import load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
MODEL_NAME = 'cnn3'
np.random.seed(42)
def read_csv(filename, skip_header_lines=1, skip_cols=1):
    """
    Read csv file and return numpy array
    :param filename: full path to the csv file
    :param skip_header_lines: number of lines to skip from the beginning of file
    :param skip_cols: number of columns to skip from the beginning of file
    :return: data as numpy float array
    :raises IOError: if `filename` is empty
    :raises ValueError: if a cell cannot be converted to float
    """
    if not filename:
        raise IOError('Non-empty filename expected.')
    with open(filename, 'r') as f:
        rows = list(csv.reader(f))
    try:
        data = np.array(rows[skip_header_lines:], dtype=float)
    except ValueError as e:
        print("Error while putting csv data into numpy array")
        print("ValueError: {}".format(e))
        # BUG FIX: re-raise the original exception instead of raising a new,
        # message-less ValueError that discarded the cause and traceback.
        raise
    return data[:, skip_cols:]
def save_csv(filename, data):
    """
    Save prediction data to csv file with header and rows ID
    :param filename: full path to output csv file
    :param data: 1D data array to be saved into csv
    """
    rows = [('ID', 'TARGET')]
    rows.extend((i, r) for i, r in enumerate(data))
    # BUG FIX: newline='' is required by the csv module; without it each row
    # is followed by a blank line on Windows.
    with open(filename, 'w', newline='') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerows(rows)
def transform_features(data, feature_dim=750, features_number=12):
    """
    Reshape raw samples into per-feature time series.

    Each input row of length `features_number * feature_dim` holds the
    features' values back to back; it is reshaped to
    (features_number, feature_dim) and transposed so that the time axis
    comes first.

    :param data: numpy array of shape (N, features_number * feature_dim)
    :param feature_dim: length of each individual feature's series
    :param features_number: number of individual features per row
    :return: numpy array of shape (N, feature_dim, features_number)
    """
    transformed = [
        sample.reshape(features_number, feature_dim).T for sample in data
    ]
    return np.array(transformed)
def enrich_data(data, labels):
    """
    Augment the dataset by circularly shifting each sample along the time
    axis with a random offset; 5 shifted copies are generated per sample,
    so the output is 6x the input size.

    :param data: numpy array with transformed features (N, T, F)
    :param labels: target labels for data
    :return: (augmented_data, augmented_labels) numpy arrays
    """
    augmented = []
    augmented_labels = []
    for idx, label in enumerate(labels):
        # keep the original sample
        augmented.append(data[idx])
        augmented_labels.append(label)
        for _ in range(5):
            offset = np.random.randint(10, 740)
            augmented.append(np.roll(data[idx], offset, axis=0))
            augmented_labels.append(label)
    return np.array(augmented), np.array(augmented_labels)
def input_func(data_file, labels_file, mode, generate_more=True):
    """
    Read CSV files and prepare data for consumption by the model.

    :param data_file: input data CSV file
    :param labels_file: input labels CSV file (unused in PRED mode)
    :param mode: one of {TRAIN, EVAL, PRED}
    :param generate_more: create synthetic samples from the originals
    :return: (x, y) - model-ready features and labels (empty list in PRED)
    """
    x = transform_features(read_csv(data_file))
    # Prediction needs only data, not labels.
    if mode == 'PRED':
        return x, []
    y = read_csv(labels_file)
    if generate_more:
        x, y = enrich_data(x, y)
    return x, y
def build_model(show_summary=False):
    """
    Build and compile the 1D-CNN classifier.

    Three conv/pool stages (64, 128, 256 filters) feed a dropout-regularized
    sigmoid head for binary classification.

    :param show_summary: print the model summary after building
    :return: compiled keras model
    """
    input_layer = Input(batch_shape=(None, 750, 12), name='input')
    x = input_layer
    for filters in (64, 128, 256):
        x = Conv1D(filters, 3, activation='relu')(x)
        x = MaxPooling1D()(x)
    x = Flatten()(x)
    x = Dropout(0.5)(x)
    output_layer = Dense(1, activation='sigmoid', name='output')(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['binary_accuracy'])
    if show_summary:
        model.summary()
    return model
def run_model(model, features, labels, mode):
    """
    Drive the model according to `mode`.

    PRED: predict, threshold at 0.5 and save results to CSV.
    EVAL: evaluate and report loss/accuracy.
    otherwise: train with early stopping, checkpointing and TensorBoard logs.

    :param model: keras model
    :param features: input data for the model
    :param labels: labels for the input data (ignored in PRED)
    :param mode: one of {TRAIN, EVAL, PRED}
    """
    if mode == 'PRED':
        csv_out_file = './output_predictions_{}.csv'.format(MODEL_NAME)
        scores = model.predict(x=features)
        predictions = [1 if score >= 0.5 else 0 for score in scores]
        save_csv(csv_out_file, predictions)
        msg = "Saved prediction results to {}".format(csv_out_file)
    elif mode == 'EVAL':
        loss, accuracy = model.evaluate(x=features, y=labels)
        msg = "\nModel evaluation finished\nLoss: {}\tAccuracy: {}".format(loss, accuracy)
    else:
        # ok, let's train then!
        saved_model_file = './trained_model_{}.h5'.format(MODEL_NAME)
        callbacks = [
            # keep logs for visualisation with TensorBoard
            TensorBoard('./tensorboard_logs', histogram_freq=1),
            # save model at checkpoints when the loss improved
            ModelCheckpoint(saved_model_file, monitor='val_loss', save_best_only=True, verbose=1),
            # early stopping avoids spending time on overfitting
            EarlyStopping(monitor='val_loss', patience=3),
        ]
        model.fit(x=features,
                  y=labels,
                  epochs=20,
                  validation_split=0.25,
                  callbacks=callbacks)
        msg = "Model training finished"
    print(msg)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--run-mode',
                        required=True,
                        type=str,
                        choices=['TRAIN', 'EVAL', 'PRED'],
                        help="""
                        Perform one of the following operations on model use these commands:
                        TRAIN : train model,
                        EVAL : evaluate model
                        PRED : make prediction with model
                        """)
    parser.add_argument('--data-csv',
                        required=True,
                        type=str,
                        help='Raw data CSV file')
    parser.add_argument('--labels-csv',
                        required=True,
                        type=str,
                        help='Labels CSV file. Labels are ignored in PRED mode')
    parser.add_argument('--model-name',
                        required=False,
                        type=str,
                        help='Optional model name to be added as suffix to output files')
    parse_args, _ = parser.parse_known_args()
    if parse_args.model_name:
        MODEL_NAME = parse_args.model_name
    # Prepare input data from CSV files
    input_data, input_labels = input_func(parse_args.data_csv, parse_args.labels_csv, parse_args.run_mode)
    if parse_args.run_mode == 'TRAIN':
        # create and train a fresh model
        model_cnn = build_model()
        run_model(model_cnn, input_data, input_labels, parse_args.run_mode)
    else:
        try:
            # EVAL/PRED reuse the weights saved during training
            model_cnn = load_model('./trained_model_{}.h5'.format(MODEL_NAME))
            run_model(model_cnn, input_data, input_labels, parse_args.run_mode)
        except Exception as e:
            # BUG FIX: the original format string had no placeholder, so the
            # caught exception was silently dropped from the message.
            print("Can't find model, check that model was trained and input data is correct.\nError: {}".format(e))
| [
"keras.layers.MaxPooling1D",
"numpy.roll",
"keras.layers.Flatten",
"argparse.ArgumentParser",
"keras.callbacks.ModelCheckpoint",
"csv.writer",
"keras.callbacks.TensorBoard",
"numpy.array",
"keras.layers.Input",
"numpy.random.randint",
"keras.models.Model",
"numpy.random.seed",
"csv.reader",
... | [((396, 414), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (410, 414), True, 'import numpy as np\n'), ((3051, 3069), 'numpy.array', 'np.array', (['data_gen'], {}), '(data_gen)\n', (3059, 3069), True, 'import numpy as np\n'), ((3087, 3107), 'numpy.array', 'np.array', (['labels_gen'], {}), '(labels_gen)\n', (3095, 3107), True, 'import numpy as np\n'), ((4178, 4226), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, 750, 12)', 'name': '"""input"""'}), "(batch_shape=(None, 750, 12), name='input')\n", (4183, 4226), False, 'from keras.layers import Flatten, Input, Dense, Dropout\n'), ((4575, 4622), 'keras.models.Model', 'Model', ([], {'inputs': 'input_layer', 'outputs': 'output_layer'}), '(inputs=input_layer, outputs=output_layer)\n', (4580, 4622), False, 'from keras.models import Model\n'), ((6679, 6704), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6702, 6704), False, 'import argparse\n'), ((1657, 1670), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1667, 1670), False, 'import csv\n'), ((4235, 4267), 'keras.layers.Conv1D', 'Conv1D', (['(64)', '(3)'], {'activation': '"""relu"""'}), "(64, 3, activation='relu')\n", (4241, 4267), False, 'from keras.layers import Conv1D, MaxPooling1D\n'), ((4289, 4303), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {}), '()\n', (4301, 4303), False, 'from keras.layers import Conv1D, MaxPooling1D\n'), ((4315, 4348), 'keras.layers.Conv1D', 'Conv1D', (['(128)', '(3)'], {'activation': '"""relu"""'}), "(128, 3, activation='relu')\n", (4321, 4348), False, 'from keras.layers import Conv1D, MaxPooling1D\n'), ((4360, 4374), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {}), '()\n', (4372, 4374), False, 'from keras.layers import Conv1D, MaxPooling1D\n'), ((4386, 4419), 'keras.layers.Conv1D', 'Conv1D', (['(256)', '(3)'], {'activation': '"""relu"""'}), "(256, 3, activation='relu')\n", (4392, 4419), False, 'from keras.layers import Conv1D, MaxPooling1D\n'), ((4431, 4445), 
'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {}), '()\n', (4443, 4445), False, 'from keras.layers import Conv1D, MaxPooling1D\n'), ((4457, 4466), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4464, 4466), False, 'from keras.layers import Flatten, Input, Dense, Dropout\n'), ((4478, 4490), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4485, 4490), False, 'from keras.layers import Flatten, Input, Dense, Dropout\n'), ((4513, 4558), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""output"""'}), "(1, activation='sigmoid', name='output')\n", (4518, 4558), False, 'from keras.layers import Flatten, Input, Dense, Dropout\n'), ((851, 864), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (861, 864), False, 'import csv\n'), ((2848, 2874), 'numpy.random.randint', 'np.random.randint', (['(10)', '(740)'], {}), '(10, 740)\n', (2865, 2874), True, 'import numpy as np\n'), ((2898, 2929), 'numpy.roll', 'np.roll', (['data[i]', 'shift'], {'axis': '(0)'}), '(data[i], shift, axis=0)\n', (2905, 2929), True, 'import numpy as np\n'), ((6010, 6055), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(3)'}), "(monitor='val_loss', patience=3)\n", (6023, 6055), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\n'), ((6141, 6230), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['saved_model_file'], {'monitor': '"""val_loss"""', 'save_best_only': '(True)', 'verbose': '(1)'}), "(saved_model_file, monitor='val_loss', save_best_only=True,\n verbose=1)\n", (6156, 6230), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\n'), ((6308, 6359), 'keras.callbacks.TensorBoard', 'TensorBoard', (['"""./tensorboard_logs"""'], {'histogram_freq': '(1)'}), "('./tensorboard_logs', histogram_freq=1)\n", (6319, 6359), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\n'), ((935, 982), 'numpy.array', 
'np.array', (['data[skip_header_lines:]'], {'dtype': 'float'}), '(data[skip_header_lines:], dtype=float)\n', (943, 982), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 15 15:20:25 2019
@author: karad
"""
import numpy as np
import matplotlib.pyplot as plt
# depending on the number of files change the below parameters
#-----
it = 10      # number of iterations
nrows = 2    # number of rows
ncols = 4    # number of columns (e.g. 4 processes -- 2row by 2column)
#-----
# Load every per-process output file exactly once.
# BUG FIX: each file was previously read twice (once into a global variable,
# once into the list `a`); now the same loaded array serves both purposes.
a = []
for i in range(0, it):
    for r in range(nrows):
        for c in range(ncols):
            grid = np.loadtxt(
                'output_iteration_%d_processrow_%d_processcolumn_%d.txt' % (i, r, c),
                bool)
            globals()['data_%i_%d_%d' % (i, r, c)] = grid
            a.append(grid)
# Regroup the flat list into d[iteration][row][col]
d = []
for iteration in range(it):
    row_blocks = []
    for row in range(nrows):
        row_blocks.append(
            [a[iteration * nrows * ncols + row * ncols + col] for col in range(ncols)]
        )
    d.append(row_blocks)
# Merge each process row column-wise (computed once instead of twice).
f = []
for t in range(it):
    merged_rows = []
    for col in range(nrows):
        merged = np.concatenate(d[t][col], axis=1)  # column-wise merge
        globals()['row_%d_' % col] = merged
        merged_rows.append(merged)
    f.append(merged_rows)
# Stack the merged rows, then plot and dump the full grid per iteration.
for t in range(it):
    matrix = np.concatenate(f[t], axis=0)  # row-wise merge
    plt.imshow(matrix)
    plt.savefig('iteration_%d.jpg' % t)
    np.savetxt('iteration_%d.txt' % t, matrix, fmt='%d')
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"numpy.savetxt",
"numpy.concatenate",
"numpy.loadtxt"
] | [((1248, 1276), 'numpy.concatenate', 'np.concatenate', (['f[t]'], {'axis': '(0)'}), '(f[t], axis=0)\n', (1262, 1276), True, 'import numpy as np\n'), ((1298, 1316), 'matplotlib.pyplot.imshow', 'plt.imshow', (['matrix'], {}), '(matrix)\n', (1308, 1316), True, 'import matplotlib.pyplot as plt\n'), ((1321, 1356), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('iteration_%d.jpg' % t)"], {}), "('iteration_%d.jpg' % t)\n", (1332, 1356), True, 'import matplotlib.pyplot as plt\n'), ((1359, 1411), 'numpy.savetxt', 'np.savetxt', (["('iteration_%d.txt' % t)", 'matrix'], {'fmt': '"""%d"""'}), "('iteration_%d.txt' % t, matrix, fmt='%d')\n", (1369, 1411), True, 'import numpy as np\n'), ((1063, 1096), 'numpy.concatenate', 'np.concatenate', (['d[t][col]'], {'axis': '(1)'}), '(d[t][col], axis=1)\n', (1077, 1096), True, 'import numpy as np\n'), ((476, 566), 'numpy.loadtxt', 'np.loadtxt', (["('output_iteration_%d_processrow_%d_processcolumn_%d.txt' % (i, r, c))", 'bool'], {}), "('output_iteration_%d_processrow_%d_processcolumn_%d.txt' % (i, r,\n c), bool)\n", (486, 566), True, 'import numpy as np\n'), ((1134, 1167), 'numpy.concatenate', 'np.concatenate', (['d[t][col]'], {'axis': '(1)'}), '(d[t][col], axis=1)\n', (1148, 1167), True, 'import numpy as np\n'), ((580, 670), 'numpy.loadtxt', 'np.loadtxt', (["('output_iteration_%d_processrow_%d_processcolumn_%d.txt' % (i, r, c))", 'bool'], {}), "('output_iteration_%d_processrow_%d_processcolumn_%d.txt' % (i, r,\n c), bool)\n", (590, 670), True, 'import numpy as np\n')] |
import numpy as np
from numpy import inf
import time
import random
def GS_static(graph, eps, alpha, seed, method):
    """
    Gauss-Southwell (greedy coordinate) solver for (Gamma-)PageRank seeded
    at a single node, iterating until the relative l2-error w.r.t. the
    stored ground truth drops below `eps`.

    :param graph: object exposing `A`, `P` (transition matrix) and `pr`
        (ground-truth PageRank row vector) for 'PageRank', or `Op_shift`
        and `gpr` for 'GammaPageRank'.
    :param eps: relative l2-error threshold for convergence.
    :param alpha: damping / teleportation parameter in (0, 1).
    :param seed: index of the seed node.
    :param method: 'PageRank' or 'GammaPageRank'.
    :return: (p, r, it, flops) - approximation, residual, iteration count,
        estimated floating-point operation count.
    """
    # initial distribution: unit mass on the seed node
    N = graph.A.shape[0]
    y = np.zeros([N, 1])
    y[seed, 0] = 1
    # extract operator and coefficients
    if method == 'PageRank':
        rho = (1 - alpha)
        psi = alpha
        OP = graph.P.T
        gt = graph.pr.T
        deg = np.count_nonzero(OP, axis=1)
    elif method == 'GammaPageRank':
        mu = (1 - alpha) / alpha
        psi = -10 / (2 * mu + 10)
        rho = (2 * mu) / (2 * mu + 10)
        OP = graph.Op_shift
        gt = graph.gpr
        deg = np.count_nonzero(OP, axis=1)
    # compute initial distribution (assuming p(0) = 0)
    p = np.zeros([N, 1])
    r = rho * y
    it = 0
    flops = 0
    while (np.linalg.norm(p - gt, ord=2) / np.linalg.norm(gt, ord=2)) > eps:
        it += 1
        # pick the coordinate with the largest residual magnitude
        ix_u = np.argmax(abs(r))
        r_u = float(r[ix_u])
        p[ix_u] += r_u
        r[ix_u] = 0
        r = r + np.expand_dims(psi * r_u * OP[:, ix_u], 1)
        # count the flops
        flops_scaling = np.count_nonzero(OP[:, ix_u])
        # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
        flops = flops + 2 * flops_scaling + int(deg[ix_u])
    print('---- Gauss Soutwell ----')
    print('iter =', it)
    print('flops =', flops)
    print('err =', np.linalg.norm(p - gt, ord=2) / np.linalg.norm(gt, ord=2))
    return p, r, it, flops
def PI_static(graph, eps, alpha, seed, method):
    """
    Power-iteration solver for (Gamma-)PageRank seeded at a single node,
    iterating until the relative l2-error w.r.t. the stored ground truth
    drops below `eps`.

    :param graph: object exposing `A`, `P`, `pr` for 'PageRank', or
        `Op_shift`, `gpr` for 'GammaPageRank'.
    :param eps: relative l2-error threshold for convergence.
    :param alpha: damping / teleportation parameter in (0, 1).
    :param seed: index of the seed node.
    :param method: 'PageRank' or 'GammaPageRank'.
    :return: (p, it, flops) - approximation, iterations, flop count.
    """
    # unit mass on the seed node
    N = graph.A.shape[0]
    y = np.zeros([N, 1])
    y[seed, 0] = 1
    # extract operator and coefficients
    if method == 'PageRank':
        rho = (1 - alpha)
        psi = alpha
        OP = graph.P.T
        gt = graph.pr.T
    elif method == 'GammaPageRank':
        mu = (1 - alpha) / alpha
        psi = -10 / (2 * mu + 10)
        rho = (2 * mu) / (2 * mu + 10)
        OP = graph.Op_shift
        gt = graph.gpr
    p = np.zeros([N, 1])
    it = 0
    flops = 0
    while (np.linalg.norm(p - gt, 2) / np.linalg.norm(gt, ord=2)) > eps:
        it += 1
        # flop accounting for one matrix-vector product
        support = np.where(p > 0)[0]
        flops += (np.count_nonzero(OP[:, support])
                  + np.count_nonzero(p)
                  + np.count_nonzero(OP.dot(p)))
        # next iteration in approx
        p = rho * y + psi * OP.dot(p)
    print('---- Power Iteration ----')
    print('iter =', it)
    print('flops =', flops)
    print('err =', np.linalg.norm(p - gt, 2) / np.linalg.norm(gt, ord=2))
    return p, it, flops
def CP_static(graph, eps, alpha, seed, method, mode) :
    """
    Chebyshev-polynomial approximation of (Gamma-)PageRank seeded at a
    single node.

    The target filter mu/(mu + lambda) is expanded in Chebyshev polynomials
    of the operator OP; terms are accumulated until either the relative
    l2-error on the active nodes drops below `eps` (mode == 'FixedError')
    or the flop budget `eps` is exhausted (mode == 'FixedFlops').

    NOTE(review): `eps` is overloaded - an error threshold in 'FixedError'
    but a flop budget in 'FixedFlops'.

    :param graph: object exposing `A`, `D`, `Dinv`, `pr`, `clust_memb` for
        'PageRank', or `Op_shift`, `gpr` for 'GammaPageRank'.
    :param eps: error threshold or flop budget, depending on `mode`.
    :param alpha: damping parameter in (0, 1); mu = (1-alpha)/alpha.
    :param seed: index of the seed node.
    :param method: 'PageRank' or 'GammaPageRank'.
    :param mode: 'FixedError' or 'FixedFlops'.
    :return: (p, it, flops, err) - approximation, number of Chebyshev terms,
        flop count, final relative error on the active nodes.
    """
    # initial distribution
    N = graph.A.shape[0];
    y = np.zeros([N,1]); y[seed,0] = 1
    flops = 0
    # Coefficient parameters
    mu = (1-alpha)/alpha
    # quadrature grid over [0, pi] for the Chebyshev coefficient integrals
    theta = np.linspace(0,np.pi,50000+1)
    step = theta[1] - theta[0]
    # extract operator and coefficients
    if method == 'PageRank':
        # random-walk Laplacian, shifted so its spectrum is centered at 0
        Lap = (graph.D - graph.A).dot(graph.Dinv)
        OP = Lap - np.eye(N)
        gt = graph.pr.T
        filt_arg = (np.cos(theta) + 1)
        filt = np.divide(mu, mu + filt_arg)
    elif method == 'GammaPageRank':
        OP = graph.Op_shift
        gt = graph.gpr
        filt_arg = (10/2)*(np.cos(theta) + 1)
        filt = np.divide(mu, mu + filt_arg)
    # coefficients: trapezoid rule (endpoints halved) for the first two
    # Chebyshev coefficients of the filter
    tmp1 = np.multiply(np.cos(0*theta),filt*step); tmp1[0]=tmp1[0]/2; tmp1[-1]=tmp1[-1]/2;
    tmp2 = np.multiply(np.cos(1*theta),filt*step); tmp2[0]=tmp2[0]/2; tmp2[-1]=tmp2[-1]/2;
    coef1 = (2/np.pi)*np.sum(tmp1)
    coef2 = (2/np.pi)*np.sum(tmp2)
    # Polynomial elements: T_0(OP)y = y and T_1(OP)y = OP y
    polyTerm_2back = np.array(y)
    polyTerm_1back = np.array(OP).dot(y)
    nnz = np.where(y != 0)[0]
    flops = flops + np.count_nonzero(OP[:,nnz])
    # Chebyshev approximation from the first two terms (T_0 weighted by 1/2)
    Cheby_approximation_prev = 0.5*coef1*polyTerm_2back + coef2*polyTerm_1back;
    Cheby_approximation_curr = np.array(Cheby_approximation_prev)
    flops = flops + 2*np.count_nonzero(polyTerm_1back)
    #for it in range(2, hops-1):
    it = 2;
    # error is only measured on the nodes that belong to some cluster
    activeNodes = np.where(graph.clust_memb > 0)[0]
    if mode == 'FixedError':
        while (np.linalg.norm(Cheby_approximation_curr[activeNodes] - gt[activeNodes], ord=2)/np.linalg.norm(gt[activeNodes],ord=2)) > eps :
            # Chebyshev coefficient of order `it`
            tmp = np.array(np.multiply(np.cos(it*theta),filt*step)); tmp[0]=tmp[0]/2; tmp[-1]=tmp[-1]/2;
            coef_curr = (2/np.pi)*np.sum(tmp);
            # Current polynomial term via the recurrence T_k = 2*OP*T_{k-1} - T_{k-2}
            polyTerm_curr = 2*(OP).dot(polyTerm_1back) - polyTerm_2back;
            nnz = np.where(polyTerm_1back != 0)[0]
            flops = flops + np.count_nonzero(OP[:,nnz]) + np.count_nonzero(OP.dot(polyTerm_1back)) + np.count_nonzero(polyTerm_1back) + np.count_nonzero(polyTerm_2back)
            # Chebyshev approximation
            Cheby_approximation_curr = np.array(Cheby_approximation_prev + coef_curr*polyTerm_curr);
            flops = flops + 2*np.count_nonzero(polyTerm_curr)
            # Update recurrence state for the next order
            polyTerm_2back = np.array(polyTerm_1back);
            polyTerm_1back = np.array(polyTerm_curr);
            Cheby_approximation_prev = np.array(Cheby_approximation_curr);
            it += 1
    elif mode == 'FixedFlops':
        while flops < eps:
            # Chebyshev coefficient of order `it`
            tmp = np.array(np.multiply(np.cos(it*theta),filt*step)); tmp[0]=tmp[0]/2; tmp[-1]=tmp[-1]/2;
            coef_curr = (2/np.pi)*np.sum(tmp);
            # Current polynomial term via the recurrence T_k = 2*OP*T_{k-1} - T_{k-2}
            polyTerm_curr = 2*(OP).dot(polyTerm_1back) - polyTerm_2back;
            nnz = np.where(polyTerm_1back != 0)[0]
            flops = flops + np.count_nonzero(OP[:,nnz]) + np.count_nonzero(OP.dot(polyTerm_1back)) + np.count_nonzero(polyTerm_1back) + np.count_nonzero(polyTerm_2back)
            # Chebyshev approximation
            Cheby_approximation_curr = np.array(Cheby_approximation_prev + coef_curr*polyTerm_curr);
            flops = flops + 2*np.count_nonzero(polyTerm_curr)
            # Update recurrence state for the next order
            polyTerm_2back = np.array(polyTerm_1back);
            polyTerm_1back = np.array(polyTerm_curr);
            Cheby_approximation_prev = np.array(Cheby_approximation_curr);
            it += 1
    p = Cheby_approximation_curr
    err = np.linalg.norm(p[activeNodes] - gt[activeNodes],ord=2)/np.linalg.norm(gt[activeNodes],ord=2)
    print('---- Chebyshev ----')
    print('iter =', it)
    print('flops =', flops)
    print('err =', err )
    return p, it, flops, err
| [
"numpy.eye",
"numpy.where",
"numpy.count_nonzero",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.sum",
"numpy.cos",
"numpy.expand_dims",
"numpy.linalg.norm",
"numpy.int",
"numpy.divide"
] | [((171, 187), 'numpy.zeros', 'np.zeros', (['[N, 1]'], {}), '([N, 1])\n', (179, 187), True, 'import numpy as np\n'), ((619, 635), 'numpy.zeros', 'np.zeros', (['[N, 1]'], {}), '([N, 1])\n', (627, 635), True, 'import numpy as np\n'), ((1329, 1345), 'numpy.zeros', 'np.zeros', (['[N, 1]'], {}), '([N, 1])\n', (1337, 1345), True, 'import numpy as np\n'), ((1680, 1696), 'numpy.zeros', 'np.zeros', (['[N, 1]'], {}), '([N, 1])\n', (1688, 1696), True, 'import numpy as np\n'), ((2403, 2419), 'numpy.zeros', 'np.zeros', (['[N, 1]'], {}), '([N, 1])\n', (2411, 2419), True, 'import numpy as np\n'), ((2503, 2535), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(50000 + 1)'], {}), '(0, np.pi, 50000 + 1)\n', (2514, 2535), True, 'import numpy as np\n'), ((3233, 3244), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3241, 3244), True, 'import numpy as np\n'), ((3497, 3531), 'numpy.array', 'np.array', (['Cheby_approximation_prev'], {}), '(Cheby_approximation_prev)\n', (3505, 3531), True, 'import numpy as np\n'), ((343, 371), 'numpy.count_nonzero', 'np.count_nonzero', (['OP'], {'axis': '(1)'}), '(OP, axis=1)\n', (359, 371), True, 'import numpy as np\n'), ((965, 994), 'numpy.count_nonzero', 'np.count_nonzero', (['OP[:, ix_u]'], {}), '(OP[:, ix_u])\n', (981, 994), True, 'import numpy as np\n'), ((1884, 1912), 'numpy.count_nonzero', 'np.count_nonzero', (['OP[:, nnz]'], {}), '(OP[:, nnz])\n', (1900, 1912), True, 'import numpy as np\n'), ((1931, 1950), 'numpy.count_nonzero', 'np.count_nonzero', (['p'], {}), '(p)\n', (1947, 1950), True, 'import numpy as np\n'), ((2752, 2780), 'numpy.divide', 'np.divide', (['mu', '(mu + filt_arg)'], {}), '(mu, mu + filt_arg)\n', (2761, 2780), True, 'import numpy as np\n'), ((2971, 2988), 'numpy.cos', 'np.cos', (['(0 * theta)'], {}), '(0 * theta)\n', (2977, 2988), True, 'import numpy as np\n'), ((3059, 3076), 'numpy.cos', 'np.cos', (['(1 * theta)'], {}), '(1 * theta)\n', (3065, 3076), True, 'import numpy as np\n'), ((3146, 3158), 'numpy.sum', 'np.sum', 
(['tmp1'], {}), '(tmp1)\n', (3152, 3158), True, 'import numpy as np\n'), ((3178, 3190), 'numpy.sum', 'np.sum', (['tmp2'], {}), '(tmp2)\n', (3184, 3190), True, 'import numpy as np\n'), ((3291, 3307), 'numpy.where', 'np.where', (['(y != 0)'], {}), '(y != 0)\n', (3299, 3307), True, 'import numpy as np\n'), ((3328, 3356), 'numpy.count_nonzero', 'np.count_nonzero', (['OP[:, nnz]'], {}), '(OP[:, nnz])\n', (3344, 3356), True, 'import numpy as np\n'), ((3639, 3669), 'numpy.where', 'np.where', (['(graph.clust_memb > 0)'], {}), '(graph.clust_memb > 0)\n', (3647, 3669), True, 'import numpy as np\n'), ((5554, 5609), 'numpy.linalg.norm', 'np.linalg.norm', (['(p[activeNodes] - gt[activeNodes])'], {'ord': '(2)'}), '(p[activeNodes] - gt[activeNodes], ord=2)\n', (5568, 5609), True, 'import numpy as np\n'), ((5609, 5647), 'numpy.linalg.norm', 'np.linalg.norm', (['gt[activeNodes]'], {'ord': '(2)'}), '(gt[activeNodes], ord=2)\n', (5623, 5647), True, 'import numpy as np\n'), ((529, 557), 'numpy.count_nonzero', 'np.count_nonzero', (['OP'], {'axis': '(1)'}), '(OP, axis=1)\n', (545, 557), True, 'import numpy as np\n'), ((716, 745), 'numpy.linalg.norm', 'np.linalg.norm', (['(p - gt)'], {'ord': '(2)'}), '(p - gt, ord=2)\n', (730, 745), True, 'import numpy as np\n'), ((746, 771), 'numpy.linalg.norm', 'np.linalg.norm', (['gt'], {'ord': '(2)'}), '(gt, ord=2)\n', (760, 771), True, 'import numpy as np\n'), ((888, 930), 'numpy.expand_dims', 'np.expand_dims', (['(psi * r_u * OP[:, ix_u])', '(1)'], {}), '(psi * r_u * OP[:, ix_u], 1)\n', (902, 930), True, 'import numpy as np\n'), ((1030, 1047), 'numpy.int', 'np.int', (['deg[ix_u]'], {}), '(deg[ix_u])\n', (1036, 1047), True, 'import numpy as np\n'), ((1147, 1176), 'numpy.linalg.norm', 'np.linalg.norm', (['(p - gt)'], {'ord': '(2)'}), '(p - gt, ord=2)\n', (1161, 1176), True, 'import numpy as np\n'), ((1176, 1201), 'numpy.linalg.norm', 'np.linalg.norm', (['gt'], {'ord': '(2)'}), '(gt, ord=2)\n', (1190, 1201), True, 'import numpy as np\n'), ((1749, 
1774), 'numpy.linalg.norm', 'np.linalg.norm', (['(p - gt)', '(2)'], {}), '(p - gt, 2)\n', (1763, 1774), True, 'import numpy as np\n'), ((1775, 1800), 'numpy.linalg.norm', 'np.linalg.norm', (['gt'], {'ord': '(2)'}), '(gt, ord=2)\n', (1789, 1800), True, 'import numpy as np\n'), ((1847, 1862), 'numpy.where', 'np.where', (['(p > 0)'], {}), '(p > 0)\n', (1855, 1862), True, 'import numpy as np\n'), ((2221, 2246), 'numpy.linalg.norm', 'np.linalg.norm', (['(p - gt)', '(2)'], {}), '(p - gt, 2)\n', (2235, 2246), True, 'import numpy as np\n'), ((2247, 2272), 'numpy.linalg.norm', 'np.linalg.norm', (['gt'], {'ord': '(2)'}), '(gt, ord=2)\n', (2261, 2272), True, 'import numpy as np\n'), ((2682, 2691), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (2688, 2691), True, 'import numpy as np\n'), ((2724, 2737), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2730, 2737), True, 'import numpy as np\n'), ((2904, 2932), 'numpy.divide', 'np.divide', (['mu', '(mu + filt_arg)'], {}), '(mu, mu + filt_arg)\n', (2913, 2932), True, 'import numpy as np\n'), ((3263, 3275), 'numpy.array', 'np.array', (['OP'], {}), '(OP)\n', (3271, 3275), True, 'import numpy as np\n'), ((3551, 3583), 'numpy.count_nonzero', 'np.count_nonzero', (['polyTerm_1back'], {}), '(polyTerm_1back)\n', (3567, 3583), True, 'import numpy as np\n'), ((4355, 4417), 'numpy.array', 'np.array', (['(Cheby_approximation_prev + coef_curr * polyTerm_curr)'], {}), '(Cheby_approximation_prev + coef_curr * polyTerm_curr)\n', (4363, 4417), True, 'import numpy as np\n'), ((4504, 4528), 'numpy.array', 'np.array', (['polyTerm_1back'], {}), '(polyTerm_1back)\n', (4512, 4528), True, 'import numpy as np\n'), ((4550, 4573), 'numpy.array', 'np.array', (['polyTerm_curr'], {}), '(polyTerm_curr)\n', (4558, 4573), True, 'import numpy as np\n'), ((4605, 4639), 'numpy.array', 'np.array', (['Cheby_approximation_curr'], {}), '(Cheby_approximation_curr)\n', (4613, 4639), True, 'import numpy as np\n'), ((3710, 3788), 'numpy.linalg.norm', 'np.linalg.norm', 
(['(Cheby_approximation_curr[activeNodes] - gt[activeNodes])'], {'ord': '(2)'}), '(Cheby_approximation_curr[activeNodes] - gt[activeNodes], ord=2)\n', (3724, 3788), True, 'import numpy as np\n'), ((3789, 3827), 'numpy.linalg.norm', 'np.linalg.norm', (['gt[activeNodes]'], {'ord': '(2)'}), '(gt[activeNodes], ord=2)\n', (3803, 3827), True, 'import numpy as np\n'), ((3985, 3996), 'numpy.sum', 'np.sum', (['tmp'], {}), '(tmp)\n', (3991, 3996), True, 'import numpy as np\n'), ((4101, 4130), 'numpy.where', 'np.where', (['(polyTerm_1back != 0)'], {}), '(polyTerm_1back != 0)\n', (4109, 4130), True, 'import numpy as np\n'), ((4262, 4294), 'numpy.count_nonzero', 'np.count_nonzero', (['polyTerm_2back'], {}), '(polyTerm_2back)\n', (4278, 4294), True, 'import numpy as np\n'), ((5219, 5281), 'numpy.array', 'np.array', (['(Cheby_approximation_prev + coef_curr * polyTerm_curr)'], {}), '(Cheby_approximation_prev + coef_curr * polyTerm_curr)\n', (5227, 5281), True, 'import numpy as np\n'), ((5368, 5392), 'numpy.array', 'np.array', (['polyTerm_1back'], {}), '(polyTerm_1back)\n', (5376, 5392), True, 'import numpy as np\n'), ((5414, 5437), 'numpy.array', 'np.array', (['polyTerm_curr'], {}), '(polyTerm_curr)\n', (5422, 5437), True, 'import numpy as np\n'), ((5469, 5503), 'numpy.array', 'np.array', (['Cheby_approximation_curr'], {}), '(Cheby_approximation_curr)\n', (5477, 5503), True, 'import numpy as np\n'), ((2876, 2889), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2882, 2889), True, 'import numpy as np\n'), ((3894, 3912), 'numpy.cos', 'np.cos', (['(it * theta)'], {}), '(it * theta)\n', (3900, 3912), True, 'import numpy as np\n'), ((4227, 4259), 'numpy.count_nonzero', 'np.count_nonzero', (['polyTerm_1back'], {}), '(polyTerm_1back)\n', (4243, 4259), True, 'import numpy as np\n'), ((4438, 4469), 'numpy.count_nonzero', 'np.count_nonzero', (['polyTerm_curr'], {}), '(polyTerm_curr)\n', (4454, 4469), True, 'import numpy as np\n'), ((4849, 4860), 'numpy.sum', 'np.sum', (['tmp'], {}), 
'(tmp)\n', (4855, 4860), True, 'import numpy as np\n'), ((4965, 4994), 'numpy.where', 'np.where', (['(polyTerm_1back != 0)'], {}), '(polyTerm_1back != 0)\n', (4973, 4994), True, 'import numpy as np\n'), ((5126, 5158), 'numpy.count_nonzero', 'np.count_nonzero', (['polyTerm_2back'], {}), '(polyTerm_2back)\n', (5142, 5158), True, 'import numpy as np\n'), ((4758, 4776), 'numpy.cos', 'np.cos', (['(it * theta)'], {}), '(it * theta)\n', (4764, 4776), True, 'import numpy as np\n'), ((5091, 5123), 'numpy.count_nonzero', 'np.count_nonzero', (['polyTerm_1back'], {}), '(polyTerm_1back)\n', (5107, 5123), True, 'import numpy as np\n'), ((5302, 5333), 'numpy.count_nonzero', 'np.count_nonzero', (['polyTerm_curr'], {}), '(polyTerm_curr)\n', (5318, 5333), True, 'import numpy as np\n'), ((4153, 4181), 'numpy.count_nonzero', 'np.count_nonzero', (['OP[:, nnz]'], {}), '(OP[:, nnz])\n', (4169, 4181), True, 'import numpy as np\n'), ((5017, 5045), 'numpy.count_nonzero', 'np.count_nonzero', (['OP[:, nnz]'], {}), '(OP[:, nnz])\n', (5033, 5045), True, 'import numpy as np\n')] |
import os,sys
from torchvision import transforms
import torch, torch.utils
import numpy as np
from torch.utils.data import Dataset
import random
import PIL
import more_itertools as mit
from torch.utils.data.sampler import BatchSampler
class BalancedBatchSampler(BatchSampler):
    """Batch sampler that groups dataset entries by frame length.

    Each dataset entry looks like [set_id, in_path, x]; entries whose ``x``
    has the same length are batched together, so every yielded batch holds
    equally-sized samples (at most ``max_batch_size`` of them).
    """

    def __init__(self, dataset, max_batch_size):
        self.dataset = dataset
        self.max_batch_size = max_batch_size
        # Bucket dataset positions by len(item[2]).
        self.frame_dataset = {}
        for position, entry in enumerate(self.dataset):
            length = len(entry[2])
            self.frame_dataset.setdefault(length, []).append(position)
        # Chop every bucket into chunks of at most max_batch_size indices.
        self.batch_dataset = []
        step = self.max_batch_size
        for bucket in self.frame_dataset.values():
            self.batch_dataset.extend(
                bucket[start:start + step] for start in range(0, len(bucket), step)
            )
        print('Number Batch ', len(self.batch_dataset))

    def __iter__(self):
        return iter(self.batch_dataset)

    def __len__(self):
        return len(self.batch_dataset)
# Seed NumPy's global RNG so the random crops/speed factors below are reproducible.
np.random.seed(42)
def cut_data(data, out_length):
    """Crop or zero-pad ``data`` (time x feature) along the time axis.

    When ``out_length`` is given, longer inputs are cropped at a random
    offset and shorter ones zero-padded at the end.  Regardless of
    ``out_length``, the result is padded to at least 200 frames.
    """
    if out_length is not None:
        length = data.shape[0]
        if length > out_length:
            start = np.random.randint(length - out_length)
            data = data[start:start + out_length, :]
        else:
            data = np.pad(data, ((0, out_length - length), (0, 0)), "constant")
    shortfall = 200 - data.shape[0]
    if shortfall > 0:
        data = np.pad(data, ((0, shortfall), (0, 0)), "constant")
    return data
def cut_data_front(data, out_length):
    """Deterministic variant of cut_data: crop from the front (offset 0).

    Longer inputs keep their first ``out_length`` frames; shorter ones are
    zero-padded at the end.  The result is always at least 200 frames long.
    """
    if out_length is not None:
        if data.shape[0] > out_length:
            data = data[:out_length, :]
        else:
            missing = out_length - data.shape[0]
            data = np.pad(data, ((0, missing), (0, 0)), "constant")
    shortfall = 200 - data.shape[0]
    if shortfall > 0:
        data = np.pad(data, ((0, shortfall), (0, 0)), "constant")
    return data
def shorter(feature, mean_size=2):
    """Downsample ``feature`` along time by averaging blocks of ``mean_size`` frames.

    Trailing frames that do not fill a whole block are dropped.
    """
    length, width = feature.shape
    n_blocks = int(length / mean_size)
    condensed = np.zeros((n_blocks, width), dtype=np.float64)
    for block in range(n_blocks):
        lo = block * mean_size
        condensed[block, :] = feature[lo:lo + mean_size, :].mean(axis=0)
    return condensed
class CQT(Dataset):
    """Training dataset over per-track CQT feature files named ``<set>-<version>.npy``."""

    def __init__(self, filepath , out_length=None):
        self.indir = filepath
        self.file_list = list(os.listdir(filepath))
        self.out_length = out_length

    def __getitem__(self, index):
        # Augment at load time: transpose to (time, bins), random tempo
        # change, per-sample max normalisation, random crop/pad, then
        # reshape into a (1, bins, time) tensor.
        augment = transforms.Compose([
            lambda x: x.T,
            lambda x: change_speed(x, 0.7, 1.3),
            lambda x: x.astype(np.float32) / (np.max(np.abs(x)) + 1e-6),
            lambda x: cut_data(x, self.out_length),
            lambda x: torch.Tensor(x),
            lambda x: x.permute(1, 0).unsqueeze(0),
        ])
        name = self.file_list[index].strip()
        set_id, version_id = name.split('.')[0].split('-')
        data = np.load(os.path.join(self.indir, name))  # stored as 12xN
        return augment(data), int(set_id)

    def __len__(self):
        return len(self.file_list)
class CQTSiamese(Dataset):
    """Siamese pair dataset built from hum/song CQT files.

    File names look like ``<id>_<hum|song>-...``.  Every hum is paired with
    all songs of the same id (target 1) and with the songs of one randomly
    chosen other id (target 0).  All features are loaded and transformed
    eagerly in ``__init__``; ``song_factor`` is currently unused.
    """
    def __init__(self, filepath , out_length=None, song_factor=2):
        # NOTE: the lambdas close over self.out_length, which is assigned
        # further below — fine, because they only run when called.
        self.transform = transforms.Compose([
            lambda x : x.T,
            lambda x : change_speed(x, 0.9, 1.1),
            # lambda x : x-np.mean(x),
            lambda x : x.astype(np.float32) / (np.max(np.abs(x))+ 1e-6),
            lambda x : cut_data(x, self.out_length),
            lambda x : torch.Tensor(x),
            lambda x : x.permute(1,0).unsqueeze(0),
        ])
        self.indir = filepath
        self.file_list = list(os.listdir(filepath))
        self.out_length = out_length
        self.hums = []
        self.songs = {}
        self.labels = []
        # Split files into hums and songs, keyed by their shared id.
        for i in range(len(self.file_list)):
            fileName = self.file_list[i]
            id, t = fileName.split('-')[0].split('_')
            self.labels.append(id)
            if t == 'hum':
                self.hums.append([id, i, fileName])
            elif t == 'song':
                if id not in self.songs:
                    self.songs[id] = []
                self.songs[id].append([i, fileName])
        self.labels_set = set(self.labels)
        # Preload and transform every feature file once (augmentation is
        # therefore fixed per epoch, not re-drawn per access).
        self.cqtFeature = []
        for i in range(len(self.file_list)):
            in_path = os.path.join(self.indir, self.file_list[i])
            data = np.load(in_path)
            data = self.transform(data)
            self.cqtFeature.append(data)
        # Positive pairs: hum with every song of the same id.
        # Negative pairs: hum with every song of one random other id.
        self.posPair = []
        self.negPair = []
        for hum in self.hums:
            id, humIdx, _ = hum
            for song in self.songs[id]:
                self.posPair.append([humIdx, song[0]])
            negSongId = np.random.choice(list(self.labels_set - set([id])))
            for song in self.songs[negSongId]:
                self.negPair.append([humIdx, song[0]])
        self.pairData = []
        for p in self.posPair:
            self.pairData.append([p, 1])
        for p in self.negPair:
            self.pairData.append([p, 0])
        # Deterministic shuffle so positives and negatives interleave.
        random.seed(42)
        random.shuffle(self.pairData)
    def __getitem__(self, index):
        # Returns ((hum_tensor, song_tensor), target) with target in {0, 1}.
        pair, target = self.pairData[index]
        humIdx, songIdx = pair
        data1 = self.cqtFeature[humIdx]
        data2 = self.cqtFeature[songIdx]
        return (data1, data2), target
    def __len__(self):
        return len(self.pairData)
class CQTVal(Dataset):
    """Validation dataset: deterministic front-crop; yields (data, [set_id, version_id])."""

    def __init__(self, filepath , out_length=None):
        self.indir = filepath
        self.file_list = list(os.listdir(filepath))
        self.out_length = out_length

    def __getitem__(self, index):
        # No augmentation here: normalise, front-crop, shape to (1, bins, time).
        prepare = transforms.Compose([
            lambda x: x.T,
            lambda x: x.astype(np.float32) / (np.max(np.abs(x)) + 1e-6),
            lambda x: cut_data_front(x, self.out_length),
            lambda x: torch.Tensor(x),
            lambda x: x.permute(1, 0).unsqueeze(0),
        ])
        name = self.file_list[index].strip()
        set_id, version_id = name.split('.')[0].split('-')
        data = np.load(os.path.join(self.indir, name))  # stored as 12xN
        return prepare(data), [set_id, version_id]

    def __len__(self):
        return len(self.file_list)
# hum_len = len(hum_feat)
# hum_pad = 0.1*hum_len
# for track_id in vocals_features.keys():
# vocal_feat = vocals_features[track_id][0]
# for search_len in [hum_len - hum_pad, hum_len, hum_len + hum_pad ]:
# windows = list(mit.windowed(vocal_feat, n=search_len, step=hum_pad))
# windows = [list(filter(None, w)) for w in windows]
class CQTVocal(Dataset):
    """Sliding-window dataset over vocal-track CQT files.

    For every file, windows of several lengths around ``hum_length``
    (seven spans: hum_length plus -3..+3 pads of 5%) slide over the time
    axis with a 10% stride; each window becomes one item tagged with the
    file's set id.
    """

    def __init__(self, filepath , hum_length, file_list):
        self.indir = filepath
        self.file_list = file_list
        self.hum_length = hum_length
        self.hum_pad = int(0.05 * hum_length)
        self.stride = int(0.1 * hum_length)
        self.dataset = []
        for name in self.file_list:
            set_id, _ = name.split('.')[0].split('-')
            path = os.path.join(self.indir, name)
            feat = np.load(path)
            time_indices = list(range(feat.shape[1]))
            frames = []
            for span in [self.hum_length + self.hum_pad * k for k in range(-3, 4)]:
                wins = list(mit.windowed(time_indices, n=span, step=self.stride))
                # mit.windowed fills short tails with None; strip those.
                frames.extend([i for i in list(w) if i is not None] for w in wins)
            self.dataset.extend([set_id, path, f] for f in frames)

    def __getitem__(self, index):
        prepare = transforms.Compose([
            lambda x: x.T,
            lambda x: x.astype(np.float32) / (np.max(np.abs(x)) + 1e-6),
            lambda x: cut_data_front(x, None),
            lambda x: torch.Tensor(x),
            lambda x: x.permute(1, 0).unsqueeze(0),
        ])
        set_id, path, frame = self.dataset[index]
        data = np.load(path)        # stored as 12xN
        data = data.T[frame].T      # keep only this window's frames
        return prepare(data), [set_id, 0]

    def __len__(self):
        return len(self.dataset)
class CQTHum(Dataset):
    """Query (hum) dataset: deterministic front-crop; yields (data, hum_id)."""

    def __init__(self, filepath , out_length=None):
        self.indir = filepath
        self.file_list = list(os.listdir(filepath))
        self.out_length = out_length

    def __getitem__(self, index):
        prepare = transforms.Compose([
            lambda x: x.T,
            lambda x: x.astype(np.float32) / (np.max(np.abs(x)) + 1e-6),
            lambda x: cut_data_front(x, self.out_length),
            lambda x: torch.Tensor(x),
            lambda x: x.permute(1, 0).unsqueeze(0),
        ])
        name = self.file_list[index].strip()
        hum_id = name.split('.')[0]
        data = np.load(os.path.join(self.indir, name))  # stored as 12xN
        return prepare(data), hum_id

    def __len__(self):
        return len(self.file_list)
def change_speed(data, l=0.7, r=1.5):  # rescales data.shape[0]
    """Tempo augmentation: rescale the time axis (axis 0) by a random factor in [l, r].

    The feature matrix is mapped to an 8-bit image, resized with
    torchvision, and scaled back to the original value range.
    """
    target_len = int(data.shape[0] * np.random.uniform(l, r))
    peak = np.max(data) + 1
    image = PIL.Image.fromarray((data * 255.0 / peak).astype(np.uint8))
    resize = transforms.Compose([
        transforms.Resize(size=(target_len, data.shape[1])),
    ])
    resized = resize(image)
    return np.array(resized) / 255.0 * peak
if __name__=='__main__':
    # NOTE(review): HPCP is not defined in this file (only CQT* datasets
    # are) — presumably it lives in a sibling module or should be CQT;
    # confirm before running this smoke test.
    train_dataset = HPCP('train', 394)
    trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=128, num_workers=12, shuffle=True)
| [
"numpy.abs",
"os.listdir",
"more_itertools.windowed",
"random.shuffle",
"os.path.join",
"torch.Tensor",
"random.seed",
"numpy.max",
"numpy.array",
"numpy.random.randint",
"numpy.random.uniform",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"torchvision.transforms.Resize",
"numpy.p... | [((1135, 1153), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1149, 1153), True, 'import numpy as np\n'), ((10099, 10191), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': '(128)', 'num_workers': '(12)', 'shuffle': '(True)'}), '(train_dataset, batch_size=128, num_workers=12,\n shuffle=True)\n', (10126, 10191), False, 'import torch, torch.utils\n'), ((1619, 1666), 'numpy.pad', 'np.pad', (['data', '((0, offset), (0, 0))', '"""constant"""'], {}), "(data, ((0, offset), (0, 0)), 'constant')\n", (1625, 1666), True, 'import numpy as np\n'), ((2123, 2170), 'numpy.pad', 'np.pad', (['data', '((0, offset), (0, 0))', '"""constant"""'], {}), "(data, ((0, offset), (0, 0)), 'constant')\n", (2129, 2170), True, 'import numpy as np\n'), ((3216, 3250), 'os.path.join', 'os.path.join', (['self.indir', 'filename'], {}), '(self.indir, filename)\n', (3228, 3250), False, 'import os, sys\n'), ((3266, 3282), 'numpy.load', 'np.load', (['in_path'], {}), '(in_path)\n', (3273, 3282), True, 'import numpy as np\n'), ((5410, 5425), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (5421, 5425), False, 'import random\n'), ((5434, 5463), 'random.shuffle', 'random.shuffle', (['self.pairData'], {}), '(self.pairData)\n', (5448, 5463), False, 'import random\n'), ((6487, 6521), 'os.path.join', 'os.path.join', (['self.indir', 'filename'], {}), '(self.indir, filename)\n', (6499, 6521), False, 'import os, sys\n'), ((6537, 6553), 'numpy.load', 'np.load', (['in_path'], {}), '(in_path)\n', (6544, 6553), True, 'import numpy as np\n'), ((8526, 8542), 'numpy.load', 'np.load', (['in_path'], {}), '(in_path)\n', (8533, 8542), True, 'import numpy as np\n'), ((9419, 9453), 'os.path.join', 'os.path.join', (['self.indir', 'filename'], {}), '(self.indir, filename)\n', (9431, 9453), False, 'import os, sys\n'), ((9469, 9485), 'numpy.load', 'np.load', (['in_path'], {}), '(in_path)\n', (9476, 9485), True, 'import numpy as np\n'), ((9758, 
9770), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (9764, 9770), True, 'import numpy as np\n'), ((1329, 1358), 'numpy.random.randint', 'np.random.randint', (['max_offset'], {}), '(max_offset)\n', (1346, 1358), True, 'import numpy as np\n'), ((1494, 1541), 'numpy.pad', 'np.pad', (['data', '((0, offset), (0, 0))', '"""constant"""'], {}), "(data, ((0, offset), (0, 0)), 'constant')\n", (1500, 1541), True, 'import numpy as np\n'), ((1998, 2045), 'numpy.pad', 'np.pad', (['data', '((0, offset), (0, 0))', '"""constant"""'], {}), "(data, ((0, offset), (0, 0)), 'constant')\n", (2004, 2045), True, 'import numpy as np\n'), ((2591, 2611), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (2601, 2611), False, 'import os, sys\n'), ((3980, 4000), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (3990, 4000), False, 'import os, sys\n'), ((4670, 4713), 'os.path.join', 'os.path.join', (['self.indir', 'self.file_list[i]'], {}), '(self.indir, self.file_list[i])\n', (4682, 4713), False, 'import os, sys\n'), ((4733, 4749), 'numpy.load', 'np.load', (['in_path'], {}), '(in_path)\n', (4740, 4749), True, 'import numpy as np\n'), ((5907, 5927), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (5917, 5927), False, 'import os, sys\n'), ((7510, 7544), 'os.path.join', 'os.path.join', (['self.indir', 'filename'], {}), '(self.indir, filename)\n', (7522, 7544), False, 'import os, sys\n'), ((7570, 7586), 'numpy.load', 'np.load', (['in_path'], {}), '(in_path)\n', (7577, 7586), True, 'import numpy as np\n'), ((8862, 8882), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (8872, 8882), False, 'import os, sys\n'), ((9723, 9746), 'numpy.random.uniform', 'np.random.uniform', (['l', 'r'], {}), '(l, r)\n', (9740, 9746), True, 'import numpy as np\n'), ((9886, 9934), 'torchvision.transforms.Resize', 'transforms.Resize', ([], {'size': '(new_len, data.shape[1])'}), '(size=(new_len, data.shape[1]))\n', (9903, 9934), False, 'from torchvision import 
transforms\n'), ((9986, 10004), 'numpy.array', 'np.array', (['new_data'], {}), '(new_data)\n', (9994, 10004), True, 'import numpy as np\n'), ((2997, 3012), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (3009, 3012), False, 'import torch, torch.utils\n'), ((3840, 3855), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (3852, 3855), False, 'import torch, torch.utils\n'), ((6268, 6283), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (6280, 6283), False, 'import torch, torch.utils\n'), ((7799, 7856), 'more_itertools.windowed', 'mit.windowed', (['vocal_indxs'], {'n': 'search_len', 'step': 'self.stride'}), '(vocal_indxs, n=search_len, step=self.stride)\n', (7811, 7856), True, 'import more_itertools as mit\n'), ((8364, 8379), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (8376, 8379), False, 'import torch, torch.utils\n'), ((9223, 9238), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (9235, 9238), False, 'import torch, torch.utils\n'), ((2902, 2911), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (2908, 2911), True, 'import numpy as np\n'), ((3745, 3754), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (3751, 3754), True, 'import numpy as np\n'), ((6167, 6176), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (6173, 6176), True, 'import numpy as np\n'), ((8274, 8283), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (8280, 8283), True, 'import numpy as np\n'), ((9122, 9131), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (9128, 9131), True, 'import numpy as np\n')] |
from __future__ import print_function
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.io as sio
import scipy.sparse as sp
import scipy.sparse.linalg as slinalg
import scipy.linalg as linalg
from scipy.sparse.linalg.eigen.arpack import eigsh
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.preprocessing import normalize
import sys
from os import path
import copy
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import time
import random
import tensorflow as tf
# import matplotlib.pyplot as plt
def save_sparse_csr(filename, array):
    """Persist a scipy CSR matrix to ``<filename>.npz`` (counterpart of load_sparse_csr)."""
    parts = {'data': array.data, 'indices': array.indices,
             'indptr': array.indptr, 'shape': array.shape}
    np.savez(filename, **parts)
def load_sparse_csr(filename):
    """Rebuild a CSR matrix stored by save_sparse_csr from its ``.npz`` archive."""
    archive = np.load(filename)
    parts = (archive['data'], archive['indices'], archive['indptr'])
    return sp.csr_matrix(parts, shape=archive['shape'])
def parse_index_file(filename):
    """Read one integer per line from ``filename`` and return them as a list."""
    with open(filename) as handle:
        return [int(line.strip()) for line in handle]
def sample_mask(idx, l):
    """Return a boolean mask of length ``l`` that is True at positions ``idx``.

    Fix: the original used ``dtype=np.bool``, an alias deprecated in
    NumPy 1.20 and removed in 1.24; the builtin ``bool`` is equivalent.
    """
    mask = np.zeros(l)
    mask[idx] = 1
    return np.array(mask, dtype=bool)
def get_triplet(y_train, train_mask, max_triplets):
    """Build (anchor, positive, negative) node-index triplets from one-hot labels.

    y_train: (n_nodes, n_classes) one-hot matrix; only nonzero rows take
    part.  For each labeled node, and for each of its positives, up to
    ``max_triplets`` negatives are drawn uniformly via ``random.sample``
    (so results depend on the global ``random`` state).  ``train_mask``
    is accepted but unused.  Returns an array of shape (1, n_triplets, 3).
    """
    # print('y_train----',y_train.shape)
    index_nonzero = y_train.nonzero()
    # for i in range(y_train.shape[1]):
    #     label_count.append(index_nonzero[1][[index_nonzero[1]==i]].size)
    label_count = np.sum(y_train, axis=0)
    all_count = np.sum(label_count)
    # Turn the nonzero() pair of arrays into a list of [node, class] pairs,
    # sorted by class.
    index_nonzero = np.transpose(np.concatenate((index_nonzero[0][np.newaxis,:], index_nonzero[1]\
        [np.newaxis, :]),axis=0)).tolist()
    index_nonzero = sorted(index_nonzero, key = lambda s: s[1])
    #print(index_nonzero)
    #print(label_count)
    def get_one_triplet(input_index, index_nonzero, label_count, all_count, max_triplets):
        # Build all triplets anchored at input_index = [node, class];
        # returns 0 (sentinel) when the class has no labeled nodes.
        triplet = []
        if label_count[input_index[1]]==0:
            return 0
        else:
            # print('max_triplets', max_triplets)
            # print(all_count)
            # print(label_count[input_index[1]])
            # Cannot draw more negatives than exist outside this class.
            n_triplets = min(max_triplets, int(all_count-label_count[input_index[1]]))
            # print('----------')
            for j in range(int(label_count[input_index[1]])-1):
                positives = []
                negatives = []
                for k, (value, label) in enumerate(index_nonzero):
                    #find a postive sample, and if only one sample then choose itself
                    if label == input_index[1] and (value != input_index[0] or label_count[input_index[1]]==1):
                        positives.append(index_nonzero[k])
                    if label != input_index[1]:
                        negatives.append(index_nonzero[k])
                # print('positives' ,positives)
                # print('negatives', negatives)
                negatives = random.sample(list(negatives), n_triplets)
                for value, label in negatives:
                    triplet.append([input_index[0], positives[j][0], value])
            return triplet
    triplet = []
    for i, j in enumerate(index_nonzero):
        triple = get_one_triplet(j, index_nonzero, label_count, all_count,max_triplets)
        if triple == 0:
            continue
        else:
            triplet.extend(triple)
    np_triple = np.concatenate(np.array([triplet]), axis = 1)
    return np_triple
def load_data(dataset_str, train_size, validation_size, model_config, shuffle=True):
    """Load a graph dataset and split it into train/val/test.

    Two on-disk formats are supported:
    - ``data/<name>.mat`` files (the image-feature sets listed below) with
      keys 'labels', 'X' (features) and 'G' (graph);
    - planetoid-style pickles ``data/ind.<name>.{x,y,tx,ty,allx,ally,graph}``
      plus a test-index file, with special handling for 'citeseer'
      (isolated test nodes) and 'nell*' (relation nodes, cached features).

    ``train_size`` is the number of labeled nodes *per class*;
    ``validation_size`` and ``model_config['test_size']`` control the other
    splits.  Also sets the module-level ``all_labels`` as ground truth for
    the pseudo-labeling helpers.

    Returns:
        (adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask)
    """
    if dataset_str in ['USPS-Fea', 'CIFAR-Fea', 'Cifar_10000_fea', 'Cifar_R10000_fea', 'MNIST-Fea', 'MNIST-10000', 'MNIST-5000']:
        data = sio.loadmat('data/{}.mat'.format(dataset_str))
        l = data['labels'].flatten()
        # One-hot encode the integer labels.
        labels = np.zeros([l.shape[0],np.max(data['labels'])+1])
        labels[np.arange(l.shape[0]), l.astype(np.int8)] = 1
        features = data['X']
        sample = features[0].copy()  # NOTE(review): unused local
        adj = data['G']
    else:
        # Planetoid pickle format: x/tx/allx are features, y/ty/ally labels.
        names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
        objects = []
        for i in range(len(names)):
            with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
                if sys.version_info > (3, 0):
                    objects.append(pkl.load(f, encoding='latin1'))
                else:
                    objects.append(pkl.load(f))
        x, y, tx, ty, allx, ally, graph = tuple(objects)
        adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
        test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
        test_idx_range = np.sort(test_idx_reorder)
        if dataset_str == 'citeseer':
            # Fix citeseer dataset (there are some isolated nodes in the graph)
            # Find isolated nodes, add them as zero-vecs into the right position
            test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
            tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
            tx_extended[test_idx_range - min(test_idx_range), :] = tx
            tx = tx_extended
            ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
            ty_extended[test_idx_range - min(test_idx_range), :] = ty
            ty = ty_extended
        features = sp.vstack((allx, tx)).tolil()
        # features = sp.eye(features.shape[0]).tolil()
        # features = sp.lil_matrix(allx)
        labels = np.vstack((ally, ty))
        # labels = np.vstack(ally)
        if dataset_str.startswith('nell'):
            # Find relation nodes, add them as zero-vecs into the right position
            test_idx_range_full = range(allx.shape[0], len(graph))
            isolated_node_idx = np.setdiff1d(test_idx_range_full, test_idx_reorder)
            tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
            tx_extended[test_idx_range - allx.shape[0], :] = tx
            tx = tx_extended
            ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
            ty_extended[test_idx_range - allx.shape[0], :] = ty
            ty = ty_extended
            features = sp.vstack((allx, tx)).tolil()
            features[test_idx_reorder, :] = features[test_idx_range, :]
            labels = np.vstack((ally, ty))
            labels[test_idx_reorder, :] = labels[test_idx_range, :]
            idx_all = np.setdiff1d(range(len(graph)), isolated_node_idx)  # NOTE(review): unused
            # Relation nodes get a one-hot block appended to the feature
            # matrix; the result is cached on disk because it is expensive.
            if not os.path.isfile("data/planetoid/{}.features.npz".format(dataset_str)):
                print("Creating feature vectors for relations - this might take a while...")
                features_extended = sp.hstack((features, sp.lil_matrix((features.shape[0], len(isolated_node_idx)))),
                                            dtype=np.int32).todense()
                features_extended[isolated_node_idx, features.shape[1]:] = np.eye(len(isolated_node_idx))
                features = sp.csr_matrix(features_extended, dtype=np.float32)
                print("Done!")
                save_sparse_csr("data/planetoid/{}.features".format(dataset_str), features)
            else:
                features = load_sparse_csr("data/planetoid/{}.features.npz".format(dataset_str))
            adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
            features[test_idx_reorder, :] = features[test_idx_range, :]
        labels[test_idx_reorder, :] = labels[test_idx_range, :]
        # NOTE(review): for non-nell datasets `features` is never reordered
        # to match the sorted test index here, and for nell the label
        # reordering above runs twice — confirm against the reference
        # planetoid loader.
        features = preprocess_features(features, feature_type=model_config['feature'])
    global all_labels
    all_labels = labels.copy()
    # split the data set
    idx = np.arange(len(labels))
    no_class = labels.shape[1]  # number of class
    # validation_size = validation_size * len(idx) // 100
    # if not hasattr(train_size, '__getitem__'):
    train_size = [train_size for i in range(labels.shape[1])]
    if shuffle:
        np.random.shuffle(idx)
    # Greedily take nodes (in shuffled order) until every class has its
    # per-class training budget; `next` is how far into idx we consumed.
    idx_train = []
    count = [0 for i in range(no_class)]
    label_each_class = train_size
    next = 0
    for i in idx:
        if count == label_each_class:
            break
        next += 1
        for j in range(no_class):
            if labels[i, j] and count[j] < label_each_class[j]:
                idx_train.append(i)
                count[j] += 1
    test_size = model_config['test_size']
    if model_config['validate']:
        if test_size:
            assert next+validation_size<len(idx)
        idx_val = idx[next:next+validation_size]
        assert next+validation_size+test_size < len(idx)
        idx_test = idx[-test_size:] if test_size else idx[next+validation_size:]
    else:
        if test_size:
            assert next+test_size<len(idx)
        idx_val = idx[-test_size:] if test_size else idx[next:]
        idx_test = idx[-test_size:] if test_size else idx[next:]
    # else:
    #     labels_of_class = [0]
    #     while (np.prod(labels_of_class) == 0):
    #         np.random.shuffle(idx)
    #         idx_train = idx[0:int(len(idx) * train_size // 100)]
    #         labels_of_class = np.sum(labels[idx_train], axis=0)
    #     idx_val = idx[-500 - validation_size:-500]
    #     idx_test = idx[-500:]
    print('labels of each class : ', np.sum(labels[idx_train], axis=0))
    # idx_val = idx[len(idx) * train_size // 100:len(idx) * (train_size // 2 + 50) // 100]
    # idx_test = idx[len(idx) * (train_size // 2 + 50) // 100:len(idx)]
    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])
    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]
    # else:
    #     idx_test = test_idx_range.tolist()
    #     idx_train = range(len(y))
    #     idx_val = range(len(y), len(y) + 500)
    #
    #     train_mask = sample_mask(idx_train, labels.shape[0])
    #     val_mask = sample_mask(idx_val, labels.shape[0])
    #     test_mask = sample_mask(idx_test, labels.shape[0])
    #
    #     y_train = np.zeros(labels.shape)
    #     y_val = np.zeros(labels.shape)
    #     y_test = np.zeros(labels.shape)
    #     y_train[train_mask, :] = labels[train_mask, :]
    #     y_val[val_mask, :] = labels[val_mask, :]
    #     y_test[test_mask, :] = labels[test_mask, :]
    size_of_each_class = np.sum(labels[idx_train], axis=0)  # NOTE(review): unused
    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
    """Convert scipy sparse matrix (or list of them) to tf.SparseTensorValue form.

    A list argument is converted element-wise in place and returned.
    """
    def to_tuple(mx):
        # TensorFlow wants COO-style (coords, values, dense_shape).
        if not sp.isspmatrix_coo(mx):
            mx = mx.tocoo()
        coords = np.vstack((mx.row, mx.col)).transpose()
        return tf.SparseTensorValue(coords, mx.data, np.array(mx.shape, dtype=np.int64))

    if isinstance(sparse_mx, list):
        for i, mx in enumerate(sparse_mx):
            sparse_mx[i] = to_tuple(mx)
        return sparse_mx
    return to_tuple(sparse_mx)
def preprocess_features(features, feature_type):
    """Normalise a (sparse) feature matrix.

    'bow'   -> row-normalise so each row sums to 1 (all-zero rows stay zero)
    'tfidf' -> sklearn TF-IDF transform
    'none'  -> discard features, use the identity matrix
    Raises ValueError for any other feature_type.
    """
    if feature_type == 'bow':
        row_sums = np.array(features.sum(1))
        inv = np.power(row_sums, -1).flatten()
        inv[np.isinf(inv)] = 0.  # all-zero rows would otherwise divide by zero
        features = sp.diags(inv).dot(features)
    elif feature_type == 'tfidf':
        transformer = TfidfTransformer(norm=None, use_idf=True, smooth_idf=True, sublinear_tf=False)
        features = transformer.fit_transform(features)
    elif feature_type == 'none':
        features = sp.csr_matrix(sp.eye(features.shape[0]))
    else:
        raise ValueError('Invalid feature type: ' + str(feature_type))
    return features
def normalize_adj(adj, type='sym'):
    """Normalise an adjacency matrix.

    type='sym': D^-1/2 A D^-1/2 (symmetric normalisation, returned as COO)
    type='rw' : D^-1 A (random-walk normalisation)
    Zero-degree rows are left as zero rows instead of dividing by zero.
    """
    if type == 'sym':
        adj = sp.coo_matrix(adj)
        degrees = np.array(adj.sum(1))
        inv_sqrt = np.power(degrees, -0.5).flatten()
        inv_sqrt[np.isinf(inv_sqrt)] = 0.
        d_half = sp.diags(inv_sqrt)
        return adj.dot(d_half).transpose().dot(d_half).tocoo()
    elif type == 'rw':
        degrees = np.array(adj.sum(1))
        inv = np.power(degrees, -1.0).flatten()
        inv[np.isinf(inv)] = 0.
        return sp.diags(inv).dot(adj)
def preprocess_adj(adj, type='sym', loop=True):
    """Normalise ``adj`` (optionally adding self-loops first) and convert
    it to the tuple representation TensorFlow expects."""
    if loop:
        adj = adj + sp.eye(adj.shape[0])  # add self-connections
    normalized = normalize_adj(adj, type=type)
    return sparse_to_tuple(normalized)
def chebyshev_polynomials(adj, k):
    """Build the Chebyshev polynomial basis T_0..T_k of the normalised Laplacian.

    Returns a list of k+1 sparse matrices in tuple representation.
    NOTE: unlike the standard GCN recipe, the Laplacian is NOT rescaled to
    [-1, 1] here (the eigsh-based rescaling is commented out).
    """
    print("Calculating Chebyshev polynomials up to order {}...".format(k))
    laplacian = sp.eye(adj.shape[0]) - normalize_adj(adj)
    # largest_eigval, _ = eigsh(laplacian, 1, which='LM')
    # scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])

    polys = [sp.eye(adj.shape[0]), laplacian]

    def next_poly(t_prev, t_prev2, lap):
        # Chebyshev recurrence: T_n = 2 L T_{n-1} - T_{n-2}
        return 2 * sp.csr_matrix(lap, copy=True).dot(t_prev) - t_prev2

    for _ in range(2, k + 1):
        polys.append(next_poly(polys[-1], polys[-2], laplacian))

    return sparse_to_tuple(polys)
def absorption_probability(W, alpha, stored_A=None, column=None):
try:
# raise Exception('DEBUG')
A = np.load(stored_A + str(alpha) + '.npz')['arr_0']
print('load A from ' + stored_A + str(alpha) + '.npz')
if column is not None:
P = np.zeros(W.shape)
P[:, column] = A[:, column]
return P
else:
return A
except:
# W=sp.csr_matrix([[0,1],[1,0]])
# alpha = 1
n = W.shape[0]
print('Calculate absorption probability...')
W = W.copy().astype(np.float32)
D = W.sum(1).flat
L = sp.diags(D, dtype=np.float32) - W
L += alpha * sp.eye(W.shape[0], dtype=L.dtype)
L = sp.csc_matrix(L)
# print(np.linalg.det(L))
if column is not None:
A = np.zeros(W.shape)
# start = time.time()
A[:, column] = slinalg.spsolve(L, sp.csc_matrix(np.eye(L.shape[0], dtype='float32')[:, column])).toarray()
# print(time.time()-start)
return A
else:
# start = time.time()
A = slinalg.inv(L).toarray()
# print(time.time()-start)
if stored_A:
np.savez(stored_A + str(alpha) + '.npz', A)
return A
# fletcher_reeves
# slinalg.solve(L, np.ones(L.shape[0]))
# A_ = np.zeros(W.shape)
# I = sp.eye(n)
# Di = sp.diags(np.divide(1,np.array(D)+alpha))
# for i in range(10):
# # A_=
# A_ = Di*(I+W.dot(A_))
# print(time.time()-start)
def fletcher_reeves(A, B):
    """Solve A X = B with 10 Fletcher-Reeves conjugate-gradient iterations.

    A must be symmetric positive definite (dense ndarray or scipy sparse);
    B may contain several right-hand sides as columns, solved
    simultaneously (the reductions are per-column via ``.sum(0)``).

    Fixes vs. the original:
    - ``p = r`` aliased the residual array, so the in-place
      ``r -= alpha * Ap`` silently corrupted the first search direction;
      ``p`` now starts from a copy.
    - removed the dead ``if True: pass`` placeholder.
    The iteration count stays fixed at 10 with no convergence test,
    matching the original contract.
    """
    X = np.zeros(B.shape)
    r = np.array(B - A.dot(X))   # residual, one column per right-hand side
    rsold = (r * r).sum(0)
    p = r.copy()                 # search direction (must not alias r)
    for _ in range(10):
        Ap = np.array(A.dot(p))
        alpha = rsold / (p * Ap).sum(0)
        X += alpha * p
        r -= alpha * Ap
        rsnew = (r * r).sum(0)
        p = r + rsnew / rsold * p
        rsold = rsnew
    return X
def cotraining(W, t, alpha, y_train, train_mask, stored_A=None):
    """Expand the training set via label propagation (the 'co-training' step).

    For every class c, the ``t[c]`` unlabeled nodes with the highest
    absorption probability from that class's labeled nodes are added to
    the training set with label c (``t`` may also be a single budget used
    for all classes).  Prints per-class pseudo-label accuracy via
    correct_label_count.  Returns the enlarged (y_train, train_mask);
    the input arrays are not modified.
    """
    A = absorption_probability(W, alpha, stored_A, train_mask)
    y_train = y_train.copy()
    train_index = np.where(train_mask)[0]
    already_labeled = np.sum(y_train, axis=1)
    print("Additional Label:")
    if not hasattr(t, '__getitem__'):
        t = [t for _ in range(y_train.shape[1])]
    for cls in range(y_train.shape[1]):
        # Absorption score of every node w.r.t. the labeled nodes of `cls`.
        scores = A.dot(y_train[:, cls:cls + 1])
        scores[already_labeled > 0] = 0          # never re-label a labeled node
        # Threshold at the (t[cls]+1)-th largest score and take everything above.
        threshold = (-np.sort(-scores, axis=0))[t[cls]]
        chosen = np.where(scores.flat > threshold)[0]
        train_index = np.hstack([train_index, chosen])
        y_train[chosen, cls] = 1
        correct_label_count(chosen, cls)
    print()
    train_mask = sample_mask(train_index, y_train.shape[0])
    return y_train, train_mask
def selftraining(prediction, t, y_train, train_mask):
    """Self-training: add the most confident predictions to the training set.

    prediction: (n, c) class-score matrix.  ``t`` is either one global
    count of pseudo-labels to add or a per-class list of budgets;
    candidates are ranked by prediction confidence.  Prints, per class,
    how many pseudo-labels agree with the module-level ``all_labels``
    ground truth.  Returns fresh (y_train, train_mask) copies that include
    the pseudo-labeled nodes.

    Fix: ``dtype=np.bool`` used the alias deprecated in NumPy 1.20 and
    removed in 1.24; the builtin ``bool`` is equivalent.
    """
    new_gcn_index = np.argmax(prediction, axis=1)
    confidence = np.max(prediction, axis=1)
    sorted_index = np.argsort(-confidence)

    no_class = y_train.shape[1]  # number of classes
    if hasattr(t, '__getitem__'):
        # Per-class budgets: walk nodes by confidence, filling each class.
        assert len(t) >= no_class
        index = []
        count = [0 for _ in range(no_class)]
        for i in sorted_index:
            for j in range(no_class):
                if new_gcn_index[i] == j and count[j] < t[j] and not train_mask[i]:
                    index.append(i)
                    count[j] += 1
    else:
        index = sorted_index[:t]

    indicator = np.zeros(train_mask.shape, dtype=bool)
    indicator[index] = True
    # Only nodes not already in the training set become pseudo-labels.
    indicator = np.logical_and(np.logical_not(train_mask), indicator)

    prediction = np.zeros(prediction.shape)
    prediction[np.arange(len(new_gcn_index)), new_gcn_index] = 1.0
    prediction[train_mask] = y_train[train_mask]

    correct_labels = np.sum(prediction[indicator] * all_labels[indicator], axis=0)
    count = np.sum(prediction[indicator], axis=0)
    print('Additiona Label:')
    for i, j in zip(correct_labels, count):
        print(int(i), '/', int(j), sep='', end='\t')
    print()

    y_train = np.copy(y_train)
    train_mask = np.copy(train_mask)
    train_mask[indicator] = 1
    y_train[indicator] = prediction[indicator]
    return y_train, train_mask
def lp(adj, alpha, y_train, train_mask, y_test, stored_A=None):
    """Label-propagation baseline: nearest-labeled-node classifier.

    Each node is assigned the label of the training sample with the
    highest absorption probability towards it, then accuracy is measured
    against ``y_test``.  Returns (test_acc, per-class test_acc, one-hot
    prediction matrix).
    """
    # Absorption probabilities restricted to the labeled columns.
    P = absorption_probability(adj, alpha, stored_A=stored_A, column=train_mask)
    P = P[:, train_mask]
    # nearest clssifier
    predicted_labels = np.argmax(P, axis=1)
    # prediction = alpha*P
    # One-hot over *training samples*: row i marks the nearest labeled sample.
    prediction = np.zeros(P.shape)
    prediction[np.arange(P.shape[0]), predicted_labels] = 1
    y = np.sum(train_mask)
    # Row k of this matrix is: zeros if node k is unlabeled, else the
    # one-hot id of its training-sample rank.  np.add.accumulate gives the
    # 1-based rank among labeled nodes; multiplying by train_mask zeroes
    # out unlabeled nodes so they pick the leading all-zero row.
    label_per_sample = np.vstack([np.zeros(y), np.eye(y)])[np.add.accumulate(train_mask) * train_mask]
    # (n_samples, n_classes): maps training-sample id to its class.
    sample2label = label_per_sample.T.dot(y_train)
    prediction = prediction.dot(sample2label)
    test_acc = np.sum(prediction * y_test) / np.sum(y_test)
    test_acc_of_class = np.sum(prediction * y_test, axis=0) / np.sum(y_test, axis=0)
    # print(test_acc, test_acc_of_class)
    return test_acc, test_acc_of_class, prediction
def union_intersection(prediction, t, y_train, train_mask, W, alpha, stored_A, union_or_intersection):
    """Pseudo-label by combining GCN confidence with label propagation.

    Per class, up to ``t`` candidates are picked by GCN confidence and,
    separately, by absorption probability; the sets are then merged by
    union or intersection according to ``union_or_intersection``.  Prints
    per-class agreement with the module-level ``all_labels`` ground truth.
    NOTE: unlike cotraining/selftraining, ``train_mask`` is mutated in
    place; a copy of ``y_train`` is returned alongside it.
    """
    no_class = y_train.shape[1]  # number of class
    # gcn index
    new_labels_gcn = np.argmax(prediction, axis=1)
    confidence = np.max(prediction, axis=1)
    sorted_index = np.argsort(-confidence)
    if not hasattr(t, '__getitem__'):
        t = [t for i in range(no_class)]
    assert len(t) >= no_class
    # Fill each class's budget with the most confident unlabeled nodes.
    count = [0 for i in range(no_class)]
    index_gcn = [[] for i in range(no_class)]
    for i in sorted_index:
        j = new_labels_gcn[i]
        if count[j] < t[j] and not train_mask[i]:
            index_gcn[j].append(i)
            count[j] += 1
    # lp
    A = absorption_probability(W, alpha, stored_A, train_mask)
    train_index = np.where(train_mask)[0]
    already_labeled = np.sum(y_train, axis=1)
    index_lp = []
    for i in range(no_class):
        y = y_train[:, i:i + 1]
        # Total absorption from class i's labeled nodes into each node.
        a = np.sum(A[:, y.flat > 0], axis=1)
        a[already_labeled > 0] = 0
        # a[W.dot(y) > 0] = 0
        gate = (-np.sort(-a, axis=0))[t[i]]
        index = np.where(a.flat > gate)[0]
        index_lp.append(index)
    # print(list(map(len, index_gcn)))
    # print(list(map(len, index_lp)))
    y_train = y_train.copy()
    print("Additional Label:")
    for i in range(no_class):
        assert union_or_intersection in ['union', 'intersection']
        if union_or_intersection == 'union':
            index = list(set(index_gcn[i]) | set(index_lp[i]))
        else:
            index = list(set(index_gcn[i]) & set(index_lp[i]))
        y_train[index, i] = 1
        train_mask[index] = True
        print(np.sum(all_labels[index, i]), '/', len(index), sep='', end='\t')
    return y_train, train_mask
def ap_approximate(adj, features, alpha, k):
    """Approximate absorption-probability smoothing with k propagation steps.

    Iterates ``new = P*new + features`` where P is the l1-row-normalised
    adjacency with self-loops scaled by 1/(alpha+1); this approximates
    ``alpha * (L + alpha*I)^-1 * features`` without a matrix inverse.
    """
    adj = normalize(adj + sp.eye(adj.shape[0]), 'l1', axis=1) / (alpha + 1)
    if sp.issparse(features):
        features = features.toarray()
    smoothed = np.zeros(features.shape)
    for _ in range(k):
        smoothed = adj * smoothed + features
    smoothed *= alpha / (alpha + 1)
    return smoothed
# Ground-truth one-hot label matrix for the whole graph; populated by
# load_data() and read by selftraining/union_intersection/correct_label_count
# to report pseudo-label accuracy.
all_labels = None
# dataset = None
def correct_label_count(indicator, i):
    """Print how many of the selected nodes truly belong to class ``i``.

    Relies on the module-level ground-truth matrix ``all_labels``.

    :param indicator: either a boolean mask over all nodes or an integer
        array of node indices
    :param i: class index to check
    :raises TypeError: if ``indicator`` is neither a boolean mask nor an
        integer index array
    """
    count = np.sum(all_labels[:, i][indicator])
    # BUG FIX: np.bool and np.int were aliases of the builtins that NumPy
    # removed in 1.24; comparing against them raises AttributeError on modern
    # NumPy.  Use dtype-kind checks instead (behaviour is unchanged for both
    # boolean masks and all integer widths).
    if np.issubdtype(indicator.dtype, np.bool_):
        total = np.where(indicator)[0].shape[0]
    elif np.issubdtype(indicator.dtype, np.integer):
        total = indicator.shape[0]
    else:
        raise TypeError('indicator must be of data type np.bool or np.int')
    # print(" for class {}, {}/{} is correct".format(i, count, total))
    print(count, '/', total, sep='', end='\t')
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
    """Build the feed dictionary mapping placeholders to concrete values.

    :param features: feature representation; features[1].shape supplies the
        'num_features_nonzero' entry
    :param support: list of support matrices, one per 'support' placeholder
    :param labels: label matrix for the 'labels' placeholder
    :param labels_mask: mask for the 'labels_mask' placeholder
    :param placeholders: dict of placeholder objects keyed by name
    :return: dict suitable for session.run(feed_dict=...)
    """
    feed = {
        placeholders['labels']: labels,
        placeholders['labels_mask']: labels_mask,
        placeholders['features']: features,
    }
    for idx, piece in enumerate(support):
        feed[placeholders['support'][idx]] = piece
    feed[placeholders['num_features_nonzero']] = features[1].shape
    return feed
def preprocess_model_config(model_config):
    """Validate a model configuration dict in place and derive its name.

    Checks the 'connection' string and 'layer_size' list (skipped for the
    numeric Models 17 and 23, which use no such scheme).  When no 'name' was
    supplied, builds a descriptive one from the configuration.  For 'union',
    'intersection' and 'selftraining' models it also derives the two
    auxiliary GCN sub-configurations and recursively preprocesses them.

    :param model_config: configuration dict; mutated in place
    :raises ValueError: on an illegal 'connection' character, a non-int or
        non-positive 'layer_size' entry, or mismatched lengths
    """
    if model_config['Model'] not in [17, 23]:
        model_config['connection'] = list(model_config['connection'])
        # judge if parameters are legal
        for c in model_config['connection']:
            if c not in ['c', 'd', 'r', 'f', 'C']:
                raise ValueError(
                    'connection string specified by --connection can only contain "c", "d", "r", "f", "C" but "{}" found'.format(
                        c))
        for i in model_config['layer_size']:
            if not isinstance(i, int):
                raise ValueError('layer_size should be a list of int, but found {}'.format(model_config['layer_size']))
            if i <= 0:
                # BUG FIX: the original applied '%' to a '{}' template
                # ('... {}' % i), which raises TypeError instead of the
                # intended ValueError with the offending value.
                raise ValueError('layer_size must be greater than 0, but found {}'.format(i))
        if not len(model_config['connection']) == len(model_config['layer_size']) + 1:
            raise ValueError('length of connection string should be equal to length of layer_size list plus 1')
    # Generate a name only when none was supplied by the user.
    if not model_config['name']:
        model_name = str(model_config['Model'])
        if model_config['Model'] != 'lp':
            # Interleave connection characters with layer sizes, e.g. 'c16d'.
            model_name += '_' + model_config['connection'][0]
            for char, size in \
                    zip(model_config['connection'][1:], model_config['layer_size']):
                model_name += str(size) + char
        # Convolution-variant specific suffixes.
        if model_config['conv'] == 'cheby':
            model_name += '_cheby' + str(model_config['max_degree'])
        elif model_config['conv'] == 'taubin':
            model_name += '_conv_taubin' + str(model_config['taubin_lambda']) \
                          + '_' + str(model_config['taubin_mu']) \
                          + '_' + str(model_config['taubin_repeat'])
        elif model_config['conv'] == 'test21':
            model_name += '_' + 'conv_test21' + '_' + str(model_config['alpha']) + '_' + str(model_config['beta'])
        elif model_config['conv'] == 'gcn_unnorm':
            model_name += '_' + 'gcn_unnorm'
        elif model_config['conv'] == 'gcn_noloop':
            model_name += '_' + 'gcn_noloop'
        if model_config['validate']:
            model_name += '_validate'
        if model_config['Model'] == 'cotraining':
            model_name += '_alpha_' + str(model_config['alpha'])
        if model_config['Model'] in ['union', 'intersection', 'lp']:
            model_name += '_alpha_' + str(model_config['alpha'])
        if model_config['Model'] in ['union', 'intersection', 'selftraining']:
            # Sub-model used to add pseudo-labels: a plain GCN with the same
            # settings, stripped of any nested sub-model entries.
            Model_to_add_label = copy.deepcopy(model_config)
            if 'Model_to_add_label' in Model_to_add_label:
                del Model_to_add_label['Model_to_add_label']
            if 'Model_to_predict' in Model_to_add_label:
                del Model_to_add_label['Model_to_predict']
            Model_to_add_label.update({'Model': 'GCN'})
            model_config['Model_to_add_label'] = Model_to_add_label
            preprocess_model_config(model_config['Model_to_add_label'])
            # Sub-model used for the final prediction, derived the same way.
            Model_to_predict = copy.deepcopy(model_config)
            if 'Model_to_add_label' in Model_to_predict:
                del Model_to_predict['Model_to_add_label']
            if 'Model_to_predict' in Model_to_predict:
                del Model_to_predict['Model_to_predict']
            Model_to_predict.update({'Model': 'GCN'})
            model_config['Model_to_predict'] = Model_to_predict
            preprocess_model_config(model_config['Model_to_predict'])
        model_config['name'] = model_name
if __name__ == '__main__':
pass | [
"sklearn.feature_extraction.text.TfidfTransformer",
"numpy.hstack",
"numpy.logical_not",
"numpy.argsort",
"numpy.array",
"copy.deepcopy",
"scipy.sparse.isspmatrix_coo",
"numpy.arange",
"networkx.from_dict_of_lists",
"numpy.savez",
"scipy.sparse.eye",
"scipy.sparse.linalg.inv",
"numpy.where",... | [((593, 696), 'numpy.savez', 'np.savez', (['filename'], {'data': 'array.data', 'indices': 'array.indices', 'indptr': 'array.indptr', 'shape': 'array.shape'}), '(filename, data=array.data, indices=array.indices, indptr=array.\n indptr, shape=array.shape)\n', (601, 696), True, 'import numpy as np\n'), ((751, 768), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (758, 768), True, 'import numpy as np\n'), ((780, 876), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (["(loader['data'], loader['indices'], loader['indptr'])"], {'shape': "loader['shape']"}), "((loader['data'], loader['indices'], loader['indptr']), shape=\n loader['shape'])\n", (793, 876), True, 'import scipy.sparse as sp\n'), ((1124, 1135), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (1132, 1135), True, 'import numpy as np\n'), ((1165, 1194), 'numpy.array', 'np.array', (['mask'], {'dtype': 'np.bool'}), '(mask, dtype=np.bool)\n', (1173, 1194), True, 'import numpy as np\n'), ((1465, 1488), 'numpy.sum', 'np.sum', (['y_train'], {'axis': '(0)'}), '(y_train, axis=0)\n', (1471, 1488), True, 'import numpy as np\n'), ((1505, 1524), 'numpy.sum', 'np.sum', (['label_count'], {}), '(label_count)\n', (1511, 1524), True, 'import numpy as np\n'), ((9678, 9700), 'numpy.zeros', 'np.zeros', (['labels.shape'], {}), '(labels.shape)\n', (9686, 9700), True, 'import numpy as np\n'), ((9713, 9735), 'numpy.zeros', 'np.zeros', (['labels.shape'], {}), '(labels.shape)\n', (9721, 9735), True, 'import numpy as np\n'), ((9749, 9771), 'numpy.zeros', 'np.zeros', (['labels.shape'], {}), '(labels.shape)\n', (9757, 9771), True, 'import numpy as np\n'), ((10566, 10599), 'numpy.sum', 'np.sum', (['labels[idx_train]'], {'axis': '(0)'}), '(labels[idx_train], axis=0)\n', (10572, 10599), True, 'import numpy as np\n'), ((15737, 15754), 'numpy.zeros', 'np.zeros', (['B.shape'], {}), '(B.shape)\n', (15745, 15754), True, 'import numpy as np\n'), ((16341, 16364), 'numpy.sum', 'np.sum', (['y_train'], {'axis': '(1)'}), 
'(y_train, axis=1)\n', (16347, 16364), True, 'import numpy as np\n'), ((17472, 17501), 'numpy.argmax', 'np.argmax', (['prediction'], {'axis': '(1)'}), '(prediction, axis=1)\n', (17481, 17501), True, 'import numpy as np\n'), ((17519, 17545), 'numpy.max', 'np.max', (['prediction'], {'axis': '(1)'}), '(prediction, axis=1)\n', (17525, 17545), True, 'import numpy as np\n'), ((17565, 17588), 'numpy.argsort', 'np.argsort', (['(-confidence)'], {}), '(-confidence)\n', (17575, 17588), True, 'import numpy as np\n'), ((18055, 18096), 'numpy.zeros', 'np.zeros', (['train_mask.shape'], {'dtype': 'np.bool'}), '(train_mask.shape, dtype=np.bool)\n', (18063, 18096), True, 'import numpy as np\n'), ((18213, 18239), 'numpy.zeros', 'np.zeros', (['prediction.shape'], {}), '(prediction.shape)\n', (18221, 18239), True, 'import numpy as np\n'), ((18378, 18439), 'numpy.sum', 'np.sum', (['(prediction[indicator] * all_labels[indicator])'], {'axis': '(0)'}), '(prediction[indicator] * all_labels[indicator], axis=0)\n', (18384, 18439), True, 'import numpy as np\n'), ((18452, 18489), 'numpy.sum', 'np.sum', (['prediction[indicator]'], {'axis': '(0)'}), '(prediction[indicator], axis=0)\n', (18458, 18489), True, 'import numpy as np\n'), ((18644, 18660), 'numpy.copy', 'np.copy', (['y_train'], {}), '(y_train)\n', (18651, 18660), True, 'import numpy as np\n'), ((18678, 18697), 'numpy.copy', 'np.copy', (['train_mask'], {}), '(train_mask)\n', (18685, 18697), True, 'import numpy as np\n'), ((19026, 19046), 'numpy.argmax', 'np.argmax', (['P'], {'axis': '(1)'}), '(P, axis=1)\n', (19035, 19046), True, 'import numpy as np\n'), ((19091, 19108), 'numpy.zeros', 'np.zeros', (['P.shape'], {}), '(P.shape)\n', (19099, 19108), True, 'import numpy as np\n'), ((19178, 19196), 'numpy.sum', 'np.sum', (['train_mask'], {}), '(train_mask)\n', (19184, 19196), True, 'import numpy as np\n'), ((19829, 19858), 'numpy.argmax', 'np.argmax', (['prediction'], {'axis': '(1)'}), '(prediction, axis=1)\n', (19838, 19858), True, 'import 
numpy as np\n'), ((19876, 19902), 'numpy.max', 'np.max', (['prediction'], {'axis': '(1)'}), '(prediction, axis=1)\n', (19882, 19902), True, 'import numpy as np\n'), ((19922, 19945), 'numpy.argsort', 'np.argsort', (['(-confidence)'], {}), '(-confidence)\n', (19932, 19945), True, 'import numpy as np\n'), ((20449, 20472), 'numpy.sum', 'np.sum', (['y_train'], {'axis': '(1)'}), '(y_train, axis=1)\n', (20455, 20472), True, 'import numpy as np\n'), ((21661, 21682), 'scipy.sparse.issparse', 'sp.issparse', (['features'], {}), '(features)\n', (21672, 21682), True, 'import scipy.sparse as sp\n'), ((21740, 21764), 'numpy.zeros', 'np.zeros', (['features.shape'], {}), '(features.shape)\n', (21748, 21764), True, 'import numpy as np\n'), ((21991, 22026), 'numpy.sum', 'np.sum', (['all_labels[:, i][indicator]'], {}), '(all_labels[:, i][indicator])\n', (21997, 22026), True, 'import numpy as np\n'), ((3496, 3515), 'numpy.array', 'np.array', (['[triplet]'], {}), '([triplet])\n', (3504, 3515), True, 'import numpy as np\n'), ((4732, 4757), 'numpy.sort', 'np.sort', (['test_idx_reorder'], {}), '(test_idx_reorder)\n', (4739, 4757), True, 'import numpy as np\n'), ((5565, 5586), 'numpy.vstack', 'np.vstack', (['(ally, ty)'], {}), '((ally, ty))\n', (5574, 5586), True, 'import numpy as np\n'), ((7989, 8011), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (8006, 8011), True, 'import numpy as np\n'), ((9299, 9332), 'numpy.sum', 'np.sum', (['labels[idx_train]'], {'axis': '(0)'}), '(labels[idx_train], axis=0)\n', (9305, 9332), True, 'import numpy as np\n'), ((11561, 11576), 'scipy.sparse.diags', 'sp.diags', (['r_inv'], {}), '(r_inv)\n', (11569, 11576), True, 'import scipy.sparse as sp\n'), ((12191, 12209), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj'], {}), '(adj)\n', (12204, 12209), True, 'import scipy.sparse as sp\n'), ((12520, 12540), 'scipy.sparse.diags', 'sp.diags', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (12528, 12540), True, 'import scipy.sparse as sp\n'), 
((13465, 13485), 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (13471, 13485), True, 'import scipy.sparse as sp\n'), ((13679, 13699), 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (13685, 13699), True, 'import scipy.sparse as sp\n'), ((13816, 13852), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['scaled_lap'], {'copy': '(True)'}), '(scaled_lap, copy=True)\n', (13829, 13852), True, 'import scipy.sparse as sp\n'), ((16295, 16315), 'numpy.where', 'np.where', (['train_mask'], {}), '(train_mask)\n', (16303, 16315), True, 'import numpy as np\n'), ((17193, 17224), 'numpy.hstack', 'np.hstack', (['[train_index, index]'], {}), '([train_index, index])\n', (17202, 17224), True, 'import numpy as np\n'), ((18156, 18182), 'numpy.logical_not', 'np.logical_not', (['train_mask'], {}), '(train_mask)\n', (18170, 18182), True, 'import numpy as np\n'), ((19413, 19440), 'numpy.sum', 'np.sum', (['(prediction * y_test)'], {}), '(prediction * y_test)\n', (19419, 19440), True, 'import numpy as np\n'), ((19443, 19457), 'numpy.sum', 'np.sum', (['y_test'], {}), '(y_test)\n', (19449, 19457), True, 'import numpy as np\n'), ((19482, 19517), 'numpy.sum', 'np.sum', (['(prediction * y_test)'], {'axis': '(0)'}), '(prediction * y_test, axis=0)\n', (19488, 19517), True, 'import numpy as np\n'), ((19520, 19542), 'numpy.sum', 'np.sum', (['y_test'], {'axis': '(0)'}), '(y_test, axis=0)\n', (19526, 19542), True, 'import numpy as np\n'), ((20403, 20423), 'numpy.where', 'np.where', (['train_mask'], {}), '(train_mask)\n', (20411, 20423), True, 'import numpy as np\n'), ((20565, 20597), 'numpy.sum', 'np.sum', (['A[:, y.flat > 0]'], {'axis': '(1)'}), '(A[:, y.flat > 0], axis=1)\n', (20571, 20597), True, 'import numpy as np\n'), ((4587, 4615), 'networkx.from_dict_of_lists', 'nx.from_dict_of_lists', (['graph'], {}), '(graph)\n', (4608, 4615), True, 'import networkx as nx\n'), ((5846, 5897), 'numpy.setdiff1d', 'np.setdiff1d', (['test_idx_range_full', 
'test_idx_reorder'], {}), '(test_idx_range_full, test_idx_reorder)\n', (5858, 5897), True, 'import numpy as np\n'), ((6386, 6407), 'numpy.vstack', 'np.vstack', (['(ally, ty)'], {}), '((ally, ty))\n', (6395, 6407), True, 'import numpy as np\n'), ((10811, 10832), 'scipy.sparse.isspmatrix_coo', 'sp.isspmatrix_coo', (['mx'], {}), '(mx)\n', (10828, 10832), True, 'import scipy.sparse as sp\n'), ((11021, 11052), 'numpy.array', 'np.array', (['shape'], {'dtype': 'np.int64'}), '(shape, dtype=np.int64)\n', (11029, 11052), True, 'import numpy as np\n'), ((11519, 11534), 'numpy.isinf', 'np.isinf', (['r_inv'], {}), '(r_inv)\n', (11527, 11534), True, 'import numpy as np\n'), ((11737, 11815), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'norm': 'None', 'use_idf': '(True)', 'smooth_idf': '(True)', 'sublinear_tf': '(False)'}), '(norm=None, use_idf=True, smooth_idf=True, sublinear_tf=False)\n', (11753, 11815), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((12468, 12488), 'numpy.isinf', 'np.isinf', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (12476, 12488), True, 'import numpy as np\n'), ((12786, 12801), 'scipy.sparse.diags', 'sp.diags', (['d_inv'], {}), '(d_inv)\n', (12794, 12801), True, 'import scipy.sparse as sp\n'), ((13064, 13084), 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (13070, 13084), True, 'import scipy.sparse as sp\n'), ((14329, 14346), 'numpy.zeros', 'np.zeros', (['W.shape'], {}), '(W.shape)\n', (14337, 14346), True, 'import numpy as np\n'), ((14771, 14787), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['L'], {}), '(L)\n', (14784, 14787), True, 'import scipy.sparse as sp\n'), ((16786, 16809), 'numpy.where', 'np.where', (['(a.flat > gate)'], {}), '(a.flat > gate)\n', (16794, 16809), True, 'import numpy as np\n'), ((19124, 19145), 'numpy.arange', 'np.arange', (['P.shape[0]'], {}), '(P.shape[0])\n', (19133, 19145), True, 'import numpy as np\n'), ((19256, 19285), 'numpy.add.accumulate', 
'np.add.accumulate', (['train_mask'], {}), '(train_mask)\n', (19273, 19285), True, 'import numpy as np\n'), ((20723, 20746), 'numpy.where', 'np.where', (['(a.flat > gate)'], {}), '(a.flat > gate)\n', (20731, 20746), True, 'import numpy as np\n'), ((21278, 21306), 'numpy.sum', 'np.sum', (['all_labels[index, i]'], {}), '(all_labels[index, i])\n', (21284, 21306), True, 'import numpy as np\n'), ((26857, 26884), 'copy.deepcopy', 'copy.deepcopy', (['model_config'], {}), '(model_config)\n', (26870, 26884), False, 'import copy\n'), ((27349, 27376), 'copy.deepcopy', 'copy.deepcopy', (['model_config'], {}), '(model_config)\n', (27362, 27376), False, 'import copy\n'), ((1563, 1658), 'numpy.concatenate', 'np.concatenate', (['(index_nonzero[0][np.newaxis, :], index_nonzero[1][np.newaxis, :])'], {'axis': '(0)'}), '((index_nonzero[0][np.newaxis, :], index_nonzero[1][np.\n newaxis, :]), axis=0)\n', (1577, 1658), True, 'import numpy as np\n'), ((3964, 3985), 'numpy.arange', 'np.arange', (['l.shape[0]'], {}), '(l.shape[0])\n', (3973, 3985), True, 'import numpy as np\n'), ((5421, 5442), 'scipy.sparse.vstack', 'sp.vstack', (['(allx, tx)'], {}), '((allx, tx))\n', (5430, 5442), True, 'import scipy.sparse as sp\n'), ((7056, 7106), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['features_extended'], {'dtype': 'np.float32'}), '(features_extended, dtype=np.float32)\n', (7069, 7106), True, 'import scipy.sparse as sp\n'), ((7384, 7412), 'networkx.from_dict_of_lists', 'nx.from_dict_of_lists', (['graph'], {}), '(graph)\n', (7405, 7412), True, 'import networkx as nx\n'), ((10879, 10906), 'numpy.vstack', 'np.vstack', (['(mx.row, mx.col)'], {}), '((mx.row, mx.col))\n', (10888, 10906), True, 'import numpy as np\n'), ((11474, 11494), 'numpy.power', 'np.power', (['rowsum', '(-1)'], {}), '(rowsum, -1)\n', (11482, 11494), True, 'import numpy as np\n'), ((12416, 12438), 'numpy.power', 'np.power', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (12424, 12438), True, 'import numpy as np\n'), ((12744, 
12759), 'numpy.isinf', 'np.isinf', (['d_inv'], {}), '(d_inv)\n', (12752, 12759), True, 'import numpy as np\n'), ((14670, 14699), 'scipy.sparse.diags', 'sp.diags', (['D'], {'dtype': 'np.float32'}), '(D, dtype=np.float32)\n', (14678, 14699), True, 'import scipy.sparse as sp\n'), ((14725, 14758), 'scipy.sparse.eye', 'sp.eye', (['W.shape[0]'], {'dtype': 'L.dtype'}), '(W.shape[0], dtype=L.dtype)\n', (14731, 14758), True, 'import scipy.sparse as sp\n'), ((14870, 14887), 'numpy.zeros', 'np.zeros', (['W.shape'], {}), '(W.shape)\n', (14878, 14887), True, 'import numpy as np\n'), ((16743, 16762), 'numpy.sort', 'np.sort', (['(-a)'], {'axis': '(0)'}), '(-a, axis=0)\n', (16750, 16762), True, 'import numpy as np\n'), ((19231, 19242), 'numpy.zeros', 'np.zeros', (['y'], {}), '(y)\n', (19239, 19242), True, 'import numpy as np\n'), ((19244, 19253), 'numpy.eye', 'np.eye', (['y'], {}), '(y)\n', (19250, 19253), True, 'import numpy as np\n'), ((20680, 20699), 'numpy.sort', 'np.sort', (['(-a)'], {'axis': '(0)'}), '(-a, axis=0)\n', (20687, 20699), True, 'import numpy as np\n'), ((21447, 21467), 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (21453, 21467), True, 'import scipy.sparse as sp\n'), ((3922, 3944), 'numpy.max', 'np.max', (["data['labels']"], {}), "(data['labels'])\n", (3928, 3944), True, 'import numpy as np\n'), ((6263, 6284), 'scipy.sparse.vstack', 'sp.vstack', (['(allx, tx)'], {}), '((allx, tx))\n', (6272, 6284), True, 'import scipy.sparse as sp\n'), ((11937, 11962), 'scipy.sparse.eye', 'sp.eye', (['features.shape[0]'], {}), '(features.shape[0])\n', (11943, 11962), True, 'import scipy.sparse as sp\n'), ((12697, 12719), 'numpy.power', 'np.power', (['rowsum', '(-1.0)'], {}), '(rowsum, -1.0)\n', (12705, 12719), True, 'import numpy as np\n'), ((22078, 22097), 'numpy.where', 'np.where', (['indicator'], {}), '(indicator)\n', (22086, 22097), True, 'import numpy as np\n'), ((4393, 4423), 'pickle.load', 'pkl.load', (['f'], {'encoding': '"""latin1"""'}), "(f, 
encoding='latin1')\n", (4401, 4423), True, 'import pickle as pkl\n'), ((4482, 4493), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (4490, 4493), True, 'import pickle as pkl\n'), ((15165, 15179), 'scipy.sparse.linalg.inv', 'slinalg.inv', (['L'], {}), '(L)\n', (15176, 15179), True, 'import scipy.sparse.linalg as slinalg\n'), ((14982, 15017), 'numpy.eye', 'np.eye', (['L.shape[0]'], {'dtype': '"""float32"""'}), "(L.shape[0], dtype='float32')\n", (14988, 15017), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# -*- coding: utf8 -*-
# Copyright (c) 2021 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example: General Molmer-Sorensen gate
Please visit https://quanlse.baidu.com/#/doc/tutorial-general-MS-gate for more details about this example.
"""
import numpy as np
from math import pi
from Quanlse.Utils.Functions import basis
from Quanlse.Utils.Functions import computationalBasisList
from Quanlse.Utils.Plot import plotBarGraph
from Quanlse.Utils import Plot
from Quanlse import Define
from Quanlse.remoteOptimizer import remoteIonGeneralMS as runGeneralIonMS
# Your token for the Quanlse cloud service:
# Please visit http://quantum-hub.baidu.com
Define.hubToken = ''
# --------------------------------
# Define the system information.
# --------------------------------
# Number of ions (qubits) in the trap.
ionNumber = 7
# Ion mass in atomic mass units (171 corresponds to Yb-171 ions).
mass = 171
# Trap potential (angular) frequencies in the XY (radial) and Z (axial) directions.
omegaXY = 2 * pi * 2e6
omegaZ = 2 * pi * 0.2e6
# Phonon mode that mediates the entangling interaction.
phononMode = "transverse"
args1 = (ionNumber, mass, omegaXY, omegaZ, phononMode)
# --------------------------------
# Define the gate information.
# --------------------------------
# Total gate time (the pulse plot below labels time in microseconds).
tg = 200
# The laser detuning, usually related with gate time, but can be tuned around 2 * pi / tg.
mu = 2 * pi / tg
# The pulse sequence slice number, usually N > 3 * ionNumber.
N = 35
# Sampling period.
dt = tg / N
# Combine the parameters in a tuple.
args2 = (N, dt, mu)
# Ion pairs to entangle simultaneously with the general Molmer-Sorensen gate.
gatePair = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]
# ----------------------------------------
# Run the simulation and show the results.
# ----------------------------------------
# Run the simulation on the remote optimizer (requires a valid hubToken).
res, ureal = runGeneralIonMS(gatePair, args1=args1, args2=args2)
pulse = res['pulse_list']
# Choose the pulse sequence of one ion pair to display.
ionpair = gatePair.index([0, 1])
# Plot the laser pulse.
Plot.plotPulse([np.arange(N) * dt * (N+1) / N], [pulse[ionpair]],
               title=[f'Pulse for ionpair={gatePair[ionpair]} '],
               xLabel=r'Time ($\mu$s)', yLabel=['Rabi frequency (a.u)'], color=['blue'])
# Print the result of simulation.
print(ureal)
# Print the infidelity of the general MS gate.
print(f"The parallel Molmer-Sorensen gate infidelity:\n {res['infidelity']}")
print(f"The pulse residual error:\n {res['laser_residual_error']}")
# Plot the population of the computational basis states of the final state.
finalState = (ureal @ np.array(basis(16, 0))).T[0]
# abs(state ** 2) equals |state|^2 for complex amplitudes, i.e. the population.
population = [abs(state ** 2) for state in finalState]
# NOTE(review): this assignment shadows the imported `basis` function; it is
# only safe because `basis(16, 0)` has already been called above.
basis = computationalBasisList(4, 2)
plotBarGraph(basis, population, "Population of a 4-Qubits GHZ state generated by General MS gate",
             "Computational Basis", "Population")
| [
"Quanlse.Utils.Functions.computationalBasisList",
"Quanlse.remoteOptimizer.remoteIonGeneralMS",
"Quanlse.Utils.Plot.plotBarGraph",
"Quanlse.Utils.Functions.basis",
"numpy.arange"
] | [((2250, 2301), 'Quanlse.remoteOptimizer.remoteIonGeneralMS', 'runGeneralIonMS', (['gatePair'], {'args1': 'args1', 'args2': 'args2'}), '(gatePair, args1=args1, args2=args2)\n', (2265, 2301), True, 'from Quanlse.remoteOptimizer import remoteIonGeneralMS as runGeneralIonMS\n'), ((3025, 3053), 'Quanlse.Utils.Functions.computationalBasisList', 'computationalBasisList', (['(4)', '(2)'], {}), '(4, 2)\n', (3047, 3053), False, 'from Quanlse.Utils.Functions import computationalBasisList\n'), ((3054, 3197), 'Quanlse.Utils.Plot.plotBarGraph', 'plotBarGraph', (['basis', 'population', '"""Population of a 4-Qubits GHZ state generated by General MS gate"""', '"""Computational Basis"""', '"""Population"""'], {}), "(basis, population,\n 'Population of a 4-Qubits GHZ state generated by General MS gate',\n 'Computational Basis', 'Population')\n", (3066, 3197), False, 'from Quanlse.Utils.Plot import plotBarGraph\n'), ((2942, 2954), 'Quanlse.Utils.Functions.basis', 'basis', (['(16)', '(0)'], {}), '(16, 0)\n', (2947, 2954), False, 'from Quanlse.Utils.Functions import basis\n'), ((2444, 2456), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2453, 2456), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 17 May 2019
@author: <NAME>
"""
import os
import torch
import numpy as np
from torch import nn, optim
import matplotlib.pyplot as plt
class MultiLayerPerceptron:
    """
    Multi Layer Perceptron classifier built on a fixed PyTorch stack
    (input -> 128 -> 64 -> 32 -> 16 -> num_classes).
    """

    def __init__(self, validation_rate, num_classes):
        """
        Init
        :param validation_rate: validation rate (stored; not used by the
            current training loop)
        :param num_classes: number of classes
        """
        self.validation_rate = validation_rate
        self.num_classes = num_classes

    def training(self, train_data, train_label, visualize=None):
        """
        Training for Multi Layer Perceptron
        :param train_data: training data (sequence of feature vectors)
        :param train_label: train label (integer class indices)
        :param visualize: True to plot the training-loss history
        :return model: trained model
        """
        # Convert training data to a float tensor
        train_data = torch.tensor(np.array(train_data), dtype=torch.float32)
        # Class indices as a long tensor, as required by CrossEntropyLoss
        # (the original name "onehot_train_label" was a misnomer).
        train_label = torch.tensor(np.array(train_label), dtype=torch.long)
        # Define the model
        model = nn.Sequential(
            nn.Linear(train_data.shape[1], 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.ReLU(),
            nn.Linear(32, 16),
            nn.Linear(16, self.num_classes)
        )
        # Softmax cross-entropy loss (expects raw logits)
        loss_fn = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters())
        # Log losses per epoch
        train_loss_history = []
        for epoch in range(600):
            # Delete gradient value calculated in previous epoch
            optimizer.zero_grad()
            predicted_label = model(train_data)
            loss = loss_fn(predicted_label, train_label)
            loss.backward()
            # Update parameters
            optimizer.step()
            train_loss_history.append(loss.item())
        # Visualize losses.  BUG FIX: the original also drew an empty
        # "model accuracy" figure (no data was ever collected) and labelled
        # the loss curve with a non-existent 'validation' series.
        if visualize is True:
            plt.plot(train_loss_history)
            plt.title('model loss')
            plt.ylabel('loss')
            plt.xlabel('epoch')
            plt.legend(['train'], loc='upper left')
            plt.show()
        return model

    def load_model(self, model_file_name: str):
        """
        Load trained model
        :param model_file_name: name of model file to load
        :return model: trained model
        """
        # Load model if it exists
        assert os.path.exists(model_file_name), "Given model file does not exist"
        return torch.load(model_file_name, map_location="cpu")

    def save_model(self, model, output_directory: str):
        """
        Save the model weights (state_dict) to <output_directory>/mlp.prm
        :param model: trained model
        :param output_directory: output directory path
        """
        torch.save(model.state_dict(), os.path.join(output_directory, "mlp.prm"), pickle_protocol=4)

    def test(self, model, test_data, test_label, is_classification=True):
        """
        Evaluate classification accuracy for the given dataset.
        :param model: trained model
        :param test_data: test data
        :param test_label: test label (integer class indices)
        :param is_classification: unused; kept for backward compatibility
        :return: accuracy in [0, 1]
        """
        labels = torch.tensor(np.array(test_label), dtype=torch.long)
        prediction = model(torch.tensor(np.array(test_data), dtype=torch.float32))
        # BUG FIX: the original computed torch.max(prediction, 1) twice and
        # discarded the first result; compute it once.
        predicted_classes = torch.max(prediction, 1)[1]
        return (predicted_classes == labels).sum().item() / len(test_label)

    def predict(self, model, target_data):
        """
        Run the model on unlabeled data and return its raw output scores.
        :param model: trained model
        :param target_data: target data without label
        :return: prediction tensor (one row of scores per sample)
        """
        return model(torch.tensor(np.array(target_data), dtype=torch.float32))
| [
"os.path.exists",
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.ylabel",
"torch.load",
"torch.max",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.join",
"numpy.array",
"torch.nn.Linear",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.... | [((1460, 1481), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1479, 1481), False, 'from torch import nn, optim\n'), ((3043, 3074), 'os.path.exists', 'os.path.exists', (['model_file_name'], {}), '(model_file_name)\n', (3057, 3074), False, 'import os\n'), ((3125, 3172), 'torch.load', 'torch.load', (['model_file_name'], {'map_location': '"""cpu"""'}), "(model_file_name, map_location='cpu')\n", (3135, 3172), False, 'import torch\n'), ((4040, 4064), 'torch.max', 'torch.max', (['prediction', '(1)'], {}), '(prediction, 1)\n', (4049, 4064), False, 'import torch\n'), ((932, 952), 'numpy.array', 'np.array', (['train_data'], {}), '(train_data)\n', (940, 952), True, 'import numpy as np\n'), ((1043, 1064), 'numpy.array', 'np.array', (['train_label'], {}), '(train_label)\n', (1051, 1064), True, 'import numpy as np\n'), ((1155, 1190), 'torch.nn.Linear', 'nn.Linear', (['train_data.shape[1]', '(128)'], {}), '(train_data.shape[1], 128)\n', (1164, 1190), False, 'from torch import nn, optim\n'), ((1204, 1213), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1211, 1213), False, 'from torch import nn, optim\n'), ((1227, 1245), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (1236, 1245), False, 'from torch import nn, optim\n'), ((1259, 1268), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1266, 1268), False, 'from torch import nn, optim\n'), ((1282, 1299), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(32)'], {}), '(64, 32)\n', (1291, 1299), False, 'from torch import nn, optim\n'), ((1313, 1322), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1320, 1322), False, 'from torch import nn, optim\n'), ((1336, 1353), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(16)'], {}), '(32, 16)\n', (1345, 1353), False, 'from torch import nn, optim\n'), ((1367, 1398), 'torch.nn.Linear', 'nn.Linear', (['(16)', 'self.num_classes'], {}), '(16, self.num_classes)\n', (1376, 1398), False, 'from torch import nn, optim\n'), ((2295, 2322), 
'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (2304, 2322), True, 'import matplotlib.pyplot as plt\n'), ((2335, 2357), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (2345, 2357), True, 'import matplotlib.pyplot as plt\n'), ((2370, 2389), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (2380, 2389), True, 'import matplotlib.pyplot as plt\n'), ((2402, 2455), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'validation']"], {'loc': '"""upper left"""'}), "(['train', 'validation'], loc='upper left')\n", (2412, 2455), True, 'import matplotlib.pyplot as plt\n'), ((2468, 2478), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2476, 2478), True, 'import matplotlib.pyplot as plt\n'), ((2511, 2539), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss_history'], {}), '(train_loss_history)\n', (2519, 2539), True, 'import matplotlib.pyplot as plt\n'), ((2600, 2623), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (2609, 2623), True, 'import matplotlib.pyplot as plt\n'), ((2636, 2654), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (2646, 2654), True, 'import matplotlib.pyplot as plt\n'), ((2667, 2686), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (2677, 2686), True, 'import matplotlib.pyplot as plt\n'), ((2699, 2752), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'validation']"], {'loc': '"""upper left"""'}), "(['train', 'validation'], loc='upper left')\n", (2709, 2752), True, 'import matplotlib.pyplot as plt\n'), ((2765, 2775), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2773, 2775), True, 'import matplotlib.pyplot as plt\n'), ((3405, 3446), 'os.path.join', 'os.path.join', (['output_directory', '"""mlp.prm"""'], {}), "(output_directory, 'mlp.prm')\n", (3417, 3446), False, 'import os\n'), ((3858, 3878), 'numpy.array', 
'np.array', (['test_label'], {}), '(test_label)\n', (3866, 3878), True, 'import numpy as np\n'), ((4139, 4163), 'torch.max', 'torch.max', (['prediction', '(1)'], {}), '(prediction, 1)\n', (4148, 4163), False, 'import torch\n'), ((3965, 3984), 'numpy.array', 'np.array', (['test_data'], {}), '(test_data)\n', (3973, 3984), True, 'import numpy as np\n'), ((4652, 4673), 'numpy.array', 'np.array', (['target_data'], {}), '(target_data)\n', (4660, 4673), True, 'import numpy as np\n')] |
import numpy as np
import torch
from pytorchrl.distributions.base import Distribution
from pytorchrl.misc.tensor_utils import constant
class DiagonalGaussian(Distribution):
    """Batch of independent diagonal-covariance Gaussian distributions.

    Each row of ``means`` / ``log_stds`` parameterizes one multivariate
    normal whose covariance matrix is diagonal, so every coordinate is
    independent.
    """
    def __init__(self, means, log_stds):
        """
        Parameters
        ----------
        means (Variable): per-sample mean vectors.
        log_stds (Variable): per-sample log standard deviations.
        """
        self.means = means
        self.log_stds = log_stds
        # Dimensionality of the action space (last axis of the means).
        self.dim = self.means.size()[-1]

    @classmethod
    def from_dict(cls, means, log_stds):
        """Alternate constructor mirroring ``__init__``.

        Parameters
        ----------
        means (Variable): per-sample mean vectors.
        log_stds (Variable): per-sample log standard deviations.
        """
        return cls(means=means, log_stds=log_stds)

    def entropy(self):
        """Per-sample entropy, computed on the host with numpy.

        For a 1-D Gaussian, H = 1/2 log(2*pi*e*sigma^2)
                              = log(sigma) + log(sqrt(2*pi*e)),
        and a diagonal Gaussian's entropy is the sum over coordinates.
        """
        return np.sum(self.log_stds.data.numpy() + np.log(np.sqrt(2 * np.pi * np.e)), axis=-1)

    def log_likelihood(self, a):
        """Log density of actions ``a`` under this distribution.

        Parameters
        ----------
        a (Variable): actions; cast to float before standardizing.

        Returns
        -------
        log_prob (Variable): per-sample log likelihood.
        """
        action = a.type(torch.FloatTensor)
        # Standardize: z = (a - mu) / sigma.
        standardized = (action - self.means) / (self.log_stds.exp())
        # log N(a | mu, sigma) summed over coordinates:
        #   -sum(log sigma) - 1/2 sum(z^2) - d/2 log(2*pi)
        log_prob = - self.log_stds.sum(-1) - \
            constant(0.5) * standardized.pow(2).sum(-1) - \
            constant(0.5) * constant(float(self.dim)) * constant(float(np.log(2 * np.pi)))
        return log_prob

    def kl_div(self, other):
        """KL divergence D_KL(self || other) for diagonal Gaussians.

        For diagonal covariances the general multivariate-normal formula
        simplifies (see the Wikipedia KL-divergence article) to:

            sum( ((mu_1 - mu_2)^2 + var_1 - var_2) / (2 * var_2)
                 + (log_std_2 - log_std_1), axis=-1 )

        with var_i = exp(2 * log_std_i). A small epsilon keeps the
        denominator away from zero.

        Parameters
        ----------
        other (DiagonalGaussian): distribution to compare against.

        Returns
        -------
        kl_div (Variable): per-sample divergence.
        """
        # Constants are wrapped so they can combine with Variables.
        var_self = (constant(2.0) * self.log_stds).exp()
        var_other = (constant(2.0) * other.log_stds).exp()
        numerator = (self.means - other.means).pow(2) + \
            var_self - var_other
        denominator = constant(2.0) * var_other + constant(1e-8)
        return (numerator / denominator + other.log_stds - self.log_stds).sum(-1)
| [
"pytorchrl.misc.tensor_utils.constant",
"numpy.log",
"numpy.sqrt"
] | [((4057, 4072), 'pytorchrl.misc.tensor_utils.constant', 'constant', (['(1e-08)'], {}), '(1e-08)\n', (4065, 4072), False, 'from pytorchrl.misc.tensor_utils import constant\n'), ((4024, 4037), 'pytorchrl.misc.tensor_utils.constant', 'constant', (['(2.0)'], {}), '(2.0)\n', (4032, 4037), False, 'from pytorchrl.misc.tensor_utils import constant\n'), ((1082, 1107), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * np.e)'], {}), '(2 * np.pi * np.e)\n', (1089, 1107), True, 'import numpy as np\n'), ((1793, 1806), 'pytorchrl.misc.tensor_utils.constant', 'constant', (['(0.5)'], {}), '(0.5)\n', (1801, 1806), False, 'from pytorchrl.misc.tensor_utils import constant\n'), ((1843, 1856), 'pytorchrl.misc.tensor_utils.constant', 'constant', (['(0.5)'], {}), '(0.5)\n', (1851, 1856), False, 'from pytorchrl.misc.tensor_utils import constant\n'), ((3805, 3818), 'pytorchrl.misc.tensor_utils.constant', 'constant', (['(2.0)'], {}), '(2.0)\n', (3813, 3818), False, 'from pytorchrl.misc.tensor_utils import constant\n'), ((3868, 3881), 'pytorchrl.misc.tensor_utils.constant', 'constant', (['(2.0)'], {}), '(2.0)\n', (3876, 3881), False, 'from pytorchrl.misc.tensor_utils import constant\n'), ((1902, 1919), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1908, 1919), True, 'import numpy as np\n')] |
import sys
import datetime
import reportconfig
import projectmetrics
import os
import numpy as np
import matplotlib
import matplotlib.dates as mdates
# check for headless executions
if "DISPLAY" not in os.environ:
    # No X display: probe whether pyplot can still create a figure in a
    # subprocess. A non-zero exit status means the default interactive
    # backend failed, so switch to the non-interactive Agg backend
    # BEFORE importing pyplot in this process.
    if os.system('python -c "import matplotlib.pyplot as plt; plt.figure()"') != 0:
        print("INFO: Lack of display should generate an expected ImportError. Changing MatPlotLib backend.")
        matplotlib.use('Agg')
    import matplotlib.pyplot as plt
else:
    import matplotlib.pyplot as plt
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA
plt.style.use('ggplot')
# plt.style.use('fivethirtyeight')
# plt.style.use('classic')
# plt.style.use('seaborn')
# Date-axis tick locators/formatters shared by every date-based chart.
YEARS = mdates.YearLocator()  # every year
MONTHS = mdates.MonthLocator()  # every month
WEEKDAYS = mdates.WeekdayLocator(byweekday=(MO, TU, WE, TH, FR))  # every weekday
WEEKFINISH = mdates.WeekdayLocator(byweekday=SA)  # every week start
YEARS_FORMAT = mdates.DateFormatter('%Y')
MONTHS_FORMAT = mdates.DateFormatter('%b %Y')
# Colormap names: DEFAULT for <= 20 series, SECONDARY (continuous) beyond.
# DEFAULT_CMAP = "Set2"
# DEFAULT_CMAP = "Set3"
# DEFAULT_CMAP = "prism"
DEFAULT_CMAP = "tab10"
SECONDARY_CMAP = "gist_ncar"
# Default look-back windows (days) and category sets for report tables.
DEFAULT_TREND_RANGE = [60, 30, 14, 7]
DEFAULT_SLOC_TYPES = ["HAND", "AC", "XML"]
DEFAULT_COMP_TYPES = ["Channels", "Commands", "Events", "Parameters", "Total Ports"]
DEFAULT_ISSUE_LABELS = ["Bug", "Req. Change", "Enhancement", "Process", "Issue"]
DEFAULT_BAR_WIDTH = 0.8
# Short keys for the per-chart `categories` dicts ({I: [...], S: [...], C: [...]}).
I = 'issues'
S = 'sloc'
C = 'comp'
class GitHubMetricsReport:
    """Parses CLI options, loads the report configuration and project metrics."""

    def __init__(self, args):
        """
        Parameters
        ----------
        args: command-line token list; each flag's value follows the flag.
        """
        def flag_value(flag):
            # Token immediately following the given flag.
            return args[args.index(flag) + 1]

        if "--config" in args:
            config_file = flag_value("--config")
        else:
            # Fall back to the positional configuration path.
            config_file = args[2]
        self.config_opts = reportconfig.ReportConfiguration(config_file)
        if "--username" in args:
            self.config_opts.username = flag_value("--username")
        if "--git-api-key" in args:
            self.config_opts.git_api_key = flag_value("--git-api-key")
        if "--zen-api-key" in args:
            self.config_opts.zen_api_key = flag_value("--zen-api-key")
        # Interactive display instead of saving figures to disk.
        self.show = "--show" in args
        self.metrics = projectmetrics.ProjectMetrics(None, config_opts=self.config_opts)
def create_graph_colors_list(data_types):
    """Return one plot color per entry in ``data_types``.

    Uses the qualitative DEFAULT_CMAP for up to 20 series and samples the
    continuous SECONDARY_CMAP evenly when there are more.
    """
    count = len(data_types)
    if count > 20:
        return plt.get_cmap(SECONDARY_CMAP)(np.linspace(0., 1., count))
    return plt.get_cmap(DEFAULT_CMAP)(np.arange(count))
def format_label_chart(fig, axs, x_data):
    """Apply categorical x-axis formatting (one tick per label, rotated 90deg).

    ``axs`` may be an iterable of Axes or a single Axes; iterating a single
    Axes raises TypeError, which selects the single-axes branch below.
    Returns the figure after tight_layout.
    """
    try:
        for ax in axs:
            ax.legend()
            ax.set_xticks(np.array(list(range(len(x_data)))))
            ax.set_xticklabels(x_data, rotation=90)
            x_lim = ax.get_xlim()  # NOTE(review): unused; presumably leftover
            ax.set_xlim(-1, len(x_data))
            # Pad the top of the y range by 5% so bar labels fit.
            y_lim = ax.get_ylim()
            ax.set_ylim(y_lim[0], 1.05 * y_lim[1])
    except TypeError:
        # Single Axes object (not iterable): same formatting, applied once.
        axs.legend()
        axs.set_xticks(np.array(list(range(len(x_data)))))
        axs.set_xticklabels(x_data, rotation=90)
        x_lim = axs.get_xlim()  # NOTE(review): unused; presumably leftover
        axs.set_xlim(-1, len(x_data))
        y_lim = axs.get_ylim()
        axs.set_ylim(y_lim[0], 1.05*y_lim[1])
    fig.tight_layout()
    return fig
def format_date_chart(fig, axs, x_data):
    """Apply date-axis formatting (monthly major ticks, '%b %Y' labels).

    ``axs`` may be an iterable of Axes or a single Axes; iterating a single
    Axes raises TypeError, which selects the single-axes branch below.
    Returns the figure after autofmt_xdate/tight_layout.
    """
    try:
        for ax in axs:
            ax.xaxis_date()
            ax.legend()
            ax.xaxis.set_major_locator(MONTHS)
            ax.xaxis.set_major_formatter(MONTHS_FORMAT)
            # Pad the top of the y range by 5% so annotations fit.
            y_lim = ax.get_ylim()
            ax.set_ylim(y_lim[0], 1.05 * y_lim[1])
            # if len(data_x) <= 120:
            #     ax.xaxis.set_minor_locator(WEEKDAYS)
            # else:
            #     ax.xaxis.set_minor_locator(WEEKFINISH)
    except TypeError:
        # Single Axes object (not iterable): same formatting, applied once.
        axs.xaxis_date()
        axs.legend()
        axs.xaxis.set_major_locator(MONTHS)
        axs.xaxis.set_major_formatter(MONTHS_FORMAT)
        y_lim = axs.get_ylim()
        axs.set_ylim(y_lim[0], 1.05*y_lim[1])
        # if len(data_x) <= 120:
        #     axs.xaxis.set_minor_locator(WEEKDAYS)
        # else:
        #     axs.xaxis.set_minor_locator(WEEKFINISH)
    fig.autofmt_xdate()
    fig.tight_layout()
    return fig
def finalize_figure(fig, title, directory=None, show=False):
    """Show or save a finished figure, then close it.

    Parameters
    ----------
    fig: the matplotlib figure to finalize.
    title: chart title; becomes the file name (spaces -> underscores).
    directory: output directory; when None nothing is written.
    show: display interactively instead of saving.

    Returns
    -------
    The saved file path, or None when shown interactively or when no
    directory was given.
    """
    if show:
        plt.show()
        plt.close(fig)
        return None
    # Fix: output_file was previously unbound (NameError) when directory
    # was None and show was False; default it explicitly.
    output_file = None
    if directory is not None:
        output_file = directory + title + ".png"
        output_file = output_file.replace(" ", "_")
        plt.savefig(output_file)
    plt.close(fig)
    return output_file
def generate_table(table_columns, data, title="", directory=None, show=False):
fig, ax = plt.subplots(1, 1, figsize=(10, (len(data) + 2) / 4 + 1))
# fig.patch.set_visible(False)
ax.axis('off')
table = ax.table(cellText=data, colLabels=table_columns, loc='center')
for index, header in enumerate(table_columns):
table.auto_set_column_width(index)
table.auto_set_font_size(True)
ax.set_title(title)
fig.tight_layout()
output_file = finalize_figure(fig, title, directory, show)
return output_file
def generate_line_plot(x_data, y_data, filled=None, data_labels=None, title="", directory=None, show=False,
date_plot=False, stacked=False):
if data_labels is None:
data_labels = list(y_data.keys())
if date_plot:
x_index = x_data
else:
x_index = np.array(list(range(len(x_data))))
y_offset = np.zeros((len(x_index),))
colors = create_graph_colors_list(data_labels)
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
for index, label in enumerate(data_labels):
if isinstance(x_data, dict):
if stacked:
raise ValueError("Stacked line charts require shared x_data basis.")
x = x_data[label]
y_offset = np.zeros((len(x),))
else:
x = x_index
y = y_data[label]
if stacked:
y += y_offset
if date_plot:
ax.plot_date(x, y, '-', color=colors[index], label=label)
else:
ax.plot(x, y, '-', color=colors[index], label=label)
if filled and label in filled:
ax.fill_between(x, y, y_offset, color=colors[index], alpha=0.4)
if stacked:
y_offset += y
ax.set_title(title)
# format the ticks
if date_plot:
format_date_chart(fig, ax, x_data)
else:
format_label_chart(fig, ax, x_data)
# handles, labels = _sort_legend(ax)
# ax.legend(handles, labels)
output_file = finalize_figure(fig, title, directory, show)
return output_file
def _generate_complicated_bar_plot(x_data, y_data, data_labels=None, title="", directory=None, show=False,
date_plot=False, split=False, adjacent=False, stacked=False):
if data_labels is None:
data_labels = list(y_data.keys())
bar_width = DEFAULT_BAR_WIDTH
colors = create_graph_colors_list(data_labels)
if date_plot:
# TODO consider re-enabling; expand chart when range > 60 days
# sorted_x_data = sorted(x_data)
# fig_x = max(10., ((sorted_x_data[-1] - sorted_x_data[0]).days + 1) / 6.)
fig_x = 10
else:
# expand chart when components > 25
fig_x = max(10., len(x_data) / 2.5)
if split and len(data_labels) > 1:
fig, axs = plt.subplots(len(data_labels), 1, figsize=(fig_x, 5 * len(data_labels)))
ax = axs[0]
else:
axs = []
fig, ax = plt.subplots(1, 1, figsize=(fig_x, 10))
if date_plot:
x = x_data
else:
x = np.array(list(range(len(x_data))))
if adjacent:
bar_width /= len(data_labels)
x = x - (len(data_labels) - 1) * bar_width / 2
y_offset = np.zeros((len(x),))
for index, label in enumerate(data_labels):
if isinstance(x_data, dict):
if stacked:
raise ValueError("Stacked line charts require shared x_data basis.")
x = x_data[label]
y_offset = np.zeros((len(x),))
if split and len(data_labels) > 1:
ax = axs[index]
y = y_data[label]
bars = ax.bar(x, y, width=bar_width, bottom=y_offset, color=colors[index], label=label)
if not date_plot:
if adjacent:
x = x + bar_width
for position, bar in enumerate(bars):
height = bar.get_height()
if height != 0:
ax.text(bar.get_x() + bar.get_width() / 2., height + y_offset[position], " {} ".format(height),
ha='center', va='bottom')
# ha='center', va='bottom', rotation=90)
if stacked:
y_offset = y_offset + y_data[label]
if index == 0:
ax.set_title(title)
if split:
ax = axs
if date_plot:
format_date_chart(fig, ax, x_data)
else:
format_label_chart(fig, ax, x_data)
output_file = finalize_figure(fig, title, directory, show)
return output_file
def generate_stacked_bar_plot(x_data, y_data, data_labels=None, title="", directory=None, show=False, date_plot=False):
return _generate_complicated_bar_plot(x_data, y_data, data_labels, title, directory, show, date_plot, stacked=True)
def generate_split_bar_plot(x_data, y_data, data_labels=None, title="", directory=None, show=False, date_plot=False):
return _generate_complicated_bar_plot(x_data, y_data, data_labels, title, directory, show, date_plot, split=True)
def generate_adjacent_bar_plot(x_data, y_data, data_labels=None, title="", directory=None, show=False, date_plot=False):
return _generate_complicated_bar_plot(x_data, y_data, data_labels, title, directory, show, date_plot, adjacent=True)
def generate_pie_plot():
    """Placeholder for a generic pie-chart renderer; not implemented yet."""
    raise NotImplementedError()
def generate_stacked_pie_plot():
    """Placeholder for a stacked pie-chart renderer; not implemented yet."""
    raise NotImplementedError()
def table_project_summary(reporter, categories=None, period=None, show=False, directory=None, title="Project Summary"):
metrics = reporter.metrics
table_columns = [""] + ["Current Value"]
table_data = []
# TODO evaluate issue label filter approach
# issue label counts, starting with overall
if categories[I]:
total = metrics.issue_totals[metrics.OV][metrics.NEW][-1] - metrics.issue_totals[metrics.OV][metrics.DONE][-1]
table_data.append([metrics.OV + " issues", total])
for key in categories[I]:
if key == metrics.OV:
continue
total = metrics.issue_totals[key][metrics.NEW][-1] - metrics.issue_totals[key][metrics.DONE][-1]
table_data.append([key + " issues", total])
# sloc
for category in categories[S]:
total = 0
for key in (list(metrics.sloc_data.keys())):
total += metrics.sloc_data[key].get(category) \
if metrics.sloc_data[key].get(category) is not None else 0
table_data.append([category, total])
# component counts
for comp in categories[C]:
total = 0
for key in list(metrics.comp_data.keys()):
total += metrics.comp_data[key].get(comp) \
if metrics.comp_data[key].get(comp) is not None else 0
table_data.append([comp, total])
output_file = generate_table(table_columns, table_data, title, directory, show)
return output_file
def table_issue_label_summary(reporter, categories=None, period=None, show=False, directory=None, title="Issue Label Summary"):
categories = {I: categories[I], S: [], C: []}
return table_project_summary(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_sloc_summary(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC Summary"):
categories = {I: [], S: categories[S], C: []}
return table_project_summary(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_comp_summary(reporter, categories=None, period=None, show=False, directory=None, title="Component Structure Summary"):
categories = {I: [], S: [], C: categories[C]}
return table_project_summary(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_project_diffs(reporter, categories=None, period=None, show=False, directory=None, title="Project Changes"):
metrics = reporter.metrics
table_columns = [""] + ["%d Day Change" % x for x in period]
table_data = []
# issue label diffs, starting with overall
if categories[I]:
# TODO evaluate issue label filter approach
label_totals = metrics.issue_totals[metrics.OV]
table_data += [[metrics.OV] + ["+" + str(label_totals[metrics.NEW][-1] - label_totals[metrics.NEW][-x]) +
" / -" + str(label_totals[metrics.DONE][-1] - label_totals[metrics.DONE][-x])
if x <= len(metrics.issue_dates) else "" for x in period]]
for key in categories[I]:
if key == metrics.OV:
continue
label_totals = metrics.issue_totals[key]
row = [key] + ["+" + str(label_totals[metrics.NEW][-1] - label_totals[metrics.NEW][-x]) +
" / -" + str(label_totals[metrics.DONE][-1] - label_totals[metrics.DONE][-x])
if x <= len(metrics.issue_dates) else "" for x in period]
table_data.append(row)
# manual sloc diffs
if categories[S]:
dates = metrics.sloc_totals[metrics.DATE]
for key in categories[S]:
if key == metrics.DATE:
continue
label_totals = metrics.sloc_totals.get(key)
if label_totals is None:
continue
row = [key] + [str(label_totals[-1] - label_totals[-x])
if x <= len(dates) else "" for x in period]
for index, value in enumerate(row):
if index == 0:
continue
if value and int(value) >= 0:
row[index] = '+' + value
table_data.append(row)
# component counts
if categories[C]:
dates = metrics.comp_totals[metrics.DATE]
for key in categories[C]:
if key == metrics.DATE:
continue
label_totals = metrics.comp_totals.get(key)
if label_totals is None:
continue
row = [key] + [str(label_totals[-1] - label_totals[-x])
if x <= len(dates) else "" for x in period]
for index, value in enumerate(row):
if index == 0:
continue
if value and int(value) >= 0:
row[index] = '+' + value
table_data.append(row)
output_file = generate_table(table_columns, table_data, title, directory, show)
return output_file
def table_issue_label_diffs(reporter, categories=None, period=None, show=False, directory=None, title="Issue Label Changes"):
categories = {I: categories[I], S: [], C: []}
return table_project_diffs(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_sloc_diffs(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC Changes"):
categories = {I: [], S: categories[S], C: []}
return table_project_diffs(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_comp_diffs(reporter, categories=None, period=None, show=False, directory=None, title="Component Structure Changes"):
categories = {I: [], S: [], C: categories[C]}
return table_project_diffs(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_task_list(reporter, categories=None, period=None, show=False, directory=None, title="Planned Task List"):
metrics = reporter.metrics
table_columns = metrics.task_items_header
table_data = [[task] + [metrics.plan_dict[task][header] for header in metrics.task_items_header[1:]]
for task in metrics.plan_task_list]
#
# table_data = [metrics.task_items[task] for task in metrics.plan_task_list]
output_file = generate_table(table_columns, table_data, title, directory, show)
return output_file
def line_plot_trend(reporter, categories=None, period=None, show=False, directory=None, title=""):
metrics = reporter.metrics
period_dates = []
data_source = []
data_categories = []
if categories[I]:
period_dates = np.array(metrics.issue_dates)
data_categories = [metrics.NEW, metrics.DONE, metrics.OPEN]
data_source = metrics.issue_totals[metrics.OV]
elif categories[S]:
period_dates = np.array(metrics.sloc_totals[metrics.DATE])
data_categories = categories[S]
data_source = metrics.sloc_totals
elif categories[C]:
period_dates = np.array(metrics.comp_totals[metrics.DATE])
data_categories = categories[C]
data_source = metrics.comp_totals
output_file = generate_line_plot(period_dates, data_source, data_labels=data_categories,
title=title, directory=directory, show=show, date_plot=True)
return output_file
def line_plot_issue_labels_trend(reporter, categories=None, period=None, show=False, directory=None, title="Issue Label Trendline"):
categories = {I: categories[I], S: [], C: []}
return line_plot_trend(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def line_plot_comp_trend(reporter, categories=None, period=None, show=False, directory=None, title="Component Structure Trendline"):
categories = {I: [], S: [], C: categories[C]}
return line_plot_trend(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def line_plot_sloc_trend(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC Trendline"):
categories = {I: [], S: categories[S], C: []}
return line_plot_trend(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def stacked_bar_plot_trend(reporter, categories=None, period=None, show=False, directory=None, title=""):
metrics = reporter.metrics
data = {}
data_categories = []
# TODO evaluate issue label filter approach
if categories[I]:
period = min(len(metrics.issue_dates), max(period))
period_dates = np.array(metrics.issue_dates[-period:])
for key in categories[I]:
if key == metrics.OV:
continue
data[key] = np.array(metrics.issue_totals[key][metrics.OPEN][-period:])
data_categories.append(key)
# else if component sloc types defined
elif categories[S]:
period = min(len(metrics.sloc_totals[metrics.DATE]), max(period))
period_dates = metrics.sloc_totals[metrics.DATE][-period:]
for key in sorted(categories[S]):
data[key] = np.array(metrics.sloc_totals[key][-period:])
data_categories.append(key)
# else if component structure types defined
elif categories[C]:
period = min(len(metrics.comp_totals[metrics.DATE]), max(period))
period_dates = metrics.comp_totals[metrics.DATE][-period:]
for key in sorted(categories[C]):
data[key] = np.array(metrics.comp_totals[key][-period:])
data_categories.append(key)
else:
raise ValueError("No categories specified for visualization.")
if period == 1:
print("Warning: Unable to produce " + title + " with available data points. Visualization will be skipped.")
return
output_file = generate_stacked_bar_plot(period_dates, data, data_labels=data_categories,
title=title, directory=directory, show=show, date_plot=True)
return output_file
def stacked_bar_plot_issue_labels_trend(reporter, categories=None, period=None, show=False, directory=None, title="Issue Label Trend"):
categories = {I: categories[I], S: [], C: []}
return stacked_bar_plot_trend(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def stacked_bar_plot_comp_trend(reporter, categories=None, period=None, show=False, directory=None, title="Component Structure Trend"):
categories = {I: [], S: [], C: categories[C]}
return stacked_bar_plot_trend(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def stacked_bar_plot_sloc_trend(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC Trend"):
categories = {I: [], S: categories[S], C: []}
return stacked_bar_plot_trend(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def stacked_bar_plot_compare(reporter, categories=None, period=None, show=False, directory=None, title=""):
metrics = reporter.metrics
if categories[S]:
components = sorted(metrics.sloc_data.keys())
data_categories = categories[S]
data_source = metrics.sloc_data
# else if component categories defined
elif categories[C]:
components = sorted(metrics.comp_data.keys())
data_categories = categories[C]
data_source = metrics.comp_data
else:
raise ValueError("No categories specified for visualization.")
data = {}
for key in data_categories:
data[key] = np.array([data_source[component][key] if data_source[component].get(key) is not None else 0
for component in components])
output_file = generate_stacked_bar_plot(components, data, data_labels=data_categories,
title=title, directory=directory, show=show)
return output_file
def stacked_bar_plot_comp_compare(reporter, categories=None, period=None, show=False, directory=None, title="Component Structure Comparison"):
categories = {I: [], S: [], C: categories[C]}
return stacked_bar_plot_compare(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def stacked_bar_plot_sloc_compare(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC Comparison"):
categories = {I: [], S: categories[S], C: []}
return stacked_bar_plot_compare(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def bar_plot_component(reporter, categories=None, period=None, show=False, directory=None, title=""):
metrics = reporter.metrics
# if sloc categories defined
if categories[S]:
components = sorted(metrics.sloc_data.keys())
data_categories = categories[S]
data_source = metrics.sloc_data
# else if component categories defined
elif categories[C]:
components = sorted(metrics.comp_data.keys())
data_categories = categories[C]
data_source = metrics.comp_data
else:
raise ValueError("No categories specified for visualization.")
# use data structs to build display data
data = {}
for key in data_categories:
data_list = []
for component in components:
value = data_source[component].get(key)
if value is None:
value = 0
data_list.append(value)
data[key] = np.array(data_list)
output_file = generate_adjacent_bar_plot(components, data, data_labels=data_categories,
title=title, directory=directory, show=show)
return output_file
def bar_plot_sloc(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC"):
categories = {I: [], S: categories[S], C: []}
return bar_plot_component(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def bar_plot_comp(reporter, categories=None, period=None, show=False, directory=None, title="Component Structure"):
categories = {I: [], S: [], C: categories[C]}
return bar_plot_component(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def split_bar_plot_component(reporter, categories=None, period=None, show=False, directory=None, title=""):
metrics = reporter.metrics
# if sloc categories defined
if categories[S]:
components = sorted(metrics.sloc_data.keys())
data_categories = categories[S]
data_source = metrics.sloc_data
# else if component categories defined
elif categories[C]:
components = sorted(metrics.comp_data.keys())
data_categories = categories[C]
data_source = metrics.comp_data
else:
raise ValueError("No categories specified for visualization.")
# use data structs to build display data
data = {}
for key in data_categories:
data_list = []
for component in components:
value = data_source[component].get(key)
if value is None:
value = 0
data_list.append(value)
data[key] = np.array(data_list)
output_file = generate_split_bar_plot(components, data, data_labels=data_categories,
title=title, directory=directory, show=show)
return output_file
def split_bar_plot_sloc(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC"):
categories = {I: [], S: categories[S], C: []}
return split_bar_plot_component(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def split_bar_plot_comp(reporter, categories=None, period=None, show=False, directory=None, title="Component Structure"):
categories = {I: [], S: [], C: categories[C]}
return split_bar_plot_component(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def pie_plot_issue_labels_totals(reporter, categories=None, period=None, show=False, directory=None, title="Issue Label Totals"):
metrics = reporter.metrics
keys_list = []
totals_list = []
# TODO evaluate issue label filter approach
for key in sorted(metrics.issue_totals.keys()):
if key == metrics.OV:
continue
amount = metrics.issue_totals[key][metrics.NEW][-1] - metrics.issue_totals[key][metrics.DONE][-1]
if amount > 0:
keys_list.append(key)
totals_list.append(amount)
overall_amount = metrics.issue_totals[metrics.OV][metrics.NEW][-1] - metrics.issue_totals[metrics.OV][metrics.DONE][-1]
if (sum(totals_list)) < overall_amount:
keys_list.append(metrics.UL)
totals_list.append(overall_amount - sum(totals_list))
total = sum(totals_list)
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
if len(keys_list) > 10:
cmap = plt.get_cmap(SECONDARY_CMAP)
colors = cmap(np.linspace(0., 1., len(keys_list)))
else:
cmap = plt.get_cmap(DEFAULT_CMAP)
colors = cmap(np.arange(len(keys_list)))
ax.pie(totals_list, labels=keys_list, colors=colors, autopct=lambda p: '{0:.0f} ({1:.2f}%)'.format(p * total / 100, p))
# plt.title(title)
ax.set_title(title)
fig.tight_layout()
if show:
plt.show()
plt.close(fig)
return
if directory is not None:
output = directory + title + ".png"
output = output.replace(" ", "_")
plt.savefig(output)
plt.close(fig)
return output
def pie_plot_category_totals(reporter, categories=None, period=None, show=False, directory=None, title=""):
metrics = reporter.metrics
# data struct prep
components = []
data_categories = []
data_source = {}
# if sloc categories defined
if categories[S]:
components = sorted(metrics.sloc_data.keys())
data_categories = categories[S]
data_source = metrics.sloc_data
# else if component categories defined
elif categories[C]:
components = sorted(metrics.comp_data.keys())
data_categories = categories[C]
data_source = metrics.comp_data
data_values = []
data_labels = []
for component in components:
data_values.append(0)
data_labels.append(component)
for category in data_categories:
value = data_source[component].get(category)
if value is None:
value = 0
data_values[-1] += value
total = sum(data_values)
# for index, value in enumerate(data_values):
# if value == 0 and not reporter.config_opts.metrics_display_zero_comps:
# del[data_values[index]]
# del[data_labels[index]]
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
if len(components) > 10:
cmap = plt.get_cmap(SECONDARY_CMAP)
colors = cmap(np.linspace(0., 1., len(components)))
else:
cmap = plt.get_cmap(DEFAULT_CMAP)
colors = cmap(np.arange(len(components)))
ax.pie(data_values, labels=data_labels, colors=colors, startangle=90,
autopct=lambda p: '{0:.0f} ({1:.2f}%)'.format(p * total / 100, p))
ax.set_title(title)
fig.tight_layout()
if show:
plt.show()
plt.close(fig)
return
if directory is not None:
output = directory + title + ".png"
output = output.replace(" ", "_")
plt.savefig(output)
plt.close(fig)
return output
def pie_plot_sloc_totals(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC Totals"):
categories = {I: [], S: categories[S], C: []}
return pie_plot_category_totals(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def pie_plot_comp_totals(reporter, categories=None, period=None, show=False, directory=None, title="Component Structure Totals"):
categories = {I: [], S: [], C: categories[C]}
return pie_plot_category_totals(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def pie_plot_category_overview(reporter, categories=None, period=None, show=False, directory=None, title=""):
metrics = reporter.metrics
# data struct prep
components = []
data_categories = []
data_source = {}
# if sloc categories defined
if categories[S]:
components = sorted(metrics.sloc_data.keys())
data_categories = categories[S]
data_source = metrics.sloc_data
# else if component categories defined
elif categories[C]:
components = sorted(metrics.comp_data.keys())
data_categories = categories[C]
data_source = metrics.comp_data
data = []
data_labels = []
for category in data_categories:
data.append(0)
data_labels.append(category)
for component in components:
value = data_source[component].get(category)
if value is None:
value = 0
data[-1] += value
total = sum(data)
if len(data_categories) > 10:
cmap = plt.get_cmap(SECONDARY_CMAP)
colors = cmap(np.linspace(0., 1., len(data_categories)))
else:
cmap = plt.get_cmap(DEFAULT_CMAP)
colors = cmap(np.arange(len(data_categories)))
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax.pie(data, labels=data_labels, colors=colors, autopct=lambda p: '{0:.0f} ({1:.2f}%)'.format(p * total / 100, p))
# plt.title(title)
ax.set_title(title)
fig.tight_layout()
if show:
plt.show()
plt.close(fig)
return
if directory is not None:
output = directory + title + ".png"
output = output.replace(" ", "_")
plt.savefig(output)
plt.close(fig)
return output
def pie_plot_sloc_overview(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC Overview"):
    """SLOC-only variant of pie_plot_category_overview: issue and structure categories are dropped."""
    sloc_only = {I: [], C: [], S: categories[S]}
    return pie_plot_category_overview(reporter, categories=sloc_only, period=period,
                                      show=show, directory=directory, title=title)
def pie_plot_comp_overview(reporter, categories=None, period=None, show=False, directory=None, title="Component Structure Overview"):
    """Structure-only variant of pie_plot_category_overview: issue and SLOC categories are dropped."""
    comp_only = {I: [], S: [], C: categories[C]}
    return pie_plot_category_overview(reporter, categories=comp_only, period=period,
                                      show=show, directory=directory, title=title)
def bar_plot_sloc_snapshot(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC Snapshot"):
    """Bar chart of the most recent SLOC total for each SLOC category.

    Adds an extra "Estimated" bar when the configuration provides a SLOC
    estimate.  Returns None (and plots nothing) when no SLOC categories are
    configured.  Shows the figure when *show* is true, otherwise saves it
    under *directory* and returns the written file path.
    """
    metrics = reporter.metrics
    config = reporter.config_opts
    data_source = []
    data_categories = []
    if categories[S]:
        for category in categories[S]:
            data_categories.append(category)
            # latest sample of the per-category totals series
            data_source.append(metrics.sloc_totals[category][-1])
        if config.metrics_sloc_estimation is not None:
            data_categories.append("Estimated")
            data_source.append(int(config.metrics_sloc_estimation))
    else:
        return
    cmap = plt.get_cmap(DEFAULT_CMAP)
    colors = cmap(np.arange(len(data_categories)))
    fig, ax = plt.subplots(1, 1, figsize=(10, 10))
    xax = np.arange(len(data_categories))
    bars = ax.bar(xax, np.array(data_source))
    index = 0
    for bar in bars:
        bar.set_color(colors[index])
        index += 1
        # annotate each bar with its integer value just above the bar top
        height = bar.get_height()
        ax.text(bar.get_x() + bar.get_width() / 2., 1.05 * height, '%d' % int(height), ha='center', va='bottom')
    plt.grid()
    ax.set_xticklabels(data_categories, rotation=60)
    ax.set_title(title)
    fig.tight_layout()
    if show:
        plt.show()
        plt.close(fig)
        return
    if directory is not None:
        output = directory + title + ".png"
        output = output.replace(" ", "_")  # spaces are awkward in artifact file names
        plt.savefig(output)
        plt.close(fig)
        return output
def bar_chart_active_tasks(reporter, categories=None, period=None, show=False, directory=None, title="Active Task Progress"):
    """Adjacent-bar chart comparing expected vs. current completion of active plan tasks.

    A task counts as active once its planned start date has passed or it has
    any accrued value; tasks already complete on both measures are left out.
    NOTE(review): the metrics field names (pv.START, pv.EV, pv.TASK, XV, CV)
    are defined outside this chunk — their semantics are inferred here.
    """
    metrics = reporter.metrics
    today = datetime.date.today()
    xc = "Expected completion"
    cc = "Current completion"
    active_tasks = []
    task_names = []
    task_progress = {xc: [], cc: []}
    # the caller-supplied categories are ignored; the two series are fixed
    categories = [xc, cc]
    for task_key in metrics.plan_task_list:
        if metrics.plan_dict[task_key][metrics.pv.START] <= today or metrics.plan_dict[task_key][metrics.CV] > 0:
            active_tasks.append(metrics.plan_dict[task_key])
    # plot tasks in planned-start order
    active_tasks = sorted(active_tasks, key=lambda task_item: task_item[metrics.pv.START])
    for task in active_tasks:
        pv = task[metrics.pv.EV]
        # keep only tasks not yet fully complete on either measure
        if task[metrics.XV] < pv or task[metrics.CV] < pv:
            task_names.append(task[metrics.pv.TASK])
            task_progress[xc].append(round(task[metrics.XV] / pv, 2))
            task_progress[cc].append(round(task[metrics.CV] / pv, 2))
    output_file = generate_adjacent_bar_plot(task_names, task_progress, data_labels=categories,
                                             title=title, directory=directory, show=show)
    return output_file
def line_plot_ev_plan(reporter, categories=None, period=None, show=False, directory=None, title="Task Progress vs Plan"):
    """Line plot of the EV series over time against each plan's EV baseline.

    The actual-progress series (when present) is the only filled one; one
    additional line is drawn per plan in ``metrics.plan_totals``.  Returns
    whatever generate_line_plot returns (the plot file path when saving).
    NOTE(review): EV presumably means "earned value" — confirm.
    """
    metrics = reporter.metrics
    data = {}
    data_x = {}
    data_categories = []
    # only the actual EV series is drawn filled
    filled = [metrics.EV]
    if metrics.plan_progress:
        data[metrics.EV] = metrics.plan_progress[metrics.EV]
        data_x[metrics.EV] = metrics.plan_progress[metrics.DATE]
        data_categories += [metrics.EV]
    # one baseline line per plan, in a stable (sorted) order
    for plan_key in sorted(metrics.plan_totals.keys()):
        data[plan_key] = metrics.plan_totals[plan_key][metrics.EV]
        data_x[plan_key] = metrics.plan_totals[plan_key][metrics.DATE]
        data_categories += [plan_key]
    output_file = generate_line_plot(data_x, data, filled=filled, data_labels=data_categories,
                                     title=title, directory=directory, show=show, date_plot=True)
    return output_file
def sloc_overview_annotation(reporter, categories=None, period=None, show=False, directory=None, title="SLOC Overview Annotation"):
    """Annotation block listing every component that has SLOC data."""
    component_names = list(reporter.metrics.sloc_data)  # iterating a dict yields its keys
    return annotation_insert(component_names)
def sloc_totals_annotation(reporter, categories=None, period=None, show=False, directory=None, title="SLOC Totals Annotation"):
    """Annotation block listing each SLOC type with both its AC and MC prefix.

    NOTE(review): AC/MC are prefix strings defined on the metrics object
    outside this chunk — confirm their meaning.
    """
    metrics = reporter.metrics
    prefixed = [prefix + sloc_type
                for prefix in (metrics.AC, metrics.MC)
                for sloc_type in categories[S]]
    return annotation_insert(prefixed)
def comp_overview_annotation(reporter, categories=None, period=None, show=False, directory=None, title="Component Overview Annotation"):
    """Annotation block listing every component that has structure data."""
    component_names = list(reporter.metrics.comp_data)  # iterating a dict yields its keys
    return annotation_insert(component_names)
def comp_totals_annotation(reporter, categories=None, period=None, show=False, directory=None, title="Component Totals Annotation"):
    """Annotation block listing the configured component (structure) categories."""
    return annotation_insert(categories[C])
def annotation_insert(types):
    """Render *types* as an indented, sorted, comma-separated annotation block.

    Lines are wrapped so that no line grows past 120 characters; continuation
    lines keep the four-space indent.  The block is framed by an
    "Included types:" header and a trailing blank line.
    """
    component_string = "    "
    for component in sorted(types):
        component_string += component + ", "
        # BUGFIX: measure the current line, not the whole string — the original
        # compared the total length, so once the text passed 120 characters it
        # inserted a line break before every remaining component.
        last_newline = component_string.rfind("\n")
        if len(component_string) - last_newline - 1 > 120:
            newline = component_string[:-2].rfind(', ')
            if newline > last_newline:
                component_string = component_string[:newline + 2] + "\n    " + component_string[newline + 2:]
    component_string = "Included types:\n" + component_string[:-2] + "\n\n"
    return component_string
def _sort_legend(ax):
handles, labels = ax.get_legend_handles_labels()
# sort both labels and handles by labels
labels, handles = list(zip(*sorted(zip(labels, handles), key=lambda t: t[0])))
return handles, labels
def _reverse_legend(ax):
handles, labels = ax.get_legend_handles_labels()
# reverse both labels and handles by labels
labels = labels[::-1]
handles = handles[::-1]
return handles, labels
def main(args):
    """Build every configured report section and assemble them into a markdown report.

    *args* is the raw CLI argv, parsed by GitHubMetricsReport.  Category lists
    for issues (I), SLOC (S) and components (C) are resolved from the
    configuration with fallbacks to the DEFAULT_* constants, the output
    directories are created, each section listed in metrics_report_sections is
    rendered, and the resulting artifacts are written to the report file.
    """
    ghereporter = GitHubMetricsReport(args)
    metrics = ghereporter.metrics
    config_opts = ghereporter.config_opts
    # resolve the issue / SLOC / component category lists from config
    categories = {}
    if categories.get(I) is None:
        categories[I] = sorted(metrics.issue_totals.keys()) \
            if '*' in config_opts.metrics_issue_labels else config_opts.metrics_issue_labels
    if categories.get(I) is None:
        categories[I] = DEFAULT_ISSUE_LABELS
    if categories.get(S) is None:
        categories[S] = config_opts.metrics_sloc_types
    if categories.get(S) is None:
        categories[S] = DEFAULT_SLOC_TYPES
    if categories[S] == ["sloc"]:
        # a bare "sloc" entry expands to every counted sloc variant
        categories[S] = [val + "sloc" for val in [metrics.MC, metrics.AC, metrics.XML]]
    if categories.get(C) is None:
        categories[C] = config_opts.metrics_comp_types
    if categories.get(C) is None:
        categories[C] = DEFAULT_COMP_TYPES
    period = config_opts.metrics_periods or DEFAULT_TREND_RANGE
    # show figures interactively when no report file is requested
    show = config_opts.metrics_report_filename is None or ghereporter.show
    report_dir = config_opts.metrics_report_dir
    artifact_dir = config_opts.metrics_report_artifact_dir
    # verify / prepare output structure
    if report_dir != "":
        if report_dir[-1] != "/":
            report_dir = report_dir + "/"
        # attempt to create directory; errno 17 (EEXIST) means it already exists
        try:
            os.mkdir(report_dir)
        except OSError as err:
            if err.errno != 17:
                # BUGFIX: OSError has no .message attribute in Python 3
                print("ERROR: " + str(err))
    if artifact_dir != "":
        if artifact_dir[-1] != "/":
            artifact_dir = artifact_dir + "/"
        # the artifact dir is referenced relative to the report dir
        if artifact_dir.startswith(report_dir):
            artifact_dir = artifact_dir[len(report_dir):]
        try:
            os.mkdir(report_dir + artifact_dir)
        except OSError as err:
            if err.errno != 17:
                print("ERROR: " + str(err))
    if config_opts.force_remote_history:
        try:
            os.mkdir(report_dir + "history/")
        except OSError as err:
            if err.errno != 17:
                print("ERROR: " + str(err))
    directory = report_dir + artifact_dir
    # generate report artifacts and visualizations; each section name is looked
    # up as a module-level function of this module
    artifacts = []
    for section in config_opts.metrics_report_sections:
        try:
            artifacts.append(globals()[section](ghereporter, categories=categories, period=period, directory=directory, show=show))
        except BaseException as err:
            print("Visualization " + section + " had an error and can not be generated. Error follows:")
            print(str(err))
    # build the artifacts into a markdown report
    if artifacts:
        report_file = config_opts.metrics_report_filename
        # BUGFIX: use open() in text mode — the Python 2 file() builtin does not
        # exist in Python 3, and "wb" rejects str payloads
        with open(report_dir + report_file, "w") as fp:
            fp.write("Report generated: {}\n\n".format(str(datetime.date.today())))
            for artifact in artifacts:
                if artifact is None:
                    continue
                if ".png" in artifact:
                    artifact_file = artifact[len(report_dir):]
                    section_name = artifact[len(directory):-4].replace("_", " ")
                    # NOTE(review): artifact_file is computed but never used —
                    # the markdown likely meant to embed the image; confirm intent
                    line = "## " + section_name + "\n\n\n"
                else:
                    line = artifact
                fp.write(line)
    return
def output_mapping(location=""):
    """Print a markdown table mapping report-section titles to function names.

    Scans ``metricsreport.py`` under *location* for function definitions that
    carry a ``title="..."`` keyword default and prints one ``| title | function | |``
    row per visualization, sorted by title.  Untitled functions and annotation
    sections are skipped.
    """
    rows = []
    # BUGFIX/cleanup: don't shadow the builtin name `file`; drop the unused
    # `maxi` length tracker the original computed but never used.
    with open(location + "metricsreport.py") as source:
        for line in source:
            if "def" in line and "title=" in line:
                func_name = line[4:line.find('(')]
                section_title = line[line.find("title=\"") + 7:line.rfind('"')]
                if section_title == "" or "Annotation" in section_title:
                    continue
                rows.append((section_title, func_name))
    for section_title, func_name in sorted(rows):
        print('| ' + section_title + " | " + func_name + " | |")
    return
# CLI entry point: argv is parsed by GitHubMetricsReport inside main().
# NOTE(review): relies on `sys` being imported earlier in the file.
if __name__ == '__main__':
    main(sys.argv)
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.dates.WeekdayLocator",
"matplotlib.dates.MonthLocator",
"matplotlib.use",
"reportconfig.ReportConfiguration",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.close",
"... | [((615, 638), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (628, 638), True, 'import matplotlib.pyplot as plt\n'), ((737, 757), 'matplotlib.dates.YearLocator', 'mdates.YearLocator', ([], {}), '()\n', (755, 757), True, 'import matplotlib.dates as mdates\n'), ((781, 802), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (800, 802), True, 'import matplotlib.dates as mdates\n'), ((829, 882), 'matplotlib.dates.WeekdayLocator', 'mdates.WeekdayLocator', ([], {'byweekday': '(MO, TU, WE, TH, FR)'}), '(byweekday=(MO, TU, WE, TH, FR))\n', (850, 882), True, 'import matplotlib.dates as mdates\n'), ((913, 948), 'matplotlib.dates.WeekdayLocator', 'mdates.WeekdayLocator', ([], {'byweekday': 'SA'}), '(byweekday=SA)\n', (934, 948), True, 'import matplotlib.dates as mdates\n'), ((984, 1010), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y"""'], {}), "('%Y')\n", (1004, 1010), True, 'import matplotlib.dates as mdates\n'), ((1027, 1056), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b %Y"""'], {}), "('%b %Y')\n", (1047, 1056), True, 'import matplotlib.dates as mdates\n'), ((5609, 5645), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 10)'}), '(1, 1, figsize=(10, 10))\n', (5621, 5645), True, 'import matplotlib.pyplot as plt\n'), ((27059, 27095), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 10)'}), '(1, 1, figsize=(10, 10))\n', (27071, 27095), True, 'import matplotlib.pyplot as plt\n'), ((28992, 29028), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 10)'}), '(1, 1, figsize=(10, 10))\n', (29004, 29028), True, 'import matplotlib.pyplot as plt\n'), ((31565, 31601), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 10)'}), '(1, 1, figsize=(10, 10))\n', (31577, 31601), True, 'import matplotlib.pyplot as plt\n'), ((33306, 33332), 
'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['DEFAULT_CMAP'], {}), '(DEFAULT_CMAP)\n', (33318, 33332), True, 'import matplotlib.pyplot as plt\n'), ((33399, 33435), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 10)'}), '(1, 1, figsize=(10, 10))\n', (33411, 33435), True, 'import matplotlib.pyplot as plt\n'), ((33768, 33778), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (33776, 33778), True, 'import matplotlib.pyplot as plt\n'), ((34311, 34332), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (34330, 34332), False, 'import datetime\n'), ((222, 292), 'os.system', 'os.system', (['"""python -c "import matplotlib.pyplot as plt; plt.figure()\\""""'], {}), '(\'python -c "import matplotlib.pyplot as plt; plt.figure()"\')\n', (231, 292), False, 'import os\n'), ((416, 437), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (430, 437), False, 'import matplotlib\n'), ((1717, 1762), 'reportconfig.ReportConfiguration', 'reportconfig.ReportConfiguration', (['config_file'], {}), '(config_file)\n', (1749, 1762), False, 'import reportconfig\n'), ((2235, 2300), 'projectmetrics.ProjectMetrics', 'projectmetrics.ProjectMetrics', (['None'], {'config_opts': 'self.config_opts'}), '(None, config_opts=self.config_opts)\n', (2264, 2300), False, 'import projectmetrics\n'), ((2389, 2417), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['SECONDARY_CMAP'], {}), '(SECONDARY_CMAP)\n', (2401, 2417), True, 'import matplotlib.pyplot as plt\n'), ((2508, 2534), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['DEFAULT_CMAP'], {}), '(DEFAULT_CMAP)\n', (2520, 2534), True, 'import matplotlib.pyplot as plt\n'), ((4349, 4359), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4357, 4359), True, 'import matplotlib.pyplot as plt\n'), ((4368, 4382), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4377, 4382), True, 'import matplotlib.pyplot as plt\n'), ((4537, 4561), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['output_file'], {}), '(output_file)\n', (4548, 4561), True, 'import matplotlib.pyplot as plt\n'), ((4570, 4584), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4579, 4584), True, 'import matplotlib.pyplot as plt\n'), ((7568, 7607), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(fig_x, 10)'}), '(1, 1, figsize=(fig_x, 10))\n', (7580, 7607), True, 'import matplotlib.pyplot as plt\n'), ((16725, 16754), 'numpy.array', 'np.array', (['metrics.issue_dates'], {}), '(metrics.issue_dates)\n', (16733, 16754), True, 'import numpy as np\n'), ((18683, 18722), 'numpy.array', 'np.array', (['metrics.issue_dates[-period:]'], {}), '(metrics.issue_dates[-period:])\n', (18691, 18722), True, 'import numpy as np\n'), ((23635, 23654), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (23643, 23654), True, 'import numpy as np\n'), ((25369, 25388), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (25377, 25388), True, 'import numpy as np\n'), ((27139, 27167), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['SECONDARY_CMAP'], {}), '(SECONDARY_CMAP)\n', (27151, 27167), True, 'import matplotlib.pyplot as plt\n'), ((27252, 27278), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['DEFAULT_CMAP'], {}), '(DEFAULT_CMAP)\n', (27264, 27278), True, 'import matplotlib.pyplot as plt\n'), ((27544, 27554), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27552, 27554), True, 'import matplotlib.pyplot as plt\n'), ((27563, 27577), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (27572, 27577), True, 'import matplotlib.pyplot as plt\n'), ((27717, 27736), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output'], {}), '(output)\n', (27728, 27736), True, 'import matplotlib.pyplot as plt\n'), ((27745, 27759), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (27754, 27759), True, 'import matplotlib.pyplot as plt\n'), ((29073, 29101), 'matplotlib.pyplot.get_cmap', 
'plt.get_cmap', (['SECONDARY_CMAP'], {}), '(SECONDARY_CMAP)\n', (29085, 29101), True, 'import matplotlib.pyplot as plt\n'), ((29187, 29213), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['DEFAULT_CMAP'], {}), '(DEFAULT_CMAP)\n', (29199, 29213), True, 'import matplotlib.pyplot as plt\n'), ((29485, 29495), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29493, 29495), True, 'import matplotlib.pyplot as plt\n'), ((29504, 29518), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (29513, 29518), True, 'import matplotlib.pyplot as plt\n'), ((29658, 29677), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output'], {}), '(output)\n', (29669, 29677), True, 'import matplotlib.pyplot as plt\n'), ((29686, 29700), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (29695, 29700), True, 'import matplotlib.pyplot as plt\n'), ((31350, 31378), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['SECONDARY_CMAP'], {}), '(SECONDARY_CMAP)\n', (31362, 31378), True, 'import matplotlib.pyplot as plt\n'), ((31469, 31495), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['DEFAULT_CMAP'], {}), '(DEFAULT_CMAP)\n', (31481, 31495), True, 'import matplotlib.pyplot as plt\n'), ((31814, 31824), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31822, 31824), True, 'import matplotlib.pyplot as plt\n'), ((31833, 31847), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (31842, 31847), True, 'import matplotlib.pyplot as plt\n'), ((31987, 32006), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output'], {}), '(output)\n', (31998, 32006), True, 'import matplotlib.pyplot as plt\n'), ((32015, 32029), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (32024, 32029), True, 'import matplotlib.pyplot as plt\n'), ((33501, 33522), 'numpy.array', 'np.array', (['data_source'], {}), '(data_source)\n', (33509, 33522), True, 'import numpy as np\n'), ((33902, 33912), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33910, 33912), 
True, 'import matplotlib.pyplot as plt\n'), ((33921, 33935), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (33930, 33935), True, 'import matplotlib.pyplot as plt\n'), ((34075, 34094), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output'], {}), '(output)\n', (34086, 34094), True, 'import matplotlib.pyplot as plt\n'), ((34103, 34117), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (34112, 34117), True, 'import matplotlib.pyplot as plt\n'), ((16926, 16969), 'numpy.array', 'np.array', (['metrics.sloc_totals[metrics.DATE]'], {}), '(metrics.sloc_totals[metrics.DATE])\n', (16934, 16969), True, 'import numpy as np\n'), ((18840, 18899), 'numpy.array', 'np.array', (['metrics.issue_totals[key][metrics.OPEN][-period:]'], {}), '(metrics.issue_totals[key][metrics.OPEN][-period:])\n', (18848, 18899), True, 'import numpy as np\n'), ((39701, 39721), 'os.mkdir', 'os.mkdir', (['report_dir'], {}), '(report_dir)\n', (39709, 39721), False, 'import os\n'), ((40172, 40207), 'os.mkdir', 'os.mkdir', (['(report_dir + artifact_dir)'], {}), '(report_dir + artifact_dir)\n', (40180, 40207), False, 'import os\n'), ((40385, 40418), 'os.mkdir', 'os.mkdir', (["(report_dir + 'history/')"], {}), "(report_dir + 'history/')\n", (40393, 40418), False, 'import os\n'), ((17100, 17143), 'numpy.array', 'np.array', (['metrics.comp_totals[metrics.DATE]'], {}), '(metrics.comp_totals[metrics.DATE])\n', (17108, 17143), True, 'import numpy as np\n'), ((19215, 19259), 'numpy.array', 'np.array', (['metrics.sloc_totals[key][-period:]'], {}), '(metrics.sloc_totals[key][-period:])\n', (19223, 19259), True, 'import numpy as np\n'), ((19580, 19624), 'numpy.array', 'np.array', (['metrics.comp_totals[key][-period:]'], {}), '(metrics.comp_totals[key][-period:])\n', (19588, 19624), True, 'import numpy as np\n'), ((41312, 41333), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (41331, 41333), False, 'import datetime\n')] |
#!/usr/bin/env python3
import numpy as np
import cv2
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
import scipy
import scipy.signal
import math
import imutils
import img_util
def loadImage(path):
    """Read the image at *path* in color, ignoring any EXIF orientation tag."""
    read_flags = cv2.IMREAD_IGNORE_ORIENTATION | cv2.IMREAD_COLOR
    return cv2.imread(path, read_flags)
# Load one sample card image and scale it to a fixed working width.
# NOTE(review): loadCardRegions(), hsv_img(), displayCircles(),
# destroyWindowOnKey() and radiiMode() are not defined in this chunk —
# presumably provided by img_util or elsewhere in the project; confirm.
card_regions = loadCardRegions()
orig_image = loadImage(card_regions[13]['file'])
image = imutils.resize(orig_image, width=400)
height, width, depth = image.shape
# light blur to suppress noise before circle detection
blurred = cv2.blur(image,(3,3),0)
hue, sat, val = hsv_img(blurred)
# detect small circles on the saturation channel
hough_circles = cv2.HoughCircles(sat, cv2.HOUGH_GRADIENT, .5, 10,
             param1=10,
             param2=20,
             minRadius=2,
             maxRadius=15)
circles = np.round(hough_circles[0, :]).astype("int")  # (x, y, r) rows, rounded to ints
print("finished detecting circles: ", len(circles))
displayCircles(image, circles)
destroyWindowOnKey()
# most common circle radius, used below to filter size outliers
radius_mode = radiiMode(circles)
# Rasterize circle centres into a single-channel mask image.
def circleBinImage(circles):
    """Return a black (height, width, 1) uint8 image with a small filled white dot at each circle centre.

    NOTE(review): only a radius-1 dot is drawn at (x, y) — despite the original
    comment, each circle's own radius (c[2]) is not used.  Relies on the module
    globals ``height`` and ``width`` set from the resized image above.
    """
    bw = np.zeros((height,width,1), np.uint8)
    for c in circles:
        cv2.circle(bw,(c[0],c[1]),1,255,thickness=cv2.FILLED)
    return bw
angleMode(circles)  # NOTE(review): result unused; angleMode is not defined in this chunk
# keep only circles whose radius is within +/-20% of the modal radius
sized_cs = circles[np.where(np.logical_and(circles[:,2]>=.8*radius_mode, circles[:,2]<=1.2*radius_mode))]
len(circles)   # NOTE(review): bare expression statements, likely REPL leftovers
len(sized_cs)
displayCircles(sat, circles)
displayCircles(sat, sized_cs)
destroyWindowOnKey()
circle_bin = circleBinImage(sized_cs)
showImage(circle_bin)
# fit straight (rho, theta) lines through the circle-centre mask
lines = cv2.HoughLines(circle_bin,1,np.pi/180,7).reshape(-1, 2)
showImage(drawLines(image, lines))
# group lines by orientation, with angles taken modulo 90 degrees
line_angle_clusters = cluster_1d(lines[:,1] % (math.pi/2), bw=0.05)
cardinal_lines = lines_with_label_in(lines, line_angle_clusters.labels_, [0])
showImage(drawLines(image, cardinal_lines))
# merge near-duplicate lines, then split them into the two directions
clustered_lines = cluster_2d(cardinal_lines, 0.02)
showImage(drawLines(image, clustered_lines))
line_angle_clusters2 = cluster_1d(clustered_lines[:,1], 0.02)
clean_cardinal_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [0])
clean_cardinal_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [1])  # NOTE(review): overwrites the previous assignment
showImage(drawLines(image, clean_cardinal_lines))
line_angle_clusters2 = cluster_1d(clustered_lines[:,1], 0.1)
a_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [0])
b_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [1])
a_lines.sort(0)  # in-place sort along axis 0
b_lines.sort(0)
# intersect every line of one direction with every line of the other
# NOTE(review): itertools is used here but never imported in this chunk
line_pairs = list(itertools.product(a_lines, b_lines))
intersections = [seg_intersect(*polar2seg(*a), *polar2seg(*b))for (a, b) in line_pairs]
# sample the first image channel around each intersection point
intersection_splotches_r = [n_closest(image[:,:,0], inter.astype(np.uint8), d=2) for inter in intersections]
([np.mean(splotch) for splotch in intersection_splotches_r])  # NOTE(review): result discarded
showImage(n_closest(image, intersections[20].astype(np.uint8), d=1))
showImage(drawLines(image, clustered_lines))
showImage(drawPoints(image, intersections))
print(lines)
print('done')
| [
"numpy.mean",
"numpy.logical_and",
"numpy.round",
"cv2.HoughCircles",
"imutils.resize",
"numpy.zeros",
"cv2.circle",
"cv2.HoughLines",
"cv2.imread",
"cv2.blur"
] | [((488, 525), 'imutils.resize', 'imutils.resize', (['orig_image'], {'width': '(400)'}), '(orig_image, width=400)\n', (502, 525), False, 'import imutils\n'), ((572, 598), 'cv2.blur', 'cv2.blur', (['image', '(3, 3)', '(0)'], {}), '(image, (3, 3), 0)\n', (580, 598), False, 'import cv2\n'), ((647, 750), 'cv2.HoughCircles', 'cv2.HoughCircles', (['sat', 'cv2.HOUGH_GRADIENT', '(0.5)', '(10)'], {'param1': '(10)', 'param2': '(20)', 'minRadius': '(2)', 'maxRadius': '(15)'}), '(sat, cv2.HOUGH_GRADIENT, 0.5, 10, param1=10, param2=20,\n minRadius=2, maxRadius=15)\n', (663, 750), False, 'import cv2\n'), ((328, 394), 'cv2.imread', 'cv2.imread', (['path', '(cv2.IMREAD_IGNORE_ORIENTATION | cv2.IMREAD_COLOR)'], {}), '(path, cv2.IMREAD_IGNORE_ORIENTATION | cv2.IMREAD_COLOR)\n', (338, 394), False, 'import cv2\n'), ((1239, 1277), 'numpy.zeros', 'np.zeros', (['(height, width, 1)', 'np.uint8'], {}), '((height, width, 1), np.uint8)\n', (1247, 1277), True, 'import numpy as np\n'), ((2876, 2892), 'numpy.mean', 'np.mean', (['splotch'], {}), '(splotch)\n', (2883, 2892), True, 'import numpy as np\n'), ((900, 929), 'numpy.round', 'np.round', (['hough_circles[0, :]'], {}), '(hough_circles[0, :])\n', (908, 929), True, 'import numpy as np\n'), ((1306, 1364), 'cv2.circle', 'cv2.circle', (['bw', '(c[0], c[1])', '(1)', '(255)'], {'thickness': 'cv2.FILLED'}), '(bw, (c[0], c[1]), 1, 255, thickness=cv2.FILLED)\n', (1316, 1364), False, 'import cv2\n'), ((1423, 1513), 'numpy.logical_and', 'np.logical_and', (['(circles[:, 2] >= 0.8 * radius_mode)', '(circles[:, 2] <= 1.2 * radius_mode)'], {}), '(circles[:, 2] >= 0.8 * radius_mode, circles[:, 2] <= 1.2 *\n radius_mode)\n', (1437, 1513), True, 'import numpy as np\n'), ((1679, 1724), 'cv2.HoughLines', 'cv2.HoughLines', (['circle_bin', '(1)', '(np.pi / 180)', '(7)'], {}), '(circle_bin, 1, np.pi / 180, 7)\n', (1693, 1724), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 19:13:59 2018
First Try with Naive Bayes
https://www.analyticsvidhya.com/blog/2017/09/naive-bayes-explained/
@author: Tobi
'ScaledForward', 'scaledLeftRightRatio', 'ScaledSpeed',
'isTurningLeft', 'isTurningRight', 'isKeepingStraight',
'isAccelerating'
"""
import os
import csv
import json
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
# By default Keras shuffles the training data (model.fit's shuffle argument defaults to True).
# Set the NumPy seed before importing Keras so runs are reproducible, e.g.:
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Activation
# Location of the recorded training data (relative to the current working directory).
DATA_DUMP_DIRECTORY = 'data_dump'
TRAINING_DATA_FILE = "training_data.csv"
NORMALIZE = True  # NOTE(review): defined but never read in this chunk
class LearningManager:
    """Builds and trains a small feed-forward Keras classifier from dumped driving data.

    Per the module header, each CSV row is [ScaledForward, scaledLeftRightRatio,
    ScaledSpeed, isTurningLeft, isTurningRight, isKeepingStraight,
    isAccelerating]: the first three columns are the inputs, the last four the
    labels.
    """
    def __init__(self):
        """Load the CSV training data, min-max normalize it, and train the model.

        If the data-dump directory or the training file is missing, no model is
        built (``predict`` must not be called in that case).
        """
        print("----------Build models, train them and test them....---------")
        if not os.path.exists(DATA_DUMP_DIRECTORY):
            print("File does not exist-> Tell Unity that the user has to train first")
            return
        print("Data_dump folder exists")
        # BUGFIX: the original executed os.chdir("..") even when it never
        # chdir'd in; the try/finally also restores the CWD if training raises.
        os.chdir(DATA_DUMP_DIRECTORY)
        try:
            filepath = TRAINING_DATA_FILE
            if os.path.isfile(filepath) and os.path.getsize(filepath) > 0:
                print("File with Data already exists, the ML-Algorithm can be trained -> Read File Data")
                with open(TRAINING_DATA_FILE, 'rt') as csvfile:
                    file_reader = csv.reader(csvfile)
                    # skip the two header rows
                    next(file_reader)
                    next(file_reader)
                    whole_data = [line for line in file_reader]
                whole_data = np.asarray(whole_data)
                # BUGFIX: np.float was deprecated and removed in modern NumPy;
                # the builtin float is equivalent here
                whole_data = whole_data.astype(float)
                print(whole_data)
                # min-max normalization bounds for the two unbounded input columns
                max_scaled_forward = maxValueListList(0, whole_data)
                min_scaled_forward = minValueListList(0, whole_data)
                max_scaled_speed = maxValueListList(2, whole_data)
                min_scaled_speed = minValueListList(2, whole_data)
                X = []
                Y = []
                for datavector in whole_data:
                    datavector = list(map(float, datavector))
                    # min-max normalize forward distance and speed into [0, 1]
                    datavector[0] = (datavector[0] - min_scaled_forward) / (max_scaled_forward - min_scaled_forward)
                    datavector[2] = (datavector[2] - min_scaled_speed) / (max_scaled_speed - min_scaled_speed)
                    # X: [scaled_forward, scaledLeftRightRatio, scaledSpeed]
                    X.append([datavector[0], datavector[1], datavector[2]])
                    # Y: [isTurningLeft, isTurningRight, isKeepingStraight, isAccelerating]
                    Y.append([datavector[3], datavector[4], datavector[5], datavector[6]])
                print("Elements of X look like:" + str(X[0]))
                print("Elements of Y look like:" + str(Y[0]))
                X_TRAIN, X_TEST, Y_TRAIN, Y_TEST = train_test_split(X, Y, test_size=0.3, random_state=2, shuffle=False)
                X_TRAIN = np.asarray(X_TRAIN)
                X_TEST = np.asarray(X_TEST)
                Y_TRAIN = np.asarray(Y_TRAIN)
                Y_TEST = np.asarray(Y_TEST)
                print("Anzahl an Trainingsdaten:" + str(len(X_TRAIN)))
                print("Anzahl an Testdaten:" + str(len(X_TEST)))
                # model: 3 inputs -> 8 hidden (relu) -> 4 sigmoid outputs
                self.model = Sequential()
                self.model.add(Dense(8, input_shape=(3,), activation='relu'))
                self.model.add(Dense(4, activation='sigmoid'))
                self.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
                self.model.fit(X_TRAIN, Y_TRAIN, epochs=1, validation_data=(X_TEST, Y_TEST), verbose=2)
                score = self.model.evaluate(X_TEST, Y_TEST, batch_size=32)
                print("Score of Model: " + str(score))
            else:
                print("File does not exist -> Tell Unity that the user has to train first")
        finally:
            os.chdir("..")

    def predict(self, data):
        """Return rounded [isTurningLeft, isTurningRight, isKeepingStraight, isAccelerating] ints for the first input vector.

        *data* is a list of normalized [forward, left_right_ratio, speed] vectors.
        """
        data_as_np = np.array(data).astype(float)
        predicted = self.model.predict(data_as_np)
        print("Predicted not round:" + str(predicted))
        predicted = np.around(predicted[0]).astype(int).tolist()
        return predicted
def maxValueListList(index, llist):
    """Return the largest value found at position *index* across all sub-lists of *llist*."""
    return max(llist, key=lambda row: row[index])[index]
def minValueListList(index, llist):
    """Return the smallest value found at position *index* across all sub-lists of *llist*."""
    return min(llist, key=lambda row: row[index])[index]
if __name__ == '__main__':
    # Smoke test: train the model from the dumped CSV, then run one prediction
    # on a hard-coded normalized input vector.
    learningManager = LearningManager()
    print ("------------------")
    print ("Input Array: scaledForward, scaledLeftRightRatio, scaledSpeed")
    testArray = [0.22880003720806005, 0.0, 0.11407798294863335]
    print ("Input Array: " + str(testArray))
    print("Prediction:[isTurningLeft, isTurningRight, isKeepingStraight, isAccelerating]")
    testPredict = learningManager.predict([testArray])
    print ("Predicted round: " + str(testPredict))
| [
"os.path.exists",
"os.path.getsize",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"keras.models.Sequential",
"os.chdir",
"numpy.array",
"os.path.isfile",
"numpy.random.seed",
"numpy.around",
"keras.layers.Dense",
"csv.reader"
] | [((697, 717), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (711, 717), True, 'import numpy as np\n'), ((1077, 1112), 'os.path.exists', 'os.path.exists', (['DATA_DUMP_DIRECTORY'], {}), '(DATA_DUMP_DIRECTORY)\n', (1091, 1112), False, 'import os\n'), ((5247, 5261), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (5255, 5261), False, 'import os\n'), ((1172, 1201), 'os.chdir', 'os.chdir', (['DATA_DUMP_DIRECTORY'], {}), '(DATA_DUMP_DIRECTORY)\n', (1180, 1201), False, 'import os\n'), ((1259, 1283), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (1273, 1283), False, 'import os\n'), ((5317, 5331), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (5325, 5331), True, 'import numpy as np\n'), ((1288, 1313), 'os.path.getsize', 'os.path.getsize', (['filepath'], {}), '(filepath)\n', (1303, 1313), False, 'import os\n'), ((1524, 1543), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (1534, 1543), False, 'import csv\n'), ((1813, 1835), 'numpy.asarray', 'np.asarray', (['whole_data'], {}), '(whole_data)\n', (1823, 1835), True, 'import numpy as np\n'), ((3753, 3821), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.3)', 'random_state': '(2)', 'shuffle': '(False)'}), '(X, Y, test_size=0.3, random_state=2, shuffle=False)\n', (3769, 3821), False, 'from sklearn.model_selection import train_test_split\n'), ((3851, 3870), 'numpy.asarray', 'np.asarray', (['X_TRAIN'], {}), '(X_TRAIN)\n', (3861, 3870), True, 'import numpy as np\n'), ((3900, 3918), 'numpy.asarray', 'np.asarray', (['X_TEST'], {}), '(X_TEST)\n', (3910, 3918), True, 'import numpy as np\n'), ((3949, 3968), 'numpy.asarray', 'np.asarray', (['Y_TRAIN'], {}), '(Y_TRAIN)\n', (3959, 3968), True, 'import numpy as np\n'), ((3998, 4016), 'numpy.asarray', 'np.asarray', (['Y_TEST'], {}), '(Y_TEST)\n', (4008, 4016), True, 'import numpy as np\n'), ((4329, 4341), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', 
(4339, 4341), False, 'from keras.models import Sequential\n'), ((4377, 4422), 'keras.layers.Dense', 'Dense', (['(8)'], {'input_shape': '(3,)', 'activation': '"""relu"""'}), "(8, input_shape=(3,), activation='relu')\n", (4382, 4422), False, 'from keras.layers import Dense, Activation\n'), ((4459, 4489), 'keras.layers.Dense', 'Dense', (['(4)'], {'activation': '"""sigmoid"""'}), "(4, activation='sigmoid')\n", (4464, 4489), False, 'from keras.layers import Dense, Activation\n'), ((5485, 5508), 'numpy.around', 'np.around', (['predicted[0]'], {}), '(predicted[0])\n', (5494, 5508), True, 'import numpy as np\n')] |
"""
# Custom colormap
This example shows how to create and use a custom colormap.
"""
import numpy as np
import numpy.random as nr
from datoviz import app, canvas, run, colormap
# Create the canvas, panel, and visual.
c = canvas(show_fps=True)
ctx = c.gpu().context()
panel = c.scene().panel(controller='panzoom')
visual = panel.visual('path', transform=None)
# Uniform parameters for the visual.
visual.data('linewidth', np.array([50]))
visual.data('cap_type', np.array([0]))
# Create a horizontal thick line.
n = 256
x = np.linspace(-1, 1, n)
y = np.zeros(n)
z = np.zeros(n)
pos = np.c_[x, y, z] # an (N, 3) array with the coordinates of the path vertices.
pos[:, 1] -= .25
# Create a first custom color map, ranging from red to green.
cmap = np.c_[np.arange(256), np.arange(256)[::-1], np.zeros(256), 255 * np.ones(256)]
ctx.colormap('mycmap0', cmap.astype(np.uint8))
# Add a first line.
visual.data('pos', pos)
visual.data('color', colormap(np.linspace(0, 1, n), cmap='mycmap0'))
# Create a second custom color map, ranging from green to blue.
cmap = np.c_[np.zeros(256), np.arange(256), np.arange(256)[::-1], 255 * np.ones(256)]
ctx.colormap('mycmap1', cmap.astype(np.uint8))
# Add a second line.
pos[:, 1] += .5
# NOTE: note the use of the .append() method here, to concatenate the array to the existing data.
visual.append('pos', pos)
visual.append('color', colormap(np.linspace(0, 1, n), cmap='mycmap1'))
# Set the length of each path.
visual.data('length', np.array([n, n]))
# Start the event loop.
run()
| [
"numpy.ones",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"datoviz.run",
"datoviz.canvas",
"numpy.arange"
] | [((227, 248), 'datoviz.canvas', 'canvas', ([], {'show_fps': '(True)'}), '(show_fps=True)\n', (233, 248), False, 'from datoviz import app, canvas, run, colormap\n'), ((530, 551), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'n'], {}), '(-1, 1, n)\n', (541, 551), True, 'import numpy as np\n'), ((556, 567), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (564, 567), True, 'import numpy as np\n'), ((572, 583), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (580, 583), True, 'import numpy as np\n'), ((1522, 1527), 'datoviz.run', 'run', ([], {}), '()\n', (1525, 1527), False, 'from datoviz import app, canvas, run, colormap\n'), ((428, 442), 'numpy.array', 'np.array', (['[50]'], {}), '([50])\n', (436, 442), True, 'import numpy as np\n'), ((468, 481), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (476, 481), True, 'import numpy as np\n'), ((1479, 1495), 'numpy.array', 'np.array', (['[n, n]'], {}), '([n, n])\n', (1487, 1495), True, 'import numpy as np\n'), ((760, 774), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (769, 774), True, 'import numpy as np\n'), ((798, 811), 'numpy.zeros', 'np.zeros', (['(256)'], {}), '(256)\n', (806, 811), True, 'import numpy as np\n'), ((955, 975), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (966, 975), True, 'import numpy as np\n'), ((1072, 1085), 'numpy.zeros', 'np.zeros', (['(256)'], {}), '(256)\n', (1080, 1085), True, 'import numpy as np\n'), ((1087, 1101), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (1096, 1101), True, 'import numpy as np\n'), ((1386, 1406), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (1397, 1406), True, 'import numpy as np\n'), ((776, 790), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (785, 790), True, 'import numpy as np\n'), ((819, 831), 'numpy.ones', 'np.ones', (['(256)'], {}), '(256)\n', (826, 831), True, 'import numpy as np\n'), ((1103, 1117), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', 
(1112, 1117), True, 'import numpy as np\n'), ((1131, 1143), 'numpy.ones', 'np.ones', (['(256)'], {}), '(256)\n', (1138, 1143), True, 'import numpy as np\n')] |
from motionAE.src.models import lstmCVAE2
from torch.distributions.kl import kl_divergence
from torch.distributions.normal import Normal
import torch
from motionAE.src.motionCVAETrainer import motionCVAETrainer
import numpy as np
class motionCVAE2Trainer(motionCVAETrainer):
def load_param(self, arg_parser, **kwargs):
super().load_param(arg_parser, **kwargs)
def build_model(self):
if self.architecture == 'lstmCVAE2':
self.model = lstmCVAE2(
self.input_length, self.dim_pose, self.dim_z, len(self.all_classes))
else:
raise(ValueError)
self.model = self.model.cuda()
def sample(self, batch_size=20, used_class=None, gpu=True):
if used_class is not None:
class_vector = self.one_hot_encoder.transform([used_class])
else:
class_vector = self.one_hot_encoder.transform([self.used_class])
class_vector = np.tile(class_vector, (batch_size, 1))
z_sample_shape = [batch_size]
z_sample_shape.extend([self.dim_z])
z_sample = np.random.normal(size=z_sample_shape)
self.model.decoder.eval()
if gpu:
z_sample = self._to_torch(z_sample)
class_vector = self._to_torch(class_vector)
else:
z_sample = torch.from_numpy(z_sample.astype(np.float32))
class_vector = torch.from_numpy(class_vector.astype(np.float32))
with torch.no_grad():
mu_c, log_var_c = self.model.encoder_class(class_vector)
std = torch.exp(0.5 * log_var_c)
z_sample = z_sample * std + mu_c
motions = self.model.decoder(z_sample, class_vector)
if gpu:
motions = self._to_numpy(motions)
else:
motions = motions.numpy()
self.model.decoder.train()
return motions
def kld_loss(self, *args):
mu = args[3]
log_var = args[4]
mu_c = args[6]
log_var_c = args[7]
q = Normal(mu, torch.exp(0.5 * log_var))
pi = Normal(mu_c, torch.exp(0.5 * log_var_c))
return torch.mean(torch.sum(kl_divergence(q, pi), dim=1))
# sigma1 : mu, log_var
# sigma2 : mu_c, log_var_c
# return torch.mean(-0.5 * torch.sum(1 - log_var_c + log_var - ((mu - mu_c) ** 2 + log_var.exp()) / log_var_c.exp() , dim=1), dim=0).cuda()
| [
"numpy.random.normal",
"numpy.tile",
"torch.exp",
"torch.distributions.kl.kl_divergence",
"torch.no_grad"
] | [((940, 978), 'numpy.tile', 'np.tile', (['class_vector', '(batch_size, 1)'], {}), '(class_vector, (batch_size, 1))\n', (947, 978), True, 'import numpy as np\n'), ((1081, 1118), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'z_sample_shape'}), '(size=z_sample_shape)\n', (1097, 1118), True, 'import numpy as np\n'), ((1450, 1465), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1463, 1465), False, 'import torch\n'), ((1554, 1580), 'torch.exp', 'torch.exp', (['(0.5 * log_var_c)'], {}), '(0.5 * log_var_c)\n', (1563, 1580), False, 'import torch\n'), ((2020, 2044), 'torch.exp', 'torch.exp', (['(0.5 * log_var)'], {}), '(0.5 * log_var)\n', (2029, 2044), False, 'import torch\n'), ((2072, 2098), 'torch.exp', 'torch.exp', (['(0.5 * log_var_c)'], {}), '(0.5 * log_var_c)\n', (2081, 2098), False, 'import torch\n'), ((2136, 2156), 'torch.distributions.kl.kl_divergence', 'kl_divergence', (['q', 'pi'], {}), '(q, pi)\n', (2149, 2156), False, 'from torch.distributions.kl import kl_divergence\n')] |
"""
A few important functions necessary for the end product are stored in this file.
"""
import xarray as xr
import numpy as np
import os
import datetime
plain_text_to_long = {
"Temperature at 2m": "2m_temperature",
"Lake Cover": "lake_cover",
"Friction Velocity": "friction_velocity",
"Cloud Base Height": "cloud_base_height",
"Snow Albedo": "snow_albedo",
"Sea Surface Temperature": "sea_surface_temperature",
"Zonal Wind at 10 m": "10m_u_component_of_wind",
"Meridional Wind at 10 m": "10m_v_component_of_wind",
"Surface Pressure": "surface_pressure",
"Soil Temperature": "soil_temperature_level_1",
"Boundary Layer Height": "boundary_layer_height",
"Low Cloud Cover": "low_cloud_cover",
"Medium Cloud Cover": "medium_cloud_cover",
"High Cloud Cover": "high_cloud_cover"
}
def path():
""" Gives directory to the path of the data folder
Returns
-------
file_dir : str
Path to data folder
"""
home = os.path.expanduser("~")
file_name = ".era5vis"
file_path = os.path.join(home, file_name)
path = open(file_path)
file_dir = path.read()
return file_dir
def era5_pos(lon, lat):
""" Returns nearest location available in the ERA5 dataset corresponding to
the given location.
Author: <NAME>
Parameters
----------
lon : float
The longitude
lat : float
The latitude
Returns
-------
float
Longitude and latitude available in ERA5
Raises
------
ValueError
When longitude or latitude are out of range
"""
# Check input
if abs(lat) > 90 or abs(lon) > 180:
raise ValueError('The given coordinates ({}, {}) '.format(lon, lat) +
'do not fit to the available data range.')
# Compute
dx = 180 + lon
# Round to 0.25
dx = float(round(dx * 4) / 4)
lat = float(round(lat * 4) / 4)
return dx, lat
def process_date(start_year, start_month, end_year, end_month):
""" Gives back a range of years from the selected star year to end year and
a range of months from the selected star month to end month.
Author: <NAME>
Parameters
----------
start_year: str
The start year
start_month: str
The start month
end_year: str
The end year
end_month: str
The end month
Returns
--------
year_range: list
List of string with all the range of years
month_range: list
List of strings with all the range of months
"""
if int(start_year) == int(end_year):
year_range = [str(start_year)]
month_range = list(range(int(start_month), int(end_month) + 1))
month_range = ["{0:0=2d}".format(i) for i in month_range]
elif int(start_year) < int(end_year):
year_range = list(range(int(start_year), int(end_year) + 1))
year_range = ["{0:0=2d}".format(i) for i in year_range]
if int(start_month) > int(end_month):
month_range = list(range(int(start_month), 13)) + \
list(range(1, int(end_month) + 1))
month_range = ["{0:0=2d}".format(i) for i in month_range]
month_range.sort()
else:
month_range = list(range(1, 13))
month_range = ["{0:0=2d}".format(i) for i in month_range]
return year_range, month_range
def variables_to_download(variables):
""" Creates two vectors containing information regarding the information that needs to be
downloaded in order to plot the variables that are desired
Author: Gorosti Mancho
Parameters
----------
variables: list of two strings containing the variables of interest
Returns
--------
chosen_variables: list of strings
List of string containing the variables that need to be downloaded
regions: list
List of strings containing the ENSO regions of interest
"""
chosen_variables = []
regions = []
for i in variables:
if i.split(' ', 1)[0] == 'ENSO':
regions = regions + [i.split(' ', 1)[1]]
elif i == 'Energy Budget':
chosen_variables = chosen_variables + ['surface_latent_heat_flux',
'surface_net_solar_radiation',
'surface_net_thermal_radiation',
'surface_sensible_heat_flux']
elif i == "Snow Depth":
chosen_variables = chosen_variables + ['snow_depth',
'snow_density']
else:
chosen_variables.append(plain_text_to_long.get(i))
return chosen_variables, regions
def clim(filein):
"""Returns monthly climatology for a given region.
Author: <NAME>
Parameters
----------
filein: netcdf file
original monthly sea surface temperature (sst) data for a given region
and period.
Returns
-------
xarray Dataset
Sea surface temperature (sst) monthly clmatology.
"""
# Open data
data = xr.open_dataset(filein)
# Check that data exists
if not os.path.exists(filein):
raise ValueError("The file" + filein + "does not exist.")
# Compute regional-monthly mean
mo_data = data.groupby('time.month').mean()
mean_data = mo_data.mean(dim=['latitude', 'longitude'])
return mean_data
def yearly_evol(clim, filein, syear, fyear, smonth, fmonth):
"""Returns monthly anomalies for a given region and a given monthly
climatology.
Author: <NAME>
Parameters
----------
clim: xarray Dataset
monthly climatology.
filein: netcdf file
original monthly-mean data.
syear: integer
first year of the period of study.
smonth: integer
first month of the period of study.
fyear: integer
final year of the period of study.
fmonth: integer
last month of the period of study.
Returns
-------
xarray Dataset
monthly mean sst anomalies during a 12-month period.
"""
# Check input dates
if (fyear - syear) > 20:
raise ValueError(
"Period of study can not exceed the climatology period"
" (20 years).")
if datetime.datetime(syear, smonth, 1) >= datetime.datetime(fyear, fmonth,
1):
raise ValueError("Non-consistent start and final dates.")
data = xr.open_dataset(filein)
# Spatial mean for the study period
region_mean = data.mean(dim=['latitude', 'longitude'])
period = slice(str(syear) + '-' + str(smonth) + '-01',
str(fyear) + '-' + str(fmonth) + '-01')
data_period = region_mean.sst.sel(time=period)
# Compare climatology to our period.
npclim = np.array(clim.sst)
headclim = npclim[(smonth - 1):12]
if (fyear - syear) == 0:
midclim = None
else:
midclim = np.repeat(npclim, fyear - syear - 1)
tailclim = npclim[0:fmonth]
if fyear == syear: # only one year
totclim = npclim[(smonth - 1):fmonth]
else:
if fmonth == 12: # last year is complete
totclim = np.concatenate((headclim, midclim))
if smonth == 1: # first year is complete
totclim = np.concatenate((midclim, tailclim))
if smonth == 1 & fmonth == 12:
totclim = midclim
else:
totclim = np.concatenate((headclim, midclim, tailclim))
# Compute anomaly
ano = data_period - totclim
return ano
| [
"datetime.datetime",
"os.path.exists",
"numpy.repeat",
"os.path.join",
"numpy.array",
"numpy.concatenate",
"xarray.open_dataset",
"os.path.expanduser"
] | [((1031, 1054), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1049, 1054), False, 'import os\n'), ((1100, 1129), 'os.path.join', 'os.path.join', (['home', 'file_name'], {}), '(home, file_name)\n', (1112, 1129), False, 'import os\n'), ((5338, 5361), 'xarray.open_dataset', 'xr.open_dataset', (['filein'], {}), '(filein)\n', (5353, 5361), True, 'import xarray as xr\n'), ((6801, 6824), 'xarray.open_dataset', 'xr.open_dataset', (['filein'], {}), '(filein)\n', (6816, 6824), True, 'import xarray as xr\n'), ((7160, 7178), 'numpy.array', 'np.array', (['clim.sst'], {}), '(clim.sst)\n', (7168, 7178), True, 'import numpy as np\n'), ((5406, 5428), 'os.path.exists', 'os.path.exists', (['filein'], {}), '(filein)\n', (5420, 5428), False, 'import os\n'), ((6579, 6614), 'datetime.datetime', 'datetime.datetime', (['syear', 'smonth', '(1)'], {}), '(syear, smonth, 1)\n', (6596, 6614), False, 'import datetime\n'), ((6618, 6653), 'datetime.datetime', 'datetime.datetime', (['fyear', 'fmonth', '(1)'], {}), '(fyear, fmonth, 1)\n', (6635, 6653), False, 'import datetime\n'), ((7303, 7339), 'numpy.repeat', 'np.repeat', (['npclim', '(fyear - syear - 1)'], {}), '(npclim, fyear - syear - 1)\n', (7312, 7339), True, 'import numpy as np\n'), ((7548, 7583), 'numpy.concatenate', 'np.concatenate', (['(headclim, midclim)'], {}), '((headclim, midclim))\n', (7562, 7583), True, 'import numpy as np\n'), ((7658, 7693), 'numpy.concatenate', 'np.concatenate', (['(midclim, tailclim)'], {}), '((midclim, tailclim))\n', (7672, 7693), True, 'import numpy as np\n'), ((7803, 7848), 'numpy.concatenate', 'np.concatenate', (['(headclim, midclim, tailclim)'], {}), '((headclim, midclim, tailclim))\n', (7817, 7848), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_volume_cluster_signal [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_volume_cluster_signal&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-signals-volume-clustering).
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from arpym.tools import trade_quote_processing, add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_volume_cluster_signal-parameters)
k_0 = 121 # index of the first trade within the time window
k_1 = 210 # index of the last trade within the time window
tau_hl = 5 # decay rate
w = 30 # trailing window
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_volume_cluster_signal-implementation-step00): Load data
# +
path = '../../../databases/global-databases/high-frequency/db_US_10yr_Future_quotestrades/'
quotes = pd.read_csv(path + 'quotes.csv', index_col=0, parse_dates=True)
trades = pd.read_csv(path + 'trades.csv', index_col=0, parse_dates=True)
dates_quotes = pd.to_datetime(quotes.index).date
# time vector of quotes
t = np.array(list(map(lambda x: x.timestamp(), pd.to_datetime(quotes.index))))
p_bid = np.array(quotes.loc[:, 'bid']) # best bids
p_ask = np.array(quotes.loc[:, 'ask']) # best asks
q_bid = np.array(quotes.loc[:, 'bsiz']) # bid sizes
q_ask = np.array(quotes.loc[:, 'asiz']) # ask sizes
dates_trades = pd.to_datetime(trades.index).date
# time vector of trades
t_k = np.array(list(map(lambda x: x.timestamp(),
pd.to_datetime(trades.index))))
p_last = np.array(trades.loc[:, 'price']) # last transaction values
delta_q = np.array(trades.loc[:, 'siz']) # flow of traded contracts' sizes
delta_sgn = np.array(trades.loc[:, 'aggress']) # trade sign flow
match = np.array(trades.loc[:, 'mtch']) # match events
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_volume_cluster_signal-implementation-step01): Process the database
t, _, q_ask, p_ask, q_bid, p_bid, t_k, _, p_last, delta_q, _,\
_ = trade_quote_processing(t, dates_quotes, q_ask, p_ask, q_bid,
p_bid, t_k, dates_trades, p_last, delta_q,
delta_sgn, match)
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_volume_cluster_signal-implementation-step02): Compute the traded price, the bid/ask prices, the bid/ask sizes and the microprice
# +
tick_time = np.arange(len(p_last[k_0:k_1+1]))
i_ = len(tick_time)
# last transaction value within the time window as a function of tick time
p_last_k = p_last[k_0:k_1+1] # traded price
# indexes of bid/ask prices near to the traded prices
ti = np.zeros(i_, dtype=int)
for i in range(i_):
ti[i] = np.where(t <= t_k[k_0+i])[0][-1]
p_ask = p_ask[ti] # ask price in tick time
p_bid = p_bid[ti] # bid price in tick time
q_bid = q_bid[ti]
q_ask = q_ask[ti]
# microprice in tick time
p_mic = (p_bid * q_ask+p_ask * q_bid) / (q_ask+q_bid)
p_mid = (p_bid + p_ask) / 2 # mid-price in tick time
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_volume_cluster_signal-implementation-step03): Compute the volume clustering signal
# +
s_vol_clus = np.zeros((i_,)) # initialization
nu = np.log(2) / tau_hl
gamma_w = 1 + sum(np.exp(-nu*np.arange(1, w,)))
s_vol_clus[0] = 1 / gamma_w*(delta_q[k_0] +
sum(np.exp((-nu) * np.arange(1, w,)) *
delta_q[k_0:k_0-(w - 1):-1]))
for i in range(i_):
s_vol_clus[i] = (1 - np.exp(-nu)) *\
delta_q[k_0 + i] +\
np.exp(-nu) * s_vol_clus[i-1]
# -
# ## Plots
# +
plt.style.use('arpm')
# colors
lgray = [0.8, 0.8, 0.8]
dgreen = [0, 0.6, 0]
orange = [0.94, 0.35, 0]
dred = [0.8, 0, 0.2]
t_dt = []
for i in t:
t_dt.append(datetime.fromtimestamp(i))
t_dt = np.array(t_dt)
# microprice, bid/ask price, bid/ask size, transaction value, mid-price
fig = plt.figure()
plt.subplot2grid((2, 1), (0, 0)) # axes settings
q_bid_res = p_bid-q_bid / 100000 # q_bid rescaled
q_ask_res = p_ask+q_ask / 100000 # q_ask rescaled
xtick = np.linspace(tick_time[0], tick_time[-1], 7, dtype=int)
plt.axis([np.min(tick_time), np.max(tick_time), 132.41, 132.53])
plt.xticks(xtick)
plt.yticks(np.arange(132.36, 132.53 + 0.02, 0.02))
plt.ylabel('Price')
plt.title('US 10 yr Future: {date}'.format(date=t_dt[0].strftime('%Y-%b-%d')))
plt.grid(True)
plt.plot(tick_time, q_bid_res, color=lgray)
p0 = plt.plot(tick_time, q_ask_res, color=lgray, label='bid/ask size')
p1 = plt.plot(tick_time, p_bid, color=dgreen, label='bid/ask price')
plt.plot(tick_time, p_ask, color=dgreen)
p3 = plt.plot([tick_time[:i_], tick_time[:i_]],
[p_last[k_0:k_1+1], p_last[k_0:k_1+1]], c='b', marker='.',
label='traded price')
p2 = plt.plot(tick_time, p_mic, color=orange, label='microprice')
plt.legend(handles=[p0[0], p1[0], p2[0], p3[0]])
# signal: exponential moving average of the traded volume with a fast decay
plt.subplot2grid((2, 1), (1, 0))
plt.axis([min(tick_time), max(tick_time), 0, 155])
plt.xticks(xtick)
plt.yticks(np.arange(0, 200, 50))
p4 = plt.plot(tick_time, delta_q[k_0:k_1+1], color='c', marker='.',
label='traded volume')
maxticktime = len(tick_time) - 1
p5 = plt.plot([tick_time[:maxticktime], tick_time[:maxticktime]],
[s_vol_clus[:maxticktime], s_vol_clus[:maxticktime]],
lw=1, color='k', marker='.', label='signal')
p6 = plt.plot(tick_time, np.tile(30, i_), color=dred,
label='increase order trigger')
plt.legend(handles=[p4[0], p5[0], p6[0]])
plt.ylabel('Volume')
plt.xlabel('Tick time')
plt.title('Signal: exponential moving average of the traded volume')
add_logo(fig, location=6)
plt.tight_layout()
| [
"matplotlib.pyplot.grid",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.array",
"pandas.to_datetime",
"numpy.arange",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"numpy.max",
"numpy.exp",
"numpy.linspace",
"nump... | [((1309, 1372), 'pandas.read_csv', 'pd.read_csv', (["(path + 'quotes.csv')"], {'index_col': '(0)', 'parse_dates': '(True)'}), "(path + 'quotes.csv', index_col=0, parse_dates=True)\n", (1320, 1372), True, 'import pandas as pd\n'), ((1382, 1445), 'pandas.read_csv', 'pd.read_csv', (["(path + 'trades.csv')"], {'index_col': '(0)', 'parse_dates': '(True)'}), "(path + 'trades.csv', index_col=0, parse_dates=True)\n", (1393, 1445), True, 'import pandas as pd\n'), ((1607, 1637), 'numpy.array', 'np.array', (["quotes.loc[:, 'bid']"], {}), "(quotes.loc[:, 'bid'])\n", (1615, 1637), True, 'import numpy as np\n'), ((1659, 1689), 'numpy.array', 'np.array', (["quotes.loc[:, 'ask']"], {}), "(quotes.loc[:, 'ask'])\n", (1667, 1689), True, 'import numpy as np\n'), ((1711, 1742), 'numpy.array', 'np.array', (["quotes.loc[:, 'bsiz']"], {}), "(quotes.loc[:, 'bsiz'])\n", (1719, 1742), True, 'import numpy as np\n'), ((1764, 1795), 'numpy.array', 'np.array', (["quotes.loc[:, 'asiz']"], {}), "(quotes.loc[:, 'asiz'])\n", (1772, 1795), True, 'import numpy as np\n'), ((1997, 2029), 'numpy.array', 'np.array', (["trades.loc[:, 'price']"], {}), "(trades.loc[:, 'price'])\n", (2005, 2029), True, 'import numpy as np\n'), ((2067, 2097), 'numpy.array', 'np.array', (["trades.loc[:, 'siz']"], {}), "(trades.loc[:, 'siz'])\n", (2075, 2097), True, 'import numpy as np\n'), ((2145, 2179), 'numpy.array', 'np.array', (["trades.loc[:, 'aggress']"], {}), "(trades.loc[:, 'aggress'])\n", (2153, 2179), True, 'import numpy as np\n'), ((2207, 2238), 'numpy.array', 'np.array', (["trades.loc[:, 'mtch']"], {}), "(trades.loc[:, 'mtch'])\n", (2215, 2238), True, 'import numpy as np\n'), ((2465, 2590), 'arpym.tools.trade_quote_processing', 'trade_quote_processing', (['t', 'dates_quotes', 'q_ask', 'p_ask', 'q_bid', 'p_bid', 't_k', 'dates_trades', 'p_last', 'delta_q', 'delta_sgn', 'match'], {}), '(t, dates_quotes, q_ask, p_ask, q_bid, p_bid, t_k,\n dates_trades, p_last, delta_q, delta_sgn, match)\n', (2487, 2590), 
False, 'from arpym.tools import trade_quote_processing, add_logo\n'), ((3099, 3122), 'numpy.zeros', 'np.zeros', (['i_'], {'dtype': 'int'}), '(i_, dtype=int)\n', (3107, 3122), True, 'import numpy as np\n'), ((3616, 3631), 'numpy.zeros', 'np.zeros', (['(i_,)'], {}), '((i_,))\n', (3624, 3631), True, 'import numpy as np\n'), ((4078, 4099), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""arpm"""'], {}), "('arpm')\n", (4091, 4099), True, 'import matplotlib.pyplot as plt\n'), ((4273, 4287), 'numpy.array', 'np.array', (['t_dt'], {}), '(t_dt)\n', (4281, 4287), True, 'import numpy as np\n'), ((4367, 4379), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4377, 4379), True, 'import matplotlib.pyplot as plt\n'), ((4380, 4412), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 1)', '(0, 0)'], {}), '((2, 1), (0, 0))\n', (4396, 4412), True, 'import matplotlib.pyplot as plt\n'), ((4540, 4594), 'numpy.linspace', 'np.linspace', (['tick_time[0]', 'tick_time[-1]', '(7)'], {'dtype': 'int'}), '(tick_time[0], tick_time[-1], 7, dtype=int)\n', (4551, 4594), True, 'import numpy as np\n'), ((4661, 4678), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xtick'], {}), '(xtick)\n', (4671, 4678), True, 'import matplotlib.pyplot as plt\n'), ((4730, 4749), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (4740, 4749), True, 'import matplotlib.pyplot as plt\n'), ((4829, 4843), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4837, 4843), True, 'import matplotlib.pyplot as plt\n'), ((4844, 4887), 'matplotlib.pyplot.plot', 'plt.plot', (['tick_time', 'q_bid_res'], {'color': 'lgray'}), '(tick_time, q_bid_res, color=lgray)\n', (4852, 4887), True, 'import matplotlib.pyplot as plt\n'), ((4893, 4958), 'matplotlib.pyplot.plot', 'plt.plot', (['tick_time', 'q_ask_res'], {'color': 'lgray', 'label': '"""bid/ask size"""'}), "(tick_time, q_ask_res, color=lgray, label='bid/ask size')\n", (4901, 4958), True, 'import matplotlib.pyplot as 
plt\n'), ((4964, 5027), 'matplotlib.pyplot.plot', 'plt.plot', (['tick_time', 'p_bid'], {'color': 'dgreen', 'label': '"""bid/ask price"""'}), "(tick_time, p_bid, color=dgreen, label='bid/ask price')\n", (4972, 5027), True, 'import matplotlib.pyplot as plt\n'), ((5028, 5068), 'matplotlib.pyplot.plot', 'plt.plot', (['tick_time', 'p_ask'], {'color': 'dgreen'}), '(tick_time, p_ask, color=dgreen)\n', (5036, 5068), True, 'import matplotlib.pyplot as plt\n'), ((5074, 5206), 'matplotlib.pyplot.plot', 'plt.plot', (['[tick_time[:i_], tick_time[:i_]]', '[p_last[k_0:k_1 + 1], p_last[k_0:k_1 + 1]]'], {'c': '"""b"""', 'marker': '"""."""', 'label': '"""traded price"""'}), "([tick_time[:i_], tick_time[:i_]], [p_last[k_0:k_1 + 1], p_last[k_0\n :k_1 + 1]], c='b', marker='.', label='traded price')\n", (5082, 5206), True, 'import matplotlib.pyplot as plt\n'), ((5231, 5291), 'matplotlib.pyplot.plot', 'plt.plot', (['tick_time', 'p_mic'], {'color': 'orange', 'label': '"""microprice"""'}), "(tick_time, p_mic, color=orange, label='microprice')\n", (5239, 5291), True, 'import matplotlib.pyplot as plt\n'), ((5292, 5340), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[p0[0], p1[0], p2[0], p3[0]]'}), '(handles=[p0[0], p1[0], p2[0], p3[0]])\n', (5302, 5340), True, 'import matplotlib.pyplot as plt\n'), ((5418, 5450), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 1)', '(1, 0)'], {}), '((2, 1), (1, 0))\n', (5434, 5450), True, 'import matplotlib.pyplot as plt\n'), ((5502, 5519), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xtick'], {}), '(xtick)\n', (5512, 5519), True, 'import matplotlib.pyplot as plt\n'), ((5560, 5652), 'matplotlib.pyplot.plot', 'plt.plot', (['tick_time', 'delta_q[k_0:k_1 + 1]'], {'color': '"""c"""', 'marker': '"""."""', 'label': '"""traded volume"""'}), "(tick_time, delta_q[k_0:k_1 + 1], color='c', marker='.', label=\n 'traded volume')\n", (5568, 5652), True, 'import matplotlib.pyplot as plt\n'), ((5698, 5866), 'matplotlib.pyplot.plot', 'plt.plot', 
(['[tick_time[:maxticktime], tick_time[:maxticktime]]', '[s_vol_clus[:maxticktime], s_vol_clus[:maxticktime]]'], {'lw': '(1)', 'color': '"""k"""', 'marker': '"""."""', 'label': '"""signal"""'}), "([tick_time[:maxticktime], tick_time[:maxticktime]], [s_vol_clus[:\n maxticktime], s_vol_clus[:maxticktime]], lw=1, color='k', marker='.',\n label='signal')\n", (5706, 5866), True, 'import matplotlib.pyplot as plt\n'), ((5988, 6029), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[p4[0], p5[0], p6[0]]'}), '(handles=[p4[0], p5[0], p6[0]])\n', (5998, 6029), True, 'import matplotlib.pyplot as plt\n'), ((6030, 6050), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Volume"""'], {}), "('Volume')\n", (6040, 6050), True, 'import matplotlib.pyplot as plt\n'), ((6051, 6074), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tick time"""'], {}), "('Tick time')\n", (6061, 6074), True, 'import matplotlib.pyplot as plt\n'), ((6075, 6143), 'matplotlib.pyplot.title', 'plt.title', (['"""Signal: exponential moving average of the traded volume"""'], {}), "('Signal: exponential moving average of the traded volume')\n", (6084, 6143), True, 'import matplotlib.pyplot as plt\n'), ((6144, 6169), 'arpym.tools.add_logo', 'add_logo', (['fig'], {'location': '(6)'}), '(fig, location=6)\n', (6152, 6169), False, 'from arpym.tools import trade_quote_processing, add_logo\n'), ((6170, 6188), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6186, 6188), True, 'import matplotlib.pyplot as plt\n'), ((1462, 1490), 'pandas.to_datetime', 'pd.to_datetime', (['quotes.index'], {}), '(quotes.index)\n', (1476, 1490), True, 'import pandas as pd\n'), ((1825, 1853), 'pandas.to_datetime', 'pd.to_datetime', (['trades.index'], {}), '(trades.index)\n', (1839, 1853), True, 'import pandas as pd\n'), ((3655, 3664), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (3661, 3664), True, 'import numpy as np\n'), ((4690, 4728), 'numpy.arange', 'np.arange', (['(132.36)', '(132.53 + 0.02)', '(0.02)'], 
{}), '(132.36, 132.53 + 0.02, 0.02)\n', (4699, 4728), True, 'import numpy as np\n'), ((5531, 5552), 'numpy.arange', 'np.arange', (['(0)', '(200)', '(50)'], {}), '(0, 200, 50)\n', (5540, 5552), True, 'import numpy as np\n'), ((5912, 5927), 'numpy.tile', 'np.tile', (['(30)', 'i_'], {}), '(30, i_)\n', (5919, 5927), True, 'import numpy as np\n'), ((4239, 4264), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['i'], {}), '(i)\n', (4261, 4264), False, 'from datetime import datetime\n'), ((4606, 4623), 'numpy.min', 'np.min', (['tick_time'], {}), '(tick_time)\n', (4612, 4623), True, 'import numpy as np\n'), ((4625, 4642), 'numpy.max', 'np.max', (['tick_time'], {}), '(tick_time)\n', (4631, 4642), True, 'import numpy as np\n'), ((1567, 1595), 'pandas.to_datetime', 'pd.to_datetime', (['quotes.index'], {}), '(quotes.index)\n', (1581, 1595), True, 'import pandas as pd\n'), ((1956, 1984), 'pandas.to_datetime', 'pd.to_datetime', (['trades.index'], {}), '(trades.index)\n', (1970, 1984), True, 'import pandas as pd\n'), ((3155, 3182), 'numpy.where', 'np.where', (['(t <= t_k[k_0 + i])'], {}), '(t <= t_k[k_0 + i])\n', (3163, 3182), True, 'import numpy as np\n'), ((4027, 4038), 'numpy.exp', 'np.exp', (['(-nu)'], {}), '(-nu)\n', (4033, 4038), True, 'import numpy as np\n'), ((3703, 3718), 'numpy.arange', 'np.arange', (['(1)', 'w'], {}), '(1, w)\n', (3712, 3718), True, 'import numpy as np\n'), ((3943, 3954), 'numpy.exp', 'np.exp', (['(-nu)'], {}), '(-nu)\n', (3949, 3954), True, 'import numpy as np\n'), ((3814, 3829), 'numpy.arange', 'np.arange', (['(1)', 'w'], {}), '(1, w)\n', (3823, 3829), True, 'import numpy as np\n')] |
"""
Micro Code 7.0.0
Author <NAME>
"""
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
import numpy as np
import base64
import zlib
import cv2
class MicroCode:
    """Hide short text messages inside generated PNG images.

    Pipeline: zlib-compress -> Fernet-encrypt (key derived from the
    password via PBKDF2) -> store each character's code point in
    successive BGR channel bytes of an image.  `read()` reverses it.
    """

    def compress(self, data):
        """Return the zlib-compressed form of `data` (latin1-decoded to a
        str) when it is shorter than the original, else `data` unchanged."""
        packed = zlib.compress(data.encode())
        # Only keep the compressed form when it actually saves space;
        # de_compress() detects which form it received.
        if len(packed) < len(data):
            return packed.decode(encoding='latin1')
        return data

    def de_compress(self, data):
        """Inverse of compress(): returns the decompressed text, or the
        input unchanged if it was never compressed (zlib raises on
        non-zlib input; non-str input such as False also falls through)."""
        try:
            return zlib.decompress(data.encode(encoding='latin1')).decode()
        except Exception:
            return data

    def encrypt_(self, data, password):
        """Single Fernet encryption round (`data`: bytes, `password`: key)."""
        return Fernet(password).encrypt(data)

    def encrypt(self, data, password, diff):
        """Encrypt text `diff` times with a key derived from `password`."""
        key = self.password_to_key(password)
        data = data.encode()
        for _ in range(diff):
            data = self.encrypt_(data, key)
        return data.decode()

    def decrypt_(self, data, password):
        """Single Fernet decryption round."""
        return Fernet(password).decrypt(data)

    def decrypt(self, data, password, diff):
        """Undo `diff` encryption rounds; returns False on any failure
        (wrong password / corrupted data), matching the original contract."""
        key = self.password_to_key(password)
        data = data.encode()
        try:
            for _ in range(diff):
                data = self.decrypt_(data, key)
            return data.decode()
        except Exception:
            return False

    def password_to_key(self, password):
        """Derive a urlsafe-base64 Fernet key from `password` with
        PBKDF2-HMAC-SHA256 (100000 iterations).

        NOTE(review): the salt is hard-coded, so the same password always
        yields the same key — required here because no salt can be stored
        alongside the image payload.
        """
        salt = b'.-Kh)ura/)\xcef\xc8\x88u\xc2'
        kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt,
                         iterations=100000, backend=default_backend())
        return base64.urlsafe_b64encode(kdf.derive(password.encode()))

    def read(self, file_name, password, mode="normal", start_pos=(0, 0), dimed_brightness=0, brightness=0, difficulty=1):
        """Recover the message hidden in `file_name`.

        Returns the decoded string (False-ish values propagate from
        decrypt() on a wrong password), or the tuple (False, "") when the
        image cannot be loaded (original behavior kept).

        BUGFIX/cleanup: the original `if mode == "normal"` and `else`
        branches were byte-identical, so the duplication is removed;
        `mode` is kept in the signature for caller compatibility.
        """
        pos_x, pos_y = start_pos
        data = ""
        try:
            img = cv2.imread(file_name)
            img = np.array(img)
            img = img.tolist()
        except Exception:
            return False, data
        ascii_list = []
        col = 0
        row = 0
        # Once `row` reaches pos_y it never advances again, so every row
        # from pos_y on is scanned; same for columns from pos_x on
        # (original traversal semantics preserved).
        for row_pixels in img:
            if row == pos_y:
                for pixel in row_pixels:
                    if col == pos_x:
                        for channel_val in pixel:
                            # Zero bytes are padding, not data.
                            if channel_val != 0:
                                ascii_list.append(channel_val + dimed_brightness - brightness)
                    else:
                        col += 1
            else:
                row += 1
        data = self.convert_ascii_list_to_chr(ascii_list)
        data = self.decrypt(data, password, difficulty)
        return self.de_compress(data)

    def to_ascii(self, raw_data):
        """Map each character of `raw_data` to its code point."""
        return [ord(ch) for ch in raw_data]

    def convert_ascii_list_to_chr(self, ascii_list):
        """Join non-zero code points back into a string (zeros are padding)."""
        return "".join(chr(int(code)) for code in ascii_list if code != 0)

    def list_to_str_ascii(self, list_):
        """Concatenate code points, each zero-padded to 3 digits.

        Values with more than 3 digits are silently dropped (original
        behavior — the fall-through branch never appended them).
        """
        parts = []
        for code in list_:
            text = str(code)
            if 1 <= len(text) <= 3:
                parts.append(text.zfill(3))
        return "".join(parts)

    def inversed_pattern(self, raw):
        """Return the inverse mapping of `raw`: {str(value): str(key)}."""
        raw = dict(raw)
        return {str(value): str(key) for key, value in raw.items()}

    def write(self, file_name, data_, password, mode="normal", size=(200, 200), start_pos=(0, 0), dim_brightness=0, brightness=0, vertical_spacing=0, spacing=0, difficulty=1):
        """Hide `data_` inside a freshly generated image saved to `file_name`.

        mode "normal": each code point goes into the next BGR channel
        byte, cycling channels then advancing a pixel, with `spacing`
        blank columns between writes.  Any other mode writes one byte per
        pixel (channel 0 only) with optional horizontal/vertical spacing.
        Data that does not fit in `size` is silently truncated.
        """
        x_size, y_size = size
        pos_x, pos_y = start_pos
        data_ = self.compress(data_)
        payload = self.to_ascii(self.encrypt(data_, password, difficulty))
        micro_code = np.zeros((x_size, y_size, 3), dtype=np.uint8)
        channel = 0
        col = pos_x
        row = pos_y
        if mode == "normal":
            for code in payload:
                code = int(code)
                col = col + spacing  # horizontal gap before each byte
                micro_code[row][col][channel] = code - dim_brightness + brightness
                if channel == 2:
                    # cycled through B, G, R -> advance to the next pixel
                    channel = 0
                    col += 1
                else:
                    channel += 1
                if col == x_size:
                    # wrap to the next row
                    col = 0
                    row += 1
                if row == y_size - 1:
                    break  # image full: truncate remaining data
        else:
            for code in payload:
                code = int(code)
                if col + spacing < x_size:
                    col = col + spacing
                    row = row + vertical_spacing
                micro_code[row][col][channel] = code - dim_brightness + brightness
                col += 1  # channel stays 0 in this mode (original behavior)
                if col == x_size:
                    col = 0
                    row += 1
                if row == y_size - 1:
                    break
        cv2.imwrite(file_name, micro_code)
if __name__ == '__main__':
    # Demo round-trip: hide a message in an image, then recover it.
    micro = MicroCode()
    micro.write("Hello_world_1.png", "hello", "hi")
    print(micro.read("Hello_world_1.png", "hi"))
| [
"cv2.imwrite",
"zlib.compress",
"cryptography.hazmat.primitives.hashes.SHA256",
"cryptography.fernet.Fernet",
"numpy.zeros",
"cryptography.hazmat.backends.default_backend",
"numpy.array",
"cv2.imread",
"zlib.decompress"
] | [((402, 422), 'zlib.compress', 'zlib.compress', (['data_'], {}), '(data_)\n', (415, 422), False, 'import zlib\n'), ((833, 849), 'cryptography.fernet.Fernet', 'Fernet', (['password'], {}), '(password)\n', (839, 849), False, 'from cryptography.fernet import Fernet\n'), ((1151, 1167), 'cryptography.fernet.Fernet', 'Fernet', (['password'], {}), '(password)\n', (1157, 1167), False, 'from cryptography.fernet import Fernet\n'), ((686, 708), 'zlib.decompress', 'zlib.decompress', (['data_'], {}), '(data_)\n', (701, 708), False, 'import zlib\n'), ((5741, 5786), 'numpy.zeros', 'np.zeros', (['(x_size, y_size, 3)'], {'dtype': 'np.uint8'}), '((x_size, y_size, 3), dtype=np.uint8)\n', (5749, 5786), True, 'import numpy as np\n'), ((6502, 6536), 'cv2.imwrite', 'cv2.imwrite', (['file_name', 'micro_code'], {}), '(file_name, micro_code)\n', (6513, 6536), False, 'import cv2\n'), ((6576, 6621), 'numpy.zeros', 'np.zeros', (['(x_size, y_size, 3)'], {'dtype': 'np.uint8'}), '((x_size, y_size, 3), dtype=np.uint8)\n', (6584, 6621), True, 'import numpy as np\n'), ((7325, 7359), 'cv2.imwrite', 'cv2.imwrite', (['file_name', 'micro_code'], {}), '(file_name, micro_code)\n', (7336, 7359), False, 'import cv2\n'), ((1667, 1682), 'cryptography.hazmat.primitives.hashes.SHA256', 'hashes.SHA256', ([], {}), '()\n', (1680, 1682), False, 'from cryptography.hazmat.primitives import hashes\n'), ((1733, 1750), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (1748, 1750), False, 'from cryptography.hazmat.backends import default_backend\n'), ((2103, 2124), 'cv2.imread', 'cv2.imread', (['file_name'], {}), '(file_name)\n', (2113, 2124), False, 'import cv2\n'), ((2148, 2162), 'numpy.array', 'np.array', (['file'], {}), '(file)\n', (2156, 2162), True, 'import numpy as np\n'), ((3143, 3164), 'cv2.imread', 'cv2.imread', (['file_name'], {}), '(file_name)\n', (3153, 3164), False, 'import cv2\n'), ((3188, 3202), 'numpy.array', 'np.array', (['file'], {}), '(file)\n', (3196, 3202), 
True, 'import numpy as np\n')] |
import os
import sys
import random
import datetime
import time
import shutil
import numpy as np
import pandas as pd
import scipy.io
import scipy.signal
import math
from skimage.measure import compare_ssim as sk_ssim
import torch
from torch import nn
class ProgressMeter(object):
    """Formats and prints one progress line per batch from a set of meters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print the prefix, the formatted batch counter, then every meter,
        tab-separated."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        """Build e.g. '[{:3d}/100]' so the counter is right-aligned to the
        width of the total batch count."""
        width = len(str(num_batches // 1))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
class AverageMeter(object):
    """Tracks the latest value, running sum/count, and average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt  # format spec used when rendering val/avg
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. with fmt=':.2f': 'loss 4.00 (2.50)'
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
def calc_psnr(output, target):
    """PSNR-style score between two tensors.

    NOTE(review): standard PSNR is 10*log10(MAX**2 / MSE); this computes
    10*log10(max(output) / MSE).  Preserved as-is since callers may rely
    on these values for relative comparison — confirm before changing.
    """
    mse = nn.MSELoss()(output, target)
    return 10 * math.log10(torch.max(output) / mse)
def calc_ssim(output, target):
    """Mean SSIM between `output` and `target` torch tensors.

    Tensors are detached to CPU numpy.  For a 4-D batch (assumed
    N, C, H, W — TODO confirm against callers) the per-item SSIMs are
    averaged; otherwise a single (C, H, W) pair is scored.
    """
    output = output.cpu().detach().numpy()
    target = target.cpu().detach().numpy()
    ssim = 0.
    if output.ndim == 4:
        # BUGFIX: batch_size was re-assigned on every loop iteration.
        batch_size = output.shape[0]
        for i in range(batch_size):
            output_i = np.moveaxis(np.squeeze(output[i, :, :, :]), 0, -1)
            target_i = np.moveaxis(np.squeeze(target[i, :, :, :]), 0, -1)
            # BUGFIX: data_range must be the dynamic range of the
            # reference image; the original used
            # output_i.max() - target_i.max(), which is ~0 for similar
            # images and made skimage mis-scale the comparison.
            ssim += sk_ssim(output_i, target_i,
                            data_range=target_i.max() - target_i.min(),
                            multichannel=True)
    else:
        batch_size = 1
        output_i = np.moveaxis(np.squeeze(output), 0, -1)
        target_i = np.moveaxis(np.squeeze(target), 0, -1)
        ssim += sk_ssim(output_i, target_i,
                        data_range=target_i.max() - target_i.min(),
                        multichannel=True)
    return ssim / batch_size
"numpy.moveaxis",
"torch.nn.MSELoss",
"torch.max",
"numpy.squeeze"
] | [((1500, 1512), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1510, 1512), False, 'from torch import nn\n'), ((2205, 2223), 'numpy.squeeze', 'np.squeeze', (['output'], {}), '(output)\n', (2215, 2223), True, 'import numpy as np\n'), ((2244, 2272), 'numpy.moveaxis', 'np.moveaxis', (['output_i', '(0)', '(-1)'], {}), '(output_i, 0, -1)\n', (2255, 2272), True, 'import numpy as np\n'), ((2293, 2311), 'numpy.squeeze', 'np.squeeze', (['target'], {}), '(target)\n', (2303, 2311), True, 'import numpy as np\n'), ((2332, 2360), 'numpy.moveaxis', 'np.moveaxis', (['target_i', '(0)', '(-1)'], {}), '(target_i, 0, -1)\n', (2343, 2360), True, 'import numpy as np\n'), ((1832, 1862), 'numpy.squeeze', 'np.squeeze', (['output[i, :, :, :]'], {}), '(output[i, :, :, :])\n', (1842, 1862), True, 'import numpy as np\n'), ((1884, 1912), 'numpy.moveaxis', 'np.moveaxis', (['output_i', '(0)', '(-1)'], {}), '(output_i, 0, -1)\n', (1895, 1912), True, 'import numpy as np\n'), ((1937, 1967), 'numpy.squeeze', 'np.squeeze', (['target[i, :, :, :]'], {}), '(target[i, :, :, :])\n', (1947, 1967), True, 'import numpy as np\n'), ((1989, 2017), 'numpy.moveaxis', 'np.moveaxis', (['target_i', '(0)', '(-1)'], {}), '(target_i, 0, -1)\n', (2000, 2017), True, 'import numpy as np\n'), ((1557, 1574), 'torch.max', 'torch.max', (['output'], {}), '(output)\n', (1566, 1574), False, 'import torch\n')] |
from datetime import datetime
import os.path
import time
import tensorflow.python.platform
from tensorflow.python.platform import gfile
import numpy as np
from six.moves import xrange
import tensorflow as tf
from CIFAR import cifar10
# Command-line flags for the training script (legacy tf.app.flags API).
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string( 'train_dir', '../cifar10_data', """Directory where to write event log""" )
tf.app.flags.DEFINE_integer( 'max_steps', 1000000, """Number of batches to run.""" )
tf.app.flags.DEFINE_boolean( 'log_device_placement', False, """Whether to log device placement""" )
def train():
    """Train the CIFAR-10 model for FLAGS.max_steps batches.

    Builds the graph (inputs -> inference -> loss -> train op), then runs
    the loop, printing loss every 10 steps, writing summaries every 100,
    and saving checkpoints to FLAGS.train_dir every 1000 steps.
    """
    with tf.Graph().as_default():
        global_step = tf.Variable( 0, trainable = False )
        images, labels = cifar10.distorted_inputs()
        logits = cifar10.inference( images )
        loss = cifar10.loss( logits, labels )
        # BUGFIX: was `train_op - cifar10.train( loss, gloal_step )` —
        # `-` discarded the op and `gloal_step` was an undefined name.
        train_op = cifar10.train( loss, global_step )
        saver = tf.train.Saver( tf.all_variables() )
        summary_op = tf.summary.merge_all()
        init = tf.initialize_all_variables()
        sess = tf.Session( config = tf.ConfigProto( log_device_placement = FLAGS.log_device_placement ) )
        sess.run( init )
        tf.train.start_queue_runners( sess = sess )
        # BUGFIX: `tf.train.SumnarWriter` was a typo for SummaryWriter.
        summary_writer = tf.train.SummaryWriter( FLAGS.train_dir, graph_def = sess.graph_def )
        for step in xrange( FLAGS.max_steps ):
            start_time = time.time()
            _, loss_value = sess.run( [train_op, loss] )
            duration = time.time() - start_time
            assert not np.isnan( loss_value ), 'Model diverged with loss = NaN'
            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float( duration )
                format_str = ( '%s, step %d, loss = %.2f ( %.1f examples / sec; %.3f sec / batch' )
                print( format_str % ( datetime.now(), step, loss_value, examples_per_sec, sec_per_batch ) )
            if step % 100 == 0:
                # BUGFIX: `summary_write` was an undefined name.
                summary_str = sess.run( summary_op )
                summary_writer.add_summary( summary_str, step )
            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join( FLAGS.train_dir, 'model.ckpt' )
                saver.save( sess, checkpoint_path, global_step = step )
def main( argv = None ):
    """Script entry point: fetch CIFAR-10 if needed, reset the training
    directory, then run the training loop."""
    cifar10.maybe_download_and_extract()
    train_dir = FLAGS.train_dir
    # Start every run from a clean event/checkpoint directory.
    if gfile.Exists(train_dir):
        gfile.DeleteRecursively(train_dir)
    gfile.MakeDirs(train_dir)
    train()
if __name__ == '__main__':
    # tf.app.run() parses the command-line flags, then invokes main(argv).
    tf.app.run()
| [
"CIFAR.cifar10.distorted_inputs",
"six.moves.xrange",
"tensorflow.app.run",
"tensorflow.Graph",
"tensorflow.python.platform.gfile.MakeDirs",
"tensorflow.train.SumnarWriter",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.ConfigProto",
"tensorflow.python.platform.gfile.DeleteRecursively",
"tens... | [((266, 366), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""train_dir"""', '"""../cifar10_data"""', '"""Directory where to write event log"""'], {}), "('train_dir', '../cifar10_data',\n 'Directory where to write event log')\n", (292, 366), True, 'import tensorflow as tf\n'), ((369, 447), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_steps"""', '(1000000)', '"""Number of batches to run."""'], {}), "('max_steps', 1000000, 'Number of batches to run.')\n", (396, 447), True, 'import tensorflow as tf\n'), ((454, 551), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""log_device_placement"""', '(False)', '"""Whether to log device placement"""'], {}), "('log_device_placement', False,\n 'Whether to log device placement')\n", (481, 551), True, 'import tensorflow as tf\n'), ((624, 655), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (635, 655), True, 'import tensorflow as tf\n'), ((686, 712), 'CIFAR.cifar10.distorted_inputs', 'cifar10.distorted_inputs', ([], {}), '()\n', (710, 712), False, 'from CIFAR import cifar10\n'), ((731, 756), 'CIFAR.cifar10.inference', 'cifar10.inference', (['images'], {}), '(images)\n', (748, 756), False, 'from CIFAR import cifar10\n'), ((775, 803), 'CIFAR.cifar10.loss', 'cifar10.loss', (['logits', 'labels'], {}), '(logits, labels)\n', (787, 803), False, 'from CIFAR import cifar10\n'), ((936, 958), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (956, 958), True, 'import tensorflow as tf\n'), ((975, 1004), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (1002, 1004), True, 'import tensorflow as tf\n'), ((1146, 1185), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (1174, 1185), True, 'import tensorflow as tf\n'), ((1216, 1280), 'tensorflow.train.SumnarWriter', 
'tf.train.SumnarWriter', (['FLAGS.train_dir'], {'graph_def': 'sess.graph_def'}), '(FLAGS.train_dir, graph_def=sess.graph_def)\n', (1237, 1280), True, 'import tensorflow as tf\n'), ((1305, 1328), 'six.moves.xrange', 'xrange', (['FLAGS.max_steps'], {}), '(FLAGS.max_steps)\n', (1311, 1328), False, 'from six.moves import xrange\n'), ((2377, 2413), 'CIFAR.cifar10.maybe_download_and_extract', 'cifar10.maybe_download_and_extract', ([], {}), '()\n', (2411, 2413), False, 'from CIFAR import cifar10\n'), ((2425, 2454), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (2437, 2454), False, 'from tensorflow.python.platform import gfile\n'), ((2521, 2552), 'tensorflow.python.platform.gfile.MakeDirs', 'gfile.MakeDirs', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (2535, 2552), False, 'from tensorflow.python.platform import gfile\n'), ((2611, 2623), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (2621, 2623), True, 'import tensorflow as tf\n'), ((826, 857), 'CIFAR.cifar10.train', 'cifar10.train', (['loss', 'gloal_step'], {}), '(loss, gloal_step)\n', (839, 857), False, 'from CIFAR import cifar10\n'), ((893, 911), 'tensorflow.all_variables', 'tf.all_variables', ([], {}), '()\n', (909, 911), True, 'import tensorflow as tf\n'), ((1357, 1368), 'time.time', 'time.time', ([], {}), '()\n', (1366, 1368), False, 'import time\n'), ((2470, 2510), 'tensorflow.python.platform.gfile.DeleteRecursively', 'gfile.DeleteRecursively', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (2493, 2510), False, 'from tensorflow.python.platform import gfile\n'), ((577, 587), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (585, 587), True, 'import tensorflow as tf\n'), ((1042, 1105), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': 'FLAGS.log_device_placement'}), '(log_device_placement=FLAGS.log_device_placement)\n', (1056, 1105), True, 'import tensorflow as tf\n'), ((1449, 1460), 'time.time', 'time.time', ([], 
{}), '()\n', (1458, 1460), False, 'import time\n'), ((1498, 1518), 'numpy.isnan', 'np.isnan', (['loss_value'], {}), '(loss_value)\n', (1506, 1518), True, 'import numpy as np\n'), ((1901, 1915), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1913, 1915), False, 'from datetime import datetime\n')] |
# -*- coding: utf-8 -*-
from sys import exit
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from capture_core import *
# 使用matplotlib绘制柱状图
import numpy as np
import matplotlib.pyplot as plt
import json
from monitor_system import start_monitor
from forged_packet import startForged
from multiprocessing import Process
class Ui_MainWindow(QMainWindow):
    """Main window of the WireWhale packet-capture GUI (PyQt5)."""
    core = None     # capture engine; set to Core(self) in setupUi
    timer = None    # QTimer keeping the packet list scrolled to bottom; set in setupUi
    Monitor = None  # child Process running the traffic monitor (terminated on close)
    Forged = None   # child Process running the packet-forging tool (terminated on close)
    def setupUi(self):
        """Build the entire main-window UI.

        Creates the packet-list table, the protocol-detail tree, the hex
        view, the filter bar with its start button, the NIC selector, all
        menu/toolbar actions, and the status bar; finally wires up the
        capture Core and the auto-scroll timer and shows the window.
        """
        self.setWindowTitle("WireWhale")
        self.resize(950, 580)
        # Application icon
        icon = QIcon()
        icon.addPixmap(QPixmap("img/shark.jpg"), QIcon.Normal, QIcon.Off)
        self.setWindowIcon(icon)
        self.setIconSize(QSize(20, 20))
        # Central widget, transparent so the painted background shows through
        self.centralWidget = QWidget(self)
        self.centralWidget.setStyleSheet("background:transparent;")
        # Grid layout so contents resize with the window
        self.gridLayout = QGridLayout(self.centralWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setSpacing(6)
        # Layout for the top row of controls (filter bar, start button, NIC box)
        self.horizontalLayout = QHBoxLayout()
        self.horizontalLayout.setContentsMargins(10, 2, 10, 1)
        self.horizontalLayout.setSpacing(20)
        # Layout holding the three stacked display areas
        self.verticalLayout = QVBoxLayout()
        self.verticalLayout.setContentsMargins(10, 0, 3, 10)
        self.verticalLayout.setSpacing(6)
        # Initial main-window font, persisted in data.json
        font = QFont()
        with open('data.json', 'r') as file_obj:
            '''读取json文件'''
            old_font = json.load(file_obj)  # returns list data; dicts are supported too
        if old_font["font"]:
            font.setFamily(old_font["font"])
            font.setPointSize(int(old_font["size"]))
        else:
            # First run: choose a platform-appropriate monospace default
            if platform == 'Windows':
                font.setFamily("Lucida Sans Typewriter")
                old_font["font"] = "Lucida Sans Typewriter"
            if platform == "Linux":
                font.setFamily("Noto Mono")
                old_font["font"] = "Noto Mono"
            font.setPointSize(11)
            with open('data.json', 'w') as file_obj:
                '''写入json文件'''
                json.dump(old_font, file_obj)
        # Packet-list view (one row per captured packet)
        self.info_tree = QTreeWidget(self.centralWidget)
        self.info_tree.setFrameStyle(QFrame.Box | QFrame.Plain)
        self.info_tree.setAutoScroll(True)
        self.info_tree.setRootIsDecorated(False)
        self.info_tree.setFont(font)
        self.info_tree.setColumnCount(7)  # table has 7 columns
        # Fixed row height: skips re-laying-out every row on refresh,
        # keeping live updates smooth
        self.info_tree.setUniformRowHeights(True)
        # Column headers
        self.info_tree.headerItem().setText(0, "No.")
        self.info_tree.headerItem().setText(1, "Time")
        self.info_tree.headerItem().setText(2, "Source")
        self.info_tree.headerItem().setText(3, "Destination")
        self.info_tree.headerItem().setText(4, "Protocol")
        self.info_tree.headerItem().setText(5, "Length")
        self.info_tree.headerItem().setText(6, "Info")
        self.info_tree.setStyleSheet("background:transparent;")
        self.info_tree.setSortingEnabled(True)
        self.info_tree.sortItems(0, Qt.AscendingOrder)
        self.info_tree.setColumnWidth(0, 75)
        self.info_tree.setColumnWidth(1, 130)
        self.info_tree.setColumnWidth(2, 150)
        self.info_tree.setColumnWidth(3, 150)
        self.info_tree.setColumnWidth(4, 85)
        self.info_tree.setColumnWidth(5, 60)
        for i in range(7):
            self.info_tree.headerItem().setBackground(i,
                                                      QBrush(QColor(Qt.white)))
        self.info_tree.setSelectionBehavior(
            QTreeWidget.SelectRows)  # select whole rows
        self.info_tree.setSelectionMode(QTreeWidget.SingleSelection)  # one row selectable at a time
        """显示排序图标"""
        self.info_tree.header().setSortIndicatorShown(True)
        self.info_tree.clicked.connect(self.on_tableview_clicked)
        # Packet-detail tree (decoded protocol layers of the selected packet)
        self.treeWidget = QTreeWidget(self.centralWidget)
        self.treeWidget.setAutoScroll(True)
        self.treeWidget.setTextElideMode(Qt.ElideMiddle)
        self.treeWidget.header().setStretchLastSection(True)
        self.treeWidget.setStyleSheet("background:transparent; color:white;")
        self.treeWidget.header().hide()
        self.treeWidget.setFont(font)
        # single column only
        self.treeWidget.setColumnCount(1)
        self.treeWidget.setFrameStyle(QFrame.Box | QFrame.Plain)
        # Hex dump area
        self.hexBrowser = QTextBrowser(self.centralWidget)
        self.hexBrowser.setText("")
        self.hexBrowser.setFont(font)
        self.hexBrowser.setStyleSheet("background:transparent; color:white;")
        self.hexBrowser.setFrameStyle(QFrame.Box | QFrame.Plain)
        # Vertical splitter lets the user drag the boundaries of the three panes
        self.splitter = QSplitter(Qt.Vertical)
        self.splitter.addWidget(self.info_tree)
        self.splitter.addWidget(self.treeWidget)
        self.splitter.addWidget(self.hexBrowser)
        self.verticalLayout.addWidget(self.splitter)
        self.gridLayout.addLayout(self.verticalLayout, 1, 0, 1, 1)
        # Capture-filter input box
        self.Filter = QLineEdit(self.centralWidget)
        self.Filter.setPlaceholderText("Apply a capture filter … ")
        self.Filter.setStyleSheet("background:white")
        self.Filter.setFont(font)
        self.horizontalLayout.addWidget(self.Filter)
        # Filter/start button
        self.FilterButton = QPushButton(self.centralWidget)
        self.FilterButton.setText("开始")
        icon1 = QIcon()
        icon1.addPixmap(QPixmap("img/go.png"), QIcon.Normal, QIcon.Off)
        self.FilterButton.setIcon(icon1)
        self.FilterButton.setIconSize(QSize(20, 20))
        self.FilterButton.setStyleSheet("background:white")
        self.FilterButton.clicked.connect(self.on_start_action_clicked)
        self.horizontalLayout.addWidget(self.FilterButton)
        """
        网卡选择框
        """
        self.choose_nicbox = QComboBox(self.centralWidget)
        self.choose_nicbox.setFont(font)
        self.choose_nicbox.setStyleSheet("background:white; color:black;")
        self.horizontalLayout.addWidget(self.choose_nicbox)
        self.horizontalLayout.setStretch(0, 8)
        self.horizontalLayout.setStretch(1, 1)
        self.horizontalLayout.setStretch(2, 4)
        self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
        """初始网卡复选框"""
        row_num = len(keys)
        self.choose_nicbox.addItem("All")
        for i in range(row_num):
            self.choose_nicbox.addItem(keys[i])
        self.setCentralWidget(self.centralWidget)
        """
        顶部菜单栏
        """
        self.menuBar = QMenuBar(self)
        self.menuBar.setGeometry(QRect(0, 0, 953, 23))
        self.menuBar.setAccessibleName("")
        self.menuBar.setDefaultUp(True)
        self.menu_F = QMenu(self.menuBar)
        self.menu_F.setTitle("文件(F)")
        self.edit_menu = QMenu(self.menuBar)
        self.edit_menu.setTitle("编辑(E)")
        self.capture_menu = QMenu(self.menuBar)
        self.capture_menu.setTitle("捕获(C)")
        self.menu_H = QMenu(self.menuBar)
        self.menu_H.setTitle("帮助(H)")
        self.menu_Analysis = QMenu(self.menuBar)
        self.menu_Analysis.setTitle("分析(A)")
        self.menu_Statistic = QMenu(self.menuBar)
        self.menu_Statistic.setTitle("统计(S)")
        self.setMenuBar(self.menuBar)
        # Top toolbar
        self.mainToolBar = QToolBar(self)
        self.addToolBar(Qt.TopToolBarArea, self.mainToolBar)
        self.statusBar = QStatusBar(self)
        self.mainToolBar.setStyleSheet("background: #EDEDED;")
        self.mainToolBar.setMaximumHeight(25)
        self.setStatusBar(self.statusBar)
        # Font-settings action
        font_set = QAction(self)
        font_set.setText("主窗口字体")
        font_set.triggered.connect(self.on_font_set_clicked)
        # Background-image action
        change_border = QAction(self)
        change_border.setText("背景图片")
        change_border.triggered.connect(self.on_change_border_clicked)
        # Start action
        self.start_action = QAction(self)
        icon2 = QIcon()
        icon2.addPixmap(QPixmap("img/start.png"), QIcon.Normal, QIcon.Off)
        self.start_action.setIcon(icon2)
        self.start_action.setText("开始")
        self.start_action.setShortcut('F1')
        self.start_action.triggered.connect(self.on_start_action_clicked)
        # Stop action
        self.stop_action = QAction(self)
        icon3 = QIcon()
        icon3.addPixmap(QPixmap("img/stop.png"), QIcon.Normal, QIcon.Off)
        self.stop_action.setIcon(icon3)
        self.stop_action.setText("停止")
        self.stop_action.setShortcut('F3')
        self.stop_action.setDisabled(True)  # not clickable until a capture starts
        self.stop_action.triggered.connect(self.on_stop_action_clicked)
        # Pause action
        self.pause_action = QAction(self)
        p_icon = QIcon()
        p_icon.addPixmap(QPixmap("img/pause.png"), QIcon.Normal, QIcon.Off)
        self.pause_action.setIcon(p_icon)
        self.pause_action.setText("暂停")
        self.pause_action.setShortcut('F2')
        self.pause_action.setDisabled(True)  # not clickable until a capture starts
        self.pause_action.triggered.connect(self.on_pause_action_clicked)
        # Restart action
        self.actionRestart = QAction(self)
        icon4 = QIcon()
        icon4.addPixmap(QPixmap("img/restart.png"), QIcon.Normal, QIcon.Off)
        self.actionRestart.setIcon(icon4)
        self.actionRestart.setText("重新开始")
        self.actionRestart.setShortcut('F4')
        self.actionRestart.setDisabled(True)  # not clickable until a capture starts
        self.actionRestart.triggered.connect(self.on_actionRestart_clicked)
        # Resume-live-update action
        self.action_update = QAction(self)
        icon5 = QIcon()
        icon5.addPixmap(QPixmap("img/update.png"), QIcon.Normal, QIcon.Off)
        self.action_update.setIcon(icon5)
        self.action_update.setText("继续更新")
        self.action_update.setShortcut('F5')
        self.action_update.setDisabled(True)
        self.action_update.triggered.connect(
            lambda: self.timer.start(flush_time) and self.action_update.setDisabled(True)
        )
        # Help documentation actions
        action_readme = QAction(self)
        action_readme.setText("使用文档")
        action_about = QAction(self)
        action_about.setText("关于")
        action_about.triggered.connect(self.on_action_about_clicked)
        # Open-file action
        action_openfile = QAction(self)
        action_openfile.setText("打开")
        action_openfile.setShortcut("ctrl+O")
        action_openfile.triggered.connect(self.on_action_openfile_clicked)
        # Save-file action
        action_savefile = QAction(self)
        action_savefile.setText("保存")
        action_savefile.setShortcut("ctrl+S")
        action_savefile.triggered.connect(self.on_action_savefile_clicked)
        # Exit action
        self.action_exit = QAction(self)
        self.action_exit.setCheckable(False)
        self.action_exit.setText("退出")
        self.action_exit.triggered.connect(self.on_action_exit_clicked)
        self.action_exit.setShortcut('ctrl+Q')
        self.action_exit.setStatusTip('退出应用程序')
        # Forge-packet action
        self.forged_action = QAction(self)
        self.forged_action.setText("伪造包")
        self.forged_action.setShortcut('F7')
        self.forged_action.triggered.connect(self.forged_action_clicked)
        # Traffic-monitor action
        self.action_track = QAction(self)
        self.action_track.setText("流量监测")
        self.action_track.setShortcut('F6')
        self.action_track.triggered.connect(self.on_action_track_clicked)
        # IP-address-type statistics chart action
        self.IP_statistics = QAction(self)
        self.IP_statistics.setText("IP地址类型统计")
        self.IP_statistics.triggered.connect(self.on_IP_statistics_clicked)
        # Packet-type statistics chart action
        self.message_statistics = QAction(self)
        self.message_statistics.setText("报文类型统计")
        self.message_statistics.triggered.connect(
            self.on_message_statistics_clicked)
        """
        添加工具栏:开始,暂停,停止,重新开始
        """
        self.mainToolBar.addAction(self.start_action)
        self.mainToolBar.addAction(self.pause_action)
        self.mainToolBar.addAction(self.stop_action)
        self.mainToolBar.addAction(self.actionRestart)
        self.mainToolBar.addAction(self.action_update)
        self.menu_F.addAction(action_openfile)
        self.menu_F.addAction(action_savefile)
        self.menu_F.addAction(self.action_exit)
        self.menu_F.showFullScreen()
        self.edit_menu.addAction(font_set)
        self.edit_menu.addAction(change_border)
        # Sub-entries of the capture menu
        self.capture_menu.addAction(self.start_action)
        self.capture_menu.addAction(self.pause_action)
        self.capture_menu.addAction(self.stop_action)
        self.capture_menu.addAction(self.actionRestart)
        self.menu_H.addAction(action_readme)
        self.menu_H.addAction(action_about)
        self.menu_Analysis.addAction(self.forged_action)
        self.menu_Analysis.addAction(self.action_track)
        self.menu_Statistic.addAction(self.IP_statistics)
        self.menu_Statistic.addAction(self.message_statistics)
        self.menuBar.addAction(self.menu_F.menuAction())
        self.menuBar.addAction(self.edit_menu.menuAction())
        self.menuBar.addAction(self.capture_menu.menuAction())
        self.menuBar.addAction(self.menu_Analysis.menuAction())
        self.menuBar.addAction(self.menu_Statistic.menuAction())
        self.menuBar.addAction(self.menu_H.menuAction())
        # self.statusBar.showMessage('实时更新的信息', 0)  # message shown by the bar itself; 2nd arg is display time in ms, 0 = keep until the next message
        """底部状态栏
        利用self.comNum.setText()实时更新状态栏信息
        """
        self.comNum = QLabel('下载速度:')
        self.baudNum = QLabel('上传速度:')
        self.getSpeed = QLabel('收包速度:')
        self.sendSpeed = QLabel('发包速度:')
        self.netNic = QLabel('Welcome to WireWhale! ^ _ ^')
        self.statusBar.setStyleSheet("background: #EDEDED;")
        """各个单元空间占比"""
        self.statusBar.addPermanentWidget(self.netNic, stretch=2)
        self.statusBar.addPermanentWidget(self.getSpeed, stretch=1)
        self.statusBar.addPermanentWidget(self.sendSpeed, stretch=1)
        self.statusBar.addPermanentWidget(self.comNum, stretch=1)
        self.statusBar.addPermanentWidget(self.baudNum, stretch=1)
        QMetaObject.connectSlotsByName(self)
        self.core = Core(self)
        # Timer that keeps the packet list scrolled to the newest entry
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.info_tree.scrollToBottom)
        self.show()
    """
    重写窗口关闭事件
    """
    def closeEvent(self, QCloseEvent):
        """Overridden QWidget close handler.

        If a capture is running or unsaved, asks whether to stop/save
        before closing; on confirmed close, cleans up the capture core,
        terminates the child processes, and exits the interpreter.
        """
        def close_to_do():
            # Final teardown: flush the capture core, kill the monitor and
            # packet-forging child processes if still alive, then exit.
            self.core.clean_out()
            if self.Monitor and self.Monitor.is_alive():
                self.Monitor.terminate()
            if self.Forged and self.Forged.is_alive():
                self.Forged.terminate()
            exit()
        if self.core.start_flag or self.core.pause_flag:
            # A capture is still running (or paused)
            reply = QMessageBox.question(
                self, 'Message', "您是否要停止捕获,并保存已捕获的分组?\n警告:若不保存,您捕获的分组将会丢失",
                QMessageBox.Save | QMessageBox.Close | QMessageBox.Cancel,
                QMessageBox.Cancel)
            if reply == QMessageBox.Cancel:
                QCloseEvent.ignore()
            if reply == QMessageBox.Close:
                self.core.stop_capture()
                close_to_do()
            elif reply == QMessageBox.Save:
                self.core.stop_capture()
                self.on_action_savefile_clicked()
                close_to_do()
        elif self.core.stop_flag and not self.core.save_flag:
            """
            已停止,但没有保存文件
            """
            reply = QMessageBox.question(
                self, 'Message', "您是否保存已捕获的分组?\n警告:若不保存,您捕获的分组将会丢失",
                QMessageBox.Save | QMessageBox.Close | QMessageBox.Cancel,
                QMessageBox.Cancel)
            if reply == QMessageBox.Cancel:
                QCloseEvent.ignore()
            elif reply == QMessageBox.Save:
                self.on_action_savefile_clicked()
                close_to_do()
            else:
                close_to_do()
        elif self.core.save_flag or not self.core.start_flag:
            """
            未工作状态
            """
            reply = QMessageBox.question(self, 'Message', "您是否要退出本程序?",
                                         QMessageBox.Yes | QMessageBox.No,
                                         QMessageBox.No)
            if reply == QMessageBox.Yes:
                close_to_do()
            else:
                QCloseEvent.ignore()
"""绘制背景"""
def paintEvent(self, a0: QPaintEvent):
painter = QPainter(self)
pixmap = QPixmap("img/Whale1.jpg")
painter.drawPixmap(self.rect(), pixmap)
"""
数据包视图 数据记录点击事件
点击列表中一条记录时,在下面的frame框中显示帧的详细信息
"""
def on_tableview_clicked(self):
selected_row = self.info_tree.currentItem().text(0) #当前选择的编号
#表格停止追踪更新
if selected_row and selected_row.isdigit():
self.timer.stop()
self.show_infoTree((int)(selected_row))
if not self.core.pause_flag and not self.core.stop_flag:
self.action_update.setDisabled(False)
"""
展开帧的详细信息
"""
def show_infoTree(self, selected_row):
"""
清空Frame Information内容
"""
self.treeWidget.clear()
"""
添加树节点
Item1: 第一层树节点
Item1_1: 第二层树节点,Item1的子节点
QTreeWidgetItem(parentNode, text) parentNode:父节点 text:当前节点内容
"""
parentList, childList, hex_dump = self.core.on_click_item(selected_row)
p_num = len(parentList)
for i in range(p_num):
item1 = QTreeWidgetItem(self.treeWidget)
item1.setText(0, parentList[i])
c_num = len(childList[i])
for j in range(c_num):
item1_1 = QTreeWidgetItem(item1)
item1_1.setText(0, childList[i][j])
self.set_hex_text(hex_dump)
"""
获取当前选择的网卡
"""
def get_choose_nic(self):
card = self.choose_nicbox.currentText()
self.netNic.setText('当前网卡:' + card)
if (card == 'All'):
a = None
elif platform == 'Windows':
a = netcards[card]
elif platform == 'Linux':
a = card
else:
a = None
return a
    """
    设置hex区文本
    """
    def set_hex_text(self, text):
        """Replace the hex pane contents with `text`."""
        self.hexBrowser.setText(text)
    """
    设置字体点击事件
    """
    def on_font_set_clicked(self):
        """Let the user pick a display font; persist it to data.json and
        apply it to the three display panes."""
        font, ok = QFontDialog.getFont()
        if ok:
            with open('data.json', 'r') as file_obj:
                '''读取json文件'''
                old_font = json.load(file_obj)  # returns list data; dicts are supported too
            old_font["font"] = font.family()
            old_font["size"] = font.pointSize()
            with open('data.json', 'w') as file:
                json.dump(old_font, file)
            self.info_tree.setFont(font)
            self.treeWidget.setFont(font)
            self.hexBrowser.setFont(font)
    """
    设置背景图片
    """
    def on_change_border_clicked(self):
        """Let the user pick a background image; persist its path in
        data.json and repaint the window palette with it."""
        imgName, imgType = QFileDialog.getOpenFileName(
            self, "打开图片", "C:/", "*.jpg;;*.png;;All Files(*)")
        with open('data.json', 'r') as file_obj:
            '''读取json文件'''
            old_image = json.load(file_obj)  # returns list data; dicts are supported too
        old_image["imageUrl"] = imgName
        with open('data.json', 'w') as file:
            json.dump(old_image, file)
        window_pale = QPalette()
        window_pale.setBrush(self.backgroundRole(), QBrush(QPixmap(imgName)))
        self.setPalette(window_pale)
    """
    开始键点击事件
    """
    def on_start_action_clicked(self):
        """Start (or restart after a stop) a capture on the selected NIC
        with the current filter, and toggle the action/widget states."""
        if self.core.stop_flag:
            # Restarting after a stop: clear all three panes first
            self.info_tree.clear()
            self.treeWidget.clear()
            self.set_hex_text("")
        self.core.start_capture(self.get_choose_nic(), self.Filter.text())
        """
        点击开始后,过滤器不可编辑,开始按钮、网卡选择框全部设为不可选
        激活暂停、停止键、重新开始键
        """
        self.start_action.setDisabled(True)
        self.Filter.setEnabled(False)
        self.FilterButton.setEnabled(False)
        self.choose_nicbox.setEnabled(False)
        self.actionRestart.setDisabled(False)
        self.pause_action.setEnabled(True)
        self.stop_action.setEnabled(True)
        self.timer.start(flush_time)
"""
暂停事件点击事件
"""
def on_pause_action_clicked(self):
    """Pause the running capture and update the widget enabled/disabled states."""
    self.core.pause_capture()
    """
    激活开始、停止、重新开始键、过滤器、网卡选择框
    """
    # Re-enable start/stop/restart and the NIC box; the filter stays locked
    # and the manual-update action is disabled while paused.
    self.start_action.setEnabled(True)
    self.stop_action.setDisabled(False)
    self.actionRestart.setDisabled(False)
    self.Filter.setDisabled(True)
    self.FilterButton.setDisabled(True)
    self.choose_nicbox.setDisabled(False)
    self.pause_action.setDisabled(True)
    self.action_update.setDisabled(True)
    self.timer.stop()
"""
菜单栏停止键点击事件
"""
def on_stop_action_clicked(self):
    """Stop the capture and restore the idle widget states."""
    self.core.stop_capture()
    """
    激活开始键、重新开始键、过滤器、网卡选择框
    """
    # Re-enable start/filter/NIC selection; pause/stop become unavailable.
    self.stop_action.setDisabled(True)
    self.pause_action.setDisabled(True)
    self.start_action.setEnabled(True)
    self.Filter.setDisabled(False)
    self.FilterButton.setDisabled(False)
    self.choose_nicbox.setDisabled(False)
    self.action_update.setDisabled(True)
    self.timer.stop()
"""
重新开始键响应事件
"""
def on_actionRestart_clicked(self):
    """Restart the capture: clear all panels and begin a new session with current settings."""
    # restart: clear the panel contents
    self.timer.stop()
    self.core.restart_capture(self.get_choose_nic(), self.Filter.text())
    self.info_tree.clear()
    self.treeWidget.clear()
    self.set_hex_text("")
    """
    点击开始后,过滤器不可编辑,开始按钮,网卡选择框全部设为不可选
    激活暂停、停止键、重新开始键
    """
    # Same widget state as a fresh start: lock filter/start/NIC selection,
    # enable pause/stop/restart.
    self.actionRestart.setDisabled(False)
    self.start_action.setDisabled(True)
    self.Filter.setEnabled(False)
    self.FilterButton.setEnabled(False)
    self.choose_nicbox.setEnabled(False)
    self.pause_action.setEnabled(True)
    self.stop_action.setEnabled(True)
    self.timer.start(flush_time)
"""
IP地址类型统计图绘制
"""
def on_IP_statistics_clicked(self):
    """Draw a bar chart with the proportion of IPv4 vs IPv6 packets captured so far."""
    counts = self.core.get_network_count()
    n_v4 = counts["ipv4"]
    n_v6 = counts["ipv6"]
    total = n_v4 + n_v6
    if total == 0:
        # nothing captured yet
        QMessageBox.information(self, "提示", "你还没有抓包!",
                                QMessageBox.Cancel)
        return
    data = {
        'IPv4': (n_v4 / total, '#7199cf'),
        'IPv6': (n_v6 / total, '#4fc4aa'),
    }
    fig = plt.figure(figsize=(6, 4))
    # one plotting area for the whole figure
    axes = fig.add_subplot(111)
    axes.set_title('IPv4 & IPv6 Statistical Chart')
    # x positions of the two bars: [1, 2]
    positions = np.arange(1, 3)
    heights = [frac for frac, _ in data.values()]
    bar_colors = [c for _, c in data.values()]
    # draw the bars without visible edges
    bars = axes.bar(positions, heights, width=0.6, edgecolor='none')
    axes.set_ylabel('Proportion')
    axes.set_xticks(positions)
    axes.set_xticklabels(data.keys())
    axes.set_xlim([0, 3.5])
    axes.set_ylim([0, 1])
    # color each bar individually
    for rect, col in zip(bars, bar_colors):
        rect.set_color(col)
    plt.show()
"""
数据包类型数量统计
"""
def on_message_statistics_clicked(self):
    """Draw a pie chart with the TCP/ICMP/UDP/ARP packet-count distribution."""
    counts = self.core.get_transport_count()
    tcp = counts["tcp"]
    udp = counts["udp"]
    arp = counts["arp"]
    icmp = counts["icmp"]
    if tcp + udp + arp + icmp == 0:
        # nothing captured yet
        QMessageBox.information(self, "提示", "你还没有抓包!",
                                QMessageBox.Cancel)
        return
    slice_labels = 'TCP', 'ICMP', 'UDP', 'ARP'
    slice_sizes = [tcp, icmp, udp, arp]
    offsets = [0.1, 0.1, 0.1, 0.1]  # pull every wedge slightly out
    # aspect=1 keeps the axes square so the pie is a circle, not an ellipse
    plt.axes(aspect=1)
    # autopct shows the percentage on each wedge
    plt.pie(
        x=slice_sizes,
        labels=slice_labels,
        explode=offsets,
        autopct='%3.1f %%',
        shadow=True,
        labeldistance=1.1,
        startangle=90,
        pctdistance=0.6)
    plt.show()
"""
打开文件事件
"""
def on_action_openfile_clicked(self):
    """Open a saved pcap file; refuses while a capture is running or paused."""
    if self.core.start_flag or self.core.pause_flag:
        QMessageBox.warning(self, "警告", "请停止当前抓包!")
        return
    self.core.open_pcap_file()
"""
保存文件点击事件
"""
def on_action_savefile_clicked(self):
    """Save the captured packets to a pcap file; refuses while capturing or paused."""
    if self.core.start_flag or self.core.pause_flag:
        QMessageBox.warning(self, "警告", "请停止当前抓包!")
        return
    self.core.save_captured_to_pcap()
"""
菜单栏追踪流键点击事件
"""
def on_action_track_clicked(self):
    """Launch the stream-tracking monitor in a separate process (at most one instance)."""
    if not self.Monitor or not self.Monitor.is_alive():
        self.Monitor = Process(target=start_monitor)
        self.Monitor.start()
''
def forged_action_clicked(self):
    """Launch the packet-forging tool in a separate process (at most one instance)."""
    if not self.Forged or not self.Forged.is_alive():
        self.Forged = Process(target=startForged)
        self.Forged.start()
about = "软件著作者:张桓皓 张兴\n\n" + "软件主要功能如下:\n" + "1.对网络接口数据包尽可能多的捕获,可以将网卡设置为混杂模式,然后进行数据包的采集;\n" + "2.对捕获的数据包进行一定的解析,将报文在网络层和传输层逐字段展开,对数据包的协议类型、源目的地址、数据包截获时间、数据包内容进行分析;\n" + "3.根据用户不同的要求能够依据特定指定地址、特定协议类型相关包等条件进行自定义监视;\n" + "4.针对应用进行流量监测,监测结果输出实时流量图显示,管理员可设置流量上限,当应用流量超过这个最高限度时可以向管理员进行报警;\n" + "5.系统提供了多种方式显示结果,如以饼状图的形式统计ARP报文、TCP报文、UDP报文ICMP报文进行统计,以柱状图的形式统计IPv4报文、IPv6报文进行统计,以折线图的形式实时显示具体应用流量;\n" + "6.实现数据包保存,便于日后分析,即将捕获到的数据包,可另存为一个文件,并能被本系统所读取和展示;\n" + "7.伪造报文实现网络反攻击或进行深入微调IP或传输层的域。\n\n" + "*解释权归著作者所有"
def on_action_about_clicked(self):
    """Show the 'about' dialog with the author and feature description."""
    QMessageBox.information(self, "关于", self.about)
"""
退出点击事件
"""
def on_action_exit_clicked(self, event):
    """Forward the exit menu action to the window's close handler."""
    self.closeEvent(event)
"""
进度加载框
num: 加载数据数量
"""
def showDialog(self, num):
    """Show a modal progress dialog while loading *num* data items.

    num: number of data items being loaded
    """
    progress = QProgressDialog(self)
    progress.setWindowTitle("请稍等")
    progress.setLabelText("正在加载数据...")
    progress.setCancelButtonText("取消")
    progress.setMinimumDuration(1)  # delay (ms) before the dialog appears
    progress.setWindowModality(Qt.WindowModal)
    progress.setRange(0, num)
    for i in range(num):
        progress.setValue(i)
        if progress.wasCanceled():
            QMessageBox.warning(self, "提示", "操作失败")
            break
    # NOTE(review): the success message also shows after a cancel — confirm intended
    progress.setValue(num)
    QMessageBox.information(self, "提示", "操作成功")
"""键盘点击事件"""
def keyReleaseEvent(self, event):
    """Keyboard handler: Up/Down freezes auto-refresh and shows the selected row; F5 resumes."""
    if event.key() == Qt.Key_Up or event.key() == Qt.Key_Down:
        # stop auto-refresh so the selection doesn't jump while browsing
        self.timer.stop()
        selected_row = self.info_tree.currentItem().text(0)
        if selected_row and selected_row.isdigit():
            self.show_infoTree(int(selected_row))
        self.action_update.setDisabled(False)
    if event.key() == Qt.Key_F5:
        # F5 resumes the periodic refresh
        self.timer.start(flush_time)
        self.action_update.setDisabled(True)
def start():
    """Create the Qt application, build the main window and run the event loop.

    NOTE(review): defined without *self*; if this sits inside the window class
    it should be a module-level function or a @staticmethod — confirm.
    """
    app = QApplication([])
    ui = Ui_MainWindow()
    ui.setupUi()
    app.exec()
| [
"json.dump",
"multiprocessing.Process",
"matplotlib.pyplot.pie",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"sys.exit",
"json.load",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1523, 1542), 'json.load', 'json.load', (['file_obj'], {}), '(file_obj)\n', (1532, 1542), False, 'import json\n'), ((14741, 14747), 'sys.exit', 'exit', ([], {}), '()\n', (14745, 14747), False, 'from sys import exit\n'), ((19257, 19276), 'json.load', 'json.load', (['file_obj'], {}), '(file_obj)\n', (19266, 19276), False, 'import json\n'), ((19390, 19416), 'json.dump', 'json.dump', (['old_image', 'file'], {}), '(old_image, file)\n', (19399, 19416), False, 'import json\n'), ((22710, 22736), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (22720, 22736), True, 'import matplotlib.pyplot as plt\n'), ((22919, 22934), 'numpy.arange', 'np.arange', (['(1)', '(3)'], {}), '(1, 3)\n', (22928, 22934), True, 'import numpy as np\n'), ((23596, 23606), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23604, 23606), True, 'import matplotlib.pyplot as plt\n'), ((24267, 24285), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'aspect': '(1)'}), '(aspect=1)\n', (24275, 24285), True, 'import matplotlib.pyplot as plt\n'), ((24421, 24558), 'matplotlib.pyplot.pie', 'plt.pie', ([], {'x': 'fracs', 'labels': 'labels', 'explode': 'explode', 'autopct': '"""%3.1f %%"""', 'shadow': '(True)', 'labeldistance': '(1.1)', 'startangle': '(90)', 'pctdistance': '(0.6)'}), "(x=fracs, labels=labels, explode=explode, autopct='%3.1f %%', shadow\n =True, labeldistance=1.1, startangle=90, pctdistance=0.6)\n", (24428, 24558), True, 'import matplotlib.pyplot as plt\n'), ((24695, 24705), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24703, 24705), True, 'import matplotlib.pyplot as plt\n'), ((25361, 25390), 'multiprocessing.Process', 'Process', ([], {'target': 'start_monitor'}), '(target=start_monitor)\n', (25368, 25390), False, 'from multiprocessing import Process\n'), ((25554, 25581), 'multiprocessing.Process', 'Process', ([], {'target': 'startForged'}), '(target=startForged)\n', (25561, 25581), False, 'from multiprocessing import Process\n'), 
((2116, 2145), 'json.dump', 'json.dump', (['old_font', 'file_obj'], {}), '(old_font, file_obj)\n', (2125, 2145), False, 'import json\n'), ((18620, 18639), 'json.load', 'json.load', (['file_obj'], {}), '(file_obj)\n', (18629, 18639), False, 'import json\n'), ((18814, 18839), 'json.dump', 'json.dump', (['old_font', 'file'], {}), '(old_font, file)\n', (18823, 18839), False, 'import json\n')] |
import os
import gym
import numpy as np
import matplotlib.pyplot as plt
from dqn_agent import DQNAgent
from utils import reward_engineering
import tensorflow as tf
def plot_points(point_list, style):
    """Scatter the 2-D points in *point_list* using the matplotlib *style* string."""
    xs = [point[0] for point in point_list]
    ys = [point[1] for point in point_list]
    plt.plot(xs, ys, style)
# ---- Evaluation of a trained DQN agent on a Gym environment ----

NUM_EPISODES = 30  # Number of episodes used for evaluation

rom = 'CartPole-v1'
#rom = 'MountainCar-v0'
#rom = 'Assault-ram-v0'

fig_format = 'png'
# fig_format = 'eps'
# fig_format = 'svg'

# Comment this line to enable training using your GPU
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
tf.compat.v1.disable_eager_execution()

# Initiating the Environment
env = gym.make(rom)
state_size = env.observation_space.shape[0]
action_size = env.action_space.n

# Creating the DQN agent (with greedy policy, suited for evaluation)
agent = DQNAgent(state_size, action_size, epsilon=0.0, epsilon_min=0.0)

# Checking if weights from a previous learning session exist
if os.path.exists('../models/DQN-' + rom + '.h5'):
    print('Loading weights from previous learning session.')
    agent.load('../models/DQN-' + rom + '.h5')
else:
    print('No weights found from previous learning session. Unable to proceed.')
    exit(-1)

return_history = []
for episodes in range(1, NUM_EPISODES + 1):
    # Reset the environment
    state = env.reset()
    # This reshape is needed to keep compatibility with Keras
    state = np.reshape(state, [1, state_size])
    # Cumulative reward is the return since the beginning of the episode
    cumulative_reward = 0.0
    for time in range(1, 500):
        # Render the environment for visualization
        env.render()
        # Select action
        action = agent.act(state)
        # Take action, observe reward and new state
        next_state, reward, done, _ = env.step(action)
        # Reshaping to keep compatibility with Keras
        next_state = np.reshape(next_state, [1, state_size])
        # Reward engineering, to keep compatibility with how training was done
        reward = reward_engineering(state[0], action, reward, next_state[0], done, time)
        state = next_state
        # Accumulate discounted reward
        cumulative_reward = agent.gamma * cumulative_reward + reward
        if done:
            print("episode: {}/{}, time: {}, score: {:.6}, epsilon: {:.3}"
                  .format(episodes, NUM_EPISODES, time, cumulative_reward, agent.epsilon))
            break
    return_history.append(cumulative_reward)

# Prints mean return
print('Mean return: ', np.mean(return_history))

# Plots return history
plt.plot(return_history, 'b')
plt.xlabel('Episode')
plt.ylabel('Return')
# BUGFIX: the keyword is `format`, not `fig_format` (matplotlib's savefig
# rejects unknown keyword arguments); the second savefig below already used it.
plt.savefig('../plots/dqn_evaluation_' + rom + '.' + fig_format, format=fig_format)

# Plots the greedy policy learned by DQN (position x velocity grid)
plt.figure()
position = np.arange(-1.2, 0.5 + 0.025, 0.05)
velocity = np.arange(-0.07, 0.07 + 0.0025, 0.005)
push_left = []
none = []
push_right = []
for j in range(len(position)):
    for k in range(len(velocity)):
        pos = position[j]
        vel = velocity[k]
        state = np.array([[pos, vel]])
        action = agent.act(state)
        if action == 0:
            push_left.append(state[0])
        elif action == 1:
            none.append(state[0])
        else:
            push_right.append(state[0])
plot_points(push_left, 'b.')
plot_points(none, 'r.')
plot_points(push_right, 'g.')
plt.xlabel('Position')
plt.ylabel('Velocity')
plt.title('Agent Policy')
plt.legend(['Left', 'None', 'Right'])
plt.savefig('../plots/agent_decision_' + rom + '.' + fig_format, format=fig_format)
plt.show()
| [
"os.path.exists",
"dqn_agent.DQNAgent",
"numpy.mean",
"matplotlib.pyplot.savefig",
"numpy.reshape",
"utils.reward_engineering",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"tensorflow.compat.v1.disable_eager_execution",
"matplot... | [((626, 664), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (662, 664), True, 'import tensorflow as tf\n'), ((701, 714), 'gym.make', 'gym.make', (['rom'], {}), '(rom)\n', (709, 714), False, 'import gym\n'), ((870, 933), 'dqn_agent.DQNAgent', 'DQNAgent', (['state_size', 'action_size'], {'epsilon': '(0.0)', 'epsilon_min': '(0.0)'}), '(state_size, action_size, epsilon=0.0, epsilon_min=0.0)\n', (878, 933), False, 'from dqn_agent import DQNAgent\n'), ((998, 1044), 'os.path.exists', 'os.path.exists', (["('../models/DQN-' + rom + '.h5')"], {}), "('../models/DQN-' + rom + '.h5')\n", (1012, 1044), False, 'import os\n'), ((2601, 2630), 'matplotlib.pyplot.plot', 'plt.plot', (['return_history', '"""b"""'], {}), "(return_history, 'b')\n", (2609, 2630), True, 'import matplotlib.pyplot as plt\n'), ((2631, 2652), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (2641, 2652), True, 'import matplotlib.pyplot as plt\n'), ((2653, 2673), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Return"""'], {}), "('Return')\n", (2663, 2673), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2766), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../plots/dqn_evaluation_' + rom + '.' + fig_format)"], {'fig_format': 'fig_format'}), "('../plots/dqn_evaluation_' + rom + '.' 
+ fig_format, fig_format\n =fig_format)\n", (2685, 2766), True, 'import matplotlib.pyplot as plt\n'), ((2804, 2816), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2814, 2816), True, 'import matplotlib.pyplot as plt\n'), ((2828, 2862), 'numpy.arange', 'np.arange', (['(-1.2)', '(0.5 + 0.025)', '(0.05)'], {}), '(-1.2, 0.5 + 0.025, 0.05)\n', (2837, 2862), True, 'import numpy as np\n'), ((2874, 2912), 'numpy.arange', 'np.arange', (['(-0.07)', '(0.07 + 0.0025)', '(0.005)'], {}), '(-0.07, 0.07 + 0.0025, 0.005)\n', (2883, 2912), True, 'import numpy as np\n'), ((3405, 3427), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (3415, 3427), True, 'import matplotlib.pyplot as plt\n'), ((3428, 3450), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Velocity"""'], {}), "('Velocity')\n", (3438, 3450), True, 'import matplotlib.pyplot as plt\n'), ((3451, 3476), 'matplotlib.pyplot.title', 'plt.title', (['"""Agent Policy"""'], {}), "('Agent Policy')\n", (3460, 3476), True, 'import matplotlib.pyplot as plt\n'), ((3477, 3514), 'matplotlib.pyplot.legend', 'plt.legend', (["['Left', 'None', 'Right']"], {}), "(['Left', 'None', 'Right'])\n", (3487, 3514), True, 'import matplotlib.pyplot as plt\n'), ((3515, 3603), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../plots/agent_decision_' + rom + '.' + fig_format)"], {'format': 'fig_format'}), "('../plots/agent_decision_' + rom + '.' 
+ fig_format, format=\n fig_format)\n", (3526, 3603), True, 'import matplotlib.pyplot as plt\n'), ((3599, 3609), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3607, 3609), True, 'import matplotlib.pyplot as plt\n'), ((311, 332), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', 'style'], {}), '(x, y, style)\n', (319, 332), True, 'import matplotlib.pyplot as plt\n'), ((1445, 1479), 'numpy.reshape', 'np.reshape', (['state', '[1, state_size]'], {}), '(state, [1, state_size])\n', (1455, 1479), True, 'import numpy as np\n'), ((2552, 2575), 'numpy.mean', 'np.mean', (['return_history'], {}), '(return_history)\n', (2559, 2575), True, 'import numpy as np\n'), ((1923, 1962), 'numpy.reshape', 'np.reshape', (['next_state', '[1, state_size]'], {}), '(next_state, [1, state_size])\n', (1933, 1962), True, 'import numpy as np\n'), ((2065, 2136), 'utils.reward_engineering', 'reward_engineering', (['state[0]', 'action', 'reward', 'next_state[0]', 'done', 'time'], {}), '(state[0], action, reward, next_state[0], done, time)\n', (2083, 2136), False, 'from utils import reward_engineering\n'), ((3088, 3110), 'numpy.array', 'np.array', (['[[pos, vel]]'], {}), '([[pos, vel]])\n', (3096, 3110), True, 'import numpy as np\n')] |
from PIL import Image
import numpy as np
import cv2
PAPER_EXT = {".gloria_chx": "gloria_chx_open_image"}
def gloria_chx_open_image(img):
    """Open *img* (a path), letterbox-resize it to 256x256 and return an RGB PIL image.

    The grayscale image is scaled so that its longest side becomes 256 px,
    then the shorter side is zero-padded symmetrically.
    """

    def _letterbox(gray, scale):
        """Scale so max(h, w) == scale; zero-pad the other dimension to scale."""
        h, w = gray.shape[0], gray.shape[1]
        if h >= w:
            # image is taller (or square): height -> scale, width follows
            ratio = scale / float(h)
            target = (int(float(w) * ratio), scale)  # cv2 wants (width, height)
        else:
            # image is wider: width -> scale, height follows
            ratio = scale / float(w)
            target = (scale, int(float(h) * ratio))
        resized = cv2.resize(gray, target, interpolation=cv2.INTER_AREA)
        if h >= w:
            # height is fixed at scale, pad the width
            gap = scale - resized.shape[1]
            pads = [(0, 0), (int(np.floor(gap / 2)), int(np.ceil(gap / 2)))]
        else:
            # width is fixed at scale, pad the height
            gap = scale - resized.shape[0]
            pads = [(int(np.floor(gap / 2)), int(np.ceil(gap / 2))), (0, 0)]
        return np.pad(resized, pads, "constant", constant_values=0)

    arr = cv2.imread(str(img), 0)  # read as grayscale
    arr = _letterbox(arr, 256)
    return Image.fromarray(arr).convert("RGB")
| [
"numpy.ceil",
"PIL.Image.fromarray",
"numpy.floor",
"numpy.pad",
"cv2.resize"
] | [((945, 1013), 'cv2.resize', 'cv2.resize', (['img', 'desireable_size[::-1]'], {'interpolation': 'cv2.INTER_AREA'}), '(img, desireable_size[::-1], interpolation=cv2.INTER_AREA)\n', (955, 1013), False, 'import cv2\n'), ((1657, 1743), 'numpy.pad', 'np.pad', (['resized_img', '[(top, bottom), (left, right)]', '"""constant"""'], {'constant_values': '(0)'}), "(resized_img, [(top, bottom), (left, right)], 'constant',\n constant_values=0)\n", (1663, 1743), True, 'import numpy as np\n'), ((1861, 1879), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (1876, 1879), False, 'from PIL import Image\n'), ((1247, 1269), 'numpy.floor', 'np.floor', (['(pad_size / 2)'], {}), '(pad_size / 2)\n', (1255, 1269), True, 'import numpy as np\n'), ((1295, 1316), 'numpy.ceil', 'np.ceil', (['(pad_size / 2)'], {}), '(pad_size / 2)\n', (1302, 1316), True, 'import numpy as np\n'), ((1510, 1532), 'numpy.floor', 'np.floor', (['(pad_size / 2)'], {}), '(pad_size / 2)\n', (1518, 1532), True, 'import numpy as np\n'), ((1559, 1580), 'numpy.ceil', 'np.ceil', (['(pad_size / 2)'], {}), '(pad_size / 2)\n', (1566, 1580), True, 'import numpy as np\n')] |
import os
import os.path
import sys
import torch
import torch.utils.data as data
from .datasets_wrapper import Dataset
import cv2
import numpy as np
WF_CLASSES = ("face")
class AnnotationTransform(object):
"""Transforms a VOC annotation into a Tensor of bbox coords and label index
Initilized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __init__(self, class_to_ind=None, keep_difficult=True):
self.class_to_ind = class_to_ind or dict(
zip(WF_CLASSES, range(len(WF_CLASSES)))
)
self.keep_difficult = keep_difficult
def __call__(self, target):
"""
Arguments:
target (annotation) : the target annotation to be made usable
will be an ET.Element
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = np.empty((0, 5))
for obj in target.iter("object"):
difficult = obj.find("difficult")
if difficult is not None:
difficult = int(difficult.text) == 1
else:
difficult = False
if not self.keep_difficult and difficult:
continue
name = obj.find("name").text.strip()
bbox = obj.find("bndbox")
pts = ["xmin", "ymin", "xmax", "ymax"]
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
# cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
bndbox.append(cur_pt)
label_idx = self.class_to_ind[name]
bndbox.append(label_idx)
res = np.vstack((res, bndbox)) # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
width = int(target.find("size").find("width").text)
height = int(target.find("size").find("height").text)
img_info = (height, width)
return res, img_info
class WiderFaceDetection(Dataset):
    """WIDER FACE detection dataset.

    Parses a retinaface-style ``label.txt``: a line starting with ``#`` names
    an image (resolved relative to an ``images/`` folder next to the label
    file) and the following lines each hold one face annotation.

    Args:
        txt_path (str): path to ``label.txt``.
        img_size (tuple): target size handed to the base Dataset.
        preproc (callable, optional): transform applied to (img, target)
            pairs in ``__getitem__``.
    """

    def __init__(self, txt_path, img_size=(416, 416), preproc=None):
        super().__init__(img_size)
        self.preproc = preproc
        self.imgs_path = []
        self.words = []
        # BUGFIX: the file handle was opened and never closed; use a
        # context manager so it is released deterministically.
        with open(txt_path, 'r') as f:
            lines = f.readlines()
        isFirst = True
        labels = []
        for line in lines:
            line = line.rstrip()
            if line.startswith('#'):
                # a '#' line starts a new image: flush the labels collected
                # for the previous image (nothing to flush for the first one)
                if isFirst:
                    isFirst = False
                else:
                    self.words.append(labels.copy())
                    labels.clear()
                path = line[2:]
                path = txt_path.replace('label.txt', 'images/') + path
                self.imgs_path.append(path)
            else:
                labels.append([float(x) for x in line.split(' ')])
        # flush the labels of the last image
        self.words.append(labels)

    def __len__(self):
        """Number of images in the dataset."""
        return len(self.imgs_path)

    def pull_item(self, index):
        """Return (img, target, img_info, index) without preprocessing.

        NOTE(review): when an image has no labels this returns only the empty
        annotation array instead of the 4-tuple — confirm callers expect that.
        """
        img = cv2.imread(self.imgs_path[index])
        height, width, _ = img.shape
        img_info = (height, width)

        labels = self.words[index]
        annotations = np.zeros((0, 15))
        if len(labels) == 0:
            return annotations
        for _, label in enumerate(labels):
            annotation = np.zeros((1, 15))
            # bbox stored as x, y, w, h -> convert to corner coordinates
            annotation[0, 0] = label[0]  # x1
            annotation[0, 1] = label[1]  # y1
            annotation[0, 2] = label[0] + label[2]  # x2
            annotation[0, 3] = label[1] + label[3]  # y2
            annotations = np.append(annotations, annotation, axis=0)
        target = np.array(annotations)

        return img, target, img_info, index

    @Dataset.mosaic_getitem
    def __getitem__(self, index):
        """Return (img_tensor, target) with bbox + 5 facial landmarks per face.

        NOTE(review): when an image has no labels this returns only the empty
        annotation array instead of the (img, target) pair — confirm callers
        expect that.
        """
        img = cv2.imread(self.imgs_path[index])
        height, width, _ = img.shape

        labels = self.words[index]
        annotations = np.zeros((0, 15))
        if len(labels) == 0:
            return annotations
        for idx, label in enumerate(labels):
            annotation = np.zeros((1, 15))
            # bbox stored as x, y, w, h -> convert to corner coordinates
            annotation[0, 0] = label[0]  # x1
            annotation[0, 1] = label[1]  # y1
            annotation[0, 2] = label[0] + label[2]  # x2
            annotation[0, 3] = label[1] + label[3]  # y2

            # five facial landmarks (x, y each); visibility flags interleaved
            annotation[0, 4] = label[4]    # l0_x
            annotation[0, 5] = label[5]    # l0_y
            annotation[0, 6] = label[7]    # l1_x
            annotation[0, 7] = label[8]    # l1_y
            annotation[0, 8] = label[10]   # l2_x
            annotation[0, 9] = label[11]   # l2_y
            annotation[0, 10] = label[13]  # l3_x
            annotation[0, 11] = label[14]  # l3_y
            annotation[0, 12] = label[16]  # l4_x
            annotation[0, 13] = label[17]  # l4_y
            # -1 marks "no landmarks annotated", 1 marks valid landmarks
            if (annotation[0, 4] < 0):
                annotation[0, 14] = -1
            else:
                annotation[0, 14] = 1
            annotations = np.append(annotations, annotation, axis=0)
        target = np.array(annotations)

        if self.preproc is not None:
            img, target = self.preproc(img, target)

        return torch.from_numpy(img), target
return torch.from_numpy(img), target
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for sample in batch:
        for tup in sample:
            if torch.is_tensor(tup):
                imgs.append(tup)
            # idiom fix: use the canonical ndarray check instead of
            # isinstance(tup, type(np.empty(0)))
            elif isinstance(tup, np.ndarray):
                targets.append(torch.from_numpy(tup).float())

    return (torch.stack(imgs, 0), targets)
"torch.stack",
"torch.from_numpy",
"numpy.append",
"numpy.array",
"numpy.zeros",
"torch.is_tensor",
"numpy.empty",
"numpy.vstack",
"cv2.imread"
] | [((1202, 1218), 'numpy.empty', 'np.empty', (['(0, 5)'], {}), '((0, 5))\n', (1210, 1218), True, 'import numpy as np\n'), ((3444, 3477), 'cv2.imread', 'cv2.imread', (['self.imgs_path[index]'], {}), '(self.imgs_path[index])\n', (3454, 3477), False, 'import cv2\n'), ((3607, 3624), 'numpy.zeros', 'np.zeros', (['(0, 15)'], {}), '((0, 15))\n', (3615, 3624), True, 'import numpy as np\n'), ((4083, 4104), 'numpy.array', 'np.array', (['annotations'], {}), '(annotations)\n', (4091, 4104), True, 'import numpy as np\n'), ((4236, 4269), 'cv2.imread', 'cv2.imread', (['self.imgs_path[index]'], {}), '(self.imgs_path[index])\n', (4246, 4269), False, 'import cv2\n'), ((4365, 4382), 'numpy.zeros', 'np.zeros', (['(0, 15)'], {}), '((0, 15))\n', (4373, 4382), True, 'import numpy as np\n'), ((5500, 5521), 'numpy.array', 'np.array', (['annotations'], {}), '(annotations)\n', (5508, 5521), True, 'import numpy as np\n'), ((6456, 6476), 'torch.stack', 'torch.stack', (['imgs', '(0)'], {}), '(imgs, 0)\n', (6467, 6476), False, 'import torch\n'), ((2044, 2068), 'numpy.vstack', 'np.vstack', (['(res, bndbox)'], {}), '((res, bndbox))\n', (2053, 2068), True, 'import numpy as np\n'), ((3753, 3770), 'numpy.zeros', 'np.zeros', (['(1, 15)'], {}), '((1, 15))\n', (3761, 3770), True, 'import numpy as np\n'), ((4023, 4065), 'numpy.append', 'np.append', (['annotations', 'annotation'], {'axis': '(0)'}), '(annotations, annotation, axis=0)\n', (4032, 4065), True, 'import numpy as np\n'), ((4513, 4530), 'numpy.zeros', 'np.zeros', (['(1, 15)'], {}), '((1, 15))\n', (4521, 4530), True, 'import numpy as np\n'), ((5440, 5482), 'numpy.append', 'np.append', (['annotations', 'annotation'], {'axis': '(0)'}), '(annotations, annotation, axis=0)\n', (5449, 5482), True, 'import numpy as np\n'), ((5627, 5648), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (5643, 5648), False, 'import torch\n'), ((6243, 6263), 'torch.is_tensor', 'torch.is_tensor', (['tup'], {}), '(tup)\n', (6258, 6263), False, 'import 
torch\n'), ((6336, 6347), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (6344, 6347), True, 'import numpy as np\n'), ((6375, 6396), 'torch.from_numpy', 'torch.from_numpy', (['tup'], {}), '(tup)\n', (6391, 6396), False, 'import torch\n')] |
import numpy as np
import pandas as pd
from ..base import AbstractDensity
from .multinomial import Multinomial
from .piecewise_uniform import PiecewiseUniform
class JointDensity(AbstractDensity):
    """Joint density modelled as a product of independent univariate densities.

    Columns listed in ``categorical_features`` are fit with a Multinomial;
    every other column is fit with a PiecewiseUniform.
    """

    def __init__(self, numeric_params=None, verbose=False):
        super().__init__()
        self.Categorical = Multinomial
        self.Numeric = PiecewiseUniform
        self.numeric_params = numeric_params
        self.verbose = verbose

    def _fit_categorical(self, series):
        """Fit the categorical model on a single column."""
        estimator = self.Categorical()
        estimator.train(series)
        return estimator

    def _fit_continuous(self, values):
        """Fit the numeric model on a single column."""
        extra = {} if self.numeric_params is None else self.numeric_params
        estimator = self.Numeric(verbose=self.verbose - 1, **extra)
        estimator.train(values)
        return estimator

    def _fit_univarite(self, series):
        """Dispatch one column to the categorical or continuous fitter."""
        prefix = "Fitting univariate density on " + str(series.name) + " as "
        if series.name in self.categorical_features:
            self.vp(prefix + "categorical")
            return self._fit_categorical(series)
        self.vp(prefix + "continuous")
        return self._fit_continuous(series)

    def train(self, df, categorical_features=None):
        """Fit one univariate density per column of *df*."""
        assert isinstance(df, pd.DataFrame)
        if categorical_features is None:
            self.categorical_features = []
        else:
            assert isinstance(categorical_features, list)
            self.categorical_features = categorical_features
        self.columns = df.columns
        self.univariates = {c: self._fit_univarite(df[c]) for c in self.columns}
        #self.univariates = {v: stats.uniform(loc[k], scale[k]) for k, v in enumerate(self.columns)}

    def density(self, x, log=False):
        """Evaluate the joint (log-)density at each row of *x*."""
        assert isinstance(x, pd.DataFrame)
        assert all(x.columns == self.columns)
        # per-column log densities; summing over columns gives the joint log density
        per_column_log = pd.DataFrame({
            c: np.log(self.univariates[c].density(x[c]))
            for c in self.columns
        })
        log_density = per_column_log.sum(axis=1).values
        return log_density if log else np.exp(log_density)

    def rvs(self, n):
        """Generate n samples from the fitted distribution."""
        if not hasattr(self, 'univariates'):
            raise Exception("Call `train` before you call `rvs`")
        drawn = {c: self.univariates[c].rvs(n) for c in self.columns}
        return pd.DataFrame(drawn)[self.columns]
"numpy.exp",
"pandas.DataFrame"
] | [((2096, 2112), 'numpy.exp', 'np.exp', (['log_dens'], {}), '(log_dens)\n', (2102, 2112), True, 'import numpy as np\n'), ((2398, 2419), 'pandas.DataFrame', 'pd.DataFrame', (['samples'], {}), '(samples)\n', (2410, 2419), True, 'import pandas as pd\n')] |
import torch
import json
import numpy as np
import pandas as pd
from copy import deepcopy
from typing import Union, List, Tuple, Dict
from omegaconf import DictConfig
from dataset.carla_helper import MapHelper
from dataset.exception import FailedProcessing
from dataset.utils import (
distance, rotate_vector, get_angle,
convert_status_to_np, standardize_angle,
pad_objects
)
class DataProcessor:
"""
This main objective of this class
is to process input and output data before feeding into model
"""
def __init__(
    self,
    configs: DictConfig,
    map_path: str
):
    """Cache data hyper-parameters from *configs* and build the map helper.

    :param configs: experiment configuration; reads configs.model.data.*
    :param map_path: path handed to MapHelper for lane/waypoint queries
    """
    self._configs = configs
    # params
    self._history_steps = configs.model.data.history_steps
    self._future_steps = configs.model.data.future_steps
    self._num_waypoints = configs.model.data.num_waypoints
    self._dim = configs.model.data.dim
    # map_helper
    self._map_helper = MapHelper(map_path=map_path)
    # init current agent's state (filled in by the per-frame processing before use)
    self._pos_agent, self._agent_heading = None, None
def __process_input(
    self,
    df: pd.DataFrame
) -> Dict:
    """
    Main function to process input data.

    :param df: data in DataFrame
    :return:
        (Dict): all values are normalized by agent's orientation
            - map: (num_waypoints, dim)
            - traffic_light: (dim, )
            - agent: (dim, )
            - others: List[(history_steps, dim)]
    """
    # dict literals evaluate in order, so the map/traffic-light/agent/others
    # processing runs in the same sequence as before
    return {
        "map": self.__process_map(),
        "traffic_light": self.__process_traffic_light(df),
        "agent": self.__process_dynamics(df, object_type="AGENT"),
        "others": self.__process_dynamics(df, object_type="OTHERS"),
    }
def __process_output(
    self,
    df: pd.DataFrame
) -> torch.Tensor:
    """
    Main function to process output data.

    :param df: data in DataFrame
    :return:
        (torch.Tensor): (future_steps, 2) normalized by agent's orientation
    :raises FailedProcessing: when the AGENT track does not cover every future step
    """
    agent_rows = df.loc[df["object_type"] == "AGENT"]
    if len(agent_rows) != self._future_steps:
        raise FailedProcessing
    positions = np.stack(
        [agent_rows["center_x"].to_numpy(), agent_rows["center_y"].to_numpy()],
        axis=-1,
    )
    # rotate the future trajectory into the agent-centred frame
    normalized = rotate_vector(positions, self._pos_agent, self._agent_heading)
    return torch.from_numpy(normalized)
def __process_map(self):
    """
    Function to process waypoint map using self._map_helper.
    Positions are normalized by agent's orientation.

    :return:
        (torch.Tensor): (num_waypoints, dim)
            [0]: pos_x
            [1]: pos_y
            [2]: dist_from_agent_x
            [3]: dist_from_agent_y
            [4]: diff_heading_to_agent
    :raises FailedProcessing: when no lane waypoint is found near the agent
    """
    # get local waypoints around the current agent pose
    _, polygons = self._map_helper.get_local_lanes(
        agent_x=self._pos_agent[0], agent_y=self._pos_agent[1],
        heading=self._agent_heading
    )
    # terminate when can not find waypoint
    if len(polygons) == 0:
        raise FailedProcessing
    waypoints = np.concatenate(polygons, axis=0)  # (num_wp, 2)
    # keep only the N nearest waypoints
    dist_wp_agent = distance(waypoints, self._pos_agent)
    idx_min_dist = dist_wp_agent.argsort()[:self._num_waypoints]
    # pos
    pos_wp = waypoints[idx_min_dist]  # (self._num_waypoints, 2)
    # rotate wp by agent's orientation
    normed_pos_wp = rotate_vector(pos_wp, self._pos_agent, self._agent_heading)
    # dist
    # (self._num_waypoints, 1)
    dist_wp = np.expand_dims(distance(pos_wp, self._pos_agent), axis=-1)
    # diff-angle between the agent heading and the agent->waypoint direction
    v_agent_wp = pos_wp - self._pos_agent
    v_x_unit = np.tile([1, 0], len(v_agent_wp)).reshape(-1, 2)
    wp_angle_heading = get_angle(v_agent_wp, v_x_unit)
    # (self._num_waypoints, 1)
    diff_angle = np.expand_dims(
        np.abs(self._agent_heading) - wp_angle_heading,
        axis=-1
    )
    # gather all information
    wp_data = np.concatenate([normed_pos_wp, dist_wp, diff_angle], axis=-1)
    # pad zeros so the output always has exactly num_waypoints rows / dim cols
    wp_data = torch.from_numpy(wp_data)
    num_wp, dim_wp = wp_data.shape
    torch_wp = torch.zeros((self._num_waypoints, self._dim))
    torch_wp[:num_wp, :dim_wp] = wp_data  # (self._num_waypoints, self._dim)
    return torch_wp
def __process_traffic_light(
    self,
    df: pd.DataFrame
) -> torch.Tensor:
    """
    Function to process traffic light.
    Positions are normalized by agent's orientation.

    :param df: data in DataFrame
    :return:
        (torch.Tensor): (dim, )
            [0]: pos_x
            [1]: pos_y
            [2]: is_red
            [3]: is_yellow
            [4]: is_green
    """
    light_mapping = {
        "RED": 0,
        "YELLOW": 1,
        "GREEN": 2
    }
    # get current state of traffic light (most recent row, if any)
    row = df.loc[df["type"] == "traffic_light"]
    torch_tl = torch.zeros(self._dim)
    # encode only if there is traffic light...
    # else, all-zeros
    if len(row) > 0:
        row = row.iloc[-1]
        # get traffic light data
        tl_x = row["center_x"]
        tl_y = row["center_y"]
        tl_pos = np.array([[tl_x, tl_y]])
        tl_stt = json.loads(row["status"])["light_state"]
        # normalize traffic light position into the agent frame
        normed_tl_pos = rotate_vector(tl_pos, self._pos_agent, self._agent_heading).squeeze()
        # one-hot encoding traffic light status (red/yellow/green)
        encode_stt = np.zeros(3)
        encode_stt[light_mapping[tl_stt]] = 1
        tl_data = np.concatenate([normed_tl_pos, encode_stt])
        tl_data = torch.from_numpy(tl_data)
        # pad zeros up to self._dim
        torch_tl[:len(tl_data)] = tl_data  # (self._dim)
    return torch_tl
    def __process_dynamics(
            self,
            df: pd.DataFrame,
            object_type: str
    ) -> Union[List[torch.Tensor], torch.Tensor]:
        """
        Process the state history of dynamic objects (AGENT and OTHERS).

        Each object's track becomes per-timestep features expressed in the
        agent-centric frame (rotated by the agent's heading around the agent's
        position). Feature layout along the last axis:
            [0:2] position, [2:4] velocity, [4:6] acceleration,
            [6] heading relative to agent, [7] turn rate
        :param df: data in DataFrame
        :param object_type: should be in ["AGENT", "OTHERS"]
        :return:
            Could be List[torch.Tensor] or torch.Tensor
            - List[torch.Tensor]: for OTHERS
                (num_others, history_steps, dim)
            - torch.Tensor: for AGENT
                (history_steps, dim)
        :raises FailedProcessing: if object_type is not "AGENT"/"OTHERS"
        """
        def __process_each_object(df_group_by_object):
            # Build the (self._history_steps, self._dim) tensor for one track.
            # get raw world-frame track data
            _x = df_group_by_object["center_x"].to_numpy()
            _y = df_group_by_object["center_y"].to_numpy()
            _heading = df_group_by_object["heading"].to_numpy()
            # speed magnitude extracted from the JSON "status" column
            _mag_vel = convert_status_to_np(df_group_by_object["status"], key="velocity")
            # acceleration magnitude = first difference of speed; the first
            # element is duplicated so the array keeps the track length.
            # NOTE(review): a single-row track makes np.diff return an empty
            # array, so _diff_vel[0] raises IndexError -- confirm tracks
            # always have >= 2 samples
            _diff_vel = np.diff(_mag_vel)
            _mag_acc = np.concatenate([[_diff_vel[0]], _diff_vel])
            # turn rate = wrapped first difference of heading, same padding
            _diff_heading = standardize_angle(np.diff(_heading))
            _turn_rate = np.concatenate([[_diff_heading[0]], _diff_heading])
            # --- normalize data into the agent-centric frame ---
            # position
            _pos = np.stack([_x, _y], axis=-1)
            _normed_pos = rotate_vector(_pos, self._pos_agent, self._agent_heading)  # (self._history_steps, 2)
            # heading relative to the agent's current heading
            _normed_heading = np.expand_dims(_heading - self._agent_heading, axis=-1)  # (self._history_steps, 1)
            # velocity vector reconstructed from speed magnitude and heading
            _vel_x = _mag_vel * np.cos(_heading)
            _vel_y = _mag_vel * np.sin(_heading)
            _vel = np.stack([_vel_x, _vel_y], axis=-1)
            _normed_vel = rotate_vector(_vel, self._pos_agent, self._agent_heading)  # (self._history_steps, 2)
            # acceleration vector, same construction as velocity
            _acc_x = _mag_acc * np.cos(_heading)
            _acc_y = _mag_acc * np.sin(_heading)
            _acc = np.stack([_acc_x, _acc_y], axis=-1)
            _normed_acc = rotate_vector(_acc, self._pos_agent, self._agent_heading)  # (self._history_steps, 2)
            # turn rate
            _normed_turn_rate = np.expand_dims(_turn_rate, axis=-1)  # (self._history_steps, 1)
            # gather all per-timestep features along the last axis
            dynamic_data = np.concatenate([
                _normed_pos,
                _normed_vel,
                _normed_acc,
                _normed_heading,
                _normed_turn_rate
            ], axis=-1)
            dynamic_data = torch.from_numpy(dynamic_data)
            h_data, w_data = dynamic_data.shape
            # pad zeros so short histories still yield a fixed-size tensor
            torch_dynamic = torch.zeros((self._history_steps, self._dim))
            torch_dynamic[:h_data, :w_data] = dynamic_data
            return torch_dynamic
        # --- main flow ---
        if object_type not in ["AGENT", "OTHERS"]:
            raise FailedProcessing
        # AGENT: a single track -> one tensor
        if object_type == "AGENT":
            df_by_object = df.loc[df["object_type"] == "AGENT"]
            return __process_each_object(df_by_object)
        # OTHERS: one tensor per object id
        container = list()
        df_others = df.loc[df["object_type"] == "OTHERS"]
        df_group_by_id = df_others.groupby(by=["id"])
        for _, data in df_group_by_id:
            container.append(__process_each_object(data))
        return container
def __get_inp_out_data(
self,
df: pd.DataFrame
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Process to separate input/output DataFrame
:param df: all DataFrame
:return:
Tuple[pd.DataFrame, pd.DataFrame]
- input: with number of timestamps = history_step
- output: with number of timestamps = future_step
"""
df_by_ts = df.groupby(by=["timestamp"])
# store inp/out data separately
inp_data = pd.DataFrame(columns=df.columns)
out_data = pd.DataFrame(columns=df.columns)
for i, (ts, df_tick) in enumerate(df_by_ts):
if i < self._history_steps:
inp_data = pd.concat([inp_data, df_tick])
else:
out_data = pd.concat([out_data, df_tick])
return inp_data, out_data
@staticmethod
def __get_current_agent_state(
df: pd.DataFrame
) -> Tuple[np.ndarray, float]:
"""
Get current state of AGENT
:param df: input data in DataFrame
:return:
(Tuple[np.ndarray, float]):
- position of agent in world coordinate: (agent_x, agent_y)
- heading of agent in world coordinate
"""
row = df.loc[df["object_type"] == "AGENT"].iloc[-1]
return (
np.array([row["center_x"], row["center_y"]]),
row["heading"]
)
def process(
self,
inputs: Union[str, pd.DataFrame],
is_inference=False
) -> Union[
Tuple[Dict, torch.Tensor],
Tuple[None, None]
]:
"""
Main function to process data
:param inputs: could be:
- path to csv file or
- DataFrame
:param is_inference: flag to trigger inference
- if False:
+ inputs should include timestamps for input and output
+ process output data (for training/validating process)
- else True:
+ inputs only include timestamps for input
+ do not process output data
:return:
Tuple[Dict, torch.Tensor]
- processed input data
- processed output data
if return (None, None) -> process failed
"""
assert isinstance(inputs, str) or isinstance(inputs, pd.DataFrame), "Inputs should be str_path or df"
if isinstance(inputs, str):
df = pd.read_csv(inputs)
else:
df = inputs
try:
inp_data, out_data = self.__get_inp_out_data(df)
# get current agent position
self._pos_agent, self._agent_heading = self.__get_current_agent_state(inp_data)
# process input
processed_input = self.__process_input(inp_data)
# process output
processed_output = None
if not is_inference:
processed_output = self.__process_output(out_data)
return processed_input, processed_output
except FailedProcessing:
return None, None
# --- utility function for DataLoader ---
def collate_fn(
        data: List[Tuple]
):
    """
    DataLoader collate hook: stack a list of (inp, out) samples into a batch.

    :param data: List of (inp, out) data
    :return: stack into batch
    """
    # delegate entirely to the shared batching helper
    return process_batch_data(data)
def process_batch_data(
        batch_data: List[Tuple]
):
    """
    Function to stack data into batch dimension
    :param batch_data: batch data in list
    :return:
        Tuple[Dict, torch.Tensor]
        - Data input stacked in batch dimension
            + traffic_light: (batch, dim)
            + map: (batch, num_waypoints, dim)
            + agent: (batch, history_steps, dim)
            + others: (batch, num_others, history_steps, dim)
        - Data output stacked in batch dimension
            (batch, future_steps, 2)
    """
    # clone so padding below cannot mutate the caller's samples
    batch_data = deepcopy(batch_data)
    # input accumulators; "maps" avoids shadowing the builtin `map`
    lights = list()
    maps = list()
    agents = list()
    others = list()
    pad_objects(batch_data)  # padding zeros for objects
    # output accumulator
    gts = list()
    # append data
    for inp, out in batch_data:
        lights.append(inp["traffic_light"])
        maps.append(inp["map"])
        agents.append(inp["agent"])
        others.append(inp["others"])
        gts.append(out)
    # stack batch data
    x = {
        "traffic_light": torch.stack(lights, dim=0),
        "map": torch.stack(maps, dim=0),
        "agent": torch.stack(agents, dim=0),
        "others": torch.stack(others, dim=0)
    }
    y = torch.stack(gts, dim=0)
    return x, y
| [
"dataset.utils.pad_objects",
"dataset.carla_helper.MapHelper",
"pandas.read_csv",
"torch.from_numpy",
"numpy.array",
"dataset.utils.convert_status_to_np",
"copy.deepcopy",
"numpy.sin",
"dataset.utils.rotate_vector",
"numpy.diff",
"numpy.stack",
"numpy.concatenate",
"pandas.DataFrame",
"num... | [((13710, 13730), 'copy.deepcopy', 'deepcopy', (['batch_data'], {}), '(batch_data)\n', (13718, 13730), False, 'from copy import deepcopy\n'), ((13822, 13845), 'dataset.utils.pad_objects', 'pad_objects', (['batch_data'], {}), '(batch_data)\n', (13833, 13845), False, 'from dataset.utils import distance, rotate_vector, get_angle, convert_status_to_np, standardize_angle, pad_objects\n'), ((14353, 14375), 'torch.stack', 'torch.stack', (['gt'], {'dim': '(0)'}), '(gt, dim=0)\n', (14364, 14375), False, 'import torch\n'), ((963, 991), 'dataset.carla_helper.MapHelper', 'MapHelper', ([], {'map_path': 'map_path'}), '(map_path=map_path)\n', (972, 991), False, 'from dataset.carla_helper import MapHelper\n'), ((2483, 2510), 'numpy.stack', 'np.stack', (['[_x, _y]'], {'axis': '(-1)'}), '([_x, _y], axis=-1)\n', (2491, 2510), True, 'import numpy as np\n'), ((2571, 2628), 'dataset.utils.rotate_vector', 'rotate_vector', (['_pos', 'self._pos_agent', 'self._agent_heading'], {}), '(_pos, self._pos_agent, self._agent_heading)\n', (2584, 2628), False, 'from dataset.utils import distance, rotate_vector, get_angle, convert_status_to_np, standardize_angle, pad_objects\n'), ((2648, 2677), 'torch.from_numpy', 'torch.from_numpy', (['_normed_pos'], {}), '(_normed_pos)\n', (2664, 2677), False, 'import torch\n'), ((3456, 3488), 'numpy.concatenate', 'np.concatenate', (['polygons'], {'axis': '(0)'}), '(polygons, axis=0)\n', (3470, 3488), True, 'import numpy as np\n'), ((3563, 3599), 'dataset.utils.distance', 'distance', (['waypoints', 'self._pos_agent'], {}), '(waypoints, self._pos_agent)\n', (3571, 3599), False, 'from dataset.utils import distance, rotate_vector, get_angle, convert_status_to_np, standardize_angle, pad_objects\n'), ((3820, 3879), 'dataset.utils.rotate_vector', 'rotate_vector', (['pos_wp', 'self._pos_agent', 'self._agent_heading'], {}), '(pos_wp, self._pos_agent, self._agent_heading)\n', (3833, 3879), False, 'from dataset.utils import distance, rotate_vector, get_angle, 
convert_status_to_np, standardize_angle, pad_objects\n'), ((4168, 4199), 'dataset.utils.get_angle', 'get_angle', (['v_agent_wp', 'v_x_unit'], {}), '(v_agent_wp, v_x_unit)\n', (4177, 4199), False, 'from dataset.utils import distance, rotate_vector, get_angle, convert_status_to_np, standardize_angle, pad_objects\n'), ((4414, 4475), 'numpy.concatenate', 'np.concatenate', (['[normed_pos_wp, dist_wp, diff_angle]'], {'axis': '(-1)'}), '([normed_pos_wp, dist_wp, diff_angle], axis=-1)\n', (4428, 4475), True, 'import numpy as np\n'), ((4515, 4540), 'torch.from_numpy', 'torch.from_numpy', (['wp_data'], {}), '(wp_data)\n', (4531, 4540), False, 'import torch\n'), ((4599, 4644), 'torch.zeros', 'torch.zeros', (['(self._num_waypoints, self._dim)'], {}), '((self._num_waypoints, self._dim))\n', (4610, 4644), False, 'import torch\n'), ((5432, 5454), 'torch.zeros', 'torch.zeros', (['self._dim'], {}), '(self._dim)\n', (5443, 5454), False, 'import torch\n'), ((10221, 10253), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df.columns'}), '(columns=df.columns)\n', (10233, 10253), True, 'import pandas as pd\n'), ((10273, 10305), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df.columns'}), '(columns=df.columns)\n', (10285, 10305), True, 'import pandas as pd\n'), ((14183, 14208), 'torch.stack', 'torch.stack', (['light'], {'dim': '(0)'}), '(light, dim=0)\n', (14194, 14208), False, 'import torch\n'), ((14225, 14248), 'torch.stack', 'torch.stack', (['map'], {'dim': '(0)'}), '(map, dim=0)\n', (14236, 14248), False, 'import torch\n'), ((14267, 14292), 'torch.stack', 'torch.stack', (['agent'], {'dim': '(0)'}), '(agent, dim=0)\n', (14278, 14292), False, 'import torch\n'), ((14312, 14338), 'torch.stack', 'torch.stack', (['others'], {'dim': '(0)'}), '(others, dim=0)\n', (14323, 14338), False, 'import torch\n'), ((3963, 3996), 'dataset.utils.distance', 'distance', (['pos_wp', 'self._pos_agent'], {}), '(pos_wp, self._pos_agent)\n', (3971, 3996), False, 'from dataset.utils import 
distance, rotate_vector, get_angle, convert_status_to_np, standardize_angle, pad_objects\n'), ((5717, 5741), 'numpy.array', 'np.array', (['[[tl_x, tl_y]]'], {}), '([[tl_x, tl_y]])\n', (5725, 5741), True, 'import numpy as np\n'), ((6027, 6038), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6035, 6038), True, 'import numpy as np\n'), ((6112, 6155), 'numpy.concatenate', 'np.concatenate', (['[normed_tl_pos, encode_stt]'], {}), '([normed_tl_pos, encode_stt])\n', (6126, 6155), True, 'import numpy as np\n'), ((6178, 6203), 'torch.from_numpy', 'torch.from_numpy', (['tl_data'], {}), '(tl_data)\n', (6194, 6203), False, 'import torch\n'), ((7201, 7267), 'dataset.utils.convert_status_to_np', 'convert_status_to_np', (["df_group_by_object['status']"], {'key': '"""velocity"""'}), "(df_group_by_object['status'], key='velocity')\n", (7221, 7267), False, 'from dataset.utils import distance, rotate_vector, get_angle, convert_status_to_np, standardize_angle, pad_objects\n'), ((7292, 7309), 'numpy.diff', 'np.diff', (['_mag_vel'], {}), '(_mag_vel)\n', (7299, 7309), True, 'import numpy as np\n'), ((7333, 7376), 'numpy.concatenate', 'np.concatenate', (['[[_diff_vel[0]], _diff_vel]'], {}), '([[_diff_vel[0]], _diff_vel])\n', (7347, 7376), True, 'import numpy as np\n'), ((7467, 7518), 'numpy.concatenate', 'np.concatenate', (['[[_diff_heading[0]], _diff_heading]'], {}), '([[_diff_heading[0]], _diff_heading])\n', (7481, 7518), True, 'import numpy as np\n'), ((7599, 7626), 'numpy.stack', 'np.stack', (['[_x, _y]'], {'axis': '(-1)'}), '([_x, _y], axis=-1)\n', (7607, 7626), True, 'import numpy as np\n'), ((7653, 7710), 'dataset.utils.rotate_vector', 'rotate_vector', (['_pos', 'self._pos_agent', 'self._agent_heading'], {}), '(_pos, self._pos_agent, self._agent_heading)\n', (7666, 7710), False, 'from dataset.utils import distance, rotate_vector, get_angle, convert_status_to_np, standardize_angle, pad_objects\n'), ((7791, 7846), 'numpy.expand_dims', 'np.expand_dims', (['(_heading - 
self._agent_heading)'], {'axis': '(-1)'}), '(_heading - self._agent_heading, axis=-1)\n', (7805, 7846), True, 'import numpy as np\n'), ((8015, 8050), 'numpy.stack', 'np.stack', (['[_vel_x, _vel_y]'], {'axis': '(-1)'}), '([_vel_x, _vel_y], axis=-1)\n', (8023, 8050), True, 'import numpy as np\n'), ((8077, 8134), 'dataset.utils.rotate_vector', 'rotate_vector', (['_vel', 'self._pos_agent', 'self._agent_heading'], {}), '(_vel, self._pos_agent, self._agent_heading)\n', (8090, 8134), False, 'from dataset.utils import distance, rotate_vector, get_angle, convert_status_to_np, standardize_angle, pad_objects\n'), ((8307, 8342), 'numpy.stack', 'np.stack', (['[_acc_x, _acc_y]'], {'axis': '(-1)'}), '([_acc_x, _acc_y], axis=-1)\n', (8315, 8342), True, 'import numpy as np\n'), ((8369, 8426), 'dataset.utils.rotate_vector', 'rotate_vector', (['_acc', 'self._pos_agent', 'self._agent_heading'], {}), '(_acc, self._pos_agent, self._agent_heading)\n', (8382, 8426), False, 'from dataset.utils import distance, rotate_vector, get_angle, convert_status_to_np, standardize_angle, pad_objects\n'), ((8511, 8546), 'numpy.expand_dims', 'np.expand_dims', (['_turn_rate'], {'axis': '(-1)'}), '(_turn_rate, axis=-1)\n', (8525, 8546), True, 'import numpy as np\n'), ((8635, 8739), 'numpy.concatenate', 'np.concatenate', (['[_normed_pos, _normed_vel, _normed_acc, _normed_heading, _normed_turn_rate]'], {'axis': '(-1)'}), '([_normed_pos, _normed_vel, _normed_acc, _normed_heading,\n _normed_turn_rate], axis=-1)\n', (8649, 8739), True, 'import numpy as np\n'), ((8857, 8887), 'torch.from_numpy', 'torch.from_numpy', (['dynamic_data'], {}), '(dynamic_data)\n', (8873, 8887), False, 'import torch\n'), ((8989, 9034), 'torch.zeros', 'torch.zeros', (['(self._history_steps, self._dim)'], {}), '((self._history_steps, self._dim))\n', (9000, 9034), False, 'import torch\n'), ((11064, 11108), 'numpy.array', 'np.array', (["[row['center_x'], row['center_y']]"], {}), "([row['center_x'], row['center_y']])\n", (11072, 11108), 
True, 'import numpy as np\n'), ((12201, 12220), 'pandas.read_csv', 'pd.read_csv', (['inputs'], {}), '(inputs)\n', (12212, 12220), True, 'import pandas as pd\n'), ((4284, 4311), 'numpy.abs', 'np.abs', (['self._agent_heading'], {}), '(self._agent_heading)\n', (4290, 4311), True, 'import numpy as np\n'), ((5763, 5788), 'json.loads', 'json.loads', (["row['status']"], {}), "(row['status'])\n", (5773, 5788), False, 'import json\n'), ((7423, 7440), 'numpy.diff', 'np.diff', (['_heading'], {}), '(_heading)\n', (7430, 7440), True, 'import numpy as np\n'), ((7930, 7946), 'numpy.cos', 'np.cos', (['_heading'], {}), '(_heading)\n', (7936, 7946), True, 'import numpy as np\n'), ((7979, 7995), 'numpy.sin', 'np.sin', (['_heading'], {}), '(_heading)\n', (7985, 7995), True, 'import numpy as np\n'), ((8222, 8238), 'numpy.cos', 'np.cos', (['_heading'], {}), '(_heading)\n', (8228, 8238), True, 'import numpy as np\n'), ((8271, 8287), 'numpy.sin', 'np.sin', (['_heading'], {}), '(_heading)\n', (8277, 8287), True, 'import numpy as np\n'), ((10426, 10456), 'pandas.concat', 'pd.concat', (['[inp_data, df_tick]'], {}), '([inp_data, df_tick])\n', (10435, 10456), True, 'import pandas as pd\n'), ((10502, 10532), 'pandas.concat', 'pd.concat', (['[out_data, df_tick]'], {}), '([out_data, df_tick])\n', (10511, 10532), True, 'import pandas as pd\n'), ((5880, 5939), 'dataset.utils.rotate_vector', 'rotate_vector', (['tl_pos', 'self._pos_agent', 'self._agent_heading'], {}), '(tl_pos, self._pos_agent, self._agent_heading)\n', (5893, 5939), False, 'from dataset.utils import distance, rotate_vector, get_angle, convert_status_to_np, standardize_angle, pad_objects\n')] |
import cv2
import os, sys
import numpy as np
# output directory for images written by save_image (created on demand)
save_path = 'images/train/'
# Helpful functions #
def save_image(name, img):
    """Write *img* as an 8-bit TIFF named ``<name>.tif`` under ``save_path``.

    :param name: file stem; the ``.tif`` extension is appended
    :param img: array-like image data, cast to ``np.uint8`` before writing
    """
    # exist_ok avoids the check-then-create race of the old
    # os.path.exists()/os.makedirs() pair
    os.makedirs(save_path, exist_ok=True)
    cv2.imwrite(save_path + name + '.tif', np.array(img, dtype=np.uint8))
def get_api_key():
    """Return the Google Maps API key from argv, credentials.py, or the environment.

    Lookup order: a single CLI argument, then ``src.credentials``, then the
    ``GOOGLE_MAPS_API_KEY`` environment variable; exits the process when
    none is available.
    """
    if len(sys.argv) == 2:  # was `is 2`: identity test on an int, not equality
        print('Reading API key from input argument')
        return sys.argv.pop()
    else:
        try:
            from src import credentials
            if hasattr(credentials, 'GOOGLE_MAPS_API_KEY'):
                print('Reading API key from credentials.py')
                return credentials.GOOGLE_MAPS_API_KEY
            # NOTE(review): returns None when the module exists but lacks the
            # attribute -- the environment fallback is NOT tried in that case
        except ImportError:  # bare `except:` used to hide unrelated errors
            if 'GOOGLE_MAPS_API_KEY' in os.environ:
                print('Reading API key from environment')
                return os.environ['GOOGLE_MAPS_API_KEY']
            else:
                print('API Key not found.')
sys.exit(1) | [
"os.path.exists",
"os.makedirs",
"numpy.array",
"sys.exit",
"sys.argv.pop"
] | [((136, 161), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (150, 161), False, 'import os, sys\n'), ((171, 193), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (182, 193), False, 'import os, sys\n'), ((233, 262), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (241, 262), True, 'import numpy as np\n'), ((380, 394), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (392, 394), False, 'import os, sys\n'), ((895, 906), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (903, 906), False, 'import os, sys\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.