id int64 | prompt string | docstring string |
|---|---|---|
165,612 | from enum import Enum
import random
import sys
import json
import progressbar
import model.torch_utils
import data_util.sql_util
import torch
def write_prediction(fileptr,
                     identifier,
                     input_seq,
                     probability,
                     prediction,
                     flat_prediction,
                     gold_query,
                     flat_gold_queries,
                     gold_tables,
                     index_in_interaction,
                     database_username,
                     database_password,
                     database_timeout,
                     compute_metrics=True,
                     beam=None):
    """Writes one prediction as a single JSON line to `fileptr`.

    The emitted object records the predicted query, the gold annotations, and
    -- when `compute_metrics` is True -- string/table correctness plus the
    best table precision/recall/F1 over all gold result tables.
    """
    pred_obj = {}
    pred_obj["identifier"] = identifier
    # Identifiers of the form "<database_id>/<interaction_id>"; anything else
    # is treated as a plain ATIS identifier.
    if len(identifier.split('/')) == 2:
        database_id, interaction_id = identifier.split('/')
    else:
        database_id = 'atis'
        interaction_id = identifier
    pred_obj["database_id"] = database_id
    pred_obj["interaction_id"] = interaction_id
    pred_obj["input_seq"] = input_seq
    pred_obj["probability"] = probability
    pred_obj["prediction"] = prediction
    pred_obj["flat_prediction"] = flat_prediction
    pred_obj["gold_query"] = gold_query
    pred_obj["flat_gold_queries"] = flat_gold_queries
    pred_obj["index_in_interaction"] = index_in_interaction
    pred_obj["gold_tables"] = str(gold_tables)
    pred_obj["beam"] = beam
    # Now compute the metrics we want.
    if compute_metrics:
        # First metric: whether flat predicted query is in the gold query set.
        correct_string = " ".join(flat_prediction) in [
            " ".join(q) for q in flat_gold_queries]
        pred_obj["correct_string"] = correct_string
        # Database metrics
        if not correct_string:
            # NOTE(review): `sql_util` is presumably bound by the
            # `data_util.sql_util` import at the top of the file -- verify.
            syntactic, semantic, pred_table = sql_util.execution_results(
                " ".join(flat_prediction), database_username, database_password, database_timeout)
            pred_table = sorted(pred_table)
            best_prec = 0.
            best_rec = 0.
            best_f1 = 0.
            # Keep the best precision/recall/F1 achieved against any gold table.
            for gold_table in gold_tables:
                num_overlap = float(len(set(pred_table) & set(gold_table)))
                if len(set(gold_table)) > 0:
                    prec = num_overlap / len(set(gold_table))
                else:
                    prec = 1.
                if len(set(pred_table)) > 0:
                    rec = num_overlap / len(set(pred_table))
                else:
                    rec = 1.
                if prec > 0. and rec > 0.:
                    f1 = (2 * (prec * rec)) / (prec + rec)
                else:
                    f1 = 1.
                best_prec = max(best_prec, prec)
                best_rec = max(best_rec, rec)
                best_f1 = max(best_f1, f1)
        else:
            # Exact string match: execution is trivially correct.
            syntactic = True
            semantic = True
            pred_table = []
            best_prec = 1.
            best_rec = 1.
            best_f1 = 1.
        assert best_prec <= 1.
        assert best_rec <= 1.
        assert best_f1 <= 1.
        pred_obj["syntactic"] = syntactic
        pred_obj["semantic"] = semantic
        correct_table = (pred_table in gold_tables) or correct_string
        pred_obj["correct_table"] = correct_table
        pred_obj["strict_correct_table"] = correct_table and syntactic
        pred_obj["pred_table"] = str(pred_table)
        pred_obj["table_prec"] = best_prec
        pred_obj["table_rec"] = best_rec
        pred_obj["table_f1"] = best_f1
    fileptr.write(json.dumps(pred_obj) + "\n")
def get_progressbar(name, size):
    """Builds a labelled progress bar counting up to `size`.

    Inputs:
        name (str): The label displayed on the left of the bar.
        size (int): The maximum value of the progress bar.
    """
    widgets = [
        name,
        progressbar.Bar('=', '[', ']'),
        ' ',
        progressbar.Percentage(),
        ' ',
        progressbar.ETA(),
    ]
    return progressbar.ProgressBar(maxval=size, widgets=widgets)
def update_sums(metrics,
                metrics_sums,
                predicted_sequence,
                flat_sequence,
                gold_query,
                original_gold_query,
                gold_forcing=False,
                loss=None,
                token_accuracy=0.,
                database_username="",
                database_password="",
                database_timeout=0,
                gold_table=None):
    """" Updates summing for metrics in an aggregator.

    Adds this example's contribution to `metrics_sums` for every metric
    requested in `metrics` (members of the `Metrics` enum, defined elsewhere
    in this module -- not visible here).

    TODO: don't use sums, just keep the raw value.
    """
    if Metrics.LOSS in metrics:
        metrics_sums[Metrics.LOSS] += loss.item()
    if Metrics.TOKEN_ACCURACY in metrics:
        if gold_forcing:
            # With teacher forcing the caller already measured token accuracy.
            metrics_sums[Metrics.TOKEN_ACCURACY] += token_accuracy
        else:
            # Otherwise compare prediction and gold position-by-position
            # (recall over gold tokens).
            num_tokens_correct = 0.
            for j, token in enumerate(gold_query):
                if len(
                        predicted_sequence) > j and predicted_sequence[j] == token:
                    num_tokens_correct += 1
            metrics_sums[Metrics.TOKEN_ACCURACY] += num_tokens_correct / \
                len(gold_query)
    if Metrics.STRING_ACCURACY in metrics:
        metrics_sums[Metrics.STRING_ACCURACY] += int(
            flat_sequence == original_gold_query)
    if Metrics.CORRECT_TABLES in metrics:
        assert database_username, "You did not provide a database username"
        assert database_password, "You did not provide a database password"
        assert database_timeout > 0, "Database timeout is 0 seconds"
        # Evaluate SQL
        if flat_sequence != original_gold_query:
            syntactic, semantic, table = sql_util.execution_results(
                " ".join(flat_sequence), database_username, database_password, database_timeout)
        else:
            # Exact string match: reuse the gold execution result.
            syntactic = True
            semantic = True
            table = gold_table
        metrics_sums[Metrics.CORRECT_TABLES] += int(table == gold_table)
        # The execution-derived metrics below are only available when
        # CORRECT_TABLES was requested (they reuse its execution result).
        if Metrics.SYNTACTIC_QUERIES in metrics:
            metrics_sums[Metrics.SYNTACTIC_QUERIES] += int(syntactic)
        if Metrics.SEMANTIC_QUERIES in metrics:
            metrics_sums[Metrics.SEMANTIC_QUERIES] += int(semantic)
        if Metrics.STRICT_CORRECT_TABLES in metrics:
            metrics_sums[Metrics.STRICT_CORRECT_TABLES] += int(
                table == gold_table and syntactic)
def construct_averages(metrics_sums, total_num):
    """Turns summed metric values into averages.

    Inputs:
        metrics_sums (dict Metric -> float): Summed value for each metric.
        total_num (int): Denominator used for the average.
    """
    averages = {}
    for key in metrics_sums:
        mean = metrics_sums[key] / total_num
        # Everything except the loss is reported as a percentage.
        averages[key] = mean if key == "loss" else mean * 100.
    return averages
The provided code snippet includes necessary dependencies for implementing the `evaluate_interaction_sample` function. Write a Python function `def evaluate_interaction_sample(sample, model, max_generation_length, name="", gold_forcing=False, metrics=None, total_num=-1, database_username="", database_password="", database_timeout=0, use_predicted_queries=False, write_results=False, use_gpu=False, compute_metrics=False)` to solve the following problem:
Evaluates a sample of interactions.
Here is the function:
def evaluate_interaction_sample(sample,
                                model,
                                max_generation_length,
                                name="",
                                gold_forcing=False,
                                metrics=None,
                                total_num=-1,
                                database_username="",
                                database_password="",
                                database_timeout=0,
                                use_predicted_queries=False,
                                write_results=False,
                                use_gpu=False,
                                compute_metrics=False):
    """ Evaluates a sample of interactions.

    Runs the model over each interaction, optionally writes per-utterance
    predictions to "<name>_predictions.json", accumulates the requested
    metrics, and returns (metric averages, flat list of predictions).
    """
    predictions_file = open(name + "_predictions.json", "w")
    print("Predicting with file " + str(name + "_predictions.json"))
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    num_utterances = 0
    predictions = []
    # NOTE(review): the `use_gpu` parameter is immediately overwritten by the
    # command-line flags below -- confirm this is intentional.
    use_gpu = not ("--no_gpus" in sys.argv or "--no_gpus=1" in sys.argv)
    model.eval()
    # Databases skipped entirely during evaluation.
    break_lst = ["baseball_1", "soccer_1", "formula_1", "cre_Drama_Workshop_Groups", "sakila_1"]
    for i, interaction in enumerate(sample):
        name = interaction.identifier.split('/')[0]
        if name in break_lst:
            continue
        try:
            with torch.no_grad():
                if use_predicted_queries:
                    example_preds = model.predict_with_predicted_queries(
                        interaction,
                        max_generation_length)
                else:
                    example_preds = model.predict_with_gold_queries(
                        interaction,
                        max_generation_length,
                        feed_gold_query=gold_forcing)
            torch.cuda.empty_cache()
        except RuntimeError as exception:
            # Typically an OOM; abort the whole run with context.
            print("Failed on interaction: " + str(interaction.identifier))
            print(exception)
            print("\n\n")
            exit()
        predictions.extend(example_preds)
        # One prediction per utterance, or none at all for the interaction.
        assert len(example_preds) == len(
            interaction.interaction.utterances) or not example_preds
        for j, pred in enumerate(example_preds):
            num_utterances += 1
            sequence, loss, token_accuracy, _, decoder_results = pred
            if use_predicted_queries:
                item = interaction.processed_utterances[j]
                original_utt = interaction.interaction.utterances[item.index]
                gold_query = original_utt.gold_query_to_use
                original_gold_query = original_utt.original_gold_query
                gold_table = original_utt.gold_sql_results
                gold_queries = [q[0] for q in original_utt.all_gold_queries]
                gold_tables = [q[1] for q in original_utt.all_gold_queries]
                index = item.index
            else:
                item = interaction.gold_utterances()[j]
                gold_query = item.gold_query()
                original_gold_query = item.original_gold_query()
                gold_table = item.gold_table()
                gold_queries = item.original_gold_queries()
                gold_tables = item.gold_tables()
                index = item.utterance_index
            if loss:
                # Normalize the loss by gold-query length.
                loss = loss / len(gold_query)
            flat_sequence = item.flatten_sequence(sequence)
            if write_results:
                # Sort beam entries by descending probability (negated score).
                ori_beam = decoder_results.beam
                beam = []
                for x in ori_beam:
                    beam.append((-x[0], item.flatten_sequence(x[1].sequence)))
                beam.sort()
                write_prediction(
                    predictions_file,
                    identifier=interaction.identifier,
                    input_seq=item.input_sequence(),
                    probability=decoder_results.probability,
                    prediction=sequence,
                    flat_prediction=flat_sequence,
                    gold_query=gold_query,
                    flat_gold_queries=gold_queries,
                    gold_tables=gold_tables,
                    index_in_interaction=index,
                    database_username=database_username,
                    database_password=database_password,
                    database_timeout=database_timeout,
                    compute_metrics=compute_metrics,
                    beam=beam)
            update_sums(metrics,
                        metrics_sums,
                        sequence,
                        flat_sequence,
                        gold_query,
                        original_gold_query,
                        gold_forcing,
                        loss,
                        token_accuracy,
                        database_username=database_username,
                        database_password=database_password,
                        database_timeout=database_timeout,
                        gold_table=gold_table)
        progbar.update(i)
    progbar.finish()
    if total_num < 0:
        total_num = num_utterances
    predictions_file.close()
    return construct_averages(metrics_sums, total_num), predictions | Evaluates a sample of interactions. |
165,613 | from enum import Enum
import random
import sys
import json
import progressbar
import model.torch_utils
import data_util.sql_util
import torch
def write_prediction(fileptr,
                     identifier,
                     input_seq,
                     probability,
                     prediction,
                     flat_prediction,
                     gold_query,
                     flat_gold_queries,
                     gold_tables,
                     index_in_interaction,
                     database_username,
                     database_password,
                     database_timeout,
                     compute_metrics=True,
                     beam=None):
    """Writes one prediction as a single JSON line to `fileptr`,
    optionally including string/table correctness and table P/R/F1.
    """
    pred_obj = {}
    pred_obj["identifier"] = identifier
    # "<database_id>/<interaction_id>" identifiers; otherwise assume ATIS.
    if len(identifier.split('/')) == 2:
        database_id, interaction_id = identifier.split('/')
    else:
        database_id = 'atis'
        interaction_id = identifier
    pred_obj["database_id"] = database_id
    pred_obj["interaction_id"] = interaction_id
    pred_obj["input_seq"] = input_seq
    pred_obj["probability"] = probability
    pred_obj["prediction"] = prediction
    pred_obj["flat_prediction"] = flat_prediction
    pred_obj["gold_query"] = gold_query
    pred_obj["flat_gold_queries"] = flat_gold_queries
    pred_obj["index_in_interaction"] = index_in_interaction
    pred_obj["gold_tables"] = str(gold_tables)
    pred_obj["beam"] = beam
    # Now compute the metrics we want.
    if compute_metrics:
        # First metric: whether flat predicted query is in the gold query set.
        correct_string = " ".join(flat_prediction) in [
            " ".join(q) for q in flat_gold_queries]
        pred_obj["correct_string"] = correct_string
        # Database metrics
        if not correct_string:
            # NOTE(review): `sql_util` is presumably bound by the
            # `data_util.sql_util` import at the top of the file -- verify.
            syntactic, semantic, pred_table = sql_util.execution_results(
                " ".join(flat_prediction), database_username, database_password, database_timeout)
            pred_table = sorted(pred_table)
            best_prec = 0.
            best_rec = 0.
            best_f1 = 0.
            # Best precision/recall/F1 over all gold result tables.
            for gold_table in gold_tables:
                num_overlap = float(len(set(pred_table) & set(gold_table)))
                if len(set(gold_table)) > 0:
                    prec = num_overlap / len(set(gold_table))
                else:
                    prec = 1.
                if len(set(pred_table)) > 0:
                    rec = num_overlap / len(set(pred_table))
                else:
                    rec = 1.
                if prec > 0. and rec > 0.:
                    f1 = (2 * (prec * rec)) / (prec + rec)
                else:
                    f1 = 1.
                best_prec = max(best_prec, prec)
                best_rec = max(best_rec, rec)
                best_f1 = max(best_f1, f1)
        else:
            # Exact string match: execution is trivially correct.
            syntactic = True
            semantic = True
            pred_table = []
            best_prec = 1.
            best_rec = 1.
            best_f1 = 1.
        assert best_prec <= 1.
        assert best_rec <= 1.
        assert best_f1 <= 1.
        pred_obj["syntactic"] = syntactic
        pred_obj["semantic"] = semantic
        correct_table = (pred_table in gold_tables) or correct_string
        pred_obj["correct_table"] = correct_table
        pred_obj["strict_correct_table"] = correct_table and syntactic
        pred_obj["pred_table"] = str(pred_table)
        pred_obj["table_prec"] = best_prec
        pred_obj["table_rec"] = best_rec
        pred_obj["table_f1"] = best_f1
    fileptr.write(json.dumps(pred_obj) + "\n")
def get_progressbar(name, size):
    """Gets a progress bar object given a name and the total size.

    Inputs:
        name (str): The name to display on the side.
        size (int): The maximum size of the progress bar.
    """
    return progressbar.ProgressBar(maxval=size,
                                   widgets=[name,
                                            progressbar.Bar('=', '[', ']'),
                                            ' ',
                                            progressbar.Percentage(),
                                            ' ',
                                            progressbar.ETA()])
def update_sums(metrics,
                metrics_sums,
                predicted_sequence,
                flat_sequence,
                gold_query,
                original_gold_query,
                gold_forcing=False,
                loss=None,
                token_accuracy=0.,
                database_username="",
                database_password="",
                database_timeout=0,
                gold_table=None):
    """" Updates summing for metrics in an aggregator.

    Adds this example's contribution to `metrics_sums` for every requested
    metric (members of the `Metrics` enum, not visible in this excerpt).

    TODO: don't use sums, just keep the raw value.
    """
    if Metrics.LOSS in metrics:
        metrics_sums[Metrics.LOSS] += loss.item()
    if Metrics.TOKEN_ACCURACY in metrics:
        if gold_forcing:
            # With teacher forcing the caller already measured token accuracy.
            metrics_sums[Metrics.TOKEN_ACCURACY] += token_accuracy
        else:
            # Otherwise compare position-by-position (recall over gold tokens).
            num_tokens_correct = 0.
            for j, token in enumerate(gold_query):
                if len(
                        predicted_sequence) > j and predicted_sequence[j] == token:
                    num_tokens_correct += 1
            metrics_sums[Metrics.TOKEN_ACCURACY] += num_tokens_correct / \
                len(gold_query)
    if Metrics.STRING_ACCURACY in metrics:
        metrics_sums[Metrics.STRING_ACCURACY] += int(
            flat_sequence == original_gold_query)
    if Metrics.CORRECT_TABLES in metrics:
        assert database_username, "You did not provide a database username"
        assert database_password, "You did not provide a database password"
        assert database_timeout > 0, "Database timeout is 0 seconds"
        # Evaluate SQL
        if flat_sequence != original_gold_query:
            syntactic, semantic, table = sql_util.execution_results(
                " ".join(flat_sequence), database_username, database_password, database_timeout)
        else:
            # Exact string match: reuse the gold execution result.
            syntactic = True
            semantic = True
            table = gold_table
        metrics_sums[Metrics.CORRECT_TABLES] += int(table == gold_table)
        # These reuse the execution result computed for CORRECT_TABLES.
        if Metrics.SYNTACTIC_QUERIES in metrics:
            metrics_sums[Metrics.SYNTACTIC_QUERIES] += int(syntactic)
        if Metrics.SEMANTIC_QUERIES in metrics:
            metrics_sums[Metrics.SEMANTIC_QUERIES] += int(semantic)
        if Metrics.STRICT_CORRECT_TABLES in metrics:
            metrics_sums[Metrics.STRICT_CORRECT_TABLES] += int(
                table == gold_table and syntactic)
def construct_averages(metrics_sums, total_num):
    """Computes per-metric averages from summed values.

    Inputs:
        metrics_sums (dict Metric -> float): Summed value for each metric.
        total_num (int): Denominator used for the average.
    """
    # Every metric except the loss is reported as a percentage.
    return {
        metric: (total / total_num) * (1. if metric == "loss" else 100.)
        for metric, total in metrics_sums.items()
    }
def evaluate_using_predicted_queries(sample,
                                     model,
                                     name="",
                                     gold_forcing=False,
                                     metrics=None,
                                     total_num=-1,
                                     database_username="",
                                     database_password="",
                                     database_timeout=0,
                                     snippet_keep_age=1):
    """Evaluates interactions, feeding each predicted query back as history.

    Writes per-utterance predictions to "<name>_predictions.json", sums the
    requested metrics, and returns (metric averages, per-interaction
    prediction lists).
    """
    predictions_file = open(name + "_predictions.json", "w")
    print("Predicting with file " + str(name + "_predictions.json"))
    # Gold forcing is incompatible with feeding back predicted queries.
    assert not gold_forcing
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    # NOTE(review): num_utterances is never incremented in this function, so
    # the default total_num=-1 would divide by zero in construct_averages --
    # confirm callers always pass an explicit total_num.
    num_utterances = 0
    predictions = []
    for i, item in enumerate(sample):
        int_predictions = []
        item.start_interaction()
        while not item.done():
            utterance = item.next_utterance(snippet_keep_age)
            predicted_sequence, loss, _, probability = model.eval_step(
                utterance)
            int_predictions.append((utterance, predicted_sequence))
            flat_sequence = utterance.flatten_sequence(predicted_sequence)
            # Only feed back queries that execute and are confident enough.
            if sql_util.executable(
                    flat_sequence,
                    username=database_username,
                    password=database_password,
                    timeout=database_timeout) and probability >= 0.24:
                utterance.set_pred_query(
                    item.remove_snippets(predicted_sequence))
                item.add_utterance(utterance,
                                   item.remove_snippets(predicted_sequence),
                                   previous_snippets=utterance.snippets())
            else:
                # Add the /previous/ predicted query, guaranteed to be syntactically
                # correct
                seq = []
                utterance.set_pred_query(seq)
                item.add_utterance(
                    utterance, seq, previous_snippets=utterance.snippets())
            original_utt = item.interaction.utterances[utterance.index]
            write_prediction(
                predictions_file,
                identifier=item.interaction.identifier,
                input_seq=utterance.input_sequence(),
                probability=probability,
                prediction=predicted_sequence,
                flat_prediction=flat_sequence,
                gold_query=original_utt.gold_query_to_use,
                flat_gold_queries=[
                    q[0] for q in original_utt.all_gold_queries],
                gold_tables=[
                    q[1] for q in original_utt.all_gold_queries],
                index_in_interaction=utterance.index,
                database_username=database_username,
                database_password=database_password,
                database_timeout=database_timeout)
            update_sums(metrics,
                        metrics_sums,
                        predicted_sequence,
                        flat_sequence,
                        original_utt.gold_query_to_use,
                        original_utt.original_gold_query,
                        gold_forcing,
                        loss,
                        token_accuracy=0,
                        database_username=database_username,
                        database_password=database_password,
                        database_timeout=database_timeout,
                        gold_table=original_utt.gold_sql_results)
        predictions.append(int_predictions)
        progbar.update(i)
    progbar.finish()
    if total_num < 0:
        total_num = num_utterances
    predictions_file.close()
    return construct_averages(metrics_sums, total_num), predictions | null |
165,618 | import torch.nn as nn
import torch
import math
import torch.nn.functional as F
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from .layer_norm import LayerNorm
from .position_ffn import PositionwiseFeedForward
from .multi_headed_attn import MultiHeadedAttention
from .rat_transformer_layer import RATTransoformer
The provided code snippet includes necessary dependencies for implementing the `save_matrix` function. Write a Python function `def save_matrix(matrix, label, i)` to solve the following problem:
matrix: n x n label: len(str) == n
Here is the function:
def save_matrix(matrix, label, i):
    """
    matrix: n x n
    label: len(str) == n

    Renders `matrix` as a heat map with `label` as the tick labels on both
    axes and saves the figure to 'relation_<i>'.
    """
    plt.rcParams["figure.figsize"] = (1000,1000)
    plt.matshow(matrix, cmap=plt.cm.Reds)
    plt.xticks(np.arange(len(label)), label, rotation=90, fontsize=16)
    plt.yticks(np.arange(len(label)), label, fontsize=16)
    plt.margins(2)
    plt.subplots_adjust()
    plt.savefig('relation_' + str(i), dpi=300)
    # NOTE(review): terminates the whole process after saving one figure --
    # presumably a debugging leftover; confirm before reuse.
    exit() | matrix: n x n label: len(str) == n |
165,619 | import torch.nn as nn
import torch
import math
import torch.nn.functional as F
from .layer_norm import LayerNorm
from .position_ffn import PositionwiseFeedForward
from .multi_headed_attn import MultiHeadedAttention
from ..encoder import Encoder as Encoder2
def gelu(x):
    """Gaussian Error Linear Unit activation, exact erf form: x * Phi(x)."""
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) | null |
165,623 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `compute_loss` function. Write a Python function `def compute_loss(gold_seq, scores, index_to_token_maps, gold_tok_to_id, noise=0.00000001)` to solve the following problem:
Computes the loss of a gold sequence given scores. Inputs: gold_seq (list of str): A sequence of gold tokens. scores (list of dy.Expression): Expressions representing the scores of potential output tokens for each token in gold_seq. index_to_token_maps (list of dict str->list of int): Maps from index in the sequence to a dictionary mapping from a string to a set of integers. gold_tok_to_id (lambda (str, str)->list of int): Maps from the gold token and some lookup function to the indices in the probability distribution where the gold token occurs. noise (float, optional): The amount of noise to add to the loss. Returns: dy.Expression representing the sum of losses over the sequence.
Here is the function:
def compute_loss(gold_seq,
                 scores,
                 index_to_token_maps,
                 gold_tok_to_id,
                 noise=0.00000001):
    """ Computes the loss of a gold sequence given scores.

    Inputs:
        gold_seq (list of str): A sequence of gold tokens.
        scores (list of dy.Expression): Expressions representing the scores of
            potential output tokens for each token in gold_seq.
        index_to_token_maps (list of dict str->list of int): Maps from index in the
            sequence to a dictionary mapping from a string to a set of integers.
        gold_tok_to_id (lambda (str, str)->list of int): Maps from the gold token
            and some lookup function to the indices in the probability distribution
            where the gold token occurs.
        noise (float, optional): The amount of noise to add to the loss.

    Returns:
        dy.Expression representing the sum of losses over the sequence.
    """
    assert len(gold_seq) == len(scores) == len(index_to_token_maps)
    losses = []
    for i, gold_tok in enumerate(gold_seq):
        score = scores[i]
        token_map = index_to_token_maps[i]
        gold_indices = gold_tok_to_id(gold_tok, token_map)
        assert len(gold_indices) > 0
        # The noise term keeps log() finite; it is skipped when the gold
        # token maps to a single unique index.
        noise_i = noise
        if len(gold_indices) == 1:
            noise_i = 0
        probdist = score
        # Sum the probability mass over every index of the gold token.
        prob_of_tok = noise_i + torch.sum(probdist[gold_indices])
        losses.append(-torch.log(prob_of_tok))
    return torch.sum(torch.stack(losses)) | Computes the loss of a gold sequence given scores. Inputs: gold_seq (list of str): A sequence of gold tokens. scores (list of dy.Expression): Expressions representing the scores of potential output tokens for each token in gold_seq. index_to_token_maps (list of dict str->list of int): Maps from index in the sequence to a dictionary mapping from a string to a set of integers. gold_tok_to_id (lambda (str, str)->list of int): Maps from the gold token and some lookup function to the indices in the probability distribution where the gold token occurs. noise (float, optional): The amount of noise to add to the loss. Returns: dy.Expression representing the sum of losses over the sequence. |
165,625 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `per_token_accuracy` function. Write a Python function `def per_token_accuracy(gold_seq, pred_seq)` to solve the following problem:
Returns the per-token accuracy comparing two strings (recall). Inputs: gold_seq (list of str): A list of gold tokens. pred_seq (list of str): A list of predicted tokens. Returns: float, representing the accuracy.
Here is the function:
def per_token_accuracy(gold_seq, pred_seq):
    """ Returns the per-token accuracy comparing two strings (recall).

    Inputs:
        gold_seq (list of str): A list of gold tokens.
        pred_seq (list of str): A list of predicted tokens.

    Returns:
        float, representing the accuracy.
    """
    num_correct = 0
    for i, gold_token in enumerate(gold_seq):
        # Count positions where the prediction matches the gold token.
        if i < len(pred_seq) and pred_seq[i] == gold_token:
            num_correct += 1
    # NOTE(review): raises ZeroDivisionError for an empty gold_seq --
    # callers presumably guarantee a non-empty gold sequence.
    return float(num_correct) / len(gold_seq) | Returns the per-token accuracy comparing two strings (recall). Inputs: gold_seq (list of str): A list of gold tokens. pred_seq (list of str): A list of predicted tokens. Returns: float, representing the accuracy. |
165,627 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `create_multilayer_lstm_params` function. Write a Python function `def create_multilayer_lstm_params(num_layers, in_size, state_size, name="")` to solve the following problem:
Adds a multilayer LSTM to the model parameters. Inputs: num_layers (int): Number of layers to create. in_size (int): The input size to the first layer. state_size (int): The size of the states. model (dy.ParameterCollection): The parameter collection for the model. name (str, optional): The name of the multilayer LSTM.
Here is the function:
def create_multilayer_lstm_params(num_layers, in_size, state_size, name=""):
    """ Adds a multilayer LSTM to the model parameters.

    Inputs:
        num_layers (int): Number of layers to create.
        in_size (int): The input size to the first layer.
        state_size (int): The size of the states.
        model (dy.ParameterCollection): The parameter collection for the model.
        name (str, optional): The name of the multilayer LSTM.
    """
    lstm_layers = []
    for i in range(num_layers):
        layer_name = name + "-" + str(i)
        # print("LSTM " + layer_name + ": " + str(in_size) + " x " + str(state_size) + "; default Dynet initialization of hidden weights")
        lstm_layer = torch.nn.LSTMCell(input_size=int(in_size), hidden_size=int(state_size), bias=True)
        lstm_layers.append(lstm_layer)
        # Each subsequent layer consumes the previous layer's hidden state.
        in_size = state_size
    return torch.nn.ModuleList(lstm_layers) | Adds a multilayer LSTM to the model parameters. Inputs: num_layers (int): Number of layers to create. in_size (int): The input size to the first layer. state_size (int): The size of the states. model (dy.ParameterCollection): The parameter collection for the model. name (str, optional): The name of the multilayer LSTM. |
165,628 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `add_params` function. Write a Python function `def add_params(size, name="")` to solve the following problem:
Adds parameters to the model. Inputs: model (dy.ParameterCollection): The parameter collection for the model. size (tuple of int): The size to create. name (str, optional): The name of the parameters.
Here is the function:
def add_params(size, name=""):
    """ Adds parameters to the model.

    Inputs:
        model (dy.ParameterCollection): The parameter collection for the model.
        size (tuple of int): The size to create.
        name (str, optional): The name of the parameters.
    """
    # if len(size) == 1:
    #     print("vector " + name + ": " + str(size[0]) + "; uniform in [-0.1, 0.1]")
    # else:
    #     print("matrix " + name + ": " + str(size[0]) + " x " + str(size[1]) + "; uniform in [-0.1, 0.1]")
    # Coerce every dimension to a plain int before allocating.
    size_int = tuple([int(ss) for ss in size])
    return torch.nn.Parameter(torch.empty(size_int).uniform_(-0.1, 0.1)) | Adds parameters to the model. Inputs: model (dy.ParameterCollection): The parameter collection for the model. size (tuple of int): The size to create. name (str, optional): The name of the parameters. |
165,629 | import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
UNK_TOK = "_UNK"
The provided code snippet includes necessary dependencies for implementing the `get_token_indices` function. Write a Python function `def get_token_indices(token, index_to_token)` to solve the following problem:
Maps from a gold token (string) to a list of indices. Inputs: token (string): String to look up. index_to_token (list of tokens): Ordered list of tokens. Returns: list of int, representing the indices of the token in the probability distribution.
Here is the function:
def get_token_indices(token, index_to_token):
    """ Maps from a gold token (string) to a list of indices.

    Inputs:
        token (string): String to look up.
        index_to_token (list of tokens): Ordered list of tokens.

    Returns:
        list of int, representing the indices of the token in the probability
        distribution.
    """
    if token in index_to_token:
        if len(set(index_to_token)) == len(index_to_token):  # no duplicates
            return [index_to_token.index(token)]
        else:
            # Duplicated vocabulary entries: collect every position at which
            # the token occurs.
            indices = []
            for index, other_token in enumerate(index_to_token):
                if token == other_token:
                    indices.append(index)
            assert len(indices) == len(set(indices))
            return indices
    else:
        # Out-of-vocabulary tokens fall back to the UNK token's index.
        return [index_to_token.index(UNK_TOK)] | Maps from a gold token (string) to a list of indices. Inputs: token (string): String to look up. index_to_token (list of tokens): Ordered list of tokens. Returns: list of int, representing the indices of the token in the probability distribution. |
165,630 | import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
DEL_TOK = ";"
The provided code snippet includes necessary dependencies for implementing the `flatten_utterances` function. Write a Python function `def flatten_utterances(utterances)` to solve the following problem:
Gets a flat sequence from a sequence of utterances. Inputs: utterances (list of list of str): Utterances to concatenate. Returns: list of str, representing the flattened sequence with separating delimiter tokens.
Here is the function:
def flatten_utterances(utterances):
    """ Gets a flat sequence from a sequence of utterances.

    Inputs:
        utterances (list of list of str): Utterances to concatenate.

    Returns:
        list of str, representing the flattened sequence with separating
        delimiter tokens.
    """
    sequence = []
    for i, utterance in enumerate(utterances):
        sequence.extend(utterance)
        # Separate consecutive utterances with the delimiter token
        # (no trailing delimiter after the last utterance).
        if i < len(utterances) - 1:
            sequence.append(DEL_TOK)
    return sequence | Gets a flat sequence from a sequence of utterances. Inputs: utterances (list of list of str): Utterances to concatenate. Returns: list of str, representing the flattened sequence with separating delimiter tokens. |
165,631 | import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
The provided code snippet includes necessary dependencies for implementing the `encode_snippets_with_states` function. Write a Python function `def encode_snippets_with_states(snippets, states)` to solve the following problem:
Encodes snippets by using previous query states instead. Inputs: snippets (list of Snippet): Input snippets. states (list of dy.Expression): Previous hidden states to use. TODO: should this by dy.Expression or vector values?
Here is the function:
def encode_snippets_with_states(snippets, states):
    """ Encodes snippets by using previous query states instead.

    Inputs:
        snippets (list of Snippet): Input snippets.
        states (list of dy.Expression): Previous hidden states to use.

    TODO: should this by dy.Expression or vector values?
    """
    for snippet in snippets:
        # A snippet's embedding is the concatenation of the hidden states at
        # its start and end positions.
        snippet.set_embedding(torch.cat([states[snippet.startpos],states[snippet.endpos]], dim=0))
    return snippets | Encodes snippets by using previous query states instead. Inputs: snippets (list of Snippet): Input snippets. states (list of dy.Expression): Previous hidden states to use. TODO: should this by dy.Expression or vector values? |
165,632 | import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
def load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params):
    """Builds GloVe embedding matrices for the input/output/schema vocabularies.

    Returns (input_embeddings, output_embeddings, schema_embeddings or None,
    embedding_size). Tokens missing from GloVe are left as zero rows.
    """
    print(output_vocabulary.inorder_tokens)
    print()
    def read_glove_embedding(embedding_filename, embedding_size):
        # Parses a GloVe text file into {word: np.ndarray}; in debug / non-train
        # mode only the first ~1000 entries are read.
        glove_embeddings = {}
        with open(embedding_filename) as f:
            cnt = 1
            for line in f:
                cnt += 1
                if params.debug or not params.train:
                    if cnt == 1000:
                        print('Read 1000 word embeddings')
                        break
                l_split = line.split()
                # Words may contain spaces: everything before the final
                # `embedding_size` fields is the word.
                word = " ".join(l_split[0:len(l_split) - embedding_size])
                embedding = np.array([float(val) for val in l_split[-embedding_size:]])
                glove_embeddings[word] = embedding
        return glove_embeddings
    print('Loading Glove Embedding from', params.embedding_filename)
    glove_embedding_size = 300
    # NOTE(review): reading from params.embedding_filename is commented out in
    # favor of a pre-built pickle cache -- confirm "glove_embeddings.pkl" exists.
    # glove_embeddings = read_glove_embedding(params.embedding_filename, glove_embedding_size)
    import pickle as pkl
    # pkl.dump(glove_embeddings, open("glove_embeddings.pkl", "wb"))
    # exit()
    glove_embeddings = pkl.load(open("glove_embeddings.pkl", "rb"))
    print('Done')
    input_embedding_size = glove_embedding_size
    def create_word_embeddings(vocab):
        # Builds a (len(vocab), 300) float32 matrix; OOV tokens stay zero.
        vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32)
        vocabulary_tokens = vocab.inorder_tokens
        glove_oov = 0
        para_oov = 0
        for token in vocabulary_tokens:
            token_id = vocab.token_to_id(token)
            if token in glove_embeddings:
                vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token]
            else:
                glove_oov += 1
        print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab))
        return vocabulary_embeddings
    input_vocabulary_embeddings = create_word_embeddings(input_vocabulary)
    output_vocabulary_embeddings = create_word_embeddings(output_vocabulary)
    output_vocabulary_schema_embeddings = None
    if output_vocabulary_schema:
        output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema)
    # Free the raw GloVe dict before returning the dense matrices.
    del glove_embeddings
    return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size | null |
165,633 | import os, json
import random as rd
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .bert import tokenization as tokenization
from .bert.modeling import BertConfig, BertModel
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_bert(params):
    """Instantiate a BERT encoder, its tokenizer and its config from local files.

    The checkpoint variant is chosen via ``params.bert_type_abb`` (e.g. 'uS'
    for uncased-small); weights are loaded and the model is moved to the
    module-level ``device``.

    :param params: namespace providing ``bert_type_abb``
    :return: (model_bert, tokenizer, bert_config)
    """
    BERT_PT_PATH = './model/bert/data/annotated_wikisql_and_PyTorch_bert_param'
    abb_to_name = {'uS': 'uncased_L-12_H-768_A-12',
                   'uL': 'uncased_L-24_H-1024_A-16',
                   'cS': 'cased_L-12_H-768_A-12',
                   'cL': 'cased_L-24_H-1024_A-16',
                   'mcS': 'multi_cased_L-12_H-768_A-12'}
    bert_type = abb_to_name[params.bert_type_abb]
    # Cased (and multilingual-cased) checkpoints must keep the original casing.
    do_lower_case = params.bert_type_abb not in ('cS', 'cL', 'mcS')
    no_pretraining = False
    bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json')
    vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt')
    init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin')
    print('bert_config_file', bert_config_file)
    print('vocab_file', vocab_file)
    print('init_checkpoint', init_checkpoint)
    bert_config = BertConfig.from_json_file(bert_config_file)
    tokenizer = tokenization.FullTokenizer(
        vocab_file=vocab_file, do_lower_case=do_lower_case)
    bert_config.print_status()
    model_bert = BertModel(bert_config)
    if not no_pretraining:
        # map_location='cpu' lets the checkpoint load on CPU-only hosts too.
        model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu'))
        print("Load pre-trained parameters.")
    model_bert.to(device)
    return model_bert, tokenizer, bert_config
165,636 | import re
import os
import json
import torch
import random
import pickle
import argparse
import torch.nn as nn
from tqdm import tqdm
from model import SegModel
from torch.cuda import amp
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, Dataset
from transformers import BertForNextSentencePrediction, AdamW, BertConfig, get_linear_schedule_with_warmup, set_seed, AutoModel
def get_mask(tensor):
    """Build an attention mask: 1 where the token id is > 0, else 0.

    :param tensor: iterable of rows of token ids (tensor or nested lists)
    :return: torch.tensor of the same shape with 0/1 entries
    """
    attention_masks = [[int(token_id > 0) for token_id in sent] for sent in tensor]
    return torch.tensor(attention_masks)
165,637 | import os
import re
import json
import torch
import random
import pickle
import IPython
import argparse
import subprocess
import numpy as np
from tqdm import tqdm
from torch.nn import CrossEntropyLoss
from collections import defaultdict, Counter
from transformers import BertTokenizer
from torch.nn.utils.rnn import pad_sequence
from keras.preprocessing.sequence import pad_sequences
from transformers import AutoTokenizer, AutoModel, set_seed, BertForNextSentencePrediction
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, Dataset
def gen_text(args):
    """Build contrastive next-segment samples and topic data for dialseg711/doc2dial.

    For every adjacent utterance boundary in every dialogue file, collects up
    to ``args.history`` turns each of: left context, the true continuation, a
    "near" negative from the same dialogue (sampled outside a window of ``w``
    turns around the boundary) and a hard negative from a random other file.
    Dumps samples to ``{dataset}_{args.version}.json`` and the (dialogue,
    boundary) pairs to ``{dataset}_topic_data.json``.

    NOTE(review): ``data``/``topic_data`` are not reset between datasets, so
    the second dataset's dump also contains the first's rows — confirm intended.
    NOTE(review): ``k`` is assigned but never used.
    """
    data, topic_data = [], []
    # w: exclusion window (in turns) around the boundary when sampling the near negative.
    w, k = 2, 5
    for dataset in ['dialseg711', 'doc2dial']:
        todolist = [f'{args.dataroot}/{dataset}/'+i for i in os.listdir(f'{args.dataroot}/{dataset}') if not i.startswith('.')]
        # NOTE(review): the inner `for i in range(args.history)` below shadows
        # this file-path loop variable `i`; harmless as written, but fragile.
        for i in tqdm(todolist):
            dial_name = i.split('/')[-1][:-4]
            # Files opened without close(); relies on GC to release handles.
            cur_dials = open(i).read().split('\n')[:-1]
            # '=======' lines are segment separators, not utterances.
            dials = [utt for utt in cur_dials if '=======' not in utt]
            dial_len = len(dials)
            for utt_idx in range(dial_len-1):
                context, cur, neg, hard_neg = [], [], [], []
                # Near negative: any turn at least w away from the boundary.
                neg_index = random.choice(list(range(utt_idx-w+1)) + list(range(utt_idx+w+1, dial_len)))
                # Hard negative: a random turn from a random (possibly same) file.
                negdial = [i for i in open(random.choice(todolist)).read().split('\n')[:-1] if '====' not in i]
                neg_hard_index = random.choice(list(range(len(negdial))))
                mid = utt_idx+1
                l, r = utt_idx, utt_idx+1
                for i in range(args.history):
                    if l > -1:
                        # re.sub collapses the space before punctuation (detokenize).
                        context.append(re.sub(r'\s([,?.!"](?:\s|$))', r'\1', dials[l]))
                        l -= 1
                    if r < dial_len:
                        cur.append(re.sub(r'\s([,?.!"](?:\s|$))', r'\1', dials[r]))
                        r += 1
                    if neg_index < dial_len:
                        neg.append(re.sub(r'\s([,?.!"](?:\s|$))', r'\1', dials[neg_index]))
                        neg_index += 1
                    if neg_hard_index < len(negdial):
                        hard_neg.append(re.sub(r'\s([,?.!"](?:\s|$))', r'\1', negdial[neg_hard_index]))
                        neg_hard_index += 1
                # Turns were gathered right-to-left; restore chronological order.
                context.reverse()
                data.append([(context, cur), (context, neg), (context, hard_neg)])
                topic_data.append((dials, mid))
                assert len(dials) > mid
        json.dump(data, open(f'{args.dataroot}/{dataset}_{args.version}.json', 'w'))
        json.dump(topic_data, open(f'{args.dataroot}/{dataset}_topic_data.json', 'w')) | null |
165,638 | import torch
import bisect
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from transformers import BertForNextSentencePrediction, AutoModel
from transformers.models.bert.modeling_bert import *
def tet(scores):
    """Compute a TextTiling-style depth score for every position in ``scores``.

    For each index ``i`` the score climbs left and right of ``i`` while values
    are non-decreasing; the depth is 0.5 * (left_peak + right_peak - 2*scores[i]).
    High depth marks a local similarity valley (a candidate segment boundary).

    The original triplicated the loop bodies for the i==0 / i==len-1 edges and
    kept unused locals (``hl``/``hr``); the edge cases fall out naturally
    because the corresponding ranges are empty.

    :param scores: sequence of 0-dim torch tensors (e.g. coherence scores)
    :return: list of detached CPU tensors, one depth score per position
    """
    output_scores = []
    n = len(scores)
    for i in range(n):
        # Highest value reachable moving right while values keep rising.
        rflag = scores[i]
        for r in range(i + 1, n):
            if rflag <= scores[r]:
                rflag = scores[r]
            else:
                break
        # Same, moving left (empty range when i == 0).
        lflag = scores[i]
        for l in range(i - 1, -1, -1):
            if lflag <= scores[l]:
                lflag = scores[l]
            else:
                break
        depth_score = 0.5 * (lflag + rflag - 2 * scores[i])
        output_scores.append(depth_score.cpu().detach())
    return output_scores
165,639 | import torch
from torch import Tensor
from typing import List
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `split_matrix` function. Write a Python function `def split_matrix(matrix: Tensor, lengths: List, reduction='mean') -> Tensor` to solve the following problem:
:param matrix: torch.tensor :param lengths: list :return:
Here is the function:
def split_matrix(matrix: Tensor, lengths: List, reduction='mean') -> Tensor:
    """
    Pool ``matrix`` into a (len(lengths) x len(lengths)) block-summary matrix.

    :param matrix: torch.tensor whose rows/cols are partitioned by ``lengths``
    :param lengths: list of consecutive block sizes along each axis
    :param reduction: per-block pooling: 'mean' | 'sum' | 'max' | 'min'
    :return: pooled tensor of shape (len(lengths), len(lengths))
    """
    n_blocks = len(lengths)
    # offsets[i]..offsets[i+1] delimits block i along either axis.
    offsets = np.cumsum([0] + lengths)
    pooled = torch.zeros(size=(n_blocks, n_blocks))
    reducers = {
        'mean': lambda block: block.mean(),
        'sum': lambda block: block.sum(),
        'max': lambda block: block.max(),
        'min': lambda block: block.min(),
    }
    for row in range(n_blocks):
        for col in range(n_blocks):
            block = matrix[offsets[row]:offsets[row + 1], offsets[col]:offsets[col + 1]]
            reduce_fn = reducers.get(reduction)
            if reduce_fn is None:
                # Raised lazily (only when a block is processed), matching
                # the original behavior for empty `lengths`.
                raise ValueError('reduction=[%s] has not been supported.' % reduction)
            pooled[row, col] = reduce_fn(block)
    return pooled
165,642 | import math
import torch
from torch.optim import Optimizer
from torch.nn.utils import clip_grad_norm_
def warmup_linear(x, warmup=0.002):
    """Linear warmup followed by linear decay.

    :param x: training progress, expected in [0, 1]
    :param warmup: fraction of training spent ramping up
    :return: learning-rate multiplier
    """
    return x / warmup if x < warmup else 1.0 - x
165,643 | import os
import codecs
import argparse
import logging
from typing import List
import pickle
from tqdm import tqdm
from optimization import BERTAdam
from data import data_provider
from network import Dial2vec
from metrics import *
from utils import split_matrix
def str2bool(v):
    """Parse a command-line string into a bool (for argparse ``type=``).

    :param v: string such as 'yes'/'no', 'true'/'false', '1'/'0' (any case)
    :return: the parsed boolean
    :raises argparse.ArgumentTypeError: for unrecognized values
    """
    normalized = v.lower()
    if normalized in {'yes', 'true', 't', 'y', '1'}:
        return True
    if normalized in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise argparse.ArgumentTypeError('Unsupported value encountered.')
165,644 | import os
import codecs
import math
from multiprocessing import Pool
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler
from transformers import AutoTokenizer, AutoConfig
import config
from model.plato.configuration_plato import PlatoConfig
The provided code snippet includes necessary dependencies for implementing the `line_statistics` function. Write a Python function `def line_statistics(file_name)` to solve the following problem:
统计文件行数
Here is the function:
def line_statistics(file_name):
    """Count the number of lines in a file (统计文件行数).

    Counts newline characters, matching ``wc -l`` semantics (a trailing line
    without a final newline is not counted).  The original shelled out to
    ``wc -l`` via ``os.popen`` with an unquoted path, which broke on paths
    containing spaces or shell metacharacters and was non-portable.

    :param file_name: path to the file, or None
    :return: number of lines; 0 when file_name is None
    """
    if file_name is None:
        return 0
    line_number = 0
    # Read in 1 MiB binary chunks so large files stay memory-bounded.
    with open(file_name, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            line_number += chunk.count(b"\n")
    return line_number
165,645 | import codecs
import os
import argparse
import config
The provided code snippet includes necessary dependencies for implementing the `get_session_content` function. Write a Python function `def get_session_content(file_path)` to solve the following problem:
读取数据
Here is the function:
def get_session_content(file_path):
    """Load dialogue sessions from a tab-separated file (读取数据).

    Each line is ``session_id \\t role \\t text \\t topic_id``.  Topic ids are
    remapped to consecutive string labels ("1", "2", ...); lines with
    composite topics (containing '|') or empty text are skipped.

    The original left the file handle open on a read error; a ``with`` block
    now guarantees it is closed.

    :param file_path: path to the utf-8 encoded data file
    :return: dict of session_id -> {"role": [...], "text": [...], "label": str}
    """
    with codecs.open(file_path, "r", encoding="utf-8") as f_in:
        line_list = f_in.readlines()
    # First pass: build topic_id -> dense label mapping ("1", "2", ...).
    topic_mapper = {}
    for line in line_list:
        line_array = [s.strip() for s in line.split("\t")]
        topic_id = line_array[3]
        # Skip already-seen topics and composite topics like "a|b".
        if topic_id in topic_mapper or topic_id.find("|") != -1:
            continue
        topic_mapper[topic_id] = str(len(topic_mapper) + 1)
    # Second pass: group utterances by session id.
    data_dict = {}
    for line in line_list:
        line_array = [s.strip() for s in line.split("\t")]
        session_id = line_array[0]
        role = line_array[1]
        text = line_array[2]
        topic_id = line_array[3]
        if topic_id not in topic_mapper or topic_id.find("|") != -1:
            continue
        if text.strip() == "":
            continue
        if session_id not in data_dict:
            # The session label is the (remapped) topic of its first kept line.
            data_dict[session_id] = {"role": [], "text": [], "label": topic_mapper[topic_id]}
        # Separator tokens inside text would corrupt downstream sample splitting.
        text = text.replace(config.turn_sep_token, "")
        text = text.replace(config.sample_sep_token, "")
        data_dict[session_id]["role"].append(role)
        data_dict[session_id]["text"].append(text)
    return data_dict
165,646 | import codecs
from tqdm import tqdm
import random
import os
import config
sample_role = "0"
data_dict, flatten_neg_samples = get_data_dict("./rawdata/preprocess_session_%s.txt" % config.data_prefix)
The provided code snippet includes necessary dependencies for implementing the `get_data_dict` function. Write a Python function `def get_data_dict(path, min_session_rounds=5, max_load_sessions=100000)` to solve the following problem:
读取数据
Here is the function:
def get_data_dict(path, min_session_rounds=5, max_load_sessions=100000):
    """Load raw sessions from a preprocessed tsv file (读取数据).

    :param path: input file with lines ``session_id \\t role \\t text \\t topics``
    :param min_session_rounds: drop sessions with fewer utterances than this
    :param max_load_sessions: stop reading when a new session would exceed this count
    :return: (sessions dict, flat list of texts spoken by ``sample_role``)
    """
    with codecs.open(path, "r", "utf-8") as f_in:
        all_lines = f_in.readlines()
    sessions = {}
    flatten_neg_samples = []
    for raw_line in all_lines:
        fields = raw_line.strip("\n").split("\t")
        sid = fields[0]
        if sid not in sessions:
            # Stop only when a *new* session would exceed the cap; lines
            # belonging to already-loaded sessions keep accumulating.
            if len(sessions) > max_load_sessions:
                break
            sessions[sid] = {"role": [], "text": [], "response": [], "topic": []}
        sessions[sid]["role"].append(fields[1])
        # Strip separator tokens so they cannot corrupt sample assembly later.
        utterance = fields[2].replace(config.turn_sep_token, "")
        utterance = utterance.replace(config.sample_sep_token, "")
        sessions[sid]["text"].append(utterance)
        sessions[sid]["topic"].extend(fields[3].split("|"))
        if fields[1] == sample_role:
            flatten_neg_samples.append(utterance)
    # Keep only sessions with enough rounds of dialogue.
    kept_sessions = {sid: sess for sid, sess in sessions.items()
                     if len(sess["text"]) >= min_session_rounds}
    return kept_sessions, flatten_neg_samples
165,647 | import codecs
from tqdm import tqdm
import random
import os
import config
def get_single_sample(data_dict, key, select_key, flatten_neg_samples, use_ins=False):
    """Build one pseudo-session sample (构建一条样本).

    Turns of session ``key`` spoken by ``anchor_role`` are kept verbatim; the
    remaining turns are replaced either by ``sample_role`` turns taken in
    order from session ``select_key`` (``use_ins=True``) or by random
    utterances from ``flatten_neg_samples``.
    """
    donor = data_dict[select_key]
    # Donor turns spoken by sample_role, in their original order.
    ins_samples = [donor["text"][idx] for idx in range(len(donor["text"]))
                   if donor["role"][idx] == sample_role]
    ins_idx = 0
    text_str = ""
    for turn, utterance in enumerate(data_dict[key]["text"]):
        if data_dict[key]["role"][turn] == anchor_role:
            text_str += utterance
        elif use_ins is True:
            # Walk the donor's turns; repeat the last one once exhausted.
            if ins_idx < len(ins_samples):
                text_str += ins_samples[ins_idx]
                ins_idx += 1
            else:
                text_str += ins_samples[-1]
        else:
            text_str += random.choice(flatten_neg_samples)
        text_str += config.turn_sep_token
    text_str = text_str.strip(config.turn_sep_token)
    return text_str
result_list = get_result(data_dict, config.samples_per_line, flatten_neg_samples)
The provided code snippet includes necessary dependencies for implementing the `get_result` function. Write a Python function `def get_result(data_dict, samples_per_line, flatten_neg_samples)` to solve the following problem:
构建数据集
Here is the function:
def get_result(data_dict, samples_per_line, flatten_neg_samples):
    """
    Build the training dataset lines (构建数据集).

    Each output line is ``role_str <line_sep> text_str <line_sep> "0"``, where
    text_str is the real session joined by '#' followed by sampled
    pseudo-sessions, all separated by ``config.sample_sep_token``.

    NOTE(review): the outer ``for _ in range(samples_per_line)`` emits
    samples_per_line lines per session, and each line itself contains
    samples_per_line - 1 sampled negatives — confirm the doubling is intended.
    """
    dict_keys = list(data_dict.keys())
    result_list = []
    for key in tqdm(dict_keys, desc="traversing_sessions"):
        # Concatenated speaker roles, one character per turn.
        role_str = ""
        for s in data_dict[key]["role"]:
            role_str = role_str + s
        for _ in range(samples_per_line):
            # Positive sample: the session's own turns joined by '#'.
            text_str = "#".join(data_dict[key]["text"])
            text_str += config.sample_sep_token
            for i in range(1, samples_per_line):
                # Negatives: pseudo-sessions built from random donor sessions.
                select_key = random.choice(dict_keys)
                text_str += get_single_sample(data_dict, key, select_key, flatten_neg_samples, use_ins=False)
                text_str += config.sample_sep_token
            text_str = text_str.strip(config.sample_sep_token)
            result_list.append(config.line_sep_token.join([role_str, text_str, "0"]))
    return result_list | 构建数据集 |
165,648 | import codecs
from tqdm import tqdm
import random
import os
import config
anchor_role = "1"
if os.path.exists(train_file_path) is False:
os.makedirs(train_file_path)
The provided code snippet includes necessary dependencies for implementing the `write_tsv` function. Write a Python function `def write_tsv(train_file_path, result_list, train_ratio)` to solve the following problem:
输出至训练文件
Here is the function:
def write_tsv(train_file_path, result_list, train_ratio):
    """Write the first ``train_ratio`` fraction of samples to the train file
    (输出至训练文件).

    :param train_file_path: output directory
    :param result_list: list of sample lines (without trailing newlines)
    :param train_ratio: fraction in [0, 1] of samples kept for training
    """
    num = int(len(result_list) * train_ratio)
    train_data = result_list[:num]
    with codecs.open(os.path.join(train_file_path, "train.tsv.%s" % anchor_role), "w", "utf-8") as f:
        for line in train_data:
            # write(), not writelines(): writelines() expects an iterable of
            # strings, so passing one string made it iterate char-by-char.
            f.write(line + "\n")
165,649 | import copy
import json
import math
import six
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
import numpy as np
from sklearn.metrics import f1_score
import config as user_config
The provided code snippet includes necessary dependencies for implementing the `gelu` function. Write a Python function `def gelu(x)` to solve the following problem:
Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Here is the function:
def gelu(x):
    """Gaussian Error Linear Unit activation (exact erf formulation).

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    # Standard-normal CDF evaluated at x; gelu(x) = x * Phi(x).
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
165,650 |
The provided code snippet includes necessary dependencies for implementing the `semantic_relatedness` function. Write a Python function `def semantic_relatedness(y_true=None, features=None, scores_from_subject=None, scores_from_model=None)` to solve the following problem:
:param y_true: ground_truth labels about domains :param features: produced features :return:
Here is the function:
def semantic_relatedness(y_true=None, features=None, scores_from_subject=None, scores_from_model=None):
    """
    Rank correlation between label-agreement scores and model similarity scores.

    Either pass precomputed pairwise score matrices, or pass labels/features
    and both matrices are derived here.

    :param y_true: ground-truth domain labels (numpy array); used only when
        the score matrices are not supplied
    :param features: produced features; used only when the score matrices are
        not supplied
    :param scores_from_subject: optional pairwise label-agreement matrix
    :param scores_from_model: optional pairwise model-score matrix
    :return: correlation coefficient (presumably scipy.stats.kendalltau —
        `kendalltau` is imported elsewhere in this module; TODO confirm)
    """
    if scores_from_subject is None and scores_from_model is None:
        y_true = y_true.astype(int).reshape(-1, 1)
        # M_y[i][j] == label of sample i, tiled across columns.
        M_y = np.repeat(y_true, repeats=len(y_true), axis=-1)
        assert (M_y.shape[0] == M_y.shape[1])
        # 1.0 where two samples share a label, else 0.0.
        scores_from_subject = (M_y == M_y.T).astype(float)  # .reshape(-1)
        scores_from_model = feature_cosine_matrix(features)  # .reshape(-1)
    # Drop the diagonal (self-pairs) before correlating.
    scores_from_subject = skip_diag_strided(scores_from_subject).reshape(-1)
    scores_from_model = skip_diag_strided(scores_from_model).reshape(-1)
    # correlation, p_value = spearmanr(scores_from_subject, scores_from_model)
    correlation, p_value = kendalltau(scores_from_subject, scores_from_model)
    return correlation | :param y_true: ground_truth labels about domains :param features: produced features :return: |
165,651 |
def clustering_evaluation(y_true, y_pred, logger=None):
    """Compute clustering metrics (adjusted RI, NMI, accuracy, purity) with timings.

    The original converted both label arrays twice (``np.array`` then again
    ``np.array(...).astype(int)``); the redundant first pass is removed.

    :param y_true: ground-truth cluster labels (array-like of ints)
    :param y_pred: predicted cluster labels (array-like of ints)
    :param logger: optional logger; when given, metrics and timings are logged
        as a PrettyTable
    :return: EvaluationResult with RI, NMI, acc and purity
    """
    y_true = np.array(y_true).astype(int)
    y_pred = np.array(y_pred).astype(int)
    ## RI (adjusted Rand index)
    pre = time()
    RI = adjusted_rand_score(y_true, y_pred)
    RI_time = time() - pre
    ## NMI
    pre = time()
    NMI = normalized_mutual_info_score(y_true, y_pred)
    NMI_time = time() - pre
    ## acc
    pre = time()
    acc = get_accuracy(y_true, y_pred)
    acc_time = time() - pre
    ## purity
    pre = time()
    purity = purity_score(y_true, y_pred)
    purity_time = time() - pre
    if logger is not None:
        # NOTE(review): the '[LDA]' tag is hard-coded — confirm this function
        # is only used for the LDA baseline.
        logger.info("\nclustering_task [LDA]: RI: %s NMI: %s Acc: %s Purity: %s" % (RI, NMI, acc, purity))
        tb = PrettyTable()
        tb.field_names = ['', 'RI', 'NMI', 'Acc', 'Purity']
        tb.add_row(['Metrics'] + ['%.2f' % (v * 100) for v in [RI, NMI, acc, purity]])
        tb.add_row(['Times'] + ['%.2f s' % (v) for v in [RI_time, NMI_time, acc_time, purity_time]])
        logger.info('\n' + tb.__str__())
    return EvaluationResult(
        RI=RI,
        NMI=NMI,
        acc=acc,
        purity=purity
    )
165,652 |
The provided code snippet includes necessary dependencies for implementing the `evaluate_all_metrics_at_once` function. Write a Python function `def evaluate_all_metrics_at_once(features, y_true, y_pred, tsne_visualization_output=None, logger=None, note='')` to solve the following problem:
:param features: :param y_true: :param y_pred: :param strategy: :param tsne_visualization_output: :return:
Here is the function:
def evaluate_all_metrics_at_once(features, y_true, y_pred, tsne_visualization_output=None, logger=None, note=''):
    """
    Evaluate clustering, semantic-relatedness and session-retrieval metrics.

    :param features: feature matrix used for the pairwise model scores
    :param y_true: ground-truth labels (array-like of ints)
    :param y_pred: predicted cluster labels (array-like of ints)
    :param tsne_visualization_output: optional filename for a t-SNE plot
    :param logger: optional logger; metrics are logged as a PrettyTable
    :param note: tag included in the log line
    :return: EvaluationResult with RI, NMI, acc, purity, SR, MRR and MAP
    """
    y_true = np.array(y_true).astype(int)
    y_pred = np.array(y_pred).astype(int)
    ## RI (adjusted Rand index)
    pre = time()
    RI = adjusted_rand_score(y_true, y_pred)
    RI_time = time() - pre
    ## NMI
    pre = time()
    NMI = normalized_mutual_info_score(y_true, y_pred)
    NMI_time = time() - pre
    ## acc
    pre = time()
    acc = get_accuracy(y_true, y_pred)
    acc_time = time() - pre
    ## purity
    pre = time()
    purity = purity_score(y_true, y_pred)
    purity_time = time() - pre
    # Pairwise score matrices shared by the SR and retrieval metrics below.
    scores_from_subject, scores_from_model = precalculate_scores_from_subject_and_model(y_true=y_true,
                                                                                        features=features)
    ## SR
    pre = time()
    SR = semantic_relatedness_precise(y_true=None,
                                      features=None,
                                      scores_from_subject=scores_from_subject,
                                      scores_from_model=scores_from_model,
                                      dtype='float64')
    SR_time = time() - pre
    # session retrieval; cost_time carries 'ranking', 'mrr' and 'map' timings.
    MRR, MAP, cost_time = session_retrieval_result(y_true=None,
                                                   features=None,
                                                   scores_from_subject=scores_from_subject,
                                                   scores_from_model=scores_from_model,
                                                   dtype='float64',
                                                   return_time=True)
    if tsne_visualization_output is not None:
        tsne_visualization(features, y_true, output_filename=tsne_visualization_output)
    if logger is not None:
        logger.info("\nclustering_task [%s]: RI: %s NMI: %s Acc: %s Purity: %s SR: %s MRR: %s MAP: %s" % (
            note, RI, NMI, acc, purity, SR, MRR, MAP))
        tb = PrettyTable()
        tb.field_names = ['', 'RI', 'NMI', 'Acc', 'Purity', 'SR', 'MRR', 'MAP']
        tb.add_row(['Metrics'] + ['%.2f' % (v * 100) for v in [RI, NMI, acc, purity, SR, MRR, MAP]])
        # The shared ranking time is split evenly between the MRR and MAP columns.
        tb.add_row(['Times'] + ['%.2f s' % (v) for v in [RI_time, NMI_time, acc_time, purity_time, SR_time,
                                                         cost_time['ranking']/2 + cost_time['mrr'],
                                                         cost_time['ranking']/2 + cost_time['map']]])
        logger.info('\n' + tb.__str__())
    return EvaluationResult(
        RI=RI,
        NMI=NMI,
        acc=acc,
        purity=purity,
        SR=SR,
        MRR=MRR,
        MAP=MAP
    ) | :param features: :param y_true: :param y_pred: :param strategy: :param tsne_visualization_output: :return: |
165,653 |
The provided code snippet includes necessary dependencies for implementing the `feature_based_evaluation_at_once` function. Write a Python function `def feature_based_evaluation_at_once(features, labels, gpu_features=None, n_average=1, tsne_visualization_output=None, tasks=None, dtype='float64', logger=None, note='')` to solve the following problem:
Evaluate all metrics with features :param features: numpy.array :param labels: list :param n_average: :param tsne_visualization_output: :param tasks: :param dtype: :param logger: :param note: :return:
Here is the function:
def feature_based_evaluation_at_once(features, labels, gpu_features=None, n_average=1, tsne_visualization_output=None, tasks=None, dtype='float64', logger=None, note=''):
    """
    Evaluate all metrics with features.

    :param features: numpy.array of session features
    :param labels: list of ground-truth labels
    :param gpu_features: optional GPU tensor copy of the features; when given,
        the align/uniform task runs on it instead of the numpy copy
    :param n_average: number of KMeans restarts averaged for clustering metrics
    :param tsne_visualization_output: optional filename for a t-SNE plot
    :param tasks: collection of task names drawn from {'clustering',
        'semantic_relatedness', 'session_retrieval', 'visualization',
        'align_uniform'}.  NOTE(review): the default None crashes on the
        ``in tasks`` membership tests below — callers must pass a collection.
    :param dtype: numpy dtype name used for the feature matrix
    :param logger: optional logger for the summary table
    :param note: tag included in log lines
    :return: EvaluationResult (metrics of skipped tasks stay at their 0 init)
    """
    labels = np.array(labels).astype(int)
    if gpu_features is not None:
        gpu_labels = torch.tensor(labels, device=gpu_features.device)
    features = np.array(features).astype(dtype) if features is not None else None
    # n_classes
    label_set = set()
    for s in labels:
        label_set.add(s)
    # initialize
    RI, NMI, acc, purity = 0., 0., 0., 0.
    clustering_time, RI_time, NMI_time, acc_time, purity_time = 0., 0., 0., 0., 0.
    SR, SR_time = 0., 0.
    MRR, MAP, mrr_time, map_time, ranking_time, scoring_time = 0., 0., 0., 0., 0., 0.
    alignment, adjusted_alignment, uniformity = 0., 0., 0.
    align_uniform_time = 0.
    # KMeans
    if 'clustering' in tasks:
        # logger.info('KMeans Evaluation for %s tries.' % n_average)
        # Metrics and timings are averaged over n_average independent runs.
        for _ in range(n_average):
            # clustering
            pre = time()
            clf = KMeans(n_clusters=len(label_set), max_iter=500, tol=1e-5)
            clf.fit(features)
            y_pred = clf.predict(features)
            clustering_time += (time() - pre) / n_average
            ## RI
            pre = time()
            RI += adjusted_rand_score(labels, y_pred) / n_average
            RI_time += (time() - pre) / n_average
            ## NMI
            pre = time()
            NMI += normalized_mutual_info_score(labels, y_pred) / n_average
            NMI_time += (time() - pre) / n_average
            ## acc
            pre = time()
            acc += get_accuracy(labels, y_pred) / n_average
            acc_time += (time() - pre) / n_average
            ## purity
            pre = time()
            purity += purity_score(labels, y_pred) / n_average
            purity_time += (time() - pre) / n_average
    # scoring
    if 'semantic_relatedness' in tasks or 'session_retrieval' in tasks:
        # Pairwise score matrices shared by SR and retrieval; the diagonal
        # (self-pairs) is removed from both.
        pre = time()
        scores_from_subject, scores_from_model = precalculate_scores_from_subject_and_model(y_true=labels, features=features)
        scores_from_subject = skip_diag_strided(scores_from_subject)
        scores_from_model = skip_diag_strided(scores_from_model)
        scoring_time += (time() - pre)
    # Semantic Relatedness
    if 'semantic_relatedness' in tasks:
        pre = time()
        SR = semantic_relatedness_precise(y_true=None,
                                          features=None,
                                          scores_from_subject=scores_from_subject,
                                          scores_from_model=scores_from_model,
                                          dtype=dtype)
        SR_time = time() - pre
    # Session Retrieval
    if 'session_retrieval' in tasks:
        pre = time()
        rankings = get_rankings(scores_from_model, dtype=dtype)
        ranking_time = time() - pre
        ## MRR
        pre = time()
        # MRR = mean_reciprocal_rank(scores_from_subject, rankings, dtype=dtype) # Wrong
        MRR = mean_reciprocal_rank(scores_from_subject, scores_from_model, dtype=dtype)
        mrr_time = time() - pre
        ## MAP
        pre = time()
        MAP = mean_average_precision(scores_from_subject, rankings, dtype=dtype)
        map_time = time() - pre
    # Visualization
    if 'visualization' in tasks:
        if tsne_visualization_output is not None:
            pre = time()
            tsne_visualization(features, labels, output_filename=tsne_visualization_output)
            visualization_time = time() - pre
            logger.info('Visualization done. Time cost: %ss' % visualization_time)
    # Alignment & Uniformity
    if 'align_uniform' in tasks:
        pre = time()
        if gpu_features is not None:
            # L2-normalize on the GPU when a GPU copy is available.
            normalized_features = F.normalize(gpu_features, p=2, dim=-1)
            alignment, adjusted_alignment, uniformity = align_uniform(normalized_features=normalized_features,
                                                                      labels=gpu_labels,
                                                                      device=gpu_features.device)
        else:
            normalized_features = F.normalize(torch.tensor(features), p=2, dim=-1).cpu()
            alignment, adjusted_alignment, uniformity = align_uniform(normalized_features=normalized_features,
                                                                      labels=labels,
                                                                      device='cpu')
        align_uniform_time = time() - pre
    # logger.info("Align_Uniform Time Costs: %s " % align_uniform_time)
    if logger is not None:
        logger.info("\nclustering_task [%s]: RI: %s NMI: %s Acc: %s Purity: %s" % (note, RI, NMI, acc, purity))
        logger.info("\nSemantic Relatedness [%s]: SR: %s" % (note, SR))
        logger.info("\nSession Retrieval [%s]: MRR: %s MAP: %s" % (note, MRR, MAP))
        logger.info("\nRepresentation_Evaluation [%s]: Alignment: %.6f Alignment (adjusted): %.6f Uniformity: %.6f" % (note, alignment, adjusted_alignment, uniformity))
        tb = PrettyTable()
        tb.field_names = ['', 'RI', 'NMI', 'Acc', 'Purity', 'SR', 'MRR', 'MAP', 'Alignment', 'Adjusted Alignment', 'Uniformity']
        tb.add_row(['Metrics'] + ['%.2f' % (v * 100) for v in [RI, NMI, acc, purity, SR, MRR, MAP]] + ['%.2f' % v for v in [alignment, adjusted_alignment, uniformity]])
        # Shared timings (clustering, scoring, ranking, align/uniform) are
        # split evenly across the metric columns they feed.
        tb.add_row(['Times'] + ['%.2f s' % v for v in [clustering_time/4 + RI_time,
                                                       clustering_time/4 + NMI_time,
                                                       clustering_time/4 + acc_time,
                                                       clustering_time/4 + purity_time,
                                                       scoring_time/3 + SR_time,
                                                       scoring_time/3 + ranking_time/2 + mrr_time,
                                                       scoring_time/3 + ranking_time/2 + map_time,
                                                       align_uniform_time/3,
                                                       align_uniform_time/3,
                                                       align_uniform_time/3]])
        logger.info('\n' + tb.__str__())
    return EvaluationResult(
        RI=RI,
        NMI=NMI,
        acc=acc,
        purity=purity,
        SR=SR,
        MRR=MRR,
        MAP=MAP,
        alignment=alignment,
        adjusted_alignment=adjusted_alignment,
        uniformity=uniformity
    ) | Evaluate all metrics with features :param features: numpy.array :param labels: list :param n_average: :param tsne_visualization_output: :param tasks: :param dtype: :param logger: :param note: :return: |
165,656 | import torch
import mmcv
from mmdet.core.bbox.match_costs.builder import MATCH_COST
The provided code snippet includes necessary dependencies for implementing the `smooth_l1_loss` function. Write a Python function `def smooth_l1_loss(pred, target, beta=1.0)` to solve the following problem:
Smooth L1 loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. beta (float, optional): The threshold in the piecewise function. Defaults to 1.0. Returns: torch.Tensor: Calculated loss
Here is the function:
def smooth_l1_loss(pred, target, beta=1.0):
    """Smooth L1 loss, summed over the last dimension.
    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.
    Returns:
        torch.Tensor: Calculated loss
    """
    assert beta > 0
    if target.numel() == 0:
        # Zero loss that still participates in pred's autograd graph.
        return pred.sum() * 0
    # assert pred.size() == target.size()
    abs_err = torch.abs(pred - target)
    # Quadratic branch inside the beta threshold, linear branch outside.
    quadratic = 0.5 * abs_err * abs_err / beta
    linear = abs_err - 0.5 * beta
    elementwise = torch.where(abs_err < beta, quadratic, linear)
    return elementwise.sum(-1)
165,668 | import copy
import mmcv
import numpy as np
import pyquaternion
import tempfile
import torch
import warnings
from nuscenes.utils.data_classes import Box as NuScenesBox
from os import path as osp
from mmdet3d.core import bbox3d2result, box3d_multiclass_nms, xywhr2xyxyr
from mmdet.datasets import DATASETS, CocoDataset
from mmdet3d.core import show_multi_modality_result
from mmdet3d.core.bbox import CameraInstance3DBoxes, get_box_type
from mmdet3d.datasets.pipelines import Compose
from mmdet3d.datasets.utils import extract_result_dict, get_loading_pipeline
The provided code snippet includes necessary dependencies for implementing the `output_to_nusc_box` function. Write a Python function `def output_to_nusc_box(detection)` to solve the following problem:
Convert the output to the box class in the nuScenes. Args: detection (dict): Detection results. - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox. - scores_3d (torch.Tensor): Detection scores. - labels_3d (torch.Tensor): Predicted box labels. - attrs_3d (torch.Tensor, optional): Predicted attributes. Returns: list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes.
Here is the function:
def output_to_nusc_box(detection):
    """Convert the output to the box class in the nuScenes.
    Args:
        detection (dict): Detection results.
            - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox.
            - scores_3d (torch.Tensor): Detection scores.
            - labels_3d (torch.Tensor): Predicted box labels.
            - attrs_3d (torch.Tensor, optional): Predicted attributes.
    Returns:
        list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes.
    """
    boxes_3d = detection['boxes_3d']
    det_scores = detection['scores_3d'].numpy()
    det_labels = detection['labels_3d'].numpy()
    det_attrs = detection['attrs_3d'].numpy() if 'attrs_3d' in detection else None
    centers = boxes_3d.gravity_center.numpy()
    dims = boxes_3d.dims.numpy()
    yaws = boxes_3d.yaw.numpy()
    # convert the dim/rot to nuscbox convention
    dims[:, [0, 1, 2]] = dims[:, [2, 0, 1]]
    yaws = -yaws
    box_list = []
    for idx in range(len(boxes_3d)):
        yaw_rot = pyquaternion.Quaternion(axis=[0, 0, 1], radians=yaws[idx])
        axis_swap = pyquaternion.Quaternion(axis=[1, 0, 0], radians=np.pi / 2)
        orientation = axis_swap * yaw_rot
        # Columns 7/8 of the box tensor hold the planar velocity components.
        velocity = (boxes_3d.tensor[idx, 7], 0.0, boxes_3d.tensor[idx, 8])
        box_list.append(
            NuScenesBox(
                centers[idx],
                dims[idx],
                orientation,
                label=det_labels[idx],
                score=det_scores[idx],
                velocity=velocity))
    return box_list, det_attrs
165,669 | import copy
import mmcv
import numpy as np
import pyquaternion
import tempfile
import torch
import warnings
from nuscenes.utils.data_classes import Box as NuScenesBox
from os import path as osp
from mmdet3d.core import bbox3d2result, box3d_multiclass_nms, xywhr2xyxyr
from mmdet.datasets import DATASETS, CocoDataset
from mmdet3d.core import show_multi_modality_result
from mmdet3d.core.bbox import CameraInstance3DBoxes, get_box_type
from mmdet3d.datasets.pipelines import Compose
from mmdet3d.datasets.utils import extract_result_dict, get_loading_pipeline
The provided code snippet includes necessary dependencies for implementing the `cam_nusc_box_to_global` function. Write a Python function `def cam_nusc_box_to_global(info, boxes, attrs, classes, eval_configs, eval_version='detection_cvpr_2019')` to solve the following problem:
Convert the box from camera to global coordinate. Args: info (dict): Info for a specific sample data, including the calibration information. boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. classes (list[str]): Mapped classes in the evaluation. eval_configs (object): Evaluation configuration object. eval_version (str): Evaluation version. Default: 'detection_cvpr_2019' Returns: list: List of standard NuScenesBoxes in the global coordinate.
Here is the function:
def cam_nusc_box_to_global(info,
                           boxes,
                           attrs,
                           classes,
                           eval_configs,
                           eval_version='detection_cvpr_2019'):
    """Convert the box from camera to global coordinate.
    Args:
        info (dict): Info for a specific sample data, including the
            calibration information.
        boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        attrs (list): Predicted attributes paired with ``boxes``.
        classes (list[str]): Mapped classes in the evaluation.
        eval_configs (object): Evaluation configuration object.
        eval_version (str): Evaluation version.
            Default: 'detection_cvpr_2019'
    Returns:
        list: List of standard NuScenesBoxes in the global
            coordinate.
    """
    kept_boxes = []
    kept_attrs = []
    for (box, attr) in zip(boxes, attrs):
        # camera frame -> ego vehicle frame
        box.rotate(pyquaternion.Quaternion(info['cam2ego_rotation']))
        box.translate(np.array(info['cam2ego_translation']))
        # Drop detections beyond the per-class evaluation range (ego frame).
        cls_range_map = eval_configs.class_range
        radius = np.linalg.norm(box.center[:2], 2)
        if radius > cls_range_map[classes[box.label]]:
            continue
        # ego frame -> global frame
        box.rotate(pyquaternion.Quaternion(info['ego2global_rotation']))
        box.translate(np.array(info['ego2global_translation']))
        kept_boxes.append(box)
        kept_attrs.append(attr)
    return kept_boxes, kept_attrs
165,670 | import copy
import mmcv
import numpy as np
import pyquaternion
import tempfile
import torch
import warnings
from nuscenes.utils.data_classes import Box as NuScenesBox
from os import path as osp
from mmdet3d.core import bbox3d2result, box3d_multiclass_nms, xywhr2xyxyr
from mmdet.datasets import DATASETS, CocoDataset
from mmdet3d.core import show_multi_modality_result
from mmdet3d.core.bbox import CameraInstance3DBoxes, get_box_type
from mmdet3d.datasets.pipelines import Compose
from mmdet3d.datasets.utils import extract_result_dict, get_loading_pipeline
The provided code snippet includes necessary dependencies for implementing the `global_nusc_box_to_cam` function. Write a Python function `def global_nusc_box_to_cam(info, boxes, classes, eval_configs, eval_version='detection_cvpr_2019')` to solve the following problem:
Convert the box from global to camera coordinate. Args: info (dict): Info for a specific sample data, including the calibration information. boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. classes (list[str]): Mapped classes in the evaluation. eval_configs (object): Evaluation configuration object. eval_version (str): Evaluation version. Default: 'detection_cvpr_2019' Returns: list: List of standard NuScenesBoxes in the camera coordinate.
Here is the function:
def global_nusc_box_to_cam(info,
                           boxes,
                           classes,
                           eval_configs,
                           eval_version='detection_cvpr_2019'):
    """Convert the box from global to camera coordinate.

    Args:
        info (dict): Info for a specific sample data, including the
            calibration information (``ego2global_*`` and ``cam2ego_*``).
        boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        classes (list[str]): Mapped classes in the evaluation.
        eval_configs (object): Evaluation configuration object; its
            ``class_range`` maps each class name to a max detection radius.
        eval_version (str): Evaluation version.
            Default: 'detection_cvpr_2019'

    Returns:
        list: List of standard NuScenesBoxes in the camera coordinate frame.
    """
    box_list = []
    # class_range does not depend on the box; look it up once.
    cls_range_map = eval_configs.class_range
    for box in boxes:
        # Undo the global transform: move box to ego vehicle coord system.
        box.translate(-np.array(info['ego2global_translation']))
        box.rotate(
            pyquaternion.Quaternion(info['ego2global_rotation']).inverse)
        # Filter detections outside the per-class evaluation range
        # (radius measured in the ego frame, xy-plane only).
        radius = np.linalg.norm(box.center[:2], 2)
        det_range = cls_range_map[classes[box.label]]
        if radius > det_range:
            continue
        # Move box from ego to camera coord system.
        box.translate(-np.array(info['cam2ego_translation']))
        box.rotate(pyquaternion.Quaternion(info['cam2ego_rotation']).inverse)
        box_list.append(box)
    return box_list
165,671 | import copy
import mmcv
import numpy as np
import pyquaternion
import tempfile
import torch
import warnings
from nuscenes.utils.data_classes import Box as NuScenesBox
from os import path as osp
from mmdet3d.core import bbox3d2result, box3d_multiclass_nms, xywhr2xyxyr
from mmdet.datasets import DATASETS, CocoDataset
from mmdet3d.core import show_multi_modality_result
from mmdet3d.core.bbox import CameraInstance3DBoxes, get_box_type
from mmdet3d.datasets.pipelines import Compose
from mmdet3d.datasets.utils import extract_result_dict, get_loading_pipeline
The provided code snippet includes necessary dependencies for implementing the `nusc_box_to_cam_box3d` function. Write a Python function `def nusc_box_to_cam_box3d(boxes)` to solve the following problem:
Convert boxes from :obj:`NuScenesBox` to :obj:`CameraInstance3DBoxes`. Args: boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. Returns: tuple (:obj:`CameraInstance3DBoxes` | torch.Tensor | torch.Tensor): \ Converted 3D bounding boxes, scores and labels.
Here is the function:
def nusc_box_to_cam_box3d(boxes):
    """Convert boxes from :obj:`NuScenesBox` to :obj:`CameraInstance3DBoxes`.

    Args:
        boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.

    Returns:
        tuple (:obj:`CameraInstance3DBoxes` | torch.Tensor | torch.Tensor): \
            Converted 3D bounding boxes, scores and labels.
    """
    centers = torch.Tensor([box.center for box in boxes]).view(-1, 3)
    dims = torch.Tensor([box.wlh for box in boxes]).view(-1, 3)
    yaws = torch.Tensor(
        [box.orientation.yaw_pitch_roll[0] for box in boxes]).view(-1, 1)
    vels = torch.Tensor([box.velocity[:2] for box in boxes]).view(-1, 2)
    # Reorder dims from the NuScenesBox (w, l, h) layout to the camera-box
    # convention and flip the yaw sign between the two conventions.
    dims = dims[:, [1, 2, 0]]
    yaws = -yaws
    boxes_3d = torch.cat([centers, dims, yaws, vels], dim=1).cuda()
    cam_boxes3d = CameraInstance3DBoxes(
        boxes_3d, box_dim=9, origin=(0.5, 0.5, 0.5))
    scores = torch.Tensor([box.score for box in boxes]).cuda()
    labels = torch.LongTensor([box.label for box in boxes]).cuda()
    # Scatter per-box scores into a (num_boxes, 10 + 1) matrix, one column
    # per class plus background, as expected by multiclass NMS.
    nms_scores = scores.new_zeros(scores.shape[0], 10 + 1)
    indices = labels.new_tensor(list(range(scores.shape[0])))
    nms_scores[indices, labels] = scores
    return cam_boxes3d, nms_scores, labels
165,672 | from __future__ import division
from typing import Any, List, Sequence, Tuple
import torch
from torch import device
from torch.nn import functional as F
from detectron2.utils.env import TORCH_VERSION
The provided code snippet includes necessary dependencies for implementing the `_as_tensor` function. Write a Python function `def _as_tensor(x: Tuple[int, int]) -> torch.Tensor` to solve the following problem:
An equivalent of `torch.as_tensor`, but works under tracing if input is a list of tensor. `torch.as_tensor` will record a constant in tracing, but this function will use `torch.stack` instead.
Here is the function:
def _as_tensor(x: Tuple[int, int]) -> torch.Tensor:
"""
An equivalent of `torch.as_tensor`, but works under tracing if input
is a list of tensor. `torch.as_tensor` will record a constant in tracing,
but this function will use `torch.stack` instead.
"""
if torch.jit.is_scripting():
return torch.as_tensor(x)
if isinstance(x, (list, tuple)) and all([isinstance(t, torch.Tensor) for t in x]):
return torch.stack(x)
return torch.as_tensor(x) | An equivalent of `torch.as_tensor`, but works under tracing if input is a list of tensor. `torch.as_tensor` will record a constant in tracing, but this function will use `torch.stack` instead. |
165,673 | import numpy as np
import torch
from pyquaternion import Quaternion
from torch.cuda import amp
from projects.mmdet3d_plugin.dd3d.utils.geometry import unproject_points2d
import projects.mmdet3d_plugin.dd3d.structures.transform3d as t3d
The provided code snippet includes necessary dependencies for implementing the `quaternion_to_matrix` function. Write a Python function `def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor` to solve the following problem:
Convert rotations given as quaternions to rotation matrices. Args: quaternions: quaternions with real part first, as tensor of shape (..., 4). Returns: Rotation matrices as tensor of shape (..., 3, 3).
Here is the function:
def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as quaternions to rotation matrices.

    Args:
        quaternions: quaternions with real part first,
            as tensor of shape (..., 4).

    Returns:
        Rotation matrices as tensor of shape (..., 3, 3).
    """
    w, x, y, z = torch.unbind(quaternions, -1)
    # 2 / |q|^2 so that non-unit quaternions still map to proper rotations.
    s = 2.0 / (quaternions * quaternions).sum(-1)
    row0 = (1 - s * (y * y + z * z), s * (x * y - z * w), s * (x * z + y * w))
    row1 = (s * (x * y + z * w), 1 - s * (x * x + z * z), s * (y * z - x * w))
    row2 = (s * (x * z - y * w), s * (y * z + x * w), 1 - s * (x * x + y * y))
    flat = torch.stack(row0 + row1 + row2, -1)
    return flat.reshape(quaternions.shape[:-1] + (3, 3))
165,674 | import numpy as np
import torch
from pyquaternion import Quaternion
from torch.cuda import amp
from projects.mmdet3d_plugin.dd3d.utils.geometry import unproject_points2d
import projects.mmdet3d_plugin.dd3d.structures.transform3d as t3d
def _to_tensor(x, dim):
if isinstance(x, torch.Tensor):
x = x.to(torch.float32)
elif isinstance(x, np.ndarray) or isinstance(x, list) or isinstance(x, tuple):
x = torch.tensor(x, dtype=torch.float32)
elif isinstance(x, Quaternion):
x = torch.tensor(x.elements, dtype=torch.float32)
else:
raise ValueError(f"Unsupported type: {type(x).__name__}")
if x.ndim == 1:
x = x.reshape(-1, dim)
elif x.ndim > 2:
raise ValueError(f"Invalid shape of input: {x.shape.__str__()}")
return x | null |
165,675 | import math
import warnings
from typing import List, Optional, Union
import torch
The provided code snippet includes necessary dependencies for implementing the `_axis_angle_rotation` function. Write a Python function `def _axis_angle_rotation(axis: str, angle: torch.Tensor) -> torch.Tensor` to solve the following problem:
Return the rotation matrices for one of the rotations about an axis of which Euler angles describe, for each value of the angle given. Args: axis: Axis label "X" or "Y or "Z". angle: any shape tensor of Euler angles in radians Returns: Rotation matrices as tensor of shape (..., 3, 3).
Here is the function:
def _axis_angle_rotation(axis: str, angle: torch.Tensor) -> torch.Tensor:
"""
Return the rotation matrices for one of the rotations about an axis
of which Euler angles describe, for each value of the angle given.
Args:
axis: Axis label "X" or "Y or "Z".
angle: any shape tensor of Euler angles in radians
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
cos = torch.cos(angle)
sin = torch.sin(angle)
one = torch.ones_like(angle)
zero = torch.zeros_like(angle)
if axis == "X":
R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
elif axis == "Y":
R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
elif axis == "Z":
R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
else:
raise ValueError("letter must be either X, Y or Z.")
return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3)) | Return the rotation matrices for one of the rotations about an axis of which Euler angles describe, for each value of the angle given. Args: axis: Axis label "X" or "Y or "Z". angle: any shape tensor of Euler angles in radians Returns: Rotation matrices as tensor of shape (..., 3, 3). |
165,676 | import math
import warnings
from typing import List, Optional, Union
import torch
Device = Union[str, torch.device]
def get_device(x, device: Optional[Device] = None) -> torch.device:
    """
    Gets the device of the specified variable x if it is a tensor, or
    falls back to a default CPU device otherwise. Allows overriding by
    providing an explicit device.

    Args:
        x: a torch.Tensor to get the device from or another type
        device: Device (as str or torch.device) to fall back to

    Returns:
        A matching torch.device object
    """
    if device is not None:
        # An explicit device always wins.
        return make_device(device)
    # Inherit the device from a tensor input; default to CPU otherwise.
    return x.device if torch.is_tensor(x) else torch.device("cpu")
def _handle_coord(c, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
"""
Helper function for _handle_input.
Args:
c: Python scalar, torch scalar, or 1D torch tensor
Returns:
c_vec: 1D torch tensor
"""
if not torch.is_tensor(c):
c = torch.tensor(c, dtype=dtype, device=device)
if c.dim() == 0:
c = c.view(1)
if c.device != device or c.dtype != dtype:
c = c.to(device=device, dtype=dtype)
return c
The provided code snippet includes necessary dependencies for implementing the `_handle_input` function. Write a Python function `def _handle_input( x, y, z, dtype: torch.dtype, device: Optional[Device], name: str, allow_singleton: bool = False, ) -> torch.Tensor` to solve the following problem:
Helper function to handle parsing logic for building transforms. The output is always a tensor of shape (N, 3), but there are several types of allowed input. Case I: Single Matrix In this case x is a tensor of shape (N, 3), and y and z are None. Here just return x. Case II: Vectors and Scalars In this case each of x, y, and z can be one of the following - Python scalar - Torch scalar - Torch tensor of shape (N, 1) or (1, 1) In this case x, y and z are broadcast to tensors of shape (N, 1) and concatenated to a tensor of shape (N, 3) Case III: Singleton (only if allow_singleton=True) In this case y and z are None, and x can be one of the following: - Python scalar - Torch scalar - Torch tensor of shape (N, 1) or (1, 1) Here x will be duplicated 3 times, and we return a tensor of shape (N, 3) Returns: xyz: Tensor of shape (N, 3)
Here is the function:
def _handle_input(
    x,
    y,
    z,
    dtype: torch.dtype,
    device: Optional[Device],
    name: str,
    allow_singleton: bool = False,
) -> torch.Tensor:
    """
    Helper function to handle parsing logic for building transforms. The output
    is always a tensor of shape (N, 3), but there are several types of allowed
    input.

    Case I: Single Matrix
        In this case x is a tensor of shape (N, 3), and y and z are None. Here
        just return x.

    Case II: Vectors and Scalars
        In this case each of x, y, and z can be one of the following
            - Python scalar
            - Torch scalar
            - Torch tensor of shape (N, 1) or (1, 1)
        In this case x, y and z are broadcast to tensors of shape (N, 1)
        and concatenated to a tensor of shape (N, 3)

    Case III: Singleton (only if allow_singleton=True)
        In this case y and z are None, and x can be one of the following:
            - Python scalar
            - Torch scalar
            - Torch tensor of shape (N, 1) or (1, 1)
        Here x will be duplicated 3 times, and we return a tensor of shape (N, 3)

    Returns:
        xyz: Tensor of shape (N, 3)
    """
    target_device = get_device(x, device)
    # Case I: a full (N, 3) matrix was passed directly.
    if torch.is_tensor(x) and x.dim() == 2:
        if x.shape[1] != 3:
            msg = "Expected tensor of shape (N, 3); got %r (in %s)"
            raise ValueError(msg % (x.shape, name))
        if y is not None or z is not None:
            raise ValueError("Expected y and z to be None (in %s)" % name)
        return x.to(device=target_device, dtype=dtype)

    # Case III: duplicate the single coordinate across all three axes.
    if allow_singleton and y is None and z is None:
        y = z = x

    # Case II: coerce each coordinate to a 1D tensor, then broadcast.
    coords = [_handle_coord(c, dtype, target_device) for c in (x, y, z)]
    lengths = [c.shape[0] for c in coords]
    N = max(lengths)
    if any(length not in (1, N) for length in lengths):
        raise ValueError(
            "Got non-broadcastable sizes %r (in %s)" % (lengths, name))
    return torch.stack([c.expand(N) for c in coords], dim=1)
165,677 | import math
import warnings
from typing import List, Optional, Union
import torch
Device = Union[str, torch.device]
def get_device(x, device: Optional[Device] = None) -> torch.device:
    """
    Gets the device of the specified variable x if it is a tensor, or
    falls back to a default CPU device otherwise. Allows overriding by
    providing an explicit device.

    Args:
        x: a torch.Tensor to get the device from or another type
        device: Device (as str or torch.device) to fall back to

    Returns:
        A matching torch.device object
    """
    if device is not None:
        # Explicit override takes priority over everything else.
        return make_device(device)
    # Fall back to the tensor's own device, else CPU.
    fallback = torch.device("cpu")
    return x.device if torch.is_tensor(x) else fallback
def _handle_coord(c, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
"""
Helper function for _handle_input.
Args:
c: Python scalar, torch scalar, or 1D torch tensor
Returns:
c_vec: 1D torch tensor
"""
if not torch.is_tensor(c):
c = torch.tensor(c, dtype=dtype, device=device)
if c.dim() == 0:
c = c.view(1)
if c.device != device or c.dtype != dtype:
c = c.to(device=device, dtype=dtype)
return c
The provided code snippet includes necessary dependencies for implementing the `_handle_angle_input` function. Write a Python function `def _handle_angle_input( x, dtype: torch.dtype, device: Optional[Device], name: str ) -> torch.Tensor` to solve the following problem:
Helper function for building a rotation function using angles. The output is always of shape (N,). The input can be one of: - Torch tensor of shape (N,) - Python scalar - Torch scalar
Here is the function:
def _handle_angle_input(
    x, dtype: torch.dtype, device: Optional[Device], name: str
) -> torch.Tensor:
    """
    Helper function for building a rotation function using angles.
    The output is always of shape (N,).

    The input can be one of:
        - Torch tensor of shape (N,)
        - Python scalar
        - Torch scalar

    Raises:
        ValueError: If ``x`` is a tensor with more than one dimension.
    """
    device_ = get_device(x, device)
    # Guard clause: reject matrices early; `raise` exits, so no `else` needed.
    if torch.is_tensor(x) and x.dim() > 1:
        msg = "Expected tensor of shape (N,); got %r (in %s)"
        raise ValueError(msg % (x.shape, name))
    return _handle_coord(x, dtype, device_)
165,678 | import math
import warnings
from typing import List, Optional, Union
import torch
The provided code snippet includes necessary dependencies for implementing the `_broadcast_bmm` function. Write a Python function `def _broadcast_bmm(a, b) -> torch.Tensor` to solve the following problem:
Batch multiply two matrices and broadcast if necessary. Args: a: torch tensor of shape (P, K) or (M, P, K) b: torch tensor of shape (N, K, K) Returns: a and b broadcast multiplied. The output batch dimension is max(N, M). To broadcast transforms across a batch dimension if M != N then expect that either M = 1 or N = 1. The tensor with batch dimension 1 is expanded to have shape N or M.
Here is the function:
def _broadcast_bmm(a, b) -> torch.Tensor:
"""
Batch multiply two matrices and broadcast if necessary.
Args:
a: torch tensor of shape (P, K) or (M, P, K)
b: torch tensor of shape (N, K, K)
Returns:
a and b broadcast multiplied. The output batch dimension is max(N, M).
To broadcast transforms across a batch dimension if M != N then
expect that either M = 1 or N = 1. The tensor with batch dimension 1 is
expanded to have shape N or M.
"""
if a.dim() == 2:
a = a[None]
if len(a) != len(b):
if not ((len(a) == 1) or (len(b) == 1)):
msg = "Expected batch dim for bmm to be equal or 1; got %r, %r"
raise ValueError(msg % (a.shape, b.shape))
if len(a) == 1:
a = a.expand(len(b), -1, -1)
if len(b) == 1:
b = b.expand(len(a), -1, -1)
return a.bmm(b) | Batch multiply two matrices and broadcast if necessary. Args: a: torch tensor of shape (P, K) or (M, P, K) b: torch tensor of shape (N, K, K) Returns: a and b broadcast multiplied. The output batch dimension is max(N, M). To broadcast transforms across a batch dimension if M != N then expect that either M = 1 or N = 1. The tensor with batch dimension 1 is expanded to have shape N or M. |
165,679 | import math
import warnings
from typing import List, Optional, Union
import torch
def _safe_det_3x3(t: torch.Tensor):
"""
Fast determinant calculation for a batch of 3x3 matrices.
Note, result of this function might not be the same as `torch.det()`.
The differences might be in the last significant digit.
Args:
t: Tensor of shape (N, 3, 3).
Returns:
Tensor of shape (N) with determinants.
"""
det = (
t[..., 0, 0] * (t[..., 1, 1] * t[..., 2, 2] - t[..., 1, 2] * t[..., 2, 1])
- t[..., 0, 1] * (t[..., 1, 0] * t[..., 2, 2] - t[..., 2, 0] * t[..., 1, 2])
+ t[..., 0, 2] * (t[..., 1, 0] * t[..., 2, 1] - t[..., 2, 0] * t[..., 1, 1])
)
return det
The provided code snippet includes necessary dependencies for implementing the `_check_valid_rotation_matrix` function. Write a Python function `def _check_valid_rotation_matrix(R, tol: float = 1e-7) -> None` to solve the following problem:
Determine if R is a valid rotation matrix by checking it satisfies the following conditions: ``RR^T = I and det(R) = 1`` Args: R: an (N, 3, 3) matrix Returns: None Emits a warning if R is an invalid rotation matrix.
Here is the function:
def _check_valid_rotation_matrix(R, tol: float = 1e-7) -> None:
    """
    Determine if R is a valid rotation matrix by checking it satisfies the
    following conditions:

    ``RR^T = I and det(R) = 1``

    Args:
        R: an (N, 3, 3) matrix

    Returns:
        None

    Emits a warning if R is an invalid rotation matrix.
    """
    batch = R.shape[0]
    identity = torch.eye(3, dtype=R.dtype, device=R.device).expand(batch, 3, 3)
    # R is orthogonal iff R @ R^T equals the identity (within tolerance).
    is_orthogonal = torch.allclose(R.bmm(R.transpose(1, 2)), identity, atol=tol)
    # A proper rotation additionally has determinant +1 (no reflection/scale).
    dets = _safe_det_3x3(R)
    is_proper = torch.allclose(dets, torch.ones_like(dets))
    if not (is_orthogonal and is_proper):
        warnings.warn("R is not a valid rotation matrix")
165,680 | import torch
The provided code snippet includes necessary dependencies for implementing the `smooth_l1_loss` function. Write a Python function `def smooth_l1_loss(input: torch.Tensor, target: torch.Tensor, beta: float, reduction: str = "none") -> torch.Tensor` to solve the following problem:
Smooth L1 loss defined in the Fast R-CNN paper as: | 0.5 * x ** 2 / beta if abs(x) < beta smoothl1(x) = | | abs(x) - 0.5 * beta otherwise, where x = input - target. Smooth L1 loss is related to Huber loss, which is defined as: | 0.5 * x ** 2 if abs(x) < beta huber(x) = | | beta * (abs(x) - 0.5 * beta) otherwise Smooth L1 loss is equal to huber(x) / beta. This leads to the following differences: - As beta -> 0, Smooth L1 loss converges to L1 loss, while Huber loss converges to a constant 0 loss. - As beta -> +inf, Smooth L1 converges to a constant 0 loss, while Huber loss converges to L2 loss. - For Smooth L1 loss, as beta varies, the L1 segment of the loss has a constant slope of 1. For Huber loss, the slope of the L1 segment is beta. Smooth L1 loss can be seen as exactly L1 loss, but with the abs(x) < beta portion replaced with a quadratic function such that at abs(x) = beta, its slope is 1. The quadratic segment smooths the L1 loss near x = 0. Args: input (Tensor): input tensor of any shape target (Tensor): target value tensor with the same shape as input beta (float): L1 to L2 change point. For beta values < 1e-5, L1 loss is computed. reduction: 'none' | 'mean' | 'sum' 'none': No reduction will be applied to the output. 'mean': The output will be averaged. 'sum': The output will be summed. Returns: The loss with the reduction option applied. Note: PyTorch's builtin "Smooth L1 loss" implementation does not actually implement Smooth L1 loss, nor does it implement Huber loss. It implements the special case of both in which they are equal (beta=1). See: https://pytorch.org/docs/stable/nn.html#torch.nn.SmoothL1Loss.
Here is the function:
def smooth_l1_loss(input: torch.Tensor, target: torch.Tensor, beta: float, reduction: str = "none") -> torch.Tensor:
    """
    Smooth L1 loss defined in the Fast R-CNN paper as:

        | 0.5 * x ** 2 / beta   if abs(x) < beta
        | abs(x) - 0.5 * beta   otherwise,

    where x = input - target.

    Smooth L1 loss is related to Huber loss, which is defined as:

        | 0.5 * x ** 2                  if abs(x) < beta
        | beta * (abs(x) - 0.5 * beta)  otherwise

    Smooth L1 loss is equal to huber(x) / beta. This leads to the following
    differences:

    - As beta -> 0, Smooth L1 loss converges to L1 loss, while Huber loss
      converges to a constant 0 loss.
    - As beta -> +inf, Smooth L1 converges to a constant 0 loss, while Huber
      loss converges to L2 loss.
    - For Smooth L1 loss, as beta varies, the L1 segment of the loss has a
      constant slope of 1. For Huber loss, the slope of the L1 segment is beta.

    Args:
        input (Tensor): input tensor of any shape
        target (Tensor): target value tensor with the same shape as input
        beta (float): L1 to L2 change point.
            For beta values < 1e-5, L1 loss is computed.
        reduction: 'none' | 'mean' | 'sum'

    Returns:
        The loss with the reduction option applied.

    Note:
        PyTorch's builtin "Smooth L1 loss" implementation does not actually
        implement Smooth L1 loss, nor does it implement Huber loss. It
        implements the special case of both in which they are equal (beta=1).
        See: https://pytorch.org/docs/stable/nn.html#torch.nn.SmoothL1Loss.

    NOTE(review): the quadratic branch below computes ``0.5 * n ** 2``
    *without* dividing by beta (the ``/ beta`` variant is left commented
    out), so for beta != 1 this deviates from the Smooth L1 formula in the
    docstring. Preserved as-is since loss weights may be tuned to it —
    confirm intent with the original authors.
    """
    # (dennis.park) Cast beta to the input dtype for mixed precision training.
    beta = torch.as_tensor(beta).to(input.dtype)
    diff = torch.abs(input - target)
    if beta < 1e-5:
        # If beta == 0, torch.where would produce nan gradients when the
        # chain rule is applied, because the unused quadratic branch still
        # receives an incoming gradient of zeros rather than "no gradient".
        # Tiny betas are therefore treated as exact L1 loss.
        loss = diff
    else:
        quadratic = (0.5 * diff**2).to(input.dtype)
        linear = (diff - 0.5 * beta).to(input.dtype)
        loss = torch.where(diff < beta, quadratic, linear)
        # loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)
    if reduction == "mean":
        return loss.mean()
    if reduction == "sum":
        return loss.sum()
    return loss
165,681 | import torch
from fvcore.nn import sigmoid_focal_loss
from torch import nn
from torch.nn import functional as F
from detectron2.layers import Conv2d, batched_nms, cat, get_norm
from detectron2.structures import Boxes, Instances
from detectron2.utils.comm import get_world_size
from mmcv.runner import force_fp32
from projects.mmdet3d_plugin.dd3d.layers.iou_loss import IOULoss
from projects.mmdet3d_plugin.dd3d.layers.normalization import ModuleListDial, Scale
from projects.mmdet3d_plugin.dd3d.utils.comm import reduce_sum
def compute_ctrness_targets(reg_targets):
    """Compute centerness targets from (left, top, right, bottom) distances.

    Centerness = sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))):
    1.0 when the location is at the box center, approaching 0 at the edges.

    Args:
        reg_targets: (N, 4) tensor of per-location regression targets.

    Returns:
        (N,) tensor of centerness values.
    """
    if len(reg_targets) == 0:
        # No locations: return an empty tensor with matching dtype/device.
        return reg_targets.new_zeros(0)
    horizontal = reg_targets[:, [0, 2]]
    vertical = reg_targets[:, [1, 3]]
    ctrness_sq = (horizontal.min(dim=-1).values / horizontal.max(dim=-1).values) * \
        (vertical.min(dim=-1).values / vertical.max(dim=-1).values)
    return torch.sqrt(ctrness_sq)
165,682 | import torch
import torch.nn.functional as F
from torch import nn
from detectron2.layers import Conv2d, cat, get_norm
from mmcv.runner import force_fp32
from projects.mmdet3d_plugin.dd3d.layers.normalization import ModuleListDial, Offset, Scale
from .disentangled_box3d_loss import DisentangledBox3DLoss
from projects.mmdet3d_plugin.dd3d.structures.boxes3d import Boxes3D
from projects.mmdet3d_plugin.dd3d.utils.geometry import allocentric_to_egocentric, unproject_points2d
EPS = 1e-7
class Boxes3D(GenericBoxes3D):
"""Vision-based 3D box container.
The tvec is computed from projected center, depth, and intrinsics.
"""
def __init__(self, quat, proj_ctr, depth, size, inv_intrinsics):
self.quat = quat
self.proj_ctr = proj_ctr
self.depth = depth
self.size = size
self.inv_intrinsics = inv_intrinsics
def tvec(self):
ray = unproject_points2d(self.proj_ctr, self.inv_intrinsics)
xyz = ray * self.depth
return xyz
def from_vectors(cls, vecs, intrinsics, device="cpu"):
"""
Parameters
----------
vecs: Iterable[np.ndarray]
Iterable of 10D pose representation.
intrinsics: np.ndarray
(3, 3) intrinsics matrix.
"""
if len(vecs) == 0:
quats = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 4)
proj_ctrs = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 2)
depths = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 1)
sizes = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 3)
inv_intrinsics = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 3, 3)
return cls(quats, proj_ctrs, depths, sizes, inv_intrinsics)
quats, proj_ctrs, depths, sizes = [], [], [], []
for vec in vecs:
quat = vec[:4]
proj_ctr = intrinsics.dot(vec[4:7])
proj_ctr = proj_ctr[:2] / proj_ctr[-1]
depth = vec[6:7]
size = vec[7:]
quats.append(quat)
proj_ctrs.append(proj_ctr)
depths.append(depth)
sizes.append(size)
quats = torch.as_tensor(np.array(quats), dtype=torch.float32, device=device)
proj_ctrs = torch.as_tensor(np.array(proj_ctrs), dtype=torch.float32, device=device)
depths = torch.as_tensor(np.array(depths), dtype=torch.float32, device=device)
sizes = torch.as_tensor(np.array(sizes), dtype=torch.float32, device=device)
inv_intrinsics = np.linalg.inv(intrinsics)
inv_intrinsics = torch.as_tensor(inv_intrinsics[None, ...], device=device).expand(len(vecs), 3, 3)
return cls(quats, proj_ctrs, depths, sizes, inv_intrinsics)
def cat(cls, boxes_list, dim=0):
assert isinstance(boxes_list, (list, tuple))
if len(boxes_list) == 0:
return cls(torch.empty(0), torch.empty(0), torch.empty(0), torch.empty(0), torch.empty(0))
assert all([isinstance(box, Boxes3D) for box in boxes_list])
# use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
quat = torch.cat([b.quat for b in boxes_list], dim=dim)
proj_ctr = torch.cat([b.proj_ctr for b in boxes_list], dim=dim)
depth = torch.cat([b.depth for b in boxes_list], dim=dim)
size = torch.cat([b.size for b in boxes_list], dim=dim)
inv_intrinsics = torch.cat([b.inv_intrinsics for b in boxes_list], dim=dim)
cat_boxes = cls(quat, proj_ctr, depth, size, inv_intrinsics)
return cat_boxes
def split(self, split_sizes, dim=0):
    """Split this Boxes3D into chunks of the given sizes along `dim`."""
    assert sum(split_sizes) == len(self)
    # Split each per-box tensor identically, then zip the pieces back together.
    fields = (self.quat, self.proj_ctr, self.depth, self.size, self.inv_intrinsics)
    chunked = [torch.split(field, split_sizes, dim=dim) for field in fields]
    return [Boxes3D(*pieces) for pieces in zip(*chunked)]
def __getitem__(self, item):
    """Index/slice the box collection; always returns a (batched) Boxes3D."""
    if isinstance(item, int):
        # A single integer index would drop the batch dim; re-add it via view.
        return Boxes3D(
            self.quat[item].view(1, -1),
            self.proj_ctr[item].view(1, -1),
            self.depth[item].view(1, -1),
            self.size[item].view(1, -1),
            self.inv_intrinsics[item].view(1, 3, 3),
        )
    selected = [self.quat[item], self.proj_ctr[item], self.depth[item], self.size[item]]
    inv_K = self.inv_intrinsics[item]
    # Any other index (slice, mask, index tensor) must keep results batched.
    err = "Indexing on Boxes3D with {} failed to return a matrix!".format(item)
    for field in selected:
        assert field.dim() == 2, err
    assert inv_K.dim() == 3, err
    assert inv_K.shape[1:] == (3, 3), err
    return Boxes3D(*selected, inv_K)
def __len__(self):
    """Return the number of boxes held by this container."""
    # All per-box tensors must stay in sync; guard against partial mutation.
    assert len(self.quat) == len(self.proj_ctr) == len(self.depth) == len(self.size) == len(self.inv_intrinsics)
    return self.quat.shape[0]
def clone(self):
    """Return a deep copy; the new tensors share no storage with this one."""
    copied = [
        t.clone() for t in (self.quat, self.proj_ctr, self.depth, self.size, self.inv_intrinsics)
    ]
    return Boxes3D(*copied)
def to(self, *args, **kwargs):
    """Return a new Boxes3D with every tensor moved/cast via Tensor.to()."""
    moved = [
        t.to(*args, **kwargs) for t in (self.quat, self.proj_ctr, self.depth, self.size, self.inv_intrinsics)
    ]
    return Boxes3D(*moved)
def allocentric_to_egocentric(quat, proj_ctr, inv_intrinsics):
    """Convert allocentric (viewing-ray-relative) orientations to egocentric (camera-frame) ones.

    Parameters
    ----------
    quat: Tensor
        (N, 4). Batch of (allocentric) quaternions.
    proj_ctr: Tensor
        (N, 2). Projected centers, xy pixel coordinates.
    inv_intrinsics: Tensor
        (N, 3, 3). Inverted intrinsics.

    Returns
    -------
    Tensor:
        (N, 4). Egocentric quaternions, normalized to unit norm.
    """
    R_obj_to_local = quaternion_to_matrix(quat)
    # ray == z-axis in local orientation
    ray = unproject_points2d(proj_ctr, inv_intrinsics)
    z = ray / ray.norm(dim=1, keepdim=True)
    # Gram-Schmidt process: local_y = global_y - (global_y . local_z) * local_z
    y = z.new_tensor([[0., 1., 0.]]) - z[:, 1:2] * z
    y = y / y.norm(dim=1, keepdim=True)
    x = torch.cross(y, z, dim=1)
    # local -> global (x, y, z are the columns of the rotation matrix)
    R_local_to_global = torch.stack([x, y, z], dim=-1)
    # obj -> global
    R_obj_to_global = torch.bmm(R_local_to_global, R_obj_to_local)
    egocentric_quat = matrix_to_quaternion(R_obj_to_global)
    # Make sure it's unit norm; warn (rather than fail) on drift beyond 1e-3.
    quat_norm = egocentric_quat.norm(dim=1, keepdim=True)
    if not torch.allclose(quat_norm, torch.as_tensor(1.), atol=1e-3):
        LOG.warning(
            f"Some of the input quaternions are not unit norm: min={quat_norm.min()}, max={quat_norm.max()}; therefore normalizing."
        )
        egocentric_quat = egocentric_quat / quat_norm.clamp(min=EPS)
    return egocentric_quat
def unproject_points2d(points2d, inv_K, scale=1.0):
    """Unproject pixel coordinates into 3D rays using inverted intrinsics.

    Parameters
    ----------
    points2d: Tensor
        xy coordinates. shape=(N, ..., 2)
        E.g., (N, 2) or (N, K, 2) or (N, H, W, 2)
    inv_K: Tensor
        Inverted intrinsics; shape=(N, 3, 3)
    scale: float, default: 1.0
        Scaling factor applied to the unprojected points.

    Returns
    -------
    Tensor:
        Unprojected 3D point. shape=(N, ..., 3)
        E.g., (N, 3) or (N, K, 3) or (N, H, W, 3)
    """
    # Append homogeneous coordinate: (N, ..., 2) -> (N, ..., 3).
    points2d = homogenize_points(points2d)
    siz = points2d.size()
    points2d = points2d.view(-1, 3).unsqueeze(-1)  # (N, 3, 1)
    # NOTE(review): for inputs with extra dims (e.g. (N, K, 2)) this flattens to
    # (N*K, 3, 1) while inv_K stays (N, 3, 3); the batched matmul then only
    # broadcasts when N == 1 -- verify against callers before relying on it.
    unprojected = torch.matmul(inv_K, points2d)  # (N, 3, 3) x (N, 3, 1) -> (N, 3, 1)
    unprojected = unprojected.view(siz)
    return unprojected * scale
def predictions_to_boxes3d(
    quat,
    proj_ctr,
    depth,
    size,
    locations,
    inv_intrinsics,
    canon_box_sizes,
    min_depth,
    max_depth,
    scale_depth_by_focal_lengths_factor,
    scale_depth_by_focal_lengths=True,
    quat_is_allocentric=True,
    depth_is_distance=False
):
    """Decode raw per-location network predictions into a Boxes3D container.

    Parameters
    ----------
    quat: (N, 4) predicted orientations (allocentric if `quat_is_allocentric`).
    proj_ctr: (N, 2) predicted center offsets, relative to `locations`.
    depth: (N,) or (N, 1) raw depth predictions.
    size: (N, 3) raw size predictions, squashed against `canon_box_sizes`.
    locations: (N, 2) feature-map pixel locations the predictions are anchored at.
    inv_intrinsics: (N, 3, 3) inverted camera intrinsics.
    canon_box_sizes: canonical (per-class) box dimensions used to decode `size`.
    min_depth, max_depth: clamp range for the decoded depth.
    scale_depth_by_focal_lengths_factor: divisor applied together with pixel size.
    """
    # Normalize to make quat unit norm.
    quat = quat / quat.norm(dim=1, keepdim=True).clamp(min=EPS)
    # Make sure again it's numerically unit-norm.
    quat = quat / quat.norm(dim=1, keepdim=True)
    if scale_depth_by_focal_lengths:
        # inv_K diagonal holds 1/fx, 1/fy; its norm is a per-image pixel-size
        # proxy, making the depth prediction focal-length invariant.
        pixel_size = torch.norm(torch.stack([inv_intrinsics[:, 0, 0], inv_intrinsics[:, 1, 1]], dim=-1), dim=-1)
        depth = depth / (pixel_size * scale_depth_by_focal_lengths_factor)
    if depth_is_distance:
        # Convert distance-along-ray into z-depth by dividing by the ray length.
        depth = depth / unproject_points2d(locations, inv_intrinsics).norm(dim=1).clamp(min=EPS)
    depth = depth.reshape(-1, 1).clamp(min_depth, max_depth)
    # Predicted centers are offsets from the anchor locations.
    proj_ctr = proj_ctr + locations
    if quat_is_allocentric:
        quat = allocentric_to_egocentric(quat, proj_ctr, inv_intrinsics)
    size = (size.tanh() + 1.) * canon_box_sizes  # max size = 2 * canon_size
    return Boxes3D(quat, proj_ctr, depth, size, inv_intrinsics)
165,683 | from collections import OrderedDict
import numpy as np
import seaborn as sns
from torch.utils.data import Dataset
from tqdm import tqdm
from detectron2.structures.boxes import BoxMode
from nuscenes.eval.detection.utils import category_to_detection_name
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.splits import create_splits_scenes
from projects.mmdet3d_plugin.dd3d.structures.boxes3d import GenericBoxes3D
from projects.mmdet3d_plugin.dd3d.structures.pose import Pose
from projects.mmdet3d_plugin.dd3d.utils.geometry import project_points3d
from projects.mmdet3d_plugin.dd3d.utils.visualization import float_to_uint8_color
The provided code snippet includes necessary dependencies for implementing the `_compute_iou` function. Write a Python function `def _compute_iou(box1, box2)` to solve the following problem:
Parameters: ``box1``, ``box2`` — axis-aligned boxes given as (x1, y1, x2, y2).
Here is the function:
def _compute_iou(box1, box2):
"""
Parameters
----------
box1, box2:
(x1, y1, x2, y2)
"""
xx1 = max(box1[0], box2[0])
yy1 = max(box1[1], box2[1])
xx2 = min(box1[2], box2[2])
yy2 = min(box1[3], box2[3])
if xx1 >= xx2 or yy1 >= yy2:
return 0.
inter = (xx2 - xx1) * (yy2 - yy1)
a1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
a2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
return inter / (a1 + a2 - inter) | Parameters ---------- box1, box2: (x1, y1, x2, y2) |
165,684 | import numpy as np
import torch
from detectron2.data import transforms as T
from detectron2.structures import Boxes, BoxMode, Instances
from projects.mmdet3d_plugin.dd3d.structures.boxes3d import Boxes3D
The provided code snippet includes necessary dependencies for implementing the `transform_instance_annotations` function. Write a Python function `def transform_instance_annotations( annotation, transforms, image_size, )` to solve the following problem:
Adapted from: https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/detection_utils.py#L254 The changes from original: - The presence of 2D bounding box (i.e. "bbox" field) is assumed by default in d2; here it's optional. - Add optional 3D bounding box support. - If the instance mask annotation is in RLE, then it's decoded into polygons, not bitmask, to save memory. =============================================================================================================== Apply transforms to box, segmentation and keypoints annotations of a single instance. It will use `transforms.apply_box` for the box, and `transforms.apply_coords` for segmentation polygons & keypoints. If you need anything more specially designed for each data structure, you'll need to implement your own version of this function or the transforms. Args: annotation (dict): dict of instance annotations for a single instance. It will be modified in-place. transforms (TransformList or list[Transform]): image_size (tuple): the height, width of the transformed image keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`. Returns: dict: the same input dict with fields "bbox", "segmentation", "keypoints" transformed according to `transforms`. The "bbox_mode" field will be set to XYXY_ABS.
Here is the function:
def transform_instance_annotations(
    annotation,
    transforms,
    image_size,
):
    """Apply image transforms to the annotations of a single instance.

    Adapted from:
    https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/detection_utils.py#L254
    The changes from original:
    - The presence of 2D bounding box (i.e. "bbox" field) is assumed by default in d2; here it's optional.
    - Add optional 3D bounding box ("bbox3d") support.

    Args:
        annotation (dict): dict of instance annotations for a single instance.
            It will be modified in-place.
        transforms (TransformList or list[Transform]): transforms to apply.
        image_size (tuple): the (height, width) of the transformed image.
    Returns:
        dict:
            the same input dict with fields "bbox" (and "bbox3d", if present)
            transformed according to `transforms`.
            The "bbox_mode" field will be set to XYXY_ABS.
    """
    if isinstance(transforms, (tuple, list)):
        transforms = T.TransformList(transforms)
    # (dennis.park) Here 2D bounding box is optional.
    if "bbox" in annotation:
        assert "bbox_mode" in annotation, "'bbox' is present, but 'bbox_mode' is not."
        # bbox is 1d (per-instance bounding box)
        bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
        bbox = transforms.apply_box(np.array([bbox]))[0]
        # clip transformed bbox to image size
        bbox = bbox.clip(min=0)
        # image_size is (h, w), so (h, w, h, w)[::-1] == (w, h, w, h), which
        # lines up with the (x1, y1, x2, y2) layout of `bbox`.
        bbox = np.minimum(bbox, list(image_size + image_size)[::-1])
        annotation["bbox"] = bbox
        annotation["bbox_mode"] = BoxMode.XYXY_ABS
    # Vertical flipping is not implemented (`flip_transform.py`). TODO: implement if needed.
    if "bbox3d" in annotation:
        bbox3d = np.array(annotation["bbox3d"])
        annotation['bbox3d'] = transforms.apply_box3d(bbox3d)
    return annotation
165,685 | import numpy as np
import torch
from detectron2.data import transforms as T
from detectron2.structures import Boxes, BoxMode, Instances
from projects.mmdet3d_plugin.dd3d.structures.boxes3d import Boxes3D
def _create_empty_instances(image_size):
    """Build an Instances holder for an image with zero annotations."""
    empty = Instances(image_size)
    # Identity intrinsics are a harmless placeholder for an empty box set.
    empty.gt_boxes3d = Boxes3D.from_vectors([], torch.eye(3, dtype=torch.float32))
    empty.gt_classes = torch.tensor([], dtype=torch.int64)
    empty.gt_boxes = Boxes([])
    return empty
class Boxes3D(GenericBoxes3D):
    """Vision-based 3D box container.

    The translation (tvec) is not stored; it is derived from the projected
    center, depth, and (inverted) camera intrinsics.
    """
    def __init__(self, quat, proj_ctr, depth, size, inv_intrinsics):
        # quat: (N, 4), proj_ctr: (N, 2), depth: (N, 1), size: (N, 3),
        # inv_intrinsics: (N, 3, 3) -- shapes produced by from_vectors()/__getitem__().
        self.quat = quat
        self.proj_ctr = proj_ctr
        self.depth = depth
        self.size = size
        self.inv_intrinsics = inv_intrinsics

    # NOTE(review): upstream implementations expose this as a @property; kept as
    # a plain method here to avoid changing the call-site contract -- confirm callers.
    def tvec(self):
        """3D box center: unproject the 2D center into a ray and scale by depth."""
        ray = unproject_points2d(self.proj_ctr, self.inv_intrinsics)
        xyz = ray * self.depth
        return xyz

    # BUGFIX: `from_vectors` and `cat` take `cls` as first argument and are
    # invoked unbound (e.g. `Boxes3D.from_vectors([], ...)`), so they must be
    # classmethods; without the decorator `cls` would be bound to the first
    # positional argument instead.
    @classmethod
    def from_vectors(cls, vecs, intrinsics, device="cpu"):
        """Build a Boxes3D from 10D pose vectors.

        Parameters
        ----------
        vecs: Iterable[np.ndarray]
            Iterable of 10D pose representation: quat (4), tvec (3), size (3).
        intrinsics: np.ndarray
            (3, 3) intrinsics matrix.
        device: str or torch.device
            Device for the resulting tensors.
        """
        if len(vecs) == 0:
            # Empty container with correctly-shaped zero-length tensors.
            quats = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 4)
            proj_ctrs = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 2)
            depths = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 1)
            sizes = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 3)
            inv_intrinsics = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 3, 3)
            return cls(quats, proj_ctrs, depths, sizes, inv_intrinsics)
        quats, proj_ctrs, depths, sizes = [], [], [], []
        for vec in vecs:
            quat = vec[:4]
            # Project the translation into the image; perspective divide by z.
            proj_ctr = intrinsics.dot(vec[4:7])
            proj_ctr = proj_ctr[:2] / proj_ctr[-1]
            # depth is the z-component of the translation vec[4:7].
            depth = vec[6:7]
            size = vec[7:]
            quats.append(quat)
            proj_ctrs.append(proj_ctr)
            depths.append(depth)
            sizes.append(size)
        quats = torch.as_tensor(np.array(quats), dtype=torch.float32, device=device)
        proj_ctrs = torch.as_tensor(np.array(proj_ctrs), dtype=torch.float32, device=device)
        depths = torch.as_tensor(np.array(depths), dtype=torch.float32, device=device)
        sizes = torch.as_tensor(np.array(sizes), dtype=torch.float32, device=device)
        inv_intrinsics = np.linalg.inv(intrinsics)
        # Share one inverted-intrinsics matrix across all boxes via expand().
        inv_intrinsics = torch.as_tensor(inv_intrinsics[None, ...], device=device).expand(len(vecs), 3, 3)
        return cls(quats, proj_ctrs, depths, sizes, inv_intrinsics)

    @classmethod
    def cat(cls, boxes_list, dim=0):
        """Concatenate a list/tuple of Boxes3D into one Boxes3D along `dim`."""
        assert isinstance(boxes_list, (list, tuple))
        if len(boxes_list) == 0:
            return cls(torch.empty(0), torch.empty(0), torch.empty(0), torch.empty(0), torch.empty(0))
        assert all([isinstance(box, Boxes3D) for box in boxes_list])
        # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
        quat = torch.cat([b.quat for b in boxes_list], dim=dim)
        proj_ctr = torch.cat([b.proj_ctr for b in boxes_list], dim=dim)
        depth = torch.cat([b.depth for b in boxes_list], dim=dim)
        size = torch.cat([b.size for b in boxes_list], dim=dim)
        inv_intrinsics = torch.cat([b.inv_intrinsics for b in boxes_list], dim=dim)
        cat_boxes = cls(quat, proj_ctr, depth, size, inv_intrinsics)
        return cat_boxes

    def split(self, split_sizes, dim=0):
        """Split into multiple Boxes3D of the given sizes along `dim`."""
        assert sum(split_sizes) == len(self)
        quat_list = torch.split(self.quat, split_sizes, dim=dim)
        proj_ctr_list = torch.split(self.proj_ctr, split_sizes, dim=dim)
        depth_list = torch.split(self.depth, split_sizes, dim=dim)
        size_list = torch.split(self.size, split_sizes, dim=dim)
        inv_K_list = torch.split(self.inv_intrinsics, split_sizes, dim=dim)
        return [Boxes3D(*x) for x in zip(quat_list, proj_ctr_list, depth_list, size_list, inv_K_list)]

    def __getitem__(self, item):
        """Index/slice the boxes; always returns a batched Boxes3D."""
        if isinstance(item, int):
            # Re-add the batch dim an integer index would drop.
            return Boxes3D(
                self.quat[item].view(1, -1), self.proj_ctr[item].view(1, -1), self.depth[item].view(1, -1),
                self.size[item].view(1, -1), self.inv_intrinsics[item].view(1, 3, 3)
            )
        quat = self.quat[item]
        ctr = self.proj_ctr[item]
        depth = self.depth[item]
        size = self.size[item]
        inv_K = self.inv_intrinsics[item]
        assert quat.dim() == 2, "Indexing on Boxes3D with {} failed to return a matrix!".format(item)
        assert ctr.dim() == 2, "Indexing on Boxes3D with {} failed to return a matrix!".format(item)
        assert depth.dim() == 2, "Indexing on Boxes3D with {} failed to return a matrix!".format(item)
        assert size.dim() == 2, "Indexing on Boxes3D with {} failed to return a matrix!".format(item)
        assert inv_K.dim() == 3, "Indexing on Boxes3D with {} failed to return a matrix!".format(item)
        assert inv_K.shape[1:] == (3, 3), "Indexing on Boxes3D with {} failed to return a matrix!".format(item)
        return Boxes3D(quat, ctr, depth, size, inv_K)

    def __len__(self):
        """Number of boxes; asserts all per-box tensors agree in length."""
        assert len(self.quat) == len(self.proj_ctr) == len(self.depth) == len(self.size) == len(self.inv_intrinsics)
        return self.quat.shape[0]

    def clone(self):
        """Return a deep copy sharing no storage with this container."""
        return Boxes3D(
            self.quat.clone(), self.proj_ctr.clone(), self.depth.clone(), self.size.clone(), self.inv_intrinsics.clone()
        )

    def to(self, *args, **kwargs):
        """Return a new Boxes3D with all tensors moved/cast via Tensor.to()."""
        quat = self.quat.to(*args, **kwargs)
        proj_ctr = self.proj_ctr.to(*args, **kwargs)
        depth = self.depth.to(*args, **kwargs)
        size = self.size.to(*args, **kwargs)
        inv_K = self.inv_intrinsics.to(*args, **kwargs)
        return Boxes3D(quat, proj_ctr, depth, size, inv_K)
The provided code snippet includes necessary dependencies for implementing the `annotations_to_instances` function. Write a Python function `def annotations_to_instances( annos, image_size, intrinsics=None, )` to solve the following problem:
Create an :class:`Instances` object used by the models, from instance annotations in the dataset dict. Args: annos (list[dict]): a list of instance annotations in one image, each element for one instance. image_size (tuple): height, width Returns: Instances: It will contain fields "gt_boxes", "gt_classes", "gt_masks", "gt_keypoints", if they can be obtained from `annos`. This is the format that builtin models expect.
Here is the function:
def annotations_to_instances(
    annos,
    image_size,
    intrinsics=None,
):
    """
    Create an :class:`Instances` object used by the models,
    from instance annotations in the dataset dict.
    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width
        intrinsics (np.ndarray, optional): (3, 3) camera intrinsics; required
            when the annotations carry "bbox3d" fields.
    Returns:
        Instances:
            It will contain fields "gt_boxes", "gt_classes", and optionally
            "gt_boxes3d", "gt_attributes", "gt_speeds", if they can be
            obtained from `annos`. This is the format that builtin models expect.
    Raises:
        ValueError: if the decoded 3D boxes disagree in count with the 2D boxes.
    """
    if len(annos) == 0:
        return _create_empty_instances(image_size)
    boxes = [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
    target = Instances(image_size)
    target.gt_boxes = Boxes(boxes)
    classes = [obj["category_id"] for obj in annos]
    target.gt_classes = torch.tensor(classes, dtype=torch.int64)
    if "bbox3d" in annos[0]:
        assert intrinsics is not None
        target.gt_boxes3d = Boxes3D.from_vectors([anno['bbox3d'] for anno in annos], intrinsics)
        if len(target.gt_boxes3d) != target.gt_boxes.tensor.shape[0]:
            raise ValueError(
                f"The sizes of `gt_boxes3d` and `gt_boxes` do not match: a={len(target.gt_boxes3d)}, b={target.gt_boxes.tensor.shape[0]}."
            )
    # NOTE: add nuscenes attributes here
    # NOTE: instances will be filtered later
    # BUGFIX: the original asserted len(attributes) == len(speeds) unconditionally
    # at the end, raising NameError whenever "attribute_id"/"speed" were absent.
    # The length checks now live inside the guards that define those lists.
    # NuScenes attributes
    if "attribute_id" in annos[0]:
        attributes = [obj["attribute_id"] for obj in annos]
        assert len(attributes) == len(boxes), \
            'the numbers of annotations should be the same'
        target.gt_attributes = torch.tensor(attributes, dtype=torch.int64)
    # Speed (magnitude of velocity)
    if "speed" in annos[0]:
        speeds = [obj["speed"] for obj in annos]
        assert len(speeds) == len(boxes), \
            'the numbers of annotations should be the same'
        target.gt_speeds = torch.tensor(speeds, dtype=torch.float32)
    return target
165,686 | import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `compute_features_locations` function. Write a Python function `def compute_features_locations(h, w, stride, dtype=torch.float32, device='cpu', offset="none")` to solve the following problem:
Adapted from AdelaiDet: https://github.com/aim-uofa/AdelaiDet/blob/master/adet/utils/comm.py Key difference: the offset is configurable.
Here is the function:
def compute_features_locations(h, w, stride, dtype=torch.float32, device='cpu', offset="none"):
    """Return the (x, y) image-plane location of every cell of an h x w feature map.

    Adapted from AdelaiDet:
    https://github.com/aim-uofa/AdelaiDet/blob/master/adet/utils/comm.py
    Key difference: the offset is configurable ("none" or "half" a stride).
    """
    xs = torch.arange(0, w * stride, step=stride, dtype=dtype, device=device)
    ys = torch.arange(0, h * stride, step=stride, dtype=dtype, device=device)
    grid_y, grid_x = torch.meshgrid(ys, xs)
    # Row-major flattening: x varies fastest, matching feature-map layout.
    locations = torch.stack((grid_x.reshape(-1), grid_y.reshape(-1)), dim=1)
    if offset == "half":
        locations += stride // 2
    else:
        assert offset == "none"
    return locations
165,687 | import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `aligned_bilinear` function. Write a Python function `def aligned_bilinear(tensor, factor, offset="none")` to solve the following problem:
Adapted from AdelaiDet: https://github.com/aim-uofa/AdelaiDet/blob/master/adet/utils/comm.py
Here is the function:
def aligned_bilinear(tensor, factor, offset="none"):
    """Upsample an NCHW tensor by an integer factor with aligned bilinear interpolation.

    Adapted from AdelaiDet:
    https://github.com/aim-uofa/AdelaiDet/blob/master/adet/utils/comm.py
    `offset="half"` shifts the output grid by half a stride via replicate padding.
    """
    assert tensor.dim() == 4
    assert factor >= 1
    assert int(factor) == factor
    if factor == 1:
        return tensor
    h, w = tensor.size()[2:]
    # Replicate-pad one row/column so align_corners interpolation lands on
    # original pixel centers, then crop back after upsampling.
    padded = F.pad(tensor, pad=(0, 1, 0, 1), mode="replicate")
    out_h = factor * h + 1
    out_w = factor * w + 1
    upsampled = F.interpolate(padded, size=(out_h, out_w), mode='bilinear', align_corners=True)
    if offset == "half":
        upsampled = F.pad(upsampled, pad=(factor // 2, 0, factor // 2, 0), mode="replicate")
    return upsampled[:, :, :out_h - 1, :out_w - 1]
165,688 | import logging
from functools import wraps
import torch.distributed as dist
from detectron2.utils import comm as d2_comm
LOG = logging.getLogger(__name__)
_NESTED_BROADCAST_FROM_MASTER = False
def is_distributed():
    # True when running under torch.distributed with more than one worker.
    return d2_comm.get_world_size() > 1
The provided code snippet includes necessary dependencies for implementing the `broadcast_from_master` function. Write a Python function `def broadcast_from_master(fn)` to solve the following problem:
If distributed, only the master executes the function and broadcast the results to other workers. Usage: @broadcast_from_master def foo(a, b): ...
Here is the function:
def broadcast_from_master(fn):
    """If distributed, only the master executes the function and broadcast the results to other workers.

    The wrapped function's return value must be picklable (it is sent via
    `dist.broadcast_object_list`) and must not be None.

    Usage:
        @broadcast_from_master
        def foo(a, b): ...
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):  # pylint: disable=unused-argument
        global _NESTED_BROADCAST_FROM_MASTER
        if not is_distributed():
            return fn(*args, **kwargs)
        if _NESTED_BROADCAST_FROM_MASTER:
            # We are already inside a broadcast_from_master call on the master:
            # run directly to avoid a nested (deadlocking) broadcast.
            assert d2_comm.is_main_process()
            LOG.warning(f"_NESTED_BROADCAST_FROM_MASTER = True, {fn.__name__}")
            return fn(*args, **kwargs)
        if d2_comm.is_main_process():
            # Flag set while fn runs, so nested decorated calls short-circuit above.
            _NESTED_BROADCAST_FROM_MASTER = True
            ret = [fn(*args, **kwargs), ]
            _NESTED_BROADCAST_FROM_MASTER = False
        else:
            ret = [None, ]
        if dist.is_initialized():
            # One-element list: broadcast_object_list fills it in-place on workers.
            dist.broadcast_object_list(ret)
        ret = ret[0]
        assert ret is not None
        return ret
    return wrapper
165,689 | import logging
from functools import wraps
import torch.distributed as dist
from detectron2.utils import comm as d2_comm
The provided code snippet includes necessary dependencies for implementing the `master_only` function. Write a Python function `def master_only(fn)` to solve the following problem:
If distributed, only the master executes the function. Usage: @master_only def foo(a, b): ...
Here is the function:
def master_only(fn):
    """If distributed, only the master executes the function; other workers
    wait at the synchronization barrier and return None.

    Usage:
        @master_only
        def foo(a, b): ...
    """
    @wraps(fn)
    def wrapped_fn(*args, **kwargs):
        result = fn(*args, **kwargs) if d2_comm.is_main_process() else None
        # All workers (master included) meet at the barrier before returning.
        d2_comm.synchronize()
        return result
    return wrapped_fn
165,690 | import logging
from functools import wraps
import torch.distributed as dist
from detectron2.utils import comm as d2_comm
The provided code snippet includes necessary dependencies for implementing the `gather_dict` function. Write a Python function `def gather_dict(dikt)` to solve the following problem:
Gather python dictionaries from all workers to the rank=0 worker. Assumption: the keys of `dikt` are disjoint across all workers. If rank = 0, then returned aggregated dict. If rank > 0, then return `None`.
Here is the function:
def gather_dict(dikt):
    """Gather python dictionaries from all workers to the rank=0 worker.

    Assumption: the keys of `dikt` are disjoint across all workers.
    Returns the merged dict on rank 0, and None on every other rank.
    """
    per_rank = d2_comm.gather(dikt, dst=0)
    if not d2_comm.is_main_process():
        return None
    merged = {}
    for partial in per_rank:
        for key in partial:
            assert key not in merged, f"Dictionary key overlaps: {key}"
        merged.update(partial)
    return merged
165,691 | import logging
from functools import wraps
import torch.distributed as dist
from detectron2.utils import comm as d2_comm
def is_distributed():
    # True when running under torch.distributed with more than one worker.
    return d2_comm.get_world_size() > 1
The provided code snippet includes necessary dependencies for implementing the `reduce_sum` function. Write a Python function `def reduce_sum(tensor)` to solve the following problem:
Adapted from AdelaiDet: https://github.com/aim-uofa/AdelaiDet/blob/master/adet/utils/comm.py
Here is the function:
def reduce_sum(tensor):
    """Sum `tensor` across all distributed workers; identity when not distributed.

    Adapted from AdelaiDet:
    https://github.com/aim-uofa/AdelaiDet/blob/master/adet/utils/comm.py
    """
    if is_distributed():
        # Clone so all_reduce's in-place sum never mutates the caller's tensor.
        tensor = tensor.clone()
        dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    return tensor
165,692 | import colorsys
import os
import cv2
import matplotlib.colors as mplc
import numpy as np
from PIL import Image, ImageDraw
The provided code snippet includes necessary dependencies for implementing the `fill_color_polygon` function. Write a Python function `def fill_color_polygon(image, polygon, color, alpha=0.5)` to solve the following problem:
Color interior of polygon with alpha-blending. This function modified input in place.
Here is the function:
def fill_color_polygon(image, polygon, color, alpha=0.5):
    """Color interior of polygon with alpha-blending. This function modifies input in place.

    Args:
        image (np.ndarray): (H, W, 3) image, modified in place.
        polygon: sequence of (x, y) vertices accepted by PIL's ImageDraw.polygon.
        color: length-3 per-channel color values, blended into the polygon interior.
        alpha (float): blend weight of `color` (1.0 = fully replace).
    """
    _mask = Image.new('L', (image.shape[1], image.shape[0]), 0)
    ImageDraw.Draw(_mask).polygon(polygon, outline=1, fill=1)
    # BUGFIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `bool` dtype is the documented replacement.
    mask = np.array(_mask, dtype=bool)
    for c in range(3):
        channel = image[:, :, c]
        channel[mask] = channel[mask] * (1. - alpha) + color[c] * alpha
165,693 | import colorsys
import os
import cv2
import matplotlib.colors as mplc
import numpy as np
from PIL import Image, ImageDraw
The provided code snippet includes necessary dependencies for implementing the `change_color_brightness` function. Write a Python function `def change_color_brightness(color, brightness_factor)` to solve the following problem:
Copied from detectron2.utils.visualizer.py ------------------------------------------- Depending on the brightness_factor, gives a lighter or darker color i.e. a color with less or more saturation than the original color. Args: color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of 0 will correspond to no change, a factor in [-1.0, 0) range will result in a darker color and a factor in (0, 1.0] range will result in a lighter color. Returns: modified_color (tuple[double]): a tuple containing the RGB values of the modified color. Each value in the tuple is in the [0.0, 1.0] range.
Here is the function:
def change_color_brightness(color, brightness_factor):
    """Return `color` lightened or darkened by `brightness_factor`.

    Copied from detectron2.utils.visualizer.py
    -------------------------------------------
    Args:
        color: color of the polygon. Refer to `matplotlib.colors` for a full
            list of formats that are accepted.
        brightness_factor (float): value in [-1.0, 1.0]. 0 means no change,
            [-1.0, 0) darkens, (0, 1.0] lightens.
    Returns:
        modified_color (tuple[double]): RGB values of the modified color,
            each in the [0.0, 1.0] range.
    """
    assert -1.0 <= brightness_factor <= 1.0
    # Work in HLS so only the lightness channel is touched.
    hue, lightness, saturation = colorsys.rgb_to_hls(*mplc.to_rgb(color))
    lightness = lightness + brightness_factor * lightness
    lightness = min(max(lightness, 0.0), 1.0)
    return colorsys.hls_to_rgb(hue, lightness, saturation)
165,694 | import colorsys
import os
import cv2
import matplotlib.colors as mplc
import numpy as np
from PIL import Image, ImageDraw
The provided code snippet includes necessary dependencies for implementing the `draw_text` function. Write a Python function `def draw_text(ax, text, position, *, font_size, color="g", horizontal_alignment="center", rotation=0)` to solve the following problem:
Copied from Visualizer.draw_text() ----------------------------------- Args: text (str): class label position (tuple): a tuple of the x and y coordinates to place text on image. font_size (int, optional): font of the text. If not provided, a font size proportional to the image width is calculated and used. color: color of the text. Refer to `matplotlib.colors` for full list of formats that are accepted. horizontal_alignment (str): see `matplotlib.text.Text` rotation: rotation angle in degrees CCW Returns: output (VisImage): image object with text drawn.
Here is the function:
def draw_text(ax, text, position, *, font_size, color="g", horizontal_alignment="center", rotation=0):
    """Draw a text label with a dark background box onto a matplotlib axes.

    Adapted from detectron2's ``Visualizer.draw_text()``.

    Args:
        ax: matplotlib axes to draw on.
        text (str): class label to render.
        position (tuple): (x, y) coordinates at which to place the text.
        font_size (int): font size of the text.
        color: text color; any format accepted by `matplotlib.colors`.
        horizontal_alignment (str): see `matplotlib.text.Text`.
        rotation: rotation angle in degrees CCW.

    Returns:
        The same axes object, with the text drawn.
    """
    # The text sits on a dark box, so lift every channel to at least 0.2
    # and push the dominant channel to at least 0.8 for readability.
    rgb = np.maximum(list(mplc.to_rgb(color)), 0.2)
    rgb[np.argmax(rgb)] = max(0.8, np.max(rgb))
    text_x, text_y = position
    box_style = {
        "facecolor": "black",
        "alpha": 0.8,
        "pad": 0.7,
        "edgecolor": "none"
    }
    ax.text(
        text_x,
        text_y,
        text,
        size=font_size,
        family="sans-serif",
        bbox=box_style,
        verticalalignment="top",
        horizontalalignment=horizontal_alignment,
        color=rgb,
        zorder=10,
        rotation=rotation,
    )
    return ax
165,695 | import colorsys
import os
import cv2
import matplotlib.colors as mplc
import numpy as np
from PIL import Image, ImageDraw
def float_to_uint8_color(float_clr):
    """Convert a color with float channels in [0.0, 1.0] to 8-bit ints in [0, 255]."""
    # Validate range before scaling; out-of-range channels indicate a caller bug.
    assert all(channel >= 0. for channel in float_clr)
    assert all(channel <= 1. for channel in float_clr)
    return [int(channel * 255.) for channel in float_clr]
165,696 | import colorsys
import os
import cv2
import matplotlib.colors as mplc
import numpy as np
from PIL import Image, ImageDraw
The provided code snippet includes necessary dependencies for implementing the `mosaic` function. Write a Python function `def mosaic(items, scale=1.0, pad=3, grid_width=None)` to solve the following problem:
Creates a mosaic from list of images. Parameters ---------- items: list of np.ndarray List of images to mosaic. scale: float, default=1.0 Scale factor applied to images. scale > 1.0 enlarges images. pad: int, default=3 Padding size of the images before mosaic grid_width: int, default=None Mosaic width or grid width of the mosaic Returns ------- image: np.array of shape (H, W, 3) Image mosaic
Here is the function:
def mosaic(items, scale=1.0, pad=3, grid_width=None):
    """Creates a mosaic from list of images.
    Parameters
    ----------
    items: list of np.ndarray
        List of images to mosaic.
    scale: float, default=1.0
        Scale factor applied to images. scale > 1.0 enlarges images.
    pad: int, default=3
        Padding size of the images before mosaic
    grid_width: int, default=None
        Mosaic width or grid width of the mosaic
    Returns
    -------
    image: np.array of shape (H, W, 3)
        Image mosaic
    """
    # Determine tile width and height
    N = len(items)
    assert N > 0, 'No items to mosaic!'
    grid_width = grid_width if grid_width else np.ceil(np.sqrt(N)).astype(int)
    # BUGFIX: use the builtin `int` — the `np.int` alias was deprecated in
    # NumPy 1.20 and removed in 1.24, so `astype(np.int)` raises on modern
    # NumPy (and the line above already used the builtin).
    grid_height = np.ceil(N * 1. / grid_width).astype(int)
    input_size = items[0].shape[:2]
    target_shape = (int(input_size[1] * scale), int(input_size[0] * scale))
    mosaic_items = []
    for j in range(grid_width * grid_height):
        if j < N:
            # Every tile is resized to the (scaled) shape of the first image
            # so that all grid cells have identical dimensions.
            im = cv2.resize(items[j], dsize=target_shape)
            mosaic_items.append(im)
        else:
            # Fill trailing grid cells with black tiles of the same shape.
            mosaic_items.append(np.zeros_like(mosaic_items[-1]))
    # Stack W tiles horizontally first, then vertically
    im_pad = lambda im: cv2.copyMakeBorder(im, pad, pad, pad, pad, cv2.BORDER_CONSTANT, 0)
    mosaic_items = [im_pad(im) for im in mosaic_items]
    hstack = [np.hstack(mosaic_items[j:j + grid_width]) for j in range(0, len(mosaic_items), grid_width)]
    mosaic_viz = np.vstack(hstack) if len(hstack) > 1 \
        else hstack[0]
    return mosaic_viz
165,697 | import logging
import cv2
import numpy as np
import torch
import torch.nn.functional as F
def project_points3d(Xw, K):
    """Project (N, 3) world points onto the image plane.

    Uses identity extrinsics (zero rotation/translation) and no lens
    distortion; only the intrinsic matrix ``K`` is applied.

    Args:
        Xw: (N, 3) array of 3D points.
        K: (3, 3) camera intrinsic matrix.

    Returns:
        (N, 2) array of pixel coordinates.
    """
    _, n_coords = Xw.shape
    assert n_coords == 3
    rvec = np.zeros((3, 1), dtype=np.float32)
    tvec = np.zeros(3, dtype=np.float32)
    dist_coeffs = np.zeros(5, dtype=np.float32)
    uv, _ = cv2.projectPoints(Xw, rvec, tvec, K, dist_coeffs)
    return uv.reshape(-1, 2)
165,704 | from data_converter.create_gt_database import create_groundtruth_database
from data_converter import nuscenes_converter as nuscenes_converter
from data_converter import lyft_converter as lyft_converter
from data_converter import kitti_converter as kitti
from data_converter import indoor_converter as indoor
import argparse
from os import path as osp
import sys
def create_groundtruth_database(dataset_class_name,
                                data_path,
                                info_prefix,
                                info_path=None,
                                mask_anno_path=None,
                                used_classes=None,
                                database_save_path=None,
                                db_info_save_path=None,
                                relative_path=True,
                                add_rgb=False,
                                lidar_only=False,
                                bev_only=False,
                                coors_range=None,
                                with_mask=False):
    """Given the raw data, generate the ground truth database.
    Args:
        dataset_class_name (str): Name of the input dataset.
        data_path (str): Path of the data.
        info_prefix (str): Prefix of the info file.
        info_path (str): Path of the info file.
            Default: None.
        mask_anno_path (str): Path of the mask_anno.
            Default: None.
        used_classes (list[str]): Classes have been used.
            Default: None.
        database_save_path (str): Path to save database.
            Default: None.
        db_info_save_path (str): Path to save db_info.
            Default: None.
        relative_path (bool): Whether to use relative path.
            Default: True.
        with_mask (bool): Whether to use mask.
            Default: False.
    """
    print(f'Create GT Database of {dataset_class_name}')
    dataset_cfg = dict(
        type=dataset_class_name, data_root=data_path, ann_file=info_path)
    # Per-dataset loading pipeline: point-cloud dimensionality and whether
    # camera data is needed differ between KITTI / nuScenes / Waymo.
    if dataset_class_name == 'KittiDataset':
        file_client_args = dict(backend='disk')
        dataset_cfg.update(
            test_mode=False,
            split='training',
            modality=dict(
                use_lidar=True,
                use_depth=False,
                use_lidar_intensity=True,
                use_camera=with_mask,
            ),
            pipeline=[
                dict(
                    type='LoadPointsFromFile',
                    coord_type='LIDAR',
                    load_dim=4,
                    use_dim=4,
                    file_client_args=file_client_args),
                dict(
                    type='LoadAnnotations3D',
                    with_bbox_3d=True,
                    with_label_3d=True,
                    file_client_args=file_client_args)
            ])
    elif dataset_class_name == 'NuScenesDataset':
        dataset_cfg.update(
            use_valid_flag=True,
            pipeline=[
                dict(
                    type='LoadPointsFromFile',
                    coord_type='LIDAR',
                    load_dim=5,
                    use_dim=5),
                dict(
                    type='LoadPointsFromMultiSweeps',
                    sweeps_num=10,
                    use_dim=[0, 1, 2, 3, 4],
                    pad_empty_sweeps=True,
                    remove_close=True),
                dict(
                    type='LoadAnnotations3D',
                    with_bbox_3d=True,
                    with_label_3d=True)
            ])
    elif dataset_class_name == 'WaymoDataset':
        file_client_args = dict(backend='disk')
        dataset_cfg.update(
            test_mode=False,
            split='training',
            modality=dict(
                use_lidar=True,
                use_depth=False,
                use_lidar_intensity=True,
                use_camera=False,
            ),
            pipeline=[
                dict(
                    type='LoadPointsFromFile',
                    coord_type='LIDAR',
                    load_dim=6,
                    use_dim=5,
                    file_client_args=file_client_args),
                dict(
                    type='LoadAnnotations3D',
                    with_bbox_3d=True,
                    with_label_3d=True,
                    file_client_args=file_client_args)
            ])
    dataset = build_dataset(dataset_cfg)
    if database_save_path is None:
        database_save_path = osp.join(data_path, f'{info_prefix}_gt_database')
    if db_info_save_path is None:
        db_info_save_path = osp.join(data_path,
                                     f'{info_prefix}_dbinfos_train.pkl')
    mmcv.mkdir_or_exist(database_save_path)
    all_db_infos = dict()
    if with_mask:
        # Map image file names to COCO image ids so each sample's 2D
        # annotations can be looked up later in the loop.
        coco = COCO(osp.join(data_path, mask_anno_path))
        imgIds = coco.getImgIds()
        file2id = dict()
        for i in imgIds:
            info = coco.loadImgs([i])[0]
            file2id.update({info['file_name']: i})
    group_counter = 0
    for j in track_iter_progress(list(range(len(dataset)))):
        input_dict = dataset.get_data_info(j)
        dataset.pre_pipeline(input_dict)
        example = dataset.pipeline(input_dict)
        annos = example['ann_info']
        image_idx = example['sample_idx']
        points = example['points'].tensor.numpy()
        gt_boxes_3d = annos['gt_bboxes_3d'].tensor.numpy()
        names = annos['gt_names']
        group_dict = dict()
        if 'group_ids' in annos:
            group_ids = annos['group_ids']
        else:
            group_ids = np.arange(gt_boxes_3d.shape[0], dtype=np.int64)
        difficulty = np.zeros(gt_boxes_3d.shape[0], dtype=np.int32)
        if 'difficulty' in annos:
            difficulty = annos['difficulty']
        num_obj = gt_boxes_3d.shape[0]
        point_indices = box_np_ops.points_in_rbbox(points, gt_boxes_3d)
        if with_mask:
            # prepare masks
            gt_boxes = annos['gt_bboxes']
            img_path = osp.split(example['img_info']['filename'])[-1]
            if img_path not in file2id.keys():
                print(f'skip image {img_path} for empty mask')
                continue
            img_id = file2id[img_path]
            kins_annIds = coco.getAnnIds(imgIds=img_id)
            kins_raw_info = coco.loadAnns(kins_annIds)
            kins_ann_info = _parse_coco_ann_info(kins_raw_info)
            h, w = annos['img_shape'][:2]
            gt_masks = [
                _poly2mask(mask, h, w) for mask in kins_ann_info['masks']
            ]
            # get mask inds based on iou mapping
            bbox_iou = bbox_overlaps(kins_ann_info['bboxes'], gt_boxes)
            mask_inds = bbox_iou.argmax(axis=0)
            valid_inds = (bbox_iou.max(axis=0) > 0.5)
            # mask the image
            # use more precise crop when it is ready
            # object_img_patches = np.ascontiguousarray(
            #     np.stack(object_img_patches, axis=0).transpose(0, 3, 1, 2))
            # crop image patches using roi_align
            # object_img_patches = crop_image_patch_v2(
            #     torch.Tensor(gt_boxes),
            #     torch.Tensor(mask_inds).long(), object_img_patches)
            object_img_patches, object_masks = crop_image_patch(
                gt_boxes, gt_masks, mask_inds, annos['img'])
        for i in range(num_obj):
            filename = f'{image_idx}_{names[i]}_{i}.bin'
            abs_filepath = osp.join(database_save_path, filename)
            rel_filepath = osp.join(f'{info_prefix}_gt_database', filename)
            # save point clouds and image patches for each object
            gt_points = points[point_indices[:, i]]
            # Shift points into the box's local frame (box center at origin).
            gt_points[:, :3] -= gt_boxes_3d[i, :3]
            if with_mask:
                if object_masks[i].sum() == 0 or not valid_inds[i]:
                    # Skip object for empty or invalid mask
                    continue
                img_patch_path = abs_filepath + '.png'
                mask_patch_path = abs_filepath + '.mask.png'
                mmcv.imwrite(object_img_patches[i], img_patch_path)
                mmcv.imwrite(object_masks[i], mask_patch_path)
            # BUGFIX: open in binary mode ('wb'); `ndarray.tofile` writes raw
            # bytes, and text mode ('w') corrupts them on platforms that
            # translate newline bytes (e.g. Windows).
            with open(abs_filepath, 'wb') as f:
                gt_points.tofile(f)
            if (used_classes is None) or names[i] in used_classes:
                db_info = {
                    'name': names[i],
                    'path': rel_filepath,
                    'image_idx': image_idx,
                    'gt_idx': i,
                    'box3d_lidar': gt_boxes_3d[i],
                    'num_points_in_gt': gt_points.shape[0],
                    'difficulty': difficulty[i],
                }
                local_group_id = group_ids[i]
                # if local_group_id >= 0:
                if local_group_id not in group_dict:
                    group_dict[local_group_id] = group_counter
                    group_counter += 1
                db_info['group_id'] = group_dict[local_group_id]
                if 'score' in annos:
                    db_info['score'] = annos['score'][i]
                if with_mask:
                    db_info.update({'box2d_camera': gt_boxes[i]})
                if names[i] in all_db_infos:
                    all_db_infos[names[i]].append(db_info)
                else:
                    all_db_infos[names[i]] = [db_info]
    for k, v in all_db_infos.items():
        print(f'load {len(v)} {k} database infos')
    with open(db_info_save_path, 'wb') as f:
        pickle.dump(all_db_infos, f)
The provided code snippet includes necessary dependencies for implementing the `kitti_data_prep` function. Write a Python function `def kitti_data_prep(root_path, info_prefix, version, out_dir)` to solve the following problem:
Prepare data related to Kitti dataset. Related data consists of '.pkl' files recording basic infos, 2D annotations and groundtruth database. Args: root_path (str): Path of dataset root. info_prefix (str): The prefix of info filenames. version (str): Dataset version. out_dir (str): Output directory of the groundtruth database info.
Here is the function:
def kitti_data_prep(root_path, info_prefix, version, out_dir):
    """Prepare data related to Kitti dataset.
    Related data consists of '.pkl' files recording basic infos,
    2D annotations and groundtruth database.
    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version.
        out_dir (str): Output directory of the groundtruth database info.
    """
    kitti.create_kitti_info_file(root_path, info_prefix)
    kitti.create_reduced_point_cloud(root_path, info_prefix)
    # Export 2D annotations for every split's info file.
    for split in ('train', 'val', 'trainval', 'test'):
        split_info_path = osp.join(root_path, f'{info_prefix}_infos_{split}.pkl')
        kitti.export_2d_annotation(root_path, split_info_path)
    create_groundtruth_database(
        'KittiDataset',
        root_path,
        info_prefix,
        f'{out_dir}/{info_prefix}_infos_train.pkl',
        relative_path=False,
        mask_anno_path='instances_train.json',
        with_mask=(version == 'mask'))
165,705 | from data_converter.create_gt_database import create_groundtruth_database
from data_converter import nuscenes_converter as nuscenes_converter
from data_converter import lyft_converter as lyft_converter
from data_converter import kitti_converter as kitti
from data_converter import indoor_converter as indoor
import argparse
from os import path as osp
import sys
The provided code snippet includes necessary dependencies for implementing the `nuscenes_data_prep` function. Write a Python function `def nuscenes_data_prep(root_path, can_bus_root_path, info_prefix, version, dataset_name, out_dir, max_sweeps=10)` to solve the following problem:
Prepare data related to nuScenes dataset. Related data consists of '.pkl' files recording basic infos, 2D annotations and groundtruth database. Args: root_path (str): Path of dataset root. info_prefix (str): The prefix of info filenames. version (str): Dataset version. dataset_name (str): The dataset class name. out_dir (str): Output directory of the groundtruth database info. max_sweeps (int): Number of input consecutive frames. Default: 10
Here is the function:
def nuscenes_data_prep(root_path,
                       can_bus_root_path,
                       info_prefix,
                       version,
                       dataset_name,
                       out_dir,
                       max_sweeps=10):
    """Prepare data related to nuScenes dataset.
    Related data consists of '.pkl' files recording basic infos,
    2D annotations and groundtruth database.
    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version.
        dataset_name (str): The dataset class name.
        out_dir (str): Output directory of the groundtruth database info.
        max_sweeps (int): Number of input consecutive frames. Default: 10
    """
    nuscenes_converter.create_nuscenes_infos(
        root_path, out_dir, can_bus_root_path, info_prefix, version=version, max_sweeps=max_sweeps)
    # The test release only ships a test split; every other version has
    # train/val info files instead.
    splits = ['test'] if version == 'v1.0-test' else ['train', 'val']
    for split in splits:
        split_info_path = osp.join(
            out_dir, f'{info_prefix}_infos_temporal_{split}.pkl')
        nuscenes_converter.export_2d_annotation(
            root_path, split_info_path, version=version)
165,706 | from data_converter.create_gt_database import create_groundtruth_database
from data_converter import nuscenes_converter as nuscenes_converter
from data_converter import lyft_converter as lyft_converter
from data_converter import kitti_converter as kitti
from data_converter import indoor_converter as indoor
import argparse
from os import path as osp
import sys
The provided code snippet includes necessary dependencies for implementing the `lyft_data_prep` function. Write a Python function `def lyft_data_prep(root_path, info_prefix, version, max_sweeps=10)` to solve the following problem:
Prepare data related to Lyft dataset. Related data consists of '.pkl' files recording basic infos. Although the ground truth database and 2D annotations are not used in Lyft, it can also be generated like nuScenes. Args: root_path (str): Path of dataset root. info_prefix (str): The prefix of info filenames. version (str): Dataset version. max_sweeps (int, optional): Number of input consecutive frames. Defaults to 10.
Here is the function:
def lyft_data_prep(root_path, info_prefix, version, max_sweeps=10):
    """Prepare the '.pkl' info files for the Lyft dataset.

    Lyft does not use a ground-truth database or 2D annotations here,
    so only the basic info files are generated (nuScenes-style).

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version.
        max_sweeps (int, optional): Number of input consecutive frames.
            Defaults to 10.
    """
    lyft_converter.create_lyft_infos(
        root_path,
        info_prefix,
        version=version,
        max_sweeps=max_sweeps)
165,707 | from data_converter.create_gt_database import create_groundtruth_database
from data_converter import nuscenes_converter as nuscenes_converter
from data_converter import lyft_converter as lyft_converter
from data_converter import kitti_converter as kitti
from data_converter import indoor_converter as indoor
import argparse
from os import path as osp
import sys
The provided code snippet includes necessary dependencies for implementing the `scannet_data_prep` function. Write a Python function `def scannet_data_prep(root_path, info_prefix, out_dir, workers)` to solve the following problem:
Prepare the info file for scannet dataset. Args: root_path (str): Path of dataset root. info_prefix (str): The prefix of info filenames. out_dir (str): Output directory of the generated info file. workers (int): Number of threads to be used.
Here is the function:
def scannet_data_prep(root_path, info_prefix, out_dir, workers):
    """Generate the info file for the ScanNet dataset.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        out_dir (str): Output directory of the generated info file.
        workers (int): Number of threads to be used.
    """
    indoor.create_indoor_info_file(
        root_path,
        info_prefix,
        out_dir,
        workers=workers)
165,708 | from data_converter.create_gt_database import create_groundtruth_database
from data_converter import nuscenes_converter as nuscenes_converter
from data_converter import lyft_converter as lyft_converter
from data_converter import kitti_converter as kitti
from data_converter import indoor_converter as indoor
import argparse
from os import path as osp
import sys
The provided code snippet includes necessary dependencies for implementing the `s3dis_data_prep` function. Write a Python function `def s3dis_data_prep(root_path, info_prefix, out_dir, workers)` to solve the following problem:
Prepare the info file for s3dis dataset. Args: root_path (str): Path of dataset root. info_prefix (str): The prefix of info filenames. out_dir (str): Output directory of the generated info file. workers (int): Number of threads to be used.
Here is the function:
def s3dis_data_prep(root_path, info_prefix, out_dir, workers):
    """Generate the info file for the S3DIS dataset.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        out_dir (str): Output directory of the generated info file.
        workers (int): Number of threads to be used.
    """
    indoor.create_indoor_info_file(
        root_path,
        info_prefix,
        out_dir,
        workers=workers)
165,709 | from data_converter.create_gt_database import create_groundtruth_database
from data_converter import nuscenes_converter as nuscenes_converter
from data_converter import lyft_converter as lyft_converter
from data_converter import kitti_converter as kitti
from data_converter import indoor_converter as indoor
import argparse
from os import path as osp
import sys
The provided code snippet includes necessary dependencies for implementing the `sunrgbd_data_prep` function. Write a Python function `def sunrgbd_data_prep(root_path, info_prefix, out_dir, workers)` to solve the following problem:
Prepare the info file for sunrgbd dataset. Args: root_path (str): Path of dataset root. info_prefix (str): The prefix of info filenames. out_dir (str): Output directory of the generated info file. workers (int): Number of threads to be used.
Here is the function:
def sunrgbd_data_prep(root_path, info_prefix, out_dir, workers):
    """Generate the info file for the SUN RGB-D dataset.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        out_dir (str): Output directory of the generated info file.
        workers (int): Number of threads to be used.
    """
    indoor.create_indoor_info_file(
        root_path,
        info_prefix,
        out_dir,
        workers=workers)
165,710 | from data_converter.create_gt_database import create_groundtruth_database
from data_converter import nuscenes_converter as nuscenes_converter
from data_converter import lyft_converter as lyft_converter
from data_converter import kitti_converter as kitti
from data_converter import indoor_converter as indoor
import argparse
from os import path as osp
import sys
def create_groundtruth_database(dataset_class_name,
                                data_path,
                                info_prefix,
                                info_path=None,
                                mask_anno_path=None,
                                used_classes=None,
                                database_save_path=None,
                                db_info_save_path=None,
                                relative_path=True,
                                add_rgb=False,
                                lidar_only=False,
                                bev_only=False,
                                coors_range=None,
                                with_mask=False):
    """Given the raw data, generate the ground truth database.
    Args:
        dataset_class_name (str): Name of the input dataset.
        data_path (str): Path of the data.
        info_prefix (str): Prefix of the info file.
        info_path (str): Path of the info file.
            Default: None.
        mask_anno_path (str): Path of the mask_anno.
            Default: None.
        used_classes (list[str]): Classes have been used.
            Default: None.
        database_save_path (str): Path to save database.
            Default: None.
        db_info_save_path (str): Path to save db_info.
            Default: None.
        relative_path (bool): Whether to use relative path.
            Default: True.
        with_mask (bool): Whether to use mask.
            Default: False.
    """
    print(f'Create GT Database of {dataset_class_name}')
    dataset_cfg = dict(
        type=dataset_class_name, data_root=data_path, ann_file=info_path)
    # Per-dataset loading pipeline: point-cloud dimensionality and whether
    # camera data is needed differ between KITTI / nuScenes / Waymo.
    if dataset_class_name == 'KittiDataset':
        file_client_args = dict(backend='disk')
        dataset_cfg.update(
            test_mode=False,
            split='training',
            modality=dict(
                use_lidar=True,
                use_depth=False,
                use_lidar_intensity=True,
                use_camera=with_mask,
            ),
            pipeline=[
                dict(
                    type='LoadPointsFromFile',
                    coord_type='LIDAR',
                    load_dim=4,
                    use_dim=4,
                    file_client_args=file_client_args),
                dict(
                    type='LoadAnnotations3D',
                    with_bbox_3d=True,
                    with_label_3d=True,
                    file_client_args=file_client_args)
            ])
    elif dataset_class_name == 'NuScenesDataset':
        dataset_cfg.update(
            use_valid_flag=True,
            pipeline=[
                dict(
                    type='LoadPointsFromFile',
                    coord_type='LIDAR',
                    load_dim=5,
                    use_dim=5),
                dict(
                    type='LoadPointsFromMultiSweeps',
                    sweeps_num=10,
                    use_dim=[0, 1, 2, 3, 4],
                    pad_empty_sweeps=True,
                    remove_close=True),
                dict(
                    type='LoadAnnotations3D',
                    with_bbox_3d=True,
                    with_label_3d=True)
            ])
    elif dataset_class_name == 'WaymoDataset':
        file_client_args = dict(backend='disk')
        dataset_cfg.update(
            test_mode=False,
            split='training',
            modality=dict(
                use_lidar=True,
                use_depth=False,
                use_lidar_intensity=True,
                use_camera=False,
            ),
            pipeline=[
                dict(
                    type='LoadPointsFromFile',
                    coord_type='LIDAR',
                    load_dim=6,
                    use_dim=5,
                    file_client_args=file_client_args),
                dict(
                    type='LoadAnnotations3D',
                    with_bbox_3d=True,
                    with_label_3d=True,
                    file_client_args=file_client_args)
            ])
    dataset = build_dataset(dataset_cfg)
    if database_save_path is None:
        database_save_path = osp.join(data_path, f'{info_prefix}_gt_database')
    if db_info_save_path is None:
        db_info_save_path = osp.join(data_path,
                                     f'{info_prefix}_dbinfos_train.pkl')
    mmcv.mkdir_or_exist(database_save_path)
    all_db_infos = dict()
    if with_mask:
        # Map image file names to COCO image ids so each sample's 2D
        # annotations can be looked up later in the loop.
        coco = COCO(osp.join(data_path, mask_anno_path))
        imgIds = coco.getImgIds()
        file2id = dict()
        for i in imgIds:
            info = coco.loadImgs([i])[0]
            file2id.update({info['file_name']: i})
    group_counter = 0
    for j in track_iter_progress(list(range(len(dataset)))):
        input_dict = dataset.get_data_info(j)
        dataset.pre_pipeline(input_dict)
        example = dataset.pipeline(input_dict)
        annos = example['ann_info']
        image_idx = example['sample_idx']
        points = example['points'].tensor.numpy()
        gt_boxes_3d = annos['gt_bboxes_3d'].tensor.numpy()
        names = annos['gt_names']
        group_dict = dict()
        if 'group_ids' in annos:
            group_ids = annos['group_ids']
        else:
            group_ids = np.arange(gt_boxes_3d.shape[0], dtype=np.int64)
        difficulty = np.zeros(gt_boxes_3d.shape[0], dtype=np.int32)
        if 'difficulty' in annos:
            difficulty = annos['difficulty']
        num_obj = gt_boxes_3d.shape[0]
        point_indices = box_np_ops.points_in_rbbox(points, gt_boxes_3d)
        if with_mask:
            # prepare masks
            gt_boxes = annos['gt_bboxes']
            img_path = osp.split(example['img_info']['filename'])[-1]
            if img_path not in file2id.keys():
                print(f'skip image {img_path} for empty mask')
                continue
            img_id = file2id[img_path]
            kins_annIds = coco.getAnnIds(imgIds=img_id)
            kins_raw_info = coco.loadAnns(kins_annIds)
            kins_ann_info = _parse_coco_ann_info(kins_raw_info)
            h, w = annos['img_shape'][:2]
            gt_masks = [
                _poly2mask(mask, h, w) for mask in kins_ann_info['masks']
            ]
            # get mask inds based on iou mapping
            bbox_iou = bbox_overlaps(kins_ann_info['bboxes'], gt_boxes)
            mask_inds = bbox_iou.argmax(axis=0)
            valid_inds = (bbox_iou.max(axis=0) > 0.5)
            # mask the image
            # use more precise crop when it is ready
            # object_img_patches = np.ascontiguousarray(
            #     np.stack(object_img_patches, axis=0).transpose(0, 3, 1, 2))
            # crop image patches using roi_align
            # object_img_patches = crop_image_patch_v2(
            #     torch.Tensor(gt_boxes),
            #     torch.Tensor(mask_inds).long(), object_img_patches)
            object_img_patches, object_masks = crop_image_patch(
                gt_boxes, gt_masks, mask_inds, annos['img'])
        for i in range(num_obj):
            filename = f'{image_idx}_{names[i]}_{i}.bin'
            abs_filepath = osp.join(database_save_path, filename)
            rel_filepath = osp.join(f'{info_prefix}_gt_database', filename)
            # save point clouds and image patches for each object
            gt_points = points[point_indices[:, i]]
            # Shift points into the box's local frame (box center at origin).
            gt_points[:, :3] -= gt_boxes_3d[i, :3]
            if with_mask:
                if object_masks[i].sum() == 0 or not valid_inds[i]:
                    # Skip object for empty or invalid mask
                    continue
                img_patch_path = abs_filepath + '.png'
                mask_patch_path = abs_filepath + '.mask.png'
                mmcv.imwrite(object_img_patches[i], img_patch_path)
                mmcv.imwrite(object_masks[i], mask_patch_path)
            # BUGFIX: open in binary mode ('wb'); `ndarray.tofile` writes raw
            # bytes, and text mode ('w') corrupts them on platforms that
            # translate newline bytes (e.g. Windows).
            with open(abs_filepath, 'wb') as f:
                gt_points.tofile(f)
            if (used_classes is None) or names[i] in used_classes:
                db_info = {
                    'name': names[i],
                    'path': rel_filepath,
                    'image_idx': image_idx,
                    'gt_idx': i,
                    'box3d_lidar': gt_boxes_3d[i],
                    'num_points_in_gt': gt_points.shape[0],
                    'difficulty': difficulty[i],
                }
                local_group_id = group_ids[i]
                # if local_group_id >= 0:
                if local_group_id not in group_dict:
                    group_dict[local_group_id] = group_counter
                    group_counter += 1
                db_info['group_id'] = group_dict[local_group_id]
                if 'score' in annos:
                    db_info['score'] = annos['score'][i]
                if with_mask:
                    db_info.update({'box2d_camera': gt_boxes[i]})
                if names[i] in all_db_infos:
                    all_db_infos[names[i]].append(db_info)
                else:
                    all_db_infos[names[i]] = [db_info]
    for k, v in all_db_infos.items():
        print(f'load {len(v)} {k} database infos')
    with open(db_info_save_path, 'wb') as f:
        pickle.dump(all_db_infos, f)
The provided code snippet includes necessary dependencies for implementing the `waymo_data_prep` function. Write a Python function `def waymo_data_prep(root_path, info_prefix, version, out_dir, workers, max_sweeps=5)` to solve the following problem:
Prepare the info file for waymo dataset. Args: root_path (str): Path of dataset root. info_prefix (str): The prefix of info filenames. out_dir (str): Output directory of the generated info file. workers (int): Number of threads to be used. max_sweeps (int): Number of input consecutive frames. Default: 5. Here we store pose information of these frames for later use.
Here is the function:
def waymo_data_prep(root_path,
                    info_prefix,
                    version,
                    out_dir,
                    workers,
                    max_sweeps=5):
    """Prepare the info file for waymo dataset.
    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        out_dir (str): Output directory of the generated info file.
        workers (int): Number of threads to be used.
        max_sweeps (int): Number of input consecutive frames. Default: 5.
            Here we store pose information of these frames for later use.
    """
    from tools.data_converter import waymo_converter as waymo
    splits = ['training', 'validation', 'testing']
    for i, split in enumerate(splits):
        load_dir = osp.join(root_path, 'waymo_format', split)
        if split == 'validation':
            # Validation frames are merged into the KITTI-style 'training'
            # folder; the numeric prefix keeps the splits distinguishable.
            save_dir = osp.join(out_dir, 'kitti_format', 'training')
        else:
            save_dir = osp.join(out_dir, 'kitti_format', split)
        converter = waymo.Waymo2KITTI(
            load_dir,
            save_dir,
            prefix=str(i),
            workers=workers,
            # BUGFIX: the split is named 'testing', not 'test'. The old
            # comparison (split == 'test') was always False, so the
            # converter never entered test mode and tried to export
            # (non-existent) labels for the test split.
            test_mode=(split == 'testing'))
        converter.convert()
    # Generate waymo infos
    out_dir = osp.join(out_dir, 'kitti_format')
    kitti.create_waymo_info_file(out_dir, info_prefix, max_sweeps=max_sweeps)
    create_groundtruth_database(
        'WaymoDataset',
        out_dir,
        info_prefix,
        f'{out_dir}/{info_prefix}_infos_train.pkl',
        relative_path=False,
        with_mask=False)
165,723 | import argparse
import base64
import mmcv
import numpy as np
from nuimages import NuImages
from nuimages.utils.utils import mask_decode, name_to_index_mapping
from os import path as osp
def parse_args():
    """Parse command-line arguments for the nuImages data converter.

    Returns:
        argparse.Namespace: Parsed options (dataset root, versions to
        process, output directory, worker count and filename tag).
    """
    parser = argparse.ArgumentParser(description='Data converter arg parser')
    parser.add_argument(
        '--data-root',
        type=str,
        default='./data/nuimages',
        help='specify the root path of dataset')
    parser.add_argument(
        '--version',
        type=str,
        nargs='+',
        default=['v1.0-mini'],
        required=False,
        help='specify the dataset version')
    parser.add_argument(
        '--out-dir',
        type=str,
        default='./data/nuimages/annotations/',
        required=False,
        help='path to save the exported json')
    parser.add_argument(
        '--nproc',
        type=int,
        default=4,
        required=False,
        help='workers to process semantic masks')
    parser.add_argument('--extra-tag', type=str, default='nuimages')
    args = parser.parse_args()
    return args | null |
165,724 | import argparse
import base64
import mmcv
import numpy as np
from nuimages import NuImages
from nuimages.utils.utils import mask_decode, name_to_index_mapping
from os import path as osp
nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
'barrier')
# NOTE(review): the body of `get_img_annos` is missing in this snippet; it is
# expected to return (annotations, max_class_id) for a single image — confirm
# against the full source file.
def get_img_annos(nuim, img_info, cat2id, out_dir, data_root, seg_root):
def export_nuim_to_coco(nuim, data_root, out_dir, extra_tag, version, nproc):
    """Export nuImages annotations into a COCO-style json file.

    Args:
        nuim (NuImages): nuImages api object.
        data_root (str): Root path of the raw data.
        out_dir (str): Output directory for the json and semantic masks.
        extra_tag (str): Prefix of the output json filename.
        version (str): Dataset version, used in the output filename.
        nproc (int): Number of workers for parallel annotation processing.
    """
    print('Process category information')
    categories = []  # NOTE(review): immediately overwritten below; redundant.
    categories = [
        dict(id=nus_categories.index(cat_name), name=cat_name)
        for cat_name in nus_categories
    ]
    cat2id = {k_v['name']: k_v['id'] for k_v in categories}
    images = []
    print('Process image meta information...')
    for sample_info in mmcv.track_iter_progress(nuim.sample_data):
        # Only key frames are exported as images.
        if sample_info['is_key_frame']:
            img_idx = len(images)
            images.append(
                dict(
                    id=img_idx,
                    token=sample_info['token'],
                    file_name=sample_info['filename'],
                    width=sample_info['width'],
                    height=sample_info['height']))
    seg_root = f'{out_dir}semantic_masks'
    mmcv.mkdir_or_exist(seg_root)
    mmcv.mkdir_or_exist(osp.join(data_root, 'calibrated'))
    # Declared global so the closure is picklable by the multiprocessing
    # helpers inside mmcv.track_parallel_progress.
    global process_img_anno
    def process_img_anno(img_info):
        single_img_annos, max_cls_id = get_img_annos(nuim, img_info, cat2id,
                                                     out_dir, data_root,
                                                     seg_root)
        return single_img_annos, max_cls_id
    print('Process img annotations...')
    if nproc > 1:
        outputs = mmcv.track_parallel_progress(
            process_img_anno, images, nproc=nproc)
    else:
        outputs = []
        for img_info in mmcv.track_iter_progress(images):
            outputs.append(process_img_anno(img_info))
    # Determine the index of object annotation
    print('Process annotation information...')
    annotations = []
    max_cls_ids = []
    for single_img_annos, max_cls_id in outputs:
        max_cls_ids.append(max_cls_id)
        for img_anno in single_img_annos:
            # Annotation ids are assigned sequentially across all images.
            img_anno.update(id=len(annotations))
            annotations.append(img_anno)
    max_cls_id = max(max_cls_ids)
    print(f'Max ID of class in the semantic map: {max_cls_id}')
    coco_format_json = dict(
        images=images, annotations=annotations, categories=categories)
    mmcv.mkdir_or_exist(out_dir)
    out_file = osp.join(out_dir, f'{extra_tag}_{version}.json')
    print(f'Annotation dumped to {out_file}')
    mmcv.dump(coco_format_json, out_file) | null |
165,726 | import numpy as np
from collections import OrderedDict
from concurrent import futures as futures
from os import path as osp
from pathlib import Path
from skimage import io
def get_image_index_str(img_idx, use_prefix_id=False):
    """Return ``img_idx`` as a zero-padded index string.

    Args:
        img_idx (int): Image index.
        use_prefix_id (bool): Pad to 7 digits (ids carrying a split
            prefix) instead of the default 6.

    Returns:
        str: Zero-padded index string.
    """
    width = 7 if use_prefix_id else 6
    return str(img_idx).zfill(width)
def kitti_result_line(result_dict, precision=4):
    """Serialize one detection into a KITTI label line.

    Args:
        result_dict (dict): Mapping from KITTI field names to values.
            ``name`` and ``bbox`` have no defaults and must be provided;
            all other fields fall back to the KITTI "ignore" defaults.
        precision (int): Number of decimals for float fields. Default: 4.

    Returns:
        str: Space-separated KITTI label line.

    Raises:
        ValueError: If a mandatory field is missing (passed as None).
        KeyError: If ``result_dict`` contains an unsupported field name.
    """
    float_fmt = '{{:.{}f}}'.format(precision)
    defaults = OrderedDict([
        ('name', None),
        ('truncated', -1),
        ('occluded', -1),
        ('alpha', -10),
        ('bbox', None),
        ('dimensions', [-1, -1, -1]),
        ('location', [-1000, -1000, -1000]),
        ('rotation_y', -10),
        ('score', 0.0),
    ])
    # Start with every field unset, then overlay the caller's values.
    values = OrderedDict((field, None) for field in defaults)
    for field, value in result_dict.items():
        if defaults[field] is None and value is None:
            raise ValueError('you must specify a value for {}'.format(field))
        values[field] = value
    parts = []
    for field, value in values.items():
        if field == 'name':
            parts.append(value)
        elif field in ('truncated', 'alpha', 'rotation_y', 'score'):
            if value is None:
                parts.append(str(defaults[field]))
            else:
                parts.append(float_fmt.format(value))
        elif field == 'occluded':
            if value is None:
                parts.append(str(defaults[field]))
            else:
                parts.append('{}'.format(value))
        elif field in ('bbox', 'dimensions', 'location'):
            if value is None:
                parts += [str(v) for v in defaults[field]]
            else:
                parts += [float_fmt.format(v) for v in value]
        else:
            raise ValueError('unknown key. supported key:{}'.format(
                values.keys()))
    return ' '.join(parts)
def kitti_anno_to_label_file(annos, folder):
    """Dump KITTI-format annotations into per-image label txt files.

    Args:
        annos (list[dict]): One dict per image, holding parallel arrays
            over its objects ('name', 'alpha', 'bbox', 'location',
            'dimensions', 'rotation_y', 'score') plus
            ``metadata['image_idx']``.
        folder (str | Path): Directory receiving ``<image_idx>.txt`` files.
    """
    folder = Path(folder)
    for anno in annos:
        image_idx = anno['metadata']['image_idx']
        label_lines = []
        # One KITTI label line per object in this image.
        for j in range(anno['bbox'].shape[0]):
            label_dict = {
                'name': anno['name'][j],
                'alpha': anno['alpha'][j],
                'bbox': anno['bbox'][j],
                'location': anno['location'][j],
                'dimensions': anno['dimensions'][j],
                'rotation_y': anno['rotation_y'][j],
                'score': anno['score'][j],
            }
            label_line = kitti_result_line(label_dict)
            label_lines.append(label_line)
        label_file = folder / f'{get_image_index_str(image_idx)}.txt'
        label_str = '\n'.join(label_lines)
        with open(label_file, 'w') as f:
            f.write(label_str) | null |
165,727 | import mmcv
import numpy as np
from collections import OrderedDict
from nuscenes.utils.geometry_utils import view_points
from pathlib import Path
from mmdet3d.core.bbox import box_np_ops
from .kitti_data_utils import get_kitti_image_info, get_waymo_image_info
from .nuscenes_converter import post_process_coords
The provided code snippet includes necessary dependencies for implementing the `convert_to_kitti_info_version2` function. Write a Python function `def convert_to_kitti_info_version2(info)` to solve the following problem:
convert kitti info v1 to v2 if possible. Args: info (dict): Info of the input kitti data. - image (dict): image info - calib (dict): calibration info - point_cloud (dict): point cloud info
Here is the function:
def convert_to_kitti_info_version2(info):
    """convert kitti info v1 to v2 if possible.

    v2 groups the flat v1 keys into 'image' / 'calib' / 'point_cloud'
    sub-dicts; ``info`` is mutated in place.

    Args:
        info (dict): Info of the input kitti data.
            - image (dict): image info
            - calib (dict): calibration info
            - point_cloud (dict): point cloud info
    """
    # NOTE(review): if only some of the three v2 keys are present, this
    # still rebuilds all three from v1 keys and would KeyError on a
    # partially-converted dict — presumably inputs are all-v1 or all-v2;
    # confirm against callers.
    if 'image' not in info or 'calib' not in info or 'point_cloud' not in info:
        info['image'] = {
            'image_shape': info['img_shape'],
            'image_idx': info['image_idx'],
            'image_path': info['img_path'],
        }
        info['calib'] = {
            'R0_rect': info['calib/R0_rect'],
            'Tr_velo_to_cam': info['calib/Tr_velo_to_cam'],
            'P2': info['calib/P2'],
        }
        info['point_cloud'] = {
            'velodyne_path': info['velodyne_path'],
        } | convert kitti info v1 to v2 if possible. Args: info (dict): Info of the input kitti data. - image (dict): image info - calib (dict): calibration info - point_cloud (dict): point cloud info |
165,728 | import mmcv
import numpy as np
from concurrent import futures as futures
from os import path as osp
from scipy import io as sio
The provided code snippet includes necessary dependencies for implementing the `random_sampling` function. Write a Python function `def random_sampling(points, num_points, replace=None, return_choices=False)` to solve the following problem:
Random sampling. Sampling point cloud to a certain number of points. Args: points (ndarray): Point cloud. num_points (int): The number of samples. replace (bool): Whether the sample is with or without replacement. return_choices (bool): Whether to return choices. Returns: points (ndarray): Point cloud after sampling.
Here is the function:
def random_sampling(points, num_points, replace=None, return_choices=False):
    """Random sampling.

    Sampling point cloud to a certain number of points.

    Args:
        points (ndarray): Point cloud.
        num_points (int): The number of samples.
        replace (bool): Whether the sample is with or without replacement.
        return_choices (bool): Whether to return choices.

    Returns:
        points (ndarray): Point cloud after sampling.
    """
    if replace is None:
        # Only sample with replacement when fewer points are available than
        # requested (np.random.choice would raise otherwise).
        replace = (points.shape[0] < num_points)
    choices = np.random.choice(points.shape[0], num_points, replace=replace)
    if return_choices:
        return points[choices], choices
    else:
        return points[choices] | Random sampling. Sampling point cloud to a certain number of points. Args: points (ndarray): Point cloud. num_points (int): The number of samples. replace (bool): Whether the sample is with or without replacement. return_choices (bool): Whether to return choices. Returns: points (ndarray): Point cloud after sampling. |
165,729 | import argparse
import numpy as np
import os
def fix_lyft(root_folder='./data/lyft', version='v1.01'):
    """Fix a truncated lidar file in the released Lyft train split.

    One point cloud file is missing the last two float values of its
    final point; appending (100.0, 1.0) makes it reshape cleanly to
    (-1, 5). No-op (with a message) if the file already reshapes.

    Args:
        root_folder (str): Lyft dataset root. Default: './data/lyft'.
        version (str): Dataset version. Default: 'v1.01'.
    """
    # refer to https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/discussion/110000 # noqa
    lidar_path = 'lidar/host-a011_lidar1_1233090652702363606.bin'
    root_folder = os.path.join(root_folder, f'{version}-train')
    lidar_path = os.path.join(root_folder, lidar_path)
    assert os.path.isfile(lidar_path), f'Please download the complete Lyft ' \
        f'dataset and make sure {lidar_path} is present.'
    points = np.fromfile(lidar_path, dtype=np.float32, count=-1)
    try:
        points.reshape([-1, 5])
        print(f'This fix is not required for version {version}.')
    except ValueError:
        new_points = np.array(list(points) + [100.0, 1.0], dtype='float32')
        new_points.tofile(lidar_path)
        print(f'Appended 100.0 and 1.0 to the end of {lidar_path}.') | null |
165,737 | import argparse
import torch
from mmcv.runner import save_checkpoint
from torch import nn as nn
from mmdet.apis import init_model
def fuse_conv_bn(conv, bn):
    """Fold a frozen BatchNorm layer into the preceding conv layer.

    At inference time a BatchNorm layer is an affine per-channel
    transform built from its running statistics, so it can be folded
    into the conv's weight and bias. ``conv`` is modified in place.

    Args:
        conv (nn.Conv2d): Convolution layer preceding ``bn``.
        bn (nn.BatchNorm2d): BatchNorm layer to fold into ``conv``.

    Returns:
        nn.Conv2d: The same conv module with fused parameters.
    """
    # Per-channel scale of the BN affine transform.
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    weight = conv.weight
    if conv.bias is not None:
        bias = conv.bias
    else:
        bias = torch.zeros_like(bn.running_mean)
    conv.weight = nn.Parameter(
        weight * scale.reshape([conv.out_channels, 1, 1, 1]))
    conv.bias = nn.Parameter((bias - bn.running_mean) * scale + bn.bias)
    return conv
def fuse_module(m):
    """Recursively fuse Conv2d + BatchNorm pairs inside module ``m``.

    Walks the direct children of each submodule; whenever a
    BatchNorm2d/SyncBatchNorm immediately follows a Conv2d, folds the BN
    into the conv and replaces the BN with an Identity so the module
    structure (and state-dict keys) is preserved.

    Args:
        m (nn.Module): Module to fuse in place.

    Returns:
        nn.Module: The same module, modified in place.
    """
    last_conv = None
    last_conv_name = None
    for name, child in m.named_children():
        if isinstance(child, (nn.BatchNorm2d, nn.SyncBatchNorm)):
            if last_conv is None:  # only fuse BN that is after Conv
                continue
            fused_conv = fuse_conv_bn(last_conv, child)
            m._modules[last_conv_name] = fused_conv
            # To reduce changes, set BN as Identity instead of deleting it.
            m._modules[name] = nn.Identity()
            last_conv = None
        elif isinstance(child, nn.Conv2d):
            last_conv = child
            last_conv_name = name
        else:
            # Recurse into containers (Sequential, blocks, ...).
            fuse_module(child)
    return m | null |
165,740 | from typing import Union, Dict, Optional, Any
import logging
import traceback
from typing_extensions import Literal
from .pygwalker import PygWalker
from pygwalker.services.data_parsers import get_parser
from pygwalker.services.preview_image import render_gw_chart_preview_html
from pygwalker.data_parsers.base import FieldSpec
from pygwalker.data_parsers.database_parser import Connector
from pygwalker._typing import DataFrame
from pygwalker.utils.randoms import generate_hash_code
from pygwalker.utils.check_walker_params import check_expired_params
logger = logging.getLogger(__name__)
class PygWalker:
"""PygWalker"""
def __init__(
self,
*,
gid: Optional[Union[int, str]],
dataset: Union[DataFrame, Connector, str],
field_specs: Dict[str, Any],
spec: str,
source_invoke_code: str,
theme_key: Literal['vega', 'g2'],
dark: Literal['media', 'light', 'dark'],
show_cloud_tool: Optional[bool],
use_preview: bool,
use_kernel_calc: Optional[bool],
use_cloud_calc: Optional[bool],
use_save_tool: bool,
is_export_dataframe: bool,
kanaries_api_key: str,
default_tab: Literal["data", "vis"],
gw_mode: Literal["explore", "renderer", "filter_renderer", "table"],
**kwargs
):
self.kanaries_api_key = kanaries_api_key or GlobalVarManager.kanaries_api_key
if gid is None:
self.gid = generate_hash_code()
else:
self.gid = gid
self.cloud_service = CloudService(self.kanaries_api_key)
self.data_parser = self._get_data_parser(
dataset=dataset,
field_specs=field_specs,
use_cloud_calc=use_cloud_calc,
kanaries_api_key=self.kanaries_api_key,
cloud_service=self.cloud_service
)
self.use_kernel_calc = self.data_parser.data_size > JUPYTER_WIDGETS_BYTE_LIMIT if use_kernel_calc is None else use_kernel_calc
self.origin_data_source = self.data_parser.to_records(500 if self.use_kernel_calc else None)
self.field_specs = self.data_parser.raw_fields
self.spec = spec
self.source_invoke_code = source_invoke_code
self.theme_key = theme_key
self.dark = dark
self.data_source_id = rand_str()
self.other_props = kwargs
self.tunnel_id = "tunnel!"
self.show_cloud_tool = bool(self.kanaries_api_key) if show_cloud_tool is None else show_cloud_tool
self.use_preview = use_preview
self._init_spec(spec, self.field_specs)
self.use_save_tool = use_save_tool
self.parse_dsl_type = self._get_parse_dsl_type(self.data_parser)
self.gw_mode = gw_mode
self.dataset_type = self.data_parser.dataset_tpye
self.is_export_dataframe = is_export_dataframe
self._last_exported_dataframe = None
self.default_tab = default_tab
self.use_cloud_calc = use_cloud_calc
check_update()
# Temporarily adapt to pandas import module bug
if self.use_kernel_calc:
try:
self.data_parser.get_datas_by_sql("SELECT 1 FROM pygwalker_mid_table LIMIT 1")
except Exception:
pass
if GlobalVarManager.privacy == "offline":
self.show_cloud_tool = False
def last_exported_dataframe(self) -> Optional[pd.DataFrame]:
return self._last_exported_dataframe
def _get_data_parser(
self,
*,
dataset: Union[DataFrame, Connector, str],
field_specs: Dict[str, Any],
use_cloud_calc: bool,
kanaries_api_key: str,
cloud_service: CloudService
) -> BaseDataParser:
data_parser = get_parser(
dataset,
field_specs,
other_params={"kanaries_api_key": kanaries_api_key}
)
if not use_cloud_calc:
return data_parser
dataset_id = cloud_service.create_cloud_dataset(
data_parser,
f"temp_{rand_str()}",
False,
True
)
return get_parser(
dataset_id,
field_specs,
other_params={"kanaries_api_key": kanaries_api_key}
)
def _get_parse_dsl_type(self, data_parser: BaseDataParser) -> Literal["server", "client"]:
if data_parser.dataset_tpye.startswith("connector"):
return "server"
if data_parser.dataset_tpye == "cloud_dataset":
return "server"
return "client"
def _init_spec(self, spec: Dict[str, Any], field_specs: List[Dict[str, Any]]):
spec_obj, spec_type = get_spec_json(spec)
self._update_vis_spec(spec_obj["config"] and fill_new_fields(spec_obj["config"], field_specs))
self.spec_type = spec_type
self._chart_map = self._parse_chart_map_dict(spec_obj["chart_map"])
self.spec_version = spec_obj.get("version", None)
self.workflow_list = spec_obj.get("workflow_list", [])
def _update_vis_spec(self, vis_spec: List[Dict[str, Any]]):
self.vis_spec = vis_spec
self._chart_name_index_map = {
item["name"]: index
for index, item in enumerate(vis_spec)
}
def _get_chart_map_dict(self, chart_map: Dict[str, ChartData]) -> Dict[str, Any]:
return {
key: value.dict(by_alias=True)
for key, value in chart_map.items()
}
def _parse_chart_map_dict(self, chart_map_dict: Dict[str, Any]) -> Dict[str, ChartData]:
return {
key: ChartData.parse_obj(value)
for key, value in chart_map_dict.items()
}
def to_html(self) -> str:
props = self._get_props()
return self._get_render_iframe(props)
def to_html_without_iframe(self) -> str:
props = self._get_props()
html = render_gwalker_html(self.gid, props)
return html
def display_on_convert_html(self):
"""
Display on jupyter-nbconvert html.
"""
props = self._get_props("jupyter")
iframe_html = self._get_render_iframe(props)
display_html(iframe_html)
def display_on_jupyter(self):
"""
Display on jupyter notebook/lab.
If share has large data loading, only sample data can be displayed when reload.
After that, it will be changed to python for data calculation,
and only a small amount of data will be output to the front end to complete the analysis of big data.
"""
data_source = get_max_limited_datas(self.origin_data_source, JUPYTER_BYTE_LIMIT)
props = self._get_props(
"jupyter",
data_source,
len(self.origin_data_source) > len(data_source)
)
iframe_html = self._get_render_iframe(props)
if len(self.origin_data_source) > len(data_source):
upload_tool = BatchUploadDatasToolOnJupyter()
display_html(iframe_html)
upload_tool.run(
records=self.origin_data_source,
sample_data_count=0,
data_source_id=self.data_source_id,
gid=self.gid,
tunnel_id=self.tunnel_id,
)
else:
display_html(iframe_html)
def display_on_jupyter_use_widgets(self, iframe_height: str = "1010px"):
"""
use ipywidgets, Display on jupyter notebook/lab.
When the kernel is down, the chart will not be displayed, so use `display_on_jupyter` to share
"""
comm = HackerCommunication(self.gid)
preview_tool = PreviewImageTool(self.gid)
data_source = get_max_limited_datas(self.origin_data_source, JUPYTER_WIDGETS_BYTE_LIMIT)
props = self._get_props(
"jupyter_widgets",
data_source,
len(self.origin_data_source) > len(data_source)
)
iframe_html = self._get_render_iframe(props, iframe_height=iframe_height)
html_widgets = ipywidgets.Box(
[ipywidgets.HTML(iframe_html), comm.get_widgets()],
layout=ipywidgets.Layout(display='block')
)
self._init_callback(comm, preview_tool)
display_html(html_widgets)
preview_tool.init_display()
preview_tool.render_gw_review(self._get_gw_preview_html())
def display_preview_on_jupyter(self):
"""
Display preview on jupyter notebook/lab.
"""
display_html(self._get_gw_preview_html())
def chart_list(self) -> List[str]:
"""
Get the list of saved charts.
"""
return list(self._chart_map.keys())
    def save_chart_to_file(self, chart_name: str, path: str, save_type: Literal["html", "png"] = "png"):
        """
        Save the chart to a file.

        Args:
            chart_name (str): Name of a previously saved chart.
            path (str): Destination file path.
            save_type: Output format, "html" or "png". Default: "png".

        Raises:
            ValueError: If ``save_type`` is neither "html" nor "png".
        """
        if save_type == "html":
            content = self.export_chart_html(chart_name)
            write_mode = "w"
            encoding = "utf-8"
        elif save_type == "png":
            # PNG bytes must be written in binary mode without an encoding.
            content = self.export_chart_png(chart_name)
            write_mode = "wb"
            encoding = None
        else:
            raise ValueError(f"save_type must be html or png, but got {save_type}")
        with open(path, write_mode, encoding=encoding) as f:
            f.write(content)
def export_chart_html(self, chart_name: str) -> str:
"""
Export the chart as a html string.
"""
return self._get_gw_chart_preview_html(
chart_name,
title="",
desc=""
)
def export_chart_png(self, chart_name: str) -> bytes:
"""
Export the chart as a png bytes.
"""
chart_data = self._get_chart_by_name(chart_name)
with urllib.request.urlopen(chart_data.single_chart) as png_string:
return png_string.read()
def display_chart(self, chart_name: str, *, title: Optional[str] = None, desc: str = ""):
"""
Display the chart in the notebook.
"""
if title is None:
title = chart_name
html = self._get_gw_chart_preview_html(
chart_name,
title=title,
desc=desc
)
display_html(html)
def _get_chart_by_name(self, chart_name: str) -> ChartData:
if chart_name not in self._chart_map:
raise ValueError(f"chart_name: {chart_name} not found, please confirm whether to save")
return self._chart_map[chart_name]
def _init_callback(self, comm: BaseCommunication, preview_tool: PreviewImageTool = None):
upload_tool = BatchUploadDatasToolOnWidgets(comm)
def reuqest_data_callback(_):
upload_tool.run(
records=self.origin_data_source,
sample_data_count=0,
data_source_id=self.data_source_id
)
return {}
def get_latest_vis_spec(_):
return {"visSpec": self.vis_spec}
def save_chart_endpoint(data: Dict[str, Any]):
chart_data = ChartData.parse_obj(data)
self._chart_map[data["title"]] = chart_data
def update_spec(data: Dict[str, Any]):
spec_obj = {
"config": data["visSpec"],
"chart_map": {},
"version": __version__,
"workflow_list": data.get("workflowList", [])
}
self._update_vis_spec(data["visSpec"])
self.spec_version = __version__
self.workflow_list = data.get("workflowList", [])
if self.use_preview:
preview_tool.render_gw_review(self._get_gw_preview_html())
save_chart_endpoint(data["chartData"])
if self.spec_type == "json_file":
with open(self.spec, "w", encoding="utf-8") as f:
f.write(json.dumps(spec_obj))
if self.spec_type == "json_ksf":
self.cloud_service.write_config_to_cloud(self.spec[6:], json.dumps(spec_obj))
def upload_spec_to_cloud(data: Dict[str, Any]):
if data["newToken"]:
set_config({"kanaries_token": data["newToken"]})
GlobalVarManager.kanaries_api_key = data["newToken"]
spec_obj = {
"config": self.vis_spec,
"chart_map": {},
"version": __version__,
"workflow_list": self.workflow_list,
}
file_name = data["fileName"]
workspace_name = self.cloud_service.get_kanaries_user_info()["workspaceName"]
path = f"{workspace_name}/{file_name}"
self.cloud_service.write_config_to_cloud(path, json.dumps(spec_obj))
return {"specFilePath": path}
def _get_datas(data: Dict[str, Any]):
sql = data["sql"]
datas = self.data_parser.get_datas_by_sql(sql)
if len(datas) > RESPONSE_MAX_DATA_LENGTH:
raise DataCountLimitError()
return {
"datas": datas
}
def _get_datas_by_payload(data: Dict[str, Any]):
datas = self.data_parser.get_datas_by_payload(data["payload"])
if len(datas) > RESPONSE_MAX_DATA_LENGTH:
raise DataCountLimitError()
return {
"datas": datas
}
def _batch_get_datas_by_sql(data: Dict[str, Any]):
result = self.data_parser.batch_get_datas_by_sql(data["queryList"])
for datas in result:
if len(datas) > RESPONSE_MAX_DATA_LENGTH:
raise DataCountLimitError()
return {
"datas": result
}
def _batch_get_datas_by_payload(data: Dict[str, Any]):
result = self.data_parser.batch_get_datas_by_payload(data["queryList"])
for datas in result:
if len(datas) > RESPONSE_MAX_DATA_LENGTH:
raise DataCountLimitError()
return {
"datas": result
}
def _get_spec_by_text(data: Dict[str, Any]):
callback = self.other_props.get(
"custom_ask_callback",
self.cloud_service.get_spec_by_text
)
return {
"data": callback(data["metas"], data["query"])
}
def _export_dataframe_by_payload(data: Dict[str, Any]):
df = pd.DataFrame(self.data_parser.get_datas_by_payload(data["payload"]))
GlobalVarManager.set_last_exported_dataframe(df)
self._last_exported_dataframe = df
def _export_dataframe_by_sql(data: Dict[str, Any]):
sql = data["sql"]
df = pd.DataFrame(self.data_parser.get_datas_by_sql(sql))
GlobalVarManager.set_last_exported_dataframe(df)
self._last_exported_dataframe = df
def _upload_to_cloud_charts(data: Dict[str, Any]):
chart_id = self.cloud_service.upload_cloud_chart(
data_parser=self.data_parser,
chart_name=data["chartName"],
dataset_name=data["datasetName"],
workflow=data["workflow"],
spec_list=data["visSpec"],
is_public=data["isPublic"],
)
return {"chartId": chart_id}
def _upload_to_cloud_dashboard(data: Dict[str, Any]):
dashboard_id = self.cloud_service.upload_cloud_dashboard(
data_parser=self.data_parser,
dashboard_name=data["chartName"],
dataset_name=data["datasetName"],
workflow_list=data["workflowList"],
spec_list=data["visSpec"],
is_public=data["isPublic"],
dark=self.dark
)
return {"dashboardId": dashboard_id}
comm.register("get_latest_vis_spec", get_latest_vis_spec)
comm.register("request_data", reuqest_data_callback)
if self.use_save_tool:
comm.register("upload_spec_to_cloud", upload_spec_to_cloud)
comm.register("update_spec", update_spec)
comm.register("save_chart", save_chart_endpoint)
if self.show_cloud_tool:
comm.register("upload_to_cloud_charts", _upload_to_cloud_charts)
comm.register("upload_to_cloud_dashboard", _upload_to_cloud_dashboard)
comm.register("get_spec_by_text", _get_spec_by_text)
if self.use_kernel_calc:
comm.register("get_datas", _get_datas)
comm.register("get_datas_by_payload", _get_datas_by_payload)
comm.register("batch_get_datas_by_sql", _batch_get_datas_by_sql)
comm.register("batch_get_datas_by_payload", _batch_get_datas_by_payload)
if self.is_export_dataframe:
comm.register("export_dataframe_by_payload", _export_dataframe_by_payload)
comm.register("export_dataframe_by_sql", _export_dataframe_by_sql)
def _send_props_track(self, props: Dict[str, Any]):
needed_fields = {
"id", "version", "hashcode", "themeKey",
"dark", "env", "specType", "needLoadDatas", "showCloudTool",
"useKernelCalc", "useSaveTool", "parseDslType", "gwMode", "datasetType",
"defaultTab", "useCloudCalc"
}
event_info = {key: value for key, value in props.items() if key in needed_fields}
event_info["hasKanariesToken"] = bool(self.kanaries_api_key)
track_event("invoke_props", event_info)
def _get_props(
self,
env: str = "",
data_source: Optional[Dict[str, Any]] = None,
need_load_datas: bool = False
) -> Dict[str, Any]:
if data_source is None:
data_source = self.origin_data_source
props = {
"id": self.gid,
"dataSource": data_source,
"len": len(data_source),
"version": __version__,
"hashcode": get_local_user_id(),
"userConfig": {
"privacy": GlobalVarManager.privacy,
},
"visSpec": self.vis_spec,
"rawFields": [
{**field, "offset": 0}
for field in self.field_specs
],
"fieldkeyGuard": False,
"themeKey": self.theme_key,
"dark": self.dark,
"sourceInvokeCode": self.source_invoke_code,
"dataSourceProps": {
'tunnelId': self.tunnel_id,
'dataSourceId': self.data_source_id,
},
"env": env,
"specType": self.spec_type,
"needLoadDatas": not self.use_kernel_calc and need_load_datas,
"showCloudTool": self.show_cloud_tool,
"needInitChart": not self._chart_map,
"useKernelCalc": self.use_kernel_calc,
"useSaveTool": self.use_save_tool,
"parseDslType": self.parse_dsl_type,
"gwMode": self.gw_mode,
"needLoadLastSpec": True,
"datasetType": self.dataset_type,
"extraConfig": self.other_props,
"fieldMetas": self.data_parser.field_metas,
"isExportDataFrame": self.is_export_dataframe,
"defaultTab": self.default_tab,
"useCloudCalc": self.use_cloud_calc
}
self._send_props_track(props)
return props
def _get_render_iframe(
self,
props: Dict[str, Any],
return_iframe: bool = True,
iframe_height: str = "1010px"
) -> str:
html = render_gwalker_html(self.gid, props)
if return_iframe:
srcdoc = m_html.escape(html)
return render_gwalker_iframe(self.gid, srcdoc, iframe_height)
else:
return html
def _get_gw_preview_html(self) -> str:
if not self.workflow_list:
return ""
datas = []
for workflow in self.workflow_list:
try:
datas.append(self.data_parser.get_datas_by_payload(workflow))
except ParserException:
datas.append([])
html = render_gw_preview_html(
self.vis_spec,
datas,
self.theme_key,
self.gid,
self.dark
)
return html
def _get_gw_chart_preview_html(self, chart_name: int, title: str, desc: str) -> str:
if chart_name not in self._chart_name_index_map:
raise ValueError(f"chart_name: {chart_name} not found.")
chart_index = self._chart_name_index_map[chart_name]
if not self.workflow_list:
return ""
data = self.data_parser.get_datas_by_payload(self.workflow_list[chart_index])
return render_gw_chart_preview_html(
single_vis_spec=self.vis_spec[chart_index],
data=data,
theme_key=self.theme_key,
title=title,
desc=desc,
dark=self.dark
)
class FieldSpec(NamedTuple):
    """Field specification.

    Args:
        - semanticType: '?' | 'nominal' | 'ordinal' | 'temporal' | 'quantitative'. default to '?'.
        - analyticType: '?' | 'dimension' | 'measure'. default to '?'.
        - display_as: str. The field name displayed. None means using the original column name.
    """
    # '?' lets the parser infer the type from the data.
    semanticType: Literal['?', 'nominal', 'ordinal', 'temporal', 'quantitative'] = '?'
    analyticType: Literal['?', 'dimension', 'measure'] = '?'
    # Display name; None keeps the original column name.
    display_as: Optional[str] = None
DataFrame = TypeVar("DataFrame", *dataframe_types)
def generate_hash_code() -> str:
    """Generate a unique id string.

    The first 16 chars are the current UTC time in microseconds as
    zero-padded hex (monotonic-ish, sortable); the last 16 are random.
    """
    micros = int(datetime.now(timezone.utc).timestamp() * 1000 * 1000)
    timestamp_hex = "{:016x}".format(micros)
    return timestamp_hex + rand_str(16)
def check_expired_params(params: Dict[str, Any]):
    """Warn about deprecated keyword arguments.

    Emits one warning log for each key of ``params`` that has been
    renamed, pointing at its replacement. Does not modify ``params``.
    """
    renamed = {
        "fieldSpecs": "field_specs",
        "themeKey": "theme_key",
        "debug": "spec_io_mode",
    }
    for old_name, new_name in renamed.items():
        if old_name not in params:
            continue
        logger.warning(
            f"Parameter `{old_name}` is expired, please use `{new_name}` instead."
        )
The provided code snippet includes necessary dependencies for implementing the `to_html` function. Write a Python function `def to_html( df: DataFrame, gid: Union[int, str] = None, *, spec: str = "", field_specs: Optional[Dict[str, FieldSpec]] = None, theme_key: Literal['vega', 'g2'] = 'g2', dark: Literal['media', 'light', 'dark'] = 'media', default_tab: Literal["data", "vis"] = "vis", **kwargs )` to solve the following problem:
Generate embeddable HTML code of Graphic Walker with data of `df`. Args: - df (pl.DataFrame | pd.DataFrame, optional): dataframe. - gid (Union[int, str], optional): GraphicWalker container div's id ('gwalker-{gid}') Kwargs: - field_specs (Dict[str, FieldSpec], optional): Specifications of some fields. They'll be automatically inferred from `df` if some fields are not specified. - spec (str): chart config data. config id, json, remote file url - theme_key ('vega' | 'g2'): theme type. - dark ('media' | 'light' | 'dark'): 'media': auto detect OS theme.
Here is the function:
def to_html(
    df: DataFrame,
    gid: Optional[Union[int, str]] = None,
    *,
    spec: str = "",
    field_specs: Optional[Dict[str, FieldSpec]] = None,
    theme_key: Literal['vega', 'g2'] = 'g2',
    dark: Literal['media', 'light', 'dark'] = 'media',
    default_tab: Literal["data", "vis"] = "vis",
    **kwargs
):
    """
    Generate embeddable HTML code of Graphic Walker with data of `df`.

    Args:
        - df (pl.DataFrame | pd.DataFrame, optional): dataframe.
        - gid (Union[int, str], optional): GraphicWalker container div's id ('gwalker-{gid}')

    Kwargs:
        - field_specs (Dict[str, FieldSpec], optional): Specifications of some fields. They'll be automatically inferred from `df` if some fields are not specified.
        - spec (str): chart config data. config id, json, remote file url
        - theme_key ('vega' | 'g2'): theme type.
        - dark ('media' | 'light' | 'dark'): 'media': auto detect OS theme.

    Returns:
        str: Embeddable HTML, or an error placeholder div if rendering fails.
    """
    check_expired_params(kwargs)
    if gid is None:
        gid = generate_hash_code()
    if field_specs is None:
        field_specs = {}
    # Build a walker with all cloud/kernel features disabled: exported HTML
    # must be self-contained and runnable without a Python kernel.
    walker = PygWalker(
        gid=gid,
        dataset=df,
        field_specs=field_specs,
        spec=spec,
        source_invoke_code="",
        theme_key=theme_key,
        dark=dark,
        show_cloud_tool=False,
        use_preview=False,
        use_kernel_calc=False,
        use_save_tool=False,
        gw_mode="explore",
        is_export_dataframe=False,
        kanaries_api_key="",
        default_tab=default_tab,
        use_cloud_calc=False,
        **kwargs
    )
    try:
        html = walker.to_html()
    except Exception as e:
        # Degrade gracefully: surface the error in the returned HTML rather
        # than raising inside a notebook render path.
        logger.error(traceback.format_exc())
        return f"<div>{str(e)}</div>"
    return html | Generate embeddable HTML code of Graphic Walker with data of `df`. Args: - df (pl.DataFrame | pd.DataFrame, optional): dataframe. - gid (Union[int, str], optional): GraphicWalker container div's id ('gwalker-{gid}') Kargs: - field_specs (Dict[str, FieldSpec], optional): Specifications of some fields. They'll been automatically inferred from `df` if some fields are not specified. - spec (str): chart config data. config id, json, remote file url - theme_key ('vega' | 'g2'): theme type. - dark ('media' | 'light' | 'dark'): 'media': auto detect OS theme. |
165,741 | from typing import Union, Dict, Optional, Any
import logging
import traceback
from typing_extensions import Literal
from .pygwalker import PygWalker
from pygwalker.services.data_parsers import get_parser
from pygwalker.services.preview_image import render_gw_chart_preview_html
from pygwalker.data_parsers.base import FieldSpec
from pygwalker.data_parsers.database_parser import Connector
from pygwalker._typing import DataFrame
from pygwalker.utils.randoms import generate_hash_code
from pygwalker.utils.check_walker_params import check_expired_params
def get_parser(
    dataset: Union[DataFrame, Connector, str],
    field_specs: Optional[Dict[str, FieldSpec]] = None,
    infer_string_to_date: bool = False,
    infer_number_to_dimension: bool = True,
    other_params: Optional[Dict[str, Any]] = None
) -> BaseDataParser:
    """Instantiate the data parser matching the type of ``dataset``.

    Args:
        dataset: A dataframe, database connector or dataset identifier.
        field_specs: Per-field overrides; defaults to an empty mapping.
        infer_string_to_date: Whether to try parsing string columns as dates.
        infer_number_to_dimension: Whether numeric columns default to dimensions.
        other_params: Extra implementation-specific options.

    Returns:
        BaseDataParser: Parser wrapping ``dataset``.
    """
    parser_cls = _get_data_parser(dataset)
    return parser_cls(
        dataset,
        field_specs if field_specs is not None else {},
        infer_string_to_date,
        infer_number_to_dimension,
        other_params if other_params is not None else {},
    )
def render_gw_chart_preview_html(
    *,
    single_vis_spec: Dict[str, Any],
    data: List[Dict[str, Any]],
    theme_key: str,
    title: str,
    desc: str,
    dark: str,
) -> str:
    """
    Render html for single chart (uses purerenderer mode of graphic-walker, not png preview)
    """
    props = {
        "visSpec": single_vis_spec,
        "data": _compress_data(data),
        "themeKey": theme_key,
        "title": title,
        "desc": desc,
        "dark": dark,
    }
    # Random container id so several previews can coexist in one page.
    container_id = f"pygwalker-chart-preview-{generate_hash_code()[:20]}"
    template = jinja_env.get_template("index.html")
    html = template.render(
        gwalker={
            'id': container_id,
            'gw_script': GWALKER_SCRIPT_BASE64,
            "component_script": "PyGWalkerApp.ChartPreviewApp(props, gw_id);",
            "props": json.dumps(props, cls=DataFrameEncoder)
        }
    )
    return html
class Connector:
    """
    Database connector; SQLAlchemy engines are cached per url at class level.

    - url: database url, refer to sqlalchemy doc for url. example: mysql+pymysql://user:password@host:port/database
    - view_sql: view sql, example: SELECT * FROM table_name
    - engine_params: engine params, refer to sqlalchemy doc for params. example: {"pool_size": 10}
    """
    # Class-level cache: one Engine per database url, shared by all instances.
    engine_map = {}

    def __init__(self, url: str, view_sql: str, engine_params: Optional[Dict[str, Any]] = None) -> None:
        _check_view_sql(view_sql)
        if engine_params is None:
            engine_params = {}
        self.url = url
        self.engine = self._get_engine(engine_params)
        self.view_sql = view_sql

    def _get_engine(self, engine_params: Dict[str, Any]) -> Engine:
        """Return the cached engine for self.url, creating it on first use."""
        if self.url not in self.engine_map:
            engine = create_engine(self.url, **engine_params)
            # Keep column names exactly as the driver reports them.
            engine.dialect.requires_name_normalize = False
            self.engine_map[self.url] = engine
        return self.engine_map[self.url]

    def query_datas(self, sql: str) -> List[Dict[str, Any]]:
        """Execute *sql* and return the rows as a list of dicts.

        For snowflake, columns whose cursor type_code is 9 or 10 are decoded
        from their JSON string representation (semi-structured types —
        presumably VARIANT/OBJECT-like; verify against the snowflake driver).
        """
        field_type_map = {}
        with self.engine.connect() as connection:
            result = connection.execute(text(sql))
            if self.dialect_name == "snowflake":
                field_type_map = {
                    column_desc.name: column_desc.type_code
                    for column_desc in result.cursor.description
                }
            return [
                {
                    key: json.loads(value) if field_type_map.get(key, -1) in {9, 10} else value
                    for key, value in item.items()
                }
                for item in result.mappings()
            ]

    @property
    def dialect_name(self) -> str:
        """Name of the SQLAlchemy dialect behind this connector.

        Must be a property: `query_datas` compares it directly to the string
        "snowflake"; as a plain method that comparison was always False, so
        the snowflake JSON-decoding branch was unreachable.
        """
        return self.engine.dialect.name
# Generic alias covering every supported dataframe implementation (pandas,
# polars, ...); `dataframe_types` is defined elsewhere in the project.
DataFrame = TypeVar("DataFrame", *dataframe_types)
def dsl_to_workflow(dsl: Dict[str, Any]) -> Dict[str, Any]:
    """Convert a graphic-walker DSL spec into its query workflow.

    The transformation runs in embedded JS, so the dict is round-tripped
    through JSON on the way in and out.
    """
    serialized = json.dumps(dsl)
    raw_workflow = _dsl_to_workflow_js(serialized)
    return json.loads(raw_workflow)
def vega_to_dsl(vega_config: Dict[str, Any], fields: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Convert a vega-lite config into a graphic-walker DSL spec.

    Args:
        vega_config: vega-lite chart configuration.
        fields: all available field definitions for the dataset.
    """
    # visId/name are random so each converted chart gets a unique identity.
    payload = {
        "vl": vega_config,
        "allFields": fields,
        "visId": rand_str(6),
        "name": rand_str(6)
    }
    return json.loads(_vega_to_dsl_js(json.dumps(payload)))
The provided code snippet includes necessary dependencies for implementing the `to_chart_html` function. Write a Python function `def to_chart_html( dataset: Union[DataFrame, Connector, str], spec: Dict[str, Any], *, spec_type: Literal["graphic-walker", "vega"] = "graphic-walker", theme_key: Literal['vega', 'g2'] = 'g2', dark: Literal['media', 'light', 'dark'] = 'media', ) -> str` to solve the following problem:
Generate HTML code of a chart by graphic-walker or vega spec. Args: - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataset. - spec (Dict[str, Any]): chart config data. Kwargs: - spec_type (Literal["graphic-walker", "vega"]): type of spec. - theme_key ('vega' | 'g2'): theme type. - dark ('media' | 'light' | 'dark'): 'media': auto detect OS theme.
Here is the function:
def to_chart_html(
    dataset: Union[DataFrame, Connector, str],
    spec: Dict[str, Any],
    *,
    spec_type: Literal["graphic-walker", "vega"] = "graphic-walker",
    theme_key: Literal['vega', 'g2'] = 'g2',
    dark: Literal['media', 'light', 'dark'] = 'media',
) -> str:
    """
    Generate HTML code of a chart from a graphic-walker or vega spec.

    Args:
        - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataset.
        - spec (Dict[str, Any]): chart config data.

    Kwargs:
        - spec_type (Literal["graphic-walker", "vega"]): type of spec.
        - theme_key ('vega' | 'g2'): theme type.
        - dark ('media' | 'light' | 'dark'): 'media': auto detect OS theme.
    """
    # pylint: disable=import-outside-toplevel
    # Since the compatibility of quick js is not certain, the related methods are lazy loaded.
    from pygwalker.utils.dsl_transform import vega_to_dsl, dsl_to_workflow

    parser = get_parser(dataset)
    # Vega specs are translated to the graphic-walker DSL first.
    gw_dsl = vega_to_dsl(spec, parser.raw_fields) if spec_type == "vega" else spec
    query_workflow = dsl_to_workflow(gw_dsl)
    chart_data = parser.get_datas_by_payload(query_workflow)
    return render_gw_chart_preview_html(
        single_vis_spec=gw_dsl,
        data=chart_data,
        theme_key=theme_key,
        dark=dark,
        title="",
        desc=""
    )
165,742 | from typing import Union, Dict, Optional
from typing_extensions import Literal
from .pygwalker import PygWalker
from pygwalker.communications.gradio_comm import (
BASE_URL_PATH,
GradioCommunication,
PYGWALKER_ROUTE
)
from pygwalker.data_parsers.base import FieldSpec
from pygwalker.data_parsers.database_parser import Connector
from pygwalker._typing import DataFrame
from pygwalker.utils.check_walker_params import check_expired_params
class PygWalker:
    """Core pygwalker renderer.

    Wraps a dataset behind a data parser, manages chart specs and frontend
    callbacks, and renders the Graphic Walker UI as HTML / iframes for the
    supported frontends (jupyter, streamlit, gradio, ...).
    """
    def __init__(
        self,
        *,
        gid: Optional[Union[int, str]],
        dataset: Union[DataFrame, Connector, str],
        field_specs: Dict[str, Any],
        spec: str,
        source_invoke_code: str,
        theme_key: Literal['vega', 'g2'],
        dark: Literal['media', 'light', 'dark'],
        show_cloud_tool: Optional[bool],
        use_preview: bool,
        use_kernel_calc: Optional[bool],
        use_cloud_calc: Optional[bool],
        use_save_tool: bool,
        is_export_dataframe: bool,
        kanaries_api_key: str,
        default_tab: Literal["data", "vis"],
        gw_mode: Literal["explore", "renderer", "filter_renderer", "table"],
        **kwargs
    ):
        # Explicit api key wins over the globally configured one.
        self.kanaries_api_key = kanaries_api_key or GlobalVarManager.kanaries_api_key
        if gid is None:
            self.gid = generate_hash_code()
        else:
            self.gid = gid
        self.cloud_service = CloudService(self.kanaries_api_key)
        self.data_parser = self._get_data_parser(
            dataset=dataset,
            field_specs=field_specs,
            use_cloud_calc=use_cloud_calc,
            kanaries_api_key=self.kanaries_api_key,
            cloud_service=self.cloud_service
        )
        # When unspecified, use kernel computation for datasets too large to
        # ship to the widget frontend in full.
        self.use_kernel_calc = self.data_parser.data_size > JUPYTER_WIDGETS_BYTE_LIMIT if use_kernel_calc is None else use_kernel_calc
        # Kernel mode keeps only a 500-row sample in the frontend.
        self.origin_data_source = self.data_parser.to_records(500 if self.use_kernel_calc else None)
        self.field_specs = self.data_parser.raw_fields
        self.spec = spec
        self.source_invoke_code = source_invoke_code
        self.theme_key = theme_key
        self.dark = dark
        self.data_source_id = rand_str()
        self.other_props = kwargs
        self.tunnel_id = "tunnel!"
        # Cloud tooling defaults to "on" only when an api key is available.
        self.show_cloud_tool = bool(self.kanaries_api_key) if show_cloud_tool is None else show_cloud_tool
        self.use_preview = use_preview
        self._init_spec(spec, self.field_specs)
        self.use_save_tool = use_save_tool
        self.parse_dsl_type = self._get_parse_dsl_type(self.data_parser)
        self.gw_mode = gw_mode
        # NOTE(review): "dataset_tpye" is the parser attribute's actual
        # (misspelled) name — do not "fix" the spelling here.
        self.dataset_type = self.data_parser.dataset_tpye
        self.is_export_dataframe = is_export_dataframe
        self._last_exported_dataframe = None
        self.default_tab = default_tab
        self.use_cloud_calc = use_cloud_calc
        check_update()
        # Temporarily adapt to pandas import module bug
        if self.use_kernel_calc:
            try:
                # Warm-up query; failures are deliberately ignored.
                self.data_parser.get_datas_by_sql("SELECT 1 FROM pygwalker_mid_table LIMIT 1")
            except Exception:
                pass
        if GlobalVarManager.privacy == "offline":
            self.show_cloud_tool = False
    def last_exported_dataframe(self) -> Optional[pd.DataFrame]:
        # Accessor for the dataframe last exported from the UI (set by the
        # export callbacks in _init_callback).
        # NOTE(review): reads like a @property accessor — confirm whether a
        # decorator was dropped upstream.
        return self._last_exported_dataframe
    def _get_data_parser(
        self,
        *,
        dataset: Union[DataFrame, Connector, str],
        field_specs: Dict[str, Any],
        use_cloud_calc: bool,
        kanaries_api_key: str,
        cloud_service: CloudService
    ) -> BaseDataParser:
        # Build a local parser; in cloud-calc mode, upload the dataset and
        # re-parse against the cloud dataset id instead.
        data_parser = get_parser(
            dataset,
            field_specs,
            other_params={"kanaries_api_key": kanaries_api_key}
        )
        if not use_cloud_calc:
            return data_parser
        dataset_id = cloud_service.create_cloud_dataset(
            data_parser,
            f"temp_{rand_str()}",
            False,
            True
        )
        return get_parser(
            dataset_id,
            field_specs,
            other_params={"kanaries_api_key": kanaries_api_key}
        )
    def _get_parse_dsl_type(self, data_parser: BaseDataParser) -> Literal["server", "client"]:
        # Connector-backed and cloud datasets must resolve the DSL server-side;
        # in-memory dataframes can do it in the browser.
        if data_parser.dataset_tpye.startswith("connector"):
            return "server"
        if data_parser.dataset_tpye == "cloud_dataset":
            return "server"
        return "client"
    def _init_spec(self, spec: Dict[str, Any], field_specs: List[Dict[str, Any]]):
        # Load and normalize the saved spec (file / cloud / inline json).
        spec_obj, spec_type = get_spec_json(spec)
        # Falsy config is passed through as-is; otherwise new fields are
        # back-filled into the saved chart config.
        self._update_vis_spec(spec_obj["config"] and fill_new_fields(spec_obj["config"], field_specs))
        self.spec_type = spec_type
        self._chart_map = self._parse_chart_map_dict(spec_obj["chart_map"])
        self.spec_version = spec_obj.get("version", None)
        self.workflow_list = spec_obj.get("workflow_list", [])
    def _update_vis_spec(self, vis_spec: List[Dict[str, Any]]):
        # Keep the name -> index lookup in sync with the spec list.
        self.vis_spec = vis_spec
        self._chart_name_index_map = {
            item["name"]: index
            for index, item in enumerate(vis_spec)
        }
    def _get_chart_map_dict(self, chart_map: Dict[str, ChartData]) -> Dict[str, Any]:
        # Serialize ChartData models to plain dicts (alias keys for the frontend).
        return {
            key: value.dict(by_alias=True)
            for key, value in chart_map.items()
        }
    def _parse_chart_map_dict(self, chart_map_dict: Dict[str, Any]) -> Dict[str, ChartData]:
        # Inverse of _get_chart_map_dict: hydrate dicts into ChartData models.
        return {
            key: ChartData.parse_obj(value)
            for key, value in chart_map_dict.items()
        }
    def to_html(self) -> str:
        """Render the walker as iframe-wrapped HTML."""
        props = self._get_props()
        return self._get_render_iframe(props)
    def to_html_without_iframe(self) -> str:
        """Render the walker as raw HTML (no iframe wrapper)."""
        props = self._get_props()
        html = render_gwalker_html(self.gid, props)
        return html
    def display_on_convert_html(self):
        """
        Display on jupyter-nbconvert html.
        """
        props = self._get_props("jupyter")
        iframe_html = self._get_render_iframe(props)
        display_html(iframe_html)
    def display_on_jupyter(self):
        """
        Display on jupyter notebook/lab.
        If share has large data loading, only sample data can be displayed when reload.
        After that, it will be changed to python for data calculation,
        and only a small amount of data will be output to the front end to complete the analysis of big data.
        """
        data_source = get_max_limited_datas(self.origin_data_source, JUPYTER_BYTE_LIMIT)
        props = self._get_props(
            "jupyter",
            data_source,
            len(self.origin_data_source) > len(data_source)
        )
        iframe_html = self._get_render_iframe(props)
        # If the data was truncated, stream the remaining rows in batches
        # after the iframe is shown.
        if len(self.origin_data_source) > len(data_source):
            upload_tool = BatchUploadDatasToolOnJupyter()
            display_html(iframe_html)
            upload_tool.run(
                records=self.origin_data_source,
                sample_data_count=0,
                data_source_id=self.data_source_id,
                gid=self.gid,
                tunnel_id=self.tunnel_id,
            )
        else:
            display_html(iframe_html)
    def display_on_jupyter_use_widgets(self, iframe_height: str = "1010px"):
        """
        use ipywidgets, Display on jupyter notebook/lab.
        When the kernel is down, the chart will not be displayed, so use `display_on_jupyter` to share
        """
        comm = HackerCommunication(self.gid)
        preview_tool = PreviewImageTool(self.gid)
        data_source = get_max_limited_datas(self.origin_data_source, JUPYTER_WIDGETS_BYTE_LIMIT)
        props = self._get_props(
            "jupyter_widgets",
            data_source,
            len(self.origin_data_source) > len(data_source)
        )
        iframe_html = self._get_render_iframe(props, iframe_height=iframe_height)
        # The hidden comm widget rides alongside the iframe in one box so the
        # frontend can talk back to the kernel.
        html_widgets = ipywidgets.Box(
            [ipywidgets.HTML(iframe_html), comm.get_widgets()],
            layout=ipywidgets.Layout(display='block')
        )
        self._init_callback(comm, preview_tool)
        display_html(html_widgets)
        preview_tool.init_display()
        preview_tool.render_gw_review(self._get_gw_preview_html())
    def display_preview_on_jupyter(self):
        """
        Display preview on jupyter notebook/lab.
        """
        display_html(self._get_gw_preview_html())
    def chart_list(self) -> List[str]:
        """
        Get the list of saved charts.
        """
        return list(self._chart_map.keys())
    def save_chart_to_file(self, chart_name: str, path: str, save_type: Literal["html", "png"] = "png"):
        """
        Save the chart to a file.
        """
        # html is written as text, png as raw bytes.
        if save_type == "html":
            content = self.export_chart_html(chart_name)
            write_mode = "w"
            encoding = "utf-8"
        elif save_type == "png":
            content = self.export_chart_png(chart_name)
            write_mode = "wb"
            encoding = None
        else:
            raise ValueError(f"save_type must be html or png, but got {save_type}")
        with open(path, write_mode, encoding=encoding) as f:
            f.write(content)
    def export_chart_html(self, chart_name: str) -> str:
        """
        Export the chart as a html string.
        """
        return self._get_gw_chart_preview_html(
            chart_name,
            title="",
            desc=""
        )
    def export_chart_png(self, chart_name: str) -> bytes:
        """
        Export the chart as a png bytes.
        """
        chart_data = self._get_chart_by_name(chart_name)
        # single_chart holds the image location (presumably a data: URL —
        # verify against ChartData); urlopen fetches the raw bytes either way.
        with urllib.request.urlopen(chart_data.single_chart) as png_string:
            return png_string.read()
    def display_chart(self, chart_name: str, *, title: Optional[str] = None, desc: str = ""):
        """
        Display the chart in the notebook.
        """
        if title is None:
            title = chart_name
        html = self._get_gw_chart_preview_html(
            chart_name,
            title=title,
            desc=desc
        )
        display_html(html)
    def _get_chart_by_name(self, chart_name: str) -> ChartData:
        # Raise a friendly error for charts that were never saved from the UI.
        if chart_name not in self._chart_map:
            raise ValueError(f"chart_name: {chart_name} not found, please confirm whether to save")
        return self._chart_map[chart_name]
    def _init_callback(self, comm: BaseCommunication, preview_tool: PreviewImageTool = None):
        # Register all frontend -> kernel endpoints on the given comm channel.
        # Which endpoints are active depends on the walker's feature flags.
        upload_tool = BatchUploadDatasToolOnWidgets(comm)
        # NOTE(review): "reuqest" typo is preserved — the name is only used
        # locally for registration below.
        def reuqest_data_callback(_):
            upload_tool.run(
                records=self.origin_data_source,
                sample_data_count=0,
                data_source_id=self.data_source_id
            )
            return {}
        def get_latest_vis_spec(_):
            return {"visSpec": self.vis_spec}
        def save_chart_endpoint(data: Dict[str, Any]):
            chart_data = ChartData.parse_obj(data)
            self._chart_map[data["title"]] = chart_data
        def update_spec(data: Dict[str, Any]):
            # Persist the edited spec back to its origin (local file or cloud).
            spec_obj = {
                "config": data["visSpec"],
                "chart_map": {},
                "version": __version__,
                "workflow_list": data.get("workflowList", [])
            }
            self._update_vis_spec(data["visSpec"])
            self.spec_version = __version__
            self.workflow_list = data.get("workflowList", [])
            if self.use_preview:
                preview_tool.render_gw_review(self._get_gw_preview_html())
            save_chart_endpoint(data["chartData"])
            if self.spec_type == "json_file":
                with open(self.spec, "w", encoding="utf-8") as f:
                    f.write(json.dumps(spec_obj))
            if self.spec_type == "json_ksf":
                # [6:] strips the spec's scheme prefix — presumably "ksf://";
                # verify against get_spec_json.
                self.cloud_service.write_config_to_cloud(self.spec[6:], json.dumps(spec_obj))
        def upload_spec_to_cloud(data: Dict[str, Any]):
            # A token supplied by the frontend replaces the stored credentials.
            if data["newToken"]:
                set_config({"kanaries_token": data["newToken"]})
                GlobalVarManager.kanaries_api_key = data["newToken"]
            spec_obj = {
                "config": self.vis_spec,
                "chart_map": {},
                "version": __version__,
                "workflow_list": self.workflow_list,
            }
            file_name = data["fileName"]
            workspace_name = self.cloud_service.get_kanaries_user_info()["workspaceName"]
            path = f"{workspace_name}/{file_name}"
            self.cloud_service.write_config_to_cloud(path, json.dumps(spec_obj))
            return {"specFilePath": path}
        def _get_datas(data: Dict[str, Any]):
            sql = data["sql"]
            datas = self.data_parser.get_datas_by_sql(sql)
            # Guard against shipping oversized result sets to the frontend.
            if len(datas) > RESPONSE_MAX_DATA_LENGTH:
                raise DataCountLimitError()
            return {
                "datas": datas
            }
        def _get_datas_by_payload(data: Dict[str, Any]):
            datas = self.data_parser.get_datas_by_payload(data["payload"])
            if len(datas) > RESPONSE_MAX_DATA_LENGTH:
                raise DataCountLimitError()
            return {
                "datas": datas
            }
        def _batch_get_datas_by_sql(data: Dict[str, Any]):
            result = self.data_parser.batch_get_datas_by_sql(data["queryList"])
            for datas in result:
                if len(datas) > RESPONSE_MAX_DATA_LENGTH:
                    raise DataCountLimitError()
            return {
                "datas": result
            }
        def _batch_get_datas_by_payload(data: Dict[str, Any]):
            result = self.data_parser.batch_get_datas_by_payload(data["queryList"])
            for datas in result:
                if len(datas) > RESPONSE_MAX_DATA_LENGTH:
                    raise DataCountLimitError()
            return {
                "datas": result
            }
        def _get_spec_by_text(data: Dict[str, Any]):
            # A caller-supplied "custom_ask_callback" overrides the cloud service.
            callback = self.other_props.get(
                "custom_ask_callback",
                self.cloud_service.get_spec_by_text
            )
            return {
                "data": callback(data["metas"], data["query"])
            }
        def _export_dataframe_by_payload(data: Dict[str, Any]):
            df = pd.DataFrame(self.data_parser.get_datas_by_payload(data["payload"]))
            GlobalVarManager.set_last_exported_dataframe(df)
            self._last_exported_dataframe = df
        def _export_dataframe_by_sql(data: Dict[str, Any]):
            sql = data["sql"]
            df = pd.DataFrame(self.data_parser.get_datas_by_sql(sql))
            GlobalVarManager.set_last_exported_dataframe(df)
            self._last_exported_dataframe = df
        def _upload_to_cloud_charts(data: Dict[str, Any]):
            chart_id = self.cloud_service.upload_cloud_chart(
                data_parser=self.data_parser,
                chart_name=data["chartName"],
                dataset_name=data["datasetName"],
                workflow=data["workflow"],
                spec_list=data["visSpec"],
                is_public=data["isPublic"],
            )
            return {"chartId": chart_id}
        def _upload_to_cloud_dashboard(data: Dict[str, Any]):
            dashboard_id = self.cloud_service.upload_cloud_dashboard(
                data_parser=self.data_parser,
                dashboard_name=data["chartName"],
                dataset_name=data["datasetName"],
                workflow_list=data["workflowList"],
                spec_list=data["visSpec"],
                is_public=data["isPublic"],
                dark=self.dark
            )
            return {"dashboardId": dashboard_id}
        comm.register("get_latest_vis_spec", get_latest_vis_spec)
        comm.register("request_data", reuqest_data_callback)
        if self.use_save_tool:
            comm.register("upload_spec_to_cloud", upload_spec_to_cloud)
            comm.register("update_spec", update_spec)
            comm.register("save_chart", save_chart_endpoint)
        if self.show_cloud_tool:
            comm.register("upload_to_cloud_charts", _upload_to_cloud_charts)
            comm.register("upload_to_cloud_dashboard", _upload_to_cloud_dashboard)
            comm.register("get_spec_by_text", _get_spec_by_text)
        if self.use_kernel_calc:
            comm.register("get_datas", _get_datas)
            comm.register("get_datas_by_payload", _get_datas_by_payload)
            comm.register("batch_get_datas_by_sql", _batch_get_datas_by_sql)
            comm.register("batch_get_datas_by_payload", _batch_get_datas_by_payload)
        if self.is_export_dataframe:
            comm.register("export_dataframe_by_payload", _export_dataframe_by_payload)
            comm.register("export_dataframe_by_sql", _export_dataframe_by_sql)
    def _send_props_track(self, props: Dict[str, Any]):
        # Telemetry: send only a whitelisted, non-data subset of the props.
        needed_fields = {
            "id", "version", "hashcode", "themeKey",
            "dark", "env", "specType", "needLoadDatas", "showCloudTool",
            "useKernelCalc", "useSaveTool", "parseDslType", "gwMode", "datasetType",
            "defaultTab", "useCloudCalc"
        }
        event_info = {key: value for key, value in props.items() if key in needed_fields}
        event_info["hasKanariesToken"] = bool(self.kanaries_api_key)
        track_event("invoke_props", event_info)
    def _get_props(
        self,
        env: str = "",
        data_source: Optional[Dict[str, Any]] = None,
        need_load_datas: bool = False
    ) -> Dict[str, Any]:
        # Assemble the full prop payload handed to the JS frontend.
        if data_source is None:
            data_source = self.origin_data_source
        props = {
            "id": self.gid,
            "dataSource": data_source,
            "len": len(data_source),
            "version": __version__,
            "hashcode": get_local_user_id(),
            "userConfig": {
                "privacy": GlobalVarManager.privacy,
            },
            "visSpec": self.vis_spec,
            "rawFields": [
                {**field, "offset": 0}
                for field in self.field_specs
            ],
            "fieldkeyGuard": False,
            "themeKey": self.theme_key,
            "dark": self.dark,
            "sourceInvokeCode": self.source_invoke_code,
            "dataSourceProps": {
                'tunnelId': self.tunnel_id,
                'dataSourceId': self.data_source_id,
            },
            "env": env,
            "specType": self.spec_type,
            # Lazy batch loading is only needed when data was truncated AND
            # the kernel is not doing the computation.
            "needLoadDatas": not self.use_kernel_calc and need_load_datas,
            "showCloudTool": self.show_cloud_tool,
            "needInitChart": not self._chart_map,
            "useKernelCalc": self.use_kernel_calc,
            "useSaveTool": self.use_save_tool,
            "parseDslType": self.parse_dsl_type,
            "gwMode": self.gw_mode,
            "needLoadLastSpec": True,
            "datasetType": self.dataset_type,
            "extraConfig": self.other_props,
            "fieldMetas": self.data_parser.field_metas,
            "isExportDataFrame": self.is_export_dataframe,
            "defaultTab": self.default_tab,
            "useCloudCalc": self.use_cloud_calc
        }
        self._send_props_track(props)
        return props
    def _get_render_iframe(
        self,
        props: Dict[str, Any],
        return_iframe: bool = True,
        iframe_height: str = "1010px"
    ) -> str:
        # Render the walker html; optionally wrap it in an escaped srcdoc iframe.
        html = render_gwalker_html(self.gid, props)
        if return_iframe:
            srcdoc = m_html.escape(html)
            return render_gwalker_iframe(self.gid, srcdoc, iframe_height)
        else:
            return html
    def _get_gw_preview_html(self) -> str:
        # Preview of all saved charts; charts whose workflow fails to parse
        # are rendered with empty data rather than aborting the preview.
        if not self.workflow_list:
            return ""
        datas = []
        for workflow in self.workflow_list:
            try:
                datas.append(self.data_parser.get_datas_by_payload(workflow))
            except ParserException:
                datas.append([])
        html = render_gw_preview_html(
            self.vis_spec,
            datas,
            self.theme_key,
            self.gid,
            self.dark
        )
        return html
    def _get_gw_chart_preview_html(self, chart_name: str, title: str, desc: str) -> str:
        # Render a single saved chart's preview html by its saved name.
        if chart_name not in self._chart_name_index_map:
            raise ValueError(f"chart_name: {chart_name} not found.")
        chart_index = self._chart_name_index_map[chart_name]
        if not self.workflow_list:
            return ""
        data = self.data_parser.get_datas_by_payload(self.workflow_list[chart_index])
        return render_gw_chart_preview_html(
            single_vis_spec=self.vis_spec[chart_index],
            data=data,
            theme_key=self.theme_key,
            title=title,
            desc=desc,
            dark=self.dark
        )
# Route prefix for pygwalker comm endpoints; strip("/") removes the leading
# and trailing slashes, yielding "_pygwalker/comm".
BASE_URL_PATH = "/_pygwalker/comm/".strip("/")
class GradioCommunication(BaseCommunication):
    """
    Communication channel for the gradio frontend.

    Receive-only: instances register themselves in the module-level
    `gradio_comm_map` keyed by gid so the HTTP route can find them.
    """
    def __init__(self, gid: str) -> None:
        super().__init__()
        self.gid = gid
        gradio_comm_map[gid] = self
class FieldSpec(NamedTuple):
    """Field specification.

    Args:
    - semanticType: '?' | 'nominal' | 'ordinal' | 'temporal' | 'quantitative'. default to '?'.
    - analyticType: '?' | 'dimension' | 'measure'. default to '?'.
    - display_as: Optional[str]. The field name displayed. None means using the original column name.
    """
    # '?' means "infer automatically from the data".
    semanticType: Literal['?', 'nominal', 'ordinal', 'temporal', 'quantitative'] = '?'
    analyticType: Literal['?', 'dimension', 'measure'] = '?'
    display_as: Optional[str] = None
class Connector:
    """
    Database connector; SQLAlchemy engines are cached per url at class level.

    - url: database url, refer to sqlalchemy doc for url. example: mysql+pymysql://user:password@host:port/database
    - view_sql: view sql, example: SELECT * FROM table_name
    - engine_params: engine params, refer to sqlalchemy doc for params. example: {"pool_size": 10}
    """
    # Class-level cache: one Engine per database url, shared by all instances.
    engine_map = {}

    def __init__(self, url: str, view_sql: str, engine_params: Optional[Dict[str, Any]] = None) -> None:
        _check_view_sql(view_sql)
        if engine_params is None:
            engine_params = {}
        self.url = url
        self.engine = self._get_engine(engine_params)
        self.view_sql = view_sql

    def _get_engine(self, engine_params: Dict[str, Any]) -> Engine:
        """Return the cached engine for self.url, creating it on first use."""
        if self.url not in self.engine_map:
            engine = create_engine(self.url, **engine_params)
            # Keep column names exactly as the driver reports them.
            engine.dialect.requires_name_normalize = False
            self.engine_map[self.url] = engine
        return self.engine_map[self.url]

    def query_datas(self, sql: str) -> List[Dict[str, Any]]:
        """Execute *sql* and return the rows as a list of dicts.

        For snowflake, columns whose cursor type_code is 9 or 10 are decoded
        from their JSON string representation (semi-structured types —
        presumably VARIANT/OBJECT-like; verify against the snowflake driver).
        """
        field_type_map = {}
        with self.engine.connect() as connection:
            result = connection.execute(text(sql))
            if self.dialect_name == "snowflake":
                field_type_map = {
                    column_desc.name: column_desc.type_code
                    for column_desc in result.cursor.description
                }
            return [
                {
                    key: json.loads(value) if field_type_map.get(key, -1) in {9, 10} else value
                    for key, value in item.items()
                }
                for item in result.mappings()
            ]

    @property
    def dialect_name(self) -> str:
        """Name of the SQLAlchemy dialect behind this connector.

        Must be a property: `query_datas` compares it directly to the string
        "snowflake"; as a plain method that comparison was always False, so
        the snowflake JSON-decoding branch was unreachable.
        """
        return self.engine.dialect.name
# Generic alias covering every supported dataframe implementation (pandas,
# polars, ...); `dataframe_types` is defined elsewhere in the project.
DataFrame = TypeVar("DataFrame", *dataframe_types)
def check_expired_params(params: Dict[str, Any]):
    """Log a warning for every deprecated keyword argument found in *params*.

    Args:
        params: keyword arguments passed by the caller; inspected only,
            never modified.
    """
    renamed = (
        ("fieldSpecs", "field_specs"),
        ("themeKey", "theme_key"),
        ("debug", "spec_io_mode"),
    )
    for legacy_name, current_name in renamed:
        if legacy_name in params:
            logger.warning(
                f"Parameter `{legacy_name}` is expired, please use `{current_name}` instead."
            )
The provided code snippet includes necessary dependencies for implementing the `get_html_on_gradio` function. Write a Python function `def get_html_on_gradio( dataset: Union[DataFrame, Connector], gid: Union[int, str] = None, *, field_specs: Optional[Dict[str, FieldSpec]] = None, theme_key: Literal['vega', 'g2'] = 'g2', dark: Literal['media', 'light', 'dark'] = 'media', spec: str = "", spec_io_mode: Literal["r", "rw"] = "r", use_kernel_calc: bool = True, kanaries_api_key: str = "", default_tab: Literal["data", "vis"] = "vis", **kwargs ) -> str` to solve the following problem:
Get pygwalker html render to gradio Args: - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataframe. - gid (Union[int, str], optional): GraphicWalker container div's id ('gwalker-{gid}') Kwargs: - env: (Literal['Jupyter' | 'Streamlit'], optional): The environment using pygwalker. Default as 'Jupyter' - field_specs (Dict[str, FieldSpec], optional): Specifications of some fields. They'll be automatically inferred from `df` if some fields are not specified. - theme_key ('vega' | 'g2'): theme type. - dark (Literal['media' | 'light' | 'dark']): 'media': auto detect OS theme. - spec (str): chart config data. config id, json, remote file url - spec_io_mode (Literal["r", "rw"]): spec io mode, Default to "r", "r" for read, "rw" for read and write. - use_kernel_calc(bool): Whether to use kernel compute for datas, Default to True. - kanaries_api_key (str): kanaries api key, Default to "". - default_tab (Literal["data", "vis"]): default tab to show. Default to "vis"
Here is the function:
def get_html_on_gradio(
    dataset: Union[DataFrame, Connector],
    gid: Union[int, str] = None,
    *,
    field_specs: Optional[Dict[str, FieldSpec]] = None,
    theme_key: Literal['vega', 'g2'] = 'g2',
    dark: Literal['media', 'light', 'dark'] = 'media',
    spec: str = "",
    spec_io_mode: Literal["r", "rw"] = "r",
    use_kernel_calc: bool = True,
    kanaries_api_key: str = "",
    default_tab: Literal["data", "vis"] = "vis",
    **kwargs
) -> str:
    """Build the pygwalker iframe HTML for embedding in a gradio app.

    Args:
    - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataframe.
    - gid (Union[int, str], optional): GraphicWalker container div's id ('gwalker-{gid}')

    Kwargs:
    - field_specs (Dict[str, FieldSpec], optional): Specifications of some fields. They'll be automatically inferred from `df` if some fields are not specified.
    - theme_key ('vega' | 'g2'): theme type.
    - dark (Literal['media' | 'light' | 'dark']): 'media': auto detect OS theme.
    - spec (str): chart config data. config id, json, remote file url
    - spec_io_mode (Literal["r", "rw"]): spec io mode, Default to "r", "r" for read, "rw" for read and write.
    - use_kernel_calc (bool): Whether to use kernel compute for data, Default to True.
    - kanaries_api_key (str): kanaries api key, Default to "".
    - default_tab (Literal["data", "vis"]): default tab to show. Default to "vis"
    """
    check_expired_params(kwargs)

    # Connector-backed datasets always need kernel-side computation.
    kernel_calc = True if isinstance(dataset, Connector) else use_kernel_calc

    walker = PygWalker(
        gid=gid,
        dataset=dataset,
        field_specs={} if field_specs is None else field_specs,
        spec=spec,
        source_invoke_code="",
        theme_key=theme_key,
        dark=dark,
        show_cloud_tool=False,
        use_preview=False,
        use_kernel_calc=kernel_calc,
        use_save_tool="w" in spec_io_mode,
        is_export_dataframe=False,
        kanaries_api_key=kanaries_api_key,
        default_tab=default_tab,
        use_cloud_calc=False,
        gw_mode="explore",
        **kwargs
    )

    props = walker._get_props("gradio")
    props["communicationUrl"] = BASE_URL_PATH
    # Register the kernel-side callbacks on the gradio comm channel.
    walker._init_callback(GradioCommunication(str(walker.gid)))
    return walker._get_render_iframe(props, True)
165,743 | from typing import Union, Dict, Optional, TYPE_CHECKING, List, Any
from distutils.version import StrictVersion
import json
from typing_extensions import Literal
from pydantic import BaseModel
from cachetools import cached, TTLCache
import arrow
import streamlit.components.v1 as components
from .pygwalker import PygWalker
from pygwalker.communications.streamlit_comm import (
hack_streamlit_server,
BASE_URL_PATH,
StreamlitCommunication
)
from pygwalker.data_parsers.base import FieldSpec
from pygwalker.data_parsers.database_parser import Connector
from pygwalker._typing import DataFrame
from pygwalker.utils.randoms import rand_str
from pygwalker.utils.check_walker_params import check_expired_params
from pygwalker.services.streamlit_components import render_explore_modal_button
def hack_streamlit_server():
    """Attach pygwalker's HTTP handler to streamlit's running tornado server.

    Locates the live `tornado.web.Application` instance via the garbage
    collector (streamlit exposes no public hook for this) and registers
    `PygwalkerHandler` on `PYGWALKER_API_PATH`.

    Raises:
        RuntimeError: if no tornado Application is found — previously this
            fell through to an opaque `AttributeError` on None.
    """
    tornado_app = next(
        (obj for obj in gc.get_objects() if isinstance(obj, Application)),
        None,
    )
    if tornado_app is None:
        raise RuntimeError(
            "Could not locate streamlit's tornado Application; "
            "is this running inside a streamlit server?"
        )
    tornado_app.add_handlers(".*", [(PYGWALKER_API_PATH, PygwalkerHandler)])
The provided code snippet includes necessary dependencies for implementing the `init_streamlit_comm` function. Write a Python function `def init_streamlit_comm()` to solve the following problem:
Initialize pygwalker communication in streamlit
Here is the function:
def init_streamlit_comm():
    """Initialize pygwalker communication in streamlit.

    Registers pygwalker's HTTP handler on streamlit's underlying tornado
    server via `hack_streamlit_server`; call once before rendering walkers.
    """
    hack_streamlit_server()
165,744 | from typing import Union, Dict, Optional, TYPE_CHECKING, List, Any
from distutils.version import StrictVersion
import json
from typing_extensions import Literal
from pydantic import BaseModel
from cachetools import cached, TTLCache
import arrow
import streamlit.components.v1 as components
from .pygwalker import PygWalker
from pygwalker.communications.streamlit_comm import (
hack_streamlit_server,
BASE_URL_PATH,
StreamlitCommunication
)
from pygwalker.data_parsers.base import FieldSpec
from pygwalker.data_parsers.database_parser import Connector
from pygwalker._typing import DataFrame
from pygwalker.utils.randoms import rand_str
from pygwalker.utils.check_walker_params import check_expired_params
from pygwalker.services.streamlit_components import render_explore_modal_button
class StreamlitRenderer:
"""Streamlit Renderer"""
def __init__(
self,
dataset: Union[DataFrame, Connector],
gid: Union[int, str] = None,
*,
field_specs: Optional[Dict[str, FieldSpec]] = None,
theme_key: Literal['vega', 'g2'] = 'g2',
dark: Literal['media', 'light', 'dark'] = 'media',
spec: str = "",
spec_io_mode: Literal["r", "rw"] = "r",
use_kernel_calc: Optional[bool] = True,
show_cloud_tool: Optional[bool] = None,
kanaries_api_key: str = "",
default_tab: Literal["data", "vis"] = "vis",
**kwargs
):
"""Get pygwalker html render to streamlit
Args:
- dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataframe.
- gid (Union[int, str], optional): GraphicWalker container div's id ('gwalker-{gid}')
Kargs:
- field_specs (Dict[str, FieldSpec], optional): Specifications of some fields. They'll been automatically inferred from `df` if some fields are not specified.
- theme_key ('vega' | 'g2'): theme type.
- dark (Literal['media' | 'light' | 'dark']): 'media': auto detect OS theme.
- spec (str): chart config data. config id, json, remote file url
- spec_io_mode (Literal["r", "rw"]): spec io mode, Default to "r", "r" for read, "rw" for read and write.
- use_kernel_calc(bool): Whether to use kernel compute for datas, Default to True.
- kanaries_api_key (str): kanaries api key, Default to "".
- default_tab (Literal["data", "vis"]): default tab to show. Default to "vis"
"""
check_expired_params(kwargs)
init_streamlit_comm()
self.walker = PygWalker(
gid=gid,
dataset=dataset,
field_specs=field_specs if field_specs is not None else {},
spec=spec,
source_invoke_code="",
theme_key=theme_key,
dark=dark,
show_cloud_tool=show_cloud_tool,
use_preview=False,
use_kernel_calc=isinstance(dataset, Connector) or use_kernel_calc,
use_save_tool="w" in spec_io_mode,
is_export_dataframe=False,
kanaries_api_key=kanaries_api_key,
default_tab=default_tab,
use_cloud_calc=False,
gw_mode="explore",
**kwargs
)
comm = StreamlitCommunication(str(self.walker.gid))
self.walker._init_callback(comm)
self.global_pre_filters = None
def _get_html_with_params_str_cache(self, params_str: str) -> str:
params = dict(json.loads(params_str))
mode = params.pop("mode")
vis_spec = params.pop("vis_spec")
kwargs = params
props = self.walker._get_props("streamlit")
props["communicationUrl"] = BASE_URL_PATH
props["gwMode"] = mode
if vis_spec is not None:
props["visSpec"] = vis_spec
props.update(kwargs)
return self.walker._get_render_iframe(props, False)
def _get_html(
self,
*,
mode: Literal["explore", "renderer", "filter_renderer"] = "explore",
vis_spec: Optional[List[Dict[str, Any]]] = None,
**kwargs: Dict[str, Any]
) -> str:
"""
Get the html for streamlit.
Kwargs will update origin props.
"""
params_str = json.dumps(sorted({
"mode": mode,
"vis_spec": vis_spec,
**kwargs
}.items()))
return self._get_html_with_params_str_cache(params_str)
def _convert_pre_filters_to_gw_config(
    self,
    pre_filters: List[PreFilter],
    spec_obj: Dict[str, Any]
) -> List[Dict[str, Any]]:
    """Translate PreFilter objects into graphic-walker filter field configs.

    Each returned dict is the matching field definition from `spec_obj`
    plus a fresh dragId and a filter rule.
    """
    all_fields = spec_obj["encodings"]["dimensions"] + spec_obj["encodings"]["measures"]
    field_by_name = {field["name"]: field for field in all_fields}

    converted = []
    for flt in pre_filters:
        if flt.op == "temporal range":
            # temporal range values are sent as epoch milliseconds
            rule_values = [int(arrow.get(v).timestamp() * 1000) for v in flt.value]
        else:
            rule_values = flt.value
        converted.append({
            **field_by_name[flt.field],
            "dragId": "gw_" + rand_str(4),
            "rule": {
                "type": flt.op,
                "value": rule_values
            }
        })
    return converted
def set_global_pre_filters(self, pre_filters: List[PreFilter]):
    """Store filters to append to every existing chart.

    These act as the default for `render_pure_chart` when no per-call
    `pre_filters` argument is given.
    """
    self.global_pre_filters = pre_filters
def render_filter_renderer(
    self,
    width: Optional[int] = None,
    height: int = 1010,
    scrolling: bool = False,
) -> "DeltaGenerator":
    """Render filter renderer UI"""
    return components.html(
        self._get_html(mode="filter_renderer"),
        height=height,
        width=width,
        scrolling=scrolling,
    )
def render_explore(
    self,
    width: Optional[int] = None,
    height: int = 1010,
    scrolling: bool = False,
    default_tab: Literal["data", "vis"] = "vis"
) -> "DeltaGenerator":
    """Render explore UI(it can drag and drop fields)"""
    explore_html = self._get_html(defaultTab=default_tab)
    return components.html(explore_html, height=height, width=width, scrolling=scrolling)
def render_pure_chart(
    self,
    index: int,
    width: Optional[int] = None,
    height: Optional[int] = None,
    scrolling: bool = False,
    pre_filters: Optional[List[PreFilter]] = None,
) -> "DeltaGenerator":
    """
    Render pure chart, index is the order of chart, starting from 0.
    If you set `pre_filters`, it will overwrite global_pre_filters.
    """
    cur_spec_obj = self.walker.vis_spec[index]
    # The per-chart size config moved from "config" to "layout" after
    # spec version 0.3.11.
    if StrictVersion(self.walker.spec_version) > StrictVersion("0.3.11"):
        chart_size_config = cur_spec_obj["layout"]["size"]
    else:
        chart_size_config = cur_spec_obj["config"]["size"]
    # Force a fixed-size chart so the iframe dimensions below are honoured.
    chart_size_config["mode"] = "fixed"
    explore_button_size = 20
    # Per-call pre_filters take precedence; fall back to the global ones.
    if pre_filters is None:
        pre_filters = self.global_pre_filters
    if pre_filters is not None:
        pre_filters_json = self._convert_pre_filters_to_gw_config(
            pre_filters, cur_spec_obj
        )
        # NOTE(review): this mutates the cached spec object in place, so
        # filters accumulate across repeated calls with the same index —
        # confirm this is intended.
        cur_spec_obj["encodings"]["filters"].extend(pre_filters_json)
    if width is None:
        # No width requested: adopt the chart's own width.
        width = chart_size_config["width"]
        left = width + 6
    else:
        # Reserve room (button size + 6px gap) for the explore modal button.
        width = width - explore_button_size - 6
        chart_size_config["width"] = width
        left = width + 6
    if height is None:
        height = chart_size_config["height"]
    else:
        chart_size_config["height"] = height
    html = self._get_html(
        mode="renderer",
        vis_spec=[cur_spec_obj]
    )
    # Separate explore-mode html shown inside the modal opened by the button.
    explore_html = self._get_html(
        vis_spec=[cur_spec_obj],
        needLoadLastSpec=False,
        useSaveTool=False
    )
    render_explore_modal_button(explore_html, left, explore_button_size)
    return components.html(html, height=height, width=width, scrolling=scrolling)
class FieldSpec(NamedTuple):
    """Field specification.
    Args:
    - semanticType: '?' | 'nominal' | 'ordinal' | 'temporal' | 'quantitative'. default to '?'.
    - analyticType: '?' | 'dimension' | 'measure'. default to '?'.
    - display_as: str. The field name displayed. None means using the original column name.
    """
    semanticType: Literal['?', 'nominal', 'ordinal', 'temporal', 'quantitative'] = '?'
    analyticType: Literal['?', 'dimension', 'measure'] = '?'
    # None is a valid value (use the original column name), so Optional[str].
    display_as: Optional[str] = None
class Connector:
    """
    database connector, it will cache engine by url.
    - url: database url, refer to sqlalchemy doc for url. example: mysql+pymysql://user:password@host:port/database
    - view_sql: view sql, example: SELECT * FROM table_name
    - engine_params: engine params, refer to sqlalchemy doc for params. example: {"pool_size": 10}
    """
    # Class-level cache: one sqlalchemy engine per url, shared by all instances.
    engine_map = {}

    def __init__(self, url: str, view_sql: str, engine_params: Optional[Dict[str, Any]] = None) -> None:
        _check_view_sql(view_sql)
        if engine_params is None:
            engine_params = {}
        self.url = url
        self.engine = self._get_engine(engine_params)
        self.view_sql = view_sql

    def _get_engine(self, engine_params: Dict[str, Any]) -> Engine:
        """Return the cached engine for self.url, creating it on first use."""
        if self.url not in self.engine_map:
            engine = create_engine(self.url, **engine_params)
            # Keep identifier case exactly as returned by the database.
            engine.dialect.requires_name_normalize = False
            self.engine_map[self.url] = engine
        return self.engine_map[self.url]

    def query_datas(self, sql: str) -> List[Dict[str, Any]]:
        """Execute `sql` and return the result rows as a list of dicts.

        For snowflake, values in columns whose cursor type_code is 9 or 10
        are json-decoded before being returned.
        NOTE(review): type codes 9/10 appear to denote JSON-encoded
        (semi-structured) columns — confirm against the snowflake connector docs.
        """
        field_type_map = {}
        with self.engine.connect() as connection:
            result = connection.execute(text(sql))
            if self.dialect_name == "snowflake":
                field_type_map = {
                    column_desc.name: column_desc.type_code
                    for column_desc in result.cursor.description
                }
            return [
                {
                    key: json.loads(value) if field_type_map.get(key, -1) in {9, 10} else value
                    for key, value in item.items()
                }
                for item in result.mappings()
            ]

    # BUG FIX: dialect_name was a plain method, but every consumer reads it as
    # an attribute (`self.dialect_name == "snowflake"` above, and
    # `connector.dialect_name` in CloudService.create_cloud_dataset), so the
    # comparison was against a bound method and always False. Make it a property.
    @property
    def dialect_name(self) -> str:
        """Name of the sqlalchemy dialect behind this connector (e.g. 'snowflake')."""
        return self.engine.dialect.name
# Generic alias covering every supported dataframe implementation
# (per the docstrings above: pl.DataFrame | pd.DataFrame, etc.).
DataFrame = TypeVar("DataFrame", *dataframe_types)
The provided code snippet includes necessary dependencies for implementing the `get_streamlit_html` function. Write a Python function `def get_streamlit_html( dataset: Union[DataFrame, Connector], gid: Union[int, str] = None, *, field_specs: Optional[Dict[str, FieldSpec]] = None, theme_key: Literal['vega', 'g2'] = 'g2', dark: Literal['media', 'light', 'dark'] = 'media', spec: str = "", use_kernel_calc: Optional[bool] = None, show_cloud_tool: Optional[bool] = None, spec_io_mode: Literal["r", "rw"] = "r", kanaries_api_key: str = "", mode: Literal["explore", "filter_renderer"] = "explore", default_tab: Literal["data", "vis"] = "vis", **kwargs ) -> str` to solve the following problem:
Get pygwalker html render to streamlit Args: - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataframe. - gid (Union[int, str], optional): GraphicWalker container div's id ('gwalker-{gid}') Kargs: - field_specs (Dict[str, FieldSpec], optional): Specifications of some fields. They'll been automatically inferred from `df` if some fields are not specified. - theme_key ('vega' | 'g2'): theme type. - dark (Literal['media' | 'light' | 'dark']): 'media': auto detect OS theme. - spec (str): chart config data. config id, json, remote file url - use_kernel_calc(bool): Whether to use kernel compute for datas, Default to None. - spec_io_mode (Literal["r", "rw"]): spec io mode, Default to "r", "r" for read, "rw" for read and write. - kanaries_api_key (str): kanaries api key, Default to "". - default_tab (Literal["data", "vis"]): default tab to show. Default to "vis"
Here is the function:
def get_streamlit_html(
    dataset: Union[DataFrame, Connector],
    gid: Union[int, str] = None,
    *,
    field_specs: Optional[Dict[str, FieldSpec]] = None,
    theme_key: Literal['vega', 'g2'] = 'g2',
    dark: Literal['media', 'light', 'dark'] = 'media',
    spec: str = "",
    use_kernel_calc: Optional[bool] = None,
    show_cloud_tool: Optional[bool] = None,
    spec_io_mode: Literal["r", "rw"] = "r",
    kanaries_api_key: str = "",
    mode: Literal["explore", "filter_renderer"] = "explore",
    default_tab: Literal["data", "vis"] = "vis",
    **kwargs
) -> str:
    """Get pygwalker html render to streamlit

    Args:
        - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataframe.
        - gid (Union[int, str], optional): GraphicWalker container div's id ('gwalker-{gid}')

    Kwargs:
        - field_specs (Dict[str, FieldSpec], optional): Specifications of some fields. They'll be automatically inferred from `df` if some fields are not specified.
        - theme_key ('vega' | 'g2'): theme type.
        - dark (Literal['media' | 'light' | 'dark']): 'media': auto detect OS theme.
        - spec (str): chart config data. config id, json, remote file url
        - use_kernel_calc(bool): Whether to use kernel compute for datas, Default to None.
        - spec_io_mode (Literal["r", "rw"]): spec io mode, Default to "r", "r" for read, "rw" for read and write.
        - kanaries_api_key (str): kanaries api key, Default to "".
        - mode (Literal["explore", "filter_renderer"]): which UI to render, Default to "explore".
        - default_tab (Literal["data", "vis"]): default tab to show. Default to "vis"
    """
    renderer = StreamlitRenderer(
        gid=gid,
        dataset=dataset,
        field_specs=field_specs if field_specs is not None else {},
        spec=spec,
        theme_key=theme_key,
        dark=dark,
        spec_io_mode=spec_io_mode,
        use_kernel_calc=use_kernel_calc,
        show_cloud_tool=show_cloud_tool,
        kanaries_api_key=kanaries_api_key,
        default_tab=default_tab,
        **kwargs
    )
    return renderer._get_html(mode=mode)
165,745 | from typing import Dict, Optional, Union
from datetime import datetime
from pygwalker.data_parsers.base import FieldSpec
from pygwalker._typing import DataFrame
from pygwalker.utils.display import display_html
from pygwalker.data_parsers.database_parser import Connector
from pygwalker.services.cloud_service import CloudService
from pygwalker.services.data_parsers import get_parser
from pygwalker.services.global_var import GlobalVarManager
# Generic alias covering every supported dataframe implementation
# (per the docstrings above: pl.DataFrame | pd.DataFrame, etc.).
DataFrame = TypeVar("DataFrame", *dataframe_types)
class Connector:
    """
    database connector, it will cache engine by url.
    - url: database url, refer to sqlalchemy doc for url. example: mysql+pymysql://user:password@host:port/database
    - view_sql: view sql, example: SELECT * FROM table_name
    - engine_params: engine params, refer to sqlalchemy doc for params. example: {"pool_size": 10}
    """
    # Class-level cache: one sqlalchemy engine per url, shared by all instances.
    engine_map = {}

    def __init__(self, url: str, view_sql: str, engine_params: Optional[Dict[str, Any]] = None) -> None:
        _check_view_sql(view_sql)
        if engine_params is None:
            engine_params = {}
        self.url = url
        self.engine = self._get_engine(engine_params)
        self.view_sql = view_sql

    def _get_engine(self, engine_params: Dict[str, Any]) -> Engine:
        """Return the cached engine for self.url, creating it on first use."""
        if self.url not in self.engine_map:
            engine = create_engine(self.url, **engine_params)
            # Keep identifier case exactly as returned by the database.
            engine.dialect.requires_name_normalize = False
            self.engine_map[self.url] = engine
        return self.engine_map[self.url]

    def query_datas(self, sql: str) -> List[Dict[str, Any]]:
        """Execute `sql` and return the result rows as a list of dicts.

        For snowflake, values in columns whose cursor type_code is 9 or 10
        are json-decoded before being returned.
        NOTE(review): type codes 9/10 appear to denote JSON-encoded
        (semi-structured) columns — confirm against the snowflake connector docs.
        """
        field_type_map = {}
        with self.engine.connect() as connection:
            result = connection.execute(text(sql))
            if self.dialect_name == "snowflake":
                field_type_map = {
                    column_desc.name: column_desc.type_code
                    for column_desc in result.cursor.description
                }
            return [
                {
                    key: json.loads(value) if field_type_map.get(key, -1) in {9, 10} else value
                    for key, value in item.items()
                }
                for item in result.mappings()
            ]

    # BUG FIX: dialect_name was a plain method, but every consumer reads it as
    # an attribute (`self.dialect_name == "snowflake"` above, and
    # `connector.dialect_name` in CloudService.create_cloud_dataset), so the
    # comparison was against a bound method and always False. Make it a property.
    @property
    def dialect_name(self) -> str:
        """Name of the sqlalchemy dialect behind this connector (e.g. 'snowflake')."""
        return self.engine.dialect.name
class CloudService:
    """A class to manage cloud service (kanaries datasets, charts, dashboards)."""

    def __init__(self, api_key: str):
        # All requests are authenticated through this session.
        self.session = PrivateSession(api_key)

    def _upload_file_dataset_meta(
        self,
        name: str,
        file_type: Literal["parquet", "csv"] = "parquet",
        is_public: bool = True,
        kind: Literal["TEMP", "FILE"] = "FILE"
    ) -> Dict[str, Any]:
        """Register a file dataset and return its metadata (dataset id + upload url).

        BUG FIX: the parameter was declared `file_type: str = Literal["parquet", "csv"]`,
        which makes the Literal *type object* the default value; any call omitting
        file_type would crash on `name + "." + file_type`. It is now a proper
        Literal annotation with "parquet" as the default (all visible call sites
        pass "parquet" explicitly, so behavior is unchanged for them).
        """
        param_file_type_map = {
            "csv": "TEXT_FILE",
            "parquet": "PARQUET"
        }
        url = f"{GlobalVarManager.kanaries_api_host}/dataset/upload"
        params = {
            "name": name,
            "fileName": name + "." + file_type,
            "isPublic": is_public,
            "desc": "",
            "meta": {
                "extractHeader": True,
                "encoding": "utf-8",
                "type": param_file_type_map.get(file_type, "TEXT_FILE"),
                "separator": ",",
            },
            "type": kind,
        }
        resp = self.session.post(url, json=params, timeout=10)
        return resp.json()["data"]

    def _upload_dataset_callback(self, dataset_id: str, fid_list: List[str]) -> Dict[str, Any]:
        """Notify the backend that the file upload for `dataset_id` finished."""
        url = f"{GlobalVarManager.kanaries_api_host}/dataset/callback"
        params = {
            "datasetId": dataset_id,
            "fidList": fid_list
        }
        resp = self.session.post(url, json=params, timeout=10)
        return resp.json()

    def _create_chart(
        self,
        *,
        dataset_id: str,
        name: str,
        meta: str,
        workflow: List[Dict[str, Any]],
        thumbnail: str,
        is_public: bool,
    ) -> Dict[str, Any]:
        """Create a chart bound to `dataset_id` and return its data."""
        url = f"{GlobalVarManager.kanaries_api_host}/chart"
        params = {
            "datasetId": dataset_id,
            "meta": meta,
            "query": json.dumps({"datasetId": dataset_id, "workflow": workflow}),
            "config": "{}",
            "name": name,
            "desc": "",
            "isPublic": is_public,
            "chartType": "",
            "thumbnail": thumbnail,
        }
        resp = self.session.post(url, json={"chart": params}, timeout=10)
        return resp.json()["data"]

    def _create_notebook(self, title: str, chart_id: str) -> Dict[str, Any]:
        """Create a public notebook that embeds the chart `chart_id`."""
        url = f"{GlobalVarManager.kanaries_api_host}/notebook"
        markdown = "\n".join([
            "# " + title,
            f"::chart[{chart_id}]"
        ])
        params = {
            "title": title,
            "markdown": markdown,
            "isPublic": True,
        }
        resp = self.session.post(url, json=params, timeout=10)
        return resp.json()["data"]

    def _get_chart_by_name(self, name: str, workspace_name: str) -> Optional[Dict[str, Any]]:
        """Fetch a chart by name, or None if it does not exist."""
        url = f"{GlobalVarManager.kanaries_main_host}/api/pygwalker/chart"
        try:
            resp = self.session.get(url, params={"chartName": name, "workspaceName": workspace_name}, timeout=15)
        except CloudFunctionError as e:
            # "not found" is an expected outcome, not an error.
            if e.code == ErrorCode.CLOUD_CHART_NOT_FOUND:
                return None
            raise e
        return resp.json()["data"]

    def _get_auth_code_info(self) -> Dict[str, Any]:
        """Fetch a short-lived auth code for pre-authenticated redirects."""
        url = f"{GlobalVarManager.kanaries_api_host}/auth/code"
        resp = self.session.get(url, timeout=15)
        return resp.json()["data"]

    def write_config_to_cloud(self, path: str, config: str):
        """Write config to cloud"""
        url = f"{GlobalVarManager.kanaries_api_host}/pygConfig"
        self.session.put(url, json={
            "path": path,
            "config": config
        })

    def get_cloud_graphic_walker(self, workspace_name: str, chart_name: str) -> str:
        """Return a pre-authenticated redirect uri for an existing cloud chart.

        Raises CloudFunctionError when the chart does not exist.
        """
        chart_data = self._get_chart_by_name(chart_name, workspace_name)
        if chart_data is None:
            raise CloudFunctionError("chart not exists", code=ErrorCode.CLOUD_CHART_NOT_FOUND)
        try:
            auto_code_info = self._get_auth_code_info()
        except CloudFunctionError:
            # Best effort: fall back to an unauthenticated redirect.
            auto_code_info = {}
        pre_redirect_uri = _generate_chart_pre_redirect_uri(chart_data["chartId"], auto_code_info)
        return pre_redirect_uri

    def create_cloud_graphic_walker(
        self,
        *,
        chart_name: str,
        workspace_name: str,
        dataset_content: io.BytesIO,
        field_specs: List[Dict[str, Any]],
    ) -> str:
        """Upload a parquet dataset and create an empty public chart for it.

        NOTE(review): annotated `-> str` but no value is returned; confirm
        whether the chart id from `_create_chart` should be returned.
        NOTE(review): `workflow={}` is passed where the parameter is annotated
        as a list — confirm the backend accepts an empty object here.
        """
        fid_list = [field["fid"] for field in field_specs]
        meta = json.dumps({
            "dataSources": [{
                "id": "dataSource-0",
                "data": []
            }],
            "datasets": [{
                "id": 'dataset-0',
                "name": 'DataSet',
                "rawFields": field_specs,
                "dsId": 'dataSource-0',
            }],
            "specList": []
        })
        chart_data = self._get_chart_by_name(chart_name, workspace_name)
        if chart_data is not None:
            raise CloudFunctionError("chart name already exists", code=ErrorCode.UNKNOWN_ERROR)
        dataset_name = f"pygwalker_{datetime.now().strftime('%Y%m%d%H%M')}"
        dataset_info = self._upload_file_dataset_meta(dataset_name, "parquet")
        dataset_id = dataset_info["datasetId"]
        upload_url = dataset_info["uploadUrl"]
        _upload_file_to_s3(upload_url, dataset_content)
        self._upload_dataset_callback(dataset_id, fid_list)
        self._create_chart(
            dataset_id=dataset_id,
            name=chart_name,
            meta=meta,
            workflow={},
            thumbnail="",
            is_public=True
        )

    def get_kanaries_user_info(self) -> Dict[str, Any]:
        """Return the current user's info (includes workspaceName)."""
        url = f"{GlobalVarManager.kanaries_api_host}/user/info"
        resp = self.session.get(url, timeout=15)
        return resp.json()["data"]

    def get_spec_by_text(self, metas: List[Dict[str, Any]], text: str) -> Dict[str, Any]:
        """Ask the text2gw service to generate a chart spec from natural language."""
        url = f"{GlobalVarManager.kanaries_api_host}/vis/text2gw"
        resp = self.session.post(
            url,
            json={"metas": metas, "messages": [{"role": "user", "content": text}]},
            timeout=15
        )
        return resp.json()["data"]

    def create_file_dataset(
        self,
        dataset_name: str,
        dataset_content: io.BytesIO,
        fid_list: List[str],
        is_public: bool,
        kind: Literal["TEMP", "FILE"]
    ) -> str:
        """Upload a parquet file dataset and return its dataset id."""
        dataset_info = self._upload_file_dataset_meta(dataset_name, "parquet", is_public, kind=kind)
        dataset_id = dataset_info["datasetId"]
        upload_url = dataset_info["uploadUrl"]
        _upload_file_to_s3(upload_url, dataset_content)
        self._upload_dataset_callback(dataset_id, fid_list)
        return dataset_id

    def create_datasource(
        self,
        name: str,
        database_url: str,
        database_type: str,
    ) -> str:
        """Register a database datasource and return its id."""
        url = f"{GlobalVarManager.kanaries_api_host}/datasource"
        params = {
            "name": name,
            "connectionconfiguration": {
                "url": database_url,
            },
            "datasourceType": database_type,
            "desc": ""
        }
        resp = self.session.post(url, json=params, timeout=15)
        return resp.json()["data"]["datasourceId"]

    def get_datasource_by_name(self, name: str) -> Optional[str]:
        """Return the id of the datasource named `name`, or None if absent."""
        url = f"{GlobalVarManager.kanaries_api_host}/datasource/search"
        resp = self.session.post(url, params={"fullName": name}, timeout=15)
        datasources = resp.json()["data"]["datasourceList"]
        return datasources[0]["id"] if datasources else None

    def create_database_dataset(
        self,
        name: str,
        datasource_id: str,
        is_public: bool,
        view_sql: str,
    ) -> str:
        """Create a DATABASE-type dataset backed by `view_sql` on `datasource_id`."""
        url = f"{GlobalVarManager.kanaries_api_host}/dataset"
        params = {
            "name": name,
            "desc": "",
            "datasourceId": datasource_id,
            "isPublic": is_public,
            "type": "DATABASE",
            "meta": {
                "viewSql": view_sql,
            }
        }
        resp = self.session.post(url, json=params, timeout=60)
        return resp.json()["data"]["datasetId"]

    def query_from_dataset(self, dataset_id: str, payload: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Run a single query against a cloud dataset."""
        url = f"{GlobalVarManager.kanaries_api_host}/public/query"
        params = {
            "datasetId": dataset_id,
            "query": payload,
        }
        resp = self.session.post(url, json=params, timeout=15)
        return resp.json()["data"]

    def batch_query_from_dataset(self, dataset_id: str, query_list: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Run several queries against a cloud dataset in one request."""
        url = f"{GlobalVarManager.kanaries_api_host}/v1/dataset/{dataset_id}/query"
        params = {
            "query": query_list,
        }
        resp = self.session.post(url, json=params, timeout=60)
        return resp.json()["data"]

    def create_cloud_dataset(
        self,
        data_parser: BaseDataParser,
        name: str,
        is_public: bool,
        is_temp_dataset: bool = False
    ) -> str:
        """Create a cloud dataset from `data_parser` and return its id.

        Connector-backed parsers become DATABASE datasets (reusing an existing
        datasource when one with the same url hash exists); everything else is
        uploaded as a parquet file dataset.
        """
        if name is None:
            name = f"pygwalker_{datetime.now().strftime('%Y%m%d%H%M')}"
        # ("dataset_tpye" is the attribute name defined by BaseDataParser — sic.)
        if data_parser.dataset_tpye == "cloud_dataset":
            raise ValueError("dataset is already a cloud dataset")
        if data_parser.dataset_tpye.startswith("connector"):
            connector = data_parser.conn
            # Datasource names are derived from the url hash so the same
            # database is registered only once.
            datasource_name = "pygwalker_" + hashlib.md5(connector.url.encode()).hexdigest()
            datasource_id = self.get_datasource_by_name(datasource_name)
            if datasource_id is None:
                datasource_id = self.create_datasource(
                    datasource_name,
                    connector.url,
                    _get_database_type_from_dialect_name(connector.dialect_name)
                )
            dataset_id = self.create_database_dataset(
                name,
                datasource_id,
                is_public,
                connector.view_sql
            )
            return dataset_id
        else:
            dataset_id = self.create_file_dataset(
                name,
                data_parser.to_parquet(),
                [field["name"] for field in data_parser.raw_fields],
                is_public,
                kind="TEMP" if is_temp_dataset else "FILE"
            )
            return dataset_id

    def create_dashboard(
        self,
        *,
        name: str,
        layout: List[Any],
        config: Dict[str, Any],
        is_public: bool
    ) -> str:
        """Create a dashboard (report) and return its id."""
        url = f"{GlobalVarManager.kanaries_api_host}/report"
        params = {
            "title": name,
            "version": "0.0.1",
            "desc": "",
            "size": {},
            "config": config,
            "layout": layout,
            "public": is_public
        }
        resp = self.session.post(url, json=params, timeout=60)
        return resp.json()["data"]["id"]

    def upload_cloud_chart(
        self,
        *,
        chart_name: str,
        dataset_name: str,
        data_parser: BaseDataParser,
        workflow: List[Dict[str, Any]],
        spec_list: List[Dict[str, Any]],
        is_public: bool,
    ) -> str:
        """Upload a dataset + one chart and return the new chart id.

        Raises CloudFunctionError if a chart with `chart_name` already exists.
        """
        workspace_name = self.get_kanaries_user_info()["workspaceName"]
        chart_data = self._get_chart_by_name(chart_name, workspace_name)
        if chart_data is not None:
            raise CloudFunctionError("chart name already exists", code=ErrorCode.UNKNOWN_ERROR)
        dataset_id = self.create_cloud_dataset(data_parser, dataset_name, False)
        chart_info = self._create_chart(
            dataset_id=dataset_id,
            name=chart_name,
            meta=json.dumps({
                "dataSources": [{
                    "id": "dataSource-0",
                    "data": []
                }],
                "datasets": [{
                    "id": 'dataset-0',
                    "name": 'DataSet',
                    "rawFields": data_parser.raw_fields,
                    "dsId": 'dataSource-0',
                }],
                "specList": spec_list
            }),
            workflow=workflow,
            thumbnail="",
            is_public=is_public
        )
        return chart_info["chartId"]

    def upload_cloud_dashboard(
        self,
        *,
        dashboard_name: str,
        dataset_name: str,
        data_parser: BaseDataParser,
        workflow_list: List[List[Dict[str, Any]]],
        spec_list: List[Dict[str, Any]],
        is_public: bool,
        dark: str,
    ) -> str:
        """Upload a dataset, one chart per spec, and a dashboard that tabs them.

        Returns the new dashboard id.
        """
        dataset_id = self.create_cloud_dataset(data_parser, dataset_name, False)
        chart_info_list = []
        # One chart per (spec, workflow) pair; all bound to the same dataset.
        for spec, workflow in zip(spec_list, workflow_list):
            chart_info = self._create_chart(
                dataset_id=dataset_id,
                name=f"{dashboard_name}-{spec['name']}",
                meta=json.dumps({
                    "dataSources": [{
                        "id": "dataSource-0",
                        "data": []
                    }],
                    "datasets": [{
                        "id": 'dataset-0',
                        "name": 'DataSet',
                        "rawFields": data_parser.raw_fields,
                        "dsId": 'dataSource-0',
                    }],
                    "specList": [spec]
                }),
                workflow=workflow,
                thumbnail="",
                is_public=is_public
            )
            chart_info_list.append(chart_info)
        dashboard_id = self.create_dashboard(
            name=dashboard_name,
            is_public=is_public,
            config={
                "items": [
                    {"id": "dashboard_title", "content": f"# {dashboard_name}", "type": "text", "name": "Text"},
                    {
                        "id": "chart_tab",
                        "dark": dark,
                        "name": "Charts",
                        "type": "data",
                        "tabs": [
                            {"chartId": chart_info["chartId"], "title": spec["name"]}
                            for spec, chart_info in zip(spec_list, chart_info_list)
                        ],
                        "mode": "gwtabs",
                    }
                ],
            },
            layout=[
                {"i": "dashboard_title", "h": 2, "w": 4, "x": 0, "y": 0},
                {"i": "chart_tab", "h": 20, "w": 4, "x": 0, "y": 2},
            ]
        )
        return dashboard_id
def get_parser(
    dataset: Union[DataFrame, Connector, str],
    field_specs: Optional[Dict[str, FieldSpec]] = None,
    infer_string_to_date: bool = False,
    infer_number_to_dimension: bool = True,
    other_params: Optional[Dict[str, Any]] = None
) -> BaseDataParser:
    """Build and return the data parser matching `dataset`'s concrete type."""
    parser_cls = _get_data_parser(dataset)
    return parser_cls(
        dataset,
        field_specs if field_specs is not None else {},
        infer_string_to_date,
        infer_number_to_dimension,
        other_params if other_params is not None else {},
    )
The provided code snippet includes necessary dependencies for implementing the `create_cloud_dataset` function. Write a Python function `def create_cloud_dataset( dataset: Union[DataFrame, Connector], *, name: Optional[str] = None, is_public: bool = False, kanaries_api_key: str = "" ) -> str` to solve the following problem:
Create a dataset in kanaries cloud Args: - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataset. Kargs: - name (str): dataset name in kanaries cloud. - is_public (bool): whether to make this dataset public. Returns: str: dataset id in kanaries cloud
Here is the function:
def create_cloud_dataset(
    dataset: Union[DataFrame, Connector],
    *,
    name: Optional[str] = None,
    is_public: bool = False,
    kanaries_api_key: str = ""
) -> str:
    """
    Create a dataset in kanaries cloud

    Args:
        - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataset.

    Kwargs:
        - name (str): dataset name in kanaries cloud. Defaults to a timestamped name.
        - is_public (bool): whether to make this dataset public.
        - kanaries_api_key (str): api key used to authenticate the upload.

    Returns:
        str: dataset id in kanaries cloud
    """
    cloud_service = CloudService(kanaries_api_key)
    # BUG FIX: previously called `get_parser(dataset, False, None)`, which
    # passed False positionally as `field_specs` (bypassing its `is None`
    # default handling) and None as `infer_string_to_date`. Use keywords so
    # each parameter receives its intended value.
    data_parser = get_parser(dataset, infer_string_to_date=False)
    if name is None:
        name = f"pygwalker_{datetime.now().strftime('%Y%m%d%H%M')}"
    dataset_id = cloud_service.create_cloud_dataset(data_parser, name, is_public)
    return dataset_id
165,746 | from typing import Dict, Optional, Union
from datetime import datetime
from pygwalker.data_parsers.base import FieldSpec
from pygwalker._typing import DataFrame
from pygwalker.utils.display import display_html
from pygwalker.data_parsers.database_parser import Connector
from pygwalker.services.cloud_service import CloudService
from pygwalker.services.data_parsers import get_parser
from pygwalker.services.global_var import GlobalVarManager
class FieldSpec(NamedTuple):
    """Field specification.
    Args:
    - semanticType: '?' | 'nominal' | 'ordinal' | 'temporal' | 'quantitative'. default to '?'.
    - analyticType: '?' | 'dimension' | 'measure'. default to '?'.
    - display_as: str. The field name displayed. None means using the original column name.
    """
    semanticType: Literal['?', 'nominal', 'ordinal', 'temporal', 'quantitative'] = '?'
    analyticType: Literal['?', 'dimension', 'measure'] = '?'
    # None is a valid value (use the original column name), so Optional[str].
    display_as: Optional[str] = None
# Generic alias covering every supported dataframe implementation
# (per the docstrings above: pl.DataFrame | pd.DataFrame, etc.).
DataFrame = TypeVar("DataFrame", *dataframe_types)
class CloudService:
"""A class to manage cloud service"""
def __init__(self, api_key: str):
self.session = PrivateSession(api_key)
def _upload_file_dataset_meta(
self,
name: str,
file_type: str = Literal["parquet", "csv"],
is_public: bool = True,
kind: Literal["TEMP", "FILE"] = "FILE"
) -> Dict[str, Any]:
param_file_type_map = {
"csv": "TEXT_FILE",
"parquet": "PARQUET"
}
url = f"{GlobalVarManager.kanaries_api_host}/dataset/upload"
params = {
"name": name,
"fileName": name + "." + file_type,
"isPublic": is_public,
"desc": "",
"meta": {
"extractHeader": True,
"encoding": "utf-8",
"type": param_file_type_map.get(file_type, "TEXT_FILE"),
"separator": ",",
},
"type": kind,
}
resp = self.session.post(url, json=params, timeout=10)
return resp.json()["data"]
def _upload_dataset_callback(self, dataset_id: str, fid_list: List[str]) -> Dict[str, Any]:
url = f"{GlobalVarManager.kanaries_api_host}/dataset/callback"
params = {
"datasetId": dataset_id,
"fidList": fid_list
}
resp = self.session.post(url, json=params, timeout=10)
return resp.json()
def _create_chart(
self,
*,
dataset_id: str,
name: str,
meta: str,
workflow: List[Dict[str, Any]],
thumbnail: str,
is_public: bool,
) -> Dict[str, Any]:
url = f"{GlobalVarManager.kanaries_api_host}/chart"
params = {
"datasetId": dataset_id,
"meta": meta,
"query": json.dumps({"datasetId": dataset_id, "workflow": workflow}),
"config": "{}",
"name": name,
"desc": "",
"isPublic": is_public,
"chartType": "",
"thumbnail": thumbnail,
}
resp = self.session.post(url, json={"chart": params}, timeout=10)
return resp.json()["data"]
def _create_notebook(self, title: str, chart_id: str) -> Dict[str, Any]:
url = f"{GlobalVarManager.kanaries_api_host}/notebook"
markdown = "\n".join([
"# " + title,
f"::chart[{chart_id}]"
])
params = {
"title": title,
"markdown": markdown,
"isPublic": True,
}
resp = self.session.post(url, json=params, timeout=10)
return resp.json()["data"]
def _get_chart_by_name(self, name: str, workspace_name: str) -> Optional[Dict[str, Any]]:
url = f"{GlobalVarManager.kanaries_main_host}/api/pygwalker/chart"
try:
resp = self.session.get(url, params={"chartName": name, "workspaceName": workspace_name}, timeout=15)
except CloudFunctionError as e:
if e.code == ErrorCode.CLOUD_CHART_NOT_FOUND:
return None
raise e
return resp.json()["data"]
def _get_auth_code_info(self) -> Dict[str, Any]:
url = f"{GlobalVarManager.kanaries_api_host}/auth/code"
resp = self.session.get(url, timeout=15)
return resp.json()["data"]
def write_config_to_cloud(self, path: str, config: str):
"""Write config to cloud"""
url = f"{GlobalVarManager.kanaries_api_host}/pygConfig"
self.session.put(url, json={
"path": path,
"config": config
})
def get_cloud_graphic_walker(self, workspace_name: str, chart_name: str) -> str:
chart_data = self._get_chart_by_name(chart_name, workspace_name)
if chart_data is None:
raise CloudFunctionError("chart not exists", code=ErrorCode.CLOUD_CHART_NOT_FOUND)
try:
auto_code_info = self._get_auth_code_info()
except CloudFunctionError:
auto_code_info = {}
pre_redirect_uri = _generate_chart_pre_redirect_uri(chart_data["chartId"], auto_code_info)
return pre_redirect_uri
def create_cloud_graphic_walker(
self,
*,
chart_name: str,
workspace_name: str,
dataset_content: io.BytesIO,
field_specs: List[Dict[str, Any]],
) -> str:
fid_list = [field["fid"] for field in field_specs]
meta = json.dumps({
"dataSources": [{
"id": "dataSource-0",
"data": []
}],
"datasets": [{
"id": 'dataset-0',
"name": 'DataSet',
"rawFields": field_specs,
"dsId": 'dataSource-0',
}],
"specList": []
})
chart_data = self._get_chart_by_name(chart_name, workspace_name)
if chart_data is not None:
raise CloudFunctionError("chart name already exists", code=ErrorCode.UNKNOWN_ERROR)
dataset_name = f"pygwalker_{datetime.now().strftime('%Y%m%d%H%M')}"
dataset_info = self._upload_file_dataset_meta(dataset_name, "parquet")
dataset_id = dataset_info["datasetId"]
upload_url = dataset_info["uploadUrl"]
_upload_file_to_s3(upload_url, dataset_content)
self._upload_dataset_callback(dataset_id, fid_list)
self._create_chart(
dataset_id=dataset_id,
name=chart_name,
meta=meta,
workflow={},
thumbnail="",
is_public=True
)
def get_kanaries_user_info(self) -> Dict[str, Any]:
url = f"{GlobalVarManager.kanaries_api_host}/user/info"
resp = self.session.get(url, timeout=15)
return resp.json()["data"]
def get_spec_by_text(self, metas: List[Dict[str, Any]], text: str) -> Dict[str, Any]:
url = f"{GlobalVarManager.kanaries_api_host}/vis/text2gw"
resp = self.session.post(
url,
json={"metas": metas, "messages": [{"role": "user", "content": text}]},
timeout=15
)
return resp.json()["data"]
def create_file_dataset(
self,
dataset_name: str,
dataset_content: io.BytesIO,
fid_list: List[str],
is_public: bool,
kind: Literal["TEMP", "FILE"]
) -> str:
dataset_info = self._upload_file_dataset_meta(dataset_name, "parquet", is_public, kind=kind)
dataset_id = dataset_info["datasetId"]
upload_url = dataset_info["uploadUrl"]
_upload_file_to_s3(upload_url, dataset_content)
self._upload_dataset_callback(dataset_id, fid_list)
return dataset_id
def create_datasource(
self,
name: str,
database_url: str,
database_type: str,
) -> str:
url = f"{GlobalVarManager.kanaries_api_host}/datasource"
params = {
"name": name,
"connectionconfiguration": {
"url": database_url,
},
"datasourceType": database_type,
"desc": ""
}
resp = self.session.post(url, json=params, timeout=15)
return resp.json()["data"]["datasourceId"]
def get_datasource_by_name(self, name: str) -> Optional[str]:
url = f"{GlobalVarManager.kanaries_api_host}/datasource/search"
resp = self.session.post(url, params={"fullName": name}, timeout=15)
datasources = resp.json()["data"]["datasourceList"]
return datasources[0]["id"] if datasources else None
def create_database_dataset(
self,
name: str,
datasource_id: str,
is_public: bool,
view_sql: str,
) -> str:
url = f"{GlobalVarManager.kanaries_api_host}/dataset"
params = {
"name": name,
"desc": "",
"datasourceId": datasource_id,
"isPublic": is_public,
"type": "DATABASE",
"meta": {
"viewSql": view_sql,
}
}
resp = self.session.post(url, json=params, timeout=60)
return resp.json()["data"]["datasetId"]
def query_from_dataset(self, dataset_id: str, payload: Dict[str, Any]) -> List[Dict[str, Any]]:
url = f"{GlobalVarManager.kanaries_api_host}/public/query"
params = {
"datasetId": dataset_id,
"query": payload,
}
resp = self.session.post(url, json=params, timeout=15)
return resp.json()["data"]
def batch_query_from_dataset(self, dataset_id: str, query_list: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
url = f"{GlobalVarManager.kanaries_api_host}/v1/dataset/{dataset_id}/query"
params = {
"query": query_list,
}
resp = self.session.post(url, json=params, timeout=60)
return resp.json()["data"]
def create_cloud_dataset(
    self,
    data_parser: BaseDataParser,
    name: str,
    is_public: bool,
    is_temp_dataset: bool = False
) -> str:
    """Publish the parser's data as a cloud dataset and return its id.

    Connector-backed data becomes a DATABASE dataset (reusing an existing
    datasource with the same derived name when possible); everything else is
    uploaded as a parquet file dataset.
    """
    if name is None:
        name = f"pygwalker_{datetime.now().strftime('%Y%m%d%H%M')}"
    # `dataset_tpye` (sic) is the type tag exposed by the parser API.
    if data_parser.dataset_tpye == "cloud_dataset":
        raise ValueError("dataset is already a cloud dataset")
    if not data_parser.dataset_tpye.startswith("connector"):
        # Plain dataframe-like data: ship it up as a parquet file.
        return self.create_file_dataset(
            name,
            data_parser.to_parquet(),
            [field["name"] for field in data_parser.raw_fields],
            is_public,
            kind="TEMP" if is_temp_dataset else "FILE"
        )
    connector = data_parser.conn
    # Datasource names are derived from the connection URL so the same
    # database is only registered once.
    datasource_name = "pygwalker_" + hashlib.md5(connector.url.encode()).hexdigest()
    datasource_id = self.get_datasource_by_name(datasource_name)
    if datasource_id is None:
        datasource_id = self.create_datasource(
            datasource_name,
            connector.url,
            _get_database_type_from_dialect_name(connector.dialect_name)
        )
    return self.create_database_dataset(
        name,
        datasource_id,
        is_public,
        connector.view_sql
    )
def create_dashboard(
    self,
    *,
    name: str,
    layout: List[Any],
    config: Dict[str, Any],
    is_public: bool
) -> str:
    """Create a dashboard report and return its id."""
    report_url = f"{GlobalVarManager.kanaries_api_host}/report"
    payload = {
        "title": name,
        "version": "0.0.1",
        "desc": "",
        "size": {},
        "config": config,
        "layout": layout,
        "public": is_public,
    }
    response = self.session.post(report_url, json=payload, timeout=60)
    return response.json()["data"]["id"]
def upload_cloud_chart(
    self,
    *,
    chart_name: str,
    dataset_name: str,
    data_parser: BaseDataParser,
    workflow: List[Dict[str, Any]],
    spec_list: List[Dict[str, Any]],
    is_public: bool,
) -> str:
    """Upload the data as a (private) dataset and create a chart over it.

    Raises CloudFunctionError when *chart_name* already exists in the
    caller's workspace. Returns the new chart id.
    """
    workspace_name = self.get_kanaries_user_info()["workspaceName"]
    if self._get_chart_by_name(chart_name, workspace_name) is not None:
        raise CloudFunctionError("chart name already exists", code=ErrorCode.UNKNOWN_ERROR)
    dataset_id = self.create_cloud_dataset(data_parser, dataset_name, False)
    meta = json.dumps({
        "dataSources": [{
            "id": "dataSource-0",
            "data": []
        }],
        "datasets": [{
            "id": 'dataset-0',
            "name": 'DataSet',
            "rawFields": data_parser.raw_fields,
            "dsId": 'dataSource-0',
        }],
        "specList": spec_list
    })
    chart_info = self._create_chart(
        dataset_id=dataset_id,
        name=chart_name,
        meta=meta,
        workflow=workflow,
        thumbnail="",
        is_public=is_public
    )
    return chart_info["chartId"]
def upload_cloud_dashboard(
    self,
    *,
    dashboard_name: str,
    dataset_name: str,
    data_parser: BaseDataParser,
    workflow_list: List[List[Dict[str, Any]]],
    spec_list: List[Dict[str, Any]],
    is_public: bool,
    dark: str,
) -> str:
    """Create one chart per spec, then assemble them into a tabbed dashboard.

    Returns the new dashboard id.
    """
    dataset_id = self.create_cloud_dataset(data_parser, dataset_name, False)
    chart_info_list = []
    for spec, workflow in zip(spec_list, workflow_list):
        # Chart names are prefixed with the dashboard name to stay unique.
        meta = json.dumps({
            "dataSources": [{
                "id": "dataSource-0",
                "data": []
            }],
            "datasets": [{
                "id": 'dataset-0',
                "name": 'DataSet',
                "rawFields": data_parser.raw_fields,
                "dsId": 'dataSource-0',
            }],
            "specList": [spec]
        })
        chart_info_list.append(self._create_chart(
            dataset_id=dataset_id,
            name=f"{dashboard_name}-{spec['name']}",
            meta=meta,
            workflow=workflow,
            thumbnail="",
            is_public=is_public
        ))
    tab_items = [
        {"chartId": chart_info["chartId"], "title": spec["name"]}
        for spec, chart_info in zip(spec_list, chart_info_list)
    ]
    return self.create_dashboard(
        name=dashboard_name,
        is_public=is_public,
        config={
            "items": [
                {"id": "dashboard_title", "content": f"# {dashboard_name}", "type": "text", "name": "Text"},
                {
                    "id": "chart_tab",
                    "dark": dark,
                    "name": "Charts",
                    "type": "data",
                    "tabs": tab_items,
                    "mode": "gwtabs",
                }
            ],
        },
        layout=[
            {"i": "dashboard_title", "h": 2, "w": 4, "x": 0, "y": 0},
            {"i": "chart_tab", "h": 20, "w": 4, "x": 0, "y": 2},
        ]
    )
def get_parser(
    dataset: Union[DataFrame, Connector, str],
    field_specs: Optional[Dict[str, FieldSpec]] = None,
    infer_string_to_date: bool = False,
    infer_number_to_dimension: bool = True,
    other_params: Optional[Dict[str, Any]] = None
) -> BaseDataParser:
    """Instantiate the data parser class that matches *dataset*'s type."""
    parser_cls = _get_data_parser(dataset)
    return parser_cls(
        dataset,
        {} if field_specs is None else field_specs,
        infer_string_to_date,
        infer_number_to_dimension,
        {} if other_params is None else other_params,
    )
The provided code snippet includes the necessary dependencies for implementing the `create_cloud_walker` function. Write a Python function `def create_cloud_walker( dataset: DataFrame, *, chart_name: str, workspace_name: str, field_specs: Optional[Dict[str, FieldSpec]] = None, kanaries_api_key: str = "" ) -> str` that solves the following problem:
(deprecated) Create a pygwalker chart in Kanaries cloud. Args: - dataset (pl.DataFrame | pd.DataFrame, optional): dataframe. Kwargs: - chart_name (str): pygwalker chart name in Kanaries cloud. - workspace_name (str): Kanaries workspace name. - field_specs (Dict[str, FieldSpec]): specifications of some fields; they'll be automatically inferred from `df` for any fields that are not specified. Returns: str: pygwalker URL in Kanaries cloud.
Here is the function:
def create_cloud_walker(
    dataset: DataFrame,
    *,
    chart_name: str,
    workspace_name: str,
    field_specs: Optional[Dict[str, FieldSpec]] = None,
    kanaries_api_key: str = ""
) -> str:
    """
    (deprecated)
    Create a pygwalker in kanaries cloud
    Args:
        - dataset (pl.DataFrame | pd.DataFrame, optional): dataframe.
    Kwargs:
        - chart_name (str): pygwalker chart name in kanaries cloud.
        - workspace_name (str): kanaries workspace name.
        - field_specs (Dict[str, FieldSpec]): Specifications of some fields. They'll be automatically inferred from `df` if some fields are not specified.
    Returns:
        str: pygwalker url in kanaries cloud
    """
    if field_specs is None:
        field_specs = {}
    cloud_service = CloudService(kanaries_api_key)
    # BUG FIX: this was `get_parser(dataset, False, field_specs)`, which passed
    # False as `field_specs` and the spec dict as `infer_string_to_date`
    # (see get_parser's signature). Pass each argument in its proper slot.
    data_parser = get_parser(dataset, field_specs, False)
    cloud_service.create_cloud_graphic_walker(
        chart_name=chart_name,
        workspace_name=workspace_name,
        dataset_content=data_parser.to_parquet(),
        field_specs=data_parser.raw_fields
    )
165,747 | from typing import Dict, Optional, Union
from datetime import datetime
from pygwalker.data_parsers.base import FieldSpec
from pygwalker._typing import DataFrame
from pygwalker.utils.display import display_html
from pygwalker.data_parsers.database_parser import Connector
from pygwalker.services.cloud_service import CloudService
from pygwalker.services.data_parsers import get_parser
from pygwalker.services.global_var import GlobalVarManager
def display_html(
    html: Union[str, HTML, ipywidgets.Widget],
    *,
    slot_id: Optional[str] = None
):
    """Judge the presentation method to be used based on the context.

    Args:
    - html (str | HTML | ipywidgets.Widget): content to display; a plain
      string is wrapped in an HTML widget first.

    Kwargs:
    - slot_id (Optional[str]): when given, reuse (or create) a display handle
      with this id so repeated calls update the same output slot in place.

    Fix: the annotation previously read `slot_id: str = None`; since None is
    both a valid value and the default, the parameter is `Optional[str]`.
    """
    widget = HTML(html) if isinstance(html, str) else html
    if slot_id is None:
        display(widget)
        return
    handler = DISPLAY_HANDLER.get(slot_id)
    if handler is None:
        # First render for this slot: remember the handle for later updates.
        DISPLAY_HANDLER[slot_id] = display(widget, display_id=slot_id)
    else:
        handler.update(widget)
class CloudService:
    """Thin client for the kanaries cloud HTTP API."""

    def __init__(self, api_key: str):
        # Every request goes through a session authenticated with this key.
        self.session = PrivateSession(api_key)
def _upload_file_dataset_meta(
    self,
    name: str,
    file_type: Literal["parquet", "csv"] = "parquet",
    is_public: bool = True,
    kind: Literal["TEMP", "FILE"] = "FILE"
) -> Dict[str, Any]:
    """Register a file dataset's metadata; return the upload descriptor
    (contains `datasetId` and a presigned `uploadUrl`).

    Fix: `file_type` was declared as `file_type: str = Literal[...]`, which
    made the *default value* a typing.Literal object instead of a string
    (a defaulted call would crash on `name + "." + file_type`). It is now a
    proper Literal annotation with "parquet" as the default, matching every
    in-repo caller.
    """
    param_file_type_map = {
        "csv": "TEXT_FILE",
        "parquet": "PARQUET"
    }
    url = f"{GlobalVarManager.kanaries_api_host}/dataset/upload"
    params = {
        "name": name,
        "fileName": name + "." + file_type,
        "isPublic": is_public,
        "desc": "",
        "meta": {
            "extractHeader": True,
            "encoding": "utf-8",
            "type": param_file_type_map.get(file_type, "TEXT_FILE"),
            "separator": ",",
        },
        "type": kind,
    }
    resp = self.session.post(url, json=params, timeout=10)
    return resp.json()["data"]
def _upload_dataset_callback(self, dataset_id: str, fid_list: List[str]) -> Dict[str, Any]:
    """Notify the backend that the dataset file upload finished."""
    callback_url = f"{GlobalVarManager.kanaries_api_host}/dataset/callback"
    response = self.session.post(
        callback_url,
        json={"datasetId": dataset_id, "fidList": fid_list},
        timeout=10,
    )
    return response.json()
def _create_chart(
    self,
    *,
    dataset_id: str,
    name: str,
    meta: str,
    workflow: List[Dict[str, Any]],
    thumbnail: str,
    is_public: bool,
) -> Dict[str, Any]:
    """Create a chart bound to *dataset_id*; return the server's chart record."""
    chart_payload = {
        "datasetId": dataset_id,
        "meta": meta,
        "query": json.dumps({"datasetId": dataset_id, "workflow": workflow}),
        "config": "{}",
        "name": name,
        "desc": "",
        "isPublic": is_public,
        "chartType": "",
        "thumbnail": thumbnail,
    }
    response = self.session.post(
        f"{GlobalVarManager.kanaries_api_host}/chart",
        json={"chart": chart_payload},
        timeout=10,
    )
    return response.json()["data"]
def _create_notebook(self, title: str, chart_id: str) -> Dict[str, Any]:
    """Create a public notebook embedding the chart; return its record."""
    notebook_url = f"{GlobalVarManager.kanaries_api_host}/notebook"
    # Notebook body is markdown: a title heading plus a chart directive.
    markdown = "\n".join([
        "# " + title,
        f"::chart[{chart_id}]"
    ])
    response = self.session.post(
        notebook_url,
        json={"title": title, "markdown": markdown, "isPublic": True},
        timeout=10,
    )
    return response.json()["data"]
def _get_chart_by_name(self, name: str, workspace_name: str) -> Optional[Dict[str, Any]]:
    """Fetch a chart record by name; return None when the chart does not exist."""
    lookup_url = f"{GlobalVarManager.kanaries_main_host}/api/pygwalker/chart"
    try:
        response = self.session.get(
            lookup_url,
            params={"chartName": name, "workspaceName": workspace_name},
            timeout=15,
        )
    except CloudFunctionError as e:
        # "Not found" is an expected outcome here, not a failure.
        if e.code == ErrorCode.CLOUD_CHART_NOT_FOUND:
            return None
        raise e
    return response.json()["data"]
def _get_auth_code_info(self) -> Dict[str, Any]:
    """Fetch a one-time auth-code payload used to build authenticated links."""
    response = self.session.get(f"{GlobalVarManager.kanaries_api_host}/auth/code", timeout=15)
    return response.json()["data"]
def write_config_to_cloud(self, path: str, config: str):
    """Write config to cloud.

    Fix: this was the only request in the client issued without a timeout,
    so a stalled connection could hang the caller indefinitely; it now uses
    the same 15s budget as the other short requests.
    """
    url = f"{GlobalVarManager.kanaries_api_host}/pygConfig"
    self.session.put(url, json={
        "path": path,
        "config": config
    }, timeout=15)
def get_cloud_graphic_walker(self, workspace_name: str, chart_name: str) -> str:
    """Return a pre-redirect URL for viewing an existing cloud chart."""
    chart_data = self._get_chart_by_name(chart_name, workspace_name)
    if chart_data is None:
        raise CloudFunctionError("chart not exists", code=ErrorCode.CLOUD_CHART_NOT_FOUND)
    # Auth info is best-effort: the link still works anonymously without it.
    try:
        auto_code_info = self._get_auth_code_info()
    except CloudFunctionError:
        auto_code_info = {}
    return _generate_chart_pre_redirect_uri(chart_data["chartId"], auto_code_info)
def create_cloud_graphic_walker(
    self,
    *,
    chart_name: str,
    workspace_name: str,
    dataset_content: io.BytesIO,
    field_specs: List[Dict[str, Any]],
) -> str:
    """Upload *dataset_content* as a new dataset and create *chart_name* over it.

    Raises:
        CloudFunctionError: if a chart with this name already exists.

    Returns:
        str: the new chart's id. (Fix: the function was annotated `-> str`
        but previously fell off the end and returned None; it now returns
        the chart id, consistent with `upload_cloud_chart`.)
    """
    fid_list = [field["fid"] for field in field_specs]
    meta = json.dumps({
        "dataSources": [{
            "id": "dataSource-0",
            "data": []
        }],
        "datasets": [{
            "id": 'dataset-0',
            "name": 'DataSet',
            "rawFields": field_specs,
            "dsId": 'dataSource-0',
        }],
        "specList": []
    })
    chart_data = self._get_chart_by_name(chart_name, workspace_name)
    if chart_data is not None:
        raise CloudFunctionError("chart name already exists", code=ErrorCode.UNKNOWN_ERROR)
    dataset_name = f"pygwalker_{datetime.now().strftime('%Y%m%d%H%M')}"
    dataset_info = self._upload_file_dataset_meta(dataset_name, "parquet")
    dataset_id = dataset_info["datasetId"]
    upload_url = dataset_info["uploadUrl"]
    _upload_file_to_s3(upload_url, dataset_content)
    self._upload_dataset_callback(dataset_id, fid_list)
    # NOTE(review): `workflow={}` is a dict while _create_chart annotates the
    # parameter as a list — presumably the server accepts both; confirm.
    chart_info = self._create_chart(
        dataset_id=dataset_id,
        name=chart_name,
        meta=meta,
        workflow={},
        thumbnail="",
        is_public=True
    )
    return chart_info["chartId"]
def get_kanaries_user_info(self) -> Dict[str, Any]:
    """Fetch the authenticated user's profile (includes `workspaceName`)."""
    response = self.session.get(f"{GlobalVarManager.kanaries_api_host}/user/info", timeout=15)
    return response.json()["data"]
def get_spec_by_text(self, metas: List[Dict[str, Any]], text: str) -> Dict[str, Any]:
    """Ask the text-to-graphic-walker service for a spec matching *text*."""
    response = self.session.post(
        f"{GlobalVarManager.kanaries_api_host}/vis/text2gw",
        json={"metas": metas, "messages": [{"role": "user", "content": text}]},
        timeout=15
    )
    return response.json()["data"]
def create_file_dataset(
    self,
    dataset_name: str,
    dataset_content: io.BytesIO,
    fid_list: List[str],
    is_public: bool,
    kind: Literal["TEMP", "FILE"]
) -> str:
    """Upload a parquet payload as a file dataset; return the dataset id."""
    dataset_info = self._upload_file_dataset_meta(dataset_name, "parquet", is_public, kind=kind)
    # Push the bytes to the presigned URL, then confirm the upload so the
    # backend registers the field ids.
    _upload_file_to_s3(dataset_info["uploadUrl"], dataset_content)
    self._upload_dataset_callback(dataset_info["datasetId"], fid_list)
    return dataset_info["datasetId"]
def create_datasource(
    self,
    name: str,
    database_url: str,
    database_type: str,
) -> str:
    """Register a database connection as a kanaries datasource; return its id."""
    payload = {
        "name": name,
        "connectionconfiguration": {"url": database_url},
        "datasourceType": database_type,
        "desc": "",
    }
    response = self.session.post(
        f"{GlobalVarManager.kanaries_api_host}/datasource",
        json=payload,
        timeout=15,
    )
    return response.json()["data"]["datasourceId"]
def get_datasource_by_name(self, name: str) -> Optional[str]:
    """Search datasources by full name; return the first match's id, else None."""
    search_url = f"{GlobalVarManager.kanaries_api_host}/datasource/search"
    response = self.session.post(search_url, params={"fullName": name}, timeout=15)
    matches = response.json()["data"]["datasourceList"]
    if not matches:
        return None
    return matches[0]["id"]
def create_database_dataset(
    self,
    name: str,
    datasource_id: str,
    is_public: bool,
    view_sql: str,
) -> str:
    """Create a DATABASE-typed dataset backed by *view_sql*; return its id."""
    payload = {
        "name": name,
        "desc": "",
        "datasourceId": datasource_id,
        "isPublic": is_public,
        "type": "DATABASE",
        "meta": {"viewSql": view_sql},
    }
    response = self.session.post(
        f"{GlobalVarManager.kanaries_api_host}/dataset",
        json=payload,
        timeout=60,
    )
    return response.json()["data"]["datasetId"]
def query_from_dataset(self, dataset_id: str, payload: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Run one query payload against a public dataset and return the rows."""
    query_url = f"{GlobalVarManager.kanaries_api_host}/public/query"
    body = {"datasetId": dataset_id, "query": payload}
    response = self.session.post(query_url, json=body, timeout=15)
    return response.json()["data"]
def batch_query_from_dataset(self, dataset_id: str, query_list: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Run several queries against a dataset in a single round trip."""
    batch_url = f"{GlobalVarManager.kanaries_api_host}/v1/dataset/{dataset_id}/query"
    response = self.session.post(batch_url, json={"query": query_list}, timeout=60)
    return response.json()["data"]
def create_cloud_dataset(
    self,
    data_parser: BaseDataParser,
    name: str,
    is_public: bool,
    is_temp_dataset: bool = False
) -> str:
    """Publish the parser's data as a cloud dataset and return its id.

    Connector-backed data becomes a DATABASE dataset (reusing an existing
    datasource with the same derived name when possible); everything else is
    uploaded as a parquet file dataset.
    """
    if name is None:
        name = f"pygwalker_{datetime.now().strftime('%Y%m%d%H%M')}"
    # `dataset_tpye` (sic) is the type tag exposed by the parser API.
    if data_parser.dataset_tpye == "cloud_dataset":
        raise ValueError("dataset is already a cloud dataset")
    if not data_parser.dataset_tpye.startswith("connector"):
        # Plain dataframe-like data: ship it up as a parquet file.
        return self.create_file_dataset(
            name,
            data_parser.to_parquet(),
            [field["name"] for field in data_parser.raw_fields],
            is_public,
            kind="TEMP" if is_temp_dataset else "FILE"
        )
    connector = data_parser.conn
    # Datasource names are derived from the connection URL so the same
    # database is only registered once.
    datasource_name = "pygwalker_" + hashlib.md5(connector.url.encode()).hexdigest()
    datasource_id = self.get_datasource_by_name(datasource_name)
    if datasource_id is None:
        datasource_id = self.create_datasource(
            datasource_name,
            connector.url,
            _get_database_type_from_dialect_name(connector.dialect_name)
        )
    return self.create_database_dataset(
        name,
        datasource_id,
        is_public,
        connector.view_sql
    )
def create_dashboard(
    self,
    *,
    name: str,
    layout: List[Any],
    config: Dict[str, Any],
    is_public: bool
) -> str:
    """Create a dashboard report and return its id."""
    report_url = f"{GlobalVarManager.kanaries_api_host}/report"
    payload = {
        "title": name,
        "version": "0.0.1",
        "desc": "",
        "size": {},
        "config": config,
        "layout": layout,
        "public": is_public,
    }
    response = self.session.post(report_url, json=payload, timeout=60)
    return response.json()["data"]["id"]
def upload_cloud_chart(
    self,
    *,
    chart_name: str,
    dataset_name: str,
    data_parser: BaseDataParser,
    workflow: List[Dict[str, Any]],
    spec_list: List[Dict[str, Any]],
    is_public: bool,
) -> str:
    """Upload the data as a (private) dataset and create a chart over it.

    Raises CloudFunctionError when *chart_name* already exists in the
    caller's workspace. Returns the new chart id.
    """
    workspace_name = self.get_kanaries_user_info()["workspaceName"]
    if self._get_chart_by_name(chart_name, workspace_name) is not None:
        raise CloudFunctionError("chart name already exists", code=ErrorCode.UNKNOWN_ERROR)
    dataset_id = self.create_cloud_dataset(data_parser, dataset_name, False)
    meta = json.dumps({
        "dataSources": [{
            "id": "dataSource-0",
            "data": []
        }],
        "datasets": [{
            "id": 'dataset-0',
            "name": 'DataSet',
            "rawFields": data_parser.raw_fields,
            "dsId": 'dataSource-0',
        }],
        "specList": spec_list
    })
    chart_info = self._create_chart(
        dataset_id=dataset_id,
        name=chart_name,
        meta=meta,
        workflow=workflow,
        thumbnail="",
        is_public=is_public
    )
    return chart_info["chartId"]
def upload_cloud_dashboard(
    self,
    *,
    dashboard_name: str,
    dataset_name: str,
    data_parser: BaseDataParser,
    workflow_list: List[List[Dict[str, Any]]],
    spec_list: List[Dict[str, Any]],
    is_public: bool,
    dark: str,
) -> str:
    """Create one chart per spec, then assemble them into a tabbed dashboard.

    Returns the new dashboard id.
    """
    dataset_id = self.create_cloud_dataset(data_parser, dataset_name, False)
    chart_info_list = []
    for spec, workflow in zip(spec_list, workflow_list):
        # Chart names are prefixed with the dashboard name to stay unique.
        meta = json.dumps({
            "dataSources": [{
                "id": "dataSource-0",
                "data": []
            }],
            "datasets": [{
                "id": 'dataset-0',
                "name": 'DataSet',
                "rawFields": data_parser.raw_fields,
                "dsId": 'dataSource-0',
            }],
            "specList": [spec]
        })
        chart_info_list.append(self._create_chart(
            dataset_id=dataset_id,
            name=f"{dashboard_name}-{spec['name']}",
            meta=meta,
            workflow=workflow,
            thumbnail="",
            is_public=is_public
        ))
    tab_items = [
        {"chartId": chart_info["chartId"], "title": spec["name"]}
        for spec, chart_info in zip(spec_list, chart_info_list)
    ]
    return self.create_dashboard(
        name=dashboard_name,
        is_public=is_public,
        config={
            "items": [
                {"id": "dashboard_title", "content": f"# {dashboard_name}", "type": "text", "name": "Text"},
                {
                    "id": "chart_tab",
                    "dark": dark,
                    "name": "Charts",
                    "type": "data",
                    "tabs": tab_items,
                    "mode": "gwtabs",
                }
            ],
        },
        layout=[
            {"i": "dashboard_title", "h": 2, "w": 4, "x": 0, "y": 0},
            {"i": "chart_tab", "h": 20, "w": 4, "x": 0, "y": 2},
        ]
    )
The provided code snippet includes the necessary dependencies for implementing the `walk_on_cloud` function. Write a Python function `def walk_on_cloud(workspace_name: str, chart_name: str, kanaries_api_key: str = "")` that solves the following problem:
(deprecated) Render a pygwalker chart stored in Kanaries cloud. Args: - chart_name (str): pygwalker chart name in Kanaries cloud. - workspace_name (str): Kanaries workspace name.
Here is the function:
def walk_on_cloud(workspace_name: str, chart_name: str, kanaries_api_key: str = ""):
    """
    (deprecated)
    render a pygwalker in kanaries cloud
    Args:
        - chart_name (str): pygwalker chart name in kanaries cloud.
        - workspace_name (str): kanaries workspace name.
    """
    service = CloudService(kanaries_api_key)
    cloud_url = service.get_cloud_graphic_walker(workspace_name, chart_name)
    # Embed the hosted chart in an iframe inside the current notebook.
    display_html(f"""
    <iframe
        width="100%"
        height="900px"
        src="{cloud_url}"
        frameborder="0"
        allow="clipboard-read; clipboard-write"
        allowfullscreen>
    </iframe>
    """)
165,748 | from typing import Union, Dict, Optional
import inspect
from typing_extensions import Literal
from .pygwalker import PygWalker
from pygwalker.data_parsers.base import FieldSpec
from pygwalker.data_parsers.database_parser import Connector
from pygwalker._typing import DataFrame
from pygwalker.services.format_invoke_walk_code import get_formated_spec_params_code_from_frame
from pygwalker.services.kaggle import auto_set_kanaries_api_key_on_kaggle, adjust_kaggle_default_font_size
from pygwalker.utils.execute_env_check import check_convert, get_kaggle_run_type, check_kaggle
from pygwalker.utils.check_walker_params import check_expired_params
class PygWalker:
    """PygWalker"""

    def __init__(
        self,
        *,
        gid: Optional[Union[int, str]],
        dataset: Union[DataFrame, Connector, str],
        field_specs: Dict[str, Any],
        spec: str,
        source_invoke_code: str,
        theme_key: Literal['vega', 'g2'],
        dark: Literal['media', 'light', 'dark'],
        show_cloud_tool: Optional[bool],
        use_preview: bool,
        use_kernel_calc: Optional[bool],
        use_cloud_calc: Optional[bool],
        use_save_tool: bool,
        is_export_dataframe: bool,
        kanaries_api_key: str,
        default_tab: Literal["data", "vis"],
        gw_mode: Literal["explore", "renderer", "filter_renderer", "table"],
        **kwargs
    ):
        """Build the kernel-side state for one walker instance.

        `None` for gid/show_cloud_tool/use_kernel_calc means "auto-decide"
        (see the inline comments for each default rule).
        """
        # Explicit key takes precedence over the globally configured one.
        self.kanaries_api_key = kanaries_api_key or GlobalVarManager.kanaries_api_key
        if gid is None:
            self.gid = generate_hash_code()
        else:
            self.gid = gid
        self.cloud_service = CloudService(self.kanaries_api_key)
        self.data_parser = self._get_data_parser(
            dataset=dataset,
            field_specs=field_specs,
            use_cloud_calc=use_cloud_calc,
            kanaries_api_key=self.kanaries_api_key,
            cloud_service=self.cloud_service
        )
        # Default: compute in the kernel when the data is too large to embed
        # in the widget payload.
        self.use_kernel_calc = self.data_parser.data_size > JUPYTER_WIDGETS_BYTE_LIMIT if use_kernel_calc is None else use_kernel_calc
        # With kernel calc on, only a 500-record sample goes to the frontend.
        self.origin_data_source = self.data_parser.to_records(500 if self.use_kernel_calc else None)
        self.field_specs = self.data_parser.raw_fields
        self.spec = spec
        self.source_invoke_code = source_invoke_code
        self.theme_key = theme_key
        self.dark = dark
        self.data_source_id = rand_str()
        self.other_props = kwargs
        self.tunnel_id = "tunnel!"
        # Cloud tooling defaults to on only when an API key is available.
        self.show_cloud_tool = bool(self.kanaries_api_key) if show_cloud_tool is None else show_cloud_tool
        self.use_preview = use_preview
        self._init_spec(spec, self.field_specs)
        self.use_save_tool = use_save_tool
        self.parse_dsl_type = self._get_parse_dsl_type(self.data_parser)
        self.gw_mode = gw_mode
        # `dataset_tpye` (sic) is the type tag exposed by the parser API.
        self.dataset_type = self.data_parser.dataset_tpye
        self.is_export_dataframe = is_export_dataframe
        self._last_exported_dataframe = None
        self.default_tab = default_tab
        self.use_cloud_calc = use_cloud_calc
        check_update()
        # Temporarily adapt to pandas import module bug
        if self.use_kernel_calc:
            try:
                self.data_parser.get_datas_by_sql("SELECT 1 FROM pygwalker_mid_table LIMIT 1")
            except Exception:
                pass
        # Offline privacy mode force-disables all cloud features.
        if GlobalVarManager.privacy == "offline":
            self.show_cloud_tool = False
def last_exported_dataframe(self) -> Optional[pd.DataFrame]:
    # Returns the dataframe captured by the most recent export endpoint,
    # or None if nothing has been exported yet.
    # NOTE(review): reads like an accessor; presumably decorated with
    # @property in the original source — confirm upstream.
    return self._last_exported_dataframe
def _get_data_parser(
    self,
    *,
    dataset: Union[DataFrame, Connector, str],
    field_specs: Dict[str, Any],
    use_cloud_calc: bool,
    kanaries_api_key: str,
    cloud_service: CloudService
) -> BaseDataParser:
    """Build the local parser; with cloud calc, re-parse via a temp cloud dataset."""
    local_parser = get_parser(
        dataset,
        field_specs,
        other_params={"kanaries_api_key": kanaries_api_key}
    )
    if not use_cloud_calc:
        return local_parser
    # Cloud calc: mirror the data into a private temporary dataset and hand
    # back a parser that queries it remotely.
    temp_dataset_id = cloud_service.create_cloud_dataset(
        local_parser,
        f"temp_{rand_str()}",
        False,
        True
    )
    return get_parser(
        temp_dataset_id,
        field_specs,
        other_params={"kanaries_api_key": kanaries_api_key}
    )
def _get_parse_dsl_type(self, data_parser: BaseDataParser) -> Literal["server", "client"]:
    """Pick where query DSL parsing happens: remote datasets parse server-side."""
    dataset_kind = data_parser.dataset_tpye
    is_remote = dataset_kind == "cloud_dataset" or dataset_kind.startswith("connector")
    return "server" if is_remote else "client"
def _init_spec(self, spec: Dict[str, Any], field_specs: List[Dict[str, Any]]):
    """Load the spec source, backfill newly-added fields, and cache metadata."""
    spec_obj, spec_type = get_spec_json(spec)
    # Falsy configs pass through untouched; otherwise merge in fields that
    # were added to the data since the spec was saved.
    merged_config = spec_obj["config"] and fill_new_fields(spec_obj["config"], field_specs)
    self._update_vis_spec(merged_config)
    self.spec_type = spec_type
    self._chart_map = self._parse_chart_map_dict(spec_obj["chart_map"])
    self.spec_version = spec_obj.get("version", None)
    self.workflow_list = spec_obj.get("workflow_list", [])
def _update_vis_spec(self, vis_spec: List[Dict[str, Any]]):
self.vis_spec = vis_spec
self._chart_name_index_map = {
item["name"]: index
for index, item in enumerate(vis_spec)
}
def _get_chart_map_dict(self, chart_map: Dict[str, ChartData]) -> Dict[str, Any]:
    """Serialize each ChartData value into an alias-keyed plain dict."""
    serialized = {}
    for chart_name, chart_data in chart_map.items():
        serialized[chart_name] = chart_data.dict(by_alias=True)
    return serialized
def _parse_chart_map_dict(self, chart_map_dict: Dict[str, Any]) -> Dict[str, ChartData]:
    """Deserialize plain dicts back into ChartData objects."""
    return {
        chart_name: ChartData.parse_obj(raw_value)
        for chart_name, raw_value in chart_map_dict.items()
    }
def to_html(self) -> str:
    """Render the walker as an iframe-wrapped HTML string."""
    return self._get_render_iframe(self._get_props())
def to_html_without_iframe(self) -> str:
    """Render the walker HTML without the iframe wrapper."""
    return render_gwalker_html(self.gid, self._get_props())
def display_on_convert_html(self):
    """
    Display on jupyter-nbconvert html.
    """
    frame = self._get_render_iframe(self._get_props("jupyter"))
    display_html(frame)
def display_on_jupyter(self):
    """
    Display on jupyter notebook/lab.
    If share has large data loading, only sample data can be displayed when reload.
    After that, it will be changed to python for data calculation,
    and only a small amount of data will be output to the front end to complete the analysis of big data.
    """
    data_source = get_max_limited_datas(self.origin_data_source, JUPYTER_BYTE_LIMIT)
    truncated = len(self.origin_data_source) > len(data_source)
    props = self._get_props("jupyter", data_source, truncated)
    iframe_html = self._get_render_iframe(props)
    if not truncated:
        display_html(iframe_html)
        return
    # Data was truncated: render first, then stream the full record set to
    # the frontend through the batch-upload tunnel.
    upload_tool = BatchUploadDatasToolOnJupyter()
    display_html(iframe_html)
    upload_tool.run(
        records=self.origin_data_source,
        sample_data_count=0,
        data_source_id=self.data_source_id,
        gid=self.gid,
        tunnel_id=self.tunnel_id,
    )
def display_on_jupyter_use_widgets(self, iframe_height: str = "1010px"):
    """
    use ipywidgets, Display on jupyter notebook/lab.
    When the kernel is down, the chart will not be displayed, so use `display_on_jupyter` to share
    """
    comm = HackerCommunication(self.gid)
    preview_tool = PreviewImageTool(self.gid)
    data_source = get_max_limited_datas(self.origin_data_source, JUPYTER_WIDGETS_BYTE_LIMIT)
    props = self._get_props(
        "jupyter_widgets",
        data_source,
        len(self.origin_data_source) > len(data_source)
    )
    iframe_html = self._get_render_iframe(props, iframe_height=iframe_height)
    # The widget box pairs the rendered iframe with the comm channel widget.
    container = ipywidgets.Box(
        [ipywidgets.HTML(iframe_html), comm.get_widgets()],
        layout=ipywidgets.Layout(display='block')
    )
    self._init_callback(comm, preview_tool)
    display_html(container)
    preview_tool.init_display()
    preview_tool.render_gw_review(self._get_gw_preview_html())
def display_preview_on_jupyter(self):
    """
    Display preview on jupyter notebook/lab.
    """
    preview_html = self._get_gw_preview_html()
    display_html(preview_html)
def chart_list(self) -> List[str]:
    """
    Get the list of saved charts.
    """
    return [saved_name for saved_name in self._chart_map]
def save_chart_to_file(self, chart_name: str, path: str, save_type: Literal["html", "png"] = "png"):
    """
    Save the chart to a file (html text or png bytes).
    """
    if save_type not in ("html", "png"):
        raise ValueError(f"save_type must be html or png, but got {save_type}")
    if save_type == "html":
        content = self.export_chart_html(chart_name)
        write_mode, encoding = "w", "utf-8"
    else:
        content = self.export_chart_png(chart_name)
        write_mode, encoding = "wb", None
    with open(path, write_mode, encoding=encoding) as file_obj:
        file_obj.write(content)
def export_chart_html(self, chart_name: str) -> str:
    """
    Export the chart as a html string.
    """
    return self._get_gw_chart_preview_html(chart_name, title="", desc="")
def export_chart_png(self, chart_name: str) -> bytes:
    """
    Export the chart as a png bytes.
    """
    chart_data = self._get_chart_by_name(chart_name)
    # The preview image is hosted remotely; fetch its raw bytes.
    with urllib.request.urlopen(chart_data.single_chart) as response:
        return response.read()
def display_chart(self, chart_name: str, *, title: Optional[str] = None, desc: str = ""):
    """
    Display the chart in the notebook. The chart name doubles as the
    default title.
    """
    effective_title = chart_name if title is None else title
    html = self._get_gw_chart_preview_html(
        chart_name,
        title=effective_title,
        desc=desc
    )
    display_html(html)
def _get_chart_by_name(self, chart_name: str) -> ChartData:
    """Return the saved chart, raising a helpful error if it was never saved."""
    if chart_name in self._chart_map:
        return self._chart_map[chart_name]
    raise ValueError(f"chart_name: {chart_name} not found, please confirm whether to save")
def _init_callback(self, comm: BaseCommunication, preview_tool: PreviewImageTool = None):
    """Register all kernel-side endpoints on *comm*.

    Endpoints are grouped by feature flag at the bottom: always-on data
    endpoints, save tooling, cloud tooling, kernel computation, and
    dataframe export. *preview_tool* is only used by `update_spec` when
    previews are enabled.
    """
    upload_tool = BatchUploadDatasToolOnWidgets(comm)

    # (sic "reuqest") Streams the full record set to the frontend in batches.
    def reuqest_data_callback(_):
        upload_tool.run(
            records=self.origin_data_source,
            sample_data_count=0,
            data_source_id=self.data_source_id
        )
        return {}

    def get_latest_vis_spec(_):
        return {"visSpec": self.vis_spec}

    # Caches a chart snapshot in memory, keyed by its title.
    def save_chart_endpoint(data: Dict[str, Any]):
        chart_data = ChartData.parse_obj(data)
        self._chart_map[data["title"]] = chart_data

    # Persists the latest spec (and optionally a preview) back to wherever
    # the spec was loaded from: a local json file or the cloud key-store.
    def update_spec(data: Dict[str, Any]):
        spec_obj = {
            "config": data["visSpec"],
            "chart_map": {},
            "version": __version__,
            "workflow_list": data.get("workflowList", [])
        }
        self._update_vis_spec(data["visSpec"])
        self.spec_version = __version__
        self.workflow_list = data.get("workflowList", [])
        if self.use_preview:
            preview_tool.render_gw_review(self._get_gw_preview_html())
        save_chart_endpoint(data["chartData"])
        if self.spec_type == "json_file":
            with open(self.spec, "w", encoding="utf-8") as f:
                f.write(json.dumps(spec_obj))
        if self.spec_type == "json_ksf":
            # presumably strips a 6-char scheme prefix (e.g. "ksf://") from
            # the spec path — confirm against get_spec_json.
            self.cloud_service.write_config_to_cloud(self.spec[6:], json.dumps(spec_obj))

    # Writes the current spec to the user's cloud workspace; may also
    # install a freshly issued API token first.
    def upload_spec_to_cloud(data: Dict[str, Any]):
        if data["newToken"]:
            set_config({"kanaries_token": data["newToken"]})
            GlobalVarManager.kanaries_api_key = data["newToken"]
        spec_obj = {
            "config": self.vis_spec,
            "chart_map": {},
            "version": __version__,
            "workflow_list": self.workflow_list,
        }
        file_name = data["fileName"]
        workspace_name = self.cloud_service.get_kanaries_user_info()["workspaceName"]
        path = f"{workspace_name}/{file_name}"
        self.cloud_service.write_config_to_cloud(path, json.dumps(spec_obj))
        return {"specFilePath": path}

    # Kernel-side SQL query; refuses to return oversized result sets.
    def _get_datas(data: Dict[str, Any]):
        sql = data["sql"]
        datas = self.data_parser.get_datas_by_sql(sql)
        if len(datas) > RESPONSE_MAX_DATA_LENGTH:
            raise DataCountLimitError()
        return {
            "datas": datas
        }

    def _get_datas_by_payload(data: Dict[str, Any]):
        datas = self.data_parser.get_datas_by_payload(data["payload"])
        if len(datas) > RESPONSE_MAX_DATA_LENGTH:
            raise DataCountLimitError()
        return {
            "datas": datas
        }

    def _batch_get_datas_by_sql(data: Dict[str, Any]):
        result = self.data_parser.batch_get_datas_by_sql(data["queryList"])
        # Enforce the size cap per query in the batch.
        for datas in result:
            if len(datas) > RESPONSE_MAX_DATA_LENGTH:
                raise DataCountLimitError()
        return {
            "datas": result
        }

    def _batch_get_datas_by_payload(data: Dict[str, Any]):
        result = self.data_parser.batch_get_datas_by_payload(data["queryList"])
        for datas in result:
            if len(datas) > RESPONSE_MAX_DATA_LENGTH:
                raise DataCountLimitError()
        return {
            "datas": result
        }

    # Natural-language → spec; callers may override the backend through the
    # `custom_ask_callback` entry of other_props.
    def _get_spec_by_text(data: Dict[str, Any]):
        callback = self.other_props.get(
            "custom_ask_callback",
            self.cloud_service.get_spec_by_text
        )
        return {
            "data": callback(data["metas"], data["query"])
        }

    # Export endpoints mirror the result into both the global manager and
    # this instance's `_last_exported_dataframe`.
    def _export_dataframe_by_payload(data: Dict[str, Any]):
        df = pd.DataFrame(self.data_parser.get_datas_by_payload(data["payload"]))
        GlobalVarManager.set_last_exported_dataframe(df)
        self._last_exported_dataframe = df

    def _export_dataframe_by_sql(data: Dict[str, Any]):
        sql = data["sql"]
        df = pd.DataFrame(self.data_parser.get_datas_by_sql(sql))
        GlobalVarManager.set_last_exported_dataframe(df)
        self._last_exported_dataframe = df

    def _upload_to_cloud_charts(data: Dict[str, Any]):
        chart_id = self.cloud_service.upload_cloud_chart(
            data_parser=self.data_parser,
            chart_name=data["chartName"],
            dataset_name=data["datasetName"],
            workflow=data["workflow"],
            spec_list=data["visSpec"],
            is_public=data["isPublic"],
        )
        return {"chartId": chart_id}

    def _upload_to_cloud_dashboard(data: Dict[str, Any]):
        dashboard_id = self.cloud_service.upload_cloud_dashboard(
            data_parser=self.data_parser,
            dashboard_name=data["chartName"],
            dataset_name=data["datasetName"],
            workflow_list=data["workflowList"],
            spec_list=data["visSpec"],
            is_public=data["isPublic"],
            dark=self.dark
        )
        return {"dashboardId": dashboard_id}

    comm.register("get_latest_vis_spec", get_latest_vis_spec)
    comm.register("request_data", reuqest_data_callback)
    if self.use_save_tool:
        comm.register("upload_spec_to_cloud", upload_spec_to_cloud)
        comm.register("update_spec", update_spec)
        comm.register("save_chart", save_chart_endpoint)
    if self.show_cloud_tool:
        comm.register("upload_to_cloud_charts", _upload_to_cloud_charts)
        comm.register("upload_to_cloud_dashboard", _upload_to_cloud_dashboard)
        comm.register("get_spec_by_text", _get_spec_by_text)
    if self.use_kernel_calc:
        comm.register("get_datas", _get_datas)
        comm.register("get_datas_by_payload", _get_datas_by_payload)
        comm.register("batch_get_datas_by_sql", _batch_get_datas_by_sql)
        comm.register("batch_get_datas_by_payload", _batch_get_datas_by_payload)
    if self.is_export_dataframe:
        comm.register("export_dataframe_by_payload", _export_dataframe_by_payload)
        comm.register("export_dataframe_by_sql", _export_dataframe_by_sql)
def _send_props_track(self, props: Dict[str, Any]):
    """Send a telemetry event containing only a whitelisted subset of props."""
    tracked = frozenset({
        "id", "version", "hashcode", "themeKey",
        "dark", "env", "specType", "needLoadDatas", "showCloudTool",
        "useKernelCalc", "useSaveTool", "parseDslType", "gwMode", "datasetType",
        "defaultTab", "useCloudCalc",
    })
    event_info = {}
    for key, value in props.items():
        if key in tracked:
            event_info[key] = value
    event_info["hasKanariesToken"] = bool(self.kanaries_api_key)
    track_event("invoke_props", event_info)
def _get_props(
    self,
    env: str = "",
    data_source: Optional[Dict[str, Any]] = None,
    need_load_datas: bool = False
) -> Dict[str, Any]:
    """Build the props dict consumed by the frontend renderer.

    Args:
        env: frontend environment tag (e.g. "jupyter", "jupyter_widgets").
        data_source: records to embed; defaults to the full original source.
        need_load_datas: whether the frontend must fetch remaining rows
            (only honoured when kernel calc is disabled).
    """
    if data_source is None:
        data_source = self.origin_data_source
    props = {
        "id": self.gid,
        "dataSource": data_source,
        "len": len(data_source),
        "version": __version__,
        "hashcode": get_local_user_id(),
        "userConfig": {
            "privacy": GlobalVarManager.privacy,
        },
        "visSpec": self.vis_spec,
        "rawFields": [
            {**field, "offset": 0}
            for field in self.field_specs
        ],
        "fieldkeyGuard": False,
        "themeKey": self.theme_key,
        "dark": self.dark,
        "sourceInvokeCode": self.source_invoke_code,
        "dataSourceProps": {
            'tunnelId': self.tunnel_id,
            'dataSourceId': self.data_source_id,
        },
        "env": env,
        "specType": self.spec_type,
        # Lazy loading only applies when data is not computed in the kernel.
        "needLoadDatas": not self.use_kernel_calc and need_load_datas,
        "showCloudTool": self.show_cloud_tool,
        # Seed an initial chart only when no saved charts exist.
        "needInitChart": not self._chart_map,
        "useKernelCalc": self.use_kernel_calc,
        "useSaveTool": self.use_save_tool,
        "parseDslType": self.parse_dsl_type,
        "gwMode": self.gw_mode,
        "needLoadLastSpec": True,
        "datasetType": self.dataset_type,
        "extraConfig": self.other_props,
        "fieldMetas": self.data_parser.field_metas,
        "isExportDataFrame": self.is_export_dataframe,
        "defaultTab": self.default_tab,
        "useCloudCalc": self.use_cloud_calc
    }
    # Telemetry: reports a whitelisted subset of these props.
    self._send_props_track(props)
    return props
def _get_render_iframe(
    self,
    props: Dict[str, Any],
    return_iframe: bool = True,
    iframe_height: str = "1010px"
) -> str:
    """Render the walker HTML; optionally wrap it in a srcdoc iframe."""
    html = render_gwalker_html(self.gid, props)
    if not return_iframe:
        return html
    # Escape so the whole document can sit inside the iframe srcdoc attribute.
    return render_gwalker_iframe(self.gid, m_html.escape(html), iframe_height)
def _get_gw_preview_html(self) -> str:
    """Render a static preview of all saved charts; empty string when there
    is nothing to preview."""
    if not self.workflow_list:
        return ""
    datas = []
    for workflow in self.workflow_list:
        try:
            datas.append(self.data_parser.get_datas_by_payload(workflow))
        except ParserException:
            # A chart whose query fails still renders, just without data.
            datas.append([])
    html = render_gw_preview_html(
        self.vis_spec,
        datas,
        self.theme_key,
        self.gid,
        self.dark
    )
    return html
def _get_gw_chart_preview_html(self, chart_name: str, title: str, desc: str) -> str:
    """Render a static preview of one saved chart.

    Raises:
        ValueError: when no chart named `chart_name` exists.
    """
    # chart_name keys come from each spec item's "name" (see _update_vis_spec),
    # hence the str annotation.
    if chart_name not in self._chart_name_index_map:
        raise ValueError(f"chart_name: {chart_name} not found.")
    chart_index = self._chart_name_index_map[chart_name]
    if not self.workflow_list:
        return ""
    data = self.data_parser.get_datas_by_payload(self.workflow_list[chart_index])
    return render_gw_chart_preview_html(
        single_vis_spec=self.vis_spec[chart_index],
        data=data,
        theme_key=self.theme_key,
        title=title,
        desc=desc,
        dark=self.dark
    )
class FieldSpec(NamedTuple):
    """Field specification.

    Args:
    - semanticType: '?' | 'nominal' | 'ordinal' | 'temporal' | 'quantitative'. default to '?'.
    - analyticType: '?' | 'dimension' | 'measure'. default to '?'.
    - display_as: Optional[str]. The field name displayed. None means using the original column name.
    """
    semanticType: Literal['?', 'nominal', 'ordinal', 'temporal', 'quantitative'] = '?'
    analyticType: Literal['?', 'dimension', 'measure'] = '?'
    # Optional[...] rather than bare `str`: None is a documented, valid value.
    display_as: Optional[str] = None
class Connector:
    """
    database connector, it will cache engine by url.
    - url: database url, refer to sqlalchemy doc for url. example: mysql+pymysql://user:password@host:port/database
    - view_sql: view sql, example: SELECT * FROM table_name
    - engine_params: engine params, refer to sqlalchemy doc for params. example: {"pool_size": 10}
    """
    # Class-level cache shared by all instances: one Engine per url.
    engine_map = {}

    def __init__(self, url: str, view_sql: str, engine_params: Optional[Dict[str, Any]] = None) -> None:
        _check_view_sql(view_sql)
        if engine_params is None:
            engine_params = {}
        self.url = url
        self.engine = self._get_engine(engine_params)
        self.view_sql = view_sql

    def _get_engine(self, engine_params: Dict[str, Any]) -> Engine:
        # Create the engine only on first use of this url; later connectors
        # with the same url reuse it (their engine_params are then ignored).
        if self.url not in self.engine_map:
            engine = create_engine(self.url, **engine_params)
            engine.dialect.requires_name_normalize = False
            self.engine_map[self.url] = engine
        return self.engine_map[self.url]

    def query_datas(self, sql: str) -> List[Dict[str, Any]]:
        """Execute sql and return rows as a list of dicts.

        For snowflake, columns whose cursor type codes are 9 or 10 arrive as
        JSON strings and are decoded here.
        NOTE(review): codes 9/10 presumably map to VARIANT/ARRAY — confirm
        against the snowflake connector docs.
        """
        field_type_map = {}
        with self.engine.connect() as connection:
            result = connection.execute(text(sql))
            if self.dialect_name == "snowflake":
                field_type_map = {
                    column_desc.name: column_desc.type_code
                    for column_desc in result.cursor.description
                }
            return [
                {
                    key: json.loads(value) if field_type_map.get(key, -1) in {9, 10} else value
                    for key, value in item.items()
                }
                for item in result.mappings()
            ]

    @property
    def dialect_name(self) -> str:
        # Must be a property: query_datas compares `self.dialect_name` with a
        # string, which would always be False for a bound method object.
        return self.engine.dialect.name
# TypeVar constrained to every supported dataframe implementation, as
# enumerated in `dataframe_types` (e.g. pandas / polars).
DataFrame = TypeVar("DataFrame", *dataframe_types)
def get_formated_spec_params_code_from_frame(frame: FrameType) -> str:
    """Extract and format the pygwalker invocation code from a caller frame.

    Falls back to a default snippet when parsing fails or yields nothing.
    """
    try:
        source_invoke_code = get_formated_spec_params_code(
            str(InvokeCodeParser(frame))
        )
    except Exception:
        # Parsing is best-effort; any failure falls back to the default code.
        return _get_default_code()
    if source_invoke_code == '':
        return _get_default_code()
    return source_invoke_code
def auto_set_kanaries_api_key_on_kaggle():
    """Auto set kanaries api key on kaggle."""
    # Imported lazily: kaggle_secrets only exists inside Kaggle kernels.
    from kaggle_secrets import UserSecretsClient
    if not GlobalVarManager.kanaries_api_key:
        try:
            GlobalVarManager.set_kanaries_api_key(
                UserSecretsClient().get_secret("kanaries_api_key")
            )
        except Exception:
            # Best-effort: the secret may simply not be configured.
            pass
def adjust_kaggle_default_font_size():
    """Adjust kaggle default font size."""
    # Injects a style tag raising the root font size to 16px (presumably to
    # counter Kaggle's smaller default — confirm against Kaggle's CSS).
    display_html("""<style>html {font-size: 16px;}</style>""")
def check_convert() -> bool:
    """
    Check if the current process is a jupyter-nbconvert process.
    """
    # Fetch the parent once: the original constructed psutil.Process() twice,
    # doing redundant work and risking inconsistent parent snapshots.
    parent = psutil.Process().parent()
    if parent is None:
        return False
    return any(re.search(r"jupyter-nbconvert", cmd) for cmd in parent.cmdline())
def check_kaggle() -> bool:
    """Check if the code is running on Kaggle."""
    run_type = os.environ.get("KAGGLE_KERNEL_RUN_TYPE")
    return run_type is not None and run_type != ""
def get_kaggle_run_type() -> Literal["batch", "interactive"]:
    """Get the run type of Kaggle kernel."""
    raw_value = os.environ.get("KAGGLE_KERNEL_RUN_TYPE", "")
    return raw_value.lower()
def check_expired_params(params: Dict[str, Any]):
    """Warn about kwargs passed under a deprecated name."""
    renamed = (
        ("fieldSpecs", "field_specs"),
        ("themeKey", "theme_key"),
        ("debug", "spec_io_mode"),
    )
    for old_name, new_name in renamed:
        if old_name in params:
            logger.warning(
                f"Parameter `{old_name}` is expired, please use `{new_name}` instead."
            )
The provided code snippet includes necessary dependencies for implementing the `walk` function. Write a Python function `def walk( dataset: Union[DataFrame, Connector, str], gid: Union[int, str] = None, *, env: Literal['Jupyter', 'JupyterWidget'] = 'JupyterWidget', field_specs: Optional[Dict[str, FieldSpec]] = None, theme_key: Literal['vega', 'g2'] = 'g2', dark: Literal['media', 'light', 'dark'] = 'media', spec: str = "", use_kernel_calc: Optional[bool] = None, use_cloud_calc: bool = False, show_cloud_tool: bool = True, kanaries_api_key: str = "", default_tab: Literal["data", "vis"] = "vis", **kwargs )` to solve the following problem:
Walk through pandas.DataFrame df with Graphic Walker Args: - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataframe. - gid (Union[int, str], optional): GraphicWalker container div's id ('gwalker-{gid}') Kwargs: - env: (Literal['Jupyter' | 'JupyterWidget'], optional): The environment using pygwalker. Default as 'JupyterWidget' - field_specs (Dict[str, FieldSpec], optional): Specifications of some fields. They'll be automatically inferred from `df` if some fields are not specified. - theme_key ('vega' | 'g2'): theme type. - dark (Literal['media' | 'light' | 'dark']): 'media': auto detect OS theme. - spec (str): chart config data. config id, json, remote file url - use_kernel_calc(bool): Whether to use kernel compute for datas, Default to None, automatically determine whether to use kernel calculation. - kanaries_api_key (str): kanaries api key, Default to "". - default_tab (Literal["data", "vis"]): default tab to show. Default to "vis" - use_cloud_calc(bool): Whether to use cloud compute for datas, it uploads your data to kanaries cloud. Default to False.
Here is the function:
def walk(
    dataset: Union[DataFrame, Connector, str],
    gid: Optional[Union[int, str]] = None,
    *,
    env: Literal['Jupyter', 'JupyterWidget'] = 'JupyterWidget',
    field_specs: Optional[Dict[str, FieldSpec]] = None,
    theme_key: Literal['vega', 'g2'] = 'g2',
    dark: Literal['media', 'light', 'dark'] = 'media',
    spec: str = "",
    use_kernel_calc: Optional[bool] = None,
    use_cloud_calc: bool = False,
    show_cloud_tool: bool = True,
    kanaries_api_key: str = "",
    default_tab: Literal["data", "vis"] = "vis",
    **kwargs
):
    """Walk through pandas.DataFrame df with Graphic Walker

    Args:
        - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataframe.
        - gid (Union[int, str], optional): GraphicWalker container div's id ('gwalker-{gid}')

    Kwargs:
        - env: (Literal['Jupyter' | 'JupyterWidget'], optional): The environment using pygwalker. Default as 'JupyterWidget'
        - field_specs (Dict[str, FieldSpec], optional): Specifications of some fields. They'll be automatically inferred from `df` if some fields are not specified.
        - theme_key ('vega' | 'g2'): theme type.
        - dark (Literal['media' | 'light' | 'dark']): 'media': auto detect OS theme.
        - spec (str): chart config data. config id, json, remote file url
        - use_kernel_calc(bool): Whether to use kernel compute for datas, Default to None, automatically determine whether to use kernel calculation.
        - kanaries_api_key (str): kanaries api key, Default to "".
        - default_tab (Literal["data", "vis"]): default tab to show. Default to "vis"
        - use_cloud_calc(bool): Whether to use cloud compute for datas, it uploads your data to kanaries cloud. Default to False.
    """
    check_expired_params(kwargs)
    if field_specs is None:
        field_specs = {}
    # Capture the caller's source so the invocation can be reproduced later.
    source_invoke_code = get_formated_spec_params_code_from_frame(
        inspect.stack()[1].frame
    )
    walker = PygWalker(
        gid=gid,
        dataset=dataset,
        field_specs=field_specs,
        spec=spec,
        source_invoke_code=source_invoke_code,
        theme_key=theme_key,
        dark=dark,
        show_cloud_tool=show_cloud_tool,
        use_preview=True,
        # Database-backed datasets (Connector / cloud dataset id) always
        # compute in the kernel.
        use_kernel_calc=isinstance(dataset, (Connector, str)) or use_kernel_calc,
        use_save_tool=True,
        gw_mode="explore",
        is_export_dataframe=True,
        kanaries_api_key=kanaries_api_key,
        default_tab=default_tab,
        use_cloud_calc=use_cloud_calc,
        **kwargs
    )
    # Environment overrides: Kaggle batch runs and nbconvert cannot host the
    # interactive widget, so fall back to static variants.
    if check_kaggle():
        auto_set_kanaries_api_key_on_kaggle()
        if get_kaggle_run_type() == "batch":
            adjust_kaggle_default_font_size()
            env = "JupyterPreview"
    elif check_convert():
        env = "JupyterConvert"
    env_display_map = {
        "JupyterWidget": walker.display_on_jupyter_use_widgets,
        "Jupyter": walker.display_on_jupyter,
        "JupyterConvert": walker.display_on_convert_html,
        "JupyterPreview": walker.display_preview_on_jupyter
    }
    display_func = env_display_map.get(env, lambda: None)
    display_func()
    return walker
from typing import Union, Dict, Optional
import inspect
from typing_extensions import Literal
from .pygwalker import PygWalker
from pygwalker.data_parsers.base import FieldSpec
from pygwalker.data_parsers.database_parser import Connector
from pygwalker._typing import DataFrame
from pygwalker.services.format_invoke_walk_code import get_formated_spec_params_code_from_frame
from pygwalker.services.kaggle import auto_set_kanaries_api_key_on_kaggle, adjust_kaggle_default_font_size
from pygwalker.utils.execute_env_check import check_convert, get_kaggle_run_type, check_kaggle
from pygwalker.utils.check_walker_params import check_expired_params
class PygWalker:
    """Core renderer object behind `walk`/`render`: parses the dataset,
    builds the frontend props, and registers kernel-side data callbacks."""
def __init__(
    self,
    *,
    gid: Optional[Union[int, str]],
    dataset: Union[DataFrame, Connector, str],
    field_specs: Dict[str, Any],
    spec: str,
    source_invoke_code: str,
    theme_key: Literal['vega', 'g2'],
    dark: Literal['media', 'light', 'dark'],
    show_cloud_tool: Optional[bool],
    use_preview: bool,
    use_kernel_calc: Optional[bool],
    use_cloud_calc: Optional[bool],
    use_save_tool: bool,
    is_export_dataframe: bool,
    kanaries_api_key: str,
    default_tab: Literal["data", "vis"],
    gw_mode: Literal["explore", "renderer", "filter_renderer", "table"],
    **kwargs
):
    """Initialize a walker: build the data parser, resolve feature flags,
    and load the stored visualization spec."""
    # Explicit key takes precedence over the globally configured one.
    self.kanaries_api_key = kanaries_api_key or GlobalVarManager.kanaries_api_key
    if gid is None:
        self.gid = generate_hash_code()
    else:
        self.gid = gid
    self.cloud_service = CloudService(self.kanaries_api_key)
    self.data_parser = self._get_data_parser(
        dataset=dataset,
        field_specs=field_specs,
        use_cloud_calc=use_cloud_calc,
        kanaries_api_key=self.kanaries_api_key,
        cloud_service=self.cloud_service
    )
    # When unspecified, use kernel computation for datasets too large to
    # embed in the widget payload.
    self.use_kernel_calc = self.data_parser.data_size > JUPYTER_WIDGETS_BYTE_LIMIT if use_kernel_calc is None else use_kernel_calc
    # With kernel calc only a 500-row sample is shipped to the frontend.
    self.origin_data_source = self.data_parser.to_records(500 if self.use_kernel_calc else None)
    self.field_specs = self.data_parser.raw_fields
    self.spec = spec
    self.source_invoke_code = source_invoke_code
    self.theme_key = theme_key
    self.dark = dark
    self.data_source_id = rand_str()
    self.other_props = kwargs
    self.tunnel_id = "tunnel!"
    # Default: show the cloud tool only when an api key is available.
    self.show_cloud_tool = bool(self.kanaries_api_key) if show_cloud_tool is None else show_cloud_tool
    self.use_preview = use_preview
    self._init_spec(spec, self.field_specs)
    self.use_save_tool = use_save_tool
    self.parse_dsl_type = self._get_parse_dsl_type(self.data_parser)
    self.gw_mode = gw_mode
    self.dataset_type = self.data_parser.dataset_tpye
    self.is_export_dataframe = is_export_dataframe
    self._last_exported_dataframe = None
    self.default_tab = default_tab
    self.use_cloud_calc = use_cloud_calc
    check_update()
    # Temporarily adapt to pandas import module bug
    if self.use_kernel_calc:
        try:
            # Warm-up query; failures are expected and ignored.
            self.data_parser.get_datas_by_sql("SELECT 1 FROM pygwalker_mid_table LIMIT 1")
        except Exception:
            pass
    # Offline privacy mode never exposes cloud features.
    if GlobalVarManager.privacy == "offline":
        self.show_cloud_tool = False
def last_exported_dataframe(self) -> Optional[pd.DataFrame]:
    # Returns the DataFrame most recently exported by the frontend export
    # endpoints, or None if nothing was exported yet.
    # NOTE(review): reads like it was meant to be a @property — confirm
    # against callers before adding the decorator.
    return self._last_exported_dataframe
def _get_data_parser(
    self,
    *,
    dataset: Union[DataFrame, Connector, str],
    field_specs: Dict[str, Any],
    use_cloud_calc: bool,
    kanaries_api_key: str,
    cloud_service: CloudService
) -> BaseDataParser:
    """Build the parser for `dataset`; with cloud calc the data is first
    uploaded as a temporary cloud dataset and re-parsed from there."""
    data_parser = get_parser(
        dataset,
        field_specs,
        other_params={"kanaries_api_key": kanaries_api_key}
    )
    if not use_cloud_calc:
        return data_parser
    dataset_id = cloud_service.create_cloud_dataset(
        data_parser,
        f"temp_{rand_str()}",
        False,
        True
    )
    return get_parser(
        dataset_id,
        field_specs,
        other_params={"kanaries_api_key": kanaries_api_key}
    )
def _get_parse_dsl_type(self, data_parser: BaseDataParser) -> Literal["server", "client"]:
    """Decide where DSL queries are parsed: on the server for database or
    cloud datasets, otherwise in the client."""
    dataset_type = data_parser.dataset_tpye
    if dataset_type.startswith("connector") or dataset_type == "cloud_dataset":
        return "server"
    return "client"
def _init_spec(self, spec: Dict[str, Any], field_specs: List[Dict[str, Any]]):
    # Load the stored spec (file / cloud / inline json) and remember its type.
    spec_obj, spec_type = get_spec_json(spec)
    # `and` short-circuits: an empty/None config passes through unchanged,
    # otherwise new dataset fields are merged into the stored charts.
    self._update_vis_spec(spec_obj["config"] and fill_new_fields(spec_obj["config"], field_specs))
    self.spec_type = spec_type
    self._chart_map = self._parse_chart_map_dict(spec_obj["chart_map"])
    self.spec_version = spec_obj.get("version", None)
    self.workflow_list = spec_obj.get("workflow_list", [])
def _update_vis_spec(self, vis_spec: List[Dict[str, Any]]):
    """Replace the current spec list and rebuild the chart-name index."""
    self.vis_spec = vis_spec
    name_index = {}
    for position, chart in enumerate(vis_spec):
        name_index[chart["name"]] = position
    self._chart_name_index_map = name_index
def _get_chart_map_dict(self, chart_map: Dict[str, ChartData]) -> Dict[str, Any]:
    """Serialize saved ChartData objects into plain dicts (alias keys)."""
    serialized = {}
    for name, chart in chart_map.items():
        serialized[name] = chart.dict(by_alias=True)
    return serialized
def _parse_chart_map_dict(self, chart_map_dict: Dict[str, Any]) -> Dict[str, ChartData]:
    """Rebuild ChartData objects from their serialized dict form."""
    parsed = {}
    for name, raw_chart in chart_map_dict.items():
        parsed[name] = ChartData.parse_obj(raw_chart)
    return parsed
def to_html(self) -> str:
    """Render the walker as a self-contained iframe HTML string."""
    return self._get_render_iframe(self._get_props())
def to_html_without_iframe(self) -> str:
    """Render the walker HTML document itself, without the iframe wrapper."""
    props = self._get_props()
    html = render_gwalker_html(self.gid, props)
    return html
def display_on_convert_html(self):
    """
    Display on jupyter-nbconvert html.
    """
    # nbconvert output cannot run widgets, so render the plain iframe.
    props = self._get_props("jupyter")
    iframe_html = self._get_render_iframe(props)
    display_html(iframe_html)
def display_on_jupyter(self):
    """
    Display on jupyter notebook/lab.
    If share has large data loading, only sample data can be displayed when reload.
    After that, it will be changed to python for data calculation,
    and only a small amount of data will be output to the front end to complete the analysis of big data.
    """
    # Embed at most JUPYTER_BYTE_LIMIT worth of rows directly in the page.
    data_source = get_max_limited_datas(self.origin_data_source, JUPYTER_BYTE_LIMIT)
    props = self._get_props(
        "jupyter",
        data_source,
        len(self.origin_data_source) > len(data_source)
    )
    iframe_html = self._get_render_iframe(props)
    if len(self.origin_data_source) > len(data_source):
        # Data was truncated: display first, then stream the full dataset
        # to the frontend in batches.
        upload_tool = BatchUploadDatasToolOnJupyter()
        display_html(iframe_html)
        upload_tool.run(
            records=self.origin_data_source,
            sample_data_count=0,
            data_source_id=self.data_source_id,
            gid=self.gid,
            tunnel_id=self.tunnel_id,
        )
    else:
        display_html(iframe_html)
def display_on_jupyter_use_widgets(self, iframe_height: str = "1010px"):
    """
    use ipywidgets, Display on jupyter notebook/lab.
    When the kernel is down, the chart will not be displayed, so use `display_on_jupyter` to share
    """
    comm = HackerCommunication(self.gid)
    preview_tool = PreviewImageTool(self.gid)
    data_source = get_max_limited_datas(self.origin_data_source, JUPYTER_WIDGETS_BYTE_LIMIT)
    props = self._get_props(
        "jupyter_widgets",
        data_source,
        len(self.origin_data_source) > len(data_source)
    )
    iframe_html = self._get_render_iframe(props, iframe_height=iframe_height)
    # Pair the iframe with the comm widget so kernel endpoints are reachable.
    html_widgets = ipywidgets.Box(
        [ipywidgets.HTML(iframe_html), comm.get_widgets()],
        layout=ipywidgets.Layout(display='block')
    )
    self._init_callback(comm, preview_tool)
    display_html(html_widgets)
    # Render a static preview image for when the kernel is no longer alive.
    preview_tool.init_display()
    preview_tool.render_gw_review(self._get_gw_preview_html())
def display_preview_on_jupyter(self):
    """
    Display preview on jupyter notebook/lab.
    """
    display_html(self._get_gw_preview_html())
def chart_list(self) -> List[str]:
    """
    Get the list of saved charts.
    """
    return list(self._chart_map.keys())
def save_chart_to_file(self, chart_name: str, path: str, save_type: Literal["html", "png"] = "png"):
    """
    Save the chart to a file.
    """
    # Dispatch table: exporter, file mode and encoding per save_type.
    exporters = {
        "html": (self.export_chart_html, "w", "utf-8"),
        "png": (self.export_chart_png, "wb", None),
    }
    if save_type not in exporters:
        raise ValueError(f"save_type must be html or png, but got {save_type}")
    export_func, write_mode, encoding = exporters[save_type]
    content = export_func(chart_name)
    with open(path, write_mode, encoding=encoding) as f:
        f.write(content)
def export_chart_html(self, chart_name: str) -> str:
    """
    Export the chart as a html string.
    """
    # A preview with empty title/description is the bare chart document.
    return self._get_gw_chart_preview_html(
        chart_name,
        title="",
        desc=""
    )
def export_chart_png(self, chart_name: str) -> bytes:
    """
    Export the chart as a png bytes.
    """
    chart_data = self._get_chart_by_name(chart_name)
    # single_chart holds a URL opened via urllib; read the raw PNG bytes.
    with urllib.request.urlopen(chart_data.single_chart) as png_string:
        return png_string.read()
def display_chart(self, chart_name: str, *, title: Optional[str] = None, desc: str = ""):
    """
    Display the chart in the notebook.
    """
    # The chart name doubles as the displayed title unless overridden.
    if title is None:
        title = chart_name
    html = self._get_gw_chart_preview_html(
        chart_name,
        title=title,
        desc=desc
    )
    display_html(html)
def _get_chart_by_name(self, chart_name: str) -> ChartData:
    """Look up a saved chart, raising ValueError when it was never saved."""
    if chart_name in self._chart_map:
        return self._chart_map[chart_name]
    raise ValueError(f"chart_name: {chart_name} not found, please confirm whether to save")
def _init_callback(self, comm: BaseCommunication, preview_tool: PreviewImageTool = None):
    # Define and register every endpoint the frontend can call via `comm`.
    upload_tool = BatchUploadDatasToolOnWidgets(comm)

    def reuqest_data_callback(_):
        # (sic: "reuqest") Stream the full dataset to the frontend in batches.
        upload_tool.run(
            records=self.origin_data_source,
            sample_data_count=0,
            data_source_id=self.data_source_id
        )
        return {}

    def get_latest_vis_spec(_):
        return {"visSpec": self.vis_spec}

    def save_chart_endpoint(data: Dict[str, Any]):
        # Store the chart under its title for later export/preview.
        chart_data = ChartData.parse_obj(data)
        self._chart_map[data["title"]] = chart_data
    def update_spec(data: Dict[str, Any]):
        # Refresh in-memory spec state, re-render the preview, then persist
        # the spec back to wherever it was loaded from.
        spec_obj = {
            "config": data["visSpec"],
            "chart_map": {},
            "version": __version__,
            "workflow_list": data.get("workflowList", [])
        }
        self._update_vis_spec(data["visSpec"])
        self.spec_version = __version__
        self.workflow_list = data.get("workflowList", [])
        if self.use_preview:
            preview_tool.render_gw_review(self._get_gw_preview_html())
        save_chart_endpoint(data["chartData"])
        if self.spec_type == "json_file":
            with open(self.spec, "w", encoding="utf-8") as f:
                f.write(json.dumps(spec_obj))
        if self.spec_type == "json_ksf":
            # NOTE(review): self.spec[6:] presumably strips a fixed-length
            # scheme prefix from the ksf path — confirm the prefix length.
            self.cloud_service.write_config_to_cloud(self.spec[6:], json.dumps(spec_obj))

    def upload_spec_to_cloud(data: Dict[str, Any]):
        # An explicit token in the payload replaces the stored one.
        if data["newToken"]:
            set_config({"kanaries_token": data["newToken"]})
            GlobalVarManager.kanaries_api_key = data["newToken"]
        spec_obj = {
            "config": self.vis_spec,
            "chart_map": {},
            "version": __version__,
            "workflow_list": self.workflow_list,
        }
        file_name = data["fileName"]
        workspace_name = self.cloud_service.get_kanaries_user_info()["workspaceName"]
        path = f"{workspace_name}/{file_name}"
        self.cloud_service.write_config_to_cloud(path, json.dumps(spec_obj))
        return {"specFilePath": path}
    def _get_datas(data: Dict[str, Any]):
        # Run a raw SQL query in the kernel; refuse oversized result sets.
        sql = data["sql"]
        datas = self.data_parser.get_datas_by_sql(sql)
        if len(datas) > RESPONSE_MAX_DATA_LENGTH:
            raise DataCountLimitError()
        return {
            "datas": datas
        }

    def _get_datas_by_payload(data: Dict[str, Any]):
        # Same, but driven by a graphic-walker workflow payload.
        datas = self.data_parser.get_datas_by_payload(data["payload"])
        if len(datas) > RESPONSE_MAX_DATA_LENGTH:
            raise DataCountLimitError()
        return {
            "datas": datas
        }

    def _batch_get_datas_by_sql(data: Dict[str, Any]):
        # Batched variant: each individual result is size-checked.
        result = self.data_parser.batch_get_datas_by_sql(data["queryList"])
        for datas in result:
            if len(datas) > RESPONSE_MAX_DATA_LENGTH:
                raise DataCountLimitError()
        return {
            "datas": result
        }

    def _batch_get_datas_by_payload(data: Dict[str, Any]):
        result = self.data_parser.batch_get_datas_by_payload(data["queryList"])
        for datas in result:
            if len(datas) > RESPONSE_MAX_DATA_LENGTH:
                raise DataCountLimitError()
        return {
            "datas": result
        }

    def _get_spec_by_text(data: Dict[str, Any]):
        # Natural-language-to-spec: a user-supplied callback may override
        # the default cloud implementation.
        callback = self.other_props.get(
            "custom_ask_callback",
            self.cloud_service.get_spec_by_text
        )
        return {
            "data": callback(data["metas"], data["query"])
        }
    def _export_dataframe_by_payload(data: Dict[str, Any]):
        # Materialize the rows selected by a workflow payload as a pandas
        # DataFrame and publish it both globally and on this walker.
        df = pd.DataFrame(self.data_parser.get_datas_by_payload(data["payload"]))
        GlobalVarManager.set_last_exported_dataframe(df)
        self._last_exported_dataframe = df

    def _export_dataframe_by_sql(data: Dict[str, Any]):
        # Export variant driven by a raw SQL string from the frontend.
        sql = data["sql"]
        df = pd.DataFrame(self.data_parser.get_datas_by_sql(sql))
        GlobalVarManager.set_last_exported_dataframe(df)
        self._last_exported_dataframe = df

    def _upload_to_cloud_charts(data: Dict[str, Any]):
        # Push a single chart (spec + workflow + data) to Kanaries cloud.
        chart_id = self.cloud_service.upload_cloud_chart(
            data_parser=self.data_parser,
            chart_name=data["chartName"],
            dataset_name=data["datasetName"],
            workflow=data["workflow"],
            spec_list=data["visSpec"],
            is_public=data["isPublic"],
        )
        return {"chartId": chart_id}

    def _upload_to_cloud_dashboard(data: Dict[str, Any]):
        # Push a multi-chart dashboard to Kanaries cloud.
        # NOTE(review): dashboard_name is read from data["chartName"] —
        # verify the frontend sends the dashboard name under that key.
        dashboard_id = self.cloud_service.upload_cloud_dashboard(
            data_parser=self.data_parser,
            dashboard_name=data["chartName"],
            dataset_name=data["datasetName"],
            workflow_list=data["workflowList"],
            spec_list=data["visSpec"],
            is_public=data["isPublic"],
            dark=self.dark
        )
        return {"dashboardId": dashboard_id}
    # Wire up every endpoint the frontend may invoke over the comm channel.
    comm.register("get_latest_vis_spec", get_latest_vis_spec)
    comm.register("request_data", reuqest_data_callback)
    if self.use_save_tool:
        # Spec persistence endpoints, only when the save tool is enabled.
        comm.register("upload_spec_to_cloud", upload_spec_to_cloud)
        comm.register("update_spec", update_spec)
        comm.register("save_chart", save_chart_endpoint)
    if self.show_cloud_tool:
        # Cloud endpoints are exposed only when the cloud tool is visible.
        comm.register("upload_to_cloud_charts", _upload_to_cloud_charts)
        comm.register("upload_to_cloud_dashboard", _upload_to_cloud_dashboard)
        comm.register("get_spec_by_text", _get_spec_by_text)
    if self.use_kernel_calc:
        # Kernel-side computation: the frontend queries data through Python.
        comm.register("get_datas", _get_datas)
        comm.register("get_datas_by_payload", _get_datas_by_payload)
        comm.register("batch_get_datas_by_sql", _batch_get_datas_by_sql)
        comm.register("batch_get_datas_by_payload", _batch_get_datas_by_payload)
    if self.is_export_dataframe:
        comm.register("export_dataframe_by_payload", _export_dataframe_by_payload)
        comm.register("export_dataframe_by_sql", _export_dataframe_by_sql)
def _send_props_track(self, props: Dict[str, Any]):
    """Send a telemetry event containing only a whitelisted subset of props."""
    tracked = frozenset({
        "id", "version", "hashcode", "themeKey",
        "dark", "env", "specType", "needLoadDatas", "showCloudTool",
        "useKernelCalc", "useSaveTool", "parseDslType", "gwMode", "datasetType",
        "defaultTab", "useCloudCalc",
    })
    event_info = {}
    for key, value in props.items():
        if key in tracked:
            event_info[key] = value
    event_info["hasKanariesToken"] = bool(self.kanaries_api_key)
    track_event("invoke_props", event_info)
def _get_props(
    self,
    env: str = "",
    data_source: Optional[Dict[str, Any]] = None,
    need_load_datas: bool = False
) -> Dict[str, Any]:
    """Build the props dict consumed by the frontend renderer.

    Args:
        env: frontend environment tag (e.g. "jupyter", "jupyter_widgets").
        data_source: records to embed; defaults to the full original source.
        need_load_datas: whether the frontend must fetch remaining rows
            (only honoured when kernel calc is disabled).
    """
    if data_source is None:
        data_source = self.origin_data_source
    props = {
        "id": self.gid,
        "dataSource": data_source,
        "len": len(data_source),
        "version": __version__,
        "hashcode": get_local_user_id(),
        "userConfig": {
            "privacy": GlobalVarManager.privacy,
        },
        "visSpec": self.vis_spec,
        "rawFields": [
            {**field, "offset": 0}
            for field in self.field_specs
        ],
        "fieldkeyGuard": False,
        "themeKey": self.theme_key,
        "dark": self.dark,
        "sourceInvokeCode": self.source_invoke_code,
        "dataSourceProps": {
            'tunnelId': self.tunnel_id,
            'dataSourceId': self.data_source_id,
        },
        "env": env,
        "specType": self.spec_type,
        # Lazy loading only applies when data is not computed in the kernel.
        "needLoadDatas": not self.use_kernel_calc and need_load_datas,
        "showCloudTool": self.show_cloud_tool,
        # Seed an initial chart only when no saved charts exist.
        "needInitChart": not self._chart_map,
        "useKernelCalc": self.use_kernel_calc,
        "useSaveTool": self.use_save_tool,
        "parseDslType": self.parse_dsl_type,
        "gwMode": self.gw_mode,
        "needLoadLastSpec": True,
        "datasetType": self.dataset_type,
        "extraConfig": self.other_props,
        "fieldMetas": self.data_parser.field_metas,
        "isExportDataFrame": self.is_export_dataframe,
        "defaultTab": self.default_tab,
        "useCloudCalc": self.use_cloud_calc
    }
    # Telemetry: reports a whitelisted subset of these props.
    self._send_props_track(props)
    return props
def _get_render_iframe(
    self,
    props: Dict[str, Any],
    return_iframe: bool = True,
    iframe_height: str = "1010px"
) -> str:
    """Render the walker HTML; optionally wrap it in a srcdoc iframe."""
    html = render_gwalker_html(self.gid, props)
    if not return_iframe:
        return html
    # Escape so the whole document can sit inside the iframe srcdoc attribute.
    return render_gwalker_iframe(self.gid, m_html.escape(html), iframe_height)
def _get_gw_preview_html(self) -> str:
    """Render a static preview of all saved charts; empty string when there
    is nothing to preview."""
    if not self.workflow_list:
        return ""
    datas = []
    for workflow in self.workflow_list:
        try:
            datas.append(self.data_parser.get_datas_by_payload(workflow))
        except ParserException:
            # A chart whose query fails still renders, just without data.
            datas.append([])
    html = render_gw_preview_html(
        self.vis_spec,
        datas,
        self.theme_key,
        self.gid,
        self.dark
    )
    return html
def _get_gw_chart_preview_html(self, chart_name: str, title: str, desc: str) -> str:
    """Render a static preview of one saved chart.

    Raises:
        ValueError: when no chart named `chart_name` exists.
    """
    # chart_name keys come from each spec item's "name" (see _update_vis_spec),
    # hence the str annotation.
    if chart_name not in self._chart_name_index_map:
        raise ValueError(f"chart_name: {chart_name} not found.")
    chart_index = self._chart_name_index_map[chart_name]
    if not self.workflow_list:
        return ""
    data = self.data_parser.get_datas_by_payload(self.workflow_list[chart_index])
    return render_gw_chart_preview_html(
        single_vis_spec=self.vis_spec[chart_index],
        data=data,
        theme_key=self.theme_key,
        title=title,
        desc=desc,
        dark=self.dark
    )
class Connector:
    """
    database connector, it will cache engine by url.
    - url: database url, refer to sqlalchemy doc for url. example: mysql+pymysql://user:password@host:port/database
    - view_sql: view sql, example: SELECT * FROM table_name
    - engine_params: engine params, refer to sqlalchemy doc for params. example: {"pool_size": 10}
    """
    # Class-level cache shared by all instances: one Engine per url.
    engine_map = {}

    def __init__(self, url: str, view_sql: str, engine_params: Optional[Dict[str, Any]] = None) -> None:
        _check_view_sql(view_sql)
        if engine_params is None:
            engine_params = {}
        self.url = url
        self.engine = self._get_engine(engine_params)
        self.view_sql = view_sql

    def _get_engine(self, engine_params: Dict[str, Any]) -> Engine:
        # Create the engine only on first use of this url; later connectors
        # with the same url reuse it (their engine_params are then ignored).
        if self.url not in self.engine_map:
            engine = create_engine(self.url, **engine_params)
            engine.dialect.requires_name_normalize = False
            self.engine_map[self.url] = engine
        return self.engine_map[self.url]

    def query_datas(self, sql: str) -> List[Dict[str, Any]]:
        """Execute sql and return rows as a list of dicts.

        For snowflake, columns whose cursor type codes are 9 or 10 arrive as
        JSON strings and are decoded here.
        NOTE(review): codes 9/10 presumably map to VARIANT/ARRAY — confirm
        against the snowflake connector docs.
        """
        field_type_map = {}
        with self.engine.connect() as connection:
            result = connection.execute(text(sql))
            if self.dialect_name == "snowflake":
                field_type_map = {
                    column_desc.name: column_desc.type_code
                    for column_desc in result.cursor.description
                }
            return [
                {
                    key: json.loads(value) if field_type_map.get(key, -1) in {9, 10} else value
                    for key, value in item.items()
                }
                for item in result.mappings()
            ]

    @property
    def dialect_name(self) -> str:
        # Must be a property: query_datas compares `self.dialect_name` with a
        # string, which would always be False for a bound method object.
        return self.engine.dialect.name
# TypeVar constrained to every supported dataframe implementation, as
# enumerated in `dataframe_types` (e.g. pandas / polars).
DataFrame = TypeVar("DataFrame", *dataframe_types)
The provided code snippet includes necessary dependencies for implementing the `render` function. Write a Python function `def render( dataset: Union[DataFrame, Connector, str], spec: str, *, theme_key: Literal['vega', 'g2'] = 'g2', dark: Literal['media', 'light', 'dark'] = 'media', use_kernel_calc: Optional[bool] = None, kanaries_api_key: str = "", **kwargs )` to solve the following problem:
Args: - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataframe. - spec (str): chart config data. config id, json, remote file url Kwargs: - theme_key ('vega' | 'g2'): theme type. - dark (Literal['media' | 'light' | 'dark']): 'media': auto detect OS theme. - use_kernel_calc(bool): Whether to use kernel compute for datas, Default to None. - kanaries_api_key (str): kanaries api key, Default to "".
Here is the function:
def render(
    dataset: Union[DataFrame, Connector, str],
    spec: str,
    *,
    theme_key: Literal['vega', 'g2'] = 'g2',
    dark: Literal['media', 'light', 'dark'] = 'media',
    use_kernel_calc: Optional[bool] = None,
    kanaries_api_key: str = "",
    **kwargs
):
    """Render a saved chart spec in read-only filter-renderer mode.

    Args:
        - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataframe.
        - spec (str): chart config data. config id, json, remote file url

    Kwargs:
        - theme_key ('vega' | 'g2'): theme type.
        - dark (Literal['media' | 'light' | 'dark']): 'media': auto detect OS theme.
        - use_kernel_calc(bool): Whether to use kernel compute for datas, Default to None.
        - kanaries_api_key (str): kanaries api key, Default to "".
    """
    walker = PygWalker(
        gid=None,
        dataset=dataset,
        field_specs={},
        spec=spec,
        source_invoke_code="",
        theme_key=theme_key,
        dark=dark,
        show_cloud_tool=False,
        use_preview=False,
        # Database-backed datasets always compute in the kernel.
        use_kernel_calc=isinstance(dataset, (Connector, str)) or use_kernel_calc,
        use_save_tool=False,
        gw_mode="filter_renderer",
        is_export_dataframe=True,
        kanaries_api_key=kanaries_api_key,
        default_tab="vis",
        use_cloud_calc=False,
        **kwargs
    )
    walker.display_on_jupyter_use_widgets()
    # Return the walker for parity with `walk` (callers may ignore it).
    return walker
from typing import Union, Dict, Optional
import inspect
from typing_extensions import Literal
from .pygwalker import PygWalker
from pygwalker.data_parsers.base import FieldSpec
from pygwalker.data_parsers.database_parser import Connector
from pygwalker._typing import DataFrame
from pygwalker.services.format_invoke_walk_code import get_formated_spec_params_code_from_frame
from pygwalker.services.kaggle import auto_set_kanaries_api_key_on_kaggle, adjust_kaggle_default_font_size
from pygwalker.utils.execute_env_check import check_convert, get_kaggle_run_type, check_kaggle
from pygwalker.utils.check_walker_params import check_expired_params
class PygWalker:
    """PygWalker

    Core UI object: wraps a parsed dataset plus a chart spec and renders the
    GraphicWalker front-end into Jupyter (iframe or ipywidgets), wiring up
    kernel-side callbacks for data queries, spec persistence and cloud upload.
    """
    def __init__(
        self,
        *,
        gid: Optional[Union[int, str]],
        dataset: Union[DataFrame, Connector, str],
        field_specs: Dict[str, Any],
        spec: str,
        source_invoke_code: str,
        theme_key: Literal['vega', 'g2'],
        dark: Literal['media', 'light', 'dark'],
        show_cloud_tool: Optional[bool],
        use_preview: bool,
        use_kernel_calc: Optional[bool],
        use_cloud_calc: Optional[bool],
        use_save_tool: bool,
        is_export_dataframe: bool,
        kanaries_api_key: str,
        default_tab: Literal["data", "vis"],
        gw_mode: Literal["explore", "renderer", "filter_renderer", "table"],
        **kwargs
    ):
        """Build walker state. ``None`` for gid / show_cloud_tool /
        use_kernel_calc means "auto-decide" (random gid, api-key presence,
        data size, respectively)."""
        # Fall back to the globally configured key when none is passed.
        self.kanaries_api_key = kanaries_api_key or GlobalVarManager.kanaries_api_key
        if gid is None:
            self.gid = generate_hash_code()
        else:
            self.gid = gid
        self.cloud_service = CloudService(self.kanaries_api_key)
        self.data_parser = self._get_data_parser(
            dataset=dataset,
            field_specs=field_specs,
            use_cloud_calc=use_cloud_calc,
            kanaries_api_key=self.kanaries_api_key,
            cloud_service=self.cloud_service
        )
        # Auto-enable kernel computation when the data is too big for the widget channel.
        self.use_kernel_calc = self.data_parser.data_size > JUPYTER_WIDGETS_BYTE_LIMIT if use_kernel_calc is None else use_kernel_calc
        # With kernel calc on, only a 500-row sample is shipped to the front-end.
        self.origin_data_source = self.data_parser.to_records(500 if self.use_kernel_calc else None)
        self.field_specs = self.data_parser.raw_fields
        self.spec = spec
        self.source_invoke_code = source_invoke_code
        self.theme_key = theme_key
        self.dark = dark
        self.data_source_id = rand_str()
        self.other_props = kwargs
        self.tunnel_id = "tunnel!"
        # Default: show cloud tooling only when an api key is available.
        self.show_cloud_tool = bool(self.kanaries_api_key) if show_cloud_tool is None else show_cloud_tool
        self.use_preview = use_preview
        self._init_spec(spec, self.field_specs)
        self.use_save_tool = use_save_tool
        self.parse_dsl_type = self._get_parse_dsl_type(self.data_parser)
        self.gw_mode = gw_mode
        # 'dataset_tpye' spelling follows the data-parser attribute name.
        self.dataset_type = self.data_parser.dataset_tpye
        self.is_export_dataframe = is_export_dataframe
        self._last_exported_dataframe = None
        self.default_tab = default_tab
        self.use_cloud_calc = use_cloud_calc
        check_update()
        # Temporarily adapt to pandas import module bug
        if self.use_kernel_calc:
            try:
                self.data_parser.get_datas_by_sql("SELECT 1 FROM pygwalker_mid_table LIMIT 1")
            except Exception:
                pass
        if GlobalVarManager.privacy == "offline":
            self.show_cloud_tool = False
    # NOTE(review): reads like an accessor for _last_exported_dataframe; a
    # @property decorator may have been dropped here - confirm call sites.
    def last_exported_dataframe(self) -> Optional[pd.DataFrame]:
        """Most recent dataframe exported from the UI, if any."""
        return self._last_exported_dataframe
    def _get_data_parser(
        self,
        *,
        dataset: Union[DataFrame, Connector, str],
        field_specs: Dict[str, Any],
        use_cloud_calc: bool,
        kanaries_api_key: str,
        cloud_service: CloudService
    ) -> BaseDataParser:
        """Build a parser for *dataset*; with cloud calc, re-parse through a
        temporary cloud dataset so queries run remotely."""
        data_parser = get_parser(
            dataset,
            field_specs,
            other_params={"kanaries_api_key": kanaries_api_key}
        )
        if not use_cloud_calc:
            return data_parser
        dataset_id = cloud_service.create_cloud_dataset(
            data_parser,
            f"temp_{rand_str()}",
            False,
            True
        )
        return get_parser(
            dataset_id,
            field_specs,
            other_params={"kanaries_api_key": kanaries_api_key}
        )
    def _get_parse_dsl_type(self, data_parser: BaseDataParser) -> Literal["server", "client"]:
        """Where the query DSL gets parsed: remote sources parse server-side."""
        if data_parser.dataset_tpye.startswith("connector"):
            return "server"
        if data_parser.dataset_tpye == "cloud_dataset":
            return "server"
        return "client"
    def _init_spec(self, spec: str, field_specs: List[Dict[str, Any]]):
        """Load the chart spec (id / json / url / file) and derive vis state."""
        spec_obj, spec_type = get_spec_json(spec)
        # Empty config short-circuits to itself; otherwise top each chart up with
        # fields that appeared in the dataframe since the spec was saved.
        self._update_vis_spec(spec_obj["config"] and fill_new_fields(spec_obj["config"], field_specs))
        self.spec_type = spec_type
        self._chart_map = self._parse_chart_map_dict(spec_obj["chart_map"])
        self.spec_version = spec_obj.get("version", None)
        self.workflow_list = spec_obj.get("workflow_list", [])
    def _update_vis_spec(self, vis_spec: List[Dict[str, Any]]):
        """Replace the vis spec and rebuild the chart-name -> index lookup."""
        self.vis_spec = vis_spec
        self._chart_name_index_map = {
            item["name"]: index
            for index, item in enumerate(vis_spec)
        }
    def _get_chart_map_dict(self, chart_map: Dict[str, ChartData]) -> Dict[str, Any]:
        """Serialize saved charts to plain dicts (camelCase aliases)."""
        return {
            key: value.dict(by_alias=True)
            for key, value in chart_map.items()
        }
    def _parse_chart_map_dict(self, chart_map_dict: Dict[str, Any]) -> Dict[str, ChartData]:
        """Inverse of _get_chart_map_dict: hydrate dicts into ChartData models."""
        return {
            key: ChartData.parse_obj(value)
            for key, value in chart_map_dict.items()
        }
    def to_html(self) -> str:
        """Render the walker as a self-contained iframe html string."""
        props = self._get_props()
        return self._get_render_iframe(props)
    def to_html_without_iframe(self) -> str:
        """Render the walker html without the iframe wrapper."""
        props = self._get_props()
        html = render_gwalker_html(self.gid, props)
        return html
    def display_on_convert_html(self):
        """
        Display on jupyter-nbconvert html.
        """
        props = self._get_props("jupyter")
        iframe_html = self._get_render_iframe(props)
        display_html(iframe_html)
    def display_on_jupyter(self):
        """
        Display on jupyter notebook/lab.
        If share has large data loading, only sample data can be displayed when reload.
        After that, it will be changed to python for data calculation,
        and only a small amount of data will be output to the front end to complete the analysis of big data.
        """
        data_source = get_max_limited_datas(self.origin_data_source, JUPYTER_BYTE_LIMIT)
        props = self._get_props(
            "jupyter",
            data_source,
            len(self.origin_data_source) > len(data_source)
        )
        iframe_html = self._get_render_iframe(props)
        # When the initial payload was truncated, stream the remaining rows in batches.
        if len(self.origin_data_source) > len(data_source):
            upload_tool = BatchUploadDatasToolOnJupyter()
            display_html(iframe_html)
            upload_tool.run(
                records=self.origin_data_source,
                sample_data_count=0,
                data_source_id=self.data_source_id,
                gid=self.gid,
                tunnel_id=self.tunnel_id,
            )
        else:
            display_html(iframe_html)
    def display_on_jupyter_use_widgets(self, iframe_height: str = "1010px"):
        """
        use ipywidgets, Display on jupyter notebook/lab.
        When the kernel is down, the chart will not be displayed, so use `display_on_jupyter` to share
        """
        comm = HackerCommunication(self.gid)
        preview_tool = PreviewImageTool(self.gid)
        data_source = get_max_limited_datas(self.origin_data_source, JUPYTER_WIDGETS_BYTE_LIMIT)
        props = self._get_props(
            "jupyter_widgets",
            data_source,
            len(self.origin_data_source) > len(data_source)
        )
        iframe_html = self._get_render_iframe(props, iframe_height=iframe_height)
        html_widgets = ipywidgets.Box(
            [ipywidgets.HTML(iframe_html), comm.get_widgets()],
            layout=ipywidgets.Layout(display='block')
        )
        self._init_callback(comm, preview_tool)
        display_html(html_widgets)
        preview_tool.init_display()
        preview_tool.render_gw_review(self._get_gw_preview_html())
    def display_preview_on_jupyter(self):
        """
        Display preview on jupyter notebook/lab.
        """
        display_html(self._get_gw_preview_html())
    def chart_list(self) -> List[str]:
        """
        Get the list of saved charts.
        """
        return list(self._chart_map.keys())
    def save_chart_to_file(self, chart_name: str, path: str, save_type: Literal["html", "png"] = "png"):
        """
        Save the chart to a file.
        """
        if save_type == "html":
            content = self.export_chart_html(chart_name)
            write_mode = "w"
            encoding = "utf-8"
        elif save_type == "png":
            content = self.export_chart_png(chart_name)
            write_mode = "wb"
            encoding = None
        else:
            raise ValueError(f"save_type must be html or png, but got {save_type}")
        with open(path, write_mode, encoding=encoding) as f:
            f.write(content)
    def export_chart_html(self, chart_name: str) -> str:
        """
        Export the chart as a html string.
        """
        return self._get_gw_chart_preview_html(
            chart_name,
            title="",
            desc=""
        )
    def export_chart_png(self, chart_name: str) -> bytes:
        """
        Export the chart as a png bytes.
        """
        chart_data = self._get_chart_by_name(chart_name)
        # single_chart holds a url (presumably a data-url) of the rendered image.
        with urllib.request.urlopen(chart_data.single_chart) as png_string:
            return png_string.read()
    def display_chart(self, chart_name: str, *, title: Optional[str] = None, desc: str = ""):
        """
        Display the chart in the notebook.
        """
        if title is None:
            title = chart_name
        html = self._get_gw_chart_preview_html(
            chart_name,
            title=title,
            desc=desc
        )
        display_html(html)
    def _get_chart_by_name(self, chart_name: str) -> ChartData:
        """Look up a saved chart, raising a helpful error when it is unknown."""
        if chart_name not in self._chart_map:
            raise ValueError(f"chart_name: {chart_name} not found, please confirm whether to save")
        return self._chart_map[chart_name]
    def _init_callback(self, comm: BaseCommunication, preview_tool: Optional[PreviewImageTool] = None):
        """Register all kernel-side endpoints the front-end can call over *comm*."""
        upload_tool = BatchUploadDatasToolOnWidgets(comm)
        # Streams the full dataset to the front-end on demand.
        def reuqest_data_callback(_):
            upload_tool.run(
                records=self.origin_data_source,
                sample_data_count=0,
                data_source_id=self.data_source_id
            )
            return {}
        def get_latest_vis_spec(_):
            return {"visSpec": self.vis_spec}
        def save_chart_endpoint(data: Dict[str, Any]):
            chart_data = ChartData.parse_obj(data)
            self._chart_map[data["title"]] = chart_data
        # Persists an updated spec (and chart preview) wherever the spec came from.
        def update_spec(data: Dict[str, Any]):
            spec_obj = {
                "config": data["visSpec"],
                "chart_map": {},
                "version": __version__,
                "workflow_list": data.get("workflowList", [])
            }
            self._update_vis_spec(data["visSpec"])
            self.spec_version = __version__
            self.workflow_list = data.get("workflowList", [])
            if self.use_preview:
                preview_tool.render_gw_review(self._get_gw_preview_html())
            save_chart_endpoint(data["chartData"])
            if self.spec_type == "json_file":
                with open(self.spec, "w", encoding="utf-8") as f:
                    f.write(json.dumps(spec_obj))
            if self.spec_type == "json_ksf":
                # self.spec[6:] drops a 6-char scheme prefix, leaving the cloud
                # config path - presumably a 'ksf://' style spec id. TODO confirm.
                self.cloud_service.write_config_to_cloud(self.spec[6:], json.dumps(spec_obj))
        def upload_spec_to_cloud(data: Dict[str, Any]):
            if data["newToken"]:
                set_config({"kanaries_token": data["newToken"]})
                GlobalVarManager.kanaries_api_key = data["newToken"]
            spec_obj = {
                "config": self.vis_spec,
                "chart_map": {},
                "version": __version__,
                "workflow_list": self.workflow_list,
            }
            file_name = data["fileName"]
            workspace_name = self.cloud_service.get_kanaries_user_info()["workspaceName"]
            path = f"{workspace_name}/{file_name}"
            self.cloud_service.write_config_to_cloud(path, json.dumps(spec_obj))
            return {"specFilePath": path}
        # Kernel-calc query endpoints: responses are capped to protect the comm channel.
        def _get_datas(data: Dict[str, Any]):
            sql = data["sql"]
            datas = self.data_parser.get_datas_by_sql(sql)
            if len(datas) > RESPONSE_MAX_DATA_LENGTH:
                raise DataCountLimitError()
            return {
                "datas": datas
            }
        def _get_datas_by_payload(data: Dict[str, Any]):
            datas = self.data_parser.get_datas_by_payload(data["payload"])
            if len(datas) > RESPONSE_MAX_DATA_LENGTH:
                raise DataCountLimitError()
            return {
                "datas": datas
            }
        def _batch_get_datas_by_sql(data: Dict[str, Any]):
            result = self.data_parser.batch_get_datas_by_sql(data["queryList"])
            for datas in result:
                if len(datas) > RESPONSE_MAX_DATA_LENGTH:
                    raise DataCountLimitError()
            return {
                "datas": result
            }
        def _batch_get_datas_by_payload(data: Dict[str, Any]):
            result = self.data_parser.batch_get_datas_by_payload(data["queryList"])
            for datas in result:
                if len(datas) > RESPONSE_MAX_DATA_LENGTH:
                    raise DataCountLimitError()
            return {
                "datas": result
            }
        # Text-to-spec endpoint; overridable via the 'custom_ask_callback' kwarg.
        def _get_spec_by_text(data: Dict[str, Any]):
            callback = self.other_props.get(
                "custom_ask_callback",
                self.cloud_service.get_spec_by_text
            )
            return {
                "data": callback(data["metas"], data["query"])
            }
        def _export_dataframe_by_payload(data: Dict[str, Any]):
            df = pd.DataFrame(self.data_parser.get_datas_by_payload(data["payload"]))
            GlobalVarManager.set_last_exported_dataframe(df)
            self._last_exported_dataframe = df
        def _export_dataframe_by_sql(data: Dict[str, Any]):
            sql = data["sql"]
            df = pd.DataFrame(self.data_parser.get_datas_by_sql(sql))
            GlobalVarManager.set_last_exported_dataframe(df)
            self._last_exported_dataframe = df
        def _upload_to_cloud_charts(data: Dict[str, Any]):
            chart_id = self.cloud_service.upload_cloud_chart(
                data_parser=self.data_parser,
                chart_name=data["chartName"],
                dataset_name=data["datasetName"],
                workflow=data["workflow"],
                spec_list=data["visSpec"],
                is_public=data["isPublic"],
            )
            return {"chartId": chart_id}
        def _upload_to_cloud_dashboard(data: Dict[str, Any]):
            dashboard_id = self.cloud_service.upload_cloud_dashboard(
                data_parser=self.data_parser,
                dashboard_name=data["chartName"],
                dataset_name=data["datasetName"],
                workflow_list=data["workflowList"],
                spec_list=data["visSpec"],
                is_public=data["isPublic"],
                dark=self.dark
            )
            return {"dashboardId": dashboard_id}
        # Endpoint registration is gated on the corresponding feature flags.
        comm.register("get_latest_vis_spec", get_latest_vis_spec)
        comm.register("request_data", reuqest_data_callback)
        if self.use_save_tool:
            comm.register("upload_spec_to_cloud", upload_spec_to_cloud)
            comm.register("update_spec", update_spec)
            comm.register("save_chart", save_chart_endpoint)
        if self.show_cloud_tool:
            comm.register("upload_to_cloud_charts", _upload_to_cloud_charts)
            comm.register("upload_to_cloud_dashboard", _upload_to_cloud_dashboard)
            comm.register("get_spec_by_text", _get_spec_by_text)
        if self.use_kernel_calc:
            comm.register("get_datas", _get_datas)
            comm.register("get_datas_by_payload", _get_datas_by_payload)
            comm.register("batch_get_datas_by_sql", _batch_get_datas_by_sql)
            comm.register("batch_get_datas_by_payload", _batch_get_datas_by_payload)
        if self.is_export_dataframe:
            comm.register("export_dataframe_by_payload", _export_dataframe_by_payload)
            comm.register("export_dataframe_by_sql", _export_dataframe_by_sql)
    def _send_props_track(self, props: Dict[str, Any]):
        """Emit an anonymized usage event containing only whitelisted prop keys."""
        needed_fields = {
            "id", "version", "hashcode", "themeKey",
            "dark", "env", "specType", "needLoadDatas", "showCloudTool",
            "useKernelCalc", "useSaveTool", "parseDslType", "gwMode", "datasetType",
            "defaultTab", "useCloudCalc"
        }
        event_info = {key: value for key, value in props.items() if key in needed_fields}
        event_info["hasKanariesToken"] = bool(self.kanaries_api_key)
        track_event("invoke_props", event_info)
    def _get_props(
        self,
        env: str = "",
        data_source: Optional[List[Dict[str, Any]]] = None,
        need_load_datas: bool = False
    ) -> Dict[str, Any]:
        """Assemble the props dict handed to the GraphicWalker front-end."""
        if data_source is None:
            data_source = self.origin_data_source
        props = {
            "id": self.gid,
            "dataSource": data_source,
            "len": len(data_source),
            "version": __version__,
            "hashcode": get_local_user_id(),
            "userConfig": {
                "privacy": GlobalVarManager.privacy,
            },
            "visSpec": self.vis_spec,
            "rawFields": [
                {**field, "offset": 0}
                for field in self.field_specs
            ],
            "fieldkeyGuard": False,
            "themeKey": self.theme_key,
            "dark": self.dark,
            "sourceInvokeCode": self.source_invoke_code,
            "dataSourceProps": {
                'tunnelId': self.tunnel_id,
                'dataSourceId': self.data_source_id,
            },
            "env": env,
            "specType": self.spec_type,
            "needLoadDatas": not self.use_kernel_calc and need_load_datas,
            "showCloudTool": self.show_cloud_tool,
            "needInitChart": not self._chart_map,
            "useKernelCalc": self.use_kernel_calc,
            "useSaveTool": self.use_save_tool,
            "parseDslType": self.parse_dsl_type,
            "gwMode": self.gw_mode,
            "needLoadLastSpec": True,
            "datasetType": self.dataset_type,
            "extraConfig": self.other_props,
            "fieldMetas": self.data_parser.field_metas,
            "isExportDataFrame": self.is_export_dataframe,
            "defaultTab": self.default_tab,
            "useCloudCalc": self.use_cloud_calc
        }
        self._send_props_track(props)
        return props
    def _get_render_iframe(
        self,
        props: Dict[str, Any],
        return_iframe: bool = True,
        iframe_height: str = "1010px"
    ) -> str:
        """Render props to html, optionally wrapped in an escaped-srcdoc iframe."""
        html = render_gwalker_html(self.gid, props)
        if return_iframe:
            srcdoc = m_html.escape(html)
            return render_gwalker_iframe(self.gid, srcdoc, iframe_height)
        else:
            return html
    def _get_gw_preview_html(self) -> str:
        """Static preview html of all charts; empty when no workflows are saved."""
        if not self.workflow_list:
            return ""
        datas = []
        for workflow in self.workflow_list:
            try:
                datas.append(self.data_parser.get_datas_by_payload(workflow))
            except ParserException:
                # Unparseable workflow -> render that chart with no data.
                datas.append([])
        html = render_gw_preview_html(
            self.vis_spec,
            datas,
            self.theme_key,
            self.gid,
            self.dark
        )
        return html
    def _get_gw_chart_preview_html(self, chart_name: str, title: str, desc: str) -> str:
        """Static preview html for one named chart."""
        if chart_name not in self._chart_name_index_map:
            raise ValueError(f"chart_name: {chart_name} not found.")
        chart_index = self._chart_name_index_map[chart_name]
        if not self.workflow_list:
            return ""
        data = self.data_parser.get_datas_by_payload(self.workflow_list[chart_index])
        return render_gw_chart_preview_html(
            single_vis_spec=self.vis_spec[chart_index],
            data=data,
            theme_key=self.theme_key,
            title=title,
            desc=desc,
            dark=self.dark
        )
class Connector:
    """
    database connector, it will cache engine by url.

    - url: database url, refer to sqlalchemy doc for url. example: mysql+pymysql://user:password@host:port/database
    - view_sql: view sql, example: SELECT * FROM table_name
    - engine_params: engine params, refer to sqlalchemy doc for params. example: {"pool_size": 10}
    """
    # Class-level cache: one sqlalchemy engine per database url, shared across instances.
    engine_map = {}

    def __init__(self, url: str, view_sql: str, engine_params: Optional[Dict[str, Any]] = None) -> None:
        # Reject view SQL that could yield duplicate column names downstream.
        _check_view_sql(view_sql)
        if engine_params is None:
            engine_params = {}
        self.url = url
        self.engine = self._get_engine(engine_params)
        self.view_sql = view_sql

    def _get_engine(self, engine_params: Dict[str, Any]) -> Engine:
        """Return the cached engine for self.url, creating it on first use."""
        if self.url not in self.engine_map:
            engine = create_engine(self.url, **engine_params)
            # Keep identifier case as-is so column names round-trip unchanged.
            engine.dialect.requires_name_normalize = False
            self.engine_map[self.url] = engine
        return self.engine_map[self.url]

    def query_datas(self, sql: str) -> List[Dict[str, Any]]:
        """Execute *sql* and return its rows as a list of dicts.

        On snowflake, columns with cursor type codes 9/10 (presumably
        JSON-typed variant/array columns - confirm against the snowflake
        connector) arrive as JSON strings and are decoded to Python objects.
        """
        field_type_map = {}
        with self.engine.connect() as connection:
            result = connection.execute(text(sql))
            if self.dialect_name == "snowflake":
                field_type_map = {
                    column_desc.name: column_desc.type_code
                    for column_desc in result.cursor.description
                }
            return [
                {
                    key: json.loads(value) if field_type_map.get(key, -1) in {9, 10} else value
                    for key, value in item.items()
                }
                for item in result.mappings()
            ]

    # Fix: without @property, `self.dialect_name == "snowflake"` in query_datas
    # compared a bound method to a string and was always False, so snowflake
    # JSON columns were never decoded.
    @property
    def dialect_name(self) -> str:
        """Name of the sqlalchemy dialect (e.g. 'mysql', 'snowflake')."""
        return self.engine.dialect.name
# TypeVar constrained to the dataframe classes listed in `dataframe_types`
# (defined elsewhere - presumably pandas/polars frames; confirm at its definition).
DataFrame = TypeVar("DataFrame", *dataframe_types)
The provided code snippet includes necessary dependencies for implementing the `table` function. Write a Python function `def table( dataset: Union[DataFrame, Connector, str], *, theme_key: Literal['vega', 'g2'] = 'g2', dark: Literal['media', 'light', 'dark'] = 'media', use_kernel_calc: Optional[bool] = None, kanaries_api_key: str = "", **kwargs )` to solve the following problem:
Args: - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataframe. Kargs: - theme_key ('vega' | 'g2'): theme type. - dark (Literal['media' | 'light' | 'dark']): 'media': auto detect OS theme. - use_kernel_calc(bool): Whether to use kernel compute for datas, Default to None. - kanaries_api_key (str): kanaries api key, Default to "".
Here is the function:
def table(
    dataset: Union[DataFrame, Connector, str],
    *,
    theme_key: Literal['vega', 'g2'] = 'g2',
    dark: Literal['media', 'light', 'dark'] = 'media',
    use_kernel_calc: Optional[bool] = None,
    kanaries_api_key: str = "",
    **kwargs
):
    """
    Show *dataset* as an interactive table inside Jupyter.

    Args:
        - dataset (pl.DataFrame | pd.DataFrame | Connector, optional): dataframe.

    Kargs:
        - theme_key ('vega' | 'g2'): theme type.
        - dark ('media' | 'light' | 'dark'): 'media' auto-detects the OS theme.
        - use_kernel_calc (bool): whether to use kernel compute for datas. Default to None.
        - kanaries_api_key (str): kanaries api key. Default to "".
    """
    # Database/connection-string datasets must always be computed in the kernel.
    kernel_calc = True if isinstance(dataset, (Connector, str)) else use_kernel_calc
    walker = PygWalker(
        gid=None,
        dataset=dataset,
        field_specs={},
        spec="",
        source_invoke_code="",
        theme_key=theme_key,
        dark=dark,
        show_cloud_tool=False,
        use_preview=False,
        use_kernel_calc=kernel_calc,
        use_save_tool=False,
        gw_mode="table",
        is_export_dataframe=True,
        kanaries_api_key=kanaries_api_key,
        default_tab="vis",
        use_cloud_calc=False,
        **kwargs
    )
    walker.display_on_jupyter_use_widgets("800px")
165,751 | from typing import Any, Dict, List, Optional
from functools import lru_cache
from decimal import Decimal
import logging
import json
import io
from sqlalchemy import create_engine, text
from sqlalchemy.engine import Engine
import pandas as pd
import sqlglot.expressions as exp
import sqlglot
from .base import BaseDataParser, get_data_meta_type, INFINITY_DATA_SIZE
from .pandas_parser import PandasDataFrameDataParser
from pygwalker.data_parsers.base import FieldSpec
from pygwalker.utils.custom_sqlglot import DuckdbDialect
from pygwalker.utils.payload_to_sql import get_sql_from_payload
from pygwalker.errors import ViewSqlSameColumnError
class ViewSqlSameColumnError(BaseError):
    """Raised when the view sql is invalid (it could yield duplicate column names)."""
    pass
The provided code snippet includes necessary dependencies for implementing the `_check_view_sql` function. Write a Python function `def _check_view_sql(sql: str) -> None` to solve the following problem:
check the view sql; it will raise ViewSqlSameColumnError if the view sql contains duplicate column names
Here is the function:
def _check_view_sql(sql: str) -> None:
    """Validate a view SQL statement.

    Raises ViewSqlSameColumnError when the statement could yield duplicate
    column names: explicit duplicates, `SELECT *` mixed with other columns,
    or `SELECT *` over a join.
    """
    parsed = sqlglot.parse_one(sql)
    column_names = [node.alias_or_name for node in parsed.find(exp.Select)]
    contains_join = parsed.find(exp.Join) is not None
    contains_star = "*" in column_names
    has_duplicate = len(set(column_names)) != len(column_names)
    if contains_star and len(column_names) > 1:
        raise ViewSqlSameColumnError("fields with the same name may appear when use select * and select other fields")
    if contains_join and contains_star:
        raise ViewSqlSameColumnError("fields with the same name may appear when multi table join and use select *")
    if has_duplicate:
        raise ViewSqlSameColumnError("view sql can not contain same column")
165,752 | from typing import Generic, Dict, List, Any, Optional, NamedTuple
from typing_extensions import Literal
from functools import lru_cache
from datetime import datetime, date
from datetime import timedelta
import abc
import io
import duckdb
import arrow
import pytz
from pygwalker._typing import DataFrame
from pygwalker.utils.payload_to_sql import get_sql_from_payload
from pygwalker.utils.estimate_tools import estimate_average_data_size
The provided code snippet includes necessary dependencies for implementing the `is_temporal_field` function. Write a Python function `def is_temporal_field(value: Any, infer_string_to_date: bool) -> bool` to solve the following problem:
check if field is temporal
Here is the function:
def is_temporal_field(value: Any, infer_string_to_date: bool) -> bool:
    """Return True when *value* looks temporal.

    Without string inference, only datetime/date instances qualify; with it,
    anything arrow can parse (after str() coercion) also qualifies.
    """
    if not infer_string_to_date:
        return isinstance(value, (datetime, date))
    try:
        arrow.get(str(value))
    except Exception:
        return False
    return True
165,753 | from typing import Generic, Dict, List, Any, Optional, NamedTuple
from typing_extensions import Literal
from functools import lru_cache
from datetime import datetime, date
from datetime import timedelta
import abc
import io
import duckdb
import arrow
import pytz
from pygwalker._typing import DataFrame
from pygwalker.utils.payload_to_sql import get_sql_from_payload
from pygwalker.utils.estimate_tools import estimate_average_data_size
The provided code snippet includes necessary dependencies for implementing the `is_geo_field` function. Write a Python function `def is_geo_field(field_name: str) -> bool` to solve the following problem:
check if the field name refers to a geographic coordinate (latitude/longitude)
Here is the function:
def is_geo_field(field_name: str) -> bool:
    """Return True when the column name denotes a geographic coordinate
    (latitude/longitude), ignoring case and surrounding spaces/dots."""
    geo_names = ("latitude", "longitude", "lat", "long", "lon")
    normalized = field_name.lower().strip(" .")
    return normalized in geo_names
165,754 | from typing import Generic, Dict, List, Any, Optional, NamedTuple
from typing_extensions import Literal
from functools import lru_cache
from datetime import datetime, date
from datetime import timedelta
import abc
import io
import duckdb
import arrow
import pytz
from pygwalker._typing import DataFrame
from pygwalker.utils.payload_to_sql import get_sql_from_payload
from pygwalker.utils.estimate_tools import estimate_average_data_size
The provided code snippet includes necessary dependencies for implementing the `format_temporal_string` function. Write a Python function `def format_temporal_string(value: str) -> str` to solve the following problem:
Convert temporal fields to a fixed format
Here is the function:
def format_temporal_string(value: str) -> str:
    """Normalize a temporal value to the fixed '%Y-%m-%d %H:%M:%S' layout."""
    parsed = arrow.get(value)
    return parsed.strftime("%Y-%m-%d %H:%M:%S")
165,755 | from typing import Generic, Dict, List, Any, Optional, NamedTuple
from typing_extensions import Literal
from functools import lru_cache
from datetime import datetime, date
from datetime import timedelta
import abc
import io
import duckdb
import arrow
import pytz
from pygwalker._typing import DataFrame
from pygwalker.utils.payload_to_sql import get_sql_from_payload
from pygwalker.utils.estimate_tools import estimate_average_data_size
def get_data_meta_type(data: Dict[str, Any]) -> List[Dict[str, str]]:
    """Infer a coarse meta type per field from one sample record.

    datetimes map to 'datetime' ('datetime_tz' when tz-aware), int/float to
    'number', everything else to 'string'. Order follows the dict's keys.
    """
    result = []
    for field_key, sample in data.items():
        if isinstance(sample, datetime):
            kind = "datetime_tz" if sample.tzinfo else "datetime"
        elif isinstance(sample, (int, float)):
            kind = "number"
        else:
            kind = "string"
        result.append({"key": field_key, "type": kind})
    return result
165,756 | from typing import Generic, Dict, List, Any, Optional, NamedTuple
from typing_extensions import Literal
from functools import lru_cache
from datetime import datetime, date
from datetime import timedelta
import abc
import io
import duckdb
import arrow
import pytz
from pygwalker._typing import DataFrame
from pygwalker.utils.payload_to_sql import get_sql_from_payload
from pygwalker.utils.estimate_tools import estimate_average_data_size
def get_timezone_base_offset(offset_seconds: int) -> Optional[str]:
    """Return the name of some known timezone whose *current* UTC offset
    equals *offset_seconds*, or None when no zone matches."""
    wanted = timedelta(seconds=offset_seconds)
    reference = datetime.now(pytz.utc)
    for zone_name in pytz.all_timezones_set:
        zone = pytz.timezone(zone_name)
        if reference.astimezone(zone).utcoffset() == wanted:
            return zone.zone
    return None
165,757 | from typing import Dict, Any, Optional
import segment.analytics as analytics
import kanaries_track
from pygwalker.services.global_var import GlobalVarManager
from pygwalker.services.config import get_local_user_id
# Segment write key used for anonymous usage analytics.
analytics.write_key = 'z58N15R8LShkpUbBSt1ZjdDSdSEF5VpR'
# Configure the kanaries_track client; `kanaries_public_key` is defined elsewhere.
kanaries_track.config.auth_token = kanaries_public_key
kanaries_track.config.proxies = {}
kanaries_track.config.max_retries = 2
class GlobalVarManager:
    """A class to manage global variables."""
    # Current runtime environment ('Jupyter' | 'Streamlit'), set via set_env.
    env = None
    # Privacy level: 'offline' | 'update-only' | 'events'; defaults to 'events'.
    privacy = get_config("privacy") or "events"
    # API key: local config first, then the KANARIES_API_KEY environment variable.
    kanaries_api_key = get_config("kanaries_token") or os.getenv("KANARIES_API_KEY", "")
    kanaries_api_host = "https://api.kanaries.net"
    kanaries_main_host = "https://kanaries.net"
    # Most recent dataframe exported from the UI, if any.
    last_exported_dataframe = None
    # NOTE(review): the setters below take `cls` but no @classmethod decorators
    # are visible in this excerpt - confirm they were not dropped upstream.
    def set_env(cls, env: Literal['Jupyter', 'Streamlit']):
        cls.env = env
    def get_env(cls) -> Literal['Jupyter', 'Streamlit']:
        return cls.env
    def set_kanaries_api_key(cls, api_key: str):
        cls.kanaries_api_key = api_key
    def set_kanaries_api_host(cls, api_host: str):
        cls.kanaries_api_host = api_host
    def set_kanaries_main_host(cls, main_host: str):
        cls.kanaries_main_host = main_host
    def set_privacy(cls, privacy: Literal['offline', 'update-only', 'events']):
        cls.privacy = privacy
    def set_last_exported_dataframe(cls, df: DataFrame):
        cls.last_exported_dataframe = df
def get_local_user_id() -> str:
    """Return the persistent anonymous user id stored in the local config file,
    creating the file with a freshly generated id on first use."""
    defaults = {"user_id": generate_hash_code()}
    config = _read_and_create_file(USER_CONFIG_PATH, defaults)
    return config.get("user_id", "")
The provided code snippet includes necessary dependencies for implementing the `track_event` function. Write a Python function `def track_event(event: str, properties: Optional[Dict[str, Any]] = None)` to solve the following problem:
Track an event in Segment and Kanaries. When privacy config of user is 'events', PyGWalker will collect certain events data share which events about which feature is used in pygwalker, it only contains events tag about which feature you arrive for product optimization. No DATA YOU ANALYZE IS SENT. We only use these data to improve the user experience of pygwalker. Events data will bind with a unique id, which is generated by pygwalker when it is installed based on timestamp. We will not collect any other information about you. EXAMPLE: - pygwalker's version - pygwalker's mode: 'light', 'dark' or 'auto' - pygwalker's spec type: 'json', 'file', 'url'. We won't collect the exact value of spec. No DATA YOU ANALYZE OR THEIR METADATA IS COLLECTED. - privacy ['offline', 'update-only', 'events'] (default: events). "offline": fully offline, no data is send or api is requested "update-only": only check whether this is a new version of pygwalker to update "events": share which events about which feature is used in pygwalker, it only contains events data about which feature you arrive for product optimization. No DATA YOU ANALYZE IS SENT.
Here is the function:
def track_event(event: str, properties: Optional[Dict[str, Any]] = None):
    """
    Track an event in Segment and Kanaries.
    When privacy config of user is 'events',
    PyGWalker will collect certain events data share which events about which feature is used in pygwalker, it only contains events tag about which feature you arrive for product optimization. No DATA YOU ANALYZE IS SENT.
    We only use these data to improve the user experience of pygwalker. Events data will bind with a unique id, which is generated by pygwalker when it is installed based on timestamp. We will not collect any other information about you.
    EXAMPLE:
    - pygwalker's version
    - pygwalker's mode: 'light', 'dark' or 'auto'
    - pygwalker's spec type: 'json', 'file', 'url'. We won't collect the exact value of spec. No DATA YOU ANALYZE OR THEIR METADATA IS COLLECTED.
    - privacy ['offline', 'update-only', 'events'] (default: events).
    "offline": fully offline, no data is send or api is requested
    "update-only": only check whether this is a new version of pygwalker to update
    "events": share which events about which feature is used in pygwalker, it only contains events data about which feature you arrive for product optimization. No DATA YOU ANALYZE IS SENT.
    """
    if GlobalVarManager.privacy != "events":
        return
    # Normalize so the dict-merge below also works when no properties are given
    # (previously `{**None}` raised a TypeError that the blanket except hid,
    # silently dropping every property-less kanaries_track event).
    merged_props = {**(properties or {}), "user_id": get_local_user_id()}
    try:
        analytics.track(
            user_id=get_local_user_id(),
            event=event,
            properties=properties
        )
        kanaries_track.track(merged_props)
    except Exception:
        # Tracking must never break user code; swallow network/SDK errors.
        pass
165,758 | from urllib import request
from typing import Tuple, Dict, Any, List
from distutils.version import StrictVersion
from copy import deepcopy
import json
import os
from pygwalker.services.global_var import GlobalVarManager
from pygwalker.utils.randoms import rand_str
from pygwalker.services.fname_encodings import rename_columns
from pygwalker.services.cloud_service import read_config_from_cloud
from pygwalker.errors import InvalidConfigIdError, PrivacyError
def rand_str(n: int = 8, options: str = string.ascii_letters + string.digits) -> str:
    """Return a random string of length *n* drawn from *options*.

    Samples with replacement, so any length is supported (the previous
    random.sample approach raised ValueError for n > len(options) and could
    never repeat a character, reducing entropy).

    Note: not cryptographically secure; use the `secrets` module for tokens.
    """
    return ''.join(random.choices(options, k=n))
The provided code snippet includes necessary dependencies for implementing the `fill_new_fields` function. Write a Python function `def fill_new_fields(config: List[Dict[str, Any]], all_fields: List[Dict[str, str]]) -> List[Dict[str, Any]]` to solve the following problem:
when df schema changed, fill new fields to every chart config
Here is the function:
def fill_new_fields(config: List[Dict[str, Any]], all_fields: List[Dict[str, str]]) -> List[Dict[str, Any]]:
"""when df schema changed, fill new fields to every chart config"""
config = deepcopy(config)
for chart_item in config:
field_set = {
field["fid"]
for field in chart_item["encodings"]["dimensions"] + chart_item["encodings"]["measures"]
}
new_dimension_fields = []
new_measure_fields = []
for field in all_fields:
if field["fid"] not in field_set:
gw_field = {
**field,
"basename": field["name"],
"dragId": "GW_" + rand_str()
}
if field["analyticType"] == "dimension":
new_dimension_fields.append(gw_field)
else:
new_measure_fields.append(gw_field)
chart_item["encodings"]["dimensions"].extend(new_dimension_fields)
chart_item["encodings"]["measures"].extend(new_measure_fields)
return config | when df schema changed, fill new fields to every chart config |
165,759 | from urllib import request
from typing import Tuple, Dict, Any, List
from distutils.version import StrictVersion
from copy import deepcopy
import json
import os
from pygwalker.services.global_var import GlobalVarManager
from pygwalker.utils.randoms import rand_str
from pygwalker.services.fname_encodings import rename_columns
from pygwalker.services.cloud_service import read_config_from_cloud
from pygwalker.errors import InvalidConfigIdError, PrivacyError
def _get_spec_json_from_diff_source(spec: str) -> Tuple[str, str]:
def _config_adapter(config: str) -> str:
def _config_adapter_045a5(config: List[Dict[str, Any]]):
def get_spec_json(spec: str) -> Tuple[Dict[str, Any], str]:
spec, spec_type = _get_spec_json_from_diff_source(spec)
if not spec:
return {"chart_map": {}, "config": [], "workflow_list": []}, spec_type
try:
spec_obj = json.loads(spec)
except json.decoder.JSONDecodeError as e:
raise ValueError("spec is not a valid json") from e
if isinstance(spec_obj, list):
spec_obj = {"chart_map": {}, "config": json.dumps(spec_obj), "workflow_list": []}
if StrictVersion(spec_obj.get("version", "0.1.0")) <= StrictVersion("0.3.17a4"):
spec_obj["config"] = _config_adapter(spec_obj["config"])
if isinstance(spec_obj["config"], str):
spec_obj["config"] = json.loads(spec_obj["config"])
if StrictVersion(spec_obj.get("version", "0.1.0")) <= StrictVersion("0.4.7a5"):
spec_obj["config"] = _config_adapter_045a5(spec_obj["config"])
return spec_obj, spec_type | null |
165,760 | from typing import Dict, Any, List
import time
import json
import html as m_html
from pygwalker.utils.randoms import rand_str
from pygwalker.utils.display import display_html
from pygwalker.utils.encode import DataFrameEncoder
from pygwalker.communications.base import BaseCommunication
from pygwalker import __hash__
def _send_js(js_code: str, slot_id: str):
    """Run js_code in the front-end by injecting a self-firing <style onload> tag."""
    escaped = m_html.escape(js_code)
    # The onload handler defers execution with setTimeout(f, 0) so the code
    # runs after the element has been attached to the DOM.
    payload = '<style onload="(()=>{let f=()=>{' + escaped + '};setTimeout(f,0);})();" />'
    display_html(payload, slot_id=slot_id)
class DataFrameEncoder(json.JSONEncoder):
    """JSON encoder with fallbacks for values DataFrames commonly contain.

    datetimes become epoch milliseconds (naive values are localized to UTC
    first), Decimals become floats (NaN -> null), and anything else the
    stock encoder rejects is serialized via str(), or null as a last resort.
    """

    def default(self, o):
        if isinstance(o, datetime):
            # Naive datetimes are treated as UTC before conversion.
            aware = o if o.tzinfo is not None else pytz.utc.localize(o)
            return int(aware.timestamp() * 1000)
        if isinstance(o, Decimal):
            return None if o.is_nan() else float(o)
        try:
            return json.JSONEncoder.default(self, o)
        except Exception:
            # Best effort: stringify, and give up with null if even that fails.
            try:
                return str(o)
            except TypeError:
                return None
def _send_upload_data_msg(gid: int, msg: Dict[str, Any], slot_id: str):
msg = json.dumps(msg, cls=DataFrameEncoder)
js_code = (
f"document.getElementById('gwalker-{gid}')?"
".contentWindow?"
f".postMessage({msg}, '*');"
)
_send_js(js_code, slot_id) | null |
165,761 | from typing import Dict, Any, List
import time
import json
import html as m_html
from pygwalker.utils.randoms import rand_str
from pygwalker.utils.display import display_html
from pygwalker.utils.encode import DataFrameEncoder
from pygwalker.communications.base import BaseCommunication
from pygwalker import __hash__
def rand_str(n: int = 8, options: str = string.ascii_letters + string.digits) -> str:
__hash__ = __rand_str()
def _rand_slot_id():
    # Slot ids combine the module-level hash with a short random suffix so
    # repeated renders in one session each get a unique display slot.
    return __hash__ + '-' + rand_str(6) | null |
165,762 | from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any
from urllib.parse import urlparse, parse_qs, quote
from threading import Thread, Lock
import socket
import webbrowser
from pygwalker.services.config import set_config
# Base URL of the Kanaries auth site the login flow redirects to.
AUTH_HOST = "https://kanaries.net"
# NOTE(review): presumably filled in by the callback handler on successful
# auth (kanaries_login reads "user_name"/"workspace_name") — confirm.
auth_info = {}
# Held while waiting for the browser callback; the handler releases it.
wait_lock = Lock()
class TextStyle:
    """ANSI escape sequences used to style terminal output."""
    RESET = '\033[0m'       # clear all styling
    GREEN = '\033[32m'      # green foreground
    RED = '\033[31m'        # red foreground
    UNDERLINE = '\033[4m'   # underlined text
def _find_free_port() -> int:
"""Find a free port on localhost"""
temp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
temp_socket.bind(('localhost', 0))
_, port = temp_socket.getsockname()
temp_socket.close()
return port
def _run_callback_server(port: int):
    """Run a blocking HTTP server on localhost:port handling the auth callback."""
    # serve_forever() never returns; callers run this in a daemon thread.
    HTTPServer(('localhost', port), _CallbackHandler).serve_forever()
def kanaries_login():
    """Interactively log in to Kanaries via a browser OAuth-style flow.

    Starts a local HTTP callback server on a free port, opens the Kanaries
    auth page pointing back at that server, then waits (up to 300s) for the
    callback to complete before printing the authorized user/workspace.
    """
    # Take the lock; the callback handler is expected to release it when
    # authorization completes, unblocking the second acquire() below.
    # NOTE(review): on timeout the first acquire is never released, so a
    # second call in the same process would block — confirm intended.
    wait_lock.acquire()
    port = _find_free_port()
    # Daemon thread: the server dies with the interpreter, no explicit stop.
    callback_server = Thread(target=_run_callback_server, args=(port,), daemon=True)
    callback_server.start()
    callback_url = f'http://localhost:{port}'
    auth_url = f"{AUTH_HOST}/home/cli?redirect_url={quote(callback_url)}"
    print(f'Please visit {TextStyle.GREEN}{auth_url}{TextStyle.RESET} to log in.')
    print('Waiting for authorization...')
    webbrowser.open_new(auth_url)
    # Block until the handler releases the lock, or give up after 5 minutes.
    wait_flag = wait_lock.acquire(blocking=True, timeout=300)
    if not wait_flag:
        print(f'{TextStyle.RED}Authorization timeout.{TextStyle.RESET}')
        return
    # auth_info is presumably populated by the callback handler before it
    # releases the lock — TODO confirm against _CallbackHandler.
    print((
        f'{TextStyle.GREEN}Authorization success and kanaries token is configured!{TextStyle.RESET}\n'
        f'user: {TextStyle.UNDERLINE}{auth_info["user_name"]}{TextStyle.RESET}\n'
        f'workspace: {TextStyle.UNDERLINE}{auth_info["workspace_name"]}{TextStyle.RESET}'
    )) | null |
165,763 | from typing import List
from math import ceil
from collections import defaultdict
def base36encode(s: str) -> str:
    """Convert a string to its base36 representation.

    The string's UTF-8 bytes are interpreted as one big-endian integer,
    which is then written in base36 using digits 0-9 and uppercase A-Z.

    Args:
        s: the string to encode; the empty string maps to "0".

    Returns:
        str: the base36 digits, without any prefix.
    """
    alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    number = int.from_bytes(s.encode(), "big")
    # int.from_bytes always returns a non-negative int, so the original
    # isinstance check (and the 0 <= bound) were unreachable/redundant.
    if number < len(alphabet):
        return alphabet[number]
    digits = []
    while number:
        number, rem = divmod(number, len(alphabet))
        digits.append(alphabet[rem])
    return ''.join(reversed(digits))
The provided code snippet includes necessary dependencies for implementing the `fname_encode` function. Write a Python function `def fname_encode(fname: str) -> str` to solve the following problem:
Encode fname in base36 Args: - fname (str): the field name to encode Returns: str
Here is the function:
def fname_encode(fname: str) -> str:
    """Encode a field name as "GW_" plus its base36 representation.

    (The encoding is base36 — digits and uppercase letters — despite the
    historical "base32" wording.)

    Args:
    - fname (str): the original field name.

    Returns:
        str: an alphanumeric identifier, reversible via fname_decode.
    """
    return "GW_" + base36encode(fname) | Encode fname in base32 Args: - fname (str): Suppose to be str Returns: str |
165,764 | from typing import List
from math import ceil
from collections import defaultdict
def base36decode(s: str) -> str:
    """Inverse of base36encode: parse base36 digits back into the original string."""
    value = int(s, 36)
    # Rebuild the big-endian byte string; "0" decodes to zero bytes, i.e. "".
    byte_count = ceil(value.bit_length() / 8)
    return value.to_bytes(byte_count, "big").decode()
The provided code snippet includes necessary dependencies for implementing the `fname_decode` function. Write a Python function `def fname_decode(encode_fname: str) -> str` to solve the following problem:
Decode fname in base36
Here is the function:
def fname_decode(encode_fname: str) -> str:
    """Decode a "GW_"-prefixed base36-encoded field name (the slice drops the 3-char prefix)."""
    return base36decode(encode_fname[3:]) | Decode fname in base32 |
165,765 | from typing import Optional, List, Dict, Any
import base64
import zlib
import json
from pydantic import BaseModel, Field
from pygwalker.utils.encode import DataFrameEncoder
from pygwalker.utils.display import display_html
from pygwalker.utils.randoms import generate_hash_code
from pygwalker.services.render import jinja_env, GWALKER_SCRIPT_BASE64
class ChartData(BaseModel):
    """Preview payload for one chart: its rendered images plus grid layout info."""
    # Per-cell images; each carries row_index/col_index used by render_preview_html.
    charts: List[ImgData]
    # NOTE(review): presumably the combined single-chart rendering — confirm.
    single_chart: str = Field(..., alias="singleChart")
    # Grid dimensions used to lay out `charts`; aliased from camelCase JSON keys.
    n_rows: int = Field(..., alias="nRows")
    n_cols: int = Field(..., alias="nCols")
    title: str
def render_preview_html(
    chart_data: ChartData,
    div_id: str,
    *,
    custom_title: Optional[str] = None,
    desc: str = "",
) -> str:
    """Render a single chart preview to HTML via the preview.html template.

    The chart's images are placed into an n_rows x n_cols grid according to
    each image's (row_index, col_index) before the template is rendered.
    """
    rows, cols = chart_data.n_rows, chart_data.n_cols
    grid = [[None for _ in range(cols)] for _ in range(rows)]
    for img in chart_data.charts:
        grid[img.row_index][img.col_index] = img
    # An explicit custom_title (even "") overrides the chart's own title.
    title = chart_data.title if custom_title is None else custom_title
    return jinja_env.get_template("preview.html").render(
        image_list=grid,
        div_id=div_id,
        title=title,
        desc=desc,
    )
# Shared Jinja2 environment; templates are loaded from the installed
# pygwalker package's template directory.
jinja_env = Environment(
    loader=PackageLoader("pygwalker"),
    # NOTE(review): the empty tuple disables autoescaping entirely —
    # presumably deliberate (see the commented-out select_autoescape()),
    # since templates embed pre-built HTML fragments; confirm.
    autoescape=(()), # select_autoescape()
)
def render_preview_html_for_multi_charts(charts_map: Dict[str, ChartData], gid: str, preview_id: str) -> str:
tab_name = "tab-pyg-" + str(gid)
items = []
for chart_data in charts_map.values():
div_id = f"{gid}-{chart_data.title}".replace(" ", "")
chart_html = render_preview_html(chart_data, div_id, custom_title="")
items.append({
"tab_id": "tab-" + div_id,
"chart_title": chart_data.title,
"chart_html": chart_html
})
html = jinja_env.get_template("preview_list.html").render(
tab_name=tab_name,
preview_id=preview_id,
items=items
)
return html | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.