content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import reduce
import numpy as np
class SampleBatch(object):
    """Wrapper around a dictionary with string keys and array-like values.

    For example, {"obs": [1, 2, 3], "reward": [0, -1, 1]} is a batch of three
    samples, each with an "obs" and "reward" attribute.
    """

    def __init__(self, *args, **kwargs):
        """Constructs a sample batch (same params as dict constructor).

        Raises:
            AssertionError: if a key is not a string or the columns differ
                in length.
        """
        self.data = dict(*args, **kwargs)
        lengths = []
        for k, v in self.data.copy().items():
            assert type(k) == str, self
            lengths.append(len(v))
        # All columns must contain the same number of rows; a batch with no
        # columns at all is also allowed.
        assert len(set(lengths)) <= 1, "data columns must be same length"

    def concat(self, other):
        """Returns a new SampleBatch with each data column concatenated.

        Examples:
            >>> b1 = SampleBatch({"a": [1, 2]})
            >>> b2 = SampleBatch({"a": [3, 4, 5]})
            >>> print(b1.concat(b2))
            {"a": [1, 2, 3, 4, 5]}
        """
        # NOTE: this is an instance method. The previous `@staticmethod`
        # decorator shifted the arguments so `b1.concat(b2)` raised TypeError.
        assert self.data.keys() == other.data.keys(), "must have same columns"
        out = {}
        for k in self.data.keys():
            out[k] = np.concatenate([self.data[k], other.data[k]])
        return SampleBatch(out)

    def rows(self):
        """Returns an iterator over data rows, i.e. dicts with column values.

        Examples:
            >>> batch = SampleBatch({"a": [1, 2, 3], "b": [4, 5, 6]})
            >>> for row in batch.rows():
                    print(row)
            {"a": 1, "b": 4}
            {"a": 2, "b": 5}
            {"a": 3, "b": 6}
        """
        # `next(..., [])` keeps this safe for a batch with no columns.
        num_rows = len(next(iter(self.data.values()), []))
        for i in range(num_rows):
            row = {}
            for k in self.data.keys():
                # Index the underlying column directly; this class defines no
                # __getitem__, so the old `self[k][i]` raised TypeError.
                row[k] = self.data[k][i]
            yield row

    def columns(self, keys):
        """Returns a list of just the specified columns.

        Examples:
            >>> batch = SampleBatch({"a": [1], "b": [2], "c": [3]})
            >>> print(batch.columns(["a", "b"]))
            [[1], [2]]
        """
        out = []
        for k in keys:
            out.append(self.data[k])
        return out
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
198,
11748,
299,
32152,
355,
45941,
628,
19... | 2.099628 | 1,074 |
r"""
.. todo::
doc
"""
__all__ = ["StaticEmbedding"]
import os
import warnings
from collections import defaultdict
from copy import deepcopy
import json
from typing import Union
import numpy as np
import torch
import torch.nn as nn
from .embedding import TokenEmbedding
from ..core import logger
from ..core.vocabulary import Vocabulary
from ..io.file_utils import PRETRAIN_STATIC_FILES, _get_embedding_url, cached_path
from ..io.file_utils import _get_file_name_base_on_postfix
# File names written/read by StaticEmbedding.save() and StaticEmbedding.load().
VOCAB_FILENAME = 'vocab.txt'
STATIC_HYPER_FILENAME = 'static_hyper.json'
STATIC_EMBED_FILENAME = 'static.txt'
class StaticEmbedding(TokenEmbedding):
    r"""
    Static (non-contextual) word embedding.

    Given the name or path of a pre-trained embedding, extracts from it the
    vectors of the words that appear in ``vocab``. Words missing from the
    pre-trained file are randomly initialized, except that words marked as
    ``no_create_entry`` do not get their own vector and are mapped to the
    ``unk`` index instead.

    Pre-trained vectors that can be downloaded automatically:

    .. code::

        en: alias of en-glove-840b-300d (commonly used)
        en-glove-6b-50d: official 50d GloVe vectors
        en-glove-6b-100d: official 100d GloVe vectors
        en-glove-6b-200d: official 200d GloVe vectors
        en-glove-6b-300d: official 300d GloVe vectors
        en-glove-42b-300d: GloVe trained on 42B tokens
        en-glove-840b-300d:
        en-glove-twitter-27b-25d:
        en-glove-twitter-27b-50d:
        en-glove-twitter-27b-100d:
        en-glove-twitter-27b-200d:
        en-word2vec-300d: official 300d word2vec vectors
        en-fasttext-crawl: official 300d English fastText vectors
        cn-char-fastnlp-100d: 100d character embedding trained with fastNLP
        cn-bi-fastnlp-100d: 100d bigram embedding trained with fastNLP
        cn-tri-fastnlp-100d: 100d trigram embedding trained with fastNLP
        cn-fasttext: official 300d Chinese fastText embedding

    Example::

        >>> from fastNLP import Vocabulary
        >>> from fastNLP.embeddings import StaticEmbedding
        >>> vocab = Vocabulary().add_word_lst("The whether is good .".split())
        >>> embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-50d')

        >>> vocab = Vocabulary().add_word_lst(["The", 'the', "THE"])
        >>> embed = StaticEmbedding(vocab, model_dir_or_name="en-glove-50d", lower=True)
        >>> # "the", "The" and "THE" share one vector, initialized from the
        >>> # pre-trained entry for "the".

        >>> vocab = Vocabulary().add_word_lst(["The", "the", "THE"])
        >>> embed = StaticEmbedding(vocab, model_dir_or_name=None, embedding_dim=5, lower=True)
        >>> words = torch.LongTensor([[vocab.to_index(word) for word in ["The", "the", "THE"]]])
        >>> embed(words)
        >>> tensor([[[ 0.5773,  0.7251, -0.3104,  0.0777,  0.4849],
                     [ 0.5773,  0.7251, -0.3104,  0.0777,  0.4849],
                     [ 0.5773,  0.7251, -0.3104,  0.0777,  0.4849]]],
                   grad_fn=<EmbeddingBackward>)  # every casing of the word gives the same output
    """

    def __init__(self,
                 vocab: Vocabulary,
                 model_dir_or_name: Union[str, None] = 'en',
                 embedding_dim=-1,
                 requires_grad: bool = True,
                 init_method=None,
                 lower=False,
                 dropout=0,
                 word_dropout=0,
                 normalize=False,
                 min_freq=1,
                 word_transform=None,
                 store_freq=False,
                 **kwargs):
        r"""
        :param Vocabulary vocab: the vocabulary. Only vectors of words in it
            are loaded; words not found in the pre-trained file are randomly
            initialized.
        :param model_dir_or_name: two ways of pointing at a pre-trained static
            embedding: either a directory (which must contain exactly one
            ``.txt`` file) or a file path; or the name of a known embedding,
            in which case the cache is consulted and the file downloaded if
            necessary. ``None`` means randomly initialize an embedding of
            dimension ``embedding_dim``.
        :param int embedding_dim: dimension of the randomly initialized
            embedding; when this value is > 0, ``model_dir_or_name`` is
            ignored.
        :param bool requires_grad: whether gradients are required. Default True.
        :param callable init_method: how to initialize vectors that were not
            found. Any ``torch.nn.init.*`` method; it receives a tensor and
            must modify it in place.
        :param bool lower: lowercase vocab words before matching against the
            pre-trained vocabulary. If your vocabulary contains uppercase
            words that need their own vectors, set ``lower`` to False.
        :param float dropout: probability of dropout on the embedding output;
            0.1 zeroes 10% of the values at random.
        :param float word_dropout: probability of replacing a word with unk;
            trains the unk vector and acts as a regularizer.
        :param bool normalize: whether to normalize each vector to unit norm.
        :param int min_freq: words whose frequency in the Vocabulary is below
            this are mapped to unk.
        :param callable word_transform: optional fallback applied to a word
            when neither its exact nor its lowercased form is found in the
            pre-trained file (see ``_load_with_vocab``).
        :param bool store_freq: if True, store per-embedding-row word counts
            in the ``mapped_counts`` buffer.
        :param dict kwargs:
            bool only_train_min_freq: apply the min_freq filter only to words
                coming from the training set;
            bool only_norm_found_vector: default False; normalize only the
                vectors found in the pre-trained file;
            bool only_use_pretrain_word: default False; only words present in
                the pre-trained vocabulary keep their own vector, all others
                map to unk. Recommended True when the embedding is frozen.
        """
        super(StaticEmbedding, self).__init__(vocab,
                                              word_dropout=word_dropout,
                                              dropout=dropout)
        if embedding_dim > 0:
            if model_dir_or_name:
                logger.info(
                    f"StaticEmbedding will ignore `model_dir_or_name`, and randomly initialize embedding with"
                    f" dimension {embedding_dim}. If you want to use pre-trained embedding, "
                    f"set `embedding_dim` to 0.")
            model_dir_or_name = None

        # Resolve the local path of the pre-trained file (download cache,
        # plain file, or directory holding a single .txt file).
        if model_dir_or_name is None:
            assert embedding_dim >= 1, "The dimension of embedding should be larger than 1."
            embedding_dim = int(embedding_dim)
            model_path = None
        elif model_dir_or_name.lower() in PRETRAIN_STATIC_FILES:
            model_url = _get_embedding_url('static', model_dir_or_name.lower())
            model_path = cached_path(model_url, name='embedding')
            # existence of local paths is checked below
        elif os.path.isfile(
                os.path.abspath(os.path.expanduser(model_dir_or_name))):
            model_path = os.path.abspath(os.path.expanduser(model_dir_or_name))
        elif os.path.isdir(
                os.path.abspath(os.path.expanduser(model_dir_or_name))):
            model_path = _get_file_name_base_on_postfix(
                os.path.abspath(os.path.expanduser(model_dir_or_name)), '.txt')
        else:
            raise ValueError(f"Cannot recognize {model_dir_or_name}.")

        kwargs['min_freq'] = min_freq
        kwargs['lower'] = lower
        # Shrink the vocabulary according to min_freq.
        truncate_vocab = (vocab.min_freq is None
                          and min_freq > 1) or (vocab.min_freq
                                                and vocab.min_freq < min_freq)
        if truncate_vocab:
            truncated_vocab = deepcopy(vocab)
            truncated_vocab.min_freq = min_freq
            truncated_vocab.word2idx = None
            if lower:  # with lower=True, counts of all casings must be pooled
                lowered_word_count = defaultdict(int)
                for word, count in truncated_vocab.word_count.items():
                    lowered_word_count[word.lower()] += count
                for word in truncated_vocab.word_count.keys():
                    word_count = truncated_vocab.word_count[word]
                    if lowered_word_count[word.lower(
                    )] >= min_freq and word_count < min_freq:
                        truncated_vocab.add_word_lst(
                            [word] * (min_freq - word_count),
                            no_create_entry=truncated_vocab.
                            _is_word_no_create_entry(word))

            # Restrict the min_freq filter to words from the training set.
            if kwargs.get('only_train_min_freq',
                          False) and model_dir_or_name is not None:
                for word in truncated_vocab.word_count.keys():
                    if truncated_vocab._is_word_no_create_entry(
                            word
                    ) and truncated_vocab.word_count[word] < min_freq:
                        truncated_vocab.add_word_lst(
                            [word] *
                            (min_freq - truncated_vocab.word_count[word]),
                            no_create_entry=True)
            truncated_vocab.build_vocab()
            truncated_words_to_words = torch.arange(len(vocab))
            for word, index in vocab:
                truncated_words_to_words[index] = truncated_vocab.to_index(
                    word)
            logger.info(
                f"{len(vocab) - len(truncated_vocab)} words have frequency less than {min_freq}. "
                f"{len(truncated_vocab)} is created.")
            vocab = truncated_vocab

        self.only_use_pretrain_word = kwargs.get('only_use_pretrain_word',
                                                 False)
        self.only_norm_found_vector = kwargs.get('only_norm_found_vector',
                                                 False)
        # Read the embedding file.
        if lower:
            lowered_vocab = Vocabulary(padding=vocab.padding,
                                       unknown=vocab.unknown,
                                       specials=vocab.specials)
            for word, index in vocab:
                lowered_vocab.add_word(
                    word.lower(),
                    no_create_entry=vocab._is_word_no_create_entry(word))
            logger.info(
                f"All word in the vocab have been lowered. There are {len(vocab)} words, {len(lowered_vocab)} "
                f"unique lowered words.")
            if model_path:
                embedding = self._load_with_vocab(
                    model_path,
                    vocab=lowered_vocab,
                    init_method=init_method,
                    word_transform=word_transform)
            else:
                embedding = self._randomly_init_embed(len(lowered_vocab),
                                                      embedding_dim,
                                                      init_method)
                self.register_buffer('words_to_words',
                                     torch.arange(len(vocab)).long())
            if lowered_vocab.unknown:
                unknown_idx = lowered_vocab.unknown_idx
            else:
                unknown_idx = embedding.size(0) - 1  # otherwise the last row is unknown
            # Map indices of the original (cased) vocab onto rows of the
            # lowered embedding; unmatched words fall back to unknown_idx.
            words_to_words = torch.full((len(vocab), ),
                                        fill_value=unknown_idx,
                                        dtype=torch.long)
            for word, index in vocab:
                if word not in lowered_vocab:
                    word = word.lower()
                    if word not in lowered_vocab and lowered_vocab._is_word_no_create_entry(
                            word):
                        continue  # no entry needed; already mapped to unknown
                words_to_words[index] = self.words_to_words[
                    lowered_vocab.to_index(word)]
            self.register_buffer('words_to_words', words_to_words)
            self._word_unk_index = lowered_vocab.unknown_idx  # remap the unknown index
        else:
            if model_path:
                embedding = self._load_with_vocab(
                    model_path,
                    vocab=vocab,
                    init_method=init_method,
                    word_transform=word_transform)
            else:
                embedding = self._randomly_init_embed(len(vocab),
                                                      embedding_dim,
                                                      init_method)
                self.register_buffer('words_to_words',
                                     torch.arange(len(vocab)))
        if not self.only_norm_found_vector and normalize:
            embedding /= (torch.norm(embedding, dim=1, keepdim=True) + 1e-12)

        if truncate_vocab:
            # Compose the truncation mapping with the embedding-row mapping.
            for i in range(len(truncated_words_to_words)):
                index_in_truncated_vocab = truncated_words_to_words[i]
                truncated_words_to_words[i] = self.words_to_words[
                    index_in_truncated_vocab]
            del self.words_to_words
            self.register_buffer('words_to_words', truncated_words_to_words)
        self.embedding = nn.Embedding(num_embeddings=embedding.shape[0],
                                      embedding_dim=embedding.shape[1],
                                      padding_idx=vocab.padding_idx,
                                      max_norm=None,
                                      norm_type=2,
                                      scale_grad_by_freq=False,
                                      sparse=False,
                                      _weight=embedding)
        self._embed_size = self.embedding.weight.size(1)
        self.requires_grad = requires_grad
        self.kwargs = kwargs

        self.mapped_counts: torch.Tensor
        if store_freq:
            # Accumulate the user-vocab word counts per embedding row.
            counts = torch.zeros(self.embedding.num_embeddings)
            for idx_in_user_vocab, i in enumerate(self.words_to_words.tolist()):
                if idx_in_user_vocab == self._word_vocab.unknown_idx:
                    continue
                word = self._word_vocab.idx2word[idx_in_user_vocab]
                counts[i] += self._word_vocab.word_count[word]
            # unk and the special tokens inherit a count of 1
            counts[self._word_vocab.unknown_idx] = 1
            for spe in self._word_vocab.specials:
                counts[self._word_vocab[spe]] = 1
            self.register_buffer('mapped_counts', counts)

    # NOTE: the stray `@property` decorator that used to sit here broke every
    # `self._randomly_init_embed(...)` call site; this is a plain method.
    def _randomly_init_embed(self,
                             num_embedding,
                             embedding_dim,
                             init_embed=None):
        r"""
        Build a randomly initialized embedding matrix.

        :param int num_embedding: number of embedding entries
        :param int embedding_dim: embedding dimension
        :param callable init_embed: initialization method. ``None`` draws
            uniformly from ``[-sqrt(3/dim), sqrt(3/dim)]``; ``'normal'`` uses
            a standard normal; any other callable is applied to the tensor
            in place.
        :return: torch.FloatTensor
        """
        embed = torch.zeros(num_embedding, embedding_dim)

        if init_embed is None:
            nn.init.uniform_(embed, -np.sqrt(3 / embedding_dim),
                             np.sqrt(3 / embedding_dim))
        elif init_embed == 'normal':
            nn.init.normal_(embed)
        else:
            init_embed(embed)

        return embed

    def _load_with_vocab(self,
                         embed_filepath,
                         vocab,
                         dtype=np.float32,
                         padding='<pad>',
                         unknown='<unk>',
                         error='ignore',
                         init_method=None,
                         word_transform=None):
        r"""
        Load from ``embed_filepath`` the vectors of the words in ``vocab``.
        Automatically detects whether the file is in word2vec format (the
        first line holds only two numbers) or in GloVe format.

        :param str embed_filepath: path of the pre-trained embedding file.
        :param vocab: a :class:`~fastNLP.Vocabulary`; only vectors of words
            appearing in it are read. Words absent from the file receive
            freshly initialized vectors so the whole embedding stays
            consistently distributed.
        :param dtype: dtype used when parsing the vectors.
        :param str padding: the padding token used in the file.
        :param str unknown: the unknown token used in the file.
        :param str error: ``'ignore'`` or ``'strict'``; with ``'ignore'``,
            malformed lines (blank lines, inconsistent dimensions) are
            skipped, with ``'strict'`` they raise.
        :param init_method: initializer for vectors that were not found; any
            ``torch.nn.init.*`` method.
        :param word_transform: optional callable applied to a word when
            neither its exact nor its lowercased form is in ``vocab``.
        :return torch.tensor: shape ``[len(vocab), dimension]``, with the
            dimension determined by the pre-trained file.
        """
        assert isinstance(vocab,
                          Vocabulary), "Only fastNLP.Vocabulary is supported."
        if not os.path.exists(embed_filepath):
            raise FileNotFoundError(
                "`{}` does not exist.".format(embed_filepath))
        with open(embed_filepath, 'r', encoding='utf-8') as f:
            line = f.readline().strip()
            parts = line.split()
            start_idx = 0
            # A two-element first line is a word2vec "<count> <dim>" header.
            if len(parts) == 2:
                dim = int(parts[1])
                start_idx += 1
            else:
                dim = len(parts) - 1
                f.seek(0)
            matrix = {
            }  # vocab index -> vector, or None when absent from the file
            if vocab.padding:
                matrix[vocab.padding_idx] = torch.zeros(dim)
            if vocab.unknown:
                matrix[vocab.unknown_idx] = torch.zeros(dim)
            for special in vocab.specials:
                matrix[vocab[special]] = torch.zeros(dim)
            found_count = 0
            found_unknown = False
            added_in_matrix = set()
            for idx, line in enumerate(f, start_idx):
                try:
                    parts = line.strip().split()
                    word = ''.join(parts[:-dim])
                    nums = parts[-dim:]
                    # align the file's pad/unk tokens with the vocab's
                    if word == padding and vocab.padding is not None:
                        word = vocab.padding
                    elif word == unknown and vocab.unknown is not None:
                        word = vocab.unknown
                        found_unknown = True
                    if word in vocab:
                        index = vocab.to_index(word)
                        if index in matrix:
                            if index in added_in_matrix:
                                # an exact match overrides a previous
                                # lower()/transform match for this index
                                added_in_matrix.remove(index)
                                found_count -= 1
                            else:
                                warnings.warn(
                                    f"Word has more than one vector in embedding file. Set logger level to "
                                    f"DEBUG for detail.")
                                logger.debug(
                                    f"Word:{word} occurs again in line:{idx}(starts from 0)"
                                )
                        matrix[index] = torch.from_numpy(
                            np.fromstring(' '.join(nums),
                                          sep=' ',
                                          dtype=dtype,
                                          count=dim))
                        if self.only_norm_found_vector:
                            matrix[index] = matrix[index] / np.linalg.norm(
                                matrix[index])
                        found_count += 1
                    elif word.lower() in vocab:
                        index = vocab.to_index(word.lower())
                        if index in matrix:
                            continue
                        added_in_matrix.add(index)
                        matrix[index] = torch.from_numpy(
                            np.fromstring(' '.join(nums),
                                          sep=' ',
                                          dtype=dtype,
                                          count=dim))
                        if self.only_norm_found_vector:
                            matrix[index] = matrix[index] / np.linalg.norm(
                                matrix[index])
                        found_count += 1
                    elif word_transform is not None and (word := word_transform(word)) in vocab:
                        index = vocab.to_index(word)
                        if index in matrix:
                            continue
                        added_in_matrix.add(index)
                        matrix[index] = torch.from_numpy(
                            np.fromstring(' '.join(nums),
                                          sep=' ',
                                          dtype=dtype,
                                          count=dim))
                        if self.only_norm_found_vector:
                            matrix[index] = matrix[index] / np.linalg.norm(
                                matrix[index])
                        found_count += 1
                except Exception as e:
                    if error == 'ignore':
                        warnings.warn(
                            "Error occurred at the {} line.".format(idx))
                    else:
                        logger.error(
                            "Error occurred at the {} line.".format(idx))
                        raise e
            logger.info(
                "Found {} out of {} words in the pre-training embedding.".
                format(found_count, len(vocab)))
            if not self.only_use_pretrain_word:  # create entries for words that were not found
                for word, index in vocab:
                    if index not in matrix and not vocab._is_word_no_create_entry(
                            word):
                        if found_unknown:  # initialize from unk when the file provided one
                            matrix[index] = matrix[vocab.unknown_idx]
                        else:
                            matrix[index] = None
            # the keys of `matrix` are the vocab indices that get an own row
            vectors = self._randomly_init_embed(len(matrix), dim, init_method)

            if vocab.unknown is None:  # create a dedicated unknown entry
                unknown_idx = len(matrix)
                vectors = torch.cat((vectors, torch.zeros(1, dim)),
                                    dim=0).contiguous()
            else:
                unknown_idx = vocab.unknown_idx
            self.register_buffer(
                'words_to_words',
                torch.full((len(vocab), ),
                           fill_value=unknown_idx,
                           dtype=torch.long).long())
            index = 0
            for word, index_in_vocab in vocab:
                if index_in_vocab in matrix:
                    vec = matrix.get(index_in_vocab)
                    if vec is not None:  # keep the found vector; None rows stay randomly initialized
                        vectors[index] = vec
                    self.words_to_words[index_in_vocab] = index
                    index += 1

            return vectors

    def forward(self, words):
        r"""
        Look up the embeddings of the given word indices.

        :param words: torch.LongTensor, [batch_size, max_len]
        :return: torch.FloatTensor, [batch_size, max_len, embed_size]
        """
        if hasattr(self, 'words_to_words'):
            words = self.words_to_words[words]
        words = self.drop_word(words)
        words = self.embedding(words)
        words = self.dropout(words)
        return words

    def save(self, folder):
        """
        Save this embedding under ``folder`` so that it can be restored later
        with :meth:`load`.

        :param str folder: three files are written there: ``vocab.txt``
            (readable via ``Vocabulary.load``), ``static.txt`` (word2vec
            format: a "<count> <dim>" header line, then one word followed by
            its values per line) and ``static_hyper.json`` (the
            StaticEmbedding hyper-parameters).
        :return:
        """
        os.makedirs(folder, exist_ok=True)

        vocab = self.get_word_vocab()
        vocab_fp = os.path.join(folder, VOCAB_FILENAME)
        vocab.save(vocab_fp)

        kwargs = self.kwargs.copy()
        kwargs['dropout'] = self.dropout_layer.p
        kwargs['word_dropout'] = self.word_dropout
        kwargs['requires_grad'] = self.requires_grad
        kwargs['only_norm_found_vector'] = False
        kwargs['only_use_pretrain_word'] = True

        with open(os.path.join(folder, STATIC_HYPER_FILENAME),
                  'w',
                  encoding='utf-8') as f:
            json.dump(kwargs, f, indent=2)

        with open(os.path.join(folder, STATIC_EMBED_FILENAME),
                  'w',
                  encoding='utf-8') as f:
            f.write('{}\n'.format(' ' * 30))  # placeholder, overwritten with the header below
            word_count = 0
            saved_word = {}
            valid_word_count = 0
            for i in range(len(self.words_to_words)):
                word = vocab.to_word(i)
                if not vocab._is_word_no_create_entry(word):
                    word_count += 1
                    if kwargs['lower']:
                        word = word.lower()
                    if word in saved_word:
                        continue
                    saved_word[word] = 1
                    vec_i = self.words_to_words[i]
                    if vec_i == vocab.unknown_idx and i != vocab.unknown_idx:
                        continue
                    vec = self.embedding.weight.data[vec_i].tolist()
                    vec_str = ' '.join(map(str, vec))
                    f.write(f'{word} {vec_str}\n')
                    valid_word_count += 1
            f.seek(0)
            f.write('{} {}'.format(valid_word_count, self.embedding_dim))
        logger.debug(f"StaticEmbedding has been saved to {folder}.")

    @classmethod
    def load(cls, folder):
        """
        Restore a StaticEmbedding previously written by :meth:`save`.

        :param str folder: must contain vocab.txt, static_embed.txt and
            static_hyper.json (see the module-level FILENAME constants).
        :return:
        """
        for name in [
                VOCAB_FILENAME, STATIC_EMBED_FILENAME, STATIC_HYPER_FILENAME
        ]:
            assert os.path.exists(os.path.join(
                folder, name)), f"{name} not found in {folder}."

        vocab = Vocabulary.load(os.path.join(folder, VOCAB_FILENAME))
        with open(os.path.join(folder, STATIC_HYPER_FILENAME),
                  'r',
                  encoding='utf-8') as f:
            hyper = json.load(f)

        logger.info(f"Load StaticEmbedding from {folder}.")
        embed = cls(vocab=vocab,
                    model_dir_or_name=os.path.join(folder,
                                                   STATIC_EMBED_FILENAME),
                    **hyper)
        return embed
| [
81,
37811,
198,
492,
284,
4598,
3712,
198,
220,
220,
220,
2205,
198,
37811,
198,
198,
834,
439,
834,
796,
14631,
45442,
31567,
6048,
278,
8973,
198,
11748,
28686,
198,
11748,
14601,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
4866,
... | 1.560727 | 16,006 |
#This was not made by me although I'm putting this here to implement into Saudtool
#!/usr/bin/env python3
import json
import sys
import os
from datetime import datetime
from io import BytesIO
from os.path import isfile
from pathlib import Path
from pprint import pprint
import httpx
from PIL import Image
from geopy.geocoders import Nominatim
import config
from lib.banner import banner
import lib.gmaps as gmaps
import lib.youtube as ytb
from lib.photos import gpics
from lib.utils import *
import lib.calendar as gcalendar
if __name__ == "__main__":
    banner()

    # We change the current working directory to allow using GHunt from anywhere
    os.chdir(Path(__file__).parents[0])

    if len(sys.argv) <= 1:
        exit("Please put an email address.")
    if not isfile(config.data_path):
        exit("Please generate cookies and tokens first.")

    email = sys.argv[1]

    # Credentials produced by the token-generation step; data_path holds a
    # JSON object with "auth", "keys" and "cookies" entries.
    auth = ""
    hangouts_token = ""
    cookies = ""

    with open(config.data_path, 'r') as f:
        out = json.loads(f.read())
        auth = out["auth"]
        hangouts_token = out["keys"]["hangouts"]
        cookies = out["cookies"]

    client = httpx.Client(cookies=cookies, headers=config.headers)

    data = is_email_google_account(client, auth, cookies, email,
                                   hangouts_token)

    is_within_docker = within_docker()
    if is_within_docker:
        print("[+] Docker detected, profile pictures will not be saved.")

    geolocator = Nominatim(user_agent="nominatim")

    print(f"[+] {len(data['matches'])} account found !")

    for user in data["matches"]:
        print("\n------------------------------\n")

        gaiaID = user["personId"][0]
        email = user["lookupId"]
        # NOTE(review): `data` is reassigned to the YouTube lookup result
        # further down, so with more than one match this read would hit the
        # wrong dict on the next iteration — verify.
        infos = data["people"][gaiaID]

        # get name
        name = get_account_name(client, gaiaID)
        if name:
            print(f"Name : {name}")
        else:
            if "name" not in infos:
                print("Couldn't find name")
            else:
                for i in range(len(infos["name"])):
                    print(f"Name : {infos['name'][i]['displayName']}")
                if len(infos["name"]) > 0:
                    name = infos["name"][0]["displayName"]
            # NOTE(review): runs unconditionally in this branch, even after a
            # display name was printed above — looks like a misplaced line.
            print("[-] Couldn't find name")

        # profile picture
        profile_pic_link = infos["photo"][0]["url"]
        req = client.get(profile_pic_link)
        profile_pic_img = Image.open(BytesIO(req.content))
        # Hash is compared against the known default-avatar hash below.
        profile_pic_hash = image_hash(profile_pic_img)
        is_default_profile_pic = detect_default_profile_pic(profile_pic_hash)

        if not is_default_profile_pic and not is_within_docker:
            print("\n[+] Custom profile picture !")
            print(f"=> {profile_pic_link}")
            if config.write_profile_pic and not is_within_docker:
                open(Path(config.profile_pics_dir) / f'{email}.jpg', 'wb').write(req.content)
                print("Profile picture saved !")
        else:
            print("\n[-] Default profile picture")

        # last edit
        # lastUpdateTimeMicros is in microseconds; dropping the last three
        # digits yields milliseconds-as-seconds for utcfromtimestamp.
        timestamp = int(infos["metadata"]["lastUpdateTimeMicros"][:-3])
        last_edit = datetime.utcfromtimestamp(timestamp).strftime("%Y/%m/%d %H:%M:%S (UTC)")
        print(f"\nLast profile edit : {last_edit}\n"
              f"\nEmail : {email}\nGoogle ID : {gaiaID}\n")

        # is bot?
        profile_pic = infos["photo"][0]["url"]
        if "extendedData" in infos:
            isBot = infos["extendedData"]["hangoutsExtendedData"]["isBot"]
            if isBot:
                print("Hangouts Bot : Yes !")
            else:
                print("Hangouts Bot : No")
        else:
            print("Hangouts Bot : Unknown")

        # decide to check YouTube
        ytb_hunt = False
        try:
            # "babel" is the internal app type for Hangouts.
            services = [x["appType"].lower() if x["appType"].lower() != "babel" else "hangouts" for x in
                        infos["inAppReachability"]]
            if "youtube" in services and name:
                ytb_hunt = True
            print("\n[+] Activated Google services :")
            print('\n'.join(["- " + x.capitalize() for x in services]))
        except KeyError:
            # inAppReachability missing: fall back to trying YouTube anyway.
            ytb_hunt = True
            print("\n[-] Unable to fetch connected Google services.")

        # check YouTube
        if ytb_hunt or config.ytb_hunt_always:
            confidence = None
            data = ytb.get_channels(client, name, config.data_path,
                                    config.gdocs_public_doc)
            if not data:
                print("\n[-] YouTube channel not found.")
            else:
                confidence, channels = ytb.get_confidence(data, name, profile_pic_hash)
                if confidence:
                    print(f"\n[+] YouTube channel (confidence => {confidence}%) :")
                    for channel in channels:
                        print(f"- [{channel['name']}] {channel['profile_url']}")
                    possible_usernames = ytb.extract_usernames(channels)
                    if possible_usernames:
                        print("\n[+] Possible usernames found :")
                        for username in possible_usernames:
                            print(f"- {username}")
                else:
                    print("\n[-] YouTube channel not found.")

        # TODO: return gpics function output here
        #gpics(gaiaID, client, cookies, config.headers, config.regexs["albums"], config.regexs["photos"],
        #      config.headless)

        # reviews
        reviews = gmaps.scrape(gaiaID, client, cookies, config, config.headers, config.regexs["review_loc_by_id"], config.headless)

        if reviews:
            confidence, locations = gmaps.get_confidence(reviews, config.gmaps_radius)
            print(f"\n[+] Probable location (confidence => {confidence}) :")

            loc_names = []
            for loc in locations:
                loc_names.append(
                    f"- {loc['avg']['town']}, {loc['avg']['country']}"
                )
            loc_names = set(loc_names)  # delete duplicates
            for loc in loc_names:
                print(loc)

        # Google Calendar
        calendar_response = gcalendar.fetch(email, client, config)
        if calendar_response:
            print("[+] Public Google Calendar found !")
            events = calendar_response["events"]
            if events:
                gcalendar.out(events)
            else:
                print("=> No recent events found.")
        else:
            print("[-] No public Google Calendar.")
| [
2,
1212,
373,
407,
925,
416,
502,
3584,
314,
1101,
5137,
428,
994,
284,
3494,
656,
6746,
25981,
198,
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
33918,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
4818,
8079,... | 2.092298 | 3,142 |
from setuptools import setup
# Packaging metadata for the jetstore distribution. The data files listed in
# `package_data` ship alongside the bridge/compiler sub-packages (ANTLR
# grammar and generated artifacts, plus a test database).
setup(
    name='jetstore',
    author='ArtiSoft',
    version='1.0',
    python_requires='>=3.9',
    install_requires=['absl-py', 'apsw', 'antlr4-python3-runtime'],
    packages=['jets', 'jets.bridge', 'jets.compiler'],
    package_data = {
        'jets.bridge': ['jetrule_rete_test.db'],
        'jets.compiler': ['JetRule.g4', 'test_data/*', '*.interp', '*.tokens']
    },
    # include_package_data=True,
    license='ArtiSoft Inc.',
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
1438,
11639,
31173,
8095,
3256,
198,
220,
1772,
11639,
8001,
72,
18380,
3256,
198,
220,
2196,
11639,
16,
13,
15,
3256,
198,
220,
21015,
62,
47911,
11639,
29,
28,
18,
... | 2.413043 | 184 |
# -*- coding: utf-8 -*-
"""
Created on Mon May 17 11:55:11 2021

@author: Qian.Cao

Generate a series of Voronoi Rod (AND PLATE) phantoms
"""

import sys
sys.path.append('../') # use bonebox from source without having to install/build

from bonebox.phantoms.TrabeculaeVoronoi import *

import numpy as np
import matplotlib.pyplot as plt
# import mcubes

from bonebox.FEA.fea import *

plt.ion()

print('Running example for TrabeculaeVoronoi')

# Physical / discretization constants.
rhoBone = 2e-3 # g/mm3
voxelSize = (0.05, 0.05, 0.05) # mm
pixelSize = (0.05, 0.05) # mm
radiusTBS = 5 # pixels
plattenThicknessVoxels = 5 # voxels
plattenThicknessMM = plattenThicknessVoxels * voxelSize[0] # mm

#% Generate Phantoms with different dilation Radii and seeds
out_dir = "/data/BoneBox-out/"
phantoms_dir = "/data/BoneBox-out/phantoms/"

# Projection/TBS Settings
# Sweep axes: random state x number of Voronoi seed points x dilation radius.
all_randState = [1,2,3,4]
all_dilationRadius = np.linspace(1,3,10)
all_Nseeds = np.arange(3,10).astype(int)
edgesRetainFraction = 0.8

# Bone-volume / total-volume fraction for every phantom in the sweep.
bvtvs = np.zeros((len(all_randState),len(all_Nseeds),len(all_dilationRadius)))
# Es = np.zeros(bvtvs.shape)

#%% Generate all phantoms, saving each volume and its BV/TV.
for rr, randState in enumerate(all_randState):
    for nn, Nseeds in enumerate(all_Nseeds):
        for ra, dilationRadius in enumerate(all_dilationRadius):
            print(rr,nn,ra)
            volume, bvtv, edgeVerticesRetain = makePhantom(dilationRadius, Nseeds, edgesRetainFraction, randState)
            np.save(phantoms_dir+"phan_"+str(rr)+"_"+str(nn)+"_"+str(ra), volume)
            # E = computeFEA(volume)
            bvtvs[rr,nn,ra] = bvtv
            # Es[rr,nn,ra] = E

np.save(out_dir+"FEA_bvtvs_2", bvtvs)
# np.save("FEA_Es_2", Es)

#%% Visualize the seed-averaged BV/TV surface over the sweep grid.
bvtvs = np.load(out_dir+"FEA_bvtvs_2.npy")

import scipy.ndimage

# plot mean and Std of bvtvs
# 10x linear upsampling gives smoother contours for display only.
meanbvtv = scipy.ndimage.zoom(np.mean(bvtvs,axis=0), 10, order=1)
extent=[np.min(all_dilationRadius),np.max(all_dilationRadius),
        np.min(all_Nseeds),np.max(all_Nseeds)]

fig, ax = plt.subplots()
ax.imshow(meanbvtv, interpolation="nearest",extent=extent,aspect='auto', origin='lower')
cs = ax.contour(meanbvtv, extent=extent, colors='w', origin='lower')
ax.clabel(cs, fontsize=9, inline=True)
plt.xlabel("Dilation Radius")
plt.ylabel("N")

# Get contour lines
# Sample points along iso-BV/TV contour(s) so phantoms of equal density but
# different morphology can be compared.
samplesPerVal = 4
isobvtv_vals = [0.32]
isobvtv_xy = []
isobvtv_xyeval = []

for vv, isobvtv in enumerate(isobvtv_vals):
    cs = ax.contour(meanbvtv, [isobvtv], extent=extent)
    p = cs.collections[0].get_paths()[0]
    isobvtv_xy.append(p.vertices)

for ii, xy in enumerate(isobvtv_xy):
    xylist = []
    Nxy = xy.shape[0]
    Dxy = Nxy // (samplesPerVal-1)
    # select 4 evenly spaced points along the isocline
    for ss in range(samplesPerVal-1):
        xylist.append(xy[ss*Dxy,:])
    xylist.append(xy[-1,:])
    isobvtv_xyeval.append(np.array(xylist))

#%% Export Triplanar images of the phantoms
for rr, randState in enumerate(all_randState):
    for nn, Nseeds in enumerate(all_Nseeds):
        for ra, dilationRadius in enumerate(all_dilationRadius):
            volume = np.load(phantoms_dir+"phan_"+str(rr)+"_"+str(nn)+"_"+str(ra)+".npy")
            plt.subplot(1, 3, 1)
            plt.imshow(volume[:,:,50],interpolation="nearest",cmap="gray")
            plt.title("XY"); plt.axis("off")
            plt.subplot(1, 3, 2)
            plt.imshow(volume[:,50,:],interpolation="nearest",cmap="gray")
            plt.title("XZ"); plt.axis("off")
            plt.subplot(1, 3, 3)
            plt.imshow(volume[50,:,:],interpolation="nearest",cmap="gray")
            plt.title("YZ"); plt.axis("off")
            plt.savefig(phantoms_dir+"fig_"+str(rr)+"_"+str(nn)+"_"+str(ra))
            plt.close("all")

#%% Evaluates points along isobvtv_xyeval
phantoms_iso_dir = "/data/BoneBox-out/phantoms_iso/"
randStates = [2, 3]
# Stiffness for each (iso-value, sample point, random state) combination.
EsArr= np.zeros((len(isobvtv_vals),samplesPerVal,len(randStates)))

for ii, isoval in enumerate(isobvtv_vals):
    dilationRadiusArray = isobvtv_xyeval[ii][:,0]
    NseedsArray = isobvtv_xyeval[ii][:,1].astype(int)
    for pp in range(samplesPerVal):
        for rr, randState in enumerate(randStates):
            print(ii, pp, rr)
            volume, bvtv, edgeVerticesRetain = makePhantom(dilationRadiusArray[pp],
                                                           NseedsArray[pp],
                                                           edgesRetainFraction,
                                                           randState)
            np.save(phantoms_iso_dir+"phan_"+str(ii)+"_"+str(pp)+"_"+str(rr), volume)
            plt.subplot(1, 3, 1)
            plt.imshow(volume[:,:,50],interpolation="nearest",cmap="gray")
            plt.title("XY"); plt.axis("off")
            plt.subplot(1, 3, 2)
            plt.imshow(volume[:,50,:],interpolation="nearest",cmap="gray")
            plt.title("XZ"); plt.axis("off")
            plt.subplot(1, 3, 3)
            plt.imshow(volume[50,:,:],interpolation="nearest",cmap="gray")
            plt.title("YZ"); plt.axis("off")
            plt.savefig(phantoms_iso_dir+"fig_"+str(ii)+"_"+str(pp)+"_"+str(rr))
            plt.close("all")
            E = computeFEA(volume)
            EsArr[ii,pp,rr] = E
            print(E)

np.save(out_dir+"EsArr", EsArr)

#%% Reference stiffness: a fully solid block (shell voxels cleared).
vol = np.ones(volume.shape).astype(bool)
vol[0,:,:] = False; vol[-1,:,:] = False;
vol[:,0,:] = False; vol[:,-1,:] = False;
vol[:,:,0] = False; vol[:,:,-1] = False;
E0 = computeFEA(vol)

#%% Box plot of stiffness normalized by the solid-block reference.
a_strings = ["%.2f" % x for x in dilationRadiusArray]
plt.boxplot(np.squeeze(EsArr).T/E0)
plt.xticks([1, 2, 3, 4], a_strings)
plt.grid()

#%%
# EsArray = np.array(EsList)

# bvtvs = np.load(out_dir+"FEA_bvtvs_2.npy")
# Es = np.load("FEA_Es_2.npy")

# plt.plot(bvtvs.flatten(), -Es.flatten(),'ko')

# # np.save("FEAbvtvs",bvtvs)
# # np.save("FEAEs",Es)

# plt.imshow(np.mean(bvtvs,axis=0))
# plt.axis("off")
# plt.colorbar()

# plt.imshow(np.std(bvtvs,axis=0))
# plt.axis("off")
# plt.colorbar()

# plt.imshow(np.mean(Es,axis=0))
# plt.axis("off")
# plt.colorbar()

# plt.imshow(np.std(Es,axis=0))
# plt.axis("off")
# plt.colorbar()

# plt.plot(bvtvs.flatten(),Es.flatten(),'ko')
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
1737,
1596,
1367,
25,
2816,
25,
1157,
33448,
198,
198,
31,
9800,
25,
44696,
13,
34,
5488,
198,
198,
8645,
378,
257,
2168,
286,
44143,
261... | 1.914303 | 3,244 |
import logging
import sys
import requests
import typer
from colorama import Fore
from .upm.upmapi import UpmApi
from .util import browser
# Typer sub-application grouping the `safemode` commands (presumably mounted
# onto the main CLI app elsewhere — not visible in this module).
app_safemode = typer.Typer()
@app_safemode.callback()
def safemode(ctx: typer.Context):
    """Controls the upm safemode"""
    # Group callback: runs before any `safemode` subcommand. The docstring
    # doubles as the CLI help text, so it is left untouched; no shared setup
    # is performed here beyond the context the subcommands read.
@app_safemode.command("status")
def safemode_status(
    ctx: typer.Context, web: bool = typer.Option(False, help="open upm in web browser after showing safemode status"),
):
    """ prints out the safemode status """
    # Query the UPM instance for its safe-mode flag and report it, color
    # coded via colorama; connection and credential problems abort the CLI.
    base_url = ctx.obj.get("base_url")
    try:
        if UpmApi(base_url).get_safemode():
            state = f"{Fore.YELLOW}enabled{Fore.RESET}"
        else:
            state = f"{Fore.GREEN}disabled{Fore.RESET}"
        logging.info("Safe-mode is currently %s", state)
    except requests.exceptions.ConnectionError:
        logging.error("Could not connect to host - check your base-url")
        sys.exit(1)
    except Exception as e:
        logging.error("An error occured - check your credentials")
        logging.error(f"{e}")
        sys.exit(1)
    if web:
        browser.open_web_upm(base_url)
@app_safemode.command("enable")
@app_safemode.command("disable")
| [
11748,
18931,
198,
11748,
25064,
198,
198,
11748,
7007,
198,
11748,
1259,
525,
198,
6738,
3124,
1689,
1330,
4558,
198,
198,
6738,
764,
929,
76,
13,
929,
8899,
72,
1330,
3205,
42646,
14415,
198,
6738,
764,
22602,
1330,
6444,
198,
198,
... | 2.54102 | 451 |
"""Ground truth generator for lane estimator benchmarking."""
import codecs
import numpy as np
from scipy.interpolate import splev, splprep
import yaml
from .views import filter_within_frame
| [
37811,
35539,
3872,
17301,
329,
11193,
3959,
1352,
18335,
278,
526,
15931,
198,
11748,
40481,
82,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
3849,
16104,
378,
1330,
599,
2768,
11,
4328,
46012,
198,
11748,
331,
4369... | 3.581818 | 55 |
from typing import TypeVar, Generic
from winton_kafka_streams.processor.serialization import Serde
from winton_kafka_streams.processor.serialization.serdes import *
from .key_value_store_factory import KeyValueStoreFactory
KT = TypeVar('KT') # Key type.
VT = TypeVar('VT') # Value type.
| [
6738,
19720,
1330,
5994,
19852,
11,
42044,
198,
198,
6738,
266,
2371,
62,
74,
1878,
4914,
62,
5532,
82,
13,
41341,
13,
46911,
1634,
1330,
2930,
2934,
198,
6738,
266,
2371,
62,
74,
1878,
4914,
62,
5532,
82,
13,
41341,
13,
46911,
1634... | 3.173913 | 92 |
VERSION = '0.2.25'
| [
43717,
796,
705,
15,
13,
17,
13,
1495,
6,
198
] | 1.9 | 10 |
import os
import json
import subprocess
import sys
import logging
import psutil
from subprocess import TimeoutExpired, STDOUT
logger = logging.getLogger(__name__)
RET_INTERRUPT = -1
RET_TIMEOUT = -2
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
18931,
198,
11748,
26692,
22602,
198,
198,
6738,
850,
14681,
1330,
3862,
448,
3109,
6474,
11,
48571,
12425,
198,
198,
6404,
1362,
796,
18931,
13,
1136... | 2.74359 | 78 |
# vim: ts=4 sw=4 noexpandtab
"""Battlefield -- ModManager.
This is a Module Manager for BattleField
It enables users to add and remove modules dynamicly using
a centralised configuration which is multi server friendly.
The location of the config file modmanager.con is determined by the directory identified
by +config command line option for BF2 and by the directory identified by the +overlayPath
command line option on BF2142. If neither are set it will look in the <mod>/settings directory.
It provides a fully expandable and configurable Module
framework and Rcon implementation.
===== Config =====
# The sub path where modules are to be found
modmanager.moduleBase "modules"
# Auto save config when shutting down
modmanager.autoSave 1
# The path to look in when @HOME@ is seen in the path
# of the servers main config file
modmanager.homeGuess "C:/Documents and Settings/Administrator/My Documents/Battlefield 2/"
# The name of the core rcon module
modmanager.rconModule "mm_rcon"
# The name of the core ban manager module
modmanager.banManagerModule "mm_banmanager"
# The name of the core logger module
modmanager.logModule "mm_logger"
# Enable / disable debug logging
modmanager.debugEnable 0
# The file to send debug logging to
modmanager.debugFile "modmanager_debug.log"
# The log verbosity:
# 0 = errors
# 1 = + warnings
# 2 = + info ( default )
# 3 = + debug
# ...
modmanager.logLevel 2,
# If the log file is auto flushed after every write
modmanager.logAutoFlush 1
# The format for the log date
modmanager.logDateFormat "[%Y-%m-%d %H:%M:%S] "
===== Rcon methods =====
# Print the running config
mm printRunningConfig
# Save the running config
mm saveConfig
# Load the specified module
mm loadModule <module_name>
# List the known modules an their states
mm listModules
# Shutdown the specified module
mm shutdownModule <module_name>
# Start the specified module
mm startModule <module_name>
# Reload the specified module
mm reloadModule <module_name>
# Set the parameter of a module
mm setParam <module_name> <param> <value>
===== Notes =====
* Its not currently garanteed that shutdown methods are called when the server exits. Due to this autoSave may be unreliable
* Setting the parameter of a module may not take effect until that module is reloaded or the server restarted ( requires saveConfig )
===== History =====
v2.2c - 14/05/2013
Added dependant_day_night to Heroes
v2.2b - 29/01/2013
Added compatibility for long values
v2.2a - 11/12/2012
Added ruin_snow to Heroes
v2.2 - 10/12/2012
Added new map to Play4Free
v2.1v - 22/10/2012
Added new gametype to Play4Free
v2.1u - 19/09/2012
Added dependent_day to Heroes
v2.1t - 17/07/2012
Added Ruin_Day to Heroes
v2.1s - 21/06/2012
Added river to Heroes
v2.1r - 24/05/2012
Added Trail to Play 4 Free
v2.1q - 17/04/2012
Added Lunar Landing to Battlefield Heroes
v2.1p - 23/02/2012
Added Mashtuur City to Play 4 Free
v2.1o - 24/01/2012
Added royal_rumble_day to Battlefield Heroes
v2.1n - 13/12/2011
Added new Battlefield Heroes map: royal_rumble_snow
v2.1m - 08/12/2011
Added new map for Battlefield Play for Free
Missing version info:
v2.1l - Added CTF Gametype to Battlefield Heroes
v2.1k - Added royal_rumble for Battlefield Heroes
v2.1j Added Dragon Valley for Battlefield Play 4 Free
v2.1h - 27/06/2011
Corrected isXXX description
v2.1 - 12/01/2011
Added Support for Battlefield Play for Free
v2.0c - 10/11/2009:
Added support for Windows paths in automatic fallback on BF2 for config location
v2.0 - 07/05/2009:
Added isBattleFieldHeroes
v1.9 - 21/04/2009:
Version bump for BF2 v1.50 patch support
v1.8 - 10/05/2008:
Version bump for BF2142 v1.50 patch support
v1.7 - 21/02/2007:
setParam is now prevented from altering mm_rcon.restrictedGametypes and mm_rcon.lockedSettings
Added a method to retrieve the rcon handle
v1.6 - 08/02/2007:
Version bump
Increased error checking for config parsing
setParam is now prevented from altering restricted parameters
v1.5 - 03/11/2006:
Added new rcon methods runRconCommand and getRconContext
v1.5-rc2 - 18/10/2006:
Corrected loading of legacy modules
v1.5-rc1 - 16/08/2006:
Now uses .search instead of .match in regexp
Removed invalid supported games output
Added 2142 compatibility
Added isBattleField2142, isBattleField2 and getGame methods to enable game checks
Version bump
v1.4 - 30/05/2006:
Version bump
v1.3 - 30/05/2006:
Version bump
v1.2a - 22/03/2006:
Swapped rcon and banManager initialisation order so that banManager can register rcon commands
v1.2 - 22/02/2006:
Bumped version number for ban manager release
Now also searches admin/ + moduleBase + /libs for python modules
v1.1a - 15/02/2006:
Removed proxy methods for ban manager methods to reduce the design coupling of mm and banmanager
All Ban Manager methods should now be accessed via mm.banManager().<method>
v1.1 - 13/02/2005:
Fixed random module dependency load problem
Added Ban Manager proxy methods
v1.0 - 04/10/2005:
Corrected autoSave ( still unreliable as the servers doesnt call shutdown most the time )
v1.0 - rc-5 23/08/2005:
Added configPath() method
Updated time logic to take into account none started matches and correct roundTimeLeft()
v1.0 - rc-4 16/08/2005:
shutdown() and init() are now mandatory methods for modules
Added startTimeWall and startTimeUTC properties which identifies then the round started excluding start delay
Added roundTime() and roundTimeLeft() methods which return the number of seconds the round has been playing for and has left respectively. Pauses and start delay are taken into account
v1.0 - beta1 - 02/08/2005:
Initial version
Copyright (c)2008 Multiplay
Author: Steven 'Killing' Hartland
"""
import sys
import mm_utils
import datetime
import time
import re
import host
import bf2
import bf2.stats.constants
__version__ = 2.2
__description__ = "Multiplay, ModManager v%s" % __version__
__all__ = [
"rconModule",
"banManagerModule",
"logModule",
"logLevel",
"logFilename"
"logDateFormat"
"banFilename",
"debugEnable",
"debugFile",
"isWin32",
]
configDefaults = {
#
# Module settings
'moduleBase': 'modules',
'autoSave': 1,
'homeGuess': 'C:/Documents and Settings/Administrator/My Documents/Battlefield 2/',
#
# Core modules
#
'rconModule': 'mm_rcon',
'banManagerModule': 'mm_banmanager',
'logModule' : 'mm_logger',
#
# Debug settings
#
'debugEnable': 0,
'debugFile': 'modmanager_debug.log',
#
# Logging settings
#
# The log verbosity:
# 0 = errors
# 1 = + warnings
# 2 = + info ( default )
# 3 = + debug
# ...
'logLevel': 2,
# If the log file is auto flushed after every write
'logAutoFlush' : 1,
# Append to the log
'logAppend': 0,
# The format for the log date
'logDateFormat': "[%Y-%m-%d %H:%M:%S] ",
}
class DebugLogger:
"""The logger used for debugging ModManager."""
def __init__( self, filename, logAppend, autoFlush ):
"""Opens the given filename for appending."""
self.autoFlush = autoFlush
try:
if logAppend:
self.__file = open( filename, 'a+' )
else:
self.__file = open( filename, 'w+' )
except StandardError, detail:
msg = "Failed to open '%s' (%s)" % ( filename, detail )
raise IOError, msg
def write( self, str ):
"""Writes to the debug log flushing if required."""
self.__file.write( str )
if self.autoFlush:
self.__file.flush()
def close( self ):
"""Close the debug log."""
if self.__file:
self.__file.close()
class ModManager( object ):
"""The core manager which looks after the modules and configuration."""
def __init__( self, debuglog ):
"""Create a module manager, loading the configuration and any requested modules."""
# default init
self.__bf2String = 'Battlefield 2'
self.__bf2142String = 'Battlefield 2142'
self.__bfheroesString = 'Battlefield Heroes'
self.__bfp4fString = 'Battlefield Play 4 Free'
self.__bf2Id = 'bf2'
self.__bf2142Id = 'bf2142'
self.__bfheroesId = 'bfheroes'
self.__bfp4fId = 'bfp4f'
self.__gameId = self.__bf2Id
self.__gameString = self.__bf2String
self.__gameName = self.__bf2Id
self.__state = 0
self.__logger = None
self.__modules = {}
self.__updateRequestors = []
self.__configFull = { __name__: {} }
self.__playCount = 0
self.__pauseStart = 0
self.__matchTimeLost = 0
self.__timeLimit = 0
self.lastGameStatus = bf2.GameStatus.PreGame
self.currentGameStatus = bf2.GameStatus.PreGame
self.gamePlaying = False
self.roundStarted = False
self.startTimeUTC = int( time.time() )
self.startTimeWall = int( host.timer_getWallTime() )
# Load our config default and set to local vars
for var in configDefaults:
val = configDefaults[var]
self.__configFull[__name__][var] = val
self.__setattr__( var, val )
mm_utils.init( self )
# Determine where to load the config from
self.__setConfigFile();
# parse the configuration file
self.__parseConfig()
# Enable debug if required
if self.debugEnable:
# close the startup log
# Note: we dont always close this as that would cause issues
if debuglog:
debuglog.close()
# open the new one ( settings may have changed
sys.stdout = sys.stderr = DebugLogger( self.debugFile, self.logAppend, self.logAutoFlush )
# add the module base to the path
sys.path.append( 'admin/' + self.moduleBase )
sys.path.append( 'admin/' + self.moduleBase + '/libs' )
# create the logger
self.__initLogger()
self.info( 'Creating Multiplay, ModManager v%s for %s (www.multiplay.co.uk)' % ( __version__, self.__gameString ) )
# We listen to state changes so we can tell others what
# the current state is if they are dynamically loaded
host.registerGameStatusHandler( self.onGameStatusChanged )
# create the rcon
self.__initRcon()
# Initialise the ban system
self.__initBanManager()
# Register our rcon command handlers
# Our internal commands
self.__cmds = {
'printRunningConfig': { 'method': self.cmdPrintRunningConfig, 'aliases': [ 'print' ], 'level': 80 },
'saveConfig': { 'method': self.cmdSaveConfig, 'aliases': [ 'save' ], 'level': 90 },
'loadModule': { 'method': self.cmdLoadModule, 'args': '<module_name>', 'aliases': [ 'load' ], 'level': 90 },
'listModules': { 'method': self.cmdListModules, 'aliases': [ 'list' ], 'level': 70 },
'shutdownModule': { 'method': self.cmdShutdownModule, 'args': '<module_name>', 'aliases': [ 'shutdown' ], 'level': 90 },
'startModule': { 'method': self.cmdStartModule, 'args': '<module_name>', 'aliases': [ 'start' ], 'level': 90 },
'reloadModule': { 'method': self.cmdReloadModule, 'args': '<module_name>', 'aliases': [ 'reload' ], 'level': 90 },
'setParam': { 'method': self.cmdSetParam, 'args': '<module_name> <param> <value>', 'aliases': [ 'set' ], 'level': 90 },
}
# load the modules
self.__loadModules();
def isBattleField2142( self ):
"""Is the game BattleField 2142"""
return self.__gameId == self.__bf2142Id
def isBattleField2( self ):
"""Is the game BattleField 2"""
return self.__gameId == self.__bf2Id
def isBattleFieldHeroes( self ):
"""Is the game BattleField Heroes"""
return self.__gameId == self.__bfheroesId
def isBattleFieldPlay4Free( self ):
"""Is the game BattleField Play 4 Free"""
return self.__gameId == self.__bfp4fId
def getGameString( self ):
"""Return the game string"""
return self.__gameString
def getGameId( self ):
"""Return the game id"""
return self.__gameId
def pause( self ):
"""Pause the game."""
if bf2.GameStatus.Playing == self.currentGameStatus:
msg = host.rcon_invoke( 'gameLogic.togglePause' )
# TODO: remove when fixed
self.onGameStatusChanged( bf2.GameStatus.Paused )
else:
msg = ''
return msg
def unpause( self ):
"""Unpause the game."""
if bf2.GameStatus.Paused == self.currentGameStatus:
msg = host.rcon_invoke( 'gameLogic.togglePause' )
# TODO: remove when fixed
self.onGameStatusChanged( bf2.GameStatus.Playing )
else:
msg = ''
return msg
def onGameStatusChanged( self, status ):
"""Make a note of the game status"""
self.debug( 1, "STATUS: %d = %s" % ( status, mm_utils.status_name( status ) ) )
if bf2.GameStatus.EndGame == status:
self.__playCount = 0
self.__pauseStart = 0
self.__timeLimit = 0
self.gamePlaying = False
self.roundStarted = False
elif bf2.GameStatus.Playing == status:
self.gamePlaying = True
now = int( host.timer_getWallTime() )
if bf2.GameStatus.PreGame == self.currentGameStatus:
# normal transition i.e. not pause
start_delay = int( host.rcon_invoke( 'sv.startDelay' ) )
self.__playCount += 1
self.__timeLimit = host.ss_getParam( 'timeLimit' )
self.__matchTimeLost = 0
self.startTimeUTC = int( time.time() ) + start_delay
self.startTimeWall = now + start_delay
if 2 == self.__playCount:
# We see state change from PreGame -> Playing twice before the round really starts
self.roundStarted = True
elif bf2.GameStatus.Paused == self.currentGameStatus:
self.__matchTimeLost += ( now - self.__pauseStart )
self.__pauseStart = 0
elif bf2.GameStatus.Paused == status:
self.__pauseStart = int( host.timer_getWallTime() )
else:
self.__pauseStart = 0
self.lastGameStatus = self.currentGameStatus
self.currentGameStatus = status
#self.debug( 3, "STATUS NOW: last = %s, current = %s, playing = %s, started = %s" % ( mm_utils.status_name( self.lastGameStatus ), mm_utils.status_name( self.currentGameStatus ), self.gamePlaying, self.roundStarted ) )
def roundTime( self ):
"""Return how long a round has been running for.
Takes into account start delay and any pauses.
"""
now = int( host.timer_getWallTime() )
if bf2.GameStatus.Paused == self.currentGameStatus:
cur_pause = now - self.__pauseStart
round_time = now - self.startTimeWall - self.__matchTimeLost - cur_pause
else:
round_time = now - self.startTimeWall - self.__matchTimeLost
if 0 > round_time:
return 0
else:
return round_time
def roundTimeLeft( self ):
"""Return how long a round has left to play.
Returns 0 if there is no time limit or the round hasn't started
"""
if self.__timeLimit:
self.debug( 2, "TIME: %d, %d" % ( self.__timeLimit, self.roundTime() ) )
time_left = self.__timeLimit - self.roundTime()
if 0 > time_left:
# game which hasnt really started so timelimit is not in effect
return 0
else:
return time_left
else:
self.debug( 2, "TIME1: 0" )
return 0
def configPath( self ):
"""Returns the config directory."""
return self.__configPath
#
# Private Methods
#
def __setConfigFile( self ):
"""Determines where our config file is."""
try:
# Try 2142 way
self.__configPath = host.sgl_getOverlayDirectory()
try:
test = bf2.stats.constants.TYPE_BFHEROES
self.__gameString = self.__bfheroesString
self.__gameId = self.__bfheroesId
except:
try:
test = bf2.stats.constants.ARMY_US
self.__gameString = self.__bfp4fString
self.__gameId = self.__bfp4fId
except:
self.__gameString = self.__bf2142String
self.__gameId = self.__bf2142Id
except:
# Failed 2142 so fall back to determining from the config
self.__gameString = self.__bf2String
self.__gameId = self.__bf2Id
configFileParts = host.rcon_invoke( 'sv.configFile' ).replace( '\\', '/' ).split( '/' );
del configFileParts[len( configFileParts ) - 1]
self.__configPath = "/".join( configFileParts )
if self.__configPath.startswith( '@HOME@' ):
# since we have no access to the environment and cant
# do any directory listings atm we have to just guess
self.warn( "Guessing '@HOME@' = '%s'" % self.homeGuess )
self.__configPath = "%s%s" % ( self.homeGuess, self.__configPath[5:] )
filename = "%s/%s.con" % ( self.__configPath, __name__ )
# lets check
try:
check = open( filename, 'r' )
except:
# nope no good so lets check in the standard place
self.__configPath = "%s/settings" % host.sgl_getModDirectory().replace( '\\', '/' )
filename = "%s/%s.con" % ( self.__configPath, __name__ )
try:
check = open( filename, 'r' )
except:
self.error( "Failed to determine location of '%s.con'" % __name__ )
else:
self.__configFile = filename
self.warn( "Using config file '%s'" % filename )
check.close()
else:
check.close()
self.__configFile = filename
def __getattr__( self, name ):
"""Return the attributes value."""
try:
return self.__dict__[name]
except AttributeError:
raise AttributeError, name
def __setattr__( self, name, value ):
"""Set the attributes value."""
self.__dict__[name] = value
def __delattr__( self, name ):
"""Delete the attribute."""
del self.__dict__[name]
def __initLogger( self ):
"""Initialise the logger."""
self.__addModule( self.logModule )
self.__logger = self.__loadModule( self.logModule )
if not self.__logger:
return 0
return self.__initModule( self.logModule )
def __initRcon( self ):
"""Initialise rcon."""
self.__addModule( self.rconModule )
self.__rcon = self.__loadModule( self.rconModule )
if not self.__rcon:
return 0
return self.__initModule( self.rconModule )
def __initBanManager( self ):
"""Initialise ban Manager."""
self.__addModule( self.banManagerModule )
self.__banManager = self.__loadModule( self.banManagerModule )
if not self.__banManager:
return 0
return self.__initModule( self.banManagerModule )
def __addModule( self, module_name ):
"""Add a module to our list of modules."""
self.debug( 2, "addModule '%s'" % module_name )
self.__modules[module_name] = { 'status': ModuleStatus.unloaded }
def __loadModules( self ):
"""Load all modules"""
# Note: We have to use .keys() as __loadModule may change the size
# of .__modules
module_names = self.__modules.keys()
loaded = 0
for module_name in module_names:
if ModuleStatus.unloaded == self.__modules[module_name]['status']:
# modules not currently loaded so load
if self.__loadModule( module_name ):
loaded += 1
self.info( "Loaded %d additional modules" % loaded )
def __loadModule( self, module_name, ctx=None ):
"""Load a specific module."""
try:
self.debug( 2, "Loading module '%s'" % module_name )
module = __import__( module_name )
except StandardError, detail:
msg = "Failed to load module '%s' ( %s )\n" % ( module_name, detail )
self.error( msg, True )
if ctx is not None:
ctx.write( msg )
return 0
# check we are a high enough version
if module.__required_modules__[__name__] > __version__:
msg = "Module '%s' v%s requires ModManager v%s or higher\n" % ( module_name, module.__version__, module.__required_modules__[__name__] )
self.error( msg )
if ctx is not None:
ctx.write( msg )
return 0
# Check we are a supported game
if hasattr( module, '__supported_games__' ):
if not module.__supported_games__[self.__gameId]:
msg = "Module '%s' v%s does not support '%s'\n" % ( module_name, module.__version__, self.__gameId )
self.error( msg )
if ctx is not None:
ctx.write( msg )
return 0
else:
if self.isBattleField2():
msg = "Legacy module '%s' v%s detected assuming '%s' support" % ( module_name, module.__version__, self.__bf2String )
self.warn( msg )
if ctx is not None:
ctx.write( msg )
else:
msg = "Legacy module '%s' v%s detected assuming NO '%s' support" % ( module_name, module.__version__, self.__bf2142String )
self.error( msg )
if ctx is not None:
ctx.write( msg )
return 0
# ensure we have all dependent modules
# NOTE: we dont check for infinte loops here
for req_module_name in module.__required_modules__:
if __name__ == req_module_name:
continue
self.debug( 2, "%s requires %s (autoloading)" % ( module_name, req_module_name ) )
if not self.__modules.has_key( req_module_name ):
# module we dont know about yet
self.__addModule( req_module_name )
if not self.__loadModule( req_module_name ):
return 0
if ModuleStatus.loaded > self.__modules[req_module_name]['status']:
# we know the module but its not loaded yet
if not self.__loadModule( req_module_name ):
return 0
if module.__required_modules__[req_module_name] > self.__modules[req_module_name]['module'].__version__:
# module version is too low
msg = "Module '%s' v%s requires '%s' v%s or higher (found %s)\n" % ( module_name, module.__version__, req_module_name, module.__required_modules__[req_module_name], self.__modules[req_module_name]['module'].__version__ )
self.error( msg )
if ctx is not None:
ctx.write( msg )
return 0
try:
obj = module.mm_load( self )
self.__modules[module_name]['object'] = obj
except:
msg = "Failed to mm_load '%s'\n" % module_name
self.error( msg, True )
if ctx is not None:
ctx.write( msg )
return 0
# all good
self.__modules[module_name]['module'] = module
self.__modules[module_name]['status'] = ModuleStatus.loaded
msg = "Module '%s' v%s loaded\n" % ( module_name, module.__version__ )
self.info( msg )
if ctx is not None:
ctx.write( msg )
return obj
def __unloadModule( self, module_name ):
"""Unload a specific module."""
try:
module = self.__modules[module_name]['module']
try:
self.__modules[module_name]['status'] = ModuleStatus.unloaded
self.__modules[module_name]['module'] = None
del module
self.error( "Module '%s' was unloaded loaded" % module_name )
except StandardError, detail:
self.error( "Module '%s' failed to unload (%s)" % ( module_name, detail ) )
except KeyError:
self.error( "Module '%s' was not loaded" % module_name )
def __initModule( self, module_name, ctx=None ):
"""Initialise a module."""
if self.__modules[module_name]['status'] == ModuleStatus.loaded:
try:
self.__modules[module_name]['object'].init()
self.__modules[module_name]['status'] = ModuleStatus.running
msg = "Module '%s' initialised\n" % module_name
self.info( msg )
if ctx is not None:
ctx.write( msg )
return 1
except Exception, detail:
# Ignore this module doesnt support init
msg = "Failed to initialise module '%s' (%s)" % ( module_name, detail )
self.error( msg, True )
if ctx is not None:
ctx.write( msg )
return 0
def __shutdownModule( self, module_name, ctx=None ):
"""Shutdown a module."""
if self.__modules[module_name]['status'] == ModuleStatus.running:
try:
self.__modules[module_name]['object'].shutdown()
self.__modules[module_name]['status'] = ModuleStatus.loaded
msg = "Module '%s' shutdown\n" % module_name
self.info( msg )
if ctx is not None:
ctx.write( msg )
return 1
except Exception, detail:
msg = "Failed to shutdown module '%s' (%s)\n" % ( module_name, detail )
self.error( msg, True )
if ctx is not None:
ctx.write( msg )
elif self.__modules[module_name]['status'] == ModuleStatus.loaded:
msg = "Module '%s' already shutdown\n" % ( module_name )
self.debug( 2, msg )
if ctx is not None:
ctx.write( msg )
return 1;
return 0
def __decodeConfigValue( self, key, value ):
"""Converts a config value from string to int if required."""
if value is None:
return None
# remove white space
if value.startswith( '"' ):
# String value
# remove outer quotes
value = value.strip( '"' )
self.debug( 1, "Setting '%s' = '%s'" % ( key, value ) )
else:
# Int value
try:
value = int( value )
self.debug( 1, "Setting '%s' = %d" % ( key, value ) )
except:
self.warn( "Invalid config for '%s' ( Unquoted string? )" )
return value
def __parseConfig( self ):
"""Parse the configuration file."""
self.info( "Loading config '%s'" % ( self.__configFile ) )
add_re = re.compile( '^add([A-Z].*)$' )
set_re = re.compile( '^set([A-Z].*)$' )
try:
config = open( self.__configFile, 'r' )
lineNo = 0
for line in config:
lineNo += 1
line = line.strip()
if 0 != len( line ) and not line.startswith( "#" ):
try:
( key, val1, val2 ) = mm_utils.largs( line, ' ', 3, None, True )
val1 = self.__decodeConfigValue( key, val1 )
val2 = self.__decodeConfigValue( key, val2 )
try:
oldVal = self.__getattr__( key )
self.warn( "Overriding %s = '%s' with '%s'" % ( key, oldVal, val1 ) )
except KeyError:
# ignore
pass
# load to the relavent place
( module, module_key ) = mm_utils.lsplit( key, '.', 2 )
#self.debug( 2, "%s . %s = %s" % ( module, module_key, value ) )
if __name__ == module:
if "loadModule" == module_key:
# loadable module
self.__addModule( val1 )
else:
# core config file
self.__setattr__( module_key, val1 )
self.__setParam( module, module_key, val1 )
else:
match = add_re.search( module_key )
if match is not None:
# user multi setting
# matches things like:
# <module>.addProfileId 1
# <module>.addProfileId 2
# and sets:
# <module>.profileIds = [ 1, 2 ]
# or:
# <module>.addCmdAlias "k" "kick"
# <module>.addCmdAlias "b" "ban"
# and sets:
# <module>.cmdAliass = { 'k': 'kick', 'b': 'ban' }
self.__addParam( module, "%c%ss" % ( module_key[3:4].lower(), module_key[4:] ), val1, val2 )
else:
# user single setting
self.__setParam( module, module_key, val1 )
except Exception, detail:
self.error( 'Syntax error in "%s" on line %d (%s)' % ( self.__configFile, lineNo, detail ), True )
config.close()
except IOError, detail:
self.error( "Couldn't read '%s' (%s)" % ( self.__configFile, detail ) )
def __log( self, level, msg ):
"""Log the message if its level is less than or equal to the current log level."""
if self.logLevel >= level:
currentDate = datetime.datetime.today()
dateString = time.strftime( self.logDateFormat, currentDate.timetuple() )
output = ''
cr = '\n'
if msg.endswith( '\n' ):
cr = ''
if 0 == level:
output = "%sError: %s%s" % ( dateString, msg, cr )
sys.stderr.write( output )
elif 1 == level:
output = "%sWarn: %s%s" % ( dateString, msg, cr )
sys.stderr.write( output )
elif 2 == level:
output = "%sInfo: %s%s" % ( dateString, msg, cr )
sys.stdout.write( output )
else:
output = "%sDebug[%s]: %s%s" % ( dateString, level, msg, cr )
sys.stdout.write( output )
if self.__logger:
self.__logger.write( output )
def __setParam( self, module, var, val ):
"""Set the modules configuration parameter."""
if self.__configFull.has_key( module ):
self.__configFull[module][var] = val
else:
self.__configFull[module] = { var: val }
if __name__ == module:
self.__setattr__( var, val )
def __removeParam( self, module, key, idx ):
"""Remove the modules configuration parameter."""
if not self.__configFull.has_key( module ):
self.warn( "Failed to remove parameter %s.%s[%d] ( unknown module )" % ( module, key, idx ) )
return 0
if not self.__configFull[module].has_key( key ):
self.warn( "Failed to remove parameter %s.%s[%d] ( unknown key )" % ( module, key, idx ) )
return 0
l = len( self.__configFull[module][key] )
if l <= idx:
self.warn( "Failed to remove parameter %s.%s[%d] > %d ( invalid index )" % ( module, key, idx, l ) )
return 0
del self.__configFull[module][key][idx]
return 1
def __addParam( self, module, key, val1, val2=None ):
"""Add the modules configuration parameter."""
#self.debug( 2, "ADD2: %s.%s = %s, %s" % ( module, key, val1, val2 ) )
if val2 is None:
#self.debug( 2, "LIST2: %s.%s = %s" % ( module, key, val1 ) )
# Straight value
if self.__configFull.has_key( module ):
if self.__configFull[module].has_key( key ):
self.__configFull[module][key].append( val1 )
else:
self.__configFull[module][key] = [ val1 ]
else:
module_keys = { key: [ val1 ] }
self.__configFull[module] = module_keys
else:
#self.debug( 2, "DICT2: %s.%s = %s, %s" % ( module, key, val1, val2 ) )
# value pair
try:
val2 = int( val2 )
#self.debug( 2, "INT: %d" % ( val2 ) )
except:
pass
#self.debug( 2, "HASH2: %s %s" % ( val1, val2 ) )
if self.__configFull.has_key( module ):
if self.__configFull[module].has_key( key ):
self.__configFull[module][key][val1] = val2
else:
self.__configFull[module][key] = { val1: val2 }
else:
self.__configFull[module] = { key: { val1: val2 } }
def __getParam( self, module, key ):
"""Return the modules configuration parameter."""
try:
return self.__configFull[module][key];
except KeyError:
self.warn( "Request for invalid param '%s.%s'" % ( module, key ) )
def __getModuleConfig( self, module ):
"""Return the modules configuration."""
if self.__configFull.has_key( module ):
return self.__configFull[module]
else:
return {}
def __saveConfig( self, ctx=None ):
"""Save the entire config.
Note: return value 0 indicates success
"""
try:
fh = open( self.__configFile, 'w' )
except IOError, detail:
msg = "Failed to open config '%s' for write (%s)\n" % ( self.__configFile, detail )
self.error( msg )
if ctx is not None:
ctx.write( msg )
return 0
if not self.__writeConfig( fh ):
msg = "Failed to save config\n"
else:
msg = "Config saved\n"
self.info( msg )
if ctx is not None:
ctx.write( msg )
fh.close()
return 0
def __writeConfig( self, fh ):
"""Write the module config to the file handle."""
try:
# Global first
self.__writeModuleConfig( fh, __name__ )
# Addon modules
fh.write( "\n# Modules\n" )
for module_name in self.__modules:
if module_name != self.logModule and module_name != self.rconModule and module_name != self.banManagerModule:
fh.write( '%s.loadModule "%s"\n' % ( __name__, module_name ) )
fh.write( "\n" )
# Now all the known modules configs
modules = self.__configFull.keys()
modules.sort()
for module in modules:
if __name__ != module:
self.__writeModuleConfig( fh, module )
except IOError, detail:
msg = "Failed to save config '%s' (%s)\n" % ( self.__configFile, detail )
self.error( msg )
if ctx is not None:
ctx.write( msg )
return 0
return 1
def __writeModuleConfig( self, fh, module_name ):
"""Write out the named modules header and config to the passed file handle."""
# Write the descriptive header
if self.__modules.has_key( module_name ):
# module is loaded
if self.__modules[module_name]['status'] > ModuleStatus.unloaded:
# modules loaded use the description
try:
fh.write( "#\n# %s\n#\n" % self.__modules[module_name]['module'].__description__ )
except:
fh.write( "#\n# %s\n#\n" % module_name )
else:
fh.write( "#\n# %s\n#\n" % module_name )
elif __name__ == module_name:
# Us
fh.write( "#\n# %s\n#\n" % __description__ )
else:
# Unloaded module
fh.write( "#\n# %s\n#\n" % module_name )
# Now the parameters for this module
params = self.__configFull[module_name]
var_names = params.keys()
var_names.sort()
for param in var_names:
value = params[param]
if isinstance( value, ( int, long ) ):
# just an int
fh.write( '%s.%s %s\n' % ( module_name, param, value ) )
elif isinstance( value, dict ):
# multi value list
if param.endswith( 's' ):
# strip the s
multi_param = "add%s%s" % ( param[0:1].upper(), param[1:len( param ) - 1] )
else:
multi_param = "add%s%s" % ( param[0:1].upper(), param[1:] )
for k in value:
val = value[k]
if isinstance( k, ( int, long ) ):
if isinstance( val, ( int, long ) ):
fh.write( '%s.%s %d %d\n' % ( module_name, multi_param, k, val ) )
else:
fh.write( '%s.%s %d "%s"\n' % ( module_name, multi_param, k, val ) )
else:
# string, double quote it
if isinstance( val, ( int, long ) ):
fh.write( '%s.%s "%s" %d\n' % ( module_name, multi_param, k, val ) )
else:
fh.write( '%s.%s "%s" "%s"\n' % ( module_name, multi_param, k, val ) )
elif isinstance( value, list ):
# multi value list
if param.endswith( 's' ):
# strip the s
multi_param = "add%s%s" % ( param[0:1].upper(), param[1:len( param ) - 1] )
else:
multi_param = "add%s%s" % ( param[0:1].upper(), param[1:] )
for multi in value:
if isinstance( multi, ( int, long ) ):
# just an int
fh.write( '%s.%s %s\n' % ( module_name, multi_param, multi ) )
else:
# string, double quote it
fh.write( '%s.%s "%s"\n' % ( module_name, multi_param, multi ) )
else:
# string, double quote it
fh.write( '%s.%s "%s"\n' % ( module_name, param, value ) )
fh.write( "\n" )
#
# Rcon methods
#
def cmdExec( self, ctx, cmd ):
"""Execute a ModManager sub command."""
return mm_utils.exec_subcmd( self, self.__cmds, ctx, cmd )
def cmdReloadModule( self, ctx, module_name ):
"""Reload and existing module."""
if self.rconModule == module_name or self.logModule == module_name or self.banManagerModule == module_name:
msg = "Failed to shutdown module '%s' ( not permitted )\n" % module_name
self.warn( msg )
ctx.write( msg )
return 0
if not self.__modules.has_key( module_name ):
msg = "Failed to reload module '%s' ( module not loaded )\n" % module_name
self.warn( msg )
ctx.write( msg )
return 0
details = self.__modules[module_name]
# check if the initial load failed
if details['status'] == ModuleStatus.unloaded:
# Initial load failed just try to load
if not self.__loadModule( module_name, ctx ):
return 0
return 1
module = details['module']
if not module.__dict__.has_key( '__supports_reload__' ) or not module.__supports_reload__:
msg = "Failed to reload module '%s' ( reload not supported )\n" % module_name
self.warn( msg )
ctx.write( msg )
return 0
# module supports reload
# call shutdown on the existing copy
if not self.__shutdownModule( module_name, ctx ):
return 0
try:
module = reload( module )
self.__modules[module_name]['module'] = module
self.__modules[module_name]['status'] = ModuleStatus.loaded
except Exception, detail:
msg = "Failed to reload module '%s' (%s)\n" % ( module_name, detail )
self.error( msg, True)
ctx.write( msg )
return 0
try:
self.__modules[module_name]['object'] = module.mm_load( self )
except:
msg = "Failed to mm_load '%s'\n" % module_name
self.error( msg, True )
if ctx is not None:
ctx.write( msg )
return 0
msg = "Module '%s' reloaded\n" % module_name
self.info( msg )
ctx.write( msg )
# Version compatibility check
if module.__required_modules__['modmanager'] > __version__:
msg = "Module '%s' v%s requires ModManager v%s or higher\n" % ( module_name, module.__version__, module.__required_modules__['modmanager'] )
self.error( msg )
ctx.write( msg )
return 0
# Initialise the module
if not self.__initModule( module_name, ctx ):
return 0
return 1;
def cmdSetParam( self, ctx, cmd ):
"""Sets a module parameter."""
parts = cmd.split( None, 2 )
if 3 != len( parts ):
msg = "Invalid set arguments '%s'" % cmd
self.warn( msg )
ctx.write( msg )
( module_name, var, val ) = parts
if not self.__configFull.has_key( module_name ):
msg = "Failed to set param '%s' ( invalid module )\n" % cmd
self.warn( msg )
ctx.write( msg )
return 0
if not self.__configFull[module_name].has_key( var ):
msg = "Failed to set param '%s' ( invalid param )\n" % cmd
self.warn( msg )
ctx.write( msg )
return 0
if self.rconModule == module_name:
if "restrictedGametypes" == var or "lockedSettings" == var:
msg = "Failed to set param '%s' ( restricted / locked param )\n" % cmd
self.warn( msg )
ctx.write( msg )
return 0
if not val.startswith( '"' ):
try:
int_val = int( val )
except TypeError:
msg = "Failed to set param '%s' ( invalid var, expected quote string or integer value )\n" % cmd
self.warn( msg )
ctx.write( msg )
return 0
self.__configFull[module_name][var] = int_val
else:
self.__configFull[module_name][var] = val.strip( '"' )
msg = "Param %s.%s set to %s\n" % ( module_name, var, val )
self.info( msg )
ctx.write( msg )
return 1
def cmdLoadModule( self, ctx, module_name ):
"""Load a new module."""
if self.__modules.has_key( module_name ):
msg = "Failed to load module '%s' ( already loaded )\n" % module_name
self.warn( msg )
ctx.write( msg )
return 0
self.__addModule( module_name )
if not self.__loadModule( module_name, ctx ):
# Remove all reference
del self.__modules[module_name]
return 0
if not self.__initModule( module_name, ctx ):
return 0
return 1;
def cmdStartModule( self, ctx, module_name ):
"""Start and existing module."""
if not self.__modules.has_key( module_name ):
msg = "Failed to shutdown module '%s' ( not loaded )\n" % module_name
self.warn( msg )
ctx.write( msg )
return 0
return self.__initModule( module_name, ctx )
def cmdShutdownModule( self, ctx, module_name ):
"""Shutdown and existing module."""
if self.rconModule == module_name or self.logModule == module_name or self.banManagerModule == module_name:
msg = "Failed to shutdown module '%s' ( not permitted )\n" % module_name
self.warn( msg )
ctx.write( msg )
return 0
if not self.__modules.has_key( module_name ):
msg = "Failed to shutdown module '%s' ( not loaded )\n" % module_name
self.warn( msg )
ctx.write( msg )
return 0
return self.__shutdownModule( module_name, ctx )
def cmdListModules( self, ctx, cmd ):
"""List the loaded modules."""
# would be nice to be able to list the available modules
# but the python core is crippled
ctx.write( 'Loaded modules:\n' )
for module_name in self.__modules:
details = self.__modules[module_name]
status = details['status']
if ModuleStatus.unloaded == status:
ctx.write( ' %s ( unloaded )\n' % module_name )
elif ModuleStatus.loaded == status:
ctx.write( ' %s v%s ( loaded )\n' % ( module_name, details['module'].__version__ ) )
elif ModuleStatus.running == status:
ctx.write( ' %s v%s ( running )\n' % ( module_name, details['module'].__version__ ) )
return 1
def cmdSaveConfig( self, ctx, cmd ):
"""Start and existing module."""
return self.__saveConfig( ctx )
def cmdPrintRunningConfig( self, ctx, cmd ):
"""Write the config to ctx."""
return self.__writeConfig( ctx )
#
# Config methods
#
def configFile( self ):
"""Return the config filename."""
return self.__configFile
def getModuleConfig( self, moduleDefaults={} ):
"""Return the modules configuration, adding any missing default values."""
module = mm_utils.caller_module()
moduleConfig = self.__getModuleConfig( module )
for key in moduleDefaults:
if not moduleConfig.has_key( key ):
# missing default add it to the config
self.debug( 1, "missing %s" % key )
if isinstance( moduleDefaults[key], dict ):
self.debug( 1, "DICT1: %s => %s" % ( module, key ) )
if moduleDefaults[key]:
vals = moduleDefaults[key]
for k in vals.keys():
self.__addParam( module, key, k, vals[k] )
else:
# fake the empty dict
self.__setParam( module, key, {} )
elif isinstance( moduleDefaults[key], list ):
self.debug( 1, "LIST1: %s => %s" % ( module, key ) )
# multi val
if moduleDefaults[key]:
for val in moduleDefaults[key]:
self.__addParam( module, key, val )
else:
# fake the empty list
self.__setParam( module, key, [] )
else:
# single val
self.__setParam( module, key, moduleDefaults[key] )
# reget to ensure we have all the defaults
return self.__getModuleConfig( module )
def setParam( self, key, value ):
"""Set the calling modules parameter."""
return self.__setParam( mm_utils.caller_module(), key, value )
def addParam( self, key, val1, val2=None ):
"""Add a value to the the calling modules parameters."""
return self.__addParam( mm_utils.caller_module(), key, val1, val2 )
def removeParam( self, key, idx ):
"""Remove one of the calling modules parameter values."""
return self.__removeParam( mm_utils.caller_module(), key, idx )
def getParam( self, key ):
"""Return the calling modules parameter."""
return self.__getParam( mm_utils.caller_module(), key )
def setRconParam( self, key, value ):
"""Set the rcon modules parameter."""
return self.__getParam( self.rconModule, value )
def getRconParam( self, key ):
"""Return the rcon modules parameter."""
return self.__getParam( self.rconModule )
def saveConfig( self ):
"""Save the current ModManager configuration."""
return self.__saveConfig()
#
# Logging methods
#
def debug( self, level, msg ):
"""Log the message at the given debug level."""
self.__log( 2 + level, msg )
def info( self, msg ):
"""Log the message at the info level."""
self.__log( 2, msg )
def warn( self, msg ):
"""Log the message at the warn level."""
self.__log( 1, msg )
def error( self, msg, traceback=False ):
"""Log the message at the error level."""
self.__log( 0, msg )
if traceback:
self.__log( 0, self.exceptionString() )
def exceptionString( self ):
"""Returns a formatted string of the exception."""
return "".join( mm_utils.format_exception( *sys.exc_info() ) )
#
# Rcon methods
#
def runRconCommand( self, client, cmd ):
"""Run an rcon command."""
return self.__rcon.onRemoteCommand( client, cmd )
def getRconContext( self, clientid ):
"""Run an rcon command."""
return self.__rcon.getContext( clientid )
def registerRconCmdHandler( self, name, details ):
"""Register a new rcon function hander."""
return self.__rcon.registerCmdHandler( name, details )
def unregisterRconCmdHandler( self, name ):
"""Unregister an existing rcon function handler."""
return self.__rcon.unregisterCmdHandler( name )
def registerRconConnectHandler( self, func ):
"""Register a new rcon connect hander."""
return self.__rcon.registerConnectHandler( func )
def unregisterRconConnectHandler( self, func ):
"""Unregister an existing rcon connect handler."""
return self.__rcon.unregisterConnectHandler( func )
def registerRconDisconnectHandler( self, func ):
"""Register a new rcon disconnect hander."""
return self.__rcon.registerDisconnectHandler( func )
def unregisterRconDisconnectHandler( self, func ):
"""Unregister an existing rcon disconnect handler."""
return self.__rcon.unregisterDisconnectHandler( func )
def registerRconAuthHandler( self, auth_func, check_func ):
"""Register a new rcon auth hander."""
return self.__rcon.registerAuthHandler( auth_func, check_func )
def unregisterRconAuthHandler( self, func ):
"""Unregister an existing rcon auth handler."""
return self.__rcon.unregisterAuthHandler( func )
def registerRconAuthedHandler( self, func ):
"""Register a new rcon authed hander."""
return self.__rcon.registerAuthedHandler( func )
def unregisterRconAuthedHandler( self, func ):
"""Unregister an existing rcon auth handler."""
return self.__rcon.unregisterAuthedHandler( func )
def rcon( self ):
"""Return the rcon handle."""
return self.__rcon
#
# Ban Manager methods
#
def banManager( self ):
"""Return the ban manager handle."""
return self.__banManager
#
# Update methods
#
def registerUpdates( self, method ):
"""Requests updates."""
self.__updateRequestors.append( method )
def unregisterUpdates( self, method ):
"""Cancel request for updates."""
self.__updateRequestors.remove( method )
#
# Core game methods
#
def init( self ):
"""Initialise the module manager and all loaded modules."""
self.__state = 1
# Register our handers
self.registerRconCmdHandler( 'mm', { 'method': self.cmdExec, 'subcmds': self.__cmds } )
initialised = 0
for module_name in self.__modules:
initialised += self.__initModule( module_name )
self.info( "Initialised %d modules" % initialised )
def shutdown( self ):
"""Shutdown the Module manager and all modules."""
self.info( 'ModManager shutting down' )
# Save the config if required
if self.autoSave:
self.__saveConfig()
shutdown = 0
for module_name in self.__modules:
shutdown += self.__shutdownModule( module_name )
self.info( "%d modules shutdown" % shutdown )
host.unregisterGameStatusHandler( self.onGameStatusChanged )
def update( self ):
"""Update all the modules that have registered for updates.
Note / Warning:
We could use the same method as for shutdown and init but
due to the frequency of update calls we use this optimised
methodology.
"""
for method in self.__updateRequestors:
try:
method()
except StandardError, detail:
self.error( "Failed to update '%s' (%s)" % ( mm_utils.method_name( method ), detail ), True )
# Create the singleton manger
# TODO: enforce singleton status
debuglog = None
if configDefaults['debugEnable']:
debuglog = sys.stdout = sys.stderr = DebugLogger( configDefaults['debugFile'], configDefaults['logAppend'], configDefaults['logAutoFlush'] )
modManager = ModManager( debuglog );
def getInstance():
"""Return the module manager instance."""
return modManager
# These functions are called from the engine -- we implement them in terms of a
# class instance:
def init():
"""Call initialise on the module manager."""
modManager.init();
def shutdown():
"""Call shutdown on the module manager."""
modManager.shutdown()
def update():
"""Call update on the module manager."""
modManager.update()
| [
2,
43907,
25,
40379,
28,
19,
1509,
28,
19,
645,
11201,
392,
8658,
198,
198,
37811,
24064,
3245,
1377,
3401,
13511,
13,
198,
198,
1212,
318,
257,
19937,
9142,
329,
5838,
15878,
198,
1026,
13536,
2985,
284,
751,
290,
4781,
13103,
8925,
... | 2.652016 | 17,314 |
"""Users Migration
Revision ID: 2268d72d8102
Revises: 94ee4bd6a597
Create Date: 2021-09-15 17:21:21.280507
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '2268d72d8102'
down_revision = '94ee4bd6a597'
branch_labels = None
depends_on = None
| [
37811,
14490,
36991,
198,
198,
18009,
1166,
4522,
25,
362,
25022,
67,
4761,
67,
23,
15377,
198,
18009,
2696,
25,
10048,
1453,
19,
17457,
21,
64,
43239,
198,
16447,
7536,
25,
33448,
12,
2931,
12,
1314,
1596,
25,
2481,
25,
2481,
13,
2... | 2.618321 | 131 |
import random
import re
from twisted.internet import reactor
from helga.plugins import command, preprocessor
silence_acks = (
u'silence is golden',
u'shutting up',
u'biting my tongue',
u'fine, whatever',
)
unsilence_acks = (
u'speaking once again',
u'did you miss me?',
u'FINALLY',
u'thanks {nick}, i was getting bored'
)
snarks = (
u'why would you want to do that {nick}?',
u'do you really despise me that much {nick}?',
u'whatever i do what i want',
u'no can do, i love the sound of my own voice',
)
# Set of silenced channels
silenced = set()
@preprocessor
@command('stfu', aliases=['speak'],
help="Tell the bot to be quiet or not. Usage: helga (speak|stfu [for <time_in_minutes>])")
| [
11748,
4738,
198,
11748,
302,
198,
198,
6738,
19074,
13,
37675,
1330,
21905,
198,
198,
6738,
932,
4908,
13,
37390,
1330,
3141,
11,
662,
41341,
628,
198,
18217,
594,
62,
4595,
796,
357,
198,
220,
220,
220,
334,
338,
346,
594,
318,
10... | 2.627178 | 287 |
import audioop
import math
from pydub.utils import ratio_to_db
| [
11748,
6597,
404,
198,
11748,
10688,
198,
198,
6738,
279,
5173,
549,
13,
26791,
1330,
8064,
62,
1462,
62,
9945,
628,
628,
628
] | 3 | 23 |
#!/usr/bin/python
# Classification (U)
"""Program: is_cfg_valid.py
Description: Unit testing of is_cfg_valid in mysql_libs.py.
Usage:
test/unit/mysql_libs/is_cfg_valid.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import mysql_libs
import version
__version__ = version.__version__
class Server(object):
"""Class: Server
Description: Class stub holder for Server class.
Methods:
__init__ -> Class initialization.
"""
def __init__(self, name, def_file=None):
"""Method: __init__
Description: Class initialization.
Arguments:
"""
self.name = name
self.extra_def_file = def_file
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp -> Initialize testing environment.
test_multi_both_fail -> Test with multiple servers with both failed.
test_multi_one_fail -> Test with multiple servers with one failed.
test_multi_servers -> Test with multiple servers valid.
test_no_extra_file -> Test with no extra file present.
test_chk_fails -> Test with check file fails.
test_cfg_valid -> Test with extra cfg file is valid.
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
msg = "Error Message"
self.slave1 = Server("Slave1", "Extra_Def_File")
self.slave2 = Server("Slave2")
self.err_msg = msg
self.err_msg2 = [msg, "Slave1: Extra_Def_File is missing."]
self.results = ["Slave2: extra_def_file is not set."]
self.results2 = [msg, "Slave1: Extra_Def_File is missing.",
"Slave2: extra_def_file is not set."]
@mock.patch("mysql_libs.gen_libs.chk_crt_file")
def test_multi_both_fail(self, mock_chk):
"""Function: test_multi_both_fail
Description: Test with multiple servers with both failed.
Arguments:
"""
mock_chk.return_value = (False, self.err_msg)
self.assertEqual(mysql_libs.is_cfg_valid([self.slave1, self.slave2]),
(False, self.results2))
@mock.patch("mysql_libs.gen_libs.chk_crt_file")
def test_multi_one_fail(self, mock_chk):
"""Function: test_multi_one_fail
Description: Test with multiple servers with one failed.
Arguments:
"""
mock_chk.return_value = (True, None)
self.assertEqual(mysql_libs.is_cfg_valid([self.slave1, self.slave2]),
(False, self.results))
@mock.patch("mysql_libs.gen_libs.chk_crt_file")
def test_multi_servers(self, mock_chk):
"""Function: test_multi_servers
Description: Test with multiple servers valid.
Arguments:
"""
mock_chk.return_value = (True, None)
self.assertEqual(mysql_libs.is_cfg_valid([self.slave1, self.slave1]),
(True, []))
def test_no_extra_file(self):
"""Function: test_no_extra_file
Description: Test with no extra file present.
Arguments:
"""
self.assertEqual(mysql_libs.is_cfg_valid([self.slave2]),
(False, self.results))
@mock.patch("mysql_libs.gen_libs.chk_crt_file")
def test_chk_fails(self, mock_chk):
"""Function: test_chk_fails
Description: Test with check file fails.
Arguments:
"""
mock_chk.return_value = (False, self.err_msg)
self.assertEqual(mysql_libs.is_cfg_valid([self.slave1]),
(False, self.err_msg2))
@mock.patch("mysql_libs.gen_libs.chk_crt_file")
def test_cfg_valid(self, mock_chk):
"""Function: test_cfg_valid
Description: Test with extra cfg file is valid.
Arguments:
"""
mock_chk.return_value = (True, None)
self.assertEqual(mysql_libs.is_cfg_valid([self.slave1]), (True, []))
if __name__ == "__main__":
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
40984,
357,
52,
8,
198,
198,
37811,
15167,
25,
220,
318,
62,
37581,
62,
12102,
13,
9078,
628,
220,
220,
220,
12489,
25,
220,
11801,
4856,
286,
318,
62,
37581,
62,
12102,
287,
48761,
62... | 2.252336 | 1,926 |
from neo4j import GraphDatabase
from Database import Database
if __name__ == "__main__":
db = Database("../config.ini").connect()
config = ConfigParser()
config.read("../config.ini")
graph = KGL_graph(config["NEO4J"]["GRAPH_HOST"], config["NEO4J"]["GRAPH_USER"], config["NEO4J"]["GRAPH_PASSWORD"])
articles = []
articles += db.execute("SELECT DISTINCT articles.link, articles.content, articles.release_date, articles.language FROM (articles INNER JOIN chemical_location_relations ON articles.link = chemical_location_relations.article)")
articles += db.execute("SELECT DISTINCT articles.link, articles.content, articles.release_date, articles.language FROM (articles INNER JOIN company_chemical_relations ON articles.link = company_chemical_relations.article)")
articles += db.execute("SELECT DISTINCT articles.link, articles.content, articles.release_date, articles.language FROM (articles INNER JOIN company_location_relations ON articles.link = company_location_relations.article)")
articles = list(dict.fromkeys(articles)) # herausfiltern aller article die Verknüpfungen zu mindestens 2 der 3 entities haben
articleID = 0 # eigentlich Abfrage der derzeit höchsten ArticleID
for article in articles:
graph.write_node(articleID, article[0], article[1], article[2], article[3])
articleID += 1
company_location_relation = db.execute("SELECT DISTINCT article, company, location FROM company_location_relations")
graph.write_relation(company_location_relation, "Location")
company_chemical_relation = db.execute("SELECT DISTINCT article, company, chemical FROM company_chemical_relations")
graph.write_relation(company_chemical_relation, "chemical_compound")
chemical_location_relation = db.execute("SELECT DISTINCT article, chemical, location FROM chemical_location_relations")
graph.write_relation(chemical_location_relation, "Location")
chemical_location_relation = db.execute("SELECT DISTINCT article, location, chemical FROM chemical_location_relations")
graph.write_relation(chemical_location_relation, "chemical_compound")
db.close()
graph.close()
| [
6738,
19102,
19,
73,
1330,
29681,
38105,
201,
198,
6738,
24047,
1330,
24047,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
201,
198,
220,
220,
220,
20613,
796,
24047,
7203,
40720,
112... | 3.114846 | 714 |
################################
# MIT License #
# ---------------------------- #
# Copyright (c) 2021 Bluntano #
################################
import os
import json
import requests # for downloading MIDI file from link
import audio_metadata # for checking converted WAV metadata
from midi2audio import FluidSynth
# for debugging info
from pytz import timezone
from datetime import datetime
tz = timezone('UTC')
soundfonts = []
DEBUG = False
class ConversionError(Exception):
"""Raised when there is an error with converting MIDI to WAV"""
| [
29113,
198,
2,
17168,
13789,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
198,
2,
34400,
10541,
1303,
198,
2,
15069,
357,
66,
8,
33448,
1086,
2797,
5733,
220,
1303,
198,
29113,
198,
11... | 3.837838 | 148 |
# coding=utf-8
# 服务器打包外部包装的一个脚本 内部调用san_slg工程下面的脚本
import os
import sys
from file_utils import *
# # 定义本地热更新目录
# LOCAL_UPDATE_PATH = "/Users/san/packserver/bin/Update_server_"
# # 定义内网热跟新目录
# INTRANET_UPDATE_PATN = "10.241.107.31:/app/frontPack/intratest/"
# # 定义外网热更新目录
# EXTRANET_UPDATE_PATH = "10.241.107.31:/"
# 添加环境变量 这里主要针对外部进程调用Python语言 Python的环境变量继承了父进程
NDK_PATH = "/Users/san/enviroment/sdk/ndk-bundle"
NDK_MODULE_PATH = "/Users/san/frontend/cocos2d/cocos:/Users/san/frontend/cocos2d/external:/Users/san/frontend/cocos2d"
GRADLE_PATH = "/Users/san/enviroment/gradle-2.14.1/bin"
GRADLE_HOME = "/Users/san/enviroment/gradle-2.14.1"
if 'PATH' in os.environ:
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + NDK_PATH + os.pathsep + GRADLE_PATH
# 服务器运行脚本的时候需要这些环境变量
else:
os.environ['PATH'] = NDK_PATH + os.pathsep + GRADLE_PATH
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + '/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin' + ':/Library/Java/JavaVirtualMachines/jdk1.8.0_101.jdk/Contents/Home/bin'
os.environ['NDK_MODULE_PATH'] = NDK_MODULE_PATH
os.environ['GRADLE_USER_HOME'] = GRADLE_HOME
packParams = ""
for params in sys.argv[1:]:
packParams = packParams + " " + params
os.system("python %s" % packParams)
# 将热更新文件上传到服务器
# if "-time" in sys.argv:
# PACK_TIME = sys.argv[sys.argv.index("-time") + 1]
# if "-v" in sys.argv:
# PACK_VERSION = sys.argv[sys.argv.index("-v") + 1]
# PACK_NAME = PACK_VERSION + "_" + PACK_TIME + ".zip"
# if "-m" in sys.argv:
# if "etc" in sys.argv:
# LOCAL_UPDATE_FILE = LOCAL_UPDATE_PATH + "etc" + "/" + PACK_NAME
# elif "pvr" in sys.argv:
# LOCAL_UPDATE_FILE = LOCAL_UPDATE_PATH + "pvr" + "/" + PACK_NAME
# uploadCommand = "scp %s root@%s" % (LOCAL_UPDATE_FILE, INTRANET_UPDATE_PATN)
# os.system(uploadCommand)
# 删除某一个文件
if "-d" in sys.argv:
FileUtils.DeleteFile(sys.argv[sys.argv.index("-d") + 1]) | [
2,
19617,
28,
40477,
12,
23,
198,
198,
2,
42164,
235,
27950,
94,
161,
247,
101,
33699,
241,
44293,
227,
13783,
244,
32849,
101,
44293,
227,
35318,
21410,
31660,
10310,
103,
164,
226,
248,
17312,
105,
10263,
228,
227,
32849,
101,
164,
... | 1.736406 | 1,085 |
#!/usr/bin/python3 -I
from __future__ import print_function # python2
import sys
import os
import os.path
import pwd
import grp
import tempfile
import shutil
# check system behavior for suid-sudo
if sys.version_info[0] == 2:
_ispython2 = True
if sys.version_info < (2, 7, 13):
raise RuntimeError("too old python2 version")
else:
pass
else:
_ispython2 = False
if sys.version_info < (3, 5, 3):
raise RuntimeError("too old python3 version")
else:
pass
if _ispython2:
import fcntl
global_affected_paths = {}
group_cache = {}
user_cache = {}
trusted_groups = set()
trusted_users = set()
g = S()
g.test_cnt = 0
g.ok_cnt = 0
g.warn_cnt = 0
g.ng_cnt = 0
g.running_test = ""
g.running_subtest = ""
g.previous_subtest = ""
g.errored_tests = set()
g.warned_tests = set()
__all__ = ["check_affected_by", "global_affected_paths", "_ispython2",
"do_test", "subtest_start", "test_ok", "test_warn", "test_ng", "test_grave", "test_summary", "test_debug"]
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
532,
40,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
1303,
21015,
17,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,
279,
16993,
198,
11748,
1036,
... | 2.404255 | 423 |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import Optional, Union
import megengine._internal as mgb
from megengine._internal.opr_param_defs import CollectiveComm as CollParam
from .util import (
get_backend,
get_group_id,
get_master_ip,
get_master_port,
get_rank,
get_world_size,
)
def collective_comm_symvar(
inp: Union[mgb.SymbolVar, mgb.CompGraph],
key: Optional[str] = None,
op: CollParam.Mode = None,
nr_ranks: Optional[int] = None,
is_root: Optional[bool] = None,
rank: Optional[int] = None,
local_grad: Optional[bool] = False,
dtype: Optional[type] = None,
device: Optional[mgb.CompNode] = None,
comp_graph: Optional[mgb.CompGraph] = None,
) -> mgb.SymbolVar:
"""Helper function for creating collective_comm operators
:param inp: tensor or comp_graph
:param key: unique identifier for collective communication
:param op: mode of collective communication
:param nr_ranks: number of ranks, use util.get_world_size() as default
:param is_root: whether this node is root node
:param rank: rank of this node
:param local_grad: whether use local grad
:param dtype: output data type, use dtype of inp as default
:param device: output comp node, use comp node of inp as default
:param comp_graph: output comp graph, use comp graph of inp as default
"""
return mgb.opr.collective_comm(
inp,
key=key if key is not None else ("collective_comm_" + str(get_group_id())),
nr_devices=nr_ranks if nr_ranks is not None else get_world_size(),
is_root=is_root if is_root is not None else (get_rank() == 0),
rank=rank if rank is not None else get_rank(),
local_grad=local_grad,
server_addr=get_master_ip(),
port=get_master_port(),
param=CollParam(mode=op),
dtype=dtype,
backend=get_backend(),
comp_node=device,
comp_graph=comp_graph,
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
8336,
13798,
318,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
4943,
198,
2,
198,
2,
15069,
357,
66,
8,
1946,
12,
42334,
8336,
85... | 2.687719 | 855 |
from flask import Flask, jsonify, make_response, request
import pandas as pd
import numpy as np
from timeit import default_timer as timer
import pickle
# this is our machine learning model
model = pickle.load(open('f1-model.pkl','rb'))
app = Flask(__name__)
@app.route('/predict',methods = ['POST', 'GET'])
@app.errorhandler(404)
| [
6738,
42903,
1330,
46947,
11,
33918,
1958,
11,
787,
62,
26209,
11,
2581,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
640,
270,
1330,
4277,
62,
45016,
355,
19781,
198,
11748,
2298,
293,
198,
198... | 2.991071 | 112 |
# -*- coding: utf-8 -*-
import Assignment3 as a3
print("Start training...")
voc, train_counts = a3.buildVocabulary(False) # do not ignore stop words
x, y, socket = a3.generateXY(voc)
hard_limit = 50
lbds = [0.01, 0.02, 0.05, 0.1] # 0.01-0.1
lr = 0.01
w0 = [1.0 for _ in range(len(voc) + 1)]
for lbd in lbds:
w = a3.trainW(x, y, w0, hard_limit, lbd, lr)
test_counts = a3.logisticRegressionTest(voc, w, socket)
ham_accuracy = test_counts[0][0] / sum(test_counts[0])
spam_accuracy = test_counts[1][1] / sum(test_counts[1])
total_accuracy = (test_counts[0][0] + test_counts[1][1]) / (sum(test_counts[0]) + sum(test_counts[1]))
print("Accuracy of Logistic Regression with lambda = %.2f:" % lbd)
print("Ham: %.5f" % ham_accuracy)
print("Spam: %.5f" % spam_accuracy)
print("Total: %.5f" % total_accuracy)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
50144,
18,
355,
257,
18,
198,
198,
4798,
7203,
10434,
3047,
9313,
8,
198,
18893,
11,
4512,
62,
9127,
82,
796,
257,
18,
13,
11249,
53,
420,
22528,
7,
25101,
8,... | 2.185379 | 383 |
digc = [1,10,100,1000,10000,100000,1000000]
fraction = ""
prod = 1
for i in range(10**18):
fraction += str(i)
for j in digc:
prod *= int(fraction[j])
print(prod) | [
12894,
66,
796,
685,
16,
11,
940,
11,
3064,
11,
12825,
11,
49388,
11,
3064,
830,
11,
16,
10535,
60,
201,
198,
69,
7861,
796,
13538,
201,
198,
1676,
67,
796,
352,
201,
198,
1640,
1312,
287,
2837,
7,
940,
1174,
1507,
2599,
201,
19... | 2.095238 | 84 |
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from uks_app.views import *
| [
6738,
42625,
14208,
13,
9288,
1330,
17427,
14402,
20448,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
11,
10568,
198,
6738,
334,
591,
62,
1324,
13,
33571,
1330,
1635,
628
] | 3.516129 | 31 |
"""Example of running a custom hand-coded policy alongside trainable policies.
This example has two policies:
(1) a simple PG policy
(2) a hand-coded policy that acts at random in the env (doesn't learn)
In the console output, you can see the PG policy does much better than random:
Result for PG_multi_cartpole_0:
...
policy_reward_mean:
pg_policy: 185.23
random: 21.255
...
"""
import argparse
import gym
import os
import ray
from ray import tune
from ray.tune.registry import register_env
from ray.rllib.examples.env.multi_agent import MultiAgentCartPole
from ray.rllib.examples.policy.random_policy import RandomPolicy
from ray.rllib.utils.test_utils import check_learning_achieved
parser = argparse.ArgumentParser()
parser.add_argument(
"--framework",
choices=["tf", "tf2", "tfe", "torch"],
default="tf",
help="The DL framework specifier.")
parser.add_argument(
"--as-test",
action="store_true",
help="Whether this script should be run as a test: --stop-reward must "
"be achieved within --stop-timesteps AND --stop-iters.")
parser.add_argument(
"--stop-iters",
type=int,
default=20,
help="Number of iterations to train.")
parser.add_argument(
"--stop-timesteps",
type=int,
default=100000,
help="Number of timesteps to train.")
parser.add_argument(
"--stop-reward",
type=float,
default=150.0,
help="Reward at which we stop training.")
if __name__ == "__main__":
args = parser.parse_args()
ray.init()
# Simple environment with 4 independent cartpole entities
register_env("multi_agent_cartpole",
lambda _: MultiAgentCartPole({"num_agents": 4}))
single_env = gym.make("CartPole-v0")
obs_space = single_env.observation_space
act_space = single_env.action_space
stop = {
"training_iteration": args.stop_iters,
"episode_reward_mean": args.stop_reward,
"timesteps_total": args.stop_timesteps,
}
config = {
"env": "multi_agent_cartpole",
"multiagent": {
"policies": {
"pg_policy": (None, obs_space, act_space, {
"framework": args.framework,
}),
"random": (RandomPolicy, obs_space, act_space, {}),
},
"policy_mapping_fn": (
lambda agent_id: ["pg_policy", "random"][agent_id % 2]),
},
"framework": args.framework,
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
}
results = tune.run("PG", config=config, stop=stop, verbose=1)
if args.as_test:
check_learning_achieved(results, args.stop_reward)
ray.shutdown()
| [
37811,
16281,
286,
2491,
257,
2183,
1021,
12,
40976,
2450,
7848,
4512,
540,
4788,
13,
198,
198,
1212,
1672,
468,
734,
4788,
25,
198,
220,
220,
220,
357,
16,
8,
257,
2829,
23842,
2450,
198,
220,
220,
220,
357,
17,
8,
257,
1021,
12,... | 2.435398 | 1,130 |
#!/usr/bin/env python
"""
This is the sub class seisflows.preprocess.PyatoaMaui
Slightly altered processing function for the New Zealand tomography scenario
"""
import sys
import pyatoa
from pyasdf import ASDFDataSet
from seisflows3.config import custom_import
from pyatoa.utils.read import read_station_codes
PAR = sys.modules["seisflows_parameters"]
PATH = sys.modules["seisflows_paths"]
def process_event(self, source_name, codes=None, **kwargs):
    """
    Monkey-patched misfit quantification for a single event.

    Replaces Pyaflowa's stock ``process_event`` so that recordings from the
    temporary deployments (networks ZX and Z8) keep their instrument
    response: those data are assumed to already be in physical units.
    All other stations are processed as usual.  Waveform/map plots are made
    during processing, results are stored in an ASDFDataSet, and adjoint
    sources plus a STATIONS_ADJOINT file (needed by SPECFEM3D adjoint
    simulations) are written to disk.

    Remaining kwargs are passed through to pyatoa.Manager.flow().

    :type source_name: str
    :param source_name: event id used for data gathering and processing
    :type codes: list of str
    :param codes: station codes to process; when None, codes are read from
        the STATIONS file referenced by the internal path structure
    :rtype: float
    :return: total scaled misfit collected during the processing chain
    """
    # Event-specific configuration and attribute container
    io = self.setup(source_name)
    # No explicit station list given -> read codes from the STATIONS file
    if codes is None:
        codes = read_station_codes(io.paths.stations_file,
                                   loc="??", cha="HH?")
    # Process each station for this event against a single ASDFDataSet,
    # opened as a context manager so it is always closed cleanly
    with ASDFDataSet(io.paths.dsfid) as ds:
        mgmt = pyatoa.Manager(ds=ds, config=io.config)
        for code in codes:
            network, station, location, channel = code.split(".")
            # Temporary networks (ZX, Z8) are already in physical units,
            # so response removal is switched off for them.  Explicit bool
            # kept for clarity.
            strip_response = bool(network.upper() not in ["ZX", "Z8"])
            mgmt_out, io = self.process_station(
                mgmt=mgmt, code=code, io=io,
                remove_response=strip_response, **kwargs)
    # Collapse all station measurements into the event's scaled misfit
    return self.finalize(io)
class PyatoaNz(custom_import("preprocess", "pyatoa")):
    """
    Pyatoa-based data preprocessing with a New Zealand specific processing
    routine (temporary-network data are handled differently, see the
    module-level ``process_event`` patch).
    """
    def prepare_eval_grad(self, path, source_name):
        """
        Prepare a gradient evaluation: gather and preprocess waveforms,
        then measure misfit between observations and synthetics with
        Pyatoa.  Intended to run as a per-event task in parallel.

        :type path: str
        :param path: current function-evaluation path for saving residuals
        :type source_name: str
        :param source_name: event id used for tagging and data lookup
        """
        # Imported late on purpose: preprocess is loaded before optimize,
        # and the current iteration/step count live on the optimize module.
        optimize = sys.modules["seisflows_optimize"]
        # Install the NZ-specific processing routine BEFORE instantiating
        # Pyaflowa so the patched method is the one that runs.
        pyatoa.Pyaflowa.process_event = process_event
        # Pyaflowa abstracts the processing machinery; tell it where we are
        # in the inversion so outputs are tagged correctly.
        flow = pyatoa.Pyaflowa(structure="seisflows", sfpaths=PATH,
                               sfpar=PAR, iteration=optimize.iter,
                               step_count=optimize.line_search.step_count,
                               plot=True)
        # Process every station for this event and collect the misfit
        event_misfit = flow.process_event(source_name,
                                          fix_windows=PAR.FIX_WINDOWS)
        # Write out the files the inversion needs to continue
        if event_misfit:
            # Event misfit defined by Tape et al. (2010)
            self.write_residuals(path=path, scaled_misfit=event_misfit,
                                 source_name=source_name)
        self.snapshot()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
1212,
318,
262,
850,
1398,
384,
271,
44041,
13,
3866,
14681,
13,
20519,
5549,
64,
44,
559,
72,
198,
198,
50,
30945,
14294,
7587,
2163,
329,
262,
968,
8936,
16667,
4867,
8... | 2.551564 | 1,726 |
# -*- coding: utf-8 -*-
#!/usr/bin/python3.9
################################################################################
## Automated Fuzzing Harness Generator - Vintage 2021 Python 3.9 ##
################################################################################
# Licensed under GPLv3                                                        ##
# https://www.gnu.org/licenses/gpl-3.0.en.html ##
# ##
# The above copyright notice and this permission notice shall be included in ##
# all copies or substantial portions of the Software. ##
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
################################################################################
# Module-wide test/debug switch.
# NOTE(review): TESTING is assigned True a second time below the imports,
# making this first assignment redundant.
TESTING = True
"""
"""
################################################################################
############## IMPORTS #################
################################################################################
import cpp
import lief
import gzip
import sys,os
import logging
import inspect
import argparse
import traceback
import threading
import subprocess
import pandas as pd
from pathlib import Path
from datetime import date
from os import _exit as exit
from ast import literal_eval
from signal import SIGINT, signal
from subprocess import DEVNULL, STDOUT
# Module-wide test/debug switch (duplicates the assignment made before the
# imports; kept as-is).
TESTING = True
################################################################################
# Terminal Colorication Imports
################################################################################
# Optional colored terminal output.  COLORMEQUALIFIED gates every color
# helper defined below.
try:
    import colorama
    from colorama import init
    init()
    from colorama import Fore, Back, Style
    # BUGFIX: color support is available whenever the import succeeds.
    # The original only set this under `if TESTING == True`, which would
    # have left COLORMEQUALIFIED undefined (NameError in every print
    # helper) had TESTING been False.
    COLORMEQUALIFIED = True
except ImportError:
    print("[-] NO COLOR PRINTING FUNCTIONS AVAILABLE, Install the Colorama Package from pip")
    COLORMEQUALIFIED = False
print("[+] Basic imports completed")
###############################################################################
# LOGGING
################################################################ ###############
# File-based logging: all logger output goes to the file below in the
# current directory; filemode 'w' truncates it on every run.
log_file = 'LOGGING LOGGER LOG'
logging.basicConfig(filename=log_file, format='%(asctime)s %(message)s', filemode='w')
# Root logger; the message helpers below route through it.
logger = logging.getLogger()
# Directory the script was launched from...
script_cwd = Path().absolute()
# ...and the directory this script file lives in.
script_osdir = Path(__file__).parent.absolute()
###############################################################################
# Lambdas
###############################################################################
# Colored console + logging helpers.  All fall back to plain output when
# colorama is unavailable (COLORMEQUALIFIED is False).  Converted from
# lambda assignments to defs (PEP 8) with two fixes noted below.
def redprint(text):
    """Print *text* in red, plain when color output is unavailable."""
    if COLORMEQUALIFIED:
        print(Fore.RED + ' ' + text + ' ' + Style.RESET_ALL)
    else:
        print(text)

def blueprint(text):
    """Print *text* in blue, plain when color output is unavailable."""
    if COLORMEQUALIFIED:
        print(Fore.BLUE + ' ' + text + ' ' + Style.RESET_ALL)
    else:
        print(text)

def greenprint(text):
    """Print *text* in green, plain when color output is unavailable."""
    if COLORMEQUALIFIED:
        print(Fore.GREEN + ' ' + text + ' ' + Style.RESET_ALL)
    else:
        print(text)

def yellowboldprint(text):
    """Print *text* in bold yellow, plain when color output is unavailable."""
    if COLORMEQUALIFIED:
        print(Fore.YELLOW + Style.BRIGHT + ' {} '.format(text) + Style.RESET_ALL)
    else:
        print(text)

def makeyellow(text):
    """Return *text* wrapped in yellow ANSI codes (unchanged without color)."""
    return Fore.YELLOW + ' ' + text + ' ' + Style.RESET_ALL if COLORMEQUALIFIED else text

def makered(text):
    """Return *text* wrapped in red ANSI codes (unchanged without color).

    BUGFIX: originally returned None in the no-color path, silently
    discarding the text; now consistent with makeyellow.
    """
    return Fore.RED + ' ' + text + ' ' + Style.RESET_ALL if COLORMEQUALIFIED else text

def makegreen(text):
    """Return *text* wrapped in green ANSI codes (unchanged without color).

    BUGFIX: originally returned None in the no-color path.
    """
    return Fore.GREEN + ' ' + text + ' ' + Style.RESET_ALL if COLORMEQUALIFIED else text

def makeblue(text):
    """Return *text* wrapped in blue ANSI codes (unchanged without color).

    BUGFIX: originally returned None in the no-color path.
    """
    return Fore.BLUE + ' ' + text + ' ' + Style.RESET_ALL if COLORMEQUALIFIED else text

# Console + logfile in one call.  BUGFIX: the originals logged the return
# value of a print helper (always None), so the logfile only ever recorded
# "None"; the message itself is now logged.
def debugmessage(message):
    """Echo *message* in blue and log it at DEBUG level."""
    blueprint(message)
    logger.debug(message)

def info_message(message):
    """Echo *message* in green and log it at INFO level."""
    greenprint(message)
    logger.info(message)

def warning_message(message):
    """Echo *message* in bold yellow and log it at WARNING level."""
    yellowboldprint(message)
    logger.warning(message)

def error_message(message):
    """Echo *message* in red and log it at ERROR level."""
    redprint(message)
    logger.error(message)

def critical_message(message):
    """Echo *message* in bold yellow and log it at CRITICAL level."""
    yellowboldprint(message)
    logger.critical(message)

def gzcompress(inputdata):
    """Return {"data": gzip-compressed bytes} for a bytes-like input."""
    return {"data": gzip.compress(inputdata)}

def scanfilesbyextension(directory, extension):
    """List filenames in *directory* that end with *extension*."""
    return [f for f in os.listdir(directory) if f.endswith(extension)]
################################################################################
############## SYSTEM AND ENVIRONMENT #################
################################################################################
class GenPerpThreader():
    '''
    General purpose threading wrapper meant to run an arbitrary callable in
    a thread.

    NOTE(review): the class body is empty apart from this docstring -- the
    implementation appears to have been stripped from this copy of the file.
    '''
################################################################################
############## CORE #################
################################################################################
class PybashyRunFunction():
    '''
    Runner for one-off functions established inline, deep in a complex
    structure that you do not wish to pick apart.  The function should
    contain only a "steps" variable and format().

    NOTE(review): the class body is empty apart from this docstring -- the
    implementation appears to have been stripped from this copy of the file.
    '''
class PybashyRunSingleJSON():
    '''
    Runner for one-off commands established inline, deep in a complex
    structure that you do not wish to pick apart.  The input should contain
    only a single json Command() item and format(), e.g.:

    {
        "IPTablesAcceptNAT": {
            "command" : "iptables -t nat -I PREROUTING 1 -s {} -j ACCEPT".format(self.remote_IP),
            "info_message"    : "[+] Accept All Incomming On NAT Subnet",
            "success_message" : "[+] Command Sucessful",
            "failure_message" : "[-] Command Failed! Check the logfile!"
        }
    }

    NOTE(review): the class body is empty apart from this docstring -- the
    implementation appears to have been stripped from this copy of the file.
    '''
################################################################################
############## COMMAND LINE ARGUMENTS #################
################################################################################
parser = argparse.ArgumentParser(description="""\
A program to help you to automatically create fuzzing harnesses.
""")
# BUGFIX: the `dest` values below now match the attributes the rest of the
# script actually reads (arguments.library / .ql / .database / .mode /
# .output / .flags / .headers / .debug / .detection).  The original used
# dest="codeqlpath"/"multiharness"/"outputdir"/"compilerflags" while the
# code accessed arguments.ql / arguments.mode / arguments.output /
# arguments.flags, raising AttributeError at runtime.  The command-line
# flag names themselves are unchanged.
parser.add_argument('--librarypath',
                    dest='library',
                    action="store",
                    default="/workspace",
                    help="path to lib",
                    required=True)
parser.add_argument('--codeqlpath',
                    dest='ql',
                    action="store",
                    default="",
                    help="path to codeql modules, database, and binary",
                    required=True)
parser.add_argument('--database',
                    dest='database',
                    action="store",
                    default="",
                    help="Codeql database",
                    required=True)
parser.add_argument('--multiharness',
                    dest='mode',
                    action="store_true",
                    default=False,
                    help="use this flag for multiple argument harnesses",
                    required=False)
parser.add_argument('--outputdir',
                    dest='output',
                    action="store",
                    default="",
                    help="Output directory",
                    required=True)
# BUGFIX: default must be None (not False) -- the script gates on
# `arguments.flags is not None`, and a default of False would have spliced
# the literal string "False" into the clang command lines.
parser.add_argument('--compilerflags',
                    dest='flags',
                    action="store",
                    default=None,
                    help="compiler flags (include)",
                    required=False)
parser.add_argument('--headers',
                    dest='headers',
                    action="store",
                    default=False,
                    help="header files, CSV string",
                    required=False)
parser.add_argument('--debug',
                    dest='debug',
                    action="store_true",
                    default=False,
                    help="display debugging information")
# NOTE(review): downstream code does int(arguments.detection) == 0/1, which
# conflicts with the documented 'headers'/'functions' string values -- pass
# "0" or "1" on the command line; confirm intended semantics with the author.
parser.add_argument('--detection',
                    dest='detection',
                    action="store",
                    default='headers',
                    help="'headers' to Auto-detect headers, "
                         "'functions' for function definitions",
                    required=True)
arguments = parser.parse_args()

def cwd():
    """Return the current working directory."""
    return os.getcwd()
def writecodeql(scanoperation: dict):
    '''Write a CodeQL query file to disk.

    :param scanoperation: dict of the form {'name': str, 'filedata': textblock}
        where 'name' is the destination path and 'filedata' the file body.
    '''
    # BUGFIX: the original opened the file without a mode (default 'r'),
    # so the subsequent write() raised io.UnsupportedOperation.  Open for
    # writing, and use a context manager so the handle is closed even if
    # the write fails.
    with open(scanoperation['name'], 'w') as filehandle:
        filehandle.write(scanoperation['filedata'])
################################################################################
############## CODE SCANNER #################
################################################################################
#commands, top down
# if automatic detection of headers
#SEG2
#elif int(arguments.detection) == 1:
#"cp " + cwd + "/oneargfunc.ql " + arguments.ql, shell=True)
# subprocess.check_output("cd "+ arguments.ql + ";" +arguments.ql+ "codeql query run oneargfunc.ql -o " + arguments.output + "onearg.bqrs -d " + arguments.ql + arguments.database +";" + arguments.ql + "codeql bqrs decode --format=csv " + arguments.output + "onearg.bqrs -o " + arguments.output + "onearg.csv", shell=True)
# Cross-reference the readelf output of every discovered object file against
# the function names extracted from the CodeQL query results; each match is
# recorded (function name, type info, owning object) in defined_functions.
# NOTE(review): `scanner`, `total_functions` and `defined_functions` are not
# defined above this point in the file (total_functions is only created much
# further down) -- this section looks like it was spliced out of a class
# method and will raise NameError if executed as-is; confirm against the
# original source.
for index, define in enumerate(scanner.object_functions["output"]):
    for index2, cur in enumerate(total_functions["function"]):
        if (str(cur) in define):
            defined_functions["function"].append(cur)
            defined_functions["type"].append(total_functions["type"][index2])
            defined_functions["object"].append(scanner.object_functions["object"][index])
            defined_functions["type_or_loc"].append(total_functions["type_or_loc"][index2])
# Split matched functions into two pools:
#   * functions in non-.so objects are patched with LIEF: the symbol is
#     re-exported, the PIE flag is stripped, and the object is rewritten as
#     a lib<function>.so shared library           -> elf_functions
#   * functions already in shared objects are usable directly
#                                                 -> shared_functions
for i in range(len(defined_functions["function"])):
    if ".so" not in str(defined_functions["object"][i]):
        elf = lief.parse(arguments.library + str(defined_functions["object"][i]))
        try:
            addr = elf.get_function_address(str(defined_functions["function"][i]))
        except:
            # Symbol not resolvable in this object; skip it.
            # NOTE(review): bare except also swallows unrelated errors.
            continue
        elf.add_exported_function(addr, str(defined_functions["function"][i]))
        # Drop the PIE flag so the rewritten object can be dlopen'd/linked
        elf[lief.ELF.DYNAMIC_TAGS.FLAGS_1].remove(lief.ELF.DYNAMIC_FLAGS_1.PIE)
        outfile = "lib%s.so" % str(defined_functions["function"][i])
        elf.write(outfile)
        elf_functions["function"].append(str(defined_functions["function"][i]))
        elf_functions["type"].append(str(defined_functions["type"][i]))
        elf_functions["object"].append(outfile)
        elf_functions["type_or_loc"].append(str(defined_functions["type_or_loc"][i]))
    else:
        shared_functions["function"].append(str(defined_functions["function"][i]))
        shared_functions["type"].append(str(defined_functions["type"][i]))
        shared_functions["object"].append(str(defined_functions["object"][i]))
        shared_functions["type_or_loc"].append(str(defined_functions["type_or_loc"][i]))
# Emit and compile one libFuzzer harness (.c) per shared-object function.
# NOTE(review): `env=self.env` in the Popen calls below cannot work at
# module level (`self` is undefined) -- another sign this code was lifted
# out of a class method; confirm against the original source.
for index3 in range(len(shared_functions["function"])):
    header_section = ""
    if not arguments.headers:
        # No user-supplied headers: in detection mode 0 include the header
        # recorded by the CodeQL location query, otherwise include nothing
        if int(arguments.detection) == 0:
            header_section = "#include \"" + os.path.basename(shared_functions["type_or_loc"][index3]) + "\"\n\n"
        else:
            header_section = ""
    else:
        # User supplied a CSV list of headers; include each of them
        header_list = arguments.headers.split(",")
        for x in header_list:
            header_section+= "#include \"" + x + "\"\n\n"
    if int(arguments.detection) == 0:
        # Header told us the prototype: just forward Data to the target
        main_section = "int LLVMFuzzerTestOneInput(" + str(shared_functions["type"][index3]) + " Data, long Size) {\n\t" + str(shared_functions["function"][index3]) + "(Data);\n\treturn 0;\n}"
    else:
        # No header available: emit a forward declaration first
        main_section = str(shared_functions["type_or_loc"][index3]) + " " + str(shared_functions["function"][index3]) + "(" + str(shared_functions["type"][index3])+ " testcase);\n" + "int LLVMFuzzerTestOneInput(" + str(shared_functions["type"][index3]) + " Data, long Size) {\n\t" + str(shared_functions["function"][index3]) + "(Data);\n\treturn 0;\n}"
    full_source = header_section + main_section
    # Sanitize the function name into a filesystem-safe filename
    filename = "".join([c for c in str(shared_functions["function"][index3]) if c.isalpha() or c.isdigit() or c==' ']).rstrip()
    f = open(arguments.output + filename +".c", "w")
    f.write(full_source)
    # Compile the harness with clang + ASan/UBSan/libFuzzer.  The four
    # branches per mode only vary two things: whether user compiler flags
    # are spliced in, and whether compiler output is suppressed (debug off).
    if int(arguments.detection) == 0:
        if arguments.flags is not None and int(arguments.debug) == 1:
            subprocess.Popen("clang -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " -L " + arguments.output + " -L " +arguments.library + " -I" + os.path.dirname(shared_functions["type_or_loc"][index3]) + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".c -o " + arguments.output + filename, env=self.env, shell=True)
        elif arguments.flags is not None and int(arguments.debug) == 0:
            subprocess.Popen("clang -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " -L " + arguments.output + " -L " +arguments.library + " -I" + os.path.dirname(shared_functions["type_or_loc"][index3]) + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".c -o " + arguments.output + filename, env=self.env, shell=True, stdout=DEVNULL, stderr=STDOUT)
        elif arguments.flags is None and int(arguments.debug) == 1:
            subprocess.Popen("clang -g -fsanitize=address,undefined,fuzzer -L " + arguments.output + " -L " +arguments.library + " -I" + os.path.dirname(shared_functions["type_or_loc"][index3]) + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".c -o " + arguments.output + filename, env=self.env, shell=True)
        else:
            subprocess.Popen("clang -g -fsanitize=address,undefined,fuzzer -L " + arguments.output + " -L " +arguments.library + " -I" + os.path.dirname(shared_functions["type_or_loc"][index3]) + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".c -o " + arguments.output + filename, env=self.env, shell=True, stdout=DEVNULL, stderr=STDOUT)
    else:
        # Detection mode 1: no header directory to add to the include path
        if arguments.flags is not None and int(arguments.debug) == 1:
            subprocess.Popen("clang -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " -L " + arguments.output + " -L " +arguments.library + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".c -o " + arguments.output + filename, env=self.env, shell=True)
        elif arguments.flags is not None and int(arguments.debug) == 0:
            subprocess.Popen("clang -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " -L " + arguments.output + " -L " +arguments.library + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".c -o " + arguments.output + filename, env=self.env, shell=True, stdout=DEVNULL, stderr=STDOUT)
        elif arguments.flags is None and int(arguments.debug) == 1:
            subprocess.Popen("clang -g -fsanitize=address,undefined,fuzzer -L " + arguments.output + " -L " +arguments.library + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".c -o " + arguments.output + filename, env=self.env, shell=True)
        else:
            subprocess.Popen("clang -g -fsanitize=address,undefined,fuzzer -L " + arguments.output + " -L " +arguments.library + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".c -o " + arguments.output + filename, env=self.env, shell=True, stdout=DEVNULL, stderr=STDOUT)
if (int(arguments.detection) == 1):
for index4 in range(len(elf_functions["function"])):
header_section = ""
if not arguments.headers:
header_section = ""
else:
header_list = arguments.headers.split(",")
for x in header_list:
header_section+= "#include \"" + x + "\"\n\n"
main_section = "#include <stdlib.h>\n#include <dlfcn.h>\n\nvoid* library=NULL;\ntypedef " + str(elf_functions["type_or_loc"][index4]) + "(*" + str(elf_functions["function"][index4]) + "_t)(" + str(elf_functions["type"][index4]) + ");\n" + "void CloseLibrary()\n{\nif(library){\n\tdlclose(library);\n\tlibrary=NULL;\n}\n}\nint LoadLibrary(){\n\tlibrary = dlopen(\"" + arguments.library + str(elf_functions["object"][index4]) + "\",RTLD_LAZY);\n\tatexit(CloseLibrary);\n\treturn library != NULL;\n}\nint LLVMFuzzerTestOneInput(" + str(elf_functions["type"][index4]) + " Data, long Size) {\n\tLoadLibrary();\n\t" + str(elf_functions["function"][index4]) + "_t " + str(elf_functions["function"][index4]) + "_s = (" + str(elf_functions["function"][index4]) + "_t)dlsym(library,\"" + str(elf_functions["function"][index4]) + "\");\n\t" + str(elf_functions["function"][index4]) + "_s(Data);\n\treturn 0;\n}"
full_source = header_section + main_section
filename = "".join([c for c in str(elf_functions["function"][index4]) if c.isalpha() or c.isdigit() or c==' ']).rstrip()
f = open(arguments.output + filename +".c", "w")
f.write(full_source)
if arguments.flags is not None and int(arguments.debug) == 1:
print("clang -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " " + arguments.output + filename +".c -o " + arguments.output + filename)
subprocess.Popen("clang -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " " + arguments.output + filename +".c -o " + arguments.output + filename, env=self.env, shell=True)
elif arguments.flags is not None and int(arguments.debug) == 0:
subprocess.Popen("clang -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " " + arguments.output + filename +".c -o " + arguments.output + filename, env=self.env, shell=True, stdout=DEVNULL, stderr=STDOUT)
elif arguments.flags is None and int(arguments.debug) == 1:
subprocess.Popen("clang -g -fsanitize=address,undefined,fuzzer " + arguments.output + filename +".c -o " + arguments.output + filename, env=self.env, shell=True)
else:
subprocess.Popen("clang -g -fsanitize=address,undefined,fuzzer " + arguments.output + filename +".c -o " + arguments.output + filename, env=self.env, shell=True, stdout=DEVNULL, stderr=STDOUT)
elif (int(arguments.mode) == 1):
scanner.shared_objects=[]
func_objects=[]
object_functions={"output":[],"object":[]}
cwd = os.getcwd()
if (int(arguments.detection) == 0):
subprocess.check_output("cp " + cwd + "/multiarglocation.ql " + arguments.ql, shell=True)
subprocess.check_output("cd "+ arguments.ql + ";" +arguments.ql+ "codeql query run multiarglocation.ql -o " + arguments.output + "multiarg.bqrs -d " + arguments.ql + arguments.database +";" + arguments.ql + "codeql bqrs decode --format=csv " + arguments.output + "multiarg.bqrs -o " + arguments.output + "multiarg.csv", shell=True)
elif (int(arguments.detection) == 1):
subprocess.check_output("cp " + cwd + "/multiargfunc.ql " + arguments.ql, shell=True)
subprocess.check_output("cd "+ arguments.ql + ";" +arguments.ql+ "codeql query run multiargfunc.ql -o " + arguments.output + "multiarg.bqrs -d " + arguments.ql + arguments.database +";" + arguments.ql + "codeql bqrs decode --format=csv " + arguments.output + "multiarg.bqrs -o " + arguments.output + "multiarg.csv", shell=True)
data = pd.read_csv(arguments.output + "multiarg.csv")
total_functions = data.drop_duplicates().groupby(["f", "g"], as_index=False)["t"].agg(list)
print(total_functions)
os.chdir(arguments.library)
defined_functions = pd.DataFrame(columns=["f","t","g","object"])
for filename in os.listdir(arguments.library):
if "shared object" in subprocess.run(["file", filename], stdout=subprocess.PIPE).stdout.decode('utf-8'):
print("Found shared object " + filename)
scanner.shared_objects.append(filename)
for obj in scanner.shared_objects:
scanner.object_functions["output"].append(subprocess.run(["readelf", "-a",obj], stdout=subprocess.PIPE).stdout.decode('utf-8'))
scanner.object_functions["object"].append(obj)
for index, defe in enumerate(scanner.object_functions["output"]):
for index2, cur in enumerate(total_functions["f"]):
if (str(cur) in defe):
func_objects.append(scanner.object_functions["object"][index])
defined_functions = defined_functions.append([total_functions.iloc[index2,:]])
defined_functions["object"] = func_objects
defined_functions = defined_functions.to_dict(orient='list')
elf_functions={"function":[], "type":[],"object": [],"type_or_loc":[]}
shared_functions={"function":[], "type":[],"object": [],"type_or_loc":[]}
for i in range(len(defined_functions["f"])):
if ".so" not in str(defined_functions["object"][i]):
elf = lief.parse(arguments.library + str(defined_functions["object"][i]))
try:
addr = elf.get_function_address(str(defined_functions["f"][i]))
except:
continue
elf.add_exported_function(addr, str(defined_functions["f"][i]))
elf[lief.ELF.DYNAMIC_TAGS.FLAGS_1].remove(lief.ELF.DYNAMIC_FLAGS_1.PIE)
outfile = "lib%s.so" % str(defined_functions["f"][i])
elf.write(outfile)
elf_functions["function"].append(str(defined_functions["f"][i]))
elf_functions["type"].append(str(defined_functions["t"][i]))
elf_functions["object"].append(outfile)
elf_functions["type_or_loc"].append(str(defined_functions["g"][i]))
else:
shared_functions["function"].append(str(defined_functions["f"][i]))
shared_functions["type"].append(str(defined_functions["t"][i]))
shared_functions["object"].append(str(defined_functions["object"][i]))
shared_functions["type_or_loc"].append(str(defined_functions["g"][i]))
for index3 in range(len(shared_functions["function"])):
header_section = ""
if not arguments.headers:
if (int(arguments.detection) == 0):
header_section += "#include <fuzzer/FuzzedDataProvider.h>\n#include <stddef.h>\n#include <stdint.h>\n#include <string.h>\n" + "#include \"" + os.path.basename(shared_functions["type_or_loc"][index3]) + "\"\n\n"
else:
header_section += "#include <fuzzer/FuzzedDataProvider.h>\n#include <stddef.h>\n#include <stdint.h>\n#include <string.h>\n"
else:
header_list = arguments.headers.split(",")
header_section += "#include <fuzzer/FuzzedDataProvider.h>\n#include <stddef.h>\n#include <stdint.h>\n#include <string.h>\n"
for x in header_list:
header_section+= "#include \"" + x + "\"\n\n"
stub = ""
marker = 1
param = ""
header_args = ""
for ty in literal_eval(shared_functions["type"][index3]):
if ty.count('*') == 1:
if "long" in ty or "int" in ty or "short" in ty and "long double" not in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeIntegral<" + ty.replace("*", "") + ">();\n" + ty.replace("*", "") + "*pointer"+ str(marker) + " = &data" + str(marker) + ";\n"
param += "pointer" + str(marker) + ", "
header_args += ty + "pointer" + str(marker) + ", "
elif "char" in ty or "string" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeIntegral<" + ty.replace("*", "") + ">();\n" + ty.replace("*", "") + "*pointer"+ str(marker) + " = &data" + str(marker) + ";\n"
param += "pointer" + str(marker) + ", "
header_args += ty + "pointer" + str(marker) + ", "
elif "float" in ty or "double" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeFloatingPoint<" + ty.replace("*", "") +">();\n" + ty.replace("*", "") + "*pointer"+ str(marker) + " = &data" + str(marker) + ";\n"
param += "pointer" + str(marker) + ", "
header_args += ty + "pointer" + str(marker) + ", "
elif "bool" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeBool();\n" + ty + "pointer"+ str(marker) + " = &data" + str(marker) + ";\n"
param += "pointer" + str(marker) + ", "
header_args += ty + "pointer" + str(marker) + ", "
else:
continue
elif ty.count('*') == 2:
if "long" in ty or "int" in ty or "short" in ty and "long double" not in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeIntegral<" + ty.replace("*", "") + ">();\n" + ty.replace("*", "") + "*pointer"+ str(marker) + " = &data" + str(marker) + ";\n" + ty.replace("*", "") + "**doublepointer"+str(marker) + " = &pointer"+ str(marker) + ";\n"
param += "doublepointer" + str(marker) + ", "
header_args += ty + "doublepointer" + str(marker) + ", "
elif "char" in ty or "string" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeIntegral<" + ty.replace("*", "") + ">();\n" + ty.replace("*", "") + "*pointer"+ str(marker) + " = &data" + str(marker) + ";\n" + ty.replace("*", "") + "**doublepointer"+str(marker) + " = &pointer"+ str(marker) + ";\n"
param += "doublepointer" + str(marker) + ", "
header_args += ty + "doublepointer" + str(marker) + ", "
elif "float" in ty or "double" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeFloatingPoint<" + ty.replace("*", "") + ">();\n" + ty.replace("*", "") + "*pointer"+ str(marker) + " = &data" + str(marker) + ";\n" + ty.replace("*", "") + "**doublepointer"+str(marker) + " = &pointer"+ str(marker) + ";\n"
param += "doublepointer" + str(marker) + ", "
header_args += ty + "doublepointer" + str(marker) + ", "
elif "bool" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeBool();\n" + ty.replace("*", "") + "*pointer" + str(marker) + " = &data" + str(marker) + ";\n" + ty.replace("*", "") + "**doublepointer"+str(marker) + " = &pointer"+ str(marker) + ";\n"
param += "doublepointer" + str(marker) + ", "
header_args += ty + "doublepointer" + str(marker) + ", "
else:
continue
else:
if "long" in ty or "int" in ty or "short" in ty and "long double" not in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeIntegral<" + ty +">();\n"
param += "data" + str(marker) + ", "
header_args += ty + " data" + str(marker) + ", "
elif "char" in ty or "string" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeIntegral<" + ty +">();\n"
param += "data" + str(marker) + ", "
header_args += ty + " data" + str(marker) + ", "
elif "float" in ty or "double" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeFloatingPoint<" + ty +">();\n"
param += "data" + str(marker) + ", "
header_args += ty + " data" + str(marker) + ", "
elif "bool" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeBool();\n"
param += "data" + str(marker) + ", "
header_args += ty + " data" + str(marker) + ", "
else:
continue
marker+= 1
param = rreplace(param,', ','',1)
header_args = rreplace(header_args,', ','',1)
if (int(arguments.detection) == 0):
main_section = "extern \"C\" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {\n\tFuzzedDataProvider provider(data, size);\n\t" + stub + str(shared_functions["function"][index3]) + "(" + param + ");\nreturn 0;\n}"
else:
main_section = str(shared_functions["type_or_loc"][index3]) + " " + str(shared_functions["function"][index3]) +"(" + header_args + ");\n\nextern \"C\" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {\n\tFuzzedDataProvider provider(data, size);\n\t" + stub + str(shared_functions["function"][index3]) + "(" + param + ");\nreturn 0;\n}"
full_source = header_section + main_section
filename = "".join([c for c in str(shared_functions["function"][index3]) if c.isalpha() or c.isdigit() or c==' ']).rstrip()
f = open(arguments.output + filename +".cc", "w")
f.write(full_source)
if int(arguments.detection) == 0:
if arguments.flags is not None and int(arguments.debug) == 1:
print("clang++ -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " -L " + arguments.output + " -L " +arguments.library + " -I" + os.path.dirname(shared_functions["type_or_loc"][index3]) + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".cc -o " + arguments.output + filename)
subprocess.Popen("clang++ -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " -L " + arguments.output + " -L " +arguments.library + " -I" + os.path.dirname(shared_functions["type_or_loc"][index3]) + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".cc -o " + arguments.output + filename, env=self.env, shell=True)
elif arguments.flags is not None and int(arguments.debug) == 0:
subprocess.Popen("clang++ -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " -L " + arguments.output + " -L " +arguments.library + " -I" + os.path.dirname(shared_functions["type_or_loc"][index3]) + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".cc -o " + arguments.output + filename, env=self.env, shell=True, stdout=DEVNULL, stderr=STDOUT)
elif arguments.flags is None and int(arguments.debug) == 1:
subprocess.Popen("clang++ -g -fsanitize=address,undefined,fuzzer -L " + arguments.output + " -L " +arguments.library + " -I" + os.path.dirname(shared_functions["type_or_loc"][index3]) + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".cc -o " + arguments.output + filename, env=self.env, shell=True)
else:
subprocess.Popen("clang++ -g -fsanitize=address,undefined,fuzzer -L " + arguments.output + " -L " +arguments.library + " -I" + os.path.dirname(shared_functions["type_or_loc"][index3]) + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".cc -o " + arguments.output + filename, env=self.env, shell=True, stdout=DEVNULL, stderr=STDOUT)
else:
if arguments.flags is not None and int(arguments.debug) == 1:
subprocess.Popen("clang++ -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " -L " + arguments.output + " -L " +arguments.library + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".cc -o " + arguments.output + filename, env=self.env, shell=True)
elif arguments.flags is not None and int(arguments.debug) == 0:
subprocess.Popen("clang++ -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " -L " + arguments.output + " -L " +arguments.library + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".cc -o " + arguments.output + filename, env=self.env, shell=True, stdout=DEVNULL, stderr=STDOUT)
elif arguments.flags is None and int(arguments.debug) == 1:
subprocess.Popen("clang++ -g -fsanitize=address,undefined,fuzzer -L " + arguments.output + " -L " +arguments.library + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".cc -o " + arguments.output + filename, env=self.env, shell=True)
else:
subprocess.Popen("clang++ -g -fsanitize=address,undefined,fuzzer -L " + arguments.output + " -L " +arguments.library + " -l:" + str((shared_functions["object"][index3])) + " " + arguments.output + filename +".cc -o " + arguments.output + filename, env=self.env, shell=True, stdout=DEVNULL, stderr=STDOUT)
if (int(arguments.detection) == 1):
for index4 in range(len(elf_functions["function"])):
header_section = ""
if not arguments.headers:
header_section += "#include <fuzzer/FuzzedDataProvider.h>\n#include <stddef.h>\n#include <stdint.h>\n#include <string.h>\n"
else:
header_list = arguments.headers.split(",")
header_section += "#include <fuzzer/FuzzedDataProvider.h>\n#include <stddef.h>\n#include <stdint.h>\n#include <string.h>\n"
for x in header_list:
header_section+= "#include \"" + x + "\"\n"
stub = ""
marker = 1
param = ""
header_args = ""
for ty in literal_eval(elf_functions["type"][index4]):
if ty.count('*') == 1:
if "long" in ty or "int" in ty or "short" in ty and "long double" not in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeIntegral<" + ty.replace("*", "") + ">();\n" + ty.replace("*", "") + "*pointer"+ str(marker) + " = &data" + str(marker) + ";\n"
param += "pointer" + str(marker) + ", "
header_args += ty + "pointer" + str(marker) + ", "
elif "char" in ty or "string" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeIntegral<" + ty.replace("*", "") + ">();\n" + ty.replace("*", "") + "*pointer"+ str(marker) + " = &data" + str(marker) + ";\n"
param += "pointer" + str(marker) + ", "
header_args += ty + "pointer" + str(marker) + ", "
elif "float" in ty or "double" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeFloatingPoint<" + ty.replace("*", "") +">();\n" + ty.replace("*", "") + "*pointer"+ str(marker) + " = &data" + str(marker) + ";\n"
param += "pointer" + str(marker) + ", "
header_args += ty + "pointer" + str(marker) + ", "
elif "bool" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeBool();\n" + ty + "pointer"+ str(marker) + " = &data" + str(marker) + ";\n"
param += "pointer" + str(marker) + ", "
header_args += ty + "pointer" + str(marker) + ", "
else:
continue
elif ty.count('*') == 2:
if "long" in ty or "int" in ty or "short" in ty and "long double" not in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeIntegral<" + ty.replace("*", "") + ">();\n" + ty.replace("*", "") + "*pointer"+ str(marker) + " = &data" + str(marker) + ";\n" + ty.replace("*", "") + "**doublepointer"+str(marker) + " = &pointer"+ str(marker) + ";\n"
param += "doublepointer" + str(marker) + ", "
header_args += ty + "doublepointer" + str(marker) + ", "
elif "char" in ty or "string" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeIntegral<" + ty.replace("*", "") + ">();\n" + ty.replace("*", "") + "*pointer"+ str(marker) + " = &data" + str(marker) + ";\n" + ty.replace("*", "") + "**doublepointer"+str(marker) + " = &pointer"+ str(marker) + ";\n"
param += "doublepointer" + str(marker) + ", "
header_args += ty + "doublepointer" + str(marker) + ", "
elif "float" in ty or "double" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeFloatingPoint<" + ty.replace("*", "") + ">();\n" + ty.replace("*", "") + "*pointer"+ str(marker) + " = &data" + str(marker) + ";\n" + ty.replace("*", "") + "**doublepointer"+str(marker) + " = &pointer"+ str(marker) + ";\n"
param += "doublepointer" + str(marker) + ", "
header_args += ty + "doublepointer" + str(marker) + ", "
elif "bool" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeBool();\n" + ty.replace("*", "") + "*pointer" + str(marker) + " = &data" + str(marker) + ";\n" + ty.replace("*", "") + "**doublepointer"+str(marker) + " = &pointer"+ str(marker) + ";\n"
param += "doublepointer" + str(marker) + ", "
header_args += ty + "doublepointer" + str(marker) + ", "
else:
continue
else:
if "long" in ty or "int" in ty or "short" in ty and "long double" not in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeIntegral<" + ty +">();\n"
param += "data" + str(marker) + ", "
header_args += ty + " data" + str(marker) + ", "
elif "char" in ty or "string" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeIntegral<" + ty +">();\n"
param += "data" + str(marker) + ", "
header_args += ty + " data" + str(marker) + ", "
elif "float" in ty or "double" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeFloatingPoint<" + ty +">();\n"
param += "data" + str(marker) + ", "
header_args += ty + " data" + str(marker) + ", "
elif "bool" in ty:
stub += "auto data" + str(marker) + "= provider.ConsumeBool();\n"
param += "data" + str(marker) + ", "
header_args += ty + " data" + str(marker) + ", "
else:
continue
marker+= 1
param = rreplace(param,', ','',1)
header_args = rreplace(header_args,', ','',1)
main_section = "#include <stdlib.h>\n#include <dlfcn.h>\n\nvoid* library=NULL;\ntypedef " + str(elf_functions["type_or_loc"][index4]) + "(*" + str(elf_functions["function"][index4]) + "_t)(" + header_args + ");\nvoid CloseLibrary()\n{\nif(library){\n\tdlclose(library);\n\tlibrary=NULL;\n}\n}\nint LoadLibrary(){\n\tlibrary = dlopen(\"" + arguments.library + str(elf_functions["object"][index4]) + "\",RTLD_LAZY);\n\tatexit(CloseLibrary);\n\treturn library != NULL;\n}\nextern \"C\" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {\n\tFuzzedDataProvider provider(data, size);\n\t\n\tLoadLibrary();\n\t" + stub + str(elf_functions["function"][index4]) + "_t " + str(elf_functions["function"][index4]) + "_s = (" + str(elf_functions["function"][index4]) + "_t)dlsym(library,\"" + str(elf_functions["function"][index4]) + "\");\n\t" + str(elf_functions["function"][index4]) + "_s(" + param + ");\n\treturn 0;\n}"
full_source = header_section + main_section
filename = "".join([c for c in str(elf_functions["function"][index4]) if c.isalpha() or c.isdigit() or c==' ']).rstrip()
f = open(arguments.output + filename +".cc", "w")
f.write(full_source)
if arguments.flags is not None and int(arguments.debug) == 1:
subprocess.Popen("clang++ -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " " + arguments.output + filename +".cc -o " + arguments.output + filename, env=self.env, shell=True)
elif arguments.flags is not None and int(arguments.debug) == 0:
subprocess.Popen("clang++ -g -fsanitize=address,undefined,fuzzer " + arguments.flags + " " + arguments.output + filename +".cc -o " + arguments.output + filename, env=self.env, shell=True, stdout=DEVNULL, stderr=STDOUT)
elif arguments.flags is None and int(arguments.debug) == 1:
subprocess.Popen("clang++ -g -fsanitize=address,undefined,fuzzer " + arguments.output + filename +".cc -o " + arguments.output + filename, env=self.env, shell=True)
else:
subprocess.Popen("clang++ -g -fsanitize=address,undefined,fuzzer " + arguments.output + filename +".cc -o " + arguments.output + filename, env=self.env, shell=True, stdout=DEVNULL, stderr=STDOUT)
else:
print("Invalid Mode")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
48443,
14629,
14,
8800,
14,
29412,
18,
13,
24,
201,
198,
29113,
29113,
14468,
201,
198,
2235,
220,
220,
220,
220,
220,
220,
17406,
515,
376,
4715,
278,
2113,
... | 2.219731 | 19,269 |
BOT_TOKEN = '' | [
33,
2394,
62,
10468,
43959,
796,
10148
] | 2 | 7 |
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import inch
PAGE_HEIGHT=defaultPageSize[1]
PAGE_WIDTH=defaultPageSize[0]
styles = getSampleStyleSheet()
# First we import some constructors, some paragraph styles and other conveniences from other modules.
Title = "Hello world"
pageinfo = "platypus example"
| [
6738,
989,
23912,
13,
489,
265,
4464,
385,
1330,
17427,
23579,
30800,
11,
2547,
6111,
11,
1338,
11736,
198,
6738,
989,
23912,
13,
8019,
13,
47720,
1330,
651,
36674,
21466,
3347,
316,
198,
6738,
989,
23912,
13,
45895,
62,
11250,
1330,
... | 3.307143 | 140 |
#!/usr/bin/python
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
"""See docstring for PackageInfoVersioner class"""
# Disabling warnings for env members and imports that only affect recipe-
# specific processors.
# pylint: disable=e1101,f0401
from xml.dom import minidom
from autopkglib import Processor, ProcessorError
__all__ = ["PackageInfoVersioner"]
class PackageInfoVersioner(Processor):
"""Get version from a PackageInfo file in a distribution/bundle package"""
description = __doc__
input_variables = {
"package_info_path": {
"required": True,
"description": ("Path to PackageInfo file inside a distribution",
"/bundle package.")
}
}
output_variables = {
"version": {
"description": "Version returned from pkg-info field in PacakgeInfo."
}
}
__doc__ = description
if __name__ == "__main__":
PROCESSOR = PackageInfoVersioner()
PROCESSOR.execute_shell()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
347,
10305,
12,
7635,
5964,
1043,
287,
262,
198,
2,
38559,
... | 3.172507 | 371 |
## l2_attack.py -- attack a network optimizing for l_2 distance
##
## Copyright (C) 2017, Hongge Chen <chenhg@mit.edu>.
## Copyright (C) 2017, Huan Zhang <ecezhang@ucdavis.edu>.
## Copyright (C) 2016, Nicholas Carlini <nicholas@carlini.com>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
import sys
import tensorflow as tf
import numpy as np
import time
import timeit
from im2txt.inference_utils import vocabulary
from im2txt.inference_utils import caption_generator
BINARY_SEARCH_STEPS = 1 # number of times to adjust the constant with binary search
MAX_ITERATIONS = 10000 # number of iterations to perform gradient descent
ABORT_EARLY = True # if we stop improving, abort gradient descent early
LEARNING_RATE = 2e-3 # larger values converge faster to less accurate results
TARGETED = True # should we target one specific class? or just be wrong?
CONFIDENCE = 0 # how strong the adversarial example should be
INITIAL_CONST = 1 # the initial constant c to pick as a first guess
| [
2235,
300,
17,
62,
20358,
13,
9078,
1377,
1368,
257,
3127,
45780,
329,
300,
62,
17,
5253,
198,
2235,
198,
2235,
15069,
357,
34,
8,
2177,
11,
9764,
469,
12555,
1279,
6607,
71,
70,
31,
2781,
13,
15532,
28401,
198,
2235,
15069,
357,
... | 3.213018 | 338 |
import pandas as pd
import numpy as np
import argparse
from graphviz import Graph
from colour import Color
import umap.umap_ as umap
import matplotlib.pyplot as plt
import matplotlib
'''
Creates visualization of sample relatedness (using LOD scores from CrosscheckFingerprints)
Samples with low coverage at fingerprinting sites will be more opaque than other samples
Run command:
python3 sample_relatedness_network.py \
-M /path/to/matrix_output.txt \
-S /path/to/sample_individual_map.tsv
'''
hex_codes = ["#000000","#FFFF00","#1CE6FF","#FF34FF","#FF4A46","#008941","#006FA6", "#FFDBE5", "#0000A6","#63FFAC","#8FB0FF", "#5A0007", "#FEFFE6", "#4FC601", "#BA0900","#6B7900","#00C2A0","#FFAA92","#FF90C9","#B903AA","#D16100","#DDEFFF", "#A1C299", "#0AA6D8","#013349","#00846F","#372101","#FFB500","#C2FFED","#A079BF","#CC0744","#C0B9B2","#C2FF99","#001E09","#00489C","#6F0062","#0CBD66","#EEC3FF","#456D75","#B77B68","#7A87A1","#788D66","#885578","#FAD09F","#FF8A9A","#D157A0","#BEC459","#456648","#0086ED","#886F4C","#34362D","#B4A8BD","#00A6AA","#452C2C","#636375","#A3C8C9","#FF913F","#938A81","#575329","#00FECF","#B05B6F","#8CD0FF","#3B9700","#04F757","#C8A1A1","#1E6E00","#7900D7","#A77500","#6367A9","#A05837","#6B002C","#772600","#D790FF","#9B9700","#549E79","#FFF69F","#201625","#72418F","#BC23FF","#99ADC0","#3A2465","#922329","#5B4534","#FDE8DC","#404E55","#0089A3","#CB7E98","#A4E804","#324E72","#6A3A4C","#83AB58","#001C1E","#D1F7CE","#004B28","#C8D0F6","#A3A489","#806C66","#222800","#BF5650","#E83000","#66796D","#DA007C","#FF1A59","#8ADBB4","#1E0200","#5B4E51","#C895C5","#320033","#FF6832","#66E1D3","#CFCDAC","#D0AC94","#7ED379","#012C58","#7A7BFF","#D68E01","#353339","#78AFA1","#FEB2C6","#75797C","#837393","#943A4D","#B5F4FF","#D2DCD5","#9556BD","#6A714A","#001325","#02525F","#0AA3F7","#E98176","#DBD5DD","#5EBCD1","#3D4F44","#7E6405","#02684E","#962B75","#8D8546","#9695C5","#E773CE","#D86A78","#3E89BE","#CA834E","#518A87","#5B113C","#55813B","#E704C4","#00005F","#A97399","#4B8160","#59738A","#FF5DA7","#F7C9BF","#643127","#513A01","#6B94AA","#51A058","#A45B02","#1D1702","#E20027","#E7AB63","#4C6001","#9C6966","#64547B","#97979E","#006A66","#391406","#F4D749","#0045D2","#006C31","#DDB6D0","#7C6571","#9FB2A4","#00D891","#15A08A","#BC65E9","#FFFFFE","#C6DC99","#203B3C","#671190","#6B3A64","#F5E1FF","#FFA0F2","#CCAA35","#374527","#8BB400","#797868","#C6005A","#3B000A","#C86240","#29607C","#402334","#7D5A44","#CCB87C","#B88183","#AA5199","#B5D6C3","#A38469","#9F94F0"
,"#A74571","#B894A6","#71BB8C","#00B433","#789EC9","#6D80BA","#953F00","#5EFF03","#E4FFFC","#1BE177","#BCB1E5","#76912F","#003109","#0060CD","#D20096","#895563","#29201D","#5B3213","#A76F42","#89412E","#1A3A2A","#494B5A","#A88C85","#F4ABAA","#A3F3AB","#00C6C8","#EA8B66","#958A9F","#BDC9D2","#9FA064","#BE4700","#658188","#83A485","#453C23","#47675D","#3A3F00","#061203","#DFFB71","#868E7E","#98D058","#6C8F7D","#D7BFC2","#3C3E6E","#D83D66","#2F5D9B","#6C5E46","#D25B88","#5B656C","#00B57F","#545C46","#866097","#365D25","#252F99","#00CCFF","#674E60","#FC009C","#92896B"]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-S', default=None, type=str, help='sample individual map')
parser.add_argument('-M', default=None, type=str, help='matrix output from crosscheck')
args = parser.parse_args()
graph_network(args.S, args.M)
| [
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1822,
29572,
198,
6738,
4823,
85,
528,
1330,
29681,
198,
6738,
9568,
1330,
5315,
198,
11748,
334,
8899,
13,
388,
499,
62,
355,
334,
8899,
198,
11748... | 1.945727 | 1,732 |
from modules.bounty.models.bounty import Bounty
from modules.bounty.models.user_bounty import UserBounty
| [
6738,
13103,
13,
65,
17705,
13,
27530,
13,
65,
17705,
1330,
38166,
198,
6738,
13103,
13,
65,
17705,
13,
27530,
13,
7220,
62,
65,
17705,
1330,
11787,
33,
17705,
198
] | 3.5 | 30 |
#!/home/pi/git-repos/IoT-Microservice/venv/bin/python3
# CHANGE PYTHON PATH TO MATCH YOUR LOCAL INSTALLATION
from datetime import datetime
from dbManager import add_entry_scd41, convertTimeStampToUTCString
from raspberry_pi_i2c_scd4x_python.i2c_class import I2C
from raspberry_pi_i2c_scd4x_python.sensor_class import SCD4x
i2c = I2C()
scd41 = SCD4x()
i2c.sensirion_i2c_hal_init()
scd41.scd4x_measure_single_shot()
if scd41.scd4x_get_data_ready_status():
timestamp = convertTimeStampToUTCString(datetime.now())
m = scd41.scd4x_read_measurement()
if m is not None:
print(f"CO2: {m[0]:.2f}ppm, temp: {m[1]:.2f}°C, rh: {m[2]:.2f}%")
add_entry_scd41(timestamp, m[0], m[1], m[2])
i2c.sensirion_i2c_hal_free()
| [
2,
48443,
11195,
14,
14415,
14,
18300,
12,
260,
1930,
14,
40,
78,
51,
12,
13031,
15271,
14,
574,
85,
14,
8800,
14,
29412,
18,
198,
2,
5870,
27746,
350,
56,
4221,
1340,
46490,
5390,
337,
11417,
16592,
37347,
1847,
40589,
7036,
6234,
... | 2.081921 | 354 |
##library
import tkinter as tk
from tkinter.ttk import *
from tkinter import *
from tkcalendar import DateEntry
import datetime
import time
from settings import Variables
import operation as op
from databse import data, return_data
v = Variables()
d = data()
rd = return_data()
| [
2235,
32016,
201,
198,
11748,
256,
74,
3849,
355,
256,
74,
201,
198,
6738,
256,
74,
3849,
13,
926,
74,
1330,
1635,
201,
198,
6738,
256,
74,
3849,
1330,
1635,
201,
198,
6738,
256,
74,
9948,
9239,
1330,
7536,
30150,
201,
198,
201,
1... | 2.577236 | 123 |
import re
from .. import factories as f
| [
11748,
302,
198,
6738,
11485,
1330,
17590,
355,
277,
628
] | 4.1 | 10 |
import json
from django import template
from django.utils.safestring import mark_safe
from django_editorjs_fields.config import IFRAME_ALLOWED_SITES_EXEC_JS
from urllib.parse import urlparse
register = template.Library()
@register.filter(is_safe=True)
| [
11748,
33918,
198,
198,
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
26791,
13,
49585,
395,
1806,
1330,
1317,
62,
21230,
198,
6738,
42625,
14208,
62,
35352,
8457,
62,
25747,
13,
11250,
1330,
314,
10913,
10067,
62,
7036,
... | 3.011236 | 89 |
## Method to display all elements inside the Linked List
## Insertion of an element
## 1. Beginning of the Linked List
## Time Complexity : O(1)
## 2. End of the Linked List
## Time Complexity : O(n)
ll = LinkedList()
ll.insert_at_begining(10)
ll.insert_at_begining(20)
ll.insert_at_end(55)
ll.insert_at_begining(45)
ll.print() | [
198,
220,
22492,
11789,
284,
3359,
477,
4847,
2641,
262,
7502,
276,
7343,
628,
220,
22492,
35835,
295,
286,
281,
5002,
628,
220,
22492,
352,
13,
25976,
286,
262,
7502,
276,
7343,
198,
220,
22492,
3862,
19157,
414,
1058,
440,
7,
16,
... | 2.76 | 125 |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import fastapi
from starlette.routing import Match
from opentelemetry.configuration import Configuration
from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware
from opentelemetry.instrumentation.fastapi.version import __version__ # noqa
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
_excluded_urls = Configuration()._excluded_urls("fastapi")
class FastAPIInstrumentor(BaseInstrumentor):
"""An instrumentor for FastAPI
See `BaseInstrumentor`
"""
_original_fastapi = None
@staticmethod
def instrument_app(app: fastapi.FastAPI):
"""Instrument an uninstrumented FastAPI application.
"""
if not getattr(app, "is_instrumented_by_opentelemetry", False):
app.add_middleware(
OpenTelemetryMiddleware,
excluded_urls=_excluded_urls,
span_details_callback=_get_route_details,
)
app.is_instrumented_by_opentelemetry = True
def _get_route_details(scope):
"""Callback to retrieve the fastapi route being served.
TODO: there is currently no way to retrieve http.route from
a starlette application from scope.
See: https://github.com/encode/starlette/pull/804
"""
app = scope["app"]
route = None
for starlette_route in app.routes:
match, _ = starlette_route.matches(scope)
if match == Match.FULL:
route = starlette_route.path
break
if match == Match.PARTIAL:
route = starlette_route.path
# method only exists for http, if websocket
# leave it blank.
span_name = route or scope.get("method", "")
attributes = {}
if route:
attributes["http.route"] = route
return span_name, attributes
| [
2,
15069,
383,
4946,
31709,
41935,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
9... | 2.841855 | 841 |
import pytest
pytestmark = pytest.mark.sphinx('html', testroot='acquire')
@pytest.mark.parametrize('page', ['folder1/about.html', ], indirect=True)
@pytest.mark.parametrize('json_page', ['debug_dump.json', ], indirect=True)
| [
11748,
12972,
9288,
198,
198,
9078,
9288,
4102,
796,
12972,
9288,
13,
4102,
13,
82,
746,
28413,
10786,
6494,
3256,
1332,
15763,
11639,
330,
29782,
11537,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
10786,
7700,
3256,
... | 2.72619 | 84 |
from gym.envs.registration import register
register(
id='tic_tac_toe-v0',
entry_point='gym_tic_tac_toe.envs:TicTacToeEnv',
)
| [
6738,
11550,
13,
268,
14259,
13,
2301,
33397,
1330,
7881,
198,
198,
30238,
7,
198,
220,
220,
220,
4686,
11639,
13370,
62,
83,
330,
62,
44579,
12,
85,
15,
3256,
198,
220,
220,
220,
5726,
62,
4122,
11639,
1360,
76,
62,
13370,
62,
83... | 2.126984 | 63 |
import requests, sys
from datetime import datetime
if __name__ == "__main__":
import schedule, time
schedule.every(1).minutes.do(saveJob)
print('Starting script to save exchange statistics...')
while True:
schedule.run_pending()
time.sleep(1)
| [
11748,
7007,
11,
25064,
201,
198,
6738,
4818,
8079,
1330,
4818,
8079,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
201,
198,
201,
198,
220,
220,
220,
1330,
7269,
11,
640,
201,
198,
201,
198,
220,
220,... | 2.419355 | 124 |
# -*- coding: utf-8 -*-
from persistencia.perdocumento import PerDocumento
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
21160,
29634,
13,
525,
22897,
78,
1330,
2448,
24941,
78,
628
] | 2.923077 | 26 |
import ROOT, atexit, sys
#-----prepare python exit-----------------------------------------------
ROOT.gInterpreter.ProcessLine('typedef double Double32_t')
| [
11748,
15107,
2394,
11,
379,
37023,
11,
25064,
198,
2,
30934,
46012,
533,
21015,
8420,
3880,
24305,
198,
198,
13252,
2394,
13,
70,
9492,
3866,
353,
13,
18709,
13949,
10786,
774,
9124,
891,
4274,
11198,
2624,
62,
83,
11537,
628
] | 3.975 | 40 |
"""
This example shows that most computations can be performed by numpy functions
on arrays of UTPM objects.
Just bear in mind that is much faster use UTPM instances of matrices than numpy.ndarrays
with UTPM elements.
"""
import numpy, os
from algopy import CGraph, Function, UTPM, dot, qr, eigh, inv
N,D,P = 2,2,1
cg = CGraph()
x = numpy.array([ Function(UTPM(numpy.random.rand(*(D,P)))) for n in range(N)])
A = numpy.outer(x,x)
A = numpy.exp(A)
y = numpy.dot(A,x)
cg.independentFunctionList = list(x)
cg.dependentFunctionList = list(y)
cg.plot(os.path.join(os.path.dirname(__file__),'numpy_dot_graph.svg'))
| [
37811,
198,
1212,
1672,
2523,
326,
749,
2653,
602,
460,
307,
6157,
416,
299,
32152,
5499,
198,
261,
26515,
286,
19255,
5868,
5563,
13,
198,
198,
5703,
6842,
287,
2000,
326,
318,
881,
5443,
779,
19255,
5868,
10245,
286,
2603,
45977,
62... | 2.605932 | 236 |
thetao_30lon_30lat = ds['thetao'].sel(lon=30, lat=30, method='nearest', tolerance=1)
thetao_30lon_30lat | [
1169,
83,
5488,
62,
1270,
14995,
62,
1270,
15460,
796,
288,
82,
17816,
1169,
83,
5488,
6,
4083,
741,
7,
14995,
28,
1270,
11,
3042,
28,
1270,
11,
2446,
11639,
710,
12423,
3256,
15621,
28,
16,
8,
198,
1169,
83,
5488,
62,
1270,
14995... | 2.191489 | 47 |
APP_ERROR_SEND_NOTIFICATION = True
APP_ERROR_RECIPIENT_EMAIL = None
APP_ERROR_SUBJECT_PREFIX = ""
APP_ERROR_MASK_WITH = "THIS_IS_MASK"
APP_ERROR_MASKED_KEY_HAS = ('key', 'password', 'secret')
APP_ERROR_URL_PREFIX = "/dev/error"
| [
24805,
62,
24908,
62,
50,
10619,
62,
11929,
30643,
6234,
796,
6407,
198,
24805,
62,
24908,
62,
38827,
4061,
28495,
62,
27630,
4146,
796,
6045,
198,
24805,
62,
24908,
62,
50,
10526,
23680,
62,
47,
31688,
10426,
796,
13538,
198,
24805,
... | 2.326531 | 98 |
""" A3C in Code - Centralized/ Gobal Network Parameter Server/ Controller
Based On:
A3C Code as in the book Deep Reinforcement Learning, Chapter 12.
Runtime: Python 3.6.5
Dependencies: numpy, matplotlib, tensorflow (/ tensorflow-gpu), gym
DocStrings: GoogleStyle
Author : Mohit Sewak (p20150023@goa-bits-pilani.ac.in)
Inspired from: A3C implementation on TensorFLow official github repository (Tensorflow/models/research)
**********************************************************************
Adjusted by Seabstian Taciak as part of develeopment of MLxE Architecture
@author: sebtac
@contact: https://www.linkedin.com/in/sebastian-taciak-5893861/
"""
# SET BEFORE RUNNIG
# AGENT TYPE
# 0 - Sewak Base Agent (Fixed)
# 1 - Sewak DNN Adjusted
# 2 - Sewak "Task" Modified
# 3 - Sewak ISTB (Iterative, Synchronous Thread Based)
Agent_Type = 3
learning_rate = 0.0001
import multiprocessing
cores = multiprocessing.cpu_count() # DEFAULT SETTING
#cores = 1 # FOR DEBUGGING
# GENERAL IMPORTS
import sys
sys.path.append(r'C:\Users\surface\Documents\Python\RL\MLxE\Mohit Sewak RL\Mohit12_A3C')
import time
import winsound
import logging
import os
import numpy as np
import matplotlib.pyplot as plt
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# DEEP LEARING and ENVIRONEMENT RELATER IMPORTS
import tensorflow as tf
import tensorflow_addons as tfa # ST for DNN Adjustment
import gym
# CUSTOM SEWAK's MODULES with OPTIONAL SEBTAC ADJUSTMENTS
from experience_replay_sewak import SimpleListBasedMemory
if Agent_Type == 0:
from actorcritic_model_sewak import ActorCriticModel as ACModel # For Sewak Fixed version
from a3c_worker_sewak_base import A3C_Worker # the intial Sewak's implementation with fixes of the Policy_Loss Calcultion
elif Agent_Type == 1:
from actorcritic_model_sewak import ActorCriticModel_Dimond as ACModel
from a3c_worker_sewak_DNN_Adjusted import A3C_Worker
elif Agent_Type == 2:
from actorcritic_model_sewak import ActorCriticModel_Dimond as ACModel
from a3c_worker_sewak_Task_Modifications import A3C_Worker
elif Agent_Type == 3:
from actorcritic_model_sewak import ActorCriticModel_DoubleDimond as ACModel
from a3c_worker_sewak_ISTB import A3C_Worker
# SEWAK's Implementation Fix
"""
- Policy Loss Calcualtion
- Using actual play in example generation (was random)
"""
# DNN Adjustments
"""
- Adding monotonic decrease in Learing Rate relative to the number of episodes run with:
self.alpha_power = 0.998
self.alpha_limit = 0.000001
- Modifying the Model to: common_network_size=[128,256,128], policy_network_size=[64,128,64], value_network_size=[64,128,64]
- Changing the Optimizer to RectifiedAdam -- requaires tensorflow_addons
- Changing Gamma coeffcient to 0.97
"""
# Task Specific Modifications
"""
- Modified state representation with addition of 5th parameter representing the squared distance of the cart from the center of the plane
- Adverse Initial Position
- Negative Reward: -10.0 (originally 0.0)
- Monotonically Decreasing Discount Factor (Gamma Coefficent)
- Goal Specific Reward for cart being close to center of the pland and the pole being close to vertical
"""
class A3C_Master():
"""A3C Master
Centralized Master class of A3C used for hosting the global network parameters and spawning the agents.
Args:
env_name (str): Name of a valid gym environment
model_dir (str): Directory for saving the model during training, and loading the same while playing
learning_rate (float): The learning rate (alpha) for the optimizer
Examples:
agent = A3C_Master()
agent.train()
agent.play()
"""
def train(self, cores):
"""Train the A3C agent
Main function to train the A3C agent after instantiation.
This method uses the number of processor cores to spawns as many Workers. The workers are spawned as
multiple parallel threads instead of multiple parallel processes. Being a threaded execution, the workers
share memory and hence can write directly into the shared global variables.
A more optimal, completely asynchronous implementation could be to spawn the workers as different processes
using a task queue or multiprocessing. In case if this is adopted, then the shared variables need to made
accessible in the distributed environment.
"""
a3c_workers = [A3C_Worker(self.master_model,
self.optimizer,
i,
self.env_name,
self.model_dir,
workers_num = cores,
learning_rate = learning_rate)
for i in range(cores)]
for i, worker in enumerate(a3c_workers):
logger.info("Starting worker {}".format(i))
worker.start()
[worker.join() for worker in a3c_workers]
self.plot_training_statistics()
def play(self):
"""Play the environment using a trained agent
This function opens a (graphical) window that will play a trained agent. The function will try to retrieve
the model saved in the model_dir with filename formatted to contain the associated env_name.
If the model is not found, then the function will first call the train function to start the training.
"""
env = self.env.unwrapped
state = env.reset()
model = self.master_model
model_path = os.path.join(self.model_dir, 'model_{}.h5'.format(self.env_name))
if not os.path.exists(model_path):
logger.info('A3CMaster: No model found at {}, starting fresh training before playing!'.format(model_path))
self.train()
logger.info('A3CMaster: Playing env, Loading model from: {}'.format(model_path))
print("Model Path:", model_path)
#model.load_weights(model_path)
done = False
step_counter = 0
reward_sum = 0
try:
while not done:
env.render(mode='rgb_array')
policy, value = model(tf.convert_to_tensor(state[None, :], dtype=tf.float32))
policy = tf.nn.softmax(policy)
action = np.argmax(policy)
state, reward, done, _ = env.step(action)
reward_sum += reward
logger.info("{}. Reward: {}, action: {}".format(step_counter, reward_sum, action))
step_counter += 1
except KeyboardInterrupt:
print("Received Keyboard Interrupt. Shutting down.")
finally:
env.close()
def plot_training_statistics(self, training_statistics=None):
"""Plot training statistics
This function plot the training statistics like the steps, rewards, discounted_rewards, and loss in each
of the training episode.
"""
training_statistics = A3C_Worker.global_shared_training_stats if training_statistics is None \
else training_statistics
all_episodes = []
all_steps = []
all_rewards = []
all_discounted_rewards = []
all_losses = []
for stats in training_statistics:
worker, episode, steps, reward, discounted_rewards, loss = stats
all_episodes.append(episode)
all_steps.append(steps)
all_rewards.append(reward)
all_discounted_rewards.append(discounted_rewards)
all_losses.append(loss)
self._make_double_axis_plot(all_episodes, all_steps, all_rewards)
self._make_double_axis_plot(all_episodes,all_discounted_rewards,all_losses, label_y1="Discounted Reward",
label_y2="Loss", color_y1="cyan", color_y2="black")
np.savetxt('run.csv', all_steps, delimiter=',', fmt='%d')
@staticmethod
def _make_double_axis_plot(data_x, data_y1, data_y2, x_label='Episodes (e)', label_y1='Steps To Episode Completion',
label_y2='Reward in each Episode', color_y1="red", color_y2="blue"):
"""Internal helper function for plotting dual axis plots
"""
fig, ax1 = plt.subplots()
ax1.set_xlabel(x_label)
ax1.set_ylabel(label_y1, color=color_y1)
ax1.plot(data_x, data_y1, color=color_y1)
ax2 = ax1.twinx()
ax2.set_ylabel(label_y2, color=color_y2)
ax2.plot(data_x, data_y2, color=color_y2)
fig.tight_layout()
plt.show()
if __name__ == "__main__":
"""Main function for testing the A3C Master code's implementation
"""
agent = A3C_Master(Agent_Type=Agent_Type)
agent.train(cores)
#agent.play()
for i in range(10):
winsound.Beep(500,500)
| [
37811,
317,
18,
34,
287,
6127,
532,
5694,
1143,
14,
402,
2572,
7311,
25139,
2357,
9652,
14,
22741,
198,
198,
15001,
1550,
25,
198,
220,
220,
220,
220,
198,
32,
18,
34,
6127,
355,
287,
262,
1492,
10766,
22299,
13442,
18252,
11,
7006,... | 2.485786 | 3,588 |
from tkinter import Label
from tkinter import StringVar
from gui.labels.styles import LABEL_FONT
| [
6738,
256,
74,
3849,
1330,
36052,
198,
6738,
256,
74,
3849,
1330,
10903,
19852,
198,
198,
6738,
11774,
13,
23912,
1424,
13,
47720,
1330,
406,
6242,
3698,
62,
37,
35830,
628
] | 3.193548 | 31 |
import sys
if __name__ == "__main__":
generate( sys.argv[ len(sys.argv) - 2],
sys.argv[ len(sys.argv) - 1],
sys.argv[1:-2] )
| [
11748,
25064,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
7716,
7,
25064,
13,
853,
85,
58,
18896,
7,
17597,
13,
853,
85,
8,
532,
362,
4357,
198,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.786517 | 89 |
'''Department models.'''
from django.contrib.auth.models import User as UserModel
from django.db import models
class Department(models.Model):
'''Represent a department.'''
name = models.CharField(max_length=128)
class UserDepartmentRelation(models.Model):
    '''Relates an user and a department.'''
    # NOTE(review): ForeignKey without on_delete is pre-Django-2.0 syntax
    # (implicitly CASCADE); confirm the project's Django version.
    user = models.ForeignKey(UserModel)
    department = models.ForeignKey(Department)
    # True when this user leads the department.
    leader = models.BooleanField(default=False)
| [
7061,
6,
36261,
4981,
2637,
7061,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
355,
11787,
17633,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198,
4871,
2732,
7,
27530,
13,
17633,
2599,
198,
... | 3.244604 | 139 |
import tensorflow as tf
import logictensornetworks as ltn
import numpy as np
from logictensornetworks import Forall,Exists, Equiv, Implies, And, Or, Not
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# generate artificial data
nr_of_bb = 4000
# minimal and maximal position and dimension of rectangles
# (each rectangle is a 4-vector; presumably [x, y, w, h] -- confirm against
# the is_left/is_right/... predicates defined elsewhere in this module)
min_xywh = [.0,.0,.2,.2]
max_xywh = [1.,1.,1.,1.]
# four lists of rectangles:\
# - bbs1 and bbs2 are used to generate examples R(x,y) with x in bbs1 and y in bbs2;
# - bbs12 = bbs1 + bbs2
# - bbst is the set of rectangles for test
bbs1 = np.random.uniform(min_xywh,max_xywh, size=(nr_of_bb, 4))
bbs2 = np.random.uniform(min_xywh,max_xywh, size=(nr_of_bb, 4))
bbs12 = np.concatenate([bbs1,bbs2],axis=0)
bbst = np.random.uniform([0, 0, .2, .2], [1, 1, 1, 1], size=(nr_of_bb, 4))
# Build training examples for the spatial relations between bounding boxes.
# Each example is the 8-dim concatenation of a pair (bbs1[i], bbs2[i]) that
# satisfies the relation's predicate.  The predicates (is_left, is_right,
# is_above, ... ) are defined elsewhere in this module.
def _relation_pairs(predicate):
    """Return a squeezed array of concatenated box pairs for which
    predicate(bbs1[i], bbs2[i]) holds."""
    return np.squeeze(np.array([np.concatenate([bbs1[i], bbs2[i]], axis=0)
                                for i in range(nr_of_bb)
                                if predicate(bbs1[i], bbs2[i])]))

# Positive examples of each relation.
left_data = _relation_pairs(is_left)
right_data = _relation_pairs(is_right)
above_data = _relation_pairs(is_above)
below_data = _relation_pairs(is_below)
contain_data = _relation_pairs(contains)
in_data = _relation_pairs(is_in)

# Negative examples of each relation.
non_left_data = _relation_pairs(is_not_left)
non_right_data = _relation_pairs(is_not_right)
# The original code bound the squeezed "not right" examples to the
# inconsistently named not_right_data (every other relation used non_*);
# keep that name as an alias so readers of either name still work.
not_right_data = non_right_data
non_above_data = _relation_pairs(is_not_above)
non_below_data = _relation_pairs(is_not_below)
non_contain_data = _relation_pairs(not_contains)
non_in_data = _relation_pairs(is_not_in)
# end of data generation
# start the definition of the language:
# variables for pairs of rectangles ....
# ... for positive examples of every relation (8-dim pair vectors)
lxy = ltn.variable("left_xy", tf.cast(left_data, tf.float32))
rxy = ltn.variable("right_xy",tf.cast(right_data,tf.float32))
bxy = ltn.variable("below_xy",tf.cast(below_data,tf.float32))
axy = ltn.variable("above_xy",tf.cast(above_data,tf.float32))
cxy = ltn.variable("contains_xy",tf.cast(contain_data,tf.float32))
ixy = ltn.variable("in_xy", tf.cast(in_data, tf.float32))
# ... for negative examples (they are placeholders of width 8 which are
# filled with freshly generated data every 100 training epochs)
nlxy = ltn.variable("not_left_xy", 8)
nrxy = ltn.variable("not_right_xy",8)
nbxy = ltn.variable("not_below_xy",8)
naxy = ltn.variable("not_above_xy",8)
ncxy = ltn.variable("not_conts_xy",8)
nixy = ltn.variable("not_is_in_xy",8)
# print name and shape of each positive-example variable
pxy = [lxy, rxy, bxy, axy, cxy, ixy]
npxy = [nlxy,nrxy,nbxy,naxy,ncxy,nixy]
for xy in pxy:
    print(xy.name,xy.shape)
# variables for single rectangles (4-dim)
x = ltn.variable("x",4)
y = ltn.variable("y",4)
z = ltn.variable("z",4)
# a rectangle and a set of rectangles used to show the results
ct = ltn.constant("ct",[.5,.5,.3,.3])
t = ltn.variable("t",tf.cast(bbst,tf.float32))
# relational predicates over 8-dim pair vectors
L = ltn.predicate("left",8)
R = ltn.predicate("right",8)
B = ltn.predicate("below",8)
A = ltn.predicate("above",8)
C = ltn.predicate("contains",8)
I = ltn.predicate("in",8)
# inv_P[i] is the converse relation of P[i] (left<->right, above<->below, ...)
P = [L,R,B,A,C,I]
inv_P = [R,L,A,B,I,C]
# constraints/axioms:
# 1) positive examples satisfy each relation,
# 2) negative examples do not,
# 3) each relation implies its converse with swapped arguments,
# 4) a relation and itself with swapped arguments never both hold.
constraints = [Forall(pxy[i],P[i](pxy[i]))
               for i in range(6)]
constraints += [Forall(npxy[i],Not(P[i](npxy[i])))
                for i in range(6)]
constraints += [Forall((x,y),Implies(P[i](x,y),inv_P[i](y,x)))
                for i in range(6)]
constraints += [Forall((x,y),Not(And(P[i](x,y),P[i](y,x))))
                for i in range(6)]
# constraints += [Forall((x,y,z),Implies(I(x,y),Implies(P[i](y,z),P[i](x,z)))) for i in range(6)]
# Maximize the minimum satisfaction level across all axioms (TF1 graph API).
loss = -tf.reduce_min(tf.concat(constraints,axis=0))
opt = tf.train.AdamOptimizer(0.05).minimize(loss)
init = tf.global_variables_initializer()
# number of random rectangles fed to x,y,z -- presumably consumed by
# get_feed_dict(), which is defined elsewhere in this module; confirm.
nr_random_bbs = 50
with tf.Session() as sess:
    # training: stop early once satisfaction exceeds 0.99
    sess.run(init)
    feed_dict = get_feed_dict()
    for i in range(10000):
        sess.run(opt,feed_dict=feed_dict)
        if i % 100 == 0:
            sat_level=sess.run(-loss, feed_dict=feed_dict)
            print(i, "sat level ----> ", sat_level)
            if sat_level > .99:
                break
    # evaluate the truth value of the (commented-out) transitivity formula
    print(sess.run([Forall((x,y,z),Implies(I(x,y),
                                           Implies(P[i](y,z),P[i](x,z))))
                    for i in range(6)],feed_dict=feed_dict))
    # evaluate the truth value of P(ct,t) where ct is a central rectangle, and
    # t is a set of rectangles randomly generated.
    preds = sess.run([X(ct,t) for X in P])
    # plot each relation's value at the centroid of every test rectangle
    fig = plt.figure(figsize=(12,8))
    jet = cm = plt.get_cmap('jet')  # NOTE(review): jet/cm appear unused below
    cbbst = bbst[:,:2] + 0.5*bbst[:,2:]
    for j in range(6):
        plt.subplot(2, 3, j + 1)
        plt.scatter(cbbst[:,0], cbbst[:,1], c=preds[j][:, 0])
    plt.show()
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
2604,
713,
641,
1211,
316,
5225,
355,
300,
34106,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2604,
713,
641,
1211,
316,
5225,
1330,
1114,
439,
11,
3109,
1023,
11,
7889,
452,
11,
34... | 2.051129 | 3,188 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from mpl_toolkits.mplot3d import Axes3D
# Global matplotlib styling: 12 pt axis/tick/legend text, Computer Modern
# serif rendered through LaTeX.
rcParams.update({
    # font size
    'axes.labelsize': 12,
    'xtick.labelsize': 12,
    'ytick.labelsize': 12,
    'legend.fontsize': 12,
    # typeface
    'font.family': 'serif',
    'font.serif': ['Computer Modern Roman'],
    'text.usetex': True,
})
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
1279,
46803,
18982,
29,
18,
13,
15,
3556,
46803,
18982,
29,
198,
198,
2,
1279,
8189,
3846,
29,
628,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
801... | 2.442708 | 192 |
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define the PreviousScans table and provide for import from multiple bases.
This table consists of the following fields
ScanID = Column(Integer, primary_key=True)
TimeStamp = Column(DateTime, nullable=False)
"""
from __future__ import print_function, absolute_import
import os
import csv
from ._dbtablebase import DBTableBase
from ._mysqldbmixin import MySQLDBMixin
__all__ = ['PreviousScansTable']
class PreviousScansTable(DBTableBase):
    """
    Abstract class for PreviousScansTable

    This table contains a single entry, the last time a scan was executed.
    """
    key_field = 'ScanID'
    fields = [key_field, 'TimeStamp']
    table_name = 'PreviousScans'

    @classmethod
    def factory(cls, db_dict, db_type, verbose):
        """Factory method to select subclass based on database type.

        Currently the types sql and csv are supported.

        Returns instance object of the defined type.

        Raises:
            ValueError: if db_type is not one of the supported types.
        """
        inst = None
        if verbose:
            print('notification factory datafile %s dbtype %s verbose %s'
                  % (db_dict, db_type, verbose))
        if db_type == 'csv':
            inst = CsvPreviousScansTable(db_dict, db_type, verbose)
        elif db_type == 'mysql':
            inst = MySQLPreviousScansTable(db_dict, db_type, verbose)
        else:
            # Bug fix: the original constructed the ValueError but never
            # raised it, silently returning None for unsupported db types.
            raise ValueError('Invalid prevscan table factory db_type %s'
                             % db_type)
        return inst
class CsvPreviousScansTable(PreviousScansTable):
    """
    PreviousScans Table functions for csv based table

    No csv-specific behavior is defined here; this subclass only tags the
    backend selected by PreviousScansTable.factory('csv', ...).
    """
class SQLPreviousScansTable(PreviousScansTable):
""""
Table representing the PreviousScans database table
This table supports a single dictionary that contains the data
when the table is intialized.
"""
def __init__(self, db_dict, dbtype, verbose):
"""Pass through to SQL"""
if verbose:
print('SQL Database type %s verbose=%s' % (db_dict, verbose))
super(SQLPreviousScansTable, self).__init__(db_dict, dbtype, verbose)
self.connection = None
def db_info(self):
"""
Display the db info and Return info on the database used as a
dictionary.
"""
try:
print('Database characteristics')
for key in self.db_dict:
print('%s: %s' % key, self.db_dict[key])
except ValueError as ve:
print('Invalid database configuration exception %s' % ve)
return self.db_dict
class MySQLPreviousScansTable(PreviousScansTable, MySQLDBMixin):
    """ Class representing the connection with a mysql database"""
    def __init__(self, db_dict, dbtype, verbose):
        """Read the input file into a dictionary."""
        super(MySQLPreviousScansTable, self).__init__(db_dict, dbtype, verbose)
        # connectdb()/_load_table() presumably come from MySQLDBMixin: open
        # the connection and populate the table -- confirm in the mixin.
        self.connectdb(db_dict, verbose)
        self._load_table()
| [
2,
357,
34,
8,
15069,
2177,
554,
10071,
7712,
3457,
13,
198,
2,
1439,
6923,
33876,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 2.745469 | 1,269 |
from pymote import * # @UnusedWildImport
import sys
import os # @Reimport
import numpy
from PySide.QtGui import QMainWindow, QMenu, QCursor, QFileDialog, QMessageBox
from PySide.QtCore import SIGNAL, QRect, QSize, QEvent
from matplotlib.figure import Figure
from matplotlib.patches import Circle
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg \
as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg \
as NavigationToolbar
from networkx.drawing.nx_pylab import draw_networkx_edges
from datetime import datetime
from matplotlib.collections import PatchCollection, LineCollection
import networkx as nx
from pymote.algorithm import NodeAlgorithm
from simulationui import Ui_SimulationWindow
from dictionarytreemodel import DictionaryTreeModel
from pymote.utils.localization.helpers import align_clusters, get_rms
from pymote.utils.memory.positions import Positions
from copy import deepcopy
# Marker subclass with no extra behavior here; used to distinguish node
# artists from other matplotlib patches on the canvas.
class NodeCircle(Circle):
    """ Circle with node data. """
# Marker subclass with no extra behavior here; used to distinguish message
# artists from other matplotlib patches on the canvas.
class MessageCircle(Circle):
    """ Circle with message data. """
from IPython.lib.guisupport import get_app_qt4, start_event_loop_qt4
def create_window(window_class, **kwargs):
    """Create a QT window in Python, or interactively in IPython with QT GUI
    event loop integration.

    If a file name is given on the command line, a pickled network is loaded
    from it and passed to the window; otherwise the window starts empty.
    Returns the created window.
    """
    global app
    app = get_app_qt4(sys.argv)
    # Keep a reference so the window is not garbage collected.
    app.references = set()

    net = None
    fname = None
    if len(sys.argv) > 1:
        fname = sys.argv[1]
        if os.path.exists(fname):
            net = read_pickle(fname)
        else:
            # Bug fix: the original passed the raw "Error opening file %s"
            # format string as the dialog *title* and the filename as the
            # body, so the %s was never substituted.
            QMessageBox.critical(
                None, "Error", "Error opening file %s" % fname,
                QMessageBox.Ok, QMessageBox.NoButton)
    window = window_class(net, fname)
    app.references.add(window)
    window.show()
    start_event_loop_qt4(app)
    return window
# Script entry point; main() is defined elsewhere in this module.
if __name__ == '__main__':
    main()
| [
6738,
279,
4948,
1258,
1330,
1635,
220,
1303,
2488,
3118,
1484,
25946,
20939,
198,
11748,
25064,
198,
11748,
28686,
220,
1303,
2488,
3041,
11748,
198,
11748,
299,
32152,
198,
6738,
9485,
24819,
13,
48,
83,
8205,
72,
1330,
1195,
13383,
2... | 2.504447 | 787 |
from abc import ABC
from datetime import timedelta
from typing import Mapping, Optional, Sequence
from snuba.clickhouse.translators.snuba.mappers import (
ColumnToFunction,
ColumnToLiteral,
ColumnToMapping,
ColumnToColumn,
SubscriptableMapper,
)
from snuba.clickhouse.translators.snuba.mapping import TranslationMappers
from snuba.datasets.entity import Entity
from snuba.datasets.plans.single_storage import SingleStorageQueryPlanBuilder
from snuba.datasets.storages import StorageKey
from snuba.datasets.storages.factory import get_writable_storage
from snuba.query.expressions import Column, FunctionCall, Literal
from snuba.query.extensions import QueryExtension
from snuba.query.processors import QueryProcessor
from snuba.query.processors.performance_expressions import (
apdex_processor,
failure_rate_processor,
)
from snuba.query.processors.basic_functions import BasicFunctionsProcessor
from snuba.query.processors.tags_expander import TagsExpanderProcessor
from snuba.query.processors.timeseries_processor import TimeSeriesProcessor
from snuba.query.project_extension import ProjectExtension
from snuba.query.timeseries_extension import TimeSeriesExtension
# Translation rules mapping logical columns of the transactions entity onto
# the physical ClickHouse storage expressions.
transaction_translator = TranslationMappers(
    columns=[
        # ip_address resolves to whichever of the v4/v6 columns is populated.
        ColumnToFunction(
            None,
            "ip_address",
            "coalesce",
            (
                FunctionCall(
                    None, "IPv4NumToString", (Column(None, None, "ip_address_v4"),),
                ),
                FunctionCall(
                    None, "IPv6NumToString", (Column(None, None, "ip_address_v6"),),
                ),
            ),
        ),
        # Empty-string user is treated as NULL.
        ColumnToFunction(
            None, "user", "nullIf", (Column(None, None, "user"), Literal(None, ""))
        ),
        # These column aliases originally existed in the ``discover`` dataset,
        # but now live here to maintain compatibility between the composite
        # ``discover`` dataset and the standalone ``transaction`` dataset. In
        # the future, these aliases should be defined on the Transaction entity
        # instead of the dataset.
        ColumnToLiteral(None, "type", "transaction"),
        ColumnToColumn(None, "timestamp", None, "finish_ts"),
        ColumnToColumn(None, "username", None, "user_name"),
        ColumnToColumn(None, "email", None, "user_email"),
        ColumnToColumn(None, "transaction", None, "transaction_name"),
        ColumnToColumn(None, "message", None, "transaction_name"),
        ColumnToColumn(None, "title", None, "transaction_name"),
        ColumnToMapping(None, "geo_country_code", None, "contexts", "geo.country_code"),
        ColumnToMapping(None, "geo_region", None, "contexts", "geo.region"),
        ColumnToMapping(None, "geo_city", None, "contexts", "geo.city"),
    ],
    subscriptables=[
        # tags[...] / contexts[...] / measurements[...] bracket access.
        SubscriptableMapper(None, "tags", None, "tags"),
        SubscriptableMapper(None, "contexts", None, "contexts"),
        SubscriptableMapper(None, "measurements", None, "measurements", nullable=True),
    ],
)
| [
6738,
450,
66,
1330,
9738,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
19720,
1330,
337,
5912,
11,
32233,
11,
45835,
198,
198,
6738,
3013,
22013,
13,
12976,
4803,
13,
7645,
75,
2024,
13,
16184,
22013,
13,
76,
46629,
1330,
35... | 2.61387 | 1,168 |
import os
# import time
import unittest
from dotenv import load_dotenv, find_dotenv
import signa
load_dotenv(find_dotenv())
# AWS / OneSignal configuration read from the environment (.env was loaded
# just above via load_dotenv).
aws_region = os.getenv('AWS_DEFAULT_REGION', 'us-east-1')
aws_access_key = os.getenv('AWS_ACCESS_KEY_ID')
aws_secret_key = os.getenv('AWS_SECRET_ACCESS_KEY')
# `or` (not a get() default) so an *empty* value also falls back to 's3'.
aws_s3_provider = os.getenv('AWS_S3_PROVIDER') or 's3'
aws_s3_bucket = os.getenv('AWS_S3_BUCKET')
onesignal_app_id = os.getenv('ONESIGNAL_APP_ID')
onesignal_api_key = os.getenv('ONESIGNAL_API_KEY')
| [
11748,
28686,
198,
2,
1330,
640,
198,
11748,
555,
715,
395,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
11,
1064,
62,
26518,
24330,
198,
11748,
1051,
64,
198,
198,
2220,
62,
26518,
24330,
7,
19796,
62,
26518,
24330,
28955,
... | 2.3125 | 224 |
# -*- coding: utf-8 -*-

import os.path
from datetime import datetime

from PyQt4 import QtCore
from PyQt4.QtCore import qDebug
from sqlobject.sqlbuilder import AND

from serieswatcher.const import *
from serieswatcher.models import Serie, Episode
from serieswatcher.thetvdb import TheTVDBSerie


class DownloadSerieTask(QtCore.QObject):
    """Task to update serie from the online database."""
    # Emitted when a serie finished updating; the int is presumably the
    # serie's index/id -- confirm against the emitting code.
    serieUpdated = QtCore.pyqtSignal(int)
    # Progress signal: two ints plus a dict of status details (payload
    # semantics defined by the emitter, not visible here).
    serieUpdateStatus = QtCore.pyqtSignal(int, int, dict)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
13,
6978,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
9485,
48,
83,
19,
1330,
33734,
14055,
198,
6738,
9485,
48,
83,
19,
13,
48,
83,
14055,
... | 2.987879 | 165 |
from practicum import McuBoard,find_mcu_boards
####################################
| [
6738,
1970,
39901,
1330,
1982,
84,
29828,
11,
19796,
62,
23209,
84,
62,
12821,
198,
198,
29113,
4242,
198
] | 4.473684 | 19 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Graba video leido desde la arducam
# Se le debe indicar el archivo de video a grabar y
# la duración de la captura en segundos.
# SINTAXIS: python capturar_video.py VIDEO TIEMPO
# 1- Ruta del video
# 2- Tiempo de grabacion en segundos
from ctypes import *
import ctypes
import sys
import os
import time
from PIL import Image
import numpy as np
import thread as thread
import math
from select import select
from evdev import InputDevice
from evdev import ecodes
from astropy.io import fits
import ArducamSDK
# Argument parsing: the script expects an image-name prefix and an image
# count; abort with a usage message otherwise.
if len(sys.argv) == 3:
    NOMBREIMG = sys.argv[1]
    NUMIMG = int(sys.argv[2])
else:
    print("Se requieren 2 argumentos: NOMBRE_IMAGENES NUMERO_IMAGENES")
    exit()
#### CONFIGURACION ARDUCAMSDK ################
COLOR_BYTE2RGB = 47 # No se modifico del original
CAMERA_MT9M001 = 0x4D091031 # No se modifico del original
SensorShipAddr = 186
I2C_MODE_8_16 = 1
usbVid = 0x52CB # No se modifico del original
Width = 1280 #1280
Height = 1024 #1024
cfg ={"u32CameraType":CAMERA_MT9M001,
"u32Width":Width,"u32Height":Height,
"u32UsbVersion":1,
"u8PixelBytes":1,
"u16Vid":0x52cb,
"u8PixelBits":8,
"u32SensorShipAddr":SensorShipAddr,
"emI2cMode":I2C_MODE_8_16 }
# FLAGS
global saveFlag,downFlag,flag,H_value,V_value,lx,ly,mx,my,dx,dy,W_zoom,H_zooM,handle,openFlag,initTime,storeFlag,bufferData,globalGain
global testPatternFlag
global integrationTime
global shutterWidth
openFlag = False
handle = {}
downFlag = False
flag = True
saveFlag = False
storeFlag = False
saveNum=0
H_value = 0
V_value = 0
W_zoom = 0
H_zoom = 0
lx = 0
ly = 0
mx = 0
my = 0
dx = 0
dy = 0
testPatternFlag = False;
# MT9M001 sensor register initialization table: [register address, value]
# rows, terminated by the [0xFFFF, 0xFFFF] sentinel.
regArr=[[0x01, 0x000C], # Row Start
        [0x02, 0x0014], # Column Start
        [0x03, Height - 1], # Window Height 0x03FF
        [0x04, Width - 1], # Window Width 0x04FF
        [0x05, 0x0009], # Horizontal Blanking
        [0x06, 0x0019], # Vertical Blanking
        [0x07, 0x0002], # Output Control
        [0x09, 0x0419], # Shutter Width 0x0419 (max: 0x3FFF)
        [0x0B, 0x0000], # Frame Restart
        [0x0C, 0x0000],#0x0100],
        [0x0D, 0x0000],
        [0x1E, 0x8000], # Read Mode 1 0x8000
        [0x20, 0x1104],
        [0x2B, 0x0008],
        [0x2C, 0x0008],
        [0x2D, 0x0008],
        [0x2E, 0x0008],
        [0x32, 0x0FFC], # Test Data Register
        [0x35, 0x0067], # Global Gain 0x0008 (max: 0x0067)
        [0x5F, 0x0904],
        #[0x60, 0x0000], # BLC offset: Even row, even column
        #[0x61, 0x0000], # BLC offset: Odd row, odd column
        #[0x62, 0x049F], # Black Level Calibration Control 0x0498 (No-BLC: 0x049F; Manual-BLC: 0x0499 & reg0x60/61/63/64)
        #[0x63, 0x0000], # BLC offset: Even row, odd column
        #[0x64, 0x0000], # BLC offset: Odd row, Even column
        [0x60, 0x002F], # BLC offset: Even row, even column
        [0x61, 0x002F], # BLC offset: Odd row, odd column
        [0x62, 0x0499], # Black Level Calibration Control 0x0498 (No-BLC: 0x049F; Manual-BLC: 0x0499 & reg0x60/61/63/64)
        [0x63, 0x000F], # BLC offset: Even row, odd column
        [0x64, 0x000F], # BLC offset: Odd row, Even column
        [0xF1, 0x0001],
        [0xFFFF, 0xFFFF]
       ]
globalGain = regArr[18][1]; # index 18 is the [0x35, Global Gain] row above
# Initial integration time computation (p. 16 of the MT9M001 datasheet).
rowTime = regArr[3][1] + 1 + 244 + regArr[4][1] - 19; #[pixel clock periods] default: 1514
resetDelay = 4*regArr[9][1] #[pixel clock periods] default: 0
overheadTime = 180; #[pixel clock periods]
shutterWidth = regArr[7][1]
integrationPeriods = shutterWidth*rowTime - overheadTime - resetDelay;
clockPeriod = 1000.0/24e6; #[ms] per period, assuming a 24 MHz pixel clock
integrationTime = integrationPeriods * clockPeriod; #[ms]
with open('integrationtime.txt','w') as it:
    it.write(str(integrationTime)+"\n")
print ("Initial integration time: %.3fms"%(integrationTime));
print ("Initial gain: 0x%02x"%(globalGain));
a_lock = thread.allocate_lock();
# Start worker threads; readThread / showAndSave are defined elsewhere in
# this module (Python 2 `thread` module API).  The bare `pass` statements
# are no-ops, presumably left over from removed try/except blocks -- confirm.
thread.start_new_thread( readThread,("Thread-2", flag,))
pass
thread.start_new_thread( showAndSave,("Thread-3",flag))
pass
# Script entry point: record the start time and begin capturing.
if __name__ == "__main__":
    initTime = time.time();
    init_and_read_arducam();
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
1902,
15498,
2008,
443,
17305,
748,
2934,
8591,
610,
6077,
321,
198,
2,
1001,
443,
390,
1350,
2699,
283,
1288,
393... | 2.282658 | 1,776 |
import select
import socket
from wsproto import WSConnection
from wsproto.connection import ConnectionState, SERVER
from wsproto.events import AcceptConnection, CloseConnection, Message, Ping, Request
from wsproto.extensions import PerMessageDeflate
# Shared counter; presumably updated by the (stripped) connection handling
# code -- confirm its use before relying on it.
count = 0
# Script entry point: run the listener until Ctrl-C, exiting quietly.
if __name__ == '__main__':
    try:
        start_listener()  # defined elsewhere in this module
    except KeyboardInterrupt:
        pass
| [
11748,
2922,
198,
11748,
17802,
198,
198,
6738,
266,
82,
1676,
1462,
1330,
25290,
32048,
198,
6738,
266,
82,
1676,
1462,
13,
38659,
1330,
26923,
9012,
11,
18871,
5959,
198,
6738,
266,
82,
1676,
1462,
13,
31534,
1330,
21699,
32048,
11,
... | 3.110169 | 118 |
from django.conf import settings
from django.contrib import messages as django_messages
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse, reverse_lazy
from django.views.generic import (
CreateView,
DetailView,
FormView,
ListView,
TemplateView,
UpdateView,
View,
)
from common.mixins import LargePanelMixin, PrivateMixin
from common.views import BaseOAuth2AuthorizationView
from data_import.models import DataType
# TODO: move this to common
from open_humans.mixins import SourcesContextMixin
from .forms import (
MessageProjectMembersForm,
OAuth2DataRequestProjectForm,
OnSiteDataRequestProjectForm,
RemoveProjectMembersForm,
SelectDatatypesForm,
)
from .models import (
ActivityFeed,
DataRequestProject,
DataRequestProjectMember,
OAuth2DataRequestProject,
OnSiteDataRequestProject,
)
# Module-level alias for the settings cap on members an unapproved project
# may have (referenced by CoordinatorOrActiveMixin's docstring below).
MAX_UNAPPROVED_MEMBERS = settings.MAX_UNAPPROVED_MEMBERS
class CoordinatorOrActiveMixin(object):
    """
    - Always let the coordinator view this page
    - Only let members view it if the project is active
    - Only let members view it if the project is not approved and less than
      MAX_UNAPPROVED_MEMBERS have joined.
    """
    # NOTE(review): the dispatch/test methods enforcing these rules are not
    # visible here; confirm they exist before relying on this mixin.
class ProjectMemberMixin(object):
"""
Add project_member and related helper methods.
"""
@property
@property
@property
class OnSiteDetailView(ProjectMemberMixin, CoordinatorOrActiveMixin, DetailView):
    """
    A base DetailView for on-site projects.
    """
    # All on-site views below resolve their object from this model.
    model = OnSiteDataRequestProject
class JoinOnSiteDataRequestProjectView(PrivateMixin, LargePanelMixin, OnSiteDetailView):
    """
    Display the consent form for a project.
    """

    template_name = "private_sharing/join-on-site.html"

    # pylint: disable=unused-argument
    def get(self, request, *args, **kwargs):
        """Skip straight to authorization when the member already consented."""
        if not self.project_joined_by_member:
            # Not joined yet: render the consent form normally.
            return super().get(request, *args, **kwargs)
        destination = reverse_lazy(
            "direct-sharing:authorize-on-site",
            kwargs={"slug": self.get_object().slug},
        )
        return HttpResponseRedirect(destination)
class ConnectedSourcesMixin(object):
    """
    Add context for connected/unconnected sources.
    """
    # NOTE(review): the get_context_data override implementing this is not
    # visible here; confirm it exists before relying on this mixin.
class AuthorizeOnSiteDataRequestProjectView(
    PrivateMixin, LargePanelMixin, ConnectedSourcesMixin, OnSiteDetailView
):
    """
    Display the requested permissions for a project.
    """

    template_name = "private_sharing/authorize-on-site.html"

    # pylint: disable=unused-argument
    def get(self, request, *args, **kwargs):
        """Send members who have not yet consented back to the consent form."""
        if self.project_joined_by_member:
            # Consent already given: show the authorization page as usual.
            return super().get(request, *args, **kwargs)
        destination = reverse_lazy(
            "direct-sharing:join-on-site",
            kwargs={"slug": self.get_object().slug},
        )
        return HttpResponseRedirect(destination)
class AuthorizeOAuth2ProjectView(
    ConnectedSourcesMixin, ProjectMemberMixin, BaseOAuth2AuthorizationView
):
    """
    Override oauth2_provider view to add origin, context, and customize login
    prompt.
    """

    template_name = "private_sharing/authorize-oauth2.html"

    def post(self, request, *args, **kwargs):
        """Record whether hidden membership was requested, then delegate."""
        self.hidden = request.POST.get("hide-membership", None)
        return super().post(request, *args, **kwargs)

    def form_valid(self, form):
        """Authorize the project member when the user approves the request."""
        if form.cleaned_data.get("allow"):
            # Only the exact "hidden_membership" value requests a hidden join.
            self.authorize_member(self.hidden == "hidden_membership")
        return super().form_valid(form)
class CoordinatorOnlyView(View):
    """
    Only let coordinators and superusers view these pages.
    """
    # NOTE(review): the access-check implementation is not visible here;
    # confirm it exists before relying on this view class.
class UpdateDataRequestProjectView(
    PrivateMixin, LargePanelMixin, CoordinatorOnlyView, UpdateView
):
    """
    Base view for updating a project.
    """
    # After a successful update, return to the project management page.
    success_url = reverse_lazy("direct-sharing:manage-projects")
class CreateDataRequestProjectView(PrivateMixin, LargePanelMixin, CreateView):
    """
    Base view for creating a project.
    """
    login_message = "Please log in to create a project."
    success_url = reverse_lazy("direct-sharing:manage-projects")

    def form_valid(self, form):
        """
        Override to add current user as coordinator.
        """
        # The creating member always becomes the project coordinator.
        form.instance.coordinator = self.request.user.member
        return super().form_valid(form)
class CreateOAuth2DataRequestProjectView(CreateDataRequestProjectView):
    """
    Create an OAuth2DataRequestProject.
    """
    # Reuses the generic create view; only the concrete model/form differ.
    template_name = "private_sharing/create-project.html"
    model = OAuth2DataRequestProject
    form_class = OAuth2DataRequestProjectForm
class CreateOnSiteDataRequestProjectView(CreateDataRequestProjectView):
    """
    Create an OnSiteDataRequestProject.
    """
    # Reuses the generic create view; only the concrete model/form differ.
    template_name = "private_sharing/create-project.html"
    model = OnSiteDataRequestProject
    form_class = OnSiteDataRequestProjectForm
class UpdateOAuth2DataRequestProjectView(UpdateDataRequestProjectView):
    """
    Update an OAuth2DataRequestProject.
    """

    template_name = "private_sharing/update-project.html"
    model = OAuth2DataRequestProject
    form_class = OAuth2DataRequestProjectForm

    def get_initial(self):
        """Seed the form with the OAuth2-specific fields of this project."""
        initial = super().get_initial()
        initial.update(
            enrollment_url=self.object.enrollment_url,
            redirect_url=self.object.redirect_url,
            deauth_webhook=self.object.deauth_webhook,
        )
        return initial
class UpdateOnSiteDataRequestProjectView(UpdateDataRequestProjectView):
    """
    Update an OnSiteDataRequestProject.
    """

    template_name = "private_sharing/update-project.html"
    model = OnSiteDataRequestProject
    form_class = OnSiteDataRequestProjectForm

    def get_initial(self):
        """Seed the form with the on-site-specific fields of this project."""
        initial = super().get_initial()
        initial.update(
            consent_text=self.object.consent_text,
            post_sharing_url=self.object.post_sharing_url,
        )
        return initial
class RefreshTokenMixin(object):
    """
    A mixin that adds a POST handler for refreshing a project's token.
    """
    # NOTE(review): the POST handler itself is not visible here; confirm it
    # exists before relying on this mixin.
    # pylint: disable=unused-argument
class OAuth2DataRequestProjectDetailView(
    PrivateMixin, CoordinatorOnlyView, RefreshTokenMixin, DetailView
):
    """
    Display an OAuth2DataRequestProject.
    """
    template_name = "private_sharing/project-detail.html"
    model = OAuth2DataRequestProject
class OnSiteDataRequestProjectDetailView(
    PrivateMixin, CoordinatorOnlyView, RefreshTokenMixin, DetailView
):
    """
    Display an OnSiteDataRequestProject.
    """
    template_name = "private_sharing/project-detail.html"
    model = OnSiteDataRequestProject
class ManageDataRequestActivitiesView(PrivateMixin, TemplateView):
    """
    A view for listing all data request activities for the current user.
    """
    # Message shown by PrivateMixin when an anonymous user hits this page.
    login_message = "Please log in to manage your projects."
    template_name = "private_sharing/manage.html"
class InDevelopmentView(TemplateView):
    """
    Add in-development projects to template context.
    """
    # NOTE(review): the get_context_data override is not visible here.
    template_name = "private_sharing/in-development.html"
class OverviewView(SourcesContextMixin, TemplateView):
    """
    Add current sources to template context.
    """
    template_name = "direct-sharing/overview.html"
class ProjectLeaveView(PrivateMixin, DetailView):
    """
    Let a member remove themselves from a project.
    """
    template_name = "private_sharing/leave-project.html"
    model = DataRequestProjectMember

    # pylint: disable=unused-argument
class BaseProjectMembersView(PrivateMixin, CoordinatorOnlyView, DetailView, FormView):
    """
    Base class for views for coordinators to take bulk action on proj members.
    """
    model = DataRequestProject
class MessageProjectMembersView(BaseProjectMembersView):
    """
    A view for coordinators to message their project members.
    """
    form_class = MessageProjectMembersForm
    template_name = "private_sharing/message-project-members.html"
class RemoveProjectMembersView(BaseProjectMembersView):
    """
    A view for coordinators to remove project members.
    """
    form_class = RemoveProjectMembersForm
    template_name = "private_sharing/remove-project-members.html"
class DataRequestProjectWithdrawnView(PrivateMixin, CoordinatorOnlyView, ListView):
    """
    A view for coordinators to list members that have requested data removal.
    """
    model = DataRequestProject
    paginate_by = 100
    template_name = "private_sharing/project-withdrawn-members-view.html"

    def withdrawn_members(self):
        """
        Returns a queryset with the members that have requested data erasure.
        """
        return self.object.project_members.get_queryset().filter(revoked=True)

    def get_object(self, queryset=None):
        """
        Implement get_object as a convenience function.
        """
        # NOTE(review): pulls the slug from a fixed position in the URL path;
        # this silently breaks if the URL structure changes -- prefer
        # self.kwargs if the URLconf exposes the slug. Confirm before changing.
        slug = self.request.path.split("/")[4]
        if queryset is None:
            queryset = self.get_queryset()
        self.object = queryset.get(slug=slug)
        return self.object
class SelectDatatypesView(PrivateMixin, CoordinatorOnlyView, UpdateView):
    """
    Select the datatypes for a project.
    """
    form_class = SelectDatatypesForm
    model = DataRequestProject
    success_url = reverse_lazy("direct-sharing:manage-projects")
    template_name = "private_sharing/select-datatypes.html"

    def dispatch(self, *args, **kwargs):
        """
        Override dispatch to redirect if project is approved
        """
        self.object = self.get_object()
        if self.object.approved:
            # Approved projects may not change datatypes; explain why and
            # bounce the coordinator back to the project detail page.
            django_messages.error(
                self.request,
                (
                    "Sorry, {0} has been approved and the project's datatypes cannot be changed "
                    "without re-approval.".format(self.object.name)
                ),
            )
            return HttpResponseRedirect(
                reverse(
                    "direct-sharing:detail-{0}".format(self.object.type),
                    kwargs={"slug": self.object.slug},
                )
            )
        return super().dispatch(*args, **kwargs)
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
355,
42625,
14208,
62,
37348,
1095,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
9515,
13921,
3673,
3109,
396,
198,
6738,
42625,
... | 2.701416 | 4,096 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 12/8/2018 9:00 PM
# @Author : chinshin
# @FileName: parsers.py
import os
from logging import getLogger
import numpy
import pandas
import pickle
from rdkit import Chem
from tqdm import tqdm
from chainer_chemistry.dataset.parsers.base_parser import BaseFileParser
from chainer_chemistry.dataset.preprocessors.common import MolFeatureExtractionError # NOQA
from chainer_chemistry.dataset.preprocessors.mol_preprocessor import MolPreprocessor # NOQA
from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset
import traceback
class CSVFileParserForPair(BaseFileParser):
    """CSV parser for molecule pairs.

    This FileParser parses a CSV file (via pandas). Each row must hold two
    SMILES columns (``smiles_cols``) describing a molecule pair, plus one
    or more label columns which are the targets to predict.

    Args:
        preprocessor (BasePreprocessor): preprocessor instance
        labels (str or list or None): labels column
        smiles_cols (list): smiles columns
        postprocess_label (Callable): post processing function if necessary
        postprocess_fn (Callable): post processing function if necessary
        logger:
    """

    def parse(self, filepath, return_smiles_pair=False, return_smiles_pair_original=False, target_index=None,
              return_is_successful=False):
        """parse a CSV file using `preprocessor`

        Labels are extracted from the `labels` columns and input features
        are extracted from the SMILES strings in the `smiles_cols` columns.

        Args:
            filepath (str): file path to be parsed.
            return_smiles_pair (bool): If set to `True`, the SMILES pairs from
                which input features were successfully made are returned in
                the key 'smiles_pair'. If `False`, `None` is returned there.
            return_smiles_pair_original (bool): If set to `True`, the raw
                SMILES pairs are returned in the key 'smiles_pair_original'.
            target_index (list or None): target index list to partially extract
                dataset. If None (default), all examples are parsed.
            return_is_successful (bool): If set to `True`, a boolean list is
                returned in the key 'is_successful'; it records whether
                preprocessing succeeded for each row. Otherwise `None`.

        Returns (dict): dictionary that contains Dataset, 1-d numpy array with
            dtype=object(string) which is a vector of smiles for each example
            or None.
        """
        df = pandas.read_csv(filepath)

        logger = self.logger
        pp = self.preprocessor
        smiles_pair_list = []
        smiles_pair_list_original = []
        is_successful_list = []

        if isinstance(pp, MolPreprocessor):
            if target_index is not None:
                df = df.iloc[target_index]

            features = None
            smiles_1_index = df.columns.get_loc(self.smiles_cols[0])
            smiles_2_index = df.columns.get_loc(self.smiles_cols[1])
            if self.labels is None:
                labels_index = []  # dummy list
            else:
                labels_index = [df.columns.get_loc(c) for c in self.labels]

            total_count = df.shape[0]
            fail_count = 0
            success_count = 0
            # Iterate over every row of the CSV file.
            for row in tqdm(df.itertuples(index=False), total=df.shape[0]):
                smiles_1 = row[smiles_1_index]
                smiles_2 = row[smiles_2_index]
                # currently it assumes list
                labels = [int(row[i]) for i in labels_index]
                try:
                    mol_1 = Chem.MolFromSmiles(smiles_1)
                    mol_2 = Chem.MolFromSmiles(smiles_2)

                    if mol_1 is None or mol_2 is None:
                        fail_count += 1
                        if return_is_successful:
                            is_successful_list.append(False)
                        continue

                    # Note that a SMILES expression is not unique; the raw
                    # (non-canonicalized) SMILES is used here deliberately.
                    input_features_1 = pp.get_input_features(mol_1)
                    input_features_2 = pp.get_input_features(mol_2)

                    # Extract label
                    if self.postprocess_label is not None:
                        labels = self.postprocess_label(labels)

                    if return_smiles_pair:
                        smiles_pair_list.append([smiles_1, smiles_2])
                    if return_smiles_pair_original:
                        smiles_pair_list_original.append([smiles_1, smiles_2])
                except MolFeatureExtractionError as e:
                    # Expected error: feature extraction failed, skip molecule.
                    fail_count += 1
                    if return_is_successful:
                        is_successful_list.append(False)
                    continue
                except Exception as e:
                    logger.warning('parse(), type: {}, {}'
                                   .format(type(e).__name__, e.args))
                    logger.info(traceback.format_exc())
                    fail_count += 1
                    if return_is_successful:
                        is_successful_list.append(False)
                    continue

                # Lazily initialize `features` (one sub-list per feature
                # column) on the first successful row.
                if features is None:
                    if isinstance(input_features_1, tuple):
                        num_features_1 = len(input_features_1)
                    else:
                        num_features_1 = 1
                    if isinstance(input_features_2, tuple):
                        num_features_2 = len(input_features_2)
                    else:
                        num_features_2 = 1
                    num_features = num_features_1 + num_features_2
                    if self.labels is not None:
                        num_features += 1
                    # list of list, a sublist corresponding to a certain feature
                    features = [[] for _ in range(num_features)]

                if isinstance(input_features_1, tuple):
                    for i in range(len(input_features_1)):
                        # features[i] is the list holding the i-th feature
                        features[i].append(input_features_1[i])
                else:
                    features[0].append(input_features_1)
                # BUG FIX: the original computed ``offset = len(input_features_1)``
                # unconditionally, which is wrong when ``input_features_1`` is a
                # single (non-tuple) feature: the offset would be the feature's
                # own length instead of the one slot it occupies.
                offset = num_features_1
                if isinstance(input_features_2, tuple):
                    for i in range(len(input_features_2)):
                        features[offset + i].append(input_features_2[i])
                else:
                    features[offset].append(input_features_2)

                # last column corresponds to the targeted label
                if self.labels is not None:
                    features[len(features) - 1].append(labels)

                success_count += 1
                if return_is_successful:
                    is_successful_list.append(True)

            # NOTE(review): if every row fails, ``features`` stays None and the
            # loop below raises TypeError -- confirm whether callers guarantee
            # at least one parseable row.
            ret = []
            for feature in features:
                try:
                    feat_array = numpy.asarray(feature)
                except ValueError:
                    # Temporal work around for ragged sequences; see
                    # https://stackoverflow.com/questions/26885508/why-do-i-get-error-trying-to-cast-np-arraysome-list-valueerror-could-not-broa
                    feat_array = numpy.empty(len(feature), dtype=numpy.ndarray)
                    feat_array[:] = feature[:]
                ret.append(feat_array)
            result = tuple(ret)
            logger.info('Preprocess finished. FAIL {}, SUCCESS {}, TOTAL {}'
                        .format(fail_count, success_count, total_count))
        else:
            raise NotImplementedError

        smiles_pairs = numpy.array(smiles_pair_list) if return_smiles_pair else None
        smiles_pairs_original = numpy.array(smiles_pair_list_original) if return_smiles_pair_original else None
        if return_is_successful:
            is_successful = numpy.array(is_successful_list)
        else:
            is_successful = None

        if isinstance(result, tuple):
            if self.postprocess_fn is not None:
                result = self.postprocess_fn(*result)
            dataset = NumpyTupleDataset(*result)
        else:
            if self.postprocess_fn is not None:
                result = self.postprocess_fn(result)
            dataset = NumpyTupleDataset(result)
        return {"dataset": dataset,
                "smiles_pair": smiles_pairs,
                "smiles_pair_original": smiles_pairs_original,
                "is_successful": is_successful}

    def extract_total_num(self, df):
        """Extracts total number of data which can be parsed

        We can use this method to determine the value fed to `target_index`
        option of `parse` method. For example, if we want to extract input
        feature from 10% of whole dataset, we need to know how many samples
        are in a file. The returned value of this method may not to be same as
        the final dataset size.

        Args:
            df (pandas.DataFrame): dataframe to be parsed.

        Returns (int): total number of dataset can be parsed.
        """
        return len(df)
class Mol2VecParserForPair(BaseFileParser):
    """Pandas dataframe parser for molecule pairs (mol2vec flavour).

    The parsed file should contain SMILES columns as input and a label
    column which is the target to predict.

    Args:
        preprocessor (BasePreprocessor): preprocessor instance
        labels (str or list or None): labels column
        smiles_cols (list): smiles columns
        postprocess_label (Callable): post processing function if necessary
        postprocess_fn (Callable): post processing function if necessary
        logger:
    """

    def extract_total_num(self, df):
        """Extract the total number of rows that can be parsed.

        Useful for sizing the ``target_index`` option of ``parse`` -- e.g.
        to preprocess only 10% of a dataset you need to know how many
        samples the file holds. The value may differ from the final dataset
        size (rows can still fail preprocessing).

        Args:
            df (pandas.DataFrame): dataframe to be parsed.

        Returns (int): total number of parseable rows.
        """
        total_rows = len(df)
        return total_rows
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
2488,
7575,
220,
220,
220,
1058,
1105,
14,
23,
14,
7908,
860,
25,
405,
3122,
201,
198,
2,
2488,
13838,... | 2.03097 | 5,586 |
from django.contrib import messages
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import slugify
from allauth.utils import generate_unique_username, email_address_exists
from allauth.account.utils import send_email_confirmation, \
perform_login, complete_signup
from allauth.account import app_settings as account_settings
from models import SocialLogin
import app_settings
import signals
def _name_from_url(url):
    """
    Derive a short slugified name from a URL, preferring the last path
    segment, then the whole path, then the network location.

    >>> _name_from_url('http://google.com/dir/file.ext')
    u'file.ext'
    >>> _name_from_url('http://google.com/dir/')
    u'dir'
    >>> _name_from_url('http://google.com/dir')
    u'dir'
    >>> _name_from_url('http://google.com/dir/..')
    u'dir'
    >>> _name_from_url('http://google.com/dir/../')
    u'dir'
    >>> _name_from_url('http://google.com')
    u'google.com'
    >>> _name_from_url('http://google.com/dir/subdir/file..ext')
    u'file.ext'
    """
    from urlparse import urlparse
    parsed = urlparse(url)
    candidates = (parsed.path.split('/')[-1],
                  parsed.path,
                  parsed.netloc)
    for base in candidates:
        # Slugify each dot-separated piece, drop empties, re-join with dots.
        slugged = [slugify(piece) for piece in base.split(".")]
        name = ".".join(piece for piece in slugged if piece)
        if name:
            return name
# TODO: Factor out callable importing functionality
# See: account.utils.user_display
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
62,
1462,
62,
26209,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
7738,
1060,
198,
6738,
42625,
14208,
13,
28243,
1330,
... | 2.536585 | 615 |
# Copyright (C) 2011, 2012 Lars Wirzenius
# Copyright (C) 2012 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import errno
import fcntl
import logging
import os
import select
import subprocess
import cliapp
def runcmd(argv, *args, **kwargs):
    '''Run external command or pipeline.

    Example: ``runcmd(['grep', 'foo'], ['wc', '-l'],
    feed_stdin='foo\nbar\n')``

    Return the standard output of the command.

    Raise ``cliapp.AppException`` if external command returns
    non-zero exit code. ``*args`` and ``**kwargs`` are passed
    onto ``subprocess.Popen``.

    '''

    # Strip out the options this wrapper consumes itself; everything left in
    # kwargs goes through to runcmd_unchecked / subprocess.Popen.
    our_options = (
        ('ignore_fail', False),
        ('log_error', True),
    )
    opts = {}
    for name, default in our_options:
        opts[name] = kwargs.pop(name, default)

    exit_code, out, err = runcmd_unchecked(argv, *args, **kwargs)
    if exit_code != 0:
        msg = 'Command failed: %s\n%s' % (' '.join(argv), err)
        if opts['ignore_fail']:
            if opts['log_error']:
                logging.info(msg)
        else:
            if opts['log_error']:
                logging.error(msg)
            raise cliapp.AppException(msg)
    return out
def runcmd_unchecked(argv, *argvs, **kwargs):
    '''Run external command or pipeline.

    Return the exit code, and contents of standard output and error
    of the command.

    See also ``runcmd``.

    '''

    argvs = [argv] + list(argvs)
    logging.debug('run external command: %s' % repr(argvs))

    # NOTE(review): ``pop_kwarg`` is not defined in this chunk; presumably a
    # module-level helper that removes a key from ``kwargs`` and returns its
    # value (or the default) -- confirm it exists.
    feed_stdin = pop_kwarg('feed_stdin', '')
    pipe_stdin = pop_kwarg('stdin', subprocess.PIPE)
    pipe_stdout = pop_kwarg('stdout', subprocess.PIPE)
    pipe_stderr = pop_kwarg('stderr', subprocess.PIPE)

    try:
        pipeline = _build_pipeline(argvs,
                                   pipe_stdin,
                                   pipe_stdout,
                                   pipe_stderr,
                                   kwargs)
        return _run_pipeline(pipeline, feed_stdin, pipe_stdin,
                             pipe_stdout, pipe_stderr)
    # FIX: use ``except ... as`` (valid since Python 2.6, required by
    # Python 3) instead of the removed ``except OSError, e`` comma syntax.
    except OSError as e:  # pragma: no cover
        if e.errno == errno.ENOENT and e.filename is None:
            # Attach the missing program name so the error is diagnosable.
            e.filename = argv[0]
            raise e
        else:
            raise
def shell_quote(s):
    '''Return a shell-quoted version of s.'''

    safe = set('abcdefghijklmnopqrstuvwxyz'
               'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               '0123456789'
               '-_/=.,:')

    def quote_char(ch):
        # Safe characters pass through untouched; a single quote is wrapped
        # in double quotes; every other character is wrapped in single quotes.
        if ch in safe:
            return ch
        if ch == "'":
            return '"\'"'
        return "'%c'" % ch

    return ''.join(quote_char(ch) for ch in s)
def ssh_runcmd(target, argv, **kwargs):  # pragma: no cover
    '''Run command in argv on remote host target.

    This is similar to runcmd, but the command is run on the remote
    machine. The command is given as an argv array; elements in the
    array are automatically quoted so they get passed to the other
    side correctly.

    An optional ``tty=`` parameter can be passed to ``ssh_runcmd`` in
    order to force or disable pseudo-tty allocation. This is often
    required to run ``sudo`` on another machine and might be useful
    in other situations as well. Supported values are ``tty=True`` for
    forcing tty allocation, ``tty=False`` for disabling it and
    ``tty=None`` for not passing anything tty related to ssh.

    With the ``tty`` option,
    ``cliapp.runcmd(['ssh', '-tt', 'user@host', '--', 'sudo', 'ls'])``
    can be written as
    ``cliapp.ssh_runcmd('user@host', ['sudo', 'ls'], tty=True)``
    which is more intuitive.

    The target is given as-is to ssh, and may use any syntax ssh
    accepts.

    Environment variables may or may not be passed to the remote
    machine: this is dependent on the ssh and sshd configurations.
    Invoke env(1) explicitly to pass in the variables you need to
    exist on the other end.

    Pipelines are not supported.

    '''

    # pop() both reads and removes 'tty' so it is not forwarded to runcmd.
    tty = kwargs.pop('tty', None)
    if tty:
        ssh_cmd = ['ssh', '-tt', target, '--']
    elif tty is False:
        ssh_cmd = ['ssh', '-T', target, '--']
    else:
        ssh_cmd = ['ssh', target, '--']

    # FIX: use a list comprehension instead of map() so the result is a list
    # on Python 3 as well (py3 map() returns an iterator, which cannot be
    # concatenated to a list with ``+``).
    local_argv = ssh_cmd + [shell_quote(arg) for arg in argv]
    return runcmd(local_argv, **kwargs)
| [
2,
15069,
357,
34,
8,
2813,
11,
2321,
220,
31239,
370,
343,
4801,
3754,
198,
2,
15069,
357,
34,
8,
2321,
220,
18720,
2788,
676,
15302,
198,
2,
198,
2,
770,
1430,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
130... | 2.398696 | 2,147 |
# -*- coding: utf-8 -*-
| [
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198
] | 1.6875 | 16 |
import tkinter
import random

root = tkinter.Tk()
root.geometry('300x300')
root.title('Jogar Dado')

# Die display label. FIX: the original never packed it, so it stayed invisible.
label = tkinter.Label(root, text='', font=('Arial', 260))
label.pack()

# Button callback. FIX: the original referenced ``jogar_dados`` without ever
# defining it, which raised NameError at startup.
def jogar_dados():
    """Show a random die face (1-6) on the label."""
    label['text'] = random.randint(1, 6)

# Button that rolls the die.
botao = tkinter.Button(root, text='Jogar Dado', foreground='black', command=jogar_dados)
botao.pack()

# Keep the window open.
root.mainloop()
11748,
256,
74,
3849,
198,
11748,
4738,
198,
198,
15763,
796,
256,
74,
3849,
13,
51,
74,
3419,
198,
15763,
13,
469,
15748,
10786,
6200,
87,
6200,
11537,
198,
15763,
13,
7839,
10786,
41,
519,
283,
360,
4533,
11537,
198,
198,
2,
3359,... | 2.486111 | 144 |
from owlready2 import *
from util import *
# Locations of used ontologies and serialization format transforms.
# Source locations for the imported ontologies.
bfo_url = "http://purl.obolibrary.org/obo/bfo.owl"
cco_url = "https://raw.githubusercontent.com/CommonCoreOntology/CommonCoreOntologies/master/cco-merged/MergedAllCoreOntology_v1.3.ttl"
cco_location = "cco.nt"
# NOTE(review): both arguments are ``cco_location`` ("cco.nt"); presumably the
# first should reference the downloaded Turtle file (cco_url) -- verify the
# signature of util.turtle_to_nt.
turtle_to_nt(cco_location, cco_location)
ogc_gml_location = "gml_32_geometries.rdf.xml" # Local copy removing dc import that raised owlready2 parsing error.
# Load the used ontologies and get the references to the namespaces required.
bfo_ont = get_ontology(bfo_url).load()
bfo = bfo_ont.get_namespace("http://purl.obolibrary.org/obo/")
cco_ont = get_ontology("cco.nt").load()
cco = cco_ont.get_namespace("http://www.ontologyrepository.com/CommonCoreOntologies/")
ogc_gml_ont = get_ontology(ogc_gml_location).load()
gml = ogc_gml_ont.get_namespace("http://www.opengis.net/ont/gml")
geosparql = ogc_gml_ont.get_namespace("http://www.opengis.net/ont/geosparql")
# The ontology defined here:
bfiao = get_ontology("https://w3id.org/bfiao")
bfiao.base_iri = "https://w3id.org/bfiao/"
# Disabled experiments with custom annotation properties; kept for reference.
#print(list(bfiao.annotation_properties()))
#with bfiao:
#    class my_annotation(AnnotationProperty):
#        pass
#bfiao.my_annotation = "Hello"
#print(list(bfiao.annotation_properties()))
#print(list(bfo_ont.annotation_properties()))
class Node(bfo.BFO_0000141):
    """
    An immaterial entity defining an area of interest in an unfolding
    situation.

    subclass_of: http://purl.obolibrary.org/obo/BFO_0000141 (immaterial entity)
    """
    namespace = bfiao
    # ``comment`` / ``editor_note`` are OWL annotations serialized into the
    # ontology output, not Python documentation.
    comment = "An immaterial entity defining an area of interest in an unfolding situation."
    editor_note = "Nodes are defined for a purpose of tracking typically under some form of geospatial-based \
    visualization."
class geo_location(Thing >> geosparql.Geometry):
    """The (current) geospatial location of something.

    Object property: domain owl:Thing, range geosparql:Geometry.
    """
    namespace = bfiao
class defined_by(Thing >> bfo.BFO_0000040):
    """Object property from owl:Thing to a BFO material entity.

    range: http://purl.obolibrary.org/obo/BFO_0000040 (material entity)
    """
    namespace = bfiao
class member_part_of(bfo.BFO_0000030 >> bfo.BFO_0000027):
    """Object property from a BFO object to a BFO object aggregate.

    domain: http://purl.obolibrary.org/obo/BFO_0000030 (object)
    range: http://purl.obolibrary.org/obo/BFO_0000027 (object aggregate)
    """
    namespace = bfiao
    comment = "this is the member part of relation as defined in BFO."
class InfrastructureAggregate(bfo.BFO_0000027):
    """
    subclass_of: http://purl.obolibrary.org/obo/BFO_0000027 (object aggregate)

    used in axiom:
    """
    namespace = bfiao
    comment = ""
    # NOTE(review): the equivalence axiom below is left disabled -- confirm
    # whether it should be restored before publishing the ontology.
    # equivalent_to = [bfo.BFO_0000027 & member_part_of.all(cco.Infrastructure)]
6738,
39610,
1493,
17,
1330,
1635,
198,
6738,
7736,
1330,
1635,
628,
198,
2,
41277,
286,
973,
39585,
5823,
290,
11389,
1634,
5794,
31408,
13,
220,
198,
65,
6513,
62,
6371,
796,
366,
4023,
1378,
79,
6371,
13,
672,
349,
4115,
13,
2398... | 2.577299 | 1,022 |
import os
import csv

# Path to the ballots CSV (columns: Voter ID, County, Candidate).
# csvpath = os.path.join('Desktop','UCFLM201907DATA2','03-Python', 'Homework','Instructions','PyPoll','Resources','election_data.csv')
csvpath = "election_data.csv"

# Single pass over the ballots: count total votes and votes per candidate.
# A dict preserves first-appearance order, matching the original report.
total_votes = 0
vote_counts = {}

with open(csvpath, newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    next(csvreader)  # skip the header row
    for row in csvreader:
        total_votes += 1
        candidate = str(row[2])
        vote_counts[candidate] = vote_counts.get(candidate, 0) + 1

# Build the report once and reuse it for stdout and the results file.
report_lines = []
report_lines.append("The candidates are: " + ", ".join(vote_counts))
report_lines.append("Total Votes: " + str(total_votes))
for candidate, count in vote_counts.items():
    percentage = round((count / total_votes) * 100, 2)
    report_lines.append(
        candidate + ": " + str(percentage) + "% " + "(" + str(count) + ")")

# BUG FIX: the original tested ``a > b and c and d``, which only checks the
# truthiness of c and d, so the declared winner could be wrong.  ``max`` over
# the per-candidate counts picks the true plurality winner.
winner = max(vote_counts, key=vote_counts.get)
report_lines.append("Winner: " + winner)

print("\n".join(report_lines))

# FIX: use a context manager so the results file is flushed and closed
# (the original never called close()).
with open("election_results.txt", "w") as text_file:
    text_file.write("\n".join(report_lines))
| [
11748,
28686,
201,
198,
201,
198,
11748,
269,
21370,
201,
198,
201,
198,
2,
269,
21370,
6978,
796,
28686,
13,
6978,
13,
22179,
10786,
36881,
41707,
9598,
3697,
44,
23344,
2998,
26947,
17,
41707,
3070,
12,
37906,
3256,
705,
28718,
6433,
... | 2.307958 | 1,445 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 14 10:57:42 2021

@author: liang
"""
import pandas as pd
import requests

if __name__ == "__main__":
    import redis
    # Connection pool to a local Redis with string decoding enabled.
    pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True)
    redis_curse = redis.Redis(connection_pool=pool)
    # Sample request payload: user ids plus a list of item (coin) symbols.
    sample = {"user_list":['60b4cdc47789d200098dc87d', '60b4cc777789d200098dc879'], "item_list":["BTC","TRX","TUSD","BTT","DAI","BNB","TEL","SNX","DCR","LTC","DOT","SNT","XLM","DEGO","ETC","ETH","KNC","VET","DRGN","SOLVE","ROOBEE","AION","CRPT","RBTC","AERGO","TRTL","IOTX","OPEN","VIDT","TOKO","KICK","UTK","KAT" ]}
    itemid_list = sample['item_list']
    # NOTE(review): ``get_data_redis`` is not defined or imported in this
    # chunk; as written this raises NameError -- confirm where it lives.
    item_df = get_data_redis(redis_curse, itemid_list, 'item_id')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
1737,
1478,
838,
25,
3553,
25,
3682,
33448,
198,
198,
31,
9800,
25,
7649,
648,
198... | 2.153005 | 366 |
import re
import tempfile
import pandas as pd
import camelot
import pandas as pd
import requests
import us
import textract
from can_tools.scrapers.official.base import StateDashboard
from can_tools.scrapers import variables, CMU
from typing import Any, Dict
| [
11748,
302,
198,
11748,
20218,
7753,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
41021,
313,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
7007,
198,
11748,
514,
198,
11748,
2420,
974,
198,
198,
6738,
460,
62,
31391,
13,
141... | 3.434211 | 76 |
import datetime
import logging
from .state_ui import UI, State
import dateutil.parser
import blessings
## ========================================================================
## ========================================================================
## ========================================================================
# Demo entry point: build a terminal, attach a state listener, run the UI.
if __name__ == "__main__":
    term = blessings.Terminal()
    # UI constructed with 5.0 -- presumably a refresh/poll interval; confirm.
    ui = UI( 5.0 )
    # NOTE(review): ``_show_state`` is not defined in this chunk; it is
    # expected to render a State to the terminal -- confirm it exists.
    ui.add_state_listener(lambda s: _show_state(s,term) )
    ui.start()
| [
11748,
4818,
8079,
198,
11748,
18931,
198,
198,
6738,
764,
5219,
62,
9019,
1330,
12454,
11,
1812,
198,
198,
11748,
3128,
22602,
13,
48610,
198,
11748,
28388,
198,
198,
2235,
38093,
1421,
18604,
198,
198,
2235,
38093,
1421,
18604,
198,
2... | 3.742647 | 136 |
import torch
from .linear_operator import aslinearoperator, TorchLinearOperator
| [
11748,
28034,
198,
198,
6738,
764,
29127,
62,
46616,
1330,
355,
29127,
46616,
11,
34868,
14993,
451,
18843,
1352,
628
] | 4.1 | 20 |
import numpy as np
"""
Here is a simple optimisation solver for the market clearing algorithm.
We minimise the squared of aggregate excess demand, under the constraint of
a Limit-Up-Limit-Down (LU-LD) circuit breaker, which limits prices to
[LD*p(t-1), LU*p(t-1)], with LD=1/2 and LU = 2
The solver uses the LEAP package: https://github.com/AureumChaos/LEAP
It is a package focusing on evolutionary algorithms, that has an amazing
ea_solve function that ... solves a real valued function.
"""
from leap_ec import Individual, Representation
from leap_ec import ops, probe
from leap_ec.algorithm import generational_ea
from leap_ec.problem import FunctionProblem
from leap_ec.real_rep import create_real_vector
from leap_ec.real_rep.ops import mutate_gaussian
"""Provides a simple, top-level interfact that optimizes a real-valued
function using a simple generational EA.
:param function: the function to optimize; should take lists of real
numbers as input and return a float fitness value
:param [(float, float)] bounds: a list of (min, max) bounds to define the
search space
:param int generations: the number of generations to run for
:param int pop_size: the population size
:param float mutation_std: the width of the mutation distribution
:param bool maximize: whether to maximize the function (else minimize)
:param bool viz: whether to display a live best-of-generation plot
:param bool hard_bounds: if True, bounds are enforced at all times during
evolution; otherwise they are only used to initialize the population. """
""" Example """
best_genome = ea_solve_noverbose(
f,
bounds=[(1, 10)],
generations=50,
pop_size=500,
mutation_std=0.1,
hard_bounds=True,
maximize=False,
)
print(best_genome)
| [
11748,
299,
32152,
355,
45941,
198,
198,
37811,
198,
4342,
318,
257,
2829,
6436,
5612,
1540,
332,
329,
262,
1910,
17304,
11862,
13,
220,
198,
1135,
10356,
786,
262,
44345,
286,
19406,
6992,
3512,
11,
739,
262,
32315,
286,
198,
64,
272... | 3.432485 | 511 |
import argparse
import math
import random
import os
import numpy as np
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torch.utils import data
import torch.distributed as dist
from torchvision import transforms, utils
from tqdm import tqdm
# try:
# import wandb
#
# except ImportError:
# wandb = None
from model import Generator, Code2Style
from dataset import MultiResolutionDataset
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
reduce_sum,
get_world_size,
)
from non_leaking import augment
from perceptual import LPIPF
# def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
# noise = torch.randn_like(fake_img) / math.sqrt(
# fake_img.shape[2] * fake_img.shape[3]
# )
# grad, = autograd.grad(
# outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True
# )
# path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))
#
# path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
#
# path_penalty = (path_lengths - path_mean).pow(2).mean()
#
# return path_penalty, path_mean.detach(), path_lengths
# Training entry point: parse CLI options, set up (optionally distributed)
# models and optimizer, build the data pipeline, then hand off to train().
if __name__ == "__main__":
    device = "cuda"

    parser = argparse.ArgumentParser(description="StyleGAN2 trainer")

    parser.add_argument("path", type=str, help="path to the lmdb dataset")
    parser.add_argument(
        "--iter", type=int, default=800000, help="total training iterations"
    )
    parser.add_argument(
        "--batch", type=int, default=16, help="batch sizes for each gpus"
    )
    parser.add_argument(
        "--n_sample",
        type=int,
        default=64,
        help="number of the samples generated during training",
    )
    parser.add_argument(
        "--size", type=int, default=256, help="image sizes for the model"
    )
    parser.add_argument(
        "--r1", type=float, default=10, help="weight of the r1 regularization"
    )
    # Path-length regularization options disabled in this variant.
    # parser.add_argument(
    #     "--path_regularize",
    #     type=float,
    #     default=2,
    #     help="weight of the path length regularization",
    # )
    # parser.add_argument(
    #     "--path_batch_shrink",
    #     type=int,
    #     default=2,
    #     help="batch size reducing factor for the path length regularization (reduce memory consumption)",
    # )
    parser.add_argument(
        "--d_reg_every",
        type=int,
        default=16,
        help="interval of the applying r1 regularization",
    )
    # parser.add_argument(
    #     "--g_reg_every",
    #     type=int,
    #     default=4,
    #     help="interval of the applying path length regularization",
    # )
    parser.add_argument(
        "--mixing", type=float, default=0., help="probability of latent code mixing"
    )
    parser.add_argument(
        "--ckpt",
        type=str,
        default=None,
        help="path to the checkpoints to resume training",
    )
    parser.add_argument("--lr", type=float, default=0.005, help="learning rate")
    parser.add_argument(
        "--channel_multiplier",
        type=int,
        default=2,
        help="channel multiplier factor for the model. config-f = 2, else = 1",
    )
    # parser.add_argument(
    #     "--wandb", action="store_true", help="use weights and biases logging"
    # )
    parser.add_argument(
        "--local_rank", type=int, default=0, help="local rank for distributed training"
    )
    parser.add_argument(
        "--augment", action="store_true", help="apply non leaking augmentation"
    )
    parser.add_argument(
        "--augment_p",
        type=float,
        default=0,
        help="probability of applying augmentation. 0 = use adaptive augmentation",
    )
    parser.add_argument(
        "--ada_target",
        type=float,
        default=0.6,
        help="target augmentation probability for adaptive augmentation",
    )
    parser.add_argument(
        "--ada_length",
        type=int,
        default=500 * 1000,
        help="target duraing to reach augmentation probability for adaptive augmentation",
    )
    parser.add_argument(
        "--ada_every",
        type=int,
        default=256,
        help="probability update interval of the adaptive augmentation",
    )

    args = parser.parse_args()

    # Multi-GPU detection via torch.distributed's WORLD_SIZE convention.
    n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = n_gpu > 1

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
        synchronize()

    # Fixed architecture hyper-parameters for this run.
    args.latent = 512
    args.n_mlp = 8

    args.start_iter = 0

    generator = Generator(
        args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
    ).to(device)
    # Perceptual feature extractor used as the training signal.
    pf = LPIPF(layer_res=[8, 4, 2, 0, 0]).to(device)
    code_dim = 7168
    code2style = Code2Style(args.size, code_dim, args.latent, args.n_mlp).to(device)
    # Discriminator / EMA generator disabled in this code2style variant.
    # discriminator = Discriminator(
    #     args.size, channel_multiplier=args.channel_multiplier
    # ).to(device)
    # g_ema = Generator(
    #     args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
    # ).to(device)
    # g_ema.eval()
    # accumulate(g_ema, generator, 0)

    # g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1)
    g_reg_ratio = 1.0
    # d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)

    # Only the code2style network is optimized; the generator is frozen here
    # (no optimizer is created for it) -- TODO confirm against train().
    c2s_optim = optim.Adam(
        code2style.parameters(),
        lr=args.lr,
        betas=(0, 0.99),
    )
    # g_optim = optim.Adam(
    #     generator.parameters(),
    #     lr=args.lr * g_reg_ratio,
    #     betas=(0 ** g_reg_ratio, 0.99 ** g_reg_ratio),
    # )
    # d_optim = optim.Adam(
    #     discriminator.parameters(),
    #     lr=args.lr * d_reg_ratio,
    #     betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),
    # )

    if args.ckpt is not None:
        print("load model:", args.ckpt)

        ckpt = torch.load(args.ckpt, map_location=lambda storage, loc: storage)

        try:
            # Resume iteration count from a checkpoint named "<iter>.pt".
            ckpt_name = os.path.basename(args.ckpt)
            args.start_iter = int(os.path.splitext(ckpt_name)[0])

        except ValueError:
            pass

        generator.load_state_dict(ckpt["g"], strict=False)
        # discriminator.load_state_dict(ckpt["d"])
        # g_ema.load_state_dict(ckpt["g_ema"])

        # g_optim.load_state_dict(ckpt["g_optim"])
        # d_optim.load_state_dict(ckpt["d_optim"])

    if args.distributed:
        generator = nn.parallel.DistributedDataParallel(
            generator,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
        )

        code2style = nn.parallel.DistributedDataParallel(
            code2style,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
        )

        # discriminator = nn.parallel.DistributedDataParallel(
        #     discriminator,
        #     device_ids=[args.local_rank],
        #     output_device=args.local_rank,
        #     broadcast_buffers=False,
        # )

    transform = transforms.Compose(
        [
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            # TODO: Check normalization and LPIPF
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),
        ]
    )

    dataset = MultiResolutionDataset(args.path, transform, args.size)
    # NOTE(review): ``data_sampler`` and ``train`` are not defined in this
    # chunk and are not imported above -- confirm they exist in this module.
    loader = data.DataLoader(
        dataset,
        batch_size=args.batch,
        sampler=data_sampler(dataset, shuffle=True, distributed=args.distributed),
        drop_last=True,
    )

    # if get_rank() == 0 and wandb is not None and args.wandb:
    #     wandb.init(project="stylegan 2")

    train(args, loader, generator, code2style, c2s_optim, device, pf)
| [
11748,
1822,
29572,
198,
11748,
10688,
198,
11748,
4738,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
11,
1960,
519,
6335,
11,
6436,
198,
6738,
28034,
13,
20471,
1330,
1... | 2.259793 | 3,472 |
# -*- coding: utf-8 -*-
import decimal
import os
from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
from django.core.urlresolvers import reverse
#from django.db import connection, reset_queries
from django.test import TestCase, Client
from ...util.tests import ViewsTestCase
from ..app import MagicProductApp
from ..forms import FormRegistry, variant_form_for_product
from . import (Parrot, ParrotVariant, DeadParrot, Cheese,
DeadParrotVariant, ZombieParrot, DeadParrotVariantForm)
__all__ = ['Models', 'Registry', 'Views', 'product_app']
product_app = TestProductApp()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
32465,
198,
11748,
28686,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12286,
82,
1330,
7572,
11,
22... | 3.082126 | 207 |
"""Software simulated motor
Author: Friedrich Schotte
Date created: 2015-11-03
Date last modified: 2019-05-26
"""
__version__ = "1.2" # sim_EPICS_motor: readback
class sim_EPICS_motor(sim_motor):
"""Simulated EPICS motor"""
from persistent_property import persistent_property
__prefix__ = persistent_property("prefix","SIM:MOTOR")
__EPICS_enabled__ = persistent_property("EPICS_enabled",True)
def __init__(self,prefix="SIM:MOTOR",name="sim_motor",
description="simulated motor",unit=None,readback=None):
"""prefix: default name of motor record
name: mnemonic name
readback: PV name for readback value (RBV)
"""
sim_motor.__init__(self,prefix)
self.name = name
if self.__prefix__ == "SIM:MOTOR": self.__prefix__ = prefix
if self.description == "simulated motor": self.description = description
if unit is not None and self.unit == "mm": self.unit = unit
self.readback = readback
self.EPICS_enabled = self.EPICS_enabled
prefix = property(get_prefix,set_prefix)
EPICS_enabled = property(get_EPICS_enabled,set_EPICS_enabled)
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.DEBUG,format="%(asctime): %(message)s")
motor = sim_EPICS_motor
Slit1H = motor("14IDA:Slit1Hsize",name="Slit1H",description="White beam slits H gap",
readback="14IDA:Slit1Ht2.C")
from CA import caget,caput
print('Slit1H.prefix = %r' % Slit1H.prefix)
print('Slit1H.readback = %r' % Slit1H.readback)
print('Slit1H.EPICS_enabled = %r' % Slit1H.EPICS_enabled)
print('caget(Slit1H.prefix+".VAL")')
print('caget(Slit1H.readback)')
print('caput(Slit1H.readback,Slit1H.VAL)')
print('Slit1H.value += 0.001')
print('Slit1H.value')
| [
37811,
25423,
28590,
5584,
198,
13838,
25,
46099,
3059,
11404,
198,
10430,
2727,
25,
1853,
12,
1157,
12,
3070,
198,
10430,
938,
9518,
25,
13130,
12,
2713,
12,
2075,
198,
37811,
198,
834,
9641,
834,
796,
366,
16,
13,
17,
1,
1303,
985... | 2.434724 | 743 |
# -*- coding: utf-8 -*-
'''
otsu.fun - Website Api Initial
@version: 0.9
@author: PurePeace
@time: 2019-12-10
@describe: initial some config.
'''
from flask import Flask
from flask_cors import CORS
# initial(s)
app = Flask('engine')
app.config.update(RESTFUL_JSON=dict(ensure_ascii=False))
CORS(app)
# run? no.
if __name__ == '__main__':
print('only initial, so it dosent work!!')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
7061,
6,
201,
198,
1747,
84,
13,
12543,
532,
15887,
5949,
72,
20768,
201,
198,
201,
198,
31,
9641,
25,
657,
13,
24,
201,
198,
31,
9800,
25,
17129,
43445,
201,
... | 2.271739 | 184 |
from pymongo import MongoClient
import time
start = time.time()
# connect to MongoDB
client = MongoClient('') # Añadir URL a cluster de mongo db
# select database
db=client['tfm']
# select collection
collection_players=db['players']
collection_plus_minus=db['pairs_plus_minus']
numberOfClusters = 4
""" collection_players.update_many({}, { '$set': { \
'Shared Time PG Cluster 0': 0, 'Plus/Minus PG Cluster 0': 0, \
'Shared Time PG Cluster 1': 0, 'Plus/Minus PG Cluster 1': 0, \
'Shared Time PG Cluster 2': 0, 'Plus/Minus PG Cluster 2': 0, \
'Shared Time PG Cluster 3': 0, 'Plus/Minus PG Cluster 3': 0, \
'Shared Time SG Cluster 0': 0, 'Plus/Minus SG Cluster 0': 0, \
'Shared Time SG Cluster 1': 0, 'Plus/Minus SG Cluster 1': 0, \
'Shared Time SG Cluster 2': 0, 'Plus/Minus SG Cluster 2': 0, \
'Shared Time SG Cluster 3': 0, 'Plus/Minus SG Cluster 3': 0, \
'Shared Time SF Cluster 0': 0, 'Plus/Minus SF Cluster 0': 0, \
'Shared Time SF Cluster 1': 0, 'Plus/Minus SF Cluster 1': 0, \
'Shared Time SF Cluster 2': 0, 'Plus/Minus SF Cluster 2': 0, \
'Shared Time SF Cluster 3': 0, 'Plus/Minus SF Cluster 3': 0, \
'Shared Time PF Cluster 0': 0, 'Plus/Minus PF Cluster 0': 0, \
'Shared Time PF Cluster 1': 0, 'Plus/Minus PF Cluster 1': 0, \
'Shared Time PF Cluster 2': 0, 'Plus/Minus PF Cluster 2': 0, \
'Shared Time PF Cluster 3': 0, 'Plus/Minus PF Cluster 3': 0, \
'Shared Time C Cluster 0': 0, 'Plus/Minus C Cluster 0': 0, \
'Shared Time C Cluster 1': 0, 'Plus/Minus C Cluster 1': 0, \
'Shared Time C Cluster 2': 0, 'Plus/Minus C Cluster 2': 0, \
'Shared Time C Cluster 3': 0, 'Plus/Minus C Cluster 3': 0, \
} }) """
for pareja in collection_plus_minus.find():
playerAdocs = collection_players.find({'BasketballReference Player Id': pareja['PlayerA']})
playerBdocs = collection_players.find({'BasketballReference Player Id': pareja['PlayerB']})
for playerAdoc in playerAdocs:
for playerBdoc in playerBdocs:
for i in range(numberOfClusters):
playerA_position = playerAdoc['Position']
playerB_position = playerBdoc['Position']
collection_players.update_one({ '$and': [ \
{ 'BasketballReference Player Id': playerAdoc['BasketballReference Player Id'] },
{ 'Position': playerAdoc['Position'] } \
]}, \
{ '$inc': {\
'Shared Time ' + playerB_position + ' Cluster ' + str(i): pareja['Shared Time'], \
'Plus/Minus ' + playerB_position + ' Cluster ' + str(i): pareja['Plus/Minus'] * playerBdoc['Pertenencia Cluster ' + str(i)] \
}})
collection_players.update_one({ '$and': [ \
{ 'BasketballReference Player Id': playerBdoc['BasketballReference Player Id'] },
{ 'Position': playerBdoc['Position'] } \
]}, \
{ '$inc': {\
'Shared Time ' + playerA_position + ' Cluster ' + str(i): pareja['Shared Time'], \
'Plus/Minus ' + playerA_position + ' Cluster ' + str(i): pareja['Plus/Minus'] * playerAdoc['Pertenencia Cluster ' + str(i)] \
}})
playerBdocs.rewind() # rebobinamos el cursor al principio
for player in collection_players.find():
for position in ['PG','SG','SF','PF','C']:
try:
compatibilidadCluster0 = player['Plus/Minus ' + position + ' Cluster ' + str(0)] / player['Shared Time ' + position + ' Cluster ' + str(0)]
except ZeroDivisionError:
compatibilidadCluster0 = 0
try:
compatibilidadCluster1 = player['Plus/Minus ' + position + ' Cluster ' + str(1)] / player['Shared Time ' + position + ' Cluster ' + str(1)]
except ZeroDivisionError:
compatibilidadCluster1 = 0
try:
compatibilidadCluster2 = player['Plus/Minus ' + position + ' Cluster ' + str(2)] / player['Shared Time ' + position + ' Cluster ' + str(2)]
except ZeroDivisionError:
compatibilidadCluster2 = 0
try:
compatibilidadCluster3 = player['Plus/Minus ' + position + ' Cluster ' + str(3)] / player['Shared Time ' + position + ' Cluster ' + str(3)]
except ZeroDivisionError:
compatibilidadCluster3 = 0
compatibilidadMinima = min([compatibilidadCluster0, compatibilidadCluster1, compatibilidadCluster2, compatibilidadCluster3])
compatibilidadMaxima = max([compatibilidadCluster0, compatibilidadCluster1, compatibilidadCluster2, compatibilidadCluster3])
# normalizamos la compatibilidad al rango entre 0 y 1
try:
compatibilidadNormalizadaCluster0 = (compatibilidadCluster0 - compatibilidadMinima) / (compatibilidadMaxima - compatibilidadMinima)
except ZeroDivisionError:
compatibilidadNormalizadaCluster0 = 0
try:
compatibilidadNormalizadaCluster1 = (compatibilidadCluster1 - compatibilidadMinima) / (compatibilidadMaxima - compatibilidadMinima)
except ZeroDivisionError:
compatibilidadNormalizadaCluster1 = 0
try:
compatibilidadNormalizadaCluster2 = (compatibilidadCluster2 - compatibilidadMinima) / (compatibilidadMaxima - compatibilidadMinima)
except ZeroDivisionError:
compatibilidadNormalizadaCluster2 = 0
try:
compatibilidadNormalizadaCluster3 = (compatibilidadCluster3 - compatibilidadMinima) / (compatibilidadMaxima - compatibilidadMinima)
except ZeroDivisionError:
compatibilidadNormalizadaCluster3 = 0
collection_players.update_one({ '$and': [ \
{ 'BasketballReference Player Id': player['BasketballReference Player Id'] },
{ 'Position': player['Position'] } \
]}, \
{ '$set': {\
'Compatibilidad ' + position + ' Cluster ' + str(0): compatibilidadNormalizadaCluster0, \
'Compatibilidad ' + position + ' Cluster ' + str(1): compatibilidadNormalizadaCluster1, \
'Compatibilidad ' + position + ' Cluster ' + str(2): compatibilidadNormalizadaCluster2, \
'Compatibilidad ' + position + ' Cluster ' + str(3): compatibilidadNormalizadaCluster3, \
}})
end = time.time()
print('Tiempo de ejecución: ' + str(round(end-start,2)) + ' segundos') | [
6738,
279,
4948,
25162,
1330,
42591,
11792,
198,
11748,
640,
198,
198,
9688,
796,
640,
13,
2435,
3419,
198,
198,
2,
2018,
284,
42591,
11012,
198,
16366,
796,
42591,
11792,
7,
7061,
8,
1303,
317,
12654,
324,
343,
10289,
257,
13946,
390... | 2.26776 | 2,928 |
#! /usr/bin/env python
# coding=utf-8
"""Copy this skeleton to start new tool
"""
from __future__ import print_function, division
# GLOBAL IMPORTS HERE
import __commons as commons
def __do(arg):
"""Main routine, common for command line, and python scripts call"""
# refer to args: arg.foo
# IMPLEMENT HERE
return arg.bar + arg.foo
# Below: standard skeleton for astwro.tools
# CUSTOMIZE:
# 1. customize postional - obligatory arguments
# 2. decide whether to print results (__do return)
def main(positional1, positional2, **kwargs):
"""Entry point for python script calls. Parameters identical to command line"""
# Extract default arguments from command line parser and apply kwargs parameters
args = commons.bunch_kwargs(__arg_parser(), positional=[positional1, positional2], **kwargs)
# call main routine - common form command line and python calls
return __do(args)
if __name__ == '__main__':
code = commandline_entry()
exit(code) | [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
12,
23,
198,
37811,
29881,
428,
18328,
284,
923,
649,
2891,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
7297,
198,
2,
10188,
9864,
1847... | 3.270627 | 303 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
import boto.sns
from alfajor import aws_sns
import uuid
sns = aws_sns.SNS()
print sns
#exit()
message = "test"
subject = "test " + uuid.uuid4().urn[-12:]
print sns.send_message(message, subject)
#sns.send_message(message, subject, arn)
print sns.get_topics()
sns.show_topics()
| [
11748,
275,
2069,
13,
82,
5907,
198,
6738,
435,
69,
1518,
1330,
3253,
82,
62,
82,
5907,
198,
11748,
334,
27112,
198,
198,
82,
5907,
796,
3253,
82,
62,
82,
5907,
13,
50,
8035,
3419,
198,
4798,
3013,
82,
198,
2,
37023,
3419,
198,
... | 2.330579 | 121 |
"""Test cases for render."""
import dataclasses
import io
import json
import os
import pathlib
import re
import sys
import textwrap
from pathlib import Path
from typing import (
Any,
Callable,
ContextManager,
Dict,
Generator,
Optional,
Protocol,
Union,
)
from unittest.mock import Mock
import httpx
import nbformat
import pytest
from _pytest.config import _PluggyPlugin
from nbformat import NotebookNode
from pytest_mock import MockerFixture
from rich import console
from nbpreview import notebook
from nbpreview.component.content.output.result.drawing import ImageDrawing
SKIP_TERMINEDIA_REASON = (
"terminedia is used to draw the images using block characters, and"
" is not importable on some systems due to a dependency on fcntl."
)
class RichOutput(Protocol):
"""Typing protocol for _rich_notebook_output."""
def __call__(
self,
cell: Union[Dict[str, Any], None],
plain: bool = False,
theme: str = "material",
no_wrap: bool = False,
unicode: Optional[bool] = None,
hide_output: bool = False,
nerd_font: bool = False,
files: bool = True,
negative_space: bool = True,
hyperlinks: bool = True,
hide_hyperlink_hints: bool = False,
images: Optional[bool] = None,
image_drawing: Optional[ImageDrawing] = None,
color: Optional[bool] = None,
relative_dir: Optional[Path] = None,
line_numbers: bool = False,
code_wrap: bool = False,
) -> str: # pragma: no cover
"""Callable types."""
...
@pytest.fixture
def adjust_for_fallback() -> Callable[[str, int], str]:
"""Fixture to automatically adjust expected outputs for fallback."""
def _adjust_for_fallback(rendered_output: str, newlines: int) -> str:
"""Add fallback text to end of output if import succeeds."""
fallback_text = newlines * f"{' ':>80}\n" + (
" \x1b[38;2;187;134"
";252mImage "
" \x1b"
"[0m\n"
)
adjusted_output = rendered_output + fallback_text
return adjusted_output
return _adjust_for_fallback
@dataclasses.dataclass
class LinkFilePathNotFoundError(Exception):
"""No hyperlink filepath found in output."""
def __post_init__(
self,
) -> None: # pragma: no cover
"""Constructor."""
super().__init__("No hyperlink filepath found in output")
@pytest.fixture
def parse_link_filepath() -> Callable[[str], Path]:
"""Return a helper function for parsing filepaths from links."""
def _parse_link_filepath(output: str) -> Path:
"""Extract the filepaths of hyperlinks in outputs."""
path_re = re.compile(r"(?:file://)(.+)(?:\x1b\\\x1b)")
link_filepath_match = re.search(path_re, output)
if link_filepath_match is not None:
link_filepath = link_filepath_match.group(1)
return pathlib.Path(link_filepath)
else: # pragma: no cover
raise LinkFilePathNotFoundError()
return _parse_link_filepath
@pytest.fixture
def rich_notebook_output(
rich_console: Callable[[Any, Union[bool, None]], str],
make_notebook: Callable[[Optional[Dict[str, Any]]], NotebookNode],
) -> RichOutput:
"""Fixture returning a function that returns the rendered output.
Args:
rich_console (Callable[[Any, Union[bool, None]], str]): Pytest
fixture that returns a rich console.
make_notebook (Callable[[Optional[Dict[str, Any]]], NotebookNode]):
A fixture that creates a notebook node.
Returns:
RichOutput: The output generating function.
"""
def _rich_notebook_output(
cell: Union[Dict[str, Any], None],
plain: Optional[bool] = None,
theme: str = "material",
no_wrap: Optional[bool] = None,
unicode: Optional[bool] = None,
hide_output: bool = False,
nerd_font: bool = False,
files: bool = True,
negative_space: bool = True,
hyperlinks: bool = True,
hide_hyperlink_hints: bool = False,
images: Optional[bool] = None,
image_drawing: Optional[Union[ImageDrawing, None]] = None,
color: Optional[bool] = None,
relative_dir: Optional[Path] = None,
line_numbers: bool = False,
code_wrap: bool = False,
) -> str:
"""Render the notebook containing the cell."""
notebook_node = make_notebook(cell)
rendered_notebook = notebook.Notebook(
notebook_node,
theme=theme,
plain=plain,
unicode=unicode,
hide_output=hide_output,
nerd_font=nerd_font,
files=files,
hyperlinks=hyperlinks,
hide_hyperlink_hints=hide_hyperlink_hints,
images=images,
image_drawing=image_drawing,
color=color,
negative_space=negative_space,
relative_dir=relative_dir,
line_numbers=line_numbers,
code_wrap=code_wrap,
)
output = rich_console(rendered_notebook, no_wrap)
return output
return _rich_notebook_output
def test_automatic_plain(
make_notebook: Callable[[Optional[Dict[str, Any]]], NotebookNode]
) -> None:
"""It automatically renders in plain format when not a terminal."""
code_cell = {
"cell_type": "code",
"execution_count": 3,
"id": "emotional-amount",
"metadata": {},
"outputs": [],
"source": "%%bash\necho 'lorep'",
}
output_file = io.StringIO()
con = console.Console(
file=output_file,
width=80,
color_system="truecolor",
legacy_windows=False,
force_terminal=False,
)
notebook_node = make_notebook(code_cell)
rendered_notebook = notebook.Notebook(notebook_node, theme="material")
con.print(rendered_notebook)
output = output_file.getvalue()
expected_output = (
"\x1b[38;2;137;221;255;49m%%\x1b[0m\x1b[38;2;187;1"
"28;179;49mbash\x1b[0m "
" "
" \n\x1b[38;2;130;170;255;49mecho\x1b"
"[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;195"
";232;141;49m'lorep'\x1b[0m "
" "
" \n"
)
assert output == expected_output
def test_notebook_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "### Lorep ipsum\n\n**dolor** _sit_ `amet`",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" "
" "
"\n \x1b[1;38;5;37m### \x1b[0m\x1b[1;38;5;37mLorep"
" ipsum\x1b[0m\x1b[1;38;5;37m "
" "
" \x1b[0m\n "
" "
" \n \x1b[1mdolor\x1b[0m \x1b[3msit\x1b[0m \x1b"
"[97;40mamet\x1b[0m "
" \n"
)
assert output == expected_output
def test_notebook_latex_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with latex equations."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "### Lorep ipsum\nLorep ipsum doret $\\gamma$ su\n"
"\n\n$$\ny = \\alpha + \\beta x\n$$\n\nsu ro\n",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" "
" "
"\n \x1b[1;38;5;37m### \x1b[0m\x1b[1;38;5;37mLorep"
" ipsum\x1b[0m\x1b[1;38;5;37m "
" "
" \x1b[0m\n "
" "
" \n Lorep ipsum doret $\\gamma$ "
"su "
" \n "
" "
" \n y = α+ βx "
" "
" \n "
" "
" \n su ro "
" "
" \n"
)
assert output == expected_output
def test_notebook_latex_and_table_markdown_cell(
rich_notebook_output: RichOutput,
) -> None:
"""It renders a markdown cell with latex equations and tables."""
source = textwrap.dedent(
"""\
# Lorep ipsum
Hey
| a | b | c |
| --- | --- | --- |
| 1 | 2 | 3 |
$$
X \\sim \\mathcal{N}(\\mu,\\,\\sigma^{2})\
$$
Hear
| a | b | c |
| --- | --- | --- |
| 1 | 2 | 3 |
Ehse
$$
rmse = \\sqrt{(\frac{1}{n})\\sum_{i=1}^{n}(y_{i} - x_{i})^{2}}
$$
Fin
"""
)
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": source,
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" \x1b[1;38;5;231;48;5;57m \x1b[0m\x1b[1;38;5;231"
";48;5;57mLorep ipsum\x1b[0m\x1b[1;38;5;231;48;"
"5;57m \x1b[0m\x1b[1;38;5;231;48;5;57m "
" "
" \x1b[0m\n \x1b[2;38;5;57m─────"
"────────────────────────────────────────"
"─────────────────────────────────\x1b[0m\n "
" "
" \n "
" Hey "
" \n"
" "
" "
"\n \x1b[1ma\x1b[0m \x1b["
"1mb\x1b[0m \x1b[1mc\x1b["
"0m \n ────────────"
"────────────────────────────────────────"
"──────────────────────────\n 1 "
" 2 "
" 3 \n "
" "
" \n "
" "
" \n X ∼𝒩(μ, "
"σ^2) "
" \n "
" "
" \n Hear "
" "
" \n "
" "
" \n \x1b[1"
"ma\x1b[0m \x1b[1mb\x1b[0m"
" \x1b[1mc\x1b[0m "
" \n ───────────────────"
"────────────────────────────────────────"
"───────────────────\n 1 "
" 2 3 "
" \n "
" "
" \n Ehse "
" "
" \n "
" "
" \n rmse = √(( rac"
"1n)∑_i=1^n(y_i - x_i)^2) "
" \n "
" "
" \n Fin "
" "
" \n"
)
assert output == expected_output
def test_image_link_markdown_cell_request_error(
rich_notebook_output: RichOutput,
mocker: MockerFixture,
remove_link_ids: Callable[[str], str],
) -> None:
"""It falls back to rendering a message if RequestError occurs."""
mock = mocker.patch("httpx.get", side_effect=httpx.RequestError("Mock"))
mock.return_value.content = (
pathlib.Path(__file__).parent
/ pathlib.Path("assets", "outline_article_white_48dp.png")
).read_bytes()
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "",
}
output = rich_notebook_output(markdown_cell, image_drawing="braille")
expected_output = (
" \x1b]8;id=724062;https://github.com/paw-l"
"u/nbpreview/tests/assets/outline_article_white_48dp.png"
"\x1b\\\x1b[94m🌐 Click "
"to view Azores\x1b[0m\x1b]8;;\x1b\\ "
" "
"\n "
" "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_image_link_markdown_cell(
rich_notebook_output: RichOutput,
mocker: MockerFixture,
remove_link_ids: Callable[[str], str],
expected_output: str,
) -> None:
"""It renders a markdown cell with an image."""
mock = mocker.patch("httpx.get")
mock.return_value.content = (
pathlib.Path(__file__).parent
/ pathlib.Path("assets", "outline_article_white_48dp.png")
).read_bytes()
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "",
}
output = rich_notebook_output(markdown_cell, image_drawing="character")
assert remove_link_ids(output) == expected_output
def test_image_markdown_cell(
rich_notebook_output: RichOutput,
mock_tempfile_file: Generator[Mock, None, None],
remove_link_ids: Callable[[str], str],
expected_output: str,
) -> None:
"""It renders a markdown cell with an image."""
image_path = os.fsdecode(
pathlib.Path(__file__).parent
/ pathlib.Path("assets", "outline_article_white_48dp.png")
)
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": f"",
}
output = rich_notebook_output(markdown_cell, image_drawing="braille")
assert remove_link_ids(output) == expected_output
def test_image_markdown_cell_no_drawing(
rich_notebook_output: RichOutput,
mock_tempfile_file: Generator[Mock, None, None],
remove_link_ids: Callable[[str], str],
tempfile_path: Path,
) -> None:
"""It renders a markdown cell with an image and skips drawing."""
image_path = os.fsdecode(
pathlib.Path(__file__).parent
/ pathlib.Path("assets", "outline_article_white_48dp.png")
)
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": f"",
}
output = rich_notebook_output(markdown_cell, image_drawing="braille", images=False)
expected_output = (
f" \x1b]8;id=378979;file://{image_path}\x1b\\\x1b[94m"
"🖼 Click to view Azores\x1b[0m\x1b]8;;\x1b\\ "
" "
" \n "
" "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_code_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with code."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "```python\nfor i in range(20):\n print(i)\n```",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" \x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;1"
"87;128;179;49mfor\x1b[0m\x1b[38;2;238;255;255;"
"49m \x1b[0m\x1b[38;2;238;255;255;49mi\x1b[0m\x1b[38;"
"2;238;255;255;49m \x1b[0m\x1b[3;38;2;137;221;2"
"55;49min\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
"[38;2;130;170;255;49mrange\x1b[0m\x1b[38;2;137"
";221;255;49m(\x1b[0m\x1b[38;2;247;140;108;49m2"
"0\x1b[0m\x1b[38;2;137;221;255;49m)\x1b[0m\x1b[38;2;1"
"37;221;255;49m:\x1b[0m "
" \n \x1b[3"
"8;2;238;255;255;49m \x1b[0m\x1b[38;2;13"
"0;170;255;49mprint\x1b[0m\x1b[38;2;137;221;255"
";49m(\x1b[0m\x1b[38;2;238;255;255;49mi\x1b[0m\x1b[38"
";2;137;221;255;49m)\x1b[0m "
" "
" \n"
)
assert output == expected_output
def test_table_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with tables."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": """# Hey buddy
*did you hear the news?*
```python
for i in range(20):
print(i)
```
| aaa | bbbb **ccc** |
| --- | --- |
| 111 **222** 333 | 222 |
| susu | lulu|
- so there you are
- words
| ddd | `eeee` fff |
| --- | --- |
| | |
--- | ---
sus | *spect*
rak
""",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" \x1b[1;38;5;231;48;5;57m \x1b[0m\x1b[1;38;5;231"
";48;5;57mHey buddy\x1b[0m\x1b[1;38;5;231;48;5;"
"57m \x1b[0m\x1b[1;38;5;231;48;5;57m "
" "
" \x1b[0m\n \x1b[2;38;5;57m─────"
"────────────────────────────────────────"
"─────────────────────────────────\x1b[0m\n "
" "
" \n "
" \x1b[3mdid you hear the news?\x1b[0m "
" "
" \n "
" "
" \n \x1b[38;2;238;255;255;49m \x1b[0"
"m\x1b[38;2;187;128;179;49mfor\x1b[0m\x1b[38;2;238"
";255;255;49m \x1b[0m\x1b[38;2;238;255;255;49mi"
"\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[3;38;2;"
"137;221;255;49min\x1b[0m\x1b[38;2;238;255;255;"
"49m \x1b[0m\x1b[38;2;130;170;255;49mrange\x1b[0m\x1b"
"[38;2;137;221;255;49m(\x1b[0m\x1b[38;2;247;140"
";108;49m20\x1b[0m\x1b[38;2;137;221;255;49m)\x1b[0"
"m\x1b[38;2;137;221;255;49m:\x1b[0m "
" "
" \n \x1b[38;2;238;255;255;49m \x1b[0m\x1b"
"[38;2;130;170;255;49mprint\x1b[0m\x1b[38;2;137"
";221;255;49m(\x1b[0m\x1b[38;2;238;255;255;49mi"
"\x1b[0m\x1b[38;2;137;221;255;49m)\x1b[0m "
" "
" \n "
" "
" \n \x1b[1maaa\x1b[0m "
" \x1b[1mbbbb \x1b[0m"
"\x1b[1mccc\x1b[0m "
"\n ─────────────────────────────────────"
"────────────────────────────────────────"
"─\n 111 \x1b[1m222\x1b[0m 333 "
" 222 "
" \n susu "
" lulu "
" \n "
" "
" \n "
" "
" \n • so there you are "
" "
" \n • words "
" "
" \n "
" "
" \n \x1b[1mddd\x1b[0m "
" \x1b[1;97;40mee"
"ee\x1b[0m\x1b[1m fff\x1b[0m "
" \n ──────────────────────────────"
"────────────────────────────────────────"
"────────\n "
" "
" \n "
" "
" \n "
" "
" \n "
" "
" \n ─────────────────────────"
"────────────────────────────────────────"
"─────────────\n sus "
" \x1b[3mspect\x1b[0m "
" \n "
" "
" \n rak "
" "
" \n"
)
assert output == expected_output
def test_heading_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with headings."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "# Heading 1\n## Heading 2\n### Heading 3\n#### Heading 4\n",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" \x1b[1;38;5;231;48;5;57m \x1b[0m\x1b[1;38;5;231"
";48;5;57mHeading 1\x1b[0m\x1b[1;38;5;231;48;5;"
"57m \x1b[0m\x1b[1;38;5;231;48;5;57m "
" "
" \x1b[0m\n \x1b[2;38;5;57m─────"
"────────────────────────────────────────"
"─────────────────────────────────\x1b[0m\n "
" "
" \n "
" "
" \n"
" \x1b[1;38;5;37m## \x1b[0m\x1b[1;38;5;37mHeading"
" 2\x1b[0m\x1b[1;38;5;37m "
" "
" \x1b[0m\n \x1b[2;38;5;37m─────────────────"
"────────────────────────────────────────"
"─────────────────────\x1b[0m\n "
" "
" \n "
" "
" \n \x1b[1;38;5;3"
"7m### \x1b[0m\x1b[1;38;5;37mHeading 3\x1b[0m\x1b[1;3"
"8;5;37m "
" \x1b[0m\n "
" "
" \n "
"\x1b[1;38;5;37m#### \x1b[0m\x1b[1;38;5;37mHeading"
" 4\x1b[0m\x1b[1;38;5;37m "
" "
" \x1b[0m\n"
)
assert output == expected_output
def test_wide_heading_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It reduced the padding if the heading is long."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "# " + "A" * 80,
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" \x1b[1;38;5;231;48;5;57mAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAA…\x1b[0m\n \x1b[2;38;5;57m"
"────────────────────────────────────────"
"──────────────────────────────────────\x1b["
"0m\n"
)
assert output == expected_output
def test_ruler_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with a ruler."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "Section 1\n\n---\n\nsection 2\n",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" Section 1 "
" "
"\n "
" "
" \n ────────────────────────────────────"
"────────────────────────────────────────"
"──\n section 2 "
" "
" \n"
)
assert output == expected_output
def test_bullet_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with bullets."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "- Item 1\n- Item 2\n - Item 3\n",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" "
" "
"\n • Item 1 "
" "
" \n • Item 2 "
" "
" \n • Item 3 "
" "
" \n"
)
assert output == expected_output
def test_number_markdown_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a markdown cell with numbers."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "1. Item 1\n2. Item 2\n3. Item 3\n",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" "
" "
"\n 1. Item 1 "
" "
" \n 2. Item 2 "
" "
" \n 3. Item 3 "
" "
" \n"
)
assert output == expected_output
def test_image_file_link_not_image_markdown_cell(
rich_notebook_output: RichOutput, remove_link_ids: Callable[[str], str]
) -> None:
"""It does not render an image link when file is not an image."""
bad_path = pathlib.Path(__file__).parent / pathlib.Path("assets", "bad_image.xyz")
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "![This is a weird file extension]" f"({bad_path})",
}
output = rich_notebook_output(markdown_cell, images=True)
expected_output = (
f" \x1b]8;id=228254;file://{bad_path}\x1b\\\x1b[94m🖼 Click to "
"view This is a weird file extension\x1b[0m\x1b]8;;\x1b\\ "
" \n "
" "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_image_file_link_bad_extension_markdown_cell(
rich_notebook_output: RichOutput, remove_link_ids: Callable[[str], str]
) -> None:
"""It does not render an image link when extension is unknown."""
bad_extension_path = __file__
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": f"",
}
output = rich_notebook_output(markdown_cell, images=True)
expected_output = (
f" \x1b]8;id=467471;file://{bad_extension_path}\x1b\\\x1b"
"[94m🖼 Click"
" to view This isn't even a image\x1b[0m\x1b]8;;\x1b\\"
" "
" \n "
" "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_image_file_link_not_exist_markdown_cell(
rich_notebook_output: RichOutput, remove_link_ids: Callable[[str], str]
) -> None:
"""It does not render an image link when the file does not exist."""
project_dir = pathlib.Path().resolve()
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "",
}
output = rich_notebook_output(markdown_cell)
expected_output = (
" \x1b]8;"
f"id=179352;file://{project_dir / 'i_do_not_exists.xyz'}"
"\x1b\\\x1b[94m🖼 Click to view This image does not "
"exist\x1b[0m\x1b]8;;\x1b\\ "
" \n "
" "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_notebook_code_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a code cell."""
code_cell = {
"cell_type": "code",
"execution_count": 2,
"id": "emotional-amount",
"metadata": {},
"outputs": [],
"source": "def foo(x: float, y: float) -> float:\n return x + y",
}
output = rich_notebook_output(code_cell)
expected_output = (
" ╭──────────────────────────────────"
"───────────────────────────────────────╮"
"\n\x1b[38;5;247m[2]:\x1b[0m │ \x1b[38;2;187;128;17"
"9;49mdef\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
"[38;2;130;170;255;49mfoo\x1b[0m\x1b[38;2;137;2"
"21;255;49m(\x1b[0m\x1b[38;2;238;255;255;49mx\x1b["
"0m\x1b[38;2;137;221;255;49m:\x1b[0m\x1b[38;2;238;"
"255;255;49m \x1b[0m\x1b[38;2;130;170;255;49mfl"
"oat\x1b[0m\x1b[38;2;137;221;255;49m,\x1b[0m\x1b[38;2"
";238;255;255;49m \x1b[0m\x1b[38;2;238;255;255;"
"49my\x1b[0m\x1b[38;2;137;221;255;49m:\x1b[0m\x1b[38;"
"2;238;255;255;49m \x1b[0m\x1b[38;2;130;170;255"
";49mfloat\x1b[0m\x1b[38;2;137;221;255;49m)\x1b[0m"
"\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;137;22"
"1;255;49m-\x1b[0m\x1b[38;2;137;221;255;49m>\x1b[0"
"m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;130;1"
"70;255;49mfloat\x1b[0m\x1b[38;2;137;221;255;49"
"m:\x1b[0m "
" │\n │ \x1b[38;2;238;255;255;49m \x1b[0m"
"\x1b[38;2;187;128;179;49mreturn\x1b[0m\x1b[38;2;2"
"38;255;255;49m \x1b[0m\x1b[38;2;238;255;255;49"
"mx\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;"
"137;221;255;49m+\x1b[0m\x1b[38;2;238;255;255;4"
"9m \x1b[0m\x1b[38;2;238;255;255;49my\x1b[0m "
" "
" │\n ╰──────────────────────"
"────────────────────────────────────────"
"───────────╯\n"
)
assert output == expected_output
def test_notebook_magic_code_cell(rich_notebook_output: RichOutput) -> None:
"""It renders a code cell in a language specified by cell magic."""
code_cell = {
"cell_type": "code",
"execution_count": 3,
"id": "emotional-amount",
"metadata": {},
"outputs": [],
"source": "%%bash\necho 'lorep'",
}
expected_output = (
" ╭──────────────────────────────────"
"───────────────────────────────────────╮"
"\n\x1b[38;5;247m[3]:\x1b[0m │ \x1b[38;2;137;221;25"
"5;49m%%\x1b[0m\x1b[38;2;187;128;179;49mbash\x1b[0"
"m "
" │\n │ \x1b[38"
";2;130;170;255;49mecho\x1b[0m\x1b[38;2;238;255"
";255;49m \x1b[0m\x1b[38;2;195;232;141;49m'lore"
"p'\x1b[0m "
" │\n ╰──────"
"────────────────────────────────────────"
"───────────────────────────╯\n"
)
output = rich_notebook_output(code_cell)
assert output == expected_output
def test_notebook_raw_cell(rich_notebook_output: RichOutput) -> None:
    """It renders a raw cell as plain text."""
    # A raw cell has no execution count or outputs; the renderer should
    # show its source verbatim inside a plain bordered panel.
    raw_cell = {
        "cell_type": "raw",
        "id": "emotional-amount",
        "metadata": {},
        "source": "Lorep ipsum",
    }
    result = rich_notebook_output(raw_cell)
    assert result == (
        "      ╭─────────────╮\n      │ Lorep ipsum │\n      ╰─────────────╯\n"
    )
def test_notebook_non_syntax_magic_code_cell(rich_notebook_output: RichOutput) -> None:
    """It uses the default highlighting when magic is not a syntax."""
    # %%timeit does not switch the cell language, so the body after the
    # magic line should still be highlighted as python.
    code_cell = {
        "cell_type": "code",
        "execution_count": 3,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "%%timeit\ndef foo(x: float, y: float) -> float:\n    return x + y",
    }
    # Exact expected terminal rendering with ANSI escape sequences.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[3]:\x1b[0m │ \x1b[38;2;137;221;25"
        "5;49m%%time\x1b[0m\x1b[38;2;238;255;255;49mit\x1b"
        "[0m                                     "
        "                                  │\n     │ \x1b[38"
        ";2;187;128;179;49mdef\x1b[0m\x1b[38;2;238;255;"
        "255;49m \x1b[0m\x1b[38;2;130;170;255;49mfoo\x1b[0"
        "m\x1b[38;2;137;221;255;49m(\x1b[0m\x1b[38;2;238;2"
        "55;255;49mx\x1b[0m\x1b[38;2;137;221;255;49m:\x1b["
        "0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;130;"
        "170;255;49mfloat\x1b[0m\x1b[38;2;137;221;255;4"
        "9m,\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2"
        ";238;255;255;49my\x1b[0m\x1b[38;2;137;221;255;"
        "49m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;"
        "2;130;170;255;49mfloat\x1b[0m\x1b[38;2;137;221"
        ";255;49m)\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m"
        "\x1b[38;2;137;221;255;49m-\x1b[0m\x1b[38;2;137;22"
        "1;255;49m>\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0"
        "m\x1b[38;2;130;170;255;49mfloat\x1b[0m\x1b[38;2;1"
        "37;221;255;49m:\x1b[0m                     "
        "       │\n     │ \x1b[38;2;238;255;25"
        "5;49m    \x1b[0m\x1b[38;2;187;128;179;49mretur"
        "n\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;2"
        "38;255;255;49mx\x1b[0m\x1b[38;2;238;255;255;49"
        "m \x1b[0m\x1b[38;2;137;221;255;49m+\x1b[0m\x1b[38;2;"
        "238;255;255;49m \x1b[0m\x1b[38;2;238;255;255;4"
        "9my\x1b[0m                                 "
        "                 │\n     ╰─────────"
        "────────────────────────────────────────"
        "────────────────────────╯\n"
    )
    output = rich_notebook_output(code_cell)
    assert output == expected_output
def test_notebook_plain_code_cell(rich_notebook_output: RichOutput) -> None:
    """It renders a code cell with plain formatting."""
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "emotional-amount",
        "metadata": {},
        "outputs": [],
        "source": "def foo(x: float, y: float) -> float:\n    return x + y",
    }
    # plain=True drops the bordered panel and execution-count gutter;
    # only syntax-highlighted source is expected in the output.
    output = rich_notebook_output(code_cell, plain=True)
    expected_output = (
        "\x1b[38;2;187;128;179;49mdef\x1b[0m\x1b[38;2;238;"
        "255;255;49m \x1b[0m\x1b[38;2;130;170;255;49mfo"
        "o\x1b[0m\x1b[38;2;137;221;255;49m(\x1b[0m\x1b[38;2;2"
        "38;255;255;49mx\x1b[0m\x1b[38;2;137;221;255;49"
        "m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;"
        "130;170;255;49mfloat\x1b[0m\x1b[38;2;137;221;2"
        "55;49m,\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b["
        "38;2;238;255;255;49my\x1b[0m\x1b[38;2;137;221;"
        "255;49m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
        "[38;2;130;170;255;49mfloat\x1b[0m\x1b[38;2;137"
        ";221;255;49m)\x1b[0m\x1b[38;2;238;255;255;49m "
        "\x1b[0m\x1b[38;2;137;221;255;49m-\x1b[0m\x1b[38;2;13"
        "7;221;255;49m>\x1b[0m\x1b[38;2;238;255;255;49m"
        " \x1b[0m\x1b[38;2;130;170;255;49mfloat\x1b[0m\x1b[38"
        ";2;137;221;255;49m:\x1b[0m                 "
        "                           \n\x1b[38;2;238;25"
        "5;255;49m    \x1b[0m\x1b[38;2;187;128;179;49mr"
        "eturn\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38"
        ";2;238;255;255;49mx\x1b[0m\x1b[38;2;238;255;25"
        "5;49m \x1b[0m\x1b[38;2;137;221;255;49m+\x1b[0m\x1b[3"
        "8;2;238;255;255;49m \x1b[0m\x1b[38;2;238;255;2"
        "55;49my\x1b[0m                             "
        "                                      \n"
    )
    assert output == expected_output
def test_render_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame."""
    # Cell output carries both a pandas-style HTML table and its
    # text/plain fallback; the renderer should draw the table itself
    # plus a hyperlink to the HTML dumped at tempfile_path.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead tr th {\n    "
                        "    text-align: left;\n    }\n\n    .datafr"
                        "ame thead tr:last-of-type th {\n        t"
                        "ext-align: right;\n    }\n</style>\n<table "
                        'border="1" class="dataframe">\n  <thead>\n'
                        "    <tr>\n      <th></th>\n      <th></th>"
                        "\n      <th>lorep</th>\n      <th colspan="
                        '"2" halign="left">hey</th>\n      <th>bye'
                        "</th>\n    </tr>\n    <tr>\n      <th></th>"
                        "\n      <th></th>\n      <th>ipsum</th>\n  "
                        "    <th>hi</th>\n      <th>very_long_word"
                        "</th>\n      <th>hi</th>\n    </tr>\n    <t"
                        "r>\n      <th>first</th>\n      <th>second"
                        "</th>\n      <th>third</th>\n      <th></t"
                        "h>\n      <th></th>\n      <th></th>\n    <"
                        "/tr>\n  </thead>\n  <tbody>\n    <tr>\n     "
                        ' <th rowspan="3" valign="top">bar</th>\n '
                        '     <th rowspan="2" valign="top">one</t'
                        "h>\n      <th>1</th>\n      <td>1</td>\n   "
                        "   <td>2</td>\n      <td>4</td>\n    </tr>"
                        "\n    <tr>\n      <th>10</th>\n      <td>3<"
                        "/td>\n      <td>4</td>\n      <td>-1</td>\n"
                        "    </tr>\n    <tr>\n      <th>three</th>\n"
                        "      <th>3</th>\n      <td>3</td>\n      "
                        "<td>4</td>\n      <td>-1</td>\n    </tr>\n "
                        "   <tr>\n      <th>foo</th>\n      <th>one"
                        "</th>\n      <th>1</th>\n      <td>3</td>\n"
                        "      <td>4</td>\n      <td>-1</td>\n    <"
                        "/tr>\n  </tbody>\n</table>\n</div>"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1      1              2"
                        "   4\n             10     3              "
                        "4  -1\n      three  3      3              "
                        "4  -1\nfoo   one    1      3              "
                        "4  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: empty source panel, OSC 8 hyperlink to the
    # dumped HTML, then the table drawn with rich table characters.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                   "
        "                                        "
        "              │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m  "
        "\x1b]8;id=1627258210.84976-39532;"
        f"file://{tempfile_path}0.html\x1b\\\x1b[94"
        "m🌐 Click to view HTML\x1b[0m\x1b]8;;\x1b\\       "
        "                                        "
        "          \n                             "
        "                                        "
        "            \n\x1b[38;5;247m[2]:\x1b[0m   \x1b[1m  \x1b["
        "0m   \x1b[1m      \x1b[0m   \x1b[1mlorep\x1b[0m   "
        " \x1b[1m           hey\x1b[0m   \x1b[1mbye\x1b[0m  "
        "            \n        \x1b[1m  \x1b"
        "[0m   \x1b[1m      \x1b[0m   \x1b[1mipsum\x1b[0m   \x1b"
        "[1mhi\x1b[0m   \x1b[1mvery_long_word\x1b[0m   \x1b[1"
        "m hi\x1b[0m               \n        \x1b"
        "[1mfirst\x1b[0m   \x1b[1msecond\x1b[0m   \x1b[1mthir"
        "d\x1b[0m   \x1b[1m  \x1b[0m   \x1b[1m              \x1b"
        "[0m   \x1b[1m   \x1b[0m               "
        "\n       ─────────────────────────────────"
        "─────────────────── "
        "              \n        \x1b[1m  bar\x1b[0m   \x1b[1m   one\x1b[0m  "
        " \x1b[1m    1\x1b[0m   1             2      "
        "   4                \n        "
        "      \x1b[1m    10\x1b[0m   3             "
        "    4       -1                \n        "
        "      \x1b[1m three\x1b[0m   \x1b[1m     3\x1b[0"
        "m   3                 4       -1        "
        "        \n        \x1b[1m  foo\x1b[0m   \x1b[1m"
        "   one\x1b[0m   \x1b[1m    1\x1b[0m   3          "
        "       4       -1                \n"
    )
    output = rich_notebook_output(code_cell)
    # Hyperlink ids are generated randomly, so strip them from both
    # sides before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_only_header_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with only headers."""
    # NOTE(review): this cell's text/html uses literal backslash-n
    # ("\\n") sequences rather than real newlines as in the sibling
    # tests — presumably intentional to exercise the parser; confirm.
    # The <tbody> is empty, so only header rows should be drawn.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\\n<style scoped>\\n    .dataframe tb"
                        "ody tr th:only-of-type {\\n        vertic"
                        "al-align: middle;\\n    }\\n\\n    .datafra"
                        "me tbody tr th {\\n        vertical-align"
                        ": top;\\n    }\\n\\n    .dataframe thead tr"
                        " th {\\n        text-align: left;\\n    }\\"
                        "n\\n    .dataframe thead tr:last-of-type "
                        "th {\\n        text-align: right;\\n    }\\"
                        'n</style>\\n<table border="1" class="data'
                        'frame">\\n  <thead>\\n    <tr>\\n      <th>'
                        'Model:</th>\\n      <th colspan="2" halig'
                        'n="left">Decision Tree</th>\\n      <th c'
                        'olspan="2" halign="left">Regression</th>'
                        '\\n      <th colspan="2" halign="left">Ra'
                        "ndom</th>\\n    </tr>\\n    <tr>\\n      <t"
                        "h>Predicted:</th>\\n      <th>Tumour</th>"
                        "\\n      <th>Non-Tumour</th>\\n      <th>T"
                        "umour</th>\\n      <th>Non-Tumour</th>\\n "
                        "     <th>Tumour</th>\\n      <th>Non-Tumo"
                        "ur</th>\\n    </tr>\\n    <tr>\\n      <th>"
                        "Actual Label:</th>\\n      <th></th>\\n   "
                        "   <th></th>\\n      <th></th>\\n      <th"
                        "></th>\\n      <th></th>\\n      <th></th>"
                        "\\n    </tr>\\n  </thead>\\n  <tbody>\\n  </"
                        "tbody>\\n</table>\\n</div>"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1      1              2"
                        "   4\n             10     3              "
                        "4  -1\n      three  3      3              "
                        "4  -1\nfoo   one    1      3              "
                        "4  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: header rows only (truncated with "…" where
    # column widths overflow) above an empty body.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                   "
        "                                        "
        "              │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=360825;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                        "
        "                \n                       "
        "                                        "
        "                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1m    Model:\x1b[0m        "
        "    \x1b[1m  Decision\x1b[0m   \x1b[1mRegre"
        "ssi…\x1b[0m   \x1b[1m   Random\x1b[0m  \n        "
        "                  \x1b[1m      Tree"
        "\x1b[0m                            "
        "  \n     \x1b[1mPredicte…\x1b[0m   \x1b[1mT"
        "umour\x1b[0m   \x1b[1mNon-Tumo…\x1b[0m   \x1b[1mTumo"
        "ur\x1b[0m   \x1b[1mNon-Tumo…\x1b[0m   \x1b[1mTumour\x1b"
        "[0m   \x1b[1mNon-Tumo…\x1b[0m  \n     \x1b[1m   A"
        "ctual\x1b[0m   \x1b[1m      \x1b[0m   \x1b[1m       "
        " \x1b[0m   \x1b[1m      \x1b[0m   \x1b[1m        \x1b"
        "[0m   \x1b[1m      \x1b[0m   \x1b[1m        \x1b[0m"
        "  \n     \x1b[1m   Label:\x1b[0m               "
        "                            "
        "   \n      ───────────────────────"
        "────────────────────────────────────────"
        "───────────\n                            "
        "                                        "
        "             \n"
    )
    output = rich_notebook_output(code_cell)
    # Hyperlink ids are randomly generated, so normalize before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_mistagged_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It doesn't detect a DataFrame when it is not a table."""
    # The HTML wraps the rows in <not-a-table> instead of <table>, so
    # the DataFrame detection should fail and the renderer should fall
    # back to rendering the HTML as plain text.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead tr th {\n    "
                        "    text-align: left;\n    }\n\n    .datafr"
                        "ame thead tr:last-of-type th {\n        t"
                        "ext-align: right;\n    }\n</style>\n<not-a-table "
                        'border="1" class="dataframe">\n  <thead>\n'
                        "    <tr>\n      <th>Model:</th>\n      <th"
                        ' colspan="2" halign="left">Decision Tree'
                        '</th>\n      <th colspan="2" halign="left'
                        '">Regression</th>\n      <th colspan="2" '
                        'halign="left">Random</th>\n    </tr>\n    '
                        "<tr>\n      <th>Predicted:</th>\n      <th"
                        ">Tumour</th>\n      <th>Non-Tumour</th>\n "
                        "     <th>Tumour</th>\n      <th>Non-Tumou"
                        "r</th>\n      <th>Tumour</th>\n      <th>N"
                        "on-Tumour</th>\n    </tr>\n    <tr>\n      "
                        "<th>Actual Label:</th>\n      <th></th>\n "
                        "     <th></th>\n      <th></th>\n      <th"
                        "></th>\n      <th></th>\n      <th></th>\n "
                        "   </tr>\n  </thead>\n  <tbody>\n    <tr>\n "
                        "     <th>Tumour (Positive)</th>\n      <t"
                        "d>38.0</td>\n      <td>2.0</td>\n      <td"
                        ">18.0</td>\n      <td>22.0</td>\n      <td"
                        ">21</td>\n      <td>NaN</td>\n    </tr>\n  "
                        "  <tr>\n      <th>Non-Tumour (Negative)</"
                        "th>\n      <td>19.0</td>\n      <td>439.0<"
                        "/td>\n      <td>6.0</td>\n      <td>452.0<"
                        "/td>\n      <td>226</td>\n      <td>232.0<"
                        "/td>\n    </tr>\n  </tbody>\n</not-a-table>\n</div"
                        ">"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1      1              2"
                        "   4\n             10     3              "
                        "4  -1\n      three  3      3              "
                        "4  -1\nfoo   one    1      3              "
                        "4  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: hyperlink plus the HTML's text content joined
    # with "|" separators — no rich table is drawn.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                   "
        "                                        "
        "              │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=968899;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                        "
        "                \n                       "
        "                                        "
        "                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m  Model: | Decision Tree | Reg"
        "ression | Random                        "
        "       \n      Predicted: | Tumour | Non-T"
        "umour | Tumour | Non-Tumour | Tumour |  "
        "         \n      Non-Tumour              "
        "                                        "
        "   \n      Actual Label: |  |  |  |  "
        " |  |                                   "
        "     \n      Tumour (Positive) | 38.0"
        " | 2.0 | 18.0 | 22.0 | 21 | NaN         "
        "             \n      Non-Tumour (Negative) |"
        " 19.0 | 439.0 | 6.0 | 452.0 | 226 | 232."
        "0            \n"
    )
    output = rich_notebook_output(code_cell)
    # Hyperlink ids are randomly generated, so normalize before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_multiindex_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a multiindex DataFrame."""
    # Same table as the mistagged test but inside a proper <table>:
    # multi-level column headers (colspan) plus multi-line row labels
    # should be detected and drawn as a rich table.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead tr th {\n    "
                        "    text-align: left;\n    }\n\n    .datafr"
                        "ame thead tr:last-of-type th {\n        t"
                        "ext-align: right;\n    }\n</style>\n<table "
                        'border="1" class="dataframe">\n  <thead>\n'
                        "    <tr>\n      <th>Model:</th>\n      <th"
                        ' colspan="2" halign="left">Decision Tree'
                        '</th>\n      <th colspan="2" halign="left'
                        '">Regression</th>\n      <th colspan="2" '
                        'halign="left">Random</th>\n    </tr>\n    '
                        "<tr>\n      <th>Predicted:</th>\n      <th"
                        ">Tumour</th>\n      <th>Non-Tumour</th>\n "
                        "     <th>Tumour</th>\n      <th>Non-Tumou"
                        "r</th>\n      <th>Tumour</th>\n      <th>N"
                        "on-Tumour</th>\n    </tr>\n    <tr>\n      "
                        "<th>Actual Label:</th>\n      <th></th>\n "
                        "     <th></th>\n      <th></th>\n      <th"
                        "></th>\n      <th></th>\n      <th></th>\n "
                        "   </tr>\n  </thead>\n  <tbody>\n    <tr>\n "
                        "     <th>Tumour (Positive)</th>\n      <t"
                        "d>38.0</td>\n      <td>2.0</td>\n      <td"
                        ">18.0</td>\n      <td>22.0</td>\n      <td"
                        ">21</td>\n      <td>NaN</td>\n    </tr>\n  "
                        "  <tr>\n      <th>Non-Tumour (Negative)</"
                        "th>\n      <td>19.0</td>\n      <td>439.0<"
                        "/td>\n      <td>6.0</td>\n      <td>452.0<"
                        "/td>\n      <td>226</td>\n      <td>232.0<"
                        "/td>\n    </tr>\n  </tbody>\n</table>\n</div"
                        ">"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1      1              2"
                        "   4\n             10     3              "
                        "4  -1\n      three  3      3              "
                        "4  -1\nfoo   one    1      3              "
                        "4  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rich-table rendering; long cells are truncated with "…".
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                   "
        "                                        "
        "              │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=888128;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                        "
        "                \n                       "
        "                                        "
        "                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1m    Model:\x1b[0m        "
        "    \x1b[1m  Decision\x1b[0m   \x1b[1mRegre"
        "ssi…\x1b[0m   \x1b[1m   Random\x1b[0m  \n        "
        "                  \x1b[1m      Tree"
        "\x1b[0m                            "
        "  \n     \x1b[1mPredicte…\x1b[0m   \x1b[1mT"
        "umour\x1b[0m   \x1b[1mNon-Tumo…\x1b[0m   \x1b[1mTumo"
        "ur\x1b[0m   \x1b[1mNon-Tumo…\x1b[0m   \x1b[1mTumour\x1b"
        "[0m   \x1b[1mNon-Tumo…\x1b[0m  \n     \x1b[1m   A"
        "ctual\x1b[0m   \x1b[1m      \x1b[0m   \x1b[1m       "
        " \x1b[0m   \x1b[1m      \x1b[0m   \x1b[1m        \x1b"
        "[0m   \x1b[1m      \x1b[0m   \x1b[1m        \x1b[0m"
        "  \n     \x1b[1m   Label:\x1b[0m               "
        "                            "
        "   \n      ───────────────────────"
        "────────────────────────────────────────"
        "───────────\n     \x1b[1m   Tumour\x1b[0m     "
        "38.0          2.0     18.0         22.0 "
        "     21        NaN  \n     \x1b[1m(Positiv"
        "…\x1b[0m                                   "
        "                             \n     \x1b[1"
        "mNon-Tumo…\x1b[0m     19.0        439.0    "
        "  6.0        452.0      226      232.0  \n     "
        " \x1b[1m(Negativ…\x1b[0m                      "
        "                                        "
        "  \n"
    )
    output = rich_notebook_output(code_cell)
    # Hyperlink ids are randomly generated, so normalize before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_styled_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a styled DataFrame."""
    # HTML produced by a pandas Styler (id="T_..." table with a <style>
    # block and per-cell classes) — the renderer should still detect
    # the table and ignore the CSS styling.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        '<style type="text/css">\n#T_7cafb_ td:hov'
                        "er {\n  background-color: #ffffb3;\n}\n#T_7"
                        "cafb_ .index_name {\n  font-style: italic"
                        ";\n  color: darkgrey;\n  font-weight: norm"
                        "al;\n}\n#T_7cafb_ th:not(.index_name) {\n  "
                        "background-color: #000066;\n  color: whit"
                        "e;\n}\n#T_7cafb_ .true {\n  background-colo"
                        "r: #e6ffe6;\n}\n#T_7cafb_ .false {\n  backg"
                        "round-color: #ffe6e6;\n}\n</style>\n<table "
                        'id="T_7cafb_">\n  <thead>\n    <tr>\n      '
                        '<th class="index_name level0" >Model:</t'
                        'h>\n      <th class="col_heading level0 c'
                        'ol0" colspan="2">Decision Tree</th>\n    '
                        '  <th class="col_heading level0 col2" co'
                        'lspan="2">Regression</th>\n    </tr>\n    '
                        '<tr>\n      <th class="index_name level1"'
                        ' >Predicted:</th>\n      <th class="col_h'
                        'eading level1 col0" >Tumour</th>\n      <'
                        'th class="col_heading level1 col1" >Non-'
                        'Tumour</th>\n      <th class="col_heading'
                        ' level1 col2" >Tumour</th>\n      <th cla'
                        'ss="col_heading level1 col3" >Non-Tumour'
                        "</th>\n    </tr>\n    <tr>\n      <th class"
                        '="index_name level0" >Actual Label:</th>'
                        '\n      <th class="blank col0" >&nbsp;</t'
                        'h>\n      <th class="blank col1" >&nbsp;<'
                        '/th>\n      <th class="blank col2" >&nbsp'
                        ';</th>\n      <th class="blank col3" >&nb'
                        "sp;</th>\n    </tr>\n  </thead>\n  <tbody>\n"
                        '    <tr>\n      <th id="T_7cafb_level0_ro'
                        'w0" class="row_heading level0 row0" >Tum'
                        'our (Positive)</th>\n      <td id="T_7caf'
                        'b_row0_col0" class="data row0 col0 true '
                        '" >38</td>\n      <td id="T_7cafb_row0_co'
                        'l1" class="data row0 col1 false " >2</td'
                        '>\n      <td id="T_7cafb_row0_col2" class'
                        '="data row0 col2 true " >18</td>\n      <'
                        'td id="T_7cafb_row0_col3" class="data ro'
                        'w0 col3 false " >22</td>\n    </tr>\n    <'
                        'tr>\n      <th id="T_7cafb_level0_row1" c'
                        'lass="row_heading level0 row1" >Non-Tumo'
                        'ur (Negative)</th>\n      <td id="T_7cafb'
                        '_row1_col0" class="data row1 col0 false '
                        '" >19</td>\n      <td id="T_7cafb_row1_co'
                        'l1" class="data row1 col1 true " >439</t'
                        'd>\n      <td id="T_7cafb_row1_col2" clas'
                        's="data row1 col2 false " >6</td>\n      '
                        '<td id="T_7cafb_row1_col3" class="data r'
                        'ow1 col3 true " >452</td>\n    </tr>\n  </'
                        "tbody>\n</table>\n"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1      1              2"
                        "   4\n             10     3              "
                        "4  -1\n      three  3      3              "
                        "4  -1\nfoo   one    1      3              "
                        "4  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rich-table rendering of the styled table's content.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                   "
        "                                        "
        "              │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=698065;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                        "
        "                \n                       "
        "                                        "
        "                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1m        Model:\x1b["
        "0m   \x1b[1mDecision Tree\x1b[0m             "
        "  \x1b[1mRegression\x1b[0m             \n     \x1b["
        "1m    Predicted:\x1b[0m   \x1b[1mTumour"
        "\x1b[0m   \x1b[1m Non-Tumour\x1b[0m   \x1b[1mTumou"
        "r\x1b[0m   \x1b[1mNon-Tumour\x1b[0m  \n     \x1b"
        "[1m Actual Label:\x1b[0m   \x1b[1m      "
        " \x1b[0m   \x1b[1m           \x1b[0m   \x1b[1m    "
        "  \x1b[0m   \x1b[1m          \x1b[0m  \n      ─"
        "────────────────────────────────────────"
        "───────────────────────────── \n     "
        " \x1b[1m    Tumour (Positive)\x1b[0m       38 "
        "            2       18                  "
        "  \n      \x1b[1mNon-Tumour (Negative)\x1b[0m  "
        "     19           439        6          "
        "  452 \n"
    )
    output = rich_notebook_output(code_cell)
    # Hyperlink ids are randomly generated, so normalize before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_missing_column_name_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with a missing column index name."""
    # The second header row has a label ("hey") in the index-name slot
    # but empty column cells; the renderer should keep the row and
    # leave the blank header cells blank.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead tr th {\n    "
                        "    text-align: left;\n    }\n\n    .datafr"
                        "ame thead tr:last-of-type th {\n        t"
                        "ext-align: right;\n    }\n</style>\n<table "
                        'border="1" class="dataframe">\n  <thead>\n'
                        "    <tr>\n      <th></th>\n      <th>lorep"
                        "</th>\n      <th>hey</th>\n      <th>sup</"
                        "th>\n      <th>bye</th>\n    </tr>\n    <tr"
                        ">\n      <th>hey</th>\n      <th></th>\n   "
                        "   <th></th>\n      <th></th>\n      <th><"
                        "/th>\n    </tr>\n  </thead>\n  <tbody>\n    "
                        "<tr>\n      <th>3</th>\n      <th>1</th>\n "
                        "     <td>1</td>\n      <td>4</td>\n      <"
                        "td>6</td>\n    </tr>\n    <tr>\n      <th>4"
                        "</th>\n      <th>1</th>\n      <td>2</td>\n"
                        "      <td>5</td>\n      <td>7</td>\n    </"
                        "tr>\n  </tbody>\n</table>\n</div>"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1      1              2"
                        "   4\n             10     3              "
                        "4  -1\n      three  3      3              "
                        "4  -1\nfoo   one    1      3              "
                        "4  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rich-table rendering.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                   "
        "                                        "
        "              │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=337911;file://{tempfile_path}0.html\x1b\\\x1b[94m🌐 "
        "Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                        "
        "                \n                       "
        "                                        "
        "                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1m   \x1b[0m   \x1b[1mlorep\x1b[0m"
        "   \x1b[1mhey\x1b[0m   \x1b[1msup\x1b[0m   \x1b[1mbye\x1b["
        "0m                                      "
        "       \n       \x1b[1mhey\x1b[0m   \x1b[1m     \x1b[0"
        "m   \x1b[1m   \x1b[0m   \x1b[1m   \x1b[0m   \x1b[1m   \x1b"
        "[0m                                     "
        "        \n      ──────────────────────────"
        "─────                                   "
        "           \n       \x1b[1m  3\x1b[0m   \x1b[1m    1\x1b"
        "[0m       1     4     6                 "
        "                      \n       \x1b[1m  4"
        "\x1b[0m   \x1b[1m    1\x1b[0m       2     5     7 "
        "                                        "
        "      \n"
    )
    output = rich_notebook_output(code_cell)
    # Hyperlink ids are randomly generated, so normalize before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_missing_index_name_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with a missing (row) index name."""
    # The first header row's leading cells are empty and the index
    # label ("hey") only appears in the second header row.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead th {\n       "
                        " text-align: right;\n    }\n</style>\n<tabl"
                        'e border="1" class="dataframe">\n  <thead'
                        '>\n    <tr style="text-align: right;">\n  "'
                        "    <th></th>\n      <th></th>\n      <th>"
                        "a</th>\n      <th>b</th>\n      <th>c</th>"
                        "\n    </tr>\n    <tr>\n      <th></th>\n    "
                        "  <th>hey</th>\n      <th></th>\n      <th"
                        "></th>\n      <th></th>\n    </tr>\n  </the"
                        "ad>\n  <tbody>\n    <tr>\n      <th>3</th>\n"
                        "      <th>1</th>\n      <td>1</td>\n      "
                        "<td>4</td>\n      <td>6</td>\n    </tr>\n  "
                        "  <tr>\n      <th>4</th>\n      <th>1</th>"
                        "\n      <td>2</td>\n      <td>5</td>\n     "
                        " <td>7</td>\n    </tr>\n  </tbody>\n</table"
                        ">\n</div>"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1      1              2"
                        "   4\n             10     3              "
                        "4  -1\n      three  3      3              "
                        "4  -1\nfoo   one    1      3              "
                        "4  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rich-table rendering.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                   "
        "                                        "
        "              │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=308498;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                        "
        "                \n                       "
        "                                        "
        "                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1m \x1b[0m   \x1b[1m   \x1b[0m   \x1b"
        "[1ma\x1b[0m   \x1b[1mb\x1b[0m   \x1b[1mc\x1b[0m        "
        "                                        "
        "     \n       \x1b[1m \x1b[0m   \x1b[1mhey\x1b[0m   "
        "\x1b[1m \x1b[0m   \x1b[1m \x1b[0m   \x1b[1m \x1b[0m       "
        "                                        "
        "      \n      ───────────────────── "
        "                                        "
        "              \n       \x1b[1m3\x1b[0m   \x1b[1m  1\x1b[0m  "
        "     1     4     6                      "
        "                       \n       \x1b[1m4\x1b["
        "0m   \x1b[1m  1\x1b[0m       2     5     7    "
        "                                        "
        "   \n"
    )
    output = rich_notebook_output(code_cell)
    # Hyperlink ids are randomly generated, so normalize before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_missing_last_index_name_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with a missing last index name."""
    # Mirror of the missing-index-name test: here the index label
    # ("hey") is in the FIRST header row and the second row is blank.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead th {\n       "
                        " text-align: right;\n    }\n</style>\n<tabl"
                        'e border="1" class="dataframe">\n  <thead'
                        '>\n    <tr style="text-align: right;">\n  "'
                        "    <th></th>\n      <th></th>\n      <th>"
                        "a</th>\n      <th>b</th>\n      <th>c</th>"
                        "\n    </tr>\n    <tr>\n      <th>hey</th>\n "
                        "     <th></th>\n      <th></th>\n      <th"
                        "></th>\n      <th></th>\n    </tr>\n  </the"
                        "ad>\n  <tbody>\n    <tr>\n      <th>3</th>\n"
                        "      <th>1</th>\n      <td>1</td>\n      "
                        "<td>4</td>\n      <td>6</td>\n    </tr>\n  "
                        "  <tr>\n      <th>4</th>\n      <th>1</th>"
                        "\n      <td>2</td>\n      <td>5</td>\n     "
                        " <td>7</td>\n    </tr>\n  </tbody>\n</table"
                        ">\n</div>"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1      1              2"
                        "   4\n             10     3              "
                        "4  -1\n      three  3      3              "
                        "4  -1\nfoo   one    1      3              "
                        "4  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rich-table rendering.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                   "
        "                                        "
        "              │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=59302;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view "
        "HTML\x1b[0m\x1b]8;;\x1b\\                         "
        "                \n                       "
        "                                        "
        "                  \n\x1b[38;5;247"
        "m[2]:\x1b[0m   \x1b[1m   \x1b[0m   \x1b[1m \x1b[0m   \x1b["
        "1ma\x1b[0m   \x1b[1mb\x1b[0m   \x1b[1mc\x1b[0m         "
        "                                        "
        "    \n       \x1b[1mhey\x1b[0m   \x1b[1m \x1b[0m   \x1b"
        "[1m \x1b[0m   \x1b[1m \x1b[0m   \x1b[1m \x1b[0m        "
        "                                        "
        "     \n      ───────────────────── "
        "                                        "
        "              \n       \x1b[1m  3\x1b[0m   \x1b[1m1\x1b[0m  "
        "     1     4     6                      "
        "                       \n       \x1b[1m  4\x1b"
        "[0m   \x1b[1m1\x1b[0m       2     5     7     "
        "                                        "
        "  \n"
    )
    output = rich_notebook_output(code_cell)
    # Hyperlink ids are randomly generated, so normalize before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_plain_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame in a plain style."""
    # Same fixture as test_render_dataframe; with plain=True the
    # text/plain representation is used verbatim (no rich table, no
    # panel borders) alongside the HTML hyperlink.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        "<div>\n<style scoped>\n    .dataframe tbod"
                        "y tr th:only-of-type {\n        vertical-"
                        "align: middle;\n    }\n\n    .dataframe tbo"
                        "dy tr th {\n        vertical-align: top;\n"
                        "    }\n\n    .dataframe thead tr th {\n    "
                        "    text-align: left;\n    }\n\n    .datafr"
                        "ame thead tr:last-of-type th {\n        t"
                        "ext-align: right;\n    }\n</style>\n<table "
                        'border="1" class="dataframe">\n  <thead>\n'
                        "    <tr>\n      <th></th>\n      <th></th>"
                        "\n      <th>lorep</th>\n      <th colspan="
                        '"2" halign="left">hey</th>\n      <th>bye'
                        "</th>\n    </tr>\n    <tr>\n      <th></th>"
                        "\n      <th></th>\n      <th>ipsum</th>\n  "
                        "    <th>hi</th>\n      <th>very_long_word"
                        "</th>\n      <th>hi</th>\n    </tr>\n    <t"
                        "r>\n      <th>first</th>\n      <th>second"
                        "</th>\n      <th>third</th>\n      <th></t"
                        "h>\n      <th></th>\n      <th></th>\n    <"
                        "/tr>\n  </thead>\n  <tbody>\n    <tr>\n     "
                        ' <th rowspan="3" valign="top">bar</th>\n '
                        '     <th rowspan="2" valign="top">one</t'
                        "h>\n      <th>1</th>\n      <td>1</td>\n   "
                        "   <td>2</td>\n      <td>4</td>\n    </tr>"
                        "\n    <tr>\n      <th>10</th>\n      <td>3<"
                        "/td>\n      <td>4</td>\n      <td>-1</td>\n"
                        "    </tr>\n    <tr>\n      <th>three</th>\n"
                        "      <th>3</th>\n      <td>3</td>\n      "
                        "<td>4</td>\n      <td>-1</td>\n    </tr>\n "
                        "   <tr>\n      <th>foo</th>\n      <th>one"
                        "</th>\n      <th>1</th>\n      <td>3</td>\n"
                        "      <td>4</td>\n      <td>-1</td>\n    <"
                        "/tr>\n  </tbody>\n</table>\n</div>"
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1      1              2"
                        "   4\n             10     3              "
                        "4  -1\n      three  3      3              "
                        "4  -1\nfoo   one    1      3              "
                        "4  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected plain rendering: hyperlink then the text/plain fallback.
    expected_output = (
        "                                        "
        "                                        "
        "\n                                       "
        "                                        "
        " \n\x1b]8;id=1627258290.675266-113809;file:/"
        f"/{tempfile_path}0.html\x1b\\"
        "\x1b[94m🌐 Click to view HTML\x1b[0m\x1b]8;;\x1b\\   "
        "                                        "
        "             \n                          "
        "                                        "
        "               \nlorep              hey  "
        "              bye                       "
        "                        \nipsum          "
        "     hi very_long_word  hi              "
        "         \nfirst second third            "
        "                                        "
        "      \nbar   one    1      1            "
        "  2   4                                 "
        "        \n             10     "
        " 3              4  -1                   "
        "           \n      three  3      "
        " 3              4  -1                   "
        "            \nfoo   one    1      "
        " 3              4  -1                   "
        "          \n"
    )
    output = rich_notebook_output(code_cell, plain=True)
    # Hyperlink ids are randomly generated, so normalize before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_uneven_columns_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with missing columns."""
    # Styler-style HTML where the second header row has fewer cells
    # than the colspan total implies — header rows are uneven.
    # NOTE(review): the triple-quoted HTML relies on its literal
    # whitespace; the renderer presumably ignores it when parsing.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        """
<style type="text/css">
\n</style
>\n
<table id="T_aba0a_">
\n
<thead>
\n
<tr>
\n
<th class="index_name level0">Model:</th>
\n
<th class="col_heading level0 col0" colspan="2">Decision Tree</th>
\n
<th class="col_heading level0 col2" colspan="2">Regression</th>
\n
</tr>
\n
<tr>
\n
<th class="col_heading level1 col0">Tumour</th>
\n
<th class="col_heading level1 col1">Non-Tumour</th>
\n
<th class="col_heading level1 col2">Tumour</th>
\n
<th class="col_heading level1 col3">Non-Tumour</th>
\n
</tr>
\n
<tr>
\n
<th class="index_name level0">Actual Label:</th>
\n
<th class="blank col0">&nbsp;</th>
\n
<th class="blank col1">&nbsp;</th>
\n
<th class="blank col2">&nbsp;</th>
\n
<th class="blank col3">&nbsp;</th>
\n
</tr>
\n
</thead>
\n
<tbody>
\n
<tr>
\n
<th id="T_aba0a_level0_row0" class="row_heading level0 row0">
Tumour (Positive)
</th>
\n
<td id="T_aba0a_row0_col0" class="data row0 col0">38</td>
\n
<td id="T_aba0a_row0_col1" class="data row0 col1">2</td>
\n
<td id="T_aba0a_row0_col2" class="data row0 col2">18</td>
\n
<td id="T_aba0a_row0_col3" class="data row0 col3">22</td>
\n
</tr>
\n
<tr>
\n
<th id="T_aba0a_level0_row1" class="row_heading level0 row1">
Non-Tumour (Negative)
</th>
\n
<td id="T_aba0a_row1_col0" class="data row1 col0">19</td>
\n
<td id="T_aba0a_row1_col1" class="data row1 col1">439</td>
\n
<td id="T_aba0a_row1_col2" class="data row1 col2">6</td>
\n
<td id="T_aba0a_row1_col3" class="data row1 col3">452</td>
\n
</tr>
\n
</tbody>
\n
</table>
\n
"""
                    ),
                    "text/plain": (
                        "lorep              hey                by"
                        "e\nipsum               hi very_long_word "
                        " hi\nfirst second third                  "
                        "    \nbar   one    1      1              2"
                        "   4\n             10     3              "
                        "4  -1\n      three  3      3              "
                        "4  -1\nfoo   one    1      3              "
                        "4  -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rich-table rendering.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                   "
        "                                        "
        "              │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                   \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=635975;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                        "
        "                \n                       "
        "                                        "
        "                  \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1m        Model:\x1b[0m    "
        "        \x1b[1mDecision Tree\x1b[0m           "
        "     \x1b[1mRegression\x1b[0m  \n           \x1b["
        "1m        Tumour\x1b[0m   \x1b[1mNon-Tumour"
        "\x1b[0m   \x1b[1m    Tumour\x1b[0m   \x1b[1mNon-T"
        "umour\x1b[0m                 \n     \x1b[1m  A"
        "ctual Label:\x1b[0m   \x1b[1m          \x1b[0m   "
        "\x1b[1m          \x1b[0m   \x1b[1m          \x1b["
        "0m   \x1b[1m          \x1b[0m  \n      ─────────"
        "────────────────────────────────────────"
        "─────────────────────────\n      \x1b[1mTum"
        "our (Positive)\x1b[0m           38         "
        "   2           18           22 \n        "
        "    \x1b[1m    Non-Tumour\x1b[0m           19"
        "          439            6           4"
        "52 \n      \x1b[1m    (Negative)\x1b[0m        "
        "                                        "
        "       \n"
    )
    output = rich_notebook_output(code_cell)
    # Hyperlink ids are randomly generated, so normalize before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_no_columns_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with missing columns."""
    # Notebook code-cell dict (Jupyter JSON shape) whose output is a styled
    # DataFrame HTML table with an empty <thead> (no column headers), plus a
    # text/plain fallback representation.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        """
<style type="text/css">
\n</style
>\n
<table id="T_aba0a_">
\n
<thead>
</thead>
\n
<tbody>
\n
<tr>
\n
<th id="T_aba0a_level0_row0" class="row_heading level0 row0">
Tumour (Positive)
</th>
\n
<td id="T_aba0a_row0_col0" class="data row0 col0">38</td>
\n
<td id="T_aba0a_row0_col1" class="data row0 col1">2</td>
\n
<td id="T_aba0a_row0_col2" class="data row0 col2">18</td>
\n
<td id="T_aba0a_row0_col3" class="data row0 col3">22</td>
\n
</tr>
\n
<tr>
\n
<th id="T_aba0a_level0_row1" class="row_heading level0 row1">
Non-Tumour (Negative)
</th>
\n
<td id="T_aba0a_row1_col0" class="data row1 col0">19</td>
\n
<td id="T_aba0a_row1_col1" class="data row1 col1">439</td>
\n
<td id="T_aba0a_row1_col2" class="data row1 col2">6</td>
\n
<td id="T_aba0a_row1_col3" class="data row1 col3">452</td>
\n
</tr>
\n
</tbody>
\n
</table>
\n
            """
                    ),
                    "text/plain": (
                        "lorep              hey                 by"
                        "e\nipsum          hi            very_long_word "
                        " hi\nfirst      second                    third "
                        "      \nbar   one     1                     1 "
                        "         2     4\n      10      3             "
                        "        4     -1\n   three  3               3 "
                        "          4     -1\nfoo   one     1           3 "
                        "          4     -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected terminal rendering: ANSI/OSC-8 escape codes spelled out
    # literally, including the hyperlink to the tempfile-backed HTML copy.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=380451;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                       "
        "                                        "
        "           \n                            "
        "                                        "
        "            \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1mTumour (Positive) \x1b["
        "0m   38     2    18    22               "
        "            \n       \x1b[1mNon-Tumour (Ne"
        "gative)\x1b[0m   19   439    6   452       "
        "             \n"
    )
    output = rich_notebook_output(code_cell)
    # Hyperlink ids are stripped from both sides — presumably generated per
    # run; remove_link_ids normalizes them before comparison.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_uneven_data_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with non square data."""
    # The first table row deliberately has one fewer <td> than the second,
    # producing a ragged (non-square) table for the renderer to handle.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        """
<style type="text/css">
\n</style
>\n
<table id="T_aba0a_">
\n
<thead>
</thead>
\n
<tbody>
\n
<tr>
\n
<th id="T_aba0a_level0_row0" class="row_heading level0 row0">
Tumour (Positive)
</th>
\n
<td id="T_aba0a_row0_col1" class="data row0 col1">2</td>
\n
<td id="T_aba0a_row0_col2" class="data row0 col2">18</td>
\n
<td id="T_aba0a_row0_col3" class="data row0 col3">22</td>
\n
</tr>
\n
<tr>
\n
<th id="T_aba0a_level0_row1" class="row_heading level0 row1">
Non-Tumour (Negative)
</th>
\n
<td id="T_aba0a_row1_col0" class="data row1 col0">19</td>
\n
<td id="T_aba0a_row1_col1" class="data row1 col1">439</td>
\n
<td id="T_aba0a_row1_col2" class="data row1 col2">6</td>
\n
<td id="T_aba0a_row1_col3" class="data row1 col3">452</td>
\n
</tr>
\n
</tbody>
\n
</table>
\n
            """
                    ),
                    "text/plain": (
                        "lorep              hey                 by"
                        "e\nipsum          hi            very_long_word "
                        " hi\nfirst      second                    third "
                        "      \nbar   one     1                     1 "
                        "         2     4\n      10      3             "
                        "        4     -1\n   three  3               3 "
                        "          4     -1\nfoo   one     1           3 "
                        "          4     -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected terminal rendering with literal ANSI/OSC-8 escapes; the ragged
    # first row renders with its three values only.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=330589;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                       "
        "                                        "
        "           \n                            "
        "                                        "
        "            \n\x1b[38;5;24"
        "7m[2]:\x1b[0m   \x1b[1mTumour (Positive) \x1b["
        "0m   2     18    22                     "
        "            \n       \x1b[1mNon-Tumour (Ne"
        "gative)\x1b[0m   19   439    6   452       "
        "             \n"
    )
    output = rich_notebook_output(code_cell)
    # Normalize per-run hyperlink ids on both sides before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_uneven_index_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a DataFrame with uneven index names."""
    # The first table row has no <th> index cell at all — only the second row
    # carries a row heading — exercising uneven index handling.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        """
<style type="text/css">
\n</style
>\n
<table id="T_aba0a_">
\n
<thead>
</thead>
\n
<tbody>
\n
<tr>
\n
<td id="T_aba0a_row0_col1" class="data row0 col1">2</td>
\n
<td id="T_aba0a_row0_col2" class="data row0 col2">18</td>
\n
<td id="T_aba0a_row0_col3" class="data row0 col3">22</td>
\n
</tr>
\n
<tr>
\n
<th id="T_aba0a_level0_row1" class="row_heading level0 row1">
Non-Tumour (Negative)
</th>
\n
<td id="T_aba0a_row1_col0" class="data row1 col0">19</td>
\n
<td id="T_aba0a_row1_col1" class="data row1 col1">439</td>
\n
<td id="T_aba0a_row1_col2" class="data row1 col2">6</td>
\n
<td id="T_aba0a_row1_col3" class="data row1 col3">452</td>
\n
</tr>
\n
</tbody>
\n
</table>
\n
            """
                    ),
                    "text/plain": (
                        "lorep              hey                 by"
                        "e\nipsum          hi            very_long_word "
                        " hi\nfirst      second                    third "
                        "      \nbar   one     1                     1 "
                        "         2     4\n      10      3             "
                        "        4     -1\n   three  3               3 "
                        "          4     -1\nfoo   one     1           3 "
                        "          4     -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected terminal rendering: the heading-less first row is printed
    # without a bold index label, the second with one.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=487619;file://{tempfile_path}0.html\x1b\\\x1b"
        "[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                       "
        "                                        "
        "           \n                            "
        "                                        "
        "            \n\x1b[38;5;24"
        "7m[2]:\x1b[0m                      2    18 "
        "   22                                   "
        "       \n     \x1b[1mNon-Tumour (Negative)\x1b"
        "[0m   19   439    6   452               "
        "     \n"
    )
    output = rich_notebook_output(code_cell)
    # Normalize per-run hyperlink ids on both sides before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_empty_html_dataframe(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a blank output when given an empty table."""
    # The HTML table has empty <thead> and <tbody>: the renderer should still
    # emit the hyperlink line but show a blank table area.
    code_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "mighty-oasis",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": (
                        """
<style type="text/css">
\n</style
>\n
<table id="T_aba0a_">
\n
<thead>
</thead>
\n
<tbody>
</tbody>
\n
</table>
\n
            """
                    ),
                    "text/plain": (
                        "lorep              hey                 by"
                        "e\nipsum          hi            very_long_word "
                        " hi\nfirst      second                    third "
                        "      \nbar   one     1                     1 "
                        "         2     4\n      10      3             "
                        "        4     -1\n   three  3               3 "
                        "          4     -1\nfoo   one     1           3 "
                        "          4     -1"
                    ),
                },
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected rendering: HTML hyperlink line followed by an empty body.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n\x1b[38;5;247m[2]:\x1b[0m  "
        f"\x1b]8;id=316923;file://{tempfile_path}0.html"
        "\x1b\\\x1b[94m🌐 Click to view"
        " HTML\x1b[0m\x1b]8;;\x1b\\                       "
        "                                        "
        "           \n                            "
        "                                        "
        "            \n\x1b[38;5;24"
        "7m[2]:\x1b[0m                              "
        "                                        "
        "       \n"
    )
    output = rich_notebook_output(code_cell)
    # Normalize per-run hyperlink ids on both sides before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_stderr_stream(rich_notebook_output: RichOutput) -> None:
    """It renders the stderr stream."""
    # A stream output with name "stderr": expected to render on a colored
    # (48;5;174 background) panel, unlike plain stdout.
    stderr_cell = {
        "cell_type": "code",
        "execution_count": 5,
        "id": "impressed-canadian",
        "metadata": {},
        "outputs": [
            {
                "name": "stderr",
                "output_type": "stream",
                "text": "<ipython-input-5-bc08279b5148>:2: UserWarning: Lorep\n"
                '  warnings.warn("Lorep")\n',
            }
        ],
        "source": "",
    }
    # Expected rendering with literal ANSI color escapes for the stderr panel.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[5]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b[48;5;174m    "
        "                                        "
        "                  \x1b[0m\n      "
        " \x1b[48;5;174m \x1b[0m\x1b[38;5;237;48;5;174m<ip"
        "ython-input-5-bc08279b5148>:2: UserWarni"
        "ng: Lorep                \x1b[0m\x1b[48;5;"
        "174m \x1b[0m\n  \x1b[48;5;174m \x1b[0m\x1b[38;5;2"
        '37;48;5;174m  warnings.warn("Lorep")    '
        "                                        "
        "   \x1b[0m\x1b[48;5;174m \x1b[0m\n  \x1b[48;5;17"
        "4m \x1b[0m\x1b[38;5;237;48;5;174m            "
        "                                        "
        "               \x1b[0m\x1b[48;5;174m \x1b[0m\n"
    )
    output = rich_notebook_output(stderr_cell)
    assert output == expected_output
def test_render_stream_stdout(rich_notebook_output: RichOutput) -> None:
    """It renders stdout."""
    # A minimal stdout stream output; rendered as plain, unstyled text.
    stdout_cell = {
        "cell_type": "code",
        "execution_count": 6,
        "id": "underlying-merit",
        "metadata": {},
        "outputs": [{"name": "stdout", "output_type": "stream", "text": "Lorep\n"}],
        "source": "",
    }
    # Expected rendering: the cell frame plus the bare text "Lorep".
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[6]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      Lorep          "
        "                                        "
        "                  \n"
    )
    output = rich_notebook_output(stdout_cell)
    assert output == expected_output
def test_render_error_traceback(rich_notebook_output: RichOutput) -> None:
    """It renders the traceback from an error."""
    # The traceback is stored as it appears in notebook JSON: a list of
    # strings already containing IPython's ANSI color escapes.
    traceback_cell = {
        "cell_type": "code",
        "execution_count": 7,
        "id": "brave-sheep",
        "metadata": {},
        "outputs": [
            {
                "ename": "ZeroDivisionError",
                "evalue": "division by zero",
                "output_type": "error",
                "traceback": [
                    "\x1b[1;31m----------------------------------------"
                    "-----------------------------------\x1b[0m",
                    "\x1b[1;31mZeroDivisionError\x1b[0m                "
                    "         Traceback (most recent call last)",
                    "\x1b[1;32m<ipython-input-7-9e1622b385b6>\x1b[0m in"
                    " \x1b[0;36m<module>\x1b[1;34m\x1b[0m\n\x1b[1;32m--"
                    "--> 1\x1b[1;33m \x1b[1;36m1\x1b[0m\x1b[1;33m/\x1b["
                    "0m\x1b[1;36m0\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m"
                    "\x1b[0m\x1b[0m\n\x1b[0m",
                    "\x1b[1;31mZeroDivisionError\x1b[0m: division by zero",
                ],
            }
        ],
        "source": "",
    }
    # Expected rendering: the traceback lines re-wrapped to the output width.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[7]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b[1;31m--------"
        "----------------------------------------"
        "-------------------------…\x1b[0m\n      \x1b[1"
        ";31mZeroDivisionError\x1b[0m               "
        "          Traceback (most recent call   "
        "      \n      last)                      "
        "                                        "
        "        \n      \x1b[1;32m<ipython-input-7-9e1622"
        "b385b6>\x1b[0m in \x1b[36m<module>\x1b[0m     "
        "                         \n      \x1b[1;32m--"
        "--> 1\x1b[0m\x1b[1;33m \x1b[0m\x1b[1;36m1\x1b[0m\x1b[1;33m"
        "/\x1b[0m\x1b[1;36m0\x1b[0m                     "
        "                                        "
        "\n                                       "
        "                                        "
        " \n      \x1b[1;31mZeroDivisionError\x1b[0m: di"
        "vision by zero                          "
        "        \n"
    )
    output = rich_notebook_output(traceback_cell)
    assert output == expected_output
def test_render_error_traceback_no_hang(
    rich_notebook_output: RichOutput, expected_output: str
) -> None:
    """It renders the traceback from an error without hanging."""
    # `expected_output` is injected by a fixture here — presumably a stored
    # snapshot, since the traceback below is far too long to inline; verify
    # against the fixture definition.  The traceback reproduces a real
    # CalledProcessError raised by a %%bash cell whose command is not found.
    traceback_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "id": "allied-contrary",
        "metadata": {},
        "outputs": [
            {
                "name": "stderr",
                "output_type": "stream",
                "text": "bash: line 1: ech: command not found\n",
            },
            {
                "ename": "CalledProcessError",
                "evalue": "Command 'b'ech\\n'' returned non-zero exit status 127.",
                "output_type": "error",
                "traceback": [
                    "\x1b[1;31m----------------------------------------"
                    "-----------------------------------\x1b[0m",
                    "\x1b[1;31mCalledProcessError\x1b[0m               "
                    "           Traceback (most recent call last)",
                    "\x1b[1;32m<ipython-input-4-4fb31ecfb364>\x1b[0m in"
                    " \x1b[0;36m<module>\x1b[1;34m\x1b[0m\n\x1b[1;32m--"
                    "--> 1\x1b[1;33m \x1b[0mget_ipython\x1b[0m\x1b[1;33"
                    "m(\x1b[0m\x1b[1;33m)\x1b[0m\x1b[1;33m.\x1b[0m\x1b["
                    "0mrun_cell_magic\x1b[0m\x1b[1;33m(\x1b[0m\x1b[1;34"
                    "m'bash'\x1b[0m\x1b[1;33m,\x1b[0m \x1b[1;34m''\x1b["
                    "0m\x1b[1;33m,\x1b[0m \x1b[1;34m'ech\\n'\x1b[0m\x1b"
                    "[1;33m)\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m"
                    "\x1b[0m\n\x1b[0m",
                    "\x1b[1;32m~/.pyenv/versions/scratch/lib/python3.8/"
                    "site-packages/IPython/core/interactiveshell.py\x1b"
                    "[0m in \x1b[0;36mrun_cell_magic\x1b[1;34m(self, "
                    "magic_name, line, cell)\x1b[0m\n\x1b[0;32m   2389"
                    "\x1b[0m             \x1b[1;32mwith\x1b[0m \x1b"
                    "[0mself\x1b[0m\x1b[1;33m.\x1b[0m\x1b[0mbuiltin_tra"
                    "p\x1b[0m\x1b[1;33m:\x1b[0m\x1b[1;33m\x1b[0m\x1b"
                    "[1;33m\x1b[0m\x1b[0m\n\x1b[0;32m   2390\x1b[0m "
                    "                \x1b[0margs\x1b[0m \x1b[1;33m=\x1b[0m"
                    " \x1b[1;33m(\x1b[0m\x1b[0mmagic_arg_s\x1b[0m\x1b"
                    "[1;33m,\x1b[0m \x1b[0mcell\x1b[0m\x1b[1;33m)\x1b"
                    "[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[0m\n\x1b"
                    "[1;32m-> 2391\x1b[1;33m                 \x1b"
                    "[0mresult\x1b[0m \x1b[1;33m=\x1b[0m \x1b[0mfn\x1b"
                    "[0m\x1b[1;33m(\x1b[0m\x1b[1;33m*\x1b[0m\x1b[0margs"
                    "\x1b[0m\x1b[1;33m,\x1b[0m \x1b[1;33m**\x1b[0m\x1b"
                    "[0mkwargs\x1b[0m\x1b[1;33m)\x1b[0m\x1b[1;33m\x1b"
                    "[0m\x1b[1;33m\x1b[0m\x1b[0m\n\x1b[0m\x1b[0;32m   "
                    "2392\x1b[0m                 \x1b[1;32mreturn\x1b[0m "
                    "\x1b[0mresult\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m"
                    "\x1b[0m\x1b[0m\n\x1b[0;32m   2393\x1b[0m "
                    "\x1b[1;33m\x1b[0m\x1b[0m\n",
                    "\x1b[1;32m~/.pyenv/versions/scratch/lib/python3.8/"
                    "site-packages/IPython/core/magics/script.py\x1b[0m"
                    " in \x1b[0;36mnamed_script_magic\x1b[1;34m(line,"
                    " cell)\x1b[0m\n\x1b[0;32m    140\x1b[0m          "
                    "   \x1b[1;32melse\x1b[0m\x1b[1;33m:\x1b[0m\x1b"
                    "[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[0m\n\x1b[0;32m"
                    "    141\x1b[0m                 \x1b[0mline\x1b[0m"
                    " \x1b[1;33m=\x1b[0m \x1b[0mscript\x1b[0m\x1b[1;33m"
                    "\x1b[0m\x1b[1;33m\x1b[0m\x1b[0m\n\x1b[1;32m--> 142"
                    "\x1b[1;33m             \x1b[1;32mreturn\x1b[0m"
                    " \x1b[0mself\x1b[0m\x1b[1;33m.\x1b[0m\x1b"
                    "[0mshebang\x1b[0m\x1b[1;33m(\x1b[0m\x1b[0mline\x1b"
                    "[0m\x1b[1;33m,\x1b[0m \x1b[0mcell\x1b[0m\x1b"
                    "[1;33m)\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m"
                    "\x1b[0m\n\x1b[0m\x1b[0;32m    143\x1b[0m \x1b"
                    "[1;33m\x1b[0m\x1b[0m\n\x1b[0;32m    144\x1b[0m "
                    "    \x1b[1;31m# write a basic docstring:\x1b[0m"
                    "\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b"
                    "[0m\x1b[0m\n",
                    "\x1b[1;32m<decorator-gen-103>\x1b[0m in \x1b[0;36m"
                    "shebang\x1b[1;34m(self, line, cell)\x1b[0m\n",
                    "\x1b[1;32m~/.pyenv/versions/scratch/lib/python3.8"
                    "/site-packages/IPython/core/magic.py\x1b[0m in "
                    "\x1b[0;36m<lambda>\x1b[1;34m(f, *a, **k)\x1b[0m\n"
                    "\x1b[0;32m    185\x1b[0m     \x1b[1;31m# but it's"
                    " overkill for just that one bit of state.\x1b[0m"
                    "\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b"
                    "[0m\x1b[0m\n\x1b[0;32m    186\x1b[0m     \x1b[1;32"
                    "mdef\x1b[0m \x1b[0mmagic_deco\x1b[0m\x1b[1;33m("
                    "\x1b[0m\x1b[0marg\x1b[0m\x1b[1;33m)\x1b[0m\x1b"
                    "[1;33m:\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m"
                    "\x1b[0m\n\x1b[1;32m--> 187\x1b[1;33m         \x1b"
                    "[0mcall\x1b[0m \x1b[1;33m=\x1b[0m \x1b[1;32mlambda"
                    "\x1b[0m \x1b[0mf\x1b[0m\x1b[1;33m,\x1b[0m \x1b"
                    "[1;33m*\x1b[0m\x1b[0ma\x1b[0m\x1b[1;33m,\x1b[0m "
                    "\x1b[1;33m**\x1b[0m\x1b[0mk\x1b[0m\x1b[1;33m:"
                    "\x1b[0m \x1b[0mf\x1b[0m\x1b[1;33m(\x1b[0m\x1b"
                    "[1;33m*\x1b[0m\x1b[0ma\x1b[0m\x1b[1;33m,\x1b[0m "
                    "\x1b[1;33m**\x1b[0m\x1b[0mk\x1b[0m\x1b[1;33m)\x1b"
                    "[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[0m\n\x1b"
                    "[0m\x1b[0;32m    188\x1b[0m \x1b[1;33m\x1b[0m\x1b"
                    "[0m\n\x1b[0;32m    189\x1b[0m     \x1b[1;32mif"
                    "\x1b[0m \x1b[0mcallable\x1b[0m\x1b[1;33m(\x1b[0m"
                    "\x1b[0marg\x1b[0m\x1b[1;33m)\x1b[0m\x1b[1;33m:\x1b"
                    "[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[0m\n",
                    "\x1b[1;32m~/.pyenv/versions/scratch/lib/python3.8"
                    "/site-packages/IPython/core/magics/script.py\x1b"
                    "[0m in \x1b[0;36mshebang\x1b[1;34m(self, line, "
                    "cell)\x1b[0m\n\x1b[0;32m    243\x1b[0m           "
                    "  \x1b[0msys\x1b[0m\x1b[1;33m.\x1b[0m\x1b[0mstderr"
                    "\x1b[0m\x1b[1;33m.\x1b[0m\x1b[0mflush\x1b[0m\x1b"
                    "[1;33m(\x1b[0m\x1b[1;33m)\x1b[0m\x1b[1;33m\x1b[0m"
                    "\x1b[1;33m\x1b[0m\x1b[0m\n\x1b[0;32m    244\x1b[0m"
                    "         \x1b[1;32mif\x1b[0m \x1b[0margs\x1b[0m"
                    "\x1b[1;33m.\x1b[0m\x1b[0mraise_error\x1b[0m \x1b"
                    "[1;32mand\x1b[0m \x1b[0mp\x1b[0m\x1b[1;33m.\x1b[0m"
                    "\x1b[0mreturncode\x1b[0m\x1b[1;33m!=\x1b[0m\x1b"
                    "[1;36m0\x1b[0m\x1b[1;33m:\x1b[0m\x1b[1;33m\x1b[0m"
                    "\x1b[1;33m\x1b[0m\x1b[0m\n\x1b[1;32m--> 245\x1b"
                    "[1;33m             \x1b[1;32mraise\x1b[0m \x1b[0m"
                    "CalledProcessError\x1b[0m\x1b[1;33m(\x1b[0m\x1b"
                    "[0mp\x1b[0m\x1b[1;33m.\x1b[0m\x1b[0mreturncode\x1b"
                    "[0m\x1b[1;33m,\x1b[0m \x1b[0mcell\x1b[0m\x1b[1;33m"
                    ",\x1b[0m \x1b[0moutput\x1b[0m\x1b[1;33m=\x1b[0m"
                    "\x1b[0mout\x1b[0m\x1b[1;33m,\x1b[0m \x1b[0mstderr"
                    "\x1b[0m\x1b[1;33m=\x1b[0m\x1b[0merr\x1b[0m\x1b"
                    "[1;33m)\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m"
                    "\x1b[0m\n\x1b[0m\x1b[0;32m    246\x1b[0m \x1b"
                    "[1;33m\x1b[0m\x1b[0m\n\x1b[0;32m    247\x1b[0m "
                    "    \x1b[1;32mdef\x1b[0m \x1b[0m_run_script\x1b[0m"
                    "\x1b[1;33m(\x1b[0m\x1b[0mself\x1b[0m\x1b[1;33m,"
                    "\x1b[0m \x1b[0mp\x1b[0m\x1b[1;33m,\x1b[0m \x1b"
                    "[0mcell\x1b[0m\x1b[1;33m,\x1b[0m \x1b[0mto_close"
                    "\x1b[0m\x1b[1;33m)\x1b[0m\x1b[1;33m:\x1b[0m\x1b"
                    "[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b[0m\n",
                    "\x1b[1;31mCalledProcessError\x1b[0m: Command "
                    "'b'ech\\n'' returned non-zero exit status 127.",
                ],
            },
        ],
        "source": "%%bash\nech",
    }
    output = rich_notebook_output(traceback_cell)
    assert output == expected_output
def test_render_debugger_output(
    rich_notebook_output: RichOutput, expected_output: str
) -> None:
    """It renders the output from the debugger."""
    # `expected_output` is injected by a fixture — presumably a stored
    # snapshot; verify against the fixture definition.  The cell interleaves
    # stdout stream output with "stdin" stream entries recorded from an
    # interactive ipdb session (`%debug`).
    debugger_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "id": "fa534da6-88ac-43bc-b00f-cc68ace69fb7",
        "metadata": {},
        "outputs": [
            {
                "name": "stdout",
                "output_type": "stream",
                "text": "> \x1b[1;32m<ipython-input-4-a2d401806d89>\x1b"
                "[0m(1)\x1b[0;36m<module>\x1b[1;34m()\x1b[0m\n\x1b"
                "[1;32m----> 1 \x1b[1;33m\x1b[0m_jupyterlab_variable"
                "inspector_dict_list\x1b[0m\x1b[1;33m(\x1b[0m\x1b"
                "[1;33m)\x1b[0m\x1b[1;33m\x1b[0m\x1b[1;33m\x1b[0m\x1b"
                "[0m\n\x1b[0m\n",
            },
            {"name": "stdin", "output_type": "stream", "text": "ipdb> ll\n"},
            {
                "name": "stdout",
                "output_type": "stream",
                "text": "\x1b[1;32m----> 1 \x1b[1;33m\x1b[0m_"
                "jupyterlab_variableinspector_dict_list\x1b[0m\x1b"
                "[1;33m(\x1b[0m\x1b[1;33m)\x1b[0m\x1b[1;33m\x1b[0m\x1b"
                "[1;33m\x1b[0m\x1b[0m\n\x1b[0m\n",
            },
            {"name": "stdin", "output_type": "stream", "text": "ipdb> sticky\n"},
            {
                "name": "stdout",
                "output_type": "stream",
                "text": "*** NameError: name 'sticky' is not defined\n",
            },
            {"name": "stdin", "output_type": "stream", "text": "ipdb> q\n"},
        ],
        "source": "%debug",
    }
    output = rich_notebook_output(debugger_output_cell)
    assert output == expected_output
def test_render_result(rich_notebook_output: RichOutput) -> None:
    """It renders a result."""
    # Simplest execute_result case: a bare text/plain value of "3".
    output_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "intense-middle",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "3"},
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected: the framed empty source cell, then "[2]:" and the value.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n\x1b[38;5;247m[2]:\x1b[0m  "
        "3                                       "
        "                            \n"
    )
    output = rich_notebook_output(output_cell)
    assert output == expected_output
def test_render_unknown_data_format(rich_notebook_output: RichOutput) -> None:
    """It passes on rendering an unknown data format."""
    # The only data key is unrecognized, so the renderer emits just the cell
    # frame with no output section at all.
    output_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "intense-middle",
        "metadata": {},
        "outputs": [
            {
                "data": {"unknown_format": "3"},
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected: only the framed (empty) source cell.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n"
    )
    output = rich_notebook_output(output_cell)
    assert output == expected_output
def test_render_error_no_traceback(rich_notebook_output: RichOutput) -> None:
    """It skips rendering an error with no traceback."""
    # An error output with an empty "traceback" list: the renderer reserves
    # blank output space but prints nothing.
    traceback_cell = {
        "cell_type": "code",
        "execution_count": 7,
        "id": "brave-sheep",
        "metadata": {},
        "outputs": [
            {
                "ename": "ZeroDivisionError",
                "evalue": "division by zero",
                "output_type": "error",
                "traceback": [],
            }
        ],
        "source": "",
    }
    # Expected: the cell frame followed by blank lines only.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[7]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n                     "
        "                                        "
        "                   \n"
    )
    output = rich_notebook_output(traceback_cell)
    assert output == expected_output
def test_render_markdown_output(rich_notebook_output: RichOutput) -> None:
    """It renders a markdown output."""
    # display_data with a text/markdown payload; the plain-text repr is the
    # fallback and should not be used.
    markdown_output_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "declared-stevens",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/markdown": "**Lorep** _ipsum_\n",
                    "text/plain": "<IPython.core.display.Markdown object>",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "%%markdown\n**Lorep** _ipsum_",
    }
    # Expected: the syntax-highlighted source plus bold/italic rendered text
    # (\x1b[1m... and \x1b[3m... in the output body).
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │ \x1b[38;2;137;221;25"
        "5;49m%%\x1b[0m\x1b[38;2;187;128;179;49mmarkdow"
        "n\x1b[0m                                   "
        "                                 │\n     │ \x1b[38"
        ";2;255;83;112;49m**Lorep**\x1b[0m\x1b[38;2;238"
        ";255;255;49m \x1b[0m\x1b[38;2;137;221;255;49m_"
        "ipsum_\x1b[0m                              "
        "                            │\n     ╰───────"
        "────────────────────────────────────────"
        "──────────────────────────╯\n            "
        "                                        "
        "                            \n      \x1b[1mL"
        "orep\x1b[0m \x1b[3mipsum\x1b[0m                  "
        "                                        "
        "       \n"
    )
    output = rich_notebook_output(markdown_output_cell)
    assert output == expected_output
def test_render_unknown_display_data(rich_notebook_output: RichOutput) -> None:
    """It skips rendering an unknown data display type."""
    # display_data whose only key is unrecognized: nothing is rendered
    # beyond the cell frame.
    unknown_display_data_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "declared-stevens",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "unknown_data_type": "**Lorep** _ipsum_\n",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected: only the framed (empty) source cell.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n"
    )
    output = rich_notebook_output(unknown_display_data_cell)
    assert output == expected_output
def test_render_json_output(rich_notebook_output: RichOutput) -> None:
    """It renders a JSON output."""
    # application/json payload given as a real dict (notebook JSON stores it
    # that way); expected output is the syntax-highlighted serialized form.
    json_output_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "behind-authentication",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "application/json": {"one": 1, "three": {"a": "b"}, "two": 2},
                    "text/plain": "<IPython.core.display.JSON object>",
                },
                "execution_count": 1,
                "metadata": {"application/json": {"expanded": False, "root": "root"}},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    # Expected: each JSON token wrapped in truecolor (38;2;r;g;b) escapes.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[1]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n\x1b[38;5;247m[1]:\x1b[0m  "
        "\x1b[38;2;137;221;255;49m{\x1b[0m\x1b[38;2;255;83"
        ';112;49m"one"\x1b[0m\x1b[38;2;137;221;255;49m:'
        "\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;24"
        "7;140;108;49m1\x1b[0m\x1b[38;2;137;221;255;49m"
        ",\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;2"
        '55;83;112;49m"three"\x1b[0m\x1b[38;2;137;221;2'
        "55;49m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b["
        "38;2;137;221;255;49m{\x1b[0m\x1b[38;2;255;83;1"
        '12;49m"a"\x1b[0m\x1b[38;2;137;221;255;49m:\x1b[0m'
        "\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;195;23"
        '2;141;49m"b"\x1b[0m\x1b[38;2;137;221;255;49m},'
        "\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;25"
        '5;83;112;49m"two"\x1b[0m\x1b[38;2;137;221;255;'
        "49m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;"
        "2;247;140;108;49m2\x1b[0m\x1b[38;2;137;221;255"
        ";49m}\x1b[0m                               "
        "           \n"
    )
    output = rich_notebook_output(json_output_cell)
    assert output == expected_output
def test_render_latex_output(rich_notebook_output: RichOutput) -> None:
    """It renders LaTeX output.

    The text/latex payload is a display-math block; when unicode is
    enabled (the default here) it is rendered to plain unicode math
    (e.g. ``α∼Normal``) instead of the text/plain fallback.
    """
    latex_output_cell = {
        "cell_type": "code",
        "execution_count": 15,
        "id": "sapphire-harmony",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/latex": "$$\n\\alpha \\sim \\text{Normal}"
                    " \\\\\n\\beta \\sim \\text{Normal} \\\\\n\\epsilon"
                    " \\sim \\text{Half-Cauchy} \\\\\n\\mu = \\alpha +"
                    " X\\beta \\\\\ny \\sim \\text{Normal}(\\mu, \\epsilon)\n$$\n",
                    "text/plain": "<IPython.core.display.Latex object>",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected: the equations converted to centered unicode text.
    expected_output = (
        "      ╭─────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[15]:\x1b[0m │               "
        "                                        "
        "       │\n      ╰────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n                     "
        "                                        "
        "                   \n                    "
        "                                        "
        "                    \n       α∼Normal    "
        "                                        "
        "                     \n       β∼Norma"
        "l                                       "
        "                      \n       ϵ∼Half"
        "-Cauchy                                 "
        "                       \n       μ = α"
        " + Xβ                                   "
        "                        \n       y ∼N"
        "ormal(μ, ϵ)                             "
        "                         \n           "
        "                                        "
        "                             \n        "
        "                                        "
        "                                \n"
    )
    output = rich_notebook_output(latex_output_cell)
    # actual == expected, matching the operand order used by every other
    # assertion in this module (was previously reversed).
    assert output == expected_output
def test_render_invalid_latex_output(rich_notebook_output: RichOutput) -> None:
    """It renders invalid LaTeX output.

    The payload is not valid LaTeX (``\\sef`` is not a command); the
    renderer is expected to fall back to showing the raw text rather
    than raising.
    """
    latex_output_cell = {
        "cell_type": "code",
        "execution_count": 15,
        "id": "sapphire-harmony",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/latex": r"garbledmess \sef{}",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected: the frame plus the unparsed text passed through.
    expected_output = (
        "      ╭─────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[15]:\x1b[0m │               "
        "                                        "
        "       │\n      ╰────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n       garbledmess   "
        "                                        "
        "                   \n"
    )
    output = rich_notebook_output(latex_output_cell)
    # actual == expected, matching the operand order used by every other
    # assertion in this module (was previously reversed).
    assert output == expected_output
def test_render_latex_output_no_unicode(rich_notebook_output: RichOutput) -> None:
    """It does not render LaTeX output if unicode is False.

    With ``unicode=False`` the renderer cannot emit the unicode math
    conversion, so it falls back to the text/plain representation.
    """
    latex_output_cell = {
        "cell_type": "code",
        "execution_count": 15,
        "id": "sapphire-harmony",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/latex": "$$\n\\alpha \\sim \\text{Normal}"
                    " \\\\\n\\beta \\sim \\text{Normal} \\\\\n\\epsilon"
                    " \\sim \\text{Half-Cauchy} \\\\\n\\mu = \\alpha +"
                    " X\\beta \\\\\ny \\sim \\text{Normal}(\\mu, \\epsilon)\n$$\n",
                    "text/plain": "<IPython.core.display.Latex object>",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected: the text/plain repr, not the rendered math.
    expected_output = (
        "      ╭─────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[15]:\x1b[0m │               "
        "                                        "
        "       │\n      ╰────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n       <IPython.core."
        "display.Latex object>                   "
        "                   \n"
    )
    output = rich_notebook_output(latex_output_cell, unicode=False)
    # actual == expected, matching the operand order used by every other
    # assertion in this module (was previously reversed).
    assert output == expected_output
def test_render_text_display_data(rich_notebook_output: RichOutput) -> None:
    """It renders text display data."""
    # display_data with only a text/plain payload: rendered verbatim.
    text_display_data_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "declared-stevens",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/plain": "Lorep ipsum",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected: the cell frame followed by the bare text.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      Lorep ipsum    "
        "                                        "
        "                  \n"
    )
    output = rich_notebook_output(text_display_data_cell)
    assert output == expected_output
def test_pdf_emoji_output(rich_notebook_output: RichOutput) -> None:
    """It renders an emoji for PDF output."""
    # An application/pdf payload (content irrelevant, so empty); with
    # unicode=True a 📄 placeholder is shown instead of the document.
    pdf_output_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "declared-stevens",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "application/pdf": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected: the cell frame plus the page emoji.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      📄              "
        "                                        "
        "                 \n"
    )
    output = rich_notebook_output(pdf_output_cell, unicode=True)
    assert output == expected_output
def test_pdf_nerd_output(rich_notebook_output: RichOutput) -> None:
    """It renders a nerd font icon for PDF output."""
    # Same PDF cell as the emoji test, but nerd_font=True selects the
    # nerd-font glyph \uf1c1 (file-pdf icon) as the placeholder.
    pdf_output_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "declared-stevens",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "application/pdf": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected: the cell frame plus the nerd-font PDF glyph.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \uf1c1             "
        "                                        "
        "                  \n"
    )
    output = rich_notebook_output(pdf_output_cell, nerd_font=True)
    assert output == expected_output
def test_pdf_no_unicode_no_nerd(rich_notebook_output: RichOutput) -> None:
    """It does not render a PDF icon if no nerd font or unicode."""
    # With both placeholder styles disabled there is nothing to show for a
    # PDF payload: only the cell frame is emitted.
    pdf_output_cell = {
        "cell_type": "code",
        "execution_count": 2,
        "id": "declared-stevens",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "application/pdf": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected: the framed (empty) source cell only.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[2]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n"
    )
    output = rich_notebook_output(pdf_output_cell, nerd_font=False, unicode=False)
    assert output == expected_output
def test_vega_output(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a hyperlink to a rendered Vega plot."""
    # A complete Vega v5 bar-chart spec (the Vega "bar chart" example);
    # the renderer is expected to write it to a tempfile-backed HTML page
    # and print an OSC-8 hyperlink to it rather than the chart itself.
    vega_output_cell = {
        "cell_type": "code",
        "execution_count": 3,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vega.v5+json": {
                        "$schema": "https://vega.github.io/schema/vega/v5.0.json",
                        "axes": [
                            {"orient": "bottom", "scale": "xscale"},
                            {"orient": "left", "scale": "yscale"},
                        ],
                        "data": [
                            {
                                "name": "table",
                                "values": [
                                    {"amount": 28, "category": "A"},
                                    {"amount": 55, "category": "B"},
                                    {"amount": 43, "category": "C"},
                                    {"amount": 91, "category": "D"},
                                    {"amount": 81, "category": "E"},
                                    {"amount": 53, "category": "F"},
                                    {"amount": 19, "category": "G"},
                                    {"amount": 87, "category": "H"},
                                ],
                            }
                        ],
                        "height": 200,
                        "marks": [
                            {
                                "encode": {
                                    "enter": {
                                        "width": {"band": 1, "scale": "xscale"},
                                        "x": {"field": "category", "scale": "xscale"},
                                        "y": {"field": "amount", "scale": "yscale"},
                                        "y2": {"scale": "yscale", "value": 0},
                                    },
                                    "hover": {"fill": {"value": "red"}},
                                    "update": {"fill": {"value": "steelblue"}},
                                },
                                "from": {"data": "table"},
                                "type": "rect",
                            },
                            {
                                "encode": {
                                    "enter": {
                                        "align": {"value": "center"},
                                        "baseline": {"value": "bottom"},
                                        "fill": {"value": "#333"},
                                    },
                                    "update": {
                                        "fillOpacity": [
                                            {"test": "datum === tooltip", "value": 0},
                                            {"value": 1},
                                        ],
                                        "text": {"signal": "tooltip.amount"},
                                        "x": {
                                            "band": 0.5,
                                            "scale": "xscale",
                                            "signal": "tooltip.category",
                                        },
                                        "y": {
                                            "offset": -2,
                                            "scale": "yscale",
                                            "signal": "tooltip.amount",
                                        },
                                    },
                                },
                                "type": "text",
                            },
                        ],
                        "padding": 5,
                        "scales": [
                            {
                                "domain": {"data": "table", "field": "category"},
                                "name": "xscale",
                                "padding": 0.05,
                                "range": "width",
                                "round": True,
                                "type": "band",
                            },
                            {
                                "domain": {"data": "table", "field": "amount"},
                                "name": "yscale",
                                "nice": True,
                                "range": "height",
                            },
                        ],
                        "signals": [
                            {
                                "name": "tooltip",
                                "on": [
                                    {"events": "rect:mouseover", "update": "datum"},
                                    {"events": "rect:mouseout", "update": "{}"},
                                ],
                                "value": {},
                            }
                        ],
                        "width": 400,
                    },
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected: nerd-font chart icon (\uf080) + "Click to view Vega chart"
    # hyperlink pointing at the tempfile path.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[3]:\x1b[0m │                "
        "                                        "
        "       │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "                  \n      \x1b]8;id=16281369"
        f"58.012196-350876;file://{tempfile_path}0.html\x1b\\\x1b[94m\uf080"
        " Click to v"
        "iew Vega chart\x1b[0m\x1b]8;;\x1b\\              "
        "                              \n"
    )
    output = rich_notebook_output(
        vega_output_cell,
        nerd_font=True,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
    )
    # Normalize per-run hyperlink ids on both sides before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_invalid_vega_output(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a hyperlink to an invalid Vega plot."""
    # Cell whose Vega v5 payload is not a valid spec; the renderer should
    # still write an HTML file and emit a hyperlink rather than crash.
    vega_output_cell = {
        "cell_type": "code",
        "execution_count": 3,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vega.v5+json": {
                        "invalid": "no",
                    },
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected terminal output: execution-count box plus an OSC-8
    # hyperlink (nerd-font icon + hint text) to the written HTML file.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[3]:\x1b[0m │                "
        "                                        "
        "                 │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "          \n      \x1b]8;id=16281369"
        f"58.012196-350876;file://{tempfile_path}0.html\x1b\\\x1b[94m\uf080"
        " Click to v"
        "iew Vega chart\x1b[0m\x1b]8;;\x1b\\              "
        "                                  \n"
    )
    output = rich_notebook_output(
        vega_output_cell,
        nerd_font=True,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
    )
    # Hyperlink ids are randomized per run; strip before comparing.
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_vegalite_output(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
    adjust_for_fallback: Callable[[str, int], str],
) -> None:
    """It renders a hyperlink to a rendered Vega plot."""
    # Valid Vega-Lite v4 bar-chart spec; the empty "image/png" entry
    # triggers the image fallback line appended by adjust_for_fallback.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected output: execution-count box, then a nerd-font hyperlink
    # to the temp HTML file holding the rendered chart.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                "
        "                                        "
        "                 │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "          \n      \x1b]8;id=304082;f"
        f"ile://{tempfile_path}0.h"
        "tml\x1b\\\x1b[94m\uf080 Click to view Vega chart\x1b[0m"
        "\x1b]8;;\x1b\\                                 "
        "               \n"
    )
    # Account for the extra image-fallback line produced by the PNG entry.
    adjusted_expected_output = adjust_for_fallback(expected_output, 1)
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=True,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
    )
    assert remove_link_ids(output) == remove_link_ids(adjusted_expected_output)
def test_vegalite_output_no_hints(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
    adjust_for_fallback: Callable[[str, int], str],
) -> None:
    """It renders a hyperlink to a Vega plot without hints."""
    # Same bar-chart spec as test_vegalite_output; only the renderer
    # option hide_hyperlink_hints differs.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # With hints hidden, the link shows only the nerd-font icon — no
    # "Click to view Vega chart" text.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                "
        "                                        "
        "                 │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "          \n      \x1b]8;id=90200;fi"
        f"le://{tempfile_path}0.ht"
        "ml\x1b\\\x1b[94m\uf080 \x1b[0m\x1b]8;;\x1b\\                 "
        "                                        "
        "  \n"
    )
    adjusted_expected_output = adjust_for_fallback(expected_output, 1)
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=True,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=True,
    )
    assert remove_link_ids(output) == remove_link_ids(adjusted_expected_output)
def test_vegalite_output_no_nerd_font(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
    adjust_for_fallback: Callable[[str, int], str],
) -> None:
    """It renders a hyperlink to a Vega plot without nerd fonts."""
    # Same bar-chart spec as test_vegalite_output; nerd_font=False below
    # switches the link icon from a nerd-font glyph to the 📊 emoji.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected output uses the unicode 📊 icon instead of \uf080.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                "
        "                                        "
        "                 │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "          \n      \x1b]8;id=2129;fil"
        f"e://{tempfile_path}0.htm"
        "l\x1b\\\x1b[94m📊 Click to view Vega chart\x1b[0m\x1b]"
        "8;;\x1b\\                                   "
        "            \n"
    )
    adjusted_expected_output = adjust_for_fallback(expected_output, 1)
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
    )
    assert remove_link_ids(output) == remove_link_ids(adjusted_expected_output)
def test_vegalite_output_no_nerd_font_no_unicode(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders a hyperlink to plot without nerd fonts or unicode."""
    # Same bar-chart spec; with both nerd_font and unicode disabled the
    # link has no icon at all and the PNG fallback renders as plain
    # "Image" text (no adjust_for_fallback fixture needed here).
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                "
        "                                        "
        "                 │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "          \n      \x1b]8;id=16281372"
        f"55.127551-234092;file://{tempfile_path}0.html\x1b\\\x1b[94mClick to vie"
        "w Vega chart\x1b[0m\x1b]8;;\x1b\\                 "
        "                               \n"
        "                                        "
        "                                        \n      \x1b[38;2;187;134;252mImage    "
        "                                        "
        "                          \x1b[0m\n"
    )
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
        unicode=False,
    )
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_vegalite_output_no_files(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
    adjust_for_fallback: Callable[[str, int], str],
) -> None:
    """It renders a message representing a Vega plot."""
    # With files=False no HTML file may be written; the renderer should
    # fall back to a plain "📊 Vega chart" message.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {},
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                "
        "                                        "
        "                 │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "          \n      📊 Vega chart          "
        "                                        "
        "          \n"
    )
    adjusted_expected_output = adjust_for_fallback(expected_output, 1)
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=False,
        hyperlinks=True,
        hide_hyperlink_hints=False,
        unicode=True,
    )
    # Verify no chart file leaked to disk. The loop body only runs if a
    # matching file exists, hence the no-cover pragma.
    tempfile_directory = tempfile_path.parent
    for file in tempfile_directory.glob(
        f"{tempfile_path.stem}*.html"
    ):  # pragma: no cover
        assert not file.exists()
    assert remove_link_ids(output) == remove_link_ids(adjusted_expected_output)
def test_write_vega_output(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    parse_link_filepath: Callable[[str], Path],
) -> None:
    """It writes the Vega plot to a file."""
    # Bar-chart spec whose serialized form must appear verbatim inside
    # the HTML file the renderer writes.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Full expected HTML document: CDN script tags for vega, vega-lite,
    # vega-embed and justcharts, plus the embedded JSON spec.
    expected_contents = (
        '<html>\n<head>\n    <script src="https://c'
        'dn.jsdelivr.net/npm/vega@5"></script>\n  '
        '  <script src="https://cdn.jsdelivr.net/'
        'npm/vega-lite@5"></script>\n    <script s'
        'rc="https://cdn.jsdelivr.net/npm/vega-em'
        'bed@6"></script>\n    <script src="https:'
        "//cdn.jsdelivr.net/gh/koaning/justcharts"
        '/justcharts.js"></script>\n    <title>Veg'
        "a chart</title>\n</head>\n<body>\n    <vega"
        'chart style="width: 100%">\n        {"$sc'
        'hema": "https://vega.github.io/schema/ve'
        'ga-lite/v4.json", "data": {"values": [{"'
        'a": "A", "b": 28}, {"a": "B", "b": 55}, '
        '{"a": "C", "b": 43}, {"a": "D", "b": 91}'
        ', {"a": "E", "b": 81}, {"a": "F", "b": 5'
        '3}, {"a": "G", "b": 19}, {"a": "H", "b":'
        ' 87}, {"a": "I", "b": 52}]}, "descriptio'
        'n": "A simple bar chart with embedded da'
        'ta.", "encoding": {"x": {"field": "a", "'
        'type": "ordinal"}, "y": {"field": "b", "'
        'type": "quantitative"}}, "mark": "bar"}\n'
        "    </vegachart>\n</body>\n<html></html>\n<"
        "/html>"
    )
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
        unicode=False,
    )
    # Recover the written file's path from the hyperlink in the output,
    # then compare its contents against the expected HTML.
    tempfile_path = parse_link_filepath(output)
    file_contents = tempfile_path.read_text()
    assert file_contents == expected_contents
def test_vega_no_icon_no_message(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders subject text when no icons or messages are used."""
    # With unicode disabled (no icon) and hints hidden (no message), the
    # hyperlink falls back to the bare subject text "Vega chart".
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                "
        "                                        "
        "                 │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "          \n      \x1b]8;id=16281373"
        f"35.10625-550844;file://{tempfile_path}0.html\x1b\\\x1b[94mVega"
        " chart\x1b[0"
        "m\x1b]8;;\x1b\\                                "
        "                              \n"
        "                                        "
        "                                        \n      \x1b[38;2;187;134;252mImage    "
        "                                        "
        "                          \x1b[0m\n"
    )
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=True,
        unicode=False,
    )
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_vega_no_hyperlink(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    tempfile_path: Path,
    adjust_for_fallback: Callable[[str, int], str],
) -> None:
    """It renders the file path when no hyperlinks are allowed."""
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 4,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vegalite.v4+json": {
                        "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
                        "data": {
                            "values": [
                                {"a": "A", "b": 28},
                                {"a": "B", "b": 55},
                                {"a": "C", "b": 43},
                                {"a": "D", "b": 91},
                                {"a": "E", "b": 81},
                                {"a": "F", "b": 53},
                                {"a": "G", "b": 19},
                                {"a": "H", "b": 87},
                                {"a": "I", "b": 52},
                            ]
                        },
                        "description": "A simple bar chart with embedded data.",
                        "encoding": {
                            "x": {"field": "a", "type": "ordinal"},
                            "y": {"field": "b", "type": "quantitative"},
                        },
                        "mark": "bar",
                    },
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Reproduce the renderer's word-wrapping of the icon + file path.
    # line_width is the usable width: 80-column console minus the
    # 6-character left gutter.
    tempfile_text = f"📊 file://{tempfile_path}0.html"
    line_width = 80 - 6
    # NOTE(review): the two branches mirror rich's wrapping — near the
    # boundary it breaks at the space after the icon, otherwise it hard
    # wraps at line_width; the ±1 offsets presumably compensate for the
    # double-width 📊 glyph — confirm against the renderer's wrap logic.
    if line_width - 1 < len(tempfile_text) < line_width + 2:
        first_line, second_line = tempfile_text.split(maxsplit=1)
        wrapped_file_path = "\n".join(
            (f"{'':>6}{first_line:<73}", f"{'':>6}{second_line:<74}")
        )
    else:
        wrapped_file_path = "\n".join(
            [f"{'':>6}{tempfile_text[:line_width - 1]:<73}"]
            + [
                f"{'':>6}{tempfile_text[i: i + line_width]:<74}"
                for i in range(line_width - 1, len(tempfile_text), line_width)
            ]
        )
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[4]:\x1b[0m │                "
        "                                        "
        "                 │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        f"          \n{wrapped_file_path}\n"
        f"{'':<80}\n"
    )
    adjusted_expected_output = adjust_for_fallback(expected_output, 0)
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=False,
        hide_hyperlink_hints=True,
        unicode=True,
    )
    # Trailing whitespace may differ between runs; compare stripped.
    assert output.rstrip() == adjusted_expected_output.rstrip()
def test_vega_url(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    mocker: MockerFixture,
    parse_link_filepath: Callable[[str], Path],
) -> None:
    """It pulls the JSON data from the URL and writes to file."""
    # Stub out the network: httpx.get returns a canned Vega-Lite v5 spec
    # so the test never touches the real URL.
    mock = mocker.patch("httpx.get")
    mock.return_value.text = json.dumps(
        {
            "$schema": "https://vega.github.io/schema/vega-lite/v5.json",
            "description": "A simple bar chart with embedded data.",
            "data": {
                "values": [
                    {"a": "A", "b": 28},
                    {"a": "B", "b": 55},
                    {"a": "C", "b": 43},
                    {"a": "D", "b": 91},
                    {"a": "E", "b": 81},
                    {"a": "F", "b": 53},
                    {"a": "G", "b": 19},
                    {"a": "H", "b": 87},
                    {"a": "I", "b": 52},
                ]
            },
            "mark": "bar",
            "encoding": {
                "x": {"field": "a", "type": "nominal", "axis": {"labelAngle": 0}},
                "y": {"field": "b", "type": "quantitative"},
            },
        }
    )
    # The cell's Vega payload is a URL string rather than an inline spec.
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 3,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vega.v5+json": "https://raw.githubusercontent.com/"
                    "vega/vega/master/docs/examples/bar-chart.vg.json",
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected HTML file contents embedding the fetched spec.
    expected_contents = (
        '<html>\n<head>\n    <script src="https://c'
        'dn.jsdelivr.net/npm/vega@5"></script>\n  '
        '  <script src="https://cdn.jsdelivr.net/'
        'npm/vega-lite@5"></script>\n    <script s'
        'rc="https://cdn.jsdelivr.net/npm/vega-em'
        'bed@6"></script>\n    <script src="https:'
        "//cdn.jsdelivr.net/gh/koaning/justcharts"
        '/justcharts.js"></script>\n    <title>Veg'
        "a chart</title>\n</head>\n<body>\n    <vega"
        'chart style="width: 100%">\n        {"$sc'
        'hema": "https://vega.github.io/schema/ve'
        'ga-lite/v5.json", "description": "A simp'
        'le bar chart with embedded data.", "data'
        '": {"values": [{"a": "A", "b": 28}, {"a"'
        ': "B", "b": 55}, {"a": "C", "b": 43}, {"'
        'a": "D", "b": 91}, {"a": "E", "b": 81}, '
        '{"a": "F", "b": 53}, {"a": "G", "b": 19}'
        ', {"a": "H", "b": 87}, {"a": "I", "b": 5'
        '2}]}, "mark": "bar", "encoding": {"x": {'
        '"field": "a", "type": "nominal", "axis":'
        ' {"labelAngle": 0}}, "y": {"field": "b",'
        ' "type": "quantitative"}}}\n    </vegacha'
        "rt>\n</body>\n<html></html>\n</html>"
    )
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
        unicode=False,
    )
    # Locate the written file via the hyperlink, then verify both the
    # fetch call and the file contents.
    tempfile_path = parse_link_filepath(output)
    file_contents = tempfile_path.read_text()
    mock.assert_called_with(
        url="https://raw.githubusercontent.com"
        "/vega/vega/master/docs/examples/bar-chart.vg.json"
    )
    assert file_contents == expected_contents
def test_vega_url_request_error(
    rich_notebook_output: RichOutput,
    mocker: MockerFixture,
) -> None:
    """It falls back to rendering a message if there is a RequestError."""
    # Simulate a network failure when fetching the spec URL.
    mocker.patch("httpx.get", side_effect=httpx.RequestError("Mock"))
    vegalite_output_cell = {
        "cell_type": "code",
        "execution_count": 3,
        "metadata": {"tags": []},
        "outputs": [
            {
                "data": {
                    "application/vnd.vega.v5+json": "https://raw.githubusercontent.com/"
                    "vega/vega/master/docs/examples/bar-chart.vg.json",
                    "image/png": "",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # No file can be written, so the renderer prints a plain "Vega chart"
    # message plus the PNG fallback "Image" line.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[3]:\x1b[0m │                "
        "                                        "
        "                 │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "          \n      Vega chart             "
        "                                        "
        "          \n"
        "                                        "
        "                                        \n      \x1b[38;2;187;134;252mImage    "
        "                                        "
        "                          \x1b[0m\n"
    )
    output = rich_notebook_output(
        vegalite_output_cell,
        nerd_font=False,
        files=True,
        hyperlinks=True,
        hide_hyperlink_hints=False,
        unicode=False,
    )
    # No random link ids here (no hyperlink emitted), so compare directly.
    assert output == expected_output
def test_render_html(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders HTML output."""
    # Cell with an HTML display output; bold/italic markup should be
    # converted to ANSI styles and a link to the raw HTML file emitted.
    html_cell = {
        "cell_type": "code",
        "execution_count": 7,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": "  <head>\n"
                    "       <title>Example</title>\n    </head>\n    "
                    "<body>\n        <p><strong>Lorep</strong> "
                    "<em>Ipsum</em> </p>\n    </body>\n",
                    "text/plain": "<IPython.core.display.HTML object>",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected: hyperlink line, then "Lorep" bold (\x1b[1m) and "Ipsum"
    # italic (\x1b[3m).
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[7]:\x1b[0m │                "
        "                                        "
        "                 │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "          \n      \x1b]8;id=16281375"
        f"06.111208-917276;file://{tempfile_path}0.html\x1b\\\x1b[94m🌐 Click to v"
        "iew HTML\x1b[0m\x1b]8;;\x1b\\                     "
        "                             \n          "
        "                                        "
        "                              \n      "
        "\x1b[1mLorep\x1b[0m \x1b[3mIpsum\x1b[0m             "
        "                                        "
        "   \n"
    )
    output = rich_notebook_output(html_cell)
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_html_table(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
) -> None:
    """It renders an HTML table."""
    # Cell containing a 2x3 HTML table; the renderer should lay it out
    # as an aligned text table with bold headers.
    html_cell = {
        "cell_type": "code",
        "execution_count": 7,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {
                    "text/html": """\
<table>
<tr>
<th>Company</th>
<th>Contact</th>
<th>Country</th>
</tr>
<tr>
<td>Alfreds Futterkiste</td>
<td>Maria Anders</td>
<td>Germany</td>
</tr>
<tr>
<td>Centro comercial Moctezuma</td>
<td>Francisco Chang</td>
<td>Mexico</td>
</tr>
</table>
""",
                    "text/plain": "<IPython.core.display.HTML object>",
                },
                "metadata": {},
                "output_type": "display_data",
            }
        ],
        "source": "",
    }
    # Expected: hyperlink to the raw HTML, then the text table; note the
    # long "Centro comercial Moctezuma" cell wraps to a second row.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[7]:\x1b[0m │                "
        "                                        "
        "                 │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "          \n      \x1b]8;id=58222;fi"
        f"le://{tempfile_path}0.ht"
        "ml\x1b\\\x1b[94m🌐 Click to view HTML\x1b[0m\x1b]8;;\x1b\\"
        "                                        "
        "          \n                             "
        "                                        "
        "           \n                            "
        "                                        "
        "            \n       \x1b[1mCompany\x1b[0m     "
        "          \x1b[1mContact\x1b[0m         "
        " \x1b[1mCountry\x1b[0m                        "
        "\n      ─────────────────────────────────"
        "────────────────────────────────────────"
        "─\n       Alfreds Futterkiste    Maria "
        "Anders      Germany                     "
        "   \n       Centro comercial      Franc"
        "isco Chang   Mexico                     "
        "    \n       Moctezuma                   "
        "                                        "
        "     \n                                  "
        "                                        "
        "      \n"
    )
    output = rich_notebook_output(html_cell)
    assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_unknown_data_type(rich_notebook_output: RichOutput) -> None:
    """It skips rendering an unknown output type."""
    # Cell whose only output uses an unrecognized MIME key; the renderer
    # should emit just the (empty) input box and nothing for the output.
    unknown_data_type = {
        "cell_type": "code",
        "execution_count": 11,
        "id": "intense-middle",
        "metadata": {},
        "outputs": [
            {
                "data": {"unkown_data_type": "3"},
                "execution_count": 2,
                "metadata": {},
                "output_type": "execute_result",
            }
        ],
        "source": "",
    }
    output = rich_notebook_output(unknown_data_type)
    # Expected: only the execution-count box, no output section.
    expected_output = (
        "      ╭─────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[11]:\x1b[0m │               "
        "                                        "
        "                 │\n      ╰────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n"
    )
    assert output == expected_output
@pytest.mark.skipif(
    "terminedia" not in sys.modules,
    reason=SKIP_TERMINEDIA_REASON,
)
def test_render_block_image(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    disable_capture: ContextManager[_PluggyPlugin],
    expected_output: str,
) -> None:
    """It renders a block drawing of an image."""
    # Cell with a base64-encoded matplotlib PNG; with
    # image_drawing="block" the terminedia backend draws it using block
    # characters. The golden output comes from the expected_output fixture.
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    # Base64 PNG of a matplotlib Axes figure (432x288).
                    "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEDCAYAAAAyZm"
                    "/jAAAAOXRFWHRTb2Z0"
                    "d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90"
                    "bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAATJElEQVR4nO3d"
                    "f5DcdX3H8edl90IoiTgoN+Q8PEVSRmtFS4EO2hktMsUqNs7YiFW0paYg/gBl"
                    "eGuniFOtxX7iD8RftS1Oa7Hi+LtmtAgIZRwoTtMiaEXG0SQcARck/AgkcLe3"
                    "/WMPejn3bveyn9vbvc/zMZO52e998sn7dZm88t3vfm9vqNFoIEkqx6rlHkCS"
                    "1FsWvyQVxuKXpMJY/JJUGItfkgozKMXfGLRf9Xq9sWPHjka9Xl/2WcxrZjMX"
                    "m7mlQSn+gTM9Pb3fx5WutLxg5lKsxMwWvyQVxuKXpMJY/JJUGItfkgpj8UtS"
                    "YaqdLIqINwLvAQ4HbgL+LKV0x5w1pwAfA9YD3wPOTCndk3dcSVK32p7xR8Rx"
                    "wBbgD4GnAjuAS+asWQN8AXgrcARwL/D+zLNKkjLo5Iz/mcBHU0o/AoiIzwGf"
                    "nrPmRGBnSum7M2suBb6ea8h6vT5w99BOTU3t93GlKy0vmLkUg5x5eHi45fG2"
                    "xZ9S+vKcQy8Erp9zbD3wi1mP76J55p/FxMRErq16rlarLfcIPVVaXjBzKQYx"
                    "8/j4eMvjHV3jf1xE/BbwFuD4OZ9aA9RnPZ4CVi9m74WMjY0N5Bl/rVZjZGSE"
                    "anVRX+aBVFpeMLOZB1fHKSLi6cBXgT9JKd0959OPztmrQrP8s6hUKlQqlVzb"
                    "9VS1Wp336dZKVFpeMHMpVlLmjm7njIhDga3AhSmla1osuQs4ctbj9cDc/xwk"
                    "SX2g7Rl/RAwDXwH+JaV0+TzLbgKOiIjTgCtp3t1zZbYpJUnZdHKp5xXAycDv"
                    "RMR7Zx3/K+APUkovSSntjYjTgUuBz9F88fdPs08rSQW5/fbb+drXvsbDDz/M"
                    "s571LF7zmtewZs2arvcdajTmfcvmfjIQQ842OTnJrl27GB0dXTHXBRdSWl4w"
                    "80rM/LPvT7H1/fvY99D/V06jMc1jjz7G6oNWMzTU/ZsdrFk3xCves4ajTlj4"
                    "vPvBBx/kQx/6EGeddRYjIyNcdtllbNiwgZNPPnkxf9xQq4Mr4yVqScrg6kv2"
                    "ccvWyRafqQLTM7+6d/CThjjq8wvX78EHH8y5557LU57yFACOOeYY7rknz5sh"
                    "WPySNOOl561h30Ms+Rn/S887qO264eFhbrvtNm688UYefPBBpqamOPbYY7v+"
                    "88Hil6QnHHVClbd/c+1+x5qXt+5ndPTJPb28tWPHDq677jo2b97M4YcfzvXX"
                    "X8/dd+e5WdJ355SkPvTII48wPDzMunXreOihh7j99tuzfSOrZ/yS1IeOOeYY"
                    "br31Vi6++GIOPfRQjj76aB544IEse1v8ktSHVq1axaZNm9i0aVP+vbPvKEnq"
                    "axa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqQ/dd999XHjh"
                    "hUuyt8UvSYWx+CWpML5XjyT1qUajwTe+8Q1uvvlm1q1bx6ZNmxgbG+t6X4tf"
                    "kmZ8/977eP8tP+ahqaknjjWmGzz62KMc9OOfM7Sq5U8yXJR11Srued6zOeGp"
                    "h7VdOzk5ydjYGKeddho33HADV1xxBeeffz5DQ93NYfFL0oxLfvxTtt453w87"
                    "2ZPtz3nS8DCf/90T2q5bvXo1xx13HAAnnXQSV111Fffdd98TP47xQFn8kjTj"
                    "vGcfzUOTk63P+FcflO2M/7xnH73o37dq1SoOOeQQ9uzZY/FLUi4nPPUwvvl7"
                    "L9zvWPNHL+5idHS0pz96ca7p6Wn27NnD2rVr2y9uw7t6JKlPPfbYY2zbto3p"
                    "6WluuOEGDj30UA47rP1rA+14xi9JfWrt2rXccccdbN26lXXr1vHa17626wd2"
                    "weKXpL502GGHcdFFFwGwcePGrHt7qUeSCmPxS1JhLH5JKozFL0mFsfglqTAd"
                    "3dUTEWuB04HNwDkppW0t1rwReB+wDtgKnJVS2ptxVklSBm3P+GdKfztwCrAB"
                    "+JWbSCPimcClwCuAI4GnAe/MOagkKY9Ozvj3AhtSSrsjYvs8a54N3JZSuhUg"
                    "Ir4CvDjLhEC9Xmd6ejrXdj0xNfNeH1Oz3vNjJSstL5i5FIOceb63mGhb/Cml"
                    "OrC7zbL/Bo6MiGOBnwKvBL6+uBHnNzExkWurnqvVass9Qk+VlhfMXIpBzDw+"
                    "Pt7yeJbv3E0p3R0RW4CbgWlgG/DZHHsDjI2NDeQZf61WY2RkhGp15X+DdGl5"
                    "wcxmHlxZUkTEccA7aF7y2Q58BPgE8Oc59q9UKlQqlRxb9Vy1Wl3Wd/TrtdLy"
                    "gplLsZIy57qd82TgmpTSbSmlfcAnaV7ukST1mVzPW34AnBMR48CdwOuBWzLt"
                    "LUnK6IDP+CNiY0RcDpBSuhL4FPA94F7gBTTv+Zck9ZmhRqOx3DN0YiCGnK1f"
                    "fmpPr5SWF8xs5oHQ8s37fcsGSSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiL"
                    "X5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1Jh"
                    "LH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9J"
                    "hbH4JakwFr8kFcbil6TCVDtZFBFrgdOBzcA5KaVt86x7J3A2cGdK6SXZppQk"
                    "ZdO2+GdKfztwDbABGJpn3V8CrwI2AbfkG1GSlFMnZ/x7gQ0ppd0Rsb3Vgog4"
                    "GDgfeEFKaUfG+QCo1+tMT0/n3nZJTU1N7fdxpSstL5i5FIOceXh4uOXxtsWf"
                    "UqoDu9ss+23gIeCSiDgJ2AacmVK6e5FztjQxMZFjm2VRq9WWe4SeKi0vmLkU"
                    "g5h5fHy85fGOrvF3YAwYAT5D87WAjwOX0rzs0/3mY2MDecZfq9UYGRmhWs31"
                    "Ze5fpeUFM5t5cOVKsQr4j5TSvwNExBbgpkx7U6lUqFQqubbrqWq1Ou/TrZWo"
                    "tLxg5lKspMy5bufcCTx9zrF6pr0lSRnlOuP/T2BNRJwBXEHzhd7vZNpbkpTR"
                    "AZ/xR8TGiLgcIKU0CWwE3gbcA4wC78gxoCQpr6FGo7HcM3RiIIacbXJykl27"
                    "djE6OrpirgsupLS8YGYzD4SW33flWzZIUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJU"
                    "GItfkgpj8UtSYSx+SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlQ7WRQRa4HTgc3AOSmlbQusfQPw"
                    "z8DhKaV7s0wpScqmbfHPlP524BpgAzC0wNonA+/NNJskaQl0csa/F9iQUtod"
                    "EdvbrP0A8A/Axd0ONlu9Xmd6ejrnlktuampqv48rXWl5wcylGOTMw8PDLY+3"
                    "Lf6UUh3Y3W5dRBwHvBh4AZmLf2JiIud2PVWr1ZZ7hJ4qLS+YuRSDmHl8fLzl"
                    "8Y6u8bcTEauATwHnppQei4gc2z5hbGxsIM/4a7UaIyMjVKtZvsx9rbS8YGYz"
                    "D65cKTYDO1NKV2fabz+VSoVKpbIUWy+5arU679Otlai0vGDmUqykzLlu5zwX"
                    "eGVE7IuIfTPHJiLiJZn2lyRlkuWMP6X0nNmPI6IBjHk7pyT1nwM+44+IjRFx"
                    "ec5hJElLb6jRaCz3DJ0YiCFnm5ycZNeuXYyOjq6Y64ILKS0vmNnMA6Hl9135"
                    "lg2SVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+S"
                    "CmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+"
                    "SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWp"
                    "drIoItYCpwObgXNSSttarHkX8BZgHXA18KaU0gMZZ5UkZdD2jH+m9LcDpwAb"
                    "gKEWa14FnAm8CHgacAhwUc5BJUl5dHLGvxfYkFLaHRHb51nzNOCDKaWdABHx"
                    "BeA1eUaEer3O9PR0ru16Ympqar+PK11pecHMpRjkzMPDwy2Pty3+lFId2N1m"
                    "zSfmHHohcH2nw7UzMTGRa6ueq9Vqyz1CT5WWF8xcikHMPD4+3vJ4R9f4FyMi"
                    "Xg68BHhHrj3HxsYG8oy/VqsxMjJCtZr9y9x3SssLZjbz4MqaIiKeD3wGODWl"
                    "tDfXvpVKhUqlkmu7nqpWq/M+3VqJSssLZi7FSsqc7XbOiDgS+BpwRkrph7n2"
                    "lSTllaX4I+JQYCvw7pTStTn2lCQtjQO+1BMRG4FXp5ReT/NWzt8ELouIy2Yt"
                    "e15K6WfdjShJymmo0Wgs9wydGIghZ5ucnGTXrl2Mjo6umOuCCyktL5jZzAPh"
                    "V77vCnzLBkkqjsUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5J"
                    "KozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9JhbH4"
                    "JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klSYaieLImItcDqwGTgnpbStxZozgPcCTwa+BZydUnok36iSpBza"
                    "nvHPlP524BRgAzDUYs0ocCnwKmAcOAJ4e85BJUl5dHLGvxfYkFLaHRHb51lz"
                    "CnBtSulWgIj4NHAe8MEcQ9brdaanp3Ns1TNTU1P7fVzpSssLZi7FIGceHh5u"
                    "ebxt8aeU6sDuNsvWA7+Y9fgummf9WUxMTOTaqudqtdpyj9BTpeUFM5diEDOP"
                    "j4+3PN7RNf4OrAHqsx5PAQdl2puxsbGBPOOv1WqMjIxQreb6Mvev0vKCmc08"
                    "uHKleHTOXhVgMtPeVCoVKpVKru16qlqtzvt0ayUqLS+YuRQrKXOu2znvAo6c"
                    "9Xg9cHemvSVJGeU6478K+HhEHA/8CDgbuDLT3pKkjA74jD8iNkbE5QAppTuB"
                    "NwNfBHbSfAbw4SwTSpKyGmo0Gss9QycGYsjZJicn2bVrF6OjoyvmuuBCSssL"
                    "ZjbzQPiV77sC37JBkopj8UtSYfr+ptSIWH3BBRcs9xiLNjU1xf3338/q1atX"
                    "zL2/CyktL5jZzP1vy5Ytvw5sTyk9Nvv4IKR4xpYtW5Z7BkkaRD8BjgFun31w"
                    "EIp/O83BJUmLt33ugUG5q0eSlIkv7kpSYSx+SSqMxS9JhbH4JakwFr8kFcbi"
                    "l6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwgzC+/H3"
                    "rYg4A3gv8GTgW8DZKaVHDmRdRHwWOCGl9NwlHrsr3WaOiHXApcDLgWngH1NK"
                    "F/Zm+s5FxCnAx4D1wPeAM1NK93S6JiICeCuwGvhX4IKUUr13CRavm8wRUQX+"
                    "FvhjYBj4KvCWlNJkDyMsWrd/z7PWXARESmltTwbvkmf8BygiRmkW2KuAceAI"
                    "4O0Hsi4iTgJevcQjdy1T5r8G1gHPBE4A3hARpy358IsQEWuAL9As7iOAe4H3"
                    "d7omIk4E3ga8CHgucDLwRz0a/4B0m5lm3hNo5j0GOA44uxezH6gMmR9fcxTN"
                    "/APD4j9wpwDXppRuTSk9DHwaeNli10VEBfgkcHEPZu5WjsyHAB9IKT2cUtoJ"
                    "XEX//YS1E4GdKaXvppQepfmf2NycC605FfhSSmlnSule4J9a/P5+023mpwB/"
                    "k1L6ZUrpl8DX6b+/17m6zfy4S4EPL/m0GXmp58CtB34x6/FdNM8IFrvubcD/"
                    "ADcCr8s8Y25dZ04pvenxgzP/6Z0IfCb7pN3pJOdCa9YDP5/zuVMzz5hbV5lb"
                    "XK57IfDZzDPm1u3fMxGxETgYuALou0uW87H425gppx1zDt8BXAnMvmY7BRzU"
                    "Yos1862LiPXAecDxwG/kmbh7S5l5jg8AP0gpff/Ap10SreZfvYg1nebvJ91m"
                    "fkJEnE3zmd2XMs+YW1eZI+LXgC3AK5dwxiVh8bcx84Lc2NzjEfEX7P/1qwCt"
                    "Xsh6dIF1HwG2zLw4lmfgDJY48+N7vYnmWfCLup13CbSaf2oRa9rm70PdZgYg"
                    "Ik4F3gWclFLq9x/o3W3m9wBfTyn9OCKesVRDLgWv8R+4u4AjZz1eD9zd6bqZ"
                    "s/1XAx+OiH3Ad4DnRMS+mTPuftRV5scfRMTvA+8GXpZS2rMEc3ark5wLren0"
                    "69RPus1MRDwf+HvgFSmlu5ZmzKy6zfx24G0z/35/Ahwy8+/3qCWaNxvP+A/c"
                    "VcDHI+J44Ec072C4stN1M/8whh9fFBEvBj7R57dzdpUZICKOBf6OZun3aznc"
                    "BBwxc7fRlTTv6Jibc6E13wb+LSI+CtwHvJH+f/Gvq8wRMQZ8GXhdSulHPZu6"
                    "O11lTikd8viimTP+H3o75wqXUroTeDPwRWAnzTODD0PzBZ+IuLzdukGTKfP7"
                    "aF5G+q+I2DPz6yc9DdJGSmkvcDqQaL6wdzjNW/OPj4hrF1oz87mbgA8B1wH/"
                    "C1xN817+vtVtZuAC4BnAt2f9ve7p42evOTIPrKFGo98vw0mScvKMX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klSY/wPTNSCZbt4GgAAAAABJ"
                    "RU5ErkJggg==\n",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    # terminedia draws directly to the terminal, so pytest's capture
    # must be disabled while rendering.
    with disable_capture:
        output = rich_notebook_output(image_cell, images=True, image_drawing="block")
    assert remove_link_ids(output) == expected_output
@pytest.mark.skipif(
    "terminedia" not in sys.modules,
    reason=SKIP_TERMINEDIA_REASON,
)
def test_render_invalid_block_image(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    disable_capture: ContextManager[_PluggyPlugin],
    tempfile_path: Path,
) -> None:
    """It renders a fallback when image is invalid."""
    # Cell whose "image/png" payload is not valid base64/PNG data; the
    # block drawer should fall back to a hyperlink plus plain-text line.
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    "image/png": "bad_image_data\n",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    # Capture must be off for terminedia's terminal access.
    with disable_capture:
        output = rich_notebook_output(image_cell, images=True, image_drawing="block")
    # Expected: execute_result text, then a hyperlink to the saved PNG
    # and the figure's text/plain representation as the fallback.
    expected_output = (
        "     ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[1]:\x1b[0m │                "
        "                                        "
        "                 │\n     ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n                      "
        "                                        "
        "          \n\x1b[38;5;247m[1]:\x1b[0m  "
        "<AxesSubplot:>                          "
        "                                  \n     "
        "                                        "
        "                                   \n     "
        f" \x1b]8;id=45753;file://{tempfile_path}0.png"
        "\x1b\\\x1b[94m🖼 Click to view"
        " Image\x1b[0m\x1b]8;;\x1b\\                       "
        "                           \n            "
        "                                        "
        "                            \n      \x1b["
        "38;2;187;134;252m<Figure size 432x288 wi"
        "th 1 Axes>                              "
        "        \x1b[0m\n"
    )
    assert remove_link_ids(output) == remove_link_ids(expected_output)
@pytest.mark.skipif(
    # Block drawing requires the optional "terminedia" dependency.
    "terminedia" not in sys.modules,
    reason=SKIP_TERMINEDIA_REASON,
)
def test_render_height_constrained_block_image(
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    make_notebook: Callable[[Optional[Dict[str, Any]]], NotebookNode],
    disable_capture: ContextManager[_PluggyPlugin],
    expected_output: str,
) -> None:
    """It renders a height constrained block drawing of an image."""
    # A code cell whose display_data output carries a base64 image/png
    # payload (shared verbatim with the other image-rendering tests).
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEDCAYAAAAyZm"
                    "/jAAAAOXRFWHRTb2Z0"
                    "d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90"
                    "bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAATJElEQVR4nO3d"
                    "f5DcdX3H8edl90IoiTgoN+Q8PEVSRmtFS4EO2hktMsUqNs7YiFW0paYg/gBl"
                    "eGuniFOtxX7iD8RftS1Oa7Hi+LtmtAgIZRwoTtMiaEXG0SQcARck/AgkcLe3"
                    "/WMPejn3bveyn9vbvc/zMZO52e998sn7dZm88t3vfm9vqNFoIEkqx6rlHkCS"
                    "1FsWvyQVxuKXpMJY/JJUGItfkgozKMXfGLRf9Xq9sWPHjka9Xl/2WcxrZjMX"
                    "m7mlQSn+gTM9Pb3fx5WutLxg5lKsxMwWvyQVxuKXpMJY/JJUGItfkgpj8UtS"
                    "YaqdLIqINwLvAQ4HbgL+LKV0x5w1pwAfA9YD3wPOTCndk3dcSVK32p7xR8Rx"
                    "wBbgD4GnAjuAS+asWQN8AXgrcARwL/D+zLNKkjLo5Iz/mcBHU0o/AoiIzwGf"
                    "nrPmRGBnSum7M2suBb6ea8h6vT5w99BOTU3t93GlKy0vmLkUg5x5eHi45fG2"
                    "xZ9S+vKcQy8Erp9zbD3wi1mP76J55p/FxMRErq16rlarLfcIPVVaXjBzKQYx"
                    "8/j4eMvjHV3jf1xE/BbwFuD4OZ9aA9RnPZ4CVi9m74WMjY0N5Bl/rVZjZGSE"
                    "anVRX+aBVFpeMLOZB1fHKSLi6cBXgT9JKd0959OPztmrQrP8s6hUKlQqlVzb"
                    "9VS1Wp336dZKVFpeMHMpVlLmjm7njIhDga3AhSmla1osuQs4ctbj9cDc/xwk"
                    "SX2g7Rl/RAwDXwH+JaV0+TzLbgKOiIjTgCtp3t1zZbYpJUnZdHKp5xXAycDv"
                    "RMR7Zx3/K+APUkovSSntjYjTgUuBz9F88fdPs08rSQW5/fbb+drXvsbDDz/M"
                    "s571LF7zmtewZs2arvcdajTmfcvmfjIQQ842OTnJrl27GB0dXTHXBRdSWl4w"
                    "80rM/LPvT7H1/fvY99D/V06jMc1jjz7G6oNWMzTU/ZsdrFk3xCves4ajTlj4"
                    "vPvBBx/kQx/6EGeddRYjIyNcdtllbNiwgZNPPnkxf9xQq4Mr4yVqScrg6kv2"
                    "ccvWyRafqQLTM7+6d/CThjjq8wvX78EHH8y5557LU57yFACOOeYY7rknz5sh"
                    "WPySNOOl561h30Ms+Rn/S887qO264eFhbrvtNm688UYefPBBpqamOPbYY7v+"
                    "88Hil6QnHHVClbd/c+1+x5qXt+5ndPTJPb28tWPHDq677jo2b97M4YcfzvXX"
                    "X8/dd+e5WdJ355SkPvTII48wPDzMunXreOihh7j99tuzfSOrZ/yS1IeOOeYY"
                    "br31Vi6++GIOPfRQjj76aB544IEse1v8ktSHVq1axaZNm9i0aVP+vbPvKEnq"
                    "axa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqQ/dd999XHjh"
                    "hUuyt8UvSYWx+CWpML5XjyT1qUajwTe+8Q1uvvlm1q1bx6ZNmxgbG+t6X4tf"
                    "kmZ8/977eP8tP+ahqaknjjWmGzz62KMc9OOfM7Sq5U8yXJR11Srved6zOeGp"
                    "h7VdOzk5ydjYGKeddho33HADV1xxBeeffz5DQ93NYfFL0oxLfvxTtt453w87"
                    "2ZPtz3nS8DCf/90T2q5bvXo1xx13HAAnnXQSV111Fffdd98TP47xQFn8kjTj"
                    "vGcfzUOTk63P+FcflO2M/7xnH73o37dq1SoOOeQQ9uzZY/FLUi4nPPUwvvl7"
                    "L9zvWPNHL+5idHS0pz96ca7p6Wn27NnD2rVr2y9uw7t6JKlPPfbYY2zbto3p"
                    "6WluuOEGDj30UA47rP1rA+14xi9JfWrt2rXccccdbN26lXXr1vHa17626xd2"
                    "weKXpL502GGHcdFFFwGwcePGrHt7qUeSCmPxS1JhLH5JKozFL0mFsfglqTAd"
                    "3dUTEWuB04HNwDkppW0t1rwReB+wDtgKnJVS2ptxVklSBm3P+GdKfztwCrAB"
                    "+JWbSCPimcClwCuAI4GnAe/MOagkKY9Ozvj3AhtSSrsjYvs8a54N3JZSuhUg"
                    "Ir4CvDjLhEC9Xmd6ejrXdj0xNfNeH1Oz3vNjJSstL5i5FIOceb63mGhb/Cml"
                    "OrC7zbL/Bo6MiGOBnwKvBL6+uBHnNzExkWurnqvVass9Qk+VlhfMXIpBzDw+"
                    "Pt7yeJbv3E0p3R0RW4CbgWlgG/DZHHsDjI2NDeQZf61WY2RkhGp15X+DdGl5"
                    "wcxmHlxZUkTEccA7aF7y2Q58BPgE8Oc59q9UKlQqlRxb9Vy1Wl3Wd/TrtdLy"
                    "gplLsZIy57qd82TgmpTSbSmlfcAnaV7ukST1mVzPW34AnBMR48CdwOuBWzLt"
                    "LUnK6IDP+CNiY0RcDpBSuhL4FPA94F7gBTTv+Zck9ZmhRqOx3DN0YiCGnK1f"
                    "fmpPr5SWF8xs5oHQ8s37fcsGSSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiL"
                    "X5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1Jh"
                    "LH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9J"
                    "hbH4JakwFr8kFcbil6TCVDtZFBFrgdOBzcA5KaVt86x7J3A2cGdK6SXZppQk"
                    "ZdO2+GdKfztwDbABGJpn3V8CrwI2AbfkG1GSlFMnZ/x7gQ0ppd0Rsb3Vgog4"
                    "GDgfeEFKaUfG+QCo1+tMT0/n3nZJTU1N7fdxpSstL5i5FIOceXh4uOXxtsWf"
                    "UqoDu9ss+23gIeCSiDgJ2AacmVK6e5FztjQxMZFjm2VRq9WWe4SeKi0vmLkU"
                    "g5h5fHy85fGOrvF3YAwYAT5D87WAjwOX0rzs0/3mY2MDecZfq9UYGRmhWs31"
                    "Ze5fpeUFM5t5cOVKsQr4j5TSvwNExBbgpkx7U6lUqFQqubbrqWq1Ou/TrZWo"
                    "tLxg5lKspMy5bufcCTx9zrF6pr0lSRnlOuP/T2BNRJwBXEHzhd7vZNpbkpTR"
                    "AZ/xR8TGiLgcIKU0CWwE3gbcA4wC78gxoCQpr6FGo7HcM3RiIIacbXJykl27"
                    "djE6OrpirgsupLS8YGYzD4SW33flWzZIUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJU"
                    "GItfkgpj8UtSYSx+SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlQ7WRQRa4HTgc3AOSmlbQusfQPw"
                    "z8DhKaV7s0wpScqmbfHPlP524BpgAzC0wNonA+/NNJskaQl0csa/F9iQUtod"
                    "EdvbrP0A8A/Axd0ONlu9Xmd6ejrnlktuampqv48rXWl5wcylGOTMw8PDLY+3"
                    "Lf6UUh3Y3W5dRBwHvBh4AZmLf2JiIud2PVWr1ZZ7hJ4qLS+YuRSDmHl8fLzl"
                    "8Y6u8bcTEauATwHnppQei4gc2z5hbGxsIM/4a7UaIyMjVKtZvsx9rbS8YGYz"
                    "D65cKTYDO1NKV2fabz+VSoVKpbIUWy+5arU679Otlai0vGDmUqykzLlu5zwX"
                    "eGVE7IuIfTPHJiLiJZn2lyRlkuWMP6X0nNmPI6IBjHk7pyT1nwM+44+IjRFx"
                    "ec5hJElLb6jRaCz3DJ0YiCFnm5ycZNeuXYyOjq6Y64ILKS0vmNnMA6Hl9135"
                    "lg2SVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+S"
                    "CmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+"
                    "SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWp"
                    "drIoItYCpwObgXNSSttarHkX8BZgHXA18KaU0gMZZ5UkZdD2jH+m9LcDpwAb"
                    "gKEWa14FnAm8CHgacAhwUc5BJUl5dHLGvxfYkFLaHRHb51nzNOCDKaWdABHx"
                    "BeA1eUaEer3O9PR0ru16Ympqar+PK11pecHMpRjkzMPDwy2Pty3+lFId2N1m"
                    "zSfmHHohcH2nw7UzMTGRa6ueq9Vqyz1CT5WWF8xcikHMPD4+3vJ4R9f4FyMi"
                    "Xg68BHhHrj3HxsYG8oy/VqsxMjJCtZr9y9x3SssLZjbz4MqaIiKeD3wGODWl"
                    "tDfXvpVKhUqlkmu7nqpWq/M+3VqJSssLZi7FSsqc7XbOiDgS+BpwRkrph7n2"
                    "lSTllaX4I+JQYCvw7pTStTn2lCQtjQO+1BMRG4FXp5ReT/NWzt8ELouIy2Yt"
                    "e15K6WfdjShJymmo0Wgs9wydGIghZ5ucnGTXrl2Mjo6umOuCCyktL5jZzAPh"
                    "V77vCnzLBkkqjsUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5J"
                    "KozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9JhbH4"
                    "JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klSYaieLImItcDqwGTgnpbStxZozgPcCTwa+BZydUnok36iSpBza"
                    "nvHPlP524BRgAzDUYs0ocCnwKmAcOAJ4e85BJUl5dHLGvxfYkFLaHRHb51lz"
                    "CnBtSulWgIj4NHAe8MEcQ9brdaanp3Ns1TNTU1P7fVzpSssLZi7FIGceHh5u"
                    "ebxt8aeU6sDuNsvWA7+Y9fgummf9WUxMTOTaqudqtdpyj9BTpeUFM5diEDOP"
                    "j4+3PN7RNf4OrAHqsx5PAQdl2puxsbGBPOOv1WqMjIxQreb6Mvev0vKCmc08"
                    "uHKleHTOXhVgMtPeVCoVKpVKru16qlqtzvt0ayUqLS+YuRQrKXOu2znvAo6c"
                    "9Xg9cHemvSVJGeU6478K+HhEHA/8CDgbuDLT3pKkjA74jD8iNkbE5QAppTuB"
                    "NwNfBHbSfAbw4SwTSpKyGmo0Gss9QycGYsjZJicn2bVrF6OjoyvmuuBCSssL"
                    "ZjbzQPiV77sC37JBkopj8UtSYfr+ptSIWH3BBRcs9xiLNjU1xf3338/q1atX"
                    "zL2/CyktL5jZzP1vy5Ytvw5sTyk9Nvv4IKR4xpYtW5Z7BkkaRD8BjgFun31w"
                    "EIp/O83BJUmLt33ugUG5q0eSlIkv7kpSYSx+SSqMxS9JhbH4JakwFr8kFcbi"
                    "l6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwgzC+/H3"
                    "rYg4A3gv8GTgW8DZKaVHDmRdRHwWOCGl9NwlHrsr3WaOiHXApcDLgWngH1NK"
                    "F/Zm+s5FxCnAx4D1wPeAM1NK93S6JiICeCuwGvhX4IKUUr13CRavm8wRUQX+"
                    "FvhjYBj4KvCWlNJkDyMsWrd/z7PWXARESmltTwbvkmf8BygiRmkW2KuAceAI"
                    "4O0Hsi4iTgJevcQjdy1T5r8G1gHPBE4A3hARpy358IsQEWuAL9As7iOAe4H3"
                    "d7omIk4E3ga8CHgucDLwRz0a/4B0m5lm3hNo5j0GOA44uxezH6gMmR9fcxTN"
                    "/APD4j9wpwDXppRuTSk9DHwaeNli10VEBfgkcHEPZu5WjsyHAB9IKT2cUtoJ"
                    "XEX//YS1E4GdKaXvppQepfmf2NycC605FfhSSmlnSule4J9a/P5+023mpwB/"
                    "k1L6ZUrpl8DX6b+/17m6zfy4S4EPL/m0GXmp58CtB34x6/FdNM8IFrvubcD/"
                    "ADcCr8s8Y25dZ04pvenxgzP/6Z0IfCb7pN3pJOdCa9YDP5/zuVMzz5hbV5lb"
                    "XK57IfDZzDPm1u3fMxGxETgYuALou0uW87H425gppx1zDt8BXAnMvmY7BRzU"
                    "Yos1862LiPXAecDxwG/kmbh7S5l5jg8AP0gpff/Ap10SreZfvYg1nebvJ91m"
                    "fkJEnE3zmd2XMs+YW1eZI+LXgC3AK5dwxiVh8bcx84Lc2NzjEfEX7P/1qwCt"
                    "Xsh6dIF1HwG2zLw4lmfgDJY48+N7vYnmWfCLup13CbSaf2oRa9rm70PdZgYg"
                    "Ik4F3gWclFLq9x/o3W3m9wBfTyn9OCKesVRDLgWv8R+4u4AjZz1eD9zd6bqZ"
                    "s/1XAx+OiH3Ad4DnRMS+mTPuftRV5scfRMTvA+8GXpZS2rMEc3ark5wLren0"
                    "69RPus1MRDwf+HvgFSmlu5ZmzKy6zfx24G0z/35/Ahwy8+/3qCWaNxvP+A/c"
                    "VcDHI+J44Ec072C4stN1M/8whh9fFBEvBj7R57dzdpUZICKOBf6OZun3aznc"
                    "BBwxc7fRlTTv6Jibc6E13wb+LSI+CtwHvJH+f/Gvq8wRMQZ8GXhdSulHPZu6"
                    "O11lTikd8viimTP+H3o75wqXUroTeDPwRWAnzTODD0PzBZ+IuLzdukGTKfP7"
                    "aF5G+q+I2DPz6yc9DdJGSmkvcDqQaL6wdzjNW/OPj4hrF1oz87mbgA8B1wH/"
                    "C1xN817+vtVtZuAC4BnAt2f9ve7p42evOTIPrKFGo98vw0mScvKMX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klSY/wPTNSCZbt4GgAAAAABJ"
                    "RU5ErkJggg==\n",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    notebook_node = make_notebook(image_cell)
    rendered_notebook = notebook.Notebook(
        notebook_node,
        images=True,
        image_drawing="block",
    )
    with disable_capture:
        # height=20 is the constraint under test; width=80 and truecolor
        # match the other rendering tests.
        con = console.Console(
            file=io.StringIO(),
            width=80,
            height=20,
            color_system="truecolor",
            legacy_windows=False,
            force_terminal=True,
        )
        con.print(rendered_notebook)
    output = con.file.getvalue()  # type: ignore[attr-defined]
    # expected_output is supplied by a fixture (presumably loaded from a
    # recorded expectation — confirm against conftest).
    assert remove_link_ids(output) == expected_output
def test_render_image_link(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    tempfile_path: Path,
    disable_capture: ContextManager[_PluggyPlugin],
) -> None:
    """It renders a link to an image."""
    # A code cell whose display_data output carries a base64 image/png
    # payload (shared verbatim with the other image-rendering tests).
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEDCAYAAAAyZm"
                    "/jAAAAOXRFWHRTb2Z0"
                    "d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90"
                    "bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAATJElEQVR4nO3d"
                    "f5DcdX3H8edl90IoiTgoN+Q8PEVSRmtFS4EO2hktMsUqNs7YiFW0paYg/gBl"
                    "eGuniFOtxX7iD8RftS1Oa7Hi+LtmtAgIZRwoTtMiaEXG0SQcARck/AgkcLe3"
                    "/WMPejn3bveyn9vbvc/zMZO52e998sn7dZm88t3vfm9vqNFoIEkqx6rlHkCS"
                    "1FsWvyQVxuKXpMJY/JJUGItfkgozKMXfGLRf9Xq9sWPHjka9Xl/2WcxrZjMX"
                    "m7mlQSn+gTM9Pb3fx5WutLxg5lKsxMwWvyQVxuKXpMJY/JJUGItfkgpj8UtS"
                    "YaqdLIqINwLvAQ4HbgL+LKV0x5w1pwAfA9YD3wPOTCndk3dcSVK32p7xR8Rx"
                    "wBbgD4GnAjuAS+asWQN8AXgrcARwL/D+zLNKkjLo5Iz/mcBHU0o/AoiIzwGf"
                    "nrPmRGBnSum7M2suBb6ea8h6vT5w99BOTU3t93GlKy0vmLkUg5x5eHi45fG2"
                    "xZ9S+vKcQy8Erp9zbD3wi1mP76J55p/FxMRErq16rlarLfcIPVVaXjBzKQYx"
                    "8/j4eMvjHV3jf1xE/BbwFuD4OZ9aA9RnPZ4CVi9m74WMjY0N5Bl/rVZjZGSE"
                    "anVRX+aBVFpeMLOZB1fHKSLi6cBXgT9JKd0959OPztmrQrP8s6hUKlQqlVzb"
                    "9VS1Wp336dZKVFpeMHMpVlLmjm7njIhDga3AhSmla1osuQs4ctbj9cDc/xwk"
                    "SX2g7Rl/RAwDXwH+JaV0+TzLbgKOiIjTgCtp3t1zZbYpJUnZdHKp5xXAycDv"
                    "RMR7Zx3/K+APUkovSSntjYjTgUuBz9F88fdPs08rSQW5/fbb+drXvsbDDz/M"
                    "s571LF7zmtewZs2arvcdajTmfcvmfjIQQ842OTnJrl27GB0dXTHXBRdSWl4w"
                    "80rM/LPvT7H1/fvY99D/V06jMc1jjz7G6oNWMzTU/ZsdrFk3xCves4ajTlj4"
                    "vPvBBx/kQx/6EGeddRYjIyNcdtllbNiwgZNPPnkxf9xQq4Mr4yVqScrg6kv2"
                    "ccvWyRafqQLTM7+6d/CThjjq8wvX78EHH8y5557LU57yFACOOeYY7rknz5sh"
                    "WPySNOOl561h30Ms+Rn/S887qO264eFhbrvtNm688UYefPBBpqamOPbYY7v+"
                    "88Hil6QnHHVClbd/c+1+x5qXt+5ndPTJPb28tWPHDq677jo2b97M4YcfzvXX"
                    "X8/dd+e5WdJ355SkPvTII48wPDzMunXreOihh7j99tuzfSOrZ/yS1IeOOeYY"
                    "br31Vi6++GIOPfRQjj76aB544IEse1v8ktSHVq1axaZNm9i0aVP+vbPvKEnq"
                    "axa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqQ/dd999XHjh"
                    "hUuyt8UvSYWx+CWpML5XjyT1qUajwTe+8Q1uvvlm1q1bx6ZNmxgbG+t6X4tf"
                    "kmZ8/977eP8tP+ahqaknjjWmGzz62KMc9OOfM7Sq5U8yXJR11Srved6zOeGp"
                    "h7VdOzk5ydjYGKeddho33HADV1xxBeeffz5DQ93NYfFL0oxLfvxTtt453w87"
                    "2ZPtz3nS8DCf/90T2q5bvXo1xx13HAAnnXQSV111Fffdd98TP47xQFn8kjTj"
                    "vGcfzUOTk63P+FcflO2M/7xnH73o37dq1SoOOeQQ9uzZY/FLUi4nPPUwvvl7"
                    "L9zvWPNHL+5idHS0pz96ca7p6Wn27NnD2rVr2y9uw7t6JKlPPfbYY2zbto3p"
                    "6WluuOEGDj30UA47rP1rA+14xi9JfWrt2rXccccdbN26lXXr1vHa17626xd2"
                    "weKXpL502GGHcdFFFwGwcePGrHt7qUeSCmPxS1JhLH5JKozFL0mFsfglqTAd"
                    "3dUTEWuB04HNwDkppW0t1rwReB+wDtgKnJVS2ptxVklSBm3P+GdKfztwCrAB"
                    "+JWbSCPimcClwCuAI4GnAe/MOagkKY9Ozvj3AhtSSrsjYvs8a54N3JZSuhUg"
                    "Ir4CvDjLhEC9Xmd6ejrXdj0xNfNeH1Oz3vNjJSstL5i5FIOceb63mGhb/Cml"
                    "OrC7zbL/Bo6MiGOBnwKvBL6+uBHnNzExkWurnqvVass9Qk+VlhfMXIpBzDw+"
                    "Pt7yeJbv3E0p3R0RW4CbgWlgG/DZHHsDjI2NDeQZf61WY2RkhGp15X+DdGl5"
                    "wcxmHlxZUkTEccA7aF7y2Q58BPgE8Oc59q9UKlQqlRxb9Vy1Wl3Wd/TrtdLy"
                    "gplLsZIy57qd82TgmpTSbSmlfcAnaV7ukST1mVzPW34AnBMR48CdwOuBWzLt"
                    "LUnK6IDP+CNiY0RcDpBSuhL4FPA94F7gBTTv+Zck9ZmhRqOx3DN0YiCGnK1f"
                    "fmpPr5SWF8xs5oHQ8s37fcsGSSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiL"
                    "X5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1Jh"
                    "LH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9J"
                    "hbH4JakwFr8kFcbil6TCVDtZFBFrgdOBzcA5KaVt86x7J3A2cGdK6SXZppQk"
                    "ZdO2+GdKfztwDbABGJpn3V8CrwI2AbfkG1GSlFMnZ/x7gQ0ppd0Rsb3Vgog4"
                    "GDgfeEFKaUfG+QCo1+tMT0/n3nZJTU1N7fdxpSstL5i5FIOceXh4uOXxtsWf"
                    "UqoDu9ss+23gIeCSiDgJ2AacmVK6e5FztjQxMZFjm2VRq9WWe4SeKi0vmLkU"
                    "g5h5fHy85fGOrvF3YAwYAT5D87WAjwOX0rzs0/3mY2MDecZfq9UYGRmhWs31"
                    "Ze5fpeUFM5t5cOVKsQr4j5TSvwNExBbgpkx7U6lUqFQqubbrqWq1Ou/TrZWo"
                    "tLxg5lKspMy5bufcCTx9zrF6pr0lSRnlOuP/T2BNRJwBXEHzhd7vZNpbkpTR"
                    "AZ/xR8TGiLgcIKU0CWwE3gbcA4wC78gxoCQpr6FGo7HcM3RiIIacbXJykl27"
                    "djE6OrpirgsupLS8YGYzD4SW33flWzZIUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJU"
                    "GItfkgpj8UtSYSx+SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlQ7WRQRa4HTgc3AOSmlbQusfQPw"
                    "z8DhKaV7s0wpScqmbfHPlP524BpgAzC0wNonA+/NNJskaQl0csa/F9iQUtod"
                    "EdvbrP0A8A/Axd0ONlu9Xmd6ejrnlktuampqv48rXWl5wcylGOTMw8PDLY+3"
                    "Lf6UUh3Y3W5dRBwHvBh4AZmLf2JiIud2PVWr1ZZ7hJ4qLS+YuRSDmHl8fLzl"
                    "8Y6u8bcTEauATwHnppQei4gc2z5hbGxsIM/4a7UaIyMjVKtZvsx9rbS8YGYz"
                    "D65cKTYDO1NKV2fabz+VSoVKpbIUWy+5arU679Otlai0vGDmUqykzLlu5zwX"
                    "eGVE7IuIfTPHJiLiJZn2lyRlkuWMP6X0nNmPI6IBjHk7pyT1nwM+44+IjRFx"
                    "ec5hJElLb6jRaCz3DJ0YiCFnm5ycZNeuXYyOjq6Y64ILKS0vmNnMA6Hl9135"
                    "lg2SVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+S"
                    "CmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+"
                    "SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWp"
                    "drIoItYCpwObgXNSSttarHkX8BZgHXA18KaU0gMZZ5UkZdD2jH+m9LcDpwAb"
                    "gKEWa14FnAm8CHgacAhwUc5BJUl5dHLGvxfYkFLaHRHb51nzNOCDKaWdABHx"
                    "BeA1eUaEer3O9PR0ru16Ympqar+PK11pecHMpRjkzMPDwy2Pty3+lFId2N1m"
                    "zSfmHHohcH2nw7UzMTGRa6ueq9Vqyz1CT5WWF8xcikHMPD4+3vJ4R9f4FyMi"
                    "Xg68BHhHrj3HxsYG8oy/VqsxMjJCtZr9y9x3SssLZjbz4MqaIiKeD3wGODWl"
                    "tDfXvpVKhUqlkmu7nqpWq/M+3VqJSssLZi7FSsqc7XbOiDgS+BpwRkrph7n2"
                    "lSTllaX4I+JQYCvw7pTStTn2lCQtjQO+1BMRG4FXp5ReT/NWzt8ELouIy2Yt"
                    "e15K6WfdjShJymmo0Wgs9wydGIghZ5ucnGTXrl2Mjo6umOuCCyktL5jZzAPh"
                    "V77vCnzLBkkqjsUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5J"
                    "KozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9JhbH4"
                    "JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klSYaieLImItcDqwGTgnpbStxZozgPcCTwa+BZydUnok36iSpBza"
                    "nvHPlP524BRgAzDUYs0ocCnwKmAcOAJ4e85BJUl5dHLGvxfYkFLaHRHb51lz"
                    "CnBtSulWgIj4NHAe8MEcQ9brdaanp3Ns1TNTU1P7fVzpSssLZi7FIGceHh5u"
                    "ebxt8aeU6sDuNsvWA7+Y9fgummf9WUxMTOTaqudqtdpyj9BTpeUFM5diEDOP"
                    "j4+3PN7RNf4OrAHqsx5PAQdl2puxsbGBPOOv1WqMjIxQreb6Mvev0vKCmc08"
                    "uHKleHTOXhVgMtPeVCoVKpVKru16qlqtzvt0ayUqLS+YuRQrKXOu2znvAo6c"
                    "9Xg9cHemvSVJGeU6478K+HhEHA/8CDgbuDLT3pKkjA74jD8iNkbE5QAppTuB"
                    "NwNfBHbSfAbw4SwTSpKyGmo0Gss9QycGYsjZJicn2bVrF6OjoyvmuuBCSssL"
                    "ZjbzQPiV77sC37JBkopj8UtSYfr+ptSIWH3BBRcs9xiLNjU1xf3338/q1atX"
                    "zL2/CyktL5jZzP1vy5Ytvw5sTyk9Nvv4IKR4xpYtW5Z7BkkaRD8BjgFun31w"
                    "EIp/O83BJUmLt33ugUG5q0eSlIkv7kpSYSx+SSqMxS9JhbH4JakwFr8kFcbi"
                    "l6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwgzC+/H3"
                    "rYg4A3gv8GTgW8DZKaVHDmRdRHwWOCGl9NwlHrsr3WaOiHXApcDLgWngH1NK"
                    "F/Zm+s5FxCnAx4D1wPeAM1NK93S6JiICeCuwGvhX4IKUUr13CRavm8wRUQX+"
                    "FvhjYBj4KvCWlNJkDyMsWrd/z7PWXARESmltTwbvkmf8BygiRmkW2KuAceAI"
                    "4O0Hsi4iTgJevcQjdy1T5r8G1gHPBE4A3hARpy358IsQEWuAL9As7iOAe4H3"
                    "d7omIk4E3ga8CHgucDLwRz0a/4B0m5lm3hNo5j0GOA44uxezH6gMmR9fcxTN"
                    "/APD4j9wpwDXppRuTSk9DHwaeNli10VEBfgkcHEPZu5WjsyHAB9IKT2cUtoJ"
                    "XEX//YS1E4GdKaXvppQepfmf2NycC605FfhSSmlnSule4J9a/P5+023mpwB/"
                    "k1L6ZUrpl8DX6b+/17m6zfy4S4EPL/m0GXmp58CtB34x6/FdNM8IFrvubcD/"
                    "ADcCr8s8Y25dZ04pvenxgzP/6Z0IfCb7pN3pJOdCa9YDP5/zuVMzz5hbV5lb"
                    "XK57IfDZzDPm1u3fMxGxETgYuALou0uW87H425gppx1zDt8BXAnMvmY7BRzU"
                    "Yos1862LiPXAecDxwG/kmbh7S5l5jg8AP0gpff/Ap10SreZfvYg1nebvJ91m"
                    "fkJEnE3zmd2XMs+YW1eZI+LXgC3AK5dwxiVh8bcx84Lc2NzjEfEX7P/1qwCt"
                    "Xsh6dIF1HwG2zLw4lmfgDJY48+N7vYnmWfCLup13CbSaf2oRa9rm70PdZgYg"
                    "Ik4F3gWclFLq9x/o3W3m9wBfTyn9OCKesVRDLgWv8R+4u4AjZz1eD9zd6bqZ"
                    "s/1XAx+OiH3Ad4DnRMS+mTPuftRV5scfRMTvA+8GXpZS2rMEc3ark5wLren0"
                    "69RPus1MRDwf+HvgFSmlu5ZmzKy6zfx24G0z/35/Ahwy8+/3qCWaNxvP+A/c"
                    "VcDHI+J44Ec072C4stN1M/8whh9fFBEvBj7R57dzdpUZICKOBf6OZun3aznc"
                    "BBwxc7fRlTTv6Jibc6E13wb+LSI+CtwHvJH+f/Gvq8wRMQZ8GXhdSulHPZu6"
                    "O11lTikd8viimTP+H3o75wqXUroTeDPwRWAnzTODD0PzBZ+IuLzdukGTKfP7"
                    "aF5G+q+I2DPz6yc9DdJGSmkvcDqQaL6wdzjNW/OPj4hrF1oz87mbgA8B1wH/"
                    "C1xN817+vtVtZuAC4BnAt2f9ve7p42evOTIPrKFGo98vw0mScvKMX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klSY/wPTNSCZbt4GgAAAAABJ"
                    "RU5ErkJggg==\n",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    with disable_capture:
        # NOTE(review): images=False while the expected output still contains
        # a file link — confirm this is the intended "link only" mode.
        output = rich_notebook_output(image_cell, images=False)
    # Expected terminal output: the cell frame, the text/plain result, and
    # an OSC 8 hyperlink (\x1b]8;...) to the temp file.
    expected_output = (
        " ╭──────────────────────────────────"
        "───────────────────────────────────────╮"
        "\n\x1b[38;5;247m[1]:\x1b[0m │ "
        " "
        " │\n ╰─────────────────"
        "────────────────────────────────────────"
        "────────────────╯\n "
        " "
        " \n\x1b[38;5;247m[1]:\x1b[0m "
        "<AxesSubplot:> "
        " \n "
        " "
        " \n "
        f" \x1b]8;id=42532;file://{tempfile_path}0.png"
        "\x1b\\\x1b[94m🖼 Click to view"
        " Image\x1b[0m\x1b]8;;\x1b\\ "
        " \n "
        " "
        " \n <F"
        "igure size 432x288 with 1 Axes> "
        " \n"
    )
    # remove_link_ids is applied to both sides so the comparison ignores the
    # hyperlink "id=" parameter (presumably random per run — verify fixture).
    assert remove_link_ids(output) == remove_link_ids(expected_output)
# NOTE(review): "charater" is a typo for "character"; renaming would change
# the collected test id, so it is left as-is and only flagged here.
def test_charater_drawing(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    expected_output: str,
) -> None:
    """It renders a character drawing of an image."""
    # A code cell whose display_data output carries a base64 image/png
    # payload (shared verbatim with the other image-rendering tests).
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEDCAYAAAAyZm"
                    "/jAAAAOXRFWHRTb2Z0"
                    "d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90"
                    "bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAATJElEQVR4nO3d"
                    "f5DcdX3H8edl90IoiTgoN+Q8PEVSRmtFS4EO2hktMsUqNs7YiFW0paYg/gBl"
                    "eGuniFOtxX7iD8RftS1Oa7Hi+LtmtAgIZRwoTtMiaEXG0SQcARck/AgkcLe3"
                    "/WMPejn3bveyn9vbvc/zMZO52e998sn7dZm88t3vfm9vqNFoIEkqx6rlHkCS"
                    "1FsWvyQVxuKXpMJY/JJUGItfkgozKMXfGLRf9Xq9sWPHjka9Xl/2WcxrZjMX"
                    "m7mlQSn+gTM9Pb3fx5WutLxg5lKsxMwWvyQVxuKXpMJY/JJUGItfkgpj8UtS"
                    "YaqdLIqINwLvAQ4HbgL+LKV0x5w1pwAfA9YD3wPOTCndk3dcSVK32p7xR8Rx"
                    "wBbgD4GnAjuAS+asWQN8AXgrcARwL/D+zLNKkjLo5Iz/mcBHU0o/AoiIzwGf"
                    "nrPmRGBnSum7M2suBb6ea8h6vT5w99BOTU3t93GlKy0vmLkUg5x5eHi45fG2"
                    "xZ9S+vKcQy8Erp9zbD3wi1mP76J55p/FxMRErq16rlarLfcIPVVaXjBzKQYx"
                    "8/j4eMvjHV3jf1xE/BbwFuD4OZ9aA9RnPZ4CVi9m74WMjY0N5Bl/rVZjZGSE"
                    "anVRX+aBVFpeMLOZB1fHKSLi6cBXgT9JKd0959OPztmrQrP8s6hUKlQqlVzb"
                    "9VS1Wp336dZKVFpeMHMpVlLmjm7njIhDga3AhSmla1osuQs4ctbj9cDc/xwk"
                    "SX2g7Rl/RAwDXwH+JaV0+TzLbgKOiIjTgCtp3t1zZbYpJUnZdHKp5xXAycDv"
                    "RMR7Zx3/K+APUkovSSntjYjTgUuBz9F88fdPs08rSQW5/fbb+drXvsbDDz/M"
                    "s571LF7zmtewZs2arvcdajTmfcvmfjIQQ842OTnJrl27GB0dXTHXBRdSWl4w"
                    "80rM/LPvT7H1/fvY99D/V06jMc1jjz7G6oNWMzTU/ZsdrFk3xCves4ajTlj4"
                    "vPvBBx/kQx/6EGeddRYjIyNcdtllbNiwgZNPPnkxf9xQq4Mr4yVqScrg6kv2"
                    "ccvWyRafqQLTM7+6d/CThjjq8wvX78EHH8y5557LU57yFACOOeYY7rknz5sh"
                    "WPySNOOl561h30Ms+Rn/S887qO264eFhbrvtNm688UYefPBBpqamOPbYY7v+"
                    "88Hil6QnHHVClbd/c+1+x5qXt+5ndPTJPb28tWPHDq677jo2b97M4YcfzvXX"
                    "X8/dd+e5WdJ355SkPvTII48wPDzMunXreOihh7j99tuzfSOrZ/yS1IeOOeYY"
                    "br31Vi6++GIOPfRQjj76aB544IEse1v8ktSHVq1axaZNm9i0aVP+vbPvKEnq"
                    "axa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqQ/dd999XHjh"
                    "hUuyt8UvSYWx+CWpML5XjyT1qUajwTe+8Q1uvvlm1q1bx6ZNmxgbG+t6X4tf"
                    "kmZ8/977eP8tP+ahqaknjjWmGzz62KMc9OOfM7Sq5U8yXJR11Srved6zOeGp"
                    "h7VdOzk5ydjYGKeddho33HADV1xxBeeffz5DQ93NYfFL0oxLfvxTtt453w87"
                    "2ZPtz3nS8DCf/90T2q5bvXo1xx13HAAnnXQSV111Fffdd98TP47xQFn8kjTj"
                    "vGcfzUOTk63P+FcflO2M/7xnH73o37dq1SoOOeQQ9uzZY/FLUi4nPPUwvvl7"
                    "L9zvWPNHL+5idHS0pz96ca7p6Wn27NnD2rVr2y9uw7t6JKlPPfbYY2zbto3p"
                    "6WluuOEGDj30UA47rP1rA+14xi9JfWrt2rXccccdbN26lXXr1vHa17626xd2"
                    "weKXpL502GGHcdFFFwGwcePGrHt7qUeSCmPxS1JhLH5JKozFL0mFsfglqTAd"
                    "3dUTEWuB04HNwDkppW0t1rwReB+wDtgKnJVS2ptxVklSBm3P+GdKfztwCrAB"
                    "+JWbSCPimcClwCuAI4GnAe/MOagkKY9Ozvj3AhtSSrsjYvs8a54N3JZSuhUg"
                    "Ir4CvDjLhEC9Xmd6ejrXdj0xNfNeH1Oz3vNjJSstL5i5FIOceb63mGhb/Cml"
                    "OrC7zbL/Bo6MiGOBnwKvBL6+uBHnNzExkWurnqvVass9Qk+VlhfMXIpBzDw+"
                    "Pt7yeJbv3E0p3R0RW4CbgWlgG/DZHHsDjI2NDeQZf61WY2RkhGp15X+DdGl5"
                    "wcxmHlxZUkTEccA7aF7y2Q58BPgE8Oc59q9UKlQqlRxb9Vy1Wl3Wd/TrtdLy"
                    "gplLsZIy57qd82TgmpTSbSmlfcAnaV7ukST1mVzPW34AnBMR48CdwOuBWzLt"
                    "LUnK6IDP+CNiY0RcDpBSuhL4FPA94F7gBTTv+Zck9ZmhRqOx3DN0YiCGnK1f"
                    "fmpPr5SWF8xs5oHQ8s37fcsGSSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiL"
                    "X5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1Jh"
                    "LH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9J"
                    "hbH4JakwFr8kFcbil6TCVDtZFBFrgdOBzcA5KaVt86x7J3A2cGdK6SXZppQk"
                    "ZdO2+GdKfztwDbABGJpn3V8CrwI2AbfkG1GSlFMnZ/x7gQ0ppd0Rsb3Vgog4"
                    "GDgfeEFKaUfG+QCo1+tMT0/n3nZJTU1N7fdxpSstL5i5FIOceXh4uOXxtsWf"
                    "UqoDu9ss+23gIeCSiDgJ2AacmVK6e5FztjQxMZFjm2VRq9WWe4SeKi0vmLkU"
                    "g5h5fHy85fGOrvF3YAwYAT5D87WAjwOX0rzs0/3mY2MDecZfq9UYGRmhWs31"
                    "Ze5fpeUFM5t5cOVKsQr4j5TSvwNExBbgpkx7U6lUqFQqubbrqWq1Ou/TrZWo"
                    "tLxg5lKspMy5bufcCTx9zrF6pr0lSRnlOuP/T2BNRJwBXEHzhd7vZNpbkpTR"
                    "AZ/xR8TGiLgcIKU0CWwE3gbcA4wC78gxoCQpr6FGo7HcM3RiIIacbXJykl27"
                    "djE6OrpirgsupLS8YGYzD4SW33flWzZIUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJU"
                    "GItfkgpj8UtSYSx+SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlQ7WRQRa4HTgc3AOSmlbQusfQPw"
                    "z8DhKaV7s0wpScqmbfHPlP524BpgAzC0wNonA+/NNJskaQl0csa/F9iQUtod"
                    "EdvbrP0A8A/Axd0ONlu9Xmd6ejrnlktuampqv48rXWl5wcylGOTMw8PDLY+3"
                    "Lf6UUh3Y3W5dRBwHvBh4AZmLf2JiIud2PVWr1ZZ7hJ4qLS+YuRSDmHl8fLzl"
                    "8Y6u8bcTEauATwHnppQei4gc2z5hbGxsIM/4a7UaIyMjVKtZvsx9rbS8YGYz"
                    "D65cKTYDO1NKV2fabz+VSoVKpbIUWy+5arU679Otlai0vGDmUqykzLlu5zwX"
                    "eGVE7IuIfTPHJiLiJZn2lyRlkuWMP6X0nNmPI6IBjHk7pyT1nwM+44+IjRFx"
                    "ec5hJElLb6jRaCz3DJ0YiCFnm5ycZNeuXYyOjq6Y64ILKS0vmNnMA6Hl9135"
                    "lg2SVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+S"
                    "CmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+"
                    "SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWp"
                    "drIoItYCpwObgXNSSttarHkX8BZgHXA18KaU0gMZZ5UkZdD2jH+m9LcDpwAb"
                    "gKEWa14FnAm8CHgacAhwUc5BJUl5dHLGvxfYkFLaHRHb51nzNOCDKaWdABHx"
                    "BeA1eUaEer3O9PR0ru16Ympqar+PK11pecHMpRjkzMPDwy2Pty3+lFId2N1m"
                    "zSfmHHohcH2nw7UzMTGRa6ueq9Vqyz1CT5WWF8xcikHMPD4+3vJ4R9f4FyMi"
                    "Xg68BHhHrj3HxsYG8oy/VqsxMjJCtZr9y9x3SssLZjbz4MqaIiKeD3wGODWl"
                    "tDfXvpVKhUqlkmu7nqpWq/M+3VqJSssLZi7FSsqc7XbOiDgS+BpwRkrph7n2"
                    "lSTllaX4I+JQYCvw7pTStTn2lCQtjQO+1BMRG4FXp5ReT/NWzt8ELouIy2Yt"
                    "e15K6WfdjShJymmo0Wgs9wydGIghZ5ucnGTXrl2Mjo6umOuCCyktL5jZzAPh"
                    "V77vCnzLBkkqjsUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5J"
                    "KozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9JhbH4"
                    "JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klSYaieLImItcDqwGTgnpbStxZozgPcCTwa+BZydUnok36iSpBza"
                    "nvHPlP524BRgAzDUYs0ocCnwKmAcOAJ4e85BJUl5dHLGvxfYkFLaHRHb51lz"
                    "CnBtSulWgIj4NHAe8MEcQ9brdaanp3Ns1TNTU1P7fVzpSssLZi7FIGceHh5u"
                    "ebxt8aeU6sDuNsvWA7+Y9fgummf9WUxMTOTaqudqtdpyj9BTpeUFM5diEDOP"
                    "j4+3PN7RNf4OrAHqsx5PAQdl2puxsbGBPOOv1WqMjIxQreb6Mvev0vKCmc08"
                    "uHKleHTOXhVgMtPeVCoVKpVKru16qlqtzvt0ayUqLS+YuRQrKXOu2znvAo6c"
                    "9Xg9cHemvSVJGeU6478K+HhEHA/8CDgbuDLT3pKkjA74jD8iNkbE5QAppTuB"
                    "NwNfBHbSfAbw4SwTSpKyGmo0Gss9QycGYsjZJicn2bVrF6OjoyvmuuBCSssL"
                    "ZjbzQPiV77sC37JBkopj8UtSYfr+ptSIWH3BBRcs9xiLNjU1xf3338/q1atX"
                    "zL2/CyktL5jZzP1vy5Ytvw5sTyk9Nvv4IKR4xpYtW5Z7BkkaRD8BjgFun31w"
                    "EIp/O83BJUmLt33ugUG5q0eSlIkv7kpSYSx+SSqMxS9JhbH4JakwFr8kFcbi"
                    "l6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwgzC+/H3"
                    "rYg4A3gv8GTgW8DZKaVHDmRdRHwWOCGl9NwlHrsr3WaOiHXApcDLgWngH1NK"
                    "F/Zm+s5FxCnAx4D1wPeAM1NK93S6JiICeCuwGvhX4IKUUr13CRavm8wRUQX+"
                    "FvhjYBj4KvCWlNJkDyMsWrd/z7PWXARESmltTwbvkmf8BygiRmkW2KuAceAI"
                    "4O0Hsi4iTgJevcQjdy1T5r8G1gHPBE4A3hARpy358IsQEWuAL9As7iOAe4H3"
                    "d7omIk4E3ga8CHgucDLwRz0a/4B0m5lm3hNo5j0GOA44uxezH6gMmR9fcxTN"
                    "/APD4j9wpwDXppRuTSk9DHwaeNli10VEBfgkcHEPZu5WjsyHAB9IKT2cUtoJ"
                    "XEX//YS1E4GdKaXvppQepfmf2NycC605FfhSSmlnSule4J9a/P5+023mpwB/"
                    "k1L6ZUrpl8DX6b+/17m6zfy4S4EPL/m0GXmp58CtB34x6/FdNM8IFrvubcD/"
                    "ADcCr8s8Y25dZ04pvenxgzP/6Z0IfCb7pN3pJOdCa9YDP5/zuVMzz5hbV5lb"
                    "XK57IfDZzDPm1u3fMxGxETgYuALou0uW87H425gppx1zDt8BXAnMvmY7BRzU"
                    "Yos1862LiPXAecDxwG/kmbh7S5l5jg8AP0gpff/Ap10SreZfvYg1nebvJ91m"
                    "fkJEnE3zmd2XMs+YW1eZI+LXgC3AK5dwxiVh8bcx84Lc2NzjEfEX7P/1qwCt"
                    "Xsh6dIF1HwG2zLw4lmfgDJY48+N7vYnmWfCLup13CbSaf2oRa9rm70PdZgYg"
                    "Ik4F3gWclFLq9x/o3W3m9wBfTyn9OCKesVRDLgWv8R+4u4AjZz1eD9zd6bqZ"
                    "s/1XAx+OiH3Ad4DnRMS+mTPuftRV5scfRMTvA+8GXpZS2rMEc3ark5wLren0"
                    "69RPus1MRDwf+HvgFSmlu5ZmzKy6zfx24G0z/35/Ahwy8+/3qCWaNxvP+A/c"
                    "VcDHI+J44Ec072C4stN1M/8whh9fFBEvBj7R57dzdpUZICKOBf6OZun3aznc"
                    "BBwxc7fRlTTv6Jibc6E13wb+LSI+CtwHvJH+f/Gvq8wRMQZ8GXhdSulHPZu6"
                    "O11lTikd8viimTP+H3o75wqXUroTeDPwRWAnzTODD0PzBZ+IuLzdukGTKfP7"
                    "aF5G+q+I2DPz6yc9DdJGSmkvcDqQaL6wdzjNW/OPj4hrF1oz87mbgA8B1wH/"
                    "C1xN817+vtVtZuAC4BnAt2f9ve7p42evOTIPrKFGo98vw0mScvKMX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klSY/wPTNSCZbt4GgAAAAABJ"
                    "RU5ErkJggg==\n",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    # files=False: no temp image file is written; the drawing is inline.
    output = rich_notebook_output(
        image_cell, images=True, image_drawing="character", files=False
    )
    # expected_output is supplied by a fixture (presumably loaded from a
    # recorded expectation — confirm against conftest).
    assert output == expected_output
def test_braille_drawing(
    rich_notebook_output: RichOutput,
    mock_tempfile_file: Generator[Mock, None, None],
    remove_link_ids: Callable[[str], str],
    expected_output: str,
) -> None:
    """It renders a braille drawing of an image."""
    # A code cell whose display_data output carries a base64 image/png
    # payload (shared verbatim with the other image-rendering tests).
    image_cell = {
        "cell_type": "code",
        "execution_count": 1,
        "id": "43e39858-6416-4dc8-9d7e-7905127e7452",
        "metadata": {},
        "outputs": [
            {
                "data": {"text/plain": "<AxesSubplot:>"},
                "execution_count": 1,
                "metadata": {},
                "output_type": "execute_result",
            },
            {
                "data": {
                    "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEDCAYAAAAyZm"
                    "/jAAAAOXRFWHRTb2Z0"
                    "d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90"
                    "bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAATJElEQVR4nO3d"
                    "f5DcdX3H8edl90IoiTgoN+Q8PEVSRmtFS4EO2hktMsUqNs7YiFW0paYg/gBl"
                    "eGuniFOtxX7iD8RftS1Oa7Hi+LtmtAgIZRwoTtMiaEXG0SQcARck/AgkcLe3"
                    "/WMPejn3bveyn9vbvc/zMZO52e998sn7dZm88t3vfm9vqNFoIEkqx6rlHkCS"
                    "1FsWvyQVxuKXpMJY/JJUGItfkgozKMXfGLRf9Xq9sWPHjka9Xl/2WcxrZjMX"
                    "m7mlQSn+gTM9Pb3fx5WutLxg5lKsxMwWvyQVxuKXpMJY/JJUGItfkgpj8UtS"
                    "YaqdLIqINwLvAQ4HbgL+LKV0x5w1pwAfA9YD3wPOTCndk3dcSVK32p7xR8Rx"
                    "wBbgD4GnAjuAS+asWQN8AXgrcARwL/D+zLNKkjLo5Iz/mcBHU0o/AoiIzwGf"
                    "nrPmRGBnSum7M2suBb6ea8h6vT5w99BOTU3t93GlKy0vmLkUg5x5eHi45fG2"
                    "xZ9S+vKcQy8Erp9zbD3wi1mP76J55p/FxMRErq16rlarLfcIPVVaXjBzKQYx"
                    "8/j4eMvjHV3jf1xE/BbwFuD4OZ9aA9RnPZ4CVi9m74WMjY0N5Bl/rVZjZGSE"
                    "anVRX+aBVFpeMLOZB1fHKSLi6cBXgT9JKd0959OPztmrQrP8s6hUKlQqlVzb"
                    "9VS1Wp336dZKVFpeMHMpVlLmjm7njIhDga3AhSmla1osuQs4ctbj9cDc/xwk"
                    "SX2g7Rl/RAwDXwH+JaV0+TzLbgKOiIjTgCtp3t1zZbYpJUnZdHKp5xXAycDv"
                    "RMR7Zx3/K+APUkovSSntjYjTgUuBz9F88fdPs08rSQW5/fbb+drXvsbDDz/M"
                    "s571LF7zmtewZs2arvcdajTmfcvmfjIQQ842OTnJrl27GB0dXTHXBRdSWl4w"
                    "80rM/LPvT7H1/fvY99D/V06jMc1jjz7G6oNWMzTU/ZsdrFk3xCves4ajTlj4"
                    "vPvBBx/kQx/6EGeddRYjIyNcdtllbNiwgZNPPnkxf9xQq4Mr4yVqScrg6kv2"
                    "ccvWyRafqQLTM7+6d/CThjjq8wvX78EHH8y5557LU57yFACOOeYY7rknz5sh"
                    "WPySNOOl561h30Ms+Rn/S887qO264eFhbrvtNm688UYefPBBpqamOPbYY7v+"
                    "88Hil6QnHHVClbd/c+1+x5qXt+5ndPTJPb28tWPHDq677jo2b97M4YcfzvXX"
                    "X8/dd+e5WdJ355SkPvTII48wPDzMunXreOihh7j99tuzfSOrZ/yS1IeOOeYY"
                    "br31Vi6++GIOPfRQjj76aB544IEse1v8ktSHVq1axaZNm9i0aVP+vbPvKEnq"
                    "axa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqQ/dd999XHjh"
                    "hUuyt8UvSYWx+CWpML5XjyT1qUajwTe+8Q1uvvlm1q1bx6ZNmxgbG+t6X4tf"
                    "kmZ8/977eP8tP+ahqaknjjWmGzz62KMc9OOfM7Sq5U8yXJR11Srved6zOeGp"
                    "h7VdOzk5ydjYGKeddho33HADV1xxBeeffz5DQ93NYfFL0oxLfvxTtt453w87"
                    "2ZPtz3nS8DCf/90T2q5bvXo1xx13HAAnnXQSV111Fffdd98TP47xQFn8kjTj"
                    "vGcfzUOTk63P+FcflO2M/7xnH73o37dq1SoOOeQQ9uzZY/FLUi4nPPUwvvl7"
                    "L9zvWPNHL+5idHS0pz96ca7p6Wn27NnD2rVr2y9uw7t6JKlPPfbYY2zbto3p"
                    "6WluuOEGDj30UA47rP1rA+14xi9JfWrt2rXccccdbN26lXXr1vHa17626xd2"
                    "weKXpL502GGHcdFFFwGwcePGrHt7qUeSCmPxS1JhLH5JKozFL0mFsfglqTAd"
                    "3dUTEWuB04HNwDkppW0t1rwReB+wDtgKnJVS2ptxVklSBm3P+GdKfztwCrAB"
                    "+JWbSCPimcClwCuAI4GnAe/MOagkKY9Ozvj3AhtSSrsjYvs8a54N3JZSuhUg"
                    "Ir4CvDjLhEC9Xmd6ejrXdj0xNfNeH1Oz3vNjJSstL5i5FIOceb63mGhb/Cml"
                    "OrC7zbL/Bo6MiGOBnwKvBL6+uBHnNzExkWurnqvVass9Qk+VlhfMXIpBzDw+"
                    "Pt7yeJbv3E0p3R0RW4CbgWlgG/DZHHsDjI2NDeQZf61WY2RkhGp15X+DdGl5"
                    "wcxmHlxZUkTEccA7aF7y2Q58BPgE8Oc59q9UKlQqlRxb9Vy1Wl3Wd/TrtdLy"
                    "gplLsZIy57qd82TgmpTSbSmlfcAnaV7ukST1mVzPW34AnBMR48CdwOuBWzLt"
                    "LUnK6IDP+CNiY0RcDpBSuhL4FPA94F7gBTTv+Zck9ZmhRqOx3DN0YiCGnK1f"
                    "fmpPr5SWF8xs5oHQ8s37fcsGSSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiL"
                    "X5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1Jh"
                    "LH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9J"
                    "hbH4JakwFr8kFcbil6TCVDtZFBFrgdOBzcA5KaVt86x7J3A2cGdK6SXZppQk"
                    "ZdO2+GdKfztwDbABGJpn3V8CrwI2AbfkG1GSlFMnZ/x7gQ0ppd0Rsb3Vgog4"
                    "GDgfeEFKaUfG+QCo1+tMT0/n3nZJTU1N7fdxpSstL5i5FIOceXh4uOXxtsWf"
                    "UqoDu9ss+23gIeCSiDgJ2AacmVK6e5FztjQxMZFjm2VRq9WWe4SeKi0vmLkU"
                    "g5h5fHy85fGOrvF3YAwYAT5D87WAjwOX0rzs0/3mY2MDecZfq9UYGRmhWs31"
                    "Ze5fpeUFM5t5cOVKsQr4j5TSvwNExBbgpkx7U6lUqFQqubbrqWq1Ou/TrZWo"
                    "tLxg5lKspMy5bufcCTx9zrF6pr0lSRnlOuP/T2BNRJwBXEHzhd7vZNpbkpTR"
                    "AZ/xR8TGiLgcIKU0CWwE3gbcA4wC78gxoCQpr6FGo7HcM3RiIIacbXJykl27"
                    "djE6OrpirgsupLS8YGYzD4SW33flWzZIUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJU"
                    "GItfkgpj8UtSYSx+SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlQ7WRQRa4HTgc3AOSmlbQusfQPw"
                    "z8DhKaV7s0wpScqmbfHPlP524BpgAzC0wNonA+/NNJskaQl0csa/F9iQUtod"
                    "EdvbrP0A8A/Axd0ONlu9Xmd6ejrnlktuampqv48rXWl5wcylGOTMw8PDLY+3"
                    "Lf6UUh3Y3W5dRBwHvBh4AZmLf2JiIud2PVWr1ZZ7hJ4qLS+YuRSDmHl8fLzl"
                    "8Y6u8bcTEauATwHnppQei4gc2z5hbGxsIM/4a7UaIyMjVKtZvsx9rbS8YGYz"
                    "D65cKTYDO1NKV2fabz+VSoVKpbIUWy+5arU679Otlai0vGDmUqykzLlu5zwX"
                    "eGVE7IuIfTPHJiLiJZn2lyRlkuWMP6X0nNmPI6IBjHk7pyT1nwM+44+IjRFx"
                    "ec5hJElLb6jRaCz3DJ0YiCFnm5ycZNeuXYyOjq6Y64ILKS0vmNnMA6Hl9135"
                    "lg2SVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+S"
                    "CmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+"
                    "SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWp"
                    "drIoItYCpwObgXNSSttarHkX8BZgHXA18KaU0gMZZ5UkZdD2jH+m9LcDpwAb"
                    "gKEWa14FnAm8CHgacAhwUc5BJUl5dHLGvxfYkFLaHRHb51nzNOCDKaWdABHx"
                    "BeA1eUaEer3O9PR0ru16Ympqar+PK11pecHMpRjkzMPDwy2Pty3+lFId2N1m"
                    "zSfmHHohcH2nw7UzMTGRa6ueq9Vqyz1CT5WWF8xcikHMPD4+3vJ4R9f4FyMi"
                    "Xg68BHhHrj3HxsYG8oy/VqsxMjJCtZr9y9x3SssLZjbz4MqaIiKeD3wGODWl"
                    "tDfXvpVKhUqlkmu7nqpWq/M+3VqJSssLZi7FSsqc7XbOiDgS+BpwRkrph7n2"
                    "lSTllaX4I+JQYCvw7pTStTn2lCQtjQO+1BMRG4FXp5ReT/NWzt8ELouIy2Yt"
                    "e15K6WfdjShJymmo0Wgs9wydGIghZ5ucnGTXrl2Mjo6umOuCCyktL5jZzAPh"
                    "V77vCnzLBkkqjsUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5J"
                    "KozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9JhbH4"
                    "JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
                    "4pekwlj8klSYaieLImItcDqwGTgnpbStxZozgPcCTwa+BZydUnok36iSpBza"
                    "nvHPlP524BRgAzDUYs0ocCnwKmAcOAJ4e85BJUl5dHLGvxfYkFLaHRHb51lz"
                    "CnBtSulWgIj4NHAe8MEcQ9brdaanp3Ns1TNTU1P7fVzpSssLZi7FIGceHh5u"
                    "ebxt8aeU6sDuNsvWA7+Y9fgummf9WUxMTOTaqudqtdpyj9BTpeUFM5diEDOP"
                    "j4+3PN7RNf4OrAHqsx5PAQdl2puxsbGBPOOv1WqMjIxQreb6Mvev0vKCmc08"
                    "uHKleHTOXhVgMtPeVCoVKpVKru16qlqtzvt0ayUqLS+YuRQrKXOu2znvAo6c"
                    "9Xg9cHemvSVJGeU6478K+HhEHA/8CDgbuDLT3pKkjA74jD8iNkbE5QAppTuB"
                    "NwNfBHbSfAbw4SwTSpKyGmo0Gss9QycGYsjZJicn2bVrF6OjoyvmuuBCSssL"
                    "ZjbzQPiV77sC37JBkopj8UtSYfr+ptSIWH3BBRcs9xiLNjU1xf3338/q1atX"
                    "zL2/CyktL5jZzP1vy5Ytvw5sTyk9Nvv4IKR4xpYtW5Z7BkkaRD8BjgFun31w"
                    "EIp/O83BJUmLt33ugUG5q0eSlIkv7kpSYSx+SSqMxS9JhbH4JakwFr8kFcbi"
                    "l6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwgzC+/H3"
                    "rYg4A3gv8GTgW8DZKaVHDmRdRHwWOCGl9NwlHrsr3WaOiHXApcDLgWngH1NK"
                    "F/Zm+s5FxCnAx4D1wPeAM1NK93S6JiICeCuwGvhX4IKUUr13CRavm8wRUQX+"
                    "FvhjYBj4KvCWlNJkDyMsWrd/z7PWXARESmltTwbvkmf8BygiRmkW2KuAceAI"
                    "4O0Hsi4iTgJevcQjdy1T5r8G1gHPBE4A3hARpy358IsQEWuAL9As7iOAe4H3"
                    "d7omIk4E3ga8CHgucDLwRz0a/4B0m5lm3hNo5j0GOA44uxezH6gMmR9fcxTN"
                    "/APD4j9wpwDXppRuTSk9DHwaeNli10VEBfgkcHEPZu5WjsyHAB9IKT2cUtoJ"
                    "XEX//YS1E4GdKaXvppQepfmf2NycC605FfhSSmlnSule4J9a/P5+023mpwB/"
                    "k1L6ZUrpl8DX6b+/17m6zfy4S4EPL/m0GXmp58CtB34x6/FdNM8IFrvubcD/"
                    "ADcCr8s8Y25dZ04pvenxgzP/6Z0IfCb7pN3pJOdCa9YDP5/zuVMzz5hbV5lb"
                    "XK57IfDZzDPm1u3fMxGxETgYuALou0uW87H425gppx1zDt8BXAnMvmY7BRzU"
                    "Yos1862LiPXAecDxwG/kmbh7S5l5jg8AP0gpff/Ap10SreZfvYg1nebvJ91m"
                    "fkJEnE3zmd2XMs+YW1eZI+LXgC3AK5dwxiVh8bcx84Lc2NzjEfEX7P/1qwCt"
                    "Xsh6dIF1HwG2zLw4lmfgDJY48+N7vYnmWfCLup13CbSaf2oRa9rm70PdZgYg"
                    "Ik4F3gWclFLq9x/o3W3m9wBfTyn9OCKesVRDLgWv8R+4u4AjZz1eD9zd6bqZ"
                    "s/1XAx+OiH3Ad4DnRMS+mTPuftRV5scfRMTvA+8GXpZS2rMEc3ark5wLren0"
                    "69RPus1MRDwf+HvgFSmlu5ZmzKy6zfx24G0z/35/Ahwy8+/3qCWaNxvP+A/c"
                    "VcDHI+J44Ec072C4stN1M/8whh9fFBEvBj7R57dzdpUZICKOBf6OZun3aznc"
                    "BBwxc7fRlTTv6Jibc6E13wb+LSI+CtwHvJH+f/Gvq8wRMQZ8GXhdSulHPZu6"
                    "O11lTikd8viimTP+H3o75wqXUroTeDPwRWAnzTODD0PzBZ+IuLzdukGTKfP7"
                    "aF5G+q+I2DPz6yc9DdJGSmkvcDqQaL6wdzjNW/OPj4hrF1oz87mbgA8B1wH/"
                    "C1xN817+vtVtZuAC4BnAt2f9ve7p42evOTIPrKFGo98vw0mScvKMX5IKY/FL"
                    "UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klSY/wPTNSCZbt4GgAAAAABJ"
                    "RU5ErkJggg==\n",
                    "text/plain": "<Figure size 432x288 with 1 Axes>",
                },
                "metadata": {"needs_background": "light"},
                "output_type": "display_data",
            },
        ],
        "source": "",
    }
    # files=False: no temp image file is written; the drawing is inline.
    output = rich_notebook_output(
        image_cell, images=True, image_drawing="braille", files=False
    )
    # expected_output is supplied by a fixture (presumably loaded from a
    # recorded expectation — confirm against conftest).
    assert output == expected_output
def test_invalid_image_drawing(
rich_notebook_output: RichOutput,
mock_tempfile_file: Generator[Mock, None, None],
remove_link_ids: Callable[[str], str],
) -> None:
"""It fallsback to text when failing to draw image."""
image_cell = {
"cell_type": "code",
"execution_count": 1,
"id": "43e39858-6416-4dc8-9d7e-7905127e7452",
"metadata": {},
"outputs": [
{
"data": {"text/plain": "<AxesSubplot:>"},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result",
},
{
"data": {
"image/png": "ib45",
"text/plain": "<Figure size 432x288 with 1 Axes>",
},
"metadata": {"needs_background": "light"},
"output_type": "display_data",
},
],
"source": "",
}
output = rich_notebook_output(
image_cell, images=True, image_drawing="character", files=False
)
expected_output = (
" ╭──────────────────────────────────"
"───────────────────────────────────────╮"
"\n\x1b[38;5;247m[1]:\x1b[0m │ "
" "
" │\n ╰─────────────────"
"────────────────────────────────────────"
"────────────────╯\n "
" "
" \n\x1b[38;5;247m[1]:\x1b[0m "
"<AxesSubplot:> "
" \n "
" "
" \n "
" 🖼 Image "
" \n "
" "
" \n "
" \x1b[38;2;187;134;252m<Figure size 432x"
"288 with 1 Axes> "
" \x1b[0m\n"
)
assert output == expected_output
def test_render_image_link_no_image(
rich_notebook_output: RichOutput,
mock_tempfile_file: Generator[Mock, None, None],
remove_link_ids: Callable[[str], str],
tempfile_path: Path,
disable_capture: ContextManager[_PluggyPlugin],
) -> None:
"""It renders a link to an image."""
image_cell = {
"cell_type": "code",
"execution_count": 1,
"id": "43e39858-6416-4dc8-9d7e-7905127e7452",
"metadata": {},
"outputs": [
{
"data": {"text/plain": "<AxesSubplot:>"},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result",
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEDCAYAAAAyZm"
"/jAAAAOXRFWHRTb2Z0"
"d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90"
"bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAATJElEQVR4nO3d"
"f5DcdX3H8edl90IoiTgoN+Q8PEVSRmtFS4EO2hktMsUqNs7YiFW0paYg/gBl"
"eGuniFOtxX7iD8RftS1Oa7Hi+LtmtAgIZRwoTtMiaEXG0SQcARck/AgkcLe3"
"/WMPejn3bveyn9vbvc/zMZO52e998sn7dZm88t3vfm9vqNFoIEkqx6rlHkCS"
"1FsWvyQVxuKXpMJY/JJUGItfkgozKMXfGLRf9Xq9sWPHjka9Xl/2WcxrZjMX"
"m7mlQSn+gTM9Pb3fx5WutLxg5lKsxMwWvyQVxuKXpMJY/JJUGItfkgpj8UtS"
"YaqdLIqINwLvAQ4HbgL+LKV0x5w1pwAfA9YD3wPOTCndk3dcSVK32p7xR8Rx"
"wBbgD4GnAjuAS+asWQN8AXgrcARwL/D+zLNKkjLo5Iz/mcBHU0o/AoiIzwGf"
"nrPmRGBnSum7M2suBb6ea8h6vT5w99BOTU3t93GlKy0vmLkUg5x5eHi45fG2"
"xZ9S+vKcQy8Erp9zbD3wi1mP76J55p/FxMRErq16rlarLfcIPVVaXjBzKQYx"
"8/j4eMvjHV3jf1xE/BbwFuD4OZ9aA9RnPZ4CVi9m74WMjY0N5Bl/rVZjZGSE"
"anVRX+aBVFpeMLOZB1fHKSLi6cBXgT9JKd0959OPztmrQrP8s6hUKlQqlVzb"
"9VS1Wp336dZKVFpeMHMpVlLmjm7njIhDga3AhSmla1osuQs4ctbj9cDc/xwk"
"SX2g7Rl/RAwDXwH+JaV0+TzLbgKOiIjTgCtp3t1zZbYpJUnZdHKp5xXAycDv"
"RMR7Zx3/K+APUkovSSntjYjTgUuBz9F88fdPs08rSQW5/fbb+drXvsbDDz/M"
"s571LF7zmtewZs2arvcdajTmfcvmfjIQQ842OTnJrl27GB0dXTHXBRdSWl4w"
"80rM/LPvT7H1/fvY99D/V06jMc1jjz7G6oNWMzTU/ZsdrFk3xCves4ajTlj4"
"vPvBBx/kQx/6EGeddRYjIyNcdtllbNiwgZNPPnkxf9xQq4Mr4yVqScrg6kv2"
"ccvWyRafqQLTM7+6d/CThjjq8wvX78EHH8y5557LU57yFACOOeYY7rknz5sh"
"WPySNOOl561h30Ms+Rn/S887qO264eFhbrvtNm688UYefPBBpqamOPbYY7v+"
"88Hil6QnHHVClbd/c+1+x5qXt+5ndPTJPb28tWPHDq677jo2b97M4YcfzvXX"
"X8/dd+e5WdJ355SkPvTII48wPDzMunXreOihh7j99tuzfSOrZ/yS1IeOOeYY"
"br31Vi6++GIOPfRQjj76aB544IEse1v8ktSHVq1axaZNm9i0aVP+vbPvKEnq"
"axa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqQ/dd999XHjh"
"hUuyt8UvSYWx+CWpML5XjyT1qUajwTe+8Q1uvvlm1q1bx6ZNmxgbG+t6X4tf"
"kmZ8/977eP8tP+ahqaknjjWmGzz62KMc9OOfM7Sq5U8yXJR11Srved6zOeGp"
"h7VdOzk5ydjYGKeddho33HADV1xxBeeffz5DQ93NYfFL0oxLfvxTtt453w87"
"2ZPtz3nS8DCf/90T2q5bvXo1xx13HAAnnXQSV111Fffdd98TP47xQFn8kjTj"
"vGcfzUOTk63P+FcflO2M/7xnH73o37dq1SoOOeQQ9uzZY/FLUi4nPPUwvvl7"
"L9zvWPNHL+5idHS0pz96ca7p6Wn27NnD2rVr2y9uw7t6JKlPPfbYY2zbto3p"
"6WluuOEGDj30UA47rP1rA+14xi9JfWrt2rXccccdbN26lXXr1vHa17626xd2"
"weKXpL502GGHcdFFFwGwcePGrHt7qUeSCmPxS1JhLH5JKozFL0mFsfglqTAd"
"3dUTEWuB04HNwDkppW0t1rwReB+wDtgKnJVS2ptxVklSBm3P+GdKfztwCrAB"
"+JWbSCPimcClwCuAI4GnAe/MOagkKY9Ozvj3AhtSSrsjYvs8a54N3JZSuhUg"
"Ir4CvDjLhEC9Xmd6ejrXdj0xNfNeH1Oz3vNjJSstL5i5FIOceb63mGhb/Cml"
"OrC7zbL/Bo6MiGOBnwKvBL6+uBHnNzExkWurnqvVass9Qk+VlhfMXIpBzDw+"
"Pt7yeJbv3E0p3R0RW4CbgWlgG/DZHHsDjI2NDeQZf61WY2RkhGp15X+DdGl5"
"wcxmHlxZUkTEccA7aF7y2Q58BPgE8Oc59q9UKlQqlRxb9Vy1Wl3Wd/TrtdLy"
"gplLsZIy57qd82TgmpTSbSmlfcAnaV7ukST1mVzPW34AnBMR48CdwOuBWzLt"
"LUnK6IDP+CNiY0RcDpBSuhL4FPA94F7gBTTv+Zck9ZmhRqOx3DN0YiCGnK1f"
"fmpPr5SWF8xs5oHQ8s37fcsGSSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiL"
"X5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1Jh"
"LH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9J"
"hbH4JakwFr8kFcbil6TCVDtZFBFrgdOBzcA5KaVt86x7J3A2cGdK6SXZppQk"
"ZdO2+GdKfztwDbABGJpn3V8CrwI2AbfkG1GSlFMnZ/x7gQ0ppd0Rsb3Vgog4"
"GDgfeEFKaUfG+QCo1+tMT0/n3nZJTU1N7fdxpSstL5i5FIOceXh4uOXxtsWf"
"UqoDu9ss+23gIeCSiDgJ2AacmVK6e5FztjQxMZFjm2VRq9WWe4SeKi0vmLkU"
"g5h5fHy85fGOrvF3YAwYAT5D87WAjwOX0rzs0/3mY2MDecZfq9UYGRmhWs31"
"Ze5fpeUFM5t5cOVKsQr4j5TSvwNExBbgpkx7U6lUqFQqubbrqWq1Ou/TrZWo"
"tLxg5lKspMy5bufcCTx9zrF6pr0lSRnlOuP/T2BNRJwBXEHzhd7vZNpbkpTR"
"AZ/xR8TGiLgcIKU0CWwE3gbcA4wC78gxoCQpr6FGo7HcM3RiIIacbXJykl27"
"djE6OrpirgsupLS8YGYzD4SW33flWzZIUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
"4pekwlj8klQYi1+SCmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJU"
"GItfkgpj8UtSYSx+SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FL"
"UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlQ7WRQRa4HTgc3AOSmlbQusfQPw"
"z8DhKaV7s0wpScqmbfHPlP524BpgAzC0wNonA+/NNJskaQl0csa/F9iQUtod"
"EdvbrP0A8A/Axd0ONlu9Xmd6ejrnlktuampqv48rXWl5wcylGOTMw8PDLY+3"
"Lf6UUh3Y3W5dRBwHvBh4AZmLf2JiIud2PVWr1ZZ7hJ4qLS+YuRSDmHl8fLzl"
"8Y6u8bcTEauATwHnppQei4gc2z5hbGxsIM/4a7UaIyMjVKtZvsx9rbS8YGYz"
"D65cKTYDO1NKV2fabz+VSoVKpbIUWy+5arU679Otlai0vGDmUqykzLlu5zwX"
"eGVE7IuIfTPHJiLiJZn2lyRlkuWMP6X0nNmPI6IBjHk7pyT1nwM+44+IjRFx"
"ec5hJElLb6jRaCz3DJ0YiCFnm5ycZNeuXYyOjq6Y64ILKS0vmNnMA6Hl9135"
"lg2SVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+S"
"CmPxS1JhLH5JKozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+"
"SSqMxS9JhbH4JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWp"
"drIoItYCpwObgXNSSttarHkX8BZgHXA18KaU0gMZZ5UkZdD2jH+m9LcDpwAb"
"gKEWa14FnAm8CHgacAhwUc5BJUl5dHLGvxfYkFLaHRHb51nzNOCDKaWdABHx"
"BeA1eUaEer3O9PR0ru16Ympqar+PK11pecHMpRjkzMPDwy2Pty3+lFId2N1m"
"zSfmHHohcH2nw7UzMTGRa6ueq9Vqyz1CT5WWF8xcikHMPD4+3vJ4R9f4FyMi"
"Xg68BHhHrj3HxsYG8oy/VqsxMjJCtZr9y9x3SssLZjbz4MqaIiKeD3wGODWl"
"tDfXvpVKhUqlkmu7nqpWq/M+3VqJSssLZi7FSsqc7XbOiDgS+BpwRkrph7n2"
"lSTllaX4I+JQYCvw7pTStTn2lCQtjQO+1BMRG4FXp5ReT/NWzt8ELouIy2Yt"
"e15K6WfdjShJymmo0Wgs9wydGIghZ5ucnGTXrl2Mjo6umOuCCyktL5jZzAPh"
"V77vCnzLBkkqjsUvSYWx+CWpMBa/JBXG4pekwlj8klQYi1+SCmPxS1JhLH5J"
"KozFL0mFsfglqTAWvyQVxuKXpMJY/JJUGItfkgpj8UtSYSx+SSqMxS9JhbH4"
"JakwFr8kFcbil6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG"
"4pekwlj8klSYaieLImItcDqwGTgnpbStxZozgPcCTwa+BZydUnok36iSpBza"
"nvHPlP524BRgAzDUYs0ocCnwKmAcOAJ4e85BJUl5dHLGvxfYkFLaHRHb51lz"
"CnBtSulWgIj4NHAe8MEcQ9brdaanp3Ns1TNTU1P7fVzpSssLZi7FIGceHh5u"
"ebxt8aeU6sDuNsvWA7+Y9fgummf9WUxMTOTaqudqtdpyj9BTpeUFM5diEDOP"
"j4+3PN7RNf4OrAHqsx5PAQdl2puxsbGBPOOv1WqMjIxQreb6Mvev0vKCmc08"
"uHKleHTOXhVgMtPeVCoVKpVKru16qlqtzvt0ayUqLS+YuRQrKXOu2znvAo6c"
"9Xg9cHemvSVJGeU6478K+HhEHA/8CDgbuDLT3pKkjA74jD8iNkbE5QAppTuB"
"NwNfBHbSfAbw4SwTSpKyGmo0Gss9QycGYsjZJicn2bVrF6OjoyvmuuBCSssL"
"ZjbzQPiV77sC37JBkopj8UtSYfr+ptSIWH3BBRcs9xiLNjU1xf3338/q1atX"
"zL2/CyktL5jZzP1vy5Ytvw5sTyk9Nvv4IKR4xpYtW5Z7BkkaRD8BjgFun31w"
"EIp/O83BJUmLt33ugUG5q0eSlIkv7kpSYSx+SSqMxS9JhbH4JakwFr8kFcbi"
"l6TCWPySVBiLX5IKY/FLUmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwgzC+/H3"
"rYg4A3gv8GTgW8DZKaVHDmRdRHwWOCGl9NwlHrsr3WaOiHXApcDLgWngH1NK"
"F/Zm+s5FxCnAx4D1wPeAM1NK93S6JiICeCuwGvhX4IKUUr13CRavm8wRUQX+"
"FvhjYBj4KvCWlNJkDyMsWrd/z7PWXARESmltTwbvkmf8BygiRmkW2KuAceAI"
"4O0Hsi4iTgJevcQjdy1T5r8G1gHPBE4A3hARpy358IsQEWuAL9As7iOAe4H3"
"d7omIk4E3ga8CHgucDLwRz0a/4B0m5lm3hNo5j0GOA44uxezH6gMmR9fcxTN"
"/APD4j9wpwDXppRuTSk9DHwaeNli10VEBfgkcHEPZu5WjsyHAB9IKT2cUtoJ"
"XEX//YS1E4GdKaXvppQepfmf2NycC605FfhSSmlnSule4J9a/P5+023mpwB/"
"k1L6ZUrpl8DX6b+/17m6zfy4S4EPL/m0GXmp58CtB34x6/FdNM8IFrvubcD/"
"ADcCr8s8Y25dZ04pvenxgzP/6Z0IfCb7pN3pJOdCa9YDP5/zuVMzz5hbV5lb"
"XK57IfDZzDPm1u3fMxGxETgYuALou0uW87H425gppx1zDt8BXAnMvmY7BRzU"
"Yos1862LiPXAecDxwG/kmbh7S5l5jg8AP0gpff/Ap10SreZfvYg1nebvJ91m"
"fkJEnE3zmd2XMs+YW1eZI+LXgC3AK5dwxiVh8bcx84Lc2NzjEfEX7P/1qwCt"
"Xsh6dIF1HwG2zLw4lmfgDJY48+N7vYnmWfCLup13CbSaf2oRa9rm70PdZgYg"
"Ik4F3gWclFLq9x/o3W3m9wBfTyn9OCKesVRDLgWv8R+4u4AjZz1eD9zd6bqZ"
"s/1XAx+OiH3Ad4DnRMS+mTPuftRV5scfRMTvA+8GXpZS2rMEc3ark5wLren0"
"69RPus1MRDwf+HvgFSmlu5ZmzKy6zfx24G0z/35/Ahwy8+/3qCWaNxvP+A/c"
"VcDHI+J44Ec072C4stN1M/8whh9fFBEvBj7R57dzdpUZICKOBf6OZun3aznc"
"BBwxc7fRlTTv6Jibc6E13wb+LSI+CtwHvJH+f/Gvq8wRMQZ8GXhdSulHPZu6"
"O11lTikd8viimTP+H3o75wqXUroTeDPwRWAnzTODD0PzBZ+IuLzdukGTKfP7"
"aF5G+q+I2DPz6yc9DdJGSmkvcDqQaL6wdzjNW/OPj4hrF1oz87mbgA8B1wH/"
"C1xN817+vtVtZuAC4BnAt2f9ve7p42evOTIPrKFGo98vw0mScvKMX5IKY/FL"
"UmEsfkkqjMUvSYWx+CWpMBa/JBXG4pekwlj8klSY/wPTNSCZbt4GgAAAAABJ"
"RU5ErkJggg==\n",
"text/plain": "<Figure size 432x288 with 1 Axes>",
},
"metadata": {"needs_background": "light"},
"output_type": "display_data",
},
],
"source": "",
}
with disable_capture:
output = rich_notebook_output(image_cell, images=False)
expected_output = (
" ╭──────────────────────────────────"
"───────────────────────────────────────╮"
"\n\x1b[38;5;247m[1]:\x1b[0m │ "
" "
" │\n ╰─────────────────"
"────────────────────────────────────────"
"────────────────╯\n "
" "
" \n\x1b[38;5;247m[1]:\x1b[0m "
"<AxesSubplot:> "
" \n "
" "
" \n "
f" \x1b]8;id=236660;file://{tempfile_path}0.png"
"\x1b\\\x1b[94m🖼 Click to vie"
"w Image\x1b[0m\x1b]8;;\x1b\\ "
" \n "
" "
" \n <"
"Figure size 432x288 with 1 Axes> "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_render_svg_link(
rich_notebook_output: RichOutput,
mock_tempfile_file: Generator[Mock, None, None],
remove_link_ids: Callable[[str], str],
tempfile_path: Path,
) -> None:
"""It renders a link to an image."""
svg_cell = {
"cell_type": "code",
"execution_count": 2,
"id": "1a2e22b6-ae2b-4c0c-a8db-ec0c0ea1227b",
"metadata": {},
"outputs": [
{
"data": {
"image/svg+xml": (
'<?xml version="1.0" encoding="UTF-8" sta'
'ndalone="no"?>\n<!DOCTYPE svg PUBLIC "-//'
'W3C//DTD SVG 1.1//EN"\n "http://www.w3.or'
'g/Graphics/SVG/1.1/DTD/svg11.dtd">\n<!-- '
"Generated by graphviz version 2.47.2 (20"
"210527.0053)\n -->\n<!-- Pages: 1 -->\n<svg"
' width="514pt" height="44pt"\n viewBox="0'
'.00 0.00 513.94 44.00" xmlns="http://www'
'.w3.org/2000/svg" xmlns:xlink="http://ww'
'w.w3.org/1999/xlink">\n<g id="graph0" cla'
'ss="graph" transform="scale(1 1) rotate('
'0) translate(4 40)">\n<polygon fill="whit'
'e" stroke="transparent" points="-4,4 -4,'
'-40 509.94,-40 509.94,4 -4,4"/>\n<!-- A -'
'->\n<g id="node1" class="node">\n<title>A<'
'/title>\n<ellipse fill="none" stroke="bla'
'ck" cx="53.95" cy="-18" rx="53.89" ry="1'
'8"/>\n<text text-anchor="middle" x="53.95'
'" y="-14.3" font-family="Times,serif" fo'
'nt-size="14.00">King Arthur</text>\n</g>\n'
'<!-- B -->\n<g id="node2" class="node">\n<'
'title>B</title>\n<ellipse fill="none" str'
'oke="black" cx="215.95" cy="-18" rx="90.'
'18" ry="18"/>\n<text text-anchor="middle"'
' x="215.95" y="-14.3" font-family="Times'
',serif" font-size="14.00">Sir Bedevere t'
'he Wise</text>\n</g>\n<!-- L -->\n<g id="no'
'de3" class="node">\n<title>L</title>\n<ell'
'ipse fill="none" stroke="black" cx="414.'
'95" cy="-18" rx="90.98" ry="18"/>\n<text '
'text-anchor="middle" x="414.95" y="-14.3'
'" font-family="Times,serif" font-size="1'
'4.00">Sir Lancelot the Brave</text>\n</g>'
"\n</g>\n</svg>\n"
),
"text/plain": "<graphviz.dot.Digraph at 0x108eb9430>",
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result",
}
],
"source": "",
}
output = rich_notebook_output(svg_cell)
expected_output = (
" ╭──────────────────────────────────"
"───────────────────────────────────────╮"
"\n\x1b[38;5;247m[2]:\x1b[0m │ "
" "
" │\n ╰─────────────────"
"────────────────────────────────────────"
"────────────────╯\n "
" "
" \n\x1b[38;5;247m[2]:\x1b[0m "
f"\x1b]8;id=1627259094.976956-618609;file://{tempfile_path}0.svg"
"\x1b\\\x1b[9"
"4m🖼 Click to view Image\x1b[0m\x1b]8;;\x1b\\ "
" "
" \n "
" "
" \n\x1b[38;5;247m[2]:\x1b[0m <graphviz."
"dot.Digraph at 0x108eb9430> "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_unknown_language() -> None:
"""It sets the language to Python when it cannot be parsed."""
notebook_node = nbformat.from_dict(
{
"cells": [],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5,
}
)
rendered_notebook = notebook.Notebook(notebook_node)
expected_output = "python"
acutal_output = rendered_notebook.language
assert acutal_output == expected_output
def test_skip_unknown_cell_type(rich_notebook_output: RichOutput) -> None:
"""It skips rendering a cell if the type is not known."""
markdown_cell = {
"cell_type": "unknown",
"id": "academic-bride",
"metadata": {},
"source": "### Lorep ipsum\n\n**dolor** _sit_ `amet`",
}
output = rich_notebook_output(markdown_cell)
expected_output = ""
assert output == expected_output
def test_skip_no_cell_type(rich_notebook_output: RichOutput) -> None:
"""It skips rendering a cell if there is not cell type."""
markdown_cell = {
"metadata": {"no"},
"source": "### Lorep ipsum\n\n**dolor** _sit_ `amet`",
}
output = rich_notebook_output(markdown_cell)
expected_output = ""
assert output == expected_output
def test_image_link_not_image(
rich_notebook_output: RichOutput,
mocker: MockerFixture,
remove_link_ids: Callable[[str], str],
) -> None:
"""It falls back to skipping drawing if content is not an image."""
mock = mocker.patch("httpx.get")
mock.return_value.content = "Bad image"
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "",
}
output = rich_notebook_output(markdown_cell, image_drawing="character")
expected_output = (
" \x1b]8;id=246597;https://github.com/paw-l"
"u/nbpreview/tests/assets/outline_article"
"_white_48dp.png\x1b\\\x1b[94m🌐 Click to view Az"
"ores\x1b[0m\x1b]8;;\x1b\\ "
" \n "
" "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_relative_dir_markdown_link(
rich_notebook_output: RichOutput,
remove_link_ids: Callable[[str], str],
) -> None:
"""It adds a path prefix to the image hyperlink."""
markdown_cell = {
"cell_type": "markdown",
"id": "academic-bride",
"metadata": {},
"source": "",
}
relative_dir = pathlib.Path("/", "Users", "test")
output = rich_notebook_output(
markdown_cell, relative_dir=relative_dir, hyperlinks=True
)
expected_output = (
" \x1b]8;id=835649;"
f"file://{relative_dir.resolve() / 'image.png'}\x1b\\\x1b"
"[94m🖼 Click to view Test image\x1b[0m\x1b]8;;\x1b"
"\\ "
" \n "
" "
" \n"
)
assert remove_link_ids(output) == remove_link_ids(expected_output)
def test_notebook_code_line_numbers(rich_notebook_output: RichOutput) -> None:
"""It renders a code cell with line numbers."""
code_cell = {
"cell_type": "code",
"execution_count": 2,
"id": "emotional-amount",
"metadata": {},
"outputs": [],
"source": "def foo(x: float, y: float) -> float:\n return x + y",
}
output = rich_notebook_output(code_cell, line_numbers=True)
expected_output = (
" ╭──────────────────────────────────"
"───────────────────────────────────────╮"
"\n\x1b[38;5;247m[2]:\x1b[0m │ \x1b[2m1 \x1b[0m\x1b[38;"
"2;187;128;179;49mdef\x1b[0m\x1b[38;2;238;255;2"
"55;49m \x1b[0m\x1b[38;2;130;170;255;49mfoo\x1b[0m"
"\x1b[38;2;137;221;255;49m(\x1b[0m\x1b[38;2;238;25"
"5;255;49mx\x1b[0m\x1b[38;2;137;221;255;49m:\x1b[0"
"m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;130;1"
"70;255;49mfloat\x1b[0m\x1b[38;2;137;221;255;49"
"m,\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;"
"238;255;255;49my\x1b[0m\x1b[38;2;137;221;255;4"
"9m:\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2"
";130;170;255;49mfloat\x1b[0m\x1b[38;2;137;221;"
"255;49m)\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
"[38;2;137;221;255;49m-\x1b[0m\x1b[38;2;137;221"
";255;49m>\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m"
"\x1b[38;2;130;170;255;49mfloat\x1b[0m\x1b[38;2;13"
"7;221;255;49m:\x1b[0m "
" │\n │ \x1b[2m2 \x1b[0m\x1b[38;2;238"
";255;255;49m \x1b[0m\x1b[38;2;187;128;179;4"
"9mreturn\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
"[38;2;238;255;255;49mx\x1b[0m\x1b[38;2;238;255"
";255;49m \x1b[0m\x1b[38;2;137;221;255;49m+\x1b[0m"
"\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;238;25"
"5;255;49my\x1b[0m "
" │\n ╰──────"
"────────────────────────────────────────"
"───────────────────────────╯\n"
)
assert output == expected_output
def test_notebook_line_numbers_magic_code_cell(
rich_notebook_output: RichOutput,
) -> None:
"""It renders line numbers in a code cell with language magic."""
code_cell = {
"cell_type": "code",
"execution_count": 3,
"id": "emotional-amount",
"metadata": {},
"outputs": [],
"source": "%%bash\necho 'lorep'",
}
expected_output = (
" ╭──────────────────────────────────"
"───────────────────────────────────────╮"
"\n\x1b[38;5;247m[3]:\x1b[0m │ \x1b[2m1 \x1b[0m\x1b[38;"
"2;137;221;255;49m%%\x1b[0m\x1b[38;2;187;128;17"
"9;49mbash\x1b[0m "
" │\n "
" │ \x1b[2m2 \x1b[0m\x1b[38;2;130;170;255;49mec"
"ho\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;"
"195;232;141;49m'lorep'\x1b[0m "
" "
" │\n ╰──────────────────────────────"
"────────────────────────────────────────"
"───╯\n"
)
output = rich_notebook_output(code_cell, line_numbers=True)
assert output == expected_output
def test_code_wrap(rich_notebook_output: RichOutput) -> None:
"""It wraps code when narrow."""
code_cell = {
"cell_type": "code",
"execution_count": 3,
"id": "emotional-amount",
"metadata": {},
"outputs": [],
"source": "non_monkeys ="
' [animal for animal in get_animals("mamals") if animal != "monkey"]',
}
expected_output = (
" ╭──────────────────────────────────"
"───────────────────────────────────────╮"
"\n\x1b[38;5;247m[3]:\x1b[0m │ \x1b[38;2;238;255;25"
"5;49mnon_monkeys\x1b[0m\x1b[38;2;238;255;255;4"
"9m \x1b[0m\x1b[38;2;137;221;255;49m=\x1b[0m\x1b[38;2"
";238;255;255;49m \x1b[0m\x1b[38;2;137;221;255;"
"49m[\x1b[0m\x1b[38;2;238;255;255;49manimal\x1b[0m"
"\x1b[38;2;238;255;255;49m \x1b[0m\x1b[38;2;187;12"
"8;179;49mfor\x1b[0m\x1b[38;2;238;255;255;49m \x1b"
"[0m\x1b[38;2;238;255;255;49manimal\x1b[0m\x1b[38;"
"2;238;255;255;49m \x1b[0m\x1b[3;38;2;137;221;2"
"55;49min\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b"
"[38;2;238;255;255;49mget_animals\x1b[0m\x1b[38"
";2;137;221;255;49m(\x1b[0m\x1b[38;2;195;232;14"
'1;49m"\x1b[0m\x1b[38;2;195;232;141;49mmamals\x1b['
'0m\x1b[38;2;195;232;141;49m"\x1b[0m\x1b[38;2;137;'
"221;255;49m)\x1b[0m\x1b[38;2;238;255;255;49m \x1b"
"[0m\x1b[38;2;187;128;179;49mif\x1b[0m\x1b[38;2;23"
"8;255;255;49m \x1b[0m\x1b[38;2;238;255;255;49m"
"animal\x1b[0m\x1b[38;2;238;255;255;49m \x1b[0m\x1b[3"
"8;2;137;221;255;49m!=\x1b[0m\x1b[38;2;238;255;"
"255;49m \x1b[0m │\n │ \x1b[38;2;195;232;141"
';49m"\x1b[0m\x1b[38;2;195;232;141;49mmonkey\x1b[0'
'm\x1b[38;2;195;232;141;49m"\x1b[0m\x1b[38;2;137;2'
"21;255;49m]\x1b[0m "
" │\n"
" ╰──────────────────────────────────"
"───────────────────────────────────────╯"
"\n"
)
output = rich_notebook_output(code_cell, code_wrap=True)
assert output == expected_output
| [
37811,
14402,
2663,
329,
8543,
526,
15931,
198,
11748,
4818,
330,
28958,
198,
11748,
33245,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
2420,
37150,
198,
6738,
3108,
8019,
... | 1.439689 | 181,583 |
from typing import Optional, Union
import re
import hashlib
from pathlib import Path
from mimetypes import guess_type as orig_guess_type
from collections.abc import Callable, AsyncGenerator
import xmltodict
import aiofiles
from chardet import UniversalDetector
from asgi_webdav.constants import RESPONSE_DATA_BLOCK_SIZE
from asgi_webdav.config import Config
def generate_etag(f_size: [float, int], f_modify_time: float) -> str:
"""
https://tools.ietf.org/html/rfc7232#section-2.3 ETag
https://developer.mozilla.org/zh-CN/docs/Web/HTTP/Headers/ETag
"""
return 'W/"{}"'.format(
hashlib.md5("{}{}".format(f_size, f_modify_time).encode("utf-8")).hexdigest()
)
def guess_type(
config: Config, file: Union[str, Path]
) -> (Optional[str], Optional[str]):
"""
https://tools.ietf.org/html/rfc6838
https://developer.mozilla.org/zh-CN/docs/Web/HTTP/Basics_of_HTTP/MIME_types
https://www.iana.org/assignments/media-types/media-types.xhtml
"""
if isinstance(file, str):
file = Path(file)
elif not isinstance(file, Path):
raise # TODO
content_encoding = None
if config.guess_type_extension.enable:
# extension guess
content_type = config.guess_type_extension.filename_mapping.get(file.name)
if content_type:
return content_type, content_encoding
content_type = config.guess_type_extension.suffix_mapping.get(file.suffix)
if content_type:
return content_type, content_encoding
# basic guess
content_type, content_encoding = orig_guess_type(file, strict=False)
return content_type, content_encoding
async def detect_charset(
file: Union[str, Path], content_type: Optional[str]
) -> Optional[str]:
"""
https://docs.python.org/3/library/codecs.html
"""
if isinstance(file, str):
return None
if content_type is None or not content_type.startswith("text/"):
return None
detector = UniversalDetector()
async with aiofiles.open(file, "rb") as fp:
for line in await fp.readlines():
detector.feed(line)
if detector.done:
break
if detector.result.get("confidence") >= 0.6:
return detector.result.get("encoding")
return None
USER_AGENT_PATTERN = r"firefox|chrome|safari"
| [
6738,
19720,
1330,
32233,
11,
4479,
198,
11748,
302,
198,
11748,
12234,
8019,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
17007,
2963,
12272,
1330,
4724,
62,
4906,
355,
1796,
62,
5162,
408,
62,
4906,
198,
6738,
17268,
13,
39305,
1330... | 2.466387 | 952 |
import flask_restful as flr
import const as cst
| [
11748,
42903,
62,
2118,
913,
355,
781,
81,
198,
11748,
1500,
355,
269,
301,
198
] | 3.2 | 15 |
"""Provides various variants of the qucosa dataset."""
# pylint: disable=too-many-arguments
import logging
import os
from typing import Callable, Iterator, List, Tuple, Union
from typing_extensions import Literal
from slub_docsa.common.dataset import Dataset, dataset_from_samples, samples_from_dataset
from slub_docsa.common.paths import get_cache_dir
from slub_docsa.common.sample import Sample
from slub_docsa.common.subject import SubjectHierarchy
from slub_docsa.data.load.qucosa import qucosa_subject_hierarchy_by_subject_schema, read_qucosa_samples
from slub_docsa.data.load.qucosa import read_qucosa_documents_from_directory
from slub_docsa.data.preprocess.dataset import filter_subjects_with_insufficient_samples
from slub_docsa.data.preprocess.language import filter_samples_by_detected_language_via_langid
from slub_docsa.data.preprocess.subject import prune_subject_targets_to_level, prune_subject_targets_to_minimum_samples
from slub_docsa.data.store.dataset import load_persisted_dataset_from_lazy_sample_iterator
from slub_docsa.evaluation.incidence import unique_subject_order
logger = logging.getLogger(__name__)
def _filter_min_samples(samples_iterator, min_samples):
"""Apply standard minimum samples pruning to a sample iterator."""
dataset = dataset_from_samples(samples_iterator)
dataset = filter_subjects_with_insufficient_samples(dataset, min_samples)
return samples_from_dataset(dataset)
def _prune_by_level(samples_iterator, prune_level, min_samples, subject_hierarchy):
"""Apply level-based pruning to a sample iterator."""
if prune_level < 1:
raise ValueError("prune level must be at least 1")
dataset = dataset_from_samples(samples_iterator)
dataset.subjects = prune_subject_targets_to_level(prune_level, dataset.subjects, subject_hierarchy)
pruned_iterator = samples_from_dataset(dataset)
filtered_iterator = _filter_min_samples(pruned_iterator, min_samples)
return filtered_iterator
def _prune_min_samples(samples_iterator, min_samples, subject_hierarchy):
"""Combine hierarchical and standard minimum sample pruning for a samples iterator."""
# prune hierarchy
dataset = dataset_from_samples(samples_iterator)
dataset.subjects = prune_subject_targets_to_minimum_samples(min_samples, dataset.subjects, subject_hierarchy)
pruned_iterator = samples_from_dataset(dataset)
# filter min samples
filtered_iterator = _filter_min_samples(pruned_iterator, min_samples)
return filtered_iterator
def qucosa_named_datasets_tuple_list(
check_qucosa_download: bool = False,
):
"""Return list of qucosa datasets as tuples."""
cqd = check_qucosa_download
datasets: List[Tuple[str, Callable[[], Iterator[Sample]], Callable[[], SubjectHierarchy]]] = [
("qucosa_all_titles_rvk",
lambda: _load_qucosa_samples(
"rvk", "titles", None, False, "min_samples_10", cqd
), lazy_rvk),
("qucosa_all_titles_ddc",
lambda: _load_qucosa_samples(
"ddc", "titles", None, False, "min_samples_10", cqd
), lazy_ddc),
("qucosa_de_titles_rvk",
lambda: _load_qucosa_samples(
"rvk", "titles", "de", False, "min_samples_10", cqd
), lazy_rvk),
("qucosa_de_titles_ddc",
lambda: _load_qucosa_samples(
"ddc", "titles", "de", False, "min_samples_10", cqd
), lazy_ddc),
("qucosa_de_titles_langid_rvk",
lambda: _load_qucosa_samples(
"rvk", "titles", "de", True, "min_samples_10", cqd
), lazy_rvk),
("qucosa_de_titles_langid_ddc",
lambda: _load_qucosa_samples(
"ddc", "titles", "de", True, "min_samples_10", cqd
), lazy_ddc),
("qucosa_de_complete_but_only_titles_rvk",
lambda: _load_qucosa_samples(
"rvk", "complete_but_only_titles", "de", False, "min_samples_10", cqd
), lazy_rvk),
("qucosa_de_abstracts_rvk",
lambda: _load_qucosa_samples(
"rvk", "abstracts", "de", False, "min_samples_10", cqd
), lazy_rvk),
("qucosa_de_abstracts_ddc",
lambda: _load_qucosa_samples(
"ddc", "abstracts", "de", False, "min_samples_10", cqd
), lazy_ddc),
("qucosa_de_abstracts_langid_rvk",
lambda: _load_qucosa_samples(
"rvk", "abstracts", "de", True, "min_samples_10", cqd
), lazy_rvk),
("qucosa_de_abstracts_langid_ddc",
lambda: _load_qucosa_samples(
"ddc", "abstracts", "de", True, "min_samples_10", cqd
), lazy_ddc),
("qucosa_de_complete_but_only_abstracts_rvk",
lambda: _load_qucosa_samples(
"rvk", "complete_but_only_abstracts", "de", False, "min_samples_10", cqd
), lazy_rvk),
("qucosa_de_fulltexts_rvk",
lambda: _load_qucosa_samples(
"rvk", "fulltexts", "de", False, "min_samples_10", cqd
), lazy_rvk),
("qucosa_de_fulltexts_ddc",
lambda: _load_qucosa_samples(
"ddc", "fulltexts", "de", False, "min_samples_10", cqd
), lazy_ddc),
("qucosa_de_fulltexts_langid_rvk",
lambda: _load_qucosa_samples(
"rvk", "fulltexts", "de", True, "min_samples_10", cqd
), lazy_rvk),
("qucosa_de_fulltexts_langid_rvk_level_1",
lambda: _load_qucosa_samples(
"rvk", "fulltexts", "de", True, "level_1", cqd
), lazy_rvk),
("qucosa_de_fulltexts_langid_rvk_level_2",
lambda: _load_qucosa_samples(
"rvk", "fulltexts", "de", True, "level_2", cqd
), lazy_rvk),
("qucosa_de_fulltexts_langid_rvk_no_pruning",
lambda: _load_qucosa_samples(
"rvk", "fulltexts", "de", True, "no_pruning", cqd
), lazy_rvk),
("qucosa_de_fulltexts_langid_ddc",
lambda: _load_qucosa_samples(
"ddc", "fulltexts", "de", True, "min_samples_10", cqd
), lazy_ddc),
("qucosa_de_complete_but_only_fulltexts_rvk",
lambda: _load_qucosa_samples(
"rvk", "complete_but_only_fulltexts", "de", False, "min_samples_10", cqd
), lazy_rvk),
]
return datasets
def qucosa_named_datasets(
name_subset: List[str] = None,
check_qucosa_download: bool = False,
) -> Iterator[Tuple[str, Dataset, SubjectHierarchy]]:
"""Return default qucosa dataset variants."""
quocsa_cache_dir = os.path.join(get_cache_dir(), "qucosa")
dataset_list = qucosa_named_datasets_tuple_list(check_qucosa_download)
# filter data sets based on name subset parameter
if name_subset is not None:
dataset_list = list(filter(lambda i: i[0] in name_subset, dataset_list))
for dataset_name, lazy_sample_iterator, lazy_subject_hierarchy in dataset_list:
# load and persist each dataset
logger.info("load and save persisted dataset %s", dataset_name)
filepath = os.path.join(quocsa_cache_dir, f"{dataset_name}.sqlite")
dataset = load_persisted_dataset_from_lazy_sample_iterator(lazy_sample_iterator, filepath)
yield dataset_name, dataset, lazy_subject_hierarchy()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# loads all data sets and generates persistent storage for them
for dn, ds, _ in qucosa_named_datasets():
n_unique_subjects = len(unique_subject_order(ds.subjects))
logger.info(
"dataset %s has %d documents and %d unique subjects",
dn, len(ds.documents), n_unique_subjects
)
| [
37811,
15946,
1460,
2972,
17670,
286,
262,
627,
6966,
64,
27039,
526,
15931,
198,
198,
2,
279,
2645,
600,
25,
15560,
28,
18820,
12,
21834,
12,
853,
2886,
198,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
6738,
19720,
1330,
4889,
54... | 2.110509 | 3,692 |
from django.contrib.auth import get_user_model
from test_plus import TestCase
from open.core.betterself.constants import (
BetterSelfResourceConstants,
TEST_CONSTANTS,
)
from open.core.betterself.factories import ActivityFactory
from open.core.betterself.models.activity import Activity
from open.core.betterself.tests.mixins.resource_mixin import (
BetterSelfResourceViewTestCaseMixin,
DeleteTestsMixin,
GetTestsMixin,
)
User = get_user_model()
"""
python manage.py test --pattern="*test_activity_views.py" --keepdb
"""
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
1332,
62,
9541,
1330,
6208,
20448,
198,
198,
6738,
1280,
13,
7295,
13,
27903,
944,
13,
9979,
1187,
1330,
357,
198,
220,
220,
220,
11625,
24704,
... | 3.033333 | 180 |
from ...exceptions import BadRequestException
from ...utils import get_temp_file, make_validation_report
from biosimulators_utils.config import Config
from biosimulators_utils.omex_meta.data_model import OmexMetadataInputFormat, OmexMetadataSchema
from biosimulators_utils.omex_meta.io import read_omex_meta_file
import requests
import requests.exceptions
def handler(body, file=None):
''' Validate metadata about a modeling project or a component of a project
Args:
body (:obj:`dict`): dictionary in schema ``ValidateOmexMetadataFileOrUrl`` with keys
* ``url`` whose value has schema ``Url`` with the URL for a model file
* ``format`` (:obj:`str`): format of the metadata
* ``schema`` (:obj:`str`): schema to use to validate the metadata
file (:obj:`werkzeug.datastructures.FileStorage`): OMEX Metadata file
Returns:
``ValidationReport``: information about the validity or
lack thereof of the metadata
'''
format = OmexMetadataInputFormat(body['format'])
schema = OmexMetadataSchema(body['schema'])
metadata_file = file
metadata_url = body.get('url', None)
if metadata_url and metadata_file:
raise BadRequestException(
title='Only one of `file` or `url` can be used at a time.',
instance=ValueError(),
)
if not metadata_url and not metadata_file:
raise BadRequestException(
title='One of `file` or `url` must be used.',
instance=ValueError(),
)
# create temporary file
metadata_filename = get_temp_file()
# get metadata
if metadata_file:
metadata_file.save(metadata_filename)
else:
try:
response = requests.get(metadata_url)
response.raise_for_status()
except requests.exceptions.RequestException as exception:
title = 'Metadata could not be loaded from `{}`'.format(
metadata_url)
raise BadRequestException(
title=title,
instance=exception,
)
# save metadata to local temporary file
with open(metadata_filename, 'wb') as file:
file.write(response.content)
# validate metadata
config = Config(
OMEX_METADATA_INPUT_FORMAT=format,
OMEX_METADATA_SCHEMA=schema,
)
_, errors, warnings = read_omex_meta_file(metadata_filename, config=config)
return make_validation_report(errors, warnings, filenames=[metadata_filename])
| [
6738,
2644,
1069,
11755,
1330,
7772,
18453,
16922,
198,
6738,
2644,
26791,
1330,
651,
62,
29510,
62,
7753,
11,
787,
62,
12102,
341,
62,
13116,
198,
6738,
37140,
320,
24325,
62,
26791,
13,
11250,
1330,
17056,
198,
6738,
37140,
320,
24325... | 2.551308 | 994 |
import types
from functools import wraps
| [
11748,
3858,
198,
6738,
1257,
310,
10141,
1330,
27521,
198
] | 4.1 | 10 |
import psycopg2
import credentials
import yaml
import logging
import time
from functools import wraps
logger = logging.getLogger(__name__)
def connect():
    """Open a new connection to the Postgres database.

    Connection parameters (database, user, password, host, port) are read
    from the ``credentials`` module.

    Returns:
        psycopg2.extensions.connection: The open database connection.
    """
    return psycopg2.connect(
        database=credentials.database,
        user=credentials.user,
        password=credentials.password,
        host=credentials.host,
        port=credentials.port,
    )
| [
11748,
17331,
22163,
70,
17,
198,
11748,
18031,
198,
11748,
331,
43695,
198,
11748,
18931,
198,
11748,
640,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
... | 3.010929 | 183 |
# Plot Zipf's-law rank/frequency curves for two NLTK sample corpora
# (text1 = Moby Dick, text5 = chat corpus, both provided by nltk.book).
from nltk.book import *
import pylab

# NOTE(review): zipfPlot is not defined in this file and is not part of
# nltk.book or pylab — presumably defined elsewhere; verify before running.
zipfPlot(text1)
zipfPlot(text5)
| [
6738,
299,
2528,
74,
13,
2070,
1330,
1635,
198,
11748,
279,
2645,
397,
198,
198,
13344,
69,
43328,
7,
5239,
16,
8,
198,
13344,
69,
43328,
7,
5239,
20,
8,
198
] | 2.258065 | 31 |
"""
Given an array of integers sorted in ascending order, find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return [-1, -1].
For example,
Given [5, 7, 7, 8, 8, 10] and target value 8,
return [3, 4].
Your runtime beats 63.36 % of python submissions
"""
| [
37811,
198,
15056,
281,
7177,
286,
37014,
23243,
287,
41988,
1502,
11,
1064,
262,
3599,
290,
7464,
2292,
286,
257,
1813,
2496,
1988,
13,
198,
198,
7120,
11862,
338,
19124,
13357,
1276,
307,
287,
262,
1502,
286,
440,
7,
6404,
299,
737,... | 3.477477 | 111 |
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Server routes related to the system
"""
import logging
from http import HTTPStatus
from flasgger import swag_from
from flask import Blueprint, jsonify
from sparsify.blueprints.utils import API_ROOT_PATH
from sparsify.schemas import ErrorSchema, ResponseSystemInfo, data_dump_and_validation
from sparsify.utils import get_ml_sys_info, ml_engines_errors
# Public API of this module.
__all__ = ["SYSTEM_PATH", "system_blueprint"]

# Root URL prefix for all system endpoints, e.g. "<api-root>/system".
SYSTEM_PATH = "{}/system".format(API_ROOT_PATH)

_LOGGER = logging.getLogger(__name__)

# Flask blueprint grouping the system info/validation routes under SYSTEM_PATH.
system_blueprint = Blueprint(SYSTEM_PATH, __name__, url_prefix=SYSTEM_PATH)
@system_blueprint.route("/info")
# Swagger/OpenAPI spec for this route, rendered by flasgger.
@swag_from(
    {
        "tags": ["System"],
        "summary": "Get system specs and other hardware info",
        "produces": ["application/json"],
        "parameters": [],
        "responses": {
            HTTPStatus.OK.value: {
                "description": "The info for the current system the server is on",
                "schema": ResponseSystemInfo,
            },
            HTTPStatus.BAD_REQUEST.value: {
                "description": "Information for the error that occurred",
                "schema": ErrorSchema,
            },
            HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                "description": "Information for the error that occurred",
                "schema": ErrorSchema,
            },
        },
    },
)
def info():
    """
    Route for getting the info describing the current system the server is running on.

    :return: a tuple containing (json response, http status code)
    """
    _LOGGER.info("getting system info")
    # Hardware / ML-engine details gathered by sparsify.utils.
    sys_info = get_ml_sys_info()
    # Serialize and validate the payload against the response schema.
    resp_info = data_dump_and_validation(ResponseSystemInfo(), {"info": sys_info})
    _LOGGER.info("retrieved system info {}".format(resp_info))
    return jsonify(resp_info), HTTPStatus.OK.value
@system_blueprint.route("/validate", methods=["POST"])
# Swagger/OpenAPI spec for this route, rendered by flasgger.
@swag_from(
    {
        "tags": ["System"],
        "summary": "Validate that the system is setup correctly to run. "
        "For example, make sure deepsparse and sparseml are accessible",
        "produces": ["application/json"],
        "parameters": [],
        "responses": {
            HTTPStatus.OK.value: {"description": "System is setup correctly"},
            HTTPStatus.BAD_REQUEST.value: {
                "description": "Information for the error that occurred",
                "schema": ErrorSchema,
            },
            HTTPStatus.INTERNAL_SERVER_ERROR.value: {
                "description": "Information for the error that occurred",
                "schema": ErrorSchema,
            },
        },
    },
)
def validate():
    """
    Route for validating the current system the server is running on;
    deepsparse and onnxruntime must be installed to validate successfully.

    :return: a tuple containing (response, http status code)
    """
    _LOGGER.info("validating system")
    # Mapping of ML engine name -> import error (None when the engine loaded).
    errors = ml_engines_errors()
    for key, err in errors.items():
        if err is not None:
            # NOTE(review): raises a plain Exception — presumably converted to a
            # 500 response by a module-level Flask error handler; confirm.
            raise Exception("error on import for {}: {}".format(key, err))
    _LOGGER.info("validated system")
    return "", HTTPStatus.OK.value
| [
2,
15069,
357,
66,
8,
33448,
532,
1944,
1220,
47986,
32707,
11,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,... | 2.641297 | 1,419 |
import re
from datetime import datetime
from difflib import SequenceMatcher
from city_scrapers_core.constants import COMMISSION
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
| [
11748,
302,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
814,
8019,
1330,
45835,
19044,
2044,
198,
198,
6738,
1748,
62,
1416,
2416,
364,
62,
7295,
13,
9979,
1187,
1330,
22240,
40373,
198,
6738,
1748,
62,
1416,
2416,
364,
62,
72... | 3.584615 | 65 |
"""Manage MRL."""
import pathlib
from urllib.parse import unquote, urlparse
from path import Path
def mrl_to_path(file_mrl):
    """Convert a MRL to a filesystem path.

    File path is stored as MRL inside a media object, we have to bring it back
    to a more classic looking path format.

    Args:
        file_mrl (str): Path to the resource within MRL format.

    Returns:
        path.Path: Path to the resource.
    """
    path_string = unquote(urlparse(file_mrl).path)

    # Remove the leading '/' that urlparse leaves in front of Windows drive
    # paths like '/C:/a/b'. Guard the length first: short results such as '/'
    # (e.g. from the MRL 'file:///') would otherwise raise IndexError on
    # path_string[2].
    if len(path_string) >= 3 and path_string[0] == "/" and path_string[2] == ":":
        path_string = path_string[1:]

    return Path(path_string).normpath()
def path_to_mrl(file_path):
    """Convert a filesystem path to MRL.

    Args:
        file_path (path.Path or str): Path to the resource.

    Returns:
        str: Path to the resource within MRL format.
    """
    resource = pathlib.Path(file_path)
    return resource.as_uri()
| [
37811,
5124,
496,
337,
7836,
526,
15931,
198,
198,
11748,
3108,
8019,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
555,
22708,
11,
19016,
29572,
198,
198,
6738,
3108,
1330,
10644,
628,
198,
4299,
285,
45895,
62,
1462,
62,
6978,
7,
7753... | 2.62259 | 363 |
from inac8hr.gui import *
from inac8hr.scenes.layers import *
from inac8hr.anim import SceneSequence, SequenceInfo, ExponentialEaseOut, QuadEaseIn, TemporalSequence
from inac8hr.entities import In8acUnitInfo
from inac8hr.commands import CommandHandler
from inac8hr.tools import ToolHandler
from inac8hr.globals import GAME_PREFS
import i18n
#
# the Jumping Ballot
#
# TODO: Add an InspectorPanel.
#
| [
6738,
287,
330,
23,
11840,
13,
48317,
1330,
1635,
198,
6738,
287,
330,
23,
11840,
13,
28123,
13,
75,
6962,
1330,
1635,
198,
6738,
287,
330,
23,
11840,
13,
11227,
1330,
28315,
44015,
594,
11,
45835,
12360,
11,
5518,
35470,
36,
589,
7... | 2.948529 | 136 |
# mixins for checking if user is logged in and the checklist author is the same as logged in user
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.views.generic import ListView
from checklist.models import Bookmark, Category, Checklist, Upvote
from .helper_methods import get_upvote_bookmark_list, paginate_content
# VIEW BOOKMARKS PAGE
# VIEW UPVOTE PAGE
# SEARCH RESULTS PAGE
# DISPLAY CHECKLISTS FOR A CATEGORY PAGE
| [
2,
5022,
1040,
329,
10627,
611,
2836,
318,
18832,
287,
290,
262,
41859,
1772,
318,
262,
976,
355,
18832,
287,
2836,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
19816,
1040,
1330,
23093,
37374,
35608,
259,
198,
6738,
42625,
... | 3.341772 | 158 |
from list_manager.listmanager import (subtract_elements, multiply_elements,
divide_elements,
remove_repeated_elements, return_element_index,
return_common_elements, return_uncommon_elements,
element_average, array_average, array_mode,
odd_array_median, pair_array_median)
| [
6738,
1351,
62,
37153,
13,
4868,
37153,
1330,
357,
7266,
83,
974,
62,
68,
3639,
11,
29162,
62,
68,
3639,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,... | 1.698529 | 272 |
# Report the configured gender based on the is_male flag.
is_male = False

message = "you are male." if is_male else "You are female."
print(message)
| [
271,
62,
22606,
28,
25101,
201,
198,
201,
198,
361,
318,
62,
22606,
25,
201,
198,
220,
220,
220,
3601,
7203,
5832,
389,
4257,
19570,
201,
198,
17772,
25,
201,
198,
220,
220,
220,
3601,
7203,
1639,
389,
4048,
19570,
201,
198,
201,
... | 2.204545 | 44 |
#!/usr/bin/env python3
################################################
################################################
# User-editable configuration for the near-axis gyrokinetics interface.

# How many field periods of the device to cover along the field line.
number_of_field_periods_to_include = 2
# Normalized toroidal flux surfaces s at which to build the grids.
normalizedfluxvec = [0.01,0.05,0.1,0.3,0.5,0.8]
N_phi = 200 #Number of points for B0, B1 and B2 calculation and plotting
plotSave = 1 #Spend time plotting results or not
# Alternative multi-configuration setup, kept for reference:
#stellDesigns=['WISTELL-A' ,'NZ1988' ,'HSX' ,'KuQHS48' ,'Drevlak' ,'NCSX' ,'ARIES-CS' ,'QAS2' ,'ESTELL' ,'CFQS' ,'Henneberg' ,'NAQS']
#etab =[0.791 ,0.155 ,1.33 ,0.147 ,0.0861 ,0.403 ,0.074 ,0.341 ,0.563 ,0.569 ,0.269 ,1.549]
#Nrotations =[4 ,-6 ,4 ,4 ,-5 ,0 ,0 ,0 ,0 ,0 ,0 ,4]
#vmecFiles =['wistella_midscale' ,'NuhrenbergZille_1988_QHS','HSX_QHS_vacuum_ns201','n4qh.b4.a79a','Drevlak_qh_8_7','li383_1.4m_ns201','n3are_R7.75B5.7_hires','GarabedianQAS2_noCurrentOnAxis_ns201','estell_24_scaled','cfqs_freeBoundary_vacuum_hiRes','st_a34_i32v22_beta_35_scaledAUG_hires','LandremanSengupta2019_section5.4']
# Active configuration: one design with its eta_bar, helicity N and VMEC file.
stellDesigns=['NAQS']
etab =[1.549]
Nrotations =[4]
vmecFiles =['LandremanSengupta2019_section5.4']
#stellDesigns=['Drevlak']
#etab =[0.0899]
#Nrotations =[-5]
#vmecFiles =['Drevlak_qh_8_7']
# stellDesigns=['NZ1988']
# etab =[0.155]
# Nrotations =[-6]
# vmecFiles =['NuhrenbergZille_1988_QHS']
# Input/output folder layout (relative to the working directory).
equilibriaFolder='equilibria/'
gs2gridsFolder='gs2grids/'
gxgridsFolder='gxgrids/'
papergridsFolder='paperGrids/'
vmecGS2interfaceFolder='VMEC_to_GS2'
figuresFolder='Figures/'
MathDataFolder='data/'
toPaperFolder='toPaper/'
###### Start NearAxisGK
print("###### Near-Axis Gyrokinetics Interface ######")
print("Number of configurations = "+str(len(stellDesigns)))
import os
from os import path
import subprocess
from shutil import move, copymode, copyfile
import numpy as np
from scipy.io import netcdf
import sys
import warnings
import matplotlib.pyplot as plt
import matplotlib
# Silence all warnings unless -W was given on the command line.
if not sys.warnoptions:
    warnings.simplefilter("ignore")
###### Function to obtain B0 for each stellarator
# Build, per design, the file paths and the near-axis magnetic-field Fourier
# arrays (B0, B1c/B1s, B20/B2c/B2s) plus iota/nfp/phi_edge from the boozmn file.
print("Creating Bfield, equilibria and xbooz arrays")
equilibria=[]
booz=[]
BBarvec=[]
eta_barvec=[]
B0vec=[]
B1cvec=[]
B1svec=[]
B20vec=[]
B2cvec=[]
B2svec=[]
phiedge=[]
iotaVMEC=[]
nfpVMEC=[]
i=0
for stells in stellDesigns:
    equilibria.append(equilibriaFolder+'wout_'+vmecFiles[i]+'.nc')
    booz.append(equilibriaFolder+'boozmn_'+vmecFiles[i]+'.nc')
    # NOTE(review): obtainB0B1B2 is not defined in this file — presumably it
    # was defined above or imported in the original source; verify.
    BBart,eta_bart,B0t,B1st,B1ct,B20t,B2ct,B2st,phi,phiedget,iotat,nfpt = obtainB0B1B2(booz[i],Nrotations[i])
    BBarvec.append(BBart)
    eta_barvec.append(eta_bart)
    B0vec.append(B0t)
    B1svec.append(B1st)
    B1cvec.append(B1ct)
    B20vec.append(B20t)
    B2cvec.append(B2ct)
    B2svec.append(B2st)
    phiedge.append(phiedget)
    iotaVMEC.append(iotat)
    nfpVMEC.append(nfpt)
    i=i+1
# Normalized toroidal angle grid over one field period.
phi=np.linspace(0,1,N_phi)
# Plot the near-axis field harmonics (B0, B1c, B1s, B20) for all designs,
# splitting them across two figures of at most plotsIn1fig curves each.
if plotSave==0:# path.exists(toPaperFolder+'B0QSEquilibria1.pdf'):
    None
else:
    print("Plotting Near-Axis Bfields for the designs")
    plotsIn1fig=6;plotfontSize=20;legendfontSize=14;figSize1=7.2;figSize2=4.0;
    matplotlib.rc('font', size=plotfontSize);matplotlib.rc('axes', titlesize=plotfontSize);
    # B0: first plotsIn1fig designs.
    plt.figure(figsize=(figSize1,figSize2));i=0;
    for stells in stellDesigns:
        if i<plotsIn1fig: plt.plot(phi, B0vec[i]/np.mean(B0vec[i]), label=stells);i=i+1;
    plt.legend(loc=0,fontsize=legendfontSize);plt.xlabel(r'$N_{fp} \varphi/2 \pi$');plt.ylabel(r'$B_0/\langle B_0 \rangle$');plt.ylim((0.985,1.02))
    plt.subplots_adjust(left=0.16, bottom=0.19, right=0.98, top=0.97)
    plt.savefig(toPaperFolder+'B0QSEquilibria1.pdf', format='pdf')
    # B0: remaining designs.
    plt.figure(figsize=(figSize1,figSize2));i=0;
    for stells in stellDesigns:
        if i>=plotsIn1fig: plt.plot(phi, B0vec[i]/np.mean(B0vec[i]), label=stells); i=i+1;
        else: i=i+1;
    plt.legend(loc=0,fontsize=legendfontSize);plt.xlabel(r'$N_{fp} \varphi/2 \pi$');plt.ylabel(r'$B_0/\langle B_0 \rangle$');plt.ylim((0.985,1.02))
    matplotlib.rc('font', size=plotfontSize);matplotlib.rc('axes', titlesize=plotfontSize)
    plt.subplots_adjust(left=0.16, bottom=0.19, right=0.98, top=0.97)
    plt.savefig(toPaperFolder+'B0QSEquilibria2.pdf', format='pdf')
    # B1c: first and remaining designs.
    plt.figure(figsize=(figSize1,figSize2));i=0;
    for stells in stellDesigns:
        if i<plotsIn1fig: plt.plot(phi, B1cvec[i]/np.mean(B1cvec[i]), label=stells);i=i+1;
        else: i=i+1;
    plt.legend(loc=0,fontsize=legendfontSize);plt.xlabel(r'$N_{fp} \varphi/2 \pi$');plt.ylabel(r'$B_{1c}/\langle B_{1c} \rangle$');plt.ylim((0.7,1.21))
    matplotlib.rc('font', size=plotfontSize);matplotlib.rc('axes', titlesize=plotfontSize)
    plt.subplots_adjust(left=0.16, bottom=0.19, right=0.98, top=0.97)
    plt.savefig(toPaperFolder+'B1cQSEquilibria1.pdf', format='pdf')
    plt.figure(figsize=(figSize1,figSize2));i=0;
    for stells in stellDesigns:
        if i>=plotsIn1fig: plt.plot(phi, B1cvec[i]/np.mean(B1cvec[i]), label=stells);i=i+1;
        else: i=i+1;
    plt.legend(loc=0,fontsize=legendfontSize);plt.xlabel(r'$N_{fp} \varphi/2 \pi$');plt.ylabel(r'$B_{1c}/\langle B_{1c} \rangle$');plt.ylim((0.7,1.21))
    matplotlib.rc('font', size=plotfontSize);matplotlib.rc('axes', titlesize=plotfontSize)
    plt.subplots_adjust(left=0.16, bottom=0.19, right=0.98, top=0.97)
    plt.savefig(toPaperFolder+'B1cQSEquilibria2.pdf', format='pdf')
    # B1s: normalized by the mean of B1c (not B1s) so the two are comparable.
    plt.figure(figsize=(figSize1,figSize2));i=0;
    for stells in stellDesigns:
        if i<plotsIn1fig: plt.plot(phi, B1svec[i]/np.mean(B1cvec[i]), label=stells);i=i+1;
        else: i=i+1;
    plt.legend(loc=0,fontsize=legendfontSize);plt.xlabel(r'$N_{fp} \varphi/2 \pi$');plt.ylabel(r'$B_{1s}/\langle B_{1c} \rangle$');plt.ylim((-0.21,0.21))
    matplotlib.rc('font', size=plotfontSize);matplotlib.rc('axes', titlesize=plotfontSize)
    plt.subplots_adjust(left=0.16, bottom=0.19, right=0.98, top=0.97)
    plt.savefig(toPaperFolder+'B1sQSEquilibria1.pdf', format='pdf')
    plt.figure(figsize=(figSize1,figSize2));i=0;
    for stells in stellDesigns:
        if i>=plotsIn1fig: plt.plot(phi, B1svec[i]/np.mean(B1cvec[i]), label=stells);i=i+1;
        else: i=i+1;
    plt.legend(loc=0,fontsize=legendfontSize);plt.xlabel(r'$N_{fp} \varphi/2 \pi$');plt.ylabel(r'$B_{1s}/\langle B_{1c} \rangle$');plt.ylim((-0.21,0.21))
    matplotlib.rc('font', size=plotfontSize);matplotlib.rc('axes', titlesize=plotfontSize)
    plt.subplots_adjust(left=0.16, bottom=0.19, right=0.98, top=0.97)
    plt.savefig(toPaperFolder+'B1sQSEquilibria2.pdf', format='pdf')
    # B20: first and remaining designs (no y-limits).
    plt.figure(figsize=(figSize1,figSize2));i=0;
    for stells in stellDesigns:
        if i<plotsIn1fig: plt.plot(phi, B20vec[i]/np.mean(B20vec[i]), label=stells);i=i+1;
        else: i=i+1;
    plt.legend(loc=0,fontsize=legendfontSize);plt.xlabel(r'$N_{fp} \varphi/2 \pi$');plt.ylabel(r'$B_{20}/\langle B_{20} \rangle$');
    matplotlib.rc('font', size=plotfontSize);matplotlib.rc('axes', titlesize=plotfontSize)
    plt.subplots_adjust(left=0.16, bottom=0.19, right=0.98, top=0.97)
    plt.savefig(toPaperFolder+'B20QSEquilibria1.pdf', format='pdf')
    plt.figure(figsize=(figSize1,figSize2));i=0;
    for stells in stellDesigns:
        if i>=plotsIn1fig: plt.plot(phi, B20vec[i]/np.mean(B20vec[i]), label=stells);i=i+1;
        else: i=i+1;
    plt.legend(loc=0,fontsize=legendfontSize);plt.xlabel(r'$N_{fp} \varphi/2 \pi$');plt.ylabel(r'$B_{20}/\langle B_{20} \rangle$');
    matplotlib.rc('font', size=plotfontSize);matplotlib.rc('axes', titlesize=plotfontSize)
    plt.subplots_adjust(left=0.16, bottom=0.19, right=0.98, top=0.97)
    plt.savefig(toPaperFolder+'B20QSEquilibria2.pdf', format='pdf')
# Compile the Fortran VMEC_to_GS2 interface and, unless grid files already
# exist, run it once per flux surface per design to produce GS2/GX grids.
print("Compiling VMEC to GS2")
os.chdir(vmecGS2interfaceFolder)
process = subprocess.call(['make'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
copyfile("test_vmec_to_gs2_geometry_interface","../test_vmec_to_gs2_geometry_interface")
copymode("test_vmec_to_gs2_geometry_interface","../test_vmec_to_gs2_geometry_interface")
os.chdir('../')
if path.exists(gs2gridsFolder+'agrid'+stellDesigns[0]+'r'+str(normalizedfluxvec[0])+'.out'):
    None
else:
    print("Running VMEC_to_GS2 interface")
    for desired_normalized_toroidal_flux in normalizedfluxvec:
        print(" Normalized toroidal flux = "+str(desired_normalized_toroidal_flux))
        # Output file names for this flux surface, one per design.
        gs2_output=[]
        gx_output=[]
        geometry_output=[]
        i=0
        for stells in stellDesigns:
            gs2_output.append(gs2gridsFolder+'gs2_grid'+stells+'r'+str(desired_normalized_toroidal_flux)+'.out')
            gx_output.append(gxgridsFolder+'gx_grid'+stells+'r'+str(desired_normalized_toroidal_flux)+'.out')
            geometry_output.append(papergridsFolder+'gridMath'+stells+'r'+str(desired_normalized_toroidal_flux)+'.out')
            i=i+1
        ###### Run VMEC_to_GS2 interface
        i=0
        for eq in equilibria:
            # Read iota on-axis and nfp from the VMEC wout netCDF file to size
            # the field-line extent passed to the interface.
            f = netcdf.netcdf_file(equilibria[i],'r',mmap=False)
            iota0 = f.variables['iotaf'][()][1]
            nfp = f.variables['nfp'][()]
            #print('./test_vmec_to_gs2_geometry_interface',eq,gs2_output[i],geometry_output[i],str(nfp*number_of_field_periods_to_include/abs(iota0)),str(desired_normalized_toroidal_flux),gx_output[i])
            #exit()
            process = subprocess.call(['./test_vmec_to_gs2_geometry_interface',eq,gs2_output[i],geometry_output[i],str(nfp*number_of_field_periods_to_include/abs(iota0)),str(desired_normalized_toroidal_flux),gx_output[i]], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            i=i+1
###### Mixed Near-Axis/VMEC coefficients
# Build the geometric drift coefficients B x grad(B) . grad(alpha) and
# B x grad(B) . grad(psi) from the near-axis harmonics, then dump them to text.
# NOTE(review): the cached filename checked here ('aBcross...') does not match
# the file actually written below ('Bcross...'), so this branch presumably
# always re-runs; confirm whether the 'a' prefix is intentional.
if path.exists(MathDataFolder+'aBcrossGradBdGradAlpha.txt'):
    None
else:
    print("Creating Mixed Near-Axis/VMEC coefficients")
    i=0
    BcrossGradBdGradAlpha=[]
    BcrossGradBdGradPsi=[]
    phiH=[]
    for stells in stellDesigns:
        #print("   Stellarator "+stellDesigns[i])
        B0temp=B0vec[i]
        B1ctemp=B1cvec[i]
        B1stemp=B1svec[i]
        # Choose how many field periods to tile so the field line closes well;
        # more periods are needed when |iota - N| is small.
        # NOTE(review): np.int was removed in NumPy 1.24 — replace with int or
        # np.int64 before running on a modern NumPy.
        if abs(iotaVMEC[i]-Nrotations[i])>4:
            phiMultiplier=2*np.int(nfpVMEC[i]*4*np.ceil(1/abs(iotaVMEC[i]-Nrotations[i])))
        elif abs(iotaVMEC[i]-Nrotations[i])>3:
            phiMultiplier=2*np.int(nfpVMEC[i]*6*np.ceil(1/abs(iotaVMEC[i]-Nrotations[i])))
        elif abs(iotaVMEC[i]-Nrotations[i])>2:
            phiMultiplier=2*np.int(nfpVMEC[i]*8*np.ceil(1/abs(iotaVMEC[i]-Nrotations[i])))
        elif abs(iotaVMEC[i]-Nrotations[i])>1:
            phiMultiplier=2*np.int(nfpVMEC[i]*8*np.ceil(1/abs(iotaVMEC[i]-Nrotations[i])))
        else:
            phiMultiplier=2*np.int(nfpVMEC[i]*10*np.ceil(1/abs(iotaVMEC[i]-Nrotations[i])))
        # Tile the single-period arrays phiMultiplier times.
        for kk in range(phiMultiplier-1):
            B0vec[i]=np.append(B0vec[i],B0temp)
            B1cvec[i]=np.append(B1cvec[i],B1ctemp)
            B1svec[i]=np.append(B1svec[i],B1stemp)
        phiH.append(np.linspace(-np.pi*phiMultiplier/2,np.pi*phiMultiplier/2, N_phi*phiMultiplier))
        # Helical Boozer angle vartheta = (iota - N) * phi.
        vartheta=np.multiply(iotaVMEC[i]-Nrotations[i],phiH[i])
        B1cvec[i]=-B1cvec[i]
        #plotfontSize=20;legendfontSize=14;figSize1=7.5;figSize2=4.0;
        #matplotlib.rc('font', size=plotfontSize);matplotlib.rc('axes', titlesize=plotfontSize);
        #matplotlib.rc('text', usetex=True);matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
        #plt.figure(figsize=(figSize1,figSize2))
        for s in normalizedfluxvec:
            # Minor-radius-like coordinate r(s) and |B| to first order.
            r=np.sqrt(2*phiedge[i]*s/BBarvec[i])
            Bmag=(B0vec[i]+np.multiply(r,np.multiply(B1cvec[i],np.cos(vartheta))+np.multiply(B1svec[i],np.sin(vartheta))))
            #plt.plot(phiH[i], Bmag, label=stells+' s='+str(s))
        #plt.legend(loc=0,fontsize=legendfontSize);plt.xlabel(r'$\varphi$');plt.ylabel(r'$B$');#plt.ylim((0.985,1.015))
        #plt.subplots_adjust(left=0.16, bottom=0.19, right=0.98, top=0.97)
        #plt.savefig(figuresFolder+stells+'_BmagMixed.pdf', format='pdf')
        #plt.figure(figsize=(figSize1,figSize2))
        #BcrossGradBdGradAlpha.append(np.multiply(np.multiply(np.sqrt(BBarvec[i]/2),np.multiply(np.multiply(B0vec[i],B1cvec[i]),np.cos(vartheta))),np.power(np.reciprocal(B0vec[i]),3)))
        BcrossGradBdGradAlpha.append(np.multiply(np.multiply(np.multiply(B1cvec[i],np.cos(vartheta))+np.multiply(B1svec[i],np.sin(vartheta)),np.reciprocal(B0vec[i])),np.sqrt(1/(2*BBarvec[i]))))
        #plt.plot(phiH[i], BcrossGradBdGradAlpha[i])
        #plt.xlabel(r'$\varphi$');plt.ylabel(r'$\sqrt{\psi}\boldsymbol B \times \nabla B \cdot \nabla \alpha/B^3$');#plt.ylim((0.985,1.015))
        #plt.subplots_adjust(left=0.16, bottom=0.19, right=0.98, top=0.97)
        #plt.savefig(figuresFolder+stells+'_BcrossGradBdGradAlphaMixed.pdf', format='pdf')
        #plt.figure(figsize=(figSize1,figSize2))
        #BcrossGradBdGradPsi.append(np.multiply(np.multiply(np.sqrt(2/BBarvec[i]),np.multiply(np.multiply(np.square(B0vec[i]),-B1cvec[i]),np.sin(vartheta))),np.power(np.reciprocal(B0vec[i]),3)))
        BcrossGradBdGradPsi.append(np.multiply(np.multiply(np.multiply(B1cvec[i],-np.sin(vartheta))-np.multiply(B1svec[i],np.cos(vartheta)),np.reciprocal(B0vec[i])),np.sqrt(2/(BBarvec[i]))))
        #plt.plot(phiH[i], BcrossGradBdGradPsi[i])
        #plt.xlabel(r'$\varphi$');plt.ylabel(r'$\sqrt{\psi}^{-1} \boldsymbol B \times \nabla B \cdot \nabla \psi/B^3$');#plt.ylim((0.985,1.015))
        #plt.subplots_adjust(left=0.16, bottom=0.19, right=0.98, top=0.97)
        #plt.savefig(figuresFolder+stells+'_BcrossGradBdGradPsiMixed.pdf', format='pdf')
        i=i+1
    # Write one whitespace-separated row per design into each data file.
    with open(MathDataFolder+'phiH.txt','w+') as phif:
        with open(MathDataFolder+'BcrossGradBdGradAlpha.txt','w+') as BcrossGradBdGradAlphaf:
            with open(MathDataFolder+'BcrossGradBdGradPsi.txt','w+') as BcrossGradBdGradPsif:
                i=0
                #phif.write('{');BcrossGradBdGradAlphaf.write('{');BcrossGradBdGradPsif.write('{')
                for stells in stellDesigns:
                    #phif.write('{');BcrossGradBdGradAlphaf.write('{');BcrossGradBdGradPsif.write('{')
                    np.savetxt(phif, phiH[i], fmt='%s',newline=' ')
                    np.savetxt(BcrossGradBdGradAlphaf, BcrossGradBdGradAlpha[i], fmt='%s',newline=' ')
                    np.savetxt(BcrossGradBdGradPsif, BcrossGradBdGradPsi[i], fmt='%s',newline=' ')
                    #phif.write('}');BcrossGradBdGradAlphaf.write('}');BcrossGradBdGradPsif.write('}')
                    phif.write('\n');BcrossGradBdGradAlphaf.write('\n');BcrossGradBdGradPsif.write('\n')
                    i=i+1
                #phif.write('}');BcrossGradBdGradAlphaf.write('}');BcrossGradBdGradPsif.write('}')
###### Near-Axis Comparison
# Run the Mathematica near-axis script once per design via wolframscript,
# passing all folders, eta_bar, B0 and the list of flux surfaces as arguments.
print("Running Mathematica")
i=0
for eq in equilibria:
    sign=1
    #if stellDesigns[i]=='NAQS': sign=-1
    bashCommand = "wolframscript -noprompt -script grid_NearAxis.wls "+stellDesigns[i]+" "+papergridsFolder+" "+eq+" "+booz[i]+" "+str(sign*abs(eta_barvec[i]))+" "+figuresFolder+" "+gs2gridsFolder+" "+gxgridsFolder+" "+MathDataFolder+" "+toPaperFolder+" "+str(plotSave)+" "+str(BBarvec[i])+" "+str(i+1)+" "+str(len(normalizedfluxvec))
    for rr in normalizedfluxvec:
        bashCommand = bashCommand+" "+str(rr)
    print(stellDesigns[i])
    print("etabar = "+str(abs(eta_barvec[i])))
    print("B0 = "+str(BBarvec[i]))
    print("Working...", end='', flush=True)
    #print(bashCommand)
    #exit()
    output = subprocess.call(bashCommand.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    print(" Success!")
    i=i+1
#pkill -9 WolframKernel; pkill -9 WolframScript; pkill -9 Mathematica
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
29113,
14468,
198,
29113,
14468,
198,
198,
17618,
62,
1659,
62,
3245,
62,
41007,
82,
62,
1462,
62,
17256,
796,
362,
198,
11265,
1143,
69,
22564,
35138,
796,
685,
15,
13,
486,
11,... | 1.985669 | 7,536 |