repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/BibaAndBoba/utils/tokenizer.py | BibaAndBoba/utils/tokenizer.py | import pkgutil
from nltk.tokenize import word_tokenize
from BibaAndBoba.utils.cacher import cache_to_file
from BibaAndBoba.utils.progress_bar import progress_bar
from BibaAndBoba.utils.languages import get_supported_language
# from emoji.unicode_codes import EMOJI_UNICODE_ENGLISH
# Load the bundled stop-word dictionaries once, at import time.
# pkgutil.get_data resolves the path relative to this package, so the
# dictionaries ship inside the installed distribution (returns bytes).
stopwords = pkgutil.get_data(__name__, '../dictionaries/stopwords.txt')
# General stop words: one whitespace-separated token per entry.
STOPWORDS = set(stopwords.decode('utf-8').split())
base_ua = pkgutil.get_data(__name__, "../dictionaries/base_ua.txt")
# Ukrainian base stop words, filtered by exact match in tokenize().
STOPWORDS_UA = set(base_ua.decode('utf-8').split())
# EMOJI = set(EMOJI_UNICODE_ENGLISH.values())
# noinspection PyUnusedLocal
@cache_to_file()
def tokenize(messages: list[str], companion_id, companion_name: str = "Undefined",
             use_cache: bool = True, flush_cache: bool = False) -> list[str]:
    """
    Tokenize a list of messages and filter out stop words.

    The ``cache_to_file`` decorator consumes the caching keyword arguments:
    when ``use_cache`` is False the function neither reads a cache from a
    previous run nor creates a new one (default True); when ``flush_cache``
    is True the old cached data is discarded and rebuilt.

    :param messages: list[str]: Pass the list of messages to be tokenized
    :param companion_id: Identify the companion ID
    :param companion_name: str: Identify the companion name
    :param use_cache: bool: Determine whether to use the cache or not. Defaults to True.
    :param flush_cache: bool: Clear the cache of a given tokenizer. Defaults to False.
    :return: A flat list of lower-cased tokens drawn from every message
    """
    language = get_supported_language(messages)
    if not messages:
        return []
    tokens: list[str] = []
    tracked = progress_bar(messages, prefix=f'Analyzing {companion_name} messages:')
    for message in tracked:
        for raw_token in word_tokenize(message, language=language):
            lowered = raw_token.lower()
            # Exact match against the Ukrainian list ...
            if lowered in STOPWORDS_UA:
                continue
            # ... but a *substring* match against the general list,
            # mirroring the original filtering rule.
            if any(stop in lowered for stop in STOPWORDS):
                continue
            tokens.append(lowered)
    return tokens
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/BibaAndBoba/utils/nltk_punkt_downloader.py | BibaAndBoba/utils/nltk_punkt_downloader.py | """
Important! This module downloads the punkt tokenizer from NLTK.
"""
import nltk
import ssl
from BibaAndBoba.utils.logger import logger
def download_punkt():
"""Download NLTK's 'punkt' tokenizer models, working around SSL verification issues.

On interpreters whose ``ssl`` module exposes the private
``_create_unverified_context`` hook, HTTPS certificate verification is
disabled for the download session; otherwise a warning is logged that
points at NLTK's manual-installation docs.
"""
try:
# Private CPython hook; may be absent on some builds/platforms.
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
logger.warning(
"Warning, ssl unverified context is not available.\n"
"Consider installing punkt tokenizer from NLTK manually. Help: https://www.nltk.org/data.html"
)
else:
# NOTE(review): globally disabling certificate verification is a security
# trade-off; acceptable only for this one-off corpus download.
ssl._create_default_https_context = _create_unverified_https_context
nltk.download("punkt")
logger.info("Successfully downloaded punkt tokenizer from NLTK.")
if __name__ == "__main__":
download_punkt()
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
from collections.abc import Iterator


def progress_bar(iterable, prefix: str = '', suffix: str = '', decimals: int = 1,
                 length: int = 50, fill: str = '█', print_end: str = "") -> Iterator:
    """
    Wrap a sized iterable in a generator that renders a console progress bar.

    Items are yielded unchanged; after each one the bar is redrawn on the
    same console line (via a leading ``\\r``).

    :param iterable: Sized iterable to loop over (must support ``len()``)
    :param prefix: str: String printed before the bar. Default is an empty string.
    :param suffix: str: String printed after the bar. Default is an empty string.
    :param decimals: Number of decimals shown in the percentage. Default is 1.
    :param length: Character length of the bar. Default is 50.
    :param fill: str: Character that fills the completed part. Default is '█'.
    :param print_end: str: End character for each redraw. Default is an empty string.
    :return: A generator yielding the items of ``iterable``
    """
    total = len(iterable)
    # Guard against empty input: the previous implementation divided by
    # ``total`` directly and raised ZeroDivisionError for an empty iterable.
    denominator = max(total, 1)

    def print_progress_bar(iteration: int) -> None:
        """Redraw the bar for the given number of completed iterations."""
        percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(denominator)))
        filled_length = int(length * iteration // denominator)
        bar = fill * filled_length + '-' * (length - filled_length)
        print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=print_end)

    print_progress_bar(0)
    for i, item in enumerate(iterable):
        yield item
        print_progress_bar(i + 1)
    print("Done!")
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/BibaAndBoba/docs/source/conf.py | BibaAndBoba/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Two levels up from docs/source so autodoc can import the BibaAndBoba package.
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'BibaAndBoba'
copyright = '2022, Andrew Yaroshevych'
author = 'Andrew Yaroshevych'
# The full version, including alpha/beta/rc tags
release = '1.2.2'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# NOTE: 'furo' is a third-party theme; it must be installed for the docs build.
html_theme = 'furo'
html_logo = "../assets/BibaAndBoba-logo.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/prune_sparse_seq.py | prune_sparse_seq.py | from __future__ import print_function
import datetime
import time
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import codecs
import pickle
import math
from model_word_ada.LM import LM
from model_word_ada.basic import BasicRNN
from model_word_ada.densenet import DenseRNN
from model_word_ada.ldnet import LDRNN
from model_seq.crf import CRFLoss, CRFDecode
from model_seq.dataset import SeqDataset
from model_seq.evaluator import eval_wc
from model_seq.seqlabel import SeqLabel, Vanilla_SeqLabel
from model_seq.seqlm import BasicSeqLM
from model_seq.sparse_lm import SparseSeqLM
import model_seq.utils as utils
from torch_scope import wrapper
import argparse
import logging
import json
import os
import sys
import itertools
import functools
logger = logging.getLogger(__name__)
# Prune a sparse sequence-labeling model: fine-tune with a group-sparsity
# regularizer on the language-model layer weights, then physically delete
# the zeroed layers and re-evaluate.
if __name__ == "__main__":
# ---- Command-line interface ----
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, default="auto")
parser.add_argument('--cp_root', default='./checkpoint')
parser.add_argument('--checkpoint_name', default='p_ner')
parser.add_argument('--git_tracking', action='store_true')
parser.add_argument('--corpus', default='./data/ner_dataset.pk')
parser.add_argument('--load_seq', default='./checkpoint/ner.th')
parser.add_argument('--lm_hid_dim', type=int, default=300)
parser.add_argument('--lm_word_dim', type=int, default=300)
parser.add_argument('--lm_label_dim', type=int, default=1600)
parser.add_argument('--lm_layer_num', type=int, default=10)
parser.add_argument('--lm_droprate', type=float, default=0.5)
parser.add_argument('--lm_rnn_layer', choices=['Basic', 'DenseNet', 'LDNet'], default='LDNet')
parser.add_argument('--lm_rnn_unit', choices=['gru', 'lstm', 'rnn'], default='lstm')
parser.add_argument('--seq_c_dim', type=int, default=30)
parser.add_argument('--seq_c_hid', type=int, default=150)
parser.add_argument('--seq_c_layer', type=int, default=1)
parser.add_argument('--seq_w_dim', type=int, default=100)
parser.add_argument('--seq_w_hid', type=int, default=300)
parser.add_argument('--seq_w_layer', type=int, default=1)
parser.add_argument('--seq_droprate', type=float, default=0.5)
parser.add_argument('--seq_rnn_unit', choices=['gru', 'lstm', 'rnn'], default='lstm')
parser.add_argument('--seq_model', choices=['vanilla', 'lm-aug'], default='lm-aug')
# seq_lambda0: regularization strength; seq_lambda1: sparsity budget used
# both as a regularizer threshold and as the max layer count for "is_best".
parser.add_argument('--seq_lambda0', type=float, default=0.05)
parser.add_argument('--seq_lambda1', type=float, default=2)
parser.add_argument('--batch_size', type=int, default=10)
parser.add_argument('--patience', type=int, default=5)
parser.add_argument('--epoch', type=int, default=200)
parser.add_argument('--least', type=int, default=50)
parser.add_argument('--clip', type=float, default=5)
parser.add_argument('--lr', type=float, default=0.015)
parser.add_argument('--lr_decay', type=float, default=0.05)
parser.add_argument('--update', choices=['Adam', 'Adagrad', 'Adadelta', 'SGD'], default='SGD')
args = parser.parse_args()
# ---- Device / experiment-tracking setup ----
pw = wrapper(os.path.join(args.cp_root, args.checkpoint_name), args.checkpoint_name, enable_git_track=args.git_tracking)
gpu_index = pw.auto_device() if 'auto' == args.gpu else int(args.gpu)
device = torch.device("cuda:" + str(gpu_index) if gpu_index >= 0 else "cpu")
if gpu_index >= 0:
torch.cuda.set_device(gpu_index)
# ---- Data loading (pickled corpus with vocab maps and splits) ----
logger.info('Loading data from {}.'.format(args.corpus))
dataset = pickle.load(open(args.corpus, 'rb'))
name_list = ['flm_map', 'blm_map', 'gw_map', 'c_map', 'y_map', 'emb_array', 'train_data', 'test_data', 'dev_data']
flm_map, blm_map, gw_map, c_map, y_map, emb_array, train_data, test_data, dev_data = [dataset[tup] for tup in name_list ]
logger.info('Building language models and seuqence labeling models.')
# Forward and backward LMs wrapped in SparseSeqLM (second arg = backward flag).
rnn_map = {'Basic': BasicRNN, 'DenseNet': DenseRNN, 'LDNet': functools.partial(LDRNN, layer_drop = 0)}
flm_rnn_layer = rnn_map[args.lm_rnn_layer](args.lm_layer_num, args.lm_rnn_unit, args.lm_word_dim, args.lm_hid_dim, args.lm_droprate)
blm_rnn_layer = rnn_map[args.lm_rnn_layer](args.lm_layer_num, args.lm_rnn_unit, args.lm_word_dim, args.lm_hid_dim, args.lm_droprate)
flm_model = LM(flm_rnn_layer, None, len(flm_map), args.lm_word_dim, args.lm_droprate, label_dim = args.lm_label_dim)
blm_model = LM(blm_rnn_layer, None, len(blm_map), args.lm_word_dim, args.lm_droprate, label_dim = args.lm_label_dim)
flm_model_seq = SparseSeqLM(flm_model, False, args.lm_droprate, False)
blm_model_seq = SparseSeqLM(blm_model, True, args.lm_droprate, False)
SL_map = {'vanilla':Vanilla_SeqLabel, 'lm-aug': SeqLabel}
seq_model = SL_map[args.seq_model](flm_model_seq, blm_model_seq, len(c_map), args.seq_c_dim, args.seq_c_hid, args.seq_c_layer, len(gw_map), args.seq_w_dim, args.seq_w_hid, args.seq_w_layer, len(y_map), args.seq_droprate, unit=args.seq_rnn_unit)
# Warm-start from an already-trained sequence-labeling checkpoint.
logger.info('Loading pre-trained models from {}.'.format(args.load_seq))
seq_file = wrapper.restore_checkpoint(args.load_seq)['model']
seq_model.load_state_dict(seq_file)
seq_model.to(device)
crit = CRFLoss(y_map)
decoder = CRFDecode(y_map)
evaluator = eval_wc(decoder, 'f1')
logger.info('Constructing dataset.')
train_dataset, test_dataset, dev_dataset = [SeqDataset(tup_data, flm_map['\n'], blm_map['\n'], gw_map['<\n>'], c_map[' '], c_map['\n'], y_map['<s>'], y_map['<eof>'], len(y_map), args.batch_size) for tup_data in [train_data, test_data, dev_data]]
logger.info('Constructing optimizer.')
param_dict = filter(lambda t: t.requires_grad, seq_model.parameters())
optim_map = {'Adam' : optim.Adam, 'Adagrad': optim.Adagrad, 'Adadelta': optim.Adadelta, 'SGD': functools.partial(optim.SGD, momentum=0.9)}
if args.lr > 0:
optimizer=optim_map[args.update](param_dict, lr=args.lr)
else:
optimizer=optim_map[args.update](param_dict)
logger.info('Saving configues.')
pw.save_configue(args)
logger.info('Setting up training environ.')
best_f1 = float('-inf')
patience_count = 0
batch_index = 0
normalizer = 0
tot_loss = 0
# Baseline dev score before any pruning fine-tuning.
dev_f1, dev_pre, dev_rec, dev_acc = evaluator.calc_score(seq_model, dev_dataset.get_tqdm(device))
print(dev_f1)
logger.info('Start training...')
for indexs in range(args.epoch):
logger.info('############')
logger.info('Epoch: {}'.format(indexs))
pw.nvidia_memory_map()
iterator = train_dataset.get_tqdm(device)
seq_model.train()
for f_c, f_p, b_c, b_p, flm_w, blm_w, blm_ind, f_w, f_y, f_y_m, _ in iterator:
seq_model.zero_grad()
output = seq_model(f_c, f_p, b_c, b_p, flm_w, blm_w, blm_ind, f_w)
loss = crit(output, f_y, f_y_m)
tot_loss += utils.to_scalar(loss)
normalizer += 1
# Group-sparsity regularization on the LM layer-selection weights;
# reg1 is only applied once the active-layer mass exceeds the budget.
if args.seq_lambda0 > 0:
f_reg0, f_reg1, f_reg3 = flm_model_seq.regularizer()
b_reg0, b_reg1, b_reg3 = blm_model_seq.regularizer()
loss += args.seq_lambda0 * (f_reg3 + b_reg3)
if (f_reg0 + b_reg0 > args.seq_lambda1):
loss += args.seq_lambda0 * (f_reg1 + b_reg1)
loss.backward()
torch.nn.utils.clip_grad_norm_(seq_model.parameters(), args.clip)
optimizer.step()
# Proximal step: projects layer weights toward sparsity after the SGD step.
flm_model_seq.prox()
blm_model_seq.prox()
batch_index += 1
if 0 == batch_index % 100:
pw.add_loss_vs_batch({'training_loss': tot_loss / (normalizer + 1e-9)}, batch_index, use_logger = False)
tot_loss = 0
normalizer = 0
# Inverse-time learning-rate decay.
if args.lr > 0:
current_lr = args.lr / (1 + (indexs + 1) * args.lr_decay)
utils.adjust_learning_rate(optimizer, current_lr)
dev_f1, dev_pre, dev_rec, dev_acc = evaluator.calc_score(seq_model, dev_dataset.get_tqdm(device))
# Count of still-active (positive-weight) LM layers across both directions.
nonezero_count = (flm_model_seq.rnn.weight_list.data > 0).int().cpu().sum() + (blm_model_seq.rnn.weight_list.data > 0).cpu().int().sum()
pw.add_loss_vs_batch({'dev_f1': dev_f1, 'none_zero_count': nonezero_count.item()}, indexs, use_logger = True)
pw.add_loss_vs_batch({'dev_pre': dev_pre, 'dev_rec': dev_rec}, indexs, use_logger = False)
logger.info('Saving model...')
# "Best" requires both meeting the sparsity budget AND improving dev F1.
pw.save_checkpoint(model = seq_model, is_best = (nonezero_count <= args.seq_lambda1 and dev_f1 > best_f1))
if nonezero_count <= args.seq_lambda1 and dev_f1 > best_f1:
# NOTE(review): self-assignment below is a no-op — likely a leftover
# (perhaps meant to snapshot the count into a 'best_*' variable).
nonezero_count = nonezero_count
test_f1, test_pre, test_rec, test_acc = evaluator.calc_score(seq_model, test_dataset.get_tqdm(device))
best_f1, best_dev_pre, best_dev_rec, best_dev_acc = dev_f1, dev_pre, dev_rec, dev_acc
pw.add_loss_vs_batch({'tot_loss': tot_loss/(normalizer+1e-9), 'test_f1': test_f1}, indexs, use_logger = True)
pw.add_loss_vs_batch({'test_pre': test_pre, 'test_rec': test_rec}, indexs, use_logger = False)
patience_count = 0
elif dev_f1 > best_f1:
test_f1, test_pre, test_rec, test_acc = evaluator.calc_score(seq_model, test_dataset.get_tqdm(device))
pw.add_loss_vs_batch({'tot_loss': tot_loss/(normalizer+1e-9), 'test_f1': test_f1}, indexs, use_logger = True)
pw.add_loss_vs_batch({'test_pre': test_pre, 'test_rec': test_rec}, indexs, use_logger = False)
else:
patience_count += 1
if patience_count >= args.patience and indexs >= args.least:
break
# NOTE(review): if no epoch ever improved dev F1, test_f1/test_pre/test_rec
# are never bound and the next line raises NameError — verify intended.
pw.add_loss_vs_batch({'best_test_f1': test_f1, 'best_test_pre': test_pre, 'best_test_rec': test_rec}, 0, use_logger = True, use_writer = False)
pw.add_loss_vs_batch({'best_dev_f1': best_f1, 'best_dev_pre': best_dev_pre, 'best_dev_rec': best_dev_rec}, 0, use_logger = True, use_writer = False)
logger.info('Loading best_performing_model.')
seq_param = pw.restore_best_checkpoint()['model']
seq_model.load_state_dict(seq_param)
seq_model.to(device)
# ---- Evaluate, prune the zeroed layers, then evaluate again ----
logger.info('Test before deleting layers.')
test_f1, test_pre, test_rec, test_acc = evaluator.calc_score(seq_model, test_dataset.get_tqdm(device))
dev_f1, dev_pre, dev_rec, dev_acc = evaluator.calc_score(seq_model, dev_dataset.get_tqdm(device))
pw.add_loss_vs_batch({'best_test_f1': test_f1, 'best_dev_f1': dev_f1}, 1, use_logger = True, use_writer = False)
logger.info('Deleting layers.')
seq_model.cpu()
seq_model.prune_dense_rnn()
seq_model.to(device)
logger.info('Resulting models display.')
print(seq_model)
logger.info('Test after deleting layers.')
test_f1, test_pre, test_rec, test_acc = evaluator.calc_score(seq_model, test_dataset.get_tqdm(device))
dev_f1, dev_pre, dev_rec, dev_acc = evaluator.calc_score(seq_model, dev_dataset.get_tqdm(device))
pw.add_loss_vs_batch({'best_test_f1': test_f1, 'best_dev_f1': dev_f1}, 2, use_logger = True, use_writer = False)
seq_model.cpu()
logger.info('Saving model...')
# Persist the pruned model together with the vocab maps needed for inference.
seq_config = seq_model.to_params()
pw.save_checkpoint(model = seq_model,
is_best = True,
s_dict = {'config': seq_config,
'flm_map': flm_map,
'blm_map': blm_map,
'gw_map': gw_map,
'c_map': c_map,
'y_map': y_map})
pw.close()
| python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/train_lm.py | train_lm.py | from __future__ import print_function
import datetime
import time
import torch
import torch.nn as nn
import torch.optim as optim
import codecs
import pickle
import math
from model_word_ada.LM import LM
from model_word_ada.basic import BasicRNN
from model_word_ada.ldnet import LDRNN
from model_word_ada.densenet import DenseRNN
from model_word_ada.dataset import LargeDataset, EvalDataset
from model_word_ada.adaptive import AdaptiveSoftmax
import model_word_ada.utils as utils
from torch_scope import wrapper
import argparse
import logging
import json
import os
import sys
import itertools
import functools
logger = logging.getLogger(__name__)
def evaluate(data_loader, lm_model, limited = 76800):
    """
    Compute corpus perplexity of ``lm_model`` over ``data_loader``.

    :param data_loader: iterable of ``(word_t, label_t)`` batches; labels are
        flattened with ``view(-1)`` before scoring
    :param lm_model: language model called as ``lm_model(word_t, label_t)``;
        expected to return a scalar loss whose ``item()`` is the per-token loss
    :param limited: stop once more than this many tokens have been scored;
        a negative value disables the cap
    :return: ``exp(total_loss / total_len)`` — the perplexity
    """
    lm_model.eval()
    lm_model.init_hidden()
    loss_sum, token_count = 0, 0
    for word_batch, label_batch in data_loader:
        flat_labels = label_batch.view(-1)
        n_tokens = flat_labels.size(0)
        loss_sum += n_tokens * lm_model(word_batch, flat_labels).item()
        token_count += n_tokens
        # Early exit keeps evaluation cheap on very large test sets.
        if 0 <= limited < token_count:
            break
    return math.exp(loss_sum / token_count)
# Train a word-level language model (LD-Net / DenseNet / basic RNN stack)
# with an adaptive softmax on a large corpus, checkpointing via torch_scope.
if __name__ == "__main__":
# ---- Command-line interface ----
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, default="auto")
parser.add_argument('--cp_root', default='./checkpoint')
parser.add_argument('--checkpoint_name', default='ld0')
parser.add_argument('--git_tracking', action='store_true')
parser.add_argument('--dataset_folder', default='./data/one_billion/')
parser.add_argument('--restore_checkpoint', default='')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--sequence_length', type=int, default=20)
parser.add_argument('--hid_dim', type=int, default=300)
parser.add_argument('--word_dim', type=int, default=300)
parser.add_argument('--label_dim', type=int, default=1600)
parser.add_argument('--layer_num', type=int, default=10)
parser.add_argument('--droprate', type=float, default=0.01)
parser.add_argument('--add_relu', action='store_true')
parser.add_argument('--layer_drop', type=float, default=0.5)
parser.add_argument('--epoch', type=int, default=400)
parser.add_argument('--clip', type=float, default=5)
parser.add_argument('--update', choices=['Adam', 'Adagrad', 'Adadelta'], default='Adam', help='adam is the best')
parser.add_argument('--rnn_layer', choices=['Basic', 'DenseNet', 'LDNet'], default='LDNet')
parser.add_argument('--rnn_unit', choices=['gru', 'lstm', 'rnn'], default='lstm')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--lr_decay', type=float, default=0.1)
# Adaptive-softmax frequency cut-offs; the tail bucket is appended below.
parser.add_argument('--cut_off', nargs='+', default=[4000,40000,200000])
parser.add_argument('--interval', type=int, default=100)
parser.add_argument('--epoch_size', type=int, default=4000)
parser.add_argument('--patience', type=float, default=10)
args = parser.parse_args()
# ---- Device / experiment-tracking setup ----
pw = wrapper(os.path.join(args.cp_root, args.checkpoint_name), args.checkpoint_name, enable_git_track=args.git_tracking)
gpu_index = pw.auto_device() if 'auto' == args.gpu else int(args.gpu)
device = torch.device("cuda:" + str(gpu_index) if gpu_index >= 0 else "cpu")
if gpu_index >= 0:
torch.cuda.set_device(gpu_index)
# ---- Data: streamed training shards plus an in-memory test split ----
logger.info('Loading dataset.')
dataset = pickle.load(open(args.dataset_folder + 'test.pk', 'rb'))
w_map, test_data, range_idx = dataset['w_map'], dataset['test_data'], dataset['range']
train_loader = LargeDataset(args.dataset_folder, range_idx, args.batch_size, args.sequence_length)
test_loader = EvalDataset(test_data, args.batch_size)
logger.info('Building models.')
rnn_map = {'Basic': BasicRNN, 'DenseNet': DenseRNN, 'LDNet': functools.partial(LDRNN, layer_drop = args.layer_drop)}
rnn_layer = rnn_map[args.rnn_layer](args.layer_num, args.rnn_unit, args.word_dim, args.hid_dim, args.droprate)
cut_off = args.cut_off + [len(w_map) + 1]
if args.label_dim > 0:
soft_max = AdaptiveSoftmax(args.label_dim, cut_off)
else:
soft_max = AdaptiveSoftmax(rnn_layer.output_dim, cut_off)
lm_model = LM(rnn_layer, soft_max, len(w_map), args.word_dim, args.droprate, label_dim = args.label_dim, add_relu=args.add_relu)
lm_model.rand_ini()
logger.info('Building optimizer.')
optim_map = {'Adam' : optim.Adam, 'Adagrad': optim.Adagrad, 'Adadelta': optim.Adadelta}
if args.lr > 0:
optimizer=optim_map[args.update](lm_model.parameters(), lr=args.lr)
else:
optimizer=optim_map[args.update](lm_model.parameters())
# Optional warm start (non-strict load: partial state dicts are accepted).
if args.restore_checkpoint:
if os.path.isfile(args.restore_checkpoint):
logger.info("loading checkpoint: '{}'".format(args.restore_checkpoint))
model_file = wrapper.restore_checkpoint(args.restore_checkpoint)['model']
lm_model.load_state_dict(model_file, False)
else:
logger.info("no checkpoint found at: '{}'".format(args.restore_checkpoint))
lm_model.to(device)
logger.info('Saving configues.')
pw.save_configue(args)
logger.info('Setting up training environ.')
# NOTE(review): despite its name, best_train_ppl is compared against and
# stores epoch_loss (a summed loss), not a perplexity — the logic is
# monotonic either way, but the naming misleads.
best_train_ppl = float('inf')
cur_lr = args.lr
batch_index = 0
epoch_loss = 0
patience = 0
# NOTE(review): SummaryWriter is never imported and args.log_dir is not a
# defined argument — the next three lines raise NameError/AttributeError at
# runtime. They look like leftovers from an earlier TensorBoard setup that
# torch_scope's wrapper replaced; confirm and remove.
writer = SummaryWriter(log_dir='./runs_1b/'+args.log_dir)
name_list = ['batch_loss', 'train_ppl', 'test_ppl']
bloss, tr_ppl, te_ppl = [args.log_dir+'/'+tup for tup in name_list]
try:
for indexs in range(args.epoch):
logger.info('############')
logger.info('Epoch: {}'.format(indexs))
pw.nvidia_memory_map()
lm_model.train()
for word_t, label_t in train_loader.get_tqdm(device):
# Reset hidden state at the start of every shard pass.
if 1 == train_loader.cur_idx:
lm_model.init_hidden()
label_t = label_t.view(-1)
lm_model.zero_grad()
loss = lm_model(word_t, label_t)
loss.backward()
torch.nn.utils.clip_grad_norm_(lm_model.parameters(), args.clip)
optimizer.step()
batch_index += 1
if 0 == batch_index % args.interval:
s_loss = utils.to_scalar(loss)
pw.add_loss_vs_batch({'batch_loss': s_loss}, batch_index, use_logger = False)
epoch_loss += utils.to_scalar(loss)
# Every epoch_size batches: log pseudo-epoch ppl and run patience-based
# learning-rate decay driven by the training loss.
if 0 == batch_index % args.epoch_size:
epoch_ppl = math.exp(epoch_loss / args.epoch_size)
pw.add_loss_vs_batch({'train_ppl': epoch_ppl}, batch_index, use_logger = True)
if epoch_loss < best_train_ppl:
best_train_ppl = epoch_loss
patience = 0
else:
patience += 1
epoch_loss = 0
if patience > args.patience and cur_lr > 0:
patience = 0
cur_lr *= args.lr_decay
best_train_ppl = float('inf')
logger.info('adjust_learning_rate...')
utils.adjust_learning_rate(optimizer, cur_lr)
test_ppl = evaluate(test_loader.get_tqdm(device), lm_model)
pw.add_loss_vs_batch({'test_ppl': test_ppl}, indexs, use_logger = True)
pw.save_checkpoint(model = lm_model, optimizer = optimizer, is_best = True)
except KeyboardInterrupt:
# Graceful exit: still evaluate and checkpoint on Ctrl-C.
logger.info('Exiting from training early')
test_ppl = evaluate(test_loader.get_tqdm(device), lm_model)
pw.add_loss_vs_batch({'test_ppl': test_ppl}, indexs, use_logger = True)
pw.save_checkpoint(model = lm_model, optimizer = optimizer, is_best = True)
pw.close() | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/train_seq_elmo.py | train_seq_elmo.py | from __future__ import print_function
import datetime
import time
import torch
import torch.nn as nn
import torch.optim as optim
import codecs
import pickle
import math
import numpy as np
from model_word_ada.LM import LM
from model_word_ada.basic import BasicRNN
from model_word_ada.densenet import DenseRNN
from model_word_ada.ldnet import LDRNN
from model_seq.crf import CRFLoss, CRFDecode
from model_seq.dataset import SeqDataset
from model_seq.evaluator import eval_wc
from model_seq.seqlabel import SeqLabel, Vanilla_SeqLabel
from model_seq.seqlm import BasicSeqLM
from model_seq.elmo import ElmoLM
import model_seq.utils as utils
from torch_scope import wrapper
import argparse
import logging
import json
import os
import sys
import itertools
import functools
logger = logging.getLogger(__name__)
# Train a sequence-labeling model augmented with ELMo-style contextual
# embeddings from pre-trained forward and backward language models.
if __name__ == "__main__":
# ---- Command-line interface ----
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, default="auto")
parser.add_argument('--cp_root', default='./checkpoint')
parser.add_argument('--checkpoint_name', default='elmo_ner')
parser.add_argument('--git_tracking', action='store_true')
parser.add_argument('--corpus', default='./data/ner_dataset.pk')
parser.add_argument('--forward_lm', default='./checkpoint/basic0.th')
parser.add_argument('--backward_lm', default='./checkpoint/basic_0.th')
parser.add_argument('--lm_hid_dim', type=int, default=2048)
parser.add_argument('--lm_word_dim', type=int, default=300)
parser.add_argument('--lm_label_dim', type=int, default=-1)
parser.add_argument('--lm_layer_num', type=int, default=2)
parser.add_argument('--lm_droprate', type=float, default=0.5)
parser.add_argument('--lm_rnn_layer', choices=['Basic'], default='Basic')
parser.add_argument('--lm_rnn_unit', choices=['gru', 'lstm', 'rnn'], default='lstm')
parser.add_argument('--seq_c_dim', type=int, default=30)
parser.add_argument('--seq_c_hid', type=int, default=150)
parser.add_argument('--seq_c_layer', type=int, default=1)
parser.add_argument('--seq_w_dim', type=int, default=100)
parser.add_argument('--seq_w_hid', type=int, default=300)
parser.add_argument('--seq_w_layer', type=int, default=1)
parser.add_argument('--seq_droprate', type=float, default=0.5)
parser.add_argument('--seq_model', choices=['vanilla', 'lm-aug'], default='lm-aug')
parser.add_argument('--seq_rnn_unit', choices=['gru', 'lstm', 'rnn'], default='lstm')
parser.add_argument('--seq_lambda0', type=float, default=0.01)
parser.add_argument('--batch_size', type=int, default=10)
parser.add_argument('--patience', type=int, default=15)
parser.add_argument('--epoch', type=int, default=200)
parser.add_argument('--clip', type=float, default=5)
parser.add_argument('--lr', type=float, default=0.015)
parser.add_argument('--lr_decay', type=float, default=0.05)
parser.add_argument('--update', choices=['Adam', 'Adagrad', 'Adadelta', 'SGD'], default='SGD')
args = parser.parse_args()
# ---- Device / experiment-tracking setup ----
pw = wrapper(os.path.join(args.cp_root, args.checkpoint_name), args.checkpoint_name, enable_git_track=args.git_tracking)
gpu_index = pw.auto_device() if 'auto' == args.gpu else int(args.gpu)
device = torch.device("cuda:" + str(gpu_index) if gpu_index >= 0 else "cpu")
if gpu_index >= 0:
torch.cuda.set_device(gpu_index)
logger.info('Loading data')
dataset = pickle.load(open(args.corpus, 'rb'))
name_list = ['flm_map', 'blm_map', 'gw_map', 'c_map', 'y_map', 'emb_array', 'train_data', 'test_data', 'dev_data']
flm_map, blm_map, gw_map, c_map, y_map, emb_array, train_data, test_data, dev_data = [dataset[tup] for tup in name_list ]
logger.info('Loading language model')
rnn_map = {'Basic': BasicRNN}
flm_rnn_layer = rnn_map[args.lm_rnn_layer](args.lm_layer_num, args.lm_rnn_unit, args.lm_word_dim, args.lm_hid_dim, args.lm_droprate)
blm_rnn_layer = rnn_map[args.lm_rnn_layer](args.lm_layer_num, args.lm_rnn_unit, args.lm_word_dim, args.lm_hid_dim, args.lm_droprate)
flm_model = LM(flm_rnn_layer, None, len(flm_map), args.lm_word_dim, args.lm_droprate, label_dim = args.lm_label_dim)
blm_model = LM(blm_rnn_layer, None, len(blm_map), args.lm_word_dim, args.lm_droprate, label_dim = args.lm_label_dim)
# Non-strict loads: the LMs were trained with a softmax head we drop here.
flm_file = wrapper.restore_checkpoint(args.forward_lm)['model']
flm_model.load_state_dict(flm_file, False)
blm_file = wrapper.restore_checkpoint(args.backward_lm)['model']
blm_model.load_state_dict(blm_file, False)
flm_model_seq = ElmoLM(flm_model, False, args.lm_droprate, True)
blm_model_seq = ElmoLM(blm_model, True, args.lm_droprate, True)
logger.info('Building model')
SL_map = {'vanilla':Vanilla_SeqLabel, 'lm-aug': SeqLabel}
seq_model = SL_map[args.seq_model](flm_model_seq, blm_model_seq, len(c_map), args.seq_c_dim, args.seq_c_hid, args.seq_c_layer, len(gw_map), args.seq_w_dim, args.seq_w_hid, args.seq_w_layer, len(y_map), args.seq_droprate, unit=args.seq_rnn_unit)
seq_model.rand_init()
seq_model.load_pretrained_word_embedding(torch.FloatTensor(emb_array))
seq_model.to(device)
crit = CRFLoss(y_map)
decoder = CRFDecode(y_map)
evaluator = eval_wc(decoder, 'f1')
print('constructing dataset')
train_dataset, test_dataset, dev_dataset = [SeqDataset(tup_data, flm_map['\n'], blm_map['\n'], gw_map['<\n>'], c_map[' '], c_map['\n'], y_map['<s>'], y_map['<eof>'], len(y_map), args.batch_size) for tup_data in [train_data, test_data, dev_data]]
print('constructing optimizer')
param_dict = filter(lambda t: t.requires_grad, seq_model.parameters())
optim_map = {'Adam' : optim.Adam, 'Adagrad': optim.Adagrad, 'Adadelta': optim.Adadelta, 'SGD': functools.partial(optim.SGD, momentum=0.9)}
if args.lr > 0:
optimizer=optim_map[args.update](param_dict, lr=args.lr)
else:
optimizer=optim_map[args.update](param_dict)
logger.info('Saving configues.')
pw.save_configue(args)
logger.info('Setting up training environ.')
best_f1 = float('-inf')
patience_count = 0
batch_index = 0
normalizer = 0
tot_loss = 0
for indexs in range(args.epoch):
logger.info('############')
logger.info('Epoch: {}'.format(indexs))
pw.nvidia_memory_map()
seq_model.train()
for f_c, f_p, b_c, b_p, flm_w, blm_w, blm_ind, f_w, f_y, f_y_m, _ in train_dataset.get_tqdm(device):
seq_model.zero_grad()
output = seq_model(f_c, f_p, b_c, b_p, flm_w, blm_w, blm_ind, f_w)
loss = crit(output, f_y, f_y_m)
tot_loss += utils.to_scalar(loss)
normalizer += 1
if args.seq_lambda0 > 0:
# NOTE(review): args.seq_lambda1 is never added to this script's
# argument parser, so this line raises AttributeError whenever
# seq_lambda0 > 0 — which is the default (0.01). Either add a
# --seq_lambda1 argument or drop the parameter; verify against
# the regularizer() signature in model_seq/elmo.py.
loss += args.seq_lambda0 * (flm_model_seq.regularizer(args.seq_lambda1) + blm_model_seq.regularizer(args.seq_lambda1))
loss.backward()
torch.nn.utils.clip_grad_norm_(seq_model.parameters(), args.clip)
optimizer.step()
batch_index += 1
if 0 == batch_index % 100:
pw.add_loss_vs_batch({'training_loss': tot_loss / (normalizer + 1e-9)}, batch_index, use_logger = False)
tot_loss = 0
normalizer = 0
# Inverse-time learning-rate decay.
if args.lr > 0:
current_lr = args.lr / (1 + (indexs + 1) * args.lr_decay)
utils.adjust_learning_rate(optimizer, current_lr)
dev_f1, dev_pre, dev_rec, dev_acc = evaluator.calc_score(seq_model, dev_dataset.get_tqdm(device))
pw.add_loss_vs_batch({'dev_f1': dev_f1}, indexs, use_logger = True)
pw.add_loss_vs_batch({'dev_pre': dev_pre, 'dev_rec': dev_rec}, indexs, use_logger = False)
logger.info('Saving model...')
pw.save_checkpoint(model = seq_model, is_best = (dev_f1 > best_f1))
# Evaluate on test only when dev improves; early-stop on patience.
if dev_f1 > best_f1:
test_f1, test_pre, test_rec, test_acc = evaluator.calc_score(seq_model, test_dataset.get_tqdm(device))
best_f1, best_dev_pre, best_dev_rec, best_dev_acc = dev_f1, dev_pre, dev_rec, dev_acc
pw.add_loss_vs_batch({'test_f1': test_f1}, indexs, use_logger = True)
pw.add_loss_vs_batch({'test_pre': test_pre, 'test_rec': test_rec}, indexs, use_logger = False)
patience_count = 0
else:
patience_count += 1
if patience_count >= args.patience:
break
pw.close()
| python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/train_seq.py | train_seq.py | from __future__ import print_function
import datetime
import time
import torch
import torch.nn as nn
import torch.optim as optim
import codecs
import pickle
import math
from model_word_ada.LM import LM
from model_word_ada.basic import BasicRNN
from model_word_ada.densenet import DenseRNN
from model_word_ada.ldnet import LDRNN
from model_seq.crf import CRFLoss, CRFDecode
from model_seq.dataset import SeqDataset
from model_seq.evaluator import eval_wc
from model_seq.seqlabel import SeqLabel, Vanilla_SeqLabel
from model_seq.seqlm import BasicSeqLM
from model_seq.sparse_lm import SparseSeqLM
import model_seq.utils as utils
from torch_scope import wrapper
import argparse
import logging
import json
import os
import sys
import itertools
import functools
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    # Train a (LM-augmented) sequence-labeling model with CRF output,
    # early stopping on dev F1, and test evaluation at each new best.
    parser = argparse.ArgumentParser()
    # environment / checkpointing
    parser.add_argument('--gpu', type=str, default="auto")
    parser.add_argument('--cp_root', default='./checkpoint')
    parser.add_argument('--checkpoint_name', default='ner')
    parser.add_argument('--git_tracking', action='store_true')
    # data and pre-trained language-model checkpoints
    parser.add_argument('--corpus', default='./data/ner_dataset.pk')
    parser.add_argument('--forward_lm', default='./checkpoint/ld0.th')
    parser.add_argument('--backward_lm', default='./checkpoint/ld_0.th')
    # language-model architecture (must match the loaded checkpoints)
    parser.add_argument('--lm_hid_dim', type=int, default=300)
    parser.add_argument('--lm_word_dim', type=int, default=300)
    parser.add_argument('--lm_label_dim', type=int, default=-1)
    parser.add_argument('--lm_layer_num', type=int, default=10)
    parser.add_argument('--lm_droprate', type=float, default=0.5)
    parser.add_argument('--lm_rnn_layer', choices=['Basic', 'DenseNet', 'LDNet'], default='LDNet')
    parser.add_argument('--lm_rnn_unit', choices=['gru', 'lstm', 'rnn'], default='lstm')
    # sequence-labeler architecture (character- and word-level RNNs)
    parser.add_argument('--seq_c_dim', type=int, default=30)
    parser.add_argument('--seq_c_hid', type=int, default=150)
    parser.add_argument('--seq_c_layer', type=int, default=1)
    parser.add_argument('--seq_w_dim', type=int, default=100)
    parser.add_argument('--seq_w_hid', type=int, default=300)
    parser.add_argument('--seq_w_layer', type=int, default=1)
    parser.add_argument('--seq_droprate', type=float, default=0.5)
    parser.add_argument('--seq_model', choices=['vanilla', 'lm-aug'], default='lm-aug')
    parser.add_argument('--seq_rnn_unit', choices=['gru', 'lstm', 'rnn'], default='lstm')
    parser.add_argument('--seq_lm_model', choices=['vanilla', 'sparse-lm'], default='vanilla')
    # optimization
    parser.add_argument('--batch_size', type=int, default=10)
    parser.add_argument('--patience', type=int, default=15)
    parser.add_argument('--epoch', type=int, default=200)
    parser.add_argument('--clip', type=float, default=5)
    parser.add_argument('--lr', type=float, default=0.015)
    parser.add_argument('--lr_decay', type=float, default=0.05)
    parser.add_argument('--update', choices=['Adam', 'Adagrad', 'Adadelta', 'SGD'], default='SGD')
    args = parser.parse_args()

    # experiment wrapper: checkpoint folder, logging, device selection
    pw = wrapper(os.path.join(args.cp_root, args.checkpoint_name), args.checkpoint_name, enable_git_track=args.git_tracking)
    gpu_index = pw.auto_device() if 'auto' == args.gpu else int(args.gpu)
    device = torch.device("cuda:" + str(gpu_index) if gpu_index >= 0 else "cpu")
    if gpu_index >= 0:
        torch.cuda.set_device(gpu_index)

    logger.info('Loading data')
    dataset = pickle.load(open(args.corpus, 'rb'))
    name_list = ['flm_map', 'blm_map', 'gw_map', 'c_map', 'y_map', 'emb_array', 'train_data', 'test_data', 'dev_data']
    flm_map, blm_map, gw_map, c_map, y_map, emb_array, train_data, test_data, dev_data = [dataset[tup] for tup in name_list ]

    logger.info('Loading language model')
    # build forward/backward LMs with the configured architecture, then load weights
    rnn_map = {'Basic': BasicRNN, 'DenseNet': DenseRNN, 'LDNet': functools.partial(LDRNN, layer_drop = 0)}
    flm_rnn_layer = rnn_map[args.lm_rnn_layer](args.lm_layer_num, args.lm_rnn_unit, args.lm_word_dim, args.lm_hid_dim, args.lm_droprate)
    blm_rnn_layer = rnn_map[args.lm_rnn_layer](args.lm_layer_num, args.lm_rnn_unit, args.lm_word_dim, args.lm_hid_dim, args.lm_droprate)
    flm_model = LM(flm_rnn_layer, None, len(flm_map), args.lm_word_dim, args.lm_droprate, label_dim = args.lm_label_dim)
    blm_model = LM(blm_rnn_layer, None, len(blm_map), args.lm_word_dim, args.lm_droprate, label_dim = args.lm_label_dim)
    flm_file = wrapper.restore_checkpoint(args.forward_lm)['model']
    flm_model.load_state_dict(flm_file, False)
    blm_file = wrapper.restore_checkpoint(args.backward_lm)['model']
    blm_model.load_state_dict(blm_file, False)
    # wrap the LMs for sequence labeling (plain or with layer selection)
    slm_map = {'vanilla': BasicSeqLM, 'sparse-lm': SparseSeqLM}
    flm_model_seq = slm_map[args.seq_lm_model](flm_model, False, args.lm_droprate, True)
    blm_model_seq = slm_map[args.seq_lm_model](blm_model, True, args.lm_droprate, True)

    logger.info('Building models')
    SL_map = {'vanilla':Vanilla_SeqLabel, 'lm-aug': SeqLabel}
    seq_model = SL_map[args.seq_model](flm_model_seq, blm_model_seq, len(c_map), args.seq_c_dim, args.seq_c_hid, args.seq_c_layer, len(gw_map), args.seq_w_dim, args.seq_w_hid, args.seq_w_layer, len(y_map), args.seq_droprate, unit=args.seq_rnn_unit)
    seq_model.rand_init()
    seq_model.load_pretrained_word_embedding(torch.FloatTensor(emb_array))
    seq_model.to(device)

    # CRF loss / Viterbi decoder and span-F1 evaluator
    crit = CRFLoss(y_map)
    decoder = CRFDecode(y_map)
    evaluator = eval_wc(decoder, 'f1')

    logger.info('Constructing dataset')
    train_dataset, test_dataset, dev_dataset = [SeqDataset(tup_data, flm_map['\n'], blm_map['\n'], gw_map['<\n>'], c_map[' '], c_map['\n'], y_map['<s>'], y_map['<eof>'], len(y_map), args.batch_size) for tup_data in [train_data, test_data, dev_data]]

    logger.info('Constructing optimizer')
    # only optimize parameters that require gradients (the LM wrappers freeze theirs)
    param_dict = filter(lambda t: t.requires_grad, seq_model.parameters())
    optim_map = {'Adam' : optim.Adam, 'Adagrad': optim.Adagrad, 'Adadelta': optim.Adadelta, 'SGD': functools.partial(optim.SGD, momentum=0.9)}
    if args.lr > 0:
        optimizer=optim_map[args.update](param_dict, lr=args.lr)
    else:
        # lr <= 0 means: use the optimizer's own default learning rate
        optimizer=optim_map[args.update](param_dict)

    logger.info('Saving configues.')
    pw.save_configue(args)

    logger.info('Setting up training environ.')
    best_f1 = float('-inf')
    patience_count = 0
    batch_index = 0
    normalizer=0
    tot_loss = 0

    for indexs in range(args.epoch):
        logger.info('############')
        logger.info('Epoch: {}'.format(indexs))
        pw.nvidia_memory_map()
        seq_model.train()
        for f_c, f_p, b_c, b_p, flm_w, blm_w, blm_ind, f_w, f_y, f_y_m, _ in train_dataset.get_tqdm(device):
            seq_model.zero_grad()
            output = seq_model(f_c, f_p, b_c, b_p, flm_w, blm_w, blm_ind, f_w)
            loss = crit(output, f_y, f_y_m)
            tot_loss += utils.to_scalar(loss)
            normalizer += 1
            loss.backward()
            # gradient clipping to stabilize RNN training
            torch.nn.utils.clip_grad_norm_(seq_model.parameters(), args.clip)
            optimizer.step()
            batch_index += 1
            if 0 == batch_index % 100:
                # report the average training loss of the last 100 batches
                pw.add_loss_vs_batch({'training_loss': tot_loss / (normalizer + 1e-9)}, batch_index, use_logger = False)
                tot_loss = 0
                normalizer = 0
        if args.lr > 0:
            # inverse-time learning-rate decay
            current_lr = args.lr / (1 + (indexs + 1) * args.lr_decay)
            utils.adjust_learning_rate(optimizer, current_lr)
        dev_f1, dev_pre, dev_rec, dev_acc = evaluator.calc_score(seq_model, dev_dataset.get_tqdm(device))
        pw.add_loss_vs_batch({'dev_f1': dev_f1}, indexs, use_logger = True)
        pw.add_loss_vs_batch({'dev_pre': dev_pre, 'dev_rec': dev_rec}, indexs, use_logger = False)
        logger.info('Saving model...')
        pw.save_checkpoint(model = seq_model, is_best = (dev_f1 > best_f1))
        if dev_f1 > best_f1:
            # new best dev score: evaluate on test and reset patience
            test_f1, test_pre, test_rec, test_acc = evaluator.calc_score(seq_model, test_dataset.get_tqdm(device))
            best_f1, best_dev_pre, best_dev_rec, best_dev_acc = dev_f1, dev_pre, dev_rec, dev_acc
            pw.add_loss_vs_batch({'test_f1': test_f1}, indexs, use_logger = True)
            pw.add_loss_vs_batch({'test_pre': test_pre, 'test_rec': test_rec}, indexs, use_logger = False)
            patience_count = 0
        else:
            patience_count += 1
            if patience_count >= args.patience:
                # early stopping: dev F1 stagnant for `patience` epochs
                break

    pw.close()
| python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/pre_seq/encode_data.py | pre_seq/encode_data.py | """
.. module:: encode_data
:synopsis: encode data for sequence labeling
.. moduleauthor:: Liyuan Liu
"""
import pickle
import argparse
import os
import random
import numpy as np
from tqdm import tqdm
import itertools
import functools
def encode_dataset(input_file, flm_map, blm_map, gw_map, c_map, y_map):
    """Encode a CoNLL-style file into lists of map indices.

    Each sentence becomes ``[flm_ids, blm_ids, gw_ids, char_id_lists,
    label_ids]``; sentences are delimited by blank lines or ``-DOCSTART-``
    markers. Unknown words/characters fall back to the ``'<unk>'`` entries
    of the corresponding maps; labels must be present in ``y_map``.
    """
    flm_unk = flm_map['<unk>']
    blm_unk = blm_map['<unk>']
    gw_unk = gw_map['<unk>']
    c_con = c_map[' ']  # unused here, retained so a missing ' ' entry still fails fast
    c_unk = c_map['<unk>']

    dataset = list()
    flm_ids, blm_ids, gw_ids, char_ids, label_ids = [], [], [], [], []
    with open(input_file, 'r') as fin:
        for raw in fin:
            if raw.isspace() or raw.startswith('-DOCSTART-'):
                # sentence boundary: flush the accumulated sentence, if any
                if flm_ids:
                    dataset.append([flm_ids, blm_ids, gw_ids, char_ids, label_ids])
                    flm_ids, blm_ids, gw_ids, char_ids, label_ids = [], [], [], [], []
            else:
                fields = raw.split()
                token = fields[0]
                flm_ids.append(flm_map.get(token, flm_unk))
                blm_ids.append(blm_map.get(token, blm_unk))
                gw_ids.append(gw_map.get(token.lower(), gw_unk))
                label_ids.append(y_map[fields[-1]])
                char_ids.append([c_map.get(ch, c_unk) for ch in token])
    # flush a trailing sentence that is not followed by a separator
    if flm_ids:
        dataset.append([flm_ids, blm_ids, gw_ids, char_ids, label_ids])
    return dataset
if __name__ == "__main__":
    # Encode train/test/dev CoNLL files with pre-built maps and pickle the result.
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_file', default="./data/ner/eng.train.iobes")
    parser.add_argument('--test_file', default="./data/ner/eng.testb.iobes")
    parser.add_argument('--dev_file', default="./data/ner/eng.testa.iobes")
    parser.add_argument('--input_map', default="./data/conll_map.pk")
    parser.add_argument('--output_file', default="./data/ner_dataset.pk")
    parser.add_argument('--threshold', type=int, default=1)  # NOTE(review): unused below — confirm intent
    parser.add_argument('--unk', default='<unk>')  # NOTE(review): unused below — confirm intent
    args = parser.parse_args()

    # load the vocabulary/label maps produced by gene_map.py
    with open(args.input_map, 'rb') as f:
        p_data = pickle.load(f)
    name_list = ['flm_map', 'blm_map', 'gw_map', 'c_map', 'y_map', 'emb_array']
    flm_map, blm_map, gw_map, c_map, y_map, emb_array = [p_data[tup] for tup in name_list]

    # encode each split with the shared maps
    train_dataset = encode_dataset(args.train_file, flm_map, blm_map, gw_map, c_map, y_map)
    test_dataset = encode_dataset(args.test_file, flm_map, blm_map, gw_map, c_map, y_map)
    dev_dataset = encode_dataset(args.dev_file, flm_map, blm_map, gw_map, c_map, y_map)

    # bundle maps, embeddings and encoded splits into a single pickle
    with open(args.output_file, 'wb') as f:
        pickle.dump({'flm_map': flm_map, 'blm_map': blm_map, 'gw_map': gw_map, 'c_map': c_map, 'y_map': y_map, 'emb_array': emb_array, 'train_data': train_dataset, 'test_data': test_dataset, 'dev_data': dev_dataset}, f)
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/pre_seq/gene_map.py | pre_seq/gene_map.py | """
.. module:: gene_map
:synopsis: generate map for sequence labeling
.. moduleauthor:: Liyuan Liu
"""
import pickle
import argparse
import os
import random
import numpy as np
from tqdm import tqdm
import itertools
import functools
if __name__ == "__main__":
    # Build and pickle the vocabulary/character/label maps plus the word
    # embedding matrix used by the sequence-labeling pipeline.
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_corpus', default='./data/ner/eng.train.iobes')
    parser.add_argument('--input_embedding', default="./embedding/glove.6B.100d.txt")
    parser.add_argument('--output_map', default="./data/conll_map.pk")
    parser.add_argument('--flm_map', default="./data/one_billion/test.pk")
    parser.add_argument('--blm_map', default="./data/one_billion_reverse/test.pk")
    parser.add_argument('--threshold', type=int, default=5)
    parser.add_argument('--unk', default='unk')
    args = parser.parse_args()

    # reuse the word maps of the pre-trained forward/backward language models
    with open(args.flm_map, 'rb') as f:
        p_data = pickle.load(f)
        flm_map = p_data['w_map']
    with open(args.blm_map, 'rb') as f:
        p_data = pickle.load(f)
        blm_map = p_data['w_map']

    # read the pre-trained embeddings; the `args.unk` token is remapped to '<unk>'
    gw_map = dict()
    embedding_array = list()
    # NOTE(review): this file handle is never closed explicitly
    for line in open(args.input_embedding, 'r'):
        line = line.split()
        vector = list(map(lambda t: float(t), filter(lambda n: n and not n.isspace(), line[1:])))
        if line[0] == args.unk:
            gw_map['<unk>'] = len(gw_map)
        else:
            gw_map[line[0]] = len(gw_map)
        embedding_array.append(vector)

    # random-init scale for words without pre-trained vectors
    bias = 2 * np.sqrt(3.0 / len(embedding_array[0]))
    gw_map['<\n>'] = len(gw_map)
    # NOTE(review): `random.random() * bias - bias` samples from [-bias, 0),
    # not a zero-centered interval — confirm whether `- bias / 2` was intended.
    embedding_array.append([random.random() * bias - bias for tup in embedding_array[0]])

    # count words/characters and collect the label set from the training corpus
    w_count = dict()
    c_count = dict()
    y_map = dict()
    with open(args.train_corpus, 'r') as fin:
        for line in fin:
            if line.isspace() or line.startswith('-DOCSTART-'):
                # sentence separators are counted as the '\n' character
                c_count['\n'] = c_count.get('\n', 0) + 1
            else:
                line = line.split()
                for tup in line[0]:
                    c_count[tup] = c_count.get(tup, 0) + 1
                c_count[' '] = c_count.get(' ', 0) + 1
                if line[-1] not in y_map:
                    y_map[line[-1]] = len(y_map)
                # only count words not already covered by the embeddings
                word = line[0].lower()
                if word not in gw_map:
                    w_count[word] = w_count.get(word, 0) + 1

    # add frequent in-corpus words that lack pre-trained vectors
    w_set = {k for k, v in w_count.items() if v > args.threshold}
    for k in w_set:
        gw_map[k] = len(gw_map)
        embedding_array.append([random.random() * bias - bias for tup in embedding_array[0]])

    # character map from frequent characters, plus '<unk>'
    c_set = {k for k, v in c_count.items() if v > args.threshold}
    c_map = {v:k for k, v in enumerate(c_set)}
    c_map['<unk>'] = len(c_map)

    # start / end-of-sequence label tokens (consumed by the CRF modules)
    y_map['<s>'] = len(y_map)
    y_map['<eof>'] = len(y_map)

    with open(args.output_map, 'wb') as f:
        pickle.dump({'flm_map': flm_map, 'blm_map': blm_map, 'gw_map': gw_map, 'c_map': c_map, 'y_map': y_map, 'emb_array': embedding_array}, f)
| python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_seq/sparse_lm.py | model_seq/sparse_lm.py | """
.. module:: sparse_lm
:synopsis: sparse language model for sequence labeling
.. moduleauthor:: Liyuan Liu
"""
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import model_seq.utils as utils
class SBUnit(nn.Module):
    """
    A single selectable recurrent layer of the sparse dense-RNN wrapper.

    Wraps one layer of the original dense RNN; its output is scaled by a
    selection weight and concatenated onto the input along the feature axis.

    Parameters
    ----------
    ori_unit : ``torch.nn.Module``, required.
        the original dense-RNN unit to wrap.
    droprate : ``float``, required.
        the dropout ratio applied to the layer input.
    fix_rate: ``bool``, required.
        whether the selection ratio is fixed (unused in this class).
    """
    def __init__(self, ori_unit, droprate, fix_rate):
        super(SBUnit, self).__init__()
        self.layer = ori_unit.layer
        self.unit_type = ori_unit.unit_type
        self.input_dim = ori_unit.input_dim
        self.increase_rate = ori_unit.increase_rate
        self.output_dim = self.input_dim + self.increase_rate
        self.droprate = droprate

    def prune_rnn(self, mask):
        """
        Shrink the layer's input projection to the columns selected by ``mask``.

        Parameters
        ----------
        mask : ``torch.ByteTensor``, required.
            The selection tensor for the input matrix.
        """
        keep = mask.nonzero().squeeze(1)
        pruned = self.layer.weight_ih_l0.data.index_select(1, keep).contiguous()
        self.layer.weight_ih_l0 = nn.Parameter(pruned)
        self.layer.input_size = self.layer.weight_ih_l0.size(1)

    def forward(self, x, weight=1):
        """
        Run the wrapped layer and concatenate its weighted output onto ``x``.

        Parameters
        ----------
        x : ``torch.FloatTensor``, required.
            The input tensor, of shape (seq_len, batch_size, input_dim).
        weight : scalar or ``torch.FloatTensor``, optional, (default=1).
            The selection weight scaling the layer output.

        Returns
        ----------
        output: ``torch.FloatTensor``.
            ``x`` with the weighted layer output appended on dim 2.
        """
        inp = F.dropout(x, p=self.droprate, training=self.training) if self.droprate > 0 else x
        out, _ = self.layer(inp)
        return torch.cat([x, weight * out], 2)
class SDRNN(nn.Module):
    """
    The multi-layer recurrent network for the dense-RNNs wrapper, with one
    trainable selection weight per layer (used for layer-wise pruning).

    Parameters
    ----------
    ori_drnn : ``torch.nn.Module``, required.
        the original dense-RNN module whose layers are wrapped as ``SBUnit``.
    droprate : ``float``, required.
        the dropout ratio.
    fix_rate: ``bool``, required.
        whether to fix the selection weights (disables their gradient).
    """
    def __init__(self, ori_drnn, droprate, fix_rate):
        super(SDRNN, self).__init__()
        if ori_drnn.layer:
            # wrap each original layer; start every selection weight at 1.0
            self.layer_list = [SBUnit(ori_unit, droprate, fix_rate) for ori_unit in ori_drnn.layer._modules.values()]
            self.weight_list = nn.Parameter(torch.FloatTensor([1.0] * len(self.layer_list)))
            self.weight_list.requires_grad = not fix_rate
            self.layer = nn.ModuleList(self.layer_list)
            # the wrapped RNN layers themselves stay frozen
            for param in self.layer.parameters():
                param.requires_grad = False
        else:
            self.layer_list = list()
            self.weight_list = list()
            self.layer = None
        self.emb_dim = ori_drnn.emb_dim
        self.output_dim = ori_drnn.output_dim
        self.unit_type = ori_drnn.unit_type

    def to_params(self):
        """
        To parameters (a serializable configuration ``dict``).
        """
        return {
            "rnn_type": "LDRNN",
            "unit_type": self.unit_type,
            "layer_num": 0 if not self.layer else len(self.layer),
            "emb_dim": self.emb_dim,
            "hid_dim": -1 if not self.layer else self.layer[0].increase_rate,
            "droprate": -1 if not self.layer else self.layer[0].droprate,
            "after_pruned": True
        }

    def prune_dense_rnn(self):
        """
        Prune the dense RNN by deleting layers whose selection weight is <= 0.

        Kept layers have their input projections pruned so they only read
        surviving feature columns; dropped layers' output columns are masked.

        Returns
        -------
        prune_mask: ``torch.FloatTensor``.
            1/0 mask over the concatenated output features marking which
            columns survive the pruning.
        """
        prune_mask = torch.ones(self.layer_list[0].input_dim)
        increase_mask_one = torch.ones(self.layer_list[0].increase_rate)
        increase_mask_zero = torch.zeros(self.layer_list[0].increase_rate)
        new_layer_list = list()
        new_weight_list = list()
        for ind in range(0, len(self.layer_list)):
            if self.weight_list.data[ind] > 0:
                # keep this layer: prune its inputs to the surviving columns
                new_weight_list.append(self.weight_list.data[ind])
                self.layer_list[ind].prune_rnn(prune_mask)
                new_layer_list.append(self.layer_list[ind])
                prune_mask = torch.cat([prune_mask, increase_mask_one], dim = 0)
            else:
                # drop this layer: mark its output columns as removed
                prune_mask = torch.cat([prune_mask, increase_mask_zero], dim = 0)
        if not new_layer_list:
            # every layer was pruned away: the module reduces to the identity
            self.output_dim = self.layer_list[0].input_dim
            self.layer = None
            self.weight_list = None
            self.layer_list = None
        else:
            self.layer_list = new_layer_list
            self.layer = nn.ModuleList(self.layer_list)
            self.weight_list = nn.Parameter(torch.FloatTensor(new_weight_list))
            self.weight_list.requires_grad = False
            for param in self.layer.parameters():
                param.requires_grad = False
        return prune_mask

    def prox(self):
        """
        The proximal projection: clamp selection weights into [0, 1].

        Returns
        -------
        none_zero_count: the number of weights still strictly positive.
        """
        self.weight_list.data.masked_fill_(self.weight_list.data < 0, 0)
        self.weight_list.data.masked_fill_(self.weight_list.data > 1, 1)
        none_zero_count = (self.weight_list.data > 0).sum()
        return none_zero_count

    def regularizer(self):
        """
        Calculate the regularization terms on the selection weights.

        Returns
        ----------
        reg0: count of non-zero selection weights (sparsity count).
        reg1: ``torch.FloatTensor``.
            sum of the non-zero selection weights (L1 term).
        reg3: ``torch.FloatTensor``.
            sum of w * (1 - w), which is zero exactly at w in {0, 1}.
        """
        reg3 = (self.weight_list * (1 - self.weight_list)).sum()
        none_zero = self.weight_list.data > 0
        none_zero_count = none_zero.sum()
        reg0 = none_zero_count
        reg1 = self.weight_list[none_zero].sum()
        return reg0, reg1, reg3

    def forward(self, x):
        """
        Calculate the output.

        Parameters
        ----------
        x : ``torch.FloatTensor``, required.
            the input tensor, of shape (seq_len, batch_size, input_dim).

        Returns
        ----------
        output: ``torch.FloatTensor``.
            the input concatenated with every kept layer's weighted output
            (the input unchanged when all layers have been pruned away).
        """
        if self.layer_list is not None:
            for ind in range(len(self.layer_list)):
                x = self.layer[ind](x, self.weight_list[ind])
        return x
class SparseSeqLM(nn.Module):
    """
    Contextual-embedding wrapper over a dense-RNN language model with
    layer-wise selection (the wrapped RNN becomes an ``SDRNN``).

    Parameters
    ----------
    ori_lm : ``torch.nn.Module``, required.
        the original language model.
    backward : ``bool``, required.
        whether the language model is a backward one.
    droprate : ``float``, required.
        the dropout ratio.
    fix_rate: ``bool``, required.
        whether to fix the layer-selection ratios.
    """
    def __init__(self, ori_lm, backward, droprate, fix_rate):
        super(SparseSeqLM, self).__init__()
        self.backward = backward
        self.rnn = SDRNN(ori_lm.rnn, droprate, fix_rate)
        self.w_num = ori_lm.w_num
        self.w_dim = ori_lm.w_dim
        self.output_dim = ori_lm.rnn_output
        # the pre-trained embedding stays frozen
        self.word_embed = ori_lm.word_embed
        self.word_embed.weight.requires_grad = False

    def to_params(self):
        """Serialize the configuration to a plain ``dict``."""
        return {
            "backward": self.backward,
            "rnn_params": self.rnn.to_params(),
            "word_embed_num": self.word_embed.num_embeddings,
            "word_embed_dim": self.word_embed.embedding_dim
        }

    def prune_dense_rnn(self):
        """Drop de-selected layers from the dense RNN; return the prune mask."""
        mask = self.rnn.prune_dense_rnn()
        self.output_dim = self.rnn.output_dim
        return mask

    def init_hidden(self):
        """No-op; kept for interface compatibility."""
        return

    def regularizer(self):
        """Return the selection-weight regularization terms of the RNN."""
        return self.rnn.regularizer()

    def prox(self):
        """Apply the proximal projection to the selection weights."""
        return self.rnn.prox()

    def forward(self, w_in, ind=None):
        """
        Compute contextual representations.

        Parameters
        ----------
        w_in : ``torch.LongTensor``, required.
            the input tensor, of shape (seq_len, batch_size).
        ind : ``torch.LongTensor``, optional, (default=None).
            the reordering index for the backward language model, of shape
            (seq_len, batch_size).

        Returns
        ----------
        output: ``torch.FloatTensor``.
            The contextual (ELMo-style) outputs.
        """
        out = self.rnn(self.word_embed(w_in))
        if not self.backward:
            return out
        # re-align the backward LM outputs with the forward token order
        s0, s1, s2 = out.size()
        flat = out.view(s0 * s1, s2).index_select(0, ind).contiguous()
        return flat.view(s0, s1, s2)
| python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_seq/seqlm.py | model_seq/seqlm.py | """
.. module:: seqlm
:synopsis: language model for sequence labeling
.. moduleauthor:: Liyuan Liu
"""
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import model_seq.utils as utils
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicSeqLM(nn.Module):
    """
    Contextual-embedding wrapper over a pre-trained dense-RNN language
    model; all wrapped parameters are frozen.

    Parameters
    ----------
    ori_lm : ``torch.nn.Module``, required.
        the original language model.
    backward : ``bool``, required.
        whether the language model is a backward one.
    droprate : ``float``, required.
        the dropout ratio (unused here; kept for interface parity).
    fix_rate: ``bool``, required.
        whether to fix the selection ratio (unused here; kept for interface parity).
    """
    def __init__(self, ori_lm, backward, droprate, fix_rate):
        super(BasicSeqLM, self).__init__()
        self.backward = backward
        self.rnn = ori_lm.rnn
        # freeze the whole pre-trained RNN and its embedding
        for p in self.rnn.parameters():
            p.requires_grad = False
        self.w_num = ori_lm.w_num
        self.w_dim = ori_lm.w_dim
        self.word_embed = ori_lm.word_embed
        self.word_embed.weight.requires_grad = False
        self.output_dim = ori_lm.rnn_output

    def to_params(self):
        """Serialize the configuration to a plain ``dict``."""
        return {
            "rnn_params": self.rnn.to_params(),
            "word_embed_num": self.word_embed.num_embeddings,
            "word_embed_dim": self.word_embed.embedding_dim
        }

    def init_hidden(self):
        """Reset the hidden state of the wrapped RNN."""
        self.rnn.init_hidden()

    def regularizer(self):
        """Return the regularization terms of the wrapped RNN."""
        return self.rnn.regularizer()

    def forward(self, w_in, ind=None):
        """
        Compute contextual representations.

        Parameters
        ----------
        w_in : ``torch.LongTensor``, required.
            the input tensor, of shape (seq_len, batch_size).
        ind : ``torch.LongTensor``, optional, (default=None).
            the reordering index for the backward language model, of shape
            (seq_len, batch_size).

        Returns
        ----------
        output: ``torch.FloatTensor``.
            The contextual (ELMo-style) outputs.
        """
        out = self.rnn(self.word_embed(w_in))
        if not self.backward:
            return out
        # re-align the backward LM outputs with the forward token order
        s0, s1, s2 = out.size()
        flat = out.view(s0 * s1, s2).index_select(0, ind).contiguous()
        return flat.view(s0, s1, s2)
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_seq/crf.py | model_seq/crf.py | """
.. module:: crf
:synopsis: conditional random field
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torch.sparse as sparse
import model_seq.utils as utils
class CRF(nn.Module):
    """
    Conditional Random Field scoring layer.

    Projects input features to per-tag emission scores and adds a learned
    tag-transition matrix to produce CRF potential scores.

    Parameters
    ----------
    hidden_dim : ``int``, required.
        the dimension of the input features.
    tagset_size : ``int``, required.
        the size of the target labels.
    if_bias: ``bool``, optional, (default=True).
        whether the linear transformation has the bias term.
    """
    def __init__(self,
                 hidden_dim: int,
                 tagset_size: int,
                 if_bias: bool = True):
        super(CRF, self).__init__()
        self.tagset_size = tagset_size
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size, bias=if_bias)
        self.transitions = nn.Parameter(torch.Tensor(tagset_size, tagset_size))

    def rand_init(self):
        """Randomly initialize the projection; zero the transition matrix."""
        utils.init_linear(self.hidden2tag)
        self.transitions.data.zero_()

    def forward(self, feats):
        """
        Compute the CRF potential scores from the input features.

        Parameters
        ----------
        feats: ``torch.FloatTensor``, required.
            the input features for the conditional random field, of shape (*, hidden_dim).

        Returns
        -------
        output: ``torch.FloatTensor``.
            A float tensor of shape (ins_num, from_tag_size, to_tag_size)
        """
        k = self.tagset_size
        emissions = self.hidden2tag(feats).view(-1, 1, k)
        ins_num = emissions.size(0)
        # broadcast emissions over the "from" axis and add transitions
        transitions = self.transitions.view(1, k, k).expand(ins_num, k, k)
        return emissions.expand(ins_num, k, k) + transitions
class CRFLoss(nn.Module):
    """
    The negative log-likelihood loss for the Conditional Random Field Module

    Parameters
    ----------
    y_map : ``dict``, required.
        a ``dict`` maps from tag string to tag index.
    average_batch : ``bool``, optional, (default=True).
        whether the return score would be averaged per batch.
    """
    def __init__(self,
                 y_map: dict,
                 average_batch: bool = True):
        super(CRFLoss, self).__init__()
        self.tagset_size = len(y_map)
        self.start_tag = y_map['<s>']
        self.end_tag = y_map['<eof>']
        self.average_batch = average_batch

    def forward(self, scores, target, mask):
        """
        calculate the negative log likelihood for the conditional random field.

        Parameters
        ----------
        scores: ``torch.FloatTensor``, required.
            the potential score for the conditional random field, of shape (seq_len, batch_size, from_tag_size, to_tag_size).
        target: ``torch.LongTensor``, required.
            the positive path for the conditional random field, of shape (seq_len, batch_size).
        mask: ``torch.ByteTensor``, required.
            the mask for the unpadded sentence parts, of shape (seq_len, batch_size).

        Returns
        -------
        loss: ``torch.FloatTensor``.
            The NLL loss.
        """
        seq_len = scores.size(0)
        bat_size = scores.size(1)
        # gold-path score: gather the (from, to) potential chosen by `target`
        # at each position, summed over unpadded positions only
        # NOTE(review): masked_select assumes a uint8/bool mask — confirm
        # against the torch version in use.
        tg_energy = torch.gather(scores.view(seq_len, bat_size, -1), 2, target.unsqueeze(2)).view(seq_len, bat_size)
        tg_energy = tg_energy.masked_select(mask).sum()
        # forward algorithm: accumulate log-sum-exp over all paths
        seq_iter = enumerate(scores)
        _, inivalues = seq_iter.__next__()
        # initialize with transitions out of the start tag
        partition = inivalues[:, self.start_tag, :].squeeze(1).clone()
        for idx, cur_values in seq_iter:
            cur_values = cur_values + partition.unsqueeze(2).expand(bat_size, self.tagset_size, self.tagset_size)
            cur_partition = utils.log_sum_exp(cur_values)
            # only update the partition at non-padding positions
            mask_idx = mask[idx, :].view(bat_size, 1).expand(bat_size, self.tagset_size)
            partition.masked_scatter_(mask_idx, cur_partition.masked_select(mask_idx))
        # total log partition: paths ending at the end tag, summed over batch
        partition = partition[:, self.end_tag].sum()
        if self.average_batch:
            return (partition - tg_energy) / bat_size
        else:
            return (partition - tg_energy)
class CRFDecode():
    """
    Viterbi decoder (and IOBES span extractor) for the Conditional Random
    Field Module

    Parameters
    ----------
    y_map : ``dict``, required.
        a ``dict`` maps from tag string to tag index.
    """
    def __init__(self, y_map: dict):
        self.tagset_size = len(y_map)
        self.start_tag = y_map['<s>']
        self.end_tag = y_map['<eof>']
        self.y_map = y_map
        # reverse map: tag index -> tag string (used by `to_spans`)
        self.r_y_map = {v:k for k, v in self.y_map.items()}

    def decode(self, scores, mask):
        """
        find the best path from the potential scores by the viterbi decoding algorithm.

        Parameters
        ----------
        scores: ``torch.FloatTensor``, required.
            the potential score for the conditional random field, of shape (seq_len, batch_size, from_tag_size, to_tag_size).
        mask: ``torch.ByteTensor``, required.
            the mask for the unpadded sentence parts, of shape (seq_len, batch_size).

        Returns
        -------
        output: ``torch.LongTensor``.
            A LongTensor of shape (seq_len - 1, batch_size)
        """
        seq_len = scores.size(0)
        bat_size = scores.size(1)
        # invert the mask: from here on, 1 marks padding
        # NOTE(review): `1 - mask` assumes a uint8 mask; newer PyTorch bool
        # masks require `~mask` — confirm against the torch version in use.
        mask = 1 - mask.data
        decode_idx = torch.LongTensor(seq_len-1, bat_size)
        # forward pass: best score per tag plus backpointers
        seq_iter = enumerate(scores)
        _, inivalues = seq_iter.__next__()
        forscores = inivalues[:, self.start_tag, :]
        back_points = list()
        for idx, cur_values in seq_iter:
            cur_values = cur_values + forscores.contiguous().view(bat_size, self.tagset_size, 1).expand(bat_size, self.tagset_size, self.tagset_size)
            forscores, cur_bp = torch.max(cur_values, 1)
            # padded positions are forced to point at the end tag
            cur_bp.masked_fill_(mask[idx].view(bat_size, 1).expand(bat_size, self.tagset_size), self.end_tag)
            back_points.append(cur_bp)
        # backward pass: follow the backpointers starting from the end tag
        pointer = back_points[-1][:, self.end_tag]
        decode_idx[-1] = pointer
        for idx in range(len(back_points)-2, -1, -1):
            back_point = back_points[idx]
            index = pointer.contiguous().view(-1, 1)
            pointer = torch.gather(back_point, 1, index).view(-1)
            decode_idx[idx] = pointer
        return decode_idx

    def to_spans(self, sequence):
        """
        decode the best path to spans.

        Parameters
        ----------
        sequence: list, required.
            the list of best label indexes paths .

        Returns
        -------
        output: ``set``.
            A set of chunks, each encoded as 'TYPE@pos0@pos1@...'.
        """
        chunks = []
        current = None
        for i, y in enumerate(sequence):
            label = self.r_y_map[y]
            if label.startswith('B-'):
                # B-: close any open chunk and start a new one
                if current is not None:
                    chunks.append('@'.join(current))
                current = [label.replace('B-', ''), '%d' % i]
            elif label.startswith('S-'):
                # S-: single-token chunk; close any open chunk first
                if current is not None:
                    chunks.append('@'.join(current))
                    current = None
                base = label.replace('S-', '')
                chunks.append('@'.join([base, '%d' % i]))
            elif label.startswith('I-'):
                # I-: extend the open chunk if the type matches; otherwise
                # start a new chunk (recovers from inconsistent paths)
                if current is not None:
                    base = label.replace('I-', '')
                    if base == current[0]:
                        current.append('%d' % i)
                    else:
                        chunks.append('@'.join(current))
                        current = [base, '%d' % i]
                else:
                    current = [label.replace('I-', ''), '%d' % i]
            elif label.startswith('E-'):
                # E-: close the open chunk (or emit a one-token chunk)
                if current is not None:
                    base = label.replace('E-', '')
                    if base == current[0]:
                        current.append('%d' % i)
                        chunks.append('@'.join(current))
                        current = None
                    else:
                        chunks.append('@'.join(current))
                        current = [base, '%d' % i]
                        chunks.append('@'.join(current))
                        current = None
                else:
                    current = [label.replace('E-', ''), '%d' % i]
                    chunks.append('@'.join(current))
                    current = None
            else:
                # O (or any other tag) closes the open chunk
                if current is not None:
                    chunks.append('@'.join(current))
                    current = None
        if current is not None:
            chunks.append('@'.join(current))
        return set(chunks)
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_seq/dataset.py | model_seq/dataset.py | """
.. module:: dataset
:synopsis: dataset for sequence labeling
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import pickle
import random
import functools
import itertools
from tqdm import tqdm
class SeqDataset(object):
"""
Dataset for Sequence Labeling
Parameters
----------
dataset : ``list``, required.
The encoded dataset (outputs of preprocess scripts).
flm_pad : ``int``, required.
The pad index for the forward language model.
blm_pad : ``int``, required.
The pad index for the backward language model.
w_pad : ``int``, required.
The pad index for the word-level inputs.
c_con : ``int``, required.
The index of connect character token for character-level inputs.
c_pad : ``int``, required.
The pad index for the character-level inputs.
y_start : ``int``, required.
The index of the start label token.
y_pad : ``int``, required.
The index of the pad label token.
y_size : ``int``, required.
The size of the tag set.
batch_size: ``int``, required.
Batch size.
"""
    def __init__(self,
                 dataset: list,
                 flm_pad: int,
                 blm_pad: int,
                 w_pad: int,
                 c_con: int,
                 c_pad: int,
                 y_start: int,
                 y_pad: int,
                 y_size: int,
                 batch_size: int):
        # All pad/special-token indices are documented on the class docstring.
        super(SeqDataset, self).__init__()
        self.flm_pad = flm_pad
        self.blm_pad = blm_pad
        self.w_pad = w_pad
        self.c_con = c_con
        self.c_pad = c_pad
        self.y_pad = y_pad
        self.y_size = y_size
        self.y_start = y_start
        self.batch_size = batch_size
        # flatten character lists, build the index, and shuffle once up front
        self.construct_index(dataset)
        self.shuffle()
    def shuffle(self):
        """
        shuffle the iteration order of the dataset (in place).
        """
        random.shuffle(self.shuffle_list)
    def get_tqdm(self, device):
        """
        construct dataset reader and the corresponding tqdm (progress bar).

        Parameters
        ----------
        device: ``torch.device``, required.
            the target device for the dataset loader.

        Returns
        -------
        reader: ``tqdm``.
            a progress-bar-wrapped lazy iterator over batchified samples.
        """
        return tqdm(self.reader(device), mininterval=2, total=self.index_length // self.batch_size, leave=False, file=sys.stdout, ncols=80)
def construct_index(self, dataset):
"""
construct index for the dataset.
Parameters
----------
dataset: ``list``, required.
the encoded dataset (outputs of preprocess scripts).
"""
for instance in dataset:
c_len = [len(tup)+1 for tup in instance[3]]
c_ins = [tup for ins in instance[3] for tup in (ins + [self.c_con])]
instance[3] = c_ins
instance.append(c_len)
self.dataset = dataset
self.index_length = len(dataset)
self.shuffle_list = list(range(0, self.index_length))
def reader(self, device):
"""
construct dataset reader.
Parameters
----------
device: ``torch.device``, required.
the target device for the dataset loader.
Returns
-------
reader: ``iterator``.
A lazy iterable object
"""
cur_idx = 0
while cur_idx < self.index_length:
end_index = min(cur_idx + self.batch_size, self.index_length)
batch = [self.dataset[self.shuffle_list[index]] for index in range(cur_idx, end_index)]
cur_idx = end_index
yield self.batchify(batch, device)
self.shuffle()
    def batchify(self, batch, device):
        """
        batchify a batch of data and move to a device.

        Slot layout of ``tmp_batch`` (matches the unpack order in
        ``eval_wc.calc_score``): 0 fwd chars, 1 fwd char output positions,
        2 bwd chars, 3 bwd char output positions, 4 fwd-LM words,
        5 bwd-LM words, 6 bwd-LM re-index, 7 words, 8 CRF label pairs,
        9 mask, 10 gold labels (kept as python lists).

        Parameters
        ----------
        batch: ``list``, required.
            a sample from the encoded dataset (outputs of preprocess scripts).
        device: ``torch.device``, required.
            the target device for the dataset loader.
        """
        cur_batch_size = len(batch)
        # pad every instance in the batch to the longest char/word sequence
        char_padded_len = max([len(tup[3]) for tup in batch])
        word_padded_len = max([len(tup[0]) for tup in batch])
        tmp_batch = [list() for ind in range(11)]
        for instance_ind in range(cur_batch_size):
            instance = batch[instance_ind]
            char_padded_len_ins = char_padded_len - len(instance[3])
            word_padded_len_ins = word_padded_len - len(instance[0])
            # forward / reversed character sequences, padded with c_pad
            tmp_batch[0].append(instance[3] + [self.c_pad] + [self.c_pad] * char_padded_len_ins)
            tmp_batch[2].append([self.c_pad] + instance[3][::-1] + [self.c_pad] * char_padded_len_ins)
            # cumulative char lengths -> flat indices (after transpose) of each
            # word-boundary output in the char RNN; offsets interleave instances
            tmp_p = list( itertools.accumulate(instance[5]+[1]+[0]* word_padded_len_ins) )
            tmp_batch[1].append([(x - 1) * cur_batch_size + instance_ind for x in tmp_p])
            tmp_p = list(itertools.accumulate([1]+instance[5][::-1]))[::-1] + [1]*word_padded_len_ins
            tmp_batch[3].append([(x - 1) * cur_batch_size + instance_ind for x in tmp_p])
            # forward / backward language-model word inputs
            tmp_batch[4].append(instance[0] + [self.flm_pad] + [self.flm_pad] * word_padded_len_ins)
            tmp_batch[5].append([self.blm_pad] + instance[1][::-1] + [self.blm_pad] * word_padded_len_ins)
            # re-index that maps the reversed backward-LM outputs back to forward order
            tmp_p = list(range(len(instance[1]), -1, -1)) + list(range(len(instance[1])+1, word_padded_len+1))
            tmp_batch[6].append([x * cur_batch_size + instance_ind for x in tmp_p])
            tmp_batch[7].append(instance[2] + [self.w_pad] + [self.w_pad] * word_padded_len_ins)
            # CRF transition targets encoded as from_tag * y_size + to_tag,
            # bracketed by y_start at the front and y_pad at the end
            tmp_batch[8].append([self.y_start * self.y_size + instance[4][0]] + [instance[4][ind] * self.y_size + instance[4][ind+1] for ind in range(len(instance[4]) - 1)] + [instance[4][-1] * self.y_size + self.y_pad] + [self.y_pad * self.y_size + self.y_pad] * word_padded_len_ins)
            tmp_batch[9].append([1] * len(instance[4]) + [1] + [0] * word_padded_len_ins)
            tmp_batch[10].append(instance[4])
        # batch-first lists -> (seq_len, batch) tensors; slot 9 is the byte mask
        tbt = [torch.LongTensor(v).transpose(0, 1).contiguous() for v in tmp_batch[0:9]] + [torch.ByteTensor(tmp_batch[9]).transpose(0, 1).contiguous()]
        # position-index tensors are consumed flat by index_select
        tbt[1] = tbt[1].view(-1)
        tbt[3] = tbt[3].view(-1)
        tbt[6] = tbt[6].view(-1)
        return [ten.to(device) for ten in tbt] + [tmp_batch[10]] | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_seq/evaluator.py | model_seq/evaluator.py | """
.. module:: evaluator
:synopsis: evaluator for sequence labeling
.. moduleauthor:: Liyuan Liu
"""
import torch
import numpy as np
import itertools
import model_seq.utils as utils
from torch.autograd import Variable
class eval_batch:
    """
    Base class for evaluation, provide method to calculate f1 score and accuracy.

    Parameters
    ----------
    decoder : ``torch.nn.Module``, required.
        the decoder module, which needs to contain the ``to_spans()`` method.
    """
    def __init__(self, decoder):
        self.decoder = decoder

    def reset(self):
        """
        Reset all counters; call once before each evaluation pass.
        """
        self.correct_labels = 0
        self.total_labels = 0
        self.gold_count = 0
        self.guess_count = 0
        self.overlap_count = 0

    def calc_f1_batch(self, decoded_data, target_data):
        """
        Update statistics for the F1 score.

        Parameters
        ----------
        decoded_data: ``torch.LongTensor``, required.
            the decoded best label index pathes, of shape (seq_len, batch).
        target_data: ``torch.LongTensor``, required.
            the golden label index pathes (one sequence per instance).
        """
        batch_decoded = torch.unbind(decoded_data, 1)
        for decoded, target in zip(batch_decoded, target_data):
            # only the first len(target) positions are real (unpadded) tokens
            length = len(target)
            best_path = decoded[:length]
            correct_labels_i, total_labels_i, gold_count_i, guess_count_i, overlap_count_i = self.eval_instance(best_path.numpy(), target)
            self.correct_labels += correct_labels_i
            self.total_labels += total_labels_i
            self.gold_count += gold_count_i
            self.guess_count += guess_count_i
            self.overlap_count += overlap_count_i

    def calc_acc_batch(self, decoded_data, target_data):
        """
        Update statistics for the accuracy score.

        Parameters
        ----------
        decoded_data: ``torch.LongTensor``, required.
            the decoded best label index pathes, of shape (seq_len, batch).
        target_data: ``torch.LongTensor``, required.
            the golden label index pathes (one sequence per instance).
        """
        batch_decoded = torch.unbind(decoded_data, 1)
        for decoded, target in zip(batch_decoded, target_data):
            # remove padding
            length = len(target)
            best_path = decoded[:length].numpy()
            self.total_labels += length
            # bug fix: compare against ``target`` (previously the undefined
            # name ``gold``, which raised NameError at runtime)
            self.correct_labels += np.sum(np.equal(best_path, target))

    def f1_score(self):
        """
        Calculate (f1, precision, recall, accuracy) from the inner counters.
        Returns all zeros when nothing was guessed or either P or R is zero.
        """
        if self.guess_count == 0:
            return 0.0, 0.0, 0.0, 0.0
        precision = self.overlap_count / float(self.guess_count)
        recall = self.overlap_count / float(self.gold_count)
        if precision == 0.0 or recall == 0.0:
            return 0.0, 0.0, 0.0, 0.0
        f = 2 * (precision * recall) / (precision + recall)
        accuracy = float(self.correct_labels) / self.total_labels
        return f, precision, recall, accuracy

    def acc_score(self):
        """
        Calculate the accuracy score based on the inner counters.
        """
        if 0 == self.total_labels:
            return 0.0
        accuracy = float(self.correct_labels) / self.total_labels
        return accuracy

    def eval_instance(self, best_path, gold):
        """
        Calculate statistics to update the inner counters for one instance.

        Parameters
        ----------
        best_path: required.
            the decoded best label index path (numpy array).
        gold: required.
            the golden label index path.

        Returns
        -------
        (correct_labels, total_labels, gold_count, guess_count, overlap_count)
        """
        total_labels = len(best_path)
        correct_labels = np.sum(np.equal(best_path, gold))
        # span-level comparison via the decoder's chunk extraction
        gold_chunks = self.decoder.to_spans(gold)
        gold_count = len(gold_chunks)
        guess_chunks = self.decoder.to_spans(best_path)
        guess_count = len(guess_chunks)
        overlap_chunks = gold_chunks & guess_chunks
        overlap_count = len(overlap_chunks)
        return correct_labels, total_labels, gold_count, guess_count, overlap_count
class eval_wc(eval_batch):
"""
evaluation class for LD-Net
Parameters
----------
decoder : ``torch.nn.Module``, required.
the decoder module, which needs to contain the ``to_span()`` and ``decode()`` method.
score_type : ``str``, required.
whether the f1 score or the accuracy is needed.
"""
def __init__(self, decoder, score_type):
eval_batch.__init__(self, decoder)
if 'f' in score_type:
self.eval_b = self.calc_f1_batch
self.calc_s = self.f1_score
else:
self.eval_b = self.calc_acc_batch
self.calc_s = self.acc_score
    def calc_score(self, seq_model, dataset_loader):
        """
        calculate scores

        Runs the model over the loader, decodes each batch, and accumulates
        the statistics bound in ``__init__`` (F1 or accuracy).

        NOTE(review): this loop does not wrap inference in ``torch.no_grad()``;
        gradients are presumably not needed here — confirm and consider adding.

        Parameters
        ----------
        seq_model: required.
            sequence labeling model.
        dataset_loader: required.
            the dataset loader.

        Returns
        -------
        score: ``float``.
            calculated score.
        """
        seq_model.eval()
        self.reset()
        # batch layout matches SeqDataset.batchify: chars, positions, LM words,
        # words, (unused), label mask, gold labels
        for f_c, f_p, b_c, b_p, flm_w, blm_w, blm_ind, f_w, _, f_y_m, g_y in dataset_loader:
            scores = seq_model(f_c, f_p, b_c, b_p, flm_w, blm_w, blm_ind, f_w)
            decoded = self.decoder.decode(scores.data, f_y_m)
            self.eval_b(decoded, g_y)
        return self.calc_s() | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_seq/utils.py | model_seq/utils.py | """
.. module:: utils
:synopsis: utils
.. moduleauthor:: Liyuan Liu
"""
import numpy as np
import torch
import json
import torch
import torch.nn as nn
import torch.nn.init
from torch.autograd import Variable
def log_sum_exp(vec):
    """
    Numerically stable log-sum-exp over dimension 1.

    Parameters
    ----------
    vec : ``torch.FloatTensor``, required.
        input vector, of shape(ins_num, from_tag_size, to_tag_size)

    Returns
    -------
    sum: ``torch.FloatTensor``.
        log sum exp results, tensor of shape (ins_num, to_tag_size)
    """
    # subtract the per-slice maximum before exponentiating to avoid overflow
    max_score = torch.max(vec, 1)[0]
    shifted = vec - max_score.unsqueeze(1).expand_as(vec)
    return max_score + torch.log(torch.exp(shifted).sum(1))
def repackage_hidden(h):
    """
    Detach hidden states from their computation history.

    Parameters
    ----------
    h : ``Tuple`` or ``Tensors``, required.
        Tuple or Tensors, hidden states.

    Returns
    -------
    hidden: ``Tuple`` or ``Tensors``.
        detached hidden states
    """
    # exact type check kept deliberately (matches original semantics)
    if type(h) == torch.Tensor:
        return h.detach()
    # recurse over containers, e.g. the (h, c) pair of an LSTM
    return tuple(repackage_hidden(part) for part in h)
def to_scalar(var):
    """
    Convert a single-element tensor to a python scalar.
    """
    flat = var.view(-1)
    return flat.item()
def init_embedding(input_embedding):
    """
    Initialize embedding weights uniformly in [-sqrt(3/dim), sqrt(3/dim)].
    """
    scale = np.sqrt(3.0 / input_embedding.size(1))
    nn.init.uniform_(input_embedding, -scale, scale)
def init_linear(input_linear):
    """
    Initialize a linear layer: Xavier-style uniform weights, zero bias.
    """
    fan_out, fan_in = input_linear.weight.size(0), input_linear.weight.size(1)
    scale = np.sqrt(6.0 / (fan_out + fan_in))
    nn.init.uniform_(input_linear.weight, -scale, scale)
    if input_linear.bias is not None:
        input_linear.bias.data.zero_()
def adjust_learning_rate(optimizer, lr):
    """
    Set the learning rate of every parameter group to ``lr``.

    Parameters
    ----------
    optimizer : required.
        pytorch optimizer.
    lr : ``float``, required.
        the target learning rate.
    """
    for group in optimizer.param_groups:
        group['lr'] = lr
def init_lstm(input_lstm):
    """
    Randomly initialize an LSTM: uniform weights per layer, zero biases with
    the second gate block (the forget gate in PyTorch's i,f,g,o layout) set to 1.

    NOTE(review): ``eval`` on attribute names works but ``getattr(input_lstm,
    'weight_ih_l' + str(ind))`` would be the safer idiom — consider replacing.
    """
    for ind in range(0, input_lstm.num_layers):
        # input-to-hidden weights; size(0) is 4*hidden, hence the /4 in fan-out
        weight = eval('input_lstm.weight_ih_l'+str(ind))
        bias = np.sqrt(6.0 / (weight.size(0)/4 + weight.size(1)))
        nn.init.uniform_(weight, -bias, bias)
        # hidden-to-hidden weights
        weight = eval('input_lstm.weight_hh_l'+str(ind))
        bias = np.sqrt(6.0 / (weight.size(0)/4 + weight.size(1)))
        nn.init.uniform_(weight, -bias, bias)
    if input_lstm.bias:
        for ind in range(0, input_lstm.num_layers):
            # zero biases, then set the forget-gate slice to 1 (common trick to
            # ease gradient flow early in training)
            weight = eval('input_lstm.bias_ih_l'+str(ind))
            weight.data.zero_()
            weight.data[input_lstm.hidden_size: 2 * input_lstm.hidden_size] = 1
            weight = eval('input_lstm.bias_hh_l'+str(ind))
            weight.data.zero_()
            weight.data[input_lstm.hidden_size: 2 * input_lstm.hidden_size] = 1 | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_seq/__init__.py | model_seq/__init__.py | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false | |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_seq/elmo.py | model_seq/elmo.py | """
.. module:: elmo
:synopsis: deep contextualized representation
.. moduleauthor:: Liyuan Liu
"""
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import model_seq.utils as utils
import torch
import torch.nn as nn
import torch.nn.functional as F
class EBUnit(nn.Module):
    """
    The basic recurrent unit for the ELMo RNNs wrapper.

    Parameters
    ----------
    ori_unit : ``torch.nn.Module``, required.
        The original rnn unit; its ``layer`` and ``output_dim`` are reused.
    droprate : ``float``, required.
        The dropout ratio applied to the unit's output.
    fix_rate: ``bool``, required.
        Whether to fix the mixture ratio (unused at this level).
    """
    def __init__(self, ori_unit, droprate, fix_rate):
        super(EBUnit, self).__init__()
        # reuse the wrapped unit's recurrent layer and its output size
        self.layer = ori_unit.layer
        self.output_dim = ori_unit.output_dim
        self.droprate = droprate

    def forward(self, x):
        """
        Run the wrapped RNN and apply dropout to its output.

        Parameters
        ----------
        x : ``torch.FloatTensor``, required.
            The input tensor, of shape (seq_len, batch_size, input_dim).

        Returns
        ----------
        output: ``torch.FloatTensor``.
            The output of RNNs.
        """
        hidden_seq, _ = self.layer(x)
        if self.droprate > 0:
            hidden_seq = F.dropout(hidden_seq, p=self.droprate, training=self.training)
        return hidden_seq
class ERNN(nn.Module):
    """
    The multi-layer recurrent networks for the ELMo RNNs wrapper.

    Outputs of the stacked (frozen) RNN layers are combined with a learned
    softmax-normalized mixture, scaled by a scalar ``gamma``.

    Parameters
    ----------
    ori_drnn : ``torch.nn.Module``, required.
        The original module of rnn networks.
    droprate : ``float``, required.
        The dropout ratio.
    fix_rate: ``bool``, required.
        Whether to freeze ``gamma`` and the mixture weights.
    """
    def __init__(self, ori_drnn, droprate, fix_rate):
        super(ERNN, self).__init__()
        wrapped = [EBUnit(unit, droprate, fix_rate) for unit in ori_drnn.layer._modules.values()]
        self.layer_list = wrapped
        # scalar scale and per-layer (pre-softmax) mixture weights
        self.gamma = nn.Parameter(torch.FloatTensor([1.0]))
        self.weight_list = nn.Parameter(torch.FloatTensor([0.0] * len(wrapped)))
        self.layer = nn.ModuleList(wrapped)
        # the wrapped RNN layers stay frozen
        for param in self.layer.parameters():
            param.requires_grad = False
        if fix_rate:
            # freeze the mixture as well
            self.gamma.requires_grad = False
            self.weight_list.requires_grad = False
        self.output_dim = wrapped[-1].output_dim

    def regularizer(self):
        """
        Squared deviation of the mixture weights from the uniform mixture.

        Returns
        ----------
        The regularization term.
        """
        deviation = self.weight_list - (1.0 / len(self.layer_list))
        return (deviation ** 2).sum()

    def forward(self, x):
        """
        Feed ``x`` through the stacked layers sequentially, returning the
        mixture-weighted sum of all intermediate layer outputs.

        Parameters
        ----------
        x : ``torch.FloatTensor``, required.
            the input tensor, of shape (seq_len, batch_size, input_dim).

        Returns
        ----------
        output: ``torch.FloatTensor``.
            The ELMo outputs.
        """
        mixture = self.gamma * F.softmax(self.weight_list, dim=0)
        blended = 0
        for idx, unit in enumerate(self.layer):
            x = unit(x)
            blended = blended + x * mixture[idx]
        return blended
class ElmoLM(nn.Module):
"""
The language model for the ELMo RNNs wrapper.
Parameters
----------
ori_lm : ``torch.nn.Module``, required.
the original module of language model.
backward : ``bool``, required.
whether the language model is backward.
droprate : ``float``, required.
the dropout ratrio.
fix_rate: ``bool``, required.
whether to fix the rqtio.
"""
def __init__(self, ori_lm, backward, droprate, fix_rate):
super(ElmoLM, self).__init__()
self.rnn = ERNN(ori_lm.rnn, droprate, fix_rate)
self.w_num = ori_lm.w_num
self.w_dim = ori_lm.w_dim
self.word_embed = ori_lm.word_embed
self.word_embed.weight.requires_grad = False
self.output_dim = ori_lm.rnn_output
self.backward = backward
def init_hidden(self):
"""
initialize hidden states.
"""
return
def regularizer(self):
"""
Calculate the regularization term.
Returns
----------
reg: ``list``.
The list of regularization terms.
"""
return self.rnn.regularizer()
def prox(self, lambda0):
"""
the proximal calculator.
"""
return 0.0
    def forward(self, w_in, ind=None):
        """
        Calculate the output.

        Parameters
        ----------
        w_in : ``torch.LongTensor``, required.
            the input tensor, of shape (seq_len, batch_size).
        ind : ``torch.LongTensor``, optional, (default=None).
            the index tensor for the backward language model, of shape (seq_len, batch_size).
            Required when ``self.backward`` is True.

        Returns
        ----------
        output: ``torch.FloatTensor``.
            The ELMo outputs.
        """
        w_emb = self.word_embed(w_in)
        out = self.rnn(w_emb)
        if self.backward:
            # flatten (seq, batch) rows, re-order them with ``ind`` so the
            # reversed backward-LM outputs line up with forward positions,
            # then restore the original shape
            out_size = out.size()
            out = out.view(out_size[0] * out_size[1], out_size[2]).index_select(0, ind).contiguous().view(out_size)
        return out | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_seq/seqlabel.py | model_seq/seqlabel.py | """
.. module:: seqlabel
:synopsis: sequence labeling model
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import model_seq.utils as utils
from model_seq.crf import CRF
class SeqLabel(nn.Module):
    """
    Sequence Labeling model augumented with language model.

    Parameters
    ----------
    f_lm : ``torch.nn.Module``, required.
        The forward language modle for contextualized representations.
    b_lm : ``torch.nn.Module``, required.
        The backward language modle for contextualized representations.
    c_num : ``int`` , required.
        The number of characters.
    c_dim : ``int`` , required.
        The dimension of character embedding.
    c_hidden : ``int`` , required.
        The dimension of character hidden states.
    c_layer : ``int`` , required.
        The number of character lstms.
    w_num : ``int`` , required.
        The number of words.
    w_dim : ``int`` , required.
        The dimension of word embedding.
    w_hidden : ``int`` , required.
        The dimension of word hidden states.
    w_layer : ``int`` , required.
        The number of word lstms.
    y_num : ``int`` , required.
        The number of tags types.
    droprate : ``float`` , required
        The dropout ratio.
    unit : "str", optional, (default = 'lstm')
        The type of the recurrent unit.
    """
    def __init__(self, f_lm, b_lm,
                 c_num: int,
                 c_dim: int,
                 c_hidden: int,
                 c_layer: int,
                 w_num: int,
                 w_dim: int,
                 w_hidden: int,
                 w_layer: int,
                 y_num: int,
                 droprate: float,
                 unit: str = 'lstm'):
        super(SeqLabel, self).__init__()
        rnnunit_map = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}
        self.f_lm = f_lm
        self.b_lm = b_lm
        self.unit_type = unit
        self.char_embed = nn.Embedding(c_num, c_dim)
        self.word_embed = nn.Embedding(w_num, w_dim)
        # projects the concatenated fwd+bwd char states down to word-embed size
        self.char_seq = nn.Linear(c_hidden * 2, w_dim)
        # projects the concatenated fwd+bwd LM outputs down to word-embed size
        self.lm_seq = nn.Linear(f_lm.output_dim + b_lm.output_dim, w_dim)
        self.relu = nn.ReLU()
        self.c_hidden = c_hidden
        # pytorch RNNs warn if dropout > 0 with a single layer, hence the guard
        tmp_rnn_dropout = droprate if c_layer > 1 else 0
        self.char_fw = rnnunit_map[unit](c_dim, c_hidden, c_layer, dropout = tmp_rnn_dropout)
        self.char_bw = rnnunit_map[unit](c_dim, c_hidden, c_layer, dropout = tmp_rnn_dropout)
        tmp_rnn_dropout = droprate if w_layer > 1 else 0
        # input is char-proj + lm-proj + word embedding, hence w_dim * 3
        self.word_rnn = rnnunit_map[unit](w_dim * 3, w_hidden // 2, w_layer, dropout = tmp_rnn_dropout, bidirectional = True)
        self.y_num = y_num
        self.crf = CRF(w_hidden, y_num)
        self.drop = nn.Dropout(p = droprate)
    def to_params(self):
        """
        Serialize model hyper-parameters into a dict (for checkpointing).
        """
        return {
            "model_type": "char-lstm-crf",
            "forward_lm": self.f_lm.to_params(),
            "backward_lm": self.b_lm.to_params(),
            "word_embed_num": self.word_embed.num_embeddings,
            "word_embed_dim": self.word_embed.embedding_dim,
            "char_embed_num": self.char_embed.num_embeddings,
            "char_embed_dim": self.char_embed.embedding_dim,
            "char_hidden": self.c_hidden,
            "char_layers": self.char_fw.num_layers,
            "word_hidden": self.word_rnn.hidden_size,
            "word_layers": self.word_rnn.num_layers,
            "droprate": self.drop.p,
            "y_num": self.y_num,
            "label_schema": "iobes",
            "unit_type": self.unit_type
        }
    def prune_dense_rnn(self):
        """
        Prune dense rnn to be smaller by deleting layers.
        """
        # the LMs return boolean masks over their output dims; keep only the
        # lm_seq input columns that survive pruning
        f_prune_mask = self.f_lm.prune_dense_rnn()
        b_prune_mask = self.b_lm.prune_dense_rnn()
        prune_mask = torch.cat([f_prune_mask, b_prune_mask], dim = 0)
        mask_index = prune_mask.nonzero().squeeze(1)
        self.lm_seq.weight = nn.Parameter(self.lm_seq.weight.data.index_select(1, mask_index).contiguous())
        self.lm_seq.in_features = self.lm_seq.weight.size(1)
    def set_batch_seq_size(self, sentence):
        """
        Set the batch size and sequence length.
        """
        # inputs are (seq_len, batch); cached for reshaping in forward()
        tmp = sentence.size()
        self.word_seq_length = tmp[0]
        self.batch_size = tmp[1]
    def load_pretrained_word_embedding(self, pre_word_embeddings):
        """
        Load pre-trained word embedding.
        """
        self.word_embed.weight = nn.Parameter(pre_word_embeddings)
    def rand_init(self):
        """
        Random initialization.
        """
        utils.init_embedding(self.char_embed.weight)
        utils.init_lstm(self.char_fw)
        utils.init_lstm(self.char_bw)
        utils.init_lstm(self.word_rnn)
        utils.init_linear(self.char_seq)
        utils.init_linear(self.lm_seq)
        self.crf.rand_init()
    def forward(self, f_c, f_p, b_c, b_p, flm_w, blm_w, blm_ind, f_w):
        """
        Calculate the output (crf potentials).

        Parameters
        ----------
        f_c : ``torch.LongTensor``, required.
            Character-level inputs in the forward direction.
        f_p : ``torch.LongTensor``, required.
            Ouput position of character-level inputs in the forward direction.
        b_c : ``torch.LongTensor``, required.
            Character-level inputs in the backward direction.
        b_p : ``torch.LongTensor``, required.
            Ouput position of character-level inputs in the backward direction.
        flm_w : ``torch.LongTensor``, required.
            Word-level inputs for the forward language model.
        blm_w : ``torch.LongTensor``, required.
            Word-level inputs for the backward language model.
        blm_ind : ``torch.LongTensor``, required.
            Ouput position of word-level inputs for the backward language model.
        f_w: ``torch.LongTensor``, required.
            Word-level inputs for the sequence labeling model.

        Returns
        -------
        output: ``torch.FloatTensor``.
            A float tensor of shape (sequence_len, batch_size, from_tag_size, to_tag_size)
        """
        self.set_batch_seq_size(f_w)
        # character-level features: run both directions, then pick the states
        # at word boundaries via the precomputed flat positions f_p / b_p
        f_c_e = self.drop(self.char_embed(f_c))
        b_c_e = self.drop(self.char_embed(b_c))
        f_c_e, _ = self.char_fw(f_c_e)
        b_c_e, _ = self.char_bw(b_c_e)
        f_c_e = f_c_e.view(-1, self.c_hidden).index_select(0, f_p).view(self.word_seq_length, self.batch_size, self.c_hidden)
        b_c_e = b_c_e.view(-1, self.c_hidden).index_select(0, b_p).view(self.word_seq_length, self.batch_size, self.c_hidden)
        c_o = self.drop(torch.cat([f_c_e, b_c_e], dim = 2))
        c_o = self.char_seq(c_o)
        # contextualized features from the frozen forward/backward LMs
        self.f_lm.init_hidden()
        self.b_lm.init_hidden()
        f_lm_e = self.f_lm(flm_w)
        b_lm_e = self.b_lm(blm_w, blm_ind)
        lm_o = self.drop(torch.cat([f_lm_e, b_lm_e], dim = 2))
        lm_o = self.relu(self.lm_seq(lm_o))
        # word features: char-proj + lm-proj + word embedding -> word RNN -> CRF
        w_e = self.word_embed(f_w)
        rnn_in = self.drop(torch.cat([c_o, lm_o, w_e], dim = 2))
        rnn_out, _ = self.word_rnn(rnn_in)
        crf_out = self.crf(self.drop(rnn_out)).view(self.word_seq_length, self.batch_size, self.y_num, self.y_num)
        return crf_out
class Vanilla_SeqLabel(nn.Module):
    """
    Sequence Labeling model augumented without language model.

    Parameters
    ----------
    f_lm : ``torch.nn.Module``, required.
        forward language modle for contextualized representations.
        NOTE(review): accepted for interface parity with ``SeqLabel`` but unused.
    b_lm : ``torch.nn.Module``, required.
        backward language modle for contextualized representations.
        NOTE(review): accepted for interface parity with ``SeqLabel`` but unused.
    c_num : ``int`` , required.
        number of characters.
    c_dim : ``int`` , required.
        dimension of character embedding.
    c_hidden : ``int`` , required.
        dimension of character hidden states.
    c_layer : ``int`` , required.
        number of character lstms.
    w_num : ``int`` , required.
        number of words.
    w_dim : ``int`` , required.
        dimension of word embedding.
    w_hidden : ``int`` , required.
        dimension of word hidden states.
    w_layer : ``int`` , required.
        number of word lstms.
    y_num : ``int`` , required.
        number of tags types.
    droprate : ``float`` , required
        dropout ratio.
    unit : "str", optional, (default = 'lstm')
        type of the recurrent unit.
    """
    def __init__(self, f_lm, b_lm, c_num, c_dim, c_hidden, c_layer, w_num, w_dim, w_hidden, w_layer, y_num, droprate, unit='lstm'):
        super(Vanilla_SeqLabel, self).__init__()
        rnnunit_map = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}
        self.char_embed = nn.Embedding(c_num, c_dim)
        self.word_embed = nn.Embedding(w_num, w_dim)
        # projects the concatenated fwd+bwd char states down to word-embed size
        self.char_seq = nn.Linear(c_hidden * 2, w_dim)
        self.c_hidden = c_hidden
        # NOTE(review): unlike SeqLabel, droprate is passed without the
        # single-layer guard — pytorch warns when dropout > 0 and layers == 1
        self.char_fw = rnnunit_map[unit](c_dim, c_hidden, c_layer, dropout = droprate)
        self.char_bw = rnnunit_map[unit](c_dim, c_hidden, c_layer, dropout = droprate)
        # input is char-proj + word embedding, hence w_dim + w_dim
        self.word_rnn = rnnunit_map[unit](w_dim + w_dim, w_hidden // 2, w_layer, dropout = droprate, bidirectional = True)
        self.y_num = y_num
        self.crf = CRF(w_hidden, y_num)
        self.drop = nn.Dropout(p = droprate)
    def set_batch_seq_size(self, sentence):
        """
        set batch size and sequence length
        """
        # inputs are (seq_len, batch); cached for reshaping in forward()
        tmp = sentence.size()
        self.word_seq_length = tmp[0]
        self.batch_size = tmp[1]
    def load_pretrained_word_embedding(self, pre_word_embeddings):
        """
        Load pre-trained word embedding.
        """
        self.word_embed.weight = nn.Parameter(pre_word_embeddings)
    def rand_init(self):
        """
        Random initialization.
        """
        utils.init_embedding(self.char_embed.weight)
        utils.init_lstm(self.char_fw)
        utils.init_lstm(self.char_bw)
        utils.init_lstm(self.word_rnn)
        utils.init_linear(self.char_seq)
        self.crf.rand_init()
    def forward(self, f_c, f_p, b_c, b_p, flm_w, blm_w, blm_ind, f_w):
        """
        Calculate the output (crf potentials).

        The language-model inputs (``flm_w``, ``blm_w``, ``blm_ind``) are
        accepted for interface parity with ``SeqLabel`` but are not used.

        Parameters
        ----------
        f_c : ``torch.LongTensor``, required.
            Character-level inputs in the forward direction.
        f_p : ``torch.LongTensor``, required.
            Ouput position of character-level inputs in the forward direction.
        b_c : ``torch.LongTensor``, required.
            Character-level inputs in the backward direction.
        b_p : ``torch.LongTensor``, required.
            Ouput position of character-level inputs in the backward direction.
        flm_w : ``torch.LongTensor``, required.
            Word-level inputs for the forward language model (unused).
        blm_w : ``torch.LongTensor``, required.
            Word-level inputs for the backward language model (unused).
        blm_ind : ``torch.LongTensor``, required.
            Ouput position of word-level inputs for the backward language model (unused).
        f_w: ``torch.LongTensor``, required.
            Word-level inputs for the sequence labeling model.

        Returns
        -------
        output: ``torch.FloatTensor``.
            A float tensor of shape (sequence_len, batch_size, from_tag_size, to_tag_size)
        """
        self.set_batch_seq_size(f_w)
        # character-level features at word boundaries (see SeqLabel.forward)
        f_c_e = self.drop(self.char_embed(f_c))
        b_c_e = self.drop(self.char_embed(b_c))
        f_c_e, _ = self.char_fw(f_c_e)
        b_c_e, _ = self.char_bw(b_c_e)
        f_c_e = f_c_e.view(-1, self.c_hidden).index_select(0, f_p).view(self.word_seq_length, self.batch_size, self.c_hidden)
        b_c_e = b_c_e.view(-1, self.c_hidden).index_select(0, b_p).view(self.word_seq_length, self.batch_size, self.c_hidden)
        c_o = self.drop(torch.cat([f_c_e, b_c_e], dim = 2))
        c_o = self.char_seq(c_o)
        # word features: char-proj + word embedding -> word RNN -> CRF
        w_e = self.word_embed(f_w)
        rnn_in = self.drop(torch.cat([c_o, w_e], dim = 2))
        rnn_out, _ = self.word_rnn(rnn_in)
        crf_out = self.crf(self.drop(rnn_out)).view(self.word_seq_length, self.batch_size, self.y_num, self.y_num)
        return crf_out | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_word_ada/LM.py | model_word_ada/LM.py | """
.. module:: LM
:synopsis: language modeling
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import model_word_ada.utils as utils
class LM(nn.Module):
    """
    The language model model.

    Parameters
    ----------
    rnn : ``torch.nn.Module``, required.
        The RNNs network.
    soft_max : ``torch.nn.Module``, required.
        The softmax layer.
    w_num : ``int`` , required.
        The number of words.
    w_dim : ``int`` , required.
        The dimension of word embedding.
    droprate : ``float`` , required
        The dropout ratio.
    label_dim : ``int`` , required.
        The input dimension of softmax; <= 0 disables the extra projection.
    add_relu : ``bool``, optional, (default = False).
        Whether to apply a ReLU after the projection (only used with projection).
    """
    def __init__(self, rnn, soft_max, w_num, w_dim, droprate, label_dim = -1, add_relu=False):
        super(LM, self).__init__()
        self.rnn = rnn
        self.soft_max = soft_max
        self.w_num = w_num
        self.w_dim = w_dim
        self.word_embed = nn.Embedding(w_num, w_dim)
        self.rnn_output = self.rnn.output_dim
        # optional projection of RNN outputs before the softmax layer
        self.add_proj = label_dim > 0
        if self.add_proj:
            self.project = nn.Linear(self.rnn_output, label_dim)
            if add_relu:
                self.relu = nn.ReLU()
            else:
                # identity stand-in so forward() can call self.relu unconditionally
                self.relu = lambda x: x
        self.drop = nn.Dropout(p=droprate)
    def load_embed(self, origin_lm):
        """
        Share the word embedding and softmax layers of another language model.
        """
        self.word_embed = origin_lm.word_embed
        self.soft_max = origin_lm.soft_max
    def rand_ini(self):
        """
        Random initialization.
        """
        self.rnn.rand_ini()
        # utils.init_linear(self.project)
        self.soft_max.rand_ini()
        # if not self.tied_weight:
        utils.init_embedding(self.word_embed.weight)
        if self.add_proj:
            utils.init_linear(self.project)
    def init_hidden(self):
        """
        Initialize hidden states of the underlying RNN.
        """
        self.rnn.init_hidden()
    def forward(self, w_in, target):
        """
        Calculate the loss.

        Parameters
        ----------
        w_in : ``torch.LongTensor``, required.
            the input tensor, of shape (word_num, input_dim).
        target : ``torch.FloatTensor``, required.
            the target of the language model, of shape (word_num).

        Returns
        ----------
        loss: ``torch.FloatTensor``.
            The NLL loss.
        """
        w_emb = self.word_embed(w_in)
        w_emb = self.drop(w_emb)
        # flatten (seq, batch) so the softmax layer sees one row per token
        out = self.rnn(w_emb).contiguous().view(-1, self.rnn_output)
        if self.add_proj:
            out = self.drop(self.relu(self.project(out)))
            # out = self.drop(self.project(out))
        out = self.soft_max(out, target)
        return out
    def log_prob(self, w_in):
        """
        Calculate log-probability for the whole dictionary.

        Parameters
        ----------
        w_in : ``torch.FloatTensor``, required.
            the input tensor, of shape (word_num, input_dim).

        Returns
        ----------
        prob: ``torch.FloatTensor``.
            The full log-probability.
        """
        # note: unlike forward(), no dropout is applied here (inference path)
        w_emb = self.word_embed(w_in)
        out = self.rnn(w_emb).contiguous().view(-1, self.rnn_output)
        if self.add_proj:
            out = self.relu(self.project(out))
        out = self.soft_max.log_prob(out, w_emb.device)
        return out | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_word_ada/dataset.py | model_word_ada/dataset.py | """
.. module:: dataset
:synopsis: dataset for language modeling
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import pickle
import random
from tqdm import tqdm
from torch.utils.data import Dataset
class EvalDataset(object):
    """
    Dataset for Language Modeling (evaluation).

    Parameters
    ----------
    dataset : ``list``, required.
        The encoded dataset (outputs of preprocess scripts).
    sequence_length: ``int``, required.
        Sequence Length.
    """
    def __init__(self, dataset, sequence_length):
        super(EvalDataset, self).__init__()
        self.dataset = dataset
        self.sequence_length = sequence_length
        self.construct_index()

    def get_tqdm(self, device):
        """
        construct dataset reader and the corresponding tqdm.

        Parameters
        ----------
        device: ``torch.device``, required.
            the target device for the dataset loader.
        """
        return tqdm(self.reader(device), mininterval=2, total=self.index_length, leave=False, file=sys.stdout, ncols=80)

    def construct_index(self):
        """
        Chop the token stream into fixed-length (input, shifted-target) chunks.
        The trailing partial chunk (shorter than ``sequence_length``) is kept.
        """
        token_per_batch = self.sequence_length
        tot_num = len(self.dataset) - 1
        res_num = tot_num - tot_num % token_per_batch
        # y is x shifted by one token (next-word prediction targets)
        self.x = list(torch.unbind(torch.LongTensor(self.dataset[0:res_num]).view(-1, self.sequence_length), 0))
        self.y = list(torch.unbind(torch.LongTensor(self.dataset[1:res_num+1]).view(-1, self.sequence_length), 0))
        self.x.append(torch.LongTensor(self.dataset[res_num:tot_num]))
        self.y.append(torch.LongTensor(self.dataset[res_num+1:tot_num+1]))
        self.index_length = len(self.x)
        self.cur_idx = 0

    def reader(self, device):
        """
        construct dataset reader.

        Yields one (word, label) chunk per call and advances ``cur_idx``;
        once all chunks are consumed, resets the cursor and ends iteration.

        Parameters
        ----------
        device: ``torch.device``, required.
            the target device for the dataset loader.

        Returns
        -------
        reader: ``iterator``.
            A lazy iterable object
        """
        if self.cur_idx == self.index_length:
            self.cur_idx = 0
            # bug fix: ``raise StopIteration`` inside a generator is converted
            # to RuntimeError under PEP 479 (Python 3.7+); a plain return ends
            # the iteration as originally intended.
            return
        word_t = self.x[self.cur_idx].to(device).view(-1, 1)
        label_t = self.y[self.cur_idx].to(device).view(-1, 1)
        self.cur_idx += 1
        yield word_t, label_t
class LargeDataset(object):
    """
    Lazy Dataset for Language Modeling

    Parameters
    ----------
    root : ``str``, required.
        The root folder for dataset files.
    range_idx : ``int``, required.
        The maximum file index for the input files (train_*.pk).
    batch_size : ``int``, required.
        Batch size.
    sequence_length: ``int``, required.
        Sequence Length.
    """
    def __init__(self, root, range_idx, batch_size, sequence_length):
        super(LargeDataset, self).__init__()
        self.root = root
        self.range_idx = range_idx
        # order in which the train_*.pk shards are visited
        self.shuffle_list = list(range(0, range_idx))
        self.shuffle()
        self.batch_size = batch_size
        self.sequence_length = sequence_length
        self.token_per_batch = self.batch_size * self.sequence_length
        # unknown until the first full epoch has been counted
        self.total_batch_num = -1
    def shuffle(self):
        """
        shuffle dataset
        """
        random.shuffle(self.shuffle_list)
    def get_tqdm(self, device):
        """
        construct dataset reader and the corresponding tqdm.

        Parameters
        ----------
        device: ``torch.device``, required.
            the target device for the dataset loader.
        """
        # reset the per-epoch cursors before handing out a fresh iterator
        self.batch_count = 0
        self.cur_idx = 0
        self.file_idx = 0
        self.index_length = 0
        if self.total_batch_num <= 0:
            # first epoch: total is unknown, so tqdm runs without a bar length
            return tqdm(self.reader(device), mininterval=2, leave=False, file=sys.stdout).__iter__()
        else:
            return tqdm(self.reader(device), mininterval=2, total=self.total_batch_num, leave=False, file=sys.stdout, ncols=80).__iter__()
    def reader(self, device):
        """
        construct dataset reader.

        Parameters
        ----------
        device: ``torch.device``, required.
            the target device for the dataset loader.

        Returns
        -------
        reader: ``iterator``.
            A lazy iterable object
        """
        # stream shard after shard; each shard is pre-batched by open_next()
        while self.file_idx < self.range_idx:
            self.open_next()
            while self.cur_idx < self.index_length:
                word_t = self.x[self.cur_idx].to(device)
                # label_t = self.y[self.cur_idx].to(device)
                label_t = self.y[self.cur_idx].to(device)
                self.cur_idx += 1
                yield word_t, label_t
        # remember the epoch size so later epochs get a proper tqdm total
        self.total_batch_num = self.batch_count
        self.shuffle()
    def open_next(self):
        """
        Open the next file.

        NOTE(review): the file handle passed to ``pickle.load`` is never
        closed, and unpickling assumes the shards are trusted local data.
        """
        self.dataset = pickle.load(open(self.root + 'train_' + str( self.shuffle_list[self.file_idx])+'.pk', 'rb'))
        # drop the tail so the shard divides evenly into (batch, seq) blocks
        res_num = len(self.dataset) - 1
        res_num = res_num - res_num % self.token_per_batch
        # reshape to (num_batches, sequence_length, batch_size) blocks
        self.x = torch.LongTensor(self.dataset[0:res_num]).view(self.batch_size, -1, self.sequence_length).transpose_(0, 1).transpose_(1, 2).contiguous()
        self.y = torch.LongTensor(self.dataset[1:res_num+1]).view(self.batch_size, -1, self.sequence_length).transpose_(0, 1).transpose_(1, 2).contiguous()
        self.index_length = self.x.size(0)
        self.cur_idx = 0
        self.batch_count += self.index_length
        self.file_idx += 1 | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_word_ada/utils.py | model_word_ada/utils.py | """
.. module:: utils
:synopsis: utils
.. moduleauthor:: Liyuan Liu
"""
import numpy as np
import torch
import json
import torch
import torch.nn as nn
import torch.nn.init
from torch.autograd import Variable
def repackage_hidden(h):
"""
Wraps hidden states in new Variables, to detach them from their history
Parameters
----------
h : ``Tuple`` or ``Tensors``, required.
Tuple or Tensors, hidden states.
Returns
-------
hidden: ``Tuple`` or ``Tensors``.
detached hidden states
"""
if type(h) == torch.Tensor:
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def to_scalar(var):
"""
convert a tensor to a scalar number
"""
return var.view(-1).item()
def init_embedding(input_embedding):
"""
random initialize embedding
"""
bias = np.sqrt(3.0 / input_embedding.size(1))
nn.init.uniform_(input_embedding, -bias, bias)
def init_linear(input_linear):
"""
random initialize linear projection.
"""
bias = np.sqrt(6.0 / (input_linear.weight.size(0) + input_linear.weight.size(1)))
nn.init.uniform_(input_linear.weight, -bias, bias)
if input_linear.bias is not None:
input_linear.bias.data.zero_()
def adjust_learning_rate(optimizer, lr):
"""
adjust learning to the the new value.
Parameters
----------
optimizer : required.
pytorch optimizer.
float : ``float``, required.
the target learning rate.
"""
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def init_lstm(input_lstm):
"""
random initialize lstms
"""
for ind in range(0, input_lstm.num_layers):
weight = eval('input_lstm.weight_ih_l'+str(ind))
bias = np.sqrt(6.0 / (weight.size(0)/4 + weight.size(1)))
nn.init.uniform_(weight, -bias, bias)
weight = eval('input_lstm.weight_hh_l'+str(ind))
bias = np.sqrt(6.0 / (weight.size(0)/4 + weight.size(1)))
nn.init.uniform_(weight, -bias, bias)
if input_lstm.bias:
for ind in range(0, input_lstm.num_layers):
weight = eval('input_lstm.bias_ih_l'+str(ind))
weight.data.zero_()
weight.data[input_lstm.hidden_size: 2 * input_lstm.hidden_size] = 1
weight = eval('input_lstm.bias_hh_l'+str(ind))
weight.data.zero_()
weight.data[input_lstm.hidden_size: 2 * input_lstm.hidden_size] = 1 | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_word_ada/basic.py | model_word_ada/basic.py | """
.. module:: basic
:synopsis: basic rnn
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import model_word_ada.utils as utils
class BasicUnit(nn.Module):
"""
The basic recurrent unit for the vanilla stacked RNNs.
Parameters
----------
unit : ``str``, required.
The type of rnn unit.
input_dim : ``int``, required.
The input dimension fo the unit.
hid_dim : ``int``, required.
The hidden dimension fo the unit.
droprate : ``float``, required.
The dropout ratrio.
"""
def __init__(self, unit, input_dim, hid_dim, droprate):
super(BasicUnit, self).__init__()
self.unit_type = unit
rnnunit_map = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}
self.batch_norm = (unit == 'bnlstm')
self.layer = rnnunit_map[unit](input_dim, hid_dim, 1)
self.droprate = droprate
self.output_dim = hid_dim
self.init_hidden()
def init_hidden(self):
"""
Initialize hidden states.
"""
self.hidden_state = None
def rand_ini(self):
"""
Random Initialization.
"""
if not self.batch_norm:
utils.init_lstm(self.layer)
def forward(self, x):
"""
Calculate the output.
Parameters
----------
x : ``torch.LongTensor``, required.
the input tensor, of shape (seq_len, batch_size, input_dim).
Returns
----------
output: ``torch.FloatTensor``.
The output of RNNs.
"""
out, new_hidden = self.layer(x, self.hidden_state)
self.hidden_state = utils.repackage_hidden(new_hidden)
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
return out
class BasicRNN(nn.Module):
"""
The multi-layer recurrent networks for the vanilla stacked RNNs.
Parameters
----------
layer_num: ``int``, required.
The number of layers.
unit : ``torch.nn.Module``, required.
The type of rnn unit.
input_dim : ``int``, required.
The input dimension fo the unit.
hid_dim : ``int``, required.
The hidden dimension fo the unit.
droprate : ``float``, required.
The dropout ratrio.
"""
def __init__(self, layer_num, unit, emb_dim, hid_dim, droprate):
super(BasicRNN, self).__init__()
layer_list = [BasicUnit(unit, emb_dim, hid_dim, droprate)] + [BasicUnit(unit, hid_dim, hid_dim, droprate) for i in range(layer_num - 1)]
self.layer = nn.Sequential(*layer_list)
self.output_dim = layer_list[-1].output_dim
self.unit_type = unit
self.init_hidden()
def to_params(self):
"""
To parameters.
"""
return {
"rnn_type": "Basic",
"unit_type": self.layer[0].unit_type,
"layer_num": len(self.layer),
"emb_dim": self.layer[0].layer.input_size,
"hid_dim": self.layer[0].layer.hidden_size,
"droprate": self.layer[0].droprate
}
def init_hidden(self):
"""
Initialize hidden states.
"""
for tup in self.layer.children():
tup.init_hidden()
def rand_ini(self):
"""
Random Initialization.
"""
for tup in self.layer.children():
tup.rand_ini()
def forward(self, x):
"""
Calculate the output.
Parameters
----------
x : ``torch.LongTensor``, required.
the input tensor, of shape (seq_len, batch_size, input_dim).
Returns
----------
output: ``torch.FloatTensor``.
The output of RNNs.
"""
return self.layer(x) | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_word_ada/adaptive.py | model_word_ada/adaptive.py | """
.. module:: adaptive
:synopsis: adaptive softmax
.. moduleauthor:: Liyuan Liu
"""
import torch
from torch import nn
from math import sqrt
class AdaptiveSoftmax(nn.Module):
"""
The adaptive softmax layer.
Modified from: https://github.com/rosinality/adaptive-softmax-pytorch/blob/master/adasoft.py
Parameters
----------
input_size : ``int``, required.
The input dimension.
cutoff : ``list``, required.
The list of cutoff values.
"""
def __init__(self, input_size, cutoff):
super().__init__()
self.input_size = input_size
self.cutoff = cutoff
self.output_size = cutoff[0] + len(cutoff) - 1
self.head = nn.Linear(input_size, self.output_size)
self.tail = nn.ModuleList()
self.cross_entropy = nn.CrossEntropyLoss(size_average=False)
for i in range(len(self.cutoff) - 1):
seq = nn.Sequential(
nn.Linear(input_size, input_size // 4 ** i, False),
nn.Linear(input_size // 4 ** i, cutoff[i + 1] - cutoff[i], False)
)
self.tail.append(seq)
def rand_ini(self):
"""
Random Initialization.
"""
nn.init.xavier_normal_(self.head.weight)
for tail in self.tail:
nn.init.xavier_normal_(tail[0].weight)
nn.init.xavier_normal_(tail[1].weight)
def log_prob(self, w_in, device):
"""
Calculate log-probability for the whole dictionary.
Parameters
----------
w_in : ``torch.FloatTensor``, required.
the input tensor, of shape (word_num, input_dim).
device: ``torch.device``, required.
the target device for calculation.
Returns
----------
prob: ``torch.FloatTensor``.
The full log-probability.
"""
lsm = nn.LogSoftmax(dim=1).to(device)
head_out = self.head(w_in)
batch_size = head_out.size(0)
prob = torch.zeros(batch_size, self.cutoff[-1]).to(device)
lsm_head = lsm(head_out)
prob.narrow(1, 0, self.output_size).add_(lsm_head.narrow(1, 0, self.output_size).data)
for i in range(len(self.tail)):
pos = self.cutoff[i]
i_size = self.cutoff[i + 1] - pos
buffer = lsm_head.narrow(1, self.cutoff[0] + i, 1)
buffer = buffer.expand(batch_size, i_size)
lsm_tail = lsm(self.tail[i](w_in))
prob.narrow(1, pos, i_size).copy_(buffer.data).add_(lsm_tail.data)
return prob
def forward(self, w_in, target):
"""
Calculate the log-likihood w.o. calculate the full distribution.
Parameters
----------
w_in : ``torch.FloatTensor``, required.
the input tensor, of shape (word_num, input_dim).
target : ``torch.FloatTensor``, required.
the target of the language model, of shape (word_num).
Returns
----------
loss: ``torch.FloatTensor``.
The NLL loss.
"""
batch_size = w_in.size(0)
output = 0.0
first_target = target.clone()
for i in range(len(self.cutoff) - 1):
mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))
if mask.sum() > 0:
first_target[mask] = self.cutoff[0] + i
second_target = target[mask].add(-self.cutoff[i])
second_input = w_in.index_select(0, mask.nonzero().squeeze())
second_output = self.tail[i](second_input)
output += self.cross_entropy(second_output, second_target)
output += self.cross_entropy(self.head(w_in), first_target)
output /= batch_size
return output
| python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_word_ada/__init__.py | model_word_ada/__init__.py | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false | |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_word_ada/ldnet.py | model_word_ada/ldnet.py | """
.. module:: ldnet
:synopsis: LD-Net
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import model_word_ada.utils as utils
import random
class BasicUnit(nn.Module):
"""
The basic recurrent unit for the densely connected RNNs with layer-wise dropout.
Parameters
----------
unit : ``torch.nn.Module``, required.
The type of rnn unit.
input_dim : ``float``, required.
The input dimension fo the unit.
increase_rate : ``float``, required.
The hidden dimension fo the unit.
droprate : ``float``, required.
The dropout ratrio.
layer_dropout : ``float``, required.
The layer-wise dropout ratrio.
"""
def __init__(self, unit, input_dim, increase_rate, droprate, layer_drop = 0):
super(BasicUnit, self).__init__()
rnnunit_map = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}
self.unit_type = unit
self.layer = rnnunit_map[unit](input_dim, increase_rate, 1)
if 'lstm' == self.unit_type:
utils.init_lstm(self.layer)
self.layer_drop = layer_drop
self.droprate = droprate
self.input_dim = input_dim
self.increase_rate = increase_rate
self.output_dim = input_dim + increase_rate
self.init_hidden()
def init_hidden(self):
"""
Initialize hidden states.
"""
self.hidden_state = None
def rand_ini(self):
"""
Random Initialization.
"""
return
def forward(self, x, p_out):
"""
Calculate the output.
Parameters
----------
x : ``torch.LongTensor``, required.
the input tensor, of shape (seq_len, batch_size, input_dim).
p_out : ``torch.LongTensor``, required.
the final output tensor for the softmax, of shape (seq_len, batch_size, input_dim).
Returns
----------
out: ``torch.FloatTensor``.
The undropped outputs of RNNs to the softmax.
p_out: ``torch.FloatTensor``.
The dropped outputs of RNNs to the next_layer.
"""
if self.droprate > 0:
new_x = F.dropout(x, p=self.droprate, training=self.training)
else:
new_x = x
out, new_hidden = self.layer(new_x, self.hidden_state)
self.hidden_state = utils.repackage_hidden(new_hidden)
out = out.contiguous()
if self.training and random.uniform(0, 1) < self.layer_drop:
deep_out = torch.autograd.Variable( torch.zeros(x.size(0), x.size(1), self.increase_rate) ).cuda()
else:
deep_out = out
o_out = torch.cat([p_out, out], 2)
d_out = torch.cat([x, deep_out], 2)
return d_out, o_out
class LDRNN(nn.Module):
"""
The multi-layer recurrent networks for the densely connected RNNs with layer-wise dropout.
Parameters
----------
layer_num: ``float``, required.
The number of layers.
unit : ``torch.nn.Module``, required.
The type of rnn unit.
input_dim : ``float``, required.
The input dimension fo the unit.
hid_dim : ``float``, required.
The hidden dimension fo the unit.
droprate : ``float``, required.
The dropout ratrio.
layer_dropout : ``float``, required.
The layer-wise dropout ratrio.
"""
def __init__(self, layer_num, unit, emb_dim, hid_dim, droprate, layer_drop):
super(LDRNN, self).__init__()
self.unit_type = unit
self.layer_list = [BasicUnit(unit, emb_dim + i * hid_dim, hid_dim, droprate, layer_drop) for i in range(layer_num)]
self.layer_num = layer_num
self.layer = nn.ModuleList(self.layer_list) if layer_num > 0 else None
self.output_dim = self.layer_list[-1].output_dim if layer_num > 0 else emb_dim
self.emb_dim = emb_dim
self.init_hidden()
def to_params(self):
"""
To parameters.
"""
return {
"rnn_type": "LDRNN",
"unit_type": self.layer[0].unit_type,
"layer_num": len(self.layer),
"emb_dim": self.layer[0].input_dim,
"hid_dim": self.layer[0].increase_rate,
"droprate": self.layer[0].droprate,
"after_pruned": False
}
def init_hidden(self):
"""
Initialize hidden states.
"""
for tup in self.layer_list:
tup.init_hidden()
def rand_ini(self):
"""
Random Initialization.
"""
for tup in self.layer_list:
tup.rand_ini()
def forward(self, x):
"""
Calculate the output.
Parameters
----------
x : ``torch.LongTensor``, required.
the input tensor, of shape (seq_len, batch_size, input_dim).
Returns
----------
output: ``torch.FloatTensor``.
The output of RNNs to the Softmax.
"""
output = x
for ind in range(self.layer_num):
x, output = self.layer_list[ind](x, output)
return output | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/model_word_ada/densenet.py | model_word_ada/densenet.py | """
.. module:: densenet
:synopsis: densernn
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import model_word_ada.utils as utils
class BasicUnit(nn.Module):
"""
The basic recurrent unit for the densely connected RNNs.
Parameters
----------
unit : ``torch.nn.Module``, required.
The type of rnn unit.
input_dim : ``float``, required.
The input dimension fo the unit.
increase_rate : ``float``, required.
The hidden dimension fo the unit.
droprate : ``float``, required.
The dropout ratrio.
"""
def __init__(self, unit, input_dim, increase_rate, droprate):
super(BasicUnit, self).__init__()
rnnunit_map = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}
self.unit_type = unit
self.layer = rnnunit_map[unit](input_dim, increase_rate, 1)
if 'lstm' == self.unit_type:
utils.init_lstm(self.layer)
self.droprate = droprate
self.input_dim = input_dim
self.increase_rate = increase_rate
self.output_dim = input_dim + increase_rate
self.init_hidden()
def init_hidden(self):
"""
Initialize hidden states.
"""
self.hidden_state = None
def rand_ini(self):
"""
Random Initialization.
"""
return
def forward(self, x):
"""
Calculate the output.
Parameters
----------
x : ``torch.LongTensor``, required.
the input tensor, of shape (seq_len, batch_size, input_dim).
Returns
----------
output: ``torch.FloatTensor``.
The output of RNNs.
"""
if self.droprate > 0:
new_x = F.dropout(x, p=self.droprate, training=self.training)
else:
new_x = x
out, new_hidden = self.layer(new_x, self.hidden_state)
self.hidden_state = utils.repackage_hidden(new_hidden)
out = out.contiguous()
return torch.cat([x, out], 2)
class DenseRNN(nn.Module):
"""
The multi-layer recurrent networks for the densely connected RNNs.
Parameters
----------
layer_num: ``float``, required.
The number of layers.
unit : ``torch.nn.Module``, required.
The type of rnn unit.
input_dim : ``float``, required.
The input dimension fo the unit.
hid_dim : ``float``, required.
The hidden dimension fo the unit.
droprate : ``float``, required.
The dropout ratrio.
"""
def __init__(self, layer_num, unit, emb_dim, hid_dim, droprate):
super(DenseRNN, self).__init__()
self.unit_type = unit
self.layer_list = [BasicUnit(unit, emb_dim + i * hid_dim, hid_dim, droprate) for i in range(layer_num)]
self.layer = nn.Sequential(*self.layer_list) if layer_num > 0 else None
self.output_dim = self.layer_list[-1].output_dim if layer_num > 0 else emb_dim
self.emb_dim = emb_dim
self.init_hidden()
def to_params(self):
"""
To parameters.
"""
return {
"rnn_type": "DenseRNN",
"unit_type": self.layer[0].unit_type,
"layer_num": len(self.layer),
"emb_dim": self.layer[0].input_dim,
"hid_dim": self.layer[0].increase_rate,
"droprate": self.layer[0].droprate
}
def init_hidden(self):
"""
Initialize hidden states.
"""
for tup in self.layer_list:
tup.init_hidden()
def rand_ini(self):
"""
Random Initialization.
"""
for tup in self.layer_list:
tup.rand_ini()
def forward(self, x):
"""
Calculate the output.
Parameters
----------
x : ``torch.LongTensor``, required.
the input tensor, of shape (seq_len, batch_size, input_dim).
Returns
----------
output: ``torch.FloatTensor``.
The output of RNNs.
"""
return self.layer(x) | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/pre_word_ada/encode_data2folder.py | pre_word_ada/encode_data2folder.py | """
.. module:: encode_data2folder
:synopsis: encode data folder for language modeling
.. moduleauthor:: Liyuan Liu
"""
import pickle
import argparse
import os
import random
import numpy as np
from tqdm import tqdm
import itertools
import functools
def encode_dataset(input_folder, w_map, reverse):
w_eof = w_map['\n']
w_unk = w_map['<unk>']
list_dirs = os.walk(input_folder)
lines = list()
for root, dirs, files in list_dirs:
for file in tqdm(files):
with open(os.path.join(root, file)) as fin:
lines = lines + list(filter(lambda t: t and not t.isspace(), fin.readlines()))
dataset = list()
for line in lines:
dataset += list(map(lambda t: w_map.get(t, w_unk), line.split())) + [w_eof]
if reverse:
dataset = dataset[::-1]
return dataset
def encode_dataset2file(input_folder, t, w_map, reverse):
w_eof = w_map['\n']
w_unk = w_map['<unk>']
list_dirs = os.walk(input_folder)
range_ind = 0
for root, dirs, files in list_dirs:
for file in tqdm(files):
with open(os.path.join(root, file), 'r') as fin:
lines = list(filter(lambda t: t and not t.isspace(), fin.readlines()))
dataset = list()
for line in lines:
dataset += list(map(lambda t: w_map.get(t, w_unk), line.split())) + [w_eof]
if reverse:
dataset = dataset[::-1]
with open(output_folder+'train_'+ str(range_ind) + '.pk', 'wb') as f:
pickle.dump(dataset, f)
range_ind += 1
return range_ind
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--train_folder', default="./data/1b_train")
parser.add_argument('--test_folder', default="./data/1b_test")
parser.add_argument('--input_map', default="./data/1b_map.pk")
parser.add_argument('--output_folder', default="./data/one_billion/")
parser.add_argument('--threshold', type=int, default=3)
parser.add_argument('--unk', default='<unk>')
parser.add_argument('--reverse', action='store_true')
args = parser.parse_args()
with open(args.input_map, 'rb') as f:
w_count = pickle.load(f)
unk_count = sum([v for k, v in w_count.items() if v <= args.threshold])
w_list = [(k, v) for k, v in w_count.items() if v > args.threshold]
w_list.append(('<unk>', unk_count))
w_list.sort(key=lambda t: t[1], reverse=True)
w_map = {kv[0]:v for v, kv in enumerate(w_list)}
range_ind = encode_dataset2file(args.train_folder, args.output_folder, w_map, args.reverse)
test_dataset = encode_dataset(args.test_folder, w_map, args.reverse)
with open(args.output_folder+'test.pk', 'wb') as f:
pickle.dump({'w_map': w_map, 'test_data':test_dataset, 'range' : range_ind}, f)
| python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/pre_word_ada/gene_map.py | pre_word_ada/gene_map.py | """
.. module:: gene_map
:synopsis: gene map for language modeling
.. moduleauthor:: Liyuan Liu
"""
import pickle
import argparse
import os
import random
import numpy as np
from tqdm import tqdm
import itertools
import functools
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_folder', default="./data/1b_train")
parser.add_argument('--output_map', default="./data/1b_map.pk")
args = parser.parse_args()
w_count = {'\n':0}
list_dirs = os.walk(args.input_folder)
for root, dirs, files in list_dirs:
for file in tqdm(files):
with open(os.path.join(root, file)) as fin:
for line in fin:
if not line or line.isspace():
continue
line = line.split()
for tup in line:
w_count[tup] = w_count.get(tup, 0) + 1
w_count['\n'] += 1
with open(args.output_map, 'wb') as f:
pickle.dump(w_count, f) | python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
LiyuanLucasLiu/LD-Net | https://github.com/LiyuanLucasLiu/LD-Net/blob/f9489b6e7d436b7e3ed6447b797fb6ce9a886483/docs/source/conf.py | docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Wrapper documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 14 03:49:01 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages'
]
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'LD-Net'
copyright = '2018, Liyuan Liu'
author = 'Liyuan Liu'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'collapse_navigation': False,
'display_version': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'LD_Net'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ldnet.tex', 'LD-Net Documentation',
'Liyuan Liu', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'LD-Net', 'LD-Net Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'LD-Net', 'LD-Net Documentation',
author, 'LD-Net', 'Efficient Contextualized Representations.',
'Miscellaneous'),
]
autodoc_mock_imports = ['torch', 'numpy', 'tensorboardX', 'git', 'tqdm']
intersphinx_mapping = {
'git': ('https://gitpython.readthedocs.io/en/stable/', None),
'tensorboardX': ('https://tensorboardx.readthedocs.io/en/latest/', None),
'python':('https://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'torch': ('http://pytorch.org/docs/master', None)
}
| python | Apache-2.0 | f9489b6e7d436b7e3ed6447b797fb6ce9a886483 | 2026-01-05T07:14:28.410516Z | false |
jiashunwang/Neural-Pose-Transfer | https://github.com/jiashunwang/Neural-Pose-Transfer/blob/bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6/train.py | train.py | import torch
import torch.optim as optim
from data import SMPL_DATA
from model_maxpool import NPT
import utils as utils
import numpy as np
import time
import pymesh
batch_size=8
dataset = SMPL_DATA(train=True, shuffle_point = True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=1)
model=NPT()
model.cuda()
model.apply(utils.weights_init)
lrate=0.00005
optimizer_G = optim.Adam(model.parameters(), lr=lrate)
for epoch in range(1000):
start=time.time()
total_loss=0
for j,data in enumerate(dataloader,0):
optimizer_G.zero_grad()
pose_points, random_sample, gt_points, identity_points, new_face=data
pose_points=pose_points.transpose(2,1)
pose_points=pose_points.cuda()
identity_points=identity_points.transpose(2,1)
identity_points=identity_points.cuda()
gt_points=gt_points.cuda()
pointsReconstructed = model(pose_points,identity_points)
rec_loss = torch.mean((pointsReconstructed - gt_points)**2)
edg_loss=0
for i in range(len(random_sample)):
f=new_face[i].cpu().numpy()
v=identity_points[i].transpose(0,1).cpu().numpy()
edg_loss=edg_loss+utils.compute_score(pointsReconstructed[i].unsqueeze(0),f,utils.get_target(v,f,1))
edg_loss=edg_loss/len(random_sample)
l2_loss=rec_loss
rec_loss=rec_loss+0.0005*edg_loss
rec_loss.backward()
optimizer_G.step()
total_loss=total_loss+rec_loss
print('####################################')
print(epoch)
print(time.time()-start)
mean_loss=total_loss/(j+1)
print('mean_loss',mean_loss.item())
print('####################################')
if (epoch+1)%10==0:
save_path='./saved_model/'+str(epoch)+'.model'
torch.save(model.state_dict(),save_path)
| python | Apache-2.0 | bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6 | 2026-01-05T07:14:12.473684Z | false |
jiashunwang/Neural-Pose-Transfer | https://github.com/jiashunwang/Neural-Pose-Transfer/blob/bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6/model_maxpool.py | model_maxpool.py | from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
class PoseFeature(nn.Module):
def __init__(self, num_points = 6890):
super(PoseFeature, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.norm1 = torch.nn.InstanceNorm1d(64)
self.norm2 = torch.nn.InstanceNorm1d(128)
self.norm3 = torch.nn.InstanceNorm1d(1024)
self.num_points = num_points
def forward(self, x):
x = F.relu(self.norm1(self.conv1(x)))
x = F.relu(self.norm2(self.conv2(x)))
x = F.relu(self.norm3(self.conv3(x)))
x,_ = torch.max(x, 2)
x = x.view(-1, 1024)
x = x.view(-1, 1024, 1).repeat(1, 1, self.num_points)
return x
class SPAdaIN(nn.Module):
def __init__(self,norm,input_nc,planes):
super(SPAdaIN,self).__init__()
self.conv_weight = nn.Conv1d(input_nc, planes, 1)
self.conv_bias = nn.Conv1d(input_nc, planes, 1)
self.norm = norm(planes)
def forward(self,x,addition):
x = self.norm(x)
weight = self.conv_weight(addition)
bias = self.conv_bias(addition)
out = weight * x + bias
return out
class SPAdaIN1(nn.Module):
def __init__(self,norm,input_nc,planes):
super(SPAdaIN1,self).__init__()
self.conv_weight = nn.Conv1d(input_nc, planes, 1)
self.conv_bias = nn.Conv1d(input_nc, planes, 1)
self.norm = norm(planes)
def forward(self,x,addition):
weight= self.conv_weight(addition)
bias = self.conv_bias(addition)
out = weight * x + bias
return out
class SPAdaINResBlock1(nn.Module):
def __init__(self,input_nc,planes,norm=nn.InstanceNorm1d,conv_kernel_size=1,padding=0):
super(SPAdaINResBlock1,self).__init__()
self.spadain1 = SPAdaIN1(norm=norm,input_nc=input_nc,planes=planes)
self.relu = nn.ReLU()
self.conv1 = nn.Conv1d(planes, planes, kernel_size=conv_kernel_size, stride=1, padding=padding)
self.spadain2 = SPAdaIN1(norm=norm,input_nc=input_nc,planes=planes)
self.conv2 = nn.Conv1d(planes,planes,kernel_size=conv_kernel_size, stride=1, padding=padding)
self.spadain_res = SPAdaIN1(norm=norm,input_nc=input_nc,planes=planes)
self.conv_res=nn.Conv1d(planes,planes,kernel_size=conv_kernel_size, stride=1, padding=padding)
def forward(self,x,addition):
#print(x.shape)
out = self.spadain1(x,addition)
#print(out.shape)
out = self.relu(out)
out = self.conv1(out)
out = self.spadain2(out,addition)
out = self.relu(out)
out = self.conv2(out)
residual = x
residual = self.spadain_res(residual,addition)
residual = self.relu(residual)
residual = self.conv_res(residual)
out = out + residual
return out
class SPAdaINResBlock(nn.Module):
def __init__(self,input_nc,planes,norm=nn.InstanceNorm1d,conv_kernel_size=1,padding=0):
super(SPAdaINResBlock,self).__init__()
self.spadain1 = SPAdaIN(norm=norm,input_nc=input_nc,planes=planes)
self.relu = nn.ReLU()
self.conv1 = nn.Conv1d(planes, planes, kernel_size=conv_kernel_size, stride=1, padding=padding)
self.spadain2 = SPAdaIN(norm=norm,input_nc=input_nc,planes=planes)
self.conv2 = nn.Conv1d(planes,planes,kernel_size=conv_kernel_size, stride=1, padding=padding)
self.spadain_res = SPAdaIN(norm=norm,input_nc=input_nc,planes=planes)
self.conv_res=nn.Conv1d(planes,planes,kernel_size=conv_kernel_size, stride=1, padding=padding)
def forward(self,x,addition):
out = self.spadain1(x,addition)
out = self.relu(out)
out = self.conv1(out)
out = self.spadain2(out,addition)
out = self.relu(out)
out = self.conv2(out)
residual = x
residual = self.spadain_res(residual,addition)
residual = self.relu(residual)
residual = self.conv_res(residual)
out = out + residual
return out
class Decoder(nn.Module):
def __init__(self, bottleneck_size = 1024):
self.bottleneck_size = bottleneck_size
super(Decoder, self).__init__()
self.conv1 = torch.nn.Conv1d(self.bottleneck_size, self.bottleneck_size, 1)
self.conv2 = torch.nn.Conv1d(self.bottleneck_size, self.bottleneck_size//2, 1)
self.conv3 = torch.nn.Conv1d(self.bottleneck_size//2, self.bottleneck_size//4, 1)
self.conv4 = torch.nn.Conv1d(self.bottleneck_size//4, 3, 1)
self.spadain_block1 = SPAdaINResBlock1(input_nc=3 ,planes=self.bottleneck_size)
self.spadain_block2 = SPAdaINResBlock(input_nc=3 ,planes=self.bottleneck_size//2)
self.spadain_block3 = SPAdaINResBlock(input_nc=3 ,planes=self.bottleneck_size//4)
self.norm1 = torch.nn.InstanceNorm1d(self.bottleneck_size)
self.norm2 = torch.nn.InstanceNorm1d(self.bottleneck_size//2)
self.norm3 = torch.nn.InstanceNorm1d(self.bottleneck_size//4)
self.th = nn.Tanh()
def forward(self, x, addition):
x = self.conv1(x)
x = self.spadain_block1(x,addition)
x = self.conv2(x)
x = self.spadain_block2(x,addition)
x = self.conv3(x)
x = self.spadain_block3(x,addition)
x = 2*self.th(self.conv4(x))
return x
class NPT(nn.Module):
def __init__(self, num_points = 6890, bottleneck_size = 1024):
super(NPT, self).__init__()
self.num_points = num_points
self.bottleneck_size = bottleneck_size
self.encoder = PoseFeature(num_points = num_points)
self.decoder = Decoder(bottleneck_size = self.bottleneck_size+3)
def forward(self, x1, x2):
x1 = self.encoder(x1)
y = torch.cat((x1, x2), 1)
out =self.decoder(y,x2)
return out.transpose(2,1) | python | Apache-2.0 | bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6 | 2026-01-05T07:14:12.473684Z | false |
jiashunwang/Neural-Pose-Transfer | https://github.com/jiashunwang/Neural-Pose-Transfer/blob/bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6/model.py | model.py | from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
class PoseFeature(nn.Module):
def __init__(self, num_points = 6890):
super(PoseFeature, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.norm1 = torch.nn.InstanceNorm1d(64)
self.norm2 = torch.nn.InstanceNorm1d(128)
self.norm3 = torch.nn.InstanceNorm1d(1024)
def forward(self, x):
x = F.relu(self.norm1(self.conv1(x)))
x = F.relu(self.norm2(self.conv2(x)))
x = F.relu(self.norm3(self.conv3(x)))
return x
class SPAdaIN(nn.Module):
def __init__(self,norm,input_nc,planes):
super(SPAdaIN,self).__init__()
self.conv_weight = nn.Conv1d(input_nc, planes, 1)
self.conv_bias = nn.Conv1d(input_nc, planes, 1)
self.norm = norm(planes)
def forward(self,x,addition):
x = self.norm(x)
weight = self.conv_weight(addition)
bias = self.conv_bias(addition)
out = weight * x + bias
return out
class SPAdaINResBlock(nn.Module):
def __init__(self,input_nc,planes,norm=nn.InstanceNorm1d,conv_kernel_size=1,padding=0):
super(SPAdaINResBlock,self).__init__()
self.spadain1 = SPAdaIN(norm=norm,input_nc=input_nc,planes=planes)
self.relu = nn.ReLU()
self.conv1 = nn.Conv1d(planes, planes, kernel_size=conv_kernel_size, stride=1, padding=padding)
self.spadain2 = SPAdaIN(norm=norm,input_nc=input_nc,planes=planes)
self.conv2 = nn.Conv1d(planes,planes,kernel_size=conv_kernel_size, stride=1, padding=padding)
self.spadain_res = SPAdaIN(norm=norm,input_nc=input_nc,planes=planes)
self.conv_res=nn.Conv1d(planes,planes,kernel_size=conv_kernel_size, stride=1, padding=padding)
def forward(self,x,addition):
out = self.spadain1(x,addition)
out = self.relu(out)
out = self.conv1(out)
out = self.spadain2(out,addition)
out = self.relu(out)
out = self.conv2(out)
residual = x
residual = self.spadain_res(residual,addition)
residual = self.relu(residual)
residual = self.conv_res(residual)
out = out + residual
return out
class Decoder(nn.Module):
def __init__(self, bottleneck_size = 1024):
self.bottleneck_size = bottleneck_size
super(Decoder, self).__init__()
self.conv1 = torch.nn.Conv1d(self.bottleneck_size, self.bottleneck_size, 1)
self.conv2 = torch.nn.Conv1d(self.bottleneck_size, self.bottleneck_size//2, 1)
self.conv3 = torch.nn.Conv1d(self.bottleneck_size//2, self.bottleneck_size//4, 1)
self.conv4 = torch.nn.Conv1d(self.bottleneck_size//4, 3, 1)
self.spadain_block1 = SPAdaINResBlock(input_nc=3 ,planes=self.bottleneck_size)
self.spadain_block2 = SPAdaINResBlock(input_nc=3 ,planes=self.bottleneck_size//2)
self.spadain_block3 = SPAdaINResBlock(input_nc=3 ,planes=self.bottleneck_size//4)
self.norm1 = torch.nn.InstanceNorm1d(self.bottleneck_size)
self.norm2 = torch.nn.InstanceNorm1d(self.bottleneck_size//2)
self.norm3 = torch.nn.InstanceNorm1d(self.bottleneck_size//4)
self.th = nn.Tanh()
def forward(self, x, addition):
x = self.conv1(x)
x = self.spadain_block1(x,addition)
x = self.conv2(x)
x = self.spadain_block2(x,addition)
x = self.conv3(x)
x = self.spadain_block3(x,addition)
x = 2*self.th(self.conv4(x))
return x
class NPT(nn.Module):
def __init__(self, num_points = 6890, bottleneck_size = 1024):
super(NPT, self).__init__()
self.num_points = num_points
self.bottleneck_size = bottleneck_size
self.encoder = PoseFeature(num_points = num_points)
self.decoder = Decoder(bottleneck_size = self.bottleneck_size+3)
def forward(self, x1, x2):
x1 = self.encoder(x1)
y = torch.cat((x1, x2), 1)
out =self.decoder(y,x2)
return out.transpose(2,1)
| python | Apache-2.0 | bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6 | 2026-01-05T07:14:12.473684Z | false |
jiashunwang/Neural-Pose-Transfer | https://github.com/jiashunwang/Neural-Pose-Transfer/blob/bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6/data_generation.py | data_generation.py | import numpy as np
import pickle
import math
import random
import torch
import os
import trimesh
import torch
class SMPLModel():
def __init__(self, model_path):
"""
SMPL model.
Parameter:
---------
model_path: Path to the SMPL model parameters, pre-processed by
`preprocess.py`.
"""
with open(model_path, 'rb') as f:
params = pickle.load(f)
self.J_regressor = params['J_regressor']
self.weights = params['weights']
self.posedirs = params['posedirs']
self.v_template = params['v_template']
self.shapedirs = params['shapedirs']
self.faces = params['f']
self.kintree_table = params['kintree_table']
id_to_col = {
self.kintree_table[1, i]: i for i in range(self.kintree_table.shape[1])
}
self.parent = {
i: id_to_col[self.kintree_table[0, i]]
for i in range(1, self.kintree_table.shape[1])
}
self.pose_shape = [24, 3]
self.beta_shape = [10]
self.trans_shape = [3]
self.pose = np.zeros(self.pose_shape)
self.beta = np.zeros(self.beta_shape)
self.trans = np.zeros(self.trans_shape)
self.verts = None
self.J = None
self.R = None
self.update()
def set_params(self, pose=None, beta=None, trans=None):
"""
Set pose, shape, and/or translation parameters of SMPL model. Verices of the
model will be updated and returned.
Parameters:
---------
pose: Also known as 'theta', a [24,3] matrix indicating child joint rotation
relative to parent joint. For root joint it's global orientation.
Represented in a axis-angle format.
beta: Parameter for model shape. A vector of shape [10]. Coefficients for
PCA component. Only 10 components were released by MPI.
trans: Global translation of shape [3].
Return:
------
Updated vertices.
"""
if pose is not None:
self.pose = pose
if beta is not None:
self.beta = beta
if trans is not None:
self.trans = trans
self.update()
return self.verts
def update(self):
"""
Called automatically when parameters are updated.
"""
# how beta affect body shape
v_shaped = self.shapedirs.dot(self.beta) + self.v_template
# joints location
self.J = self.J_regressor.dot(v_shaped)
pose_cube = self.pose.reshape((-1, 1, 3))
# rotation matrix for each joint
self.R = self.rodrigues(pose_cube)
I_cube = np.broadcast_to(
np.expand_dims(np.eye(3), axis=0),
(self.R.shape[0]-1, 3, 3)
)
lrotmin = (self.R[1:] - I_cube).ravel()
# how pose affect body shape in zero pose
v_posed = v_shaped + self.posedirs.dot(lrotmin)
# world transformation of each joint
G = np.empty((self.kintree_table.shape[1], 4, 4))
G[0] = self.with_zeros(np.hstack((self.R[0], self.J[0, :].reshape([3, 1]))))
for i in range(1, self.kintree_table.shape[1]):
G[i] = G[self.parent[i]].dot(
self.with_zeros(
np.hstack(
[self.R[i],((self.J[i, :]-self.J[self.parent[i],:]).reshape([3,1]))]
)
)
)
# remove the transformation due to the rest pose
G = G - self.pack(
np.matmul(
G,
np.hstack([self.J, np.zeros([24, 1])]).reshape([24, 4, 1])
)
)
# transformation of each vertex
T = np.tensordot(self.weights, G, axes=[[1], [0]])
rest_shape_h = np.hstack((v_posed, np.ones([v_posed.shape[0], 1])))
v = np.matmul(T, rest_shape_h.reshape([-1, 4, 1])).reshape([-1, 4])[:, :3]
self.verts = v + self.trans.reshape([1, 3])
def rodrigues(self, r):
"""
Rodrigues' rotation formula that turns axis-angle vector into rotation
matrix in a batch-ed manner.
Parameter:
----------
r: Axis-angle rotation vector of shape [batch_size, 1, 3].
Return:
-------
Rotation matrix of shape [batch_size, 3, 3].
"""
theta = np.linalg.norm(r, axis=(1, 2), keepdims=True)
# avoid zero divide
theta = np.maximum(theta, np.finfo(np.float64).tiny)
r_hat = r / theta
cos = np.cos(theta)
z_stick = np.zeros(theta.shape[0])
m = np.dstack([
z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1],
r_hat[:, 0, 2], z_stick, -r_hat[:, 0, 0],
-r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick]
).reshape([-1, 3, 3])
i_cube = np.broadcast_to(
np.expand_dims(np.eye(3), axis=0),
[theta.shape[0], 3, 3]
)
A = np.transpose(r_hat, axes=[0, 2, 1])
B = r_hat
dot = np.matmul(A, B)
R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m
return R
def with_zeros(self, x):
"""
Append a [0, 0, 0, 1] vector to a [3, 4] matrix.
Parameter:
---------
x: Matrix to be appended.
Return:
------
Matrix after appending of shape [4,4]
"""
return np.vstack((x, np.array([[0.0, 0.0, 0.0, 1.0]])))
def pack(self, x):
"""
Append zero matrices of shape [4, 3] to vectors of [4, 1] shape in a batched
manner.
Parameter:
----------
x: Matrices to be appended of shape [batch_size, 4, 1]
Return:
------
Matrix of shape [batch_size, 4, 4] after appending.
"""
return np.dstack((np.zeros((x.shape[0], 4, 3)), x))
def save_to_obj(self, path):
"""
Save the SMPL model into .obj file.
Parameter:
---------
path: Path to save.
"""
file = open(path, 'w')
for v in self.verts:
file.write('v %f %f %f\n' % (v[0], v[1], v[2]))
for f in self.faces + 1:
file.write('f %d %d %d\n' % (f[0], f[1], f[2]))
file.close()
# with open(path, 'w') as fp:
# for v in self.verts:
# fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))
# for f in self.faces + 1:
# fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))
# functions: degree to rad
def rad(angle):
rad = 0
rad = math.radians(angle)
return rad
def lit(angle_start, angle_stop):
random_rad = 0
random_angle = 0
random_angle = random.uniform(angle_start, angle_stop)
# print(random_angle)
random_rad = rad(random_angle)
return random_rad
# form random pose
def random_pose_straight():
# np.random.seed(9608)
pose = np.zeros((24, 3))
#left arm
arm_y_l = lit(-30, 30)
arm_z_l = lit(-30, 30)
pose[13]=[0, arm_y_l, arm_z_l]
pose[16]=[0, arm_y_l, arm_z_l]
pose[18] = [0, lit(-60, 0), 0]
pose[20] = [lit(-10,10), lit(-10, 10), lit(-10,10)]
pose[22] = [lit(-5,5), lit(0,10), lit(-10,0)]
#right arm
arm_y_r = lit(-30, 30)
arm_z_r = lit(-30, 30)
pose[14]=[0, arm_y_r, arm_z_r]
pose[17]=[0, arm_y_r, arm_z_r]
pose[19] = [0, lit(0, 60), 0]
pose[21] = [lit(-10,10), lit(-10, 10), lit(-10,10)]
pose[23] = [lit(-5,5), lit(-10,0), lit(0,10)]
# #left leg
pose[1] = [lit(-90, 0), 0, lit(0, 5)]
pose[4] = [lit(0, 10), 0, 0]
pose[7] = [lit(-10,20), lit(-10,10), lit(-1,1)]
# # pose[10]=[rad(-20), 0, 0]
# #right leg
pose[2] = [lit(-90, 0), 0, lit(-5, 0)]
pose[5] = [lit(0, 10), 0, 0]
pose[8] = [lit(-10,10), lit(-10,10), lit(-1,1)]
# # pose[11]=[rad(), 0, 0]
neck = lit(-1,1)
pose[15] = [neck,neck,neck]
pose[12] = [neck,neck,neck]
bone = lit(-1,1)
pose[9]=[bone,bone,bone]
pose[6]=[bone,bone,bone]
pose[3]=[bone,bone,bone]
pose[0]=[lit(-2,2),lit(-2,2),lit(-2,2)]
# print("pose done")
return pose
def random_pose():
# np.random.seed(9608)
pose = np.zeros((24, 3))
# left arm
arm_y_l = lit(-30, 30)
arm_z_l = lit(-30, 30)
pose[13] = [0, arm_y_l, arm_z_l]
pose[16] = [0, arm_y_l, arm_z_l]
pose[18] = [0, lit(-60, 0), 0]
pose[20] = [lit(-20,20), lit(-20, 20), lit(-20,20)]
pose[22] = [lit(-5,5), lit(0,10), lit(-10,0)]
# right arm
arm_y_r = lit(-30, 30)
arm_z_r = lit(-30, 30)
pose[14] = [0, arm_y_r, arm_z_r]
pose[17] = [0, arm_y_r, arm_z_r]
pose[19] = [0, lit(0, 60), 0]
pose[21] = [lit(-20,20), lit(-20, 20), lit(-20,20)]
pose[23] = [lit(-5,5), lit(-10,0), lit(0,10)]
# #left leg
pose[1] = [lit(-90, 0), 0, lit(0, 40)]
pose[4] = [lit(0, 100), 0, 0]
pose[7] = [lit(-10,10), lit(-10,10), lit(-1,1)]
# # pose[10]=[rad(-20), 0, 0]
# #right leg
pose[2] = [lit(-90, 0), 0, lit(-40, 0)]
pose[5] = [lit(0, 100), 0, 0]
pose[8] = [lit(-10,10), lit(-10,10), lit(-1,1)]
# # pose[11]=[rad(), 0, 0]
neck = lit(-1,1)
pose[15] = [neck,neck,neck]
pose[12] = [neck,neck,neck]
bone = lit(-1,1)
pose[9]=[bone,bone,bone]
pose[6]=[bone,bone,bone]
pose[3]=[bone,bone,bone]
pose[0]=[lit(-2,2),lit(-2,2),lit(-2,2)]
# print("pose done")
return pose
if __name__ == '__main__':
ID_SIZE = 16
POSE_SIZE = 800
smpl = SMPLModel('./model_male.pkl')
# np.random.seed(9606)
beta = np.zeros(*smpl.beta_shape)
#
# print(*smpl.beta_shape)
trans = np.zeros(smpl.trans_shape)
for i in range(ID_SIZE):
np.random.seed(i*10)
beta = (np.random.rand(10)-0.5)*8
print('beta:',beta)
for j in range(POSE_SIZE):
random.seed(j)
if j<POSE_SIZE/2:
pose = random_pose()
else:
pose = random_pose_straight()
smpl.set_params(beta=beta, pose=pose, trans=trans)
smpl.save_to_obj('./smpl_data/' + str(i) + '_' + str(j) + '.obj')
print(i,j)
| python | Apache-2.0 | bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6 | 2026-01-05T07:14:12.473684Z | false |
jiashunwang/Neural-Pose-Transfer | https://github.com/jiashunwang/Neural-Pose-Transfer/blob/bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6/utils.py | utils.py | import numpy as np
import torch
def init_regul(source_vertices, source_faces):
sommet_A_source = source_vertices[source_faces[:, 0]]
sommet_B_source = source_vertices[source_faces[:, 1]]
sommet_C_source = source_vertices[source_faces[:, 2]]
target = []
target.append(np.sqrt( np.sum((sommet_A_source - sommet_B_source) ** 2, axis=1)))
target.append(np.sqrt( np.sum((sommet_B_source - sommet_C_source) ** 2, axis=1)))
target.append(np.sqrt( np.sum((sommet_A_source - sommet_C_source) ** 2, axis=1)))
return target
def get_target(vertice, face, size):
target = init_regul(vertice,face)
target = np.array(target)
target = torch.from_numpy(target).float().cuda()
#target = target+0.0001
target = target.unsqueeze(1).expand(3,size,-1)
return target
def compute_score(points, faces, target):
score = 0
sommet_A = points[:,faces[:, 0]]
sommet_B = points[:,faces[:, 1]]
sommet_C = points[:,faces[:, 2]]
score = torch.abs(torch.sqrt(torch.sum((sommet_A - sommet_B) ** 2, dim=2)) / target[0] -1)
score = score + torch.abs(torch.sqrt(torch.sum((sommet_B - sommet_C) ** 2, dim=2)) / target[1] -1)
score = score + torch.abs(torch.sqrt(torch.sum((sommet_A - sommet_C) ** 2, dim=2)) / target[2] -1)
return torch.mean(score)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
| python | Apache-2.0 | bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6 | 2026-01-05T07:14:12.473684Z | false |
jiashunwang/Neural-Pose-Transfer | https://github.com/jiashunwang/Neural-Pose-Transfer/blob/bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6/demo.py | demo.py | import torch
from model import NPT
import numpy as np
import pymesh
net_G=NPT()
net_G.cuda()
net_G.load_state_dict(torch.load('original_169.model'))
def face_reverse(faces):
identity_faces=faces
face_dict={}
for i in range(len(random_sample)):
face_dict[random_sample[i]]=i
new_f=[]
for i in range(len(identity_faces)):
new_f.append([face_dict[identity_faces[i][0]],face_dict[identity_faces[i][1]],face_dict[identity_faces[i][2]]])
new_face=np.array(new_f)
return new_face
random_sample = np.random.choice(6890,size=6890,replace=False)
random_sample2 = np.random.choice(6890,size=6890,replace=False)
id_mesh=pymesh.load_mesh('./demo_data/13_643.obj')
pose_mesh=pymesh.load_mesh('./demo_data/14_664.obj')
with torch.no_grad():
id_mesh_points=id_mesh.vertices[random_sample]
id_mesh_points=id_mesh_points - (id_mesh.bbox[0] + id_mesh.bbox[1]) / 2
id_mesh_points = torch.from_numpy(id_mesh_points.astype(np.float32)).cuda()
pose_mesh_points=pose_mesh.vertices#[random_sample2]
pose_mesh_points=pose_mesh_points-(pose_mesh.bbox[0] + pose_mesh.bbox[1]) / 2
pose_mesh_points = torch.from_numpy(pose_mesh_points.astype(np.float32)).cuda()
pointsReconstructed = net_G(pose_mesh_points.transpose(0,1).unsqueeze(0),id_mesh_points.transpose(0,1).unsqueeze(0)) # forward pass
new_face=face_reverse(id_mesh.faces)
pymesh.save_mesh_raw('./demo_data/13_664.obj', pointsReconstructed.cpu().numpy().squeeze(),new_face)
| python | Apache-2.0 | bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6 | 2026-01-05T07:14:12.473684Z | false |
jiashunwang/Neural-Pose-Transfer | https://github.com/jiashunwang/Neural-Pose-Transfer/blob/bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6/evaluate.py | evaluate.py | import trimesh
import numpy as np
#make sure the order of identity points and gt points are same
#for original_model, please keep the identity and pose points in different order
ours_mesh = trimesh.load('ours.obj')
ours_vertices=ours_mesh.vertices
ours_bbox= np.array([[np.max(ours_vertices[:,0]), np.max(ours_vertices[:,1]), np.max(ours_vertices[:,2])], \
[np.min(ours_vertices[:,0]), np.min(ours_vertices[:,1]), np.min(ours_vertices[:,2])]])
ours_vertices_align=ours_vertices-(ours_bbox[0] + ours_bbox[1]) / 2
gt_mesh=trimesh.load('gt.obj')
gt_vertices=gt_mesh.vertices
gt_bbox= np.array([[np.max(gt_vertices[:,0]), np.max(gt_vertices[:,1]), np.max(gt_vertices[:,2])], \
[np.min(gt_vertices[:,0]), np.min(gt_vertices[:,1]), np.min(gt_vertices[:,2])]])
gt_vertices_align=gt_vertices-(gt_bbox[0] + gt_bbox[1]) / 2
print(np.mean((ours_vertices_align-gt_vertices_align)**2))
| python | Apache-2.0 | bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6 | 2026-01-05T07:14:12.473684Z | false |
jiashunwang/Neural-Pose-Transfer | https://github.com/jiashunwang/Neural-Pose-Transfer/blob/bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6/data.py | data.py | import torch.utils.data as data
import torch
import numpy as np
import pymesh
import random
class SMPL_DATA(data.Dataset):
def __init__(self, train, npoints=6890, shuffle_point = False):
self.train = train
self.shuffle_point = shuffle_point
self.npoints = npoints
self.path='./smpl_data/'
def __getitem__(self, index):
identity_mesh_i=np.random.randint(0,16)
identity_mesh_p=np.random.randint(200,600)
pose_mesh_i=np.random.randint(0,16)
pose_mesh_p=np.random.randint(200,600)
identity_mesh=pymesh.load_mesh(self.path+str(identity_mesh_i)+'_'+str(identity_mesh_p)+'.obj')
pose_mesh=pymesh.load_mesh(self.path+str(pose_mesh_i)+'_'+str(pose_mesh_p)+'.obj')
gt_mesh=pymesh.load_mesh(self.path+str(identity_mesh_i)+'_'+str(pose_mesh_p)+'.obj')
pose_points = pose_mesh.vertices
identity_points=identity_mesh.vertices
identity_faces=identity_mesh.faces
gt_points = gt_mesh.vertices
pose_points = pose_points - (pose_mesh.bbox[0] + pose_mesh.bbox[1]) / 2
pose_points = torch.from_numpy(pose_points.astype(np.float32))
identity_points=identity_points-(identity_mesh.bbox[0]+identity_mesh.bbox[1])/2
identity_points=torch.from_numpy(identity_points.astype(np.float32))
gt_points=gt_points-(gt_mesh.bbox[0]+gt_mesh.bbox[1]) / 2
gt_points = torch.from_numpy(gt_points.astype(np.float32))
#if self.train:
# a = torch.FloatTensor(3)
# pose_points = pose_points + (a.uniform_(-1,1) * 0.03).unsqueeze(0).expand(-1, 3)
random_sample = np.random.choice(self.npoints,size=self.npoints,replace=False)
random_sample2 = np.random.choice(self.npoints,size=self.npoints,replace=False)
new_face=identity_faces
if self.shuffle_point:
pose_points = pose_points[random_sample2]
identity_points=identity_points[random_sample]
gt_points=gt_points[random_sample]
face_dict={}
for i in range(len(random_sample)):
face_dict[random_sample[i]]=i
new_f=[]
for i in range(len(identity_faces)):
new_f.append([face_dict[identity_faces[i][0]],face_dict[identity_faces[i][1]],face_dict[identity_faces[i][2]]])
new_face=np.array(new_f)
return pose_points, random_sample, gt_points, identity_points, new_face
def __len__(self):
return 4000
| python | Apache-2.0 | bd62eef7bad6752ae6cab7fa40bc1935e4dfeec6 | 2026-01-05T07:14:12.473684Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/setup.py | setup.py | from setuptools import setup, find_packages
from pathlib import Path
import os
if __name__ == "__main__":
with Path(Path(__file__).parent, "README.md").open(encoding="utf-8") as file:
long_description = file.read()
def _read_reqs(relpath):
fullpath = os.path.join(os.path.dirname(__file__), relpath)
with open(fullpath) as f:
return [s.strip() for s in f.readlines() if (s.strip() and not s.startswith("#"))]
REQUIREMENTS = _read_reqs("requirements.txt")
setup(
name="clip-video-encode",
packages=find_packages(),
include_package_data=True,
version="1.3.0",
license="MIT",
description="Easily compute clip embeddings from video frames",
long_description=long_description,
long_description_content_type="text/markdown",
entry_points={"console_scripts": ["clip-video-encode=clip_video_encode.cli:main"]},
author="Maciej Kilian",
author_email="kilianmaciej6@gmail.com",
url="https://github.com/iejMac/clip-video-encode",
data_files=[(".", ["README.md"])],
keywords=["machine learning"],
install_requires=REQUIREMENTS,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
],
)
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/tests/test_similarity.py | tests/test_similarity.py | import os
import numpy as np
import pytest
import tempfile
import torch
import open_clip
from clip_video_encode import clip_video_encode
def test_similarity():
test_path = "tests/test_videos"
with tempfile.TemporaryDirectory() as tmpdir:
clip_video_encode(
["tests/test_videos/vid1.mp4", "tests/test_videos/vid2.mp4"],
tmpdir,
output_format="files",
take_every_nth=2,
frame_memory_size=0.125,
use_dst_name=True,
)
model, _, _ = open_clip.create_model_and_transforms("ViT-B-32", pretrained="laion2b_s34b_b79k")
text = open_clip.tokenize(["bears", "monkey"])
with torch.no_grad(), torch.cuda.amp.autocast():
text_feat = model.encode_text(text).float()
for vid in ["vid1.mp4", "vid2.mp4"]:
frame_embeddings = np.load(os.path.join(tmpdir, vid[:-4] + ".npy"))
frame_feat = torch.from_numpy(frame_embeddings[0]).float() # only take first frame
text_probs = (100.0 * frame_feat @ text_feat.T).softmax(dim=-1)
best = torch.argmax(text_probs)
assert best == (0 if vid == "vid1.mp4" else 1)
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/tests/test_encode.py | tests/test_encode.py | import os
import numpy as np
import pytest
import tempfile
from clip_video_encode import clip_video_encode
FRAME_COUNTS = {
"vid1.mp4": 56,
"vid2.mp4": 134,
"https://www.youtube.com/watch?v=a8DM-tD9w2I": 20,
}
def test_encode():
test_path = "tests/test_videos"
with tempfile.TemporaryDirectory() as tmpdir:
clip_video_encode(
os.path.join(test_path, "test_list.parquet"),
tmpdir,
output_format="files",
take_every_nth=2,
frame_memory_size=0.125,
metadata_columns=["caption", "meta"],
use_dst_name=True,
)
assert len(os.listdir(tmpdir)) == len(FRAME_COUNTS) * 3
for vid in FRAME_COUNTS.keys():
if vid.endswith(".mp4"):
ld = vid[:-4] + ".npy"
else:
ld = vid.split("=")[-1] + ".npy"
embeddings = np.load(os.path.join(tmpdir, ld))
assert embeddings.shape[0] == FRAME_COUNTS[vid] // 2 # frame count
assert embeddings.shape[1] == 512 # embed dim
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/tests/test_modules.py | tests/test_modules.py | import os
import glob
import pytest
import tempfile
import open_clip
import multiprocessing
import numpy as np
import tarfile
import torch
from torchvision.transforms import Compose, Normalize, ToPILImage, ToTensor
from clip_video_encode.utils import block2dl
from clip_video_encode.simplemapper import FrameMapper
from clip_video_encode.writer import FileWriter, WebDatasetWriter
from clip_video_encode.reader import Reader
FRAME_COUNTS = {
"vid1.mp4": 56,
"vid2.mp4": 134,
}
def _convert_image_to_rgb(image):
return image.convert("RGB")
def test_utils():
n_px = 224
prepro = Compose(
[
ToPILImage(),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
]
)
post_prepro_shape = (3, n_px, n_px)
N_FRAMES = 100
block = np.zeros((100, n_px, n_px, 3), dtype=np.uint8)
BATCH_SIZE = 20
N_DATASET_WORKERS = multiprocessing.cpu_count()
fr_dl = block2dl(block, prepro, BATCH_SIZE, N_DATASET_WORKERS)
batch_count = 0
for batch in fr_dl:
assert batch.shape == (BATCH_SIZE, *post_prepro_shape)
batch_count += 1
assert batch_count == int(N_FRAMES / BATCH_SIZE)
@pytest.mark.parametrize("oc_model_name", ["ViT-B-32", "ViT-L-14"])
def test_mapper(oc_model_name):
# Initialize model:
device = "cpu"
model_input_shape = (3, 224, 224)
model_output_dim = 512 if oc_model_name == "ViT-B-32" else 768
fm = FrameMapper(oc_model_name, "laion400m_e32", device)
bs = 20
batch = torch.rand(bs, *model_input_shape).to(device)
with torch.no_grad(), torch.cuda.amp.autocast():
output = fm(batch)
assert output.shape == (bs, model_output_dim)
@pytest.mark.parametrize("writer_type", ["files", "webdataset"])
def test_writer(writer_type):
with tempfile.TemporaryDirectory() as tmpdir:
if writer_type == "files":
writer = FileWriter(tmpdir)
elif writer_type == "webdataset":
writer = WebDatasetWriter(tmpdir, 5, "npy", 5)
N_VIDS = 10
N_FRAMES = 100
lat_dim = 8
vid_embeds = [np.ones((N_FRAMES, lat_dim), dtype=float) * i for i in range(N_VIDS)]
for i, emb in enumerate(vid_embeds):
fake_metadata = {"json": {"caption": str(i), "x": i}, "txt": str(i)}
writer.write(emb, str(i), fake_metadata)
writer.close()
if writer_type == "files":
for i in range(N_VIDS):
dst_name = f"{i}.npy"
np_embeddings = np.load(os.path.join(tmpdir, dst_name))
assert np_embeddings.shape == (N_FRAMES, lat_dim)
val = int(dst_name[0])
assert np.all(np_embeddings == val)
assert len(os.listdir(tmpdir)) == N_VIDS * 3
elif writer_type == "webdataset":
l = glob.glob(tmpdir + "/*.tar")
assert len(l) == 2
for i in range(2):
assert tmpdir + f"/0000{i}_clip_embeddings.tar" in l
assert len(tarfile.open(tmpdir + "/00000_clip_embeddings.tar").getnames()) == (N_VIDS // 2) * 3
@pytest.mark.parametrize("input_format", ["txt", "csv", "parquet"])
def test_reader(input_format):
src = f"tests/test_videos/test_list.{input_format}"
metadata_columns = ["caption", "meta"] if input_format != "txt" else []
reader = Reader(src, metadata_columns)
vids, ids, meta = reader.get_data()
assert len(vids) == 3
for i in range(len(vids)):
assert ids[i].as_py() == i
assert len(meta) == len(metadata_columns)
for k in meta:
assert k in metadata_columns
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/writer.py | clip_video_encode/writer.py | """save embeddings."""
import os
import json
import fsspec
import numpy as np
import webdataset as wds
from io import BytesIO
write_fmt = {
"mp4": lambda data: data, # pylint: disable=unnecessary-lambda
"txt": lambda data: str(data), # pylint: disable=unnecessary-lambda
"json": lambda data: json.dumps(data, indent=4),
}
class FileWriter:
"""Writes output as files."""
def __init__(self, output_folder):
self.output_folder = output_folder
self.fs, self.output_folder = fsspec.core.url_to_fs(output_folder)
def write(self, arr, key, metadata=None):
"""write sample to file."""
key, metadata = str(key), {} if metadata is None else metadata
save_pth = os.path.join(self.output_folder, key + ".npy")
with self.fs.open(save_pth, "wb") as f:
nbp = BytesIO()
np.save(nbp, arr)
f.write(nbp.getbuffer())
for ext in metadata:
md_filename = os.path.join(self.output_folder, f"{key}.{ext}")
write_data = write_fmt[ext](metadata[ext]) if ext in write_fmt else metadata[ext]
with self.fs.open(md_filename, "w") as f:
f.write(write_data)
def close(self):
pass
class WebDatasetWriter:
"""Writes output in WebDataset format."""
def __init__(self, output_folder, oom_shard_count, encode_format, maxcount=10000, shard_id=0):
self.output_folder = output_folder
self.oom_shard_count = oom_shard_count
self.encode_format = encode_format
self.maxcount = maxcount
self.shard_id = shard_id
self.shard_suffix = "clip_embeddings" # TODO: maybe there should be param for this?
self.count = 0
self.tarwriter = None
self.tar_fd = None
self.create_shard()
def create_shard(self, shard_id=None):
"""create new shard in sequential order."""
self.close()
if shard_id is not None:
self.shard_id = shard_id
shard_name = shard_id
if not isinstance(shard_id, str):
shard_name = "{shard_id:0{oom_shard_count}d}".format( # pylint: disable=consider-using-f-string
shard_id=self.shard_id, oom_shard_count=self.oom_shard_count
)
shard_name += "_" + self.shard_suffix
fs, output_path = fsspec.core.url_to_fs(self.output_folder)
self.tar_fd = fs.open(f"{output_path}/{shard_name}.tar", "wb")
self.tarwriter = wds.TarWriter(self.tar_fd)
def write(self, arr, key, metadata=None):
"""write sample to current shard."""
key, metadata = str(key), {} if metadata is None else metadata
if self.count >= self.maxcount:
self.shard_id += 1
self.count = 0
self.create_shard()
sample = {"__key__": key}
if arr is not None:
sample[self.encode_format] = arr
for ext in metadata:
sample[ext] = write_fmt[ext](metadata[ext]) if ext in write_fmt else metadata[ext]
self.tarwriter.write(sample)
self.count += 1
def close(self):
if self.tarwriter is not None:
self.tarwriter.close()
self.tar_fd.close()
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/cli.py | clip_video_encode/cli.py | """cli entry point"""
import fire
from clip_video_encode import clip_video_encode
def main():
    """Main entry point"""
    # Expose clip_video_encode's full signature as a CLI via python-fire.
    fire.Fire(clip_video_encode)
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/reader.py | clip_video_encode/reader.py | """handles input parsing."""
import os
import json
import glob
import pyarrow.parquet as pq
import pyarrow.csv as csv_pq
import pyarrow as pa
class Reader:
    """Parses input into required data.

    Necessary columns (reader will always look for these columns in parquet and csv):
        * videoLoc - location of video either on disc or URL
        * videoID - unique ID of each video, if not provided, ID = index

    Additional special columns:
        * caption - will be saved in separate key.txt file
        anything else - put in key.json metadata file
    """

    def __init__(self, src, meta_columns=None):
        """
        Input:
            src:
                str: path to mp4 file
                str: youtube link
                str: path to txt file with multiple mp4's or youtube links
                list[str]: list with multiple mp4's or youtube links
            meta_columns:
                list[str]: columns of useful metadata to save with videos
        """
        self.columns = ["videoID", "videoLoc"]
        # BUG FIX: the original iterated meta_columns before its None-check,
        # so the documented default (None) raised TypeError. Normalize first,
        # and copy so the caller's list is not mutated by remove() below.
        meta_columns = [] if meta_columns is None else list(meta_columns)
        # Required columns that also appear in meta_columns are set aside so
        # they are only read once but still exposed as metadata afterwards.
        no_dupl_temp = []
        for c in self.columns:
            if c in meta_columns:
                no_dupl_temp.append(c)
                meta_columns.remove(c)
        self.meta_columns = meta_columns

        if isinstance(src, str):
            if src.endswith(".txt"):
                df = csv_pq.read_csv(src, read_options=csv_pq.ReadOptions(column_names=["videoLoc"]))
                df = df.add_column(0, "videoID", [list(range(df.num_rows))])  # add ID's
            elif src.endswith(".csv"):
                df = csv_pq.read_csv(src)
            elif src.endswith(".parquet"):
                with open(src, "rb") as f:
                    columns_to_read = self.columns + meta_columns
                    df = pq.read_table(f, columns=columns_to_read)
            else:  # singular video (mp4 or link)
                src = [src]
        if isinstance(src, list):
            df = pa.Table.from_arrays([src], names=["videoLoc"])
            df = df.add_column(0, "videoID", [list(range(df.num_rows))])  # add ID's

        # Re-expose the deduplicated required columns as metadata.
        for c in no_dupl_temp:
            self.meta_columns.append(c)
        self.df = df

    def get_data(self):
        """Return (video locations, videoID column, {meta name: pyarrow column})."""
        vids = self.df["videoLoc"].to_pylist()
        ids = self.df["videoID"]
        meta = dict(  # pylint: disable=consider-using-dict-comprehension
            [(meta, self.df[meta]) for meta in self.meta_columns]
        )
        return vids, ids, meta
def read_shard(tempdir, vid_ext="mp4", pass_through_keys=None):
    """
    Extracts shard a tempdir and returns references to files inside

    Input:
        tempdir:
            path to directory containing contents of an opened WebDataset shard with input data
        vid_ext:
            video file extension to look for
        pass_through_keys:
            extensions we would like to keep from the source shard in the output shard

    Returns (video paths, sample keys, per-sample metadata dicts), all aligned.
    """
    if pass_through_keys is None:
        pass_through_keys = []

    vids = sorted(
        [f.split("/")[-1] for f in glob.glob(os.path.join(tempdir, f"*.{vid_ext}"))]
    )  # TODO: parameterize the video extension

    # FIX: the original opened json/txt/binary files without closing them,
    # leaking file handles; use context managers instead.
    def _read_json(path):
        with open(path, "rb") as f:
            return json.load(f)

    def _read_txt(path):
        with open(path, "r", encoding="UTF-8") as f:
            return f.read()

    read_funcs = {
        "json": _read_json,
        "txt": _read_txt,
    }

    keys, meta = [x.split(f".{vid_ext}")[0] for x in vids], []
    for key in keys:
        metadata = {}
        # handles double extensions for weird metadata types f.e. ".optical-flow.npy" vs. ".clip_b.npy"
        exts = [".".join(f.split(".")[1:]) for f in glob.glob(os.path.join(tempdir, f"{key}.*"))]
        desired_exts = list(set(pass_through_keys).intersection(set(exts)))
        for ext in desired_exts:
            file_path = os.path.join(tempdir, f"{key}.{ext}")
            if ext in read_funcs:
                read_data = read_funcs[ext](file_path)
            else:
                with open(file_path, "rb") as f:
                    read_data = f.read()
            metadata[ext] = read_data
        meta.append(metadata)
    vids = [os.path.join(tempdir, v) for v in vids]
    return vids, keys, meta
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/simplemapper.py | clip_video_encode/simplemapper.py | """simplemapper - simple frame -> embedding mapper."""
import torch
import numpy as np
import open_clip
from torchvision.transforms import ToPILImage
try:
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel, GumbelVQ
except ImportError as e:
print("Missing imports")
def load_config(config_path, display=False):
    """Load an OmegaConf YAML config; optionally pretty-print it."""
    config = OmegaConf.load(config_path)
    if display:
        # BUG FIX: ``yaml`` was used here without being imported anywhere in
        # the module, raising NameError when display=True. Import locally —
        # PyYAML is already a dependency of omegaconf.
        import yaml  # pylint: disable=import-outside-toplevel

        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(config, ckpt_path=None, is_gumbel=False):
    """Build a (Gumbel)VQGAN from a taming-transformers config and optionally
    load weights from ``ckpt_path``; returns the model in eval mode."""
    if is_gumbel:
        model = GumbelVQ(**config.model.params)
    else:
        model = VQModel(**config.model.params)
    if ckpt_path is not None:
        # Lightning-style checkpoint: the weights live under "state_dict".
        sd = torch.load(ckpt_path, map_location="cpu")["state_dict"]
        _, _ = model.load_state_dict(sd, strict=False)
        # missing, unexpected = model.load_state_dict(sd, strict=False)
    return model.eval()
def preprocess_vqgan(x):
    """Rescale inputs from [0, 1] to the [-1, 1] range VQGAN expects."""
    return 2.0 * x - 1.0
class FrameMapper:
    """maps frames -> embeddings (or captions)"""

    def __init__(self, model_name, pretrained, device, get_text_tokenizer=False, get_frame_tokenizer=False):
        # Initialize model:
        if not get_frame_tokenizer:
            model, _, preprocess = open_clip.create_model_and_transforms(
                model_name, pretrained=pretrained, device=device
            )
            # BUG FIX: was ``open_clip.get_tokenizer(oc_model_name)`` — an
            # undefined name that raised NameError whenever a text tokenizer
            # was requested; the intended argument is ``model_name``.
            tokenizer = open_clip.get_tokenizer(model_name) if get_text_tokenizer else None
            # Frames arrive as arrays, not PIL images; prepend the conversion
            # and keep only the final (ToTensor/normalize) transforms.
            preprocess.transforms = [ToPILImage()] + preprocess.transforms[-3:]
        else:
            # TODO: (https://github.com/CompVis/taming-transformers/tree/master#overview-of-pretrained-models)
            config_path, ckpt_path = model_name, pretrained
            config = load_config(config_path, display=False)
            model = load_vqgan(config, ckpt_path=ckpt_path, is_gumbel=("gumbel" in config_path)).to(device)
            # preprocess = preprocess_vqgan
            preprocess = lambda x: x  # dataloader preprocess
            tokenizer = lambda x: x

        self.model = model
        self.preprocess = preprocess
        self.tokenizer = tokenizer
        self.device = device

    def __call__(self, batch, captions=None):
        """Encode a batch of preprocessed frames into CLIP image embeddings."""
        with torch.no_grad(), torch.cuda.amp.autocast():
            embeddings = self.model.encode_image(batch).cpu().detach().numpy()
        return embeddings

    def encode_captions(self, captions):
        """Tokenize and encode a list of caption strings into text embeddings."""
        with torch.no_grad(), torch.cuda.amp.autocast():
            tokens = self.tokenizer(captions).to(self.device)
            caption_embeddings = self.model.encode_text(tokens).cpu().detach().numpy()
        return caption_embeddings

    def tokenize_frames(self, batch):
        """Quantize frames through the VQGAN encoder, returning codebook indices."""
        with torch.no_grad():
            batch = preprocess_vqgan(batch)
            z, _, [_, _, indices] = self.model.encode(batch)
            # One row of indices per frame: (batch, H*W of the latent grid).
            return indices.reshape(-1, np.prod(z.shape[-2:])).cpu().detach().numpy()

    def generate_captions(self, batch):
        """generate caption for batch of imgs"""
        # TODO: idk if this is the best way to do it but works for now
        # jprompt = "a video of "
        prompt = ""
        tok = self.tokenizer(prompt)
        # 49407 is the end-of-text token id; cut the prompt there.
        index = torch.argmax((tok == 49407).type(torch.int64))
        tok = tok[:, :index]  # pylint: disable=(invalid-sequence-index)
        tok = torch.cat([tok] * batch.shape[0])
        tok = tok.to(batch.device)

        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.model.generate(
                batch,
                text=tok,
                generation_type="beam_search",
                temperature=1.0,
                top_p=0.1,
                min_seq_len=15,
                num_beams=10,
                num_beam_groups=5,
            )

        captions = [
            open_clip.decode(gen).split("<end_of_text>")[0].replace("<start_of_text>", "")[len(prompt) :]
            for gen in generated
        ]
        return captions
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/distributed.py | clip_video_encode/distributed.py | """functions for distributing computation"""
import os
def world_info_from_env():
    """Read (local_rank, global_rank, world_size) from distributed-launch
    environment variables, checking each launcher's convention in turn."""

    def _first_int(names, default):
        # Return the first defined env var among ``names``, as an int.
        for name in names:
            if name in os.environ:
                return int(os.environ[name])
        return default

    local_rank = _first_int(
        ("LOCAL_RANK", "MPI_LOCALRANKID", "SLURM_LOCALID", "OMPI_COMM_WORLD_LOCAL_RANK"), 0
    )
    global_rank = _first_int(("RANK", "PMI_RANK", "SLURM_PROCID", "OMPI_COMM_WORLD_RANK"), 0)
    world_size = _first_int(("WORLD_SIZE", "PMI_SIZE", "SLURM_NTASKS", "OMPI_COMM_WORLD_SIZE"), 1)
    return local_rank, global_rank, world_size
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/utils.py | clip_video_encode/utils.py | """clip-video-encode utils."""
from torch.utils.data import Dataset, DataLoader
class HelperDataset(Dataset):
    """Thin Dataset adapter that applies ``preprocess`` to each frame lazily,
    so transformation happens in the DataLoader workers."""

    def __init__(self, imgs, preprocess):
        super().__init__()
        self.imgs = imgs
        self.preprocess = preprocess

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, ind):
        frame = self.imgs[ind]
        return self.preprocess(frame)
def block2dl(frames, preprocess, bs, n_work):
    """Wrap ``frames`` in a preprocessing dataset and return a DataLoader
    over it (order-preserving, batch size ``bs``, ``n_work`` workers)."""
    return DataLoader(
        HelperDataset(frames, preprocess),
        batch_size=bs,
        shuffle=False,
        num_workers=n_work,
    )
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/__init__.py | clip_video_encode/__init__.py | """clip video encode"""
from .clip_video_encode import clip_video_encode
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/clip_video_encode.py | clip_video_encode/clip_video_encode.py | """encode video with CLIP"""
import re
import sys
import time
import math
import torch
from video2numpy.frame_reader import FrameReader
from .reader import Reader, read_shard
from .simplemapper import FrameMapper
from .writer import FileWriter, WebDatasetWriter
from .distributed import world_info_from_env
from .handle_chunk import encode_chunk
import tarfile
import tempfile
import os
import braceexpand
import fsspec
import io
CHUNK_SIZE = 100
def _convert_image_to_rgb(image):
    # Force a PIL image into 3-channel RGB (drops alpha / palette modes).
    return image.convert("RGB")
def extract_braceexpand_values(be_template, path):
    """Return the digit substrings of ``path`` that occupy the positions of
    the ``{..}`` ranges in the braceexpand template ``be_template``."""
    # Turn each {...} range into a capturing digit group, then escape dots.
    regex = re.sub(r"\{.*?\}", r"(\\d+)", be_template).replace(".", r"\.")
    matched = re.compile(regex).match(path)
    return list(matched.groups())
def clip_video_encode(
    src,
    dest="",
    output_format="files",
    take_every_nth=25,
    target_fps=-1,
    input_format="table",
    frame_workers=1,
    frame_memory_size=4,
    metadata_columns="",
    use_dst_name=False,
    distribute="none",
    oom_shard_count=5,
    model_name="ViT-B-32",
    pretrained="laion2b_s34b_b79k",
    captioning_strategy="none",
    frame_tokenization_strategy="none",
    generated_caption_key="generated_caption",  # this will put it in json, make this 'caption' if you want it in txt
    pass_through_keys="mp4,txt,json",
    vid_ext="mp4",
    caption_similarity=False,
    img_size=224,
):
    """
    Encode frames using CLIP image encoder

    Input:
      src:
        str: path to mp4 file
        str: youtube link
        str: path to txt file with multiple mp4's or youtube links
        list: list with multiple mp4's or youtube links
      dest:
        str: directory where to save embeddings to
        None: dest = src + .npy
      output_format:
        str: "files" or "webdataset"
      take_every_nth:
        int: only take every nth frame
      target_fps:
        int: target fps to downsample videos to (-1 means original fps or take_every_nth)
      input_format:
        str: "table" (txt/csv/parquet listing) or "webdataset" (tar shards)
      frame_workers:
        int: number of Processes to distribute video reading to.
      frame_memory_size:
        int: GB of memory for FrameReader.
      metadata_columns:
        str: a comma separated list of metadata column names to look for in src
      use_dst_name:
        bool: use the save name suggested by video2numpy
      distribute:
        str: distribution strategy, currently either slurm or none
      oom_shard_count:
        int: number of digits used to zero-pad output shard names
      model_name:
        str:
          - open_clip model name, used for selecting CLIP architecture
          - vqgan config path
      pretrained:
        str:
          - open_clip pretrained weights name
          - vqgan weights checkpoint path
      captioning_strategy:
        str: which frames of a video to generate captions for. Possible values are:
          - none: don't generate any captions
          - center: generate a caption for the middle frame
        int: (NOT IMPLEMENTED) step size for which frames to generate captions for
      frame_tokenization_strategy:
        str: "none" disables VQGAN frame tokenization
      generated_caption_key:
        str: metadata key under which generated captions are stored
      pass_through_keys:
        str: comma separated list of extension to pass through from input dataset (if webdataset format)
      vid_ext:
        str: video file extension to look for inside shards
      caption_similarity:
        bool: whether to put the similarity between the average frame embedding and text embedding into metadata
      img_size:
        int: pixel height and width of target output shape
    """
    assert input_format in ["table", "webdataset"]

    # Normalize comma-separated CLI-style arguments into lists.
    if isinstance(metadata_columns, str):
        metadata_columns = [metadata_columns] if metadata_columns != "" else []
    metadata_columns = list(metadata_columns) if isinstance(metadata_columns, tuple) else metadata_columns
    if isinstance(pass_through_keys, str):
        pass_through_keys = pass_through_keys.split(",")

    if input_format == "table":
        reader = Reader(src, metadata_columns)
        vids, ids, meta = reader.get_data()
        meta_refs = list(range(len(vids)))
    else:  # WebDataset, so we distribute shards
        shards = list(braceexpand.braceexpand(src))
        # NOTE: this might need to be improved, some shards may not be complete
        fs, output_path = fsspec.core.url_to_fs(dest)
        if not fs.exists(output_path):
            fs.mkdir(output_path)
            done_shards = set()
        else:
            # Shards already present in dest count as done and are skipped.
            done_shards = set(x.split("/")[-1].split("_")[0] for x in fs.glob(output_path + "/*.tar"))

        print(f"Removing {len(done_shards)} done_shards from processing queue...")
        # TODO: finish this
        # def get_sids(be_template):
        #     shards = list(braceexpand.braceexpand(be_template))
        #     values = extract_braceexpand_values(be_template, path)
        #     max_values = extract_braceexpand_values(be_template, list(braceexpand.braceexpand(be_template))[-1])
        #     for i in range(len(values)):
        #         values[i] = values[i].zfill(len(max_values[i]))
        #     write_shard_id = "".join(values)
        #     return write_shard_id
        # NOTE(review): ``s_ids`` is not defined anywhere in this module — this
        # line raises NameError in webdataset mode; it presumably should be
        # produced by the commented-out get_sids logic above. Needs fixing.
        shards = [s for s_id, s in zip(s_ids, shards) if int(s_id) not in done_shards]

    starting_shard_id = 0
    shard_sample_count = 10000
    if distribute == "slurm":
        # Split the work evenly across SLURM workers using env variables.
        local_rank, global_rank, world_size = world_info_from_env()
        if input_format == "table":
            work_size = math.ceil(len(vids) / world_size)
        else:
            work_size = math.ceil(len(shards) / world_size)
        print(f"Slurm worker {global_rank} processing {work_size} videos...")
        ws, wf = global_rank * work_size, (global_rank + 1) * work_size
        if input_format == "table":
            vids = vids[ws:wf]
            ids = ids[ws:wf]
            for mc in meta.keys():
                meta[mc] = meta[mc][ws:wf]
            starting_shard_id += math.ceil(work_size / shard_sample_count) * global_rank
        elif input_format == "webdataset":
            shards = shards[ws:wf]
        device = f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu"
    else:
        local_rank, global_rank, world_size = 0, 0, 1  # TODO: how do we do this?
        device = "cuda" if torch.cuda.is_available() else "cpu"

    assert output_format in ["files", "webdataset"]
    if output_format == "files":
        writer = FileWriter(dest)
    elif output_format == "webdataset":
        # TODO: maybe include params for this?
        # Derive the first output shard id from the first input shard name.
        starting_shard_id = int(shards[0].split("/")[-1].split(".tar")[0])
        writer = WebDatasetWriter(dest, oom_shard_count, "npy", maxcount=1e6, shard_id=starting_shard_id)

    fm = FrameMapper(
        model_name,
        pretrained,
        device,
        get_text_tokenizer=(caption_similarity or (captioning_strategy != "none")),
        get_frame_tokenizer=(frame_tokenization_strategy != "none"),
    )

    if input_format == "table":
        fr = FrameReader(
            vids,
            meta_refs,
            take_every_nth=take_every_nth,
            target_fps=target_fps,
            resize_size=img_size,
            workers=frame_workers,
            memory_size=frame_memory_size,
        )
        fr.start_reading()

        frames, ind_dict = [], {}
        block_size = 0
        i = 0
        for vid_frames, info in fr:
            i += 1
            frames.append(vid_frames)
            # Remember where this video's frames live in the concatenated block.
            ind_dict[info["reference"]] = (
                block_size,
                block_size + vid_frames.shape[0],
                info["dst_name"],
            )
            block_size += vid_frames.shape[0]

            # Flush accumulated frames to the encoder every CHUNK_SIZE videos.
            if i % CHUNK_SIZE == 0:
                encode_chunk(frames, ind_dict, writer, fm, meta, ids, use_dst_name, device, input_format=input_format)
                frames, ind_dict, block_size = [], {}, 0

        if len(frames) > 0:  # TODO: make this cleaner
            encode_chunk(frames, ind_dict, writer, fm, meta, ids, use_dst_name, device, input_format=input_format)
    else:  # WebDataset shard logic
        for shard in shards:
            try:
                # Zero-pad the brace-expand values so output shard names align
                # with the widest value in the template.
                values = extract_braceexpand_values(src, shard)
                max_values = extract_braceexpand_values(src, list(braceexpand.braceexpand(src))[-1])
                for i, val in enumerate(values):
                    values[i] = val.zfill(len(max_values[i]))
                write_shard_id = "".join(values)

                # TODO: find better way of doing this earlier
                if write_shard_id in done_shards:
                    continue

                times = {}
                t = time.time()
                with tempfile.TemporaryDirectory(prefix=f"worker_{global_rank}_") as tempdir:
                    os.chmod(tempdir, 0o777)  # This lets subprocesses from v2np read files in the tempdir
                    folder = "/".join(shard.split("/")[0:-1])
                    fs, output_path = fsspec.core.url_to_fs(folder)
                    read_shard_id = shard.split("/")[-1].split(".tar")[0]
                    tar_bytes = io.BytesIO(fs.open(f"{output_path}/{read_shard_id}.tar").read())
                    with tarfile.open(fileobj=tar_bytes) as tar:
                        tar.extractall(tempdir)
                    writer.create_shard(shard_id=write_shard_id)
                    times["download_and_extract"] = times.get("download_and_extract", 0) + time.time() - t
                    t = time.time()

                    vids, ids, meta = read_shard(tempdir, vid_ext, pass_through_keys=pass_through_keys)
                    meta_refs = list(range(len(vids)))

                    fr = FrameReader(
                        vids,
                        meta_refs,
                        take_every_nth=take_every_nth,
                        target_fps=target_fps,
                        resize_size=img_size,
                        workers=frame_workers,
                        memory_size=frame_memory_size,
                    )
                    fr.start_reading()

                    frames, ind_dict = [], {}
                    block_size = 0
                    i = 0
                    n_frames = 0
                    for vid_frames, info in fr:
                        i += 1
                        if captioning_strategy == "center":
                            # Caption only the middle frame of each video.
                            vid_frames = vid_frames[len(vid_frames) // 2 : len(vid_frames) // 2 + 1]
                        n_frames += len(vid_frames)
                        frames.append(vid_frames)
                        ind_dict[info["reference"]] = (
                            block_size,
                            block_size + vid_frames.shape[0],
                            info["dst_name"],
                        )
                        block_size += vid_frames.shape[0]
                        times["read_frames"] = times.get("read_frames", 0) + time.time() - t
                        t = time.time()

                        if i % CHUNK_SIZE == 0:
                            encode_chunk(
                                frames,
                                ind_dict,
                                writer,  # TODO: turn all args below this into kwarg dict and just unpack
                                fm,
                                meta,
                                ids,
                                use_dst_name,
                                device,
                                input_format=input_format,
                                captioning_strategy=captioning_strategy,
                                frame_tokenization_strategy=frame_tokenization_strategy,
                                generated_caption_key=generated_caption_key,
                            )
                            times["encode"] = times.get("encode", 0) + time.time() - t
                            t = time.time()
                            frames, ind_dict, block_size = [], {}, 0

                    t = time.time()
                    if len(frames) > 0:  # TODO: make this cleaner
                        encode_chunk(
                            frames,
                            ind_dict,
                            writer,
                            fm,
                            meta,
                            ids,
                            use_dst_name,
                            device,
                            input_format=input_format,
                            captioning_strategy=captioning_strategy,
                            frame_tokenization_strategy=frame_tokenization_strategy,
                            generated_caption_key=generated_caption_key,
                        )
                        times["encode"] = times.get("encode", 0) + time.time() - t
                        t = time.time()

                    # Report per-stage throughput in frames/second.
                    frame_adjusted = {k: n_frames / v for k, v in times.items()}
                    print(f"Frames/s: {frame_adjusted}")
            except Exception as e:  # pylint: disable=(broad-except)
                print(f"Shard {shard} failed: {str(e)}")
if __name__ == "__main__":
    if len(sys.argv) != 4:
        print("Usage: python clip-video-encode.py video.mp4 embeddings.npy take_every_nth")
        sys.exit(1)
    # BUG FIX: the third CLI argument was passed positionally, where it landed
    # on ``output_format`` instead of ``take_every_nth`` (as the usage string
    # promises). Pass it by keyword.
    clip_video_encode(sys.argv[1], sys.argv[2], take_every_nth=int(sys.argv[3]))
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/handle_chunk.py | clip_video_encode/handle_chunk.py | """encode chunk with CLIP"""
import numpy as np
import torch
from .utils import block2dl
# BATCH_SIZE = 256
BATCH_SIZE = 128
N_DATASET_WORKERS = 6
def encode_chunk(
    frames,
    ind_dict,
    writer,
    mapper,
    meta,
    ids,
    use_dst_name,
    device,
    input_format="table",
    captioning_strategy="none",
    frame_tokenization_strategy="none",
    generated_caption_key="generated_caption",
):
    """encodes a chunk of video frames and saves.

    ``frames`` is a list of per-video frame arrays; ``ind_dict`` maps a video
    reference to its (start, end, dst_name) slice inside the concatenated
    block. Exactly one mode runs: captioning, frame tokenization, or (default)
    CLIP frame embedding.
    """
    vid_block = np.concatenate(frames)
    dl = block2dl(vid_block, mapper.preprocess, BATCH_SIZE, N_DATASET_WORKERS)

    with torch.no_grad():
        if captioning_strategy != "none":
            # Mode 1: generate captions for the (pre-selected) frames.
            captions = []
            for batch in dl:
                captions += mapper.generate_captions(batch.to(device))
            for ref, (i0, it, dst_name) in ind_dict.items():
                vid_id = dst_name[:-4] if use_dst_name else ids[ref]
                if input_format == "webdataset":
                    vid_meta = meta[ref]
                    vid_meta["json"] = vid_meta["json"] if "json" in vid_meta else {}
                else:
                    vid_meta = {"json": {}}
                    for k in meta:
                        vid_meta["json"][k] = meta[k][ref].as_py()
                # NOTE: Warning this might overwrite previous caption
                # NOTE: for now assumes there is only one caption
                vid_meta["json"][generated_caption_key] = captions[i0:it][0]
                # TODO: we should be able to do both at once with a CoCa model
                writer.write(None, vid_id, vid_meta)
        elif frame_tokenization_strategy != "none":
            # Mode 2: VQGAN-tokenize frames into codebook index sequences.
            tokens = []
            for batch in dl:
                batch = batch.permute(0, 3, 1, 2).float() / 255.0  # make channel first and [0, 1]
                indices = mapper.tokenize_frames(batch.to(device))
                tokens.append(indices)
            tokens = np.concatenate(tokens)
            for ref, (i0, it, dst_name) in ind_dict.items():
                vid_id = dst_name[:-4] if use_dst_name else ids[ref]
                if input_format == "webdataset":
                    vid_meta = meta[ref]
                else:
                    vid_meta = {"json": {}}
                    for k in meta:
                        vid_meta["json"][k] = meta[k][ref].as_py()
                if "caption" in vid_meta["json"]:
                    vid_meta["txt"] = vid_meta["json"]["caption"]
                video_tokens = tokens[i0:it]
                writer.write(video_tokens, vid_id, vid_meta)
        else:
            # Mode 3 (default): CLIP image embeddings per frame.
            embeddings = []
            for batch in dl:
                with torch.cuda.amp.autocast():
                    emb = mapper(batch.to(device))
                embeddings.append(emb)

            caption_embs = None
            if mapper.tokenizer is not None:
                # TODO: is there a better way of doing this?
                # here we will compute similarity of empty string...
                # NOTE(review): iterating ``meta`` here assumes a list of dicts
                # (webdataset path); in table mode ``meta`` is a dict of
                # columns, so ``m`` would be a key string — confirm callers.
                captions = [m["caption"] if "caption" in m else "" for m in meta]
                caption_embs = mapper.encode_captions(captions)
                caption_embs = caption_embs / np.linalg.norm(caption_embs, axis=-1)[:, None]

            embeddings = np.concatenate(embeddings)
            for ref, (i0, it, dst_name) in ind_dict.items():
                vid_id = dst_name[:-4] if use_dst_name else ids[ref]
                if input_format == "webdataset":
                    vid_meta = meta[ref]
                else:
                    vid_meta = {"json": {}}
                    for k in meta:
                        vid_meta["json"][k] = meta[k][ref].as_py()
                if "caption" in vid_meta["json"]:
                    vid_meta["txt"] = vid_meta["json"]["caption"]
                frame_embeddings = embeddings[i0:it]
                if caption_embs is not None:
                    # normalize
                    fe = frame_embeddings / np.linalg.norm(frame_embeddings, axis=-1)[:, None]
                    ce = caption_embs[ref]
                    sim = (fe @ ce.T).tolist()
                    vid_meta["json"] = vid_meta["json"] if "json" in vid_meta else {}
                    vid_meta["json"]["clip_frame_similarity"] = sim
                writer.write(frame_embeddings, vid_id, vid_meta)
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/live_numpy_encoder.py | clip_video_encode/live_numpy_encoder.py | """encode numpy video frame arrays with CLIP from directory as they come in from other processes."""
import os
import time
import numpy as np
from .utils import block2dl
from .writer import FileWriter
N_DATASET_WORKERS = 6
BATCH_SIZE = 256
class LiveNumpyEncoder:
    """class that watches directory for set of numpy arrays of videos to encode using CLIP."""

    def __init__(self, data_dir, dest_dir, n_vids, mapper, preprocess, frame_mem=4, remove_on_read=False):
        """
        Input:
            data_dir: directory to watch for np files
            dest_dir: where to save embeddings to
            n_vids: number of numpy array names to watch for. Completes after n_vids have been encoded
            mapper: model used to map frames to embeddings
            preprocess: function to preprocess the frames with
            frame_mem: amount of memory in GB for shared frame array
            remove_on_read: remove arrays when done reading them
        """
        assert data_dir != dest_dir  # input and output will have same name
        self.data_dir = data_dir
        self.writer = FileWriter(dest_dir)
        self.n_vids = n_vids
        self.frame_mem = frame_mem
        self.fm = mapper
        self.preprocess = preprocess
        self.remove_on_read = remove_on_read

    def start(self):
        """starts live reading."""
        # Preallocate one reusable frame buffer sized by the memory budget
        # (224x224 RGB uint8 frames) plus a matching 512-d embedding buffer.
        mem_size_b = int(self.frame_mem * 1024**3)
        mem_frames = mem_size_b // (224**2 * 3)
        frame_array = np.zeros((mem_frames, 224, 224, 3), dtype=np.uint8)
        embedding_array = np.zeros((mem_frames, 512))

        while self.n_vids > 0:  # haven't seen all videos.
            # TODO: decide if we need some checks here for incorrectly placed files
            available_vids = os.listdir(self.data_dir)  # for now assuming all vids in self.data_dir are correct
            if len(available_vids) == 0:
                print("Waiting for arrays...")
                time.sleep(5)  # wait for arrays to come in
                continue
            print(f"Found {len(available_vids)} arrays.")

            name_inds = []
            t0 = time.perf_counter()
            cur_len = 0
            for vid in available_vids:
                assert vid.endswith(".npy")
                vid_path = os.path.join(self.data_dir, vid)
                vid_frames = np.load(vid_path)
                # Copy this video's frames into the shared buffer, recording
                # its (name, start, end) slice for the write-out below.
                frame_array[cur_len : cur_len + vid_frames.shape[0]] = vid_frames
                name_inds.append((vid, cur_len, cur_len + vid_frames.shape[0]))
                cur_len += vid_frames.shape[0]
                self.n_vids -= 1
                if self.remove_on_read:
                    os.remove(vid_path)
            t_load = time.perf_counter() - t0
            print(f"Load time: {t_load}")

            t0 = time.perf_counter()
            frame_chunk = frame_array[:cur_len]
            dl = block2dl(frame_chunk, self.preprocess, BATCH_SIZE, N_DATASET_WORKERS)
            cur_len = 0
            for batch in dl:
                emb = self.fm(batch.to(self.fm.device))
                embedding_array[cur_len : cur_len + emb.shape[0]] = emb
                cur_len += emb.shape[0]
            t_enc = time.perf_counter() - t0
            print(f"Encode time: {t_enc}")

            # Slice each video's embeddings back out and save per-video files.
            all_embs = embedding_array[:cur_len]
            for name, i0, it in name_inds:
                self.writer.write(all_embs[i0:it], name)
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/dataset/dataset_reader.py | clip_video_encode/dataset/dataset_reader.py | """
utils for processing datasets of format described in https://github.com/iejMac/clip-video-encode/pull/13
used https://github.com/rom1504/laion-prepro/blob/main/laion5B/usage_guide/dataloader_pytorch.py as template
"""
import io
import json
import numpy as np
import open_clip
import torch
import webdataset as wds
from torch.utils.data import DataLoader
def standardize_embedding_shape(emb, seq_len):
    """Pad (or truncate) ``emb`` to exactly ``seq_len`` rows.

    Returns ``(padded_emb, zero_mask)`` where ``zero_mask`` is 1.0 for real
    rows and 0.0 for padding rows.
    """
    if len(emb) > seq_len:
        print(f"Warning: Raw embedding is longer than standard sequence length ({len(emb)} > {seq_len})")
        emb = emb[:seq_len]
    n_pad = seq_len - len(emb)
    pad = np.zeros((n_pad, emb.shape[1]), dtype=emb.dtype)
    zero_mask = np.concatenate([np.ones(len(emb)), np.zeros(n_pad)])
    padded_emb = np.concatenate([emb, pad])
    return padded_emb, zero_mask
def create_embeddingwebdataset(
    urls,
    embedding_transform=lambda emb: emb,
    standard_seq_len=-1,
    to_tensor=True,
    enable_text=True,
    enable_meta=True,
):
    """
    Create a WebDataset reader for Frame Embedding Dataset

    Input:
        urls: brace-expandable shard URLs passed to wds.WebDataset
        embedding_transform: callable applied to each embedding sequence
        standard_seq_len: sequence length to pad all embedding sequences to (for batching)
            !(-1) : pad to standard_seq_len
            -1: don't pad (dataset can't be used in DataLoader with batch_size > 1)
        to_tensor: convert numpy embeddings to torch tensors
        enable_text: include text captions
        enable_meta: include metadata
    """
    dataset = wds.WebDataset(urls)

    # TODO: different tokeinzers??
    def tokenizer(text):
        return open_clip.tokenize([text])[0]

    def preprocess_dataset(item):
        # Decode one raw WebDataset sample into the output dict.
        output = {}

        # Embeddings are stored as raw .npy bytes under the "npy" key.
        npy_data = item["npy"]
        stream = io.BytesIO(npy_data)
        emb = np.lib.format.read_array(stream)
        if standard_seq_len != -1:
            emb, zero_mask = standardize_embedding_shape(emb, standard_seq_len)
            output["zero_mask"] = zero_mask
        if to_tensor:
            emb = torch.from_numpy(emb)
        output["embeddings"] = embedding_transform(emb)

        if enable_text:
            text_data = item["txt"]
            text = text_data.decode("utf-8")
            output["text"] = text
            output["text_tokens"] = tokenizer(text)

        if enable_meta:
            meta_data = item["json"]
            meta = json.loads(meta_data)
            # meta = meta_data.decode("utf-8")
            output["meta"] = meta

        return output

    # warn_and_continue: skip malformed samples instead of crashing the stream.
    transformed_dataset = dataset.map(preprocess_dataset, handler=wds.handlers.warn_and_continue)
    return transformed_dataset
def dataset_to_dataloader(dataset, batch_size, num_prepro_workers):
    """converts WebDataset to PyTorch DataLoader."""
    # NOTE(review): recent PyTorch versions reject prefetch_factor when
    # num_workers == 0 — confirm callers always pass num_prepro_workers > 0.
    dl = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_prepro_workers,
        pin_memory=True,
        prefetch_factor=2,
    )
    return dl
class EmbeddingWebDatasetReader:
    """WebDataset reader for Embedding Datasets"""

    def __init__(
        self,
        urls,
        standard_seq_len,
        batch_size,
        num_prepro_workers,
        to_tensor=True,
        enable_text=True,
        enable_meta=False,
        embedding_transform=lambda emb: emb,
    ):
        # urls: brace-expandable shard URLs understood by webdataset.
        # standard_seq_len: pad length for batching (-1 disables padding, in
        #   which case batch_size must be 1).
        self.batch_size = batch_size
        dataset = create_embeddingwebdataset(
            urls,
            embedding_transform,
            standard_seq_len,
            to_tensor,
            enable_text,
            enable_meta,
        )
        self.dataloader = dataset_to_dataloader(dataset, batch_size, num_prepro_workers)

    def __iter__(self):
        # Delegate iteration to the wrapped DataLoader (yields batches).
        for batch in self.dataloader:
            yield batch
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/dataset/create_shards.py | clip_video_encode/dataset/create_shards.py | """creates EmbeddingWebDataset from Processed Dataset format"""
import os
import os.path
import random
import argparse
import json

from csv import writer
from pathlib import Path

import numpy as np
import webdataset as wds

parser = argparse.ArgumentParser("""Generate Embedding WebDataset from Processed Dataset.""")
parser.add_argument("--maxsize", type=float, default=1e9)
parser.add_argument("--maxcount", type=float, default=10000)
parser.add_argument(
    "--compression",
    dest="compression",
    action="store_true",
    help="Creates compressed .tar.gz files instead of uncompressed .tar files.",
)
parser.add_argument("--json", dest="json", action="store_true", help="Reads json files and add them to the .tar files.")
parser.add_argument("--shards", default="./shards", help="directory where shards are written")
parser.add_argument(
    "--data",
    default="./data",
    help="directory path containing Processed Dataset",
)
args = parser.parse_args()

assert args.maxsize > 10000000
assert args.maxcount < 1000000
# BUG FIX: args.maxcount is a float (type=float); integer counting math below
# (// and range()) needs an int or it raises TypeError.
maxcount = int(args.maxcount)

os.makedirs(Path(args.shards), exist_ok=True)

SPLITS = ["train", "val", "test"]

tar_count = 0
with open(os.path.join(args.shards, "splits.csv"), "a+", newline="", encoding="utf-8") as f:
    csv_writer = writer(f)
    csv_writer.writerow(["tar_file", "split"])

# This is the output pattern under which we write shards.
pattern = os.path.join(args.shards, "ds_%06d.tar" + (".gz" if args.compression else ""))

with wds.ShardWriter(pattern, maxsize=int(args.maxsize), maxcount=maxcount) as sink:
    for split in SPLITS:
        path = Path(os.path.join(args.data, split))

        text_files_l = [*path.glob("*.txt")]
        text_files = {text_file.stem: text_file for text_file in text_files_l}
        text_total = len(text_files)

        if args.json:
            json_files_l = [*path.glob("*.json")]
            json_files = {json_file.stem: json_file for json_file in json_files_l}
            json_dicts = {}
            for key in json_files:
                try:
                    with open(json_files[key], "r", encoding="utf-8") as f:
                        json_dicts[key] = json.dumps(json.load(f))
                except json.JSONDecodeError:
                    print(f"Found {len(json_files.keys()) - len(json_dicts.keys())} corrupt json file(s).")
            json_keys = json_files.keys()

        npy_files_l = [*path.glob("*.npy")]
        npy_files = {npy_file.stem: npy_file for npy_file in npy_files_l}
        npy_total = len(npy_files)

        # BUG FIX: was a plain string literal missing the f-prefix.
        print(f"Found {text_total} textfiles and {npy_total} numpy files.")

        # Only keep samples that have both an embedding and a caption.
        keys = list(npy_files.keys() & text_files.keys())
        text_files = {k: v for k, v in text_files.items() if k in keys}
        npy_files = {k: v for k, v in npy_files.items() if k in keys}
        total_pairs = len(keys)
        keys = list(keys)

        # Record which output tars belong to this split.
        split_tar_count = total_pairs // maxcount + (total_pairs % maxcount != 0)
        # BUG FIX: was a plain string literal missing the f-prefix.
        tar_split = [(f"ds_{tar_count+i:06}", split) for i in range(split_tar_count)]
        with open(os.path.join(args.shards, "splits.csv"), "a+", newline="", encoding="utf-8") as f:
            csv_writer = writer(f)
            for row in tar_split:
                csv_writer.writerow(row)
        tar_count += split_tar_count

        indexes = list(range(total_pairs))
        random.shuffle(indexes)

        for i in indexes:
            embeddings = np.load(npy_files[keys[i]])
            # BUG FIX: open() in binary mode cannot take an encoding argument
            # (it raised ValueError); read the caption as raw bytes.
            with open(text_files[keys[i]], "rb") as txtstream:
                text = txtstream.read()
            ds_key = keys[i]
            sample = {"__key__": ds_key, "npy": embeddings, "txt": text}
            if args.json and keys[i] in json_keys:
                sample["json"] = json_dicts[keys[i]]
            sink.write(sample)
        # Start a fresh shard at each split boundary.
        sink.next_stream()
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/dataset/kinetics700_example_process.py | clip_video_encode/dataset/kinetics700_example_process.py | """
processes s3://s-datasets/kinetics-700/kinetics700_embeddings into processed dataset format.
run from kinetics700_embeddings directory (so train/val/test at the same level)
"""
import os
import glob
import json
import shutil
from tqdm import tqdm
SPLITS = ["train", "val", "test"]
PROCESSED_DIR = "processed"
# Monotonically increasing id: every clip across all splits gets a unique name.
sample_id = 0
for split in SPLITS:
    print(f"Processing split - {split}")
    split_out_dir = os.path.join(PROCESSED_DIR, split)
    os.makedirs(split_out_dir, exist_ok=True)
    for npy_path in tqdm(glob.glob(os.path.join(split, "**/*.npy"))):
        # Path layout is <split>/<label>/<stem>.npy, where the stem encodes
        # the 11-char videoID plus start/end timestamps.
        _, label, stem = npy_path.split("/")
        clip_meta = {
            "videoID": stem[:11],
            "start_time": stem[12:18],
            "end_time": stem[19:-4],
        }
        prefix = os.path.join(split_out_dir, f"vid_{sample_id:09}")
        with open(prefix + ".txt", "w", encoding="utf-8") as caption_file:
            caption_file.write(label)  # for Kinetics700 the caption is the class label
        with open(prefix + ".json", "w", encoding="utf-8") as meta_file:
            json.dump(clip_meta, meta_file)
        shutil.copyfile(npy_path, prefix + ".npy")
        sample_id += 1
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/clip_video_encode/dataset/__init__.py | clip_video_encode/dataset/__init__.py | """clip-video-encode dataset."""
from .dataset_reader import EmbeddingWebDatasetReader
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/examples/reader.py | examples/reader.py | """
The example below is showcasing how to read an Embedding WebDataset uing the
EmbeddingWebDatasetReader object.
In order to follow along, below are instructions to download the dataset used
in this example.
In a directory of your choice, from the command line call:
git clone https://huggingface.co/datasets/iejMac/CLIP-MSR-VTT
Next, change directory into the newly created CLIP-MSR-VTT/ and call:
git lfs pull
This will load CLIP encodings of the MSR-VTT dataset onto your machine and
allow you to load that data with the EmbeddingWebDatasetReader.
"""
from clip_video_encode.dataset import EmbeddingWebDatasetReader
# Brace-expansion notation selects the range of TAR shards to read.
val_urls = "CLIP-MSR-VTT/data/oai_b32/test_full_fps/{000000000..000000007}.tar"
reader_options = {
    "standard_seq_len": -1,
    "batch_size": 1,
    "num_prepro_workers": 2,
    "to_tensor": False,
    "enable_text": True,
    "enable_meta": True,
}
val_reader = EmbeddingWebDatasetReader(val_urls, **reader_options)
# The reader is an iterable; inspect each batch it yields.
for batch in val_reader:
    print("=====")
    print(batch.keys())
    print(batch["embeddings"].shape)
    print(batch["text"])
    print(batch["meta"])
    print("=====")
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/examples/live_encoding.py | examples/live_encoding.py | import os
import glob
import clip
import torch
import time
from torchvision.transforms import ToPILImage, Compose, ToTensor, Normalize
from clip_video_encode.live_numpy_encoder import LiveNumpyEncoder
from clip_video_encode.simplemapper import FrameMapper
def _convert_image_to_rgb(image):
    """Return *image* converted to RGB mode via PIL's Image.convert."""
    return image.convert("RGB")
DATA_DIR = "nps"  # load up DATA_DIR with numpy video frame arrays (https://github.com/iejMac/video2numpy)
# you can do this live while LiveNumpyEncoder is functioning as long as you pass it
# the entire set of fnames you expect encoded.
EMB_DIR = "embs"  # save embeddings here
VIDS = os.listdir(DATA_DIR)
print(f"DATA_DIR has {len(os.listdir(DATA_DIR))} frame arrays")
print(f"EMB_DIR has {len(os.listdir(EMB_DIR))} embedding arrays")
# Initialize model and preproc:
device = "cuda" if torch.cuda.is_available() else "cpu"
model, _ = clip.load("ViT-B/32", device=device)
# Frame preprocessing pipeline: raw numpy frame -> PIL -> RGB -> tensor ->
# normalized (the mean/std constants match what clip.load's own preprocess
# uses for this model -- TODO confirm against the installed clip version).
preprocess = Compose(
    [
        ToPILImage(),
        _convert_image_to_rgb,
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ]
)
fm = FrameMapper(model, device)
# Encodes every frame array listed in VIDS from DATA_DIR into EMB_DIR;
# start() blocks until all of VIDS have been processed (per the note below).
np_enc = LiveNumpyEncoder(DATA_DIR, EMB_DIR, VIDS, fm, preprocess)
np_enc.start()
print("DONE ENCODING")
print(f"DATA_DIR has {len(os.listdir(DATA_DIR))} frame arrays")
print(f"EMB_DIR has {len(os.listdir(EMB_DIR))} embedding arrays")
# Once this finishes EMB_DIR should have a embedding array for each vid in VIDS
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iejMac/clip-video-encode | https://github.com/iejMac/clip-video-encode/blob/b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7/examples/thing_detector/thing_detector.py | examples/thing_detector/thing_detector.py | """thing detector script using clip-video-encode."""
import clip
import numpy as np
import sys
import torch
from matplotlib import pyplot as plt
def conv_filter(probs, width=10):
    """Smooth a probability sequence with a box (moving-average) filter.

    The input is zero-padded by ``width // 2`` on each side, and each output
    element is the mean of the ``width``-long window starting at its index,
    so the output has the same shape as the input.
    """
    padded = np.pad(probs, width // 2)
    smoothed = np.zeros(probs.shape)
    for idx in range(len(probs)):
        smoothed[idx] = padded[idx: idx + width].mean()
    return smoothed
EMBEDDINGS = "pCUtPE4cAsk.npy"  # per-frame CLIP embeddings of the target video
device = "cuda" if torch.cuda.is_available() else "cpu"
video_embs = torch.Tensor(np.load(EMBEDDINGS)).to(device)
chosen_thing = "bear"
# Binary zero-shot labels: the chosen thing vs. a generic distractor prompt.
labels = [f"a photo of a {chosen_thing}", "a photo of something"]
tokenized_labels = clip.tokenize(labels).to(device)
model, _ = clip.load("ViT-B/32", device=device)
with torch.no_grad():
    lab_embs = model.encode_text(tokenized_labels)
    # L2-normalize both sides so the matmul below is cosine similarity.
    video_embs = video_embs / video_embs.norm(dim=-1, keepdim=True)
    lab_embs = lab_embs / lab_embs.norm(dim=-1, keepdim=True)
    logit_scale = model.logit_scale.exp()
    logits_per_frame = logit_scale * video_embs @ lab_embs.t()
    probs = logits_per_frame.softmax(dim=-1).cpu().numpy()
T = 12.95  # length of video in minutes
ps = probs[:, 0]  # per-frame probability of the chosen label
xs = [(i * T) / len(ps) for i in range(len(ps))]  # frame index -> minutes
# Unfiltered probs
plt.plot(xs, ps)
plt.show()
# Filter probs:
n_filter_steps = 20
# Apply the box filter repeatedly to progressively smooth the curve.
for i in range(n_filter_steps):
    ps = conv_filter(ps, 20)
plt.plot(xs, ps)
plt.show()
| python | MIT | b5df2c7e116d937ac1e67b14c1516f2f07cfe5b7 | 2026-01-05T07:14:33.720079Z | false |
iniwym/XT-Bot | https://github.com/iniwym/XT-Bot/blob/f577d2a3161a5209b059b07aada4c087a2aa2894/Python/src/T-Bot.py | Python/src/T-Bot.py | import sys
import json
import os
import requests
import telegram
from datetime import datetime, timedelta
from pathlib import Path
from typing import (Optional, Dict, Any, List, Tuple, DefaultDict, BinaryIO, IO)
from collections import defaultdict
# Add the project root to the module search path so `utils` imports resolve
# regardless of the working directory the script is launched from.
_project_root = Path(__file__).resolve().parent.parent
sys.path.append(str(_project_root))
from utils.log_utils import LogUtils
# --------------------------
# 配置模块
# --------------------------
class Config:
    """Global configuration: formats, default paths, Telegram limits, tunables."""
    # Timestamp formats: human-readable for captions vs. ISO-ish for records.
    MESSAGE_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
    INFO_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"
    # Default filesystem locations.
    DEFAULT_DOWNLOAD_DIR = "../downloads"
    DEFAULT_OUTPUT_DIR = "../output"
    # Telegram platform limits.
    TELEGRAM_LIMITS = {
        'images': 10 * 1024 * 1024,  # 10MB max photo size
        'videos': 50 * 1024 * 1024,  # 50MB max video size
        'caption': 1024,             # caption truncation length
        'media_group': 10,           # max files per media group
    }
    # Business tunables.
    MAX_DOWNLOAD_ATTEMPTS = 10   # retry budget per file
    ERROR_TRUNCATE = 50          # error-message truncation length
    NOTIFICATION_TRUNCATE = 200  # alert-message truncation length
    @classmethod
    def get_env_vars(cls) -> Dict[str, str]:
        """Read the Telegram/Lark credentials from the environment."""
        env_names = {'bot_token': 'BOT_TOKEN', 'chat_id': 'CHAT_ID', 'lark_key': 'LARK_KEY'}
        return {key: os.getenv(var) for key, var in env_names.items()}
# --------------------------
# 异常类
# --------------------------
class FileTooLargeError(Exception):
    """Raised when a file exceeds the Telegram platform size limit."""
    pass
class MaxAttemptsError(Exception):
    """Raised when the maximum number of attempts has been reached."""
    pass
# Module-wide logger (project LogUtils wrapper).
logger = LogUtils().get_logger()
logger.info("🔄 T-Bot 初始化完成")
# --------------------------
# 通知模块
# --------------------------
class Notifier:
    """Lark (Feishu) webhook notification helper.

    Both public methods share one POST path; they only differ in the message
    prefix, the truncation policy, and the log strings.
    """
    @staticmethod
    def _post_text(text: str, success_log: str, failure_log: str) -> bool:
        """POST a plain-text payload to the configured Lark webhook.

        Returns True on success; False when no webhook key is configured or
        the HTTP request fails. Failures are logged, never raised.
        """
        lark_key = Config.get_env_vars()['lark_key']
        if not lark_key:
            return False
        webhook_url = f"https://open.feishu.cn/open-apis/bot/v2/hook/{lark_key}"
        try:
            payload = {
                "msg_type": "text",
                "content": {"text": text}
            }
            response = requests.post(webhook_url, json=payload, timeout=10)
            response.raise_for_status()
            logger.info(success_log)
            return True
        except Exception as e:
            logger.error(f"{failure_log}: {str(e)}")
            return False
    @staticmethod
    def send_lark_message(message: str) -> bool:
        """Send a regular "update" message to Lark (no truncation)."""
        return Notifier._post_text(
            f"📢 动态更新\n{message}",
            "📨 飞书动态消息发送成功",
            "✗ 飞书消息发送失败",
        )
    @staticmethod
    def send_lark_alert(message: str) -> bool:
        """Send an alert to Lark, truncated to NOTIFICATION_TRUNCATE chars."""
        # Truncate long alerts so the webhook payload stays small.
        truncated_msg = f"{message[:Config.NOTIFICATION_TRUNCATE]}..." if len(
            message) > Config.NOTIFICATION_TRUNCATE else message
        return Notifier._post_text(
            f"📢 XT-Bot处理告警\n{truncated_msg}",
            "📨 飞书通知发送成功",
            "✗ 飞书通知发送失败",
        )
# --------------------------
# 文件处理模块
# --------------------------
class FileProcessor:
    """File handler: owns the task JSON file and the download directory."""
    def __init__(self, json_path: str, download_dir: str):
        self.json_path = Path(json_path)
        self.download_path = Path(download_dir)
        self._ensure_dirs()
    def _ensure_dirs(self) -> None:
        """Create the download directory (and parents) if missing."""
        self.download_path.mkdir(parents=True, exist_ok=True)
        logger.info(f"📂 下载目录已就绪: {self.download_path}")
    def load_data(self) -> List[Dict[str, Any]]:
        """Load and return the task list from the JSON file.

        Raises whatever the open/parse raised, after logging it.
        """
        try:
            # FIX: read-only access is enough here; the previous 'r+' mode
            # needlessly required write permission on the file.
            with self.json_path.open('r', encoding='utf-8') as f:
                data = json.load(f)
            logger.info(f"📄 已加载JSON数据,共{len(data)}条记录")
            return data
        except Exception as e:
            logger.error(f"✗ JSON文件加载失败: {str(e)}")
            raise
    def save_data(self, data: List[Dict[str, Any]]) -> None:
        """Rewrite the JSON file in place with the updated task list.

        Raises whatever the open/dump raised, after logging it.
        """
        try:
            # 'r+' + seek(0) + truncate() rewrites the existing file in place
            # (the file must already exist, which load_data guarantees).
            with self.json_path.open('r+', encoding='utf-8') as f:
                f.seek(0)
                json.dump(data, f, indent=2, ensure_ascii=False)
                f.truncate()
        except Exception as e:
            logger.error(f"✗ JSON保存失败: {str(e)}")
            raise
# --------------------------
# 下载模块
# --------------------------
class DownloadManager:
    """Download manager: fetches media files and tracks per-item retry state."""
    @classmethod
    def process_item(cls, item: Dict[str, Any], processor: FileProcessor) -> None:
        """Download a single item and record the outcome on it in place."""
        # Special types (spaces/broadcasts) need no download.
        if cls._is_special_type(item):
            cls._handle_special_type(item)
            return
        # Already downloaded, or retry budget exhausted: nothing to do.
        if cls._should_skip_download(item):
            return
        # Perform the actual download.
        try:
            logger.info(f"⏬ 开始下载: {item['file_name']}")
            file_path = cls._download_file(item, processor)
            # Record success (size, timestamp, reset attempt counter).
            size_mb = cls._handle_download_success(item, file_path)
            logger.info(f"✓ 下载成功: {item['file_name']} ({size_mb}MB)")
        except Exception as e:
            # Record failure and bump the attempt counter.
            cls._handle_download_failure(item, e)
    @classmethod
    def _is_special_type(cls, item: Dict[str, Any]) -> bool:
        """Return True for link-only types (spaces/broadcasts)."""
        return item.get('media_type') in ['spaces', 'broadcasts']
    @classmethod
    def _handle_special_type(cls, item: Dict[str, Any]) -> None:
        """Mark a special-type item as (trivially) downloaded, size 0."""
        if item.get('is_downloaded'):
            return
        item.update({
            "is_downloaded": True,
            "download_info": {
                "success": True,
                "size_mb": 0,
                "timestamp": datetime.now().strftime(Config.INFO_DATE_FORMAT),
                "download_attempts": 0
            }
        })
        logger.info(f"⏭ 跳过特殊类型下载: {item['file_name']}")
    @classmethod
    def _should_skip_download(cls, item: Dict[str, Any]) -> bool:
        """Return True when this item should not be downloaded now."""
        # Already downloaded: skip.
        if item.get('is_downloaded'):
            return True
        download_info = item.setdefault('download_info', {})
        current_attempts = download_info.get('download_attempts', 0)
        # Retry budget exhausted: mark as permanently failed and skip.
        if current_attempts >= Config.MAX_DOWNLOAD_ATTEMPTS:
            cls._handle_max_attempts(item)
            return True
        return False
    @classmethod
    def _download_file(cls, item: Dict[str, Any], processor: FileProcessor) -> Path:
        """Stream the remote file into the download directory; return its path."""
        response = requests.get(item['url'], stream=True, timeout=30)
        response.raise_for_status()
        file_path = processor.download_path / item['file_name']
        with open(file_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
        return file_path
    @classmethod
    def _handle_download_success(cls, item: Dict[str, Any], file_path: Path) -> float:
        """Record a successful download; return the file size in MB."""
        file_size = os.path.getsize(file_path)
        size_mb = round(file_size / 1024 / 1024, 2)
        item.update({
            "is_downloaded": True,
            "download_info": {
                "success": True,
                "size_mb": size_mb,
                "timestamp": datetime.now().strftime(Config.INFO_DATE_FORMAT),
                "download_attempts": 0  # reset the retry counter
            }
        })
        return size_mb
    @classmethod
    def _handle_download_failure(cls, item: Dict[str, Any], error: Exception) -> None:
        """Record a failed download attempt and increment the counter."""
        download_info = item.setdefault('download_info', {})
        current_attempts = download_info.get('download_attempts', 0)
        new_attempts = current_attempts + 1
        # Update the bookkeeping on the item.
        download_info.update({
            "success": False,
            "error_type": "download_error",
            "message": str(error),
            "timestamp": datetime.now().strftime(Config.INFO_DATE_FORMAT),
            "download_attempts": new_attempts
        })
        # Truncated error at error level...
        truncated_error = str(error)[:Config.ERROR_TRUNCATE]
        error_msg = f"✗ 下载失败: {item['file_name']} - {truncated_error} (尝试 {new_attempts}/{Config.MAX_DOWNLOAD_ATTEMPTS})"
        logger.error(error_msg)
        # ...full details at debug level.
        logger.debug(f"✗ 下载失败详情: {item['file_name']} - {str(error)}")
    @classmethod
    def _handle_max_attempts(cls, item: Dict[str, Any]) -> None:
        """Mark an item as having exhausted its download attempts.

        Writes (or rewrites) item['upload_info'] so the uploader treats the
        item as unrecoverable, reusing an existing timestamp/notification
        flag when one is already present.
        """
        # Default values for the upload-side error record.
        new_info = {
            "success": False,
            "error_type": "max_download_attempts",
            "message": "连续下载失败10次",
            "notification_sent": False
        }
        # Reuse fields from an existing upload_info where present.
        if 'upload_info' in item and isinstance(item['upload_info'], dict):
            existing_info = item['upload_info']
            # Keep an existing timestamp (if any).
            if 'timestamp' in existing_info:
                new_info['timestamp'] = existing_info['timestamp']
            else:
                new_info['timestamp'] = datetime.now().strftime(Config.INFO_DATE_FORMAT)
            # Keep an existing notification flag (if any), so alerts are not repeated.
            if 'notification_sent' in existing_info:
                new_info['notification_sent'] = existing_info['notification_sent']
        else:
            # No prior info: stamp it now.
            new_info['timestamp'] = datetime.now().strftime(Config.INFO_DATE_FORMAT)
        # Create or replace upload_info.
        item['upload_info'] = new_info
        logger.warning(f"⏭ 已达最大下载尝试次数: {item['file_name']}")
# --------------------------
# 上传模块
# --------------------------
class UploadManager:
    """Upload manager: pushes items to Telegram using per-type strategies."""
    def __init__(self):
        self._initialize_bot()
        # Strategy dispatch table: category name -> handler method.
        self.strategies = {
            'text': self._handle_text_upload,
            'single': self._handle_single_media,
            'group': self._handle_media_group
        }
        self.processor = None  # FileProcessor reference, set by process_items()
    def _initialize_bot(self):
        """Initialize the Telegram bot from environment variables (exits if missing)."""
        env_vars = Config.get_env_vars()
        if not env_vars['bot_token'] or not env_vars['chat_id']:
            logger.error("❌ 必须配置 BOT_TOKEN 和 CHAT_ID 环境变量!")
            sys.exit(1)
        self.bot = telegram.Bot(token=env_vars['bot_token'])
        self.chat_id = env_vars['chat_id']
    def process_items(self, items: List[Dict[str, Any]], processor: FileProcessor) -> None:
        """
        Main entry point for processing items awaiting upload.
        """
        # Keep a processor reference for later use (size checks, fallbacks).
        self.processor = processor
        # Filter down to the items that are actually uploadable.
        upload_queue = self._filter_uploadable_items(items)
        if not upload_queue:
            return
        # Strategy dispatch center.
        strategy_map = self._create_strategy_map(upload_queue)
        # Process each strategy type in turn.
        for strategy_type, items_to_upload in strategy_map.items():
            try:
                if strategy_type == 'group':
                    # Media groups are processed per tweet.
                    grouped_items = self._group_by_tweet_id(items_to_upload)
                    for tweet_items in grouped_items:
                        self.strategies[strategy_type](tweet_items, processor)
                else:
                    self.strategies[strategy_type](items_to_upload, processor)
            except Exception as e:
                self._handle_strategy_error(e, items_to_upload, strategy_type)
    def _filter_uploadable_items(self, items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Return the subset of items that are eligible for upload."""
        return [
            item for item in items
            if not item.get('is_uploaded') and self._is_eligible_for_upload(item)
        ]
    def _is_eligible_for_upload(self, item: Dict[str, Any]) -> bool:
        """Decide whether an item may be uploaded."""
        # Items with unrecoverable errors are dropped (alerted once).
        if self._has_unrecoverable_error(item):
            return False
        # Special (text-only) types can be uploaded directly.
        if item.get('media_type') in ['spaces', 'broadcasts']:
            return True
        # Regular media must have been downloaded successfully first.
        return item.get('is_downloaded', False)
    def _create_strategy_map(self, items: List[Dict[str, Any]]) -> Dict[str, List[Dict[str, Any]]]:
        """
        Build the upload strategy map:
        - 'text': text-only items
        - 'single': single-media items
        - 'group': media-group items
        """
        strategy_map = defaultdict(list)
        for item in items:
            media_type = item['media_type']
            if media_type in ['spaces', 'broadcasts']:
                strategy_map['text'].append(item)
            elif media_type in ['images', 'videos']:
                # The media count within a tweet decides single vs. group.
                media_count = self._get_media_count_in_tweet(items, item['tweet_id'])
                if media_count == 1:
                    strategy_map['single'].append(item)
                else:
                    strategy_map['group'].append(item)
        return dict(strategy_map)
    def _get_media_count_in_tweet(self, all_items: List[Dict[str, Any]], tweet_id: str) -> int:
        """Count not-yet-uploaded media items belonging to one tweet."""
        return sum(
            1 for item in all_items
            if item['tweet_id'] == tweet_id
            and item['media_type'] in ['images', 'videos']
            and not item.get('is_uploaded')
        )
    def _group_by_tweet_id(self, items: List[Dict[str, Any]]) -> List[List[Dict[str, Any]]]:
        """Group items by tweet id."""
        grouped = defaultdict(list)
        for item in items:
            grouped[item['tweet_id']].append(item)
        return list(grouped.values())
    # --------------------------
    # Upload strategy implementations
    # --------------------------
    def _handle_text_upload(self, items: List[Dict[str, Any]], processor: FileProcessor) -> None:
        """Text-item upload strategy."""
        for item in items:
            self._upload_text_item(item)
    def _handle_single_media(self, items: List[Dict[str, Any]], processor: FileProcessor) -> None:
        """Single-media upload strategy."""
        for item in items:
            try:
                self._upload_media_item(item, processor)
            except Exception as e:
                self._handle_single_upload_error(e, item)
    def _handle_media_group(self, items: List[Dict[str, Any]], processor: FileProcessor) -> None:
        """Media-group upload strategy (one tweet's media sent together)."""
        tweet_id = items[0]['tweet_id']
        logger.info(f"🖼️ 准备媒体组上传: {tweet_id} ({len(items)}个文件)")
        # Check the combined group size up front.
        if self._is_group_size_exceeded(items):
            logger.warning(f"⚠️ 媒体组过大({self._get_group_size_mb(items)}MB > 50MB),回退为单文件上传")
            self._fallback_to_single_upload(items)
            return
        # Build the media group.
        media_group, included_items = self._prepare_media_group(items, processor)
        if not media_group:
            logger.warning(f"⏭ 无可上传的有效媒体: {tweet_id}")
            return
        try:
            # Send the media group.
            messages = self.bot.send_media_group(
                chat_id=self.chat_id,
                media=media_group
            )
            # Validate the response count against what was sent.
            if len(messages) != len(included_items):
                logger.warning(
                    f"⚠️ 返回消息数量({len(messages)})与媒体组数量({len(included_items)})不匹配,将回退为单文件上传"
                )
                # Use the fallback mechanism for the mismatch case.
                self._fallback_to_single_upload(included_items)
                return
            # Mark every included item as uploaded with its message id.
            for msg, item in zip(messages, included_items):
                msg_id = msg.message_id
                self._update_upload_status(item, msg_id)
                logger.info(f"✅ 上传成功: {item['file_name']}({msg_id})")
            logger.info(f"✅ 媒体组上传成功: {tweet_id} ({len(media_group)}个文件)")
        except Exception as e:
            self._handle_group_upload_error(e, included_items)
        finally:
            # Always close any file handles the media objects still hold.
            for media_item in media_group:
                if hasattr(media_item, 'media') and hasattr(media_item.media, 'close'):
                    media_item.media.close()
    # --------------------------
    # Group-size detection and fallback
    # --------------------------
    def _handle_strategy_error(self, error: Exception, items: List[Dict[str, Any]], strategy_type: str) -> None:
        """Handle a strategy-level error, with special handling for media groups."""
        logger.error(f"✗ {strategy_type}策略执行失败: {str(error)[:Config.ERROR_TRUNCATE]}")
        logger.debug(f"✗ {strategy_type}策略执行失败详情: {str(error)}")
        # For oversized media groups, fall back to single-file uploads.
        if strategy_type == 'group' and self._is_group_size_exceeded(items):
            logger.warning(f"⚠️ 媒体组过大({self._get_group_size_mb(items)}MB > 50MB),回退为单文件上传")
            self._fallback_to_single_upload(items)
        elif strategy_type in ['group', 'text']:
            # Other group/text errors also retry item by item.
            self._fallback_to_single_upload(items)
    def _is_group_size_exceeded(self, items: List[Dict[str, Any]]) -> bool:
        """Return True when the group total exceeds the 50MB video limit."""
        return self._get_group_size_mb(items) > Config.TELEGRAM_LIMITS['videos'] / (1024 * 1024)
    def _get_group_size_mb(self, items: List[Dict[str, Any]]) -> float:
        """Compute the combined size of a media group in MB."""
        total_size_bytes = 0
        for item in items:
            if 'download_info' in item and 'size_mb' in item['download_info']:
                # Use the recorded size when available.
                total_size_bytes += item['download_info']['size_mb'] * 1024 * 1024
            else:
                # Otherwise try to stat the file on disk; ignore failures.
                try:
                    file_path = self.processor.download_path / item['file_name']
                    if file_path.exists():
                        total_size_bytes += os.path.getsize(file_path)
                except Exception:
                    continue
        return round(total_size_bytes / (1024 * 1024), 2)
    def _fallback_to_single_upload(self, items: List[Dict[str, Any]]) -> None:
        """Fall back to uploading each item of a group individually."""
        logger.info(f"⏮️ 回退为单文件上传: {items[0]['tweet_id']} ({len(items)}个文件)")
        for item in items:
            if item.get('is_uploaded'):
                continue
            try:
                if item['media_type'] in ['spaces', 'broadcasts']:
                    self._upload_text_item(item)
                else:
                    self._upload_media_item(item, self.processor)
            except Exception as inner_error:
                self._update_error_status(inner_error, item)
                self._reset_download_status(item)
                logger.error(f"✗ 单文件上传失败: {item['file_name']} - {str(inner_error)[:Config.ERROR_TRUNCATE]}")
                logger.debug(f"✗ 单文件上传失败详情: {item['file_name']} - {str(inner_error)}")
    # --------------------------
    # Actual upload operations
    # --------------------------
    def _upload_text_item(self, item: Dict[str, Any]) -> None:
        """Send a text message for a link-only item (spaces/broadcasts)."""
        # Build the text-style caption.
        caption = self._build_text_caption(item)
        msg = self.bot.send_message(chat_id=self.chat_id, text=caption)
        msg_id = msg.message_id
        self._update_upload_status(item, msg_id)
        # Mirror the update to Lark when configured.
        if Config.get_env_vars()['lark_key']:
            Notifier.send_lark_message(caption)
        logger.info(f"✅ 发送成功: {item['file_name']}({msg_id})")
    def _upload_media_item(self, item: Dict[str, Any], processor: FileProcessor) -> None:
        """Upload one media file (photo or video) with its caption."""
        with self._get_file_handle(item, processor) as file_obj:
            # Build the media-style caption.
            caption = self._build_media_caption(item)
            if item['media_type'] == 'images':
                msg = self.bot.send_photo(chat_id=self.chat_id, photo=file_obj, caption=caption)
            else:  # videos
                msg = self.bot.send_video(chat_id=self.chat_id, video=file_obj, caption=caption)
            msg_id = msg.message_id
            self._update_upload_status(item, msg_id)
            logger.info(f"✅ 上传成功: {item['file_name']}({msg_id})")
    def _prepare_media_group(self, items: List[Dict[str, Any]], processor: FileProcessor
                             ) -> Tuple[List[telegram.InputMedia], List[Dict[str, Any]]]:
        """
        Prepare a media group for upload.
        Returns: (list of InputMedia objects, list of included source items)
        """
        media_group = []
        included_items = []
        tweet_id = items[0]['tweet_id']
        for idx, item in enumerate(items):
            if item.get('is_uploaded'):
                continue
            try:
                with self._get_file_handle(item, processor) as file_obj:
                    # Only the first entry carries the caption.
                    caption = self._build_media_caption(item) if idx == 0 else None
                    if item['media_type'] == 'images':
                        media_item = telegram.InputMediaPhoto(file_obj, caption=caption)
                    else:  # videos
                        media_item = telegram.InputMediaVideo(file_obj, caption=caption)
                    media_group.append(media_item)
                    included_items.append(item)
                    # Respect the media-group file-count limit.
                    if len(media_group) >= Config.TELEGRAM_LIMITS['media_group']:
                        logger.warning(f"⚠️ 媒体组文件数达到上限: {tweet_id}")
                        break
            except Exception as e:
                self._handle_preparation_error(e, item)
        return media_group, included_items
    # --------------------------
    # Caption building
    # --------------------------
    def _build_text_caption(self, item: Dict[str, Any]) -> str:
        """
        Build a text-style caption.
        Format: #[screen_name] #[media_type]
                [publish time]
                [original URL]
        """
        username = item['user']['screen_name']
        media_type = item['media_type']
        publish_time = datetime.fromisoformat(item['publish_time']).strftime(Config.MESSAGE_DATE_FORMAT)
        url = item['url']
        # Assemble the pieces.
        content = f"#{username} #{media_type}\n{publish_time}\n{url}"
        return self._truncate_text(content, Config.TELEGRAM_LIMITS['caption'])
    def _build_media_caption(self, item: Dict[str, Any]) -> str:
        """
        Build a media-style caption.
        Format: #[screen_name] [display name]
                [publish time]
                [tweet text]
        """
        screen_name = item['user']['screen_name']
        display_name = item['user']['name']
        publish_time = datetime.fromisoformat(item['publish_time']).strftime(Config.MESSAGE_DATE_FORMAT)
        # Base information line.
        base_info = f"#{screen_name} {display_name}\n{publish_time}"
        # Append the tweet body.
        text_content = f"{base_info}\n{item.get('full_text', '')}"
        return self._truncate_text(text_content, Config.TELEGRAM_LIMITS['caption'])
    def _truncate_text(self, text: str, max_length: int) -> str:
        """Truncate text, preferring to cut just after a nearby period."""
        if len(text) > max_length:
            truncated = text[:max_length - 3]
            # Prefer ending right after a period when one falls near the limit.
            if truncated.rfind('.') > max_length - 10:
                truncate_point = truncated.rfind('.') + 1
            else:
                truncate_point = max_length - 3
            return text[:truncate_point] + "..."
        return text
    # --------------------------
    # Helpers
    # --------------------------
    def _get_file_handle(self, item: Dict[str, Any], processor: FileProcessor) -> BinaryIO:
        """Return an open file handle (or the URL for special types), validating size.

        Raises FileTooLargeError when the file exceeds its per-type limit.
        """
        if item.get('media_type') in ['spaces', 'broadcasts']:
            # Special types are sent as their URL string.
            return item['url']
        # Local file path for regular media.
        file_path = processor.download_path / item['file_name']
        media_type = item['media_type']
        # Enforce the per-type Telegram size limit before opening.
        file_size = os.path.getsize(file_path)
        if file_size > Config.TELEGRAM_LIMITS[media_type]:
            raise FileTooLargeError(
                f"{media_type}大小超标 ({file_size / (1024 * 1024):.2f}MB > "
                f"{Config.TELEGRAM_LIMITS[media_type] / (1024 * 1024):.2f}MB)"
            )
        return open(file_path, 'rb')
    def _update_upload_status(self, item: Dict[str, Any], message_id: int) -> None:
        """Mark an item as successfully uploaded with its Telegram message id."""
        item.update({
            "is_uploaded": True,
            "upload_info": {
                "success": True,
                "message_id": message_id,
                "timestamp": datetime.now().strftime(Config.INFO_DATE_FORMAT)
            }
        })
    # --------------------------
    # Error handling
    # --------------------------
    def _has_unrecoverable_error(self, item: Dict[str, Any]) -> bool:
        """Check for unrecoverable errors, alerting once per item."""
        upload_info = item.get('upload_info', {})
        error_type = upload_info.get('error_type')
        if error_type in ['file_too_large', 'max_download_attempts']:
            # Send the alert only once.
            if not upload_info.get('notification_sent'):
                self._send_unrecoverable_alert(item, error_type)
                upload_info['notification_sent'] = True
            return True
        return False
    def _send_unrecoverable_alert(self, item: Dict[str, Any], error_type: str) -> None:
        """Send a Lark alert for an unrecoverable error."""
        Notifier.send_lark_alert(
            f"🔴 推送失败\n文件名: {item['file_name']}\n"
            f"类型: {error_type}\n"
            f"错误: {item['upload_info']['message'][:Config.ERROR_TRUNCATE]}"
        )
    def _handle_single_upload_error(self, error: Exception, item: Dict[str, Any]) -> None:
        """Handle a single-file upload error (record + allow re-download)."""
        self._update_error_status(error, item)
        self._reset_download_status(item)
        logger.error(f"✗ 单文件上传失败: {item['file_name']} - {str(error)[:Config.ERROR_TRUNCATE]}")
        logger.debug(f"✗ 单文件上传失败详情: {item['file_name']} - {str(error)}")
    def _handle_group_upload_error(self, error: Exception, items: List[Dict[str, Any]]) -> None:
        """Handle a media-group upload error for every item of the group."""
        for item in items:
            self._update_error_status(error, item)
            self._reset_download_status(item)
        tweet_id = items[0]['tweet_id'] if items else "未知"
        logger.error(f"✗ 媒体组上传失败: {tweet_id} - {str(error)[:Config.ERROR_TRUNCATE]}")
        logger.debug(f"✗ 媒体组上传失败详情: {tweet_id} - {str(error)}")
    def _handle_preparation_error(self, error: Exception, item: Dict[str, Any]) -> None:
        """Handle an error raised while building the media group."""
        self._update_error_status(error, item)
        self._reset_download_status(item)
        logger.warning(f"✗ 媒体组准备失败: {item['file_name']}")
    def _update_error_status(self, error: Exception, item: Dict[str, Any]) -> None:
        """Record an upload error on the item (non-size errors alert immediately)."""
        error_type = 'file_too_large' if isinstance(error, FileTooLargeError) else 'api_error'
        item['upload_info'] = {
            "success": False,
            "error_type": error_type,
            "message": str(error),
            "timestamp": datetime.now().strftime(Config.INFO_DATE_FORMAT),
            "notification_sent": False
        }
        # Non-size errors are alerted immediately (size errors alert later, once).
        if error_type != 'file_too_large':
            Notifier.send_lark_alert(
                f"🔴 上传失败\n文件名: {item['file_name']}\n"
                f"错误类型: {error.__class__.__name__}\n"
                f"错误详情: {str(error)[:Config.ERROR_TRUNCATE]}"
            )
    def _reset_download_status(self, item: Dict[str, Any]) -> None:
        """Clear the downloaded flag so the file will be re-fetched next run."""
        if 'is_downloaded' in item:
            item['is_downloaded'] = False
# --------------------------
# 主流程
# --------------------------
def process_single(json_path: str, download_dir: str = Config.DEFAULT_DOWNLOAD_DIR) -> None:
    """Process one task JSON file: download, upload, then persist the state.

    Re-raises any processing exception after logging and alerting Lark.
    """
    try:
        logger.info(f"\n{'-' * 40}\n🔍 开始处理: {json_path}")
        processor = FileProcessor(json_path, download_dir)
        data = processor.load_data()
        # 1. Group the items by tweet id; entries without one are skipped.
        by_tweet = defaultdict(list)
        for entry in data:
            if 'tweet_id' not in entry:
                logger.error(f"⚠️ 数据项缺少tweet_id: 文件名={entry.get('file_name', '未知')}, 跳过")
                continue
            by_tweet[entry['tweet_id']].append(entry)
        downloader = DownloadManager()
        uploader = UploadManager()
        logger.info(f"📊 检测到 {len(by_tweet)} 个推文分组")
        # 2. Process each tweet group in turn.
        for tweet_id, tweet_items in by_tweet.items():
            # 2.1 Download every not-yet-downloaded file in the group.
            for entry in tweet_items:
                if not entry.get('is_downloaded'):
                    downloader.process_item(entry, processor)
            # 2.2 Apply the grouped upload strategy.
            uploader.process_items(tweet_items, processor)
        processor.save_data(data)
        logger.info(f"✅ 文件处理完成\n{'-' * 40}\n")
    except Exception as e:
        logger.error(f"💥 处理异常: {str(e)}", exc_info=True)
        Notifier.send_lark_alert(f"处理异常: {str(e)[:Config.NOTIFICATION_TRUNCATE]}")
        raise
def batch_process(days: int = 7) -> None:
    """Process the daily JSON files for the last *days* days, oldest first."""
    base_dir = Path(Config.DEFAULT_OUTPUT_DIR)
    # Walk from `days` days ago up to today (offset days .. 0).
    for offset in range(days, -1, -1):
        day = datetime.now() - timedelta(days=offset)
        date_str = day.strftime("%Y-%m-%d")
        json_path = base_dir / f"{date_str[:7]}/{date_str}.json"
        if not json_path.exists():
            logger.info(f"⏭ 跳过不存在文件: {json_path}")
            continue
        process_single(str(json_path))
def main():
    """CLI entry point: dispatch on the number of command-line arguments."""
    args = sys.argv[1:]
    if len(args) > 2:
        # Too many arguments: print usage and bail out.
        logger.error("错误:参数数量不正确。")
        logger.error("使用方法:python T-Bot.py [<JSON文件路径> <下载目录>]")
        logger.error("示例:")
        logger.error("使用参数:python T-Bot.py ../output/2000-01/2000-01-01.json ../downloads(默认)")
        logger.error("使用默认:python T-Bot.py")
        sys.exit(1)
    if not args:
        batch_process()
    else:
        # One arg: JSON path only; two args: JSON path + download dir.
        process_single(*args)
if __name__ == "__main__":
try:
main()
logger.info("🏁 所有处理任务已完成!")
except KeyboardInterrupt:
logger.warning("⏹️ 用户中断操作")
sys.exit(0)
except Exception as e:
| python | MIT | f577d2a3161a5209b059b07aada4c087a2aa2894 | 2026-01-05T07:14:36.260795Z | true |
iniwym/XT-Bot | https://github.com/iniwym/XT-Bot/blob/f577d2a3161a5209b059b07aada4c087a2aa2894/Python/src/INI-XT-Bot.py | Python/src/INI-XT-Bot.py | import sys
import json
import os
import subprocess
import telegram
from datetime import datetime
from pathlib import Path
from typing import List, Dict
# Add the project root to the module search path so `utils` imports resolve
# regardless of the working directory the script is launched from.
_project_root = Path(__file__).resolve().parent.parent
sys.path.append(str(_project_root))
from utils.log_utils import LogUtils
# --------------------------
# 配置常量
# --------------------------
class EnvConfig:
    """Environment-variable configuration."""
    BOT_TOKEN = os.getenv("BOT_TOKEN")  # Telegram bot token
    CHAT_ID = os.getenv("CHAT_ID")  # Telegram channel/group id
    LARK_KEY = os.getenv("LARK_KEY")  # Lark (Feishu) bot webhook key
class PathConfig:
    """Path configuration."""
    CONFIG_PATH = Path("../../config/config.json")  # config file path
    OUT_PUT_DIR = Path("../output/")  # daily output directory (read by trigger_tbot)
    USER_DATA_DIR = Path("../../TypeScript/tweets/user/")  # per-user tweet data directory
class MsgConfig:
    """Message templates."""
    TELEGRAM_ALERT = "#{screen_name} #x"  # Telegram notification template
# Module-wide logger (project LogUtils wrapper).
logger = LogUtils().get_logger()
logger.info("🔄 INI-XT-Bot 初始化完成")
# --------------------------
# 通知模块
# --------------------------
def send_telegram_alert(screen_name: str) -> bool:
    """Send a Telegram-formatted notification for *screen_name*.

    Returns True on success, False on failure or missing configuration.
    """
    # Bail out early if the Telegram credentials are not configured.
    if not (EnvConfig.BOT_TOKEN and EnvConfig.CHAT_ID):
        logger.warning("⏭️ 缺少Telegram环境变量配置,跳过通知发送")
        return False
    try:
        message_text = MsgConfig.TELEGRAM_ALERT.format(screen_name=screen_name)
        tg_bot = telegram.Bot(token=EnvConfig.BOT_TOKEN)
        # Deliver silently: no client-side notification sound.
        tg_bot.send_message(
            chat_id=EnvConfig.CHAT_ID,
            text=message_text,
            disable_notification=True
        )
        logger.info(f"📢 Telegram通知发送成功: {message_text}")
        return True
    except telegram.error.TelegramError as e:
        logger.error(f"❌ Telegram消息发送失败: {str(e)}")
        return False
    except Exception as e:
        logger.error(f"🚨 通知发送出现意外错误: {str(e)}", exc_info=True)
        return False
# --------------------------
# 核心逻辑
# --------------------------
def load_config() -> List[str]:
    """Load the config file and return the list of screen names.

    Returns an empty list on any error (missing file, bad JSON, etc.).
    """
    try:
        # FIX: read the config with an explicit UTF-8 encoding; the platform
        # default encoding can mis-decode non-ASCII content (e.g. on Windows).
        with open(PathConfig.CONFIG_PATH, "r", encoding="utf-8") as f:
            config = json.load(f)
        # Strip whitespace and drop empty entries.
        raw_users = config.get("screenName", [])
        users = [u.strip() for u in raw_users if u.strip()]
        logger.info(f"📋 加载到{len(users)}个待处理用户")
        logger.debug(f"用户列表: {', '.join(users)}")
        return users
    except FileNotFoundError:
        logger.error(f"❌ 配置文件不存在: {PathConfig.CONFIG_PATH}")
        return []
    except json.JSONDecodeError:
        logger.error(f"❌ 配置文件解析失败: {PathConfig.CONFIG_PATH}")
        return []
    except Exception as e:
        logger.error(f"🚨 加载配置出现意外错误: {str(e)}")
        return []
def trigger_xbot(screen_name: str) -> int:
    """Run X-Bot.py on one user's data file.

    Returns the number of newly added entries, or 0 when the user's data
    file does not exist or the subprocess fails.
    """
    # Build the per-user data file path.
    data_file = PathConfig.USER_DATA_DIR / f"{screen_name}.json"
    if not data_file.exists():
        logger.warning(f"⏭️ 用户数据文件不存在: {data_file}")
        return 0
    try:
        logger.info("🚀 触发X-Bot执行")
        # Run X-Bot as a subprocess, streaming its output live.
        process = subprocess.Popen(
            ["python", "-u", "X-Bot.py", str(data_file)],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # merge stderr into stdout
            text=True,
            bufsize=1  # line-buffered
        )
        # Echo child output in real time while capturing it for result parsing.
        output_lines = []
        for line in iter(process.stdout.readline, ''):
            line = line.strip()
            if line:  # skip blank lines
                # Forward child output to our own console immediately.
                print(f"[X-Bot] {line}", flush=True)
                output_lines.append(line)
        # Wait for the child process to exit.
        process.wait()
        # A non-zero exit code is treated as a failure.
        if process.returncode != 0:
            raise subprocess.CalledProcessError(
                process.returncode,
                process.args,
                output='\n'.join(output_lines)
            )
        if output_lines:
            if len(output_lines) > 1:
                # X-Bot prints the new-entry count, then logs a final
                # "all done" status line, so the count is second-to-last.
                new_count = int(output_lines[-2])
            else:
                # Only one line captured: it carries the count itself.
                new_count = int(output_lines[-1])
        else:
            new_count = 0
        logger.info(f"✅ X-Bot执行成功,用户 {screen_name} 处理完成,新增 {new_count} 条")
        return new_count
    except subprocess.CalledProcessError as e:
        # Log the last (truncated) output line as the failure reason.
        logger.error(f"❌ X-Bot处理 用户 {screen_name} 处理失败: {e.output.splitlines()[-1][:200]}")
        return 0
    except Exception as e:
        logger.error(f"🚨 X-Bot未知错误: {str(e)}")
        return 0
def trigger_tbot() -> bool:
    """Run T-Bot.py on today's output file (the downstream push step).

    Returns:
        True on success; False when the data file is missing or the
        subprocess fails.
    """
    current_date = datetime.now().strftime("%Y-%m-%d")
    # Today's file lives under a <YYYY-MM>/ subdirectory of the output dir.
    json_path = PathConfig.OUT_PUT_DIR / f"{current_date[:7]}/{current_date}.json"
    if not json_path.exists():
        logger.warning(f"⏭️ 推送数据文件不存在: {json_path}")
        # Bug fix: previously returned 0 here, violating the documented
        # bool contract (0 is falsy, so callers keep working).
        return False
    try:
        logger.info("🚀 触发T-Bot执行")
        # Run T-Bot as a subprocess, streaming its log output live.
        process = subprocess.Popen(
            ["python", "-u", "T-Bot.py", str(json_path)],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # merge stderr into stdout
            text=True,
            bufsize=1  # line-buffered
        )
        # Forward child output to our console in real time.
        for line in iter(process.stdout.readline, ''):
            print(f"[T-Bot] {line.strip()}", flush=True)
        # Wait for completion and check the exit status.
        process.wait()
        if process.returncode != 0:
            raise subprocess.CalledProcessError(
                process.returncode,
                process.args
            )
        logger.info("✅ T-Bot执行成功")
        return True
    except subprocess.CalledProcessError as e:
        logger.error(f"❌ T-Bot执行失败: {str(e)}")
        return False
    except Exception as e:
        logger.error(f"🚨 T-Bot未知错误: {str(e)}")
        return False
# --------------------------
# 主流程
# --------------------------
def main():
    """Main pipeline: load users, run X-Bot per user, notify and push."""
    # Load the configured screen-name list.
    users = load_config()
    if not users:
        logger.error("❌ 未获取到有效用户列表,程序终止")
        return
    # Process each configured user in turn.
    total_new = 0
    for screen_name in users:
        logger.info(f"\n{'=' * 40}\n🔍 开始处理: {screen_name}")
        new_count = trigger_xbot(screen_name)
        # Only notify / push when new entries were found.
        if new_count > 0:
            # Immediate per-user notification.
            send_telegram_alert(screen_name)
            # Kick off the downstream push step.
            if not trigger_tbot():
                logger.error(f"❌ 触发T-Bot失败 - 用户: {screen_name}")
        total_new += new_count
        logger.info(f"✅ 处理完成\n{'=' * 40}\n")
    # Final summary across all users.
    logger.info(f"🎉 所有用户处理完成!总新增条目: {total_new}")


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Last-resort handler so unexpected errors are logged with a traceback.
        logger.error(f"💥 未处理的全局异常: {str(e)}", exc_info=True)
| python | MIT | f577d2a3161a5209b059b07aada4c087a2aa2894 | 2026-01-05T07:14:36.260795Z | false |
iniwym/XT-Bot | https://github.com/iniwym/XT-Bot/blob/f577d2a3161a5209b059b07aada4c087a2aa2894/Python/src/X-Bot.py | Python/src/X-Bot.py | import sys
import json
import os
from datetime import datetime, timedelta
from pathlib import Path
# 将项目根目录添加到模块搜索路径
_project_root = Path(__file__).resolve().parent.parent
sys.path.append(str(_project_root))
from utils.log_utils import LogUtils
# --------------------
# 配置区
# --------------------
class Config:
    """Static configuration for the X-Bot processing pipeline."""

    # --- sharding ---
    MAX_ENTRIES_PER_SHARD = 10000  # max entry ids per shard file
    SHARD_DIR = "../dataBase/"     # where shard files live
    FORMAT_SHARDS = True           # pretty-print shard JSON
    SHARD_PREFIX = "processed_entries_"

    # --- paths ---
    DEFAULT_INPUT_DIR = "../../TypeScript/tweets/"  # default input root
    DEFAULT_OUTPUT_DIR = "../output/"               # default output root

    # --- date formats ---
    DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"  # full timestamp
    YEAR_MONTH_DAY = "%Y-%m-%d"        # calendar day
    YEAR_MONTH = "%Y-%m"               # calendar month
# Set up the module-level logger via the shared project logging helper.
logger = LogUtils().get_logger()
logger.info("🔄 X-Bot 初始化完成")  # startup marker ("initialization complete")
# --------------------
# 分片管理器
# --------------------
class ShardManager:
    """Manage sharded persistence of already-processed entry ids.

    Ids are stored as JSON arrays in files named
    ``processed_entries_<YYYY-MM>-<NNNN>.json`` under ``Config.SHARD_DIR``;
    a new shard is started once the current one holds
    ``Config.MAX_ENTRIES_PER_SHARD`` ids.
    """

    def __init__(self):
        self._ensure_shard_dir()

    def _ensure_shard_dir(self):
        """Create the shard directory if it does not exist yet."""
        if not os.path.exists(Config.SHARD_DIR):
            os.makedirs(Config.SHARD_DIR)
            logger.info(f"📁 创建分片目录: {Config.SHARD_DIR}")

    def get_current_shard_info(self):
        """Return the current year-month, its highest shard number, and the next number."""
        year_month = datetime.now().strftime(Config.YEAR_MONTH)
        max_shard = self._get_max_shard_number(year_month)
        return {
            "year_month": year_month,
            "current_max": max_shard,
            "next_shard": max_shard + 1
        }

    def _get_max_shard_number(self, year_month):
        """Return the highest shard number found for *year_month* (0 if none)."""
        max_num = 0
        for file_path in self._list_shard_files():
            if f"_{year_month}-" in file_path:
                num = self._parse_shard_number(file_path)
                max_num = max(max_num, num)
        return max_num

    def _list_shard_files(self):
        """List the paths of all shard files in the shard directory."""
        return [
            os.path.join(Config.SHARD_DIR, f)
            for f in os.listdir(Config.SHARD_DIR)
            if f.startswith(Config.SHARD_PREFIX) and f.endswith(".json")
        ]

    @staticmethod
    def _parse_shard_number(file_path):
        """Extract the numeric shard index from a shard file path."""
        filename = os.path.basename(file_path)
        return int(filename.split("-")[-1].split(".")[0])

    def save_entry_id(self, entry_id):
        """Append *entry_id* to the newest shard, rolling over when full.

        Returns the path of the shard file the id was written to.
        """
        shard_info = self.get_current_shard_info()
        candidate_path = self._build_shard_path(shard_info["year_month"], shard_info["current_max"])
        # Try to append to the existing shard first.
        if os.path.exists(candidate_path):
            try:
                with open(candidate_path, "r+") as f:
                    entries = json.load(f)
                    if len(entries) < Config.MAX_ENTRIES_PER_SHARD:
                        entries.append(entry_id)
                        # Rewrite from the start; appending only grows the data.
                        f.seek(0)
                        json.dump(entries, f, indent=2 if Config.FORMAT_SHARDS else None)
                        logger.debug(f"📥 条目 {entry_id} 已写入现有分片: {candidate_path}")
                        return candidate_path
            except json.JSONDecodeError:
                # Corrupt JSON: rewrite the shard with just this entry.
                logger.warning("🔄 检测到损坏分片,尝试修复...")
                return self._handle_corrupted_shard(candidate_path, entry_id)
        # Current shard missing or full: start a new shard.
        new_path = self._build_shard_path(shard_info["year_month"], shard_info["next_shard"])
        self._write_shard(new_path, [entry_id])
        logger.info(f"✨ 创建新分片: {new_path}")
        return new_path

    def _build_shard_path(self, year_month, shard_number):
        """Build the file path for a shard of the given month and index."""
        return os.path.join(
            Config.SHARD_DIR,
            f"{Config.SHARD_PREFIX}{year_month}-{shard_number:04d}.json"
        )

    def _handle_corrupted_shard(self, path, entry_id):
        """Overwrite a corrupted shard file with a fresh list containing *entry_id*."""
        try:
            self._write_shard(path, [entry_id])
            logger.warning(f"✅ 成功修复损坏分片: {path}")
            return path
        except Exception as e:
            logger.error(f"❌ 修复分片失败: {str(e)}")
            raise

    def _write_shard(self, path, data):
        """Serialize *data* to *path* as JSON (pretty-printed when configured)."""
        with open(path, "w") as f:
            json.dump(data, f, indent=2 if Config.FORMAT_SHARDS else None)

    def load_processed_entries(self):
        """Load every entry id from all shards into a set; corrupt shards are skipped."""
        processed = set()
        for file_path in self._list_shard_files():
            try:
                with open(file_path, "r") as f:
                    entries = json.load(f)
                    processed.update(entries)
                    logger.debug(f"📖 加载分片: {file_path} (条目数: {len(entries)})")
            except Exception as e:
                logger.warning(f"⚠️ 跳过损坏分片 {file_path}: {str(e)}")
        logger.info(f"🔍 已加载历史条目总数: {len(processed)}")
        return processed
# --------------------
# 条目处理器
# --------------------
class EntryProcessor:
    """Extract media entries (images, videos, broadcasts, spaces) from tweets."""

    @staticmethod
    def generate_entry_id(filename, username, media_type):
        """Build the unique dedup id for one media item.

        Bug fix: the id previously ignored *filename* (a literal
        placeholder was embedded), so every item of the same user and
        media type collided on one id and only the first was ever kept.
        The format now matches ``XBotCore._get_entry_id``.
        """
        return f"{filename}_{username}_{media_type}"

    @staticmethod
    def create_entry_template(filename, user_info, media_type, url):
        """Create the standard output record for a single media item."""
        return {
            "tweet_id": "",
            "file_name": filename,
            "user": {
                "screen_name": user_info["screen_name"],
                "name": user_info.get("name", "N/A")
            },
            "media_type": media_type,
            "url": url,
            "read_time": datetime.now().strftime(Config.DATE_FORMAT),
            "is_uploaded": False,
            "upload_info": {},
            "is_downloaded": False,
            "download_info": {},
            "full_text": "",
            "publish_time": ""
        }

    def process_entry(self, entry, user_info, processed_ids):
        """Process one tweet entry; return the new (unseen) media records."""
        new_entries = []
        # Extract the numeric tweet id from the tweet URL.
        tweet_id = self._extract_tweet_id(entry.get("tweet_url", ""))
        # Regular media attachments.
        new_entries.extend(self._process_media(entry, user_info, processed_ids, "images"))
        new_entries.extend(self._process_media(entry, user_info, processed_ids, "videos"))
        # Special links (broadcasts / spaces).
        new_entries.extend(self._process_special_urls(entry, user_info, processed_ids))
        # Attach shared tweet metadata to every new record.
        for e in new_entries:
            e.update({
                "tweet_id": tweet_id,
                "full_text": entry.get("full_text", ""),
                "publish_time": entry.get("publish_time", "")
            })
        return new_entries

    def _process_media(self, entry, user_info, processed_ids, media_type):
        """Collect unseen image/video records of *media_type* from *entry*."""
        entries = []
        for url in entry.get(media_type, []):
            filename = self._extract_filename(url)
            entry_id = self.generate_entry_id(filename, user_info["screen_name"], media_type)
            if entry_id in processed_ids:
                continue
            new_entry = self.create_entry_template(filename, user_info, media_type, url)
            entries.append(new_entry)
            logger.debug(f"📷 发现新{media_type}条目: {filename}")
        return entries

    def _process_special_urls(self, entry, user_info, processed_ids):
        """Collect unseen broadcast/space records from the expanded URLs."""
        entries = []
        for url in entry.get("expand_urls", []):
            media_type = self._detect_media_type(url)
            if not media_type:
                continue
            filename = self._extract_filename(url)
            entry_id = self.generate_entry_id(filename, user_info["screen_name"], media_type)
            if entry_id in processed_ids:
                continue
            new_entry = self.create_entry_template(filename, user_info, media_type, url)
            entries.append(new_entry)
            logger.debug(f"🔗 发现特殊链接: {media_type} - {filename}")
        return entries

    @staticmethod
    def _extract_tweet_id(tweet_url):
        """Return the numeric tweet id from a tweet URL, or '' if absent."""
        if not tweet_url:
            return ""
        # The id follows "/status/" in the URL.
        parts = tweet_url.split("/status/")
        if len(parts) > 1:
            # Strip query string and trailing path segments.
            tweet_id = parts[1].split("?")[0].split("/")[0]
            # Accept only purely numeric ids.
            if tweet_id.isdigit():
                return tweet_id
        return ""

    @staticmethod
    def _extract_filename(url):
        """Return the final path component of *url*, without query string."""
        return url.split("?")[0].split("/")[-1]

    @staticmethod
    def _detect_media_type(url):
        """Classify special URLs as 'broadcasts' or 'spaces'; None otherwise."""
        if "/broadcasts/" in url:
            return "broadcasts"
        if "/spaces/" in url:
            return "spaces"
        return None
# --------------------
# 文件管理器
# --------------------
class FileManager:
    """Thin wrapper around JSON file input/output with logging."""

    @staticmethod
    def load_json(path):
        """Load and return JSON from *path*; log and re-raise on failure."""
        try:
            with open(path, "r", encoding="utf-8") as fh:
                payload = json.load(fh)
            logger.info(f"📂 成功加载文件: {path}")
            return payload
        except FileNotFoundError:
            logger.error(f"❌ 文件未找到: {path}")
            raise
        except json.JSONDecodeError:
            logger.error(f"❌ JSON解析失败: {path}")
            raise

    @staticmethod
    def save_output(data, output_path):
        """Write *data* as pretty-printed UTF-8 JSON, creating parent dirs."""
        parent = os.path.dirname(output_path)
        if not os.path.exists(parent):
            os.makedirs(parent)
            logger.info(f"📁 创建输出目录: {parent}")
        with open(output_path, "w", encoding="utf-8") as fh:
            json.dump(data, fh, ensure_ascii=False, indent=2)
        logger.info(f"💾 输出已保存至: {output_path}")
# --------------------
# 核心流程
# --------------------
class XBotCore:
    """Main processing pipeline: dedup media entries and merge daily output."""

    def __init__(self):
        self.shard_manager = ShardManager()
        self.entry_processor = EntryProcessor()
        self.file_manager = FileManager()
        # Set of entry ids already handled in previous runs.
        self.processed_ids = self.shard_manager.load_processed_entries()

    def process_single_day(self, data_path, output_path):
        """Process one day's data file; return the number of new entries."""
        logger.info(f"\n{'-' * 40}\n🔍 开始处理: {os.path.basename(data_path)}")
        # Load the raw tweet dump and group it per user.
        raw_data = self.file_manager.load_json(data_path)
        user_data = self._organize_user_data(raw_data)
        # Collect new (unseen) media entries across all users.
        all_new_entries = []
        for username in user_data:
            user_info = user_data[username]
            user_entries = []
            for entry in user_info["entries"]:
                user_entries.extend(self.entry_processor.process_entry(entry, user_info, self.processed_ids))
            # Persist the new ids so later runs skip them.
            for entry in user_entries:
                entry_id = EntryProcessor.generate_entry_id(
                    entry["file_name"],
                    entry["user"]["screen_name"],
                    entry["media_type"]
                )
                self.shard_manager.save_entry_id(entry_id)
            all_new_entries.extend(user_entries)
        # Merge with any existing output file and save.
        final_output = self._merge_output(output_path, all_new_entries)
        self.file_manager.save_output(final_output, output_path)
        logger.info(f"🎉 本日处理完成!新增条目: {len(all_new_entries)}\n{'-' * 40}\n")
        return len(all_new_entries)

    def _organize_user_data(self, raw_data):
        """Regroup the raw tweet list into a per-user dict of entries."""
        organized = {}
        for item in raw_data:
            user = item.get("user", {})
            username = user.get("screenName")
            # Skip records without a screen name.
            if not username:
                continue
            if username not in organized:
                organized[username] = {
                    "screen_name": username,
                    "name": user.get("name", "N/A"),
                    "entries": []
                }
            organized[username]["entries"].append({
                "tweet_url": item.get("tweetUrl", ""),
                "full_text": item.get("fullText", ""),
                "publish_time": item.get("publishTime", ""),
                "images": item.get("images", []),
                "videos": item.get("videos", []),
                "expand_urls": item.get("expandUrls", [])
            })
        return organized

    def _merge_output(self, output_path, new_entries):
        """Merge *new_entries* into the existing output file, deduping by id."""
        existing = []
        if os.path.exists(output_path):
            existing = self.file_manager.load_json(output_path)
            logger.info(f"🔄 合并现有输出文件,已有条目: {len(existing)}")
        existing_ids = {self._get_entry_id(e) for e in existing}
        merged = existing.copy()
        added = 0
        for entry in new_entries:
            entry_id = self._get_entry_id(entry)
            if entry_id not in existing_ids:
                merged.append(entry)
                added += 1
        # Keep the output chronologically ordered.
        merged.sort(key=lambda x: x.get("publish_time", ""))
        logger.info(f"🆕 新增条目: {added} | 合并后总数: {len(merged)}")
        return merged

    @staticmethod
    def _get_entry_id(entry):
        """Unique id for an output record (file name + user + media type)."""
        return f"{entry['file_name']}_{entry['user']['screen_name']}_{entry['media_type']}"
# --------------------
# 命令行接口
# --------------------
def main():
    """Command-line entry point.

    Supported invocations:
      1. ``python X-Bot.py <data_file> <output_file>`` - explicit output path
      2. ``python X-Bot.py <data_file>`` - output to today's dated file;
         prints the new-entry count to stdout (consumed by INI-XT-Bot)
      3. ``python X-Bot.py`` - auto mode, processes the last 8 days
    """
    core = XBotCore()
    args = sys.argv[1:]  # command-line arguments
    # Mode 1: explicit data file + output file.
    if len(args) == 2:
        data_path = os.path.normpath(args[0])
        output_path = os.path.normpath(args[1])
        if os.path.exists(data_path):
            logger.info(f"🔧 自定义模式处理:{data_path}")
            core.process_single_day(data_path, output_path)
        else:
            logger.info(f"⏭️ 跳过不存在的数据文件:{data_path}")
    # Mode 2: single data file, output path derived from today's date.
    elif len(args) == 1:
        data_path = os.path.normpath(args[0])
        current_date = datetime.now()
        # Today's output path: <output>/<YYYY-MM>/<YYYY-MM-DD>.json
        output_dir = os.path.normpath(
            f"{Config.DEFAULT_OUTPUT_DIR}{current_date.strftime(Config.YEAR_MONTH)}/"
        )
        output_filename = f"{current_date.strftime(Config.YEAR_MONTH_DAY)}.json"
        output_path = os.path.join(output_dir, output_filename)
        if os.path.exists(data_path):
            logger.info(f"⚡ 单文件模式处理:{os.path.basename(data_path)}")
            os.makedirs(output_dir, exist_ok=True)
            new_entries_count = core.process_single_day(data_path, output_path)
            # Report the new-entry count on stdout for the caller to parse.
            print(new_entries_count)
        else:
            logger.info(f"⏭️ 跳过不存在的数据文件:{data_path}")
            print(0)
    # Mode 3: no arguments - process the last week automatically.
    elif len(args) == 0:
        current_date = datetime.now()
        logger.info("🤖 自动模式:处理最近一周数据")
        for day_offset in reversed(range(8)):  # 8 days including today
            target_date = current_date - timedelta(days=day_offset)
            # Input path keyed by the data's own date.
            data_dir = os.path.normpath(
                f"{Config.DEFAULT_INPUT_DIR}{target_date.strftime(Config.YEAR_MONTH)}/"
            )
            data_filename = f"{target_date.strftime(Config.YEAR_MONTH_DAY)}.json"
            data_path = os.path.join(data_dir, data_filename)
            # Output path mirrors the input date.
            output_dir = os.path.normpath(
                f"{Config.DEFAULT_OUTPUT_DIR}{target_date.strftime(Config.YEAR_MONTH)}/"
            )
            output_path = os.path.join(output_dir, data_filename)
            if os.path.exists(data_path):
                logger.info(f"🔍 正在处理 {target_date.strftime(Config.YEAR_MONTH_DAY)} 数据...")
                os.makedirs(output_dir, exist_ok=True)
                core.process_single_day(data_path, output_path)
            else:
                logger.info(f"⏭️ 跳过不存在的数据文件:{data_filename}")
    # Anything else: print usage and exit non-zero.
    else:
        logger.error("❗ 参数错误!支持以下模式:")
        logger.error("1. 全参数模式:脚本 + 数据文件 + 输出文件")
        logger.error("2. 单文件模式:脚本 + 数据文件(输出到当天目录)")
        logger.error("3. 自动模式:仅脚本(处理最近一周数据)")
        logger.error("示例:")
        logger.error(
            "python X-Bot.py ../../TypeScript/tweets/2000-01/2000-01-01.json ../output/2000-01/2000-01-01.json")
        logger.error("python X-Bot.py ../../TypeScript/tweets/user/xxx.json")
        logger.error("python X-Bot.py")
        sys.exit(1)


if __name__ == "__main__":
    try:
        main()
        logger.info("🏁 所有处理任务已完成!")
    except KeyboardInterrupt:
        # Graceful exit on Ctrl-C.
        logger.warning("⏹️ 用户中断操作")
        sys.exit(0)
    except Exception as e:
        logger.error(f"💥 未处理的异常: {str(e)}")
        sys.exit(1)
| python | MIT | f577d2a3161a5209b059b07aada4c087a2aa2894 | 2026-01-05T07:14:36.260795Z | false |
iniwym/XT-Bot | https://github.com/iniwym/XT-Bot/blob/f577d2a3161a5209b059b07aada4c087a2aa2894/Python/utils/encrypt_7z.py | Python/utils/encrypt_7z.py | import sys
import py7zr
from pathlib import Path
# Add the project root to the module search path so `utils` imports resolve.
_project_root = Path(__file__).resolve().parent.parent
sys.path.append(str(_project_root))
from utils.log_utils import LogUtils

# Module-level logger via the shared logging helper.
logger = LogUtils().get_logger()
logger.info("🔄 Encrypt_7z 初始化完成")  # startup marker
def compress_folders(dirs, output_file, password):
    """Compress *dirs* into a password-protected 7z archive at *output_file*.

    Uses LZMA2 with an encrypted header; on any failure the error is
    logged and the process exits with status 0 (soft failure).
    """
    lzma2_filters = [{
        'id': py7zr.FILTER_LZMA2,
        'preset': 7,
        'dict_size': 64 * 1024 * 1024
    }]
    try:
        with py7zr.SevenZipFile(
            output_file,
            mode='w',
            password=password,
            header_encryption=True,  # also encrypt file names
            filters=lzma2_filters
        ) as archive:
            # Each folder is stored under its own base name in the archive.
            for folder in dirs:
                folder_path = Path(folder)
                archive.writeall(folder_path, folder_path.name)
        logger.info(f"✓ 压缩完成:{output_file}")
    except Exception as e:
        logger.error(f"⚠ 压缩失败:{str(e)}")
        sys.exit(0)
if __name__ == '__main__':
    # Validate argument count and password before compressing.
    """验证参数格式及路径有效性"""
    if len(sys.argv) != 4:
        logger.warning('⚠ 参数错误!正确格式:python encrypt_7z.py "[目录1,目录2,...]" [输出文件.7z] [密码]')
        sys.exit(0)
    # argv[1] is a comma-separated list of directories.
    dirs = sys.argv[1].split(',')
    output_file = sys.argv[2]
    password = sys.argv[3]
    # An empty or whitespace-only password disables compression entirely.
    if not password.strip():
        logger.warning('⚠ 密码为空,不执行压缩加密操作。')
        sys.exit(0)
    compress_folders(dirs, output_file, password)
| python | MIT | f577d2a3161a5209b059b07aada4c087a2aa2894 | 2026-01-05T07:14:36.260795Z | false |
iniwym/XT-Bot | https://github.com/iniwym/XT-Bot/blob/f577d2a3161a5209b059b07aada4c087a2aa2894/Python/utils/get_redis_config.py | Python/utils/get_redis_config.py | import os
import json
import sys
import redis
from redis.exceptions import RedisError
from pathlib import Path
# Add the project root to the module search path so `utils` imports resolve.
_project_root = Path(__file__).resolve().parent.parent
sys.path.append(str(_project_root))
from utils.log_utils import LogUtils

# Module-level logger via the shared logging helper.
logger = LogUtils().get_logger()
logger.info("🔄 Get_Redis_Config 初始化完成")  # startup marker
def main():
    """Fetch configuration JSON from Redis and write it to the local file.

    The Redis connection settings come from the ``REDIS_CONFIG`` environment
    variable (a JSON object). On any failure the script exits with status 0
    so the pipeline falls back to the existing local config file.
    """
    # Read the Redis connection settings from the environment.
    redis_config = os.environ.get('REDIS_CONFIG')
    if not redis_config:
        logger.warning("ℹ 未配置 REDIS_CONFIG,直接使用本地配置文件")
        sys.exit(0)
    logger.info("✓ 已读取环境变量 REDIS_CONFIG")
    # Parse the Redis connection settings.
    try:
        config = json.loads(redis_config)
        logger.info("✓ Redis配置解析成功")
    except json.JSONDecodeError as e:
        logger.warning(f"⚠ 警告:Redis配置JSON格式错误({e}),使用本地配置")
        sys.exit(0)
    # Connect to Redis and verify connectivity/credentials.
    try:
        r = redis.Redis(
            host=config.get('host', 'localhost'),
            port=config.get('port', 6379),
            password=config.get('password'),
            db=config.get('db', 0),
            decode_responses=True,
            socket_connect_timeout=5
        )
        # PING validates both the connection and authentication.
        r.ping()
        logger.info("✓ Redis连接验证通过")
    except RedisError as e:
        logger.warning(f"⚠ 警告:Redis连接失败({e}),使用本地配置")
        sys.exit(0)
    # Fetch the configuration payload from the 'config' key.
    config_data = r.get('config')
    if not config_data:
        logger.warning("⚠ 警告:Redis中未找到'config'键值,使用本地配置")
        sys.exit(0)
    logger.info("✓ 成功读取配置数据")
    # Validate that the payload is well-formed JSON.
    try:
        json_obj = json.loads(config_data)
        logger.info("✓ 配置数据格式验证成功")
    except json.JSONDecodeError as e:
        logger.warning(f"⚠ 警告:配置数据JSON格式错误({e}),使用本地配置")
        sys.exit(0)
    # Write the configuration file.
    file_path = '../../config/config.json'
    try:
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(json_obj, f, indent=2, ensure_ascii=False)
        logger.info(f"✓ 配置文件已生成:{os.path.abspath(file_path)}")
    except IOError as e:
        logger.warning(f"⚠ 警告:文件写入失败({e}),使用现有配置")
        sys.exit(0)


if __name__ == "__main__":
    main()
| python | MIT | f577d2a3161a5209b059b07aada4c087a2aa2894 | 2026-01-05T07:14:36.260795Z | false |
iniwym/XT-Bot | https://github.com/iniwym/XT-Bot/blob/f577d2a3161a5209b059b07aada4c087a2aa2894/Python/utils/log_utils.py | Python/utils/log_utils.py | import sys
import json
import logging
from datetime import datetime
from pathlib import Path
from dotenv import load_dotenv
# Locate the Python subproject root (two levels up from this file).
python_root = Path(__file__).resolve().parent.parent
# Locate the repository root (three levels up from this file).
project_root = python_root.parent
# Load environment variables from the repository-root .env file.
env_path = project_root / '.env'
load_dotenv(dotenv_path=env_path)
class LogUtils:
    """Configure and expose a logger with console and daily-file handlers.

    The console level is resolved from, in priority order: the explicit
    ``console_level`` argument, the ``consoleLogLevel`` key in
    ``config/config.json``, then ``INFO``.
    """

    def __init__(self, name=__name__, log_dir="logs",
                 console_level=None,
                 file_level=logging.DEBUG,
                 fmt='[%(asctime)s] [%(levelname)-5s] %(message)s',
                 datefmt="%Y-%m-%d %H:%M:%S"):
        """
        :param name: logger name
        :param log_dir: log directory (relative to the Python subproject root)
        :param console_level: optional; overrides the configured console level
        :param file_level: file handler log level
        :param fmt: log record format
        :param datefmt: timestamp format
        """
        self.logger = logging.getLogger(name)
        # Bug fix: guard against re-adding handlers when LogUtils(name) is
        # instantiated more than once per process, which previously made
        # every log record be emitted multiple times.
        if self.logger.handlers:
            return
        config_path = project_root / "config" / "config.json"
        # Resolve the console log level (argument > config file > INFO).
        resolved_console_level = self._get_console_level(config_path, console_level)
        # The logger itself passes everything; the handlers filter by level.
        self.logger.setLevel(logging.DEBUG)
        # Make sure the log directory exists.
        log_dir = python_root / log_dir
        log_dir.mkdir(parents=True, exist_ok=True)
        # One log file per calendar day.
        log_filename = f"python-{datetime.now().strftime('%Y-%m-%d')}.log"
        log_path = log_dir / log_filename
        formatter = logging.Formatter(fmt, datefmt=datefmt)
        # Console handler writing to stdout.
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setLevel(resolved_console_level)
        console_handler.setFormatter(formatter)
        # File handler writing UTF-8.
        file_handler = logging.FileHandler(log_path, encoding='utf-8')
        file_handler.setLevel(file_level)
        file_handler.setFormatter(formatter)
        # Attach both handlers.
        self.logger.addHandler(console_handler)
        self.logger.addHandler(file_handler)

    def _get_console_level(self, config_path, manual_level):
        """Resolve the console level: explicit argument > config file > INFO."""
        if manual_level is not None:
            return manual_level
        try:
            # Read the configuration file.
            with open(config_path, 'r', encoding='utf-8') as f:
                config = json.load(f)
            # Map the configured level name to a logging constant.
            level_str = config.get('consoleLogLevel', 'INFO').upper()
            # Accept the common "WARN" alias for "WARNING".
            level_str = {"WARN": "WARNING"}.get(level_str, level_str)
            return getattr(logging, level_str, logging.INFO)
        except (FileNotFoundError, json.JSONDecodeError) as e:
            self._handle_config_error(e)
            return logging.INFO

    def _handle_config_error(self, error):
        """Log a warning describing a config-file read/parse failure."""
        error_msg = {
            FileNotFoundError: "⚠️ 配置文件未找到,使用默认配置",
            json.JSONDecodeError: "⚠️ 配置文件格式错误,使用默认配置"
        }.get(type(error), "⚠️ 未知配置错误")
        # Use a plain logger here: the configured logger may not exist yet.
        temp_logger = logging.getLogger(__name__)
        temp_logger.warning(f"{error_msg} | 错误详情: {str(error)}")

    def get_logger(self):
        """Return the configured :class:`logging.Logger`."""
        return self.logger
| python | MIT | f577d2a3161a5209b059b07aada4c087a2aa2894 | 2026-01-05T07:14:36.260795Z | false |
iniwym/XT-Bot | https://github.com/iniwym/XT-Bot/blob/f577d2a3161a5209b059b07aada4c087a2aa2894/Python/utils/sync_data.py | Python/utils/sync_data.py | import sys
import os
import shutil
import argparse
from pathlib import Path
# Add the project root to the module search path so `utils` imports resolve.
_project_root = Path(__file__).resolve().parent.parent
sys.path.append(str(_project_root))
from utils.log_utils import LogUtils

# Module-level logger via the shared logging helper.
logger = LogUtils().get_logger()
logger.info("🔄 Sync_Data 初始化完成")  # startup marker
def sync_dirs(source, dest):
    """One-way mirror of *source* into *dest*.

    Copies new/changed files (compared by mtime and size), deletes files
    present only in *dest*, and removes directories left empty afterwards.

    :raises FileNotFoundError: if *source* does not exist
    """
    # Normalize paths and drop trailing separators.
    source = os.path.normpath(source)
    dest = os.path.normpath(dest)
    # The source directory must exist.
    if not os.path.exists(source):
        raise FileNotFoundError(f"源目录不存在:'{source}'")
    # Collect every file path in the source, relative to its root.
    source_files = set()
    for root, dirs, files in os.walk(source):
        rel_path = os.path.relpath(root, source)
        for file in files:
            file_rel_path = os.path.join(rel_path, file) if rel_path != '.' else file
            source_files.add(file_rel_path)
    # Copy or update files into the destination.
    for file_rel in source_files:
        src_path = os.path.join(source, file_rel)
        dest_path = os.path.join(dest, file_rel)
        dest_dir = os.path.dirname(dest_path)
        # Create the destination directory structure.
        os.makedirs(dest_dir, exist_ok=True)
        # Skip when the destination is at least as new and the same size
        # (copy2 preserves mtimes, so unchanged files compare equal).
        if os.path.exists(dest_path):
            src_stat = os.stat(src_path)
            dest_stat = os.stat(dest_path)
            if src_stat.st_mtime <= dest_stat.st_mtime and src_stat.st_size == dest_stat.st_size:
                continue  # identical file, skip the copy
        shutil.copy2(src_path, dest_path)
        logger.debug(f"📥 已复制:{src_path} -> {dest_path}")
    # Collect every file path in the destination, relative to its root.
    dest_files = set()
    for root, dirs, files in os.walk(dest):
        rel_path = os.path.relpath(root, dest)
        for file in files:
            file_rel_path = os.path.join(rel_path, file) if rel_path != '.' else file
            dest_files.add(file_rel_path)
    # Delete files that exist only in the destination.
    for file_rel in (dest_files - source_files):
        file_path = os.path.join(dest, file_rel)
        try:
            os.remove(file_path)
            logger.debug(f"🗑️ 已删除:{file_path}")
        except Exception as e:
            logger.error(f"⚠ 删除文件失败:{file_path} - {str(e)}")
    # Remove empty directories, leaves first (bottom-up walk).
    for root, dirs, files in os.walk(dest, topdown=False):
        # Only directories that are now completely empty are removed.
        if not os.listdir(root):
            try:
                os.rmdir(root)
                logger.debug(f"📁 已删除空目录:{root}")
            except Exception as e:
                logger.error(f"⚠ 删除目录失败:{root} - {str(e)}")
def main():
    """Run one of the predefined sync task groups (default: ``pull``)."""
    # Predefined task groups: pull (data-repo -> working tree) and
    # push (working tree -> data-repo); same directory pairs, reversed.
    TASK_GROUPS = {
        "pull": [
            {"source": "data-repo/config", "dest": "config"},
            {"source": "data-repo/Python/dataBase", "dest": "Python/dataBase"},
            {"source": "data-repo/Python/output", "dest": "Python/output"},
            {"source": "data-repo/TypeScript/data", "dest": "TypeScript/data"},
            {"source": "data-repo/TypeScript/tweets", "dest": "TypeScript/tweets"},
        ],
        "push": [
            {"dest": "data-repo/config", "source": "config"},
            {"dest": "data-repo/Python/dataBase", "source": "Python/dataBase"},
            {"dest": "data-repo/Python/output", "source": "Python/output"},
            {"dest": "data-repo/TypeScript/data", "source": "TypeScript/data"},
            {"dest": "data-repo/TypeScript/tweets", "source": "TypeScript/tweets"},
        ]
    }
    # Command-line interface: a single optional positional argument.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'task_group',
        nargs='?',  # optional positional
        default='pull',
        choices=TASK_GROUPS.keys(),
        help="选择同步任务组(pull/push)"
    )
    args = parser.parse_args()
    # Run every sync task in the selected group; a failed task is logged
    # and does not stop the remaining tasks.
    logger.info(f"🔄 正在执行任务组 [{args.task_group}]")
    for task in TASK_GROUPS[args.task_group]:
        src = task["source"]
        dst = task["dest"]
        logger.debug(f"→ 同步任务: {src} => {dst}")
        try:
            sync_dirs(src, dst)
        except Exception as e:
            logger.error(f"⚠ 同步失败:{src} => {dst} - {str(e)}")
            continue


if __name__ == "__main__":
    main()
| python | MIT | f577d2a3161a5209b059b07aada4c087a2aa2894 | 2026-01-05T07:14:36.260795Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/__init__.py | src/cellcharter/__init__.py | from importlib.metadata import version
from . import datasets, gr, pl, tl
__all__ = ["gr", "pl", "tl", "datasets"]
__version__ = version("cellcharter")
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/_utils.py | src/cellcharter/_utils.py | from __future__ import annotations
from typing import Union
import numpy as np
AnyRandom = Union[int, np.random.RandomState, None]
def str2list(value: Union[str, list]) -> list:
    """Wrap a bare string in a one-element list; pass lists through unchanged."""
    if isinstance(value, str):
        return [value]
    return value
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/_constants/_pkg_constants.py | src/cellcharter/_constants/_pkg_constants.py | """Internal constants not exposed to the user."""
class Key:
    """Namespaced internal key constants."""

    class obs:
        """Keys used in ``adata.obs``."""

        # Plain class attribute instead of the former ``@classmethod`` +
        # ``@property`` chain: wrapping a descriptor with ``classmethod``
        # was deprecated in Python 3.11 and removed in 3.13, where the old
        # code would yield the descriptor object instead of the string.
        # Read access (``Key.obs.sample``) is unchanged.
        sample: str = "sample"
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/datasets/_dataset.py | src/cellcharter/datasets/_dataset.py | from copy import copy
from squidpy.datasets._utils import AMetadata
# Metadata descriptor for the pre-processed CODEX mouse spleen dataset.
_codex_mouse_spleen = AMetadata(
    name="codex_mouse_spleen",
    doc_header="Pre-processed CODEX dataset of mouse spleen from `Goltsev et al "
    "<https://doi.org/10.1016/j.cell.2018.07.010>`__.",
    shape=(707474, 29),
    url="https://figshare.com/ndownloader/files/38538101",
)

# Register a module-level loader function for every AMetadata descriptor
# defined above. NOTE(review): presumably `_create_function` registers the
# loader under the name without the leading underscore (cf. `__all__`) —
# confirm against squidpy.datasets._utils.
for name, var in copy(locals()).items():
    if isinstance(var, AMetadata):
        var._create_function(name, globals())

__all__ = ["codex_mouse_spleen"]  # noqa: F822
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/datasets/__init__.py | src/cellcharter/datasets/__init__.py | from ._dataset import * # noqa: F403
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/pl/_shape.py | src/cellcharter/pl/_shape.py | from __future__ import annotations
import warnings
from pathlib import Path
import anndata as ad
import geopandas
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from anndata import AnnData
from squidpy._docs import d
from ._utils import adjust_box_widths
def plot_boundaries(
    adata: AnnData,
    sample: str,
    library_key: str = "sample",
    component_key: str = "component",
    alpha_boundary: float = 0.5,
    show_cells: bool = True,
    save: str | Path | None = None,
) -> None:
    """Deprecated alias of :func:`boundaries`.

    Emits a :class:`FutureWarning` and forwards all arguments unchanged.

    Parameters
    ----------
    %(adata)s
    sample
        Sample to plot.
    library_key
        Key in :attr:`anndata.AnnData.obs` where the sample labels are stored.
    component_key
        Key in :attr:`anndata.AnnData.obs` where the component labels are stored.
    alpha_boundary
        Transparency of the boundaries.
    show_cells
        Whether to show the cells or not.
    save
        Path to save the plot.

    Returns
    -------
    %(plotting_returns)s
    """
    # Deprecation notice, attributed to the caller's frame.
    warnings.warn(
        "plot_boundaries is deprecated and will be removed in the next release. Please use `boundaries` instead.",
        FutureWarning,
        stacklevel=2,
    )
    # Forward everything to the replacement implementation.
    boundaries(
        adata=adata,
        sample=sample,
        library_key=library_key,
        component_key=component_key,
        alpha_boundary=alpha_boundary,
        show_cells=show_cells,
        save=save,
    )
@d.dedent
def boundaries(
    adata: AnnData,
    sample: str,
    library_key: str = "sample",
    component_key: str = "component",
    alpha_boundary: float = 0.5,
    show_cells: bool = True,
    cell_radius: float = 1.0,
    save: str | Path | None = None,
    **kwargs,
) -> None:
    """
    Plot the boundaries of the clusters.

    Parameters
    ----------
    %(adata)s
    sample
        Sample to plot.
    library_key
        Key in :attr:`anndata.AnnData.obs` where the sample labels are stored.
    component_key
        Key in :attr:`anndata.AnnData.obs` where the component labels are stored.
    alpha_boundary
        Transparency of the boundaries.
    show_cells
        Whether to show the cells or not.
    cell_radius
        Radius of the cells, if present.
    save
        Path to save the plot.
    kwargs
        Additional arguments to pass to the `spatialdata.pl.render_shapes()` function.

    Notes
    -----
    To visualize boundaries with this function, install `spatialdata` and
    `spatialdata-plot`, or install the optional extra:
    - `pip install "cellcharter[shape]"`

    Returns
    -------
    %(plotting_returns)s
    """
    # Optional dependency check
    try:
        import spatialdata as sd  # type: ignore
        import spatialdata_plot  # noqa: F401
    except ImportError as err:
        raise ImportError(
            "pl.boundaries requires 'spatialdata' and 'spatialdata-plot'. Install them with\n"
            " pip install spatialdata spatialdata-plot"
        ) from err
    # Restrict to the requested sample.
    adata = adata[adata.obs[library_key] == sample].copy()
    del adata.raw
    # Keep only boundary polygons of components present in this sample.
    clusters = adata.obs[component_key].unique()
    boundaries = {
        cluster: boundary
        for cluster, boundary in adata.uns[f"shape_{component_key}"]["boundary"].items()
        if cluster in clusters
    }
    gdf = geopandas.GeoDataFrame(geometry=list(boundaries.values()), index=np.arange(len(boundaries)).astype(str))
    # One observation per component boundary.
    adata_components = ad.AnnData(
        obs=pd.DataFrame(list(boundaries.keys()), columns=[component_key], index=np.arange(len(boundaries)).astype(str))
    )
    adata_components.obs["region"] = "component_boundaries"
    adata_components.obs["region"] = pd.Categorical(adata_components.obs["region"])
    adata_components.index = "cluster_" + adata_components.obs.index
    adata_components.obs["instance_id"] = np.arange(len(boundaries)).astype(str)
    adata_components.obs[component_key] = pd.Categorical(adata_components.obs[component_key])
    adata_components.obs[component_key] = adata_components.obs[component_key].cat.remove_unused_categories()
    shapes = {"component_boundaries": sd.models.ShapesModel.parse(gdf)}
    tables = {
        "components": sd.models.TableModel.parse(
            adata_components, region_key="region", region="component_boundaries", instance_key="instance_id"
        )
    }
    if show_cells:
        # Build the per-cell table/shape elements for rendering.
        adata_cells = ad.AnnData(obs=adata.obs[[component_key]], obsm={"spatial": adata.obsm["spatial"]})
        adata_cells.obs.loc[adata_cells.obs[component_key] == -1, component_key] = np.nan
        adata_cells.obs.index = "cell_" + adata_cells.obs.index
        adata_cells.obs["instance_id"] = adata_cells.obs.index
        adata_cells.obs["region"] = "cell_circles"
        adata_cells.obs["region"] = pd.Categorical(adata_cells.obs["region"])
        # Ensure component_key is categorical
        if not pd.api.types.is_categorical_dtype(adata_cells.obs[component_key]):
            adata_cells.obs[component_key] = pd.Categorical(adata_cells.obs[component_key])
        # Check if spatial data exists
        if "spatial" not in adata_cells.obsm or adata_cells.obsm["spatial"].shape[0] == 0:
            warnings.warn("No spatial data found for cells. Skipping cell visualization.", stacklevel=2)
            show_cells = False
        else:
            tables["cells"] = sd.models.TableModel.parse(
                adata_cells, region_key="region", region="cell_circles", instance_key="instance_id"
            )
            shapes["cell_circles"] = sd.models.ShapesModel.parse(
                adata_cells.obsm["spatial"], geometry=0, radius=1.0, index=adata_cells.obs["instance_id"]
            )
    sdata = sd.SpatialData(shapes=shapes, tables=tables)
    _, ax = plt.subplots(**kwargs)
    palette = None
    groups = None
    if show_cells:
        try:
            # Determine the category order used for colors/legend.
            if pd.api.types.is_categorical_dtype(sdata.tables["cells"].obs[component_key]):
                groups = list(sdata.tables["cells"].obs[component_key].cat.categories)
            else:
                groups = list(sdata.tables["cells"].obs[component_key].unique())
            # Remove any NaN values from groups
            groups = [g for g in groups if pd.notna(g)]
        except (KeyError, AttributeError) as e:
            warnings.warn(f"Could not determine groups for plotting: {e}", stacklevel=2)
            groups = None
        from squidpy.pl._color_utils import _maybe_set_colors

        # Reuse/assign the squidpy color mapping for the component key.
        _maybe_set_colors(
            source=sdata.tables["cells"], target=sdata.tables["cells"], key=component_key, palette=palette
        )
        palette = sdata.tables["cells"].uns[f"{component_key}_colors"]
        try:
            sdata.pl.render_shapes(
                "cell_circles",
                color=component_key,
                scale=cell_radius,
                palette=palette,
                groups=groups,
                method="matplotlib",
            ).pl.show(ax=ax, legend_loc=None)
        except TypeError:  # TODO: remove after spatialdata-plot issue #256 is fixed
            warnings.warn(
                "Until the next spatialdata_plot release, the cells that do not belong to any component will be displayed with a random color instead of grey.",
                stacklevel=2,
            )
            # Create a copy of the table with modified component labels
            modified_table = sdata.tables["cells"].copy()
            modified_table.obs[component_key] = modified_table.obs[component_key].cat.add_categories([-1])
            modified_table.obs[component_key] = modified_table.obs[component_key].fillna(-1)
            # Update the spatialdata object with the modified table
            sdata.tables["cells"] = modified_table
            sdata.pl.render_shapes(
                "cell_circles",
                color=component_key,
                scale=cell_radius,
                palette=palette,
                groups=groups,
                method="matplotlib",
            ).pl.show(ax=ax, legend_loc=None)
    # Draw the component boundary polygons on top.
    sdata.pl.render_shapes(
        element="component_boundaries",
        color=component_key,
        fill_alpha=alpha_boundary,
        palette=palette,
        groups=groups if groups is not None else list(adata_components.obs[component_key].cat.categories),
        method="matplotlib",
    ).pl.show(ax=ax)
    if save is not None:
        plt.savefig(save, bbox_inches="tight")
def plot_shape_metrics(
    adata: AnnData,
    condition_key: str,
    condition_groups: list[str] | None = None,
    cluster_key: str | None = None,
    cluster_groups: list[str] | None = None,
    component_key: str = "component",
    metrics: str | tuple[str] | list[str] = ("linearity", "curl"),
    figsize: tuple[float, float] = (8, 7),
    save: str | Path | None = None,
    return_fig: bool = False,
):
    """
    Boxplots of the shape metrics between two conditions.

    .. deprecated::
        Use :func:`shape_metrics` instead. This function only emits a
        :class:`FutureWarning` and forwards all arguments unchanged.

    Parameters
    ----------
    %(adata)s
    condition_key
        Key in :attr:`anndata.AnnData.obs` where the condition labels are stored.
    condition_groups
        List of two conditions to compare. If None, all pairwise comparisons are made.
    cluster_key
        Key in :attr:`anndata.AnnData.obs` where the cluster labels are stored. This is used to filter the clusters to plot.
    cluster_groups
        List of clusters to plot. If None, all clusters are plotted.
    component_key
        Key in :attr:`anndata.AnnData.obs` where the component labels are stored.
    metrics
        List of metrics to plot. Available metrics are ``linearity``, ``curl``, ``elongation``, ``purity``.
    figsize
        Figure size.
    save
        Path to save the plot, forwarded to :func:`shape_metrics`.
    return_fig
        If ``True``, the figure object is returned by :func:`shape_metrics`.

    Returns
    -------
    Whatever :func:`shape_metrics` returns for the forwarded arguments.
    """
    # Print warning and call shape_metrics
    warnings.warn(
        "plot_shape_metrics is deprecated and will be removed in the next release. "
        "Please use `shape_metrics` instead.",
        FutureWarning,
        stacklevel=2,
    )

    return shape_metrics(
        adata=adata,
        condition_key=condition_key,
        condition_groups=condition_groups,
        cluster_key=cluster_key,
        cluster_groups=cluster_groups,
        component_key=component_key,
        metrics=metrics,
        figsize=figsize,
        save=save,
        return_fig=return_fig,
    )
def plot_shapes(data, x, y, hue, hue_order, fig, ax, fontsize: str | int = 14, title: str | None = None):
    """
    Create a boxplot with stripplot overlay for shape metrics visualization.

    Parameters
    ----------
    data
        DataFrame containing the data to plot.
    x
        Column name for x-axis variable.
    y
        Column name for y-axis variable.
    hue
        Column name for grouping variable.
    hue_order
        Order of hue categories.
    fig
        Matplotlib figure object.
    ax
        Matplotlib axes object.
    fontsize
        Font size for plot elements.
    title
        Title for the plot.

    Returns
    -------
    matplotlib.axes.Axes
        The modified axes object.
    """
    # NOTE: the original signature was annotated `-> None` but the axes object
    # is (and was) returned; the wrong annotation has been removed.
    new_ax = sns.boxplot(data=data, x=x, hue=hue, y=y, showfliers=False, hue_order=hue_order, ax=ax)
    adjust_box_widths(fig, 0.9)

    new_ax = sns.stripplot(
        data=data,
        x=x,
        hue=hue,
        y=y,
        palette="dark:0.08",
        size=4,
        jitter=0.13,
        dodge=True,
        hue_order=hue_order,
        ax=new_ax,
    )

    # Both boxplot and stripplot register legend entries; keep only the first
    # n_hues handles so each hue level appears exactly once.
    n_hues = len(data[hue].unique())
    if n_hues > 1:
        handles, labels = new_ax.get_legend_handles_labels()
        if len(handles) > 1:
            new_ax.legend(
                handles[0:n_hues],
                labels[0:n_hues],
                bbox_to_anchor=(1.0, 1.03),
                title=hue,
                prop={"size": fontsize},
                title_fontsize=fontsize,
            )
    else:
        # A single hue level needs no legend.
        if new_ax.get_legend() is not None:
            new_ax.get_legend().remove()

    # Shape metrics live in [0, 1]; pad slightly for visibility.
    new_ax.set_ylim(-0.05, 1.05)
    new_ax.set_title(title, fontdict={"fontsize": fontsize})
    new_ax.tick_params(axis="both", labelsize=fontsize)
    new_ax.set_xlabel(new_ax.get_xlabel(), fontsize=fontsize)
    new_ax.set_ylabel(new_ax.get_ylabel(), fontsize=fontsize)
    return new_ax
@d.dedent
def shape_metrics(
    adata: AnnData,
    condition_key: str | None = None,
    condition_groups: list[str] | None = None,
    cluster_key: str | None = None,
    cluster_groups: str | list[str] | None = None,
    component_key: str = "component",
    metrics: str | tuple[str] | list[str] | None = None,
    fontsize: str | int = 14,
    figsize: tuple[float, float] = (10, 7),
    ncols: int = 2,
    save: str | Path | None = None,
    return_fig: bool = False,
):
    """
    Boxplots of the shape metrics between two conditions.

    Parameters
    ----------
    %(adata)s
    condition_key
        Key in :attr:`anndata.AnnData.obs` where the condition labels are stored.
    condition_groups
        List of conditions to show. If None, all conditions are plotted.
    cluster_key
        Key in :attr:`anndata.AnnData.obs` where the cluster labels are stored. This is used to filter the clusters to plot.
    cluster_groups
        List of cluster to plot. If None, all clusters are plotted. Requires ``cluster_key``.
    component_key
        Key in :attr:`anndata.AnnData.obs` where the component labels are stored.
    metrics
        List of metrics to plot. Available metrics are ``linearity``, ``curl``, ``elongation``, ``purity``, ``rcs``. If `None`, all computed metrics are plotted.
    figsize
        Figure size.
    ncols
        Number of columns in the subplot grid when plotting multiple metrics.
    save
        Path to save the plot. If provided, the plot will be saved using default parameters (``bbox_inches='tight'``).
        For more control over saving parameters, use ``return_fig=True`` and call ``savefig()`` manually.
    return_fig
        If ``True``, return the figure object for further customization. Default is ``False``.

    Returns
    -------
    If ``return_fig=True``, returns the :class:`matplotlib.figure.Figure` object.
    Otherwise returns ``None``.

    Examples
    --------
    Basic usage with automatic saving:

    >>> cc.pl.shape_metrics(adata, condition_key='condition', save='plot.pdf')

    Advanced usage with custom save parameters:

    >>> fig = cc.pl.shape_metrics(adata, condition_key='condition', return_fig=True)
    >>> fig.savefig('plot.pdf', dpi=300, bbox_inches='tight', transparent=True)
    """
    if isinstance(metrics, str):
        metrics = [metrics]
    elif isinstance(metrics, tuple):
        metrics = list(metrics)

    # BUG FIX: filtering by cluster_groups without a cluster_key used to fail
    # later with an opaque KeyError; fail fast with a clear message instead.
    if cluster_groups is not None and cluster_key is None:
        raise ValueError("`cluster_groups` requires `cluster_key` to be set.")

    if (
        cluster_groups is not None
        and not isinstance(cluster_groups, list)
        and not isinstance(cluster_groups, np.ndarray)
    ):
        cluster_groups = [cluster_groups]

    if condition_groups is None:
        # BUG FIX: when both condition_groups and condition_key were None the
        # original wrapped None into [None]; keep it None instead.
        if condition_key is not None:
            condition_groups = adata.obs[condition_key].cat.categories
    else:
        if not isinstance(condition_groups, list) and not isinstance(condition_groups, np.ndarray):
            condition_groups = [condition_groups]

    if metrics is None:
        metrics = [
            metric
            for metric in ["linearity", "curl", "elongation", "purity", "rcs"]
            if metric in adata.uns[f"shape_{component_key}"]
        ]

    keys = []
    if condition_key is not None:
        keys.append(condition_key)
    if cluster_key is not None:
        keys.append(cluster_key)

    # One row per component, annotated with its condition/cluster labels.
    metrics_df = adata.obs[[component_key] + keys].drop_duplicates().dropna().set_index(component_key)

    # Initialize fig to None - will be set in one of the code paths below
    fig = None

    for metric in metrics:
        metrics_df[metric] = metrics_df.index.map(adata.uns[f"shape_{component_key}"][metric])

    if cluster_groups is not None:
        metrics_df = metrics_df[metrics_df[cluster_key].isin(cluster_groups)]

    metrics_melted = pd.melt(
        metrics_df,
        id_vars=keys,
        value_vars=metrics,
        var_name="metric",
    )

    # BUG FIX: the original unconditionally accessed metrics_melted[cluster_key],
    # raising KeyError(None) whenever cluster_key was None.
    if cluster_key is not None:
        metrics_melted[cluster_key] = metrics_melted[cluster_key].cat.remove_unused_categories()

    if cluster_key is not None and condition_key is not None and metrics_melted[condition_key].nunique() >= 2:
        nrows = (2 + ncols - 1) // ncols  # Ceiling division

        # Create figure with appropriate size
        fig, axes = plt.subplots(nrows, ncols, figsize=(figsize[0] * ncols, figsize[1] * nrows))
        if nrows == 1 and ncols == 1:
            axes = np.array([axes])
        axes = axes.flatten()

        # Calculate average axes height in inches
        avg_height = figsize[1] / 2
        # Set absolute spacing of 1.5 inches between subplots
        fig.subplots_adjust(hspace=1.5 / avg_height)

        plot_shapes(
            metrics_melted,
            "metric",
            "value",
            cluster_key,
            cluster_groups,
            fig=fig,
            ax=axes[0],
            title="Shape metrics by domain",
            fontsize=fontsize,
        )
        plot_shapes(
            metrics_melted,
            "metric",
            "value",
            condition_key,
            condition_groups,
            fig=fig,
            ax=axes[1],
            title="Shape metrics by condition",
            fontsize=fontsize,
        )
    else:
        fig, ax = plt.subplots(figsize=figsize)
        if cluster_key is not None:
            plot_shapes(
                metrics_melted,
                "metric",
                "value",
                cluster_key,
                cluster_groups,
                fig=fig,
                ax=ax,
                title="Shape metrics by domain",
                fontsize=fontsize,
            )
            if condition_key is not None:
                if metrics_melted[condition_key].nunique() < 2:
                    warnings.warn(
                        f"Only one condition {condition_groups[0]} for domain {cluster_groups}. Skipping condition plot.",
                        stacklevel=2,
                    )
                else:
                    plot_shapes(
                        metrics_melted,
                        "metric",
                        "value",
                        condition_key,
                        condition_groups,
                        fig=fig,
                        ax=ax,
                        title="Shape metrics by condition",
                        fontsize=fontsize,
                    )
        else:
            # Calculate number of rows needed based on number of metrics and ncols
            n_metrics = len(metrics)
            nrows = (n_metrics + ncols - 1) // ncols  # Ceiling division

            # Create figure with appropriate size
            fig, axes = plt.subplots(nrows, ncols, figsize=(figsize[0] * ncols, figsize[1] * nrows))
            if nrows == 1 and ncols == 1:
                axes = np.array([axes])
            axes = axes.flatten()

            # Plot each metric in its own subplot
            for i, metric in enumerate(metrics):
                ax = axes[i]
                plot_shapes(
                    metrics_df,
                    cluster_key if cluster_key is not None else condition_key,
                    metric,
                    condition_key if condition_key is not None else cluster_key,
                    condition_groups if condition_groups is not None else None,
                    fig=fig,
                    ax=ax,
                    title=f"Spatial domains: {metric}",
                    fontsize=fontsize,
                )

            # Hide any unused subplots.
            # BUG FIX: the original used `range(i + 1, ...)`, which raised
            # NameError when `metrics` was empty; index by n_metrics instead.
            for j in range(n_metrics, len(axes)):
                axes[j].set_visible(False)

    # If fig wasn't set in any code path (edge case), get the current figure
    if fig is None:
        fig = plt.gcf()

    if save is not None:
        fig.savefig(save, bbox_inches="tight")

    if return_fig:
        return fig
    return None
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/pl/_autok.py | src/cellcharter/pl/_autok.py | from __future__ import annotations
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from cellcharter.tl import ClusterAutoK
def autok_stability(autok: ClusterAutoK, save: str | Path | None = None, return_ax: bool = False):
    """
    Plot the clustering stability.

    The clustering stability is computed by :class:`cellcharter.tl.ClusterAutoK`.

    Parameters
    ----------
    autok
        The fitted :class:`cellcharter.tl.ClusterAutoK` model.
    save
        Path to save the plot. If `None`, the plot is not saved.
    return_ax
        If `True`, return the :class:`matplotlib.axes.Axes` object.

    Returns
    -------
    The axes object if ``return_ax`` is `True`, otherwise `None`.
    """
    # Stability is stored per intermediate K only, hence n_clusters[1:-1].
    robustness_df = pd.melt(
        pd.DataFrame.from_dict({k: autok.stability[i] for i, k in enumerate(autok.n_clusters[1:-1])}, orient="columns"),
        var_name="N. clusters",
        value_name="Stability",
    )
    ax = sns.lineplot(data=robustness_df, x="N. clusters", y="Stability")
    ax.set_xticks(autok.n_clusters[1:-1])

    # BUG FIX: style the axes *before* saving — the original called savefig
    # first, so the saved figure still had the top/right spines visible.
    ax.spines.right.set_visible(False)
    ax.spines.top.set_visible(False)

    if save:
        plt.savefig(save)

    if return_ax:
        return ax
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/pl/_nhood.py | src/cellcharter/pl/_nhood.py | from __future__ import annotations
import warnings
from itertools import combinations
from pathlib import Path
from types import MappingProxyType
from typing import Any, Mapping
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from anndata import AnnData
from matplotlib import rcParams
from matplotlib.axes import Axes
from squidpy._docs import d
from squidpy.gr._utils import _assert_categorical_obs
from squidpy.pl._color_utils import Palette_t, _maybe_set_colors
from squidpy.pl._graph import _get_data
from squidpy.pl._spatial_utils import _panel_grid
from cellcharter.pl._utils import _heatmap
def _plot_nhood_enrichment(
    adata: AnnData,
    nhood_enrichment_values: dict,
    cluster_key: str,
    row_groups: str | None = None,
    col_groups: str | None = None,
    annotate: bool = False,
    n_digits: int = 2,
    significance: float | None = None,
    method: str | None = None,
    title: str | None = "Neighborhood enrichment",
    cmap: str = "bwr",
    palette: Palette_t = None,
    cbar_kwargs: Mapping[str, Any] = MappingProxyType({}),
    fontsize=None,
    figsize: tuple[float, float] | None = None,
    dpi: int | None = None,
    ax: Axes | None = None,
    **kwargs: Any,
):
    """Render a single neighborhood-enrichment heatmap via :func:`_heatmap`.

    Wraps the enrichment matrix in a temporary :class:`anndata.AnnData` so that
    the heatmap helper can use the cluster colors stored in ``adata.uns``.
    Cells with p-value <= ``significance`` are marked with ``*``.
    """
    enrichment = nhood_enrichment_values["enrichment"]

    adata_enrichment = AnnData(X=enrichment.astype(np.float32))
    adata_enrichment.obs[cluster_key] = pd.Categorical(enrichment.index)

    if significance is not None:
        if "pvalue" not in nhood_enrichment_values:
            warnings.warn(
                "Significance requires gr.nhood_enrichment to be run with pvalues=True. Ignoring significance.",
                UserWarning,
                stacklevel=2,
            )
        else:
            # BUG FIX: np.empty_like(..., dtype=str) returns an *uninitialized*
            # unicode array, so non-significant cells could contain garbage
            # characters; build an explicitly empty-string array instead.
            adata_enrichment.layers["significant"] = np.full(enrichment.shape, "", dtype="<U1")
            adata_enrichment.layers["significant"][nhood_enrichment_values["pvalue"].values <= significance] = "*"

    _maybe_set_colors(source=adata, target=adata_enrichment, key=cluster_key, palette=palette)
    if figsize is None:
        # Default to one inch per row/column, shrunk to the selected groups.
        figsize = list(adata_enrichment.shape[::-1])
        if row_groups is not None:
            figsize[1] = len(row_groups)
        if col_groups is not None:
            figsize[0] = len(col_groups)
        figsize = tuple(figsize)

    _heatmap(
        adata_enrichment,
        key=cluster_key,
        rows=row_groups,
        cols=col_groups,
        annotate=annotate,
        n_digits=n_digits,
        method=method,
        title=title,
        cont_cmap=cmap,
        fontsize=fontsize,
        figsize=figsize,
        dpi=dpi,
        cbar_kwargs=cbar_kwargs,
        ax=ax,
        **kwargs,
    )
@d.dedent
def nhood_enrichment(
    adata: AnnData,
    cluster_key: str,
    row_groups: list[str] | None = None,
    col_groups: list[str] | None = None,
    min_freq: float | None = None,
    annotate: bool = False,
    transpose: bool = False,
    method: str | None = None,
    title: str | None = "Neighborhood enrichment",
    cmap: str = "bwr",
    palette: Palette_t = None,
    cbar_kwargs: Mapping[str, Any] = MappingProxyType({}),
    figsize: tuple[float, float] | None = None,
    dpi: int | None = None,
    ax: Axes | None = None,
    n_digits: int = 2,
    significance: float | None = None,
    save: str | Path | None = None,
    **kwargs: Any,
) -> None:
    """
    A modified version of squidpy's function for `plotting neighborhood enrichment <https://squidpy.readthedocs.io/en/stable/api/squidpy.pl.nhood_enrichment.html>`_.

    The enrichment is computed by :func:`cellcharter.gr.nhood_enrichment`.

    Parameters
    ----------
    %(adata)s
    %(cluster_key)s
    row_groups
        Restrict the rows to these groups. If `None`, all groups are plotted.
    col_groups
        Restrict the columns to these groups. If `None`, all groups are plotted.
    min_freq
        Clusters whose relative frequency is below this value are masked out
        (set to NaN) in both rows and columns. If `None`, no masking is done.
    transpose
        If `True`, transpose the enrichment matrix before plotting.
    %(heatmap_plotting)s
    n_digits
        The number of digits of the number in the annotations.
    significance
        Mark the values that are below this threshold with a star. If `None`, no significance is computed. It requires ``gr.nhood_enrichment`` to be run with ``pvalues=True``.
    save
        Path to save the plot. If `None`, the plot is not saved.
    kwargs
        Keyword arguments for :func:`matplotlib.pyplot.text`.

    Returns
    -------
    %(plotting_returns)s
    """
    _assert_categorical_obs(adata, key=cluster_key)
    # Copy so the masking/transposition below never mutates adata.uns.
    nhood_enrichment_values = _get_data(adata, cluster_key=cluster_key, func_name="nhood_enrichment").copy()

    # Replace +/-inf with NaN so extreme values do not distort the colormap.
    nhood_enrichment_values["enrichment"][np.isinf(nhood_enrichment_values["enrichment"])] = np.nan

    if transpose:
        nhood_enrichment_values["enrichment"] = nhood_enrichment_values["enrichment"].T

    if min_freq is not None:
        # Mask rare clusters in both axes of the enrichment matrix.
        frequency = adata.obs[cluster_key].value_counts(normalize=True)
        nhood_enrichment_values["enrichment"].loc[frequency[frequency < min_freq].index] = np.nan
        nhood_enrichment_values["enrichment"].loc[:, frequency[frequency < min_freq].index] = np.nan

    _plot_nhood_enrichment(
        adata,
        nhood_enrichment_values,
        cluster_key,
        row_groups=row_groups,
        col_groups=col_groups,
        annotate=annotate,
        method=method,
        title=title,
        cmap=cmap,
        palette=palette,
        cbar_kwargs=cbar_kwargs,
        figsize=figsize,
        dpi=dpi,
        ax=ax,
        n_digits=n_digits,
        significance=significance,
        **kwargs,
    )

    if save is not None:
        plt.savefig(save, bbox_inches="tight")
@d.dedent
def diff_nhood_enrichment(
    adata: AnnData,
    cluster_key: str,
    condition_key: str,
    condition_groups: list[str] | None = None,
    hspace: float = 0.25,
    wspace: float | None = None,
    ncols: int = 1,
    **nhood_kwargs: Any,
) -> None:
    r"""
    Plot the difference in neighborhood enrichment between conditions.

    The difference is computed by :func:`cellcharter.gr.diff_nhood_enrichment`.

    Parameters
    ----------
    %(adata)s
    %(cluster_key)s
    condition_key
        Key in ``adata.obs`` that stores the sample condition (e.g., normal vs disease).
    condition_groups
        Restrict the conditions to these clusters. If `None`, all groups are plotted.
    hspace
        Height space between panels.
    wspace
        Width space between panels.
    ncols
        Number of panels per row.
    nhood_kwargs
        Keyword arguments for :func:`cellcharter.pl.nhood_enrichment`.
        ``cmap`` (default ``"PRGn_r"``) and ``save`` are consumed here.

    Returns
    -------
    %(plotting_returns)s
    """
    _assert_categorical_obs(adata, key=cluster_key)
    _assert_categorical_obs(adata, key=condition_key)

    conditions = adata.obs[condition_key].cat.categories if condition_groups is None else condition_groups

    # ``**nhood_kwargs`` is always a dict, so the original `if nhood_kwargs is
    # None` guard was dead code and has been removed.
    cmap = nhood_kwargs.pop("cmap", "PRGn_r")
    save = nhood_kwargs.pop("save", None)

    n_combinations = len(conditions) * (len(conditions) - 1) // 2

    figsize = nhood_kwargs.get("figsize", rcParams["figure.figsize"])

    # Plot neighborhood enrichment for each condition pair as a subplot
    _, grid = _panel_grid(
        num_panels=n_combinations,
        hspace=hspace,
        wspace=0.75 / figsize[0] + 0.02 if wspace is None else wspace,
        ncols=ncols,
        dpi=nhood_kwargs.get("dpi", rcParams["figure.dpi"]),
        # BUG FIX: the original passed nhood_kwargs.get("dpi", figsize) here,
        # so a user-supplied dpi was silently used as the figure size.
        figsize=figsize,
    )
    axs = [plt.subplot(grid[c]) for c in range(n_combinations)]

    uns_key = f"{cluster_key}_{condition_key}_diff_nhood_enrichment"
    for i, (condition1, condition2) in enumerate(combinations(conditions, 2)):
        if f"{condition1}_{condition2}" not in adata.uns[uns_key]:
            # Only one orientation of the pair is stored; the reverse pair is
            # obtained by negating the enrichment.
            nhood_enrichment_values = dict(adata.uns[uns_key][f"{condition2}_{condition1}"])
            nhood_enrichment_values["enrichment"] = -nhood_enrichment_values["enrichment"]
        else:
            nhood_enrichment_values = adata.uns[uns_key][f"{condition1}_{condition2}"]
        _plot_nhood_enrichment(
            adata,
            nhood_enrichment_values,
            cluster_key,
            cmap=cmap,
            ax=axs[i],
            title=f"{condition1} vs {condition2}",
            show_cols=i >= n_combinations - ncols,  # Show column labels only the last subplot of each grid column
            show_rows=i % ncols == 0,  # Show row labels only for the first subplot of each grid row
            **nhood_kwargs,
        )

    if save is not None:
        plt.savefig(save, bbox_inches="tight")
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/pl/__init__.py | src/cellcharter/pl/__init__.py | from ._autok import autok_stability
from ._group import enrichment, proportion
from ._nhood import diff_nhood_enrichment, nhood_enrichment
from ._shape import boundaries, shape_metrics
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/pl/_group.py | src/cellcharter/pl/_group.py | from __future__ import annotations
import warnings
from pathlib import Path
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from anndata import AnnData
from matplotlib.colors import LogNorm, Normalize
from matplotlib.legend_handler import HandlerTuple
from scipy.cluster import hierarchy
from squidpy._docs import d
from squidpy.gr._utils import _assert_categorical_obs
from squidpy.pl._color_utils import Palette_t, _get_palette, _maybe_set_colors
try:
from matplotlib.colormaps import get_cmap
except ImportError:
from matplotlib.pyplot import get_cmap
from cellcharter.gr._group import _proportion
# Invisible legend placeholder: used by the enrichment dotplot to insert blank
# rows / section headers between legend entries.
empty_handle = matplotlib.patches.Rectangle((0, 0), 1, 1, fill=False, edgecolor="none", visible=False)
@d.dedent
def proportion(
    adata: AnnData,
    group_key: str,
    label_key: str,
    groups: list | None = None,
    labels: list | None = None,
    rotation_xlabel: int = 45,
    ncols: int = 1,
    normalize: bool = True,
    palette: Palette_t = None,
    figsize: tuple[float, float] | None = None,
    dpi: int | None = None,
    save: str | Path | None = None,
    **kwargs,
) -> None:
    """
    Plot the proportion of `label_key` in `group_key` as a stacked bar plot.

    Parameters
    ----------
    %(adata)s
    group_key
        Key in :attr:`anndata.AnnData.obs` where groups are stored.
    label_key
        Key in :attr:`anndata.AnnData.obs` where labels are stored.
    groups
        List of groups to plot.
    labels
        List of labels to plot.
    rotation_xlabel
        Rotation in degrees of the ticks of the x axis.
    ncols
        Number of columns for the legend.
    normalize
        If `True` use relative frequencies, otherwise use counts.
    palette
        Categorical colormap for the clusters.
        If ``None``, use :attr:`anndata.AnnData.uns` ``['{{cluster_key}}_colors']``, if available.
    %(plotting)s
    kwargs
        Keyword arguments for :func:`pandas.DataFrame.plot.bar`.

    Returns
    -------
    %(plotting_returns)s
    """
    _assert_categorical_obs(adata, key=group_key)
    _assert_categorical_obs(adata, key=label_key)
    _maybe_set_colors(source=adata, target=adata, key=label_key, palette=palette)

    clusters = adata.obs[label_key].cat.categories
    palette = _get_palette(adata, cluster_key=label_key, categories=clusters)

    df = _proportion(obs=adata.obs, id_key=group_key, val_key=label_key, normalize=normalize)
    # Reverse column order so the stacking order matches the legend order.
    df = df[df.columns[::-1]]

    if groups is not None:
        df = df.loc[groups, :]

    if labels is not None:
        df = df.loc[:, labels]

    plt.figure(dpi=dpi)
    ax = df.plot.bar(stacked=True, figsize=figsize, color=palette, rot=rotation_xlabel, ax=plt.gca(), **kwargs)
    ax.grid(False)

    # Renamed from `labels` to avoid shadowing the `labels` parameter.
    handles, legend_labels = ax.get_legend_handles_labels()
    lgd = ax.legend(handles[::-1], legend_labels[::-1], loc="center left", ncol=ncols, bbox_to_anchor=(1.0, 0.5))
    if save:
        # BUG FIX: the legend artist was passed twice in bbox_extra_artists.
        plt.savefig(save, bbox_extra_artists=(lgd,), bbox_inches="tight")
def _select_labels(fold_change, pvalues, labels, groups):
col_name = fold_change.columns.name
idx_name = fold_change.index.name
if labels is not None:
fold_change = fold_change.loc[labels]
# The indexing removes the name of the index, so we need to set it back
fold_change.index.name = idx_name
if pvalues is not None:
pvalues = pvalues.loc[labels]
# The indexing removes the name of the index, so we need to set it back
pvalues.index.name = idx_name
if groups is not None:
fold_change = fold_change.loc[:, groups]
# The indexing removes the name of the columns, so we need to set it back
fold_change.columns.name = col_name
if pvalues is not None:
pvalues = pvalues.loc[:, groups]
# The indexing removes the name of the columns, so we need to set it back
pvalues.columns.name = col_name
return fold_change, pvalues
# Calculate the dendrogram for rows and columns clustering
def _reorder_labels(fold_change, pvalues, group_cluster, label_cluster):
if label_cluster:
order_rows = hierarchy.leaves_list(hierarchy.linkage(fold_change, method="complete"))
fold_change = fold_change.iloc[order_rows]
if pvalues is not None:
pvalues = pvalues.iloc[order_rows]
if group_cluster:
order_cols = hierarchy.leaves_list(hierarchy.linkage(fold_change.T, method="complete"))
fold_change = fold_change.iloc[:, order_cols]
if pvalues is not None:
pvalues = pvalues.iloc[:, order_cols]
return fold_change, pvalues
def _significance_colors(color, pvalues, significance):
color[pvalues <= significance] = 0.0
color[pvalues > significance] = 0.8
return color
def _pvalue_colorbar(ax, cmap_enriched, cmap_depleted, norm):
    """Attach one (or two) vertical p-value colorbars to the right of *ax*.

    A second colorbar is added when a depleted colormap is supplied; in that
    case only the outer (depleted) bar keeps its tick labels.
    """
    from matplotlib.colorbar import ColorbarBase
    from mpl_toolkits.axes_grid1 import make_axes_locatable

    divider = make_axes_locatable(ax)

    def _append_bar(cmap, pad):
        # Each bar occupies 2% of the axes width on the right.
        cax = divider.append_axes("right", size="2%", pad=pad)
        cbar = ColorbarBase(cax, cmap=cmap, norm=norm, orientation="vertical")
        cbar.ax.invert_yaxis()
        cbar.ax.tick_params(labelsize=10)
        cbar.set_ticks([], minor=True)
        return cbar

    bar_enriched = _append_bar(cmap_enriched, 0.05)
    bar_enriched.ax.set_title("p-value", fontdict={"fontsize": 10})

    if cmap_depleted is not None:
        # Place colorbars next to each other and share ticks.
        _append_bar(cmap_depleted, 0.10)
        bar_enriched.set_ticks([])
def _enrichment_legend(
    scatters, fold_change_melt, dot_scale, size_max, enriched_only, significant_only, significance, size_threshold
):
    """Assemble legend handles/labels for the enrichment dotplot.

    Builds, in order: enriched/depleted color entries, significance entries
    (grouped handles via HandlerTuple), and dot-size entries mapped back to
    log2 fold-change values. `empty_handle` inserts blank spacer rows.
    """
    handles_list = []
    labels_list = []
    # Color section: only needed when depleted dots are shown too.
    if enriched_only is False:
        handles_list.extend(
            [scatter.legend_elements(prop="colors", num=None)[0][0] for scatter in scatters] + [empty_handle]
        )
        labels_list.extend(["Enriched", "Depleted", ""])
    # Significance section: tuple handles show one marker per scatter side by side.
    if significance is not None:
        handles_list.append(tuple([scatter.legend_elements(prop="colors", num=None)[0][0] for scatter in scatters]))
        labels_list.append(f"p-value < {significance}")
        if significant_only is False:
            handles_list.append(tuple([scatter.legend_elements(prop="colors", num=None)[0][1] for scatter in scatters]))
            labels_list.append(f"p-value >= {significance}")
        handles_list.append(empty_handle)
        labels_list.append("")
    # Size section: invert the size scaling applied when plotting so labels
    # read as log2 fold-change values again.
    handles, labels = scatters[0].legend_elements(prop="sizes", num=5, func=lambda x: x / 100 / dot_scale * size_max)
    if size_threshold is not None:
        # Show the threshold as a label only if the threshold is lower than the maximum fold change
        if enriched_only is True and fold_change_melt[fold_change_melt["value"] >= 0]["value"].max() > size_threshold:
            labels[-1] = f">{size_threshold:.1f}"
        elif fold_change_melt["value"].max() > size_threshold:
            labels[-1] = f">{size_threshold:.1f}"
    handles_list.extend([empty_handle] + handles)
    labels_list.extend(["log2 FC"] + labels)
    return handles_list, labels_list
@d.dedent
def enrichment(
    adata: AnnData,
    group_key: str,
    label_key: str,
    dot_scale: float = 3,
    group_cluster: bool = True,
    label_cluster: bool = False,
    groups: list | None = None,
    labels: list | None = None,
    show_pvalues: bool = False,
    significance: float | None = None,
    enriched_only: bool = True,
    significant_only: bool = False,
    size_threshold: float | None = None,
    palette: str | matplotlib.colors.ListedColormap | None = None,
    fontsize: str | int = "small",
    figsize: tuple[float, float] | None = (7, 5),
    save: str | Path | None = None,
    **kwargs,
):
    """
    Plot a dotplot of the enrichment of `label_key` in `group_key`.

    Parameters
    ----------
    %(adata)s
    group_key
        Key in :attr:`anndata.AnnData.obs` where groups are stored.
    label_key
        Key in :attr:`anndata.AnnData.obs` where labels are stored.
    dot_scale
        Scale of the dots.
    group_cluster
        If `True`, display groups ordered according to hierarchical clustering.
    label_cluster
        If `True`, display labels ordered according to hierarchical clustering.
    groups
        The groups for which to show the enrichment.
    labels
        The labels for which to show the enrichment.
    show_pvalues
        If `True`, show p-values as colors.
    significance
        If not `None`, show fold changes with a p-value above this threshold in a lighter color.
    enriched_only
        If `True`, display only enriched values and hide depleted values.
    significant_only
        If `True`, display only significant values and hide non-significant values.
    size_threshold
        Threshold for the size of the dots. Enrichment or depletions with absolute value above this threshold will have all the same size.
    palette
        Colormap for the enrichment values. It must be a diverging colormap.
    %(plotting)s
    kwargs
        Keyword arguments for :func:`matplotlib.pyplot.scatter`.

    Raises
    ------
    ValueError
        If :func:`cellcharter.gr.enrichment` has not been run, if
        ``size_threshold`` is not positive, or if p-value based options are
        requested but ``gr.enrichment`` was run without ``pvalues=True``.
    """
    if f"{group_key}_{label_key}_enrichment" not in adata.uns:
        raise ValueError("Run cellcharter.gr.enrichment first.")

    if size_threshold is not None and size_threshold <= 0:
        raise ValueError("size_threshold must be greater than 0.")

    if palette is None:
        palette = sns.diverging_palette(240, 10, as_cmap=True)
    elif isinstance(palette, str):
        palette = get_cmap(palette)

    pvalues = None
    if "pvalue" not in adata.uns[f"{group_key}_{label_key}_enrichment"]:
        # BUG FIX: these ValueErrors were constructed but never raised, so the
        # misconfiguration was silently ignored and failed later downstream.
        if show_pvalues:
            raise ValueError("show_pvalues requires gr.enrichment to be run with pvalues=True.")
        if significance is not None:
            raise ValueError("significance requires gr.enrichment to be run with pvalues=True.")
        if significant_only:
            raise ValueError("significant_only requires gr.enrichment to be run with pvalues=True.")
    elif show_pvalues:
        pvalues = adata.uns[f"{group_key}_{label_key}_enrichment"]["pvalue"].copy().T
    else:
        if significance is not None:
            warnings.warn(
                "Significance requires show_pvalues=True. Ignoring significance.",
                UserWarning,
                stacklevel=2,
            )
            significance = None

    if significant_only is True and significance is None:
        warnings.warn(
            "Significant_only requires significance to be set. Ignoring significant_only.",
            UserWarning,
            stacklevel=2,
        )
        significant_only = False

    # Set kwargs['alpha'] to 1 if not set
    if "alpha" not in kwargs:
        kwargs["alpha"] = 1

    if "edgecolor" not in kwargs:
        kwargs["edgecolor"] = "none"

    fold_change = adata.uns[f"{group_key}_{label_key}_enrichment"]["enrichment"].copy().T
    fold_change, pvalues = _select_labels(fold_change, pvalues, labels, groups)

    # Set -inf values to minimum and inf values to maximum
    fold_change[:] = np.nan_to_num(
        fold_change,
        neginf=np.min(fold_change[np.isfinite(fold_change)]),
        posinf=np.max(fold_change[np.isfinite(fold_change)]),
    )

    fold_change, pvalues = _reorder_labels(fold_change, pvalues, group_cluster, label_cluster)

    fold_change_melt = pd.melt(fold_change.reset_index(), id_vars=label_key)

    # Normalize the size of dots based on the absolute values in the dataframe, scaled to your preference
    sizes = fold_change_melt.copy()
    sizes["value"] = np.abs(sizes["value"])
    size_max = sizes["value"].max() if size_threshold is None else size_threshold
    if size_threshold is not None:
        sizes["value"] = sizes["value"].clip(upper=size_threshold)
    sizes["value"] = sizes["value"] * 100 / sizes["value"].max() * dot_scale

    norm = Normalize(0, 1)
    # Color mapping: binary shades if a significance cutoff is set, log-scaled
    # p-values if p-values are shown, flat color otherwise.
    if significance is not None:
        color = _significance_colors(fold_change.copy(), pvalues, significance)
    else:
        if pvalues is not None:
            pvalues += 0.0001  # avoid log(0) in the LogNorm below
            norm = LogNorm(vmin=pvalues.min().min(), vmax=pvalues.max().max())
            color = pvalues.copy()
        else:
            color = fold_change.copy()
            color[:] = 0.0
    color = pd.melt(color.reset_index(), id_vars=label_key)

    # Create a figure and axis for plotting
    fig, ax = plt.subplots(figsize=figsize)

    scatters = []
    enriched_mask = fold_change_melt["value"] >= 0

    significant_mask = np.ones_like(fold_change_melt["value"], dtype=bool)
    if significant_only:
        significant_mask = pd.melt(pvalues.reset_index(), id_vars=label_key)["value"] < significance

    # Enriched dots use the upper half of the diverging palette.
    cmap_enriched = matplotlib.colors.LinearSegmentedColormap.from_list("", [palette(1.0), palette(0.5)])
    scatter_enriched = ax.scatter(
        pd.factorize(sizes[label_key])[0][enriched_mask & significant_mask],
        pd.factorize(sizes[group_key])[0][enriched_mask & significant_mask],
        s=sizes["value"][enriched_mask & significant_mask],
        c=color["value"][enriched_mask & significant_mask],
        cmap=cmap_enriched,
        norm=norm,
        **kwargs,
    )
    scatters.append(scatter_enriched)

    cmap_depleted = None
    if enriched_only is False:
        # Depleted dots use the lower half of the diverging palette.
        cmap_depleted = matplotlib.colors.LinearSegmentedColormap.from_list("", [palette(0.0), palette(0.5)])
        scatter_depleted = ax.scatter(
            pd.factorize(sizes[label_key])[0][~enriched_mask & significant_mask],
            pd.factorize(sizes[group_key])[0][~enriched_mask & significant_mask],
            s=sizes["value"][~enriched_mask & significant_mask],
            c=color["value"][~enriched_mask & significant_mask],
            cmap=cmap_depleted,
            norm=norm,
            **kwargs,
        )
        scatters.append(scatter_depleted)

    if pvalues is not None and significance is None:
        _pvalue_colorbar(ax, cmap_enriched, cmap_depleted, norm)

    handles_list, labels_list = _enrichment_legend(
        scatters, fold_change_melt, dot_scale, size_max, enriched_only, significant_only, significance, size_threshold
    )

    fig.legend(
        handles_list,
        labels_list,
        loc="outside upper left",
        bbox_to_anchor=(0.98, 0.95),
        handler_map={tuple: HandlerTuple(ndivide=None, pad=1)},
        borderpad=1,
        handletextpad=1.0,
        fontsize=fontsize,
    )

    # Adjust the ticks to match the dataframe's indices and columns
    ax.set_xticks(range(len(fold_change.index)))
    ax.set_yticks(range(len(fold_change.columns)))
    ax.set_xticklabels(fold_change.index, rotation=90)
    ax.set_yticklabels(fold_change.columns)
    ax.tick_params(axis="both", which="major", labelsize=fontsize)

    # Remove grid lines
    ax.grid(False)
    plt.tight_layout()

    if save:
        plt.savefig(save, bbox_inches="tight")
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/pl/_utils.py | src/cellcharter/pl/_utils.py | from __future__ import annotations
from copy import copy
from types import MappingProxyType
from typing import Any, Mapping
import matplotlib as mpl
import numpy as np
import seaborn as sns
import squidpy as sq
from anndata import AnnData
from matplotlib import colors as mcolors
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from matplotlib.patches import PathPatch
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster import hierarchy as sch
from squidpy._constants._pkg_constants import Key
try:
from matplotlib import colormaps as cm
except ImportError:
from matplotlib import cm
def _get_cmap_norm(
    adata: AnnData,
    key: str,
    order: tuple[list[int], list[int]] | None = None,  # fixed: was `| None | None` (redundant union member)
) -> tuple[mcolors.ListedColormap, mcolors.ListedColormap, mcolors.BoundaryNorm, mcolors.BoundaryNorm, int]:
    """Build row/column categorical colormaps and norms for a heatmap.

    Parameters
    ----------
    adata
        Annotated data object; cluster colors are read from ``adata.uns`` under
        the squidpy color key for ``key``.
    key
        Column of ``adata.obs`` whose categories define the colors.
    order
        Optional ``(row_order, col_order)`` index lists used to subset/reorder
        the colors; ``None`` keeps all colors for both axes.

    Returns
    -------
    ``(row_cmap, col_cmap, row_norm, col_norm, n_rows)`` where the norms map
    integer positions ``0..n-1`` onto the listed colors.
    """
    n_rows = n_cols = adata.obs[key].nunique()
    colors = adata.uns[Key.uns.colors(key)]

    if order is not None:
        row_order, col_order = order
        row_colors = [colors[i] for i in row_order]
        col_colors = [colors[i] for i in col_order]
        n_rows = len(row_order)
        n_cols = len(col_order)
    else:
        row_colors = col_colors = colors

    row_cmap = mcolors.ListedColormap(row_colors)
    col_cmap = mcolors.ListedColormap(col_colors)
    # BoundaryNorm with edges 0..n maps each integer bin to one listed color.
    row_norm = mcolors.BoundaryNorm(np.arange(n_rows + 1), row_cmap.N)
    col_norm = mcolors.BoundaryNorm(np.arange(n_cols + 1), col_cmap.N)
    return row_cmap, col_cmap, row_norm, col_norm, n_rows
def _heatmap(
    adata: AnnData,
    key: str,
    rows: list[str] | None = None,
    cols: list[str] | None = None,
    title: str = "",
    method: str | None = None,
    cont_cmap: str | mcolors.Colormap = "bwr",
    annotate: bool = True,
    fontsize: int | None = None,
    figsize: tuple[float, float] | None = None,
    dpi: int | None = None,
    cbar_kwargs: Mapping[str, Any] = MappingProxyType({}),
    ax: Axes | None = None,
    n_digits: int = 2,
    show_cols: bool = True,
    show_rows: bool = True,
    **kwargs: Any,
) -> tuple[mpl.figure.Figure, Axes]:
    """Draw ``adata.X`` as an annotated heatmap flanked by categorical color bars.

    Parameters
    ----------
    adata
        Matrix to plot: values come from ``adata.X``, row labels from
        ``adata.obs[key]``, column labels from ``adata.var_names`` and row/column
        colors from ``adata.uns`` (via :func:`_get_cmap_norm`).
    key
        Column of ``adata.obs`` used to label and color the rows.
    rows, cols
        Optional name subsets for rows/columns; ``None`` keeps everything.
    method
        If not ``None``, linkage method used to dendrogram-order both axes.
    cont_cmap
        Continuous colormap for the heatmap values.
    annotate
        Whether to write the values (rounded to ``n_digits``) inside the cells.
    cbar_kwargs
        Extra keyword arguments for the value colorbar.
    show_cols, show_rows
        Whether to show tick labels on the column/row categorical color bars.
    kwargs
        Forwarded to :func:`seaborn.heatmap` (``vmin``/``vmax``/``vcenter`` are
        popped and used for the normalization instead).

    Returns
    -------
    The figure and the heatmap axes.
    """
    # Copy: kwargs/cbar_kwargs may be mutated below.
    cbar_kwargs = dict(cbar_kwargs)
    if fontsize is not None:
        kwargs["annot_kws"] = {"fontdict": {"fontsize": fontsize}}

    if ax is None:
        fig, ax = plt.subplots(constrained_layout=True, dpi=dpi, figsize=figsize)
    else:
        fig = ax.figure

    if method is not None:
        # Dendrogram ordering of rows and columns (optimal ordering only for small data).
        row_order, col_order, row_link, col_link = sq.pl._utils._dendrogram(
            adata.X, method, optimal_ordering=adata.n_obs <= 1500
        )
    else:
        # Keep the stored order, optionally subset by the given row/column names.
        row_order = (
            np.arange(len(adata.obs[key]))
            if rows is None
            else np.argwhere(adata.obs.index.isin(np.array(rows).astype(str))).flatten()
        )
        col_order = (
            np.arange(len(adata.var_names))
            if cols is None
            else np.argwhere(adata.var_names.isin(np.array(cols).astype(str))).flatten()
        )

    # Rows reversed so the first row is drawn at the top of the plot.
    row_order = row_order[::-1]
    row_labels = adata.obs[key].iloc[row_order]
    col_labels = adata.var_names[col_order]

    data = adata[row_order, col_order].copy().X

    # row_cmap, col_cmap, row_norm, col_norm, n_cls = sq.pl._utils._get_cmap_norm(adata, key, order=(row_order, len(row_order) + col_order))
    row_cmap, col_cmap, row_norm, col_norm, n_cls = _get_cmap_norm(adata, key, order=(row_order, col_order))
    col_norm = mcolors.BoundaryNorm(np.arange(len(col_order) + 1), col_cmap.N)

    row_sm = mpl.cm.ScalarMappable(cmap=row_cmap, norm=row_norm)
    col_sm = mpl.cm.ScalarMappable(cmap=col_cmap, norm=col_norm)

    # Diverging normalization centered at `vcenter` (default 0); NaNs drawn grey.
    vmin = kwargs.pop("vmin", np.nanmin(data))
    vmax = kwargs.pop("vmax", np.nanmax(data))
    vcenter = kwargs.pop("vcenter", 0)
    norm = mpl.colors.TwoSlopeNorm(vcenter=vcenter, vmin=vmin, vmax=vmax)
    cont_cmap = copy(cm.get_cmap(cont_cmap))
    cont_cmap.set_bad(color="grey")

    annot = np.round(data[::-1], n_digits).astype(str) if annotate else None
    if "significant" in adata.layers:
        # Replace numeric annotations with the significance marks stored in the layer.
        significant = adata.layers["significant"].astype(str)
        annot = np.char.add(np.empty_like(data[::-1], dtype=str), significant[row_order[:, None], col_order][::-1])

    ax = sns.heatmap(
        data[::-1],
        cmap=cont_cmap,
        norm=norm,
        ax=ax,
        square=True,
        annot=annot,
        cbar=False,
        fmt="",
        **kwargs,
    )

    for _, spine in ax.spines.items():
        spine.set_visible(True)

    ax.tick_params(top=False, bottom=False, labeltop=False, labelbottom=False)
    ax.set_xticks([])
    ax.set_yticks([])

    # Side axes: row color bar (left), column color bar (bottom), value colorbar (right).
    divider = make_axes_locatable(ax)
    row_cats = divider.append_axes("left", size=0.1, pad=0.1)
    col_cats = divider.append_axes("bottom", size=0.1, pad=0.1)
    cax = divider.append_axes("right", size="2%", pad=0.1)
    if method is not None:  # cluster rows but don't plot dendrogram
        col_ax = divider.append_axes("top", size="5%")
        sch.dendrogram(col_link, no_labels=True, ax=col_ax, color_threshold=0, above_threshold_color="black")
        col_ax.axis("off")

    c = fig.colorbar(
        mpl.cm.ScalarMappable(norm=norm, cmap=cont_cmap),
        cax=cax,
        ticks=np.linspace(norm.vmin, norm.vmax, 10),
        orientation="vertical",
        format="%0.2f",
        **cbar_kwargs,
    )
    c.ax.tick_params(labelsize=fontsize)

    # column labels colorbar
    c = fig.colorbar(col_sm, cax=col_cats, orientation="horizontal", ticklocation="bottom")
    if rows == cols or show_cols is False:
        c.set_ticks([])
        c.set_ticklabels([])
    else:
        c.set_ticks(np.arange(len(col_labels)) + 0.5)
        c.set_ticklabels(col_labels, fontdict={"fontsize": fontsize})
        if np.any([len(l) > 3 for l in col_labels]):
            # Rotate long labels to avoid overlap.
            c.ax.tick_params(rotation=90)
    c.outline.set_visible(False)

    # row labels colorbar
    c = fig.colorbar(row_sm, cax=row_cats, orientation="vertical", ticklocation="left")
    if show_rows is False:
        c.set_ticks([])
        c.set_ticklabels([])
    else:
        c.set_ticks(np.arange(n_cls) + 0.5)
        c.set_ticklabels(row_labels, fontdict={"fontsize": fontsize})
        c.set_label(key, fontsize=fontsize)
    c.outline.set_visible(False)

    ax.set_title(title, fontdict={"fontsize": fontsize})

    return fig, ax
def _reorder(values, order, axis=1):
if axis == 0:
values = values.iloc[order, :]
elif axis == 1:
values = values.iloc[:, order]
else:
raise ValueError("The axis parameter accepts only values 0 and 1.")
return values
def _clip(values, min_threshold=None, max_threshold=None, new_min=None, new_max=None, new_middle=None):
values_clipped = values.copy()
if new_middle is not None:
values_clipped[:] = new_middle
if min_threshold is not None:
values_clipped[values < min_threshold] = new_min if new_min is not None else min_threshold
if max_threshold is not None:
values_clipped[values > max_threshold] = new_max if new_max is not None else max_threshold
return values_clipped
def adjust_box_widths(g, fac):
    """Adjust the widths of a seaborn-generated boxplot.

    Each box (a :class:`matplotlib.patches.PathPatch`) is shrunk/grown around its
    horizontal midpoint by the factor ``fac``, and the matching median line is
    moved with it. The figure's artists are modified in place.
    """
    # iterating through Axes instances
    for ax in g.axes:
        # iterating through axes artists:
        for c in ax.get_children():
            # searching for PathPatches
            if isinstance(c, PathPatch):
                # getting current width of box:
                p = c.get_path()
                verts = p.vertices
                # last vertex closes the path; exclude it from the extent.
                verts_sub = verts[:-1]
                xmin = np.min(verts_sub[:, 0])
                xmax = np.max(verts_sub[:, 0])
                xmid = 0.5 * (xmin + xmax)
                xhalf = 0.5 * (xmax - xmin)
                # setting new width of box (vertices mutated in place)
                xmin_new = xmid - fac * xhalf
                xmax_new = xmid + fac * xhalf
                verts_sub[verts_sub[:, 0] == xmin, 0] = xmin_new
                verts_sub[verts_sub[:, 0] == xmax, 0] = xmax_new
                # setting new width of median line (the line spanning exactly [xmin, xmax])
                for l in ax.lines:
                    if np.all(l.get_xdata() == [xmin, xmax]):
                        l.set_xdata([xmin_new, xmax_new])
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/gr/_build.py | src/cellcharter/gr/_build.py | from __future__ import annotations
import numpy as np
import pandas as pd
import scipy.sparse as sps
from anndata import AnnData
from scipy.sparse import csr_matrix
from squidpy._constants._pkg_constants import Key
from squidpy._docs import d
from squidpy.gr._utils import _assert_connectivity_key
@d.dedent
def remove_long_links(
    adata: AnnData,
    distance_percentile: float = 99.0,
    connectivity_key: str | None = None,
    distances_key: str | None = None,
    neighs_key: str | None = None,
    copy: bool = False,
) -> tuple[csr_matrix, csr_matrix] | None:
    """
    Remove links between cells at a distance bigger than a certain percentile of all positive distances.

    It is designed for data with generic coordinates.

    Parameters
    ----------
    %(adata)s
    distance_percentile
        Percentile of the distances between cells over which links are trimmed after the network is built.
    %(conn_key)s
    distances_key
        Key in :attr:`anndata.AnnData.obsp` where spatial distances are stored.
        Default is: :attr:`anndata.AnnData.obsp` ``['{{Key.obsp.spatial_dist()}}']``.
    neighs_key
        Key in :attr:`anndata.AnnData.uns` where the parameters from gr.spatial_neighbors are stored.
        Default is: :attr:`anndata.AnnData.uns` ``['{{Key.uns.spatial_neighs()}}']``.
    %(copy)s

    Returns
    -------
    If ``copy = True``, returns a :class:`tuple` with the new spatial connectivities and distances matrices.

    Otherwise, modifies the ``adata`` with the following keys:
        - :attr:`anndata.AnnData.obsp` ``['{{connectivity_key}}']`` - the new spatial connectivities.
        - :attr:`anndata.AnnData.obsp` ``['{{distances_key}}']`` - the new spatial distances.
        - :attr:`anndata.AnnData.uns` ``['{{neighs_key}}']`` - :class:`dict` containing parameters.
    """
    connectivity_key = Key.obsp.spatial_conn(connectivity_key)
    distances_key = Key.obsp.spatial_dist(distances_key)
    neighs_key = Key.uns.spatial_neighs(neighs_key)
    _assert_connectivity_key(adata, connectivity_key)
    _assert_connectivity_key(adata, distances_key)

    conns, dists = adata.obsp[connectivity_key], adata.obsp[distances_key]
    if copy:
        conns, dists = conns.copy(), dists.copy()

    # Distance threshold: given percentile over the stored (non-zero) distances.
    threshold = np.percentile(np.array(dists[dists != 0]).squeeze(), distance_percentile)
    # Zero out links beyond the threshold, then drop explicit zeros from the sparse matrices.
    conns[dists > threshold] = 0
    dists[dists > threshold] = 0

    conns.eliminate_zeros()
    dists.eliminate_zeros()
    if copy:
        return conns, dists
    else:
        # In-place mode: record the effective radius in the stored graph parameters.
        adata.uns[neighs_key]["params"]["radius"] = threshold
def _remove_intra_cluster_links(labels, adjacency):
target_labels = np.array(labels.iloc[adjacency.indices])
source_labels = np.array(
labels.iloc[np.repeat(np.arange(adjacency.indptr.shape[0] - 1), np.diff(adjacency.indptr))]
)
inter_cluster_mask = (source_labels != target_labels).astype(int)
adjacency.data *= inter_cluster_mask
adjacency.eliminate_zeros()
return adjacency
@d.dedent
def remove_intra_cluster_links(
    adata: AnnData,
    cluster_key: str,
    connectivity_key: str | None = None,
    distances_key: str | None = None,
    copy: bool = False,
) -> tuple[csr_matrix, csr_matrix] | None:
    """
    Remove links between cells that belong to the same cluster.

    Used in :func:`cellcharter.gr.nhood_enrichment` to consider only interactions between cells of different clusters.

    Parameters
    ----------
    %(adata)s
    cluster_key
        Key in :attr:`anndata.AnnData.obs` of the cluster labeling to consider.
    %(conn_key)s
    distances_key
        Key in :attr:`anndata.AnnData.obsp` where spatial distances are stored.
        Default is: :attr:`anndata.AnnData.obsp` ``['{{Key.obsp.spatial_dist()}}']``.
    %(copy)s

    Returns
    -------
    If ``copy = True``, returns a :class:`tuple` with the new spatial connectivities and distances matrices.

    Otherwise, modifies the ``adata`` with the following keys:
        - :attr:`anndata.AnnData.obsp` ``['{{connectivity_key}}']`` - the new spatial connectivities.
        - :attr:`anndata.AnnData.obsp` ``['{{distances_key}}']`` - the new spatial distances.
    """
    connectivity_key = Key.obsp.spatial_conn(connectivity_key)
    distances_key = Key.obsp.spatial_dist(distances_key)
    _assert_connectivity_key(adata, connectivity_key)
    _assert_connectivity_key(adata, distances_key)

    # When copy=False the matrices in adata.obsp are modified in place by the helper.
    conns = adata.obsp[connectivity_key].copy() if copy else adata.obsp[connectivity_key]
    dists = adata.obsp[distances_key].copy() if copy else adata.obsp[distances_key]

    conns, dists = (_remove_intra_cluster_links(adata.obs[cluster_key], adjacency) for adjacency in [conns, dists])

    if copy:
        return conns, dists
def _connected_components(adj: sps.spmatrix, min_cells: int = 250, count: int = 0) -> np.ndarray:
n_components, labels = sps.csgraph.connected_components(adj, return_labels=True)
components, counts = np.unique(labels, return_counts=True)
small_components = components[counts < min_cells]
small_components_idxs = np.in1d(labels, small_components)
labels[small_components_idxs] = -1
labels[~small_components_idxs] = pd.factorize(labels[~small_components_idxs])[0] + count
return labels, (n_components - len(small_components))
@d.dedent
def connected_components(
    adata: AnnData,
    cluster_key: str | None = None,
    min_cells: int = 250,
    connectivity_key: str | None = None,
    out_key: str = "component",
    copy: bool = False,
) -> None | np.ndarray:
    """
    Compute the connected components of the spatial graph.

    Parameters
    ----------
    %(adata)s
    cluster_key
        Key in :attr:`anndata.AnnData.obs` where the cluster labels are stored. If :class:`None`, the connected components are computed on the whole dataset.
    min_cells
        Minimum number of cells for a connected component to be considered.
    %(conn_key)s
    out_key
        Key in :attr:`anndata.AnnData.obs` where the output matrix is stored if ``copy = False``.
    %(copy)s

    Returns
    -------
    If ``copy = True``, returns a :class:`numpy.ndarray` with the connected components labels.

    Otherwise, modifies the ``adata`` with the following key:
        - :attr:`anndata.AnnData.obs` ``['{{out_key}}']`` - - the above mentioned :class:`numpy.ndarray`.
    """
    connectivity_key = Key.obsp.spatial_conn(connectivity_key)

    output = pd.Series(index=adata.obs.index, dtype="object")
    # Running offset so component ids stay unique across clusters.
    count = 0
    if cluster_key is not None:
        # Components are computed within each cluster separately.
        cluster_values = adata.obs[cluster_key].unique()
        for cluster in cluster_values:
            adata_cluster = adata[adata.obs[cluster_key] == cluster]
            labels, n_components = _connected_components(
                adj=adata_cluster.obsp[connectivity_key], min_cells=min_cells, count=count
            )
            output[adata.obs[cluster_key] == cluster] = labels
            count += n_components
    else:
        labels, n_components = _connected_components(
            adj=adata.obsp[connectivity_key],
            min_cells=min_cells,
        )
        output.loc[:] = labels

    # Components smaller than `min_cells` were labeled -1: turn them into NaN,
    # drop the category, and order the remaining ids numerically.
    output = output.astype(str).astype("category")
    output[output == "-1"] = np.nan
    output = output.cat.remove_unused_categories()
    output = output.cat.reorder_categories(sorted(output.cat.categories, key=lambda x: int(x)))

    if copy:
        return output.values
    adata.obs[out_key] = output
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/gr/_nhood.py | src/cellcharter/gr/_nhood.py | from __future__ import annotations
import warnings
from concurrent.futures import ProcessPoolExecutor, as_completed
from functools import partial
from itertools import combinations
import numpy as np
import pandas as pd
import scipy.sparse as sp
from anndata import AnnData
from squidpy._constants._pkg_constants import Key
from squidpy._docs import d
from squidpy.gr._utils import _assert_categorical_obs, _assert_connectivity_key
from tqdm.auto import tqdm
from cellcharter.gr._build import _remove_intra_cluster_links
def _observed_n_clusters_links(adj, labels, symmetric=True):
labels_unique = labels.cat.categories
obs = np.zeros((len(labels_unique), len(labels_unique)))
for i, l1 in enumerate(labels_unique):
total_cluster_links = adj[labels.values == l1]
for j, l2 in enumerate(labels_unique):
other_cluster_links = total_cluster_links[:, labels.values == l2]
if not symmetric:
obs[i, j] = np.sum(other_cluster_links) / np.sum(total_cluster_links)
else:
obs[i, j] = np.sum(other_cluster_links)
obs = pd.DataFrame(obs, columns=labels_unique, index=labels_unique)
return obs
def _expected_n_clusters_links(adj, labels, only_inter=False, symmetric=True):
labels_unique = labels.cat.categories
degrees = np.array([np.mean(np.sum(adj, axis=1))] * adj.shape[0])
exp = np.zeros((len(labels_unique), len(labels_unique)))
for i, c1 in enumerate(labels_unique):
for j, c2 in enumerate(labels_unique):
source_factor = np.sum(degrees[labels == c1])
target_factor = np.sum(degrees[labels == c2])
exp[i, j] = target_factor
exp[i, j] /= np.sum(degrees)
if c1 == c2:
if symmetric:
exp[i, j] /= 2
elif only_inter:
exp[i, j] = np.nan
if symmetric:
exp[i, j] *= source_factor
exp = pd.DataFrame(exp, columns=labels_unique, index=labels_unique)
return exp
def _observed_permuted(adj, labels, observed, symmetric=True, batch_size=1):
    """Count cluster links for ``batch_size`` random label permutations.

    For each permutation, returns the permuted link table together with boolean
    masks recording where the permuted value beats the observed one; these masks
    are accumulated into empirical p-value counts by `_nhood_enrichment`.
    """
    results = []
    for _ in range(batch_size):
        # Permute labels (graph stays fixed).
        curr_labels = labels.sample(frac=1).reset_index(drop=True)
        permuted = _observed_n_clusters_links(adj, curr_labels, symmetric=symmetric)
        # Positive observed entries count permuted < observed; negative ones permuted > observed.
        counts_pos = permuted.values[observed.values > 0] < observed.values[observed.values > 0]
        counts_neg = permuted.values[observed.values < 0] > observed.values[observed.values < 0]
        results.append((permuted, counts_pos, counts_neg))
    return results
def _nhood_enrichment(
    adj,
    labels,
    log_fold_change: bool = False,
    only_inter: bool = True,
    symmetric: bool = False,
    pvalues: bool = False,
    n_perms: int = 1000,
    n_jobs: int = 1,
    batch_size: int = 10,
    observed_expected=False,
):
    """Core neighborhood-enrichment computation on an adjacency matrix.

    Compares the observed link counts (or fractions, if ``symmetric=False``)
    between every pair of clusters in ``labels`` against the expected ones,
    either analytically (``pvalues=False``) or by permuting the labels
    ``n_perms`` times (``pvalues=True``, which also yields empirical p-values).

    Returns a dict with ``'enrichment'`` and, depending on the flags,
    ``'pvalue'``, ``'observed'`` and ``'expected'``.
    """
    if only_inter:
        # Drop links between cells of the same cluster before counting.
        adj = _remove_intra_cluster_links(labels, adj)
    cluster_categories = labels.cat.categories

    observed = _observed_n_clusters_links(
        adj,
        labels=labels,
        symmetric=symmetric,
    )
    if not pvalues:
        # Closed-form expectation based on the mean degree.
        expected = _expected_n_clusters_links(
            adj,
            labels=labels,
            only_inter=only_inter,
            symmetric=symmetric,
        )
    else:
        # Permutation-based expectation: accumulate permuted link tables and how
        # often they beat the observed value (split by the sign of `observed`).
        counts = np.zeros_like(observed.values)
        expected = np.zeros_like(observed.values)
        if n_jobs == 1:
            with tqdm(total=n_perms) as pbar:
                for _ in range(n_perms):
                    results = _observed_permuted(adj, labels=labels, observed=observed, symmetric=symmetric)
                    for permuted, counts_pos, counts_neg in results:
                        expected += permuted
                        counts[observed.values > 0] += counts_pos
                        counts[observed.values < 0] += counts_neg
                    pbar.update(1)
        else:
            # Parallel path: permutations are run in batches of `batch_size`.
            n_batches = (n_perms + batch_size - 1) // batch_size
            worker = partial(
                _observed_permuted, adj, labels=labels, observed=observed, symmetric=symmetric, batch_size=batch_size
            )
            with ProcessPoolExecutor(max_workers=n_jobs) as executor:
                with tqdm(total=n_perms) as pbar:
                    futures = [executor.submit(worker) for _ in range(n_batches)]
                    for future in as_completed(futures):
                        batch_results = future.result()
                        for permuted, counts_pos, counts_neg in batch_results:
                            expected += permuted
                            counts[observed.values > 0] += counts_pos
                            counts[observed.values < 0] += counts_neg
                            pbar.update(1)
                            if pbar.n >= n_perms:  # Ensure we don't exceed n_perms
                                break
        expected /= n_perms
        expected = pd.DataFrame(expected, columns=cluster_categories, index=cluster_categories)
        emprical_pvalues = pd.DataFrame(1 - (counts / n_perms), columns=observed.columns, index=observed.index)

    enrichment = np.log2(observed / expected) if log_fold_change else observed - expected

    if only_inter:
        # Intra-cluster entries are meaningless once intra-links were removed.
        np.fill_diagonal(observed.values, np.nan)
        np.fill_diagonal(expected.values, np.nan)
        np.fill_diagonal(enrichment.values, np.nan)

    result = {"enrichment": enrichment}
    if pvalues:
        result["pvalue"] = emprical_pvalues
    if observed_expected:
        result["observed"] = observed
        result["expected"] = expected
    return result
@d.dedent
def nhood_enrichment(
    adata: AnnData,
    cluster_key: str,
    connectivity_key: str | None = None,
    log_fold_change: bool = False,
    only_inter: bool = True,
    symmetric: bool = False,
    pvalues: bool = False,
    n_perms: int = 1000,
    n_jobs: int = 1,
    batch_size: int = 10,
    observed_expected: bool = False,
    copy: bool = False,
) -> dict | None:
    """
    A modified version of squidpy's `neighborhood enrichment <https://squidpy.readthedocs.io/en/stable/api/squidpy.gr.nhood_enrichment.html>`_.

    This function computes the neighborhood enrichment between clusters in the spatial graph.
    It allows for the computation of the expected neighborhood enrichment using the analytical formula or by permutation.
    The analytical version is much faster, but the version based on permutation allows to estimate p-values for each enrichment value.

    Setting the symmetric parameter to `False` allows to compute the neighborhood enrichment between `cell1` and `cell2` as the ratio between the number of links between `cell1` and `cell2` and the total number of links of `cell1`.
    This results in enrichment values that are not symmetric, i.e. the neighborhood enrichment between `cell1` and `cell2` is not equal to the enrichment between `cell2` and `cell1`.

    Parameters
    ----------
    %(adata)s
    %(cluster_key)s
    %(conn_key)s
    only_inter
        Consider only links between cells that belong to the different clusters.
    symmetric
        If `True`, the neighborhood enrichment between `cell1` and `cell2` is equal to the enrichment between `cell2` and `cell1`.
    pvalues
        If `True`, compute the p-values for each neighborhood enrichment value using permutation of the cluster labels.
    n_perms
        Number of permutations to use to compute the expected neighborhood enrichment if `pvalues` is `True`.
    n_jobs
        Number of jobs to run in parallel if `pvalues` is `True`.
    batch_size
        Number of permutations to run in each batch if `pvalues` is `True`.
    %(copy)s
    observed_expected
        If `True`, return the observed and expected neighborhood proportions.

    Returns
    -------
    If ``copy = True``, returns a :class:`dict` with the following keys:
        - ``'enrichment'`` - the neighborhood enrichment.
        - ``'pvalue'`` - the enrichment pvalues (if `pvalues` is `True`).
        - ``'observed'`` - the observed neighborhood proportions (if `observed_expected is True`).
        - ``'expected'`` - the expected neighborhood proportions (if `observed_expected is True`).

    Otherwise, modifies the ``adata`` with the following key:
        - :attr:`anndata.AnnData.uns` ``['{cluster_key}_nhood_enrichment']`` - the above mentioned dict.
        - :attr:`anndata.AnnData.uns` ``['{cluster_key}_nhood_enrichment']['params']`` - the parameters used.
    """
    connectivity_key = Key.obsp.spatial_conn(connectivity_key)
    _assert_connectivity_key(adata, connectivity_key)
    _assert_categorical_obs(adata, key=cluster_key)

    # Delegate the actual computation to the matrix-level helper.
    result = _nhood_enrichment(
        adata.obsp[connectivity_key],
        adata.obs[cluster_key],
        log_fold_change=log_fold_change,
        only_inter=only_inter,
        symmetric=symmetric,
        pvalues=pvalues,
        n_perms=n_perms,
        n_jobs=n_jobs,
        batch_size=batch_size,
        observed_expected=observed_expected,
    )

    if copy:
        return result
    else:
        # Store the result plus the parameters used alongside it.
        adata.uns[f"{cluster_key}_nhood_enrichment"] = result
        adata.uns[f"{cluster_key}_nhood_enrichment"]["params"] = {
            "connectivity_key": connectivity_key,
            "log_fold_change": log_fold_change,
            "only_inter": only_inter,
            "symmetric": symmetric,
            "pvalues": pvalues,
            "n_perms": n_perms if pvalues else None,
        }
def _generate_sample_permutations(samples1, samples2, n_perms):
"""Generator function to yield sample permutations one at a time."""
all_samples = np.concatenate((samples1, samples2))
n_samples1 = len(samples1)
for _ in range(n_perms):
# Generate one permutation at a time
perm = np.random.permutation(all_samples)
yield perm[:n_samples1]
def _observed_expected_diff_enrichment(enrichments, condition1, condition2):
observed = enrichments[condition1] - enrichments[condition2]
return observed.loc[enrichments[condition1].index, enrichments[condition1].columns]
def _diff_nhood_enrichment(
    labels: pd.Categorical,
    conditions: pd.Categorical,
    condition_groups: tuple[str, str],
    connectivities: sp.csr_matrix,
    pvalues: bool = False,
    libraries: pd.Categorical | None = None,
    n_perms: int = 1000,
    n_jobs: int = 1,
    **nhood_kwargs,
):
    """Compute pairwise differences of neighborhood enrichment between conditions.

    For every condition in ``condition_groups`` the neighborhood enrichment is
    computed on the subset of cells belonging to that condition; the result for
    a pair ``(c1, c2)`` is ``enrichment(c1) - enrichment(c2)``, stored under the
    key ``"{c1}_{c2}"``.

    If ``pvalues`` is True, p-value counts are accumulated by permuting the
    assignment of libraries (samples) to the two conditions ``n_perms`` times:
    each permutation is re-labelled as conditions ``0``/``1``, the differential
    enrichment is recomputed, and we count how often the permuted difference
    beats the observed one (separately for positive and negative observed
    values).

    Parameters
    ----------
    labels
        Cluster label per cell.
    conditions
        Condition per cell.
    condition_groups
        Conditions to compare pairwise.
    connectivities
        Spatial adjacency matrix over all cells.
    pvalues
        Whether to accumulate permutation counts (requires ``libraries``).
    libraries
        Sample/library per cell; the unit that is permuted for p-values.
    n_perms, n_jobs
        Number of permutations and of parallel workers.
    nhood_kwargs
        Forwarded to :func:`_nhood_enrichment`.

    Returns
    -------
    Dict keyed by ``"{c1}_{c2}"`` with ``"enrichment"`` and, if ``pvalues``,
    ``"pvalue_counts"`` (raw counts, normalized by the caller).
    """
    enrichments = {}
    for condition in condition_groups:
        condition_mask = conditions == condition
        if isinstance(condition_mask, pd.Series):
            condition_mask = condition_mask.values
        labels_condition = labels[condition_mask]
        # Keep the full category set so every enrichment table is aligned.
        labels_condition = labels_condition.cat.set_categories(labels.cat.categories)
        connectivities_condition = connectivities[condition_mask, :][:, condition_mask]
        enrichments[condition] = _nhood_enrichment(connectivities_condition, labels_condition, **nhood_kwargs)[
            "enrichment"
        ]

    result = {}
    condition_pairs = combinations(condition_groups, 2)
    for condition1, condition2 in condition_pairs:
        observed_diff_enrichment = _observed_expected_diff_enrichment(enrichments, condition1, condition2)
        result_key = f"{condition1}_{condition2}"
        result[result_key] = {"enrichment": observed_diff_enrichment}
        if pvalues:
            result[result_key]["pvalue_counts"] = np.zeros_like(observed_diff_enrichment.values)
            samples1 = libraries[conditions == condition1].unique()
            samples2 = libraries[conditions == condition2].unique()
            sample_perm_generator = _generate_sample_permutations(samples1, samples2, n_perms)

            if n_jobs == 1:
                with tqdm(total=n_perms) as pbar:
                    for samples_condition1_permuted in sample_perm_generator:
                        condition_permuted = pd.Categorical(libraries.isin(samples_condition1_permuted).astype(int))
                        # BUGFIX: `libraries` and `connectivities` used to be passed
                        # positionally here, shifting them into the `connectivities`
                        # and `pvalues` parameters and raising
                        # "got multiple values for argument 'pvalues'". Pass them in
                        # the signature's order / by keyword, matching the parallel
                        # worker below.
                        expected_diff_enrichment = _diff_nhood_enrichment(
                            labels,
                            condition_permuted,
                            [0, 1],
                            connectivities,
                            pvalues=False,
                            libraries=libraries,
                            **nhood_kwargs,
                        )["0_1"]["enrichment"]
                        counts_pos = (
                            expected_diff_enrichment.values[observed_diff_enrichment.values > 0]
                            < observed_diff_enrichment.values[observed_diff_enrichment.values > 0]
                        )
                        counts_neg = (
                            expected_diff_enrichment.values[observed_diff_enrichment.values < 0]
                            > observed_diff_enrichment.values[observed_diff_enrichment.values < 0]
                        )
                        result[result_key]["pvalue_counts"][observed_diff_enrichment.values > 0] += counts_pos
                        result[result_key]["pvalue_counts"][observed_diff_enrichment.values < 0] += counts_neg
                        pbar.update(1)
            else:
                worker = partial(
                    _diff_nhood_enrichment,
                    labels=labels,
                    condition_groups=[0, 1],
                    libraries=libraries,
                    connectivities=connectivities,
                    pvalues=False,
                    **nhood_kwargs,
                )
                with ProcessPoolExecutor(max_workers=n_jobs) as executor:
                    with tqdm(total=n_perms) as pbar:
                        futures = []
                        for _ in range(n_perms):
                            condition_permuted = pd.Categorical(libraries.isin(next(sample_perm_generator)).astype(int))
                            futures.append(executor.submit(worker, conditions=condition_permuted))
                        for future in as_completed(futures):
                            future_result = future.result()
                            expected_diff_enrichment = future_result["0_1"]["enrichment"]
                            counts_pos = (
                                expected_diff_enrichment.values[observed_diff_enrichment.values > 0]
                                < observed_diff_enrichment.values[observed_diff_enrichment.values > 0]
                            )
                            counts_neg = (
                                expected_diff_enrichment.values[observed_diff_enrichment.values < 0]
                                > observed_diff_enrichment.values[observed_diff_enrichment.values < 0]
                            )
                            result[result_key]["pvalue_counts"][observed_diff_enrichment.values > 0] += counts_pos
                            result[result_key]["pvalue_counts"][observed_diff_enrichment.values < 0] += counts_neg
                            pbar.update(1)
    return result
@d.dedent
def diff_nhood_enrichment(
    adata: AnnData,
    cluster_key: str,
    condition_key: str,
    condition_groups: tuple[str, str] | None = None,
    connectivity_key: str | None = None,
    pvalues: bool = False,
    library_key: str | None = "library_id",
    n_perms: int = 1000,
    n_jobs: int | None = None,
    copy: bool = False,
    **nhood_kwargs,
) -> dict | None:
    r"""
    Differential neighborhood enrichment between conditions.

    Parameters
    ----------
    %(adata)s
    %(cluster_key)s
    condition_key
        Key in :attr:`anndata.AnnData.obs` where the sample condition is stored.
    condition_groups
        The condition groups to compare. If `None`, all conditions in `condition_key` will be used.
    %(conn_key)s
    pvalues
        If `True`, compute the p-values for each differential neighborhood enrichment through permutation of the condition key for each Z-dimension.
    library_key
        If multiple library_id, column in anndata.AnnData.obs which stores mapping between library_id and obs. Used only if `pvalues` is `True` to permute the condition labels.
    n_perms
        Number of permutations to use to compute the expected neighborhood enrichment if `pvalues` is `True`.
    n_jobs
        Number of jobs to run in parallel if `pvalues` is `True`. `-1` means using all processors.
    %(copy)s
    nhood_kwargs
        Keyword arguments for :func:`gr.nhood_enrichment`. The following arguments are not allowed:
            - ``n_perms``
            - ``pvalues``
            - ``n_jobs``

    Returns
    -------
    If ``copy = True``, returns a :class:`dict` of all pairwise differential neighborhood enrichments between conditions stored as ``{condition1}_{condition2}``.
    The differential neighborhood enrichment is a :class:`dict` with the following keys:
        - ``'enrichment'`` - the differential neighborhood enrichment.
        - ``'pvalue'`` - the enrichment pvalues (if `pvalues` is `True`).
    """
    connectivity_key = Key.obsp.spatial_conn(connectivity_key)
    _assert_connectivity_key(adata, connectivity_key)
    _assert_categorical_obs(adata, key=cluster_key)
    _assert_categorical_obs(adata, key=condition_key)
    if condition_groups is None:
        condition_groups = adata.obs[condition_key].cat.categories

    if "observed_expected" in nhood_kwargs:
        warnings.warn(
            "The `observed_expected` can be used only in `pl.nhood_enrichment`, hence it will be ignored.", stacklevel=2
        )

    # Heavy lifting happens in the matrix-level helper.
    diff_nhood = _diff_nhood_enrichment(
        labels=adata.obs[cluster_key],
        conditions=adata.obs[condition_key],
        condition_groups=condition_groups,
        connectivities=adata.obsp[connectivity_key],
        pvalues=pvalues,
        libraries=adata.obs[library_key] if pvalues else None,
        n_perms=n_perms,
        n_jobs=n_jobs,
        **nhood_kwargs,
    )

    result = {}
    for condition_pair_key in diff_nhood.keys():
        result[condition_pair_key] = {}
        result[condition_pair_key]["enrichment"] = diff_nhood[condition_pair_key]["enrichment"]
        if pvalues:
            # Turn raw permutation counts into empirical p-values.
            result[condition_pair_key]["pvalue"] = pd.DataFrame(
                1 - (diff_nhood[condition_pair_key]["pvalue_counts"] / n_perms),
                columns=diff_nhood[condition_pair_key]["enrichment"].columns,
                index=diff_nhood[condition_pair_key]["enrichment"].index,
            )

    if copy:
        return result
    else:
        adata.uns[f"{cluster_key}_{condition_key}_diff_nhood_enrichment"] = result
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/gr/__init__.py | src/cellcharter/gr/__init__.py | from ._aggr import aggregate_neighbors
from ._build import connected_components, remove_intra_cluster_links, remove_long_links
from ._group import enrichment
from ._nhood import diff_nhood_enrichment, nhood_enrichment
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/gr/_group.py | src/cellcharter/gr/_group.py | from __future__ import annotations
import numpy as np
import pandas as pd
from anndata import AnnData
from squidpy._docs import d
from tqdm import tqdm
def _proportion(obs, id_key, val_key, normalize=True):
df = pd.pivot(obs[[id_key, val_key]].value_counts().reset_index(), index=id_key, columns=val_key)
df[df.isna()] = 0
df.columns = df.columns.droplevel(0)
if normalize:
return df.div(df.sum(axis=1), axis=0)
else:
return df
def _observed_permuted(annotations, group_key, label_key):
    """Shuffle the group assignment and return the resulting group-by-label proportion table.

    NOTE(review): the `group_key` column of `annotations` is overwritten IN PLACE —
    callers pass a dedicated copy that is reused across permutations. The bare
    `.reindex()` call has no arguments and is a no-op.
    """
    annotations[group_key] = annotations[group_key].sample(frac=1).reset_index(drop=True).values
    return _proportion(annotations, id_key=label_key, val_key=group_key).reindex().T
def _enrichment(observed, expected, log=True):
enrichment = observed.div(expected, axis="index", level=0)
if log:
with np.errstate(divide="ignore"):
enrichment = np.log2(enrichment)
enrichment = enrichment.fillna(enrichment.min())
return enrichment
def _empirical_pvalues(observed, expected):
pvalues = np.zeros(observed.shape)
pvalues[observed.values > 0] = (
1 - np.sum(expected[:, observed.values > 0] < observed.values[observed.values > 0], axis=0) / expected.shape[0]
)
pvalues[observed.values < 0] = (
1 - np.sum(expected[:, observed.values < 0] > observed.values[observed.values < 0], axis=0) / expected.shape[0]
)
return pd.DataFrame(pvalues, columns=observed.columns, index=observed.index)
@d.dedent
def enrichment(
    adata: AnnData,
    group_key: str,
    label_key: str,
    pvalues: bool = False,
    n_perms: int = 1000,
    log: bool = True,
    observed_expected: bool = False,
    copy: bool = False,
) -> pd.DataFrame | tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame] | None:
    """
    Compute the enrichment of `label_key` in `group_key`.

    Parameters
    ----------
    %(adata)s
    group_key
        Key in :attr:`anndata.AnnData.obs` where groups are stored.
    label_key
        Key in :attr:`anndata.AnnData.obs` where labels are stored.
    pvalues
        If `True`, compute empirical p-values by permutation. It will result in a slower computation.
    n_perms
        Number of permutations to compute empirical p-values.
    log
        If `True` use log2 fold change, otherwise use fold change.
    observed_expected
        If `True`, return also the observed and expected proportions.
    %(copy)s

    Returns
    -------
    If ``copy = True``, returns a :class:`dict` with the following keys:
        - ``'enrichment'`` - the enrichment values.
        - ``'pvalue'`` - the enrichment pvalues (if `pvalues is True`).
        - ``'observed'`` - the observed proportions (if `observed_expected is True`).
        - ``'expected'`` - the expected proportions (if `observed_expected is True`).

    Otherwise, modifies the ``adata`` with the following keys:
        - :attr:`anndata.AnnData.uns` ``['{group_key}_{label_key}_enrichment']`` - the above mentioned dict.
        - :attr:`anndata.AnnData.uns` ``['{group_key}_{label_key}_enrichment']['params']`` - the parameters used.
    """
    # Observed fraction of each group within each label (groups x labels).
    observed = _proportion(adata.obs, id_key=label_key, val_key=group_key).reindex().T
    observed[observed.isna()] = 0
    if not pvalues:
        # Analytical expectation: the overall frequency of each group, identical for every label.
        expected = adata.obs[group_key].value_counts() / adata.shape[0]
        # Repeat over the number of labels
        expected = pd.concat([expected] * len(observed.columns), axis=1, keys=observed.columns)
    else:
        # Permutation-based expectation: shuffle the group labels n_perms times.
        # NOTE: _observed_permuted shuffles `annotations[group_key]` in place, so a copy is passed.
        annotations = adata.obs.copy()
        expected = [_observed_permuted(annotations, group_key, label_key) for _ in tqdm(range(n_perms))]
        expected = np.stack(expected, axis=0)
        # p-values are computed on the raw proportions before averaging the permutations.
        empirical_pvalues = _empirical_pvalues(observed, expected)
        expected = np.mean(expected, axis=0)
        expected = pd.DataFrame(expected, columns=observed.columns, index=observed.index)
    enrichment = _enrichment(observed, expected, log=log)
    result = {"enrichment": enrichment}
    if observed_expected:
        result["observed"] = observed
        result["expected"] = expected
    if pvalues:
        result["pvalue"] = empirical_pvalues
    if copy:
        return result
    else:
        adata.uns[f"{group_key}_{label_key}_enrichment"] = result
        adata.uns[f"{group_key}_{label_key}_enrichment"]["params"] = {"log": log}
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/gr/_aggr.py | src/cellcharter/gr/_aggr.py | from __future__ import annotations
import warnings
from typing import Optional, Union
import numpy as np
import scipy.sparse as sps
from anndata import AnnData
from scipy.sparse import spdiags
from squidpy._constants._pkg_constants import Key as sqKey
from squidpy._docs import d
from tqdm.auto import tqdm
from cellcharter._constants._pkg_constants import Key
from cellcharter._utils import str2list
def _aggregate_mean(adj, x):
return adj @ x
def _aggregate_var(adj, x):
mean = adj @ x
mean_squared = adj @ (x * x)
return mean_squared - mean * mean
def _aggregate(adj, x, method):
if method == "mean":
return _aggregate_mean(adj, x)
elif method == "var":
return _aggregate_var(adj, x)
else:
raise NotImplementedError
def _mul_broadcast(mat1, mat2):
return spdiags(mat2, 0, len(mat2), len(mat2)) * mat1
def _hop(adj_hop, adj, adj_visited=None):
    """Advance the neighborhood frontier by one hop.

    ``adj_hop`` holds the current frontier (boolean adjacency of the k-hop
    neighbors); multiplying by ``adj`` reaches the (k+1)-hop candidates, and
    ``adj_visited`` is used to drop nodes already reached at a shorter distance.
    Returns the new frontier and the updated visited matrix.
    """
    adj_hop = adj_hop @ adj
    if adj_visited is not None:
        # For boolean sparse matrices, `a > b` keeps entries set in `a` but not in
        # `b`, i.e. it acts as "a AND NOT visited" (sparse matrices have no
        # elementwise logical-not).
        adj_hop = adj_hop > adj_visited
        adj_visited = adj_visited + adj_hop
    return adj_hop, adj_visited
def _normalize(adj):
    """Row-normalize an adjacency matrix; rows of isolated nodes stay all-zero."""
    degrees = np.array(adj.sum(axis=1)).squeeze()
    with warnings.catch_warnings():
        # A node without neighbors has degree 0 and triggers a divide-by-zero warning.
        warnings.filterwarnings(action="ignore", category=RuntimeWarning)
        inv_degrees = 1 / degrees
        # Map the resulting infinities back to 0 so isolated nodes contribute nothing.
        inv_degrees[inv_degrees == float("inf")] = 0
    return _mul_broadcast(adj, inv_degrees)
def _setdiag(array, value):
if isinstance(array, sps.csr_matrix):
array = array.tolil()
array.setdiag(value)
array = array.tocsr()
if value == 0:
array.eliminate_zeros()
return array
def _aggregate_neighbors(
    adj: sps.spmatrix,
    X: np.ndarray,
    nhood_layers: list,
    aggregations: Optional[Union[str, list]] = "mean",
    disable_tqdm: bool = True,
) -> np.ndarray:
    """Aggregate features over the requested neighborhood hops and concatenate them.

    For layer 0 the cells' own features are used unchanged; for layer k > 0 the
    features of the exact k-hop neighbors are aggregated with each function in
    ``aggregations``. The resulting blocks are horizontally stacked.
    """
    adj = adj.astype(bool)
    adj = _setdiag(adj, 0)  # drop self-loops so a cell is not its own neighbor
    adj_hop = adj.copy()  # frontier matrix: currently the 1-hop neighbors
    adj_visited = _setdiag(adj.copy(), 1)  # nodes already reached, including self
    Xs = []
    for i in tqdm(range(0, max(nhood_layers) + 1), disable=disable_tqdm):
        if i in nhood_layers:
            if i == 0:
                Xs.append(X)
            else:
                if i > 1:
                    # Advance the frontier from (i-1)-hop to i-hop neighbors.
                    # NOTE(review): the hop only happens when i is in nhood_layers,
                    # so non-contiguous layer lists (e.g. [0, 3]) would hop too few
                    # times — confirm layers are always contiguous upstream.
                    adj_hop, adj_visited = _hop(adj_hop, adj, adj_visited)
                adj_hop_norm = _normalize(adj_hop)
                for agg in aggregations:
                    x = _aggregate(adj_hop_norm, X, agg)
                    Xs.append(x)
    if sps.issparse(X):
        return sps.hstack(Xs)
    else:
        return np.hstack(Xs)
@d.dedent
def aggregate_neighbors(
    adata: AnnData,
    n_layers: Union[int, list],
    aggregations: Optional[Union[str, list]] = "mean",
    connectivity_key: Optional[str] = None,
    use_rep: Optional[str] = None,
    sample_key: Optional[str] = None,
    out_key: Optional[str] = "X_cellcharter",
    copy: bool = False,
) -> np.ndarray | None:
    """
    Aggregate the features from each neighborhood layers and concatenate them, and optionally with the cells' features, into a single vector.

    Parameters
    ----------
    %(adata)s
    n_layers
        Which neighborhood layers to aggregate from.
        If :class:`int`, the output vector includes the cells' features and the aggregated features of the neighbors until the layer at distance ``n_layers``, i.e. cells | 1-hop neighbors | ... | ``n_layers``-hop.
        If :class:`list`, every element corresponds to the distances at which the neighbors' features will be aggregated and concatenated. For example, [0, 1, 3] corresponds to cells|1-hop neighbors|3-hop neighbors.
    aggregations
        Which functions to use to aggregate the neighbors features. Default: ``mean``.
    connectivity_key
        Key in :attr:`anndata.AnnData.obsp` where spatial connectivities are stored.
    use_rep
        Key of the features. If :class:`None`, adata.X is used. Else, the key is used to access the field in the AnnData .obsm mapping.
    sample_key
        Key in :attr:`anndata.AnnData.obs` where the sample labels are stored. Must be different from :class:`None` if adata contains multiple samples.
    out_key
        Key in :attr:`anndata.AnnData.obsm` where the output matrix is stored if ``copy = False``.
    %(copy)s

    Returns
    -------
    If ``copy = True``, returns a :class:`numpy.ndarray` of the features aggregated and concatenated.

    Otherwise, modifies the ``adata`` with the following key:
        - :attr:`anndata.AnnData.obsm` ``['{{out_key}}']`` - the above mentioned :class:`numpy.ndarray`.
    """
    # Resolve defaults for the connectivity and sample keys from the package constants.
    connectivity_key = sqKey.obsp.spatial_conn(connectivity_key)
    sample_key = Key.obs.sample if sample_key is None else sample_key
    aggregations = str2list(aggregations)
    X = adata.X if use_rep is None else adata.obsm[use_rep]
    if isinstance(n_layers, int):
        # An int n means layers 0..n: the cells' own features plus n hop levels.
        n_layers = list(range(n_layers + 1))
    # Output width: one block for the cells' own features (layer 0) plus one block
    # per (non-zero layer, aggregation) pair.
    if sps.issparse(X):
        X_aggregated = sps.dok_matrix(
            (X.shape[0], X.shape[1] * ((len(n_layers) - 1) * len(aggregations) + 1)), dtype=np.float32
        )
    else:
        X_aggregated = np.empty(
            (X.shape[0], X.shape[1] * ((len(n_layers) - 1) * len(aggregations) + 1)), dtype=np.float32
        )
    if sample_key in adata.obs:
        # Aggregate each sample separately so neighborhoods never cross samples.
        samples = adata.obs[sample_key].unique()
        sample_idxs = [adata.obs[sample_key] == sample for sample in samples]
    else:
        sample_idxs = [np.arange(adata.shape[0])]
    for idxs in tqdm(sample_idxs, disable=(len(sample_idxs) == 1)):
        X_sample_aggregated = _aggregate_neighbors(
            adj=adata[idxs].obsp[connectivity_key],
            X=X[idxs],
            nhood_layers=n_layers,
            aggregations=aggregations,
            disable_tqdm=(len(sample_idxs) != 1),
        )
        X_aggregated[idxs] = X_sample_aggregated
    if isinstance(X_aggregated, sps.dok_matrix):
        # DOK is convenient for incremental assignment; convert to CSR for downstream use.
        X_aggregated = X_aggregated.tocsr()
    if copy:
        return X_aggregated
    adata.obsm[out_key] = X_aggregated
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/gr/_utils.py | src/cellcharter/gr/_utils.py | """Graph utilities."""
from __future__ import annotations
from anndata import AnnData
def _assert_distances_key(adata: AnnData, key: str) -> None:
if key not in adata.obsp:
key_added = key.replace("_distances", "")
raise KeyError(
f"Spatial distances key `{key}` not found in `adata.obsp`. "
f"Please run `squidpy.gr.spatial_neighbors(..., key_added={key_added!r})` first."
)
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/tl/_shape.py | src/cellcharter/tl/_shape.py | from __future__ import annotations
import warnings
from collections import deque
from concurrent.futures import ProcessPoolExecutor, as_completed
import h5py
import networkx as nx
import numpy as np
import pandas as pd
import shapely
import sknw
from anndata import AnnData
from anndata._io.specs.registry import _REGISTRY, IOSpec
from h5py import Dataset, Group
from matplotlib.path import Path
from rasterio import features
from scipy.spatial import Delaunay
from shapely import geometry, wkb
from shapely.geometry import Polygon
from shapely.ops import polygonize, unary_union
from skimage.morphology import skeletonize
from squidpy._docs import d
# 1. Define a custom encoding spec so shapely Polygons can round-trip through
#    anndata's HDF5 IO registry (used by the writer/reader registered below).
polygon_spec = IOSpec(encoding_type="polygon", encoding_version="1.0.0")
# 2. Writer: Polygon → WKB → uint8 vlen array (object-path)
@_REGISTRY.register_write(Group, Polygon, polygon_spec)
def _write_polygon(group: Group, key: str, poly: Polygon, *, _writer, dataset_kwargs):
    """Serialize a shapely Polygon under ``key`` as a length-1 variable-length uint8 dataset."""
    # 2.1 Serialize to WKB bytes
    raw: bytes = wkb.dumps(poly)
    # 2.2 View as a 1D uint8 array
    arr: np.ndarray = np.frombuffer(raw, dtype=np.uint8)
    # 2.3 Create a vlen dtype over uint8
    dt = h5py.special_dtype(vlen=np.dtype("uint8"))
    # 2.4 Create an empty length-1 dataset with that dtype
    dset = group.create_dataset(key, shape=(1,), dtype=dt)
    # 2.5 Assign element-wise to invoke object-path conversion
    dset[0] = arr
# 3. Reader: uint8 vlen array → bytes → Polygon
@_REGISTRY.register_read(Dataset, polygon_spec)
def _read_polygon(dataset: Dataset, *, _reader) -> Polygon:
    """Deserialize a Polygon previously written by ``_write_polygon``."""
    # 3.1 dataset[0] returns the inner uint8 array
    arr: np.ndarray = dataset[0]
    # 3.2 Recover raw WKB and load
    return wkb.loads(arr.tobytes())
def _alpha_shape(coords, alpha):
    """
    Compute the alpha shape (concave hull) of a set of points.

    Adapted from `here <https://web.archive.org/web/20200726174718/http://blog.thehumangeo.com/2014/05/12/drawing-boundaries-in-python/>`_.

    Parameters
    ----------
    coords : np.array
        Array of coordinates of points.
    alpha : float
        Alpha value to influence the gooeyness of the border. Smaller numbers
        don't fall inward as much as larger numbers. Too large, and you lose
        everything!

    Returns
    -------
    concave_hull : shapely.geometry.Polygon
        Concave hull of the points (union of the kept triangles).
    triangles : list of shapely.geometry.Polygon
        The individual polygons produced by polygonizing the kept edges.
    edge_points : np.ndarray
        The unique edges (pairs of points) of the kept triangles.
    """
    tri = Delaunay(coords)
    # Coordinates of the three vertices of every Delaunay triangle.
    triangles = coords[tri.simplices]
    # Side lengths of each triangle.
    a = ((triangles[:, 0, 0] - triangles[:, 1, 0]) ** 2 + (triangles[:, 0, 1] - triangles[:, 1, 1]) ** 2) ** 0.5
    b = ((triangles[:, 1, 0] - triangles[:, 2, 0]) ** 2 + (triangles[:, 1, 1] - triangles[:, 2, 1]) ** 2) ** 0.5
    c = ((triangles[:, 2, 0] - triangles[:, 0, 0]) ** 2 + (triangles[:, 2, 1] - triangles[:, 0, 1]) ** 2) ** 0.5
    # Heron's formula for the triangle areas, then the circumradius a*b*c / (4*area).
    s = (a + b + c) / 2.0
    areas = (s * (s - a) * (s - b) * (s - c)) ** 0.5
    circums = a * b * c / (4.0 * areas)
    # Keep only triangles whose circumradius is below alpha: larger, "stretched"
    # triangles spanning concavities are discarded.
    filtered = triangles[circums < alpha]
    edge1 = filtered[:, (0, 1)]
    edge2 = filtered[:, (1, 2)]
    edge3 = filtered[:, (2, 0)]
    edge_points = np.unique(np.concatenate((edge1, edge2, edge3)), axis=0)  # .tolist()
    # Rebuild polygons from the kept edges and merge them into the hull.
    m = geometry.MultiLineString(edge_points.tolist())
    triangles = list(polygonize(m.geoms))
    return unary_union(triangles), triangles, edge_points
def _process_component(points, component, hole_area_ratio=0.1, alpha_start=2000):
    """Compute the boundary polygon of one connected component.

    Doubles ``alpha`` until the alpha shape collapses into a single polygon with
    at least 10 boundary edges, then re-punches the holes that are large relative
    to the boundary area. Returns ``(component, boundary_polygon)`` so results can
    be matched to their component when run through an executor.
    """
    alpha = alpha_start
    polygon, triangles, edge_points = _alpha_shape(points, alpha)
    # Grow alpha until the shape is a single, sufficiently detailed polygon.
    # NOTE(review): the MultiPolygon check is redundant — a MultiPolygon already
    # fails the `is not Polygon` test on the previous line.
    while (
        type(polygon) is not geometry.polygon.Polygon
        or type(polygon) is geometry.MultiPolygon
        or edge_points.shape[0] < 10
    ):
        alpha *= 2
        polygon, triangles, edge_points = _alpha_shape(points, alpha)
    # The largest polygonized piece carries the interior rings (holes).
    boundary_with_holes = max(triangles, key=lambda triangle: triangle.area)
    boundary = polygon
    for interior in boundary_with_holes.interiors:
        interior_polygon = geometry.Polygon(interior)
        hole_to_boundary_ratio = interior_polygon.area / boundary.area
        # Only subtract holes that are big enough to be meaningful.
        if hole_to_boundary_ratio > hole_area_ratio:
            try:
                difference = boundary.difference(interior_polygon)
                if isinstance(difference, geometry.Polygon):
                    boundary = difference
            except Exception:  # noqa: B902
                # Best-effort: an invalid geometry difference leaves the boundary unchanged.
                pass
    return component, boundary
@d.dedent
def boundaries(
    adata: AnnData,
    cluster_key: str = "component",
    min_hole_area_ratio: float = 0.1,
    alpha_start: int = 2000,
    copy: bool = False,
) -> None | dict[int, geometry.Polygon]:
    """
    Compute the topological boundaries of sets of cells.

    Parameters
    ----------
    %(adata)s
    cluster_key
        Key in :attr:`anndata.AnnData.obs` where the cluster labels are stored.
    min_hole_area_ratio
        Minimum ratio between the area of a hole and the area of the boundary.
    alpha_start
        Starting value for the alpha parameter of the alpha shape algorithm.
    %(copy)s

    Returns
    -------
    If ``copy = True``, returns a :class:`dict` with the cluster labels as keys and the boundaries as values.

    Otherwise, modifies the ``adata`` with the following key:
        - :attr:`anndata.AnnData.uns` ``['shape_{{cluster_key}}']['boundary']`` - the above mentioned :class:`dict`.
    """
    # NOTE: asserts are stripped under `python -O`; these are sanity checks, not validation.
    assert 0 <= min_hole_area_ratio <= 1, "min_hole_area_ratio must be between 0 and 1"
    assert alpha_start > 0, "alpha_start must be greater than 0"
    # Skip noise ("-1") and unassigned (NaN) cells.
    clusters = [cluster for cluster in adata.obs[cluster_key].unique() if cluster != "-1" and not pd.isnull(cluster)]
    boundaries = {}
    # Components are independent, so their boundaries are computed in parallel processes.
    with ProcessPoolExecutor() as executor:
        futures = {
            executor.submit(
                _process_component,
                adata.obsm["spatial"][adata.obs[cluster_key] == cluster, :2],
                cluster,
                min_hole_area_ratio,
                alpha_start,
            ): cluster
            for cluster in clusters
        }
        for future in as_completed(futures):
            component, boundary = future.result()
            boundaries[component] = boundary
    if copy:
        return boundaries
    adata.uns[f"shape_{cluster_key}"] = {"boundary": boundaries}
def _find_dangling_branches(graph, total_length, min_ratio=0.05):
    """Find degree-1 nodes whose incident branch is shorter than ``min_ratio * total_length``.

    Parameters
    ----------
    graph
        Undirected skeleton graph with ``weight`` edge attributes.
    total_length
        Reference total skeleton length the ratio is computed against.
    min_ratio
        Branches shorter than this fraction of ``total_length`` count as dangling.

    Returns
    -------
    1D array of positional node indices (ordering of ``nx.to_numpy_array``) to prune.
    """
    # BUG FIX: `total_length` used to be recomputed here from the current graph,
    # silently ignoring the value supplied by the caller; the parameter is now used
    # as the fixed reference length.
    adj = nx.to_numpy_array(graph, weight=None)  # unweighted: row sums count neighbors
    adj_w = nx.to_numpy_array(graph)  # weighted: row sums are total branch length per node
    n_neighbors = np.sum(adj, axis=1)
    node_total_dist = np.sum(adj_w, axis=1)
    dangling_nodes = np.argwhere((node_total_dist < min_ratio * total_length) & (n_neighbors == 1))
    # argwhere returns shape (k, 1); flatten while keeping a single match iterable.
    if dangling_nodes.shape[0] != 1:
        dangling_nodes = dangling_nodes.squeeze()
    else:
        dangling_nodes = dangling_nodes[0]
    return dangling_nodes
def _remove_dangling_branches(graph, min_ratio=0.05):
    """Iteratively delete short dangling (degree-1) branches from ``graph`` in place.

    The reference length passed to ``_find_dangling_branches`` is the total
    skeleton length before any pruning, so the threshold stays fixed across
    iterations.
    """
    total_length = np.sum(list(nx.get_edge_attributes(graph, "weight").values()))
    dangling_branches = _find_dangling_branches(graph, total_length=total_length, min_ratio=min_ratio)
    while len(dangling_branches) > 0:
        # Map positional indices (adjacency-matrix ordering) back to node ids.
        idx2node = dict(enumerate(graph.nodes))
        for i in dangling_branches:
            graph.remove_node(idx2node[i])
        dangling_branches = _find_dangling_branches(graph, total_length=total_length, min_ratio=min_ratio)
def _longest_path_from_node(graph, u):
    """Weighted BFS from ``u``; return the farthest node and its distance.

    NOTE(review): distances accumulate along the BFS tree, which equals the true
    path length only on trees/skeleton graphs without alternative routes —
    confirm the pruned skeleton is acyclic here (cycles are handled separately in
    ``_linearity``).
    """
    # dict.fromkeys gives None values, which are falsy, so unvisited nodes test False.
    visited = dict.fromkeys(graph.nodes)
    distance = {i: -1 for i in list(graph.nodes)}
    idx2node = dict(enumerate(graph.nodes))
    try:
        # networkx < 3.0
        adj_lil = nx.to_scipy_sparse_matrix(graph, format="lil")
    except AttributeError:
        # networkx >= 3.0 renamed the function
        adj_lil = nx.to_scipy_sparse_array(graph, format="lil")
    # Adjacency list keyed by node id (LIL rows hold positional indices).
    adj = {i: [idx2node[neigh] for neigh in neighs] for i, neighs in zip(graph.nodes, adj_lil.rows)}
    weight = nx.get_edge_attributes(graph, "weight")
    distance[u] = 0
    queue = deque()
    queue.append(u)
    visited[u] = True
    while queue:
        front = queue.popleft()
        for i in adj[front]:
            if not visited[i]:
                visited[i] = True
                # Edge-attribute keys use the sorted node pair for undirected graphs.
                source, target = min(i, front), max(i, front)
                distance[i] = distance[front] + weight[(source, target)]
                queue.append(i)
    farthest_node = max(distance, key=distance.get)
    longest_path_length = distance[farthest_node]
    return farthest_node, longest_path_length
def _longest_path_length(graph):
    """Weighted length of the longest path, via the classic double-sweep trick."""
    start = next(iter(graph.nodes))
    # First sweep: the farthest node from any start is an endpoint of the longest path.
    endpoint, _ = _longest_path_from_node(graph, start)
    # Second sweep from that endpoint measures the longest path itself.
    _, length = _longest_path_from_node(graph, endpoint)
    return length
def _linearity(boundary, height=1000, min_ratio=0.05):
    """Ratio of the longest skeleton path (or cycle) to the total skeleton length.

    A value close to 1 means the polygon's skeleton is essentially one line.
    """
    img, _ = _rasterize(boundary, height=height)
    skeleton = skeletonize(img).astype(int)
    # Nodes are skeleton junctions/endpoints; edge weights are branch lengths.
    graph = sknw.build_sknw(skeleton.astype(np.uint16))
    graph = graph.to_undirected()
    # Drop spurious short branches produced by rasterization artifacts.
    _remove_dangling_branches(graph, min_ratio=min_ratio)
    # Closed loops compete with open paths for the "longest structure".
    cycles = nx.cycle_basis(graph)
    cycles_len = [nx.path_weight(graph, cycle + [cycle[0]], "weight") for cycle in cycles]
    longest_path_length = _longest_path_length(graph)
    longest_length = np.max(cycles_len + [longest_path_length])
    return longest_length / np.sum(list(nx.get_edge_attributes(graph, "weight").values()))
def _rasterize(boundary, height=1000):
    """Rasterize a polygon to a binary image and return ``(image, scale_factor)``.

    The polygon is translated to the origin and scaled so that its longer side
    spans ``height`` pixels.
    """
    minx, miny, maxx, maxy = boundary.bounds
    # Move the polygon so its lower-left bound sits at (0, 0).
    poly = shapely.affinity.translate(boundary, -minx, -miny)
    # Scale the longer side to `height` pixels (bounds[2]/[3] are maxx/maxy after translation).
    if maxx - minx > maxy - miny:
        scale_factor = height / poly.bounds[2]
    else:
        scale_factor = height / poly.bounds[3]
    poly = shapely.affinity.scale(poly, scale_factor, scale_factor, origin=(0, 0, 0))
    # NOTE(review): out_shape is (rows, cols) = (height, width) with width derived
    # from the aspect ratio — confirm this is the intended orientation for
    # polygons that are wider than tall.
    return features.rasterize([poly], out_shape=(height, int(height * (maxx - minx) / (maxy - miny)))), scale_factor
def linearity(
    adata: AnnData,
    cluster_key: str = "component",
    out_key: str = "linearity",
    height: int = 1000,
    min_ratio: float = 0.05,
    copy: bool = False,
) -> None | dict[int, float]:
    """Deprecated alias for :func:`linearity_metric`.

    Emits a :class:`FutureWarning` and forwards all arguments unchanged; see
    :func:`linearity_metric` for the parameter and return documentation.
    """
    warnings.warn(
        "linearity is deprecated and will be removed in the next release. Please use `linearity_metric` instead.",
        FutureWarning,
        stacklevel=2,
    )
    return linearity_metric(
        adata=adata,
        cluster_key=cluster_key,
        out_key=out_key,
        height=height,
        min_ratio=min_ratio,
        copy=copy,
    )
@d.dedent
def linearity_metric(
    adata: AnnData,
    cluster_key: str = "component",
    out_key: str = "linearity",
    height: int = 1000,
    min_ratio: float = 0.05,
    copy: bool = False,
) -> None | dict[int, float]:
    """
    Compute the linearity of the topological boundaries of sets of cells.

    Each boundary polygon is rasterized and skeletonized; the linearity is the
    length of the longest skeleton path divided by the total skeleton length.
    Branches shorter than ``min_ratio`` times the total length are pruned first.

    Parameters
    ----------
    %(adata)s
    cluster_key
        Key in :attr:`anndata.AnnData.obs` where the cluster labels are stored.
    out_key
        Key in :attr:`anndata.AnnData.obs` where the metric values are stored if ``copy = False``.
    height
        Height of the rasterized image. The width is computed automatically to preserve the aspect ratio. Higher values are more precise but use more memory.
    min_ratio
        Minimum ratio between a branch's length and the total skeleton length for the branch to be kept.
    %(copy)s

    Returns
    -------
    If ``copy = True``, returns a :class:`dict` with the cluster labels as keys and the linearity as values.
    Otherwise, modifies the ``adata`` with the following key:
        - :attr:`anndata.AnnData.uns` ``['shape_{{cluster_key}}']['{{out_key}}']`` - the above mentioned :class:`dict`.
    """
    scores = {
        cluster: _linearity(boundary, height=height, min_ratio=min_ratio)
        for cluster, boundary in adata.uns[f"shape_{cluster_key}"]["boundary"].items()
    }
    if copy:
        return scores
    adata.uns[f"shape_{cluster_key}"][out_key] = scores
def _elongation(boundary):
    """Elongation = 1 - minor/major axis ratio of the minimum rotated bounding rectangle."""
    xs, ys = boundary.minimum_rotated_rectangle.exterior.coords.xy
    corners = list(zip(xs, ys))
    # Lengths of the rectangle's sides (the exterior ring repeats the first corner).
    side_lengths = [
        geometry.LineString((corners[i], corners[i + 1])).length for i in range(len(corners) - 1)
    ]
    minor_axis = min(side_lengths)
    major_axis = max(side_lengths)
    return 1 - minor_axis / major_axis
def elongation(
    adata: AnnData,
    cluster_key: str = "component",
    out_key: str = "elongation",
    copy: bool = False,
) -> None | dict[int, float]:
    """Deprecated alias for :func:`elongation_metric`.

    Emits a :class:`FutureWarning` and forwards all arguments unchanged; see
    :func:`elongation_metric` for the parameter and return documentation.
    """
    warnings.warn(
        "elongation is deprecated and will be removed in the next release. Please use `elongation_metric` instead.",
        FutureWarning,
        stacklevel=2,
    )
    return elongation_metric(
        adata=adata,
        cluster_key=cluster_key,
        out_key=out_key,
        copy=copy,
    )
@d.dedent
def elongation_metric(
    adata: AnnData,
    cluster_key: str = "component",
    out_key: str = "elongation",
    copy: bool = False,
) -> None | dict[int, float]:
    """
    Compute the elongation of the topological boundaries of sets of cells.

    The elongation of a cluster is one minus the ratio between the minor and
    major axes of the minimum bounding rectangle of its boundary polygon.

    Parameters
    ----------
    %(adata)s
    cluster_key
        Key in :attr:`anndata.AnnData.obs` where the cluster labels are stored.
    out_key
        Key in :attr:`anndata.AnnData.obs` where the metric values are stored if ``copy = False``.
    %(copy)s

    Returns
    -------
    If ``copy = True``, returns a :class:`dict` with the cluster labels as keys and the elongation as values.
    Otherwise, modifies the ``adata`` with the following key:
        - :attr:`anndata.AnnData.uns` ``['shape_{{cluster_key}}']['{{out_key}}']`` - the above mentioned :class:`dict`.
    """
    scores = {
        cluster: _elongation(boundary)
        for cluster, boundary in adata.uns[f"shape_{cluster_key}"]["boundary"].items()
    }
    if copy:
        return scores
    adata.uns[f"shape_{cluster_key}"][out_key] = scores
def _axes(boundary):
    """Return ``(minor, major)`` side lengths of the minimum rotated bounding rectangle."""
    xs, ys = boundary.minimum_rotated_rectangle.exterior.coords.xy
    corners = list(zip(xs, ys))
    # The exterior ring repeats the first corner, hence len(corners) - 1 sides.
    lengths = [geometry.LineString((corners[i], corners[i + 1])).length for i in range(len(corners) - 1)]
    return min(lengths), max(lengths)
def _curl(boundary):
    """Curl score: 1 - major_axis / fibre_length, or 0 when the polygon is not curled.

    The fibre length is estimated from the polygon's perimeter and area.
    """
    discriminant = boundary.length**2 - 16 * boundary.area
    if discriminant < 0:
        # Numerical guard: clamp so the square root stays real.
        discriminant = 0
    fibre_length = boundary.area / ((boundary.length - np.sqrt(discriminant)) / 4)
    _, major_axis = _axes(boundary)
    if fibre_length < major_axis:
        return 0
    return 1 - major_axis / fibre_length
def curl(
    adata: AnnData,
    cluster_key: str = "component",
    out_key: str = "curl",
    copy: bool = False,
) -> None | dict[int, float]:
    """Deprecated alias for :func:`curl_metric`.

    Emits a :class:`FutureWarning` and forwards all arguments unchanged; see
    :func:`curl_metric` for the parameter and return documentation.
    """
    warnings.warn(
        "curl is deprecated and will be removed in the next release. Please use `curl_metric` instead.",
        FutureWarning,
        stacklevel=2,
    )
    return curl_metric(
        adata=adata,
        cluster_key=cluster_key,
        out_key=out_key,
        copy=copy,
    )
@d.dedent
def curl_metric(
    adata: AnnData,
    cluster_key: str = "component",
    out_key: str = "curl",
    copy: bool = False,
) -> None | dict[int, float]:
    """
    Compute the curl score of the topological boundaries of sets of cells.

    The curl of a cluster is one minus the ratio between the major axis of the
    minimum bounding rectangle and the estimated fibre length of its boundary
    polygon.

    Parameters
    ----------
    %(adata)s
    cluster_key
        Key in :attr:`anndata.AnnData.obs` where the cluster labels are stored.
    out_key
        Key in :attr:`anndata.AnnData.obs` where the metric values are stored if ``copy = False``.
    %(copy)s

    Returns
    -------
    If ``copy = True``, returns a :class:`dict` with the cluster labels as keys and the curl score as values.
    Otherwise, modifies the ``adata`` with the following key:
        - :attr:`anndata.AnnData.uns` ``['shape_{{cluster_key}}']['{{out_key}}']`` - the above mentioned :class:`dict`.
    """
    scores = {
        cluster: _curl(boundary)
        for cluster, boundary in adata.uns[f"shape_{cluster_key}"]["boundary"].items()
    }
    if copy:
        return scores
    adata.uns[f"shape_{cluster_key}"][out_key] = scores
def purity(
    adata: AnnData,
    cluster_key: str = "component",
    library_key: str = "sample",
    out_key: str = "purity",
    exterior: bool = False,
    copy: bool = False,
) -> None | dict[int, float]:
    """Deprecated alias for :func:`purity_metric`.

    Emits a :class:`FutureWarning` and forwards all arguments unchanged; see
    :func:`purity_metric` for the parameter and return documentation.
    """
    warnings.warn(
        "purity is deprecated and will be removed in the next release. Please use `purity_metric` instead.",
        FutureWarning,
        stacklevel=2,
    )
    return purity_metric(
        adata=adata,
        cluster_key=cluster_key,
        library_key=library_key,
        out_key=out_key,
        exterior=exterior,
        copy=copy,
    )
@d.dedent
def purity_metric(
    adata: AnnData,
    cluster_key: str = "component",
    library_key: str = "sample",
    out_key: str = "purity",
    exterior: bool = False,
    copy: bool = False,
) -> None | dict[int, float]:
    """
    Compute the purity of the topological boundaries of sets of cells.

    It computes the purity of each cluster as the ratio between the number of cells of the cluster that are within the boundary and the total number of cells within the boundary.

    Parameters
    ----------
    %(adata)s
    cluster_key
        Key in :attr:`anndata.AnnData.obs` where the cluster labels are stored.
    library_key
        Key in :attr:`anndata.AnnData.obs` where the sample labels are stored.
    out_key
        Key in :attr:`anndata.AnnData.obs` where the metric values are stored if ``copy = False``.
    exterior
        If ``True``, the computation of the purity ignores the polygon's internal holes.
    %(copy)s

    Returns
    -------
    If ``copy = True``, returns a :class:`dict` with the cluster labels as keys and the purity as values.
    Otherwise, modifies the ``adata`` with the following key:
        - :attr:`anndata.AnnData.uns` ``['shape_{{cluster_key}}']['{{out_key}}']`` - the above mentioned :class:`dict`.
    """
    boundaries = adata.uns[f"shape_{cluster_key}"]["boundary"]
    purity_score = {}
    for cluster, boundary in boundaries.items():
        # NOTE(review): `[0]` is a positional-style Series lookup (deprecated on
        # non-integer indexes) and assumes all cells of a component share one
        # sample — confirm both against the callers.
        sample = adata[adata.obs[cluster_key] == cluster].obs[library_key][0]
        adata_sample = adata[adata.obs[library_key] == sample]
        points = adata_sample.obsm["spatial"][:, :2]
        within_mask = np.zeros(points.shape[0], dtype=bool)
        # Mark cells inside any exterior ring of the boundary.
        if type(boundary) is geometry.multipolygon.MultiPolygon:
            for p in boundary.geoms:
                path = Path(np.array(p.exterior.coords.xy).T)
                within_mask |= np.array(path.contains_points(points))
        else:
            path = Path(np.array(boundary.exterior.coords.xy).T)
            within_mask |= np.array(path.contains_points(points))
        if not exterior:
            # Exclude cells that fall inside the polygon's holes.
            # NOTE(review): `.interiors` does not exist on MultiPolygon — this
            # branch would raise for multi-part boundaries; confirm upstream
            # boundaries are always simple Polygons when exterior=False.
            for interior in boundary.interiors:
                path = Path(np.array(interior.coords.xy).T)
                within_mask &= ~np.array(path.contains_points(points))
        # Fraction of cells within the boundary that belong to this cluster.
        purity_score[cluster] = np.sum(adata_sample.obs[cluster_key][within_mask] == cluster) / np.sum(within_mask)
    if copy:
        return purity_score
    adata.uns[f"shape_{cluster_key}"][out_key] = purity_score
@d.dedent
def relative_component_size_metric(
    adata: AnnData,
    neighborhood_key: str,
    cluster_key: str = "component",
    library_key: str | None = None,
    out_key: str = "rcs",
    copy: bool = False,
) -> None | dict[int, float]:
    """
    The Relative Component Size (RCS) metric compares a component's cell count to the average component size in its cellular neighborhood, indicating whether it is larger or smaller than expected given the neighborhood's total cells and component count.

    Parameters
    ----------
    %(adata)s
    neighborhood_key
        Key in :attr:`anndata.AnnData.obs` where the neighborhood labels are stored.
    cluster_key
        Key in :attr:`anndata.AnnData.obs` where the cluster labels from cc.gr.connected_components are stored.
    library_key
        Key in :attr:`anndata.AnnData.obs` where the sample or condition labels are stored. If None, the average is computed across all samples.
    out_key
        Key in :attr:`anndata.AnnData.obs` where the metric values are stored if ``copy = False``.
    %(copy)s

    Returns
    -------
    If ``copy = True``, returns a :class:`dict` with the cluster labels as keys and the RCS as values.
    Otherwise, modifies the ``adata`` with the following key:
        - :attr:`anndata.AnnData.uns` ``['shape_{{cluster_key}}']['{{out_key}}']`` - the above mentioned :class:`dict`.
    """
    # Cells per component.
    count = adata.obs[cluster_key].value_counts().to_dict()
    df = pd.DataFrame(count.items(), columns=[cluster_key, "count"])
    # Attach each component's neighborhood (one row per component after drop_duplicates).
    df = pd.merge(df, adata.obs[[cluster_key, neighborhood_key]].drop_duplicates().dropna(), on=cluster_key)
    if library_key is not None:
        # When samples are given, averages are computed per (sample, neighborhood).
        df = pd.merge(df, adata.obs[[cluster_key, library_key]].drop_duplicates().dropna(), on=cluster_key)
        group_by = [library_key, neighborhood_key]
    else:
        group_by = [neighborhood_key]
    # Total number of cells per group.
    nbh_counts = adata.obs.groupby(group_by, observed=False).size().reset_index(name="total_neighborhood_cells_image")
    df = df.merge(nbh_counts, on=group_by, how="left")
    # Number of distinct components per group.
    unique_counts = (
        adata.obs.groupby(group_by, observed=False)[cluster_key]
        .nunique()
        .reset_index()
        .rename(columns={cluster_key: "unique_components_neighborhood_image"})
    )
    df = df.merge(unique_counts, on=group_by, how="left")
    # RCS: component size divided by the average component size in its group.
    df["rcs"] = df["count"] / (df["total_neighborhood_cells_image"] / df["unique_components_neighborhood_image"])
    if copy:
        return df.set_index(cluster_key)["rcs"].to_dict()
    adata.uns[f"shape_{cluster_key}"][out_key] = df.set_index(cluster_key)["rcs"].to_dict()
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/tl/_autok.py | src/cellcharter/tl/_autok.py | from __future__ import annotations
import concurrent.futures
import inspect
import json
import logging
import os
import pickle
import warnings
from collections import defaultdict
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List
import anndata as ad
import numpy as np
import pandas as pd
from scipy.signal import find_peaks
from sklearn.metrics import fowlkes_mallows_score, mean_absolute_percentage_error
from torchgmm.base.utils.path import PathType
from tqdm.auto import tqdm
import cellcharter as cc
logger = logging.getLogger(__name__)
class ClusterAutoK:
    """
    Identify the best candidates for the number of clusters.

    Parameters
    ----------
    n_clusters
        Range for number of clusters (bounds included).
    max_runs
        Maximum number of repetitions for each value of number of clusters.
    convergence_tol
        Convergence tolerance for the clustering stability. If the Mean Absolute Percentage Error between consecutive iterations is below `convergence_tol` the algorithm stops without reaching `max_runs`.
    model_class
        Class of the model to be used for clustering. It must accept `random_state` and `n_clusters` as initialization parameters.
    model_params
        Keyword args for `model_class`
    similarity_function
        The similarity function used between clustering results. Defaults to :func:`sklearn.metrics.fowlkes_mallows_score`.

    Examples
    --------
    >>> adata = anndata.read_h5ad(path_to_anndata)
    >>> sq.gr.spatial_neighbors(adata, coord_type='generic', delaunay=True)
    >>> cc.gr.remove_long_links(adata)
    >>> cc.gr.aggregate_neighbors(adata, n_layers=3)
    >>> model_params = {
            'random_state': 42,
            'trainer_params': {
                'accelerator':'cpu',
                'enable_progress_bar': False
            },
        }
    >>> models = cc.tl.ClusterAutoK(n_clusters=(2,10), model_class=cc.tl.GaussianMixture, model_params=model_params, max_runs=5)
    """

    #: The cluster assignments for each repetition and number of clusters.
    labels: dict

    #: The stability values of all combinations of runs between K and K-1, and between K and K+1
    stability: np.ndarray

    def __init__(
        self,
        n_clusters: tuple[int, int] | list[int],
        max_runs: int = 10,
        convergence_tol: float = 1e-2,
        model_class: type = None,
        model_params: dict = None,
        similarity_function: callable = None,
    ):
        # A (min, max) tuple is expanded to the inclusive range [min-1, max+1]
        # (clipped at K=1): the extra K below and above the requested bounds are
        # needed so that the stability of the boundary values can be computed.
        # An explicit list is used as-is.
        self.n_clusters = (
            list(range(*(max(1, n_clusters[0] - 1), n_clusters[1] + 2)))
            if isinstance(n_clusters, tuple)
            else n_clusters
        )
        self.max_runs = max_runs
        self.convergence_tol = convergence_tol
        self.model_class = model_class if model_class else cc.tl.GaussianMixture
        self.model_params = model_params if model_params else {}
        self.similarity_function = similarity_function if similarity_function else fowlkes_mallows_score
        self.stability = []

    def fit(self, adata: ad.AnnData, use_rep: str = "X_cellcharter"):
        """
        Cluster data multiple times for each number of clusters (K) in the selected range and compute the average stability for each of them.

        Parameters
        ----------
        adata
            Annotated data object.
        use_rep
            Key in :attr:`anndata.AnnData.obsm` to use as data to fit the clustering model. If ``None``, uses :attr:`anndata.AnnData.X`.
        """
        if use_rep not in adata.obsm:
            raise ValueError(f"{use_rep} not found in adata.obsm. If you want to use adata.X, set use_rep=None")
        X = adata.obsm[use_rep] if use_rep is not None else adata.X

        self.labels = defaultdict(list)
        self.best_models = {}

        # NOTE(review): `pop` mutates the caller-supplied `model_params` dict in place.
        random_state = self.model_params.pop("random_state", 0)

        # Disable lightning's progress bar unless the user configured it explicitly.
        # NOTE(review): if `trainer_params` was given without 'enable_progress_bar',
        # the whole dict is replaced below and any other trainer settings
        # (e.g. 'accelerator') are discarded — confirm this is intended.
        if ("trainer_params" not in self.model_params) or (
            "enable_progress_bar" not in self.model_params["trainer_params"]
        ):
            self.model_params["trainer_params"] = {"enable_progress_bar": False}

        previous_stability = None
        for i in range(self.max_runs):
            print(f"Iteration {i + 1}/{self.max_runs}")
            new_labels = {}
            for k in tqdm(self.n_clusters, disable=(len(self.n_clusters) == 1)):
                # Temporarily silence lightning's logger during fit/predict,
                # restoring the previous level afterwards.
                logging_level = logging.getLogger("lightning.pytorch").getEffectiveLevel()
                logging.getLogger("lightning.pytorch").setLevel(logging.ERROR)
                # A different seed per repetition (i + random_state) gives independent runs.
                clustering = self.model_class(n_clusters=k, random_state=i + random_state, **self.model_params)
                clustering.fit(X)
                new_labels[k] = clustering.predict(X)
                logging.getLogger("lightning.pytorch").setLevel(logging_level)

                # Keep, for every K, the fitted model with the lowest
                # negative log-likelihood across repetitions.
                if (k not in self.best_models.keys()) or (clustering.nll_ < self.best_models[k].nll_):
                    self.best_models[k] = clustering

            if i > 0:
                # Compare this run's labels at K against every previously stored
                # run at K+1, in parallel threads.
                # NOTE(review): `self.labels[k + 1]` assumes consecutive integer
                # keys; this holds when `n_clusters` was given as a (min, max)
                # tuple, but not necessarily for an arbitrary list — verify.
                with concurrent.futures.ThreadPoolExecutor() as executor:
                    pairs = [
                        (new_labels[k], self.labels[k + 1][i])
                        for i in range(len(list(self.labels.values())[0]))
                        for k in list(self.labels.keys())[:-1]
                    ]
                    self.stability.extend(list(executor.map(lambda x: self.similarity_function(*x), pairs)))

                if previous_stability is not None:
                    # Early-stop when the mean stability curve barely moved
                    # since the previous iteration.
                    stability_change = mean_absolute_percentage_error(
                        np.mean(self._mirror_stability(previous_stability), axis=1),
                        np.mean(self._mirror_stability(self.stability), axis=1),
                    )
                    if stability_change < self.convergence_tol:
                        # Record this iteration's labels before breaking out.
                        for k, new_l in new_labels.items():
                            self.labels[k].append(new_l)
                        print(
                            f"Convergence with a change in stability of {stability_change} reached after {i + 1} iterations"
                        )
                        break
                previous_stability = deepcopy(self.stability)

            for k, new_l in new_labels.items():
                self.labels[k].append(new_l)

        if self.max_runs > 1:
            # Final reshape: one row of stability values per inner K.
            self.stability = self._mirror_stability(self.stability)
        else:
            self.stability = None

    def _mirror_stability(self, stability):
        """Reshape the flat stability list into one row per K, merging each K's comparisons with K+1 and K-1."""
        # Split into chunks of (n_clusters - 1) values, one chunk per comparison
        # round, then transpose so each entry collects one K across rounds.
        stability = [
            stability[i : i + len(self.n_clusters) - 1] for i in range(0, len(stability), len(self.n_clusters) - 1)
        ]
        stability = list(map(list, zip(*stability)))
        # Each K's row is the union of its (K, K+1) and (K-1, K) comparisons.
        return np.array([stability[i] + stability[i - 1] for i in range(1, len(stability))])

    @property
    def best_k(self) -> int:
        """The number of clusters with the highest stability."""
        if self.max_runs <= 1:
            raise ValueError("Cannot compute stability with max_runs <= 1")
        # Mean stability per inner K (the first and last K are boundary helpers).
        stability_mean = np.array([np.mean(self.stability[k]) for k in range(len(self.n_clusters[1:-1]))])
        best_idx = np.argmax(stability_mean)
        # +1 skips the extra K prepended for boundary-stability computation.
        return self.n_clusters[best_idx + 1]

    @property
    def peaks(self) -> List[int]:
        """Find the peaks in the stability curve."""
        if self.max_runs <= 1:
            raise ValueError("Cannot compute stability with max_runs <= 1")
        stability_mean = np.array([np.mean(self.stability[k]) for k in range(len(self.n_clusters[1:-1]))])
        peaks, _ = find_peaks(stability_mean)
        # Map peak indices back onto the inner (non-boundary) K values.
        return np.array(self.n_clusters[1:-1])[peaks]

    def predict(self, adata: ad.AnnData, use_rep: str = None, k: int = None) -> pd.Categorical:
        """
        Predict the labels for the data in ``use_rep`` using the fitted model.

        Parameters
        ----------
        adata
            Annotated data object.
        use_rep
            Key in :attr:`anndata.AnnData.obsm` used as data to fit the clustering model. If ``None``, uses :attr:`anndata.AnnData.obsm['X_cellcharter']` if present, otherwise :attr:`anndata.AnnData.X`.
        k
            Number of clusters to predict using the fitted model. If ``None``, the number of clusters with the highest stability will be selected. If ``max_runs > 1``, the model with the largest marginal likelihood will be used among the ones fitted on ``k``.
        """
        k = self.best_k if k is None else k
        # NOTE(review): `assert` is stripped under `python -O`; a ValueError
        # would make this validation unconditional.
        assert k is None or k in self.n_clusters
        X = (
            adata.obsm[use_rep]
            if use_rep is not None
            else adata.obsm["X_cellcharter"] if "X_cellcharter" in adata.obsm else adata.X
        )
        return pd.Categorical(self.best_models[k].predict(X).astype(str), categories=np.arange(k).astype(str))

    @property
    def persistent_attributes(self) -> List[str]:
        """Returns the list of fitted attributes that ought to be saved and loaded. By default, this encompasses all annotations."""
        return list(self.__annotations__.keys())

    def save(self, path: PathType, best_k=False) -> None:
        """
        Saves the ClusterAutoK object and the clustering models to the provided directory using pickle.

        Parameters
        ----------
        path
            The directory to which all files should be saved.
        best_k
            Save only the best model out all number of clusters `K`. If ``false``, save the best model for each value of `K`.

        Note
        ----------
        If the dictionary returned by :func:`get_params` is not JSON-serializable, this method
        uses :mod:`pickle` which is not necessarily backwards-compatible.
        """
        path = Path(path)
        assert not path.exists() or path.is_dir(), "Estimators can only be saved to a directory."
        path.mkdir(parents=True, exist_ok=True)
        self._save_parameters(path)
        self._save_attributes(path, best_k=best_k)

    def _save_parameters(self, path: Path) -> None:
        """
        Saves the parameters of ClusterAutoK. By default, it uses JSON and falls back to :mod:`pickle`.

        Parameters
        ----------
        path
            The directory to which the parameters should be saved.
        """
        params = self.get_params()
        try:
            data = json.dumps(params, indent=4)
            with (path / "params.json").open("w+") as f:
                f.write(data)
        except TypeError:
            # Some parameter is not JSON-serializable: fall back to pickle.
            # warnings.warn(
            #     f"Failed to serialize parameters of `{self.__class__.__name__}` to JSON. " "Falling back to `pickle`.",
            #     stacklevel=2,
            # )
            with (path / "params.pickle").open("wb+") as f:
                pickle.dump(params, f)

    def _save_attributes(self, path: Path, best_k: bool) -> None:
        """
        Saves the attributes of ClusterAutoK. By default, it uses JSON and falls back to :mod:`pickle`.

        Parameters
        ----------
        path
            The directory to which the fitted attributed should be saved.
        best_k
            Save only the best model out all number of clusters `K`. If ``false``, save the best model for each value of `K`.
        """
        if len(self.persistent_attributes) == 0:
            return
        # The fitted models are saved through their own `save` implementation,
        # one subdirectory per number of clusters.
        if best_k:
            model = self.best_models[self.best_k]
            model.save(path / "best_models" / f"{model.__class__.__name__}_k{self.best_k}")
        else:
            for k, model in self.best_models.items():
                model.save(path / "best_models" / f"{model.__class__.__name__}_k{k}")

        # Everything else (labels, stability, ...) is serialized in one file.
        attributes = {
            attribute: getattr(self, attribute)
            for attribute in self.persistent_attributes
            if attribute != "best_models"
        }
        try:
            data = json.dumps(attributes, indent=4)
            with (path / "attributes.json").open("w+") as f:
                f.write(data)
        except TypeError:
            # Some attribute is not JSON-serializable: fall back to pickle.
            # warnings.warn(
            #     f"Failed to serialize fitted attributes of `{self.__class__.__name__}` to JSON. "
            #     "Falling back to `pickle`.",
            #     stacklevel=2,
            # )
            with (path / "attributes.pickle").open("wb+") as f:
                pickle.dump(attributes, f)

    @classmethod
    def load(cls, path: Path):
        """
        Loads the estimator and (if available) the fitted model.

        This method should only be expected to work to load an estimator that has previously been saved via :func:`save`.

        Parameters
        ----------
        path
            The directory from which to load the estimator.

        Returns
        ----------
        The loaded estimator, either fitted or not.
        """
        path = Path(path)
        assert path.is_dir(), "Estimators can only be loaded from a directory."

        model = cls._load_parameters(path)
        try:
            model._load_attributes(path)
        except FileNotFoundError:
            # A parameters-only save is still usable as an unfitted estimator.
            warnings.warn(f"Failed to read fitted attributes of `{cls.__name__}` at path '{path}'", stacklevel=2)
        return model

    @classmethod
    def _load_parameters(cls, path: Path):
        """
        Initializes this estimator by loading its parameters.

        If subclasses overwrite :func:`save_parameters`, this method should also be overwritten.
        Typically, this method should not be called directly. It is called as part of :func:`load`.

        Parameters
        ----------
        path
            The directory from which the parameters should be loaded.
        """
        json_path = path / "params.json"
        pickle_path = path / "params.pickle"
        # Mirror the save-side fallback: prefer JSON, else the pickle file.
        if json_path.exists():
            with json_path.open() as f:
                params = json.load(f)
        else:
            with pickle_path.open("rb") as f:
                params = pickle.load(f)
        return cls(**params)

    def _load_attributes(self, path: Path) -> None:
        """
        Loads the fitted attributes that are stored at the fitted path.

        If subclasses overwrite :func:`save_attributes`, this method should also be overwritten.
        Typically, this method should not be called directly. It is called as part of :func:`load`.

        Parameters
        ----------
        path
            The directory from which the parameters should be loaded.

        Raises
        ----------
        FileNotFoundError
            If the no fitted attributes have been stored.
        """
        json_path = path / "attributes.json"
        pickle_path = path / "attributes.pickle"
        if json_path.exists():
            with json_path.open() as f:
                self.set_params(json.load(f))
        else:
            with pickle_path.open("rb") as f:
                self.set_params(pickle.load(f))
        # Rebuild the per-K best models from the saved model subdirectories.
        self.best_models = {}
        for model_dir in os.listdir(path / "best_models"):
            model = self.model_class()
            model = model.load(path / "best_models" / model_dir)
            self.best_models[model.n_clusters] = model

    def get_params(self) -> Dict[str, Any]:
        """
        Returns the estimator's parameters as passed to the initializer.

        Args
        ----------
        deep
            Ignored. For Scikit-learn compatibility.

        Returns
        ----------
        The mapping from init parameters to values.
        """
        # Introspect __init__ so subclasses with extra parameters work unchanged.
        signature = inspect.signature(self.__class__.__init__)
        parameters = [p.name for p in signature.parameters.values() if p.name != "self"]
        return {p: getattr(self, p) for p in parameters}

    def set_params(self, values: Dict[str, Any]):
        """
        Sets the provided values. The estimator is returned as well, but the estimator on which this function is called is also modified.

        Parameters
        ----------
        values
            The values to set.

        Returns
        ----------
        The estimator where the values have been set.
        """
        for key, value in values.items():
            setattr(self, key, value)
        return self
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/tl/_trvae.py | src/cellcharter/tl/_trvae.py | from __future__ import annotations
import os
from typing import Optional
from anndata import AnnData, read_h5ad
from torch import nn
try:
from scarches.models import TRVAE as scaTRVAE
from scarches.models import trVAE
from scarches.models.base._utils import _validate_var_names
except ImportError:
class TRVAE:
    r"""
    scArches\'s trVAE model adapted to image-based proteomics data.

    The last ReLU layer of the neural network is removed to allow for continuous and real output values

    Parameters
    ----------
    adata: `~anndata.AnnData`
        Annotated data matrix. Has to be count data for 'nb' and 'zinb' loss and normalized log transformed data
        for 'mse' loss.
    condition_key: String
        column name of conditions in `adata.obs` data frame.
    conditions: List
        List of Condition names that the used data will contain to get the right encoding when used after reloading.
    hidden_layer_sizes: List
        A list of hidden layer sizes for encoder network. Decoder network will be the reversed order.
    latent_dim: Integer
        Bottleneck layer (z) size.
    dr_rate: Float
        Dropout rate applied to all layers, if `dr_rate==0` no dropout will be applied.
    use_mmd: Boolean
        If 'True' an additional MMD loss will be calculated on the latent dim. 'z' or the first decoder layer 'y'.
    mmd_on: String
        Choose on which layer MMD loss will be calculated on if 'use_mmd=True': 'z' for latent dim or 'y' for first
        decoder layer.
    mmd_boundary: Integer or None
        Choose on how many conditions the MMD loss should be calculated on. If 'None' MMD will be calculated on all
        conditions.
    recon_loss: String
        Definition of Reconstruction-Loss-Method, 'mse', 'nb' or 'zinb'.
    beta: Float
        Scaling Factor for MMD loss
    use_bn: Boolean
        If `True` batch normalization will be applied to layers.
    use_ln: Boolean
        If `True` layer normalization will be applied to layers.
    """

    # This placeholder is defined only when `scarches` could not be imported.
    # It mirrors the public interface of the real implementation (defined in
    # the `else` branch of the surrounding try/except) so that callers fail
    # with a clear ImportError at construction time rather than at import time.
    def __init__(
        self,
        adata: AnnData,
        condition_key: str = None,
        conditions: Optional[list] = None,
        hidden_layer_sizes: list | tuple = (256, 64),
        latent_dim: int = 10,
        dr_rate: float = 0.05,
        use_mmd: bool = True,
        mmd_on: str = "z",
        mmd_boundary: Optional[int] = None,
        recon_loss: Optional[str] = "mse",  # same default as the real implementation (was "nb", inconsistent)
        beta: float = 1,
        use_bn: bool = False,
        use_ln: bool = True,
    ):
        raise ImportError("scarches is not installed. Please install scarches to use this method.")

    @classmethod
    def load(cls, dir_path: str, adata: Optional[AnnData] = None, map_location: Optional[str] = None):
        """
        Instantiate a model from the saved output.

        Parameters
        ----------
        dir_path
            Path to saved outputs.
        adata
            AnnData object.
            If None, will check for and load anndata saved with the model.
        map_location
            Location where all tensors should be loaded (e.g., `torch.device('cpu')`)

        Returns
        -------
        Model with loaded state dictionaries.
        """
        raise ImportError("scarches is not installed. Please install scarches to use this method.")
else:
class TRVAE(scaTRVAE):
    r"""
    scArches\'s trVAE model adapted to image-based proteomics data.

    The last ReLU layer of the neural network is removed to allow for continuous and real output values

    Parameters
    ----------
    adata: `~anndata.AnnData`
        Annotated data matrix. Has to be count data for 'nb' and 'zinb' loss and normalized log transformed data
        for 'mse' loss.
    condition_key: String
        column name of conditions in `adata.obs` data frame.
    conditions: List
        List of Condition names that the used data will contain to get the right encoding when used after reloading.
    hidden_layer_sizes: List
        A list of hidden layer sizes for encoder network. Decoder network will be the reversed order.
    latent_dim: Integer
        Bottleneck layer (z) size.
    dr_rate: Float
        Dropout rate applied to all layers, if `dr_rate==0` no dropout will be applied.
    use_mmd: Boolean
        If 'True' an additional MMD loss will be calculated on the latent dim. 'z' or the first decoder layer 'y'.
    mmd_on: String
        Choose on which layer MMD loss will be calculated on if 'use_mmd=True': 'z' for latent dim or 'y' for first
        decoder layer.
    mmd_boundary: Integer or None
        Choose on how many conditions the MMD loss should be calculated on. If 'None' MMD will be calculated on all
        conditions.
    recon_loss: String
        Definition of Reconstruction-Loss-Method, 'mse', 'nb' or 'zinb'.
    beta: Float
        Scaling Factor for MMD loss
    use_bn: Boolean
        If `True` batch normalization will be applied to layers.
    use_ln: Boolean
        If `True` layer normalization will be applied to layers.
    """

    def __init__(
        self,
        adata: AnnData,
        condition_key: str = None,
        conditions: Optional[list] = None,
        hidden_layer_sizes: list | tuple = (256, 64),
        latent_dim: int = 10,
        dr_rate: float = 0.05,
        use_mmd: bool = True,
        mmd_on: str = "z",
        mmd_boundary: Optional[int] = None,
        recon_loss: Optional[str] = "mse",
        beta: float = 1,
        use_bn: bool = False,
        use_ln: bool = True,
    ):
        # NOTE: intentionally does NOT call scaTRVAE.__init__ — the attributes
        # and inner `trVAE` module are rebuilt here so the decoder's final
        # layer can be replaced (see below).
        self.adata = adata

        self.condition_key_ = condition_key

        # Derive the condition list from `adata.obs` unless given explicitly.
        if conditions is None:
            if condition_key is not None:
                self.conditions_ = adata.obs[condition_key].unique().tolist()
            else:
                self.conditions_ = []
        else:
            self.conditions_ = conditions

        self.hidden_layer_sizes_ = hidden_layer_sizes
        self.latent_dim_ = latent_dim
        self.dr_rate_ = dr_rate
        self.use_mmd_ = use_mmd
        self.mmd_on_ = mmd_on
        self.mmd_boundary_ = mmd_boundary
        self.recon_loss_ = recon_loss
        self.beta_ = beta
        self.use_bn_ = use_bn
        self.use_ln_ = use_ln

        self.input_dim_ = adata.n_vars

        # Positional argument order must match scarches' trVAE constructor.
        self.model = trVAE(
            self.input_dim_,
            self.conditions_,
            list(self.hidden_layer_sizes_),
            self.latent_dim_,
            self.dr_rate_,
            self.use_mmd_,
            self.mmd_on_,
            self.mmd_boundary_,
            self.recon_loss_,
            self.beta_,
            self.use_bn_,
            self.use_ln_,
        )

        # Replace the reconstruction head with a plain Linear layer so that the
        # outputs are unbounded real values (drops the final ReLU — see the
        # class docstring). The layer sizes mirror the decoder: reversed hidden
        # sizes followed by the input dimensionality.
        decoder_layer_sizes = self.model.hidden_layer_sizes.copy()
        decoder_layer_sizes.reverse()
        decoder_layer_sizes.append(self.model.input_dim)
        self.model.decoder.recon_decoder = nn.Linear(decoder_layer_sizes[-2], decoder_layer_sizes[-1])

        self.is_trained_ = False

        self.trainer = None

    @classmethod
    def load(cls, dir_path: str, adata: Optional[AnnData] = None, map_location: Optional[str] = None):
        """
        Instantiate a model from the saved output.

        Parameters
        ----------
        dir_path
            Path to saved outputs.
        adata
            AnnData object.
            If None, will check for and load anndata saved with the model.
        map_location
            Location where all tensors should be loaded (e.g., `torch.device('cpu')`)

        Returns
        -------
        Model with loaded state dictionaries.
        """
        adata_path = os.path.join(dir_path, "adata.h5ad")

        load_adata = adata is None
        if os.path.exists(adata_path) and load_adata:
            adata = read_h5ad(adata_path)
        elif not os.path.exists(adata_path) and load_adata:
            raise ValueError("Save path contains no saved anndata and no adata was passed.")

        attr_dict, model_state_dict, var_names = cls._load_params(dir_path, map_location=map_location)

        # Overwrite adata with new genes
        adata = _validate_var_names(adata, var_names)

        cls._validate_adata(adata, attr_dict)
        init_params = cls._get_init_params_from_dict(attr_dict)

        model = cls(adata, **init_params)
        model.model.load_state_dict(model_state_dict)
        model.model.eval()

        model.is_trained_ = attr_dict["is_trained_"]

        return model
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/tl/__init__.py | src/cellcharter/tl/__init__.py | from ._autok import ClusterAutoK
from ._gmm import Cluster, GaussianMixture
from ._shape import (
boundaries,
curl,
elongation,
linearity,
purity,
relative_component_size_metric,
)
from ._trvae import TRVAE
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/tl/_gmm.py | src/cellcharter/tl/_gmm.py | from __future__ import annotations
import logging
from typing import List, Tuple, cast
import anndata as ad
import numpy as np
import pandas as pd
import scipy.sparse as sps
import torch
from pytorch_lightning import Trainer
from torchgmm.base.data import (
DataLoader,
TensorLike,
collate_tensor,
dataset_from_tensors,
)
from torchgmm.bayes import GaussianMixture as TorchGaussianMixture
from torchgmm.bayes.gmm.lightning_module import GaussianMixtureLightningModule
from torchgmm.bayes.gmm.model import GaussianMixtureModel
from .._utils import AnyRandom
logger = logging.getLogger(__name__)
class GaussianMixture(TorchGaussianMixture):
    """
    Adapted version of GaussianMixture clustering model from the `torchgmm <https://github.com/marcovarrone/torchgmm/>`_ library.

    Parameters
    ----------
    n_clusters
        The number of components in the GMM. The dimensionality of each component is automatically inferred from the data.
    covariance_type
        The type of covariance to assume for all Gaussian components.
    init_strategy
        The strategy for initializing component means and covariances.
    init_means
        An optional initial guess for the means of the components. If provided,
        must be a tensor of shape ``[num_components, num_features]``. If this is given,
        the ``init_strategy`` is ignored and the means are handled as if K-means
        initialization has been run.
    convergence_tolerance
        The change in the per-datapoint negative log-likelihood which
        implies that training has converged.
    covariance_regularization
        A small value which is added to the diagonal of the
        covariance matrix to ensure that it is positive semi-definite.
    batch_size: The batch size to use when fitting the model. If not provided, the full
        data will be used as a single batch. Set this if the full data does not fit into
        memory.
    trainer_params
        Initialization parameters to use when initializing a PyTorch Lightning
        trainer. By default, it disables various stdout logs unless TorchGMM is configured to
        do verbose logging. Checkpointing and logging are disabled regardless of the log
        level. This estimator further sets the following overridable defaults:

        - ``max_epochs=100``.
    random_state
        Initialization seed.
    """

    #: The fitted PyTorch module with all estimated parameters.
    model_: GaussianMixtureModel
    #: A boolean indicating whether the model converged during training.
    converged_: bool
    #: The number of iterations the model was fitted for, excluding initialization.
    num_iter_: int
    #: The average per-datapoint negative log-likelihood at the last training step.
    nll_: float

    def __init__(
        self,
        n_clusters: int = 1,
        *,
        covariance_type: str = "full",
        init_strategy: str = "kmeans",
        init_means: torch.Tensor = None,
        convergence_tolerance: float = 0.001,
        covariance_regularization: float = 1e-06,
        batch_size: int = None,
        trainer_params: dict = None,
        random_state: AnyRandom = 0,
    ):
        # torchgmm names the parameter `num_components`; this wrapper exposes
        # the scikit-learn-style `n_clusters` instead.
        super().__init__(
            num_components=n_clusters,
            covariance_type=covariance_type,
            init_strategy=init_strategy,
            init_means=init_means,
            convergence_tolerance=convergence_tolerance,
            covariance_regularization=covariance_regularization,
            batch_size=batch_size,
            trainer_params=trainer_params,
        )
        self.n_clusters = n_clusters
        self.random_state = random_state

    def fit(self, data: TensorLike) -> GaussianMixture:
        """
        Fits the Gaussian mixture on the provided data, estimating component priors, means and covariances. Parameters are estimated using the EM algorithm.

        Parameters
        ----------
        data
            The tabular data to fit on. The dimensionality of the Gaussian mixture is automatically inferred from this data.

        Returns
        ----------
        The fitted Gaussian mixture.
        """
        if sps.issparse(data):
            raise ValueError(
                "Sparse data is not supported. You may have forgotten to reduce the dimensionality of the data. Otherwise, please convert the data to a dense format."
            )
        return self._fit(data)

    def _fit(self, data) -> GaussianMixture:
        """Fit with automatic escalation of the covariance regularization on Cholesky failures."""
        try:
            return super().fit(data)
        except torch._C._LinAlgError as e:
            # A Cholesky failure typically indicates a (near-)singular
            # covariance matrix: retry with 10x stronger regularization,
            # giving up once it reaches 1.
            if self.covariance_regularization >= 1:
                raise ValueError(
                    "Cholesky decomposition failed even with covariance regularization = 1. The matrix may be singular."
                ) from e
            else:
                self.covariance_regularization *= 10
                logger.warning(
                    f"Cholesky decomposition failed. Retrying with covariance regularization {self.covariance_regularization}."
                )
                return self._fit(data)

    def predict(self, data: TensorLike) -> torch.Tensor:
        """
        Computes the most likely components for each of the provided datapoints.

        Parameters
        ----------
        data
            The datapoints for which to obtain the most likely components.

        Returns
        ----------
        A tensor of shape ``[num_datapoints]`` with the indices of the most likely components.

        Note
        ----------
        Use :func:`predict_proba` to obtain probabilities for each component instead of the
        most likely component only.

        Attention
        ----------
        When calling this function in a multi-process environment, each process receives only
        a subset of the predictions. If you want to aggregate predictions, make sure to gather
        the values returned from this method.
        """
        # Unlike the parent class, return a numpy array rather than a tensor.
        return super().predict(data).numpy()

    def predict_proba(self, data: TensorLike) -> torch.Tensor:
        """
        Computes a distribution over the components for each of the provided datapoints.

        Parameters
        ----------
        data
            The datapoints for which to compute the component assignment probabilities.

        Returns
        ----------
        A tensor of shape ``[num_datapoints, num_components]`` with the assignment
        probabilities for each component and datapoint. Note that each row of the vector sums
        to 1, i.e. the returned tensor provides a proper distribution over the components for
        each datapoint.

        Attention
        ----------
        When calling this function in a multi-process environment, each process receives only
        a subset of the predictions. If you want to aggregate predictions, make sure to gather
        the values returned from this method.
        """
        loader = DataLoader(
            dataset_from_tensors(data),
            batch_size=self.batch_size or len(data),
            collate_fn=collate_tensor,
        )
        # Disable lightning's logger for the prediction-only Trainer.
        trainer_params = self.trainer_params.copy()
        trainer_params["logger"] = False
        result = Trainer(**trainer_params).predict(GaussianMixtureLightningModule(self.model_), loader)
        # Each batch yields (probabilities, nll); keep the probabilities.
        return torch.cat([x[0] for x in cast(List[Tuple[torch.Tensor, torch.Tensor]], result)])

    def score_samples(self, data: TensorLike) -> torch.Tensor:
        """
        Computes the negative log-likelihood (NLL) of each of the provided datapoints.

        Parameters
        ----------
        data
            The datapoints for which to compute the NLL.

        Returns
        ----------
        A tensor of shape ``[num_datapoints]`` with the NLL for each datapoint.

        Attention
        ----------
        When calling this function in a multi-process environment, each process receives only
        a subset of the predictions. If you want to aggregate predictions, make sure to gather
        the values returned from this method.
        """
        loader = DataLoader(
            dataset_from_tensors(data),
            batch_size=self.batch_size or len(data),
            collate_fn=collate_tensor,
        )
        trainer_params = self.trainer_params.copy()
        trainer_params["logger"] = False
        result = Trainer(**trainer_params).predict(GaussianMixtureLightningModule(self.model_), loader)
        # Each batch yields (probabilities, nll); keep the NLL values.
        return torch.stack([x[1] for x in cast(List[Tuple[torch.Tensor, torch.Tensor]], result)])
class Cluster(GaussianMixture):
    """
    Cluster cells or spots based on the neighborhood aggregated features from CellCharter.

    Parameters
    ----------
    n_clusters
        The number of components in the GMM. The dimensionality of each component is automatically inferred from the data.
    covariance_type
        The type of covariance to assume for all Gaussian components.
    init_strategy
        The strategy for initializing component means and covariances.
    init_means
        An optional initial guess for the means of the components. If provided,
        must be a tensor of shape ``[num_components, num_features]``. If this is given,
        the ``init_strategy`` is ignored and the means are handled as if K-means
        initialization has been run.
    convergence_tolerance
        The change in the per-datapoint negative log-likelihood which
        implies that training has converged.
    covariance_regularization
        A small value which is added to the diagonal of the
        covariance matrix to ensure that it is positive semi-definite.
    batch_size: The batch size to use when fitting the model. If not provided, the full
        data will be used as a single batch. Set this if the full data does not fit into
        memory.
    trainer_params
        Initialization parameters to use when initializing a PyTorch Lightning
        trainer. By default, it disables various stdout logs unless TorchGMM is configured to
        do verbose logging. Checkpointing and logging are disabled regardless of the log
        level. This estimator further sets the following overridable defaults:

        - ``max_epochs=100``.
    random_state
        Initialization seed.

    Examples
    --------
    >>> adata = anndata.read_h5ad(path_to_anndata)
    >>> sq.gr.spatial_neighbors(adata, coord_type='generic', delaunay=True)
    >>> cc.gr.remove_long_links(adata)
    >>> cc.gr.aggregate_neighbors(adata, n_layers=3)
    >>> model = cc.tl.Cluster(n_clusters=11)
    >>> model.fit(adata, use_rep='X_cellcharter')
    """

    def __init__(
        self,
        n_clusters: int = 1,
        *,
        covariance_type: str = "full",
        init_strategy: str = "kmeans",
        init_means: torch.Tensor = None,
        convergence_tolerance: float = 0.001,
        covariance_regularization: float = 1e-06,
        batch_size: int = None,
        trainer_params: dict = None,
        random_state: AnyRandom = 0,
    ):
        super().__init__(
            n_clusters=n_clusters,
            covariance_type=covariance_type,
            init_strategy=init_strategy,
            init_means=init_means,
            convergence_tolerance=convergence_tolerance,
            covariance_regularization=covariance_regularization,
            batch_size=batch_size,
            trainer_params=trainer_params,
            random_state=random_state,
        )

    def fit(self, adata: ad.AnnData, use_rep: str = "X_cellcharter"):
        """
        Fit data into ``n_clusters`` clusters.

        Parameters
        ----------
        adata
            Annotated data object.
        use_rep
            Key in :attr:`anndata.AnnData.obsm` to use as data to fit the clustering model. If ``None``, uses :attr:`anndata.AnnData.X`.
        """
        X = adata.X if use_rep is None else adata.obsm[use_rep]
        # Silence lightning's logger while fitting, restoring the previous
        # level afterwards. (A redundant dead read of `logging.root.level`
        # that was immediately overwritten has been removed.)
        logging_level = logging.getLogger("lightning.pytorch").getEffectiveLevel()
        logging.getLogger("lightning.pytorch").setLevel(logging.ERROR)
        super().fit(X)
        logging.getLogger("lightning.pytorch").setLevel(logging_level)
        # Record the (JSON-friendly) fit parameters in the AnnData object;
        # `init_means` is excluded because it may be a large tensor.
        adata.uns["_cellcharter"] = {k: v for k, v in self.get_params().items() if k != "init_means"}

    def predict(self, adata: ad.AnnData, use_rep: str = "X_cellcharter") -> pd.Categorical:
        """
        Predict the labels for the data in ``use_rep`` using the fitted model.

        Parameters
        ----------
        adata
            Annotated data object.
        use_rep
            Key in :attr:`anndata.AnnData.obsm` used as data to fit the clustering model. If ``None``, uses :attr:`anndata.AnnData.X`.

        Returns
        -------
        Categorical vector of cluster labels (as strings), with one category per cluster.
        """
        X = adata.X if use_rep is None else adata.obsm[use_rep]
        return pd.Categorical(super().predict(X).astype(str), categories=np.arange(self.n_clusters).astype(str))
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/src/cellcharter/tl/_utils.py | src/cellcharter/tl/_utils.py | from itertools import combinations
import numpy as np
from joblib import Parallel, delayed
from sklearn.metrics import adjusted_rand_score
def _stability(labels, similarity_function=adjusted_rand_score, n_jobs=-1):
clusters = list(labels.keys())
max_runs = len(labels[clusters[0]])
num_combinations = max_runs * (max_runs - 1) // 2
stabilities = Parallel(n_jobs=n_jobs)(
delayed(similarity_function)(labels[clusters[k]][i], labels[clusters[k] + 1][j])
for k in range(len(clusters) - 1)
for i, j in combinations(range(max_runs), 2)
)
# Transform test into a list of chunks of size num_combinations
stabilities = [stabilities[i : i + num_combinations] for i in range(0, len(stabilities), num_combinations)]
# Append to every element of test the previous element
stabilities = [stabilities[i] + stabilities[i - 1] for i in range(1, len(stabilities))]
return np.array(stabilities)
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/tests/conftest.py | tests/conftest.py | import time
from urllib.error import HTTPError
import anndata as ad
import numpy as np
import pytest
import scanpy as sc
from squidpy._constants._pkg_constants import Key
_adata = sc.read("tests/_data/test_data.h5ad")
_adata.raw = _adata.copy()
@pytest.fixture()
def non_visium_adata() -> ad.AnnData:
non_visium_coords = np.array([[1, 0], [3, 0], [5, 6], [0, 4]])
adata = ad.AnnData(X=non_visium_coords, dtype=int)
adata.obsm[Key.obsm.spatial] = non_visium_coords
return adata
@pytest.fixture()
def adata() -> ad.AnnData:
return _adata.copy()
@pytest.fixture(scope="session")
def codex_adata() -> ad.AnnData:
max_retries = 3
retry_delay = 5 # seconds
for attempt in range(max_retries):
try:
adata = sc.read(
"tests/_data/codex_adata.h5ad", backup_url="https://figshare.com/ndownloader/files/46832722"
)
adata.obs_names_make_unique()
return adata[adata.obs["sample"].isin(["BALBc-1", "MRL-5"])].copy()
except HTTPError as e:
if attempt == max_retries - 1: # Last attempt
pytest.skip(f"Failed to download test data after {max_retries} attempts: {str(e)}")
time.sleep(retry_delay)
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/tests/tools/test_gmm.py | tests/tools/test_gmm.py | import pytest
import scipy.sparse as sps
import squidpy as sq
import cellcharter as cc
class TestCluster:
@pytest.mark.parametrize("dataset_name", ["mibitof"])
def test_sparse(self, dataset_name: str):
download_dataset = getattr(sq.datasets, dataset_name)
adata = download_dataset()
adata.X = sps.csr_matrix(adata.X)
sq.gr.spatial_neighbors(adata, coord_type="generic", delaunay=True)
cc.gr.remove_long_links(adata)
gmm = cc.tl.Cluster(n_clusters=(10))
# Check if fit raises a ValueError
with pytest.raises(ValueError):
gmm.fit(adata, use_rep=None)
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/tests/tools/test_autok.py | tests/tools/test_autok.py | import numpy as np
import pytest
import scipy.sparse as sps
import squidpy as sq
import cellcharter as cc
class TestClusterAutoK:
@pytest.mark.parametrize("dataset_name", ["mibitof"])
def test_spatial_proteomics(self, dataset_name: str):
download_dataset = getattr(sq.datasets, dataset_name)
adata = download_dataset()
if sps.issparse(adata.X):
adata.X = adata.X.todense()
sq.gr.spatial_neighbors(adata, coord_type="generic", delaunay=True)
cc.gr.remove_long_links(adata)
cc.gr.aggregate_neighbors(adata, n_layers=3)
model_params = {
"init_strategy": "kmeans",
"random_state": 42,
"trainer_params": {"accelerator": "cpu", "enable_progress_bar": False},
}
autok = cc.tl.ClusterAutoK(
n_clusters=(2, 5), model_class=cc.tl.GaussianMixture, model_params=model_params, max_runs=3
)
autok.fit(adata, use_rep="X_cellcharter")
adata.obs[f"cellcharter_{autok.best_k}"] = autok.predict(adata, use_rep="X_cellcharter", k=autok.best_k)
assert len(np.unique(adata.obs[f"cellcharter_{autok.best_k}"])) == autok.best_k
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/tests/tools/test_shape.py | tests/tools/test_shape.py | import numpy as np
import pandas as pd
from anndata import AnnData
from shapely import Polygon
import cellcharter as cc
# Test for cc.tl.boundaries, that computes the topological boundaries of sets of cells.
class TestBoundaries:
def test_boundaries(self, codex_adata: AnnData):
cc.gr.connected_components(codex_adata, cluster_key="cluster_cellcharter", min_cells=250)
cc.tl.boundaries(codex_adata)
boundaries = codex_adata.uns["shape_component"]["boundary"]
assert isinstance(boundaries, dict)
# Check if boundaries contains all components of codex_adata
assert set(boundaries.keys()) == set(codex_adata.obs["component"].cat.categories)
def test_copy(self, codex_adata: AnnData):
cc.gr.connected_components(codex_adata, cluster_key="cluster_cellcharter", min_cells=250)
boundaries = cc.tl.boundaries(codex_adata, copy=True)
assert isinstance(boundaries, dict)
# Check if boundaries contains all components of codex_adata
assert set(boundaries.keys()) == set(codex_adata.obs["component"].cat.categories)
class TestLinearity:
def test_rectangle(self, codex_adata: AnnData):
codex_adata.obs["rectangle"] = 1
polygon = Polygon([(0, 0), (0, 10), (2, 10), (2, 0)])
codex_adata.uns["shape_rectangle"] = {"boundary": {1: polygon}}
linearities = cc.tl.linearity(codex_adata, "rectangle", copy=True)
assert linearities[1] == 1.0
def test_symmetrical_cross(self, codex_adata: AnnData):
codex_adata.obs["cross"] = 1
# Symmetrical cross with arm width of 2 and length of 5
polygon = Polygon(
[(0, 5), (0, 7), (5, 7), (5, 12), (7, 12), (7, 7), (12, 7), (12, 5), (7, 5), (7, 0), (5, 0), (5, 5)]
)
codex_adata.uns["shape_cross"] = {"boundary": {1: polygon}}
linearities = cc.tl.linearity(codex_adata, "cross", copy=True)
# The cross is symmetrical, so the linearity should be 0.5
assert abs(linearities[1] - 0.5) < 0.01
def test_thickness(self, codex_adata: AnnData):
# The thickness of the cross should not influence the linearity
codex_adata.obs["cross"] = 1
# Symmetrical cross with arm width of 2 and length of 5
polygon1 = Polygon(
[(0, 5), (0, 6), (5, 6), (5, 11), (6, 11), (6, 6), (11, 6), (11, 5), (6, 5), (6, 0), (5, 0), (5, 5)]
)
# Symmetrical cross with arm width of 2 and length of 5
polygon2 = Polygon(
[(0, 5), (0, 7), (5, 7), (5, 12), (7, 12), (7, 7), (12, 7), (12, 5), (7, 5), (7, 0), (5, 0), (5, 5)]
)
codex_adata.uns["shape_cross"] = {"boundary": {1: polygon1}}
linearities1 = cc.tl.linearity(codex_adata, "cross", copy=True)
codex_adata.uns["shape_cross"] = {"boundary": {1: polygon2}}
linearities2 = cc.tl.linearity(codex_adata, "cross", copy=True)
assert abs(linearities1[1] - linearities2[1]) < 0.01
class TestRelativeComponentSize:
def test_relative_component_size(self, codex_adata: AnnData):
"""
Test the relative component size metric with a toy dataset.
Setup:
- Neighborhood 0: 66% of cells, one component (component 0)
- Neighborhood 1: 33% of cells, split 50% into component 1 and 50% into component 2
Expected RCS values:
- Component 0: should be 1.0 (only component in neighborhood 0)
- Component 1: should be 1.0 (50% of neighborhood 1, which has 2 components)
- Component 2: should be 1.0 (50% of neighborhood 1, which has 2 components)
"""
total_cells = len(codex_adata)
# Calculate counts based on the specified distribution
# 66% neighborhood 0, 33% neighborhood 1
nbh_0_count = int(total_cells * 0.66)
nbh_1_count = total_cells - nbh_0_count
# Neighborhood 1 is split 50% into component 1 and 50% into component 2
comp_1_count = nbh_1_count // 2
nbh_1_count - comp_1_count
# Create domain (neighborhood) assignments
domains = np.zeros(total_cells, dtype=int)
domains[nbh_0_count:] = 1 # First 66% get domain 0, rest get domain 1
# Create component assignments
components = np.full(total_cells, -1, dtype=int)
components[:nbh_0_count] = 0 # All cells in domain 0 get component 0
components[nbh_0_count : nbh_0_count + comp_1_count] = 1 # First half of domain 1 gets component 1
components[nbh_0_count + comp_1_count :] = 2 # Second half of domain 1 gets component 2
# Add the assignments to the adata
codex_adata.obs["domain"] = pd.Categorical(domains)
codex_adata.obs["component"] = pd.Categorical(components)
codex_adata.obs["sample"] = "test"
rcs_values = cc.tl.relative_component_size_metric(
codex_adata, neighborhood_key="domain", cluster_key="component", copy=True
)
# Component 0: only component in domain 0, so RCS = 1.0
# Component 1: 50% of domain 1, which has 2 components, so RCS = 1.0
# Component 2: 50% of domain 1, which has 2 components, so RCS = 1.0
assert abs(rcs_values[0] - 1.0) < 1e-2
assert abs(rcs_values[1] - 1.0) < 1e-2
assert abs(rcs_values[2] - 1.0) < 1e-2
def test_relative_component_size_unequal_distribution(self, codex_adata: AnnData):
"""
Test RCS metric with unequal component distribution within a neighborhood.
Setup:
- Neighborhood 0: 60% of cells, one component (component 0)
- Neighborhood 1: 40% of cells, split 75% into component 1 and 25% into component 2
Expected RCS values:
- Component 0: should be 1.0 (only component in neighborhood 0)
- Component 1: should be 1.5 (75% of neighborhood 1, which has 2 components, so 0.75/0.5 = 1.5)
- Component 2: should be 0.5 (25% of neighborhood 1, which has 2 components, so 0.25/0.5 = 0.5)
"""
total_cells = len(codex_adata)
# Calculate counts based on the specified distribution
# 60% neighborhood 0, 40% neighborhood 1
nbh_0_count = int(total_cells * 0.60)
nbh_1_count = total_cells - nbh_0_count
# Neighborhood 1 is split 75% into component 1 and 25% into component 2
comp_1_count = int(nbh_1_count * 0.75)
# Create domain (neighborhood) assignments
domains = np.zeros(total_cells, dtype=int)
domains[nbh_0_count:] = 1 # First 60% get domain 0, rest get domain 1
# Create component assignments
components = np.full(total_cells, -1, dtype=int)
components[:nbh_0_count] = 0 # All cells in domain 0 get component 0
components[nbh_0_count : nbh_0_count + comp_1_count] = 1 # 75% of domain 1 gets component 1
components[nbh_0_count + comp_1_count :] = 2 # 25% of domain 1 gets component 2
codex_adata.obs["domain"] = pd.Categorical(domains)
codex_adata.obs["component"] = pd.Categorical(components)
codex_adata.obs["sample"] = "test"
rcs_values = cc.tl.relative_component_size_metric(
codex_adata, neighborhood_key="domain", cluster_key="component", copy=True
)
# Component 0: only component in domain 0, so RCS = 1.0
# Component 1: 75% of domain 1, which has 2 components, so RCS = 0.75/0.5 = 1.5
# Component 2: 25% of domain 1, which has 2 components, so RCS = 0.25/0.5 = 0.5
assert abs(rcs_values[0] - 1.0) < 1e-2
assert abs(rcs_values[1] - 1.5) < 1e-2
assert abs(rcs_values[2] - 0.5) < 1e-2
def test_relative_component_size_multiple_neighborhoods(self, codex_adata: AnnData):
"""
Test RCS metric with multiple neighborhoods and varying component distributions.
Setup:
- Neighborhood 0: 40% of cells, one component (component 0)
- Neighborhood 1: 35% of cells, split 60% into component 1 and 40% into component 2
- Neighborhood 2: 25% of cells, split 33% into component 3, 33% into component 4, 34% into component 5
Expected RCS values:
- Component 0: should be 1.0 (only component in neighborhood 0)
- Component 1: should be 1.2 (60% of neighborhood 1, which has 2 components, so 0.6/0.5 = 1.2)
- Component 2: should be 0.8 (40% of neighborhood 1, which has 2 components, so 0.4/0.5 = 0.8)
- Component 3: should be 0.99 (33% of neighborhood 2, which has 3 components, so 0.33/0.333 ≈ 0.99)
- Component 4: should be 0.99 (33% of neighborhood 2, which has 3 components, so 0.33/0.333 ≈ 0.99)
- Component 5: should be 1.02 (34% of neighborhood 2, which has 3 components, so 0.34/0.333 ≈ 1.02)
"""
total_cells = len(codex_adata)
# Calculate counts based on the specified distribution
nbh_0_count = int(total_cells * 0.40)
nbh_1_count = int(total_cells * 0.35)
nbh_2_count = total_cells - nbh_0_count - nbh_1_count
# Neighborhood 1: 60% component 1, 40% component 2
comp_1_count = int(nbh_1_count * 0.60)
# Neighborhood 2: 33% each for components 3, 4, and 34% for component 5
comp_3_count = int(nbh_2_count * 0.33)
comp_4_count = int(nbh_2_count * 0.33)
# Create domain (neighborhood) assignments
domains = np.zeros(total_cells, dtype=int)
domains[nbh_0_count : nbh_0_count + nbh_1_count] = 1 # Middle 35% get domain 1
domains[nbh_0_count + nbh_1_count :] = 2 # Last 25% get domain 2
# Create component assignments
components = np.full(total_cells, -1, dtype=int)
components[:nbh_0_count] = 0 # All cells in domain 0 get component 0
components[nbh_0_count : nbh_0_count + comp_1_count] = 1 # 60% of domain 1 gets component 1
components[nbh_0_count + comp_1_count : nbh_0_count + nbh_1_count] = 2 # 40% of domain 1 gets component 2
components[nbh_0_count + nbh_1_count : nbh_0_count + nbh_1_count + comp_3_count] = (
3 # 33% of domain 2 gets component 3
)
components[
nbh_0_count + nbh_1_count + comp_3_count : nbh_0_count + nbh_1_count + comp_3_count + comp_4_count
] = 4 # 33% of domain 2 gets component 4
components[nbh_0_count + nbh_1_count + comp_3_count + comp_4_count :] = 5 # 34% of domain 2 gets component 5
codex_adata.obs["domain"] = pd.Categorical(domains)
codex_adata.obs["component"] = pd.Categorical(components)
codex_adata.obs["sample"] = "test"
rcs_values = cc.tl.relative_component_size_metric(
codex_adata, neighborhood_key="domain", cluster_key="component", copy=True
)
assert abs(rcs_values[0] - 1.0) < 1e-2 # Component 0: only component in domain 0
assert abs(rcs_values[1] - 1.2) < 1e-2 # Component 1: 60% of domain 1 (2 components)
assert abs(rcs_values[2] - 0.8) < 1e-2 # Component 2: 40% of domain 1 (2 components)
assert abs(rcs_values[3] - 0.99) < 1e-2 # Component 3: 33% of domain 2 (3 components)
assert abs(rcs_values[4] - 0.99) < 1e-2 # Component 4: 33% of domain 2 (3 components)
assert abs(rcs_values[5] - 1.02) < 1e-2 # Component 5: 34% of domain 2 (3 components)
def test_relative_component_size_cross_sample_domains(self, codex_adata: AnnData):
"""
Test RCS metric when components from the same domain are distributed across different samples.
Setup:
- Domain 0: 50% of cells, split across two samples
* Sample "BALBc-1": 30% of total cells, one component (component 0)
* Sample "MRL-5": 20% of total cells, one component (component 1)
- Domain 1: 50% of cells, split across two samples
* Sample "BALBc-1": 25% of total cells, split 60/40 into components 2 and 3
* Sample "MRL-5": 25% of total cells, split 40/60 into components 4 and 5
Expected RCS values (calculated across all samples for each domain):
- Component 0: 1.2 (60% of domain 0, 2 components, expected average 50%)
- Component 1: 0.8 (40% of domain 0, 2 components, expected average 50%)
- Component 2: 1.2 (15% of domain 1, 4 components, expected average 12.5%)
- Component 3: 0.8 (10% of domain 1, 4 components, expected average 12.5%)
- Component 4: 0.8 (10% of domain 1, 4 components, expected average 12.5%)
- Component 5: 1.2 (15% of domain 1, 4 components, expected average 12.5%)
"""
total_cells = len(codex_adata)
# Calculate counts based on the specified distribution
# Domain 0: 50% total (30% BALBc-1, 20% MRL-5)
# Domain 1: 50% total (25% BALBc-1, 25% MRL-5)
balbc_domain0_count = int(total_cells * 0.30)
mrl_domain0_count = int(total_cells * 0.20)
balbc_domain1_count = int(total_cells * 0.25)
mrl_domain1_count = total_cells - balbc_domain0_count - mrl_domain0_count - balbc_domain1_count
# Domain 1 in BALBc-1: 60% component 2, 40% component 3
balbc_comp2_count = int(balbc_domain1_count * 0.60)
# Domain 1 in MRL-5: 40% component 4, 60% component 5
mrl_comp4_count = int(mrl_domain1_count * 0.40)
# Create sample assignments
samples = np.full(total_cells, "BALBc-1", dtype=object)
samples[balbc_domain0_count : balbc_domain0_count + mrl_domain0_count] = "MRL-5"
samples[
balbc_domain0_count + mrl_domain0_count : balbc_domain0_count + mrl_domain0_count + balbc_domain1_count
] = "BALBc-1"
samples[balbc_domain0_count + mrl_domain0_count + balbc_domain1_count :] = "MRL-5"
# Create domain (neighborhood) assignments
domains = np.zeros(total_cells, dtype=int)
domains[balbc_domain0_count + mrl_domain0_count :] = 1 # First 50% get domain 0, rest get domain 1
# Create component assignments
components = np.full(total_cells, -1, dtype=int)
components[:balbc_domain0_count] = 0 # BALBc-1 domain 0 gets component 0
components[balbc_domain0_count : balbc_domain0_count + mrl_domain0_count] = 1 # MRL-5 domain 0 gets component 1
components[
balbc_domain0_count + mrl_domain0_count : balbc_domain0_count + mrl_domain0_count + balbc_comp2_count
] = 2 # BALBc-1 domain 1 first part gets component 2
components[
balbc_domain0_count
+ mrl_domain0_count
+ balbc_comp2_count : balbc_domain0_count
+ mrl_domain0_count
+ balbc_domain1_count
] = 3 # BALBc-1 domain 1 second part gets component 3
components[
balbc_domain0_count
+ mrl_domain0_count
+ balbc_domain1_count : balbc_domain0_count
+ mrl_domain0_count
+ balbc_domain1_count
+ mrl_comp4_count
] = 4 # MRL-5 domain 1 first part gets component 4
components[balbc_domain0_count + mrl_domain0_count + balbc_domain1_count + mrl_comp4_count :] = (
5 # MRL-5 domain 1 second part gets component 5
)
codex_adata.obs["domain"] = pd.Categorical(domains)
codex_adata.obs["component"] = pd.Categorical(components)
codex_adata.obs["sample"] = pd.Categorical(samples)
rcs_values = cc.tl.relative_component_size_metric(
codex_adata, neighborhood_key="domain", cluster_key="component", copy=True
)
# Domain 0 has 2 components (0 and 1) across all samples
# Domain 1 has 4 components (2, 3, 4, 5) across all samples
assert abs(rcs_values[0] - 1.2) < 1e-2 # Component 0: 60% of domain 0 (2 components, expected 50%)
assert abs(rcs_values[1] - 0.8) < 1e-2 # Component 1: 40% of domain 0 (2 components, expected 50%)
assert (
abs(rcs_values[2] - 1.2) < 1e-2
) # Component 2: 60% of BALBc-1 domain 1 = 15% of total, domain 1 avg = 12.5%
assert (
abs(rcs_values[3] - 0.8) < 1e-2
) # Component 3: 40% of BALBc-1 domain 1 = 10% of total, domain 1 avg = 12.5%
assert (
abs(rcs_values[4] - 0.8) < 1e-2
) # Component 4: 40% of MRL-5 domain 1 = 10% of total, domain 1 avg = 12.5%
assert (
abs(rcs_values[5] - 1.2) < 1e-2
) # Component 5: 60% of MRL-5 domain 1 = 15% of total, domain 1 avg = 12.5%
def test_relative_component_size_within_sample_domains(self, codex_adata: AnnData):
"""
Test RCS metric when components from the same domain are distributed within a sample.
Setup:
- Domain 0: 50% of cells, split across two samples
* Sample "BALBc-1": 30% of total cells, one component (component 0)
* Sample "MRL-5": 20% of total cells, one component (component 1)
- Domain 1: 50% of cells, split across two samples
* Sample "BALBc-1": 25% of total cells, split 60/40 into components 2 and 3
* Sample "MRL-5": 25% of total cells, split 40/60 into components 4 and 5
Expected RCS values:
- Component 0: should be 1.0 (only component in domain 0 of sample "BALBc-1")
- Component 1: should be 1.0 (only component in domain 0 of sample "MRL-5")
- Component 2: should be 1.2 (60% of domain 1 in sample "BALBc-1", which has 2 components)
- Component 3: should be 0.8 (40% of domain 1 in sample "BALBc-1", which has 2 components)
- Component 4: should be 0.8 (40% of domain 1 in sample "MRL-5", which has 2 components)
- Component 5: should be 1.2 (60% of domain 1 in sample "MRL-5", which has 2 components)
"""
total_cells = len(codex_adata)
# Calculate counts based on the specified distribution
# Domain 0: 50% total (30% BALBc-1, 20% MRL-5)
# Domain 1: 50% total (25% BALBc-1, 25% MRL-5)
balbc_domain0_count = int(total_cells * 0.30)
mrl_domain0_count = int(total_cells * 0.20)
balbc_domain1_count = int(total_cells * 0.25)
mrl_domain1_count = total_cells - balbc_domain0_count - mrl_domain0_count - balbc_domain1_count
# Domain 1 in BALBc-1: 60% component 2, 40% component 3
balbc_comp2_count = int(balbc_domain1_count * 0.60)
# Domain 1 in MRL-5: 40% component 4, 60% component 5
mrl_comp4_count = int(mrl_domain1_count * 0.40)
# Create sample assignments
samples = np.full(total_cells, "BALBc-1", dtype=object)
samples[balbc_domain0_count : balbc_domain0_count + mrl_domain0_count] = "MRL-5"
samples[
balbc_domain0_count + mrl_domain0_count : balbc_domain0_count + mrl_domain0_count + balbc_domain1_count
] = "BALBc-1"
samples[balbc_domain0_count + mrl_domain0_count + balbc_domain1_count :] = "MRL-5"
# Create domain (neighborhood) assignments
domains = np.zeros(total_cells, dtype=int)
domains[balbc_domain0_count + mrl_domain0_count :] = 1 # First 50% get domain 0, rest get domain 1
# Create component assignments
components = np.full(total_cells, -1, dtype=int)
components[:balbc_domain0_count] = 0 # BALBc-1 domain 0 gets component 0
components[balbc_domain0_count : balbc_domain0_count + mrl_domain0_count] = 1 # MRL-5 domain 0 gets component 1
components[
balbc_domain0_count + mrl_domain0_count : balbc_domain0_count + mrl_domain0_count + balbc_comp2_count
] = 2 # BALBc-1 domain 1 first part gets component 2
components[
balbc_domain0_count
+ mrl_domain0_count
+ balbc_comp2_count : balbc_domain0_count
+ mrl_domain0_count
+ balbc_domain1_count
] = 3 # BALBc-1 domain 1 second part gets component 3
components[
balbc_domain0_count
+ mrl_domain0_count
+ balbc_domain1_count : balbc_domain0_count
+ mrl_domain0_count
+ balbc_domain1_count
+ mrl_comp4_count
] = 4 # MRL-5 domain 1 first part gets component 4
components[balbc_domain0_count + mrl_domain0_count + balbc_domain1_count + mrl_comp4_count :] = (
5 # MRL-5 domain 1 second part gets component 5
)
codex_adata.obs["domain"] = pd.Categorical(domains)
codex_adata.obs["component"] = pd.Categorical(components)
codex_adata.obs["sample"] = pd.Categorical(samples)
rcs_values = cc.tl.relative_component_size_metric(
codex_adata, neighborhood_key="domain", cluster_key="component", library_key="sample", copy=True
)
# Each component should be calculated relative to its own sample and domain
assert abs(rcs_values[0] - 1.0) < 1e-2 # Component 0: only component in domain 0 of BALBc-1
assert abs(rcs_values[1] - 1.0) < 1e-2 # Component 1: only component in domain 0 of MRL-5
assert abs(rcs_values[2] - 1.2) < 1e-2 # Component 2: 60% of domain 1 in BALBc-1 (2 components)
assert abs(rcs_values[3] - 0.8) < 1e-2 # Component 3: 40% of domain 1 in BALBc-1 (2 components)
assert abs(rcs_values[4] - 0.8) < 1e-2 # Component 4: 40% of domain 1 in MRL-5 (2 components)
assert abs(rcs_values[5] - 1.2) < 1e-2 # Component 5: 60% of domain 1 in MRL-5 (2 components)
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/tests/graph/test_build.py | tests/graph/test_build.py | import numpy as np
import pandas as pd
import scipy.sparse as sps
import squidpy as sq
from anndata import AnnData
from squidpy._constants._pkg_constants import Key
import cellcharter as cc
class TestRemoveLongLinks:
def test_remove_long_links(self, non_visium_adata: AnnData):
# ground-truth removing connections longer that 50th percentile
correct_dist_perc = np.array(
[
[0.0, 2.0, 0.0, 4.12310563],
[2.0, 0.0, 0, 5.0],
[0.0, 0, 0.0, 0.0],
[4.12310563, 5.0, 0.0, 0.0],
]
)
correct_graph_perc = np.array(
[[0.0, 1.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0]]
)
sq.gr.spatial_neighbors(non_visium_adata, coord_type="generic", delaunay=True)
cc.gr.remove_long_links(non_visium_adata, distance_percentile=50)
spatial_graph = non_visium_adata.obsp[Key.obsp.spatial_conn()].toarray()
spatial_dist = non_visium_adata.obsp[Key.obsp.spatial_dist()].toarray()
np.testing.assert_array_equal(spatial_graph, correct_graph_perc)
np.testing.assert_allclose(spatial_dist, correct_dist_perc)
class TestRemoveIntraClusterLinks:
def test_mixed_clusters(self, non_visium_adata: AnnData):
non_visium_adata.obsp[Key.obsp.spatial_conn()] = sps.csr_matrix(
np.ones((non_visium_adata.shape[0], non_visium_adata.shape[0]))
)
non_visium_adata.obsp[Key.obsp.spatial_dist()] = sps.csr_matrix(
[[0, 1, 4, 4], [1, 0, 6, 3], [4, 6, 0, 9], [4, 3, 9, 0]]
)
non_visium_adata.obs["cluster"] = ["0", "0", "1", "1"]
correct_conns = np.array([[0, 0, 1, 1], [0, 0, 1, 1], [1, 1, 0, 0], [1, 1, 0, 0]])
correct_dists = np.array([[0, 0, 4, 4], [0, 0, 6, 3], [4, 6, 0, 0], [4, 3, 0, 0]])
cc.gr.remove_intra_cluster_links(non_visium_adata, cluster_key="cluster")
trimmed_conns = non_visium_adata.obsp[Key.obsp.spatial_conn()].toarray()
trimmed_dists = non_visium_adata.obsp[Key.obsp.spatial_dist()].toarray()
np.testing.assert_array_equal(trimmed_conns, correct_conns)
np.testing.assert_allclose(trimmed_dists, correct_dists)
def test_same_clusters(self, non_visium_adata: AnnData):
non_visium_adata.obsp[Key.obsp.spatial_conn()] = sps.csr_matrix(
np.ones((non_visium_adata.shape[0], non_visium_adata.shape[0]))
)
non_visium_adata.obsp[Key.obsp.spatial_dist()] = sps.csr_matrix(
[[0, 1, 4, 4], [1, 0, 6, 3], [4, 6, 0, 9], [4, 3, 9, 0]]
)
non_visium_adata.obs["cluster"] = ["0", "0", "0", "0"]
correct_conns = np.zeros((non_visium_adata.shape[0], non_visium_adata.shape[0]))
correct_dists = np.zeros((non_visium_adata.shape[0], non_visium_adata.shape[0]))
cc.gr.remove_intra_cluster_links(non_visium_adata, cluster_key="cluster")
trimmed_conns = non_visium_adata.obsp[Key.obsp.spatial_conn()].toarray()
trimmed_dists = non_visium_adata.obsp[Key.obsp.spatial_dist()].toarray()
np.testing.assert_array_equal(trimmed_conns, correct_conns)
np.testing.assert_allclose(trimmed_dists, correct_dists)
def test_different_clusters(self, non_visium_adata: AnnData):
non_visium_adata.obsp[Key.obsp.spatial_conn()] = sps.csr_matrix(
np.ones((non_visium_adata.shape[0], non_visium_adata.shape[0]))
)
non_visium_adata.obsp[Key.obsp.spatial_dist()] = sps.csr_matrix(
[[0, 1, 4, 4], [1, 0, 6, 3], [4, 6, 0, 9], [4, 3, 9, 0]]
)
non_visium_adata.obs["cluster"] = ["0", "1", "2", "3"]
correct_conns = non_visium_adata.obsp[Key.obsp.spatial_conn()].copy()
correct_conns.setdiag(0)
correct_dists = non_visium_adata.obsp[Key.obsp.spatial_dist()]
cc.gr.remove_intra_cluster_links(non_visium_adata, cluster_key="cluster")
trimmed_conns = non_visium_adata.obsp[Key.obsp.spatial_conn()].toarray()
trimmed_dists = non_visium_adata.obsp[Key.obsp.spatial_dist()].toarray()
np.testing.assert_array_equal(trimmed_conns, correct_conns.toarray())
np.testing.assert_allclose(trimmed_dists, correct_dists.toarray())
def test_copy(self, non_visium_adata: AnnData):
non_visium_adata.obsp[Key.obsp.spatial_conn()] = sps.csr_matrix(
np.ones((non_visium_adata.shape[0], non_visium_adata.shape[0]))
)
non_visium_adata.obsp[Key.obsp.spatial_dist()] = sps.csr_matrix(
[[0, 1, 4, 4], [1, 0, 6, 3], [4, 6, 0, 9], [4, 3, 9, 0]]
)
non_visium_adata.obs["cluster"] = ["0", "0", "1", "1"]
correct_conns = non_visium_adata.obsp[Key.obsp.spatial_conn()].copy()
correct_dists = non_visium_adata.obsp[Key.obsp.spatial_dist()].copy()
cc.gr.remove_intra_cluster_links(non_visium_adata, cluster_key="cluster", copy=True)
trimmed_conns = non_visium_adata.obsp[Key.obsp.spatial_conn()].toarray()
trimmed_dists = non_visium_adata.obsp[Key.obsp.spatial_dist()].toarray()
np.testing.assert_array_equal(trimmed_conns, correct_conns.toarray())
np.testing.assert_allclose(trimmed_dists, correct_dists.toarray())
class TestConnectedComponents:
def test_component_present(self, adata: AnnData):
sq.gr.spatial_neighbors(adata, coord_type="grid", n_neighs=6, delaunay=False)
cc.gr.connected_components(adata, min_cells=10)
assert "component" in adata.obs
def test_connected_components_no_cluster(self):
adata = AnnData(
X=np.full((4, 2), 1),
)
adata.obsp[Key.obsp.spatial_conn()] = sps.csr_matrix(
np.array(
[
[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
]
)
)
correct_components = np.array(["0", "0", "1", "1"])
components = cc.gr.connected_components(adata, min_cells=0, copy=True)
np.testing.assert_array_equal(components, correct_components)
components = cc.gr.connected_components(adata, min_cells=0, copy=False, out_key="comp")
assert "comp" in adata.obs
np.testing.assert_array_equal(adata.obs["comp"].values, correct_components)
def test_connected_components_cluster(self):
adata = AnnData(X=np.full((4, 2), 1), obs={"cluster": ["0", "0", "1", "1"]})
adata.obsp[Key.obsp.spatial_conn()] = sps.csr_matrix(
np.array(
[
[0, 1, 0, 0],
[1, 0, 0, 1],
[0, 0, 0, 1],
[0, 1, 1, 0],
]
)
)
correct_components = np.array(["0", "0", "1", "1"])
components = cc.gr.connected_components(adata, cluster_key="cluster", min_cells=0, copy=True)
np.testing.assert_array_equal(components, correct_components)
components = cc.gr.connected_components(adata, cluster_key="cluster", min_cells=0, copy=False, out_key="comp")
assert "comp" in adata.obs
np.testing.assert_array_equal(adata.obs["comp"].values, correct_components)
def test_connected_components_min_cells(self):
adata = AnnData(X=np.full((5, 2), 1), obs={"cluster": ["0", "0", "0", "1", "1"]})
adata.obsp[Key.obsp.spatial_conn()] = sps.csr_matrix(
np.array(
[
[0, 1, 1, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 1, 1, 0],
]
)
)
correct_components = np.array(["0", "0", "0", "1", "1"])
components = cc.gr.connected_components(adata, cluster_key="cluster", min_cells=2, copy=True)
np.testing.assert_array_equal(components, correct_components)
correct_components = pd.Series(["0", "0", "0", "-1", "-1"])
components = cc.gr.connected_components(adata, cluster_key="cluster", min_cells=3, copy=True)
components = components.add_categories(["-1"])
components = components.fillna("-1")
np.testing.assert_array_equal(components, correct_components)
def test_codex(self, codex_adata: AnnData):
min_cells = 250
correct_number_components = 97
if "component" in codex_adata.obs:
del codex_adata.obs["component"]
cc.gr.connected_components(codex_adata, cluster_key="cluster_cellcharter", min_cells=min_cells)
assert codex_adata.obs["component"].dtype == "category"
assert len(codex_adata.obs["component"].cat.categories) == correct_number_components
for component in codex_adata.obs["component"].cat.categories:
# Check that all components have at least min_cells cells
assert np.sum(codex_adata.obs["component"] == component) >= min_cells
# Check that all cells in the component are in the same cluster
assert len(codex_adata.obs["cluster_cellcharter"][codex_adata.obs["component"] == component].unique()) == 1
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/tests/graph/test_aggregate_neighbors.py | tests/graph/test_aggregate_neighbors.py | import numpy as np
import scipy.sparse as sps
import squidpy as sq
from anndata import AnnData
from cellcharter.gr import aggregate_neighbors
class TestAggregateNeighbors:
    """Tests for :func:`cellcharter.gr.aggregate_neighbors`."""

    def test_aggregate_neighbors(self):
        """Aggregate a toy 9-node graph over 2 layers and check mean/var columns.

        The expected matrices below were precomputed for this adjacency matrix
        and the powers-of-two feature matrix.
        """
        n_layers = 2
        aggregations = ["mean", "var"]

        adjacency = np.array(
            [
                [0, 1, 1, 1, 1, 0, 0, 0, 0],
                [1, 0, 0, 0, 0, 1, 0, 0, 0],
                [1, 0, 0, 0, 0, 0, 1, 1, 0],
                [1, 0, 0, 0, 1, 0, 0, 0, 1],
                [1, 0, 0, 1, 0, 0, 0, 0, 0],
                [0, 1, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 0, 0, 0, 0, 1, 0],
                [0, 0, 1, 0, 0, 0, 1, 0, 0],
                [0, 0, 0, 1, 0, 0, 0, 0, 0],
            ]
        )
        connectivities = sps.csr_matrix(adjacency)

        # Two feature columns: powers of two, ascending and descending.
        powers = np.power(2, np.arange(connectivities.shape[0]))
        features = np.vstack((powers, powers[::-1])).T.astype(np.float32)

        adata = AnnData(X=features, obsp={"spatial_connectivities": connectivities})

        mean_l1 = np.vstack(
            ([7.5, 16.5, 64.66, 91, 4.5, 2, 66, 34, 8], [60, 132, 87.33, 91, 144, 128, 33, 34, 32])
        ).T.astype(np.float32)
        mean_l2 = np.vstack(
            ([120, 9.33, 8.67, 3, 87.33, 1, 1, 1, 8.5], [3.75, 37.33, 58.67, 96, 64.33, 256, 256, 256, 136])
        ).T.astype(np.float32)
        var_l1 = np.vstack(
            ([5.36, 15.5, 51.85, 116.83, 3.5, 0, 62, 30, 0], [42.90, 124, 119.27, 116.83, 112, 0, 31, 30, 0])
        ).T.astype(np.float32)
        var_l2 = np.vstack(
            ([85.79, 4.99, 5.73, 1.0, 119.27, 0, 0, 0, 7.5], [2.68, 19.96, 49.46, 32, 51.85, 0, 0, 0, 120])
        ).T.astype(np.float32)

        aggregate_neighbors(adata, n_layers=n_layers, aggregations=["mean", "var"])

        result = adata.obsm["X_cellcharter"]
        # One copy of the original features plus one block per (layer, aggregation).
        assert result.shape == (adata.shape[0], features.shape[1] * (n_layers * len(aggregations) + 1))
        np.testing.assert_allclose(result[:, [0, 1]], features, rtol=0.01)
        np.testing.assert_allclose(result[:, [2, 3]], mean_l1, rtol=0.01)
        # The var fixtures are squared before comparison against the var columns.
        np.testing.assert_allclose(result[:, [4, 5]], var_l1**2, rtol=0.01)
        np.testing.assert_allclose(result[:, [6, 7]], mean_l2, rtol=0.01)
        np.testing.assert_allclose(result[:, [8, 9]], var_l2**2, rtol=0.01)

    def test_aggregations(self, adata: AnnData):
        """Passing a single aggregation as a string must equal the list form."""
        sq.gr.spatial_neighbors(adata)
        aggregate_neighbors(adata, n_layers=3, aggregations="mean", out_key="X_str")
        aggregate_neighbors(adata, n_layers=3, aggregations=["mean"], out_key="X_list")
        # Sparse results are identical iff no entries differ.
        assert (adata.obsm["X_str"] != adata.obsm["X_list"]).nnz == 0
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/tests/graph/test_diff_nhood.py | tests/graph/test_diff_nhood.py | import numpy as np
import pytest
from anndata import AnnData
import cellcharter as cc
_CLUSTER_KEY = "cell_type"
_CONDITION_KEY = "sample"
key = f"{_CLUSTER_KEY}_{_CONDITION_KEY}_diff_nhood_enrichment"
class TestDiffNhoodEnrichment:
    """Tests for :func:`cellcharter.gr.diff_nhood_enrichment`."""

    def test_enrichment(self, codex_adata: AnnData):
        """One result per unordered condition pair, with values in [-1, 1]."""
        n_conditions = codex_adata.obs[_CONDITION_KEY].cat.categories.shape[0]
        cc.gr.diff_nhood_enrichment(
            codex_adata, cluster_key=_CLUSTER_KEY, condition_key=_CONDITION_KEY, only_inter=False, log_fold_change=False
        )
        assert len(codex_adata.uns[key]) == n_conditions * (n_conditions - 1) / 2
        for result in codex_adata.uns[key].values():
            values = result["enrichment"]
            assert np.all((values >= -1) & (values <= 1))
        del codex_adata.uns[key]

    def test_pvalues(self, codex_adata: AnnData):
        """Permutation p-values are produced for every pair and lie in [0, 1]."""
        n_conditions = codex_adata.obs[_CONDITION_KEY].cat.categories.shape[0]
        cc.gr.diff_nhood_enrichment(
            codex_adata,
            cluster_key=_CLUSTER_KEY,
            condition_key=_CONDITION_KEY,
            library_key="sample",
            only_inter=True,
            log_fold_change=True,
            pvalues=True,
            n_perms=100,
        )
        assert len(codex_adata.uns[key]) == n_conditions * (n_conditions - 1) / 2
        for result in codex_adata.uns[key].values():
            assert "pvalue" in result
            pvalues = result["pvalue"]
            assert np.all((pvalues >= 0) & (pvalues <= 1))
        del codex_adata.uns[key]

    def test_symmetric_vs_nonsymmetric(self, codex_adata: AnnData):
        """symmetric=True and symmetric=False must produce different enrichments."""
        cc.gr.diff_nhood_enrichment(codex_adata, cluster_key=_CLUSTER_KEY, condition_key=_CONDITION_KEY, symmetric=True)
        symmetric = codex_adata.uns[key].copy()
        del codex_adata.uns[key]

        cc.gr.diff_nhood_enrichment(
            codex_adata, cluster_key=_CLUSTER_KEY, condition_key=_CONDITION_KEY, symmetric=False
        )
        nonsymmetric = codex_adata.uns[key]

        for pair in symmetric:
            assert not np.allclose(symmetric[pair]["enrichment"], nonsymmetric[pair]["enrichment"], equal_nan=True)
        del codex_adata.uns[key]

    def test_condition_groups(self, codex_adata: AnnData):
        """Restricting to two conditions yields exactly one comparison."""
        selected = codex_adata.obs[_CONDITION_KEY].cat.categories[:2]
        cc.gr.diff_nhood_enrichment(
            codex_adata, cluster_key=_CLUSTER_KEY, condition_key=_CONDITION_KEY, condition_groups=selected
        )
        assert len(codex_adata.uns[key]) == 1
        assert f"{selected[0]}_{selected[1]}" in codex_adata.uns[key]
        del codex_adata.uns[key]

    def test_invalid_inputs(self, codex_adata: AnnData):
        """Unknown cluster/condition/library keys raise ``KeyError``."""
        invalid_kwargs = (
            {"cluster_key": "invalid_key", "condition_key": _CONDITION_KEY},
            {"cluster_key": _CLUSTER_KEY, "condition_key": "invalid_key"},
            {
                "cluster_key": _CLUSTER_KEY,
                "condition_key": _CONDITION_KEY,
                "library_key": "invalid_key",
                "pvalues": True,
            },
        )
        for kwargs in invalid_kwargs:
            with pytest.raises(KeyError):
                cc.gr.diff_nhood_enrichment(codex_adata, **kwargs)

    def test_copy_return(self, codex_adata: AnnData):
        """copy=True returns a non-empty dict and leaves ``adata.uns`` untouched."""
        returned = cc.gr.diff_nhood_enrichment(
            codex_adata, cluster_key=_CLUSTER_KEY, condition_key=_CONDITION_KEY, copy=True
        )
        assert key not in codex_adata.uns
        assert isinstance(returned, dict)
        assert len(returned) > 0
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/tests/graph/test_group.py | tests/graph/test_group.py | import numpy as np
import squidpy as sq
import cellcharter as cc
# Observation columns the enrichment is computed over.
GROUP_KEY = "batch"
LABEL_KEY = "Cell_class"
# ``adata.uns`` key written by cc.gr.enrichment for this group/label pair.
key = GROUP_KEY + "_" + LABEL_KEY + "_enrichment"

# Shared dataset, downloaded once at module import.
adata = sq.datasets.merfish()
class TestEnrichment:
    """Tests for :func:`cellcharter.gr.enrichment`."""

    def test_enrichment(self):
        """Results land in ``adata.uns`` with enrichment and params entries."""
        cc.gr.enrichment(adata, group_key=GROUP_KEY, label_key=LABEL_KEY)
        assert key in adata.uns
        assert "enrichment" in adata.uns[key]
        assert "params" in adata.uns[key]
        del adata.uns[key]

    def test_copy(self):
        """copy=True returns the result dictionary instead of writing uns."""
        result = cc.gr.enrichment(adata, group_key=GROUP_KEY, label_key=LABEL_KEY, copy=True)
        assert "enrichment" in result

        result = cc.gr.enrichment(
            adata, group_key=GROUP_KEY, label_key=LABEL_KEY, copy=True, observed_expected=True
        )
        for field in ("enrichment", "observed", "expected"):
            assert field in result

    def test_obs_exp(self):
        """Observed/expected matrices have the right shapes and lie in [0, 1]."""
        cc.gr.enrichment(adata, group_key=GROUP_KEY, label_key=LABEL_KEY, observed_expected=True)
        assert key in adata.uns
        stored = adata.uns[key]
        assert "enrichment" in stored
        assert "observed" in stored
        assert "expected" in stored

        n_groups = adata.obs[GROUP_KEY].cat.categories.shape[0]
        n_labels = adata.obs[LABEL_KEY].cat.categories.shape[0]
        observed = stored["observed"]
        expected = stored["expected"]
        assert observed.shape == (n_groups, n_labels)
        assert expected.shape[0] == n_groups
        assert np.all((observed >= 0) & (observed <= 1))
        assert np.all((expected >= 0) & (expected <= 1))

    def test_perm(self):
        """Permutation-based estimates agree with the analytical ones."""
        analytical = cc.gr.enrichment(
            adata, group_key=GROUP_KEY, label_key=LABEL_KEY, pvalues=False, copy=True, observed_expected=True
        )
        permuted = cc.gr.enrichment(
            adata,
            group_key=GROUP_KEY,
            label_key=LABEL_KEY,
            pvalues=True,
            n_perms=5000,
            observed_expected=True,
            copy=True,
        )
        for field in ("enrichment", "observed", "expected"):
            np.testing.assert_allclose(analytical[field], permuted[field], atol=0.1)
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/tests/graph/test_nhood.py | tests/graph/test_nhood.py | import numpy as np
import scipy
import squidpy as sq
from squidpy._constants._pkg_constants import Key
import cellcharter as cc
# Cluster annotation column and the canonical squidpy uns key for it.
_CK = "cell type"
key = Key.uns.nhood_enrichment(_CK)

# Shared IMC dataset with a pruned Delaunay graph, built once at import time.
adata = sq.datasets.imc()
sq.gr.spatial_neighbors(adata, coord_type="generic", delaunay=True)
cc.gr.remove_long_links(adata)
class TestNhoodEnrichment:
    """Tests for :func:`cellcharter.gr.nhood_enrichment` on the IMC dataset."""

    def test_enrichment(self):
        """Without log fold change the enrichment values lie in [-1, 1]."""
        cc.gr.nhood_enrichment(adata, cluster_key=_CK, only_inter=False, log_fold_change=False)
        values = adata.uns[key]["enrichment"]
        assert np.all((values >= -1) & (values <= 1))
        del adata.uns[key]

    def test_fold_change(self):
        """Log-fold-change mode runs without errors."""
        cc.gr.nhood_enrichment(adata, cluster_key=_CK, log_fold_change=True)
        del adata.uns[key]

    def test_nhood_obs_exp(self):
        """Observed/expected matrices have matching shapes and lie in [0, 1]."""
        cc.gr.nhood_enrichment(adata, cluster_key=_CK, only_inter=False, observed_expected=True)
        observed = adata.uns[key]["observed"]
        expected = adata.uns[key]["expected"]
        assert observed.shape[0] == adata.obs[_CK].cat.categories.shape[0]
        assert observed.shape == expected.shape
        assert np.all((observed >= 0) & (observed <= 1))
        assert np.all((expected >= 0) & (expected <= 1))
        del adata.uns[key]

    def test_symmetric(self):
        """symmetric=True yields a symmetric matrix for every option combination."""
        for log_fold_change in (True, False):
            for only_inter in (False, True):
                result = cc.gr.nhood_enrichment(
                    adata,
                    cluster_key=_CK,
                    symmetric=True,
                    log_fold_change=log_fold_change,
                    only_inter=only_inter,
                    copy=True,
                )
                enrichment = result["enrichment"]
                if only_inter:
                    enrichment[enrichment.isna()] = 0  # issymmetric fails with NaNs
                assert scipy.linalg.issymmetric(enrichment.values, atol=1e-02)

    def test_perm(self):
        """Permutation-based estimates agree with the analytical ones."""
        analytical = cc.gr.nhood_enrichment(
            adata, cluster_key=_CK, only_inter=True, pvalues=False, observed_expected=True, copy=True
        )
        permuted = cc.gr.nhood_enrichment(
            adata,
            cluster_key=_CK,
            only_inter=True,
            pvalues=True,
            n_perms=5000,
            observed_expected=True,
            copy=True,
            n_jobs=15,
        )
        for field in ("enrichment", "observed", "expected"):
            np.testing.assert_allclose(analytical[field], permuted[field], atol=0.1)
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/tests/plotting/test_plot_stability.py | tests/plotting/test_plot_stability.py | import pytest
import scipy.sparse as sps
import squidpy as sq
import cellcharter as cc
class TestPlotStability:
    """Smoke tests for :func:`cellcharter.pl.autok_stability`."""

    @pytest.mark.parametrize("dataset_name", ["mibitof"])
    def test_spatial_proteomics(self, dataset_name: str):
        """Plot the stability of a pre-trained ClusterAutoK model without errors."""
        loader = getattr(sq.datasets, dataset_name)
        adata = loader()
        if sps.issparse(adata.X):
            adata.X = adata.X.todense()

        # Build the neighborhood features the saved model was trained on.
        sq.gr.spatial_neighbors(adata, coord_type="generic", delaunay=True)
        cc.gr.remove_long_links(adata)
        cc.gr.aggregate_neighbors(adata, n_layers=3)

        model = cc.tl.ClusterAutoK.load(f"tests/_models/cellcharter_autok_{dataset_name}")
        cc.pl.autok_stability(model)
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
CSOgroup/cellcharter | https://github.com/CSOgroup/cellcharter/blob/461165f57ee9ac5614f550d99a82bf01fb086948/tests/plotting/test_group.py | tests/plotting/test_group.py | import matplotlib.pyplot as plt
import pytest
import squidpy as sq
# Compatibility shim for ``get_cmap`` across Matplotlib versions.
# ``matplotlib.pyplot.get_cmap`` was deprecated in 3.7 and removed in 3.9.
# Note: ``matplotlib.colormaps`` is a ColormapRegistry instance, not a module,
# so ``from matplotlib.colormaps import get_cmap`` always raises ImportError —
# the previous code therefore unconditionally fell back to the removed pyplot
# function and broke on Matplotlib >= 3.9. Bind the registry method instead.
try:
    from matplotlib import colormaps

    get_cmap = colormaps.get_cmap  # Matplotlib >= 3.6
except (ImportError, AttributeError):
    from matplotlib.pyplot import get_cmap  # older Matplotlib
import cellcharter as cc
# Observation columns the enrichment is computed over.
GROUP_KEY = "batch"
LABEL_KEY = "Cell_class"
# ``adata.uns`` key written by cc.gr.enrichment for this group/label pair.
key = GROUP_KEY + "_" + LABEL_KEY + "_enrichment"

# Shared dataset plus two copies with enrichment precomputed at import time:
# one empirically (permutation p-values) and one analytically.
adata = sq.datasets.merfish()

adata_empirical = adata.copy()
cc.gr.enrichment(adata_empirical, group_key=GROUP_KEY, label_key=LABEL_KEY, pvalues=True, n_perms=1000)

adata_analytical = adata.copy()
cc.gr.enrichment(adata_analytical, group_key=GROUP_KEY, label_key=LABEL_KEY)
class TestProportion:
    """Tests for :func:`cellcharter.pl.proportion`."""

    def test_proportion(self):
        """Default proportion plot renders without errors."""
        cc.pl.proportion(adata, group_key=GROUP_KEY, label_key=LABEL_KEY)

    def test_groups_labels(self):
        """Restricting to subsets of groups and labels is supported."""
        selected_groups = adata.obs[GROUP_KEY].cat.categories[:3]
        selected_labels = adata.obs[LABEL_KEY].cat.categories[:4]
        cc.pl.proportion(
            adata,
            group_key=GROUP_KEY,
            label_key=LABEL_KEY,
            groups=selected_groups,
            labels=selected_labels,
        )
class TestPlotEnrichment:
    """Tests for :func:`cellcharter.pl.enrichment`."""

    @pytest.mark.parametrize("adata_enrichment", [adata_analytical, adata_empirical])
    def test_enrichment(self, adata_enrichment):
        """Default plot works for both analytical and empirical enrichment."""
        cc.pl.enrichment(adata_enrichment, group_key=GROUP_KEY, label_key=LABEL_KEY)

    @pytest.mark.parametrize("adata_enrichment", [adata_analytical, adata_empirical])
    @pytest.mark.parametrize("label_cluster", [False, True])
    @pytest.mark.parametrize("show_pvalues", [False, True])
    @pytest.mark.parametrize("groups", [None, adata.obs[GROUP_KEY].cat.categories[:3]])
    @pytest.mark.parametrize("labels", [None, adata.obs[LABEL_KEY].cat.categories[:4]])
    @pytest.mark.parametrize("size_threshold", [1, 2.5])
    @pytest.mark.parametrize("palette", [None, "coolwarm", get_cmap("coolwarm")])
    @pytest.mark.parametrize("figsize", [None, (10, 8)])
    @pytest.mark.parametrize("alpha,edgecolor", [(1, "red"), (0.5, "blue")])
    def test_params(
        self,
        adata_enrichment,
        label_cluster,
        show_pvalues,
        groups,
        labels,
        size_threshold,
        palette,
        figsize,
        alpha,
        edgecolor,
    ):
        """Every combination of plotting options renders without raising."""
        cc.pl.enrichment(
            adata_enrichment,
            group_key=GROUP_KEY,
            label_key=LABEL_KEY,
            label_cluster=label_cluster,
            groups=groups,
            labels=labels,
            show_pvalues=show_pvalues,
            size_threshold=size_threshold,
            palette=palette,
            figsize=figsize,
            alpha=alpha,
            edgecolor=edgecolor,
        )
        # Close the figure so the parametrized runs do not accumulate open figures.
        plt.close()

    def test_no_pvalues(self):
        """Options that need p-values must fail when the enrichment lacks them."""
        for kwargs in ({"show_pvalues": True}, {"significance": 0.01}, {"significant_only": True}):
            with pytest.raises(ValueError):
                cc.pl.enrichment(adata_analytical, "group_key", "label_key", **kwargs)

    def test_obs_exp(self):
        """Plotting still works when observed/expected matrices are stored."""
        cc.gr.enrichment(adata, group_key=GROUP_KEY, label_key=LABEL_KEY, observed_expected=True)
        cc.pl.enrichment(adata, group_key=GROUP_KEY, label_key=LABEL_KEY)

    def test_enrichment_no_enrichment_data(self):
        """Plotting without a prior enrichment computation raises."""
        with pytest.raises(ValueError):
            cc.pl.enrichment(adata, "group_key", "label_key")

    def test_size_threshold_zero(self):
        """A non-positive size threshold is rejected."""
        with pytest.raises(ValueError):
            cc.pl.enrichment(adata_empirical, group_key=GROUP_KEY, label_key=LABEL_KEY, size_threshold=0)
| python | BSD-3-Clause | 461165f57ee9ac5614f550d99a82bf01fb086948 | 2026-01-05T07:13:12.201168Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.