repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/dataset/_partial_dataset.py | stable_nalu/dataset/_partial_dataset.py |
import torch
class PartialDataset(torch.utils.data.Dataset):
def __init__(self, full_dataset, offset, length):
super().__init__()
self.full_dataset = full_dataset
self.offset = offset
self.length = length
def __len__(self):
return self.length
def __getitem__(self, index):
if index >= self.length:
raise IndexError()
return self.full_dataset[self.offset + index]
def __iter__(self):
for i in range(self.length):
yield self[i]
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/dataset/_simple_function_abstact.py | stable_nalu/dataset/_simple_function_abstact.py |
import itertools
import math
import numpy as np
import torch
import torch.utils.data
from ._dataloader import FastDataLoader
class ARITHMETIC_FUNCTIONS_STRINGIY:
@staticmethod
def add(*subsets):
return ' + '.join(map(str, subsets))
@staticmethod
def sub(a, b, *extra):
return f'{a} - {b}'
@staticmethod
def mul(*subsets):
return ' * '.join(map(str, subsets))
def div(a, b):
return f'{a} / {b}'
def squared(a, *extra):
return f'{a}**2'
def root(a, *extra):
return f'sqrt({a})'
class ARITHMETIC_FUNCTIONS:
@staticmethod
def add(*subsets):
return np.sum(subsets, axis=0)
@staticmethod
def sub(a, b, *extra):
return a - b
@staticmethod
def mul(*subsets):
return np.prod(subsets, axis=0)
def div(a, b, *extra):
return a / b
def squared(a, *extra):
return a * a
def root(a, *extra):
return np.sqrt(a)
class SimpleFunctionDataset:
def __init__(self, operation, input_size,
subset_ratio=0.25,
overlap_ratio=0.5,
num_subsets=2,
simple=False,
seed=None,
use_cuda=False,
max_size=2**32-1):
super().__init__()
self._operation_name = operation
self._operation = getattr(ARITHMETIC_FUNCTIONS, operation)
self._max_size = max_size
self._use_cuda = use_cuda
self._rng = np.random.RandomState(seed)
if simple:
self._input_size = 4
self.subset_ranges = [(0, 4), (0, 2)]
else:
self._input_size = input_size
subset_size = math.floor(subset_ratio * input_size)
overlap_size = math.floor(overlap_ratio * subset_size)
self.subset_ranges = []
for subset_i in range(num_subsets):
start = 0 if subset_i == 0 else self.subset_ranges[-1][1] - overlap_size
end = start + subset_size
self.subset_ranges.append((start, end))
total_used_size = self.subset_ranges[-1][1]
if total_used_size > input_size:
raise ValueError('too many subsets given the subset and overlap ratios')
offset = self._rng.randint(0, input_size - total_used_size + 1)
self.subset_ranges = [
(start + offset, end + offset)
for start, end in self.subset_ranges
]
def print_operation(self):
subset_str = [
f'sum(v[{start}:{end}])' for start, end in self.subset_ranges
]
return getattr(ARITHMETIC_FUNCTIONS_STRINGIY, self._operation_name)(*subset_str)
def get_input_size(self):
return self._input_size
def fork(self, shape, sample_range, seed=None):
assert shape[-1] == self._input_size
rng = np.random.RandomState(self._rng.randint(0, 2**32 - 1) if seed is None else seed)
return SimpleFunctionDatasetFork(self, shape, sample_range, rng)
class SimpleFunctionDatasetFork(torch.utils.data.Dataset):
def __init__(self, parent, shape, sample_range, rng):
super().__init__()
if not isinstance(sample_range[0], list):
sample_range = [sample_range]
else:
if (sample_range[0][0] - sample_range[0][1]) != (sample_range[1][0] - sample_range[1][1]):
raise ValueError(f'unsymetric range for {sample_range}')
self._shape = shape
self._sample_range = sample_range
self._rng = rng
self._operation = parent._operation
self._input_size = parent._input_size
self._max_size = parent._max_size
self._use_cuda = parent._use_cuda
self._subset_ranges = parent.subset_ranges
def _multi_uniform_sample(self, batch_size):
if len(self._sample_range) == 1:
return self._rng.uniform(
low=self._sample_range[0][0],
high=self._sample_range[0][1],
size=(batch_size, ) + self._shape)
elif len(self._sample_range) == 2:
part_0 = self._rng.uniform(
low=self._sample_range[0][0],
high=self._sample_range[0][1],
size=(batch_size, ) + self._shape)
part_1 = self._rng.uniform(
low=self._sample_range[1][0],
high=self._sample_range[1][1],
size=(batch_size, ) + self._shape)
choose = self._rng.randint(
2,
size=(batch_size, ) + self._shape)
return np.where(choose, part_0, part_1)
else:
raise NotImplemented()
def __getitem__(self, select):
# Assume select represent a batch_size by using self[0:batch_size]
batch_size = select.stop - select.start if isinstance(select, slice) else 1
input_vector = self._multi_uniform_sample(batch_size)
# Compute a and b values
sum_axies = tuple(range(1, 1 + len(self._shape)))
subsets = [
np.sum(input_vector[..., start:end], axis=sum_axies)
for start, end in self._subset_ranges
]
# Compute result of arithmetic operation
output_scalar = self._operation(*subsets)[:, np.newaxis]
# If select is an index, just return the content of one row
if not isinstance(select, slice):
input_vector = input_vector[0]
output_scalar = output_scalar[0]
return (
torch.tensor(input_vector, dtype=torch.float32),
torch.tensor(output_scalar, dtype=torch.float32)
)
def __len__(self):
return self._max_size
def dataloader(self, batch_size=128):
return FastDataLoader(self, batch_size, self._use_cuda)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/dataset/number_translation.py | stable_nalu/dataset/number_translation.py |
import os.path as path
import numpy as np
import torch
import torch.utils.data
import torchvision
from ._dataloader import DataLoaderCudaWrapper
id2token = [
'<pad>',
'and',
'one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
'ten',
'eleven',
'twelve',
'thirteen',
'fourteen',
'fifteen',
'sixteen',
'seventeen',
'eighteen',
'nineteen',
'twenty',
'thirty',
'forty',
'fifty',
'sixty',
'seventy',
'eighty',
'ninety',
'hundred'
]
first_ten_tokens = id2token[2:11]
first_twenty_tokens = id2token[2:21]
tens_tokens = id2token[21:29]
pad_token = id2token[0]
and_token = id2token[1]
hundred_token = id2token[-1]
token2id = {
token: id
for id, token in enumerate(id2token)
}
class NumberTranslationDataset:
def __init__(self,
num_workers=1,
seed=None,
use_cuda=False):
super().__init__()
self._num_workers = num_workers
self._use_cuda = use_cuda
self._rng = np.random.RandomState(seed)
self._train_samples = []
self._valid_samples = []
self._test_samples = []
# add the first 19 tokens
missing_train_ids = set(token2id.values()) - set([pad_token])
for number in range(1, 20):
ids = self.encode(number)
self._train_samples.append((ids, number))
missing_train_ids -= set(ids)
# add the remaining 999 - 19 tokens
remaning_numbers = self._rng.permutation(1000 - 20) + 20
for number in remaning_numbers.tolist():
ids = self.encode(number)
# If the sample contains tokens that have not yet been included in
# the training set, then include it in the training set before
# any other dataset.
if len(missing_train_ids) and len(missing_train_ids & set(ids)) > 0:
self._train_samples.append((ids, number))
missing_train_ids -= set(ids)
# Because the above filter creates a slight bias, for less unique
# tokens, fill the test dataset first as this is the biggest. Thus
# the bias is going to matter the least.
elif len(self._test_samples) < 630:
self._test_samples.append((ids, number))
# In the unlikely case that no occurrences of some token haven't been
# seen, after 631 observations have been added to the test dataset,
# continue adding tokens to the validation dataset.
elif len(self._valid_samples) < 200:
self._valid_samples.append((ids, number))
# Adding to the training dataset last, completly ensures that we train
# over all tokens. Note that it is highly improbable that the order matters,
# but just in case the seed is bad the training dataset is appended last.
elif len(self._train_samples) < 169:
self._train_samples.append((ids, number))
missing_train_ids -= set(ids)
@staticmethod
def encode(number, as_strings=False):
if number <= 0 or number >= 1000:
raise ValueError(f'{number} must be between [1, 999]')
hundreds = number // 100
tens = (number % 100) // 10
ones = number % 10
tokens = []
if hundreds > 0:
tokens.append(first_ten_tokens[hundreds - 1])
tokens.append(hundred_token)
if len(tokens) > 0 and (tens > 0 or ones > 0):
tokens.append(and_token)
if 0 < tens * 10 + ones < 20:
# from [1, 19]
tokens.append(first_twenty_tokens[tens * 10 + ones - 1])
else:
# from [20, 99]
if tens > 0:
tokens.append(tens_tokens[tens - 2])
if ones > 0:
tokens.append(first_ten_tokens[ones - 1])
if as_strings:
return tokens
else:
# pad tokens to have length 6
tokens += [pad_token] * (5 - len(tokens))
return np.asarray([token2id[token] for token in tokens], dtype=np.int64)
def fork(self, subset='train'):
if subset not in {'train', 'valid', 'test'}:
raise ValueError(f'subset must be either train, valid or test, it is {subset}')
if subset == 'train':
dataset = self._train_samples
elif subset == 'valid':
dataset = self._valid_samples
elif subset == 'test':
dataset = self._test_samples
return NumberTranslationDatasetFork(self, dataset)
class NumberTranslationDatasetFork(torch.utils.data.Dataset):
def __init__(self, parent, dataset):
super().__init__()
self._num_workers = parent._num_workers
self._use_cuda = parent._use_cuda
self._dataset = dataset
def __getitem__(self, index):
x, t = self._dataset[index]
return (
torch.tensor(x, dtype=torch.int64),
torch.tensor([t], dtype=torch.float32)
)
def __len__(self):
return len(self._dataset)
def dataloader(self, batch_size=64, shuffle=True):
batcher = torch.utils.data.DataLoader(
self,
batch_size=batch_size,
shuffle=shuffle,
num_workers=self._num_workers)
if self._use_cuda:
return DataLoaderCudaWrapper(batcher)
else:
return batcher
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/network/sequential_mnist.py | stable_nalu/network/sequential_mnist.py |
import torch
from ..abstract import ExtendedTorchModule
from ..layer import GeneralizedLayer, GeneralizedCell
from .regression_mnist import RegressionMnistNetwork
class SequentialMnistNetwork(ExtendedTorchModule):
UNIT_NAMES = GeneralizedCell.UNIT_NAMES
def __init__(self, unit_name, output_size, writer=None,
mnist_digits=[0,1,2,3,4,5,6,7,8,9],
softmax_transform=False,
mnist_outputs=1, model_simplification='none',
nac_mul='none', eps=1e-7,
**kwags):
super().__init__('network', writer=writer, **kwags)
self.unit_name = unit_name
self.output_size = output_size
self.nac_mul = nac_mul
self.eps = eps
self.model_simplification = model_simplification
# TODO: maybe don't make them learnable, properly zero will surfise here
if unit_name == 'LSTM':
self.register_buffer('zero_state_h', torch.Tensor(self.output_size))
self.register_buffer('zero_state_c', torch.Tensor(self.output_size))
else:
self.register_buffer('zero_state', torch.Tensor(self.output_size))
self.image2label = RegressionMnistNetwork(
mnist_digits=mnist_digits,
mnist_outputs=mnist_outputs,
softmax_transform=softmax_transform
)
if nac_mul == 'mnac':
unit_name = unit_name[0:-3] + 'MNAC'
if self.model_simplification == 'none':
self.recurent_cell = GeneralizedCell(mnist_outputs, self.output_size,
unit_name,
writer=self.writer,
**kwags)
self.reset_parameters()
def _best_init_state(self):
if self.nac_mul == 'normal' or self.nac_mul == 'mnac':
return 1
elif self.nac_mul == 'none':
return 0
def reset_parameters(self):
if self.unit_name == 'LSTM':
torch.nn.init.constant_(self.zero_state_h, self._best_init_state())
torch.nn.init.constant_(self.zero_state_c, self._best_init_state())
else:
torch.nn.init.constant_(self.zero_state, self._best_init_state())
self.image2label.reset_parameters()
if self.model_simplification == 'none':
self.recurent_cell.reset_parameters()
def _forward_trainable_accumulator(self, x):
y_all = []
l_all = []
# Perform recurrent iterations over the input
if self.unit_name == 'LSTM':
h_tm1 = (
self.zero_state_h.repeat(x.size(0), 1),
self.zero_state_c.repeat(x.size(0), 1)
)
else:
h_tm1 = self.zero_state.repeat(x.size(0), 1)
for t in range(x.size(1)):
x_t = x[:, t]
l_t = self.image2label(x_t)
if self.nac_mul == 'none' or self.nac_mul == 'mnac':
h_t = self.recurent_cell(l_t, h_tm1)
elif self.nac_mul == 'normal':
h_t = torch.exp(self.recurent_cell(
torch.log(torch.abs(l_t) + self.eps),
torch.log(torch.abs(h_tm1) + self.eps)
))
y_all.append(h_t[0] if self.unit_name == 'LSTM' else h_t)
l_all.append(l_t)
h_tm1 = h_t
return (
torch.stack(l_all).transpose(0, 1),
torch.stack(y_all).transpose(0, 1)
)
def _forward_solved_accumulator(self, x):
y_all = []
l_all = []
h_tm1 = self._best_init_state()
for t in range(x.size(1)):
x_t = x[:, t]
l_t = self.image2label(x_t)
if self.nac_mul == 'normal' or self.nac_mul == 'mnac':
h_t = h_tm1 * l_t
elif self.nac_mul == 'none':
h_t = h_tm1 + l_t
y_all.append(h_t)
l_all.append(l_t)
h_tm1 = h_t
return (
torch.stack(l_all).transpose(0, 1),
torch.stack(y_all).transpose(0, 1)
)
def _forward_pass_through(self, x):
y_all = []
l_all = []
for t in range(x.size(1)):
x_t = x[:, t]
l_t = self.image2label(x_t)
y_all.append(l_t)
l_all.append(l_t)
return (
torch.stack(l_all).transpose(0, 1),
torch.stack(y_all).transpose(0, 1)
)
def forward(self, x):
"""Performs recurrent iterations over the input.
Arguments:
input: Expected to have the shape [obs, time, channels=1, width, height]
"""
if self.model_simplification == 'none':
return self._forward_trainable_accumulator(x)
elif self.model_simplification == 'solved-accumulator':
return self._forward_solved_accumulator(x)
elif self.model_simplification == 'pass-through':
return self._forward_pass_through(x)
else:
raise ValueError('incorrect model_simplification value')
def extra_repr(self):
return 'unit_name={}, output_size={}'.format(
self.unit_name, self.output_size
)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/network/sequential_svhn.py | stable_nalu/network/sequential_svhn.py |
import torch
import torchvision
from ..abstract import ExtendedTorchModule
from ..layer import GeneralizedLayer, GeneralizedCell
class SequentialSvhnNetwork(ExtendedTorchModule):
UNIT_NAMES = GeneralizedCell.UNIT_NAMES
def __init__(self, unit_name, output_size, writer=None,
svhn_outputs=1, resnet='resnet18',
model_simplification='none',
nac_mul='none', eps=1e-7,
**kwags):
super().__init__('network', writer=writer, **kwags)
self.unit_name = unit_name
self.output_size = output_size
self.nac_mul = nac_mul
self.eps = eps
self.model_simplification = model_simplification
# TODO: maybe don't make them learnable, properly zero will surfise here
if unit_name == 'LSTM':
self.register_buffer('zero_state_h', torch.Tensor(self.output_size))
self.register_buffer('zero_state_c', torch.Tensor(self.output_size))
else:
self.register_buffer('zero_state', torch.Tensor(self.output_size))
self.image2label = getattr(torchvision.models, resnet)(
num_classes=svhn_outputs
)
if nac_mul == 'mnac':
unit_name = unit_name[0:-3] + 'MNAC'
if self.model_simplification == 'none':
self.recurent_cell = GeneralizedCell(svhn_outputs, self.output_size,
unit_name,
writer=self.writer,
**kwags)
self.reset_parameters()
def _best_init_state(self):
if self.nac_mul == 'normal' or self.nac_mul == 'mnac':
return 1
elif self.nac_mul == 'none':
return 0
def reset_parameters(self):
if self.unit_name == 'LSTM':
torch.nn.init.constant_(self.zero_state_h, self._best_init_state())
torch.nn.init.constant_(self.zero_state_c, self._best_init_state())
else:
torch.nn.init.constant_(self.zero_state, self._best_init_state())
# self.image2label.reset_parameters()
if self.model_simplification == 'none':
self.recurent_cell.reset_parameters()
def _forward_trainable_accumulator(self, x):
y_all = []
l_all = []
# Perform recurrent iterations over the input
if self.unit_name == 'LSTM':
h_tm1 = (
self.zero_state_h.repeat(x.size(0), 1),
self.zero_state_c.repeat(x.size(0), 1)
)
else:
h_tm1 = self.zero_state.repeat(x.size(0), 1)
for t in range(x.size(1)):
x_t = x[:, t]
l_t = self.image2label(x_t)
if self.nac_mul == 'none' or self.nac_mul == 'mnac':
h_t = self.recurent_cell(l_t, h_tm1)
elif self.nac_mul == 'normal':
h_t = torch.exp(self.recurent_cell(
torch.log(torch.abs(l_t) + self.eps),
torch.log(torch.abs(h_tm1) + self.eps)
))
y_all.append(h_t[0] if self.unit_name == 'LSTM' else h_t)
l_all.append(l_t)
h_tm1 = h_t
return (
torch.stack(l_all).transpose(0, 1),
torch.stack(y_all).transpose(0, 1)
)
def _forward_solved_accumulator(self, x):
y_all = []
l_all = []
h_tm1 = self._best_init_state()
for t in range(x.size(1)):
x_t = x[:, t]
l_t = self.image2label(x_t)
if self.nac_mul == 'normal' or self.nac_mul == 'mnac':
h_t = h_tm1 * l_t
elif self.nac_mul == 'none':
h_t = h_tm1 + l_t
y_all.append(h_t)
l_all.append(l_t)
h_tm1 = h_t
return (
torch.stack(l_all).transpose(0, 1),
torch.stack(y_all).transpose(0, 1)
)
def _forward_pass_through(self, x):
y_all = []
l_all = []
for t in range(x.size(1)):
x_t = x[:, t]
l_t = self.image2label(x_t)
y_all.append(l_t)
l_all.append(l_t)
return (
torch.stack(l_all).transpose(0, 1),
torch.stack(y_all).transpose(0, 1)
)
def forward(self, x):
"""Performs recurrent iterations over the input.
Arguments:
input: Expected to have the shape [obs, time, channels=1, width, height]
"""
if self.model_simplification == 'none':
return self._forward_trainable_accumulator(x)
elif self.model_simplification == 'solved-accumulator':
return self._forward_solved_accumulator(x)
elif self.model_simplification == 'pass-through':
return self._forward_pass_through(x)
else:
raise ValueError('incorrect model_simplification value')
def extra_repr(self):
return 'unit_name={}, output_size={}'.format(
self.unit_name, self.output_size
)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/network/simple_function_static_test.py | stable_nalu/network/simple_function_static_test.py |
from nose.tools import *
import torch
import numpy as np
from stable_nalu.dataset import SimpleFunctionStaticDataset
from stable_nalu.network import SimpleFunctionStaticNetwork
def test_linear_solves_add():
dataset = SimpleFunctionStaticDataset(operation='add', seed=0)
dataset_test = dataset.fork(input_range=1)
w_1 = np.zeros((100, 2), dtype=np.float32)
w_1[dataset.a_start:dataset.a_end, 0] = 1
w_1[dataset.b_start:dataset.b_end, 1] = 1
w_2 = np.ones((2, 1), dtype=np.float32)
network = SimpleFunctionStaticNetwork('linear', input_size=100)
network.layer_1.layer.weight.data = torch.tensor(np.transpose(w_1))
network.layer_2.layer.weight.data = torch.tensor(np.transpose(w_2))
for i, (x, t) in zip(range(100), dataset_test):
np.testing.assert_almost_equal(
network(x).detach().numpy(),
t.numpy(),
decimal=4
)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/network/simple_function_static.py | stable_nalu/network/simple_function_static.py |
import torch
from ..abstract import ExtendedTorchModule
from ..layer import GeneralizedLayer, BasicLayer
class SimpleFunctionStaticNetwork(ExtendedTorchModule):
UNIT_NAMES = GeneralizedLayer.UNIT_NAMES
def __init__(self, unit_name, input_size=100, hidden_size=2, writer=None, first_layer=None, nac_mul='none', eps=1e-7, **kwags):
super().__init__('network', writer=writer, **kwags)
self.unit_name = unit_name
self.input_size = input_size
self.nac_mul = nac_mul
self.eps = eps
if first_layer is not None:
unit_name_1 = first_layer
else:
unit_name_1 = unit_name
self.layer_1 = GeneralizedLayer(input_size, hidden_size,
unit_name_1,
writer=self.writer,
name='layer_1',
eps=eps, **kwags)
if nac_mul == 'mnac':
unit_name_2 = unit_name[0:-3] + 'MNAC'
else:
unit_name_2 = unit_name
self.layer_2 = GeneralizedLayer(hidden_size, 1,
'linear' if unit_name_2 in BasicLayer.ACTIVATIONS else unit_name_2,
writer=self.writer,
name='layer_2',
eps=eps, **kwags)
self.reset_parameters()
self.z_1_stored = None
def reset_parameters(self):
self.layer_1.reset_parameters()
self.layer_2.reset_parameters()
def regualizer(self):
if self.nac_mul == 'max-safe':
return super().regualizer({
'z': torch.mean(torch.relu(1 - self.z_1_stored))
})
else:
return super().regualizer()
def forward(self, input):
self.writer.add_summary('x', input)
z_1 = self.layer_1(input)
self.z_1_stored = z_1
self.writer.add_summary('z_1', z_1)
if self.nac_mul == 'none' or self.nac_mul == 'mnac':
z_2 = self.layer_2(z_1)
elif self.nac_mul == 'normal':
z_2 = torch.exp(self.layer_2(torch.log(torch.abs(z_1) + self.eps)))
elif self.nac_mul == 'safe':
z_2 = torch.exp(self.layer_2(torch.log(torch.abs(z_1 - 1) + 1)))
elif self.nac_mul == 'max-safe':
z_2 = torch.exp(self.layer_2(torch.log(torch.relu(z_1 - 1) + 1)))
else:
raise ValueError(f'Unsupported nac_mul option ({self.nac_mul})')
self.writer.add_summary('z_2', z_2)
return z_2
def extra_repr(self):
return 'unit_name={}, input_size={}'.format(
self.unit_name, self.input_size
)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/network/__init__.py | stable_nalu/network/__init__.py |
from .simple_function_static import SimpleFunctionStaticNetwork
from .simple_function_recurrent import SimpleFunctionRecurrentNetwork
from .sequential_svhn import SequentialSvhnNetwork
from .sequential_mnist import SequentialMnistNetwork
from .regression_mnist import RegressionMnistNetwork
from .number_translation import NumberTranslationNetwork | python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/network/simple_function_recurrent.py | stable_nalu/network/simple_function_recurrent.py |
import torch
from ..abstract import ExtendedTorchModule
from ..layer import GeneralizedLayer, GeneralizedCell
class SimpleFunctionRecurrentNetwork(ExtendedTorchModule):
UNIT_NAMES = GeneralizedCell.UNIT_NAMES
def __init__(self, unit_name, input_size=10, writer=None, **kwargs):
super().__init__('network', writer=writer, **kwargs)
self.unit_name = unit_name
self.input_size = input_size
self.hidden_size = 2
# Since for the 'mul' problem, the zero_state should be 1, and for the
# 'add' problem it should be 0. The zero_states are allowed to be
# # optimized.
if unit_name == 'LSTM':
self.zero_state = torch.nn.ParameterDict({
'h_t0': torch.nn.Parameter(torch.Tensor(self.hidden_size)),
'c_t0': torch.nn.Parameter(torch.Tensor(self.hidden_size))
})
else:
self.zero_state = torch.nn.Parameter(torch.Tensor(self.hidden_size))
self.recurent_cell = GeneralizedCell(input_size, self.hidden_size,
unit_name,
writer=self.writer,
name='recurrent_layer',
**kwargs)
self.output_layer = GeneralizedLayer(self.hidden_size, 1,
'linear'
if unit_name in {'GRU', 'LSTM', 'RNN-tanh', 'RNN-ReLU'}
else unit_name,
writer=self.writer,
name='output_layer',
**kwargs)
self.reset_parameters()
def reset_parameters(self):
if self.unit_name == 'LSTM':
for zero_state in self.zero_state.values():
torch.nn.init.zeros_(zero_state)
else:
torch.nn.init.zeros_(self.zero_state)
self.recurent_cell.reset_parameters()
self.output_layer.reset_parameters()
def forward(self, x):
"""Performs recurrent iterations over the input.
Arguments:
input: Expected to have the shape [obs, time, dims]
"""
# Perform recurrent iterations over the input
if self.unit_name == 'LSTM':
h_tm1 = tuple(zero_state.repeat(x.size(0), 1) for zero_state in self.zero_state.values())
else:
h_tm1 = self.zero_state.repeat(x.size(0), 1)
for t in range(x.size(1)):
x_t = x[:, t]
h_t = self.recurent_cell(x_t, h_tm1)
h_tm1 = h_t
# Grap the final hidden output and use as the output from the recurrent layer
z_1 = h_t[0] if self.unit_name == 'LSTM' else h_t
z_2 = self.output_layer(z_1)
return z_2
def extra_repr(self):
return 'unit_name={}, input_size={}'.format(
self.unit_name, self.input_size
)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/network/regression_mnist.py | stable_nalu/network/regression_mnist.py |
import torch
from ..abstract import ExtendedTorchModule
from ..layer import GeneralizedLayer, GeneralizedCell
# Copied from https://github.com/pytorch/examples/blob/master/mnist/main.py, just added a
# reset_parameters method and changed final layer to have one output.
class RegressionMnistNetwork(ExtendedTorchModule):
def __init__(self,
mnist_digits=[0,1,2,3,4,5,6,7,8,9],
softmax_transform=False,
mnist_outputs=1, **kwargs):
super().__init__('cnn', **kwargs)
self._softmax_transform = softmax_transform
self.conv1 = torch.nn.Conv2d(1, 20, 5, 1)
self.conv2 = torch.nn.Conv2d(20, 50, 5, 1)
self.fc1 = torch.nn.Linear(4*4*50, 500)
if self._softmax_transform:
if mnist_outputs:
raise ValueError(f"mnist_outputs can't be > 1 with softmax_transform")
self.fc2 = torch.nn.Linear(500, len(mnist_digits))
self.register_buffer('fc3', torch.tensor(mnist_digits, dtype=torch.float32).reshape(1, -1))
else:
self.fc2 = torch.nn.Linear(500, mnist_outputs)
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
self.fc1.reset_parameters()
self.fc2.reset_parameters()
def forward(self, x):
x = torch.nn.functional.relu(self.conv1(x))
x = torch.nn.functional.max_pool2d(x, 2, 2)
x = torch.nn.functional.relu(self.conv2(x))
x = torch.nn.functional.max_pool2d(x, 2, 2)
x = x.view(-1, 4*4*50)
x = torch.nn.functional.relu(self.fc1(x))
x = self.fc2(x)
if self._softmax_transform:
x = torch.nn.functional.softmax(x, dim=-1)
x = (x * self.fc3).sum(1, keepdim=True)
return x
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/network/number_translation.py | stable_nalu/network/number_translation.py |
import torch
from ..abstract import ExtendedTorchModule
from ..layer import GeneralizedLayer, GeneralizedCell
class NumberTranslationNetwork(ExtendedTorchModule):
UNIT_NAMES = GeneralizedCell.UNIT_NAMES
def __init__(self, unit_name,
embedding_size=2, # 1 for the number, 1 for the gate ?
hidden_size=2, # 1 for the number, 1 for the gate ?
dictionary_size=30,
writer=None,
**kwags):
super().__init__('network', writer=writer, **kwags)
self.unit_name = unit_name
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.dictionary_size = dictionary_size
self.register_buffer('lstm_zero_state_h', torch.Tensor(hidden_size))
self.register_buffer('lstm_zero_state_c', torch.Tensor(hidden_size))
self.register_buffer('output_zero_state', torch.Tensor(1))
self.embedding = torch.nn.Embedding(dictionary_size, embedding_size)
self.lstm_cell = torch.nn.LSTMCell(embedding_size, hidden_size)
self.output_cell = GeneralizedCell(hidden_size, 1,
unit_name,
writer=self.writer,
name='recurrent_output',
**kwags)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.zeros_(self.lstm_zero_state_h)
torch.nn.init.zeros_(self.lstm_zero_state_c)
torch.nn.init.zeros_(self.output_zero_state)
self.embedding.reset_parameters()
self.lstm_cell.reset_parameters()
self.output_cell.reset_parameters()
def forward(self, x):
"""Performs recurrent iterations over the input.
Arguments:
input: Expected to have the shape [obs, time]
"""
# Perform recurrent iterations over the input
h_1_tm1 = self.lstm_zero_state_h.repeat(x.size(0), 1)
c_1_tm1 = self.lstm_zero_state_c.repeat(x.size(0), 1)
h_2_tm1 = self.output_zero_state.repeat(x.size(0), 1)
for t in range(x.size(1)):
x_t = x[:, t]
h_0_t = self.embedding(x_t)
h_1_t, c_1_t = self.lstm_cell(h_0_t, (h_1_tm1, c_1_tm1))
h_2_t = self.output_cell(h_1_t, h_2_tm1)
# Just use previuse results if x is a <pad> token
h_2_t = torch.where(x[:, t].view(-1, 1) == 0, h_2_tm1, h_2_t)
# Prepear for next iterations
h_1_tm1 = h_1_t
c_1_tm1 = c_1_t
h_2_tm1 = h_2_t
return h_2_t
def extra_repr(self):
return 'unit_name={}, embedding_size={}, hidden_size={}, dictionary_size={}'.format(
self.unit_name, self.embedding_size, self.hidden_size, self.dictionary_size
)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/functional/nac_weight_test.py | stable_nalu/functional/nac_weight_test.py |
import numpy as np
import torch
from stable_nalu.functional import nac_weight
def test_nac_weight_calculates_backward_correctly():
w_hat = torch.randn(100, 2, requires_grad=True, dtype=torch.float64)
m_hat = torch.randn(100, 2, requires_grad=True, dtype=torch.float64)
torch.autograd.gradcheck(
lambda w_hat, m_hat: torch.sum((2 * nac_weight(w_hat * 2, m_hat * 2) - 0)**2),
[w_hat, m_hat]
)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/functional/sparsity_error.py | stable_nalu/functional/sparsity_error.py |
import torch
def sparsity_error(W):
W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
return torch.max(W_error)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/functional/batch_linear.py | stable_nalu/functional/batch_linear.py |
import torch
def batch_linear(x, W, b=None):
"""Computes y_i = x_i W_i + b_i where i is each observation index.
This is similar to `torch.nn.functional.linear`, but a version that
supports a different W for each observation.
x: has shape [obs, in_dims]
W: has shape [obs, out_dims, in_dims]
b: has shape [out_dims]
"""
if x.size()[1] != W.size()[-1]:
raise ValueError(
f'the in_dim of x ({x.size()[1]}) does not match in_dim of W ({W.size()[-1]})')
if x.size()[0] != W.size()[0]:
raise ValueError(
f'the obs of x ({x.size()[0]}) does not match obs of W ({W.size()[0]})')
obs = x.size()[0]
in_dims = x.size()[1]
out_dims = W.size()[1]
x = x.view(obs, 1, in_dims)
W = W.transpose(-2, -1)
if b is None:
return torch.bmm(x, W).view(obs, out_dims)
else:
b = b.view(1, 1, out_dims)
return torch.baddbmm(1, b, 1, x, W).view(obs, out_dims)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/functional/mnac.py | stable_nalu/functional/mnac.py |
import torch
def mnac(x, W, mode='prod'):
out_size, in_size = W.size()
x = x.view(x.size()[0], in_size, 1)
W = W.t().view(1, in_size, out_size)
if mode == 'prod':
return torch.prod(x * W + 1 - W, -2)
elif mode == 'exp-log':
return torch.exp(torch.sum(torch.log(x * W + 1 - W), -2))
elif mode == 'no-idendity':
return torch.prod(x * W, -2)
else:
raise ValueError(f'mnac mode "{mode}" is not implemented')
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/functional/gumbel.py | stable_nalu/functional/gumbel.py |
import torch
def sample_gumbel(placeholder, eps=1e-10, reuse=False):
"""Samples Gumbel(0, 1) values into the placeholder"""
# Uniform sample between [eps, 1)
if reuse:
uniform = placeholder
else:
uniform = placeholder.uniform_(eps, 1)
# Inverse transform
g = -torch.log(-torch.log(uniform))
return g
def sample_gumbel_softmax(placeholder, logits, tau, **kwargs):
"""Samples values from a gumbel softmax
Arguments:
placeholder: A tensor used to specify the device storage
(cpu or cuda). Note that the content of the placeholder
will be overwritten.
logits: log properbilities, you can use log_softmax to
transform a tensor into log properbilities.
tau: the temperature used, must be tau \in (0, \infty]. tau < 1
makes the distribution more categorical. tau > 1 makes
the distribution more uniform.
"""
g = sample_gumbel(placeholder, **kwargs)
return torch.nn.functional.softmax((logits + g) / tau, dim=-1)
def sample_gumbel_max(placeholder, logits, **kwargs):
"""Samples values from a gumbel max
Arguments:
placeholder: A tensor used to specify the device storage
(cpu or cuda). Note that the content of the placeholder
will be overwritten.
logits: log properbilities, you can use log_softmax to
transform a tensor into log properbilities.
"""
g = sample_gumbel(placeholder, **kwargs)
indices = torch.argmax(logits + g, dim=-1)
# Convert indices to a one-hot encoding
one_hot = torch.zeros_like(logits)
one_hot.scatter_(-1, indices, 1)
return one_hot
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/functional/regualizer_nau_z.py | stable_nalu/functional/regualizer_nau_z.py |
import torch
class RegualizerNAUZ:
def __init__(self, zero=False):
self.zero = zero
self.stored_inputs = []
def __call__(self, W):
if self.zero:
return 0
x_mean = torch.mean(
torch.cat(self.stored_inputs, dim=0),
dim=0, keepdim=True
)
return torch.mean((1 - torch.abs(W)) * (0 - x_mean)**2)
def append_input(self, x):
if self.zero:
return
self.stored_inputs.append(x)
def reset(self):
if self.zero:
return
self.stored_inputs = []
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/functional/__init__.py | stable_nalu/functional/__init__.py |
from .gated_choice import gated_choice
from .nac_weight import nac_weight
from .gumbel import sample_gumbel_softmax, sample_gumbel_max
from .batch_linear import batch_linear
from .mnac import mnac
from .regualizer import Regualizer
from .regualizer_nmu_z import RegualizerNMUZ
from .regualizer_nau_z import RegualizerNAUZ
from .sparsity_error import sparsity_error | python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/functional/gated_choice_test.py | stable_nalu/functional/gated_choice_test.py |
import numpy as np
import torch
from stable_nalu.functional.gated_choice import GatedChoiceNormal
def test_gated_choice_calculates_backward_correctly_indpendent():
g_hat = torch.randn(20, 2, requires_grad=True, dtype=torch.float64)
a_hat = torch.randn(20, 2, requires_grad=True, dtype=torch.float64)
m_hat = torch.randn(20, 2, requires_grad=True, dtype=torch.float64)
torch.autograd.gradcheck(
lambda g_hat, a_hat, m_hat: torch.sum((2 * GatedChoiceNormal.apply(
torch.sigmoid(g_hat),
torch.sigmoid(a_hat),
torch.tanh(m_hat),
mode='test'
) - 0)**2),
[g_hat, a_hat, m_hat]
)
def test_gated_choice_calculates_backward_correctly_dependent():
g_hat = torch.randn(20, 2, requires_grad=True, dtype=torch.float64)
w_hat = torch.randn(20, 2, requires_grad=True, dtype=torch.float64)
torch.autograd.gradcheck(
lambda g_hat, a_hat, m_hat: torch.sum((2 * GatedChoiceNormal.apply(
torch.sigmoid(g_hat),
torch.tanh(m_hat) * torch.sigmoid(w_hat),
torch.tanh(m_hat) * torch.sigmoid(w_hat),
mode='test'
) - 0)**2),
[g_hat, w_hat, m_hat]
)
test_gated_choice_calculates_backward_correctly_indpendent()
test_gated_choice_calculates_backward_correctly_dependent()
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/functional/regualizer.py | stable_nalu/functional/regualizer.py |
import torch
class Regualizer:
def __init__(self, support='nac', type='bias', shape='squared', zero=False, zero_epsilon=0):
super()
self.zero_epsilon = 0
if zero:
self.fn = self._zero
else:
identifier = '_'.join(['', support, type, shape])
self.fn = getattr(self, identifier)
def __call__(self, W):
return self.fn(W)
def _zero(self, W):
return 0
def _mnac_bias_linear(self, W):
return torch.mean(torch.min(
torch.abs(W - self.zero_epsilon),
torch.abs(1 - W)
))
def _mnac_bias_squared(self, W):
return torch.mean((W - self.zero_epsilon)**2 * (1 - W)**2)
def _mnac_oob_linear(self, W):
return torch.mean(torch.relu(
torch.abs(W - 0.5 - self.zero_epsilon)
- 0.5 + self.zero_epsilon
))
def _mnac_oob_squared(self, W):
return torch.mean(torch.relu(
torch.abs(W - 0.5 - self.zero_epsilon)
- 0.5 + self.zero_epsilon
)**2)
def _nac_bias_linear(self, W):
W_abs = torch.abs(W)
return torch.mean(torch.min(
W_abs,
torch.abs(1 - W_abs)
))
def _nac_bias_squared(self, W):
return torch.mean(W**2 * (1 - torch.abs(W))**2)
def _nac_oob_linear(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1))
def _nac_oob_squared(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1)**2)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/functional/nac_weight.py | stable_nalu/functional/nac_weight.py |
import torch
class NACWeight(torch.autograd.Function):
r"""Implements the NAC weight operator
w = tanh(\hat{w}) * sigmoid(\hat{m})
"""
@staticmethod
def forward(ctx, w_hat, m_hat):
tanh_w_hat = torch.tanh(w_hat)
sigmoid_m_hat = torch.sigmoid(m_hat)
ctx.save_for_backward(tanh_w_hat, sigmoid_m_hat)
return tanh_w_hat * sigmoid_m_hat
@staticmethod
def backward(ctx, grad_output):
tanh_w_hat, sigmoid_m_hat = ctx.saved_tensors
return (
grad_output * (1 - tanh_w_hat*tanh_w_hat)*sigmoid_m_hat,
grad_output * tanh_w_hat*sigmoid_m_hat*(1-sigmoid_m_hat)
)
class NACWeightSign(torch.autograd.Function):
r"""Implements the NAC weight operator but with a hard gradient for \hat{m}
w = tanh(\hat{w}) * sigmoid(\hat{m})
dL/d\hat{m} = (dL/dw) (dw/d\hat{m})
= (dL/dw) * 0.1 * sign(\hat{w}) * sigmoid(\hat{m}) * (1 - sigmoid(\hat{m}))
"""
@staticmethod
def forward(ctx, w_hat, m_hat):
tanh_w_hat = torch.tanh(w_hat)
sigmoid_m_hat = torch.sigmoid(m_hat)
ctx.save_for_backward(w_hat, tanh_w_hat, sigmoid_m_hat)
return tanh_w_hat * sigmoid_m_hat
@staticmethod
def backward(ctx, grad_output):
w_hat, tanh_w_hat, sigmoid_m_hat = ctx.saved_tensors
return (
grad_output * (1 - tanh_w_hat*tanh_w_hat)*sigmoid_m_hat,
grad_output * 0.1*torch.sign(w_hat)*sigmoid_m_hat*(1-sigmoid_m_hat)
)
class NACWeightIndependent(torch.autograd.Function):
r"""Implements the NAC weight operator but with independent optimization.
The optimiation of \hat{w} is independent of \hat{m} and vice versa.
w = tanh(\hat{w}) * sigmoid(\hat{m})
dL/d\hat{w} = (dL/dw) (dw/d\hat{w})
= (dL/dw) (1 - tanh(\hat{w})^2)
dL/d\hat{m} = (dL/dw) (dw/d\hat{m})
= (dL/dw) sigmoid(\hat{m}) * (1 - sigmoid(\hat{m}))
"""
@staticmethod
def forward(ctx, w_hat, m_hat):
tanh_w_hat = torch.tanh(w_hat)
sigmoid_m_hat = torch.sigmoid(m_hat)
ctx.save_for_backward(tanh_w_hat, sigmoid_m_hat)
return tanh_w_hat * sigmoid_m_hat
@staticmethod
def backward(ctx, grad_output):
tanh_w_hat, sigmoid_m_hat = ctx.saved_tensors
return (
grad_output * (1 - tanh_w_hat*tanh_w_hat),
grad_output * sigmoid_m_hat*(1-sigmoid_m_hat)
)
def nac_weight(w_hat, m_hat, mode='normal'):
if mode == 'normal':
return NACWeight.apply(w_hat, m_hat)
elif mode == 'sign':
return NACWeightSign.apply(w_hat, m_hat)
elif mode == 'independent':
return NACWeightIndependent.apply(w_hat, m_hat)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/functional/regualizer_nmu_z.py | stable_nalu/functional/regualizer_nmu_z.py |
import torch
class RegualizerNMUZ:
def __init__(self, zero=False):
self.zero = zero
self.stored_inputs = []
def __call__(self, W):
if self.zero:
return 0
x_mean = torch.mean(
torch.cat(self.stored_inputs, dim=0),
dim=0, keepdim=True
)
return torch.mean((1 - W) * (1 - x_mean)**2)
def append_input(self, x):
if self.zero:
return
self.stored_inputs.append(x)
def reset(self):
if self.zero:
return
self.stored_inputs = []
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/functional/gated_choice.py | stable_nalu/functional/gated_choice.py |
import torch
class GatedChoiceNormal(torch.autograd.Function):
@staticmethod
def forward(ctx, g, v):
ctx.save_for_backward(g, v)
return g * v
@staticmethod
def backward(ctx, grad_output):
g, v = ctx.saved_tensors
return (
grad_output * v,
grad_output * g
)
class GatedChoiceGateFreeGradient(torch.autograd.Function):
@staticmethod
def forward(ctx, g, v):
ctx.save_for_backward(g, v)
return g * v
@staticmethod
def backward(ctx, grad_output):
g, v = ctx.saved_tensors
return (
grad_output * v,
grad_output
)
def gated_choice(g, a, m, mode='normal'):
if mode == 'normal':
return g * a + (1 - g) * m
elif mode == 'gate-free-gradient':
return GatedChoiceGateFreeGradient.apply(g, a) + GatedChoiceGateFreeGradient.apply(1 - g, m)
elif mode == 'test':
return GatedChoiceNormal.apply(g, a) + GatedChoiceNormal.apply(1 - g, m)
else:
raise NotADirectoryError(f'the mode {mode} is not implemented')
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/export/sequential_mnist.py | export/sequential_mnist.py |
import os
import csv
import sys
import argparse
import stable_nalu
# Parse arguments
parser = argparse.ArgumentParser(description='Export results from simple function task')
parser.add_argument('--tensorboard-dir',
action='store',
type=str,
help='Specify the directory for which the data is stored')
parser.add_argument('--csv-out',
action='store',
type=str,
help='Specify the file for which the csv data is stored at')
args = parser.parse_args()
# Set threads
if 'LSB_DJOB_NUMPROC' in os.environ:
allowed_processes = int(os.environ['LSB_DJOB_NUMPROC'])
else:
allowed_processes = None
def matcher(tag):
return (
(
tag.startswith('metric/train') or
tag.startswith('metric/valid') or
tag.startswith('metric/test/extrapolation/')
) and tag.endswith('/mse')
)
reader = stable_nalu.reader.TensorboardMetricReader(
args.tensorboard_dir,
metric_matcher=matcher,
recursive_weight=True,
step_start=1,
processes=allowed_processes
)
with open(args.csv_out, 'w') as csv_fp:
for index, df in enumerate(reader):
df.to_csv(csv_fp, header = (index == 0), index=False)
csv_fp.flush()
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/export/simple_function_static.py | export/simple_function_static.py |
import os
import csv
import sys
import argparse
import stable_nalu
# Parse arguments
parser = argparse.ArgumentParser(description='Export results from simple function task')
parser.add_argument('--tensorboard-dir',
action='store',
type=str,
help='Specify the directory for which the data is stored')
parser.add_argument('--csv-out',
action='store',
type=str,
help='Specify the file for which the csv data is stored at')
parser.add_argument('--export-gate',
action='store_true',
default=False,
help='Export the NALU gate value to the csv file')
args = parser.parse_args()
# Set threads
if 'LSB_DJOB_NUMPROC' in os.environ:
allowed_processes = int(os.environ['LSB_DJOB_NUMPROC'])
else:
allowed_processes = None
def matcher(tag):
return (
tag in ['metric/valid/interpolation', 'metric/test/extrapolation'] or
tag.endswith('nalu/gate/mean') and args.export_gate
)
reader = stable_nalu.reader.TensorboardMetricReader(
args.tensorboard_dir,
metric_matcher=matcher,
step_start=0,
processes=allowed_processes
)
with open(args.csv_out, 'w') as csv_fp:
for index, df in enumerate(reader):
df.to_csv(csv_fp, header = (index == 0), index=False)
csv_fp.flush()
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
SeargeDP/ComfyUI_Searge_LLM | https://github.com/SeargeDP/ComfyUI_Searge_LLM/blob/241d4c8d8243515cf19cab2d2ee051745fc5f592/Searge_LLM_Node.py | Searge_LLM_Node.py | import importlib
import os
import folder_paths
GLOBAL_MODELS_DIR = os.path.join(folder_paths.models_dir, "llm_gguf")
WEB_DIRECTORY = "./web/assets/js"
DEFAULT_INSTRUCTIONS = 'Generate a prompt from "{prompt}"'
try:
Llama = importlib.import_module("llama_cpp_cuda").Llama
except ImportError:
Llama = importlib.import_module("llama_cpp").Llama
class AnyType(str):
"""A special class that is always equal in not equal comparisons. Credit to pythongosssss"""
def __ne__(self, __value: object) -> bool:
return False
anytype = AnyType("*")
class Searge_LLM_Node:
@classmethod
def INPUT_TYPES(cls):
model_options = []
if os.path.isdir(GLOBAL_MODELS_DIR):
gguf_files = [file for file in os.listdir(GLOBAL_MODELS_DIR) if file.endswith('.gguf')]
model_options.extend(gguf_files)
return {
"required": {
"text": ("STRING", {"multiline": True, "dynamicPrompts": True, "default": ""}),
"random_seed": ("INT", {"default": 1234567890, "min": 0, "max": 0xffffffffffffffff}),
"model": (model_options,),
"max_tokens": ("INT", {"default": 4096, "min": 1, "max": 8192}),
"apply_instructions": ("BOOLEAN", {"default": True}),
"instructions": ("STRING", {"multiline": False, "default": DEFAULT_INSTRUCTIONS}),
},
"optional": {
"adv_options_config": ("SRGADVOPTIONSCONFIG",),
}
}
CATEGORY = "Searge/LLM"
FUNCTION = "main"
RETURN_TYPES = ("STRING", "STRING",)
RETURN_NAMES = ("generated", "original",)
def main(self, text, random_seed, model, max_tokens, apply_instructions, instructions, adv_options_config=None):
model_path = os.path.join(GLOBAL_MODELS_DIR, model)
if model.endswith(".gguf"):
generate_kwargs = {'max_tokens': max_tokens, 'temperature': 1.0, 'top_p': 0.9, 'top_k': 50,
'repeat_penalty': 1.2}
if adv_options_config:
for option in ['temperature', 'top_p', 'top_k', 'repeat_penalty']:
if option in adv_options_config:
generate_kwargs[option] = adv_options_config[option]
model_to_use = Llama(
model_path=model_path,
n_gpu_layers=-1,
seed=random_seed,
verbose=False,
n_ctx=2048,
)
if apply_instructions:
req = instructions.replace("{prompt}", text) if "{prompt}" in instructions else f"{instructions} {text}"
messages = [
{"role": "system",
"content": f"You are a helpful assistant."},
{"role": "user",
"content": f"An image generation prompt is a single paragraph summary to describe the subject and "
f"style of an image. It includes a description of the kind of image, the subject of "
f"the image, and some description of the image medium and style in the form of short "
f"keyword.\n\nCreate an image generation prompt for the subject \"a creepy creature "
f"shadow in the dark in a dimly lit tunnel\" in the style \"digital art illustration "
f"with intricate details\"."},
{"role": "assistant",
"content": f"Image Description: A digitally crafted illustration portrays a chilling scene within "
f"a dimly lit, cavernous tunnel. The dominant subject of the image is a mysterious "
f"creature, its form subtly discernible only as a menacing shadow on the walls of the "
f"tunnel. Its elongated silhouette suggests a serpentine creature with sharp "
f"protrusions and an ominous aura. The creature's eyes, a pair of glowing orbs, appear "
f"eerily human-like yet alien. The tunnel is riddled with intricate details that "
f"enhance the eerie atmosphere: dust particles floating lazily in the feeble light, "
f"ancient and crumbling stone, water droplets sparkling on the damp walls, and a "
f"hauntingly beautiful, bioluminescent fungi growing in the shadows. The dimly lit "
f"environment is highlighted by strategically placed light sources that create "
f"dramatic chiaroscuro effects, casting an unsettling and atmospheric glow on the "
f"scene. Digital Art Illustration with Intricate Details (Dark, Atmospheric, "
f"Suspenseful)"},
{"role": "user",
"content": "Now compile the description and style into a single paragraph summary"},
{"role": "assistant",
"content": f"Digital art illustration featuring a dark, atmospheric, and suspenseful scene within "
f"a dimly lit, cavernous tunnel. The subject of the image is a mysterious creature, "
f"depicted only as a menacing shadow on the walls, with elongated silhouette and sharp "
f"protrusions. The creature's eyes, a pair of glowing orbs, appear eerily human-like "
f"yet alien. The tunnel is adorned with intricate details, such as dust particles, "
f"ancient crumbling stone, water droplets, and a hauntingly beautiful bioluminescent "
f"fungi growing in the shadows. Dramatic chiaroscuro effects are created through "
f"strategically placed light sources, casting an unsettling and atmospheric glow on "
f"the scene."},
{"role": "user",
"content": f"create a detailed summary without the title or style"},
{"role": "assistant",
"content": f"A dimly lit, cavernous tunnel is the setting for this chilling digital illustration. "
f"A mysterious creature lurks in the shadows, its elongated silhouette suggestive of a "
f"serpentine creature with sharp protrusions and an ominous aura. The creature's eyes, "
f"a pair of glowing orbs, appear eerily human-like yet alien. The tunnel is riddled "
f"with intricate details that enhance the eerie atmosphere: dust particles floating "
f"lazily in the feeble light, ancient and crumbling stone, water droplets sparkling on "
f"the damp walls, and a hauntingly beautiful, bioluminescent fungi growing in the "
f"shadows. Dramatic chiaroscuro effects are created through strategically placed light "
f"sources, casting an unsettling and atmospheric glow on the scene."},
{"role": "user",
"content": f"Generate a prompt from \"magical garden, sitting on a blue_bench, Disney Princess in "
f"pink_dress, blonde_hair, portrait, Pixar, Disney style, photorealistic, high "
f"details, detailed skin, pores, freckles\""},
{"role": "assistant",
"content": f"In a captivating, Pixar-inspired, Disney-style, photorealistic portrait, a magical "
f"garden unfolds as a breathtaking backdrop. The subject of the image is a Disney "
f"Princess, dressed elegantly in a pink gown with flowing fabric, sitting serenely on "
f"a blue bench. The princess boasts long, blonde hair and a radiant smile. The garden "
f"is meticulously detailed, with vibrant, enchanted flora and fauna, as well as "
f"whimsical details like sparkling fairy lights and a picturesque waterfall. The "
f"princess is highlighted against the lush, detailed foliage, with a focus on the "
f"high-definition details of her porcelain skin, visible freckles, and the intricacies "
f"of her elegant gown. The image is rendered in the captivating, photorealistic style "
f"that exemplifies both the Disney and Pixar brands, capturing the princess's timeless "
f"beauty and the magic of her enchanting surroundings."},
{"role": "user",
"content": req},
]
else:
messages = [
{"role": "system",
"content": f"You are a helpful assistant. Try your best to give the best response possible to "
f"the user."},
{"role": "user",
"content": f"Create a detailed visually descriptive caption of this description, which will be "
f"used as a prompt for a text to image AI system (caption only, no instructions like "
f"\"create an image\").Remove any mention of digital artwork or artwork style. Give "
f"detailed visual descriptions of the character(s), including ethnicity, skin tone, "
f"expression etc. Imagine using keywords for a still for someone who has aphantasia. "
f"Describe the image style, e.g. any photographic or art styles / techniques utilized. "
f"Make sure to fully describe all aspects of the cinematography, with abundant "
f"technical details and visual descriptions. If there is more than one image, combine "
f"the elements and characters from all of the images creatively into a single "
f"cohesive composition with a single background, inventing an interaction between the "
f"characters. Be creative in combining the characters into a single cohesive scene. "
f"Focus on two primary characters (or one) and describe an interesting interaction "
f"between them, such as a hug, a kiss, a fight, giving an object, an emotional "
f"reaction / interaction. If there is more than one background in the images, pick the "
f"most appropriate one. Your output is only the caption itself, no comments or extra "
f"formatting. The caption is in a single long paragraph. If you feel the images are "
f"inappropriate, invent a new scene / characters inspired by these. Additionally, "
f"incorporate a specific movie director's visual style and describe the lighting setup "
f"in detail, including the type, color, and placement of light sources to create the "
f"desired mood and atmosphere. Always frame the scene, including details about the "
f"film grain, color grading, and any artifacts or characteristics specific. "
f"Compress the output to be concise while retaining key visual details. MAX OUTPUT "
f"SIZE no more than 250 characters."
f"\nDescription : {text}"},
]
llm_result = model_to_use.create_chat_completion(messages, **generate_kwargs)
return (llm_result['choices'][0]['message']['content'].strip(), text)
else:
return ("NOT A GGUF MODEL", text)
class Searge_Output_Node:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"text": (anytype, {}),
},
"hidden": {
"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO",
},
}
CATEGORY = "Searge/LLM"
FUNCTION = "main"
RETURN_TYPES = ()
RETURN_NAMES = ()
OUTPUT_NODE = True
def main(self, text, unique_id=None, extra_pnginfo=None):
if unique_id is not None and extra_pnginfo is not None and len(extra_pnginfo) > 0:
workflow = None
if "workflow" in extra_pnginfo:
workflow = extra_pnginfo["workflow"]
node = None
if workflow and "nodes" in workflow:
node = next((x for x in workflow["nodes"] if str(x["id"]) == unique_id), None)
if node:
node["widgets_values"] = [str(text)]
return {"ui": {"text": (str(text),)}}
class Searge_AdvOptionsNode:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"temperature": ("FLOAT", {"default": 1.0, "min": 0.1, "step": 0.05}),
"top_p": ("FLOAT", {"default": 0.9, "min": 0.1, "step": 0.05}),
"top_k": ("INT", {"default": 50, "min": 0}),
"repetition_penalty": ("FLOAT", {"default": 1.2, "min": 0.1, "step": 0.05}),
}
}
CATEGORY = "Searge/LLM"
FUNCTION = "main"
RETURN_TYPES = ("SRGADVOPTIONSCONFIG",)
RETURN_NAMES = ("adv_options_config",)
def main(self, temperature=1.0, top_p=0.9, top_k=50, repetition_penalty=1.2):
options_config = {
"temperature": temperature,
"top_p": top_p,
"top_k": top_k,
"repeat_penalty": repetition_penalty,
}
return (options_config,)
NODE_CLASS_MAPPINGS = {
"Searge_LLM_Node": Searge_LLM_Node,
"Searge_AdvOptionsNode": Searge_AdvOptionsNode,
"Searge_Output_Node": Searge_Output_Node,
}
NODE_DISPLAY_NAME_MAPPINGS = {
"Searge_LLM_Node": "Searge LLM Node",
"Searge_AdvOptionsNode": "Searge Advanced Options Node",
"Searge_Output_Node": "Searge Output Node",
}
| python | MIT | 241d4c8d8243515cf19cab2d2ee051745fc5f592 | 2026-01-05T07:14:41.725674Z | false |
SeargeDP/ComfyUI_Searge_LLM | https://github.com/SeargeDP/ComfyUI_Searge_LLM/blob/241d4c8d8243515cf19cab2d2ee051745fc5f592/__init__.py | __init__.py | from .Searge_LLM_Node import *
| python | MIT | 241d4c8d8243515cf19cab2d2ee051745fc5f592 | 2026-01-05T07:14:41.725674Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/data_encapsulation.py | code/data_collection/cc/cc_net/data_encapsulation.py | import json
import os
from glob import glob
import gzip
import argparse
parser=argparse.ArgumentParser()
parser.add_argument(
"--input_dir",
type=str,
)
parser.add_argument(
"--output_dir",
type=str,
)
parser.add_argument(
'--keep_bucket',
nargs="+",
type=str,
)
args=parser.parse_args()
print("Encapsulating data...")
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
files = glob(f"{args.input_dir}/*.gz")
with open(os.path.join(args.output_dir,"data.jsonl"),'w',encoding='utf-8') as fo:
for file in files:
if any([bucket in file for bucket in args.keep_bucket]):
print(file)
with gzip.open(file, 'rb') as fi:
for line in fi:
line = json.loads(line)
content = line['raw_content']
line['content'] = content
del line['raw_content']
fo.write(json.dumps(line)+'\n')
print("Encapsulation Done!") | python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/setup.py | code/data_collection/cc/cc_net/setup.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from setuptools import setup # type: ignore
setup(
name="cc_net",
version="1.0.0",
packages=["cc_net"],
# metadata to display on PyPI
author="Guillaume Wenzek",
author_email="guw@fb.com",
description="Tools to download and clean Common Crawl",
keywords="common crawl dataset",
url="https://github.com/facebookresearch/cc_net",
license="CC-BY-NC-4.0",
long_description=Path("README.md").read_text(),
long_description_content_type="text/markdown",
project_urls={
"Bug Tracker": "https://github.com/facebookresearch/cc_net/issues",
"Source Code": "https://github.com/facebookresearch/cc_net",
},
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.7",
],
python_requires=">=3.7",
install_requires=[
"beautifulsoup4>=4.7.1",
"pandas>=0.23.4",
"requests>=2.22.0",
"fasttext>=0.9.1",
"sentencepiece>=0.1.82",
"kenlm @ git+https://github.com/kpu/kenlm.git@master",
"func_argparse>=1.1.1",
"psutil>=5.6.3",
"sacremoses",
"submitit>=1.0.0",
"typing_extensions",
],
extras_require={
"dev": ["mypy==0.790", "pytest", "black==19.3b0", "isort==5.6.4"],
# To use scripts inside cc_net/tools
"tools": ["lxml", "sentence_splitter"],
# Memory-efficient hashset.
# This fork only compiles the kind of dict used by cc_net.
# Full version is at https://github.com/atom-moyer/getpy
"getpy": ["getpy @ git+https://github.com/gwenzek/getpy.git@v0.9.10-subset"],
},
package_data={"cc_net": ["data/*"]},
)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/tests/test_transformer.py | code/data_collection/cc/cc_net/tests/test_transformer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import inspect
import pickle
from pathlib import Path
import pytest
from cc_net import dedup, jsonql, perplexity, split_by_lang, tokenizer
def get_transformers(module):
return [
v
for v in vars(module).values()
if type(v) is type
and issubclass(v, jsonql.Transformer)
and v != jsonql.Transformer
]
ALL_TRANSFORMERS = (
get_transformers(jsonql)
+ get_transformers(dedup)
+ get_transformers(perplexity)
+ get_transformers(tokenizer)
+ get_transformers(split_by_lang)
)
def check_transformer_is_calling_super_init(cls: type):
assert issubclass(cls, jsonql.Transformer)
# accessing __init__ is generally an error, but here we do want to inspect
# the __init__method.
code = inspect.getsource(cls.__init__) # type: ignore
code = code.replace(" ", "")
# Check that super().__init__ is called.
assert "super().__init__()" in code
def test_bad_transformers_are_caught():
class BadTransformer(jsonql.Transformer):
def __init__(self, arg):
# We aren't calling super /!\
self.arg = arg
with pytest.raises(AssertionError):
check_transformer_is_calling_super_init(BadTransformer)
@pytest.mark.parametrize("transformer", ALL_TRANSFORMERS)
def test_transformer_is_correctly_implemented(transformer):
check_transformer_is_calling_super_init(transformer)
@pytest.mark.skipif(
not Path("bin/lid.bin").exists(), reason="bin/lid.bin not found, run `make install`"
)
def test_can_pickle_transformer(tmp_path):
model = Path("bin/lid.bin")
if not model.exists():
return
classifier = split_by_lang.Classifier(model, "text", "lang")
classifier.__enter__()
doc = dict(text="Hello world ! This is English btw.")
original_results = classifier(doc)
with open(tmp_path / "transformer.pkl", "wb") as o:
pickle.dump(classifier, o)
with open(tmp_path / "transformer.pkl", "rb") as f:
classifier = pickle.load(f)
assert original_results == classifier(doc)
# Do it again with the unpickled object.
with open(tmp_path / "transformer.pkl", "wb") as o:
pickle.dump(classifier, o)
with open(tmp_path / "transformer.pkl", "rb") as f:
classifier = pickle.load(f)
assert original_results == classifier(doc)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/tests/test_jsonql.py | code/data_collection/cc/cc_net/tests/test_jsonql.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import io
from pathlib import Path
from typing import Sequence
import numpy as np
import pytest
from cc_net import jsonql
def bar(small_bar: str) -> str:
return small_bar.replace(" ", " " * 10).replace("█", "█" * 10)
def get_output(transformer, data, **kwargs):
with io.StringIO() as output:
# Convert data to a generator so that it's not interpreted as a file list.
jsonql.run_pipe(transformer, kwargs, file=(x for x in data), output=output)
return output.getvalue()
def test_split(tmp_path: Path):
data = [
dict(text="Hello world", lang="en"),
dict(text="Boujour les amis", lang="fr"),
dict(text="Rock your boat", lang="en"),
]
with jsonql.split(tmp_path / "{lang}.json") as split:
list(split.map(data))
summary = split.summary()
assert "Found 2 splits." in summary
en_docs = list(jsonql.read_jsons(tmp_path / "en.json"))
assert [data[0], data[2]] == en_docs
fr_docs = list(jsonql.read_jsons(tmp_path / "fr.json"))
assert [data[1]] == fr_docs
def test_split_bad_pattern(tmp_path: Path):
data = [dict(text="Hello world", lang="en")]
with pytest.raises(KeyError):
with jsonql.split(tmp_path / "{language}.json") as split:
list(split.map(data))
def test_histogram():
data = [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]
hist, bins = jsonql.histogram(data, bins=8, weights=None)
np.testing.assert_almost_equal(bins, [0.1 * x for x in range(1, 10)])
np.testing.assert_almost_equal(hist, [4, 0, 0, 2, 0, 0, 0, 2])
data = [0, 0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.8, 0.8, 1]
hist, bins = jsonql.histogram(data, bins=10, weights=None)
np.testing.assert_almost_equal(bins, [0.1 * x for x in range(11)])
np.testing.assert_almost_equal(hist, [1, 4, 0, 0, 2, 0, 0, 0, 2, 1])
def test_display_stats():
stats = {
jsonql.ALL_DOCUMENTS: 100,
"title": 80,
"title.length": 80 * 50,
"text": 100,
"text.length": 100 * 1000,
"popularity": 8,
"popularity.val": [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9],
}
(title,) = jsonql.display_stats(stats, "title")
assert "title" in title
assert "saw 80 times" in title
assert "average length is" in title
assert "\n" not in title
(text,) = jsonql.display_stats(stats, "text")
assert "text" in text
assert "saw 100 times" in text
assert "average length is" in text
assert "\n" not in text
histogram = jsonql.display_stats(
stats, "popularity", bins=[x / 10 for x in range(1, 10)]
)
assert "popularity" in histogram[0]
assert "saw 8 times" in histogram[0]
assert "histogram is" in histogram[0]
assert "0.100 " + bar("████████") in histogram[1]
assert "0.400 " + bar("████ ") in histogram[2]
assert "0.800 " + bar("████ ") in histogram[3]
cum_histogram = jsonql.display_stats(stats, "popularity", bins=8, cumulative=True)
assert "popularity" in cum_histogram[0]
assert "saw 8 times" in cum_histogram[0]
assert "histogram is" in cum_histogram[0]
assert "0.100 " + bar("████ ") in cum_histogram[1]
assert "0.400 " + bar("██████ ") in cum_histogram[2]
assert "0.800 " + bar("████████") in cum_histogram[3]
def test_describe():
def sample(pop):
return dict(title="Lorem", text="Lorem ipsum dolor sit amet.", popularity=pop)
data = [sample(pop) for pop in [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]]
desc = get_output(
jsonql.describe, data, columns=None, bins=[x / 10 for x in range(1, 10)]
)
assert "Field title saw 8 times (100.0%), average length is 5" in desc
assert "Field text saw 8 times (100.0%), average length is 27" in desc
assert "Field popularity saw 8 times (100.0%), histogram is" in desc
assert "0.100 " + bar("████████") in desc
assert "0.400 " + bar("████ ") in desc
assert "0.800 " + bar("████ ") in desc
desc = get_output(jsonql.describe, data, columns=["text"])
assert "Field title saw 8 times (100.0%), average length is 5" not in desc
assert "Field text saw 8 times (100.0%), average length is 27" in desc
assert "Field popularity, histogram is:" not in desc
def test_custom_pipe():
def transformer(source, sep=" "):
for i, line in enumerate(source):
res = f"{i}{sep}{line}"
yield res
data = ["hello", "world"]
assert get_output(transformer, data) == "0 hello\n1 world\n"
assert get_output(transformer, data, sep="_") == "0_hello\n1_world\n"
def test_open_read_write(tmp_path: Path):
def _lines(filename: Path) -> Sequence[str]:
# jsonql.lines calls open_read
return list(jsonql.lines(filename))
tmp = tmp_path
with jsonql.open_write(tmp / "a.txt") as o:
print("a", file=o)
assert _lines(tmp / "a.txt") == ["a"]
jsonql.write_jsons([{"a": 1}], tmp / "a.txt")
assert _lines(tmp / "a.txt") == ['{"a": 1}']
with jsonql.open_write(tmp / "a.gz") as o:
print("a", file=o)
assert _lines(tmp / "a.gz") == ["a"]
with jsonql.open_write([tmp / "a0.txt", tmp / "a1.txt"]) as o:
print("a", file=o)
assert _lines(tmp / "a0.txt") == ["a"]
assert not (tmp / "a1.txt").is_file()
with jsonql.open_write([tmp / "b0.txt", tmp / "b1.txt"], max_size="1k") as o:
print("0" * 2000, file=o)
print("1" * 2000, file=o)
assert _lines(tmp / "b0.txt") == ["0" * 2000]
assert _lines(tmp / "b1.txt") == ["1" * 2000]
with jsonql.open_write(tmp / "a_????.json") as o:
print("a", file=o)
assert _lines(tmp / "a_0000.json") == ["a"]
assert not (tmp / "a_0001.json").is_file()
assert _lines(tmp / "a_*.json") == ["a"]
with jsonql.open_write(tmp / "b_??.json", max_size="1k") as o:
print("0" * 2000, file=o)
print("1" * 2000, file=o)
assert _lines(tmp / "b_00.json") == ["0" * 2000]
assert _lines(tmp / "b_01.json") == ["1" * 2000]
assert _lines(tmp / "b_*.json") == ["0" * 2000, "1" * 2000]
def test_split_file(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello\nWorld\n"
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_split_file_middle_of_line(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello _|_\nWorld\n"
# split is here ^
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello _|_\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_split_file_middle_of_char(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello\U0001F40D\nWorld\n"
# split is here ^^
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello🐍\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_blocked_gzip(tmp_path: Path):
file = tmp_path / "test.gz"
f = str(file)
# Each object is 10/11 bytes long. We have 2 of them by block.
content = ['{"xx": %d}' % i for i in range(80)]
with jsonql.BlockedGzipWriter(file, "wt", block_size="20B") as o:
for line in content:
print(line, file=o)
jr = jsonql.JsonReader(strict=True)
expected = list(jr.map(content))
# read as one file
assert expected == list(jsonql.read_jsons(file))
# read first block
assert expected[:2] == list(jsonql.read_jsons(f + "[0/40]"))
# read last block
assert expected[-2:] == list(jsonql.read_jsons(f + "[39/40]"))
readers = jsonql.get_block_readers(file, 9)
read_as_several_files = [list(jsonql.read_jsons(r)) for r in readers]
# 40 splits of 2 docs, 9 readers -> 5 splits, 10 docs per reader
assert list(jsonql.grouper(expected, 10)) == read_as_several_files
def test_enter_exit(capsys):
class MyTransformer(jsonql.Transformer):
def __enter__(self):
print("trans: started")
self.ready = True
return self
def __exit__(self, *args):
print("trans: done")
def do(self, x):
return (x, x)
def acc(values):
print("acc: started")
res = 0
for (x, _) in values:
res += int(x)
print("acc: done")
yield f"acc: result={res}"
t = MyTransformer()
data = (str(x) for x in range(10))
print("pipeline: started")
# Print to stdout.
jsonql.run_pipes(t, acc, file=data)
print("pipeline: done")
out = capsys.readouterr().out
assert (
"\n".join(
[
"pipeline: started",
"trans: started",
"acc: started",
"acc: done",
f"acc: result=45",
# Transformers are closed at the very end.
"trans: done",
"pipeline: done\n",
]
)
== out
)
def test_write_to_stdout(capsys):
lines = [str(x) for x in range(10)]
jsonql.run_pipes(file=iter(lines))
out = capsys.readouterr().out
assert out == "\n".join(lines) + "\n"
def test_write_to_stdout_handle_newlines(capsys):
lines = [str(x) + "\n" for x in range(10)]
jsonql.run_pipes(file=iter(lines))
out = capsys.readouterr().out
assert out == "".join(lines)
def test_multiprocess(capsys):
mult = jsonql.Mapper(lambda x: f"2x = {2 * int(x)}")
jsonql.run_pipes(mult, processes=2, file=(str(x) for x in range(10)))
out = set(capsys.readouterr().out.strip("\n").split("\n"))
assert set(f"2x = {2 * x}" for x in range(10)) == out
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/tests/test_normalizer.py | code/data_collection/cc/cc_net/tests/test_normalizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import cc_net.text_normalizer as txt
def test_unicode_punct():
weird = ",。、„”“«»1」「《》´∶:?!();–—.~’…━〈〉【】%"
replaced = ',.,""""""""""\'::?!();- - . ~\'...-<>[]%'
assert txt.replace_unicode_punct(weird) == replaced
assert txt.remove_unicode_punct(weird) == ""
def test_numbers():
weird = "023456789 | 0123456789"
normalized = "000000000 | 0000000000"
assert txt.normalize(weird, numbers=True) == normalized
assert txt.normalize(weird, numbers=False) == weird
def test_normalize_for_dedup():
weird = "023´∶:\x10 | ;012 hèllo"
normalized = "000 | ;000 hèllo"
assert normalized == txt.slow_normalize_for_dedup(weird)
assert normalized == txt.normalize_for_dedup(weird)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/tests/test_regroup.py | code/data_collection/cc/cc_net/tests/test_regroup.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from cc_net import jsonql, regroup
def check_regroup(tmp_path, regroup_fn, check_blocks_boundaries=False):
n_shards = 4
n_docs = 20
shards = [
[dict(id=i, shard=s, raw_content="hello world") for i in range(n_docs)]
for s in range(n_shards)
]
shards_files = [tmp_path / f"{s:04d}.json.gz" for s in range(n_shards)]
for shard, shard_file in zip(shards, shards_files):
jsonql.run_pipes(inputs=shard, output=shard_file)
regroup_file = tmp_path / "regroup.json.gz"
start = time.time()
regroup_fn(shards_files, regroup_file)
duration = time.time() - start
print(f"{regroup_fn.__module__}.{regroup_fn.__name__} took {duration}s")
regrouped = list(jsonql.read_jsons(regroup_file))
assert [doc for shard in shards for doc in shard] == regrouped
readers = jsonql.get_block_readers(regroup_file, n_shards)
if not check_blocks_boundaries:
assert [doc for shard in shards for doc in shard] == [
doc for reader in readers for doc in jsonql.read_jsons(reader)
]
return
for shard, reader in zip(shards, readers):
block = [doc for doc in jsonql.read_jsons(reader)]
assert shard == block
def test_regroup(tmp_path):
# With regroup boundaries will be every 256Mb.
check_regroup(tmp_path, regroup.reshard, check_blocks_boundaries=False)
def test_fast_regroup(tmp_path):
# With fast regroup boundaries should match the shards.
check_regroup(tmp_path, regroup.fast_reshard, check_blocks_boundaries=True)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/tests/conftest.py | code/data_collection/cc/cc_net/tests/conftest.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import pytest
def _request_is_disabled(self, *args, **kwargs):
raise Exception(
f"Your code tried to call 'request' with: {args}, {kwargs}. Unit test aren't allowed to reach internet."
)
@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
"""Remove requests.sessions.Session.request for all tests."""
monkeypatch.setattr("requests.sessions.Session.request", _request_is_disabled)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/tests/test_parse_wet_file.py | code/data_collection/cc/cc_net/tests/test_parse_wet_file.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from cc_net import process_wet_file
def test_parsing():
sample = Path(__file__).parent / "data" / "sample.warc.txt"
with open(sample) as f:
documents = list(process_wet_file.parse_warc_file(f))
expected_urls = [
"http://sample_english.com",
"http://sample_chinese.zh",
"http://sample_russian.ru",
]
assert expected_urls == [d["url"] for d in documents]
expected_domains = ["sample_english.com", "sample_chinese.zh", "sample_russian.ru"]
assert expected_domains == [d["source_domain"] for d in documents]
expected_date = [
"2019-03-18T00:00:00Z",
"2019-03-18T00:00:01Z",
"2019-03-18T00:00:02Z",
]
assert expected_date == [d["date_download"] for d in documents]
expected_title = [
"Famous Mark Twain Quotes",
"馬克·吐溫名言",
"Цитаты знаменитого Марка Твена",
]
assert expected_title == [d["title"] for d in documents]
expected_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't.
"""
assert expected_quotes == documents[0]["raw_content"]
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/tests/test_minify.py | code/data_collection/cc/cc_net/tests/test_minify.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
import pytest
import cc_net
import cc_net.minify as minify
from cc_net import jsonql, process_wet_file
from cc_net.minify import (
HASH_SIZE,
decode_hashes,
encode_hashes,
encode_line_ids,
get_hashes,
)
def test_encode_decode():
sentences = ["Hello world !", "Is everyone happy in here ?"]
hashes = get_hashes(sentences)
assert all([len(h) == HASH_SIZE for h in hashes])
hashes_int = [minify._b2i(h) for h in hashes]
encoded = encode_hashes(hashes)
decoded = decode_hashes(encoded)
assert all([len(d) == HASH_SIZE for d in decoded])
decoded_int = [minify._b2i(d) for d in decoded]
assert hashes_int == decoded_int
assert hashes == decoded
def test_minify():
doc = {
"raw_content": "Hello world !\nIs everyone happy in here ?",
"language": "en",
"perplexity": 120.0,
"line_ids": [0, 4],
}
expected = {"line_ids": "AAAEAA==", "language": "en", "perplexity": 120.0}
minifier = minify.Minifier()
assert expected == minifier(doc)
@pytest.fixture
def http_from_disk(monkeypatch):
def read_sample_file(url: str, n_retry: int = 3) -> bytes:
expected_url = process_wet_file.WET_URL_ROOT + "/crawl-data/sample.warc.wet"
assert expected_url == url
file = Path(__file__).parent / "data" / "sample.warc.txt"
return file.read_bytes()
monkeypatch.setattr(cc_net.jsonql, "request_get_content", read_sample_file)
def test_minify_and_fetch(http_from_disk, tmp_path: Path):
full_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't."""
# We don't need no education.
chosen_quotes = "\n".join(
l for l in full_quotes.splitlines() if "Education" not in l
)
cc_doc = {
"url": "http://sample_english.com",
"date_download": "2019-03-18T00:00:00Z",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"source_domain": "sample_english.com",
"title": "Famous Mark Twain Quotes",
"raw_content": full_quotes,
"cc_segment": "crawl-data/sample.warc.wet",
"nlines": 4,
"length": 353,
}
ccnet_metadata = {
"language": "en",
"language_score": 0.99,
"perplexity": 151.5,
"bucket": "head",
"raw_content": chosen_quotes,
"nlines": 3,
"length": len(chosen_quotes),
"original_nlines": 4,
"original_length": 353,
"line_ids": [0, 2, 3],
}
ccnet_doc = dict(cc_doc, **ccnet_metadata)
mini = minify.Minifier()(ccnet_doc.copy())
assert mini is not ccnet_doc
important_fields = [
"url",
"digest",
"cc_segment",
"language",
"language_score",
"perplexity",
"bucket",
"line_ids",
]
expected = {k: ccnet_doc[k] for k in important_fields}
expected["line_ids"] = encode_line_ids(expected["line_ids"]) # type: ignore
assert expected == mini
with jsonql.open_write(tmp_path / "sample.json") as o:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
# line_ids is removed when unminifying
ccnet_doc.pop("line_ids")
assert ccnet_doc == fetcher(cc_doc)
def test_fetch(http_from_disk, tmp_path: Path):
mini_docs = [
{
"url": "http://sample_chinese.com",
"digest": "sha1:Y4E6URVYGIAFNVRTPZ5S3J64RTZTP6HJ",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([2]),
"bucket": "not_that_great",
},
{
"url": "http://sample_english.com",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([3]),
"bucket": "top_notch",
},
]
with jsonql.open_write(tmp_path / "sample.json") as o:
for mini in mini_docs:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
cc = process_wet_file.CCSegmentsReader(["crawl-data/sample.warc.wet"])
docs = [d for d in fetcher.map(cc) if d is not None]
assert cc.retrieved_segments == 1
# Note: documents are retrieved as they are ordered in the .warc.wet file
assert [
"Facts are stubborn things, but statistics are more pliable.",
"事實是固執的東西,但統計數字卻比較柔和。",
] == [d["raw_content"] for d in docs]
assert ["top_notch", "not_that_great"] == [d["bucket"] for d in docs]
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/tests/__init__.py | code/data_collection/cc/cc_net/tests/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
#
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/tests/test_flat_hash_set.py | code/data_collection/cc/cc_net/tests/test_flat_hash_set.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import pytest
from cc_net.flat_hash_set import HASH_TYPE, FlatHashSet, NaiveHashSet
def as_dict(flat_hash_set) -> dict:
return {k: v for (k, v) in flat_hash_set.items()}
need_getpy = pytest.mark.skipif(
FlatHashSet == NaiveHashSet, reason="getpy isn't installed"
)
def same_behavior(test_case):
def run_case():
naive = as_dict(test_case(FlatHashSet))
flat = as_dict(test_case(NaiveHashSet))
assert naive == flat
return need_getpy(run_case)
@same_behavior
def test_setitem(hash_set_cls):
h = hash_set_cls()
h[np.arange(10, dtype=h.dtype)] = np.zeros(10, dtype=np.uint8)
h[np.arange(5, dtype=h.dtype)] = np.ones(5, dtype=np.uint8)
return h
@same_behavior
def test_add_dup(hash_set_cls):
h = hash_set_cls()
h.add(np.arange(10, dtype=h.dtype))
h.add(np.arange(5, dtype=h.dtype))
expected = {i: i < 5 for i in range(10)}
assert expected == as_dict(h), f"add_dup with {hash_set_cls.__name__}"
return h
@need_getpy
def test_gp_dict():
import getpy as gp # type: ignore
h = gp.Dict(HASH_TYPE, np.uint8)
h[np.arange(10, dtype=HASH_TYPE)] = np.zeros(10, dtype=np.uint8)
h[np.arange(5, dtype=HASH_TYPE)] = np.ones(5, dtype=np.uint8)
expected = {i: i < 5 for i in range(10)}
assert expected == as_dict(h)
def check_reload(h, dump, load, tmp_path):
dump_path = tmp_path / dump.__name__
dump(h, dump_path)
h2 = type(h)()
load(h2, dump_path)
assert as_dict(h) == as_dict(h2)
@pytest.mark.parametrize("hash_set_cls", [FlatHashSet, NaiveHashSet])
def test_loading(tmp_path, hash_set_cls):
h = hash_set_cls()
x = np.random.randint(0, 2 ** 32, (100,), dtype=h.dtype)
h.add(x)
check_reload(h, hash_set_cls.dump, hash_set_cls.load, tmp_path)
check_reload(h, hash_set_cls.dump_np, hash_set_cls.load_np, tmp_path)
if hasattr(hash_set_cls, "dump_gp"):
check_reload(h, hash_set_cls.dump_gp, hash_set_cls.load_gp, tmp_path)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/tests/test_dedup.py | code/data_collection/cc/cc_net/tests/test_dedup.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
from typing import Iterable, Sequence
from cc_net import dedup, jsonql
from cc_net.dedup import str_hash
from cc_net.flat_hash_set import FlatHashSet
def text(*args: str) -> str:
return "\n".join(args)
def write_docs(file: Path, docs: Iterable[Sequence[str]]):
file.parent.mkdir(exist_ok=True)
with open(file, "w") as f:
for sentences in docs:
doc = dict(text=text(*sentences))
print(json.dumps(doc), file=f)
def as_dict(hash_set):
if not isinstance(hash_set, dict):
hash_set = {k: v for (k, v) in hash_set.items()}
return hash_set
def load_hashes(file):
results = dedup.FlatHashSet()
results.load(file)
return as_dict(results)
LENGTHS = ["original_length", "length"]
def assert_documents_equal(expected, actual, ignoring={}):
expected = [{k: doc[k] for k in doc if k not in ignoring} for doc in expected]
actual = [{k: doc[k] for k in doc if k not in ignoring} for doc in expected]
assert expected == actual
def test_simple_dedup(tmp_path: Path) -> None:
write_docs(
tmp_path / "docs.json",
[
["_Hello", "_World", "I'm so original"],
["_world", "I'm originaler", "_Hello"],
],
)
results = list(dedup.deduplicate(tmp_path / "docs.json", field="text"))
expected = [
# First document is untouched
dict(
text=text("_Hello", "_World", "I'm so original"),
original_nlines=3,
nlines=3,
line_ids=[0, 1, 2],
),
# Second documents loses several lines
dict(text="I'm originaler", original_nlines=3, nlines=1, line_ids=[1]),
]
assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_with_dump(tmp_path: Path):
hashes = tmp_path / "hashes.bin"
documents = [
dict(text=text("_Hello", "_World", "I'm so original")),
dict(text=text("_world", "I'm originaler", "_Hello")),
]
collector = dedup.HashesCollector(field="text", output=hashes)
list(collector.map(documents))
results = load_hashes(hashes)
expected = {
str_hash(l): l.startswith("_")
for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
}
assert expected == results
def test_dedup_with_np_dump(tmp_path: Path):
hashes = tmp_path / "hashes.bin"
documents = [
dict(text=text("_Hello", "_World", "I'm so original")),
dict(text=text("_world", "I'm originaler", "_Hello")),
]
with dedup.HashesCollector(field="text", output=hashes) as d:
list(d.map(documents))
results = FlatHashSet()
results.load_np(hashes)
expected = set(
str_hash(l) for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
)
assert expected == set(results.keys())
def test_dedup_from_hashes(tmp_path: Path):
documents = [
dict(text=text("_Hello", "World", "I'm so original")),
dict(text=text("Good morning", "World", "I'm originaler")),
]
seen = ["_hello", "i'm originaler", "world"]
hashes = [str_hash(h) for h in seen]
h = dedup.FlatHashSet()
h.add(hashes)
# Note: 'world' appears only once and won't be treated as a duplicate.
h.add(hashes[:-1])
h.dump(tmp_path / "hashes.bin")
results = list(
dedup.DuplicatesRemover("text", [tmp_path / "hashes.bin"]).map(documents)
)
expected = [
dict(
text=text("World", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[1, 2],
),
dict(
text=text("Good morning", "World"),
original_nlines=3,
nlines=2,
line_ids=[0, 1],
),
]
assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_fast(tmp_path: Path):
data = tmp_path / "data"
part_0 = [["Hello", "_World", "I'm so original"]]
write_docs(data / "part_0.json", part_0)
part_1 = [["Good morning", "_World", "I'm originaler"]]
write_docs(data / "part_1.json", part_1)
parts = [data / "part_0.json", data / "part_1.json"]
res = tmp_path / "res"
res.mkdir()
h = tmp_path / "hashes.bin"
field = "text"
jsonql.run_pipes(dedup.HashesCollector(field, output=h), file=parts)
for part in parts:
jsonql.run_pipes(
dedup.DuplicatesRemover(field, [h]), file=part, output=res / part.name
)
jsonql.run_pipes(
dedup.DuplicatesRemover(field, [h]), file=part, output=res / part.name
)
results_0 = list(jsonql.read_jsons(res / "part_0.json"))
expected_0 = [
dict(
text=text("Hello", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)
results_1 = list(jsonql.read_jsons(res / "part_1.json"))
expected_1 = [
dict(
text=text("Good morning", "I'm originaler"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)
words = [w for part in [part_0, part_1] for doc in part for w in doc]
expected = {str_hash(s.lower()): s.startswith("_") for s in words}
assert expected == load_hashes(h)
def test_remove_duplicates_sharded(tmp_path: Path):
data = tmp_path / "data"
part_0 = [["Hello", "_World", "I'm so original"]]
write_docs(data / "part_0.json", part_0)
part_1 = [["_Good morning", "_World", "I'm originaler"]]
write_docs(data / "part_1.json", part_1)
h = tmp_path / "hashes"
h.mkdir()
h0 = FlatHashSet()
h0.add([str_hash(s.lower()) for doc in part_0 for s in doc])
h0.add([str_hash("_world")])
h0.dump(h / "part_0.bin")
assert {
str_hash("hello"): False,
str_hash("_world"): True,
str_hash("i'm so original"): False,
} == as_dict(h0)
h1 = FlatHashSet()
h1.add([str_hash(s.lower()) for doc in part_1 for s in doc])
h1.add([str_hash("_good morning")])
h1.dump(h / "part_1.bin")
assert {
str_hash("_good morning"): True,
str_hash("_world"): False,
str_hash("i'm originaler"): False,
} == as_dict(h1)
res = tmp_path / "res"
res.mkdir()
# dedup.DISABLE_MULTI_PROCESSING = True # Simplifies debugging
dedup.remove_duplicates_sharded(
files=[data / "part_0.json", data / "part_1.json"],
outputs=[res / "part_0.json", res / "part_1.json"],
field="text",
hashes_dir=h,
)
results_0 = list(jsonql.read_jsons(res / "part_0.json"))
expected_0 = [
dict(
text=text("Hello", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)
# First pass removes "_world", second "_good morning".
results_1 = list(jsonql.read_jsons(res / "part_1.json"))
expected_1 = [
dict(text=text("I'm originaler"), original_nlines=3, nlines=1, line_ids=[2])
]
assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/split_by_lang.py | code/data_collection/cc/cc_net/cc_net/split_by_lang.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
from pathlib import Path
from typing import Dict, Optional
import fasttext # type: ignore
from cc_net import jsonql
def get_args():
parser = argparse.ArgumentParser(
description="Read a list of json files and split them ",
parents=[jsonql.io_parser()],
)
parser.add_argument("--pattern", type=str)
parser.add_argument("--field", type=str, default="raw_content")
parser.add_argument("--threshold", type=float, default=0)
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--out_field", type=str, default="language")
parser.add_argument("--top", type=int, default=1)
return vars(parser.parse_args())
def predict(model, text: str, k: int = 1):
labels, scores = model.predict(text, k=k)
labels = [l.replace("__label__", "") for l in labels]
return labels, scores
def avg_predict(model, text):
# Overall gives the same results than predict(model, text.replace("\n", ""))
text = text.split("\n")
text_len = sum(len(line) for line in text)
if text_len == 0:
return None, 0
scores = [predict(model, line) for line in text]
scores_by_label: Dict[str, float] = collections.defaultdict(float)
for (label, score), line in zip(scores, text):
scores_by_label[label] += score * len(line)
label, score = max(scores_by_label.items(), key=lambda kv: kv[1])
return label, score / text_len
class Classifier(jsonql.Transformer):
def __init__(
self,
model: Path,
field: str,
out_field: str,
threshold: float = 0,
top: int = 1,
language: str = None,
rounding: int = 2,
):
super().__init__()
self.model = model
assert model.exists(), f"Model {model} doesn't exist."
self.field = field
self.out_field = out_field
self.threshold = threshold
self.top = top
self.language = language
self.rounding = rounding
# Fasttext model is a C object and can't be pickled
self.fasttext_model: fasttext._FastText = None
self.n_doc, self.n_accepted, self.n_ignored, self.n_disagreement = 0, 0, 0, 0
self.cnt: Dict[str, int] = {}
def _prepare(self):
self.log(f"Loading {self.model}")
self.fasttext_model = fasttext.load_model(str(self.model))
def predict(self, text):
return predict(self.fasttext_model, text.replace("\n", ""), k=self.top)
def do(self, doc: dict) -> Optional[dict]:
text = doc.get(self.field, None)
if not text:
return None
if self.language and doc.get("language") != self.language:
self.n_ignored += 1
return doc
self.n_doc += 1
labels, scores = self.predict(text)
scores.round(self.rounding, out=scores)
for l in labels:
self.cnt[l] = self.cnt.get(l, 0) + 1
if self.top == 1:
existing_label = doc.get(self.out_field, None)
if existing_label and labels[0] != existing_label:
self.n_disagreement += 1
if all(s < self.threshold for s in scores):
return None
self.n_accepted += 1
if self.top == 1:
doc[self.out_field] = labels[0]
doc[self.out_field + "_score"] = scores[0]
else:
doc[self.out_field] = {l: s for l, s in zip(labels, scores)}
return doc
def summary(self):
n_doc, n_accepted, n_disagreement, cnt, out_field = (
self.n_doc,
self.n_accepted,
self.n_disagreement,
self.cnt,
self.out_field,
)
summ = super().summary()
if self.threshold > 0:
ratio = n_accepted / n_doc if n_doc else 0
summ.append(f"Kept {n_accepted} docs over {n_doc} ({ratio :.1%})")
summ.append(f"Found {len(cnt)} {out_field} labels: {cnt}")
disagreement = n_disagreement / n_doc if n_doc else 0
if disagreement:
summ.append(f"{out_field} disagreement is at {disagreement:.1%}.")
return summ
def __repr__(self):
return f"Classifier({self.model})"
def classify_and_split(file, output, pattern, **kwargs):
classifier = Classifier(**kwargs)
splitter = jsonql.split(pattern)
jsonql.run_pipes(classifier, splitter, file=file, output=output)
if __name__ == "__main__":
args = get_args()
pattern = args.get("pattern")
if pattern:
classify_and_split(**args)
else:
args.pop("pattern")
jsonql.run_pipe(Classifier, args)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/jsonql.py | code/data_collection/cc/cc_net/cc_net/jsonql.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Manipulate files containing one json per line.
"""
import argparse
import collections
import contextlib
import functools
import glob
import gzip
import importlib
import inspect
import io
import itertools
import json
import logging
import multiprocessing
import os
import re
import sys
import tempfile
import time
import typing as tp
import warnings
import zlib
from pathlib import Path
from typing import (
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
TextIO,
Tuple,
Union,
)
import numpy as np
import psutil # type: ignore
import requests
from typing_extensions import Protocol
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s %(process)d:%(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M",
)
NEWLINE = " N3WL1N3 "
FilterFn = Callable[[dict], bool]
FileDescriptor = Union[Path, List[Path], str]
WritableFileLike = Union[FileDescriptor, TextIO, "SimpleIO", None]
ReadableFileLike = Union[Iterable[str], FileDescriptor, None]
def io_parser():
"""Parser shared by all commands to get input/output files."""
parser = argparse.ArgumentParser(add_help=False)
file_help = """File to read from. Can be specified several times for several files.
Be careful that bash will expand glob patterns **before** sending the args
to python. To use globs put it inside single quotes:
jsonql where --file 'data/perplexity/*.json' '{length} > 100' | head -1
jsonql --file 'data/perplexity/*.json' where '{length} > 100' | head -1
[Invalid] jsonql where '{length} > 100' --file data/perplexity/*.json | head -1
[Invalid] jsonql where --file data/perplexity/*.json '{length} > 100' | head -1
"""
parser.add_argument("-f", "--file", type=Path, action="append", help=file_help)
parser.add_argument("-o", "--output", type=Path, default="-")
parser.add_argument("--processes", type=int, default=1)
return parser
def get_parser():
parser = argparse.ArgumentParser(
description="Read a set of json files and allow to query them"
)
subparsers = parser.add_subparsers()
def add_subparser(function, arguments):
doc = function.__doc__.split("\n")[0]
p = subparsers.add_parser(function.__name__, help=doc, parents=[io_parser()])
p.set_defaults(command=function)
for k, v in arguments.items():
p.add_argument(k, **v)
add_subparser(
select,
{
"columns": dict(nargs="+", help="Extract the value of the given fields"),
"--skip_empty": dict(
action="store_true", help="Skip lines without the requested fields"
),
"--separator": dict(
default="\t", help="Separator to use between the different columns"
),
"--newline": dict(
default=NEWLINE,
help="Replace newlines found in the text by the given string",
),
},
)
add_subparser(
where,
{
"clauses": dict(nargs="+", help=""),
"--requires": dict(
action="append", help="Python module required by the clauses code."
),
},
)
add_subparser(
merge,
{
"columns": dict(nargs="+", help=""),
"--separator": dict(
default="\t", help="Separator to use between the different columns"
),
"--newline": dict(
default=NEWLINE, help="Replace the given string by actual newlines"
),
},
)
add_subparser(
describe,
{
"columns": dict(nargs="*", help=""),
"--bins": dict(
default="auto", help="Number of bins for computing the histograms"
),
"--cumulative": dict(
action="store_true", help="Compute cumulative histograms"
),
"--weights": dict(type=str, help="Column used to weight histograms"),
},
)
add_subparser(split, {"--pattern": dict(type=str)})
add_subparser(shard, {})
return parser
def _split_array(array, sep):
last = 0
for i, x in enumerate(array):
if x != sep:
continue
yield array[last:i]
last = i + 1
if last != len(array):
yield array[last:]
def main(raw_args):
parser = get_parser()
pipeline = []
file = "-"
output = "-"
processes = 1
for args_group in _split_array(raw_args, "--"):
args = vars(parser.parse_args(args_group))
command = args.pop("command")
file = args.pop("file") or file
output = args.pop("output") or output
processes = args.pop("processes") or processes
pipeline.append(as_pipe(command, args))
if not pipeline:
parser.print_help()
return
run_pipes(*pipeline, file=Path(file), output=Path(output), processes=processes)
class Transformer:
"""
Wrapper around functions transforming documents.
This allows `run_pipes` to automatically parallelize the pipeline.
Provides:
* Automatic logging. Logging can be changed with the `summary` method.
Loggin frequency with _log_freq (in second) or $JSONQL_LOG_FREQ env variable.
* Automatic parallelization without pickling. The transformers are shared
across processes, and the object is usually not pickled.
* Basic pickling / unpickling in case it's still needed.
By default will only pickle the arguments passed to the constructor.
* Delayed initialization. Internal state which is not pickable should be set
inside the `_prepare` function.
"""
parallelisable: bool = True
expect_json: bool = False
warn_when_pickling: bool = False
ready: bool = False
def __init_subclass__(cls, expect_json: bool = None):
"""Detects if the subclass expects json as input."""
spec = inspect.getfullargspec(cls.do)
if expect_json is None:
expect_json = spec.annotations.get(spec.args[1], None) == dict
cls.expect_json = expect_json
def __new__(cls, *args, **kwargs):
"""Creates the transformer and save the arguments passed to the constructor."""
t = super().__new__(cls)
Transformer.__init__(t, args, kwargs)
return t
def __init__(self, state_args: tuple = None, state_kwargs: dict = None):
"""
Init the transformer counters.
If state_args/state_kwargs are set they will override whatever was
originally passed to the subclass constructor.
"""
if state_args is not None:
self.__args = state_args
if state_kwargs is not None:
self.__kwargs = state_kwargs
self.start_time = time.time()
self.__last_log = self.start_time
self.processed = 0
# Log every 5 min unless specified other wise.
self._log_freq = int(os.environ.get("JSONQL_LOG_FREQ", 5 * 60))
self.__cls = type(self)
self._logger = logging.getLogger(self.__cls.__name__)
def __call__(self, x):
assert self.ready, f"{self} is not ready."
if x is None:
return
y = self.do(x)
self.processed += 1
if time.time() - self.__last_log > self._log_freq:
self.log_summary()
return y
def do(self, x):
raise NotImplementedError(f"'do' not implemented in {type(self)}")
def summary(self) -> List[str]:
return [self.speed_summary()]
def speed_summary(self) -> str:
delay = time.time() - self.start_time
h = delay / 3600
s = self.processed / delay
return f"Processed {self.processed:_} documents in {h:.2}h ({s:5.1f} doc/s)."
def log(self, message):
self._logger.info(message)
def log_summary(self) -> None:
if not self.ready:
self.log("Not ready.")
return
summ = self.summary() or []
for line in summ:
self.log(line)
self.__last_log = time.time()
def map(self, source: Iterable) -> Iterator:
if self.ready:
for x in source:
yield self(x)
# since we have been prepared by caller,
# caller is also responsible for calling `close`.
return
else:
with self:
for x in source:
yield self(x)
def __getstate__(self) -> Tuple[tuple, dict, bool]:
return (self.__args, self.__kwargs, self.expect_json)
def __setstate__(self, state: Tuple[tuple, dict, bool]):
if self.warn_when_pickling:
warnings.warn(f"Unpickling transformer: {type(self)}. This can be slow.")
(args, kwargs, expect_json) = state
# When unpickling `__new__` isn't called so we have to doit ourselves.
Transformer.__init__(self, state_args=args, state_kwargs=kwargs)
type(self).__init__(self, *args, **kwargs)
assert self.expect_json == expect_json
# __setstate__ is called by multiprocessing right before calling
# the object so we need to initialize everything.
self.__enter__()
def _prepare(self) -> None:
pass
def __enter__(self) -> "Transformer":
# In multiprocessing __enter__ is always called twice, so we are idempotent.
# Because we call __enter__ when deserializing this transformer and
# also when the parent transformer is deserialized.
self.start_time = time.time()
if self.ready:
return self
self._prepare()
self.ready = True
return self
def __exit__(self, *args) -> None:
self.close()
self.log_summary()
def close(self) -> None:
pass
def as_pipe(transformer, kwargs):
if isinstance(transformer, type):
return transformer(**kwargs)
return lambda source: transformer(source, **kwargs)
def compose(fns: List[Transformer]) -> Transformer:
if len(fns) == 1:
return fns[0]
return MultiTransformer(fns)
class MultiTransformer(Transformer):
def __init__(self, transformers: List[Transformer]):
super().__init__()
self.transformers = transformers
def __repr__(self) -> str:
pipeline = " | ".join(type(t).__name__ for t in self.transformers)
return f"<{pipeline}>"
def do(self, x):
for t in self.transformers:
x = t(x)
return x
def _prepare(self):
for t in self.transformers:
t.__enter__()
return self
def __exit__(self, *args):
for t in self.transformers:
t.__exit__(*args)
def summary(self):
return itertools.chain(*(t.summary() for t in self.transformers))
class Mapper(Transformer):
def __init__(self, fn):
super().__init__()
self.fn = fn
def do(self, x):
return self.fn(x)
def run_pipe(
command,
kwargs: dict = None,
file: ReadableFileLike = None,
output: WritableFileLike = None,
):
kwargs = kwargs or {}
if isinstance(kwargs, argparse.ArgumentParser):
kwargs = vars(kwargs.parse_args())
file = file or Path(kwargs.pop("file", "-"))
output = output or Path(kwargs.pop("output", "-"))
return run_pipes(as_pipe(command, kwargs), file=file, output=output)
def run_pipes(
*fns: Union[Transformer, Callable[[Iterable], Iterable]],
inputs: Iterable[dict] = None,
file: ReadableFileLike = None,
output: WritableFileLike = None,
processes: int = 1,
chunksize: int = 10_000,
):
"""
Run full document processing pipeline.
- fns: list of functions to run over the documents. Can be:
* `Iterable -> Iterable` function
* jsonql.Transformer instance
Using transformers allow the pipeline to process documents in parallel.
- inputs: iterable to read the documents from
- file: if inputs is not given, will read documents from this file.
- output: writable file like.
- processes: number of processes to use. -1 means all CPU available.
- chunksize: chunksize for multiprocessing.Pool.imap_unordered
"""
expect_json = len(fns) and isinstance(fns[0], Transformer) and fns[0].expect_json
if expect_json and inputs is None:
fns = (JsonReader(),) + fns
transformers = []
for t in fns:
if not isinstance(t, Transformer):
break
if not t.parallelisable:
break
transformers.append(t)
pipes = fns[len(transformers) :]
log = logging.getLogger(__name__).info
if inputs is None:
data: Iterable = open_read(file)
else:
data = inputs
if processes == -1:
processes = os.cpu_count() or 0
with contextlib.suppress(BrokenPipeError), contextlib.ExitStack() as stack:
if transformers:
log(f"preparing {transformers}")
transform = stack.enter_context(compose(transformers))
if processes <= 1:
data = transform.map(data)
else:
p = multiprocessing.current_process()
log(f"Will start {processes} processes from {p.name}, Pid: {p.pid}")
pool = stack.enter_context(
multiprocessing.Pool(
processes=processes,
initializer=_set_global_transformer,
initargs=(transform,),
)
)
data = pool.imap_unordered(
_global_transformer, data, chunksize=chunksize
)
for fn in pipes:
if isinstance(fn, Transformer):
data = fn.map(data)
else:
data = fn(data)
write_jsons(data, output)
# Allows to share transformer acroos subprocess.
# Used by `run_pipes`
_GLOBAL_TRANSFORMER: Optional[Transformer] = None
def _set_global_transformer(transformer: Transformer):
global _GLOBAL_TRANSFORMER
p = multiprocessing.current_process()
logging.info(
f"Started subprocess {p.name}:{p.pid} from {os.getppid()} for {transformer}"
)
assert transformer.ready, f"{transformer} isn't ready"
_GLOBAL_TRANSFORMER = transformer
def _global_transformer(document: str) -> Optional[dict]:
assert _GLOBAL_TRANSFORMER is not None
return _GLOBAL_TRANSFORMER(document)
def lines(file: ReadableFileLike) -> Iterator[str]:
return (line.strip("\n") for line in open_read(file))
def read_jsons(file: ReadableFileLike, strict=False) -> Iterator[dict]:
reader = JsonReader(strict=strict)
lines = open_read(file)
for line in lines:
if line is None:
continue
yield reader(line)
reader.log_summary()
def write_jsons(source: Iterable[dict], file: WritableFileLike) -> None:
eol = os.linesep
with open_write(file) as o:
for res in source:
if res is None:
continue
if isinstance(res, dict):
json.dump(res, o, ensure_ascii=False)
o.write(eol)
continue
if isinstance(res, str):
res = res.rstrip("\n")
print(res, file=o)
class JsonReader(Transformer):
def __init__(self, strict: bool = False):
super().__init__()
self.ready = True
self.strict = strict
self.num_errors = 0
def do(self, line: str) -> Optional[dict]:
if line is None:
return None
if isinstance(line, dict):
return line
line = line.rstrip("\n")
if not line:
return None
try:
return json.loads(line)
except json.decoder.JSONDecodeError as e:
self.log_error(e)
if self.strict:
raise
return None
def log_error(self, e: json.decoder.JSONDecodeError):
self.num_errors += 1
if self.num_errors > 10:
return
MAX_LEN = 80
snippet, snippet_len = e.doc, len(e.doc)
col = e.pos
if snippet_len > MAX_LEN:
if col < MAX_LEN:
start = 0
elif snippet_len - col < MAX_LEN:
start = snippet_len - MAX_LEN
else:
start = col - MAX_LEN // 2
snippet = e.doc[start : start + MAX_LEN]
col = col - start
logging.warning(
"\n".join(
[
f"Invalid json (length={len(e.doc)}) {e}",
snippet,
" " * (col - 1) + "^",
]
)
)
def summary(self):
summ = super().summary()
if self.num_errors > 0:
summ.append(f"Skipped {self.num_errors} invalid json.")
return summ
def compile_column(column, newline):
if callable(column):
return column
if column == "*":
return json.dumps
if re.match(r"[_a-z][_a-z0-9]*", column):
def extract_col(doc):
v = doc.get(column, "")
if isinstance(v, str) and newline != "\n":
v = v.rstrip("\n").replace("\n", newline)
return v
return extract_col
return compile_expr(column)
def select(lines, columns, skip_empty=False, separator="\t", newline="\n"):
"""Yields the content of the requested columns."""
column_parsers = [compile_column(c, newline) for c in columns]
for doc in read_jsons(lines):
values = []
empty = True
for parse_col in column_parsers:
v = parse_col(doc)
values.append(str(v) or "")
empty = empty and v is None
if skip_empty and empty:
continue
yield separator.join(values)
def compile_expr(clause: Union[str, FilterFn], requires: List[str] = None):
if not isinstance(clause, str):
return clause
args_re = r"(?i:\{([_a-z][_a-z0-9]*)\})"
args_list = list(re.findall(args_re, clause))
if not args_list:
# This is only a warning because you may want to have eg random sampling
# that doesn't depend on the document.
logging.warn(
f"Warning: No variable found in expression: <{clause}>\n"
"Variables should be written inside braces, eg: {language}=='en'"
)
python_like = re.sub(args_re, r"doc.get('\1', None)", clause)
requires = requires or []
modules = {r: importlib.import_module(r) for r in requires}
return eval(f"lambda doc: {python_like}", modules)
class where(Transformer):
"""Filters the data using python code.
Ex: `jsonql where 'len({text}) > 100'`
"""
def __init__(
self, clauses: Sequence[Union[str, FilterFn]], requires: List[str] = []
):
super().__init__()
self.raw_clauses = clauses
self.requires = requires
self.n_selected = 0
self.clauses: List[FilterFn] = []
def _prepare(self):
self.clauses = [compile_expr(c, self.requires) for c in self.raw_clauses]
def do(self, doc: dict) -> Optional[dict]:
assert self.clauses
if not doc or not all((c(doc) for c in self.clauses)):
return None
self.n_selected += 1
return doc
def summary(self):
n_selected, n_docs = self.n_selected, self.processed
selectivity = n_selected / n_docs if n_docs else 0
return [f"Selected {n_selected} documents out of {n_docs} ({selectivity:5.1%})"]
def merge(lines, columns, separator="\t", newline=NEWLINE):
"""Reads tab separated columns and output a json using the given headers.
Headers are of form {key}[%{type}]
{type} can be one of {"f": float, "i": int, "b": bool, "s": string}.
Default type is string.
A special header "_" means interpret this column as json, and append all other
columns to it. Must appear only once and on last position.
Ex:
`echo '1\thello' | jsonql merge n t` --> `{"n": "1", "t": "hello"}`
`echo '1\thello" | jsonql merge n%i t` --> `{"n": 1, "t": "hello"}`
`echo '1\thello\t{"f": "bar"}' | jsonql merge n%i t _` --> `{"n": 1, "t": "hello", "f": "bar"}`
"""
handle_newlines = lambda s: s.replace(newline, "\n")
type_mapping: Dict[str, Callable] = {
"f": float,
"i": int,
"b": bool,
"s": handle_newlines,
}
type_parsing = [
type_mapping.get(f.split("%")[-1], handle_newlines) for f in columns
]
columns = [f.split("%")[0] for f in columns]
doc_index = columns.index("_") if "_" in columns else -1
read_json = JsonReader()
def parse(line):
parts = line.split(separator, len(columns) - 1)
doc: Dict[str, tp.Any] = {}
for i, value in enumerate(parts):
if columns[i] == "_":
doc.update(read_json(parts[doc_index]))
else:
try:
doc[columns[i]] = type_parsing[i](value)
except ValueError:
logging.error(
f"Error when parsing column {i} of line: {line[:100]}..."
)
return doc
for line in lines:
yield json.dumps(parse(line))
class split(Transformer):
"""Split a files in several smaller files based on the value of a field."""
# Not parallelisable since we are writing to files.
parallelisable = False
def __init__(
self,
pattern: Union[Path, str] = None,
split_fn: Callable[[dict], str] = None,
mkdir: bool = False,
):
super().__init__()
assert not (
pattern and split_fn
), "split can't have both a pattern and a split_fn"
if split_fn is not None:
self.split_fn = split_fn
else:
assert pattern, "split need either a pattern or a split_fn"
self.split_fn = self.make_split_fn(str(pattern))
self.mkdir = mkdir
self.o: dict = {}
def make_split_fn(self, pattern: str) -> Callable[[dict], str]:
candidates = list(re.findall(r"(?i:\{([_a-z][_a-z0-9]*)\})", pattern))
return lambda doc: pattern.format(**{c: doc[c] for c in candidates})
def do(self, doc):
filename = self.split_fn(doc)
if not filename:
return
o = self.o.get(filename, None)
if o is None:
if self.mkdir:
Path(filename).parent.mkdir(parents=True, exist_ok=True)
self.o[filename] = open_write(filename)
print(json.dumps(doc, ensure_ascii=False), file=self.o[filename], flush=True)
def summary(self):
summ = super().summary()
summ.append(f"Found {len(self.o)} splits.")
return summ
def close(self):
for file in self.o.values():
file.close()
def histogram(values, bins, weights):
hist, bins = np.histogram(values, bins=bins)
# n_bins = len(hist)
if weights is not None:
# Bins can't be auto-determined if weights is supplied.
# So we first compute the bins without the weights then recompute
# the histogram with the weights.
hist, bins = np.histogram(values, bins=bins, weights=weights)
# cumsum = np.cumsum(hist)
# total = cumsum[-1]
# for i in range(n_bins - 1):
# if cumsum[i] / total > 0.9:
# useful_range = np.linspace(bins[0], bins[i + 1], n_bins)
# new_bins = np.append(useful_range, [bins[-1]])
# return np.histogram(values, bins=new_bins, weights=weights)
return hist, bins
def _parse_bins(bins):
try:
if isinstance(bins, str):
if "," in bins:
bins = [int(b) for b in bins.split(",")]
else:
bins = int(bins)
except ValueError:
pass
return bins
ALL_DOCUMENTS = "<ALL_DOCUMENTS>"
MAX_LABEL_LEN = 100
def bar_chart(hist, bins):
n = sum(hist)
max_h = max(hist)
out = []
for i, h in enumerate(hist):
h_size = 80 * h // max_h
dh_size = 80 * (h - hist[i - 1]) // max_h
if h_size == 0 or dh_size == 0:
continue
bar = "█" * h_size
out.append(f"{bins[i]:8.3f} {bar:80} ({h:5d}, {h / n:5.1%}) {bins[i+1]:8.3f}")
out.append(f"{bins[-1]:8.3f}")
return out
def display_stats(stats, key, weights=None, bins="auto", cumulative=False):
out = []
documents = stats[ALL_DOCUMENTS]
count = stats.get(key, 0)
r = count / documents if documents else 0
out.append(f"Field {key} saw {count} times ({r:5.1%})")
length = stats.get(key + ".length", None)
avg_length = length // count if length else 0
if length is not None:
out[-1] += f", average length is {length // count}"
values = stats.get(key + ".val", None)
if values:
out[-1] += f", histogram is: (bins={bins})"
if weights:
if weights not in stats:
logging.warn(f"Warning: weights column {weights} not found.")
if weights + ".val" not in stats:
logging.warn(
f"Warning: weights column {weights} is not a numeric column."
)
weights = stats.get(weights + ".val")
hist, bins = histogram(values, _parse_bins(bins), weights)
if cumulative:
hist = np.cumsum(hist)
out += bar_chart(hist, bins)
cnt = stats.get(key + ".cnt", None)
if avg_length < MAX_LABEL_LEN and cnt and max(cnt.values()) > 1:
cnt = sorted(cnt.items(), key=lambda kv: kv[1], reverse=True)
out[-1] += ", top 100 labels:"
for label, n in cnt[:100]:
if n < 5:
continue
out.append(f"{label:25}: {n:6} ({n / count:5.1%})")
return out
def describe(source, columns=None, weights=None, **kwargs):
"""Compute some statistics about a dataset.
Stats can be restricted to a subset of columns."""
MAX_HIST_SIZE = 100_000_000
MAX_CNT_SIZE = 1000
stats = {ALL_DOCUMENTS: 0}
needed = columns + [weights] if columns else None
for doc in read_jsons(source):
stats[ALL_DOCUMENTS] += 1
for k, v in doc.items():
if needed and k not in needed:
continue
stats[k] = get_or_set(stats, k, 0) + 1
if isinstance(v, str):
stats[k + ".length"] = get_or_set(stats, k + ".length", 0) + len(v)
if len(v) > MAX_LABEL_LEN: # Don't treat too long string as labels
continue
cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
if v in cnt or len(cnt) < MAX_CNT_SIZE:
cnt[v] += 1
elif type(v) in (int, float):
values = get_or_set(stats, k + ".val", [])
if len(values) < MAX_HIST_SIZE:
values.append(v)
elif type(v) is list and len(v) and type(v[0]) in (int, float):
values = get_or_set(stats, k + ".val", [])
if len(values) < MAX_HIST_SIZE:
values += v
elif type(v) is dict:
cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
for label in v:
if label in cnt or len(cnt) < MAX_CNT_SIZE:
cnt[label] += 1
documents = stats[ALL_DOCUMENTS]
yield f"Stats computed on {documents} documents:"
for k in stats:
if columns and k not in columns:
continue
if "." in k or k == ALL_DOCUMENTS:
continue
for line in display_stats(stats, k, weights=weights, **kwargs):
yield line
def shard(lines):
"""Shard a file in several smaller ones."""
# The creation of the shard is handle in a generic way. Do we need this ?
return lines
# *** Utils ***
def get_or_set(dictionary, key, default):
if key not in dictionary:
dictionary[key] = default
return dictionary[key]
class SimpleIO(Protocol):
"""A subset of methods from TextIO."""
def close(self) -> None:
...
def write(self, line: str) -> int:
...
def __enter__(self) -> "SimpleIO":
...
def __exit__(self, exc_type, exc_value, traceback):
...
def open_read(filename: ReadableFileLike) -> Iterable[str]:
"""Open the given file, list of files or files matching the given glob and read lines.
`filename` is None or "-" -> reads from stdin
`filename` is a Path / str -> interprets filename as a glob and open files matching it
`filename` is a list -> opens sequentially all files from the list using `open_read`
`filename` is something else -> returns the object wrapped in a `nullcontext`
This allows to pass already openened files or iterables.
`open_read` will decompress gzip files, given they have ".gz" suffix.
"""
if filename is None:
return sys.stdin
if isinstance(filename, list):
assert isinstance(filename[0], Path)
if len(filename) == 0:
return []
if len(filename) > 1:
return _yield_from(filename)
filename = tp.cast(Path, filename[0])
if isinstance(filename, str):
if filename.startswith("http://") or filename.startswith("https://"):
return open_remote_file(filename)
filename = Path(filename)
if not isinstance(filename, Path):
# we might have received an iterable, return it unmodified.
return filename # type: ignore
# Expand glob patterns only when reading
files = [Path(f) for f in sorted(glob.glob(str(filename)))]
if len(files) > 1:
return _yield_from(files)
if len(files) == 1:
filename = files[0]
assert isinstance(filename, Path)
if filename.name.endswith("]"):
return block_reader(filename)
logging.getLogger(__name__).info(f"Opening {filename} with mode 'rt'")
if filename.suffix == ".gz":
file: TextIO = gzip.open(filename, "rt") # type: ignore
else:
file = open(filename, "rt")
return _close_when_exhausted(file)
def _close_when_exhausted(file: TextIO) -> Iterable[str]:
with file:
yield from file
def _yield_from(files: list) -> Iterable[str]:
for file in files:
yield from open_read(file)
def open_write(
filename: WritableFileLike, max_size: str = "4G"
) -> tp.ContextManager[TextIO]:
"""Open the given file, list of files or files matching the given glob.
The return value is a ContextManager meant to be used inside a `with` block:
```
with open_write("foo.txt") as o:
...
Write mode:
replaces "?" from filename by numbers ranging from 0 to 9, generatings files of size `max_size`.
If filename ends with ".gz", creates a blocked gzip file with random access.
"""
if filename is None:
return contextlib.nullcontext(sys.stdout)
if isinstance(filename, list):
if len(filename) > 1:
return MultiFile(filename, "w", max_size)
else:
filename = tp.cast(Path, filename[0])
if isinstance(filename, str):
filename = Path(filename)
if not isinstance(filename, Path):
assert hasattr(filename, "write"), f"{filename} doesn't have a .write method."
# We return a 'TextIO' even though we only check for `.write` method,
# this works better with eg `print`.
return contextlib.nullcontext(tp.cast(TextIO, filename))
mode = "wt"
if "?" in filename.name:
return sharded_file(filename, mode, max_size)
logging.getLogger(__name__).info(f"Opening {filename} with mode {mode}")
# TODO: should we use another format ?
if filename.suffix == ".gz":
return BlockedGzipWriter(Path(filename), mode, block_size="64M")
return open(filename, "wt")
def parse_size(size):
unit_map = {"B": 1, "K": 1024, "M": 1024 ** 2, "G": 1024 ** 3}
unit = size[-1].upper()
assert (
unit in unit_map
), f"Unsupported size unit for {size}. Use one of: {unit_map.keys()}."
return int(size[:-1]) * unit_map[unit]
class MultiFile(SimpleIO):
def __init__(self, files: Iterable[Path], mode="w", max_size="4G"):
self.name = str(files)
self.mode = mode
self.files = iter(files)
self.max_size = parse_size(max_size)
self.current_handle: Optional[TextIO] = None
self.current_block_size = 0
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | true |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/dedup.py | code/data_collection/cc/cc_net/cc_net/dedup.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to remove duplicate paragraphs across one or several shards.
"""
import argparse
import gc
import hashlib
import logging
import multiprocessing
import os
import tempfile
import time
from pathlib import Path
from typing import Iterable, List, Optional, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.flat_hash_set import HASH_TYPE, AbstractDedupHashSet, FlatHashSet
from cc_net.jsonql import mem_footprint_gb
from cc_net.text_normalizer import normalize_for_dedup
BYTE_ORDER = "little"
HASH_SIZE = HASH_TYPE(0).nbytes
DISABLE_MULTI_PROCESSING = False
FilesOrDir = Union[List[Path], Path]
def get_args():
parser = argparse.ArgumentParser(
description="Read a set of json files and allow to query them",
parents=[jsonql.io_parser()],
)
parser.add_argument("--field", type=str, default="raw_content")
parser.add_argument("--output_hashes", type=str)
parser.add_argument("--no_finalize", action="store_false", dest="finalize")
# parser.add_argument("--mem_gb", type=int)
parser.add_argument("--hashes", type=str)
return vars(parser.parse_args())
def _b2i(b: bytes) -> int:
return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def str_hash(s: str) -> int:
h = hashlib.sha1(bytes(s, encoding="utf-8"))
return _b2i(h.digest())
log = logging.getLogger(__name__).info
def run_par(processes):
# This is different from multiprocessing.map since it allows for kwargs.
processes = list(processes)
if len(processes) == 1 or DISABLE_MULTI_PROCESSING:
for f, args, kwargs in processes:
f(*args, **kwargs)
return
log(f"Starting {len(processes)} subprocess")
processes = [
multiprocessing.Process(target=f, args=a, kwargs=kw) for (f, a, kw) in processes
]
for p in processes:
p.start()
for p in processes:
p.join()
failed = 0
for p in processes:
if p.exitcode != 0:
log(f"Process failed with code {p.exitcode}: {p}")
failed += 1
assert failed == 0, f"{failed} processes failed..."
def split_file(file, n_splits):
for i in range(n_splits):
yield jsonql.SplitFile(file, i, n_splits)
def merge(hashes_1, hashes_2, output):
if isinstance(hashes_1, str):
h1 = FlatHashSet()
h1.load(hashes_1)
else:
h1 = hashes_1
if isinstance(hashes_2, str):
h2 = FlatHashSet()
h2.load(hashes_2)
else:
h2 = hashes_2
h2_np = np.fromiter(h2.keys(), dtype=FlatHashSet.dtype, count=len(h2))
dup = h1.__contains__(h2_np)
# Dups between h1 and h2 will be set to 1, keys unique to h2 are copied to
# h1 with their value.
h1[h2_np] = dup
if output:
h1.dump(output)
return h1
def merge_shard(hash_files, output):
h = FlatHashSet()
h.load(hash_files[0])
for hash_file in hash_files[1:]:
h = merge(h, hash_file, output=None)
print(f"Merged {hash_file}. We now have {len(h)} hashes.")
h.dump(output)
print(f"Saved {len(h)} hashes to {output}.")
def _dump_sentence_hashes(source: Path, output: Path, field: str):
treated = 0
started = time.time()
with open(output, "wb") as o:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content:
continue
h = compute_hashes(content)
if h is None:
continue
h.tofile(o)
treated += 1
if treated % 100_000 == 0:
delay = time.time() - started
log(
f"Computed {treated} documents hashes in {delay / 3600:.2f}h ({treated / delay} doc / s)"
)
def _remove_duplicate_hashes(duplicates, source, output):
batch_size = 100_000
n_lines, n_lines_kept = 0, 0
with open(source, "rb") as f, open(output, "wb") as o:
log(f"Opening {source} with mode rb")
log(f"Opening {output} with mode wb")
while True:
hashes = np.fromfile(f, dtype=HASH_TYPE, count=batch_size)
if hashes.size == 0:
break
keep = duplicates[hashes] < 1
kept = keep.sum()
hashes *= keep
hashes.tofile(o)
n_lines += hashes.size
n_lines_kept += kept
removed = n_lines - n_lines_kept
selectivity = n_lines_kept / n_lines if n_lines else 0
log(f"Removed {removed} duplicate hashes with selectivity: {selectivity:3.1%}")
def remove_duplicates_sharded(
files: List[Path],
outputs: List[Path],
hashes_dir: FilesOrDir,
field: str,
group_hashes: int = 1,
tmp_dir: Path = None,
min_len: int = 0,
):
"""Remove duplicates in several passes, when all hashes don't fit in RAM.
Note: The current implementation is not doing a 'perfect' deduplication.
If a hash appear exactly once in each shard of hashes it won't be detected
as a duplicate. This can be fixed if hashes are fully dedup beforehand.
"""
assert len(files) == len(outputs)
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
assert len(hashes_files) > 0, f"no hashes files found in: {hashes_dir}"
if len(hashes_files) <= group_hashes:
log(f"All hashes can be done in one pass, using DuplicatesRemover on {files}")
rm_dups = DuplicatesRemover(field, hashes_files)
rm_dups._prepare()
run_par(
(jsonql.run_pipes, (rm_dups,), dict(file=f, output=o))
for f, o in zip(files, outputs)
)
return
log(f"Starting deduplicate_sharded on {files}.")
tmp_directory = tempfile.TemporaryDirectory(dir=str(tmp_dir) if tmp_dir else None)
def tmp_files(i):
return [
Path(tmp_directory.name) / (f.name.split(".")[0] + f".{i}.bin")
for f in files
]
last = tmp_files(0)
run_par((_dump_sentence_hashes, (f, tmp, field), {}) for f, tmp in zip(files, last))
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
for i, group in enumerate(jsonql.grouper(hashes_files, group_hashes)):
hashes = FlatHashSet()
for h in group:
hashes.load(h)
log(f"Loaded {h}, up to {len(hashes)} hashes ({mem_footprint_gb()}GB)")
intermediates = tmp_files(i + 1)
# Remove hashes in parallel. Since modern OS have "copy-on-write" and
# `hashes` is read-only, we will only have one version of it in RAM.
run_par(
(_remove_duplicate_hashes, (hashes, f, tmp), {})
for f, tmp in zip(last, intermediates)
)
# Force hashes to be freed, before we start allocating a new one.
del hashes
gc.collect()
for tmp in last:
os.remove(tmp)
last = intermediates
def finalize(source, dedup_hashes, min_len):
n_chars, n_chars_kept = 0, 0
with open(dedup_hashes, "rb") as hashes:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content or len(content) < min_len:
continue
sentences = content.split("\n")
doc_hashes = np.fromfile(hashes, dtype=HASH_TYPE, count=len(sentences))
chars, kept_chars = finalize_doc(doc, field, doc_hashes)
n_chars += chars
n_chars_kept += kept_chars
yield doc
selectivity = n_chars_kept / n_chars if n_chars else 0
log(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
dedup_hashes = last
run_par(
[
(
jsonql.run_pipe,
(finalize,),
dict(kwargs=dict(dedup_hashes=h, min_len=min_len), file=f, output=o),
)
for h, f, o in zip(dedup_hashes, files, outputs)
]
)
tmp_directory.cleanup()
def compute_hashes(content) -> Optional[np.ndarray]:
if not content:
return None
lines = content.split("\n")
# save hashes as bytes but reinterpret them as uint64.
hashes = np.fromiter(
(
hashlib.sha1(bytes(normalize_for_dedup(l), encoding="utf-8")).digest()[
:HASH_SIZE
]
for l in lines
),
dtype=np.dtype((bytes, HASH_SIZE)),
count=len(lines),
)
return np.ndarray(dtype=HASH_TYPE, buffer=hashes.data, shape=hashes.shape)
def finalize_doc(doc, field, hashes=None):
content = doc.get(field)
lines = content.split("\n")
n_chars = len(content)
if "original_nlines" not in doc:
doc["original_nlines"] = doc.get("nlines", len(lines))
if "original_length" not in doc:
doc["original_length"] = doc.get("length", n_chars)
if hashes is None:
hashes = doc.pop(field + "_hash")
# Remove duplicates inside doc
seen: Set[int] = set()
original_line_ids = doc.get("line_ids", range(len(hashes)))
line_ids = []
new_lines = []
for l, line, h in zip(original_line_ids, lines, hashes):
if h not in seen and h != 0:
line_ids.append(l)
new_lines.append(line)
seen.add(h)
doc[field] = "\n".join(new_lines)
doc["nlines"] = len(line_ids)
n_chars_kept = len(doc[field])
doc["length"] = n_chars_kept
doc["line_ids"] = line_ids
return n_chars, n_chars_kept
class HashesCollector(jsonql.Transformer):
"""
Collect all hashes found of lines found in the `field` of the source documents.
"""
parallelisable = False
def __init__(
self, field: str, output: Path = None, hashes: AbstractDedupHashSet = None
):
super().__init__()
self.n_lines = 0
self.field = field
self.output = output
self.hashes = FlatHashSet() if hashes is None else hashes
self.num_hashes_end = 0
self.num_hashes_start = len(self.hashes)
def summary(self) -> List[str]:
summ = super().summary()
h = self.num_hashes_end if self.hashes is None else len(self.hashes)
h = (h - self.num_hashes_start) // 1000
max_mem = mem_footprint_gb()
n = self.n_lines // 1000
summ.append(
f"Found {h:_}k unique hashes over {n:_}k lines. Using {max_mem:.1f}GB of RAM."
)
return summ
def do(self, doc: dict) -> None:
doc_hashes = compute_hashes(doc.get(self.field))
if doc_hashes is None:
return
self.hashes.add(doc_hashes)
self.n_lines += doc_hashes.size
def close(self):
if self.output and self.hashes:
self.hashes.dump(self.output)
self.log(f"Saved {len(self.hashes)} hashes to {self.output}")
# Save the number of hashes.
self.num_hashes_end = len(self.hashes)
# Free up mem even if the transformer is kept somewhere else.
self.hashes = None # type: ignore
class DuplicatesRemover(jsonql.Transformer):
"""DuplicatesRemover"""
# The hashes can't be pickled so they will have to be read back from disk.
warn_when_pickling = True
def __init__(self, field: str, hashes_files: List[Path], collect: bool = False):
"""
Remove duplicates
"""
super().__init__()
self.field = field
self.collect = collect
self.hashes_files = hashes_files
self.duplicates: Optional[AbstractDedupHashSet] = None
self.n_lines, self.n_lines_kept = 0, 0
self.n_chars, self.n_chars_kept = 0, 0
def _prepare(self):
if self.duplicates is not None:
return
self.duplicates = FlatHashSet()
start = time.time()
for h in self.hashes_files:
shard_start = time.time()
self.duplicates.load(str(h))
delay = time.time() - shard_start
self.log(
f"Loaded hashes from {h} ({mem_footprint_gb():.3f}GB total, took {delay / 60:.1}m)"
)
delay = time.time() - start
self.log(
f"Loaded {len(self.duplicates):_d} hashes from {len(self.hashes_files)} files. ({mem_footprint_gb():.1f}GB total, took {delay / 60:.1}m)"
)
def do(self, doc: dict) -> Optional[dict]:
content = doc.get(self.field)
if not content:
return None
doc_hashes = compute_hashes(content)
assert self.duplicates is not None
seen = (
self.duplicates.add(doc_hashes)
if self.collect
else self.duplicates[doc_hashes]
)
keep = seen < True
kept = keep.sum()
if kept == 0:
return None
doc_hashes = doc_hashes * keep
self.n_lines += keep.size
self.n_lines_kept += kept
chars, kept_chars = finalize_doc(doc, self.field, hashes=doc_hashes)
self.n_chars += chars
self.n_chars_kept += kept_chars
return doc
def summary(self) -> List[str]:
summ = super().summary()
end_time = time.time()
n_lines_kept, n_lines, n_docs = self.n_lines_kept, self.n_lines, self.processed
speed = n_docs / (end_time - self.start_time)
summ.append(
f"Processed {self.n_lines} lines in {n_docs} docs. [{speed:.1f} doc/s]"
)
selectivity = self.n_lines_kept / self.n_lines if n_lines else 0
summ.append(f"Kept {n_lines_kept} lines out of {n_lines} ({selectivity:.1%}).")
n_chars_kept, n_chars = self.n_chars_kept, self.n_chars
selectivity = n_chars_kept / n_chars if n_chars else 0
summ.append(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
return summ
def deduplicate(
file: jsonql.ReadableFileLike, field: str = "raw_content"
) -> Iterable[dict]:
"""Remove duplicates of the given file (but keep the first occurence)."""
dup_remover = DuplicatesRemover(field, [], collect=True)
return dup_remover.map(jsonql.read_jsons(file))
def deduplicate_two_pass(
file: jsonql.FileDescriptor, field: str = "raw_content"
) -> Iterable[dict]:
"""Remove duplicates of the given file (even removing the first occurence).
This is what is done in the paper, and in mine.py
"""
try:
if isinstance(file, Path):
hash_file: Path = file.with_suffix(".bin")
else:
hash_file = jsonql._tmp(Path("hashes.bin"))
jsonql.run_pipes(
jsonql.JsonReader(), HashesCollector(field, output=hash_file), file=file
)
dup_remover = DuplicatesRemover(field, [hash_file])
return dup_remover.map(jsonql.read_jsons(file))
finally:
if hash_file.exists():
hash_file.unlink()
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/regroup.py | code/data_collection/cc/cc_net/cc_net/regroup.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import subprocess
from pathlib import Path
from typing import List
import func_argparse
import numpy as np
from cc_net import jsonql
def get_index(file: Path) -> Path:
return file.parent / (file.name + ".index")
def _get_tmp(output: Path) -> Path:
return output.parent / (output.stem + ".tmp" + output.suffix)
def reshard(
inputs: List[Path],
output: Path,
tmp: Path = None,
free_original: bool = False,
rm_original: bool = False,
) -> Path:
"""Read the given files and concatenate them to the output file.
Can remove original files on completion, or just write dummy content into them to free disk.
"""
if tmp is None:
tmp = _get_tmp(output)
logging.info(f"Resharding {inputs} to {tmp}, will move later to {output}")
jsonql.run_pipes(file=inputs, output=tmp)
tmp.replace(output)
tmp_index = get_index(tmp)
if tmp_index.exists():
tmp_index.replace(get_index(output))
if not (free_original or rm_original):
return output
for _input in inputs:
if rm_original:
_input.unlink()
elif free_original:
# Overwrite the previous file.
# This frees up disk space and allows doit to properly track the success.
_input.write_text(f"Resharded into {output}")
if get_index(_input).is_file():
get_index(_input).unlink()
return output
def fast_reshard(
inputs: List[Path],
output: Path,
tmp: Path = None,
free_original: bool = False,
rm_original: bool = False,
) -> Path:
"""Same as reshard but don't re-compress the output.
This will lead to a bigger output file, especially if the shards are very small.
"""
if tmp is None:
tmp = _get_tmp(output)
with open(tmp, "wb") as o:
subprocess.run(["cat"] + [str(f) for f in inputs], stdout=o)
tmp.replace(output)
indexes_files = [get_index(i) for i in inputs]
existing_indexes = sum(i.exists() for i in indexes_files)
assert (
existing_indexes == len(indexes_files) or existing_indexes == 0
), "some indexes don't exist."
if existing_indexes > 0:
indexes = [np.load(idx) for idx in indexes_files]
for i in range(len(indexes) - 1):
indexes[i + 1] += indexes[i][-1]
with open(str(output) + ".index", "wb") as o:
np.save(o, np.concatenate(indexes))
if not (free_original or rm_original):
return output
for _input in inputs:
if rm_original:
_input.unlink()
elif free_original:
# Overwrite the previous file.
# This frees up disk space and allows doit to properly track the success.
_input.write_text(f"Resharded into {output}")
if get_index(_input).is_file():
get_index(_input).unlink()
return output
def determine_groups(
inputs: List[Path], target_size: int = 4 * 1024 ** 3
) -> List[List[Path]]:
if len(inputs) == 0:
return []
sample = inputs[:10]
typical_size = sum(s.stat().st_size for s in sample) / len(sample)
group_size = min(target_size // typical_size, len(inputs))
group_size = max(group_size, 1)
return jsonql.grouper(inputs, group_size)
if __name__ == "__main__":
func_argparse.single_main(reshard)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/get_wiki_cirrus.py | code/data_collection/cc/cc_net/cc_net/get_wiki_cirrus.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Creates mono-lingual corpus from Wikipedia.
"""
import functools
import re
import subprocess
import urllib.request
from pathlib import Path
from typing import Dict
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql, text_normalizer
CIRRUS_URL = "https://dumps.wikimedia.org/other/cirrussearch"
CIRRUS_DUMP_RE = re.compile(r"^(.*)wiki-\d+-cirrussearch-content\.json\.gz")
def tmp(file: Path) -> Path:
return file.parent / ("tmp." + file.name)
def opening(file: Path, output: Path = None, n_docs: int = 1_000_000):
"""Will dump the tokenized opening text of the given Wikipedia.
Args:
- file: File containing the Wikipedia dump.
- output: Output file.
- n_docs: How many docs to parse
- tokenize: whether to tokenize the text
- lang: Language code used to chose the tokenizer
"""
assert file.exists()
return jsonql.run_pipes(
functools.partial(extract_opening_text, n_docs=n_docs),
file=file,
output=tmp(output) if output else None,
)
if output:
tmp(output).replace(output)
def extract_opening_text(source, n_docs: int = 10_000):
i = 0
for doc in jsonql.read_jsons(source):
if not doc:
continue
text = doc.get("opening_text")
if not text:
continue
yield text_normalizer.normalize(text)
i += 1
if i >= n_docs:
break
def dl(lang: str, output_dir: Path, date: str = None):
"""Download the cirrus extract for the given lang.
See https://dumps.wikimedia.org/other/cirrussearch for the full list of files.
Args:
- lang: The Wikipedia code for the language.
- output_dir: Output directory. File will be `{lang}.json.gz`
- date: Date of a specific Cirrus dump.
"""
urls = get_cirrus_urls(date)
assert (
lang in urls
), f"--lang {lang} not found. Available languages are: {urls.keys()}"
assert output_dir, "--output_dir folder needed."
output_dir.mkdir(exist_ok=True)
output = output_dir / (lang + ".json.gz")
print(f"Downloading {lang} wiki from {urls[lang]} to {output}")
wget(urls[lang], output)
def get_cirrus_urls(date: str = None) -> Dict[str, str]:
if date is None:
cirrus_page = BeautifulSoup(
urllib.request.urlopen(CIRRUS_URL), features="html.parser"
)
dumps = [a.get("href").strip("/") for a in cirrus_page.findAll("a")]
dumps.remove("..")
dumps.remove("current")
# We take the oldest dump since the most recent might be incomplete.
# The page only link to the N latest dumps so the dump won't be too old.
date = min(dumps)
cirrus_url = "/".join((CIRRUS_URL, date))
print("Will use the Wikipedia dump from:", date, cirrus_url)
cirrus_page = BeautifulSoup(
urllib.request.urlopen(cirrus_url), features="html.parser"
)
urls = {}
for link in cirrus_page.findAll("a"):
match = CIRRUS_DUMP_RE.match(link.get("href"))
if not match:
continue
urls[match.group(1)] = "/".join([cirrus_url, link.get("href")])
assert urls, f"No valid download urls found at {cirrus_url}"
return urls
def wget(url: str, output: Path):
subprocess.run(["wget", url, "-O", tmp(output), "-q"], check=True)
tmp(output).replace(output)
assert (
output.stat().st_size > 10_000
), f"File {output} downloaded from {url} looks too small"
if __name__ == "__main__":
func_argparse.main(dl, opening)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/minify.py | code/data_collection/cc/cc_net/cc_net/minify.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import base64
import hashlib
import itertools
import urllib.parse
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Sequence, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.execution import get_executor
from cc_net.jsonql import mem_footprint_gb
HASH_SIZE = 4
HASH_TYPE = np.uint32
PUBLIC_FIELDS = ["url", "digest"]
COMPUTED_FIELDS = ["cc_segment", "language", "language_score", "bucket", "perplexity"]
DATA = Path(__file__).parent.parent / "data"
# This is similar to dedup methods but with use 32 bits hashes.
def _b2i(b: bytes) -> int:
return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def _str_hash(s: str) -> int:
h = hashlib.sha1(bytes(s, encoding="utf-8"))
return _b2i(h.digest())
def get_hashes(lines: Iterable[str]) -> List[bytes]:
h = HASH_SIZE
return [hashlib.sha1(bytes(l, encoding="utf-8")).digest()[:h] for l in lines]
def encode_hashes(hashes: Iterable[bytes]) -> str:
return base64.b64encode(b"".join(hashes)).decode("ascii")
def encode_as_hashes(lines: Iterable[str]) -> str:
return encode_hashes(get_hashes(lines))
def decode_hashes(compact: str) -> List[bytes]:
all_hashes = base64.b64decode(compact)
res = []
assert len(all_hashes) % HASH_SIZE == 0
for i in range(len(all_hashes) // HASH_SIZE):
chunk = all_hashes[i * HASH_SIZE : (i + 1) * HASH_SIZE]
res.append(chunk)
return res
def encode_line_ids(line_ids: Sequence[int]) -> str:
arr = np.array(line_ids, dtype="<u2")
return base64.b64encode(arr.tobytes()).decode("ascii")
def decode_line_ids(compact: str) -> List[int]:
ids_bytes = bytearray(base64.b64decode(compact))
return np.ndarray(len(ids_bytes) // 2, dtype="<i2", buffer=ids_bytes)
def get_doc_key(digest: str) -> int:
assert digest.startswith("sha1:")
h = base64.b32decode(digest[5:])
return _b2i(h[:HASH_SIZE])
class Minifier(jsonql.Transformer):
ready = True
def __init__(self):
self.fields = frozenset(COMPUTED_FIELDS + PUBLIC_FIELDS)
def do(self, doc: dict) -> Optional[dict]:
line_ids: List[int] = doc.pop("line_ids")
fields = self.fields
keys = list(doc.keys())
for k in keys:
if k not in fields:
doc.pop(k, None)
p = doc.get("perplexity", 0)
doc["line_ids"] = encode_line_ids(line_ids)
if p:
doc["perplexity"] = round(p, 1)
s = doc.get("language_score", 0)
if s:
doc["language_score"] = round(s, 2)
return doc
class MetadataFetcher(jsonql.Transformer):
"""Reads documents from CC snapshot and join precomputed metadata.
CC snapshots are split in segments. Each segment is 64Mb long.
The metadata must also be stored in segments of the same size and names.
"""
def __init__(self, folder: Union[Path, str]):
self.ready = True
self.metadata: Dict[int, dict] = {}
self._segments: Set[str] = set()
self.read_doc = 0
self.missed_doc = 0
self.missed_par = 0
self.processed_par = 0
if isinstance(folder, str):
# detect path passed as string
if urllib.parse.urlparse(folder).scheme == "":
folder = Path(folder)
assert folder.exists(), f"Metadata folder not found: {folder}"
self.folder = folder
self.segment: str = ""
self.segments_read_twice = 0
def meta_file(self, segment: str) -> str:
file_name = segment.split("/")[-1]
assert file_name.endswith(".warc.wet.gz") or file_name.endswith(".warc.wet")
if isinstance(self.folder, str):
return urllib.parse.urljoin(
self.folder, file_name.replace(".warc.wet", ".json")
)
meta_file = self.folder / file_name.replace(".warc.wet", ".json")
assert (
meta_file.exists()
), f"Couldn't find metadata file for segment {segment} at {meta_file}"
return str(meta_file)
def fetch_metadata(self, segment: str) -> None:
meta_file = self.meta_file(segment)
k = get_doc_key
self.metadata = {}
collision = 0
for m in jsonql.read_jsons(meta_file):
key = k(m["digest"])
if key in self.metadata:
collision += 1
self.metadata[key] = m
self.log(f"Loaded {len(self.metadata)} metadatas from {meta_file}")
if collision > 0:
self._logger.warning(f"Found {collision} collisions !")
self.segment = segment
if segment in self._segments:
self.log("Cache miss")
self.segments_read_twice += 1
self._segments.add(segment)
def do(self, doc: dict) -> Optional[dict]:
if self.segment != doc["cc_segment"]:
self.fetch_metadata(doc["cc_segment"])
digest = doc["digest"]
key = get_doc_key(digest)
if key not in self.metadata:
return None
metadata = self.metadata.pop(key)
return self.clean(metadata, doc)
def clean(self, metadata: dict, full_doc: dict) -> Optional[dict]:
line_ids = decode_line_ids(metadata.pop("line_ids"))
lines = full_doc["raw_content"].split("\n")
cleaned = []
for l in line_ids:
if l >= len(lines) or l < 0:
self.missed_par += 1
continue
cleaned.append(lines[l])
self.processed_par += len(line_ids)
if not cleaned:
self.missed_doc += 1
return None
full_doc["raw_content"] = "\n".join(cleaned)
full_doc["original_nlines"] = full_doc["nlines"]
full_doc["original_length"] = full_doc["length"]
full_doc["nlines"] = len(cleaned)
full_doc["length"] = len(full_doc["raw_content"])
for key, value in metadata.items():
full_doc[key] = value
return full_doc
def summary(self) -> List[str]:
summ = super().summary()
mem = mem_footprint_gb()
len_cache = len(self.metadata)
summ.append(
f"Read {self.read_doc:_}, stocking {len_cache:_} doc in {mem:.1f}g."
)
if self.missed_doc:
r = self.missed_doc / self.processed
summ.append(f"! Missed {self.missed_doc} documents ({r:.1%}) !")
if self.missed_par:
r = self.missed_par / self.processed
summ.append(f"! Missed {self.missed_par} paragraphs ({r:.1%}) !")
return summ
def _expand_files(files: List[Path]) -> List[Path]:
if len(files) == 1 and files[0].is_dir():
folder = files[0]
files = sorted(folder.glob("*.json.gz"))
print(f"Found {len(files)} files under {folder}/*.json.gz")
assert files, "No files found"
return files
def minify_file(file: Path, output: Path) -> str:
"""Minify the given file."""
jsonql.run_pipes(Minifier(), file=file, output=output)
return f"Minified {output}"
def minify(
files: List[Path], output_dir: Path, execution: str = "mp", parallelism: int = -1
):
"""Minify all the files in the given folder."""
files = _expand_files(files)
output_dir.mkdir(exist_ok=True)
with open(output_dir / "files.txt", "w") as o:
for f in files:
print(f.name, file=o)
outputs = [output_dir / f.name for f in files]
ex = get_executor(
"minify",
output_dir / "logs",
execution,
timeout_hour=2,
cpus=1,
task_parallelism=parallelism,
)
ex(minify_file, files, outputs)
def fetch_metadata_file(
file: Union[Path, str],
metadata_dir: Union[Path, str],
output: Path,
cache_dir: Path = None,
):
unminifier = MetadataFetcher(metadata_dir)
tmp = output.with_name("tmp." + output.name)
jsonql.run_pipes(unminifier, file=file, output=tmp)
tmp.rename(output)
return f"Fetched metadata for {file}. Results at {output}."
def fetch_metadata(
files: List[str],
metadata_dir: Union[Path, str],
output_dir: Path,
execution: str = "mp",
parallelism: int = -1,
cache_dir: Path = None,
):
if len(files) == 1 and Path(files[0]).is_dir():
folder = Path(files[0])
files = [str(f) for f in sorted(folder.glob("*.json.gz"))]
print(f"Found {len(files)} files under {folder}/*.json.gz")
assert len(files) > 0, "No files given."
output_dir.mkdir(exist_ok=True)
outputs = [output_dir / str(f).split("/")[-1] for f in files]
if cache_dir is None:
cache_dir = output_dir / "wet_cache"
cache_dir.mkdir(exist_ok=True)
if str(cache_dir) == "none":
cache_dir = None
files = [f for f, o in zip(files, outputs) if not o.exists()]
outputs = [o for o in outputs if not o.exists()]
if not files:
return
ex = get_executor(
"unminify",
output_dir / "logs",
execution,
timeout_hour=8,
cpus=1,
task_parallelism=parallelism,
mem_gb=32,
)
ex(fetch_metadata_file, files, outputs, itertools.repeat(cache_dir))
if __name__ == "__main__":
import func_argparse
func_argparse.main(minify_file, minify, fetch_metadata, fetch_metadata_file)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/process_wet_file.py | code/data_collection/cc/cc_net/cc_net/process_wet_file.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import logging
import re
import tempfile
import time
import urllib.request
from pathlib import Path
from typing import ContextManager, Iterable, Iterator, List, Optional, Sequence
from urllib.parse import urlparse
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql
WET_URL_ROOT = "https://data.commoncrawl.org"
logger = logging.getLogger(__name__)
def cc_wet_paths_url(dump_id: str) -> str:
return "/".join([WET_URL_ROOT, "crawl-data", "CC-MAIN-" + dump_id, "wet.paths.gz"])
@functools.lru_cache()
def cc_segments(dump_id: str, cache_dir: Path = None) -> List[str]:
wet_paths = cc_wet_paths_url(dump_id)
cache_dir = cache_dir or jsonql._tmp_dir()
wet_paths_cache = cache_dir / f"wet_{dump_id}.paths.gz"
f = jsonql.open_remote_file(wet_paths, cache=wet_paths_cache)
return [segment.strip() for segment in f]
def list_dumps() -> List[str]:
home_page = BeautifulSoup(
urllib.request.urlopen("http://index.commoncrawl.org/"), features="html.parser"
)
dumps = [a.get("href").strip("/") for a in home_page.findAll("a")]
dumps = [a[8:] for a in dumps if re.match(r"^CC-MAIN-\d\d\d\d-\d\d$", a)]
return sorted(dumps)
def ls():
for dump in list_dumps():
print(dump, "->", cc_wet_paths_url(dump))
def parse_doc(headers: List[str], doc: List[str]) -> Optional[dict]:
"""BEFORE 2020, Headers format is:
WARC/1.0
WARC-Type: conversion
WARC-Target-URI: [url]
WARC-Date: [crawldate: 2019-02-15T19:15:59Z]
WARC-Record-ID: <urn:uuid:8865156e-d5f1-4734-9c68-4b46eaf2bb7e>
WARC-Refers-To: <urn:uuid:340152e2-65cf-4143-b522-8ce4e2d069d7>
WARC-Block-Digest: sha1:S3DTWCONT2L6ORTGCY2KXEZ37LNBB7V2
Content-Type: text/plain
Content-Length: 7743
AFTER 2020, Headers format is:
WARC/1.0
WARC-Type: conversion
WARC-Target-URI: http://100greatpiano.com/video/wilhelm-kempff-plays-beethovens-moonlight-sonata/
WARC-Date: 2023-01-26T22:21:08Z
WARC-Record-ID: <urn:uuid:ccafeba8-a08b-47d0-86be-cf0855f4f6d0>
WARC-Refers-To: <urn:uuid:935a6ef4-8708-41f5-a152-412cdf1b48c1>
WARC-Block-Digest: sha1:2WURD74BLDCLPV6INBQEQ6OOJRQDPJBA
WARC-Identified-Content-Language: eng,jpn
Content-Type: text/plain
Content-Length: 886
"""
if not headers or not doc:
return None
try:
warc_type = headers[1].split()[1]
if warc_type != "conversion":
return None
url = headers[2].split()[1]
date = headers[3].split()[1]
digest = headers[6].split()[1]
# process length, may be in the 8th or 9th position
try:
length = int(headers[9].split()[1])
except IndexError as e:
length = int(headers[8].split()[1])
except Exception as e:
logger.warning("Can't parse header:", e, headers, doc)
return None
# Docs are separated by two empty lines.
last = None
if not doc[-1] and not doc[-2]:
last = -2
title, doc = doc[0], doc[1:last]
return {
"url": url,
"date_download": date,
"digest": digest,
"length": length,
"nlines": len(doc),
"source_domain": urlparse(url).netloc,
"title": title,
"raw_content": "\n".join(doc),
}
def group_by_docs(warc_lines: Iterable[str]) -> Iterable[dict]:
doc: List[str] = []
headers, read_headers = [], True
for warc in warc_lines:
warc = warc.strip()
if read_headers:
headers.append(warc)
read_headers = warc != ""
continue
if warc == "WARC/1.0":
# We reached the beginning of the new doc.
parsed = parse_doc(headers, doc)
if parsed is not None:
yield parsed
headers, doc, read_headers = [warc], [], True
continue
doc.append(warc)
# Return the last document
if doc:
parsed = parse_doc(headers, doc)
if parsed is not None:
yield parsed
def parse_warc_file(lines: Iterable[str], min_len: int = 1) -> Iterator[dict]:
n_doc = 0
n_ok = 0
for doc in group_by_docs(lines):
n_doc += 1
if not doc or len(doc["raw_content"]) < min_len:
continue
n_ok += 1
yield doc
if n_doc > 0:
logger.info(f"Kept {n_ok:_d} documents over {n_doc:_d} ({n_ok / n_doc:.1%}).")
else:
logger.info(f"Found no documents")
def dl(
dump: str,
shard: int,
num_shards: int,
output: Path = None,
num_segments_per_shard: int = 0,
):
"""Download a shard of the common crawl, and export it to json.
Arguments:
output: filename of the output file
dump: CC dump id
shard: id of the shard
num_shards: total number of shards
num_segments_per_shard: manual control of the number of segment per shard.
"""
reader = CCShardReader(dump, shard, num_shards, num_segments_per_shard)
jsonql.run_pipes(inputs=reader, output=output)
logger.info(f"Done. {output} is ready.")
class CCSegmentsReader(Iterable[dict]):
def __init__(
self, segments: Sequence[str], min_len: int = 0, cache_dir: Path = None
):
self._segments = segments
self.min_len = min_len
if cache_dir is not None:
cache_dir = Path(cache_dir)
cache_dir.mkdir(exist_ok=True)
self.cache_dir = cache_dir
self.retrieved_segments = 0
def segment_url(self, segment: str):
return "/".join((WET_URL_ROOT, segment))
@property
def segments(self) -> Sequence[str]:
return self._segments
def open_segment(self, segment: str) -> Iterable[str]:
url = self.segment_url(segment)
file: Optional[Path] = None
if self.cache_dir:
file = self.cache_dir / segment.split("/")[-1]
if not file or not file.exists():
self.retrieved_segments += 1
return jsonql.open_remote_file(url, cache=file)
def __iter__(self) -> Iterator[dict]:
n = len(self.segments)
for i, segment in enumerate(self.segments):
start = time.time()
# TODO: start downloading the next segment in the background
for doc in parse_warc_file(self.open_segment(segment), self.min_len):
doc["cc_segment"] = segment
yield doc
if i + 1 >= n:
continue
end = time.time()
delay = (end - start) / 3600 * (n - 1 - i)
logger.info(
f"Parsed {i + 1} / {n} files. Estimated remaining time: {delay:.1f}h"
)
class CCShardReader(CCSegmentsReader):
def __init__(
self,
dump: str,
shard: int,
num_shards: int = -1,
num_segments_per_shard: int = 40,
min_len: int = 300,
cache_dir: Path = None,
):
"""Downloads a shard of Common Crawl, and yields dict.
Arguments:
dump: CC dump id
shard: id of the shard
num_shards: total number of shards
num_segments_per_shard: if set will limit the number of files by shard.
Useful for testing.
"""
super().__init__([], min_len=min_len, cache_dir=cache_dir)
self.dump = dump
self.shard = shard
assert num_shards > 0 or num_segments_per_shard > 0
self.num_shards = num_shards
self.num_segments_per_shard = num_segments_per_shard
@property
def segments(self) -> Sequence[str]:
# Delaying the initialization allows to delay the looking up of the WET files
if self._segments:
return self._segments
segments = cc_segments(self.dump, self.cache_dir)
n = len(segments)
if self.num_shards < 0:
self.num_shards = n // self.num_segments_per_shard
i_min = (self.shard * n) // self.num_shards
i_max = ((self.shard + 1) * n) // self.num_shards
if self.num_segments_per_shard > 0:
i_max = min(i_max, i_min + self.num_segments_per_shard)
self._segments = segments[i_min:i_max]
return self._segments
def _tmp(prefix: str = None, suffix: str = None, dir: Path = None) -> Path:
_, tmp_path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
return Path(tmp_path)
@contextlib.contextmanager
def timer(name: str = "-"):
start = time.time()
yield None
delay = time.time() - start
print(f"{name} took {delay:.1f}s")
def benchmark(tmp_path: Path):
segments = [
"crawl-data/CC-MAIN-2019-09/segments/1550249406966.99/wet/CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"
]
seg_file = tmp_path / "CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"
with timer("from network"):
list(CCSegmentsReader(segments))
with timer("from network, with caching"):
list(CCSegmentsReader(segments, cache_dir=tmp_path))
assert seg_file.exists()
with timer("from disk"):
CCSegmentsReader(segments, cache_dir=tmp_path)
seg_file.unlink()
if __name__ == "__main__":
func_argparse.main(ls, dl)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/__main__.py | code/data_collection/cc/cc_net/cc_net/__main__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import func_argparse
import cc_net.mine
def main():
func_argparse.parse_and_call(cc_net.mine.get_main_parser())
if __name__ == "__main__":
main()
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/mine.py | code/data_collection/cc/cc_net/cc_net/mine.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Main script to download a CC dump, remove duplicates, split by language and
filter the documents.
The pipeline parameters are described in the `Config` class.
"""
import hashlib
import json
import time
import warnings
from argparse import ArgumentParser
from collections import defaultdict
from itertools import repeat
from pathlib import Path
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Sequence, Tuple
import func_argparse
# Local scripts
from cc_net import dedup, execution, jsonql, minify, perplexity, process_wet_file
from cc_net import regroup as regroup_module
from cc_net import split_by_lang
from cc_net.execution import Executor
# Constant
FILE_DIR = Path(__file__).parent
CUTOFF_CSV = FILE_DIR / "data" / "cutoff.csv"
DEFAULT_PIPELINE = [
"dedup",
"lid",
"keep_lang",
"sp",
"lm",
"pp_bucket",
"drop",
"split_by_lang",
]
class Config(NamedTuple):
"""
Mine Common Crawl with the given settings.
config_name
dump: CC dump id
output_dir: working directory
mined_dir: name of the destination folder, full path will be {ouput_dir}/{mined_dir}/{dump_id}
execution: chose how to parallelize the execution
num_shards: number of shards to split the dump
min_shard: start at shard `min_shard` if specified
num_segments_per_shard: allow to download a small portion of CC (eg for tests)
min_len: remove documents shorter than this (in chars)
hashes_in_mem: number of shards hashes to use for dedup
lang_whitelist: only treat those languages
lang_blacklist: ignore those languages
lang_threshold: remove docs whose top language score is lower than this
keep_bucket: keep only those perplexity bucket chose from (head, middle, tail, all)
lm_dir: folder containing LMs
lm_languages: only use LMs for the following languages
cutoff: cutoff file to use for split in head/middle/tail
mine_num_processes: number of processes to use for mining
target_size: size of finals files produce during `regroup` stage
cleanup_after_regroup: delete intermediary files after regroup
task_parallelism: max number of task to run in parallel
pipeline: restricts the mining pipeline to the given steps. Order is important !
experiments: (HACK) enable specific experiments in the code
"""
config_name: str = "base"
dump: str = "2017-51"
output_dir: Path = Path("data")
mined_dir: str = "mined"
execution: str = "auto"
num_shards: int = 1600
min_shard: int = -1
num_segments_per_shard: int = -1
metadata: Optional[str] = None
min_len: int = 300
hash_in_mem: int = 50
lang_whitelist: Sequence[str] = []
lang_blacklist: Sequence[str] = []
lang_threshold: float = 0.5
keep_bucket: Sequence[str] = []
lm_dir: Path = Path("data/lm_sp")
cutoff: Path = CUTOFF_CSV
lm_languages: Optional[Sequence[str]] = None
mine_num_processes: int = 16
target_size: str = "4G"
cleanup_after_regroup: bool = False
task_parallelism: int = -1
pipeline: Sequence[str] = DEFAULT_PIPELINE
experiments: Sequence[str] = []
cache_dir: Optional[Path] = None
def get_executor(
self, name: str, timeout_hour: int = 1, mem_gb: int = 1, cpus: int = 1
) -> Executor:
name = "_".join((name, self.config_name, *self.experiments))
return execution.get_executor(
name,
self.output_dir / "logs",
self.execution,
timeout_hour=timeout_hour,
mem_gb=mem_gb,
cpus=cpus,
task_parallelism=self.task_parallelism,
)
def get_cc_shard(self, shard: int) -> process_wet_file.CCShardReader:
dump_cache: Optional[Path] = None
if self.cache_dir:
self.cache_dir.mkdir(exist_ok=True)
dump_cache = self.cache_dir / self.dump
dump_cache.mkdir(exist_ok=True)
return process_wet_file.CCShardReader(
self.dump,
shard=shard,
num_shards=self.num_shards,
num_segments_per_shard=self.num_segments_per_shard,
min_len=self.min_len,
cache_dir=dump_cache,
)
@classmethod
def from_json(cls, json_file: Path) -> "Config":
raw_lines = json_file.read_text().splitlines()
raw_lines = [l for l in raw_lines if not l.strip().startswith("//")]
json_config = json.loads("".join(raw_lines))
path_keys = ["cache_dir", "lm_dir", "output_dir"]
for key in path_keys:
if key in json_config:
json_config[key] = Path(json_config[key])
return Config(**json_config)
@property
def will_split(self) -> bool:
return "split_by_lang" in self.pipeline or "split_by_segment" in self.pipeline
def get_lm_languages(self) -> Sequence[str]:
if self.lm_languages is not None:
return self.lm_languages
if self.lang_whitelist:
return self.lang_whitelist
languages = [m.name.split(".")[0] for m in self.lm_dir.glob("*.arpa.bin")]
if self.lang_blacklist:
languages = [l for l in languages if l not in self.lang_blacklist]
return languages
def get_mined_dir(self, regroup: bool = False) -> Path:
if self.will_split and not regroup:
return self.output_dir / f"{self.mined_dir}_split" / self.dump
return self.output_dir / self.mined_dir / self.dump
BASE_CONFIG = Config()
BYLANG_CONFIG = Config(
config_name="by_lang",
mined_dir="mined_by_lang",
pipeline=list(BASE_CONFIG.pipeline[:-1]) + ["split_by_lang"],
)
REPRODUCE_CONFIG = Config(
config_name="reproduce",
dump="2019-09",
mined_dir="reproduce",
pipeline=["fetch_metadata", "keep_lang", "keep_bucket", "split_by_lang"],
metadata="https://dl.fbaipublicfiles.com/cc_net/1.0.0",
# Optional filtering:
# It won't change much the execution speed, but decreases the disk requirement.
# Restrict languages
lang_whitelist=["fr"],
# Restrict perplexity buckets
# Top languages have been split in perplexity buckets according
# to a Wikipedia trained LM.
# The buckets from low perplexity (good) to high (bad) are:
# ["head", "middle", "tail"]
# Languages without a LM have only one bucket "all".
# It won't change much the execution speed, but decreases the disk requirement.
keep_bucket=["head", "all"],
mine_num_processes=1,
)
TEST_CONFIG = BASE_CONFIG._replace(
config_name="test",
dump="2019-09",
output_dir=Path("test_data"),
execution="local",
num_shards=4,
num_segments_per_shard=1,
hash_in_mem=2,
mine_num_processes=2,
lang_whitelist=["de", "it", "fr"],
target_size="32M",
cleanup_after_regroup=False,
cache_dir=Path("test_data/wet_cache"),
)
PREDEF_CONFIGS = {
"base": BASE_CONFIG,
"by_lang": BYLANG_CONFIG,
"test": TEST_CONFIG,
"test_slurm": TEST_CONFIG._replace(execution="slurm,partition=dev"),
"debug": TEST_CONFIG._replace(config_name="debug", mine_num_processes=0),
"reproduce": REPRODUCE_CONFIG,
"augment": BASE_CONFIG._replace(
config_name="augment", dump="2019-13", lang_blacklist=["en"]
),
}
def tmp(output: Path) -> Path:
return output.parent / (output.stem + ".tmp" + output.suffix)
def finalize(tmp_output: Path, output: Path) -> None:
if not tmp_output.exists():
warnings.warn(f"Targeted tmp output {tmp_output} doesn't exists.")
return
tmp_index = tmp_output.parent / (tmp_output.name + ".index")
tmp_output.rename(output)
if tmp_index.exists():
tmp_index.rename(output.parent / (output.name + ".index"))
def _transpose(iterable: Sequence[Tuple[Any, ...]], n=-1) -> Tuple[List, ...]:
if n < 0:
n = len(iterable[0])
columns: tuple = tuple([] for _ in range(n))
for row in iterable:
assert len(row) == n, f"Found tuple of len({len(row)}, expected {n}: {row}"
for i in range(n):
columns[i].append(row[i])
return columns
def hashes(conf: Config) -> List[Path]:
"""Computes hashes for each shard."""
hashes_dir = conf.output_dir / "hashes" / conf.dump
outputs = [hashes_dir / f"{shard:04d}.bin" for shard in range(conf.num_shards)]
missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
if not missing_outputs:
return outputs
hashes_dir.mkdir(parents=True, exist_ok=True)
# With FlatHashSet we need ~2Gb of RAM / shard, but we need to account for
# overhead due to how the dynamic allocation works.
ex = conf.get_executor(f"hashes_{conf.dump}", mem_gb=4, timeout_hour=6, cpus=2)
ex(_hashes_shard, repeat(conf), *_transpose(missing_outputs))
# Wait a bit so that files appears on the disk.
time.sleep(20)
assert all(o.exists() for o in outputs)
return outputs
def _hashes_shard(conf: Config, shard: int, output: Path):
tmp_output = tmp(output)
jsonql.run_pipes(
dedup.HashesCollector(field="raw_content", output=tmp_output),
inputs=conf.get_cc_shard(shard),
)
finalize(tmp_output, output)
return f"Hashed {output}"
HASHES_IN_MEM = [0, 1, 2, 5, 10, 20, 50, 100, 200, 400]
def mine(conf: Config) -> List[Path]:
"""Remove dups, run LID and LMs, and split by lang and quality."""
mined_dir = conf.get_mined_dir()
if conf.min_shard == -1:
shard_range = list(range(conf.num_shards))
else:
shard_range = list(range(conf.min_shard, conf.num_shards))
if conf.will_split:
# Give a directories when splitting
outputs = [mined_dir / f"{shard:04d}" for shard in shard_range]
else:
# Files otherwise
outputs = [
mined_dir / f"{shard:04d}.json.gz" for shard in shard_range
]
if "mini_again" in conf.experiments:
mined_dir = conf.output_dir / "mini_again" / conf.dump
outputs = [mined_dir / f"{shard:04d}" for shard in shard_range]
# TODO: try to reduce this / make it a function of "hash_in_mem" / num_langs
mem_gb = 60 + 1 * conf.hash_in_mem
timeout_hour = 5
if "hashes" in conf.experiments:
# HACK: used for generating paper figures
outputs = [
conf.output_dir / f"hashes_exp/{conf.dump}_0000_dedup{h:03d}.json.gz"
for h in HASHES_IN_MEM
]
mem_gb = int(max(HASHES_IN_MEM) * 1.2)
timeout_hour = 8
missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
if "mini_again" in conf.experiments:
missing_outputs = [
(shard, o)
for shard, o in enumerate(outputs)
if shard in [5, 139] and not o.exists()
]
if not missing_outputs:
return outputs
mined_dir.mkdir(parents=True, exist_ok=True)
ex = conf.get_executor(
f"mine_{conf.dump}",
mem_gb=mem_gb,
timeout_hour=timeout_hour,
cpus=conf.mine_num_processes + 1,
)
# Compute hashes firsts.
if "dedup" in conf.pipeline:
hashes_groups = list(jsonql.grouper(hashes(conf), conf.hash_in_mem))
hashes_files: Iterable[List[Path]] = [
hashes_groups[shard // conf.hash_in_mem] for shard, o in missing_outputs
]
else:
hashes_files = repeat([])
ex(_mine_shard, repeat(conf), hashes_files, *_transpose(missing_outputs))
assert all(o.exists() for o in outputs)
return outputs
def _get_segment(tmp_output: Path, doc: dict) -> str:
segment: str = doc["cc_segment"].split("/")[-1]
return str(tmp_output / segment.replace(".warc.wet.gz", ".json.gz"))
def _mine_shard(conf: Config, hashes: List[Path], shard: int, output: Path) -> str:
print(conf.pipeline)
assert conf.pipeline
tmp_output = tmp(output)
if "hashes" in conf.experiments:
# HACK: used for generating paper figures
hashes_in_mem = shard
hashes = hashes[: HASHES_IN_MEM[hashes_in_mem]]
shard = 0
cc_shard = conf.get_cc_shard(shard)
steps: Dict[str, Optional[jsonql.Transformer]] = {}
lang_id = Path("bin") / "lid.bin"
steps["lid_before_dedup"] = split_by_lang.Classifier(
model=lang_id, field="raw_content", out_field="lid_before_dedup", top=5
)
steps["dedup"] = dedup.DuplicatesRemover(field="raw_content", hashes_files=hashes)
steps["lid"] = split_by_lang.Classifier(
model=lang_id,
field="raw_content",
out_field="language",
top=1,
threshold=conf.lang_threshold,
)
steps["lid_after_dedup"] = split_by_lang.Classifier(
model=lang_id, field="raw_content", out_field="lid_after_dedup", top=5
)
if conf.lang_blacklist:
steps["keep_lang"] = jsonql.where(
[lambda doc: doc.get("language") not in set(conf.lang_blacklist)]
)
elif conf.lang_whitelist:
steps["keep_lang"] = jsonql.where(
[lambda doc: doc.get("language") in set(conf.lang_whitelist)]
)
else:
steps["keep_lang"] = None
tok_field = "tokenized"
steps["sp"] = perplexity.MultiSentencePiece(
{l: conf.lm_dir / f"{l}.sp.model" for l in conf.get_lm_languages()},
field="raw_content",
output_field=tok_field,
normalize=True,
)
steps["lm"] = perplexity.DocLM(
{l: conf.lm_dir / f"{l}.arpa.bin" for l in conf.get_lm_languages()},
field=tok_field,
output_field="perplexity",
normalize=False, # Normalization is done before SentencePiece
# load_method=kenlm.LoadMethod.PARALLEL_READ,
)
steps["pp_bucket"] = perplexity.PerplexityBucket(CUTOFF_CSV)
steps["drop"] = perplexity.DropKeys(tok_field)
steps["keep_bucket"] = None
if conf.keep_bucket:
steps["keep_bucket"] = jsonql.where(
[lambda doc: doc.get("bucket", "all") in conf.keep_bucket]
)
if "fetch_metadata" in conf.pipeline:
# TODO: better default
assert conf.metadata is not None
steps["fetch_metadata"] = minify.MetadataFetcher(
f"{conf.metadata}/{conf.dump}/"
)
steps["minify"] = minify.Minifier()
pattern = str(tmp_output / "{language}_{bucket}.json.gz")
steps["split_by_lang"] = jsonql.split(pattern=str(pattern), mkdir=True)
steps["split_by_segment"] = jsonql.split(
split_fn=lambda doc: _get_segment(tmp_output, doc), mkdir=True
)
pipeline = filter(None, (steps[s] for s in conf.pipeline))
jsonql.run_pipes(
*pipeline,
inputs=cc_shard,
processes=conf.mine_num_processes,
chunksize=100,
# The splitter takes care of writing to files.
output=tmp_output if not conf.will_split else None,
)
finalize(tmp_output, output)
return f"Mined {output}"
def regroup(conf: Config, all_dirs: List[Path]) -> Path:
"""Reshards each language/quality after 'mine'."""
regroup_dir = conf.get_mined_dir(regroup=True)
assert all_dirs
all_files = [f for d in all_dirs for f in d.glob("*.json.gz")]
if not all_files:
print(f"No .json.gz file found in {all_dirs[0]}")
splits: Dict[str, List[Path]] = defaultdict(list)
for f in all_files:
split = f.name.split(".")[0]
splits[split].append(f)
print(f"Identified {len(all_files)} files to regroup from {len(splits)} splits.")
inputs: List[List[Path]] = []
outputs: List[Path] = []
target_size = jsonql.parse_size(conf.target_size)
for split, files in splits.items():
cuts = list(regroup_module.determine_groups(files, target_size=target_size))
if not cuts:
continue
pattern = f"{split}_????.json.gz"
existing_outputs = sorted(regroup_dir.glob(pattern))
if not conf.cleanup_after_regroup:
# We still have all the inputs so it is safe to overwrite existing outputs.
assert len(existing_outputs) <= len(cuts)
existing_outputs = []
if len(existing_outputs) > 0 and len(cuts) == 1:
# append to existing file if size allows it.
new_size = (
sum(f.stat().st_size for f in cuts[0])
+ existing_outputs[-1].stat().st_size
)
if new_size < target_size:
print(f"Will append {cuts[0]} to {existing_outputs[-1]}")
cuts[0].insert(0, existing_outputs.pop(-1))
n_existing = len(existing_outputs)
for i, cut in enumerate(cuts):
# avoid overwriting existing files.
j = i + n_existing
output = regroup_dir / f"{split}_{j:04}.json.gz"
inputs.append(cut)
outputs.append(output)
print(
str(regroup_dir / pattern),
"->",
len(cuts),
f"shards ({n_existing} already there).",
)
ex = conf.get_executor(f"regroup_{conf.dump}", mem_gb=1, timeout_hour=12, cpus=2)
ex(_regroup, repeat(conf), inputs, outputs)
return regroup_dir
def _regroup(conf: Config, inputs: List[Path], output: Path) -> str:
output.parent.mkdir(parents=True, exist_ok=True)
regroup_module.fast_reshard(
inputs, output, tmp=tmp(output), rm_original=conf.cleanup_after_regroup
)
return f"Regrouped {output}"
def move_segments(conf: Config, all_dirs: Sequence[Path]) -> Path:
"""Reshards each language/quality after 'mine'."""
# check that mining is over.
regroup_dir = conf.get_mined_dir(regroup=True)
assert all_dirs, "Received no dirs to move"
assert all(
d.is_dir() for d in all_dirs
), f"move_segments was expecting dirs received files: {all_dirs[:10]}..."
regroup_dir.parent.mkdir(exist_ok=True)
regroup_dir.mkdir(exist_ok=True)
ex = conf.get_executor(f"moveseg_{conf.dump}", mem_gb=1, timeout_hour=1, cpus=2)
def _move_segments(subdir: Path, regroup_dir: Path) -> str:
n = 0
for f in subdir.iterdir():
if not f.is_file() or f.is_symlink():
continue
n += f.name.endswith(".json.gz")
new_name = regroup_dir / f.name
target = new_name.resolve()
assert f.resolve() != target
# this make the job idempotent.
f.rename(new_name)
f.symlink_to(target)
if n == 0:
return ""
return f"Moved {n} .json.gz files from {subdir} to {regroup_dir}"
ex(_move_segments, all_dirs, repeat(regroup_dir))
print(f"Results are in {regroup_dir}")
return regroup_dir
def _validate_test(conf: Config, output_dir: Path, generate: bool = False):
stats: Dict[str, dict] = {}
for file in sorted(output_dir.glob("*.json.gz")):
fname = "/".join((file.parent.name, file.name))
# The order of documents is not guaranteed inside a shard,
lines = sorted(jsonql.open_read(file))
content = "\n".join(lines)
size = len(content)
checksum = hashlib.sha1(bytes(content, encoding="utf-8")).hexdigest()
# first_document = json.loads(lines[0])
stats[fname] = {"size": size, "checksum": checksum}
def dump(x):
return json.dumps(x, indent=2, ensure_ascii=False)
print("*** Stats ***")
stats_raw = dump(stats)
stats_file = FILE_DIR / "data" / "test_stats.json"
if generate:
print("Saving stats to", stats_file)
stats_file.write_text(stats_raw)
return
expected_stats: Dict[str, dict] = {}
if stats_file.exists():
expected_stats = json.loads(stats_file.read_text())
if expected_stats == stats:
print("Everything looks good !")
return
stats_file.with_suffix(".actual.json").write_text(stats_raw)
print("*** Expected Stats ***")
print(dump(expected_stats))
print("*** Diff ***")
for fname in sorted(expected_stats.keys()):
print(fname)
assert fname in expected_stats, "missing file " + fname
if expected_stats[fname]["size"] != stats[fname]["size"]:
print(
" - Expected size",
expected_stats[fname]["size"],
", size",
stats[fname]["size"],
)
if expected_stats[fname]["checksum"] != stats[fname]["checksum"]:
print(
" - Expected checksum",
expected_stats[fname]["checksum"],
", checksum",
stats[fname]["checksum"],
)
def get_main_parser() -> ArgumentParser:
# Generates the 'main' parser by patching a 'Config' parser
p = func_argparse.func_argparser(Config)
# Override defaults value to None, so we know what was set by the user.
# Note that it will keep the original default values in the help message.
p.set_defaults(**{f: None for f in Config._fields})
p.add_argument("--config", type=str, default="base")
p.set_defaults(__command=main)
return p
def main(config: str = "base", **config_as_dict: Any) -> None:
# Use the given 'config' as default value.
config_base = config
if config_base in PREDEF_CONFIGS:
conf = PREDEF_CONFIGS[config_base]
elif Path(config_base).exists():
conf = Config.from_json(Path(config_base))
else:
raise ValueError(
f"Invalid value {config_base} for --config. "
f"Choose from ({', '.join(PREDEF_CONFIGS)}) or give an existing .json file."
)
conf = conf._replace(**{k: v for (k, v) in config_as_dict.items() if v is not None})
print(f"Will run cc_net.mine.main with the following config:", conf)
all_files = mine(conf)
if conf.will_split:
assert all_files
assert all(d.is_dir() for d in all_files)
all_dirs = all_files
if "split_by_lang" in conf.pipeline:
# Only try regrouping if we split the shards.
regroup(conf, all_dirs)
elif "split_by_segment" in conf.pipeline:
# If we split by segment then regrouping is trivial, since segments appear in only one shard.
move_segments(conf, all_dirs)
if conf.config_name == "test":
_validate_test(conf, conf.get_mined_dir(regroup=True))
if __name__ == "__main__":
func_argparse.parse_and_call(get_main_parser())
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/__init__.py | code/data_collection/cc/cc_net/cc_net/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/text_normalizer.py | code/data_collection/cc/cc_net/cc_net/text_normalizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import unicodedata
UNICODE_PUNCT = {
",": ",",
"。": ".",
"、": ",",
"„": '"',
"”": '"',
"“": '"',
"«": '"',
"»": '"',
"1": '"',
"」": '"',
"「": '"',
"《": '"',
"》": '"',
"´": "'",
"∶": ":",
":": ":",
"?": "?",
"!": "!",
"(": "(",
")": ")",
";": ";",
"–": "-",
"—": " - ",
".": ". ",
"~": "~",
"’": "'",
"…": "...",
"━": "-",
"〈": "<",
"〉": ">",
"【": "[",
"】": "]",
"%": "%",
"►": "-",
}
UNICODE_PUNCT_RE = re.compile(f"[{''.join(UNICODE_PUNCT.keys())}]")
def replace_unicode_punct(text: str) -> str:
return "".join((UNICODE_PUNCT.get(c, c) for c in text))
def remove_unicode_punct(text: str) -> str:
"""More aggressive version of replace_unicode_punct but also faster."""
return UNICODE_PUNCT_RE.sub("", text)
def strip_accents(line: str) -> str:
"""Strips accents from a piece of text."""
nfd = unicodedata.normalize("NFD", line)
output = [c for c in nfd if unicodedata.category(c) != "Mn"]
if len(output) == line:
return line
return "".join(output)
# Build a regex matching all control characters.
NON_PRINTING_CHARS_RE = re.compile(
f"[{''.join(map(chr, list(range(0,32)) + list(range(127,160))))}]"
)
DIGIT_RE = re.compile(r"\d")
PUNCT_OR_NON_PRINTING_CHARS_RE = re.compile(
(UNICODE_PUNCT_RE.pattern + NON_PRINTING_CHARS_RE.pattern).replace("][", "")
)
def remove_non_printing_char(text: str) -> str:
return NON_PRINTING_CHARS_RE.sub("", text)
def normalize_spacing_for_tok(text: str, language: str = "en") -> str:
res = (
text.replace("\r", "")
# remove extra spaces
.replace("(", " (")
.replace(")", ") ")
.replace(" +", " ")
)
res = re.sub(r"\) ([\.\!\:\?\;\,])", r"\)\1", res)
res = res.replace("( ", "(").replace(" )", ")")
res = re.sub(r"(\d) \%", r"\1\%", res)
res = res.replace(" :", ":").replace(" ;", ";")
res = res.replace("`", "'").replace("''", ' " ')
res = (
res.replace("„", '"')
.replace("“", '"')
.replace("”", '"')
.replace("–", "-")
.replace("—", " - ")
.replace(" +", " ")
.replace("´", "'")
.replace("([a-z])‘([a-z])", r"\1'\2/")
.replace("([a-z])’([a-z])", r"\1'\2/")
.replace("‘", '"')
.replace("‚", '"')
.replace("’", '"')
.replace("''", '"')
.replace("´´", '"')
.replace("…", "...")
# French quotes
.replace(" « ", ' "')
.replace("« ", '"')
.replace("«", '"')
.replace(" » ", '" ')
.replace(" »", '"')
.replace("»", '"')
# handle pseudo-spaces
.replace(" %", "%")
.replace("nº ", "nº ")
.replace(" :", ":")
.replace(" ºC", " ºC")
.replace(" cm", " cm")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ;", ";")
.replace(", ", ", ")
.replace(" +", " ")
.replace(".", ". ")
)
# English "quotation," followed by comma, style
if language == "en":
res = re.sub(r"\"([,\.]+)", r"\1\"", res)
# Czech is confused
elif language == "cs" or language == "cz":
pass
# German/Spanish/French "quotation", followed by comma, style
else:
res = res.replace(',"', '",')
res = re.sub(
r"(\.+)\"(\s*[^<])", r"\"\1\2", res
) # don't fix period at end of sentence
if (
language == "de"
or language == "es"
or language == "cz"
or language == "cs"
or language == "fr"
):
res = re.sub(r"(\d) (\d)", r"\1,\2", res)
else:
res = re.sub(r"(\d) (\d)", r"\1.\2", res)
return res
def normalize(line: str, accent=True, case=True, numbers=True, punct=1) -> str:
line = line.strip()
if not line:
return line
if case:
line = line.lower()
if accent:
line = strip_accents(line)
if numbers:
line = DIGIT_RE.sub("0", line)
if punct == 1:
line = replace_unicode_punct(line)
elif punct == 2:
line = remove_unicode_punct(line)
line = remove_non_printing_char(line)
return line
def slow_normalize_for_dedup(line: str) -> str:
return normalize(line, accent=False, case=True, numbers=True, punct=2)
def normalize_for_dedup(line: str) -> str:
line = line.strip()
if not line:
return line
# case
line = line.lower()
# numbers
line = DIGIT_RE.sub("0", line)
line = PUNCT_OR_NON_PRINTING_CHARS_RE.sub("", line)
return line
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/execution.py | code/data_collection/cc/cc_net/cc_net/execution.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import functools
import itertools
import logging
import os
import sys
import time
import warnings
from pathlib import Path
from typing import Callable, Dict, Iterable, List, Optional, Sequence, Sized
import submitit
from typing_extensions import Protocol
class Executor(Protocol):
def __call__(self, function: Callable[..., str], *args: Iterable) -> None:
...
class SubmititRetryOnTimeout(submitit.helpers.Checkpointable):
def __init__(self, fn: Callable):
self.fn = fn
self.__name__ = fn.__name__
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def get_executor(
name: str,
log_dir: Path,
execution: str,
timeout_hour: float = 1.0,
mem_gb: int = 1,
cpus: int = 1,
task_parallelism: int = -1,
options: dict = {},
) -> Executor:
execution_mode = execution.split(",")[0]
options.update(
{kv.split("=", 1)[0]: kv.split("=", 1)[1] for kv in
execution.split(",")[1:]}
)
if execution_mode == "mp":
warnings.warn("Execution mode 'mp' is deprecated, use 'local'.")
execution_mode = "local"
cluster = None if execution_mode == "auto" else execution_mode
# use submitit to detect which executor is available
ex = submitit.AutoExecutor(log_dir, cluster=cluster)
if task_parallelism == -1: # we are on slurm
ex.parameters['slurm_time'] = int(timeout_hour * 60)
else:
ex.parameters['timeout_min'] = int(timeout_hour * 60)
if ex.cluster == "local":
ex.parameters['timeout_min'] = int(timeout_hour * 60)
# LocalExecutor doesn't respect task_parallelism
return functools.partial(custom_map_array, ex, task_parallelism)
if ex.cluster == "debug":
ex.parameters['timeout_min'] = int(timeout_hour * 60)
return debug_executor
# We are on slurm
if task_parallelism == -1:
task_parallelism = 500
ex.update_parameters(
name=name,
slurm_time=int(timeout_hour * 60),
slurm_mem_per_cpu=mem_gb,
cpus_per_task=cpus,
slurm_array_parallelism=task_parallelism,
**options,
)
else:
ex.update_parameters(
name=name,
timeout_min=int(timeout_hour * 60),
mem_gb=mem_gb,
cpus_per_task=cpus,
slurm_array_parallelism=task_parallelism,
**options,
)
return functools.partial(map_array_and_wait, ex)
def map_array_and_wait(
ex: submitit.AutoExecutor, function: Callable[..., str],
*args: Iterable
):
f_name = function.__name__
assert len(args) > 0, f"No arguments passed to {f_name}"
approx_length = _approx_length(*args)
print(f"Submitting {f_name} in a job array ({approx_length} jobs)")
jobs = ex.map_array(function, *args)
if not jobs:
return
failed_jobs = []
done = 0
total = len(jobs)
job_array_id = jobs[0].job_id.split("_")[0]
print(f"Started {f_name} in job array {job_array_id} ({len(jobs)} jobs).")
for job in submitit.helpers.as_completed(jobs):
done += 1
e = job.exception()
if not e:
print(f"Finished job {job.job_id} ({done} / {total}).",
job.result())
continue
print(f"Failed job {job.job_id} ({done} / {total}):", e)
failed_jobs.append(job)
if failed_jobs:
n_failures = 10
message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
print(message)
for job in failed_jobs[:n_failures]:
print(f"Failed {job.job_id} -> {job.paths.stderr}")
if len(failed_jobs) > n_failures:
print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
raise Exception(message)
def debug_executor(function: Callable[..., Optional[str]],
*args: Iterable) -> None:
logging.getLogger().setLevel(logging.DEBUG)
approx_length = _approx_length(*args)
for i, x in enumerate(zip(*args)):
try:
message = function(*x)
except Exception:
try:
import ipdb as pdb # type: ignore
except ImportError:
import pdb # type: ignore
import traceback
traceback.print_exc()
print("")
pdb.post_mortem()
sys.exit(1)
if message is not None:
print(message, f"({i + 1} / {approx_length})")
def _approx_length(*args: Iterable):
for a in args:
if isinstance(a, Sized):
return len(a)
return -1
def custom_map_array(
ex: submitit.AutoExecutor,
parallelism: int,
function: Callable[..., Optional[str]],
*args: Iterable,
) -> None:
f_name = function.__name__
assert len(args) > 0, f"No arguments passed to {f_name}"
jobs_args = list(zip(*args))
total = len(jobs_args)
if parallelism < 0:
parallelism = os.cpu_count() or 0
assert parallelism >= 0, f"Can't run any jobs with task_parallelism={parallelism}"
print(
f"Submitting {total} jobs for {f_name}, with task_parallelism={parallelism}")
enqueued = 0
done = 0
running_jobs: List[submitit.Job] = []
failed_jobs: List[submitit.Job] = []
while done < len(jobs_args):
# Try to queue more job if we have some bandwidth.
if enqueued < total and len(running_jobs) < parallelism:
running_jobs.append(ex.submit(function, *jobs_args[enqueued]))
enqueued += 1
continue
# Else wait for some job to finish
if not running_jobs:
warnings.warn(
f"No more running jobs, yet we submitted only {enqueued} / {total} and finished {done} / {total}"
)
break
job = get_next_job(running_jobs)
running_jobs.remove(job)
done += 1
e = job.exception()
if not e:
print(f"Finished job {job.job_id} ({done} / {total}).",
job.result())
continue
print(f"Failed job {job.job_id} ({done} / {total}):", e)
failed_jobs.append(job)
if failed_jobs:
n_failures = 10
message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
print(message)
for job in failed_jobs[:n_failures]:
print(f"Failed {job.job_id} -> {job.paths.stderr}")
if len(failed_jobs) > n_failures:
print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
raise Exception(message)
def get_next_job(
jobs: Sequence[submitit.Job], poll_frequency: float = 10
) -> submitit.Job:
"""
Waits for any of the job to finish and returns it.
jobs: list of jobs
poll_frequency: frequency in second at which we check job status
"""
start = time.time()
waiting = False
while True:
for job in jobs:
if job.done():
return job
if not waiting:
job_ids = [j.job_id for j in jobs[:4]]
suffix = "..." if len(jobs) > 4 else ""
print(
f"Waiting on {len(jobs)} running jobs. Job ids: {','.join(job_ids)}{suffix}"
)
waiting = True
time.sleep(poll_frequency)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/tokenizer.py | code/data_collection/cc/cc_net/cc_net/tokenizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from typing import Dict, Optional
import sacremoses # type: ignore
from cc_net import jsonql, text_normalizer
class RobustTokenizer(jsonql.Transformer):
"""Moses tokenizer with the expected preprocessing."""
LANG_WITHOUT_ACCENT = {"en", "my"}
def __init__(self, lang: str):
super().__init__()
self.lang = lang
self.moses = sacremoses.MosesTokenizer(lang)
self.rm_accent = lang in self.LANG_WITHOUT_ACCENT
self.ready = True
def do(self, text: str):
text = text_normalizer.normalize(
text, accent=self.rm_accent, case=False, numbers=False, punct=True
)
text = text_normalizer.normalize_spacing_for_tok(text, language=self.lang)
return self.moses.tokenize(text, return_str=True, escape=False)
class DocTokenizer(jsonql.Transformer):
"""Tokenize the text found in `output_field and store the result in `output_field`."""
def __init__(
self,
field: str,
output_field: str = "tokenized",
language_field: str = "language",
):
super().__init__()
self.field = field
self.output_field = output_field
self.language_field = language_field
self.n_docs = 0
self.tokenizers: Dict[str, RobustTokenizer] = {}
def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]:
cache = self.tokenizers
if lang in cache:
return cache[lang]
if lang in ("th", "zh", "ja"):
# TODO find a tokenizer for those languages
return None
cache[lang] = RobustTokenizer(lang)
return cache[lang]
def do(self, document):
lang = document[self.language_field]
tok = self.get_tokenizer(lang)
if not tok:
return document
self.n_docs += 1
lines = document[self.field].split("\n")
tokenized = "\n".join(tok(l) for l in lines)
document[self.output_field] = tokenized
return document
def summary(self):
delay = (time.time() - self.start_time) / 3600
speed = self.n_docs / delay
return [
f"Tokenized {self.n_docs:_} documents in {delay:.2}h ({speed:.1} doc/s)."
]
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/perplexity.py | code/data_collection/cc/cc_net/cc_net/perplexity.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import time
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Tuple, Union
import kenlm # type: ignore
import numpy as np # type: ignore
import pandas as pd # type: ignore
import sentencepiece # type: ignore
from cc_net import jsonql, text_normalizer
LMDescriptor = Union[Dict[str, Path], Union[Path, str]]
def get_args():
parser = argparse.ArgumentParser(
description="Compute the score of each sentences of a document",
parents=[jsonql.io_parser()],
)
parser.add_argument("--models", type=str)
parser.add_argument("--sentences", action="store_true", default=False)
parser.add_argument(
"--languages", type=str, help="Ignore doc with another language"
)
parser.add_argument("--field", type=str, default=None)
parser.add_argument("--newline", type=str, default="\n")
return vars(parser.parse_args())
def pp(log_score, length):
return 10.0 ** (-log_score / length)
class SentencePiece(jsonql.Transformer):
# Sentence Pieces model have to be read back from disk.
warning_when_pickling = True
def __init__(
self,
model: Path,
field: str,
output_field: str = "tokenized",
normalize: bool = False,
):
super().__init__()
self.model = model
self.field = field
self.output_field = output_field
self.normalize = normalize
self.sp: sentencepiece.SentencePieceProcessor = None
def _prepare(self):
if self.sp is not None:
return
self.sp = sentencepiece.SentencePieceProcessor()
self.sp.load(str(self.model))
return self
def do(self, document: dict) -> dict:
text = document[self.field]
if self.normalize:
text = text_normalizer.normalize(text)
tokenized = self.sp.encode_as_pieces(text)
document[self.output_field] = " ".join(tokenized)
return document
class MultiSentencePiece(jsonql.Transformer):
warning_when_pickling = True
def __init__(
self,
models: Union[Path, Dict[str, Path]],
field: str,
output_field: str = "tokenized",
normalize: bool = False,
):
super().__init__()
self.field = field
self.output_field = output_field
self.normalize = normalize
self._prefetch: Sequence[str] = []
if isinstance(models, Path):
self.models = {
m.name.split(".")[0]: m for m in models.parent.glob(models.name)
}
else:
self.models = models
self._prefetch = list(models.keys())
self.sp: Dict[str, sentencepiece.SentencePieceProcessor] = {}
def _prepare(self) -> None:
for lang in self._prefetch:
assert (
self.get_sp(lang) is not None
), f"No model found for {lang} at {self.models.get(lang)}."
def get_sp(self, lang) -> Optional[sentencepiece.SentencePieceProcessor]:
sp = self.sp.get(lang)
if sp is not None:
return sp
if lang not in self.models:
return None
start_load = time.time()
self.log(f"Loading {self.models[lang]}...")
sp = sentencepiece.SentencePieceProcessor()
sp.load(str(self.models[lang]))
self.sp[lang] = sp
load_time = time.time() - start_load
self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
return sp
def do(self, document: dict) -> Optional[dict]:
text = document[self.field]
if self.normalize:
text = text_normalizer.normalize(text)
sp = self.get_sp(document.get("language"))
if sp is None:
return document
tokenized = sp.encode_as_pieces(text)
document[self.output_field] = " ".join(tokenized)
return document
class DocLM(jsonql.Transformer):
def __init__(
self,
models: Union[Path, Dict[str, Path]],
field: str,
output_field: str = "perplexity",
newline: str = "\n",
normalize: bool = True,
load_method: int = 2,
):
super().__init__()
self.field = field
self.output_field = output_field
self.newline = newline
self.normalize = normalize
self._prefetch: Sequence[str] = []
self.lm_config = kenlm.Config()
# This is the default settings
# POPULATE will mmap the models and populate the pages.
# Maybe that's not the best way when the models are on a network disk.
# TODO: try copying models file, try READ or PARALLEL_READ
self.lm_config.load_method = load_method
if isinstance(models, Path):
self.models = {
m.name.split(".")[0]: m for m in models.parent.glob(models.name)
}
else:
self.models = models
self._prefetch = list(models.keys())
self.lm: Dict[str, kenlm.Model] = {}
self.n_lines = 0
def _prepare(self) -> None:
for lang in self._prefetch:
assert (
self.get_lm(lang) is not None
), f"No model found for {lang} at {self.models.get(lang)}."
def get_lines(self, document: dict) -> List[str]:
lang = document.get("language")
if not lang:
return []
if lang not in self.models:
return []
content = document.get(self.field)
if not content:
return []
lines = content.split(self.newline)
self.n_lines += len(lines)
return lines
def get_lm(self, lang: Optional[str]) -> Optional[kenlm.Model]:
if lang is None:
return None
lm = self.lm.get(lang)
if lm is not None:
return lm
model = self.models.get(lang)
if model is None:
return None
start_load = time.time()
self.log(f"Loading {self.models[lang]}...")
lm = kenlm.Model(str(model), self.lm_config)
self.lm[lang] = lm
load_time = time.time() - start_load
self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
return lm
def do(self, document: dict) -> dict:
lines = self.get_lines(document)
model = self.get_lm(document.get("language"))
if not lines or not model:
return document
doc_log_score, doc_length = 0, 0
for line in lines:
if self.normalize:
line = text_normalizer.normalize(line)
log_score = model.score(line)
length = len(line.split()) + 1
doc_log_score += log_score
doc_length += length
document[self.output_field] = round(pp(doc_log_score, doc_length), 1)
return document
def summary(self):
delay = time.time() - self.start_time
h = delay / 3600
s = self.n_lines / delay
summ = super().summary()
summ.append(f"Processed {self.n_lines:_} lines in {h:.2}h ({s:.1} lines/s).")
return summ
class SentencesLM(DocLM):
"""Returns the score of each individual paragraph."""
def do(self, document: dict) -> Optional[str]: # type: ignore
lines = self.get_lines(document)
model = self.get_lm(document.get("language"))
if not lines or not model:
return None
sentences = []
for line in lines:
if self.normalize:
line = text_normalizer.normalize(line)
log_score = model.score(line)
length = len(line.split()) + 1
sentences.append(f"{pp(log_score, length)}\t{line}")
return "\n".join(sentences)
class PerplexityBucket(jsonql.Transformer):
def __init__(
self, cutoff_csv: Path, percentile_head: int = 30, percentile_tail: int = 60
):
super().__init__()
self.cutoff_csv = cutoff_csv
self.percentile_head = percentile_head
self.percentile_tail = percentile_tail
self.cutoffs: Dict[str, Tuple[float, float]] = {}
def _prepare(self) -> None:
cutoffs = pd.read_csv(self.cutoff_csv, index_col=0)
self.cutoffs = {
l: (cutoffs[l][self.percentile_head], cutoffs[l][self.percentile_tail])
for l in cutoffs.columns
}
def get_bucket(self, doc: dict) -> str:
perplexity = doc.get("perplexity", -1)
lang = doc.get("language")
if lang not in self.cutoffs or perplexity < 0:
return "all"
pp_head, pp_tail = self.cutoffs[lang]
if perplexity < pp_head:
return "head"
if perplexity < pp_tail:
return "middle"
return "tail"
def do(self, doc: dict) -> dict:
doc["bucket"] = self.get_bucket(doc)
return doc
class DropKeys(jsonql.Transformer):
def __init__(self, *keys):
super().__init__()
self.keys = keys
def do(self, document: dict) -> Optional[dict]:
if not document:
return None
for key in self.keys:
document.pop(key, None)
return document
class RemoveSmall(jsonql.Transformer):
def __init__(self, field, min_len):
super().__init__()
self.field = field
self.min_len = min_len
self.removed = 0
def do(self, document: dict) -> Optional[dict]:
if not document:
return None
content = document.get(self.field)
if not content or len(content) < self.min_len:
self.removed += 1
return None
return document
def summary(self):
r, n = self.removed, self.processed
ratio = r / n if n else 0
return [f"Removed {r} small documents out of {n} ({ratio:.1%})"]
def perplexity_to_bin(file: Path, output: Path, models, tok_field: str):
pp_field = "perplexity"
lm = DocLM(models, tok_field, output_field=pp_field)
stats: List[float] = []
max_stats = 1_000_000
batch_size = 100_000
i = 0
batch = []
with open(output, "wb") as o:
for doc in jsonql.read_jsons(file):
i += 1
pp = lm(doc)[pp_field]
if len(stats) < max_stats:
stats.append(pp)
batch.append(pp)
if len(batch) >= batch_size:
np.array(batch, dtype=np.float32).tofile(o)
batch = []
if len(batch) > 0:
np.array(batch, dtype=np.float32).tofile(o)
if __name__ == "__main__":
args = get_args()
output = Path(args["output"])
if output.suffix == ".bin":
perplexity_to_bin(args["file"], output, args["models"], args["field"])
else:
jsonql.run_pipe(DocLM, args)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/flat_hash_set.py | code/data_collection/cc/cc_net/cc_net/flat_hash_set.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import time
import warnings
from typing import Iterable, Iterator, Sequence, Sized, Tuple, Type
import numpy as np
HASH_TYPE: Type[np.uint64] = np.uint64
GETPY_WARNING = False
class AbstractDedupHashSet(Sized, Iterable[np.uint64]):
"""A dict-like that returns `True` for keys that have been added more than once.
The API is batched and expect np.array as input. This batching grants better
perf when using the C++ implementation.
"""
dtype: Type[np.uint64] = HASH_TYPE
def __repr__(self):
implementation = type(self).__name__
return f"[{implementation}, len: {len(self)}"
def __len__(self) -> int:
...
def __contains__(self, values: Sequence[np.uint64]) -> np.ndarray:
...
def __getitem__(self, values) -> np.ndarray:
...
def __setitem__(self, keys, values) -> None:
...
def items(self) -> Iterable[Tuple[np.uint64, np.uint8]]:
...
def keys(self) -> Iterable[np.uint64]:
...
def __iter__(self) -> Iterator[np.uint64]:
return iter(self.keys())
def add(self, h, contains=None):
"""Add the given keys. First time a key is added the value is set to 0,
then it's set to one."""
if not isinstance(h, np.ndarray):
h = np.array(h, dtype=HASH_TYPE)
if contains is None:
contains = self.__contains__(h)
self.__setitem__(h, contains)
return contains
def merge(self, keys, values):
contains = self.__contains__(keys)
self.__setitem__(keys, contains | values)
def dump(self, filename):
return self.dump_np(filename)
def load(self, filename):
return self.load_np(filename)
def dump_np(self, filename):
kv_type = np.dtype([("k", HASH_TYPE), ("v", np.uint8)])
items = np.fromiter(self.items(), dtype=kv_type, count=len(self))
with open(filename, "wb") as f:
np.save(f, items)
def load_np(self, filename):
items = np.load(str(filename))
keys = items["k"].copy()
values = items["v"].copy()
self.merge(keys, values)
def dump_np2(self, filename):
keys = np.fromiter(
(k for (k, v) in self.items()), dtype=HASH_TYPE, count=len(self)
)
with open(filename, "wb") as f:
np.save(f, keys)
values = np.fromiter(
(v for (k, v) in self.items()), dtype=np.uint8, count=len(self)
)
with open(str(filename) + ".val", "wb") as f:
np.save(f, values)
def load_np2(self, filename):
keys = np.load(filename)
values = np.load(str(filename) + ".val")
self.merge(keys, values)
class NaiveHashSet(dict, AbstractDedupHashSet):
"""Pure python implementation of AbstractDedupHashSet.
This implementation is quite fast, since Python dict are heavily optimized.
"""
def __init__(self, iterable=None):
super().__init__()
global GETPY_WARNING
if GETPY_WARNING:
warnings.warn(
"Module 'getpy' not found. Deduplication will take more RAM."
" Try `pip install cc_net[getpy]"
)
GETPY_WARNING = False
def __contains__(self, values):
"""Returns `True` if the object has been added at list once."""
contains_point = super().__contains__
return np.fromiter(
map(contains_point, values), count=len(values), dtype=np.uint8
)
def __getitem__(self, values):
"""Returns `True` if the object has been added at list twice."""
get_point = super().get
return np.fromiter(
map(lambda x: get_point(x, False), values),
count=len(values),
dtype=np.uint8,
)
def __setitem__(self, keys, values):
assert len(keys) == len(values)
for k, v in zip(keys, values):
dict.__setitem__(self, k, v)
try:
import getpy as gp # type: ignore
class _FlatHashSet(gp.Dict, AbstractDedupHashSet):
"""C++ backed implementation of AbstractDedupHashSet.
This implementation is slightly slower than the Python one but uses
3x less RAM.
See https://github.com/atom-moyer/getpy.
"""
def __init__(self):
super().__init__(HASH_TYPE, np.uint8, default_value=False)
def __contains__(self, h):
"""Returns `True` if the object has been added at list once."""
if not isinstance(h, np.ndarray):
h = np.array(h, dtype=HASH_TYPE)
c = gp.Dict.__contains__(self, h)
c.dtype = np.uint8
return c
def dump(self, filename):
return self.dump_gp(filename)
def load(self, filename):
return self.load_gp(filename)
def dump_gp(self, filename):
return gp.Dict.dump(self, str(filename))
def load_gp(self, filename):
"""Override gp.Dict.load, to correctly merge values instead of overwriting."""
other = gp.Dict(HASH_TYPE, np.uint8, default_value=False)
other.load(str(filename))
n = len(other)
keys = np.fromiter(
(k for (k, v) in other.items()), dtype=HASH_TYPE, count=n
)
values = np.fromiter(
(v for (k, v) in other.items()), dtype=np.uint8, count=n
)
self.merge(keys, values)
FlatHashSet: Type[AbstractDedupHashSet] = _FlatHashSet
except ImportError:
GETPY_WARNING = True
FlatHashSet = NaiveHashSet
def timeit(message, function, *args):
start = time.time()
function(*args)
end = time.time()
print(message, f"took {end - start:.0f}s")
def compare_load(*filenames):
assert filenames, "No file given"
def load_list():
hashes = []
for f in filenames:
h = FlatHashSet()
h.load(f)
print(f"Loaded {h} from {f}.")
hashes.append(h)
return hashes
def load_all(load, ext):
hashes = FlatHashSet()
for f in filenames:
load(hashes, f + ext)
def dump_all(hashes, dump, ext):
for h, f in zip(hashes, filenames):
dump(h, f + ext)
hashes = load_list()
dump_gp = getattr(FlatHashSet, "dump_gp")
if dump_gp is not None:
timeit("Dumping using gp.dump", dump_all, hashes, dump_gp, ".gp.test")
timeit("Dumping using dump_np", dump_all, hashes, FlatHashSet.dump_np, ".npy.test")
timeit(
"Dumping using dump_np2", dump_all, hashes, FlatHashSet.dump_np2, ".npy2.test"
)
load_gp = getattr(FlatHashSet, "load_gp")
if load_gp is not None:
timeit("Loading using gp.load", load_all, load_gp, ".gp.test")
timeit("Loading using load_np", load_all, FlatHashSet.load_np, ".npy.test")
timeit("Loading using load_np2", load_all, FlatHashSet.load_np2, ".npy2.test")
# Loading 10 shards:
# [dedup] Dumping using gp.dump took 52s
# [dedup] Dumping using dump_np took 270s
# [dedup] Dumping using dump_np2 took 483s
#
# [dedup] Loading using gp.load took 654s
# [dedup] Loading using load_np took 82s
# [dedup] Loading using load_np2 took 76s
if __name__ == "__main__":
compare_load(*sys.argv[1:])
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/tools/make_dmoz_corpus.py | code/data_collection/cc/cc_net/cc_net/tools/make_dmoz_corpus.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This code is used to train a fastText classifier to label document with DMOZ categories.
The data, distributed under the cc-by 3.0 license
(https://web.archive.org/web/20140605215533/http://www.dmoz.org/license.html),
can be downloaded from
https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz.
"""
import urllib.request
from io import StringIO
from pathlib import Path
from typing import Dict, Set
from urllib.parse import urlparse
import func_argparse
from lxml import etree # type: ignore
from cc_net import jsonql
TaggedUrls = Dict[str, Set[str]]
DMOZ_TAGS_URL = "https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz"
def add_tags(url: str, tags: Set[str], url2tags: TaggedUrls):
if url in url2tags:
url2tags[url] &= tags
else:
url2tags[url] = tags
def load_tags(filename: Path = None) -> TaggedUrls:
if filename is None:
with StringIO("".join(jsonql.open_remote_file(DMOZ_TAGS_URL))) as dmoz:
tree = etree.parse(dmoz)
else:
tree = etree.parse(str(filename))
root = tree.getroot()
url2tags: Dict[str, Set[str]] = {}
for external_page in root.iterfind("{http://dmoz.org/rdf/}ExternalPage"):
url = external_page.get("about")
domain = urlparse(url).netloc
for topic in external_page.iterfind("{http://dmoz.org/rdf/}topic"):
# print(url, topic.text)
# Tags looks like Top/Arts/Animation/Anime/Collectibles
tags = set(topic.text.split("/")[1:])
add_tags(url, tags, url2tags)
add_tags(domain, tags, url2tags)
return url2tags
def dl(output: Path) -> None:
urllib.request.urlretrieve(DMOZ_TAGS_URL, output)
def make_corpus(file: Path, tags_file: Path = None, output: Path = None) -> None:
"""
Loads a tags file and create a training dataset using the given webpages.
Arguments:
- file: CC shard file
- tags_file: dmoz tagging file, (like the one produced by `dl`)
- output: ""
"""
url2tags = load_tags(tags_file)
with jsonql.open_write(output) as o:
for document in jsonql.read_jsons(file):
if not document:
continue
url = document["url"]
domain = document["source_domain"]
if url in url2tags:
tags = url2tags[url]
elif domain in url2tags:
tags = url2tags[domain]
else:
continue
if len(tags) == 0:
continue
fasttext_tags = ["__label__" + tag for tag in tags]
content = document["tokenized"].replace("\n", " ").lower()
if len(content) > 200:
print(" ".join(fasttext_tags), content, file=o) # type: ignore
if __name__ == "__main__":
func_argparse.single_main(make_corpus)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/tools/expand_corpus.py | code/data_collection/cc/cc_net/cc_net/tools/expand_corpus.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to search sentences in CC similar to sentences in another corpus.
"""
import functools
import logging
import math
import subprocess
from collections import Counter
from pathlib import Path
from typing import Iterable, List, Optional, Set, Tuple
import func_argparse
import submitit
from kenlm import Model as KenlmModel # type: ignore
from sentence_splitter import SentenceSplitter # type: ignore
from sentencepiece import SentencePieceProcessor # type: ignore
from cc_net import dedup, jsonql, perplexity, text_normalizer
KENLM = Path("./bin/lmplz")
KENLM_BUILD = Path("./bin/build_binary")
VOCAB_SIZE = 2 ** 16 - 10
PROCESSES = 16
def normalize(corpus: Path, output_dir: Path) -> Path:
normalized = output_dir / (corpus.stem + ".normalized")
if normalized.exists():
return normalized
print("Will normalize", corpus, "to", normalized)
jsonql.run_pipes(
jsonql.Mapper(text_normalizer.normalize),
file=corpus,
output=normalized,
processes=PROCESSES,
)
return normalized
# TODO use classic files directory.
def sp_model(lang: str) -> Path:
return Path(f"/checkpoint/guw/cc_clean/lm_sp/{lang}.sp.model")
def _dataset(dataset: Optional[Path], lang: str) -> Path:
return (
dataset
or Path("/datasets01_101/common_crawl/020919") / f"{lang}_head_*.json.gz"
)
class SentencePiece(jsonql.Transformer):
def __init__(self, model: Path):
super().__init__()
self.model = model
self.sp: SentencePieceProcessor = None # type: ignore
def _prepare(self):
self.sp = SentencePieceProcessor()
self.sp.load(str(self.model))
def do(self, line: str) -> str:
return " ".join(self.sp.encode_as_pieces(line))
class ExtractSentences(jsonql.Transformer):
def __init__(
self,
sp_model: Path,
lm_model: Path,
field: str = "raw_content",
threshold: float = float("+inf"),
):
super().__init__()
self.sp_model = sp_model
self.lm_model = lm_model
self.field = field
self.threshold = threshold
self.sp: SentencePieceProcessor = None
self.lm: KenlmModel = None
self.splitter: SentenceSplitter = None
self.hashes: Set[int] = set()
def _prepare(self):
self.sp = SentencePieceProcessor()
self.sp.load(str(self.sp_model))
self.splitter = SentenceSplitter("en")
self.lm = KenlmModel(str(self.lm_model))
def do(self, document: dict) -> Optional[str]:
content: Optional[str] = document.get(self.field)
if not content:
return None
all_sentences = [
s for l in content.split("\n") if l for s in self.splitter.split(text=l)
]
unique_sentences = []
for s in all_sentences:
if not s:
continue
h = dedup.str_hash(s)
if h in self.hashes:
continue
self.hashes.add(h)
unique_sentences.append(s)
scores = []
for sentence in unique_sentences:
normalized = text_normalizer.normalize(sentence)
pieces = self.sp.encode_as_pieces(normalized)
log_score = self.lm.score(" ".join(pieces))
pp = -1
if len(pieces):
pp = perplexity.pp(log_score, len(pieces))
scores.append(pp)
res = filter(
lambda pp_s: self.threshold > pp_s[0] > 0, zip(scores, unique_sentences)
)
return "\n".join(f"{pp}\t{s}" for (pp, s) in res) or None
def tokenize(corpus: Path, output_dir: Path, lang: str) -> Path:
tokenized = output_dir / (corpus.stem + ".tokenized")
if tokenized.exists():
return tokenized
print("Will SentencePiece", corpus, "to", tokenized)
jsonql.run_pipes(
SentencePiece(sp_model(lang)),
file=normalize(corpus, output_dir),
output=tokenized,
processes=PROCESSES,
)
return tokenized
def train_lm(
corpus: Path,
output_dir: Path,
lang: str = "en",
vocab_size: int = VOCAB_SIZE,
ngrams: int = 5,
):
lm_text_file = output_dir / (corpus.stem + ".arpa")
lm_bin_file = output_dir / (corpus.stem + ".arpa.bin")
if lm_bin_file.exists():
return lm_bin_file
assert KENLM.exists(), f"{KENLM} binary to train kenlm model not found."
normalized = normalize(corpus, output_dir)
tokenized = tokenize(normalized, output_dir, lang)
print("Will train LM", lm_text_file, "on", tokenized)
kenlm_cmd = [
str(KENLM),
f"--order={ngrams}",
"--memory=8G",
f"--temp_prefix={jsonql._tmp_dir()}",
f"--text={tokenized}",
f"--arpa={lm_text_file}",
f"--vocab_estimate={vocab_size}",
"--discount_fallback",
]
subprocess.run(kenlm_cmd, check=True)
print("Will create binary model", lm_bin_file, "from", lm_text_file)
subprocess.run([str(KENLM_BUILD), str(lm_text_file), str(lm_bin_file)], check=True)
return lm_bin_file
def uniform_sampling_wrt_perplexity(
paragraphes: Iterable[str],
rounding: float = 100.0,
cut: float = 1000.0,
samples: int = 20,
) -> Iterable[str]:
max_samples = math.floor(cut / rounding * samples)
n = 0
buckets = Counter([0.0])
logging.info(f"Will sample {max_samples} sentences.")
for lines in paragraphes:
for line in lines.split("\n"):
if not line:
continue
pp = float(line[: line.find("\t")])
pp = math.floor(pp / rounding) * rounding
if pp > cut:
continue
if buckets[pp] > samples:
continue
yield line
buckets[pp] += 1
if buckets[pp] > samples:
logging.info(f"Bucket {pp} is full ({samples} samples, {n} total)")
n += 1
if n > max_samples:
return
def sample(
corpus: Path,
output_dir: Path,
dataset: Path = None,
n: int = 10_000,
lang: str = "en",
) -> Path:
sample_file = output_dir / (corpus.stem + ".pp_sample.tsv")
if sample_file.exists():
return sample_file
dataset = _dataset(dataset, lang)
extractor = ExtractSentences(
sp_model(lang), train_lm(corpus, output_dir), field="raw_content"
)
sampling = functools.partial(
uniform_sampling_wrt_perplexity, rounding=100.0, cut=1000.0, samples=n // 10
)
print(f"Will sample data from {dataset} to {sample_file}")
try:
jsonql.run_pipes(
extractor, sampling, file=dataset, output=sample_file, processes=PROCESSES
)
except Exception:
sample_file.unlink()
raise
subprocess.run(["sort", "-n", "-o", sample_file, sample_file], check=True)
subprocess.run(["head", sample_file], check=True)
return sample_file
def mine(
corpus: Path,
output_dir: Path,
threshold: float,
dataset: Path = None,
lang: str = "en",
) -> List[Path]:
"""Search sentences in CC similar to the one in the given corpus.
Args:
- corpus: corpus to train the LM one. Assumes one sentence per line.
- output_dir: where to store the results
- threshold: maximum perplexity to have
- dataset: glob pattern matching CC shards.
- lang: search in the files of this language
"""
dataset = _dataset(dataset, lang)
files = list(dataset.parent.glob(dataset.name))
outputs = [output_dir / (f.stem + ".tsv") for f in files]
if all(o.exists() for o in outputs):
return outputs
n = len(outputs)
sp = [sp_model(lang)] * n
lm = [train_lm(corpus, output_dir)] * n
thresholds = [threshold] * n
ex = submitit.AutoExecutor(output_dir / "mining_logs")
ex.update_parameters(
name="mine",
cpus_per_task=PROCESSES,
timeout_min=60 * 24 // PROCESSES,
mem_gb=10,
)
jobs = ex.map_array(_mine, files, outputs, sp, lm, thresholds)
print("Submited job array:", jobs[0])
for j in submitit.helpers.as_completed(jobs):
(i, o) = j.result()
print("Mined sentences from", i, "to", o)
return outputs
def _mine(
file: Path, output: Path, sp: Path, lm: Path, threshold: float
) -> Tuple[Path, Path]:
extractor = ExtractSentences(sp, lm, field="raw_content", threshold=threshold)
jsonql.run_pipes(extractor, file=file, output=output, processes=PROCESSES)
return (file, output)
if __name__ == "__main__":
func_argparse.main(sample, mine)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/tools/dl_cc_100.py | code/data_collection/cc/cc_net/cc_net/tools/dl_cc_100.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import gzip
import logging
import multiprocessing
from collections import defaultdict
from pathlib import Path
from typing import Callable, Dict, Iterator, List, NamedTuple, Optional, Tuple
import cc_net
from cc_net import jsonql
from cc_net.process_wet_file import CCSegmentsReader
# Set this to a directory to use as cache for intermediary files.
# This helps for debugging.
WET_CACHE = None
# WET_CACHE = Path("wet_cache")
S3_BUCKET = "https://dl.fbaipublicfiles.com/cc100"
VERSION = "1.0.0"
CC_100_SNAPSHOTS = [
"2018-05",
"2018-09",
"2018-13",
"2018-17",
"2018-22",
"2018-26",
"2018-30",
"2018-34",
"2018-39",
"2018-43",
"2018-47",
"2018-51",
]
BIG_LANGUAGES = {
"es_XX",
"fr_XX",
"de_DE",
"ja_XX",
"ru_RU",
"zh_CN",
"en_XX",
"it_IT",
"ar_AR",
"nl_XX",
"pl_PL",
"pt_XX",
"tr_TR",
"zh_TW",
}
class Paragraph(NamedTuple):
lang: str
text: str
lm_score: float
def _dl_shard(snapshot: str, shard: int) -> Iterator[Paragraph]:
"""
Download metadata from a shards.
Sample metadata:
{
"cc_segment": "crawl-data/CC-MAIN-2018-51/segments/1544376823009.19/wet/CC-MAIN-20181209185547-20181209211547-00000.warc.wet.gz",
"digest": "sha1:222LWNHN5FM26XGS7WJSMI6IISTVWBKJ",
"url": "http://personals.gearplay.com/ads/DRJONES.htm",
"line_ids": [10],
"languages": ["en_XX"],
"lm_scores": [-2.658],
}
"""
snapshot = snapshot.replace("-", "_")
name = f"snap_{snapshot}_batch_{shard}.json.gz"
url = "/".join([S3_BUCKET, VERSION, name])
shard_metadata: Dict[str, Dict[str, dict]] = defaultdict(dict)
try:
cache_file: Optional[Path] = None
if WET_CACHE is not None:
cache_file = WET_CACHE / name
metadata_file = jsonql.open_remote_file(url, cache_file)
except:
logging.warning(f"Couldn't open {url}")
return
for meta in jsonql.read_jsons(metadata_file):
shard_metadata[meta["cc_segment"]][meta["digest"]] = meta
found_pars, missed_pars = 0, 0
for seg, segment_metadata in shard_metadata.items():
for doc in CCSegmentsReader([seg], cache_dir=WET_CACHE):
if doc["digest"] not in segment_metadata:
continue
meta = segment_metadata[doc["digest"]]
full_pars = [doc["title"]] + doc["raw_content"].split("\n")
assert len(meta["line_ids"]) == len(meta["languages"])
assert len(meta["line_ids"]) == len(meta["lm_scores"])
for i, lang, score in zip(
meta["line_ids"], meta["languages"], meta["lm_scores"]
):
if snapshot != "2018-51" and lang in BIG_LANGUAGES:
# Big languages only come from "2018-51" snapshot
continue
if i >= len(full_pars):
# This is because CC100 was created by saving only urls.
# Some urls appears in different snapshot with slightly different
# versions, but we don't know which one is correct.
# Here we read both versions, but some index may end up
# being incorrect.
# This impact ~3% documents.
missed_pars += 1
continue
yield Paragraph(lang, full_pars[i], score)
found_pars += 1
if missed_pars > 0:
logging.warning(
f"Missed {missed_pars} ({missed_pars / found_pars:%}) paragraphes."
)
def _split_by_par(
paragraphes: Iterator[Paragraph], snapshot: str, shard: int, outdir: Path
) -> int:
outdir.mkdir(exist_ok=True)
outfiles = {}
num_pars = 0
try:
for par in paragraphes:
# MODIFY ME: filter paragraph if needed (languages, score, ...)
if par.lang not in outfiles:
(outdir / par.lang).mkdir(exist_ok=True)
outfile = outdir / par.lang / f"snap_{snapshot}_batch_{shard}.gz"
outfiles[par.lang] = gzip.open(outfile, "wt")
print(par.text, file=outfiles[par.lang])
num_pars += 1
finally:
for o in outfiles.values():
o.close()
logging.info(f"Extracted {num_pars:_d} paragraphs from shard {snapshot}_{shard}")
return num_pars
def dl_shard(snapshot: str, shard: int, outdir: Path) -> int:
return _split_by_par(_dl_shard(snapshot, shard), snapshot, shard, outdir)
@contextlib.contextmanager
def unordered_map(processes: int):
if processes == 0:
yield map
return
with multiprocessing.Pool(processes) as pool:
yield pool.imap_unordered
def dl_snapshot(snapshot: str, outdir: Path, processes: int = 1) -> None:
_dl_shard = functools.partial(dl_shard, snapshot, outdir=outdir)
with unordered_map(processes) as umap:
num_pars = sum(umap(_dl_shard, range(500)))
logging.info(f"Extracted {num_pars:_d} paragraphs from snapshot {snapshot}.")
def dl(
snapshot: str = None, outdir: Path = Path("data_cc100"), processes: int = 1
) -> None:
"""
Download CC100 corpus.
Will create one text file per language and CC snapshot.
- snapshot: restrict to one snapshot. Useful for parallelization.
- outdir: output directory
- processes: number of processes to use
"""
if snapshot is None:
snapshots = CC_100_SNAPSHOTS
else:
snapshots = snapshot.split(",")
invalids = [s for s in snapshots if s not in CC_100_SNAPSHOTS]
assert not invalids, f"Invalid snapshots {invalids}, chose from {CC_100_SNAPSHOTS}"
for snapshot in snapshots:
dl_snapshot(snapshot, outdir, processes)
if __name__ == "__main__":
import func_argparse
func_argparse.single_main(dl)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/cc/cc_net/cc_net/tools/__init__.py | code/data_collection/cc/cc_net/cc_net/tools/__init__.py | python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false | |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/github/language_mapping.py | code/data_collection/github/language_mapping.py | _LANG_TO_EXTENSION = {
"Assembly": ["asm"],
"Batchfile": ["bat", "cmd"],
"C": ["c", "h"],
"C#": ["cs"],
"C++": ["cpp", "hpp", "c++", "h++", "cc", "hh", "C", "H"],
"CMake": ["cmake"],
"CSS": ["css"],
"Dockerfile": ["dockerfile", "Dockerfile"],
"FORTRAN": ['f90', 'f', 'f03', 'f08', 'f77', 'f95', 'for', 'fpp'],
"GO": ["go"],
"Haskell": ["hs"],
"HTML":["html"],
"Java": ["java"],
"JavaScript": ["js"],
"Julia": ["jl"],
"Lua": ["lua"],
"Makefile": ["Makefile"],
"Markdown": ["md", "markdown"],
"PHP": ["php", "php3", "php4", "php5", "phps", "phpt"],
"Perl": ["pl", "pm", "pod", "perl"],
"PowerShell": ['ps1', 'psd1', 'psm1'],
"Python": ["py"],
"Ruby": ["rb"],
"Rust": ["rs"],
"SQL": ["sql"],
"Scala": ["scala"],
"Shell": ["sh", "bash", "command", "zsh"],
"TypeScript": ["ts", "tsx"],
"TeX": ["tex"],
"Visual Basic": ["vb"]
}
_EXTENSION_TO_LANG = {}
for lang in _LANG_TO_EXTENSION:
for extension in _LANG_TO_EXTENSION[lang]:
_EXTENSION_TO_LANG[extension] = lang | python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/github/deduplicate.py | code/data_collection/github/deduplicate.py | import hashlib
import os
ROOT = 'Code' # NOTE: hard-coded.
seen = set()
count = 0
dups = 0
print("Begin to deduplicate data")
for root_dir, _, files in os.walk(ROOT):
for file in files:
count += 1
file_path = os.path.join(root_dir, file)
# Hash the entire file's content.
with open(file_path, 'rb') as f:
bytes = f.read()
hash = hashlib.sha256(bytes).hexdigest()
# Delete identical files.
if hash in seen:
os.remove(file_path)
dups += 1
else:
seen.add(hash)
# Periodically print progress and the running duplication ratio.
if count % 10000 == 0:
print(f'Processed {count:,} files, duplicates so far: {dups:,} ({dups/count:.1%})')
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/github/data_encapsulation.py | code/data_collection/github/data_encapsulation.py | import os
import json
import argparse
from language_mapping import _EXTENSION_TO_LANG,_LANG_TO_EXTENSION
ROOT="Code" # NOTE: hard-coded.
INFO="InfoLists"
OUTPUT_DIR=r"RawData"
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--language", type=str, help="Language to search for.",default="python")
args = parser.parse_args()
count=0
dups=0
print("Begin to encapsulation data")
info_dict={}
for file in os.listdir(INFO):
with open(os.path.join(INFO,file),"r",encoding="utf-8") as f:
content=json.load(f)
info_dict[os.path.splitext(file)[0]]=content
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
with open(os.path.join(OUTPUT_DIR,"data.jsonl"),"w",encoding="utf-8") as jsonl_file:
for root_dir,_,files in os.walk(ROOT):
for file in files:
path_parts=os.path.normpath(root_dir).split(os.sep)
_code_type=path_parts[1]
user=path_parts[2]
name=path_parts[3]
if file.split(".")[-1] not in _EXTENSION_TO_LANG.keys():
continue
code_type=_EXTENSION_TO_LANG[file.split(".")[-1]]
if code_type.lower() != args.language.lower():
continue
repo_name=f"{path_parts[2]}/{path_parts[3]}"
count+=1
file_path=os.path.join(root_dir,file)
try:
with open(file_path,"r",encoding="utf-8") as f:
content=f.read()
except Exception as e:
print(f'Skipping problematic file {file_path} due to: {e}')
line={
"content":content,
"repo_name":repo_name,
"path":os.path.join("/".join(path_parts[4:]),file),
"size":os.path.getsize(file_path), # Byte
"language":code_type,
}
line.update(info_dict[f"{_code_type}_top_repos"][repo_name.strip()])
json_line=json.dumps(line)
jsonl_file.write(json_line+"\n")
print(count) | python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/github/minhash_deduplication.py | code/data_collection/github/minhash_deduplication.py | import json
import multiprocessing as mp
from multiprocessing import freeze_support
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
"""Compute the MinHash of a code snippet."""
if len(tokens) < MIN_NUM_TOKENS:
return None
min_hash = MinHash(num_perm=NUM_PERM)
for token in set(tokens):
min_hash.update(token.encode())
return min_hash
def get_tokens(code: str) -> Set[str]:
"""Tokenize a code snippet."""
return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
def __init__(
self,
*,
duplication_jaccard_threshold: float = 0.85,
):
self._duplication_jaccard_threshold = duplication_jaccard_threshold
self._num_perm = NUM_PERM
self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
self._duplicate_clusters = defaultdict(set)
def add(self, code_key: Tuple, min_hash: MinHash) -> None:
"""Add a key to _index (MinHashLSH)
the min_hash is used to query closest matches based on the jaccard_threshold.
The new key is either added to a existing cluster of one close match,
or a new cluster is created. The clusters created in this way, depend on the order of add.
Args:
code_key (Tuple of (index, repo_name, path)):
Theoritically any hasbale key. Here we use a tuple to retrieve the information later.
min_hash: MinHash of the code_key.
"""
close_duplicates = self._index.query(min_hash)
if code_key in self._index.keys:
print(f"Duplicate key {code_key}")
return
self._index.insert(code_key, min_hash)
if len(close_duplicates) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(code_key)
break
else:
self._duplicate_clusters[close_duplicates[0]].add(code_key)
def get_duplicate_clusters(self) -> List[List[Dict]]:
"""Export the duplicate clusters.
For each cluster, the first element is the base element of the cluster.
The base element has an estimation jaccard similarity higher than the threshold with all the other elements.
Returns:
duplicate_clusters (List[List[Dict]]):
List of duplicate clusters.
"""
duplicate_clusters = []
for base, duplicates in self._duplicate_clusters.items():
cluster = [base] + list(duplicates)
cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
duplicate_clusters.append(cluster)
return duplicate_clusters
def save(self, filepath) -> None:
duplicate_clusters = self.get_duplicate_clusters()
with open(filepath, "w") as f:
json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
index, data = element
min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
with mp.Pool(processes=4) as pool:
for data in pool.imap_unordered(
_compute_min_hash,
ThreadedIterator(dataset_iterator, max_queue_size=10000),
chunksize=100,
):
if data is not None:
yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
"""Find duplicate clusters in the dataset in two steps:
1. Compute MinHash for each code snippet. MinHash is a tool for fast jaccard similarity estimation.
This step is computed using an asynchronous multiprocessing pool, minhash_iter
2. Find duplicate clusters. The computed MinHash is added sequentially to the DuplicationIndex.
This step cannot be parallelized. So using asynchronous thread in the previous step helps to speed up the process.
"""
di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)): #获取minhash值
di.add(filename, min_hash)
return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
"""Compute the Jaccard similarity of two code snippets."""
tokens1 = get_tokens(code1)
tokens2 = get_tokens(code2)
return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
"""Find a reduced cluster such that each code in the origin cluster is similar to at least one code in the reduced cluster.
Two codes are similar if their Jaccard similarity is above the threshold.
Args:
cluster (List[dict]):
cluster is a list of dict, each dict contains the following keys:
- base_index
- repo_name
- path
This is a typical output of DuplicationIndex.get_duplicate_clusters()
jaccard_threshold (float):
threshold for Jaccard similarity.
Two codes are similar if their Jaccard similarity is above the threshold.
Returns:
extremes (List[dict]):
A reduced representation of the cluster. The field copies is added to each dict.
The copies field indicates the number of similar codes in the cluster for a extreme.
"""
extremes = []
for element1 in cluster:
code1 = _shared_dataset[element1["base_index"]]["content"]
for element2 in extremes:
code2 = _shared_dataset[element2["base_index"]]["content"]
if jaccard_similarity(code1, code2) >= jaccard_threshold:
element2["copies"] += 1
break
else:
element1["copies"] = 1
extremes.append(element1)
return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
"""Call the _find_cluster_extremes_shared function in a parallel fashion.
Args:
cluster_list (List[List[Dict]]):
each cluster is a list of dicts with the key base_index,
referring to the index of the base code in the dataset.
dataset (Type[Dataset]):
dataset is used to access the content of the code snippets,
using the base_index from the cluster_list.
dataset is shared between all the processes using a glabal variable (any other way to share the dataset?),
otherwise the multi processing is not speeded up.
jaccard_threshold (float):
the threshold for the jaccard similarity. The default value is 0.85
Returns:
extremes_list (List[Dict]):
Each cluster is reduced to extremes.
See _find_cluster_extremes_shared for the definition of extremes.
"""
global _shared_dataset
_shared_dataset = dataset
extremes_list = []
f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
with mp.Pool(processes=4) as pool:
for extremes in tqdm(
pool.imap_unordered(
f,
cluster_list,
),
total=len(cluster_list),
):
extremes_list.append(extremes)
return extremes_list
def deduplicate_dataset(
dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
"""Deduplicate the dataset using minhash and jaccard similarity.
This function first generate duplicate clusters, then each cluster
is reduced to the extremes that are similar to the other elements in the cluster.
Codes are called similar if their Jaccard similarity is greater than jaccard_threshold (0.85 default).
Args:
dataset (Type[Dataset]):
The dataset to deduplicate.
jaccard_threshold (float, default=0.85):
jaccard threshold to determine if two codes are similar
Returns:
ds_dedup (Type[Dataset]):
The deduplicated dataset.
duplicate_clusters (List[List[Dict]]):
The list of duplicate clusters.
Each cluster is a list of dicts with the following keys:
- base_index : int
The index of the code in the original dataset.
- repo_name : str
- path : str
- copies : int
The number of copies of the code in the cluster. (find_cluster_extremes)
- is_extreme : bool
Whether the code is an extreme in the cluster.
All the codes in the cluster are removed from the dataset except the extremes.
Example:
>>> from datasets import load_dataset
>>> from minhash_deduplication import deduplicate_dataset
>>> ds = load_dataset("lvwerra/codeparrot-clean", split="train")
>>> ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
"""
duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
extreme_dict = {}
extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
for extremes in extremes_clusters:
for element in extremes:
extreme_dict[element["base_index"]] = element
remove_indices = duplicate_indices - set(extreme_dict.keys())
ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
for cluster in duplicate_clusters:
for element in cluster:
element["is_extreme"] = element["base_index"] in extreme_dict
if element["is_extreme"]:
element["copies"] = extreme_dict[element["base_index"]]["copies"]
print(f"Original dataset size: {len(dataset)}")
print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
print(f"Files in duplicate cluster: {len(duplicate_indices)}")
print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
print(f"Filtered dataset size: {len(ds_filter)}")
return ds_filter, duplicate_clusters
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/github/data_clean.py | code/data_collection/github/data_clean.py | import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from datasets import load_dataset,Features,Value
from minhash_deduplication import deduplicate_dataset
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoTokenizer, HfArgumentParser
@dataclass
class PreprocessingArguments:
"""
Configuration for preprocessing data.
"""
num_workers: Optional[int] = field(
default=None,
metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
},
)
dataset_name: Optional[str] = field(
default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
)
output_dir: Optional[str] = field(
default="ProcessedData", metadata={"help": "Folder to save processed processed dataset."}
)
samples_per_file: Optional[int] = field(
default=100_000, metadata={"help": "Number of files to save per JSON output file."}
)
text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
line_max: Optional[float] = field(
default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
)
line_mean: Optional[float] = field(
default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
)
alpha_frac: Optional[float] = field(
default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
)
min_token_ratio: Optional[float] = field(
default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
)
filter_proba: Optional[float] = field(
default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
)
tokenizer: Optional[str] = field(
default="codeparrot/codeparrot",
metadata={"help": "Name or path to the tokenizer."},
)
near_deduplication: Optional[bool] = field(
default=False, metadata={"help": "If True, near-duplicate samples are removed."}
)
jaccard_threshold: Optional[float] = field(
default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
)
PATTERN = re.compile(r"\s+")
def get_hash(example):
"""Get hash of content field."""
return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
"""Calculates mean and max line length of file."""
line_lengths = [len(line) for line in example["content"].splitlines()]
return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths) if line_lengths else 0}
def alpha_stats(example):
"""Calculates mean and max line length of file."""
alpha_frac = np.mean([c.isalnum() for c in example["content"]])
return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
"""Check if current hash is still in set of unique hashes and remove if true."""
if example["hash"] in uniques:
uniques.remove(example["hash"])
return True
else:
return False
def is_autogenerated(example, scan_width=5):
"""Check if file is autogenerated by looking for keywords in the first few lines of the file."""
keywords = ["auto-generated", "autogenerated", "automatically generated"]
lines = example["content"].splitlines()
for _, line in zip(range(scan_width), lines):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
"""Check if file is a configuration file or a unit test by :
1- looking for keywords in the first few lines of the file.
2- counting number of occurence of the words 'config' and 'test' with respect to number of lines.
"""
keywords = ["unit tests", "test file", "configuration file"]
lines = example["content"].splitlines()
count_config = 0
count_test = 0
# first test
for _, line in zip(range(scan_width), lines):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
nlines = example["content"].count("\n")
threshold = int(coeff * nlines)
for line in lines:
count_config += line.lower().count("config")
count_test += line.lower().count("test")
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords(example):
"""Check if a python file has none of the keywords for: funcion, class, for loop, while loop."""
keywords = ["def ", "class ", "for ", "while "]
lines = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
# filtering files that use the assignment operator = less than 5 times
def has_few_assignments(example, minimum=4):
"""Check if file uses symbol '=' less than `minimum` times."""
lines = example["content"].splitlines()
counter = 0
for line in lines:
counter += line.lower().count("=")
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
# filtering files with ratio between number of characters and number of tokens after tokenization < 1.5 (the average ratio is 3.6)
def char_token_ratio(example):
"""Compute character/token ratio of the file with tokenizer."""
input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
ratio = len(example["content"]) / len(input_ids)
return {"ratio": ratio}
def preprocess(example):
"""Chain all preprocessing steps into one function to not fill cache."""
results = {}
results.update(get_hash(example))
results.update(line_stats(example))
results.update(alpha_stats(example))
results.update(char_token_ratio(example))
results.update(is_autogenerated(example))
results.update(is_config_or_test(example))
results.update(has_no_keywords(example))
results.update(has_few_assignments(example))
return results
def filter(example, uniques, args):
"""Filter dataset with heuristics. Config, test and has_no_keywords files are removed with a given probability."""
if not check_uniques(example, uniques):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def save_jsonl(dataset,file_path):
with open(file_path,"w",encoding="utf-8") as jsonl_file:
for data in dataset:
json_line=json.dumps(data)
jsonl_file.write(json_line+"\n")
if __name__ == "__main__":
# Settings
print('Begin to clean data')
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
# Load dataset
t_start = time.time()
ds = load_dataset("json", data_files=os.path.join(r"RawData",args.dataset_name), split="train",cache_dir="cache")
ds = ds.filter(lambda example: len(example['content'])>0)
features=ds.features.copy()
features['createdAt']=Value('string')
features['pushedAt']=Value('string')
ds = ds.cast(features)
print(f"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
from multiprocessing import freeze_support
freeze_support()
t_start = time.time()
ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(f"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
file_path = str(data_dir / f"file-{file_number+1:012}.jsonl")
end_index = min(len(ds_filter), index + args.samples_per_file)
save_jsonl(ds_filter.select(list(range(index, end_index))),file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/github/gh_crawler.py | code/data_collection/github/gh_crawler.py | import requests
import sys
import time
import json
import os
import argparse
# Insert GitHub API token here, in place of *TOKEN*.
headers = {"Authorization": "token *TOKEN*"}
def run_query(max_stars):
end_cursor = None
repositories = set()
info_list={}
while end_cursor != "":
query = f"""
{{
search(query: "language:{args.language} fork:false created:>{args.created_at} sort:stars stars:<{max_stars}", type: REPOSITORY, first: 100 {', after: "' + end_cursor + '"' if end_cursor else ''}) {{
edges {{
node {{
... on Repository {{
url
isPrivate
isDisabled
isLocked
createdAt
pushedAt
name
licenseInfo {{
name
}}
owner {{
login
}}
stargazers {{
totalCount
}}
}}
}}
}}
pageInfo {{
hasNextPage
endCursor
}}
}}
}}
"""
print(f' Retrieving next page; {len(repositories)} repositories in this batch so far.')
# Attempt a query up to three times, pausing when a query limit is hit.
attempts = 0
success = False
while not success and attempts < 10:
try:
request = requests.post('https://api.github.com/graphql', json={'query': query}, headers=headers)
print(query)
print(request)
content = request.json()
if 'data' not in content or 'search' not in content['data']:
# If this is simply a signal to pause querying, wait two minutes.
if 'message' in content and 'wait' in content['message']:
attempts += 1
time.sleep(120)
# Otherwise, assume we've hit the end of the stream.
else:
break
else:
success = True
except:
continue
if not success:
break
end_cursor = get_end_cursor(content)
new_repositories, is_done,info = get_repositories(content)
repositories.update(new_repositories)
info_list.update(info)
if len(repositories) > args.num_repos or is_done:
break
return repositories,info_list
def get_end_cursor(content):
page_info = content['data']['search']['pageInfo']
has_next_page = page_info['hasNextPage']
if has_next_page:
return page_info['endCursor']
return ""
def get_repositories(content):
edges = content['data']['search']['edges']
repositories_with_stars = []
info_list={}
for edge in edges:
if edge['node']['isPrivate'] is False and edge['node']['isDisabled'] is False and edge['node']['isLocked'] is False:
repository = edge['node']['url']
star_count = edge['node']['stargazers']['totalCount']
info={
"url":edge['node']['url'],
"createdAt":edge['node']['createdAt'],
'pushedAt':edge['node']['pushedAt'],
"repo_name":f"{edge['node']['owner']['login']}/{edge['node']['name']}",
"licenses":edge['node']['licenseInfo']["name"] if edge['node']['licenseInfo'] else "null",
"stars_count":star_count
}
info_list[f"{edge['node']['owner']['login']}/{edge['node']['name']}"]=info
if star_count < args.min_stars:
return repositories_with_stars, True, info_list
repositories_with_stars.append((repository, star_count))
#info_list[f"{edge['node']['owner']['login']}/{edge['node']['name']}"]=info
return repositories_with_stars, False,info_list
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--language", type=str, help="Language to search for.",default="python")
parser.add_argument("--num_repos", type=int, help="Number of repositories to search for.",default=100)
parser.add_argument("--min_stars", type=int, help="Minimum number of stars for a repository to be included.",default=10)
parser.add_argument("--created_at", type=str, help="Date after which a repository is considered active.",default='2023-09-01')
args = parser.parse_args()
repositories = set() # Keep track of a set of repositories seen to avoid duplicate entries across pages.
next_max_stars = 1_000_000_000 # Initialize to a very high value.
info_list={}
if not os.path.exists(r"TopLists"):
os.mkdir(r"TopLists")
if not os.path.exists(r"InfoLists"):
os.mkdir(r"InfoLists")
retry=0
with open(f'TopLists/{args.language}_top_repos.txt', 'w',encoding='utf-8') as f:
while len(repositories) < args.num_repos:
results,info = run_query(next_max_stars) # Get the next set of pages.
info_list.update(info)
if not results:
break
new_repositories = [repository for repository, _ in results]
next_max_stars = min([stars for _, stars in results])
# If a query returns no new repositories, drop it.
if len(repositories | set(new_repositories)) == len(repositories):
retry+=1
if retry>5:
break
for repository, stars in sorted(results, key=lambda e: e[1], reverse=True):
if repository not in repositories:
repositories.add(repository)
f.write(f'{stars}\t{repository}\n')
f.flush()
print(f'Collected {len(repositories):,} repositories so far; lowest number of stars: {next_max_stars:,}')
with open(f'InfoLists/{args.language}_top_repos.json', 'w',encoding='utf-8') as f:
json.dump(info_list,f,indent=4)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/github/extract_code.py | code/data_collection/github/extract_code.py | """Copies all files belonging to a given language to a new directory."""
import os
import sys
from shutil import copyfile
from language_mapping import _EXTENSION_TO_LANG,_LANG_TO_EXTENSION
MAX_FILE_SIZE = 1024 ** 2 # 1 MB
MIN_FILE_TOKENS = 100
def main():
if len(sys.argv) <= 3:
raise ValueError('Provide a language, source directory and target directory.')
language = sys.argv[1]
proj_dir = sys.argv[2]
out_dir = sys.argv[3]
print(f'Processing: {proj_dir}')
if not os.path.exists(out_dir):
os.makedirs(out_dir)
files_found = 0
for root, _, files in os.walk(proj_dir):
for file in files:
if any(file.endswith(ext) for ext in _EXTENSION_TO_LANG.keys()):
in_path = os.path.join(root, file)
if not os.path.exists(in_path):
continue
if os.path.getsize(in_path) > MAX_FILE_SIZE:
continue
with open(in_path, errors='ignore') as f_in:
text = f_in.read()
rel_path = root[len(proj_dir)+1:].replace('/', '__')
out_path = os.path.join(out_dir, rel_path + ('__' if rel_path else '') + file)
if not os.path.exists(out_path):
try:
copyfile(in_path, out_path)
except Exception as e:
print(f'Skipping problematic file {in_path} due to: {e}')
files_found += 1
print(f'Done processing; copied {files_found} files.')
if __name__ == '__main__':
main() | python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/arxiv/arxiv_downloader.py | code/data_collection/arxiv/arxiv_downloader.py | import xml.etree.ElementTree as ET
from easydict import EasyDict
import argparse
import queue
import subprocess
import signal
import sys
import threading
import os
THREAD_NUM = 4
CMD_TEMPLATE = "s3cmd get --recursive --skip-existing --requester-pays -q s3://arxiv/{} {} "
OUTPUT_DIR ="ArxivSrc"
ERROR_LOG = "error.log"
def xml2dict(node):
if not isinstance(node, ET.Element):
raise Exception("node format error.")
if len(node) == 0:
return node.tag, node.text
data = {}
temp = None
for child in node:
key, val = xml2dict(child)
if key in data:
if type(data[key]) == list:
data[key].append(val)
else:
temp = data[key]
data[key] = [temp, val]
else:
data[key] = val
return node.tag, data
def check_data(yymm, start_yymm):
if yymm > 5000 or yymm < start_yymm:
return False
return True
running_processes=[]
stop_event=threading.Event()
def signal_handler(signal,frame):
print("Ctrl+C detected. Stopping all threads and processes.")
stop_event.set()
for process in running_processes:
process.terminate()
sys.exit(0)
def task(task_queue,cmd_template, output_dir, error_log):
while not task_queue.empty() and not stop_event.is_set():
task = task_queue.get()
cmd = cmd_template.format(task,output_dir)
completed_process=subprocess.Popen(cmd,shell=True,text=True)
running_processes.append(completed_process)
completed_process.wait()
print(f"{task} return code:",completed_process.returncode)
if completed_process.returncode != 0:
with open(error_log,"a",encoding="utf-8") as f:
f.write(task+"\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--start_yymm",type=str,default="2309")
parser.add_argument("--num",type=int,default=-1)
args=parser.parse_args()
print(args)
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
tree = ET.parse("arXiv_src_manifest.xml")
node = tree.getroot()
tag, data = xml2dict(node)
#print(data)
data = EasyDict(data)
dumps=[]
for dump in data.file:
if check_data(int(dump.yymm),int(args.start_yymm)):
dumps.append(dump)
if args.num != -1:
dumps = dumps[:args.num]
threads=[]
signal.signal(signal.SIGINT, signal_handler)
print("Total Num of Dumps:",len(dumps))
task_queue=queue.Queue()
for dump in dumps:
task_queue.put(dump.filename)
for i in range(THREAD_NUM):
t = threading.Thread(target=task,args=(task_queue,CMD_TEMPLATE,OUTPUT_DIR,ERROR_LOG))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
print("All tasks done.")
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/arxiv/data_encapsulation.py | code/data_collection/arxiv/data_encapsulation.py | import os
import json
from utils import *
print("Encapsulating data...")
OUTPUT_DIR='data'
sh('mkdir -p '+OUTPUT_DIR)
info_dict=jsonl2dict('arxiv-metadata-oai-snapshot.json',key='id')
files=ls('clean_out')
with open(os.path.join(OUTPUT_DIR,"data.jsonl"),'w',encoding='utf-8') as f:
for file in files:
file_name=file.split('/')[-1]
id=file_name.split('_')[0]
id=id.rstrip('.tex.md')
info=info_dict[id]
t=file_name.split('_')
if "extract" in file_name:
src_name="_".join(file_name.split('_')[2:])[:-3]
else:
src_name=file_name[:-3]
line={
'content':fread(file),
"id":info['id'],
"filename":src_name,
"title":info['title'],
"authors":info['authors'],
"doi":info['doi'],
"license":info['license'],
'journal-ref':info['journal-ref'],
"catagories":info['categories'],
}
f.write(json.dumps(line)+'\n')
print("Encapsulation Done!") | python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/arxiv/data_clean.py | code/data_collection/arxiv/data_clean.py | from utils import *
import re
import multiprocessing as mp
def clean(file):
with open(file,'r',encoding='utf-8') as f:
content = f.readlines()
new_content = []
for l in content:
if not l.strip().startswith(":::"):
new_content.append(l)
file_name = file.split("/")[-1]
with open(f"clean_out/{file_name}",'w',encoding='utf-8') as f:
f.writelines(new_content)
if __name__ == "__main__":
print("Cleaning...")
pool = mp.Pool(8)
files = ls("out")
sh("mkdir -p clean_out")
pool.map(clean,files)
pool.close()
pool.join()
print("Cleaning Done") | python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/arxiv/utils.py | code/data_collection/arxiv/utils.py | import os
from functools import reduce
import operator
# import mailparser
# import lm_dataformat as lmd
from tqdm import tqdm
import json
class ExitCodeError(Exception): pass
def sh(x):
if os.system(x): raise ExitCodeError()
def ls(x):
return [x + '/' + fn for fn in os.listdir(x)]
def lsr(x):
if os.path.isdir(x):
return reduce(operator.add, map(lsr, ls(x)), [])
else:
return [x]
def fwrite(fname, content):
with open(fname, 'w') as fh:
fh.write(content)
def fread(fname):
with open(fname) as fh:
return fh.read()
class each:
def __init__(self, f):
self.f = f
def __rrshift__(self, other):
return list(map(self.f, other))
class filt:
def __init__(self, f):
self.f = f
def __rrshift__(self, other):
return list(filter(self.f, other))
class apply:
def __init__(self, f):
self.f = f
def __rrshift__(self, other):
return self.f(other)
class one:
def __rrshift__(self, other):
try:
if isinstance(other, list):
assert len(other) == 1
return other[0]
return next(other)
except:
return None
class join:
def __init__(self, sep):
self.sep = sep
def __rrshift__(self, other):
if other is None: return
try:
return self.sep.join(other)
except:
return None
Y = object()
def id(x):
return x
class Reflective:
def __getattribute__(self, f):
def _fn(*args, **kwargs):
return lambda x: x.__getattribute__(f)(*args, **kwargs)
return _fn
def __getitem__(self, a):
return lambda x: x[a]
def __mul__(self, other):
if other == Y:
def _f(x, y=None):
if y == None:
x, y = x
return x * y
return _f
return lambda x: x * other
def __rmul__(self, other):
if other == Y:
def _f(x, y=None):
if y == None:
x, y = x
return y * x
return _f
return lambda x: other * x
def __add__(self, other):
if other == Y:
def _f(x, y=None):
if y == None:
x, y = x
return x + y
return _f
return lambda x: x + other
def __radd__(self, other):
if other == Y:
def _f(x, y=None):
if y == None:
x, y = x
return y + x
return _f
return lambda x: other + x
# (b -> a -> b) -> b -> [a] -> b
def foldl(f, init, arr):
curr = init
for elem in arr:
curr = f(curr, elem)
return curr
# (a -> b -> b) -> b -> [a] -> b
def foldr(f, init, arr):
curr = init
for elem in arr[::-1]:
curr = f(elem, curr)
return curr
def comp(*fs):
if len(fs) == 1:
return fs[0]
def _f(x):
for f in fs[::-1]:
x = f(x)
return x
return _f
X = Reflective()
def jsonl2list(path):
data=[]
with open(path, 'r', encoding='utf-8') as reader:
for line in reader:
data.append(json.loads(line))
return data
def jsonl2dict(path,key):
data={}
with open(path, 'r', encoding='utf-8') as reader:
for line in reader:
l = json.loads(line)
data[l[key]]=l
return data | python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/arxiv/arxiv_extractor.py | code/data_collection/arxiv/arxiv_extractor.py | from utils import *
import magic
mime = magic.Magic(mime=True)
import multiprocessing as mp
import chardet
import time
import os
sh("mkdir -p tmp tmp2 out done fallback_needed errored")
def any_to_utf8(b):
try:
return b.decode('utf-8')
except UnicodeDecodeError:
guess = chardet.detect(b)['encoding']
if not guess or guess == 'UTF-8': return
try:
return b.decode(guess)
except (UnicodeDecodeError, LookupError):
return
def convert(tex):
print(tex)
out_name = tex.split('/')[2:] >> join('_')
try:
with open(tex, 'rb') as fh:
b = fh.read()
cont = any_to_utf8(b)
if cont is None: return
fwrite(tex, cont)
except FileNotFoundError:
return
try:
pandoc_dir = tex.split('/')[:-1] >> join('/')
file_name = tex.split('/')[-1]
sh(f'cd {pandoc_dir} && timeout 10s pandoc -s {file_name} -o {os.getcwd()}/out/{out_name}.md --wrap=none')
print(os.path.exists(f'out/{out_name}.md'))
except ExitCodeError:
import traceback
traceback.print_exc()
try:
if '_extract' in tex.split('/')[:-1] >> join('/'):
loc = tex.split('/')[:-1] >> join('/')
else:
loc = tex
sh(f'mv {loc} fallback_needed/')
return
except ExitCodeError:
import traceback
traceback.print_exc()
def preextract_tar(dump):
dump_name = dump.split('/')[-1][:-4]
sh(f"(mkdir -p tmp2/{dump_name}; tar xf {dump} -C tmp2/{dump_name} && touch tmp2/done_{dump_name}; echo finished preload of {dump_name}) &")
def copy_tar(dump):
dump_name = dump.split('/')[-1][:-4]
for i in range(120):
if os.path.exists(f'tmp2/done_{dump_name}'):
sh(f'mv tmp2/{dump_name}/* tmp')
return True
print('waiting for tar...')
time.sleep(1)
return False
pool = mp.Pool(8)
files = ls('ArxivSrc')
sh("rm -rf tmp/* tmp2/*")
preextract_tar(files[0])
for i, dump in enumerate(tqdm(files)):
if i + 1 < len(files): preextract_tar(files[i + 1])
# try:
sh("rm -rf tmp/*")
if not copy_tar(dump): continue
print(dump)
for doc in lsr('tmp'):
if doc.endswith('.gz'):
sh(f"gunzip {doc}")
type = mime.from_file(doc[:-3])
if type == 'application/x-tar':
sh(f"mkdir -p {doc[:-3]}_extract && tar xf {doc[:-3]} -C {doc[:-3]}_extract")
sh(f"rm {doc[:-3]}")
elif type == 'text/x-tex':
sh(f"mv {doc[:-3]} {doc[:-3]}.tex")
else:
sh(f"rm {doc[:-3]}")
elif doc.endswith('.pdf'):
sh(f"rm {doc}")
def tex_files():
for doc in ls(ls('tmp')[0]):
if os.path.isdir(doc):
for name in ['main', 'Main', 'MAIN', 'paper', 'Paper']:
for file in ls(doc):
if file.endswith('.tex') and name in file:
yield file
break
else:
continue
break
else:
if ls(doc) >> filt(X.endswith('.tex')) >> apply(len) == 1:
yield ls(doc) >> filt(X.endswith('.tex')) >> one()
continue
for titledoc in ls(doc) >> filt(X.endswith('.tex')):
try:
if r'\title' in fread(titledoc):
yield titledoc
except:
pass
elif doc.endswith('.tex'):
yield doc
texfiles = list(tex_files())
pool.map(convert, texfiles)
sh(f'mv {dump} done')
print(f'marking {dump} as done')
pool.close()
pool.join() | python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/data_collection/arxiv/data_tag_filter.py | code/data_collection/arxiv/data_tag_filter.py | import json
import os
INPUT= 'data/data.jsonl'
SAVE='math_data/data.jsonl'
KEY = 'catagories'
l=[]
with open(INPUT, 'r', encoding="utf-8") as f:
for line in f:
data = json.loads(line)
if data[KEY].split(" ")[0].split(".")[0]=="math":
l.append(data)
os.makedirs(os.path.dirname(SAVE), exist_ok=True)
with open(SAVE, 'w', encoding='utf-8') as f:
for d in l:
json.dump(d, f, ensure_ascii=False)
f.write('\n')
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/evaluation/main.py | code/evaluation/main.py | import torch
from torch.utils.data import DataLoader
import argparse
import torch
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
)
from packed_dataset import EvalDataset
import numpy as np
from tqdm import tqdm
def cross_entropy(
logits, targets, attention_mask: torch.Tensor = None
):
logits = logits.reshape(-1, logits.size(-1))
targets = targets.reshape(-1)
if attention_mask is not None:
attention_mask = attention_mask.reshape(-1)
targets = targets.masked_fill(~attention_mask, -1)
return torch.nn.functional.cross_entropy(logits, targets, ignore_index=-1, reduction='sum')
@torch.no_grad()
def validate(args, model, val_dataloader: DataLoader, device):
model.eval()
losses = []
for k, (val_data, attention_mask) in enumerate(tqdm(val_dataloader)):
input_ids = val_data[:, 0: args.block_size].contiguous().to(device)
targets = val_data[:, 1: args.block_size + 1].contiguous().long().to(device)
attention_mask = attention_mask[:, 1: args.block_size + 1].contiguous().to(device)
logits = model(input_ids).logits
loss = cross_entropy(logits, targets, attention_mask=attention_mask)
loss = loss.cpu().item()
losses.append(loss)
print("%.8f" % loss)
out = np.array(losses).sum()
return out
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--task_name",
type=str,
)
parser.add_argument(
"--model_name",
type=str
)
parser.add_argument(
'--block_size',
type=int,
default=1900,
)
parser.add_argument(
'--stride',
type=int,
default=512,
)
parser.add_argument(
'--batch_size',
type=int
)
parser.add_argument(
'--file_num',
default=-1,
type=int
)
parser.add_argument(
'--flash',
action="store_true",
help="set this if you want to use flash attention",
)
parser.add_argument("--gpu", type=int, default=0)
parser.add_argument("--cache_dir", type=str, default=None)
args = parser.parse_args()
print(args)
device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained(
args.model_name,
use_fast=True if ("llemma" in args.model_name) or ("mpt" in args.model_name) else False,
cache_dir=args.cache_dir,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
args.model_name,
device_map="auto",
torch_dtype=torch.bfloat16,
cache_dir=args.cache_dir,
trust_remote_code=True,
use_flash_attention_2=True if args.flash and "mpt" not in args.model_name else False
)
valdataset = EvalDataset(
args=args,
task_name=args.task_name,
block_size=args.block_size + 1,
tokenizer=tokenizer,
stride=args.stride,
vocab_size=tokenizer.vocab_size,
file_num=args.file_num
)
valdataloader = DataLoader(valdataset, batch_size=args.batch_size, shuffle=False)
total_loss = validate(args, model, valdataloader, device)
print("-"*10, "Result", "-"*10)
print("Total loss:", total_loss)
print("Character num:", valdataset.character_num)
print("BPC:", total_loss / (valdataset.character_num * np.log(2)) )
print("finished")
if __name__ == "__main__":
main()
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
hkust-nlp/llm-compression-intelligence | https://github.com/hkust-nlp/llm-compression-intelligence/blob/ff39c4161d4cd16c7603d85f436da123773448bb/code/evaluation/packed_dataset.py | code/evaluation/packed_dataset.py | import numpy as np
import torch
from torch.utils.data import Dataset
from datasets import load_dataset
import math
class EvalDataset(Dataset):
def __init__(self, args, task_name, block_size, stride, tokenizer, file_num=-1, dtype="auto", vocab_size=None):
self.args = args
self.task_name = task_name
self.block_size = block_size
self.tokenizer = tokenizer
self.file_num = file_num
self.data = None
self.stride = stride
if dtype == "auto":
if vocab_size is None:
raise ValueError("vocab_size cannot be None when dtype='auto'")
if vocab_size is not None and vocab_size < 65500:
self._dtype = np.uint16
else:
self._dtype = np.int32
else:
self._dtype = dtype
self._prepare()
self.prev_end_loc = 0
self.seq_len = len(self.data)
self.begin_loc = 0
def _prepare(self):
self._curr_idx = 0
self._arr = []
self._raw_dataset = load_dataset(
"hkust-nlp/llm-compression",
self.task_name,
split='test[:]' if self.file_num == -1 else f"test[:{self.file_num}]",
cache_dir=self.args.cache_dir,
)
self.raw_dataset = self._raw_dataset.filter(lambda example: len(example['content']) > 0)
self.character_num = 0
for i in range(len(self.raw_dataset)):
self.character_num += len(self.raw_dataset[i]['content'])
self.data = self.raw_dataset.map(
lambda example: {"encoding": np.array(self.tokenizer.encode(example['content']), dtype=self._dtype)}, num_proc=8)
self.data = np.concatenate([a['encoding'] for a in self.data], axis=0)
def __len__(self):
return math.floor((len(self.data)-self.block_size)/self.stride+1)
def __getitem__(self,item):
end_loc = min(self.begin_loc+self.block_size, self.seq_len)
trg_len = end_loc - self.prev_end_loc
input_ids = self.data[self.begin_loc:end_loc]
attention_mask = np.ones((len(input_ids),), dtype=bool)
attention_mask[:-trg_len] = False
self.prev_end_loc = end_loc
self.begin_loc = self.begin_loc + self.stride
return torch.tensor(input_ids), torch.tensor(attention_mask, dtype=bool)
| python | MIT | ff39c4161d4cd16c7603d85f436da123773448bb | 2026-01-05T07:14:42.211982Z | false |
henne49/dbus-opendtu | https://github.com/henne49/dbus-opendtu/blob/9266cc79e781ef8c8e75749922f8634ddd2b7989/constants.py | constants.py | '''Global constants'''
from helpers import _kwh, _a, _w, _v
DTUVARIANT_AHOY = "ahoy"
DTUVARIANT_OPENDTU = "opendtu"
DTUVARIANT_TEMPLATE = "template"
PRODUCTNAME = "henne49_dbus-opendtu"
CONNECTION = "TCP/IP (HTTP)"
MODE_TIMEOUT = "timeout"
MODE_RETRYCOUNT = "retrycount"
# Status codes for the DTU
STATUSCODE_STARTUP = 0
STATUSCODE_RUNNING = 7
STATUSCODE_STANDBY = 8
STATUSCODE_BOOTLOADING = 9
STATUSCODE_ERROR = 10
VICTRON_PATHS = {
"/Ac/Energy/Forward": {
"initial": None,
"textformat": _kwh,
}, # energy produced by pv inverter
"/Ac/Power": {"initial": None, "textformat": _w},
"/Ac/L1/Voltage": {"initial": None, "textformat": _v},
"/Ac/L2/Voltage": {"initial": None, "textformat": _v},
"/Ac/L3/Voltage": {"initial": None, "textformat": _v},
"/Ac/L1/Current": {"initial": None, "textformat": _a},
"/Ac/L2/Current": {"initial": None, "textformat": _a},
"/Ac/L3/Current": {"initial": None, "textformat": _a},
"/Ac/L1/Power": {"initial": None, "textformat": _w},
"/Ac/L2/Power": {"initial": None, "textformat": _w},
"/Ac/L3/Power": {"initial": None, "textformat": _w},
"/Ac/L1/Energy/Forward": {"initial": None, "textformat": _kwh},
"/Ac/L2/Energy/Forward": {"initial": None, "textformat": _kwh},
"/Ac/L3/Energy/Forward": {"initial": None, "textformat": _kwh},
"/Ac/Out/L1/I": {"initial": None, "textformat": _a},
"/Ac/Out/L1/V": {"initial": None, "textformat": _v},
"/Ac/Out/L1/P": {"initial": None, "textformat": _w},
"/Dc/0/Voltage": {"initial": None, "textformat": _v},
}
| python | MIT | 9266cc79e781ef8c8e75749922f8634ddd2b7989 | 2026-01-05T07:14:42.526396Z | false |
henne49/dbus-opendtu | https://github.com/henne49/dbus-opendtu/blob/9266cc79e781ef8c8e75749922f8634ddd2b7989/dbus_opendtu.py | dbus_opendtu.py | #!/usr/bin/env python
'''module to read data from dtu/template and show in VenusOS'''
from imports import *
def getConfig():
"""
Reads the configuration from a config.ini file and sets up logging.
The function reads the configuration file located in the same directory as the script.
It configures the logging level based on the value specified in the configuration file.
Returns:
configparser.ConfigParser: The configuration object containing the parsed configuration.
"""
# configure logging
config = configparser.ConfigParser()
config_path = f"{os.path.dirname(os.path.realpath(__file__))}/config.ini"
if not os.path.exists(config_path):
raise FileNotFoundError(f"Config file not found: {config_path}")
config.read(config_path)
logging_level = config["DEFAULT"]["Logging"].upper()
logging.basicConfig(
format="%(levelname)s %(message)s",
level=logging_level,
)
return config
def get_DbusServices(config):
"""
Retrieves and registers D-Bus services based on the provided configuration.
Args:
config (dict): Configuration dictionary containing the necessary settings.
Returns:
list: A list of registered DbusService instances.
"""
services = []
# region Get the configuration values
try:
number_of_inverters = int(config["DEFAULT"]["NumberOfInvertersToQuery"])
except (KeyError, ValueError) as ex:
logging.warning("NumberOfInvertersToQuery: %s", ex)
logging.warning("NumberOfInvertersToQuery not set, using default")
number_of_inverters = 0
try:
number_of_templates = int(config["DEFAULT"]["NumberOfTemplates"])
except (KeyError, ValueError) as ex:
logging.warning("NumberOfTemplates: %s", ex)
logging.warning("NumberOfTemplates not set, using default")
number_of_templates = 0
try:
dtuvariant = config["DEFAULT"]["DTU"]
except KeyError:
logging.critical("DTU key not found in configuration")
return
# endregion
# region Register the inverters
if dtuvariant != constants.DTUVARIANT_TEMPLATE:
logging.info("Registering dtu devices")
servicename = get_config_value(config, "Servicename", "INVERTER", 0, "com.victronenergy.pvinverter")
service = DbusService(
servicename=servicename,
actual_inverter=0,
)
services.append(service)
if number_of_inverters == 0:
# pylint: disable=W0621
number_of_inverters = service.get_number_of_inverters()
# If there are no inverters or templates, return an empty list
if number_of_inverters == 0 and number_of_templates == 0:
logging.critical("No inverters or templates to query")
return [] # Empty list
if number_of_inverters > 1:
# start our main-service if there are more than 1 inverter
for actual_inverter in range(number_of_inverters - 1):
servicename = get_config_value(
config,
"Servicename",
"INVERTER",
actual_inverter + 1,
"com.victronenergy.pvinverter"
)
services.append(DbusService(
servicename=servicename,
actual_inverter=actual_inverter + 1,
))
# endregion
# region Register the templates
for actual_template in range(number_of_templates):
logging.critical("Registering Templates")
servicename = get_config_value(
config,
"Servicename",
"TEMPLATE",
actual_template,
"com.victronenergy.pvinverter"
)
services.append(DbusService(
servicename=servicename,
actual_inverter=actual_template,
istemplate=True,
))
# endregion
return services
def sign_of_life_all_services(services):
"""
Sends a 'sign of life' signal to all services in the provided list.
Args:
services (list): A list of service objects. Each service object must have a 'sign_of_life' method.
Returns:
bool: Always returns True to keep the timeout active.
"""
for service in services:
service.sign_of_life()
return True
def update_all_services(services):
"""
Updates all services in the provided list.
Args:
services (list): A list of service objects.
Each service object must have an 'update' method and
a 'polling_interval' and a 'polling_last_polling' attribute.
Returns:
bool: Always returns True to keep the timeout active.
"""
if sys.version_info.major == 2:
current_time = gobject.get_current_time()
else:
current_time = gobject.get_real_time() // 1000
for service in services:
if current_time - service.last_polling >= service.polling_interval:
service.update()
service.last_polling = current_time
return True
def main():
""" Main function """
config = getConfig()
signofliveinterval = int(get_config_value(config, "SignOfLifeLog", "DEFAULT", "", 1))
logging.debug("SignOfLifeLog: %d", signofliveinterval)
# TODO: I think it is better to run the tests inside CI/CD pipeline instead of running it here
# tests.run_tests()
try:
logging.info("Start")
from dbus.mainloop.glib import DBusGMainLoop # pylint: disable=E0401,C0415
# Have a mainloop, so we can send/receive asynchronous calls to and from dbus
DBusGMainLoop(set_as_default=True)
services = get_DbusServices(config)
logging.info("Registered %d services", len(services))
# Use a single timeout to call sign_of_life for all services
gobject.timeout_add(signofliveinterval * 60 * 1000, sign_of_life_all_services, services)
# Use another timeout to update all services
gobject.timeout_add(1000, update_all_services, services)
logging.info("Connected to dbus, and switching over to gobject.MainLoop() (= event based)")
mainloop = gobject.MainLoop()
mainloop.run()
except Exception as error: # pylint: disable=W0718
logging.critical("Error at %s", "main", exc_info=error)
if __name__ == "__main__":
main()
| python | MIT | 9266cc79e781ef8c8e75749922f8634ddd2b7989 | 2026-01-05T07:14:42.526396Z | false |
henne49/dbus-opendtu | https://github.com/henne49/dbus-opendtu/blob/9266cc79e781ef8c8e75749922f8634ddd2b7989/imports.py | imports.py | # imports.py
""" Imports for the project """
# pylint: disable=w0611
# system imports:
import logging
import logging.handlers
import os
import configparser
import sys
# our imports:
import constants
import tests
from helpers import *
# Victron imports:
from dbus_service import DbusService
if sys.version_info.major == 2:
import gobject # pylint: disable=E0401
else:
from gi.repository import GLib as gobject # pylint: disable=E0401
| python | MIT | 9266cc79e781ef8c8e75749922f8634ddd2b7989 | 2026-01-05T07:14:42.526396Z | false |
henne49/dbus-opendtu | https://github.com/henne49/dbus-opendtu/blob/9266cc79e781ef8c8e75749922f8634ddd2b7989/helpers.py | helpers.py | '''Module containing various helper functions'''
# File specific rules
# pylint: disable=broad-except
# system imports
import functools
import time
import os
# our imports:
import logging
# region formatting helping functions (used in constant)
def _kwh(_p, value: float) -> str:
return f"{value:.2f}KWh"
def _a(_p, value: float) -> str:
return f"{value:.1f}A"
def _w(_p, value: float) -> str:
return f"{value:.1f}W"
def _v(_p, value: float) -> str:
return f"{value:.1f}V"
# endregion
def get_config_value(config, name, inverter_or_template, inverter_or_tpl_number, defaultvalue=None):
'''check if config value exist in current inverter/template's section, otherwise throw error'''
if name in config[f"{inverter_or_template}{inverter_or_tpl_number}"]:
return config[f"{inverter_or_template}{inverter_or_tpl_number}"][name]
if defaultvalue is None and inverter_or_template == "INVERTER":
raise ValueError(f"config entry '{name}' not found. "
f"(Hint: Deprecated Host ONPREMISE entries must be moved to DEFAULT section.)")
return defaultvalue
def get_default_config(config, name, defaultvalue):
'''check if config value exist in DEFAULT section, otherwise return defaultvalue'''
if name in config["DEFAULT"]:
return config["DEFAULT"][name]
return defaultvalue
def get_value_by_path(meter_data: dict, path):
'''Try to extract 'path' from nested array 'meter_data' (derived from json document) and return the found value'''
value = meter_data
for path_entry in path:
try:
value = value[path_entry]
except Exception:
try:
value = value[int(path_entry)]
except Exception:
value = 0
return value
def convert_to_expected_type(value: str, expected_type: [str, int, float, bool], # type: ignore
default: [None, str, int, float, bool]) -> [None, str, int, float, bool]: # type: ignore
''' Try to convert value to expected_type, otherwise return default'''
try:
conversion_functions = {
str: str,
int: int,
float: float,
bool: is_true
}
return conversion_functions[expected_type](value)
except (ValueError, TypeError, KeyError):
return default
def get_ahoy_field_by_name(meter_data, actual_inverter, fieldname, use_ch0_fld_names=True):
'''get the value by name instead of list index'''
# fetch value from record call:
# - but there seem to be more than one value per type and Inverter, and we don't know which one to take
# values = meter_data["record"]["inverter"][actual_inverter] # -> array of dicts
# for value_dict in values:
# if value_dict["fld"] == fieldname:
# val = value_dict["val"]
# print(f"returning fieldname {fieldname}: value {val}")
# return val
# raise ValueError(f"Fieldname {fieldname} not found in meter_data.")
data = None
# If "use_ch0_fld_names" is true, then the field names from the ch0_fld_names section in the JSON is used
# instead of the "fld_names" channel which includes DC-Parameter like "U_DC"
if use_ch0_fld_names:
data_field_names = meter_data["ch0_fld_names"]
data_index = data_field_names.index(fieldname)
ac_channel_index = 0
data = meter_data["inverter"][actual_inverter]["ch"][ac_channel_index][data_index]
else:
data_field_names = meter_data["fld_names"]
data_index = data_field_names.index(fieldname)
# TODO - check if this channel has to be adjusted
dc_channel_index = 1 # 1 = DC1, 2 = DC2 etc.
data = meter_data["inverter"][actual_inverter]["ch"][dc_channel_index][data_index]
return data
def is_true(val):
'''helper function to test for different true values'''
return val in (1, '1', True, "True", "TRUE", "true")
def timeit(func):
'''decorator to measure execution time of a function'''
@functools.wraps(func)
def wrapped_func(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
elapsed_time = time.time() - start_time
logging.debug(f"function {func.__name__} finished in {round(elapsed_time * 1000)} ms")
return result
return wrapped_func
def read_version(file_name):
try:
current_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(current_dir, file_name)
with open(file_path, 'r') as file:
line = file.readline()
version = line.split(':')[-1].strip()
return version
except FileNotFoundError:
logging.error(f"File {file_name} not found in the current directory.")
return 0.1 | python | MIT | 9266cc79e781ef8c8e75749922f8634ddd2b7989 | 2026-01-05T07:14:42.526396Z | false |
henne49/dbus-opendtu | https://github.com/henne49/dbus-opendtu/blob/9266cc79e781ef8c8e75749922f8634ddd2b7989/tests.py | tests.py | '''(Unit) tests'''
# system imports:
import json
import logging
import os
import time
# our imports:
import constants
# Victron imports:
from dbus_service import DbusService
from helpers import get_value_by_path
OPENDTU_TEST_DATA_FILE = "docs/opendtu_status.json"
AHOY_TEST_DATA_FILE_LIVE = "docs/ahoy_0.5.93_live.json"
AHOY_TEST_DATA_FILE_RECORD = "docs/ahoy_0.5.93_record-live.json"
AHOY_TEST_DATA_FILE_IV_0 = "docs/ahoy_0.5.93_inverter-id-0.json"
TEMPLATE_TASMOTA_TEST_DATA_FILE = "docs/tasmota_shelly_2pm.json"
def test_opendtu_reachable(test_service):
'''Test if DTU is reachable'''
test_service.set_dtu_variant(constants.DTUVARIANT_OPENDTU)
test_data = load_json_file(OPENDTU_TEST_DATA_FILE)
test_service.set_test_data(test_data)
assert test_service.is_data_up2date() is False
test_data = load_json_file(OPENDTU_TEST_DATA_FILE, '"reachable": false', '"reachable":"1"')
test_service.set_test_data(test_data)
assert test_service.is_data_up2date() is True
test_data = load_json_file(OPENDTU_TEST_DATA_FILE, '"reachable": false', '"reachable":1')
test_service.set_test_data(test_data)
assert test_service.is_data_up2date() is True
test_data = load_json_file(OPENDTU_TEST_DATA_FILE, '"reachable": false', '"reachable":true')
test_service.set_test_data(test_data)
assert test_service.is_data_up2date() is True
test_data = load_json_file(OPENDTU_TEST_DATA_FILE, '"reachable": false', '"reachable":false')
test_service.set_test_data(test_data)
assert test_service.is_data_up2date() is False
def test_opendtu_producing(test_service):
'''test if the opendtu inverter is producing'''
test_service.set_dtu_variant(constants.DTUVARIANT_OPENDTU)
test_data = load_json_file(OPENDTU_TEST_DATA_FILE)
test_service.set_test_data(test_data)
# current, power are 0 because inverter is not producing
# (power, pvyield total, current, voltage)
assert test_service.get_values_for_inverter() == (0, 270.4660034, 0, 226.1999969, 0.699999988)
test_data = load_json_file(OPENDTU_TEST_DATA_FILE, '"producing": false', '"producing":"1"')
test_service.set_test_data(test_data)
# (power, pvyield total, current, voltage)
assert test_service.get_values_for_inverter() == (31.79999924, 270.4660034, 0.140000001, 226.1999969, 0.699999988)
def load_json_file(filename, find_str=None, replace_str=None):
'''Load json data from filename (relative to main file). If given, find_str is replaced by replace_str'''
with open(f"{(os.path.dirname(os.path.realpath(__file__)))}/{filename}", encoding="utf-8") as file:
json_str = file.read()
if find_str is not None:
json_str = json_str.replace(find_str, replace_str)
return json.loads(json_str)
def load_ahoy_test_data():
'''Load Test data for Ahoy'''
test_data = load_json_file(AHOY_TEST_DATA_FILE_LIVE)
# not needed: test_data["record"] = load_json_file(AHOY_TEST_DATA_FILE_RECORD)
test_data["inverter"] = []
test_data["inverter"].append(load_json_file(AHOY_TEST_DATA_FILE_IV_0))
return test_data
def load_template_tasmota_test_data():
'''Load Test data for Template for tasmota case'''
test_data = load_json_file(TEMPLATE_TASMOTA_TEST_DATA_FILE)
return test_data
def test_ahoy_values(test_service):
'''test with ahoy data'''
test_service.set_dtu_variant(constants.DTUVARIANT_AHOY)
test_data = load_ahoy_test_data()
test_service.set_test_data(test_data)
# (power, pvyield total, current, voltage)
assert test_service.get_values_for_inverter() == (223.7, 422.603, 0.98, 229.5, 33.3)
def test_ahoy_timestamp(test_service):
'''test the timestamps for ahoy'''
test_service.set_dtu_variant(constants.DTUVARIANT_AHOY)
test_data = load_ahoy_test_data()
test_service.set_test_data(test_data)
assert test_service.is_data_up2date() is False
test_data = load_ahoy_test_data()
test_data["inverter"][0]["ts_last_success"] = time.time() - 10
test_service.set_test_data(test_data)
assert test_service.is_data_up2date() is True
def test_ahoy_get_number_of_inverters(test_service):
'''test if get_number_of_inverters works correctly'''
test_service.set_dtu_variant(constants.DTUVARIANT_AHOY)
test_data = load_ahoy_test_data()
test_service.set_test_data(test_data)
assert test_service.get_number_of_inverters() == 3
def test_get_value_by_path():
test_meter_data = {
"a": 1,
"b": {
"c": 3,
"arr": ["x", "y"],
}
}
assert 1 == get_value_by_path(test_meter_data, ["a"])
assert 3 == get_value_by_path(test_meter_data, ["b", "c"])
assert "y" == get_value_by_path(test_meter_data, ["b", "arr", 1]) # not: ["b", "arr[1]"]
def test_template_values(test_service):
'''test with template test data for tasmota'''
test_service.set_dtu_variant(constants.DTUVARIANT_TEMPLATE)
test_service.custpower = "StatusSNS/ENERGY/Power/0".split("/")
test_service.custcurrent = "StatusSNS/ENERGY/Current/0".split("/")
test_service.custpower_default = 999
test_service.custcurrent_default = 999
test_service.custpower_factor = 2
test_service.custtotal_default = 99
test_service.custtotal_factor = 1
test_service.custvoltage = "StatusSNS/ENERGY/Voltage".split("/")
test_service.custvoltage_default = 99.9
test_service.custtotal = "StatusSNS/ENERGY/Today".split("/")
test_data = load_template_tasmota_test_data()
test_service.set_test_data(test_data)
logging.debug("starting test for test_template_values")
(power, pvyield, current, voltage, dc_voltage) = test_service.get_values_for_inverter()
print(power, pvyield, current, voltage, dc_voltage)
assert (power, pvyield, current, voltage, dc_voltage) == (320.0, 0.315, 0.734, 235, None)
def run_tests():
'''function to run tests'''
test_get_value_by_path()
test_service = DbusService(servicename="testing", actual_inverter=0)
test_opendtu_reachable(test_service)
test_opendtu_producing(test_service)
test_ahoy_values(test_service)
test_ahoy_timestamp(test_service)
test_template_values(test_service)
logging.debug("tests have passed")
| python | MIT | 9266cc79e781ef8c8e75749922f8634ddd2b7989 | 2026-01-05T07:14:42.526396Z | false |
henne49/dbus-opendtu | https://github.com/henne49/dbus-opendtu/blob/9266cc79e781ef8c8e75749922f8634ddd2b7989/dbus_service.py | dbus_service.py | '''DbusService and PvInverterRegistry'''
# File specific rules
# pylint: disable=broad-except, import-error, wrong-import-order, wrong-import-position
# region [Imports]
# system imports:
import configparser
import os
import platform
import sys
import logging
import time
import requests # for http GET
from requests.auth import HTTPDigestAuth
# our imports:
import constants
from helpers import *
# victron imports:
import dbus
sys.path.insert(
1,
os.path.join(
os.path.dirname(__file__),
"/opt/victronenergy/dbus-systemcalc-py/ext/velib_python",
),
)
from vedbus import VeDbusService # noqa - must be placed after the sys.path.insert
# endregion
class DbusServiceRegistry(type):
"""
Metaclass for registering and iterating over D-Bus services.
This metaclass maintains a registry of D-Bus services and provides an iterator
to iterate over the registered services.
Methods:
__iter__(cls): Returns an iterator over the registered D-Bus services.
"""
def __iter__(cls):
return iter(cls._registry)
class DbusService:
'''Main class to register PV Inverter in DBUS'''
__metaclass__ = DbusServiceRegistry
_registry = []
_meter_data = None
_test_meter_data = None
_servicename = None
def __init__(
self,
servicename,
actual_inverter,
istemplate=False,
):
if servicename == "testing":
self.max_age_ts = 600
self.pvinverternumber = actual_inverter
self.useyieldday = False
return
self._registry.append(self)
self._last_update = 0
self._servicename = servicename
self.last_update_successful = False
# Initiale own properties
self.esptype = None
self.meter_data = None
self.dtuvariant = None
# Initialize error handling properties
self.error_mode = None
self.retry_after_seconds = 0
self.min_retries_until_fail = 0
self.error_state_after_seconds = 0
self.failed_update_count = 0
self.reset_statuscode_on_next_success = False
if not istemplate:
self._read_config_dtu(actual_inverter)
self.numberofinverters = self.get_number_of_inverters()
else:
self._read_config_template(actual_inverter)
logging.info("%s /DeviceInstance = %d", servicename, self.deviceinstance)
# Allow for multiple Instance per process in DBUS
dbus_conn = (
dbus.SessionBus()
if "DBUS_SESSION_BUS_ADDRESS" in os.environ
else dbus.SystemBus(private=True)
)
self._dbusservice = VeDbusService(f"{servicename}.http_{self.deviceinstance}", bus=dbus_conn, register=False)
self._paths = constants.VICTRON_PATHS
# Create the management objects, as specified in the ccgx dbus-api document
self._dbusservice.add_path("/Mgmt/ProcessName", __file__)
self._dbusservice.add_path("/Mgmt/ProcessVersion",
"Unkown version, and running on Python " + platform.python_version())
self._dbusservice.add_path("/Mgmt/Connection", constants.CONNECTION)
# Create the mandatory objects
self._dbusservice.add_path("/DeviceInstance", self.deviceinstance)
self._dbusservice.add_path("/ProductId", 0xFFFF) # id assigned by Victron Support from SDM630v2.py
self._dbusservice.add_path("/ProductName", constants.PRODUCTNAME)
self._dbusservice.add_path("/CustomName", self._get_name())
logging.info(f"Name of Inverters found: {self._get_name()}")
self._dbusservice.add_path("/Connected", 1)
self._dbusservice.add_path("/Latency", None)
self._dbusservice.add_path("/FirmwareVersion", read_version('version.txt'))
self._dbusservice.add_path("/HardwareVersion", 0)
self._dbusservice.add_path("/Position", self.acposition) # normaly only needed for pvinverter
self._dbusservice.add_path("/Serial", self._get_serial(self.pvinverternumber))
self._dbusservice.add_path("/UpdateIndex", 0)
# set path StatusCode to 7=Running so VRM detects a working PV-Inverter
self._dbusservice.add_path("/StatusCode", constants.STATUSCODE_RUNNING)
# If the Servicname is an (AC-)Inverter, add the Mode path (to show it as ON)
# Also, we will set different paths and variables in the _update(self) method.
# for this device class. For more information about the paths and ServiceNames...
# @see: https://github.com/victronenergy/venus/wiki/dbus
if self._servicename == "com.victronenergy.inverter":
# Set Mode to 2 to show it as ON
# 2=On;4=Off;5=Eco
self._dbusservice.add_path("/Mode", 2)
# set the SystemState flaf to 9=Inverting
# /SystemState/State -> 0: Off
# -> 1: Low power
# -> 9: Inverting
self._dbusservice.add_path("/State", 9)
# add path values to dbus
for path, settings in self._paths.items():
self._dbusservice.add_path(
path,
settings["initial"],
gettextcallback=settings["textformat"],
writeable=True,
onchangecallback=self._handlechangedvalue,
)
self._dbusservice.register()
self.polling_interval = self._get_polling_interval()
self.last_polling = 0
@staticmethod
def get_ac_inverter_state(current):
'''return the state of the inverter based on the current value'''
try:
float_current = float(current)
except ValueError:
float_current = 0
if float_current > 0:
ac_inverter_state = 9 # = Inverting
else:
ac_inverter_state = 0 # = Off
return ac_inverter_state
@staticmethod
def _handlechangedvalue(path, value):
logging.debug("someone else updated %s to %s", path, value)
return True # accept the change
@staticmethod
def _get_config():
config = configparser.ConfigParser()
config.read(f"{(os.path.dirname(os.path.realpath(__file__)))}/config.ini")
return config
@staticmethod
def get_processed_meter_value(meter_data: dict, path_to_value, default_value: any, factor: int = 1) -> any:
'''return the processed meter value by applying the factor and return a default value due an Exception'''
raw_value = get_value_by_path(meter_data, path_to_value)
raw_value = convert_to_expected_type(raw_value, float, default_value)
if isinstance(raw_value, (float, int)):
value = float(raw_value * float(factor))
else:
value = default_value
return value
# read config file
def _read_config_dtu(self, actual_inverter):
config = self._get_config()
self.pvinverternumber = actual_inverter
self.dtuvariant = str(config["DEFAULT"]["DTU"])
if self.dtuvariant not in (constants.DTUVARIANT_OPENDTU, constants.DTUVARIANT_AHOY):
raise ValueError(f"Error in config.ini: DTU must be one of \
{constants.DTUVARIANT_OPENDTU}, \
{constants.DTUVARIANT_AHOY}")
self.deviceinstance = int(config[f"INVERTER{self.pvinverternumber}"]["DeviceInstance"])
self.acposition = int(get_config_value(config, "AcPosition", "INVERTER", self.pvinverternumber))
self.useyieldday = int(get_config_value(config, "useYieldDay", "DEFAULT", "", 0))
self.pvinverterphase = str(config[f"INVERTER{self.pvinverternumber}"]["Phase"])
self.host = get_config_value(config, "Host", "INVERTER", self.pvinverternumber)
self.username = get_config_value(config, "Username", "DEFAULT", "", self.pvinverternumber)
self.password = get_config_value(config, "Password", "DEFAULT", "", self.pvinverternumber)
self.digestauth = is_true(get_config_value(config, "DigestAuth", "INVERTER", self.pvinverternumber, False))
try:
self.max_age_ts = int(config["DEFAULT"]["MaxAgeTsLastSuccess"])
except (KeyError, ValueError) as ex:
logging.warning("MaxAgeTsLastSuccess: %s", ex)
logging.warning("MaxAgeTsLastSuccess not set, using default")
self.max_age_ts = 600
self.dry_run = is_true(get_default_config(config, "DryRun", False))
self.pollinginterval = int(get_config_value(config, "ESP8266PollingIntervall", "DEFAULT", "", 10000))
self.meter_data = 0
self.httptimeout = get_default_config(config, "HTTPTimeout", 2.5)
self._load_error_handling_config(config)
def _read_config_template(self, template_number):
config = self._get_config()
self.pvinverternumber = template_number
self.custpower = config[f"TEMPLATE{template_number}"]["CUST_Power"].split("/")
self.custpower_factor = config[f"TEMPLATE{template_number}"]["CUST_Power_Mult"]
self.custpower_default = get_config_value(config, "CUST_Power_Default", "TEMPLATE", template_number, None)
self.custtotal = config[f"TEMPLATE{template_number}"]["CUST_Total"].split("/")
self.custtotal_factor = config[f"TEMPLATE{template_number}"]["CUST_Total_Mult"]
self.custtotal_default = get_config_value(config, "CUST_Total_Default", "TEMPLATE", template_number, None)
self.custvoltage = config[f"TEMPLATE{template_number}"]["CUST_Voltage"].split("/")
self.custvoltage_default = get_config_value(config, "CUST_Voltage_Default", "TEMPLATE", template_number, None)
self.custapipath = config[f"TEMPLATE{template_number}"]["CUST_API_PATH"]
self.serial = str(config[f"TEMPLATE{template_number}"]["CUST_SN"])
self.pollinginterval = int(config[f"TEMPLATE{template_number}"]["CUST_POLLING"])
self.host = config[f"TEMPLATE{template_number}"]["Host"]
self.username = config[f"TEMPLATE{template_number}"]["Username"]
self.password = config[f"TEMPLATE{template_number}"]["Password"]
self.dtuvariant = constants.DTUVARIANT_TEMPLATE
self.deviceinstance = int(config[f"TEMPLATE{template_number}"]["DeviceInstance"])
self.customname = config[f"TEMPLATE{template_number}"]["Name"]
self.acposition = int(config[f"TEMPLATE{template_number}"]["AcPosition"])
self.useyieldday = int(get_config_value(config, "useYieldDay", "DEFAULT", "", 0))
self.pvinverterphase = str(config[f"TEMPLATE{template_number}"]["Phase"])
self.digestauth = is_true(get_config_value(config, "DigestAuth", "TEMPLATE", template_number, False))
try:
self.custcurrent = config[f"TEMPLATE{template_number}"]["CUST_Current"].split("/")
except Exception:
# set to undefined because get_nested will solve this to 0
self.custcurrent = "[undefined]"
logging.debug("CUST_Current not set")
self.custcurrent_default = get_config_value(config, "CUST_Current_Default", "TEMPLATE", template_number, None)
try:
self.custdcvoltage = config[f"TEMPLATE{template_number}"]["CUST_DCVoltage"].split("/")
except Exception:
# set to undefined because get_nested will solve this to 0
self.custdcvoltage = "[undefined]"
logging.debug("CUST_DCVoltage not set")
self.custdcvoltage_default = get_config_value(
config, "CUST_DCVoltage_Default", "TEMPLATE", template_number, None)
try:
self.max_age_ts = int(config["DEFAULT"]["MaxAgeTsLastSuccess"])
except (KeyError, ValueError) as ex:
logging.warning("MaxAgeTsLastSuccess: %s", ex)
logging.warning("MaxAgeTsLastSuccess not set, using default")
self.max_age_ts = 600
self.dry_run = is_true(get_default_config(config, "DryRun", False))
self.meter_data = 0
self.httptimeout = get_default_config(config, "HTTPTimeout", 2.5)
self._load_error_handling_config(config)
def _load_error_handling_config(self, config):
'''Loads error handling configuration values from the provided config object.'''
self.error_mode = get_default_config(config, "ErrorMode", constants.MODE_RETRYCOUNT).strip()
self.retry_after_seconds = int(get_default_config(config, "RetryAfterSeconds", 180))
self.min_retries_until_fail = int(get_default_config(config, "MinRetriesUntilFail", 3))
self.error_state_after_seconds = int(get_default_config(config, "ErrorStateAfterSeconds", 0))
# get the Serialnumber
def _get_serial(self, pvinverternumber):
meter_data = None
serial = None
if self.dtuvariant in (constants.DTUVARIANT_AHOY, constants.DTUVARIANT_OPENDTU):
meter_data = self._get_data()
if self.dtuvariant == constants.DTUVARIANT_AHOY:
if not meter_data["inverter"][pvinverternumber]["name"]:
raise ValueError("Response does not contain name")
serial = meter_data["inverter"][pvinverternumber]["serial"]
elif self.dtuvariant == constants.DTUVARIANT_OPENDTU:
if not meter_data["inverters"][pvinverternumber]["serial"]:
raise ValueError("Response does not contain serial attribute try name")
serial = meter_data["inverters"][pvinverternumber]["serial"]
elif self.dtuvariant == constants.DTUVARIANT_TEMPLATE:
serial = self.serial
return serial
def _get_name(self):
if self.dtuvariant in (constants.DTUVARIANT_OPENDTU, constants.DTUVARIANT_AHOY):
meter_data = self._get_data()
meter_data = None
if self.dtuvariant in (constants.DTUVARIANT_OPENDTU, constants.DTUVARIANT_AHOY):
meter_data = self._get_data()
if self.dtuvariant == constants.DTUVARIANT_AHOY:
name = meter_data["inverter"][self.pvinverternumber]["name"]
elif self.dtuvariant == constants.DTUVARIANT_OPENDTU:
name = meter_data["inverters"][self.pvinverternumber]["name"]
else:
name = self.customname
return name
def get_number_of_inverters(self):
'''return number of inverters in JSON response'''
meter_data = self._get_data()
if self.dtuvariant == constants.DTUVARIANT_AHOY:
numberofinverters = len(meter_data["inverter"])
else: # Assuming the only other option is constants.DTUVARIANT_OPENDTU
numberofinverters = len(meter_data["inverters"])
logging.info("Number of Inverters found: %s", numberofinverters)
return numberofinverters
def _get_dtu_variant(self):
return self.dtuvariant
def _get_polling_interval(self):
meter_data = self._get_data()
if self.dtuvariant == constants.DTUVARIANT_AHOY:
# Check for ESP8266 and limit polling
try:
self.esptype = meter_data["generic"]["esp_type"]
except Exception: # pylint: disable=broad-except
self.esptype = meter_data["system"]["esp_type"]
if self.esptype == "ESP8266":
polling_interval = self.pollinginterval
logging.info(f"ESP8266 detected, polling interval {polling_interval/1000} Sek.")
else:
polling_interval = 5000
elif self.dtuvariant == constants.DTUVARIANT_OPENDTU:
polling_interval = 5000
elif self.dtuvariant == constants.DTUVARIANT_TEMPLATE:
polling_interval = self.pollinginterval
return polling_interval
def _get_status_url(self):
url = None
if self.dtuvariant == constants.DTUVARIANT_OPENDTU:
url = self.get_opendtu_base_url() + "/livedata/status"
elif self.dtuvariant == constants.DTUVARIANT_AHOY:
url = self.get_ahoy_base_url() + "/live"
elif self.dtuvariant == constants.DTUVARIANT_TEMPLATE:
url = self.get_template_base_url()
else:
logging.error('no dtuvariant set')
return url
def get_opendtu_base_url(self):
'''Get API base URL for all OpenDTU calls'''
return f"http://{self.host}/api"
def get_ahoy_base_url(self):
'''Get API base URL for all Ahoy calls'''
return f"http://{self.host}/api"
def get_template_base_url(self):
'''Get API base URL for all Template calls'''
return f"http://{self.host}/{self.custapipath}"
def _refresh_data(self):
'''Fetch new data from the DTU API and store in locally if successful.'''
if self.pvinverternumber != 0 and self.dtuvariant != constants.DTUVARIANT_TEMPLATE:
# only fetch new data when called for inverter 0
# (background: data is kept at class level for all inverters)
return
url = self._get_status_url()
meter_data = self.fetch_url(url)
if self.dtuvariant == constants.DTUVARIANT_OPENDTU:
self.check_opendtu_data(meter_data)
if self.dtuvariant == constants.DTUVARIANT_AHOY:
self.check_and_enrich_ahoy_data(meter_data)
self.store_for_later_use(meter_data)
def store_for_later_use(self, meter_data):
'''Store meter data for later use in other methods'''
if self.dtuvariant == constants.DTUVARIANT_TEMPLATE:
self.meter_data = meter_data
else:
DbusService._meter_data = meter_data
def check_and_enrich_ahoy_data(self, meter_data):
''' Check if Ahoy data is valid and enrich it with additional data'''
if not "iv" in meter_data:
raise ValueError("You do not have the latest Ahoy Version to run this script,"
"please upgrade your Ahoy to at least version 0.5.93")
# Check for Attribute (inverter)
if (self._servicename == "com.victronenergy.inverter" and
not "fld_names" in meter_data):
raise ValueError("Response from ahoy does not contain fld_names in data")
# Check for an additonal Attribute
if not "ch0_fld_names" in meter_data:
raise ValueError("Response from ahoy does not contain ch0_fld_names data")
# not needed: meter_data["record"] = self.fetch_ahoy_record_data()
# add the field "inverter" to meter_data:
# This will contain an array of the "iv" data from all inverters.
meter_data["inverter"] = []
for inverter_number in range(len(meter_data["iv"])):
if is_true(meter_data["iv"][inverter_number]):
iv_data = self.fetch_ahoy_iv_data(inverter_number)
while len(meter_data["inverter"]) < inverter_number:
# there was a gap in the sequence of inverter numbers -> fill in a dummy value
meter_data["inverter"].append({})
meter_data["inverter"].append(iv_data)
def check_opendtu_data(self, meter_data):
''' Check if OpenDTU data has the right format'''
# Check for OpenDTU Version
if not "serial" in meter_data["inverters"][self.pvinverternumber]:
raise ValueError("You do not have the latest OpenDTU Version to run this script,"
"please upgrade your OpenDTU to at least version 4.4.3")
def fetch_opendtu_iv_data(self, inverter_serial):
'''Fetch inverter data from OpenDTU device for one inverter'''
iv_url = self._get_status_url() + "?inv=" + inverter_serial
logging.debug(f"Inverter URL: {iv_url}")
return self.fetch_url(iv_url)
def fetch_ahoy_iv_data(self, inverter_number):
'''Fetch inverter data from Ahoy device for one inverter'''
iv_url = self.get_ahoy_base_url() + "/inverter/id/" + str(inverter_number)
logging.debug(f"Inverter URL: {iv_url}")
return self.fetch_url(iv_url)
def fetch_ahoy_record_data(self):
'''Fetch record data from Ahoy device'''
record_live_url = self.get_ahoy_base_url() + "/record/live"
return self.fetch_url(record_live_url)
@timeit
def fetch_url(self, url, try_number=1):
'''Fetch JSON data from url. Throw an exception on any error. Only return on success.'''
try:
logging.debug(f"calling {url} with timeout={self.httptimeout}")
if self.digestauth:
logging.debug("using Digest access authentication...")
json_str = requests.get(url=url, auth=HTTPDigestAuth(
self.username, self.password), timeout=float(self.httptimeout))
elif self.username and self.password:
logging.debug("using Basic access authentication...")
json_str = requests.get(url=url, auth=(
self.username, self.password), timeout=float(self.httptimeout))
else:
json_str = requests.get(
url=url, timeout=float(self.httptimeout))
json_str.raise_for_status() # raise exception on bad status code
# check for response
if not json_str:
logging.info("No Response from DTU")
raise ConnectionError("No response from DTU - ", self.host)
json = None
try:
json = json_str.json()
except json.decoder.JSONDecodeError as error:
logging.debug(f"JSONDecodeError: {str(error)}")
# check for Json
if not json:
# will be logged when catched
raise ValueError(f"Converting response from {url} to JSON failed: "
f"status={json_str.status_code},\nresponse={json_str.text}")
return json
except Exception:
# retry same call up to 3 times
if try_number < 3: # pylint: disable=no-else-return
time.sleep(0.5)
return self.fetch_url(url, try_number + 1)
else:
raise
def _get_data(self) -> dict:
if self._test_meter_data:
return self._test_meter_data
if not DbusService._meter_data:
self._refresh_data()
if self.dtuvariant == constants.DTUVARIANT_TEMPLATE:
return self.meter_data
return DbusService._meter_data
def set_test_data(self, test_data):
'''Set Test Data to run test'''
self._test_meter_data = test_data
def set_dtu_variant(self, dtuvariant):
'''set DTU variant'''
self.dtuvariant = dtuvariant
def is_data_up2date(self):
'''check if data is up to date with timestamp and producing inverter'''
if self.max_age_ts < 0:
# check is disabled by config
return True
meter_data = self._get_data()
if self.dtuvariant == constants.DTUVARIANT_AHOY:
ts_last_success = self.get_ts_last_success(meter_data)
age_seconds = time.time() - ts_last_success
logging.debug("is_data_up2date: inverter #%d: age_seconds=%d, max_age_ts=%d",
self.pvinverternumber, age_seconds, self.max_age_ts)
return 0 <= age_seconds < self.max_age_ts
if self.dtuvariant == constants.DTUVARIANT_OPENDTU:
return is_true(meter_data["inverters"][self.pvinverternumber]["reachable"])
return True
def get_ts_last_success(self, meter_data):
'''return ts_last_success from the meter_data structure - depending on the API version'''
return meter_data["inverter"][self.pvinverternumber]["ts_last_success"]
def sign_of_life(self):
"""
Logs the last update time and the AC power value of the inverter.
This method logs a debug message with the last update time of the inverter
and an info message with the AC power value of the inverter.
Returns:
bool: Always returns True.
"""
logging.debug("Last inverter #%d _update() call: %s", self.pvinverternumber, self._last_update)
logging.info("[%s] Last inverter #%d '/Ac/Power': %s", self._servicename,
self.pvinverternumber, self._dbusservice["/Ac/Power"])
return True
def _refresh_and_update(self):
"""
Helper method to refresh data, handle data update if up-to-date, update index, and set successful flag.
"""
self._refresh_data()
if self.is_data_up2date():
self._handle_data_update()
self._update_index()
return True
def update(self):
"""
Updates inverter data from the DTU (Data Transfer Unit) and sets DBus values if the data is up-to-date.
Main logic:
- In timeout mode: Always attempt reconnect every RetryAfterSeconds. Only set zero values after ErrorStateAfterSeconds has elapsed since last success.
- In retrycount mode: After min_retries_until_fail failures, wait RetryAfterSeconds before next attempt and set zero values immediately.
- Always updates the DBus update index after a refresh.
- Tracks success/failure state and manages reconnect timing.
Exception handling:
- Catches and logs HTTP, value, and general exceptions during update.
- Ensures update state is finalized regardless of outcome.
Returns:
None
"""
logging.debug("_update")
successful = False
now = time.time()
try:
if self.error_mode == constants.MODE_TIMEOUT and self.error_state_after_seconds > 0:
# Set zero values only after ErrorStateAfterSeconds has elapsed since last success
if (not self.last_update_successful and (now - self._last_update) >= self.error_state_after_seconds):
self._handle_reconnect_wait()
# Always allow a reconnect attempt every RetryAfterSeconds
if (now - self._last_update) >= self.retry_after_seconds:
successful = self._refresh_and_update()
# In normal operation (no error), always call _refresh_data on every update
if self.last_update_successful:
successful = self._refresh_and_update()
elif self.error_mode == constants.MODE_RETRYCOUNT:
# Classic retry-count-based error handling
if self.failed_update_count >= self.min_retries_until_fail:
self._handle_reconnect_wait()
# Determine if we should refresh data based on current state and timing
is_last_update_successful = self.last_update_successful
time_since_last_update = now - self._last_update
is_retry_interval_elapsed = time_since_last_update >= self.retry_after_seconds
is_below_min_retries = self.failed_update_count < self.min_retries_until_fail
should_refresh_data = (
is_last_update_successful or
is_retry_interval_elapsed or
is_below_min_retries
)
if should_refresh_data:
successful = self._refresh_and_update()
except requests.exceptions.RequestException as exception:
logging.warning(f"HTTP Error at _update for inverter "
f"{self.pvinverternumber} ({self._get_name()}): {str(exception)}")
except ValueError as error:
logging.warning(f"Error at _update for inverter "
f"{self.pvinverternumber} ({self._get_name()}): {str(error)}")
except Exception as error: # pylint: disable=broad-except
logging.warning(f"Error at _update for inverter "
f"{self.pvinverternumber} ({self._get_name()})", exc_info=error)
finally:
self._finalize_update(successful)
def _handle_reconnect_wait(self):
if not self.reset_statuscode_on_next_success:
self.set_dbus_values_to_zero()
self.reset_statuscode_on_next_success = True
def _should_refresh_data(self, now):
return (
self.last_update_successful or
(now - self._last_update) >= self.retry_after_seconds or
self.failed_update_count < self.min_retries_until_fail
)
def _handle_data_update(self):
if self.dry_run:
logging.info("DRY RUN. No data is sent!!")
else:
self.set_dbus_values()
def _finalize_update(self, successful):
if successful:
if self.reset_statuscode_on_next_success:
self._dbusservice["/StatusCode"] = constants.STATUSCODE_RUNNING
if not self.last_update_successful:
logging.warning(
f"Recovered inverter {self.pvinverternumber} ({self._get_name()}): "
f"Successfully fetched data now: "
f"{'NOT (yet?)' if not self.is_data_up2date() else 'Is'} up-to-date"
)
self.last_update_successful = True
self.failed_update_count = 0
self.reset_statuscode_on_next_success = False
else:
self.last_update_successful = False
self.failed_update_count += 1
def _update_index(self):
if self.dry_run:
return
# increment UpdateIndex - to show that new data is available
index = self._dbusservice["/UpdateIndex"] + 1 # increment index
if index > 255: # maximum value of the index
index = 0 # overflow from 255 to 0
self._dbusservice["/UpdateIndex"] = index
self._last_update = time.time()
def get_values_for_inverter(self):
'''read data and return (power, pvyield, current, voltage, dc-voltage)'''
meter_data = self._get_data()
(power, pvyield, current, voltage, dc_voltage) = (None, None, None, None, None)
if self.dtuvariant == constants.DTUVARIANT_AHOY:
power = get_ahoy_field_by_name(meter_data, self.pvinverternumber, "P_AC")
if self.useyieldday:
pvyield = get_ahoy_field_by_name(meter_data, self.pvinverternumber, "YieldDay") / 1000
else:
pvyield = get_ahoy_field_by_name(meter_data, self.pvinverternumber, "YieldTotal")
voltage = get_ahoy_field_by_name(meter_data, self.pvinverternumber, "U_AC")
dc_voltage = get_ahoy_field_by_name(meter_data, self.pvinverternumber, "U_DC", False)
current = get_ahoy_field_by_name(meter_data, self.pvinverternumber, "I_AC")
elif self.dtuvariant == constants.DTUVARIANT_OPENDTU:
# OpenDTU v24.2.12 breaking API changes 2024-02-19
if "AC" in meter_data["inverters"][self.pvinverternumber]:
root_meter_data = meter_data["inverters"][self.pvinverternumber]
firmware_v24_2_12_or_newer = True
else:
inverter_serial = meter_data["inverters"][self.pvinverternumber]["serial"]
logging.debug(f"Inverter #{self.pvinverternumber} Serial: {inverter_serial}")
root_meter_data = self.fetch_opendtu_iv_data(inverter_serial)["inverters"][0]
logging.debug(f"{root_meter_data}")
firmware_v24_2_12_or_newer = False
producing = is_true(root_meter_data["producing"])
power = (root_meter_data["AC"]["0"]["Power"]["v"]
if producing
else 0)
field_inv = "AC" if firmware_v24_2_12_or_newer else "INV"
if self.useyieldday:
pvyield = root_meter_data[field_inv]["0"]["YieldDay"]["v"] / 1000
else:
pvyield = root_meter_data[field_inv]["0"]["YieldTotal"]["v"]
voltage = root_meter_data["AC"]["0"]["Voltage"]["v"]
dc_voltage = root_meter_data["DC"]["0"]["Voltage"]["v"]
current = (root_meter_data["AC"]["0"]["Current"]["v"]
if producing
else 0)
elif self.dtuvariant == constants.DTUVARIANT_TEMPLATE:
power = self.get_processed_meter_value(
meter_data, self.custpower, self.custpower_default, self.custpower_factor)
pvyield = self.get_processed_meter_value(
meter_data, self.custtotal, self.custtotal_default, self.custtotal_factor)
voltage = self.get_processed_meter_value(meter_data, self.custvoltage, self.custvoltage_default)
| python | MIT | 9266cc79e781ef8c8e75749922f8634ddd2b7989 | 2026-01-05T07:14:42.526396Z | true |
henne49/dbus-opendtu | https://github.com/henne49/dbus-opendtu/blob/9266cc79e781ef8c8e75749922f8634ddd2b7989/tests/test_dbus_service.py | tests/test_dbus_service.py | ''' This file contains the unit tests for the DbusService class. '''
import time
import unittest
from unittest.mock import MagicMock, patch
import os
import json
import requests
from constants import MODE_TIMEOUT
from dbus_service import DbusService
def mocked_requests_get(url, params=None, **kwargs): # pylint: disable=unused-argument
"""
Mock function to simulate `requests.get` behavior for specific URLs.
Args:
url (str): The URL to send the GET request to.
params (dict, optional): Dictionary of URL parameters to append to the URL.
**kwargs: Additional arguments passed to the request.
Returns:
MockResponse: A mock response object with predefined JSON data and status code.
Raises:
requests.exceptions.HTTPError: If the status code of the response is not 200.
Mocked URLs and their corresponding JSON files:
- 'http://localhost/api/live': Returns data from 'ahoy_0.5.93_live.json'.
- 'http://localhost/api/inverter/id/0': Returns data from 'ahoy_0.5.93_inverter-id-0.json'.
- 'http://localhost/api/inverter/id/1': Returns data from 'ahoy_0.5.93_inverter-id-1.json'.
- 'http://localhost/cm?cmnd=STATUS+8': Returns data from 'tasmota_shelly_2pm.json'.
- Any other URL: Returns a 404 status code.
"""
class MockResponse:
"""
MockResponse is a mock class to simulate HTTP responses for testing purposes.
Attributes:
json_data (dict): The JSON data to be returned by the mock response.
status_code (int): The HTTP status code of the mock response.
Methods:
json(): Returns the JSON data of the mock response.
raise_for_status(): Raises an HTTPError if the status code is not 200.
"""
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
"""
Returns the JSON data.
Returns:
dict: The JSON data.
"""
return self.json_data
def raise_for_status(self):
"""
Raises an HTTPError if the HTTP request returned an unsuccessful status code.
This method checks the status code of the HTTP response. If the status code is not 200,
it raises an HTTPError with a message containing the status code.
Raises:
requests.exceptions.HTTPError: If the status code is not 200.
"""
if self.status_code != 200:
raise requests.exceptions.HTTPError(f"{self.status_code} Error")
print("Mock URL: ", url)
if url == 'http://localhost/api/live':
json_file_path = os.path.join(os.path.dirname(__file__), '../docs/ahoy_0.5.93_live.json')
with open(json_file_path, 'r', encoding="UTF-8") as file:
json_data = json.load(file)
return MockResponse(json_data, 200)
elif url == 'http://localhost/api/inverter/id/0':
json_file_path = os.path.join(os.path.dirname(__file__), '../docs/ahoy_0.5.93_inverter-id-0.json')
with open(json_file_path, 'r', encoding="UTF-8") as file:
json_data = json.load(file)
return MockResponse(json_data, 200)
elif url == 'http://localhost/api/inverter/id/1':
json_file_path = os.path.join(os.path.dirname(__file__), '../docs/ahoy_0.5.93_inverter-id-1.json')
with open(json_file_path, 'r', encoding="UTF-8") as file:
json_data = json.load(file)
return MockResponse(json_data, 200)
elif url == 'http://localhost/cm?cmnd=STATUS+8':
json_file_path = os.path.join(os.path.dirname(__file__), '../docs/tasmota_shelly_2pm.json')
with open(json_file_path, 'r', encoding="UTF-8") as file:
json_data = json.load(file)
return MockResponse(json_data, 200)
elif url == 'http://localhost/api/livedata/status':
json_file_path = os.path.join(os.path.dirname(__file__), '../docs/opendtu_v24.2.12_livedata_status.json')
with open(json_file_path, 'r', encoding="UTF-8") as file:
json_data = json.load(file)
return MockResponse(json_data, 200)
return MockResponse(None, 404)
class TestDbusService(unittest.TestCase):
""" Test the DbusService class """
@patch('dbus_service.dbus')
def test_init_testing(self, _mock_dbus):
""" Test the initialization of the DbusService class """
servicename = "Nuclear_plant"
actual_inverter = -1
istemplate = False
with self.assertRaises(KeyError):
DbusService(servicename, actual_inverter, istemplate)
myconfig = {
"DEFAULT": {
"DTU": "ahoy",
},
"INVERTER0": {
"Phase": "L1",
"DeviceInstance": "34",
"AcPosition": "1",
"Host": "localhost",
}
}
@patch('dbus_service.DbusService._get_config', return_value=myconfig)
@patch('dbus_service.dbus')
@patch('dbus_service.logging')
@patch('dbus_service.requests.get', side_effect=mocked_requests_get)
def test_init_non_template(self, mock__get_config, mock_dbus, mock_logging, mock_get):
""" Test fetch_url with custom responses for different URLs """
DbusService._meter_data = None
servicename = "com.victronenergy.pvinverter"
actual_inverter = 0
istemplate = False
# Initialize the DbusService
# with self.assertRaises(ValueError):
service = DbusService(servicename, actual_inverter, istemplate)
# Assertions to verify the behavior
self.assertEqual(service.dtuvariant, "ahoy")
config_for_test_if_number_of_inverters_are_set = {
"DEFAULT": {
"DTU": "ahoy",
},
"INVERTER0": {
"Phase": "L1",
"DeviceInstance": "34",
"AcPosition": "1",
"Host": "localhost",
},
}
@patch('dbus_service.DbusService._get_config', return_value=config_for_test_if_number_of_inverters_are_set)
@patch('dbus_service.dbus')
@patch('dbus_service.logging')
@patch('dbus_service.requests.get', side_effect=mocked_requests_get)
def test_if_number_of_inverters_are_set(self, mock__get_config, mock_dbus, mock_logging, mock_get):
""" Test fetch_url with custom responses for different URLs """
servicename = "com.victronenergy.pvinverter"
actual_inverter = 0
istemplate = False
service = DbusService(servicename, actual_inverter, istemplate)
self.assertEqual(service.dtuvariant, "ahoy")
self.assertEqual(service.get_number_of_inverters(), 2)
config_for_test_if_number_of_inverters_are_set_opendtu = {
"DEFAULT": {
"DTU": "opendtu",
},
"INVERTER0": {
"Phase": "L1",
"DeviceInstance": "34",
"AcPosition": "1",
"Host": "localhost",
},
}
@patch('dbus_service.DbusService._get_config', return_value=config_for_test_if_number_of_inverters_are_set_opendtu)
@patch('dbus_service.dbus')
@patch('dbus_service.logging')
@patch('dbus_service.requests.get', side_effect=mocked_requests_get)
def test_if_number_of_inverters_are_set_opendtu(self, mock__get_config, mock_dbus, mock_logging, mock_get):
""" Test fetch_url with custom responses for different URLs """
DbusService._meter_data = None
servicename = "com.victronenergy.pvinverter"
actual_inverter = 0
istemplate = False
service = DbusService(servicename, actual_inverter, istemplate)
self.assertEqual(service.dtuvariant, "opendtu")
self.assertEqual(service.get_number_of_inverters(), 2)
template_config = {
"DEFAULT": {
"DTU": "ahoy",
},
"TEMPLATE0": {
"Username": "",
"Password": "",
"DigestAuth": "False",
"Host": "localhost",
"CUST_SN": "12345678",
"CUST_API_PATH": "cm?cmnd=STATUS+8",
"CUST_POLLING": "2000",
"CUST_Total": "StatusSNS/ENERGY/Total",
"CUST_Total_Mult": "1",
"CUST_Power": "StatusSNS/ENERGY/Power",
"CUST_Power_Mult": "1",
"CUST_Voltage": "StatusSNS/ENERGY/Voltage",
"CUST_Current": "StatusSNS/ENERGY/Current",
"Phase": "L1",
"DeviceInstance": "47",
"AcPosition": "1",
"Name": "Tasmota",
"Servicename": "com.victronenergy.grid"
}
}
@patch('dbus_service.DbusService._get_config', return_value=template_config)
@patch('dbus_service.dbus')
@patch('dbus_service.logging')
@patch('dbus_service.requests.get', side_effect=mocked_requests_get)
def test_init_template(self, mock__get_config, mock_dbus, mock_logging, mock_get):
# Test the initialization with template servicename
servicename = "com.victronenergy.inverter"
actual_inverter = 0
istemplate = True
service = DbusService(servicename, actual_inverter, istemplate)
self.assertEqual(service._servicename, servicename)
self.assertEqual(service.pvinverternumber, actual_inverter)
self.assertFalse(service.last_update_successful)
self.assertIsNotNone(service._dbusservice)
class ReconnectLogicTest(unittest.TestCase):
def setUp(self):
# Set up all required patches and a default DbusService instance for each test
self.patcher_config = patch('dbus_service.DbusService._get_config', return_value={
"DEFAULT": {"DTU": "ahoy", "RetryAfterSeconds": "10"},
"INVERTER0": {"Phase": "L1", "DeviceInstance": "34", "AcPosition": "1", "Host": "localhost"},
})
self.patcher_dbus = patch('dbus_service.dbus')
self.patcher_logging = patch('dbus_service.logging')
self.patcher_requests = patch('dbus_service.requests.get', side_effect=mocked_requests_get)
self.mock_config = self.patcher_config.start()
self.mock_dbus = self.patcher_dbus.start()
self.mock_logging = self.patcher_logging.start()
self.mock_requests = self.patcher_requests.start()
self.addCleanup(self.patcher_config.stop)
self.addCleanup(self.patcher_dbus.stop)
self.addCleanup(self.patcher_logging.stop)
self.addCleanup(self.patcher_requests.stop)
self.service = DbusService("com.victronenergy.pvinverter", 0)
self.service._refresh_data = MagicMock()
self.service.is_data_up2date = MagicMock(return_value=False)
self.service.set_dbus_values = MagicMock()
self.service._update_index = MagicMock()
self.service.dry_run = True
self.service.retry_after_seconds = 300 # seconds
self.service._last_update = time.time() - 100
# Simulate a dbusservice dict for status and value tests
self.service._dbusservice = {k: 1 for k in [
'/StatusCode', '/Ac/Out/L1/V', '/Ac/Out/L1/I', '/Ac/Out/L1/P', '/Dc/0/Voltage', '/Ac/Power',
'/Ac/L1/Current', '/Ac/L1/Energy/Forward', '/Ac/L1/Power', '/Ac/L1/Voltage']}
def test_failed_update_count_increments(self):
"""Test that failed_update_count increases after consecutive failed updates (exceptions)."""
self.service._refresh_data.side_effect = requests.exceptions.RequestException("Test exception")
for _ in range(3):
self.service.last_update_successful = False
self.service.update()
self.assertEqual(self.service.failed_update_count, 3)
self.service._refresh_data.side_effect = None
def test_reconnect_pause_after_3_failures(self):
"""Test that after 3 failures, update() does not call _refresh_data if reconnectAfter time is not over."""
self.service.failed_update_count = 3
self.service.last_update_successful = False
self.service._last_update = time.time() - (4 * 60) # less than reconnectAfter
self.service._refresh_data.reset_mock()
self.service.update()
self.service._refresh_data.assert_not_called()
def test_update_allowed_after_reconnect_pause(self):
"""Test that after 3 failures, update() calls _refresh_data if reconnectAfter time is over."""
self.service.failed_update_count = 3
self.service.last_update_successful = False
self.service._last_update = time.time() - 10 * 60 # more than reconnectAfter
self.service._refresh_data.reset_mock()
self.service.update()
self.service._refresh_data.assert_called_once()
def test_failed_update_count_reset_on_success(self):
"""Test that failed_update_count is reset to 0 after a successful update."""
self.service.failed_update_count = 3
self.service.last_update_successful = True
self.service._last_update = time.time() - 10 * 60
self.service._refresh_data = MagicMock()
self.service.update()
self.assertEqual(self.service.failed_update_count, 0)
def test_reconnect_pause_not_applied_before_3_failures(self):
"""Test that reconnect pause is not applied if failed_update_count < 3 (should update as normal)."""
self.service.failed_update_count = 2
self.service.last_update_successful = False
self.service._last_update = time.time()
self.service._refresh_data.reset_mock()
self.service.update()
self.service._refresh_data.assert_called_once()
def test_statuscode_set_on_reconnect_and_reset(self):
"""Test that on first reconnect error, StatusCode and values are set to error/zero, and on recovery StatusCode is set back to 7."""
# Simulate error state
self.service.failed_update_count = 3
self.service._last_update = time.time()
self.service.retry_after_seconds = 60
self.service.reset_statuscode_on_next_success = False
self.service.update()
self.assertEqual(self.service._dbusservice['/StatusCode'], 10)
self.assertEqual(self.service._dbusservice['/Ac/Power'], 0)
self.assertEqual(self.service._dbusservice['/Ac/L1/Current'], 0)
self.assertEqual(self.service._dbusservice['/Ac/L1/Power'], 0)
self.assertEqual(self.service._dbusservice['/Ac/L1/Voltage'], 0)
self.assertTrue(self.service.reset_statuscode_on_next_success)
# Simulate recovery
self.service.failed_update_count = 0
self.service.reset_statuscode_on_next_success = True
self.service._refresh_data = MagicMock()
self.service.is_data_up2date = MagicMock(return_value=True)
self.service.dry_run = True
self.service.set_dbus_values = MagicMock()
self.service._update_index = MagicMock()
self.service.last_update_successful = False
self.service.update()
self.assertEqual(self.service._dbusservice['/StatusCode'], 7)
self.assertFalse(self.service.reset_statuscode_on_next_success)
def test_timeout_mode_no_zero_before_timeout(self):
"""If ErrorMode=timeout and error_state_after_seconds=600, before 10min no zero/StatusCode=10 is sent."""
self.service.error_mode = MODE_TIMEOUT
self.service.error_state_after_seconds = 600 # 10 minutes
self.service.last_update_successful = False
self.service._last_update = time.time() - 300 # 5 minutes ago
self.service.reset_statuscode_on_next_success = False
self.service.set_dbus_values_to_zero = MagicMock()
self.service.update()
# Should NOT set zero values yet
self.service.set_dbus_values_to_zero.assert_not_called()
self.assertNotEqual(self.service._dbusservice['/StatusCode'], 10)
def test_timeout_mode_zero_after_timeout(self):
"""If ErrorMode=timeout and error_state_after_seconds=600, after 10min zero/StatusCode=10 is sent."""
self.service.error_mode = MODE_TIMEOUT
self.service.error_state_after_seconds = 600 # 10 minutes
self.service.last_update_successful = False
self.service._last_update = time.time() - 601 # just over 10 minutes ago
self.service.reset_statuscode_on_next_success = False
self.service._refresh_data = MagicMock(side_effect=Exception("Test exception for error handling"))
self.service.set_dbus_values_to_zero = MagicMock(wraps=self.service.set_dbus_values_to_zero)
self.service.update()
# Should set zero values now
self.service.set_dbus_values_to_zero.assert_called_once()
self.assertEqual(self.service._dbusservice['/StatusCode'], 10)
def test_timeout_mode_timer_resets_on_success(self):
"""If in timeout mode a successful update occurs in between, the timer is reset and no zero values are sent."""
self.service.error_mode = MODE_TIMEOUT
self.service.error_state_after_seconds = 600 # 10 Minuten
self.service.last_update_successful = False
self.service._last_update = time.time() - 601 # Über Timeout, würde Nullwerte senden
self.service.reset_statuscode_on_next_success = False
self.service._refresh_data.side_effect = requests.exceptions.RequestException("Test exception")
self.service.update()
# reset refresh_data to simulate a successful update
self.service._refresh_data = MagicMock()
self.service.update()
self.assertNotEqual(self.service._dbusservice['/StatusCode'], 10)
def test_normal_operation_successful_update(self):
"""Test that in normal operation, update calls all expected methods and resets error state."""
self.service.failed_update_count = 0
self.service.last_update_successful = True
self.service._last_update = time.time()
self.service.dry_run = False
self.service.is_data_up2date = MagicMock(return_value=True)
self.service.update()
self.service._refresh_data.assert_called_once()
self.service.is_data_up2date.assert_called_once()
self.service.set_dbus_values.assert_called_once()
self.service._update_index.assert_called_once()
self.assertEqual(self.service.failed_update_count, 0)
self.assertTrue(self.service.last_update_successful)
def test_normal_operation_successful_update_timeout_mode(self):
"""Test that in timeout mode, normal operation calls all expected methods and resets error state."""
self.service.error_mode = MODE_TIMEOUT
self.service.error_state_after_seconds = 600 # 10 minutes
self.service.failed_update_count = 0
self.service.last_update_successful = True
self.service._last_update = time.time()
self.service.dry_run = False
self.service.is_data_up2date = MagicMock(return_value=True)
self.service._refresh_data = MagicMock()
self.service.set_dbus_values = MagicMock()
self.service._update_index = MagicMock()
self.service.update()
self.service._refresh_data.assert_called_once()
self.service.is_data_up2date.assert_called_once()
self.service.set_dbus_values.assert_called_once()
self.service._update_index.assert_called_once()
self.assertEqual(self.service.failed_update_count, 0)
self.assertTrue(self.service.last_update_successful)
def test_config_values_are_read_correctly(self):
"""Test that config values are read and mapped to class attributes correctly."""
config = {
"DEFAULT": {
"DTU": "ahoy",
"ErrorMode": "timeout",
"RetryAfterSeconds": "123",
"MinRetriesUntilFail": "7",
"ErrorStateAfterSeconds": "456"
},
"INVERTER0": {
"Phase": "L1",
"DeviceInstance": "34",
"AcPosition": "1",
"Host": "localhost",
},
}
with patch('dbus_service.DbusService._get_config', return_value=config):
service = DbusService("com.victronenergy.pvinverter", 0)
self.assertEqual(service.error_mode, "timeout")
self.assertEqual(service.retry_after_seconds, 123)
self.assertEqual(service.min_retries_until_fail, 7)
self.assertEqual(service.error_state_after_seconds, 456)
if __name__ == '__main__':
unittest.main()
| python | MIT | 9266cc79e781ef8c8e75749922f8634ddd2b7989 | 2026-01-05T07:14:42.526396Z | false |
henne49/dbus-opendtu | https://github.com/henne49/dbus-opendtu/blob/9266cc79e781ef8c8e75749922f8634ddd2b7989/tests/__init__.py | tests/__init__.py | python | MIT | 9266cc79e781ef8c8e75749922f8634ddd2b7989 | 2026-01-05T07:14:42.526396Z | false | |
henne49/dbus-opendtu | https://github.com/henne49/dbus-opendtu/blob/9266cc79e781ef8c8e75749922f8634ddd2b7989/tests/test_dbus_opendtu.py | tests/test_dbus_opendtu.py | """ Unit tests for the dbus_opendtu.py module """
import unittest
from unittest.mock import patch, MagicMock, mock_open, ANY
import sys
import os
import configparser
# Add the parent directory of dbus_opendtu to the system path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# Mocking the dbus and other dependencies before importing the module to test
sys.modules['dbus'] = MagicMock()
sys.modules['vedbus'] = MagicMock()
# Mock the gi.repository.GLib module
sys.modules['gi'] = MagicMock()
sys.modules['gi.repository'] = MagicMock()
sys.modules['gi.repository.GLib'] = MagicMock()
sys.modules['gi.repository.GLib.MainLoop'] = MagicMock()
sys.modules['dbus.mainloop.glib'] = MagicMock()
from dbus_opendtu import ( # pylint: disable=E0401,C0413
get_DbusServices,
getConfig,
sign_of_life_all_services,
update_all_services,
main
) # noqa
class TestDbusOpendtu(unittest.TestCase):
""" Test cases for the dbus_opendtu module """
@patch('dbus_opendtu.DbusService')
def test_register_service(self, mock_dbus_service):
""" Test the register_service function """
config = {
"DEFAULT": {
"NumberOfInvertersToQuery": "1",
"DTU": "openDTU"
},
"INVERTER0": {
"Phase": "L1",
"DeviceInstance": "34",
"AcPosition": "1"
},
}
mock_dbus_service_instance = mock_dbus_service.return_value
mock_dbus_service_instance.get_number_of_inverters.return_value = 1
get_DbusServices(config)
# Add assertions to verify the behavior
mock_dbus_service.assert_called_once()
# Additional assertions
mock_dbus_service.assert_called_once_with(
servicename="com.victronenergy.pvinverter",
actual_inverter=0,
)
@patch(
"builtins.open",
new_callable=mock_open,
read_data=(
"[DEFAULT]\n"
"Logging=INFO\n"
"NumberOfInvertersToQuery=1\n"
"NumberOfTemplates=1\n"
"DTU=some_dtu"
)
)
@patch("os.path.exists", return_value=True)
@patch("os.path.realpath")
def test_get_config(self, mock_realpath, mock_exists, mock_open): # pylint: disable=W0613
""" Test the get_config function """
# Mock the realpath to return a fixed path
mock_realpath.return_value = "../config.example"
# Call the function
config = getConfig()
# Verify the return type
self.assertIsInstance(config, configparser.ConfigParser)
# Verify the content of the config
self.assertEqual(config["DEFAULT"]["Logging"], "INFO")
self.assertEqual(config["DEFAULT"]["NumberOfInvertersToQuery"], "1")
self.assertEqual(config["DEFAULT"]["NumberOfTemplates"], "1")
self.assertEqual(config["DEFAULT"]["DTU"], "some_dtu")
@patch('dbus_opendtu.DbusService')
@patch('dbus_opendtu.get_config_value')
def test_get_dbus_services_with_inverters(self, mock_get_config_value, mock_dbus_service):
""" Test get_DbusServices with inverters """
mock_get_config_value.side_effect = lambda config, key, section, index, default: f"mock_value_{index}"
mock_dbus_service_instance = mock_dbus_service.return_value
mock_dbus_service_instance.get_number_of_inverters.return_value = 2
config = {
"DEFAULT": {
"NumberOfInvertersToQuery": "2",
"NumberOfTemplates": "0",
"DTU": "openDTU"
}
}
services = get_DbusServices(config)
self.assertEqual(len(services), 2)
mock_dbus_service.assert_any_call(servicename="mock_value_0", actual_inverter=0)
mock_dbus_service.assert_any_call(servicename="mock_value_1", actual_inverter=1)
@patch("dbus_opendtu.get_config_value")
@patch("dbus_opendtu.DbusService")
def test_get_dbus_services_with_templates(self, mock_dbus_service, mock_get_config_value):
""" Test get_DbusServices with templates """
# Mock the get_config_value function to return specific values
def get_config_value_side_effect(config, key, section, index, default):
if key == "NumberOfInvertersToQuery":
return 2 # Return an integer for the number of inverters
return f"mock_value_{index}"
mock_get_config_value.side_effect = get_config_value_side_effect
# Mock the DbusService instance
mock_dbus_service_instance = mock_dbus_service.return_value # pylint: disable=W0612
# Create a mock config
config = MagicMock()
# Call the function
services = get_DbusServices(config)
# Add assertions to verify the behavior
self.assertIsInstance(services, list)
self.assertEqual(len(services), 2)
@patch("dbus_opendtu.DbusService")
def test_get_dbus_services_with_no_inverters_or_templates(self, mock_dbus_service):
""" Test get_DbusServices with no inverters or templates """
# Create a mock config with the required values
config = {
"DEFAULT": {
"NumberOfInvertersToQuery": "0",
"NumberOfTemplates": "0",
"DTU": "openDTU"
},
"INVERTER0": {}, # Add the required key to avoid KeyError
"TEMPLATE0": {} # Add the required key to avoid KeyError
}
# Mock the get_number_of_inverters method to return 0
mock_dbus_service_instance = mock_dbus_service.return_value
mock_dbus_service_instance.get_number_of_inverters.return_value = 0
services = get_DbusServices(config)
self.assertEqual(len(services), 0)
mock_dbus_service.assert_called_once() # called once to check if there are inverters
@patch("dbus_opendtu.DbusService")
def test_get_config_with_invalid_NumberOfInverter_and_Template_values(self, mock_dbus_service):
""" Test get_DbusServices with invalid NumberOfInverter and NumberOfTemplate values """
# Create a mock config with the required values
config = {
"DEFAULT": {
"NumberOfInvertersToQuery": "invalid",
"NumberOfTemplates": "invalid",
"DTU": "openDTU"
},
"INVERTER0": {}, # Add the required key to avoid KeyError
"TEMPLATE0": {} # Add the required key to avoid KeyError
}
# Mock the get_number_of_inverters method to return 0
mock_dbus_service_instance = mock_dbus_service.return_value
mock_dbus_service_instance.get_number_of_inverters.return_value = 0
services = get_DbusServices(config)
self.assertEqual(len(services), 0)
mock_dbus_service.assert_called_once() # called once to check if there are inverters
@patch('dbus_opendtu.DbusService')
@patch('dbus_opendtu.get_config_value')
def test_get_dbus_services_with_missing_dtu_key(self, mock_get_config_value, mock_dbus_service):
""" Test get_DbusServices with missing DTU key """
mock_get_config_value.side_effect = lambda config, key, section, index, default: f"mock_value_{index}"
mock_dbus_service_instance = mock_dbus_service.return_value
config = {
"DEFAULT": {
"NumberOfInvertersToQuery": "1",
"NumberOfTemplates": "1"
}
}
services = get_DbusServices(config)
self.assertIsNone(services)
mock_dbus_service.assert_not_called()
def test_sign_of_life_all_services(self):
""" Test sign_of_life_all_services with a list of mock services """
# Create a list of mock services
mock_service_1 = MagicMock()
mock_service_2 = MagicMock()
services = [mock_service_1, mock_service_2]
# Call the function
result = sign_of_life_all_services(services)
# Verify that the sign_of_life method was called on each service
mock_service_1.sign_of_life.assert_called_once()
mock_service_2.sign_of_life.assert_called_once()
# Verify the return value
self.assertTrue(result)
def test_sign_of_life_all_services_with_empty_list(self):
""" Test sign_of_life_all_services with an empty list """
services = []
# Call the function
result = sign_of_life_all_services(services)
# Verify the return value
self.assertTrue(result)
def test_sign_of_life_all_services_with_no_sign_of_life_method(self):
""" Test sign_of_life_all_services with services missing sign_of_life method """
# Create a list of mock services, one without sign_of_life method
mock_service_1 = MagicMock()
mock_service_2 = MagicMock()
del mock_service_2.sign_of_life
services = [mock_service_1, mock_service_2]
# Call the function and expect an AttributeError
with self.assertRaises(AttributeError):
sign_of_life_all_services(services)
@patch('dbus_opendtu.gobject')
def test_update_all_services(self, mock_gobject):
""" Test update_all_services with valid services """
# Mock the current time
mock_gobject.get_real_time.return_value = 2000000
# Create mock services
mock_service_1 = MagicMock()
mock_service_1.polling_interval = 1000
mock_service_1.last_polling = 1000
mock_service_2 = MagicMock()
mock_service_2.polling_interval = 2000
mock_service_2.last_polling = 1000
services = [mock_service_1, mock_service_2]
# Call the function
result = update_all_services(services)
# Verify that the update method was called on each service
mock_service_1.update.assert_called_once()
mock_service_2.update.assert_not_called()
# Verify that the last_polling attribute was updated
self.assertEqual(mock_service_1.last_polling, 2000)
self.assertEqual(mock_service_2.last_polling, 1000)
# Verify the return value
self.assertTrue(result)
@patch('dbus_opendtu.gobject')
def test_update_all_services_with_no_update_needed(self, mock_gobject):
""" Test update_all_services when no update is needed """
# Mock the current time
mock_gobject.get_real_time.return_value = 2000000
# Create mock services
mock_service_1 = MagicMock()
mock_service_1.polling_interval = 1000
mock_service_1.last_polling = 1999
mock_service_2 = MagicMock()
mock_service_2.polling_interval = 2000
mock_service_2.last_polling = 1999
services = [mock_service_1, mock_service_2]
# Call the function
result = update_all_services(services)
# Verify that the update method was not called on any service
mock_service_1.update.assert_not_called()
mock_service_2.update.assert_not_called()
# Verify the return value
self.assertTrue(result)
@patch('dbus_opendtu.gobject')
def test_update_all_services_with_empty_list(self, mock_gobject):
""" Test update_all_services with an empty list """
services = []
# Call the function
result = update_all_services(services)
# Verify the return value
self.assertTrue(result)
@patch('dbus_opendtu.gobject')
def test_update_all_services_with_missing_attributes(self, mock_gobject):
""" Test update_all_services with services missing required attributes """
# Mock the current time
mock_gobject.get_real_time.return_value = 2000000
# Create mock services, one missing required attributes
mock_service_1 = MagicMock()
mock_service_1.polling_interval = 1000
mock_service_1.last_polling = 1000
mock_service_2 = MagicMock()
del mock_service_2.polling_interval
del mock_service_2.last_polling
services = [mock_service_1, mock_service_2]
# Call the function and expect an AttributeError
with self.assertRaises(AttributeError):
update_all_services(services)
@patch('dbus_opendtu.getConfig')
@patch('dbus_opendtu.get_config_value')
@patch('dbus_opendtu.get_DbusServices')
@patch('dbus_opendtu.sign_of_life_all_services')
@patch('dbus_opendtu.update_all_services')
@patch('dbus_opendtu.gobject')
def test_main(
self,
mock_gobject,
mock_update_all_services,
mock_sign_of_life_all_services,
mock_get_dbus_services,
mock_get_config_value,
mock_get_config,
):
""" Test the main function """
# Mock the configuration
mock_config = MagicMock()
mock_get_config.return_value = mock_config
mock_get_config_value.return_value = 1
# Mock the services
mock_services = [MagicMock()]
mock_get_dbus_services.return_value = mock_services
# Mock the timeout_add method
def timeout_add_mock(interval, callback, *args, **kwargs):
callback(*args, **kwargs)
return True
mock_gobject.timeout_add.side_effect = timeout_add_mock
# Call the main function
main()
# Assertions to verify the behavior
mock_get_config.assert_called_once()
mock_get_dbus_services.assert_called_once_with(mock_config)
mock_update_all_services.assert_called_once_with(mock_services)
mock_sign_of_life_all_services.assert_called_once_with(mock_services)
mock_gobject.MainLoop.assert_called_once()
@patch('dbus_opendtu.gobject')
@patch('dbus_opendtu.logging')
@patch('dbus_opendtu.get_DbusServices')
def test_main_exception(
self,
mock_gobject,
mock_logging,
mock_get_dbus_services, # pylint: disable=W0613
):
""" Test the main function with exception """
mock_get_dbus_services.side_effect = FileNotFoundError
with self.assertRaises(FileNotFoundError):
main()
if __name__ == '__main__':
unittest.main()
| python | MIT | 9266cc79e781ef8c8e75749922f8634ddd2b7989 | 2026-01-05T07:14:42.526396Z | false |
henne49/dbus-opendtu | https://github.com/henne49/dbus-opendtu/blob/9266cc79e781ef8c8e75749922f8634ddd2b7989/tests/test_helpers.py | tests/test_helpers.py | ''' This file contains the unit tests for the helper functions in the helpers.py file. '''
# file ignores
# pylint: disable=too-many-instance-attributes
import sys
import os
import unittest
from unittest.mock import MagicMock
import json
# Add the parent directory of dbus_opendtu to the system path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) # noqa pylint: disable=wrong-import-position
from helpers import (
get_config_value,
get_default_config,
get_value_by_path,
convert_to_expected_type,
get_ahoy_field_by_name,
is_true,
timeit,
_kwh,
_a,
_w,
_v,
)
sys.modules['vedbus'] = MagicMock()
sys.modules['dbus'] = MagicMock()
sys.modules['gi.repository'] = MagicMock()
sys.modules['requests'] = MagicMock()
sys.modules['requests.auth'] = MagicMock()
import dbus_service # noqa pylint: disable=wrong-import-position
# region Helper functions
def get_ahoy_meterdata(filename):
''' Load the meter data from the json file. '''
with open(filename, encoding="utf-8") as file_json:
json_meter_data = json.load(file_json)
# add the field "inverter" to meter_data:
# This will contain an array of the "iv" data from all inverters.
json_meter_data["inverter"] = []
for inverter_number in range(len(json_meter_data["iv"])):
if is_true(json_meter_data["iv"][inverter_number]):
iv_data = fetch_ahoy_iv_data(inverter_number)
while len(json_meter_data["inverter"]) < inverter_number:
# there was a gap in the sequence of inverter numbers -> fill in a dummy value
json_meter_data["inverter"].append({})
json_meter_data["inverter"].append(iv_data)
return json_meter_data
def fetch_ahoy_iv_data(inverter_number):
''' Load the inverter data from the json file. '''
filename = f"./docs/ahoy_0.5.93_inverter-id-{inverter_number}.json"
# Check if the file exists, otherwise return an empty dict.
if not os.path.isfile(filename):
return {}
with open(filename, encoding="utf-8") as file_json:
data = json.load(file_json)
return data
# Load the meter data from the json file.
meter_data_ahoy = get_ahoy_meterdata(filename='./docs/ahoy_0.5.93_live.json')
meter_data = json.loads(
'{"StatusSNS": {"Time": "2021-02-03T15:12:52", "Switch1": "ON", "ENERGY": '
'{"TotalStartTime": "2020-01-05T12:41:22", "Total": 13.48712, "Yesterday": 0, '
'"Today": 0, "Power": 190, "ApparentPower": 0, "ReactivePower": 0, "Factor": 0, '
'"Voltage": 0, "Current": 0}}}')
meter_data_null = json.loads(
'{"StatusSNS": {"Time": "2021-02-03T15:12:52", "Switch1": "ON", "ENERGY": '
'{"TotalStartTime": "2020-01-05T12:41:22", "Total": 13.48712, "Yesterday": 0, '
'"Today": 0, "Power": null, "ApparentPower": null, "ReactivePower": null, "Factor": null, '
'"Voltage": 225.66, "Current": null}}}')
# endregion
class TestHelpersFunctions(unittest.TestCase):
''' This class contains the unit tests for the helper functions in the helpers.py file. '''
def setUp(self):
''' Setup the test environment. '''
# Mock the config
self.config = MagicMock()
self.config.__getitem__.return_value = {
"Username": "",
"Password": "",
"DigistAuth": "False",
"CUST_SN": "12345678",
"CUST_API_PATH": "cm?cmnd=STATUS+8",
"CUST_POLLING": "2000",
"CUST_Power": "StatusSNS/ENERGY/Power",
"CUST_Power_Mult": "1",
"CUST_Total": "StatusSNS/ENERGY/Total",
"CUST_Total_Mult": "1",
"CUST_Voltage": "StatusSNS/ENERGY/Voltage",
"CUST_Current": "StatusSNS/ENERGY/Current",
"Phase": "L1",
"DeviceInstance": "47",
"AcPosition": "1",
"Name": "Tasmota",
"Servicename": "com.victronenergy.grid",
"DTU": "opendtu",
}
self.custpower = self.config["TEMPLATE0"]["CUST_Power"].split("/")
self.custpower_factor = self.config["TEMPLATE0"]["CUST_Power_Mult"]
self.custpower_default = get_config_value(self.config, "CUST_Power_Default", "TEMPLATE", 0, None)
self.custtotal = self.config["TEMPLATE0"]["CUST_Total"].split("/")
self.custtotal_factor = self.config["TEMPLATE0"]["CUST_Total_Mult"]
self.custtotal_default = get_config_value(self.config, "CUST_Total_Default", "TEMPLATE", 0, None)
self.custvoltage = self.config["TEMPLATE0"]["CUST_Voltage"].split("/")
self.custvoltage_default = get_config_value(
self.config, "CUST_Voltage_Default", "TEMPLATE", 0, None)
self.custcurrent = self.config["TEMPLATE0"]["CUST_Current"].split("/")
self.custcurrent_default = get_config_value(
self.config, "CUST_Current_Default", "TEMPLATE", 0)
def test_get_config_value(self):
''' Test the get_config_value() function. '''
self.assertEqual(get_config_value(self.config, "Phase", "INVERTER", 0), "L1")
self.assertEqual(get_config_value(self.config, "Username", "TEMPLATE", 0), "")
self.assertEqual(get_config_value(self.config, "not_exist", "TEMPLATE", 0, "default"), "default")
with self.assertRaises(ValueError):
get_config_value(self.config, "not_exist", "INVERTER", 0)
def test_get_default_config(self):
''' Test the get_default_config() function. '''
self.assertEqual(get_default_config(self.config, "Phase", "L1"), "L1")
self.assertEqual(get_default_config(self.config, "not_exist", "default"), "default")
self.assertEqual(get_default_config(self.config, "DTU", "empty"), "opendtu")
def test_get_value_by_path(self):
''' Test the get_nested() function. '''
self.assertEqual(get_value_by_path(meter_data, self.custpower), 190)
self.assertEqual(get_value_by_path(meter_data, self.custtotal), 13.48712)
self.assertEqual(get_value_by_path(meter_data, ["StatusSNS", "ENERGY", "not_there"]), 0)
self.assertEqual(get_value_by_path(meter_data, ["StatusSNS", "Switch1"]), "ON")
def test_convert_to_expected_type(self):
''' Test the convert_to_expected_type() function. '''
self.assertEqual(convert_to_expected_type("test", str, "default"), "test")
self.assertEqual(convert_to_expected_type("test", str, None), "test")
self.assertEqual(convert_to_expected_type("test", int, 0), 0)
self.assertEqual(convert_to_expected_type("test", int, None), None)
self.assertEqual(convert_to_expected_type("test", float, 0.0), 0.0)
self.assertEqual(convert_to_expected_type("test", float, None), None)
self.assertEqual(convert_to_expected_type("test", bool, False), False)
self.assertEqual(convert_to_expected_type("1", bool, None), True)
self.assertEqual(convert_to_expected_type(None, None, None), None)
def test_get_ahoy_field_by_name(self):
''' Test the get_ahoy_field_by_name() function. '''
self.assertEqual(get_ahoy_field_by_name(meter_data_ahoy, 0, "P_AC"), 223.7)
self.assertEqual(get_ahoy_field_by_name(meter_data_ahoy, 0, "YieldDay"), 2223)
self.assertEqual(get_ahoy_field_by_name(meter_data_ahoy, 0, "YieldTotal"), 422.603)
self.assertEqual(get_ahoy_field_by_name(meter_data_ahoy, 0, "U_AC"), 229.5)
self.assertEqual(get_ahoy_field_by_name(meter_data_ahoy, 0, "U_DC", False), 33.3)
self.assertEqual(get_ahoy_field_by_name(meter_data_ahoy, 0, "I_AC"), 0.98)
self.assertEqual(get_ahoy_field_by_name(meter_data_ahoy, 0, "I_DC", False), 1.75)
self.assertEqual(get_ahoy_field_by_name(meter_data_ahoy, 0, "P_DC", False), 58.1)
def test_get_ahoy_gap_in_inverter_sequence(self):
''' Test the special case when there is a gap in the sequence of inverters IDs.'''
meter_data_ahoy_bad_sequence = get_ahoy_meterdata(
filename='./docs/ahoy_0.7.36_live_gap_in_inverter_sequence.json')
self.assertEqual(get_ahoy_field_by_name(meter_data_ahoy_bad_sequence, 1, "P_AC"), 223.7)
def test_is_true(self):
''' Test the is_true() function. '''
self.assertEqual(is_true("1"), True)
self.assertEqual(is_true("true"), True)
self.assertEqual(is_true("True"), True)
self.assertEqual(is_true("TRUE"), True)
self.assertEqual(is_true("0"), False)
self.assertEqual(is_true("false"), False)
self.assertEqual(is_true("False"), False)
self.assertEqual(is_true("FALSE"), False)
self.assertEqual(is_true("test"), False)
self.assertEqual(is_true(""), False)
self.assertEqual(is_true(None), False)
def test_timeit(self):
''' Test the timeit() function. '''
@timeit
def test_function():
''' Test function. '''
return 1
self.assertEqual(test_function(), 1)
def test_part_get_values_for_inverts(self):
''' Test part of get_values_for_inverter() function, which is in dbus_service
but heavily uses functions in helpers.py. '''
power = dbus_service.DbusService.get_processed_meter_value(
meter_data_null,
self.custpower,
self.custpower_default,
self.custpower_factor
)
pvyield = dbus_service.DbusService.get_processed_meter_value(
meter_data_null,
self.custtotal,
self.custtotal_default,
self.custtotal_factor
)
voltage = dbus_service.DbusService.get_processed_meter_value(
meter_data_null,
self.custvoltage,
self.custpower_default,
)
current = dbus_service.DbusService.get_processed_meter_value(
meter_data_null,
self.custcurrent,
self.custpower_default,
)
self.assertEqual(power, None)
self.assertEqual(pvyield, 13.48712)
self.assertEqual(voltage, 225.66)
self.assertEqual(current, None)
def test_kwh(self):
''' Test the _kwh() function. '''
self.assertEqual(_kwh(None, 123.456), "123.46KWh")
self.assertEqual(_kwh(None, 1.234), "1.23KWh")
self.assertEqual(_kwh(None, -1.234), "-1.23KWh")
self.assertEqual(_kwh(None, 0), "0.00KWh")
self.assertEqual(_kwh(None, 0.1234), "0.12KWh")
self.assertEqual(_kwh(None, 1.5678), "1.57KWh")
def test_a(self):
''' Test the _a() function. '''
self.assertEqual(_a(None, 0), "0.0A")
self.assertEqual(_a(None, 0.45), "0.5A")
self.assertEqual(_a(None, 0.459), "0.5A")
self.assertEqual(_a(None, 1.2345), "1.2A")
self.assertEqual(_a(None, 1.5678), "1.6A")
self.assertEqual(_a(None, -1.5678), "-1.6A")
def test_w(self):
''' Test the _w() function. '''
self.assertEqual(_w(None, 0), "0.0W")
self.assertEqual(_w(None, 0.45), "0.5W")
self.assertEqual(_w(None, 0.459), "0.5W")
self.assertEqual(_w(None, 1.2345), "1.2W")
self.assertEqual(_w(None, 1.5678), "1.6W")
self.assertEqual(_w(None, -1.5678), "-1.6W")
def test_v(self):
''' Test the _v() function. '''
self.assertEqual(_v(None, 0), "0.0V")
self.assertEqual(_v(None, 0.45), "0.5V")
self.assertEqual(_v(None, 0.459), "0.5V")
self.assertEqual(_v(None, 1.2345), "1.2V")
self.assertEqual(_v(None, 1.5678), "1.6V")
self.assertEqual(_v(None, -1.5678), "-1.6V")
if __name__ == '__main__':
unittest.main()
| python | MIT | 9266cc79e781ef8c8e75749922f8634ddd2b7989 | 2026-01-05T07:14:42.526396Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/main.py | main.py | # python 3
# -*- coding:utf-8 -*-
"""
Created on 2023/11/23
@author: Chen Qian
@e-mail: chenqian2020@sjtu.edu.cn
"""
"""
Set <--data_dir> to the directory of the CWRU dataset firstly! (for example: './Datasets_dir/CWRU')
"""
import argparse
import os
from datetime import datetime
from utils.logger import setlogger
import logging
from utils.train_utils import train_utils
import random
import numpy as np
import torch
import time
# Random setting
seed = 999
np.random.seed(seed) # seed
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
mybool = lambda x: x.lower() in ['yes', 'true', 't', 'y', '1']
def parse_args():
parser = argparse.ArgumentParser(description='Train')
# datasets parameters
parser.add_argument('--data_dir', type=str, default='./Datasets_dir/CWRU',
help='the directory of the dataset')
parser.add_argument('--data_name', type=str, default='CWRU', choices=['CWRU', ],
help='the name of the dataset')
parser.add_argument('--data_type', type=str, default='time', choices=['time'],
help='the data_type of the dataset')
parser.add_argument('--normlizetype', type=str, choices=['0-1', '-1-1', 'mean-std'], default='mean-std',
help='data normalization methods')
parser.add_argument('--data_signalsize', type=int, default=1024, help='the name of the data')
parser.add_argument('--SNR', type=float, default=1000, help='activate when SNR in (-100,100) else set to None')
parser.add_argument('--batch_size', type=int, default=64, help='batch size of the training process')
parser.add_argument('--num_workers', type=int, default=0, help='the number of dataloader workers')
parser.add_argument('--test_size', type=float, default=0.3, help='for few-shot analysis')
# models parameters
parser.add_argument('--model_name', type=str, default='TFN_STTF',
choices=["Backbone_CNN", "Random_CNN", "TFN_STTF", "TFN_Chirplet", "TFN_Morlet"],
help='the model to be trained')
parser.add_argument('--kernel_size', type=int, default=11, help='the kernel size of traditional conv layer')
parser.add_argument('--checkpoint_dir', type=str, default=r'./checkpoint',
help='the directory to save the models and the results')
# func-models parameters
parser.add_argument('--mid_channel', type=int, default=32, help='the channel number of preprocessing layer')
parser.add_argument('--clamp_flag', type=mybool, default="True", help='flag to limit the superparams of TFconv layer')
# optimization information
parser.add_argument('--opt', type=str, choices=['sgd', 'adam', 'RMSprop'], default='adam', help='the optimizer')
parser.add_argument('--lr', type=float, default=1e-3, help='the initial learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='the momentum for sgd')
parser.add_argument('--weight_decay', type=float, default=0, help='the weight decay')
parser.add_argument('--lr_scheduler', type=str, choices=['step', 'exp', 'stepLR', 'fix'], default='stepLR',
help='the learning rate schedule')
parser.add_argument('--gamma', type=float, default=0.99,
help='learning rate scheduler parameter for step and exp') # 0.99
parser.add_argument('--steps', type=str, default='1', help='the learning rate decay for step and stepLR')
# save, load and display information
parser.add_argument('--save_model', type=mybool, default="False", help='max number of epoch')
parser.add_argument('--max_epoch', type=int, default=50, help='max number of epoch')
parser.add_argument('--print_step', type=int, default=5, help='the interval of log training information')
args = parser.parse_args()
return args
if __name__ == '__main__':
os.chdir(os.path.split(os.path.realpath(__file__))[0])
print("current dir: %s" % os.path.curdir)
args = parse_args()
# Process the args.SNR
if args.SNR <= -1e2 or args.SNR >= 1e2:
args.SNR = None
# Prepare the saving path for the models
sub_dir = args.model_name + '-' + args.data_name + '-' + args.data_type + '-' + datetime.strftime(datetime.now(),
'%m%d-%H%M%S')
save_dir = os.path.join(args.checkpoint_dir, sub_dir).replace('\\', '/')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# set the logger
setlogger(os.path.join(save_dir, 'training.log'))
# save the args
for k, v in args.__dict__.items():
logging.info("<args> {}: {}".format(k, v))
# initialize the trainer
trainer = train_utils(args, save_dir)
trainer.setup()
# train the model
time_start_train = time.time()
trainer.train()
logging.info("<training time>: {:.3f}".format(time.time() - time_start_train))
# plot the results
trainer.plot_save()
| python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/PostProcess/Acc_statistic.py | PostProcess/Acc_statistic.py | """
Created on 2023/11/23
@author: Chen Qian
@e-mail: chenqian2020@sjtu.edu.cn
"""
"""
This script is used to extract the training info from the log files and save to excel, and plot the accuracy figure.
"""
import os
from process_utils.processlib import ExtractInfo
from process_utils.processlib import acc2csv
from process_utils.PlotAccuracy import main as PlotaccMain
def main(root = '../checkpoint/Acc-CWRU',logname = "training.log"):
# extract training info from log files
subdirs = next(os.walk(root))[1]
Info = []
for subdir in subdirs:
if subdir != "postfiles":
filepath = os.path.join(root, subdir, logname)
Params, Dict = ExtractInfo(filepath)
Info.append(Params)
# save to excel and plot
save_dir = os.path.join(root, 'postfiles')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
acc2csv(os.path.join(save_dir, "1-Acc_statistic.xlsx"), Info,focus_column = ['model_name','mid_channel'])
PlotaccMain(os.path.join(save_dir, "1-Acc_statistic.xlsx"),focus_column = ['model_name','mid_channel'])
if __name__ == '__main__':
# set the current directory
os.chdir(os.path.split(os.path.realpath(__file__))[0])
print("current dir: %s" % os.path.curdir)
# acc statistic
main(root = '../checkpoint/Acc-CWRU') | python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/PostProcess/TrainSequentially.py | PostProcess/TrainSequentially.py | """
Created on 2023/11/23
@author: Chen Qian
@e-mail: chenqian2020@sjtu.edu.cn
"""
"""
This script is used to train the models sequentially.
Before run this, please set <--data_dir> to the directory of the CWRU dataset in <main.py> first.
"""
import os, sys
if __name__ == '__main__':
# set the current directory
os.chdir(os.path.split(os.path.realpath(__file__))[0])
print("current dir: %s" % os.path.curdir)
# prepare the command lines
model_list = ["Backbone_CNN", "Random_CNN", "TFN_STTF", "TFN_Chirplet", "TFN_Morlet"]
max_epoch = 50
mid_channel = [16, 32, 64, 128]
command_lines = []
SNR = 1e3
checkpoint_dir = os.path.abspath(r'..\checkpoint\Acc-CWRU').replace('\\', '/')
data_name = 'CWRU'
for item in model_list:
for item2 in mid_channel:
if item == "Backbone_CNN" and item2 != 16:
continue
line = f' --data_name {data_name:s} --SNR {SNR:.2f}' + \
f' --model_name {item:s} --mid_channel {str(item2):s}' \
f' --checkpoint_dir "{checkpoint_dir:s}" --save_model False --max_epoch {max_epoch:d}'
command_lines.append(line)
"""
round: the number of times to run each model, and we set 2 for saving time.
start: the index of the command line to start with. (default: 0)
"""
round = 2;
start = 0
for i, item in enumerate(command_lines):
for p in range(round):
if i * round + p + 1 <= start:
continue
print('-----' * 10)
print('process of total: {:^3d}/{:^3d} \ncommand line: {:s}'.format(i * round + p + 1,
len(command_lines) * round, item))
temp = '%s "%s/main.py" %s' % (sys.executable, os.path.abspath('../'), item)
print("Commandline: %s" % temp)
# run the command line in the terminal (use main.py to train the model)
os.system(temp)
print('-----' * 10)
| python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/PostProcess/process_utils/PlotAccuracy.py | PostProcess/process_utils/PlotAccuracy.py | """
Created on 2023/11/23
@author: Chen Qian
@e-mail: chenqian2020@sjtu.edu.cn
"""
import pandas as pd
import os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
def setseaborn():
# set color
current_cmap = sns.color_palette("deep")
sns.set(style="whitegrid")
sns.set(style="ticks", context="notebook", font='Times New Roman', palette=current_cmap, font_scale=2)
return current_cmap
def setdefault():
mpl.rcParams['axes.linewidth'] = 0.4
mpl.rcParams['font.family'] = 'Times New Roman'
mpl.rcParams['xtick.labelsize'] = 6
mpl.rcParams['ytick.labelsize'] = 6
mpl.rcParams['figure.figsize'] = [8 / 2.54, 6 / 2.54]
mpl.rcParams['figure.dpi'] = 1000
def setaxesLW(myaxes, axes_lw=1, tick_len=3, tick_lw=None):
if not tick_lw:
tick_lw = axes_lw / 3 * 2
for item in ['top', 'left', 'bottom', 'right']:
myaxes.spines[item].set_linewidth(axes_lw)
myaxes.tick_params(width=tick_lw, length=tick_len)
def PlotAcc(Datas, labels, ticklabels, savename, BackboneData=None, width=0.2, ylim=[95, 100],
title='The channel number of preprocessing layer', xlabel='Model', ylabel='Test accuracy (%)', ygap=1,
elinewidth=0.7, ncol=5):
cmap = np.array(setseaborn())
setdefault()
color_alpha = 1
cmap = cmap * color_alpha + np.array([1.0, 1, 1]) * (1 - color_alpha)
labelsize, ticksize, legend_size = 6, 6, 6
err_kw = {'elinewidth': elinewidth, 'ecolor': 'k'}
X = np.arange(1, Datas.shape[0] + 1)
fig = plt.figure()
ax = fig.add_subplot()
Bars = []
# plot Datas
for i in range(Datas.shape[1]):
temp = ax.bar(X + width * (i + 1 / 2 - Datas.shape[1] / 2), Datas[:, i, 0], width, label=labels[i],
color=cmap[i + 1], yerr=Datas[:, i, 1], error_kw=err_kw)
Bars.append(temp)
h, l = plt.gca().get_legend_handles_labels()
r, c = int(np.ceil(len(l) / ncol)), ncol
order = [c * (i % r) + (i // r) for i in range(len(l))]
leg = ax.legend([h[i] for i in order], [l[i] for i in order], prop={'size': legend_size},
bbox_to_anchor=(0.5, 1.05), loc='lower center', borderaxespad=0, ncol=ncol)
leg.set_title(title, prop={'size': legend_size})
# plot BackboneData
if BackboneData is None:
start = 1 - width * Datas.shape[1] / 2 - 0.1
end = len(ticklabels) + width * Datas.shape[1] / 2 + 0.1
else:
temp = ax.bar([0], BackboneData[0], width, color=cmap[0], yerr=BackboneData[1], error_kw=err_kw)
Bars.append(temp)
start = 0 - width / 2 * 1 - 0.1
end = len(ticklabels) - 1 + width * Datas.shape[1] / 2 + 0.1
# ax set
ax.set_xlabel(xlabel, fontsize=labelsize)
ax.set_ylabel(ylabel, fontsize=labelsize)
ax.set_xticks(np.arange(len(ticklabels)) if not BackboneData is None else np.arange(len(ticklabels)) + 1)
ax.set_xticklabels(ticklabels, fontsize=ticksize)
ax.set_xlim([start, end])
if ylim:
ax.set_ylim(ylim)
if ygap:
ax.set_yticks(np.arange(ylim[0], ylim[1] + 0.01, ygap))
ax.set_yticklabels(['{:d}'.format(int(item)) for item in np.arange(ylim[0], ylim[1] + 0.01, ygap)],
fontsize=ticksize)
else:
ax.tick_params(axis='y', labelsize=8)
ax.grid(axis='y', linewidth=0.5)
setaxesLW(ax, 0.8)
fig.tight_layout(pad=0.2)
# save
fig.savefig(savename.replace('.jpg', '-withlegend.jpg'))
def main(filedir, focus_column=['model_name', 'mid_channel']):
mid_channel = [16, 32, 64, 128]
# read data from excel
temp = pd.read_excel(filedir, sheet_name='df4')
# obtain data by focus_column
data = temp.loc[:, focus_column + ['mean max acc', 'std max acc']]
data = data.set_index(focus_column)
# prepare model acc from data
model_names = ["Random_CNN", "TFN_STTF", "TFN_Chirplet", "TFN_Morlet"]
model_new_names = ["Backbone\n_CNN", "Random\n_CNN", "TFN\n_STTF", "TFN_\nChirplet", "TFN\n_Morlet"]
backbone_name = "Backbone_CNN"
CNN_Data = data.loc[backbone_name, :].to_numpy().mean(0).squeeze()
Datas = []
for item in model_names:
Datas.append(data.loc[item, :].loc[mid_channel].to_numpy())
Datas = np.array(Datas)
# plot
PlotAcc(Datas, ['16', '32', '64', '128'], model_new_names, os.path.join(os.path.split(filedir)[0], '2-TestAcc.jpg'),
CNN_Data)
| python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/PostProcess/process_utils/processlib.py | PostProcess/process_utils/processlib.py | """
Created on 2023/11/23
@author: Chen Qian
@e-mail: chenqian2020@sjtu.edu.cn
"""
import os
import numpy as np
from datetime import datetime
import pandas as pd
def ExtractInfo(filepath,append_acc = True):
"""
extract the training info from the log file
"""
start_time, record_time = None, None
print(os.path.abspath(filepath))
Dict = {'current_lr': [], 'train_loss': [], 'train_acc': [], 'val_loss': [], 'val_acc': []}
Params = {'filename': os.path.split(os.path.split(filepath)[0])[-1]}
with open(filepath, 'r',encoding='utf8') as f:
temp = f.readline()
flag_temp = True
while '-----Epoch' not in temp: # read params until epoch
if ": " in temp[15:] and flag_temp:
key = temp.split(": ")[0].split(" ")[-1]
value = temp.split(": ")[1].replace("\n", '')
Params[key] = value
if '---------------------------------' in temp:
flag_temp = False
temp = f.readline()
while temp != '': # readlines until the end
if '-----Epoch' in temp: # read epoch info
Dict['current_lr'].append(float(f.readline().split('current lr: ')[1].split(':')[-1].strip('\n[]')))
temp = f.readline()
if not start_time:
start_time = datetime.strptime(temp[:14], '%m-%d %H:%M:%S')
while "<tempinfo>" in temp: # skip tempinfo
temp = f.readline()
Dict['train_loss'].append(float(temp.split('train-Loss: ')[1].split(' ')[0]))
Dict['train_acc'].append(float(temp.split('train-Acc: ')[1].split(' ')[0].replace(',', '')))
temp = f.readline()
Dict['val_loss'].append(float(temp.split('val-Loss: ')[1].split(' ')[0]))
Dict['val_acc'].append(float(temp.split('val-Acc: ')[1].split(' ')[0].replace(',', '')))
end_time = datetime.strptime(temp[:14], '%m-%d %H:%M:%S')
if '<training time>: ' in temp: # extract training time
record_time = float(temp.split('<training time>: ')[1].split(' ')[0].replace(',', ''))
temp = f.readline()
for key in Dict.keys(): # transform list to np.array
if key != 'params':
Dict[key] = np.array(Dict[key])
Params['train_time'] = record_time if record_time else (end_time - start_time).total_seconds()
if append_acc: # append max acc and final acc
Params['max acc'] = max(Dict['val_acc'])
final_len = int(max(min(len(Dict['val_acc'])*0.5,5),1))
Params['final acc'] = Dict['val_acc'][-final_len:].mean()
return Params,Dict
def acc2csv(savepath,data,focus_column = ['model_name']):
"""
save the training info to excel
"""
# transform data to dataframe
df1 = (pd.DataFrame(data).sort_values(by=focus_column).reset_index(drop=True))
df1 = df1.apply(pd.to_numeric,errors='ignore')
# dataframe 2
number = 3 if 'train_time' in df1.columns else 2
col = df1.drop(['filename','checkpoint_dir'],axis=1).columns.to_list()[:-number]
df2 = df1.drop(['filename','checkpoint_dir'],axis=1).set_index(col)
df2 = pd.concat([df2.groupby(col).mean(),df2.groupby(col).std()],axis=1,keys=['mean','std']).reset_index()
df2.columns = [f'{i} {j}'.strip() for i, j in df2.columns]
# obtain dataframe 3 by focus_column
try:
temp = df1.loc[:,focus_column+['train_time','max acc', 'final acc']].set_index(focus_column)
except:
temp = df1.loc[:, focus_column + ['max acc', 'final acc']].set_index(focus_column)
meanstddf = pd.concat([temp.groupby(focus_column).mean(),temp.groupby(focus_column).std()],axis=1,keys=['mean','std']).reset_index()
df3 = meanstddf.set_index(focus_column)
df3.columns.names = ['mean/std','max/final']
# obtain dataframe 4 by focus_column
df4 = meanstddf
df4.columns = [f'{i} {j}'.strip() for i, j in df4.columns]
df_save = {'df1': df1, 'df2': df2, 'df3': df3, 'df4': df4}
# write to excel
with pd.ExcelWriter(savepath) as writer:
for item in ['df1', 'df2', 'df3', 'df4']:
df_save[item].to_excel(writer, item)
if __name__ == '__main__':
ExtractInfo(r'../../checkpoint/Acc-CWRU/Backbone_CNN-CWRU-time-1122-200445/training.log', append_acc=True)
| python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/Datasets/CWRU.py | Datasets/CWRU.py | """
Created on 2023/11/23
@author: Chen Qian
@e-mail: chenqian2020@sjtu.edu.cn
"""
import pandas as pd
from Datasets.Dataset_utils.DatasetsBase import dataset
import Datasets.Dataset_utils.sequence_aug as aug1d
from Datasets.get_files.CWRU_get_files import get_files
from Datasets.get_files.generalfunciton import data_transforms1d
from Datasets.get_files.generalfunciton import balance_label
from sklearn.model_selection import train_test_split
import os
import numpy as np
import random
import pickle
# random seed
seed = 999
np.random.seed(seed)
random.seed(seed)
class CWRU(object):
num_classes = 10
inputchannel = 1
def __init__(self, args):
self.args = args # args = {'data_type': 'time', 'data_dir': './Datasets_dir/CWRU','test_size': 0.3,'normlizetype': 'mean-std'}
def _preload(self,prefile_dir,args):
"""
preload the data from the prefile_dir
"""
# check if the prefile_dir exists
if os.path.exists(prefile_dir):
with open(prefile_dir, 'rb') as f:
data_pd, storage_args, label_name = pickle.load(f)
if storage_args == args: # check if the args is the same as the args in the prefile_dir, then preload the data
return data_pd,label_name
# else, get the data and save it to the prefile_dir
list_data, label_name = get_files(**args)
data_pd = pd.DataFrame({"data": list_data[0], "label": list_data[1]})
with open(prefile_dir, 'wb') as f:
pickle.dump((data_pd,args,label_name), f)
return data_pd,label_name
def data_preprare(self, signal_size=1024, SNR=None):
test_size = self.args['test_size'] if 'test_size' in self.args.keys() else 0.3
# preload the args
temp_args = {'root':self.args['data_dir'], 'type':self.args['data_type'], 'signal_size':signal_size,
'downsample_rate':1,'SNR':SNR, 'load_condition':3}
# preload the data
data_pd, label_name = self._preload(os.path.join(self.args['data_dir'], 'data_buffer.pkl'), temp_args)
# balance the label numbers to 450
data_pd = balance_label(data_pd, 'label', 450)
# split the data to train and val
train_pd, val_pd = train_test_split(data_pd, test_size=test_size, random_state=40,
stratify=data_pd["label"])
# get the dataset
train_dataset = dataset(list_data=train_pd, transform=data_transforms1d(aug1d, 'train', self.args['normlizetype']))
val_dataset = dataset(list_data=val_pd, transform=data_transforms1d(aug1d, 'val', self.args['normlizetype']))
return (train_dataset, val_dataset), label_name
if __name__ == '__main__': # check CWRU works well
args = {'data_type': 'time', 'data_dir': './Datasets_dir/CWRU','normlizetype': 'mean-std'}
cwru = CWRU(args)
out = cwru.data_preprare()
print(1)
| python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/Datasets/__init__.py | Datasets/__init__.py | from Datasets.CWRU import CWRU | python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/Datasets/Dataset_utils/DatasetsBase.py | Datasets/Dataset_utils/DatasetsBase.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
from torch.utils.data import Dataset
class dataset(Dataset):
def __init__(self, list_data, transform=None):
self.seq_data = list_data['data'].tolist()
self.labels = list_data['label'].tolist()
self.transforms = transform
def __len__(self):
return len(self.seq_data)
def __getitem__(self, item):
seq = self.seq_data[item]
label = self.labels[item]
if self.transforms:
seq = self.transforms(seq)
return seq, label | python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/Datasets/Dataset_utils/sequence_aug.py | Datasets/Dataset_utils/sequence_aug.py |
import numpy as np
import random
from scipy.signal import resample
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, seq):
for t in self.transforms:
seq = t(seq)
return seq
class Reshape(object):
def __call__(self, seq):
#print(seq.shape)
return seq.transpose()
class to_numpy(object):
def __call__(self, seq):
#print(seq.shape)
return np.array(seq,dtype=np.float32)
class Retype(object):
def __call__(self, seq):
return seq.astype(np.float32)
class AddGaussian(object):
def __init__(self, sigma=0.01):
self.sigma = sigma
def __call__(self, seq):
return seq + np.random.normal(loc=0, scale=self.sigma, size=seq.shape)
class RandomAddGaussian(object):
def __init__(self, sigma=0.01):
self.sigma = sigma
def __call__(self, seq):
if np.random.randint(2):
return seq
else:
return seq + np.random.normal(loc=0, scale=self.sigma, size=seq.shape)
class Scale(object):
def __init__(self, sigma=0.01):
self.sigma = sigma
def __call__(self, seq):
scale_factor = np.random.normal(loc=1, scale=self.sigma, size=(seq.shape[0], 1))
scale_matrix = np.matmul(scale_factor, np.ones((1, seq.shape[1])))
return seq*scale_matrix
class RandomScale(object):
def __init__(self, sigma=0.01):
self.sigma = sigma
def __call__(self, seq):
if np.random.randint(2):
return seq
else:
scale_factor = np.random.normal(loc=1, scale=self.sigma, size=(seq.shape[0], 1))
scale_matrix = np.matmul(scale_factor, np.ones((1, seq.shape[1])))
return seq*scale_matrix
class RandomStretch(object):
def __init__(self, sigma=0.3):
self.sigma = sigma
def __call__(self, seq):
if np.random.randint(2):
return seq
else:
seq_aug = np.zeros(seq.shape)
len = seq.shape[1]
length = int(len * (1 + (random.random()-0.5)*self.sigma))
for i in range(seq.shape[0]):
y = resample(seq[i, :], length)
if length < len:
if random.random() < 0.5:
seq_aug[i, :length] = y
else:
seq_aug[i, len-length:] = y
else:
if random.random() < 0.5:
seq_aug[i, :] = y[:len]
else:
seq_aug[i, :] = y[length-len:]
return seq_aug
class RandomCrop(object):
def __init__(self, crop_len=20):
self.crop_len = crop_len
def __call__(self, seq):
if np.random.randint(2):
return seq
else:
max_index = seq.shape[1] - self.crop_len
random_index = np.random.randint(max_index)
seq[:, random_index:random_index+self.crop_len] = 0
return seq
class Normalize(object):
def __init__(self, type = "0-1"): # "0-1","-1-1","mean-std"
self.type = type
def __call__(self, seq):
if self.type == "0-1":
seq = (seq-seq.min())/(seq.max()-seq.min())
elif self.type == "-1-1":
seq = 2*(seq-seq.min())/(seq.max()-seq.min()) + -1
elif self.type == "mean-std" :
seq = (seq-seq.mean())/seq.std()
else:
raise NameError('This normalization is not included!')
return seq | python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/Datasets/get_files/CWRU_get_files.py | Datasets/get_files/CWRU_get_files.py | import os
from scipy.io import loadmat
from Datasets.get_files.generalfunciton import sig_process
from Datasets.get_files.generalfunciton import add_noise
import numpy as np
import random
# set random seed
seed = 999
np.random.seed(seed)
random.seed(seed)
datasetname = ["12k Drive End Bearing Fault Data", "12k Fan End Bearing Fault Data", "48k Drive End Bearing Fault Data",
"Normal Baseline Data"]
normalname = ["97.mat", "98.mat", "99.mat", "100.mat"]
# For 12k Drive End Bearing Fault Data
dataname1 = ["105.mat", "118.mat", "130.mat", "169.mat", "185.mat", "197.mat", "209.mat", "222.mat",
"234.mat"] # 1797rpm
dataname2 = ["106.mat", "119.mat", "131.mat", "170.mat", "186.mat", "198.mat", "210.mat", "223.mat",
"235.mat"] # 1772rpm
dataname3 = ["107.mat", "120.mat", "132.mat", "171.mat", "187.mat", "199.mat", "211.mat", "224.mat",
"236.mat"] # 1750rpm
dataname4 = ["108.mat", "121.mat", "133.mat", "172.mat", "188.mat", "200.mat", "212.mat", "225.mat",
"237.mat"] # 1730rpm
# For 12k Fan End Bearing Fault Data
dataname5 = ["278.mat", "282.mat", "294.mat", "274.mat", "286.mat", "310.mat", "270.mat", "290.mat",
"315.mat"] # 1797rpm
dataname6 = ["279.mat", "283.mat", "295.mat", "275.mat", "287.mat", "309.mat", "271.mat", "291.mat",
"316.mat"] # 1772rpm
dataname7 = ["280.mat", "284.mat", "296.mat", "276.mat", "288.mat", "311.mat", "272.mat", "292.mat",
"317.mat"] # 1750rpm
dataname8 = ["281.mat", "285.mat", "297.mat", "277.mat", "289.mat", "312.mat", "273.mat", "293.mat",
"318.mat"] # 1730rpm
# For 48k Drive End Bearing Fault Data
dataname9 = ["109.mat", "122.mat", "135.mat", "174.mat", "189.mat", "201.mat", "213.mat", "226.mat",
"238.mat"] # 1797rpm
dataname10 = ["110.mat", "123.mat", "136.mat", "175.mat", "190.mat", "202.mat", "214.mat", "227.mat",
"239.mat"] # 1772rpm
dataname11 = ["111.mat", "124.mat", "137.mat", "176.mat", "191.mat", "203.mat", "215.mat", "228.mat",
"240.mat"] # 1750rpm
dataname12 = ["112.mat", "125.mat", "138.mat", "177.mat", "192.mat", "204.mat", "217.mat", "229.mat",
"241.mat"] # 1730rpm
dataname_48k_DE = [dataname9,dataname10,dataname11,dataname12]
# label
label = [1, 2, 3, 4, 5, 6, 7, 8, 9] # The failure data is labeled 1-9
lab_name = ['N','I1','B1','O1','I2','B2','O2','I3','B3','O3']
axis = ["_DE_time", "_FE_time", "_BA_time"]
# generate Training Datasets and Testing Datasets
def get_files(root, type='time', signal_size=1024,downsample_rate=1,SNR=None,load_condition=3):
'''
This function is used to generate the final training set and test set.
root:The location of the data set
type: 'time'| 'fft' | 'slice' | 'CWT' | 'STFT', depends on the signal type, and 'time' is used in this article
signal_size: length size of signal sample
downsample_rate: to downsample signal in a specific rate, 1 is used in this article
SNR: if SNR == None, no noise is added into dataset. else a specific noise is added to dataset
load_condition: 0|1|2|3, choose the load condition of signals.
'''
data_root1 = os.path.join( root, datasetname[3]) # For normal data
data_root2 = os.path.join( root, datasetname[2]) # For 48k Drive End Bearing Fault Data
path1 = os.path.join( data_root1, normalname[load_condition]) # 0->1797rpm ;1->1772rpm;2->1750rpm;3->1730rpm
data, lab = data_load(path1, type=type, signal_size=signal_size,label=0,downsample_rate=downsample_rate,SNR=SNR) # Extract normal data, whose label is 0
for i in range(len(dataname9)): # Extract fault data, whose label is 1-9
path2 = os.path.join(data_root2, dataname_48k_DE[load_condition][i])
data1, lab1 = data_load(path2, type=type, signal_size=signal_size, label=label[i],downsample_rate=downsample_rate,SNR=SNR)
data += data1
lab += lab1
return [data, lab], lab_name
def data_load(filename,type, signal_size, label,downsample_rate=1,SNR=None):
'''
This function is mainly used to generate test data and training data.
filename: Data location
type: 'time'| 'fft' | 'slice' | 'CWT' | 'STFT', depends on the signal type, and 'time' is used in this article
signal_size: length size of signal sample
label: assigned to data label
downsample_rate: to downsample signal in a specific rate, 1 is used in this article
SNR: if SNR == None, no noise is added into dataset. else a specific noise is added to dataset
'''
downsample_rate = max(int(downsample_rate), 1)
func = getattr(sig_process,type)
fl = loadmat(filename.replace('\\','/'))
for i,item in enumerate(fl.keys()): # find axis[0] of mat file
if axis[0] in item: # drive end
fl = fl[item]
break
if i == len(fl)-1:
raise ValueError("target item didn't found in mat file")
fl = fl.squeeze()[::downsample_rate]
if SNR is not None:
fl = add_noise(fl,SNR)
data = []
lab = []
start, end = 0, signal_size
step = int(signal_size//downsample_rate)
while end <= fl.shape[0]:
x = func(fl[start:end])
x = x.reshape([1] + list(x.shape)) # get channel dimension
data.append(x)
lab.append(label)
start += step
end += step
return data, lab | python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/Datasets/get_files/generalfunciton.py | Datasets/get_files/generalfunciton.py | import numpy as np
# import pywt
from scipy import signal
import pandas as pd
from torch.utils.data import Dataset
import random
# random.seed(999)
seed = 999
np.random.seed(seed)
random.seed(seed)
def add_noise(sig,SNR): # add noise to sig
noise = np.random.randn(*sig.shape)
noise_var = sig.var() / np.power(10,(SNR/20))
noise = noise /noise.std() * np.sqrt(noise_var)
return sig + noise
def data_transforms1d(aug,dataset_type="train", normlize_type="1-1",aug_flag = False):
if aug_flag:
train_trans = aug.Compose([
aug.to_numpy(),
aug.Normalize(normlize_type),
aug.RandomAddGaussian(),
aug.RandomScale(),
aug.RandomStretch(),
aug.RandomCrop(),
aug.Retype()
])
else:
train_trans = aug.Compose([
aug.to_numpy(),
aug.Normalize(normlize_type),
aug.Retype()
])
transforms = {
'train': train_trans,
'val': aug.Compose([
aug.to_numpy(),
aug.Normalize(normlize_type),
aug.Retype()
])
}
return transforms[dataset_type]
class sig_process(object):
nperseg = 30
adjust_flag = False
def __init__(self):
super(sig_process,self).__init__()
@classmethod
def time(cls,x):
return x
@classmethod
def fft(cls,x):
x = x - np.mean(x)
x = np.fft.fft(x)
x = np.abs(x) / len(x)
x = x[range(int(x.shape[0] / 2))]
x[1:-1] = 2*x[1:-1]
return x
@classmethod
def slice(cls,x):
w = int(np.sqrt(len(x)))
img = x[:w**2].reshape(w,w)
return img
@classmethod
def STFT(cls,x,verbose=False):
while not cls.adjust_flag:
_,_, Zxx = signal.stft(x, nperseg=cls.nperseg)
if abs(Zxx.shape[0] - Zxx.shape[1]) < 2:
cls.adjust_flag = True
elif Zxx.shape[0] > Zxx.shape[1]:
cls.nperseg -= 1
else:
cls.nperseg += 1
f, t, Zxx = signal.stft(x, nperseg=cls.nperseg)
img = np.abs(Zxx) / len(Zxx)
if verbose:
return f, t, img
else:
return img
@classmethod
def STFT8(cls,x,Nc=8):
f, t, Zxx = signal.stft(x, nperseg=Nc*2-1,noverlap=Nc*2-2)
img = np.abs(Zxx) / len(Zxx)
return img
@classmethod
def STFT16(cls,x):
return sig_process.STFT8(x,Nc=16)
@classmethod
def STFT32(cls,x):
return sig_process.STFT8(x, Nc=32)
@classmethod
def STFT64(cls,x):
return sig_process.STFT8(x, Nc=64)
@classmethod
def STFT128(cls,x):
return sig_process.STFT8(x, Nc=128)
@classmethod
def mySTFT(cls,x,verbose=False,nperseg=256,noverlap = None):
if not noverlap:
noverlap = nperseg//2
f, t, Zxx = signal.stft(x, nperseg=nperseg,noverlap=noverlap)
img = np.abs(Zxx) / len(Zxx)
if verbose:
return f, t, img
else:
return img
def balance_label(df,labelcol = 'label', number=None):
value_count = df[labelcol].value_counts()
if number == None:
number = value_count.to_numpy().min()
new_df = pd.concat([df.iloc[df.index[df[labelcol] == lab].to_numpy()[:number]] for lab in value_count.index.sort_values().to_list()])
return new_df | python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/utils/train_utils.py | utils/train_utils.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
"""
Created on 2023/11/23
@author: Chen Qian
@e-mail: chenqian2020@sjtu.edu.cn
"""
import logging
import os
import time
import warnings
import torch
from torch import nn
from torch import optim
import Models
import Datasets
import matplotlib.pyplot as plt
from utils.mysummary import summary
import numpy as np
import scipy.io as io
import random
import matplotlib as mplb
import seaborn as sns
# set random seed
seed = 999
np.random.seed(seed)
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
class train_utils(object):
def __init__(self, args, save_dir: str):
self.args = args
self.save_dir = save_dir
def setup(self):
"""
Initialize the datasets, models, loss and optimizer
:return:
"""
args = self.args
# Consider the gpu or cpu condition
if torch.cuda.is_available():
self.device = torch.device("cuda")
self.device_count = 1
self.device_count = torch.cuda.device_count()
logging.info('using {} gpus'.format(self.device_count))
assert args.batch_size % self.device_count == 0, "batch size should be divided by device count"
else:
warnings.warn("gpu is not available")
self.device = torch.device("cpu")
self.device_count = 1
logging.info('using {} cpu'.format(self.device_count))
# Load the datasets
dataset = getattr(Datasets, args.data_name)
self.datasets = {}
subargs = {k: getattr(args, k) for k in ['data_dir', 'data_type', 'normlizetype', 'test_size']}
(self.datasets['train'], self.datasets['val']), self.label_name = dataset(subargs) \
.data_preprare(signal_size=args.data_signalsize, SNR=self.args.SNR)
self.dataloaders = {x: torch.utils.data.DataLoader(self.datasets[x], batch_size=args.batch_size,
shuffle=(True if x == 'train' else False),
num_workers=args.num_workers,
pin_memory=(True if self.device == 'cuda' else False))
for x in ['train', 'val']}
logging.info(f"dataset_train:{len(self.datasets['train']):d}, dataset_train:{len(self.datasets['val']):d}")
# Define the models
self.model = getattr(Models, args.model_name) \
(in_channels=dataset.inputchannel, out_channels=dataset.num_classes, kernel_size=args.kernel_size,
clamp_flag=args.clamp_flag, mid_channel=args.mid_channel)
self.model.to(self.device)
# summary the model and record model info
try:
info = summary(self.model, self.datasets['train'][0][0].shape, batch_size=-1, device="cuda")
for item in info.split('\n'):
logging.info(item)
except:
print('summary does not work!')
self.criterion = nn.CrossEntropyLoss()
# Define the optimizer
if args.opt == 'sgd':
self.optimizer = optim.SGD(self.model.parameters(), lr=args.lr,
momentum=args.momentum, weight_decay=args.weight_decay)
elif args.opt == 'adam':
self.optimizer = optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
elif args.opt == 'RMSprop':
self.optimizer = optim.RMSprop(self.model.parameters(), lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
else:
raise Exception("optimizer not implement")
# Define the learning rate decay
if args.lr_scheduler == 'step':
steps = [int(step) for step in args.steps.split(',')]
self.lr_scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, steps, gamma=args.gamma)
elif args.lr_scheduler == 'exp':
self.lr_scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, args.gamma)
elif args.lr_scheduler == 'stepLR':
steps = int(args.steps)
self.lr_scheduler = optim.lr_scheduler.StepLR(self.optimizer, steps, args.gamma)
elif args.lr_scheduler == 'fix':
self.lr_scheduler = None
else:
raise Exception("lr schedule not implement")
# confusion matrix initialization
self.c_matrix = {phase: np.zeros([dataset.num_classes, dataset.num_classes]) for phase in ['train', 'val']}
def train(self):
"""
Training process
"""
args = self.args
best_acc = 0.0
step_start = time.time()
self.Records = {"train_loss": [], "train_acc": [], "val_loss": [], "val_acc": [],
"best_epoch": 0} # epoch-wise records
self.MinorRecords = {"train_loss": [], "train_acc": []} # batch-wise records
# Train the models via epochs
for epoch in range(args.max_epoch):
logging.info('-' * 5 + 'Epoch {}/{}'.format(epoch, args.max_epoch - 1) + '-' * 5)
# Record the learning rate
if self.lr_scheduler is not None:
logging.info('current lr: {}'.format(self.lr_scheduler.get_last_lr()))
else:
logging.info('current lr: {}'.format(args.lr))
# Each epoch has a training and val phase
for phase in ['train', 'val']:
# Define the temp variable
epoch_start = time.time()
epoch_acc = 0
epoch_loss = 0.0
batch_acc = 0
batch_loss = 0.0
batch_count = 0
# Set models to train mode or test mode
if phase == 'train':
self.model.train()
else:
self.model.eval()
# batch-wise training or testing
for batch_idx, (inputs, labels) in enumerate(self.dataloaders[phase]):
inputs = inputs.to(self.device)
labels = labels.to(self.device)
# Do the learning process, in val, we do not care about the gradient for relaxing
with torch.set_grad_enabled(phase == 'train'):
# forward
logits = self.model(inputs)
loss = self.criterion(logits, labels)
pred = logits.argmax(dim=1)
correct = torch.eq(pred, labels).float().sum().item()
loss_temp = loss.item() * inputs.size(0)
epoch_loss += loss_temp
epoch_acc += correct
# confusion matrix calculation
if epoch == args.max_epoch - 1:
for i, j in zip(labels.detach().cpu().numpy(), pred.detach().cpu().numpy()):
self.c_matrix[phase][i][j] += 1
# Calculate the training information
if phase == 'train':
# backward
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step() # update the model parameters
batch_loss += loss_temp
batch_acc += correct
batch_count += inputs.size(0)
# Print the training information
if (batch_idx + 1) % args.print_step == 0:
batch_loss = batch_loss / batch_count
batch_acc = batch_acc / batch_count
temp_time = time.time()
train_time = temp_time - step_start
step_start = temp_time
batch_time = train_time / args.print_step
sample_per_sec = 1.0 * batch_count / train_time
logging.info(
'<tempinfo> Epoch: {:3d} [{:4d}/{:4d}], step: {:3d}, Train Loss: {:.3f}, Train Acc: {:.2f}, {:8.1f} samples/sec, {:.4f} sec/batch'
.format(epoch, (batch_idx + 1) * len(inputs), len(self.dataloaders[phase].dataset),
batch_idx + 1, batch_loss, batch_acc * 100, sample_per_sec, batch_time))
self.MinorRecords['train_loss'].append(batch_loss)
self.MinorRecords['train_acc'].append(batch_acc)
batch_acc = 0
batch_loss = 0.0
batch_count = 0
# Print the epoch information during the end of each epoch (both train and val)
epoch_loss = epoch_loss / len(self.dataloaders[phase].dataset)
epoch_acc = epoch_acc / len(self.dataloaders[phase].dataset)
logging.info('<info> Epoch: {} {}-Loss: {:.4f} {}-Acc: {:.4f}, Cost {:.4f} sec'.format(
epoch, phase, epoch_loss, phase, epoch_acc * 100, time.time() - epoch_start
))
# Record the epoch information
self.Records["%s_loss" % phase].append(epoch_loss)
self.Records["%s_acc" % phase].append(epoch_acc)
# save the best and the final model if needed
if phase == 'val':
if epoch_acc > best_acc:
best_acc = epoch_acc
self.Records['best_epoch'] = epoch
logging.info("save best epoch {}, best acc {:.4f}".format(epoch, epoch_acc))
save_best_data_dir = os.path.join(self.save_dir,
'epoch{}-acc{:.4f}-best_model.pth'.format(epoch,
epoch_acc * 100))
save_best_data = {'epoch': epoch + 1, 'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(), 'opt': self.args.opt}
if epoch == args.max_epoch - 1:
if args.save_model:
# save the best models according to the val accuracy
torch.save(save_best_data, save_best_data_dir)
# save the final models
logging.info("save final epoch {}, final acc {:.4f}".format(epoch, epoch_acc))
save_data = {'epoch': epoch + 1, 'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(), 'opt': self.args.opt}
torch.save(save_data,
os.path.join(self.save_dir,
'epoch{}-acc{:.4f}-final_model.pth'.format(epoch, epoch_acc * 100)))
# Update the learning rate each epoch
if self.lr_scheduler is not None:
self.lr_scheduler.step()
# After training, save the records
# stacks Record list to numpy when finished train process
for k in self.Records.keys():
self.Records[k] = np.array(self.Records[k])
for k in self.MinorRecords.keys():
self.MinorRecords[k] = np.array(self.MinorRecords[k])
# save the records
io.savemat(os.path.join(self.save_dir, "Records.mat"), self.Records)
io.savemat(os.path.join(self.save_dir, "MinorRecords.mat"), self.MinorRecords)
# log the best and final acc and loss, final acc is the mean of the last 5 epochs generally
final_len = int(max(min(args.max_epoch * 0.5, 5), 1))
info = "max train acc in epoch {:2d}: {:10.6f}\n".format(self.Records['train_acc'].argmax() + 1,
self.Records['train_acc'].max()) \
+ "max val acc in epoch {:2d}: {:10.6f}\n".format(self.Records['val_acc'].argmax() + 1,
self.Records['val_acc'].max()) \
+ "final train acc: %.6f\n final val acc: %.6f\n" \
% (self.Records['train_acc'][-final_len:].mean(), self.Records['val_acc'][-final_len:].mean())
for item in info.split('\n'):
logging.info(item)
with open(os.path.join(self.save_dir, 'acc output.txt'), 'w') as f:
f.write(info)
def plot_save(self):
"""
plot confusion matrix and loss curve after training
"""
# set color
current_cmap = sns.color_palette("husl", 10)
sns.set(style="white")
sns.set(style="ticks", context="notebook", font='Times New Roman', palette=current_cmap, font_scale=1.5)
# make dir
self.save_dir_sub = os.path.join(self.save_dir, "postprosess")
if not os.path.exists(self.save_dir_sub):
os.makedirs(self.save_dir_sub)
# plot confusion matrix
mplb.rcParams['font.size'] = 12
for phase in ['train', 'val']:
f, ax = plt.subplots(figsize=(10, 8), dpi=100)
sns.heatmap(self.c_matrix[phase], annot=True, ax=ax)
ax.invert_yaxis()
ax.set_xticklabels(self.label_name)
ax.set_yticklabels(self.label_name)
ax.set_xlabel('predict', fontsize=15)
ax.set_ylabel('true', fontsize=15)
ax.set_title('confusion matrix: %s' % phase, fontsize=18)
f.savefig(os.path.join(self.save_dir_sub, "confusion_matrix_%s.jpg" % phase))
# plot MinorRecords
steps = np.arange(self.MinorRecords['train_loss'].shape[0]) + 1
steps = steps / len(steps) * self.args.max_epoch
fig = plt.figure(figsize=[10, 8], dpi=100)
ax1 = fig.add_subplot(2, 1, 1)
ax1.plot(steps, self.MinorRecords['train_loss'], "b-.d", markersize=3, linewidth=2)
ax1.set_xlabel('epoch', fontfamily='monospace')
ax1.set_ylabel('loss', fontfamily='monospace')
ax2 = fig.add_subplot(2, 1, 2)
ax2.plot(steps, self.MinorRecords['train_acc'] * 100, "b-.d", markersize=3, linewidth=2)
ax2.legend(['train acc', 'val acc'], loc=5)
ax2.set_xlabel('epoch', fontfamily='monospace')
ax2.set_ylabel('acc', fontfamily='monospace')
ax2.set_ylim([80, 100])
fig.tight_layout()
fig.savefig(os.path.join(self.save_dir_sub, "Minor_loss_acc.jpg"))
| python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/utils/logger.py | utils/logger.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import logging
def setlogger(path):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logFormatter = logging.Formatter("%(asctime)s %(message)s", "%m-%d %H:%M:%S")
fileHandler = logging.FileHandler(path)
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
| python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/utils/mysummary.py | utils/mysummary.py | """
Based on torchsummary.summary
"""
import torch
import torch.nn as nn
from collections import OrderedDict
import numpy as np
def summary(model, input_size, batch_size=-1, device="cuda"):
return_info = ""
def register_hook(module):
def hook(module, input, output):
class_name = str(module.__class__).split(".")[-1].split("'")[0]
module_idx = len(summary)
m_key = "%s-%i" % (class_name, module_idx + 1)
summary[m_key] = OrderedDict()
summary[m_key]["input_shape"] = list(input[0].size())
summary[m_key]["input_shape"][0] = batch_size
if isinstance(output, (list, tuple)):
summary[m_key]["output_shape"] = [
[-1] + list(o.size())[1:] for o in output
]
else:
summary[m_key]["output_shape"] = list(output.size())
summary[m_key]["output_shape"][0] = batch_size
params = 0
if hasattr(module, "weight") and hasattr(module.weight, "size"):
params += torch.prod(torch.LongTensor(list(module.weight.size())))
summary[m_key]["trainable"] = module.weight.requires_grad
if hasattr(module, "bias") and hasattr(module.bias, "size"):
params += torch.prod(torch.LongTensor(list(module.bias.size())))
summary[m_key]["nb_params"] = params
if (
not isinstance(module, nn.Sequential)
and not isinstance(module, nn.ModuleList)
and not (module == model)
):
hooks.append(module.register_forward_hook(hook))
device = device.lower()
assert device in [
"cuda",
"cpu",
], "Input device is not valid, please specify 'cuda' or 'cpu'"
if device == "cuda" and torch.cuda.is_available():
dtype = torch.cuda.FloatTensor
else:
dtype = torch.FloatTensor
# multiple inputs to the network
if isinstance(input_size, tuple):
input_size = [input_size]
# batch_size of 2 for batchnorm
x = [torch.rand(2, *in_size).type(dtype) for in_size in input_size]
# print(type(x[0]))
# create properties
summary = OrderedDict()
hooks = []
# register hook
model.apply(register_hook)
# make a forward pass
# print(x.shape)
model(*x)
# remove these hooks
for h in hooks:
h.remove()
print("----------------------------------------------------------------")
return_info += "----------------------------------------------------------------\n"
line_new = "{:>20} {:>25} {:>15}".format("Layer (type)", "Output Shape", "Param #")
print(line_new)
return_info += line_new +"\n"
print("================================================================")
return_info += "----------------------------------------------------------------\n"
total_params = 0
total_output = 0
trainable_params = 0
for layer in summary:
# input_shape, output_shape, trainable, nb_params
line_new = "{:>20} {:>25} {:>15}".format(
layer,
str(summary[layer]["output_shape"]),
"{0:,}".format(summary[layer]["nb_params"]),
)
total_params += summary[layer]["nb_params"]
total_output += np.prod(summary[layer]["output_shape"])
if "trainable" in summary[layer]:
if summary[layer]["trainable"] == True:
trainable_params += summary[layer]["nb_params"]
print(line_new)
return_info += line_new + "\n"
# assume 4 bytes/number (float on cuda).
total_input_size = abs(np.prod(input_size) * batch_size * 4. / (1024 ** 2.))
total_output_size = abs(2. * total_output * 4. / (1024 ** 2.)) # x2 for gradients
total_params_size = abs(total_params.numpy() * 4. / (1024 ** 2.))
total_size = total_params_size + total_output_size + total_input_size
print("================================================================")
print("Total params: {0:,}".format(total_params))
print("Trainable params: {0:,}".format(trainable_params))
print("Non-trainable params: {0:,}".format(total_params - trainable_params))
print("----------------------------------------------------------------")
print("Input size (MB): %0.2f" % total_input_size)
print("Forward/backward pass size (MB): %0.2f" % total_output_size)
print("Params size (MB): %0.2f" % total_params_size)
print("Estimated Total Size (MB): %0.2f" % total_size)
print("----------------------------------------------------------------")
return_info += "================================================================\n"
return_info += "Total params: {0:,}\n".format(total_params)
return_info += "Trainable params: {0:,}\n".format(trainable_params)
return_info += "Non-trainable params: {0:,}\n".format(total_params - trainable_params)
return_info += "----------------------------------------------------------------\n"
return_info += "Input size (MB): %0.2f\n" % total_input_size
return_info += "Forward/backward pass size (MB): %0.2f\n" % total_output_size
return_info += "Params size (MB): %0.2f\n" % total_params_size
return_info += "Estimated Total Size (MB): %0.2f\n" % total_size
return_info += "----------------------------------------------------------------\n"
return_info += "================================================================\n"
return return_info
| python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/Models/TFN.py | Models/TFN.py | """
Created on 2023/11/23
@author: Chen Qian
@e-mail: chenqian2020@sjtu.edu.cn
"""
from Models.TFconvlayer import *
from Models.BackboneCNN import CNN
from torch.nn import Conv1d
from utils.mysummary import summary
class Base_FUNC_CNN(CNN):
"""
the base class of TFN
"""
FuncConv1d = BaseFuncConv1d
funckernel_size = 21
def __init__(self, in_channels=1, out_channels=10, kernel_size=15, clamp_flag=True, mid_channel=16):
super().__init__(in_channels, out_channels, kernel_size)
# Reinitialize the first layer by changing the in_channels
args = {x: getattr(self.layer1[0], x) for x in
['in_channels', 'out_channels', 'kernel_size', 'stride', 'padding', 'bias']}
args['bias'] = None if (args['bias'] is None) else True
args['in_channels'] = mid_channel
self.layer1[0] = nn.Conv1d(**args)
# use the TFconvlayer as the first preprocessing layer
self.funconv = self.FuncConv1d(in_channels, mid_channel, self.funckernel_size,
padding=self.funckernel_size // 2,
bias=False, clamp_flag=clamp_flag)
self.superparams = self.funconv.superparams
def forward(self, x):
x = self.funconv(x)
return super().forward(x)
def getweight(self):
"""
get the weight and superparams of the first preprocessing layer (for recording)
"""
weight = self.funconv.weight.cpu().detach().numpy()
superparams = self.funconv.superparams.cpu().detach().numpy()
return weight, superparams
class TFN_STTF(Base_FUNC_CNN):
"""
TFN with TFconv-STTF as the first preprocessing layer
FuncConv1d = TFconv_STTF
kernel_size = mid_channel * 2 - 1
"""
FuncConv1d = TFconv_STTF
def __init__(self, mid_channel=16, **kwargs):
self.funckernel_size = mid_channel * 2 - 1
super().__init__(mid_channel=mid_channel, **kwargs)
class TFN_Chirplet(Base_FUNC_CNN):
"""
TFN with TFconv-Chirplet as the first preprocessing layer
FuncConv1d = TFconv_Chirplet
kernel_size = mid_channel * 2 - 1
"""
FuncConv1d = TFconv_Chirplet
def __init__(self, mid_channel=16, **kwargs):
self.funckernel_size = mid_channel * 2 - 1
super().__init__(mid_channel=mid_channel, **kwargs)
class TFN_Morlet(Base_FUNC_CNN):
"""
TFN with TFconv-Morlet as the first preprocessing layer
FuncConv1d = TFconv_Morlet
kernel_size = mid_channel * 10 - 1
"""
FuncConv1d = TFconv_Morlet
def __init__(self, mid_channel=16, **kwargs):
self.funckernel_size = mid_channel * 10 - 1
super().__init__(mid_channel=mid_channel, **kwargs)
class Random_conv(Conv1d):
"""
traditional Conv1d with random weight
"""
def __init__(self, *pargs, **kwargs):
new_kwargs = {k:v for k,v in kwargs.items() if k in ['in_channels','out_channels','kernel_size','stride','padding','bias']}
super().__init__(*pargs, **new_kwargs)
self.superparams = self.weight
class Random_CNN(Base_FUNC_CNN):
"""
Backbone-CNN with Random_conv as the first preprocessing layer
"""
FuncConv1d = Random_conv
def __init__(self, mid_channel=16, **kwargs):
self.funckernel_size = mid_channel * 2 - 1
super().__init__(mid_channel=mid_channel, **kwargs)
if __name__ == '__main__':
# test all the models
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
for Model in [Base_FUNC_CNN,TFN_STTF,TFN_Morlet,TFN_Chirplet,Random_CNN]:
print('\n\n'+"-"*50+'\n'+Model.__name__+'\n'+"-"*50)
models = Model()
models.to(device)
summary(models, (1, 1024), batch_size=-1, device="cuda")
print("\n\n") | python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/Models/TFconvlayer.py | Models/TFconvlayer.py | """
Created on 2023/11/23
@author: Chen Qian
@e-mail: chenqian2020@sjtu.edu.cn
"""
from torch import nn
import torch
import numpy as np
import random
import os
import torch.nn.init as init
import math
import torch.nn.functional as F
# random seed
seed = 999
np.random.seed(seed)
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# set the min and max frequency of the kernel function
fmin = 0.03
fmax = 0.45
random_scale = 2e-3
# traditional convolution layer with Real-Imaginary mechanism
class BaseConv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size[0] if isinstance(kernel_size, tuple) else kernel_size
self.stride = stride
self.padding = padding
self.phases = ['real', 'imag']
self.weight = torch.Tensor(len(self.phases), out_channels, in_channels, self.kernel_size)
if bias:
self.bias = torch.Tensor(len(self.phases), out_channels)
else:
self.bias = None
for phase in self.phases:
self.weight[self.phases.index(phase)] = torch.Tensor(out_channels, in_channels, self.kernel_size)
init.kaiming_uniform_(self.weight[self.phases.index(phase)], a=math.sqrt(5)) # initial weight
if bias:
self.bias[self.phases.index(phase)] = torch.Tensor(out_channels)
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight[self.phases.index(phase)]) # initial bias
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias[self.phases.index(phase)], -bound, bound)
if self.__class__.__name__ == 'BaseConv1d':
self.weight = torch.nn.Parameter(self.weight)
if self.bias is not None:
self.bias = torch.nn.Parameter(self.bias)
def forward(self, input):
result = {}
# the output of the convolution layer is the square root of the sum of the squares of the real and imaginary parts
for phase in self.phases:
if self.bias is None:
result[phase] = F.conv1d(input, self.weight[self.phases.index(phase)],
stride=self.stride, padding=self.padding)
else:
result[phase] = F.conv1d(input, self.weight[self.phases.index(phase)],
bias=self.bias[self.phases.index(phase)],
stride=self.stride, padding=self.padding)
output = torch.sqrt(result[self.phases[0]].pow(2) + result[self.phases[1]].pow(2))
return output
# prepare for kernel function by adding limits and rewrite forward function
class BaseFuncConv1d(BaseConv1d):
def __init__(self, *pargs, **kwargs):
kwargs_new = {}
for k in kwargs.keys():
if k in ['in_channels', 'out_channels', 'kernel_size', 'stride', 'padding', 'bias']:
kwargs_new[k] = kwargs[k]
super().__init__(*pargs, **kwargs_new)
if self.__class__.__name__ == 'BaseFuncConv1d':
self.weight = torch.nn.Parameter(self.weight)
if self.bias is not None:
self.bias = torch.nn.Parameter(self.bias)
self.superparams = self.weight
def _clamp_parameters(self):
with torch.no_grad():
for i in range(len(self.params_bound)):
self.superparams.data[:, :, i].clamp_(self.params_bound[i][0], self.params_bound[i][1])
def WeightForward(self):
if self.clamp_flag:
self._clamp_parameters()
l00 = []
for phase in self.phases:
l0 = []
for i in range(self.superparams.shape[0]):
l1 = []
for j in range(self.superparams.shape[1]):
l1.append(self.weightforward(self.kernel_size, self.superparams[i, j], phase).unsqueeze(0))
l0.append(torch.vstack(l1).unsqueeze(0))
l00.append(torch.vstack(l0).unsqueeze(0))
self.weight = torch.vstack(l00)
def forward(self, input):
if self.__class__.__name__ != 'BaseFuncConv1d':
self.WeightForward()
return super().forward(input)
# kernel functions have differences in the weightforward function and the _reset_parameters function
class TFconv_STTF(BaseFuncConv1d): # kernel_size = out_channels * 2 - 1
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=False,
clamp_flag=True, params_bound=((0, 0.5),)):
super().__init__(in_channels, out_channels, kernel_size, stride, padding, bias)
self.clamp_flag = clamp_flag
self.params_bound = params_bound
self.superparams = torch.nn.Parameter(torch.Tensor(out_channels, in_channels, len(params_bound)))
self._reset_parameters()
if self.bias is not None:
self.bias = torch.nn.Parameter(self.bias)
def _reset_parameters(self):
with torch.no_grad():
shape = self.superparams.data[:, :, 0].shape
temp0 = (torch.linspace(fmin, fmax, shape.numel())).reshape(shape)
self.superparams.data[:, :, 0] = temp0
self.WeightForward()
def weightforward(self, lens, params, phase):
if isinstance(lens, torch.Tensor):
lens = int(lens.item())
T = torch.arange(-(lens // 2), lens - (lens // 2)).to(params.device)
sigma = torch.tensor(0.52).to(params.device)
if self.phases.index(phase) == 0:
result = torch.exp(-(T / (lens // 2)).pow(2) / sigma.pow(2) / 2) * torch.cos(2 * math.pi * params[0] * T)
else:
result = torch.exp(-(T / (lens // 2)).pow(2) / sigma.pow(2) / 2) * torch.sin(2 * math.pi * params[0] * T)
return result
class TFconv_Chirplet(BaseFuncConv1d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=False,
clamp_flag=True):
max_t = kernel_size // 2
params_bound = ((0, 0.5), (-1 / max_t ** 2, 1 / max_t ** 2))
super().__init__(in_channels, out_channels, kernel_size, stride, padding, bias)
self.clamp_flag = clamp_flag
self.params_bound = params_bound
self.superparams = torch.nn.Parameter(torch.Tensor(out_channels, in_channels, len(params_bound)))
self._reset_parameters()
if self.bias is not None:
self.bias = torch.nn.Parameter(self.bias)
def _reset_parameters(self):
with torch.no_grad():
shape = self.superparams.data[:, :, 0].shape
temp0 = (torch.linspace(fmin, fmax, shape.numel())).reshape(shape)
self.superparams.data[:, :, 0] = temp0
self.superparams.data[:, :, 1].normal_(0, 1e-4)
self.WeightForward()
def weightforward(self, lens, params, phase):
if isinstance(lens, torch.Tensor):
lens = int(lens.item())
T = torch.arange(-(lens // 2), lens - (lens // 2)).to(params.device)
sigma = torch.tensor(0.52).to(params.device)
if self.phases.index(phase) == 0:
result = torch.exp(-(T / (lens // 2)).pow(2) / sigma.pow(2) / 2) * torch.cos(
2 * math.pi * (params[1] / 2 * T.pow(2) + params[0] * T))
else:
result = torch.exp(-(T / (lens // 2)).pow(2) / sigma.pow(2) / 2) * torch.sin(
2 * math.pi * (params[1] / 2 * T.pow(2) + params[0] * T))
return result
class TFconv_Morlet(BaseFuncConv1d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=False,
clamp_flag=True, params_bound=((0.4, 10),)):
super().__init__(in_channels, out_channels, kernel_size, stride, padding, bias)
self.clamp_flag = clamp_flag
self.params_bound = params_bound
self.superparams = torch.nn.Parameter(torch.Tensor(out_channels, in_channels, len(params_bound)))
self._reset_parameters()
if self.bias is not None:
self.bias = torch.nn.Parameter(self.bias)
def _reset_parameters(self):
with torch.no_grad():
shape = self.superparams.data[:, :, 0].shape
temp_f = torch.pow(torch.tensor(2), (torch.linspace(np.log2(fmin), np.log2(fmax), shape.numel())))
temp_s = (0.2 / temp_f).reshape(shape)
self.superparams.data[:, :, 0] = temp_s
self.WeightForward()
def weightforward(self, lens, params, phase):
if isinstance(lens, torch.Tensor):
lens = int(lens.item())
T = torch.arange(-(lens // 2), lens - (lens // 2)).to(params.device)
T = T / params[0]
sigma = torch.tensor(0.6).to(params.device)
fc_len = int(self.out_channels)
if self.phases.index(phase) == 0:
result = params[0].pow(-1) * torch.exp(-(T / fc_len).pow(2) / sigma.pow(2) / 2) * torch.cos(
2 * math.pi * 0.2 * T)
else:
result = params[0].pow(-1) * torch.exp(-(T / fc_len).pow(2) / sigma.pow(2) / 2) * torch.sin(
2 * math.pi * 0.2 * T)
return result
if __name__ == '__main__':
# test TFconv_STTF, TFconv_Chirplet, TFconv_Morlet
for item in [TFconv_STTF, TFconv_Chirplet, TFconv_Morlet]:
print(item.__name__)
model = TFconv_STTF(1, 8, 15, padding=7, stride=1,
bias=False, clamp_flag=True) # kernel_size should
input = torch.randn([1, 1, 1024])
out = model(input)
out.mean().backward()
print(out.shape)
print(f'{item.__name__:s} test pass')
| python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
ChenQian0618/TFN | https://github.com/ChenQian0618/TFN/blob/bb8af32c380fa047631b960e6a08ed0b261f1874/Models/BackboneCNN.py | Models/BackboneCNN.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
from torch import nn
import torch
from utils.mysummary import summary
# ----------------------------inputsize = 1024-------------------------------------------------------------------------
class CNN(nn.Module):
def __init__(self, in_channels=1, out_channels=10,kernel_size=15,**kwargs):
super(CNN, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv1d(in_channels, 16, kernel_size=kernel_size, bias=True), # 16, 1010
nn.BatchNorm1d(16),
nn.ReLU(inplace=True))
self.layer2 = nn.Sequential(
nn.Conv1d(16, 32, kernel_size=3, bias=True), # 32, 1008
nn.BatchNorm1d(32),
nn.ReLU(inplace=True),
nn.MaxPool1d(kernel_size=2, stride=2)) # 32, 504
self.layer3 = nn.Sequential(
nn.Conv1d(32, 64, kernel_size=3, bias=True), # 64,502
nn.BatchNorm1d(64),
nn.ReLU(inplace=True))
self.layer4 = nn.Sequential(
nn.Conv1d(64, 128, kernel_size=3, bias=True), # 128,500
nn.BatchNorm1d(128),
nn.ReLU(inplace=True),
nn.AdaptiveMaxPool1d(4)) # 128, 4
self.layer5 = nn.Sequential(
nn.Linear(128 * 4, 256),
nn.ReLU(inplace=True),
nn.Linear(256, 64),
nn.ReLU(inplace=True))
self.fc = nn.Linear(64, out_channels)
def forward(self, x):
if len(x.shape) == 4:
x = torch.squeeze(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = x.view(x.size(0), -1)
x = self.layer5(x)
x = self.fc(x)
return x
if __name__ == '__main__':
model = CNN()
info = summary(model, (1, 1024), batch_size=-1, device="cpu")
print(info) | python | MIT | bb8af32c380fa047631b960e6a08ed0b261f1874 | 2026-01-05T07:14:43.861108Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.