sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
keras-team/keras:keras/src/quantizers/awq.py | """AWQ (Activation-aware Weight Quantization) algorithm implementation.
AWQ protects salient weights by finding optimal per-channel scales based on
activation magnitudes, then applies those scales before quantization.
Reference: https://arxiv.org/abs/2306.00978
"""
import types
from keras.src import ops
from keras.src.layers import Dense
from keras.src.layers import EinsumDense
from keras.src.quantizers.quantizers import compute_quantization_parameters
from keras.src.quantizers.quantizers import dequantize_with_sz_map
from keras.src.quantizers.quantizers import dequantize_with_zero_point
from keras.src.quantizers.quantizers import quantize_with_sz_map
from keras.src.quantizers.quantizers import quantize_with_zero_point
def awq_search_optimal_scales(
    weights,
    activation_magnitudes,
    *,
    num_grid_points=20,
    group_size=-1,
):
    """Search for optimal AWQ scales using grid search.

    The AWQ algorithm finds scaling factors that protect salient weights.
    For each channel, we search for an optimal ratio in [0, 1] that minimizes
    the activation-weighted quantization error.

    The key insight: we MULTIPLY weights by scales before quantization to
    expand salient weights. This ensures quantization noise is small relative
    to the expanded weight magnitude. During inference, we divide by scales
    to restore the original magnitude.

    Scale formula: scales = x_max.pow(ratio).clamp(min=1e-4)
    Loss function: Activation-weighted MSE (approximates output error)

    Args:
        weights: Weight tensor [out_features, in_features] (transposed kernel).
        activation_magnitudes: Per-channel activation magnitudes [in_features].
        num_grid_points: Number of grid search points. Defaults to 20.
        group_size: Group size for quantization (-1 for per-channel).

    Returns:
        best_scales: Optimal per-channel scales [in_features].
    """
    in_features = ops.shape(weights)[1]
    # Compute per-channel activation magnitudes (x_max)
    # activations should already be per-channel max magnitudes
    x_max = ops.cast(activation_magnitudes, "float32")
    # Avoid zero or very small values (would collapse scales to ~0)
    x_max = ops.where(ops.less(x_max, 1e-8), ops.ones_like(x_max), x_max)
    best_loss = None
    # Identity scales are the fallback if the search never improves.
    best_scales = ops.ones((in_features,), dtype="float32")
    # Grid search over ratio values from 0 to 1 (inclusive at both ends)
    for i in range(num_grid_points + 1):
        ratio = i / num_grid_points
        # Compute scales: x_max^ratio (clipped to avoid numerical issues)
        if ratio == 0:
            # x^0 == 1, so skip the power computation entirely
            scales = ops.ones_like(x_max)
        else:
            scales = ops.power(x_max, ratio)
            scales = ops.maximum(scales, 1e-4)
        # Normalize scales by the geometric mean of the extremes to avoid
        # extreme values dominating the quantization range
        scale_mean = ops.sqrt(ops.multiply(ops.max(scales), ops.min(scales)))
        scale_mean = ops.maximum(scale_mean, 1e-8)
        scales = ops.divide(scales, scale_mean)
        # Apply scales to weights by MULTIPLYING (expand salient weights)
        # weights_scaled: [out_features, in_features]
        weights_scaled = ops.multiply(weights, scales)
        if group_size == -1:
            # Per-channel quantization (no grouping)
            scale_q, zero_q, maxq = compute_quantization_parameters(
                weights_scaled,
                bits=4,
                symmetric=False,
                per_channel=True,
                group_size=-1,
                compute_dtype="float32",
            )
            # Quantize and dequantize to measure the round-trip error
            quantized = quantize_with_zero_point(
                weights_scaled, scale_q, zero_q, maxq
            )
            dequantized = dequantize_with_zero_point(quantized, scale_q, zero_q)
        else:
            # Grouped quantization - use proper per-row grouping
            scale_q, zero_q, maxq = compute_quantization_parameters(
                weights_scaled,
                bits=4,
                symmetric=False,
                per_channel=True,
                group_size=group_size,
                compute_dtype="float32",
            )
            # Compute group indices: maps each input feature to its group
            g_idx = ops.cast(ops.arange(0, in_features) // group_size, "int32")
            # Quantize and dequantize using group index mapping
            quantized = quantize_with_sz_map(
                weights_scaled, scale_q, zero_q, g_idx, maxq
            )
            dequantized = dequantize_with_sz_map(
                quantized, scale_q, zero_q, g_idx
            )
        # Scale back down by DIVIDING to restore original magnitude
        reconstructed = ops.divide(dequantized, scales)
        # Compute activation-weighted MSE loss
        # This approximates the output error: ||W*X - W_hat*X||^2
        # by weighting each channel's error by x_max^2
        weight_error = ops.square(ops.subtract(weights, reconstructed))
        # Weight by activation magnitudes squared (broadcast over out_features)
        weighted_error = ops.multiply(weight_error, ops.square(x_max))
        loss = ops.mean(weighted_error)
        # Track best (first iteration seeds the running minimum)
        if best_loss is None:
            best_loss = loss
            best_scales = scales
        else:
            is_better = ops.less(loss, best_loss)
            if is_better:
                best_loss = loss
                best_scales = scales
    return best_scales
def awq_quantize_matrix(
    weights_transpose,
    activation_magnitudes,
    *,
    num_grid_points=20,
    group_size=-1,
):
    """Run the complete AWQ pipeline on a single weight matrix.

    Steps:
    1. Grid-search the optimal per-channel AWQ scales.
    2. Multiply the weights by those scales (expanding salient weights).
    3. Compute quantization parameters for the scaled weights.
    4. Quantize the scaled weights.

    Args:
        weights_transpose: Weight matrix [out_features, in_features].
        activation_magnitudes: Per-channel activation magnitudes [in_features].
        num_grid_points: Number of grid search points.
        group_size: Group size for quantization (-1 for per-channel).

    Returns:
        quantized_weights: Quantized weights [out_features, in_features].
        scales: Quantization scales [out_features, num_groups].
        zeros: Zero points [out_features, num_groups].
        awq_scales: AWQ per-channel scales [in_features].
        g_idx: Group indices [in_features], stored as float32.
    """
    num_inputs = ops.shape(weights_transpose)[1]
    # Step 1: find the per-channel AWQ scales via grid search.
    awq_scales = awq_search_optimal_scales(
        weights_transpose,
        activation_magnitudes,
        num_grid_points=num_grid_points,
        group_size=group_size,
    )
    # Step 2: expand salient weights by MULTIPLYING with the AWQ scales.
    # scaled: [out_features, in_features]
    scaled = ops.multiply(weights_transpose, awq_scales)
    # Step 3: quantization parameters. `group_size` is forwarded directly,
    # so -1 yields per-channel parameters and a positive value yields
    # grouped parameters.
    scale_q, zero_q, maxq = compute_quantization_parameters(
        scaled,
        bits=4,
        symmetric=False,
        per_channel=True,
        group_size=group_size,
        compute_dtype="float32",
    )
    # Step 4: quantize, using a group-index map only when grouping is on.
    if group_size == -1:
        q_weights = quantize_with_zero_point(scaled, scale_q, zero_q, maxq)
        # All features share group 0 in the per-channel case.
        group_ids = ops.zeros((num_inputs,), dtype="float32")
    else:
        group_ids = ops.cast(ops.arange(0, num_inputs) // group_size, "int32")
        q_weights = quantize_with_sz_map(
            scaled, scale_q, zero_q, group_ids, maxq
        )
        # Stored as float32 like the per-channel branch.
        group_ids = ops.cast(group_ids, "float32")
    return q_weights, scale_q, zero_q, awq_scales, group_ids
class AWQ:
    """AWQ quantizer for a single layer.

    This class accumulates activation statistics during calibration and
    performs AWQ quantization on layer weights.

    The AWQ algorithm works by:
    1. Collecting per-channel maximum activation magnitudes
    2. Using activation magnitudes to determine weight saliency
    3. Finding optimal per-channel scales via grid search
    4. Applying scales before quantization to protect salient weights

    Args:
        layer: The layer to quantize (Dense or EinsumDense).
        config: AWQConfig instance with quantization parameters.
    """

    def __init__(self, layer, config=None):
        # Imported here (not at module top) to avoid a circular import.
        from keras.src.quantizers.awq_config import AWQConfig

        self.original_layer = layer
        self.config = config or AWQConfig(dataset=None, tokenizer=None)
        # Total number of calibration rows seen so far.
        self.num_samples = 0
        # Handle Dense and EinsumDense layers
        if isinstance(layer, Dense) or (
            isinstance(layer, EinsumDense) and layer.kernel.ndim == 2
        ):
            self.kernel_shape = layer.kernel.shape
            self.rows = self.kernel_shape[0]  # in_features
            self.columns = self.kernel_shape[1]  # out_features
            self.layer = layer
        elif isinstance(layer, EinsumDense) and layer.kernel.ndim == 3:
            # Handle 3D EinsumDense layers (typically from attention blocks)
            self.kernel_shape = layer.kernel.shape
            shape = list(self.kernel_shape)
            # Heuristic: the largest axis is taken to be the model
            # dimension — presumably d_model dominates heads/head_dim;
            # TODO(review): confirm this holds for unusual geometries.
            d_model_dim_index = shape.index(max(shape))
            if d_model_dim_index == 0:  # QKV projection case
                in_features, heads, head_dim = shape
                self.rows = in_features
                self.columns = heads * head_dim
            elif d_model_dim_index in [1, 2]:  # Attention Output case
                heads, head_dim, out_features = shape
                self.rows = heads * head_dim
                self.columns = out_features
            else:
                # Defensive: unreachable for a 3-element shape, kept as a
                # guard in case the heuristic above changes.
                raise ValueError(
                    f"Cannot determine dimensions for EinsumDense kernel "
                    f"shape {shape}"
                )
            # Create a temporary object that holds a reshaped 2D version
            # of the kernel so the rest of the pipeline can treat this
            # layer like a plain Dense layer.
            self.layer = types.SimpleNamespace(
                kernel=ops.reshape(layer.kernel, (self.rows, self.columns)),
            )
        else:
            raise TypeError(f"Unsupported layer type for AWQ: {type(layer)}")
        # Initialize activation magnitude accumulator (per-channel max)
        self.activation_magnitudes = ops.zeros((self.rows,), dtype="float32")

    def update_activation_magnitudes(self, input_batch):
        """Update per-channel activation magnitude statistics.

        This method tracks the maximum absolute activation value for each
        input channel across all calibration batches.

        Args:
            input_batch: Input activations tensor [batch, ..., in_features].

        Raises:
            ValueError: If `input_batch` is None or empty.
        """
        if input_batch is None:
            raise ValueError("Input tensor cannot be None.")
        if ops.size(input_batch) == 0:
            raise ValueError("Input tensor cannot be empty.")
        # Flatten to [batch_samples, in_features]
        if len(input_batch.shape) > 2:
            input_batch = ops.reshape(input_batch, (-1, input_batch.shape[-1]))
        x = ops.cast(input_batch, "float32")
        # Compute per-channel max absolute value for this batch
        batch_max = ops.max(ops.abs(x), axis=0)
        # Update running max (element-wise across channels)
        self.activation_magnitudes = ops.maximum(
            self.activation_magnitudes, batch_max
        )
        self.num_samples = self.num_samples + int(ops.shape(x)[0])

    def quantize_layer(self):
        """Perform AWQ quantization on the layer.

        This method:
        1. Runs the AWQ grid search to find optimal scales
        2. Quantizes the layer weights
        3. Updates the layer's quantized variables
        """
        # Imported here to avoid a circular import at module load time.
        from keras.src import quantizers

        # Work in [out_features, in_features] layout, as expected by
        # awq_quantize_matrix.
        weights_matrix = ops.transpose(self.layer.kernel)
        # Perform AWQ quantization
        quantized, scale, zero, awq_scales, g_idx = awq_quantize_matrix(
            weights_matrix,
            self.activation_magnitudes,
            num_grid_points=self.config.num_grid_points,
            group_size=self.config.group_size,
        )
        # Cast to uint8 for storage
        # quantized is already [out_features, in_features]
        quantized = ops.cast(quantized, "uint8")
        # Pack to 4-bit along axis 0 (output features)
        quantized_packed, _, _ = quantizers.pack_int4(
            quantized, axis=0, dtype="uint8"
        )
        # Assign to layer variables. The float kernel is deleted first so
        # memory is freed before the quantized tensors are stored.
        del self.original_layer._kernel
        self.original_layer.quantized_kernel.assign(quantized_packed)
        self.original_layer.kernel_scale.assign(scale)
        self.original_layer.kernel_zero.assign(zero)
        self.original_layer.awq_scales.assign(awq_scales)
        self.original_layer.g_idx.assign(g_idx)
        self.original_layer.is_awq_calibrated = True

    def free(self):
        """Free memory used by the quantizer."""
        # Drop the accumulated statistics and the (possibly reshaped)
        # kernel reference so calibration buffers can be reclaimed.
        del self.activation_magnitudes
        del self.layer
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/quantizers/awq.py",
"license": "Apache License 2.0",
"lines": 296,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
keras-team/keras:keras/src/quantizers/awq_config.py | from keras.src.api_export import keras_export
from keras.src.quantizers.quantization_config import QuantizationConfig
@keras_export("keras.quantizers.AWQConfig")
class AWQConfig(QuantizationConfig):
    """Configuration for AWQ (Activation-aware Weight Quantization).

    AWQ is a post-training, weight-only quantization scheme. It runs
    calibration data through the model, measures per-channel activation
    magnitudes to identify salient weight channels, grid-searches optimal
    per-channel scaling factors, and applies that scaling before 4-bit
    quantization to minimize accuracy loss.

    References:
        - Paper: "AWQ: Activation-aware Weight Quantization for LLM
          Compression and Acceleration" (https://arxiv.org/abs/2306.00978)
        - Reference implementation: https://github.com/mit-han-lab/llm-awq

    Args:
        dataset: The calibration dataset. Any iterable yielding strings or
            pre-tokenized numerical tensors (e.g. a list of strings, a
            generator, or a NumPy array). Used to analyze activation
            patterns.
        tokenizer: A tokenizer instance (or similar callable) used to
            process the `dataset`.
        weight_bits: Bit width for weight quantization. AWQ presently
            supports only 4-bit quantization. Defaults to 4.
        num_samples: Number of calibration samples drawn from the dataset.
            Defaults to 128.
        sequence_length: Sequence length used for each calibration sample.
            Defaults to 512.
        group_size: Number of weights quantized together; -1 selects
            per-channel quantization. Defaults to 128.
        num_grid_points: Number of grid-search points for finding the
            per-channel scales; more points may find better scales at the
            cost of time. Defaults to 20.
        quantization_layer_structure: Optional dict describing the model's
            quantization structure, with keys:
            - "pre_block_layers": layers run before the first block
              (e.g. the embedding layer).
            - "sequential_blocks": transformer blocks to quantize in order.
            If omitted, the model must implement
            `get_quantization_layer_structure`.

    Example:

    ```python
    from keras.quantizers import AWQConfig

    config = AWQConfig(
        dataset=calibration_data,
        tokenizer=your_tokenizer,
        num_samples=128,
        sequence_length=512,
        group_size=128,
        num_grid_points=20,
    )
    model.quantize("awq", config=config)
    ```
    """

    def __init__(
        self,
        dataset,
        tokenizer,
        *,
        weight_bits: int = 4,
        num_samples: int = 128,
        sequence_length: int = 512,
        group_size: int = 128,
        num_grid_points: int = 20,
        quantization_layer_structure: dict = None,
    ):
        super().__init__()
        # Validate all hyperparameters up front; AWQ is 4-bit only.
        if weight_bits != 4:
            raise ValueError(
                f"AWQ only supports 4-bit quantization. "
                f"Received weight_bits={weight_bits}."
            )
        if num_samples <= 0:
            raise ValueError("num_samples must be a positive integer.")
        if sequence_length <= 0:
            raise ValueError("sequence_length must be a positive integer.")
        if group_size < -1 or group_size == 0:
            raise ValueError(
                "Invalid group_size. Supported values are -1 (per-channel) "
                f"or a positive integer, but got {group_size}."
            )
        if num_grid_points <= 0:
            raise ValueError("num_grid_points must be a positive integer.")
        self.dataset = dataset
        self.tokenizer = tokenizer
        self.weight_bits = weight_bits
        self.num_samples = num_samples
        self.sequence_length = sequence_length
        self.group_size = group_size
        self.num_grid_points = num_grid_points
        self.quantization_layer_structure = quantization_layer_structure

    @property
    def mode(self):
        # Identifier used by the quantization framework to select AWQ.
        return "awq"

    def dtype_policy_string(self):
        """Return the dtype policy string, e.g. `"awq/4/128"`."""
        return f"awq/{self.weight_bits}/{self.group_size}"

    def get_config(self):
        """Return a serializable config dict.

        The dataset and tokenizer are needed only for one-time calibration
        and are deliberately serialized as `None`.
        """
        return {
            "dataset": None,
            "tokenizer": None,
            "weight_bits": self.weight_bits,
            "num_samples": self.num_samples,
            "sequence_length": self.sequence_length,
            "group_size": self.group_size,
            "num_grid_points": self.num_grid_points,
            "quantization_layer_structure": self.quantization_layer_structure,
        }

    @classmethod
    def from_config(cls, config):
        """Rebuild an `AWQConfig` from the output of `get_config`."""
        return cls(**config)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/quantizers/awq_config.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
keras-team/keras:keras/src/quantizers/awq_config_test.py | import pytest
from keras.src import testing
from keras.src.quantizers.awq_config import AWQConfig
@pytest.mark.requires_trainable_backend
class AWQConfigTest(testing.TestCase):
    """Unit tests covering `AWQConfig` validation and serialization."""

    class MockTokenizer:
        """Minimal tokenizer stand-in; the config only stores it."""

        def __init__(self):
            pass

    def _build(self, **overrides):
        # Shared builder: every config requires a dataset and tokenizer.
        return AWQConfig(
            dataset=["test"], tokenizer=self.MockTokenizer(), **overrides
        )

    def test_config_defaults(self):
        """Defaults match the documented values."""
        config = self._build()
        self.assertEqual(config.weight_bits, 4)
        self.assertEqual(config.num_samples, 128)
        self.assertEqual(config.sequence_length, 512)
        self.assertEqual(config.group_size, 128)
        self.assertEqual(config.num_grid_points, 20)
        self.assertEqual(config.mode, "awq")

    def test_config_custom_values(self):
        """Explicit keyword arguments override the defaults."""
        config = self._build(
            num_samples=64,
            sequence_length=256,
            group_size=64,
            num_grid_points=30,
        )
        self.assertEqual(config.num_samples, 64)
        self.assertEqual(config.sequence_length, 256)
        self.assertEqual(config.group_size, 64)
        self.assertEqual(config.num_grid_points, 30)

    def test_config_only_4bit(self):
        """Any bit width other than 4 is rejected."""
        with self.assertRaisesRegex(ValueError, "only supports 4-bit"):
            self._build(weight_bits=8)

    def test_config_invalid_num_samples(self):
        """Non-positive num_samples is rejected."""
        with self.assertRaisesRegex(ValueError, "num_samples must be"):
            self._build(num_samples=0)

    def test_config_invalid_sequence_length(self):
        """Non-positive sequence_length is rejected."""
        with self.assertRaisesRegex(ValueError, "sequence_length must be"):
            self._build(sequence_length=-1)

    def test_config_invalid_group_size(self):
        """group_size of 0 is rejected."""
        with self.assertRaisesRegex(ValueError, "Invalid group_size"):
            self._build(group_size=0)

    def test_config_invalid_num_grid_points(self):
        """Non-positive num_grid_points is rejected."""
        with self.assertRaisesRegex(ValueError, "num_grid_points must be"):
            self._build(num_grid_points=0)

    def test_config_per_channel_group_size(self):
        """group_size=-1 (per-channel quantization) is valid."""
        config = self._build(group_size=-1)
        self.assertEqual(config.group_size, -1)

    def test_config_serialization(self):
        """get_config keeps hyperparameters but drops calibration inputs."""
        config = self._build(group_size=64, num_grid_points=30)
        cfg = config.get_config()
        self.assertEqual(cfg["weight_bits"], 4)
        self.assertEqual(cfg["group_size"], 64)
        self.assertEqual(cfg["num_grid_points"], 30)
        # Dataset and tokenizer should not be serialized
        self.assertIsNone(cfg["dataset"])
        self.assertIsNone(cfg["tokenizer"])

    def test_dtype_policy_string(self):
        """The dtype policy string encodes bit width and group size."""
        config = self._build(group_size=128)
        self.assertEqual(config.dtype_policy_string(), "awq/4/128")
        config2 = self._build(group_size=-1)
        self.assertEqual(config2.dtype_policy_string(), "awq/4/-1")

    def test_awq_config_serialization(self):
        """Round-trip through get_config/from_config preserves fields."""
        config = self._build(
            weight_bits=4,
            num_samples=64,
            sequence_length=256,
            group_size=64,
            num_grid_points=30,
        )
        restored = AWQConfig.from_config(config.get_config())
        # Compare the serializable fields (dataset/tokenizer are not
        # serialized, so they are excluded from the comparison).
        self.assertEqual(config.weight_bits, restored.weight_bits)
        self.assertEqual(config.num_samples, restored.num_samples)
        self.assertEqual(config.sequence_length, restored.sequence_length)
        self.assertEqual(config.group_size, restored.group_size)
        self.assertEqual(config.num_grid_points, restored.num_grid_points)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/quantizers/awq_config_test.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/quantizers/awq_core.py | """AWQ core functionality for layer-wise quantization.
This module provides the orchestration logic for applying AWQ quantization
to transformer models in a layer-by-layer fashion.
"""
from contextlib import contextmanager
from absl import logging
from keras.src import ops
from keras.src import utils as keras_utils
from keras.src.dtype_policies.dtype_policy import AWQDTypePolicy
from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
from keras.src.quantizers.awq import AWQ
from keras.src.quantizers.awq_config import AWQConfig
from keras.src.quantizers.gptq_core import find_layers_in_block
from keras.src.quantizers.gptq_core import get_dataloader
from keras.src.quantizers.utils import should_quantize_layer
@contextmanager
def stream_activations(layers_map, awq_objects):
    """Temporarily wrap `layer.call` to record activation statistics.

    While the context is active, each layer's `call` is replaced by a
    wrapper that flattens the incoming tensor to
    `(-1, quantizer.rows)` and feeds it to the matching AWQ object before
    delegating to the original `call`. The original methods are restored
    on exit, even if the body raises.

    Args:
        layers_map: Dict[str, Layer]. Mapping from layer names to layers.
        awq_objects: Dict[str, AWQ]. Mapping from names to AWQ instances.

    Yields:
        None: The patched state is active only within the `with` block.
    """
    saved_calls = {}

    def make_wrapper(layer_name, wrapped_call):
        # Bind name/call per layer so late-binding closures don't alias.
        def wrapper(*args, **kwargs):
            tensor = args[0] if args else kwargs["inputs"]
            quantizer = awq_objects[layer_name]
            flattened = ops.reshape(tensor, (-1, quantizer.rows))
            quantizer.update_activation_magnitudes(flattened)
            return wrapped_call(*args, **kwargs)

        return wrapper

    try:
        for layer_name, layer in layers_map.items():
            saved_calls[layer_name] = layer.call
            layer.call = make_wrapper(layer_name, layer.call)
        yield
    finally:
        # Restore every patched method, whatever happened inside the block.
        for layer_name, layer in layers_map.items():
            layer.call = saved_calls[layer_name]
def apply_awq_layerwise(dataloader, config, structure, filters=None):
    """Apply AWQ quantization layer-by-layer to a Keras model.

    This function processes the model sequentially, one block at a time:
    1. Captures activation statistics through calibration data forward pass
    2. Uses activation magnitudes to determine weight saliency
    3. Finds optimal per-channel scales via grid search
    4. Quantizes weights with AWQ scaling

    Args:
        dataloader: Calibration data as numpy array.
        config: AWQConfig instance.
        structure: Dict with 'pre_block_layers' and 'sequential_blocks'.
        filters: Optional layer filters.

    Raises:
        ValueError: If `structure` contains no sequential blocks.
    """
    num_samples = config.num_samples
    logging.info("Starting AWQ quantization...")
    pre_layers = structure.get("pre_block_layers", [])
    transformer_blocks = structure.get("sequential_blocks", [])
    if not transformer_blocks:
        raise ValueError(
            "No sequential blocks found in the structure to quantize."
        )
    # Process inputs through pre-block layers (e.g., embedding), producing
    # one tensor per calibration batch.
    inputs = []
    for batch in dataloader:
        batch = ops.convert_to_tensor(batch, dtype="int32")
        for layer in pre_layers:
            batch = layer(batch)
        inputs.append(batch)
    # Never request more samples than the dataloader actually yielded.
    num_samples = min(num_samples, len(inputs))
    progbar = keras_utils.Progbar(target=len(transformer_blocks))
    for block_idx, block in enumerate(transformer_blocks):
        logging.info(f"Quantizing Block {block_idx}")
        sub_layers_map = find_layers_in_block(block)
        # Apply filters: keep only layers that should be quantized.
        final_sub_layers_map = {}
        for name, layer in sub_layers_map.items():
            if not should_quantize_layer(layer, filters):
                continue
            final_sub_layers_map[name] = layer
        sub_layers_map = final_sub_layers_map
        if not sub_layers_map:
            logging.info(
                f" No quantizable layers found in block {block_idx}. Skipping."
            )
        else:
            logging.info(f"Found layers: {list(sub_layers_map.keys())}")
            # Create AWQ objects for each layer
            awq_objects = {
                name: AWQ(layer, config)
                for name, layer in sub_layers_map.items()
            }
            # Capture activation statistics by running the calibration
            # samples through the block with patched layer.call methods.
            with stream_activations(sub_layers_map, awq_objects):
                for sample_idx in range(num_samples):
                    current_input = inputs[sample_idx]
                    # Add a batch dimension if the sample is unbatched.
                    if len(current_input.shape) == 2:
                        current_input = ops.expand_dims(current_input, axis=0)
                    _ = block(current_input)
            # Quantize each layer, then release its calibration buffers.
            for name, awq_object in awq_objects.items():
                logging.info(f"Quantizing {name}...")
                awq_object.quantize_layer()
                awq_object.free()
            del awq_objects
        # Generate inputs for next block (skip after the last block).
        if block_idx < len(transformer_blocks) - 1:
            logging.info(f"Generating inputs for block {block_idx + 1}...")
            next_block_inputs = []
            for sample_idx in range(num_samples):
                current_input = inputs[sample_idx]
                if len(current_input.shape) == 2:
                    current_input = ops.expand_dims(current_input, axis=0)
                # NOTE(review): `[0]` presumably selects the hidden-state
                # tensor from the block's output — confirm the block's
                # return convention.
                output = block(current_input)[0]
                next_block_inputs.append(output)
            inputs = next_block_inputs
        progbar.update(current=block_idx + 1)
    logging.info("AWQ quantization complete.")
def awq_quantize(config, quantization_layer_structure, filters=None):
    """Main entry point for AWQ quantization.

    Validates the configuration, tokenizes the calibration dataset, and
    dispatches to the layer-wise quantization loop.

    Args:
        config: AWQConfig instance.
        quantization_layer_structure: Model structure dictionary.
        filters: Optional layer filters.

    Raises:
        ValueError: If the config lacks a dataset/tokenizer, or if no
            quantization structure is provided.
    """
    # Guard: calibration is impossible without data and a tokenizer.
    if config.dataset is None or config.tokenizer is None:
        raise ValueError(
            "AWQ quantization requires a dataset and tokenizer. "
            "Please provide them in the AWQConfig."
        )
    # Guard: the layer-wise loop needs to know the block structure.
    if quantization_layer_structure is None:
        raise ValueError(
            "For 'awq' mode, a valid quantization structure must be provided "
            "either via `config.quantization_layer_structure` or by overriding "
            "`model.get_quantization_layer_structure(mode)`. The structure "
            "should be a dictionary with keys 'pre_block_layers' and "
            "'sequential_blocks'."
        )
    # Tokenize/load the calibration data.
    calibration_data = get_dataloader(
        config.tokenizer,
        config.sequence_length,
        config.dataset,
        num_samples=config.num_samples,
    )
    # Cap the batches handed to the layer-wise loop at num_samples.
    apply_awq_layerwise(
        calibration_data[: config.num_samples],
        config,
        quantization_layer_structure,
        filters=filters,
    )
def get_group_size_for_layer(layer, config):
    """Resolve the quantization group size for a layer.

    Resolution order: an explicit `AWQConfig`, then the layer's own
    `AWQDTypePolicy`, then an `AWQDTypePolicy` looked up from a
    `DTypePolicyMap` by the layer's path.

    Args:
        layer: The layer to get group size for.
        config: Optional AWQConfig instance.

    Returns:
        int: The group size for quantization.

    Raises:
        ValueError: If no source provides a group size.
    """
    # An explicit config always wins.
    if config and isinstance(config, AWQConfig):
        return config.group_size
    policy = layer.dtype_policy
    if isinstance(policy, AWQDTypePolicy):
        return policy.group_size
    if isinstance(policy, DTypePolicyMap):
        mapped = policy[layer.path]
        if isinstance(mapped, AWQDTypePolicy):
            return mapped.group_size
    raise ValueError(
        "For AWQ quantization, group_size must be specified "
        "through AWQConfig or AWQDTypePolicy."
    )
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/quantizers/awq_core.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
keras-team/keras:keras/src/saving/orbax_util.py | """Orbax checkpoint loading functionality."""
import os
from keras.src.utils import file_utils
from keras.src.utils.module_utils import ocp
def is_orbax_checkpoint(filepath):
    """Return True if `filepath` looks like an Orbax checkpoint directory.

    Uses custom detection heuristics instead of Orbax APIs, which may be
    unreliable in some environments.
    """
    # A checkpoint must at minimum be an existing directory.
    if not (file_utils.exists(filepath) and file_utils.isdir(filepath)):
        return False
    try:
        entries = file_utils.listdir(filepath)
        # Marker files written by standard Orbax checkpoints.
        markers = {
            "orbax.checkpoint",
            "pytree.orbax-checkpoint",
            "checkpoint_metadata",
        }
        if any(entry in markers for entry in entries):
            return True
        # Otherwise, accept in-progress temp files or numeric step
        # directories in a single pass over the listing.
        return any(
            ".orbax-checkpoint-tmp" in entry
            or (
                entry.isdigit()
                and file_utils.isdir(file_utils.join(filepath, entry))
            )
            for entry in entries
        )
    except (OSError, PermissionError):
        # Unreadable directory: treat as "not a checkpoint".
        return False
def find_latest_orbax_checkpoint(checkpoint_dir):
    """Return the path of the newest checkpoint step in `checkpoint_dir`.

    Raises:
        ValueError: If the directory contains no valid checkpoints.
    """
    latest = ocp.training.Checkpointer(directory=checkpoint_dir).latest
    if latest is None:
        raise ValueError(f"No valid checkpoints found in {checkpoint_dir}")
    return os.path.join(checkpoint_dir, str(latest.step))
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/saving/orbax_util.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
keras-team/keras:keras/src/layers/pooling/adaptive_average_pooling1d.py | """Adaptive Average Pooling 1D layer."""
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_adaptive_pooling import (
BaseAdaptiveAveragePooling,
)
@keras_export("keras.layers.AdaptiveAveragePooling1D")
class AdaptiveAveragePooling1D(BaseAdaptiveAveragePooling):
    """Average pooling that adapts its window to a fixed output length.

    Unlike fixed-window pooling, this layer derives the pooling window and
    stride from the input length so that the output always has exactly
    `output_size` steps, regardless of the input length.

    Args:
        output_size: Integer (or 1-tuple/list of an integer) giving the
            target output length.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            `"channels_last"` corresponds to inputs shaped
            `(batch, length, channels)`; `"channels_first"` to
            `(batch, channels, length)`. Defaults to the value found in
            your Keras config file at `~/.keras/keras.json`; if never set,
            `"channels_last"` is used.

    Input shape:
        3D tensor: `(batch_size, length, channels)` with
        `"channels_last"`, or `(batch_size, channels, length)` with
        `"channels_first"`.

    Output shape:
        Same layout with `length` replaced by `output_size`.

    Examples:

    >>> import numpy as np
    >>> input_seq = np.random.rand(1, 64, 3)
    >>> layer = AdaptiveAveragePooling1D(output_size=32)
    >>> output_seq = layer(input_seq)
    >>> output_seq.shape
    (1, 32, 3)
    """

    def __init__(self, output_size, data_format=None, **kwargs):
        # Canonicalize `output_size` to a 1-tuple before delegating to the
        # base class.
        if isinstance(output_size, int):
            target = (output_size,)
        elif isinstance(output_size, (tuple, list)):
            if len(output_size) != 1:
                raise ValueError(
                    f"For 1D input, `output_size` tuple must have length 1. "
                    f"Received: {output_size}"
                )
            target = tuple(output_size)
        else:
            raise TypeError(
                f"`output_size` must be an integer or tuple of 1 integer. "
                f"Received: {output_size} of type {type(output_size)}"
            )
        super().__init__(target, data_format, **kwargs)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/pooling/adaptive_average_pooling1d.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
keras-team/keras:keras/src/layers/pooling/adaptive_average_pooling2d.py | """Adaptive Average Pooling 2D layer."""
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_adaptive_pooling import (
BaseAdaptiveAveragePooling,
)
@keras_export("keras.layers.AdaptiveAveragePooling2D")
class AdaptiveAveragePooling2D(BaseAdaptiveAveragePooling):
    """Average pooling that adapts its window to a fixed 2D output size.

    Unlike fixed-window pooling, this layer derives the pooling window and
    stride from the input spatial size so that the output always has the
    spatial size given by `output_size`, regardless of the input size.

    Args:
        output_size: Integer or tuple of 2 integers giving the target
            output size. A single integer is used for both height and
            width.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            `"channels_last"` corresponds to inputs shaped
            `(batch, height, width, channels)`; `"channels_first"` to
            `(batch, channels, height, width)`. Defaults to the value
            found in your Keras config file at `~/.keras/keras.json`; if
            never set, `"channels_last"` is used.

    Input shape:
        4D tensor: `(batch_size, height, width, channels)` with
        `"channels_last"`, or `(batch_size, channels, height, width)` with
        `"channels_first"`.

    Output shape:
        Same layout with the spatial dimensions replaced by
        `(output_height, output_width)`.

    Examples:

    >>> import numpy as np
    >>> input_img = np.random.rand(1, 64, 64, 3)
    >>> layer = AdaptiveAveragePooling2D(output_size=32)
    >>> output_img = layer(input_img)
    >>> output_img.shape
    (1, 32, 32, 3)
    """

    def __init__(self, output_size, data_format=None, **kwargs):
        # Canonicalize `output_size` to a (height, width) tuple before
        # delegating to the base class.
        if isinstance(output_size, int):
            target = (output_size, output_size)
        elif isinstance(output_size, (tuple, list)) and len(output_size) == 2:
            target = tuple(output_size)
        else:
            raise TypeError(
                f"`output_size` must be an integer or (height, width) tuple. "
                f"Received: {output_size} of type {type(output_size)}"
            )
        super().__init__(target, data_format, **kwargs)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/pooling/adaptive_average_pooling2d.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
keras-team/keras:keras/src/layers/pooling/adaptive_average_pooling3d.py | """Adaptive Average Pooling 3D layer."""
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_adaptive_pooling import (
BaseAdaptiveAveragePooling,
)
@keras_export("keras.layers.AdaptiveAveragePooling3D")
class AdaptiveAveragePooling3D(BaseAdaptiveAveragePooling):
"""Adaptive average pooling operation for 3D volumetric data.
This layer applies an adaptive average pooling operation, which pools the
input such that the output has a target spatial size specified by
`output_size`, regardless of the input spatial size. The kernel size
and stride are automatically computed to achieve the target output size.
Args:
output_size: Integer or tuple of 3 integers specifying the
target output size.
If an integer, the same value is used for depth, height, and width.
data_format: string, either `"channels_last"` or `"channels_first"`.
`"channels_last"` corresponds to inputs with shape
`(batch, depth, height, width, channels)`.
`"channels_first"` corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
Defaults to the value found in your Keras config file at
`~/.keras/keras.json`. If never set, `"channels_last"` is used.
Input shape:
- If `data_format="channels_last"`: 5D tensor
`(batch_size, depth, height, width, channels)`
- If `data_format="channels_first"`: 5D tensor
`(batch_size, channels, depth, height, width)`
Output shape:
- If `data_format="channels_last"`:
`(batch_size, output_depth, output_height, output_width, channels)`
- If `data_format="channels_first"`:
`(batch_size, channels, output_depth, output_height, output_width)`
Examples:
>>> import numpy as np
>>> input_vol = np.random.rand(1, 32, 32, 32, 3)
>>> layer = AdaptiveAveragePooling3D(output_size=16)
>>> output_vol = layer(input_vol)
>>> output_vol.shape
(1, 16, 16, 16, 3)
"""
def __init__(self, output_size, data_format=None, **kwargs):
if isinstance(output_size, int):
output_size_tuple = (output_size, output_size, output_size)
elif isinstance(output_size, (tuple, list)) and len(output_size) == 3:
output_size_tuple = tuple(output_size)
else:
raise TypeError(
f"`output_size` must be an integer or "
f"(depth, height, width) tuple. "
f"Received: {output_size} of type {type(output_size)}"
)
super().__init__(output_size_tuple, data_format, **kwargs)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/pooling/adaptive_average_pooling3d.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
keras-team/keras:keras/src/layers/pooling/adaptive_max_pooling1d.py | """Adaptive Max Pooling 1D layer."""
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_adaptive_pooling import (
BaseAdaptiveMaxPooling,
)
@keras_export("keras.layers.AdaptiveMaxPooling1D")
class AdaptiveMaxPooling1D(BaseAdaptiveMaxPooling):
"""Adaptive max pooling operation for 1D temporal or spatial data.
This layer applies an adaptive max pooling operation, which pools the
input such that the output has a target length specified by `output_size`,
regardless of the input length. The kernel size and stride are automatically
computed to achieve the target output size.
Args:
output_size: Integer specifying the target output length.
data_format: string, either `"channels_last"` or `"channels_first"`.
`"channels_last"` corresponds to inputs with shape
`(batch, length, channels)`.
`"channels_first"` corresponds to inputs with shape
`(batch, channels, length)`.
Defaults to the value found in your Keras config file at
`~/.keras/keras.json`. If never set, `"channels_last"` is used.
Input shape:
- If `data_format="channels_last"`: 3D tensor
`(batch_size, length, channels)`
- If `data_format="channels_first"`: 3D tensor
`(batch_size, channels, length)`
Output shape:
- If `data_format="channels_last"`:
`(batch_size, output_length, channels)`
- If `data_format="channels_first"`:
`(batch_size, channels, output_length)`
Examples:
>>> import numpy as np
>>> input_seq = np.random.rand(1, 64, 3)
>>> layer = AdaptiveMaxPooling1D(output_size=32)
>>> output_seq = layer(input_seq)
>>> output_seq.shape
(1, 32, 3)
"""
def __init__(self, output_size, data_format=None, **kwargs):
if isinstance(output_size, int):
output_size = (output_size,)
elif isinstance(output_size, (tuple, list)):
if len(output_size) != 1:
raise ValueError(
f"For 1D input, `output_size` tuple must have length 1. "
f"Received: {output_size}"
)
output_size = tuple(output_size)
else:
raise TypeError(
f"`output_size` must be an integer or tuple of 1 integer. "
f"Received: {output_size} of type {type(output_size)}"
)
super().__init__(output_size, data_format, **kwargs)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/pooling/adaptive_max_pooling1d.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
keras-team/keras:keras/src/layers/pooling/adaptive_max_pooling2d.py | """Adaptive Max Pooling 2D layer."""
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_adaptive_pooling import (
BaseAdaptiveMaxPooling,
)
@keras_export("keras.layers.AdaptiveMaxPooling2D")
class AdaptiveMaxPooling2D(BaseAdaptiveMaxPooling):
"""Adaptive max pooling operation for 2D spatial data.
This layer applies an adaptive max pooling operation, which pools the
input such that the output has a target spatial size specified by
`output_size`, regardless of the input spatial size. The kernel size
and stride are automatically computed to achieve the target output size.
Args:
output_size: Integer or tuple of 2 integers specifying the
target output size.
If an integer, the same value is used for both height and width.
data_format: string, either `"channels_last"` or `"channels_first"`.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)`.
`"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`.
Defaults to the value found in your Keras config file at
`~/.keras/keras.json`. If never set, `"channels_last"` is used.
Input shape:
- If `data_format="channels_last"`: 4D tensor
`(batch_size, height, width, channels)`
- If `data_format="channels_first"`: 4D tensor
`(batch_size, channels, height, width)`
Output shape:
- If `data_format="channels_last"`:
`(batch_size, output_height, output_width, channels)`
- If `data_format="channels_first"`:
`(batch_size, channels, output_height, output_width)`
Examples:
>>> import numpy as np
>>> input_img = np.random.rand(1, 64, 64, 3)
>>> layer = AdaptiveMaxPooling2D(output_size=32)
>>> output_img = layer(input_img)
>>> output_img.shape
(1, 32, 32, 3)
"""
def __init__(self, output_size, data_format=None, **kwargs):
if isinstance(output_size, int):
output_size_tuple = (output_size, output_size)
elif isinstance(output_size, (tuple, list)) and len(output_size) == 2:
output_size_tuple = tuple(output_size)
else:
raise TypeError(
f"`output_size` must be an integer or (height, width) tuple. "
f"Received: {output_size} of type {type(output_size)}"
)
super().__init__(output_size_tuple, data_format, **kwargs)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/pooling/adaptive_max_pooling2d.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
keras-team/keras:keras/src/layers/pooling/adaptive_max_pooling3d.py | """Adaptive Max Pooling 3D layer."""
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_adaptive_pooling import (
BaseAdaptiveMaxPooling,
)
@keras_export("keras.layers.AdaptiveMaxPooling3D")
class AdaptiveMaxPooling3D(BaseAdaptiveMaxPooling):
"""Adaptive max pooling operation for 3D volumetric data.
This layer applies an adaptive max pooling operation, which pools the
input such that the output has a target spatial size specified by
`output_size`, regardless of the input spatial size. The kernel size
and stride are automatically computed to achieve the target output size.
Args:
output_size: Integer or tuple of 3 integers specifying the
target output size.
If an integer, the same value is used for depth, height, and width.
data_format: string, either `"channels_last"` or `"channels_first"`.
`"channels_last"` corresponds to inputs with shape
`(batch, depth, height, width, channels)`.
`"channels_first"` corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
Defaults to the value found in your Keras config file at
`~/.keras/keras.json`. If never set, `"channels_last"` is used.
Input shape:
- If `data_format="channels_last"`: 5D tensor
`(batch_size, depth, height, width, channels)`
- If `data_format="channels_first"`: 5D tensor
`(batch_size, channels, depth, height, width)`
Output shape:
- If `data_format="channels_last"`:
`(batch_size, output_depth, output_height, output_width, channels)`
- If `data_format="channels_first"`:
`(batch_size, channels, output_depth, output_height, output_width)`
Examples:
>>> import numpy as np
>>> input_vol = np.random.rand(1, 32, 32, 32, 3)
>>> layer = AdaptiveMaxPooling3D(output_size=16)
>>> output_vol = layer(input_vol)
>>> output_vol.shape
(1, 16, 16, 16, 3)
"""
def __init__(self, output_size, data_format=None, **kwargs):
if isinstance(output_size, int):
output_size_tuple = (output_size, output_size, output_size)
elif isinstance(output_size, (tuple, list)) and len(output_size) == 3:
output_size_tuple = tuple(output_size)
else:
raise TypeError(
f"`output_size` must be an integer or "
f"(depth, height, width) tuple. "
f"Received: {output_size} of type {type(output_size)}"
)
super().__init__(output_size_tuple, data_format, **kwargs)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/pooling/adaptive_max_pooling3d.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
keras-team/keras:keras/src/layers/pooling/adaptive_pooling1d_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
SKIP_BACKENDS = ["openvino"]
pytestmark = pytest.mark.skipif(
backend.backend() in SKIP_BACKENDS,
reason=(
"Adaptive pooling tests not supported for backend: {}".format(
backend.backend()
)
),
)
class AdaptivePooling1DLayerTest(testing.TestCase):
"""Tests for AdaptiveAveragePooling1D and AdaptiveMaxPooling1D."""
def _run_layer_test(self, layer_class, x_np, output_size, data_format):
"""Helper: test layer output shape matches compute_output_shape()."""
layer = layer_class(output_size=output_size, data_format=data_format)
y = layer(x_np)
expected_shape = layer.compute_output_shape(x_np.shape)
self.assertEqual(y.shape, expected_shape)
def test_average_pooling_basic_shapes(self):
"""Test AdaptiveAveragePooling1D basic shape transformation."""
shape = (2, 3, 8) # N,C,L
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveAveragePooling1D,
x,
output_size=4,
data_format="channels_first",
)
def test_max_pooling_basic_shapes(self):
"""Test AdaptiveMaxPooling1D basic shape transformation."""
shape = (2, 3, 8)
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveMaxPooling1D,
x,
output_size=4,
data_format="channels_first",
)
def test_average_pooling_channels_last(self):
"""Test AdaptiveAveragePooling1D with channels_last format."""
shape = (2, 8, 3) # N,L,C
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveAveragePooling1D,
x,
output_size=4,
data_format="channels_last",
)
def test_max_pooling_channels_last(self):
"""Test AdaptiveMaxPooling1D with channels_last format."""
shape = (2, 8, 3)
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveMaxPooling1D,
x,
output_size=4,
data_format="channels_last",
)
def test_average_pooling_compute_output_shape(self):
"""Test compute_output_shape() for AdaptiveAveragePooling1D."""
layer = layers.AdaptiveAveragePooling1D(
output_size=16, data_format="channels_last"
)
input_shape = (None, 64, 3)
output_shape = layer.compute_output_shape(input_shape)
self.assertEqual(output_shape, (None, 16, 3))
def test_max_pooling_compute_output_shape(self):
"""Test compute_output_shape() for AdaptiveMaxPooling1D."""
layer = layers.AdaptiveMaxPooling1D(
output_size=16, data_format="channels_first"
)
input_shape = (2, 3, 64)
output_shape = layer.compute_output_shape(input_shape)
self.assertEqual(output_shape, (2, 3, 16))
def test_average_pooling_get_config(self):
"""Test get_config() serialization for AdaptiveAveragePooling1D."""
layer = layers.AdaptiveAveragePooling1D(
output_size=32, data_format="channels_first"
)
config = layer.get_config()
self.assertEqual(config["output_size"], (32,))
self.assertEqual(config["data_format"], "channels_first")
def test_max_pooling_get_config(self):
"""Test get_config() serialization for AdaptiveMaxPooling1D."""
layer = layers.AdaptiveMaxPooling1D(
output_size=32, data_format="channels_last"
)
config = layer.get_config()
self.assertEqual(config["output_size"], (32,))
self.assertEqual(config["data_format"], "channels_last")
def test_average_pooling_numerical(self):
"""Test AdaptiveAveragePooling1D numerical correctness."""
inputs = np.array([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]]], dtype="float32")
expected = np.array([[[2.0, 5.0]]], dtype="float32")
layer = layers.AdaptiveAveragePooling1D(
output_size=2, data_format="channels_first"
)
outputs = layer(inputs)
self.assertAllClose(outputs, expected, atol=1e-4)
def test_max_pooling_numerical(self):
"""Test AdaptiveMaxPooling1D numerical correctness."""
inputs = np.array([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]]], dtype="float32")
expected = np.array([[[3.0, 6.0]]], dtype="float32")
layer = layers.AdaptiveMaxPooling1D(
output_size=2, data_format="channels_first"
)
outputs = layer(inputs)
self.assertAllClose(outputs, expected, atol=1e-4)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/pooling/adaptive_pooling1d_test.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/layers/pooling/adaptive_pooling2d_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
SKIP_BACKENDS = ["openvino"]
pytestmark = pytest.mark.skipif(
backend.backend() in SKIP_BACKENDS,
reason=(
"Adaptive pooling tests not supported for backend: {}".format(
backend.backend()
)
),
)
class AdaptivePooling2DLayerTest(testing.TestCase):
"""Tests for AdaptiveAveragePooling2D and AdaptiveMaxPooling2D."""
def _run_layer_test(self, layer_class, x_np, output_size, data_format):
"""Helper: test layer output shape matches compute_output_shape()."""
layer = layer_class(output_size=output_size, data_format=data_format)
y = layer(x_np)
expected_shape = layer.compute_output_shape(x_np.shape)
self.assertEqual(y.shape, expected_shape)
def test_average_pooling_basic_shapes(self):
"""Test AdaptiveAveragePooling2D basic shape transformation."""
shape = (2, 3, 8, 8) # N,C,H,W
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveAveragePooling2D,
x,
output_size=4,
data_format="channels_first",
)
def test_max_pooling_basic_shapes(self):
"""Test AdaptiveMaxPooling2D basic shape transformation."""
shape = (2, 3, 8, 8)
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveMaxPooling2D,
x,
output_size=4,
data_format="channels_first",
)
def test_average_pooling_channels_last(self):
"""Test AdaptiveAveragePooling2D with channels_last format."""
shape = (2, 8, 8, 3) # N,H,W,C
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveAveragePooling2D,
x,
output_size=4,
data_format="channels_last",
)
def test_max_pooling_channels_last(self):
"""Test AdaptiveMaxPooling2D with channels_last format."""
shape = (2, 8, 8, 3)
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveMaxPooling2D,
x,
output_size=4,
data_format="channels_last",
)
def test_average_pooling_tuple_output_size(self):
"""Test AdaptiveAveragePooling2D with tuple output_size."""
shape = (2, 8, 8, 3)
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveAveragePooling2D,
x,
output_size=(4, 4),
data_format="channels_last",
)
def test_max_pooling_tuple_output_size(self):
"""Test AdaptiveMaxPooling2D with tuple output_size."""
shape = (2, 8, 8, 3)
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveMaxPooling2D,
x,
output_size=(2, 4),
data_format="channels_last",
)
def test_average_pooling_compute_output_shape(self):
"""Test compute_output_shape() for AdaptiveAveragePooling2D."""
layer = layers.AdaptiveAveragePooling2D(
output_size=16, data_format="channels_last"
)
input_shape = (None, 64, 64, 3)
output_shape = layer.compute_output_shape(input_shape)
self.assertEqual(output_shape, (None, 16, 16, 3))
def test_max_pooling_compute_output_shape(self):
"""Test compute_output_shape() for AdaptiveMaxPooling2D."""
layer = layers.AdaptiveMaxPooling2D(
output_size=(8, 16), data_format="channels_first"
)
input_shape = (2, 3, 64, 64)
output_shape = layer.compute_output_shape(input_shape)
self.assertEqual(output_shape, (2, 3, 8, 16))
def test_average_pooling_get_config(self):
"""Test get_config() serialization for AdaptiveAveragePooling2D."""
layer = layers.AdaptiveAveragePooling2D(
output_size=32, data_format="channels_first"
)
config = layer.get_config()
self.assertEqual(config["output_size"], (32, 32))
self.assertEqual(config["data_format"], "channels_first")
def test_max_pooling_get_config(self):
"""Test get_config() serialization for AdaptiveMaxPooling2D."""
layer = layers.AdaptiveMaxPooling2D(
output_size=(8, 16), data_format="channels_last"
)
config = layer.get_config()
self.assertEqual(config["output_size"], (8, 16))
self.assertEqual(config["data_format"], "channels_last")
def test_average_pooling2d_numerical(self):
"""Test AdaptiveAveragePooling2D numerical correctness."""
inputs = np.array(
[
[
[
[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0],
[13.0, 14.0, 15.0, 16.0],
]
]
],
dtype="float32",
)
expected = np.array([[[[3.5, 5.5], [11.5, 13.5]]]], dtype="float32")
layer = layers.AdaptiveAveragePooling2D(
output_size=2, data_format="channels_first"
)
outputs = layer(inputs)
self.assertAllClose(outputs, expected, atol=1e-4)
def test_max_pooling2d_numerical(self):
"""Test AdaptiveMaxPooling2D numerical correctness."""
inputs = np.array(
[
[
[
[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0],
[13.0, 14.0, 15.0, 16.0],
]
]
],
dtype="float32",
)
expected = np.array([[[[6.0, 8.0], [14.0, 16.0]]]], dtype="float32")
layer = layers.AdaptiveMaxPooling2D(
output_size=2, data_format="channels_first"
)
outputs = layer(inputs)
self.assertAllClose(outputs, expected, atol=1e-4)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/pooling/adaptive_pooling2d_test.py",
"license": "Apache License 2.0",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/layers/pooling/adaptive_pooling3d_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
SKIP_BACKENDS = ["openvino"]
pytestmark = pytest.mark.skipif(
backend.backend() in SKIP_BACKENDS,
reason=(
"Adaptive pooling tests not supported for backend: {}".format(
backend.backend()
)
),
)
class AdaptivePooling3DLayerTest(testing.TestCase):
"""Tests for AdaptiveAveragePooling3D and AdaptiveMaxPooling3D."""
def _run_layer_test(self, layer_class, x_np, output_size, data_format):
"""Helper: test layer output shape matches compute_output_shape()."""
layer = layer_class(output_size=output_size, data_format=data_format)
y = layer(x_np)
expected_shape = layer.compute_output_shape(x_np.shape)
self.assertEqual(y.shape, expected_shape)
def test_average_pooling_basic_shapes(self):
"""Test AdaptiveAveragePooling3D basic shape transformation."""
shape = (2, 3, 8, 8, 8) # N,C,D,H,W
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveAveragePooling3D,
x,
output_size=4,
data_format="channels_first",
)
def test_max_pooling_basic_shapes(self):
"""Test AdaptiveMaxPooling3D basic shape transformation."""
shape = (2, 3, 8, 8, 8)
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveMaxPooling3D,
x,
output_size=4,
data_format="channels_first",
)
def test_average_pooling_channels_last(self):
"""Test AdaptiveAveragePooling3D with channels_last format."""
shape = (2, 8, 8, 8, 3) # N,D,H,W,C
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveAveragePooling3D,
x,
output_size=4,
data_format="channels_last",
)
def test_max_pooling_channels_last(self):
"""Test AdaptiveMaxPooling3D with channels_last format."""
shape = (2, 8, 8, 8, 3)
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveMaxPooling3D,
x,
output_size=4,
data_format="channels_last",
)
def test_average_pooling_tuple_output_size(self):
"""Test AdaptiveAveragePooling3D with tuple output_size."""
shape = (2, 8, 8, 8, 3)
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveAveragePooling3D,
x,
output_size=(4, 4, 4),
data_format="channels_last",
)
def test_max_pooling_tuple_output_size(self):
"""Test AdaptiveMaxPooling3D with tuple output_size."""
shape = (2, 8, 8, 8, 3)
x = np.random.randn(*shape).astype("float32")
self._run_layer_test(
layers.AdaptiveMaxPooling3D,
x,
output_size=(2, 4, 4),
data_format="channels_last",
)
def test_average_pooling_compute_output_shape(self):
"""Test compute_output_shape() for AdaptiveAveragePooling3D."""
layer = layers.AdaptiveAveragePooling3D(
output_size=8, data_format="channels_last"
)
input_shape = (None, 32, 32, 32, 3)
output_shape = layer.compute_output_shape(input_shape)
self.assertEqual(output_shape, (None, 8, 8, 8, 3))
def test_max_pooling_compute_output_shape(self):
"""Test compute_output_shape() for AdaptiveMaxPooling3D."""
layer = layers.AdaptiveMaxPooling3D(
output_size=(4, 8, 8), data_format="channels_first"
)
input_shape = (2, 3, 32, 32, 32)
output_shape = layer.compute_output_shape(input_shape)
self.assertEqual(output_shape, (2, 3, 4, 8, 8))
def test_average_pooling_get_config(self):
"""Test get_config() serialization for AdaptiveAveragePooling3D."""
layer = layers.AdaptiveAveragePooling3D(
output_size=16, data_format="channels_first"
)
config = layer.get_config()
self.assertEqual(config["output_size"], (16, 16, 16))
self.assertEqual(config["data_format"], "channels_first")
def test_max_pooling_get_config(self):
"""Test get_config() serialization for AdaptiveMaxPooling3D."""
layer = layers.AdaptiveMaxPooling3D(
output_size=(8, 16, 16), data_format="channels_last"
)
config = layer.get_config()
self.assertEqual(config["output_size"], (8, 16, 16))
self.assertEqual(config["data_format"], "channels_last")
def test_average_pooling3d_numerical(self):
"""Test AdaptiveAveragePooling3D numerical correctness."""
inputs = np.array(
[[[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]]],
dtype="float32",
)
expected = np.array(
[[[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]]],
dtype="float32",
)
layer = layers.AdaptiveAveragePooling3D(
output_size=2, data_format="channels_first"
)
outputs = layer(inputs)
self.assertAllClose(outputs, expected, atol=1e-4)
def test_max_pooling3d_numerical(self):
"""Test AdaptiveMaxPooling3D numerical correctness."""
inputs = np.array(
[[[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]]],
dtype="float32",
)
expected = np.array(
[[[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]]],
dtype="float32",
)
layer = layers.AdaptiveMaxPooling3D(
output_size=2, data_format="channels_first"
)
outputs = layer(inputs)
self.assertAllClose(outputs, expected, atol=1e-4)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/pooling/adaptive_pooling3d_test.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/layers/pooling/base_adaptive_pooling.py | """Base classes for adaptive pooling layers."""
from keras.src import ops
from keras.src.backend import config
from keras.src.layers.layer import Layer
class BaseAdaptivePooling(Layer):
"""Base class shared by all adaptive pooling layers."""
def __init__(self, output_size, data_format=None, **kwargs):
"""Initialize base adaptive pooling layer.
Args:
output_size: Normalized spatial output size as a tuple
(for example, (32,), (32, 32), or (32, 32, 32)).
data_format: Either "channels_last" or "channels_first".
**kwargs: Additional layer keyword arguments.
"""
super().__init__(**kwargs)
self.output_size = output_size
self.data_format = data_format or config.image_data_format()
if self.data_format not in {"channels_first", "channels_last"}:
raise ValueError(
f"Invalid data_format: {self.data_format}. "
"Expected 'channels_first' or 'channels_last'."
)
def compute_output_shape(self, input_shape):
"""Return the output shape tensor after pooling."""
batch_size = input_shape[0]
if self.data_format == "channels_last":
channels = input_shape[-1]
return (batch_size, *self.output_size, channels)
else:
channels = input_shape[1]
return (batch_size, channels, *self.output_size)
def get_config(self):
config_dict = {
"output_size": self.output_size,
"data_format": self.data_format,
}
base_config = super().get_config()
return {**base_config, **config_dict}
class BaseAdaptiveAveragePooling(BaseAdaptivePooling):
"""Base class for adaptive average pooling in 1D, 2D, and 3D."""
def call(self, inputs):
return ops.adaptive_average_pool(
inputs, output_size=self.output_size, data_format=self.data_format
)
class BaseAdaptiveMaxPooling(BaseAdaptivePooling):
"""Base class for adaptive max pooling in 1D, 2D, and 3D."""
def call(self, inputs):
return ops.adaptive_max_pool(
inputs, output_size=self.output_size, data_format=self.data_format
)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/pooling/base_adaptive_pooling.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
keras-team/keras:keras/src/quantizers/quantization_config.py | from keras.src.api_export import keras_export
from keras.src.dtype_policies import QUANTIZATION_MODES
from keras.src.saving import serialization_lib
@keras_export("keras.quantizers.QuantizationConfig")
class QuantizationConfig:
"""Base class for quantization configs.
Subclasses must implement the `mode` property and the `get_config` and
`from_config` class methods.
Args:
weight_quantizer: Quantizer for weights.
activation_quantizer: Quantizer for activations.
"""
def __init__(self, weight_quantizer=None, activation_quantizer=None):
self.weight_quantizer = weight_quantizer
self.activation_quantizer = activation_quantizer
@property
def mode(self):
raise NotImplementedError(
"Subclasses must implement this property. Do not instantiate "
"QuantizationConfig directly."
)
def get_config(self):
return {
"weight_quantizer": serialization_lib.serialize_keras_object(
self.weight_quantizer
),
"activation_quantizer": serialization_lib.serialize_keras_object(
self.activation_quantizer
),
}
@classmethod
def from_config(cls, config):
weight_quantizer = serialization_lib.deserialize_keras_object(
config.get("weight_quantizer")
)
activation_quantizer = serialization_lib.deserialize_keras_object(
config.get("activation_quantizer")
)
return cls(
weight_quantizer=weight_quantizer,
activation_quantizer=activation_quantizer,
)
@staticmethod
def weight_quantizer_or_default(config, default):
if config is not None and config.weight_quantizer is not None:
return config.weight_quantizer
return default
@staticmethod
def activation_quantizer_or_default(config, default):
if config is not None:
return config.activation_quantizer
return default
@keras_export("keras.quantizers.Int8QuantizationConfig")
class Int8QuantizationConfig(QuantizationConfig):
"""Int8 quantization config.
Args:
weight_quantizer: Quantizer for weights.
activation_quantizer: Quantizer for activations. If "default", uses
AbsMaxQuantizer with axis=-1.
"""
def __init__(self, weight_quantizer=None, activation_quantizer="default"):
from keras.src.quantizers.quantizers import AbsMaxQuantizer
if activation_quantizer == "default":
activation_quantizer = AbsMaxQuantizer()
super().__init__(weight_quantizer, activation_quantizer)
if self.weight_quantizer is not None:
if self.weight_quantizer.output_dtype != "int8":
raise ValueError(
"Int8QuantizationConfig requires a weight_quantizer "
"with output_dtype='int8'. Received: "
f"output_dtype={self.weight_quantizer.output_dtype}"
)
@property
def mode(self):
return "int8"
@keras_export("keras.quantizers.Int4QuantizationConfig")
class Int4QuantizationConfig(QuantizationConfig):
    """Int4 quantization config.

    Args:
        weight_quantizer: Quantizer for weights.
        activation_quantizer: Quantizer for activations. If "default", uses
            AbsMaxQuantizer with axis=-1.
        block_size: Size of groups along the input dimension for sub-channel
            quantization. If a positive integer, uses sub-channel quantization
            with `ceil(input_dim / block_size)` groups. If `None` or `-1`,
            uses per-channel quantization (one scale per output channel).
            Default: `128` (sub-channel with 128-element groups).
    """

    def __init__(
        self,
        weight_quantizer=None,
        activation_quantizer="default",
        block_size=128,
    ):
        if activation_quantizer == "default":
            # Int4 defaults to weight-only quantization.
            activation_quantizer = None
        super().__init__(weight_quantizer, activation_quantizer)
        # block_size must be None, -1 (both meaning per-channel), or a
        # positive group size.
        block_size_is_valid = (
            block_size is None or block_size == -1 or block_size > 0
        )
        if not block_size_is_valid:
            raise ValueError(
                f"block_size must be None, -1, or a positive integer. "
                f"Received: block_size={block_size}"
            )
        self.block_size = block_size
        # Sub-channel (grouped) quantization manages its own scales, so
        # user-supplied quantizers are rejected in that mode.
        if block_size is not None and block_size > 0:
            if (
                self.weight_quantizer is not None
                or self.activation_quantizer is not None
            ):
                raise ValueError(
                    "Int4 sub-channel quantization (block_size > 0) does not "
                    "support custom quantizers. Either set block_size to None "
                    "or -1 for per-channel quantization, or remove the custom "
                    f"quantizer arguments. Received: block_size={block_size}"
                )
        wq = self.weight_quantizer
        if wq is not None:
            # A custom weight quantizer must target the int4 value range and
            # emit int8 storage values.
            if wq.value_range != (-8, 7):
                raise ValueError(
                    "Int4QuantizationConfig requires a weight_quantizer "
                    "with value_range=(-8, 7). Received: "
                    f"value_range={wq.value_range}"
                )
            if wq.output_dtype != "int8":
                raise ValueError(
                    "Int4QuantizationConfig requires a weight_quantizer "
                    "with output_dtype='int8'. Received: "
                    f"output_dtype={wq.output_dtype}"
                )

    @property
    def mode(self):
        return "int4"

    def get_config(self):
        base = super().get_config()
        base["block_size"] = self.block_size
        return base

    @classmethod
    def from_config(cls, config):
        return cls(
            weight_quantizer=serialization_lib.deserialize_keras_object(
                config.get("weight_quantizer")
            ),
            activation_quantizer=serialization_lib.deserialize_keras_object(
                config.get("activation_quantizer")
            ),
            # Default to None for backwards compatibility with models saved
            # before block_size was introduced (those used per-channel mode).
            block_size=config.get("block_size", None),
        )
@keras_export("keras.quantizers.Float8QuantizationConfig")
class Float8QuantizationConfig(QuantizationConfig):
    """FP8 quantization config.

    FP8 mixed-precision training does not support user defined quantizers.
    This config is only used to indicate that FP8 mixed-precision training
    should be used.
    """

    def __init__(self):
        # FP8 never takes custom quantizers.
        super().__init__(None, None)

    @property
    def mode(self):
        return "float8"

    def get_config(self):
        # No configurable state to serialize.
        return {}

    @classmethod
    def from_config(cls, config):
        del config  # Unused: an FP8 config carries no state.
        return cls()
def validate_and_resolve_config(mode, config):
    """Validate and resolve quantization config.

    This function validates the quantization config and resolves the mode.
    If mode is not provided, it is inferred from the config.
    If config is not provided, a default config is inferred from the mode.

    Args:
        mode: Quantization mode.
        config: Quantization config.
    """
    # Backwards compatibility: a bare string in `config` acts as a mode
    # shortcut.
    if isinstance(config, str):
        mode, config = config, None

    _validate_mode(mode)

    # Resolve `mode` into a config object when none was supplied.
    if config is None:
        default_config_classes = {
            "int8": Int8QuantizationConfig,
            "int4": Int4QuantizationConfig,
            "float8": Float8QuantizationConfig,
        }
        if mode in default_config_classes:
            config = default_config_classes[mode]()
        elif mode == "gptq":
            raise ValueError(
                "For GPTQ, you must pass a `GPTQConfig` object in the "
                "`config` argument."
            )
        elif mode == "awq":
            raise ValueError(
                "For AWQ, you must pass an `AWQConfig` object in the "
                "`config` argument."
            )
        elif mode is not None:
            raise ValueError(
                f"Invalid quantization mode. Received: mode={mode}"
            )
        else:
            raise ValueError(
                "You must provide either `mode` or `config` to `quantize`."
            )
    elif not isinstance(config, QuantizationConfig):
        raise ValueError(
            "Argument `config` must be an instance of "
            "`QuantizationConfig`. "
            f"Received: config={config} (of type {type(config)})"
        )

    # Prevent contradictions between an explicit mode and the config's mode.
    if mode is not None and config.mode != mode:
        raise ValueError(
            f"Contradictory arguments: mode='{mode}' but "
            f"config.mode='{config.mode}'"
        )
    # From here on, the config is the single source of truth for the mode.
    mode = config.mode
    _validate_mode(mode)

    # GPTQ and AWQ need their dedicated config subclasses.
    if mode == "gptq":
        from keras.src.quantizers.gptq_config import GPTQConfig

        if not isinstance(config, GPTQConfig):
            raise ValueError(
                "Mode 'gptq' requires a valid `config` argument of type "
                f"`GPTQConfig`. Received: {type(config)}"
            )
    elif mode == "awq":
        from keras.src.quantizers.awq_config import AWQConfig

        if not isinstance(config, AWQConfig):
            raise ValueError(
                "Mode 'awq' requires a valid `config` argument of type "
                f"`AWQConfig`. Received: {type(config)}"
            )
    return config
def _validate_mode(mode):
    """Raise if `mode` is neither `None` nor a known quantization mode."""
    if mode is None:
        return
    if mode not in QUANTIZATION_MODES:
        raise ValueError(
            "Invalid quantization mode. "
            f"Expected one of {QUANTIZATION_MODES}. Received: mode={mode}"
        )
def get_block_size_for_layer(layer, config):
    """Determine the block size for int4 quantization.

    The block size can be specified either through the `config` argument
    or through the `dtype_policy` if it is of type `Int4DTypePolicy`.
    The config argument is usually available when quantizing the layer
    via the `quantize` method. If the layer was deserialized from a
    saved model, the block size should be specified in the `dtype_policy`.

    Args:
        layer: The layer being quantized.
        config: An optional configuration object that may contain the
            `block_size` attribute.

    Returns:
        int or None. The determined block size for int4 quantization.
        Returns `None` or `-1` for per-channel quantization.
    """
    from keras.src.dtype_policies.dtype_policy import Int4DTypePolicy
    from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap

    # An explicit int4 config always wins.
    if config and isinstance(config, Int4QuantizationConfig):
        return config.block_size

    policy = layer.dtype_policy
    if isinstance(policy, Int4DTypePolicy):
        # Normalize the -1 sentinel to None for consistency.
        return None if policy.block_size == -1 else policy.block_size
    if isinstance(policy, DTypePolicyMap):
        mapped = policy[layer.path]
        if isinstance(mapped, Int4DTypePolicy):
            return None if mapped.block_size == -1 else mapped.block_size
        # Legacy QuantizedDTypePolicy entries carry no block size.
        return None
    # Backwards compatibility with models predating Int4DTypePolicy
    # (legacy per-channel mode).
    return None
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/quantizers/quantization_config.py",
"license": "Apache License 2.0",
"lines": 284,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
keras-team/keras:keras/src/quantizers/quantization_config_test.py | import os
from keras.src import layers
from keras.src import models
from keras.src import saving
from keras.src import testing
from keras.src.quantizers.quantization_config import Int4QuantizationConfig
from keras.src.quantizers.quantization_config import Int8QuantizationConfig
from keras.src.quantizers.quantization_config import QuantizationConfig
from keras.src.quantizers.quantization_config import validate_and_resolve_config
from keras.src.quantizers.quantizers import AbsMaxQuantizer
class QuantizationConfigTest(testing.TestCase):
    """Tests for the quantization config classes and their resolution."""
    def test_base_quantization_config(self):
        # The abstract base must not expose a usable mode.
        config = QuantizationConfig()
        with self.assertRaises(NotImplementedError):
            _ = config.mode
    def test_int8_quantization_config_valid(self):
        # Defaults: mode is int8 and no custom weight quantizer is set.
        config = Int8QuantizationConfig()
        self.assertEqual(config.mode, "int8")
        self.assertIsNone(config.weight_quantizer)
        # Valid weight quantizer
        q = AbsMaxQuantizer(axis=0, value_range=(-127, 127))
        config = Int8QuantizationConfig(weight_quantizer=q)
        self.assertEqual(config.weight_quantizer, q)
    def test_int8_quantization_config_invalid(self):
        # Invalid value_range
        # The quantizer itself rejects ranges outside int8 bounds.
        with self.assertRaisesRegex(ValueError, "value_range"):
            AbsMaxQuantizer(axis=0, value_range=(-256, 256))
    def test_int4_quantization_config_valid(self):
        config = Int4QuantizationConfig()
        self.assertEqual(config.mode, "int4")
        self.assertIsNone(config.weight_quantizer)
        # Valid weight quantizer with per-channel mode
        # (custom quantizers require block_size=None or -1)
        q = AbsMaxQuantizer(axis=0, value_range=(-8, 7))
        config = Int4QuantizationConfig(weight_quantizer=q, block_size=None)
        self.assertEqual(config.weight_quantizer, q)
    def test_int4_quantization_config_invalid(self):
        # Invalid value_range
        # int4 requires value_range=(-8, 7); the int8 range must be rejected.
        q = AbsMaxQuantizer(axis=0, value_range=(-127, 127))
        with self.assertRaisesRegex(ValueError, "value_range"):
            Int4QuantizationConfig(weight_quantizer=q, block_size=None)
    def test_int4_quantization_config_subchannel_rejects_custom_quantizer(self):
        # Sub-channel quantization does not support custom quantizers
        weight_q = AbsMaxQuantizer(axis=0, value_range=(-8, 7))
        activation_q = AbsMaxQuantizer(axis=-1)
        # Default block_size=128 is sub-channel, should reject custom quantizer
        with self.assertRaisesRegex(
            ValueError, "sub-channel quantization.*does not support"
        ):
            Int4QuantizationConfig(weight_quantizer=weight_q)
        # Explicit positive block_size should also reject weight quantizer
        with self.assertRaisesRegex(
            ValueError, "sub-channel quantization.*does not support"
        ):
            Int4QuantizationConfig(weight_quantizer=weight_q, block_size=64)
        # Sub-channel should also reject activation quantizer
        with self.assertRaisesRegex(
            ValueError, "sub-channel quantization.*does not support"
        ):
            Int4QuantizationConfig(activation_quantizer=activation_q)
        with self.assertRaisesRegex(
            ValueError, "sub-channel quantization.*does not support"
        ):
            Int4QuantizationConfig(
                activation_quantizer=activation_q, block_size=64
            )
        # Per-channel (block_size=None or -1) should accept custom quantizers
        config = Int4QuantizationConfig(
            weight_quantizer=weight_q, block_size=None
        )
        self.assertEqual(config.weight_quantizer, weight_q)
        config = Int4QuantizationConfig(
            weight_quantizer=weight_q, block_size=-1
        )
        self.assertEqual(config.weight_quantizer, weight_q)
        config = Int4QuantizationConfig(
            activation_quantizer=activation_q, block_size=None
        )
        self.assertEqual(config.activation_quantizer, activation_q)
        config = Int4QuantizationConfig(
            activation_quantizer=activation_q, block_size=-1
        )
        self.assertEqual(config.activation_quantizer, activation_q)
    def test_quantization_config_serialization(self):
        """Round-trip a config through get_config/from_config."""
        config = Int8QuantizationConfig(
            weight_quantizer=AbsMaxQuantizer(axis=0),
            activation_quantizer=AbsMaxQuantizer(axis=-1),
        )
        serialized = config.get_config()
        deserialized = Int8QuantizationConfig.from_config(serialized)
        self.assertIsInstance(deserialized, Int8QuantizationConfig)
        self.assertIsInstance(deserialized.weight_quantizer, AbsMaxQuantizer)
        self.assertIsInstance(
            deserialized.activation_quantizer, AbsMaxQuantizer
        )
        # Axis is normalized to a tuple by the quantizer.
        self.assertEqual(deserialized.weight_quantizer.axis, (0,))
        self.assertEqual(deserialized.activation_quantizer.axis, (-1,))
    def test_validate_and_resolve_config(self):
        """Exercise every resolution path of validate_and_resolve_config."""
        # 1. String mode
        config = validate_and_resolve_config("int8", None)
        self.assertIsInstance(config, Int8QuantizationConfig)
        self.assertEqual(config.mode, "int8")
        config = validate_and_resolve_config("int4", None)
        self.assertIsInstance(config, Int4QuantizationConfig)
        self.assertEqual(config.mode, "int4")
        # 2. Config object
        config_in = Int8QuantizationConfig()
        config_out = validate_and_resolve_config(None, config_in)
        self.assertIs(config_out, config_in)
        # 3. Mode + Config (matching)
        config_in = Int8QuantizationConfig()
        config_out = validate_and_resolve_config("int8", config_in)
        self.assertIs(config_out, config_in)
        # 4. Mode + Config (mismatch)
        config_in = Int8QuantizationConfig()
        with self.assertRaisesRegex(ValueError, "Contradictory arguments"):
            validate_and_resolve_config("int4", config_in)
        # 5. Invalid mode
        with self.assertRaisesRegex(ValueError, "Invalid quantization mode"):
            validate_and_resolve_config("invalid_mode", None)
        # 6. GPTQ without config
        with self.assertRaisesRegex(ValueError, "must pass a `GPTQConfig`"):
            validate_and_resolve_config("gptq", None)
        # 7. Contradictory config
        with self.assertRaisesRegex(ValueError, "Contradictory arguments"):
            validate_and_resolve_config("gptq", Int8QuantizationConfig())
        # 8. GPTQ with invalid config type (but correct mode)
        class FakeGPTQConfig(QuantizationConfig):
            @property
            def mode(self):
                return "gptq"
        with self.assertRaisesRegex(ValueError, "requires a valid `config`"):
            validate_and_resolve_config("gptq", FakeGPTQConfig())
    def test_int8_quantization_config_output_dtype_mismatch(self):
        # Invalid output_dtype
        q = AbsMaxQuantizer(
            axis=0, value_range=(-127, 127), output_dtype="int16"
        )
        with self.assertRaisesRegex(ValueError, "output_dtype='int8'"):
            Int8QuantizationConfig(weight_quantizer=q)
    def test_int4_quantization_config_output_dtype_mismatch(self):
        # Invalid output_dtype (using per-channel mode to test output_dtype)
        q = AbsMaxQuantizer(axis=0, value_range=(-8, 7), output_dtype="int16")
        with self.assertRaisesRegex(ValueError, "output_dtype='int8'"):
            Int4QuantizationConfig(weight_quantizer=q, block_size=None)
    def test_model_save_and_load(self):
        """
        Test custom quantizer serialization for model save and load.
        """
        # Setup
        weight_range = (-100, 100)
        custom_quantizer = AbsMaxQuantizer(axis=0, value_range=weight_range)
        config = Int8QuantizationConfig(
            weight_quantizer=custom_quantizer,
            activation_quantizer=None,
        )
        layer = layers.Dense(10)
        layer.build((None, 5))
        layer.quantize("int8", config=config)
        model = models.Sequential([layer])
        model.build((None, 5))
        # Save to temp file
        filepath = os.path.join(self.get_temp_dir(), "quantized_model.keras")
        model.save(filepath)
        # Load back
        loaded_model = saving.load_model(filepath)
        # Verify
        # The custom quantizer (including its non-default value_range) must
        # survive the save/load round trip.
        loaded_layer = loaded_model.layers[0]
        self.assertIsInstance(
            loaded_layer.quantization_config, Int8QuantizationConfig
        )
        quantizer = loaded_layer.quantization_config.weight_quantizer
        self.assertIsInstance(quantizer, AbsMaxQuantizer)
        self.assertEqual(quantizer.axis, (0,))
        self.assertAllEqual(quantizer.value_range, weight_range)
        self.assertIsNone(loaded_layer.quantization_config.activation_quantizer)
        self.assertTrue(loaded_layer._is_quantized)
    def test_awq_requires_config(self):
        """Test that AWQ mode requires a config."""
        with self.assertRaisesRegex(ValueError, "AWQConfig"):
            validate_and_resolve_config("awq", None)
    def test_awq_requires_correct_config_type(self):
        """Test that AWQ requires AWQConfig type."""
        # Int8QuantizationConfig has mode='int8', so passing mode='awq' raises
        # a contradictory arguments error
        with self.assertRaisesRegex(ValueError, "Contradictory arguments"):
            validate_and_resolve_config("awq", Int8QuantizationConfig())
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/quantizers/quantization_config_test.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/quantizers/utils_test.py | from absl.testing import parameterized
from keras.src import layers
from keras.src import testing
from keras.src.quantizers import utils
class UtilsTest(testing.TestCase):
    """Tests for quantizer utility helpers."""
    # Each case: (test name, layer filter, layer name, expected decision).
    # Filters may be None (match all), a regex string, a list of regex
    # strings, or a callable taking the layer.
    @parameterized.named_parameters(
        ("none_filter", None, "dense", True),
        ("regex_match", "dense", "dense_1", True),
        ("regex_no_match", "conv", "dense_1", False),
        ("list_match", ["dense", "conv"], "dense_1", True),
        ("list_no_match", ["conv", "pool"], "dense_1", False),
        ("callable_match", lambda l: "dense" in l.name, "dense_1", True),
        ("callable_no_match", lambda l: "conv" in l.name, "dense_1", False),
    )
    def test_should_quantize_layer(self, filters, layer_name, expected):
        layer = layers.Layer(name=layer_name)
        self.assertEqual(utils.should_quantize_layer(layer, filters), expected)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/quantizers/utils_test.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/export/litert.py | from keras.src import layers
from keras.src import models
from keras.src import tree
from keras.src.export.export_utils import get_input_signature
from keras.src.utils import io_utils
from keras.src.utils.module_utils import tensorflow as tf
def export_litert(
    model,
    filepath,
    input_signature=None,
    **kwargs,
):
    """Export the model as a LiteRT artifact for inference.

    Args:
        model: The Keras model to export.
        filepath: The path to save the exported artifact.
        input_signature: Optional input signature specification. If
            `None`, it will be inferred.
        **kwargs: Additional keyword arguments passed to the exporter.
    """
    # Delegate the conversion work to the exporter object.
    LiteRTExporter(
        model=model,
        input_signature=input_signature,
        **kwargs,
    ).export(filepath)
    io_utils.print_msg(f"Saved artifact at '{filepath}'.")
class LiteRTExporter:
    """Exporter for the LiteRT (TFLite) format.

    This class handles the conversion of Keras models for LiteRT runtime and
    generates a `.tflite` model file. For efficient inference on mobile and
    embedded devices, it creates a single callable signature based on the
    model's `call()` method.
    """

    def __init__(
        self,
        model,
        input_signature=None,
        **kwargs,
    ):
        """Initialize the LiteRT exporter.

        Args:
            model: The Keras model to export
            input_signature: Input signature specification (e.g., TensorFlow
                TensorSpec or list of TensorSpec)
            **kwargs: Additional export parameters, applied as TFLite
                converter attributes (see `_apply_converter_kwargs`).
        """
        self.model = model
        self.input_signature = input_signature
        self.kwargs = kwargs

    def export(self, filepath):
        """Exports the Keras model to a TFLite file.

        Args:
            filepath: Output path for the exported model. Must end with
                ".tflite".

        Returns:
            Path to exported model

        Raises:
            ValueError: If `filepath` does not end with ".tflite".
        """
        # Fail fast on an invalid filepath. Previously this was only checked
        # AFTER signature inference and the (expensive) TFLite conversion,
        # so a bad path wasted a full conversion before erroring out.
        if not filepath.endswith(".tflite"):
            raise ValueError(
                f"The LiteRT export requires the filepath to end with "
                f"'.tflite'. Got: {filepath}"
            )
        # 1. Resolve / infer input signature
        if self.input_signature is None:
            # Use the standard get_input_signature which handles all model
            # types and preserves nested structures (dicts, lists, etc.)
            self.input_signature = get_input_signature(self.model)
        # 2. Determine input structure and create adapter if needed
        # There are 3 cases:
        #   Case 1: Single input (not nested)
        #   Case 2: Flat list of inputs (list where flattened == original)
        #   Case 3: Nested structure (dicts, nested lists, etc.)
        # Special handling for Functional models: get_input_signature wraps
        # the structure in a list, so unwrap it for analysis
        input_struct = self.input_signature
        if (
            isinstance(self.input_signature, list)
            and len(self.input_signature) == 1
        ):
            input_struct = self.input_signature[0]
        if not tree.is_nested(input_struct):
            # Case 1: Single input - use as-is
            model_to_convert = self.model
            signature_for_conversion = self.input_signature
        elif isinstance(input_struct, list) and len(input_struct) == len(
            tree.flatten(input_struct)
        ):
            # Case 2: Flat list of inputs - use as-is
            model_to_convert = self.model
            signature_for_conversion = self.input_signature
        else:
            # Case 3: Nested structure (dict, nested lists, etc.)
            # Create adapter model that converts flat list to nested structure
            adapted_model = self._create_nested_inputs_adapter(input_struct)
            # Flatten signature for TFLite conversion
            signature_for_conversion = tree.flatten(input_struct)
            # Use adapted model and flat list signature for conversion
            model_to_convert = adapted_model
        # Store original model reference for later use
        original_model = self.model
        # Temporarily replace self.model with the model to convert
        self.model = model_to_convert
        try:
            # Convert the model to TFLite.
            tflite_model = self._convert_to_tflite(signature_for_conversion)
        finally:
            # Restore original model
            self.model = original_model
        # Save the TFLite model to the specified file path.
        with open(filepath, "wb") as f:
            f.write(tflite_model)
        return filepath

    def _create_nested_inputs_adapter(self, input_signature_struct):
        """Create an adapter model that converts flat list inputs to nested
        structure.

        This adapter allows models expecting nested inputs (dicts, lists,
        etc.) to be exported to TFLite format (which only supports
        positional/list inputs).

        Args:
            input_signature_struct: Nested structure of InputSpecs (dict,
                list, etc.)

        Returns:
            A Functional model that accepts flat list inputs and converts to
            nested
        """
        # Get flat paths to preserve names and print input mapping
        paths_and_specs = tree.flatten_with_path(input_signature_struct)
        paths = [".".join(str(e) for e in p) for p, v in paths_and_specs]
        io_utils.print_msg(f"Creating adapter for inputs: {paths}")
        # Create Input layers for TFLite (flat list-based)
        input_layers = []
        for path, spec in paths_and_specs:
            # Extract the input name from spec or path
            name = (
                spec.name
                if hasattr(spec, "name") and spec.name
                else (str(path[-1]) if path else "input")
            )
            input_layer = layers.Input(
                shape=spec.shape[1:],  # Remove batch dimension
                dtype=spec.dtype,
                name=name,
            )
            input_layers.append(input_layer)
        # Reconstruct the nested structure from flat list
        inputs_structure = tree.pack_sequence_as(
            input_signature_struct, input_layers
        )
        # Call the original model with nested inputs
        outputs = self.model(inputs_structure)
        # Build as Functional model (flat list inputs -> nested -> model ->
        # output)
        adapted_model = models.Model(inputs=input_layers, outputs=outputs)
        # Preserve the original model's variables
        adapted_model._variables = self.model.variables
        adapted_model._trainable_variables = self.model.trainable_variables
        adapted_model._non_trainable_variables = (
            self.model.non_trainable_variables
        )
        return adapted_model

    def _convert_to_tflite(self, input_signature):
        """Converts the Keras model to TFLite format.

        Args:
            input_signature: Resolved signature for the conversion. Kept for
                interface stability; `from_keras_model` derives the signature
                from the model itself.

        Returns:
            A bytes object containing the serialized TFLite model.

        Raises:
            RuntimeError: If the TFLite converter fails.
        """
        # Try direct conversion first for all models
        try:
            converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
            converter.target_spec.supported_ops = [
                tf.lite.OpsSet.TFLITE_BUILTINS,
                tf.lite.OpsSet.SELECT_TF_OPS,
            ]
            # Keras 3 only supports resource variables
            converter.experimental_enable_resource_variables = True
            # Apply any additional converter settings from kwargs
            self._apply_converter_kwargs(converter)
            tflite_model = converter.convert()
            return tflite_model
        except Exception as e:
            # If direct conversion fails, raise the error with helpful message
            raise RuntimeError(
                f"Direct TFLite conversion failed. This may be due to model "
                f"complexity or unsupported operations. Error: {e}"
            ) from e

    def _apply_converter_kwargs(self, converter):
        """Apply additional converter settings from kwargs.

        Args:
            converter: tf.lite.TFLiteConverter instance to configure

        Raises:
            ValueError: If any kwarg is not a valid converter attribute
        """
        for attr, value in self.kwargs.items():
            if attr == "target_spec" and isinstance(value, dict):
                # Handle nested target_spec settings
                for spec_key, spec_value in value.items():
                    if hasattr(converter.target_spec, spec_key):
                        setattr(converter.target_spec, spec_key, spec_value)
                    else:
                        raise ValueError(
                            f"Unknown target_spec attribute '{spec_key}'"
                        )
            elif hasattr(converter, attr):
                setattr(converter, attr, value)
            else:
                raise ValueError(f"Unknown converter attribute '{attr}'")
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/export/litert.py",
"license": "Apache License 2.0",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
keras-team/keras:keras/src/export/litert_test.py | import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src import tree
from keras.src.saving import saving_lib
from keras.src.testing.test_utils import named_product
from keras.src.utils.module_utils import litert
from keras.src.utils.module_utils import tensorflow
# Set up LiteRT interpreter with fallback logic:
# 1. Try AI Edge LiteRT interpreter (preferred)
# 2. Fall back to TensorFlow Lite interpreter if AI Edge LiteRT unavailable
AI_EDGE_LITERT_AVAILABLE = False
LiteRTInterpreter = None
if backend.backend() == "tensorflow":
    if litert.available:
        try:
            from ai_edge_litert.interpreter import (
                Interpreter as LiteRTInterpreter,
            )
            AI_EDGE_LITERT_AVAILABLE = True
        # OSError is caught too — presumably the native shared library can
        # fail to load even when the package imports; TODO confirm.
        except (ImportError, OSError):
            LiteRTInterpreter = tensorflow.lite.Interpreter
    else:
        LiteRTInterpreter = tensorflow.lite.Interpreter
# Model types to test (LSTM only if AI Edge LiteRT is available)
model_types = ["sequential", "functional"]
# TODO(#21914): `"lstm"` does not work with ai-edge-litert==1.3.0.
# Unfortunately, for TF 2.20.0, this is the only version which works. Uncomment
# this part when we upgrade TF and ai-edge-litert.
# if AI_EDGE_LITERT_AVAILABLE:
#     model_types.append("lstm")
class CustomModel(models.Model):
    """Minimal subclassed model that applies `layer_list` sequentially."""

    def __init__(self, layer_list):
        super().__init__()
        self.layer_list = layer_list

    def call(self, input):
        # Thread the input through each layer in order.
        result = input
        for current_layer in self.layer_list:
            result = current_layer(result)
        return result
def get_model(type="sequential", input_shape=(10,), layer_list=None):
    """Build a test model of the requested architecture.

    Args:
        type: One of "sequential", "functional", "subclass", "lstm",
            "multi_input", "multi_output". NOTE(review): parameter shadows
            the `type` builtin; renaming would break keyword callers.
        input_shape: Per-sample input shape (no batch dim). Ignored by the
            "lstm" branch, which hard-codes a (4, 10) input.
        layer_list: Optional list of layers; defaults to a small
            Dense/BatchNorm/Dense stack. Only used by the "sequential",
            "functional", and "subclass" branches.

    Returns:
        A built Keras model.

    Raises:
        ValueError: If `type` is not one of the supported names.
    """
    layer_list = layer_list or [
        layers.Dense(10, activation="relu"),
        layers.BatchNormalization(),
        layers.Dense(1, activation="sigmoid"),
    ]
    if type == "sequential":
        model = models.Sequential(layer_list)
        model.build(input_shape=(None,) + input_shape)
        return model
    if type == "functional":
        # map_shape_structure lets input_shape itself be a nested structure
        # of shapes, each becoming an Input layer.
        input = output = tree.map_shape_structure(layers.Input, input_shape)
        for layer in layer_list:
            output = layer(output)
        return models.Model(inputs=input, outputs=output)
    if type == "subclass":
        model = CustomModel(layer_list)
        model.build(input_shape=(None,) + input_shape)
        # Trace the model with dummy data to ensure it's properly built for
        # export
        dummy_input = np.zeros((1,) + input_shape, dtype=np.float32)
        _ = model(dummy_input)  # This traces the model
        return model
    if type == "lstm":
        inputs = layers.Input((4, 10))
        x = layers.Bidirectional(
            layers.LSTM(
                10,
                kernel_initializer="he_normal",
                return_sequences=True,
                kernel_regularizer=None,
            ),
            merge_mode="sum",
        )(inputs)
        outputs = layers.Bidirectional(
            layers.LSTM(
                10,
                kernel_initializer="he_normal",
                return_sequences=True,
                kernel_regularizer=None,
            ),
            merge_mode="concat",
        )(x)
        return models.Model(inputs=inputs, outputs=outputs)
    if type == "multi_input":
        input1 = layers.Input(shape=input_shape, name="input1")
        input2 = layers.Input(shape=input_shape, name="input2")
        x1 = layers.Dense(10, activation="relu")(input1)
        x2 = layers.Dense(10, activation="relu")(input2)
        combined = layers.concatenate([x1, x2])
        output = layers.Dense(1, activation="sigmoid")(combined)
        return models.Model(inputs=[input1, input2], outputs=output)
    if type == "multi_output":
        inputs = layers.Input(shape=input_shape)
        shared = layers.Dense(20, activation="relu")(inputs)
        output1 = layers.Dense(1, activation="sigmoid", name="output1")(shared)
        output2 = layers.Dense(3, activation="softmax", name="output2")(shared)
        return models.Model(inputs=inputs, outputs=[output1, output2])
    raise ValueError(f"Unknown model type: {type}")
def _convert_to_numpy(structure):
    """Convert every tensor-like leaf of `structure` to a numpy array."""

    def _to_numpy(value):
        if hasattr(value, "numpy"):
            return value.numpy()
        return np.array(value)

    return tree.map_structure(_to_numpy, structure)
def _normalize_name(name):
normalized = name.split(":")[0]
if normalized.startswith("serving_default_"):
normalized = normalized[len("serving_default_") :]
return normalized
def _set_interpreter_inputs(interpreter, inputs):
input_details = interpreter.get_input_details()
if isinstance(inputs, dict):
for detail in input_details:
key = _normalize_name(detail["name"])
if key in inputs:
value = inputs[key]
else:
matched_key = None
for candidate in inputs:
if key.endswith(candidate) or candidate.endswith(key):
matched_key = candidate
break
if matched_key is None:
raise KeyError(
f"Unable to match input '{detail['name']}' in provided "
f"inputs"
)
value = inputs[matched_key]
interpreter.set_tensor(detail["index"], value)
else:
values = inputs
if not isinstance(values, (list, tuple)):
values = [values]
if len(values) != len(input_details):
raise ValueError(
"Number of provided inputs does not match interpreter signature"
)
for detail, value in zip(input_details, values):
interpreter.set_tensor(detail["index"], value)
def _get_interpreter_outputs(interpreter):
output_details = interpreter.get_output_details()
outputs = [
interpreter.get_tensor(detail["index"]) for detail in output_details
]
return outputs[0] if len(outputs) == 1 else outputs
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="`export_litert` currently supports the TensorFlow backend only.",
)
class ExportLitertTest(testing.TestCase):
"""Test suite for LiteRT (TFLite) model export functionality.
Tests use AI Edge LiteRT interpreter when available, otherwise fall back
to TensorFlow Lite interpreter for validation.
"""
@parameterized.named_parameters(named_product(model_type=model_types))
def test_standard_model_export(self, model_type):
"""Test exporting standard model types to LiteRT format."""
if model_type == "lstm" and not AI_EDGE_LITERT_AVAILABLE:
self.skipTest("LSTM models require AI Edge LiteRT interpreter.")
temp_filepath = os.path.join(
self.get_temp_dir(), "exported_model.tflite"
)
model = get_model(model_type)
batch_size = 1 # LiteRT expects batch_size=1
if model_type == "lstm":
ref_input = np.random.normal(size=(batch_size, 4, 10))
else:
ref_input = np.random.normal(size=(batch_size, 10))
ref_input = ref_input.astype("float32")
ref_output = _convert_to_numpy(model(ref_input))
# Test with model.export()
model.export(temp_filepath, format="litert")
self.assertTrue(os.path.exists(temp_filepath))
interpreter = LiteRTInterpreter(model_path=temp_filepath)
interpreter.allocate_tensors()
_set_interpreter_inputs(interpreter, ref_input)
interpreter.invoke()
litert_output = _get_interpreter_outputs(interpreter)
self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
@parameterized.named_parameters(
named_product(struct_type=["tuple", "array", "dict"])
)
def test_model_with_input_structure(self, struct_type):
"""Test exporting models with structured inputs (tuple/array/dict)."""
batch_size = 1 # LiteRT expects batch_size=1
base_input = np.random.normal(size=(batch_size, 10)).astype("float32")
if struct_type == "tuple":
# Use Functional API for proper Input layer handling
input1 = layers.Input(shape=(10,), name="input_1")
input2 = layers.Input(shape=(10,), name="input_2")
output = layers.Add()([input1, input2])
model = models.Model(inputs=[input1, input2], outputs=output)
ref_input = (base_input, base_input * 2)
elif struct_type == "array":
# Use Functional API for proper Input layer handling
input1 = layers.Input(shape=(10,), name="input_1")
input2 = layers.Input(shape=(10,), name="input_2")
output = layers.Add()([input1, input2])
model = models.Model(inputs=[input1, input2], outputs=output)
ref_input = [base_input, base_input * 2]
elif struct_type == "dict":
# Use Functional API for proper Input layer handling
input1 = layers.Input(shape=(10,), name="x")
input2 = layers.Input(shape=(10,), name="y")
output = layers.Add()([input1, input2])
model = models.Model(
inputs={"x": input1, "y": input2}, outputs=output
)
ref_input = {"x": base_input, "y": base_input * 2}
else:
raise AssertionError("Unexpected structure type")
temp_filepath = os.path.join(
self.get_temp_dir(), "exported_model.tflite"
)
ref_output = _convert_to_numpy(
model(tree.map_structure(ops.convert_to_tensor, ref_input))
)
# Test with model.export()
model.export(temp_filepath, format="litert")
export_path = temp_filepath
interpreter = LiteRTInterpreter(model_path=export_path)
interpreter.allocate_tensors()
feed_inputs = ref_input
if isinstance(feed_inputs, tuple):
feed_inputs = list(feed_inputs)
_set_interpreter_inputs(interpreter, feed_inputs)
interpreter.invoke()
litert_output = _get_interpreter_outputs(interpreter)
self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
# Verify export still works after saving/loading via saving_lib.
archive_path = os.path.join(self.get_temp_dir(), "revived.keras")
saving_lib.save_model(model, archive_path)
revived_model = saving_lib.load_model(archive_path)
revived_output = _convert_to_numpy(revived_model(ref_input))
self.assertAllClose(ref_output, revived_output)
def test_model_with_multiple_inputs(self):
"""Test exporting models with multiple inputs and batch resizing."""
temp_filepath = os.path.join(
self.get_temp_dir(), "exported_model.tflite"
)
# Use Functional API for proper Input layer handling
input_x = layers.Input(shape=(10,), name="x")
input_y = layers.Input(shape=(10,), name="y")
output = layers.Add()([input_x, input_y])
model = models.Model(inputs=[input_x, input_y], outputs=output)
batch_size = 1 # LiteRT expects batch_size=1
ref_input_x = np.random.normal(size=(batch_size, 10)).astype("float32")
ref_input_y = np.random.normal(size=(batch_size, 10)).astype("float32")
ref_output = _convert_to_numpy(model([ref_input_x, ref_input_y]))
# Test with model.export()
model.export(temp_filepath, format="litert")
export_path = temp_filepath
interpreter = LiteRTInterpreter(model_path=export_path)
interpreter.allocate_tensors()
_set_interpreter_inputs(interpreter, [ref_input_x, ref_input_y])
interpreter.invoke()
litert_output = _get_interpreter_outputs(interpreter)
self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
# Test with a different batch size by resizing interpreter inputs.
larger_x = np.concatenate([ref_input_x, ref_input_x], axis=0)
larger_y = np.concatenate([ref_input_y, ref_input_y], axis=0)
input_details = interpreter.get_input_details()
interpreter.resize_tensor_input(
input_details[0]["index"], larger_x.shape
)
interpreter.resize_tensor_input(
input_details[1]["index"], larger_y.shape
)
interpreter.allocate_tensors()
_set_interpreter_inputs(interpreter, [larger_x, larger_y])
interpreter.invoke()
larger_output = _get_interpreter_outputs(interpreter)
larger_ref_output = _convert_to_numpy(model([larger_x, larger_y]))
self.assertAllClose(
larger_ref_output, larger_output, atol=1e-4, rtol=1e-4
)
def test_export_with_custom_input_signature(self):
"""Test exporting with custom input signature specification."""
model = get_model("sequential")
temp_filepath = os.path.join(
self.get_temp_dir(), "exported_model.tflite"
)
input_signature = [layers.InputSpec(shape=(None, 10), dtype="float32")]
# Test with model.export()
model.export(
temp_filepath,
format="litert",
input_signature=input_signature,
)
export_path = temp_filepath
self.assertTrue(os.path.exists(export_path))
interpreter = LiteRTInterpreter(model_path=export_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(len(input_details), 1)
self.assertEqual(tuple(input_details[0]["shape"][1:]), (10,))
def test_multi_output_model_export(self):
"""Test exporting multi-output models."""
model = get_model("multi_output")
# Build the model
ref_input = np.random.normal(size=(3, 10)).astype("float32")
model(ref_input)
temp_filepath = os.path.join(
self.get_temp_dir(), "exported_model.tflite"
)
model.export(temp_filepath, format="litert")
tflite_path = temp_filepath
self.assertTrue(os.path.exists(tflite_path))
# Test inference
interpreter = LiteRTInterpreter(model_path=tflite_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
self.assertEqual(len(output_details), 2)
test_input = np.random.random(input_details[0]["shape"]).astype(
np.float32
)
interpreter.set_tensor(input_details[0]["index"], test_input)
interpreter.invoke()
for detail in output_details:
output = interpreter.get_tensor(detail["index"])
self.assertIsInstance(output, np.ndarray)
def test_export_with_verbose(self):
"""Test export with verbose output."""
model = get_model("sequential")
dummy_input = np.random.random((3, 10)).astype(np.float32)
model(dummy_input)
temp_filepath = os.path.join(
self.get_temp_dir(), "exported_model.tflite"
)
# Export with verbose=True
model.export(temp_filepath, format="litert", verbose=True)
tflite_path = temp_filepath
self.assertTrue(os.path.exists(tflite_path))
# Verify the exported model works
interpreter = LiteRTInterpreter(model_path=tflite_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(len(input_details), 1)
def test_export_error_handling(self):
"""Test error handling in export API."""
model = get_model("sequential")
dummy_input = np.random.random((3, 10)).astype(np.float32)
model(dummy_input)
temp_filepath = os.path.join(
self.get_temp_dir(), "exported_model.tflite"
)
# Test with invalid format
with self.assertRaises(ValueError):
model.export(temp_filepath, format="invalid_format")
def test_export_invalid_filepath(self):
"""Test that export fails with invalid file extension."""
model = get_model("sequential")
dummy_input = np.random.random((3, 10)).astype(np.float32)
model(dummy_input)
temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.txt")
# Should raise ValueError for wrong extension
with self.assertRaises(ValueError):
model.export(temp_filepath, format="litert")
    def test_export_subclass_model(self):
        """Test exporting subclass models (uses wrapper conversion path)."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        model = get_model("subclass")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "exported_model.tflite"
        )
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        # Reference output computed on the in-memory Keras model.
        ref_output = _convert_to_numpy(model(ref_input))
        # Export subclass model - this tests wrapper-based conversion
        model.export(temp_filepath, format="litert")
        self.assertTrue(os.path.exists(temp_filepath))
        # Verify inference
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        _set_interpreter_inputs(interpreter, ref_input)
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        # Exported model must match the Keras model within tolerance.
        self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
    def test_export_with_optimizations_default(self):
        """Test export with DEFAULT optimization."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        model = get_model("sequential")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "optimized_default.tflite"
        )
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        # Reference output computed before conversion, on the Keras model.
        ref_output = _convert_to_numpy(model(ref_input))
        # Export with DEFAULT optimization
        model.export(
            temp_filepath,
            format="litert",
            optimizations=[tensorflow.lite.Optimize.DEFAULT],
        )
        self.assertTrue(os.path.exists(temp_filepath))
        # Verify inference still works
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        _set_interpreter_inputs(interpreter, ref_input)
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        # Quantized model should be close but not exact
        # (hence the looser 1e-2 tolerances compared to other tests).
        self.assertAllClose(ref_output, litert_output, atol=1e-2, rtol=1e-2)
def test_export_with_optimizations_sparsity(self):
"""Test export with EXPERIMENTAL_SPARSITY optimization."""
if LiteRTInterpreter is None:
self.skipTest("No LiteRT interpreter available")
model = get_model("functional")
temp_filepath = os.path.join(
self.get_temp_dir(), "optimized_sparsity.tflite"
)
batch_size = 1
ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
# Export with EXPERIMENTAL_SPARSITY optimization
model.export(
temp_filepath,
format="litert",
optimizations=[tensorflow.lite.Optimize.EXPERIMENTAL_SPARSITY],
)
self.assertTrue(os.path.exists(temp_filepath))
# Verify the model can run inference
interpreter = LiteRTInterpreter(model_path=temp_filepath)
interpreter.allocate_tensors()
_set_interpreter_inputs(interpreter, ref_input)
interpreter.invoke()
litert_output = _get_interpreter_outputs(interpreter)
# Output should have valid shape
self.assertEqual(litert_output.shape, (batch_size, 1))
def test_export_with_optimizations_size(self):
"""Test export with OPTIMIZE_FOR_SIZE optimization."""
if LiteRTInterpreter is None:
self.skipTest("No LiteRT interpreter available")
model = get_model("sequential")
temp_filepath = os.path.join(
self.get_temp_dir(), "optimized_size.tflite"
)
batch_size = 1
ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
# Export with OPTIMIZE_FOR_SIZE
model.export(
temp_filepath,
format="litert",
optimizations=[tensorflow.lite.Optimize.OPTIMIZE_FOR_SIZE],
)
self.assertTrue(os.path.exists(temp_filepath))
# Verify the model can run inference
interpreter = LiteRTInterpreter(model_path=temp_filepath)
interpreter.allocate_tensors()
_set_interpreter_inputs(interpreter, ref_input)
interpreter.invoke()
litert_output = _get_interpreter_outputs(interpreter)
self.assertEqual(litert_output.shape, (batch_size, 1))
def test_export_with_optimizations_latency(self):
"""Test export with OPTIMIZE_FOR_LATENCY optimization."""
if LiteRTInterpreter is None:
self.skipTest("No LiteRT interpreter available")
model = get_model("functional")
temp_filepath = os.path.join(
self.get_temp_dir(), "optimized_latency.tflite"
)
batch_size = 1
ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
# Export with OPTIMIZE_FOR_LATENCY
model.export(
temp_filepath,
format="litert",
optimizations=[tensorflow.lite.Optimize.OPTIMIZE_FOR_LATENCY],
)
self.assertTrue(os.path.exists(temp_filepath))
# Verify the model can run inference
interpreter = LiteRTInterpreter(model_path=temp_filepath)
interpreter.allocate_tensors()
_set_interpreter_inputs(interpreter, ref_input)
interpreter.invoke()
litert_output = _get_interpreter_outputs(interpreter)
self.assertEqual(litert_output.shape, (batch_size, 1))
    def test_export_with_multiple_optimizations(self):
        """Test export with multiple optimization options combined."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        model = get_model("sequential")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "optimized_multiple.tflite"
        )
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        # Export with multiple optimizations
        # (DEFAULT and EXPERIMENTAL_SPARSITY passed together).
        model.export(
            temp_filepath,
            format="litert",
            optimizations=[
                tensorflow.lite.Optimize.DEFAULT,
                tensorflow.lite.Optimize.EXPERIMENTAL_SPARSITY,
            ],
        )
        self.assertTrue(os.path.exists(temp_filepath))
        # Verify the model can run inference
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        _set_interpreter_inputs(interpreter, ref_input)
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        # Only the output shape is asserted here.
        self.assertEqual(litert_output.shape, (batch_size, 1))
    def test_export_with_representative_dataset(self):
        """Test export with representative dataset for better quantization."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        model = get_model("functional")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_model.tflite"
        )
        # Create representative dataset
        def representative_dataset():
            # Yields sample input batches used by the converter to
            # calibrate quantization ranges.
            for _ in range(10):
                yield [np.random.normal(size=(1, 10)).astype("float32")]
        # Export with optimizations and representative dataset
        model.export(
            temp_filepath,
            format="litert",
            optimizations=[tensorflow.lite.Optimize.DEFAULT],
            representative_dataset=representative_dataset,
        )
        self.assertTrue(os.path.exists(temp_filepath))
        # Verify the model can run inference
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        _set_interpreter_inputs(interpreter, ref_input)
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        # Output should have valid shape
        self.assertEqual(litert_output.shape, (batch_size, 1))
    def test_export_with_multiple_kwargs(self):
        """Test export with multiple converter kwargs."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        # Create a larger model for quantization testing
        inputs = layers.Input(shape=(28, 28, 3))
        x = layers.Conv2D(32, 3, activation="relu")(inputs)
        x = layers.MaxPooling2D()(x)
        x = layers.Flatten()(x)
        x = layers.Dense(10, activation="softmax")(x)
        model = models.Model(inputs, x)
        temp_filepath = os.path.join(
            self.get_temp_dir(), "multi_kwargs_model.tflite"
        )
        # Create representative dataset
        def representative_dataset():
            # Calibration batches matching the model's input shape.
            for _ in range(5):
                yield [np.random.normal(size=(1, 28, 28, 3)).astype("float32")]
        # Export with multiple kwargs
        # (optimizations + representative_dataset + converter flag).
        model.export(
            temp_filepath,
            format="litert",
            optimizations=[tensorflow.lite.Optimize.DEFAULT],
            representative_dataset=representative_dataset,
            experimental_new_quantizer=True,
        )
        self.assertTrue(os.path.exists(temp_filepath))
        # Verify file size is reduced compared to non-quantized
        file_size = os.path.getsize(temp_filepath)
        self.assertGreater(file_size, 0)
    def test_export_optimization_file_size_comparison(self):
        """Test that optimizations reduce file size."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        # Create a larger model to see size differences
        inputs = layers.Input(shape=(28, 28, 3))
        x = layers.Conv2D(64, 3, activation="relu")(inputs)
        x = layers.Conv2D(64, 3, activation="relu")(x)
        x = layers.MaxPooling2D()(x)
        x = layers.Flatten()(x)
        x = layers.Dense(128, activation="relu")(x)
        x = layers.Dense(10, activation="softmax")(x)
        model = models.Model(inputs, x)
        # Export without optimization
        filepath_no_opt = os.path.join(
            self.get_temp_dir(), "model_no_opt.tflite"
        )
        model.export(filepath_no_opt, format="litert")
        # Export with optimization
        filepath_with_opt = os.path.join(
            self.get_temp_dir(), "model_with_opt.tflite"
        )
        model.export(
            filepath_with_opt,
            format="litert",
            optimizations=[tensorflow.lite.Optimize.DEFAULT],
        )
        # Optimized model should be smaller
        size_no_opt = os.path.getsize(filepath_no_opt)
        size_with_opt = os.path.getsize(filepath_with_opt)
        self.assertLess(
            size_with_opt,
            size_no_opt,
            f"Optimized model ({size_with_opt} bytes) should be smaller "
            f"than non-optimized ({size_no_opt} bytes)",
        )
        # Typically expect ~75% size reduction with quantization
        reduction_ratio = size_with_opt / size_no_opt
        self.assertLess(
            reduction_ratio,
            0.5,  # Should be less than 50% of original size
            f"Expected significant size reduction, got {reduction_ratio:.2%}",
        )
    def test_signature_def_with_named_model(self):
        """Test that exported models have SignatureDef with input names."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        # Build a model with explicit layer names
        inputs = layers.Input(shape=(10,), name="feature_input")
        x = layers.Dense(32, activation="relu", name="encoder")(inputs)
        x = layers.Dense(16, activation="relu", name="bottleneck")(x)
        outputs = layers.Dense(
            1, activation="sigmoid", name="prediction_output"
        )(x)
        model = models.Model(inputs=inputs, outputs=outputs, name="named_model")
        temp_filepath = os.path.join(self.get_temp_dir(), "named_model.tflite")
        # Export the model
        model.export(temp_filepath, format="litert")
        self.assertTrue(os.path.exists(temp_filepath))
        # Load and check SignatureDef
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        # Get SignatureDef information
        signature_defs = interpreter.get_signature_list()
        self.assertIn("serving_default", signature_defs)
        serving_sig = signature_defs["serving_default"]
        sig_inputs = serving_sig.get("inputs", [])
        sig_outputs = serving_sig.get("outputs", [])
        # Verify SignatureDef has inputs and outputs
        self.assertGreater(
            len(sig_inputs), 0, "Should have at least one input in SignatureDef"
        )
        self.assertGreater(
            len(sig_outputs),
            0,
            "Should have at least one output in SignatureDef",
        )
        # Verify input names are preserved (they should match Keras input names)
        self.assertIn(
            "feature_input",
            sig_inputs,
            f"Input name 'feature_input' should be in SignatureDef inputs: "
            f"{sig_inputs}",
        )
        # Verify inference works using signature runner
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_output = _convert_to_numpy(model(ref_input))
        # Note: For single-output Functional models, Keras returns a tensor
        # (not dict). SignatureDef will have generic output names like
        # 'output_0'.
        # Only multi-output models or models with explicit dict returns have
        # named outputs
        # Test inference using signature runner for better output name handling
        signature_runner = interpreter.get_signature_runner("serving_default")
        # The runner accepts keyword arguments keyed by SignatureDef input
        # names, so the Keras input name is usable directly here.
        sig_output = signature_runner(feature_input=ref_input)
        # sig_output should be a dict with meaningful output names
        self.assertIsInstance(sig_output, dict)
        self.assertGreater(
            len(sig_output), 0, "Should have at least one output"
        )
        # For single output, extract the value
        if len(sig_output) == 1:
            litert_output = list(sig_output.values())[0]
        else:
            litert_output = list(sig_output.values())
        self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
    def test_signature_def_with_functional_model(self):
        """Test that SignatureDef preserves input/output names for
        Functional models."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        # Create a Functional model with named inputs and outputs
        inputs = layers.Input(shape=(10,), name="input_layer")
        x = layers.Dense(32, activation="relu", name="hidden_layer")(inputs)
        outputs = layers.Dense(1, activation="sigmoid", name="output_layer")(x)
        model = models.Model(
            inputs=inputs, outputs=outputs, name="functional_model"
        )
        temp_filepath = os.path.join(
            self.get_temp_dir(), "functional_model.tflite"
        )
        # Export the model
        model.export(temp_filepath, format="litert")
        self.assertTrue(os.path.exists(temp_filepath))
        # Load and check SignatureDef
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        # Get SignatureDef information
        signature_defs = interpreter.get_signature_list()
        self.assertIn("serving_default", signature_defs)
        serving_sig = signature_defs["serving_default"]
        sig_inputs = serving_sig.get("inputs", [])
        sig_outputs = serving_sig.get("outputs", [])
        # Verify SignatureDef has inputs and outputs
        self.assertGreater(
            len(sig_inputs), 0, "Should have at least one input in SignatureDef"
        )
        self.assertGreater(
            len(sig_outputs),
            0,
            "Should have at least one output in SignatureDef",
        )
        # Verify that input names are preserved
        self.assertIn(
            "input_layer",
            sig_inputs,
            f"Input name 'input_layer' should be in SignatureDef inputs: "
            f"{sig_inputs}",
        )
        # Test inference using signature runner for named outputs
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_output = _convert_to_numpy(model(ref_input))
        # Use signature runner to get outputs with meaningful names
        # (inputs are passed as keyword args keyed by SignatureDef name).
        signature_runner = interpreter.get_signature_runner("serving_default")
        sig_output = signature_runner(input_layer=ref_input)
        # sig_output should be a dict with output names
        self.assertIsInstance(sig_output, dict)
        self.assertGreater(
            len(sig_output), 0, "Should have at least one output"
        )
        # For single output, TFLite typically uses generic names like 'output_0'
        # Extract the single output value
        if len(sig_output) == 1:
            litert_output = list(sig_output.values())[0]
        else:
            litert_output = list(sig_output.values())
        self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
    def test_signature_def_with_multi_input_model(self):
        """Test that SignatureDef preserves names for multi-input models."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        # Create a multi-input model
        input1 = layers.Input(shape=(10,), name="input_1")
        input2 = layers.Input(shape=(5,), name="input_2")
        concat = layers.Concatenate(name="concat_layer")([input1, input2])
        outputs = layers.Dense(1, activation="sigmoid", name="output")(concat)
        model = models.Model(
            inputs=[input1, input2], outputs=outputs, name="multi_input_model"
        )
        temp_filepath = os.path.join(
            self.get_temp_dir(), "multi_input_model.tflite"
        )
        # Export the model
        model.export(temp_filepath, format="litert")
        self.assertTrue(os.path.exists(temp_filepath))
        # Load and check SignatureDef
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        # Get SignatureDef information
        signature_defs = interpreter.get_signature_list()
        self.assertIn("serving_default", signature_defs)
        serving_sig = signature_defs["serving_default"]
        sig_inputs = serving_sig.get("inputs", [])
        sig_outputs = serving_sig.get("outputs", [])
        # Verify SignatureDef has correct number of inputs and outputs
        self.assertEqual(
            len(sig_inputs), 2, "Should have 2 inputs in SignatureDef"
        )
        self.assertGreater(
            len(sig_outputs),
            0,
            "Should have at least one output in SignatureDef",
        )
        # Verify that input names are preserved
        self.assertIn(
            "input_1",
            sig_inputs,
            f"Input name 'input_1' should be in SignatureDef inputs: "
            f"{sig_inputs}",
        )
        self.assertIn(
            "input_2",
            sig_inputs,
            f"Input name 'input_2' should be in SignatureDef inputs: "
            f"{sig_inputs}",
        )
        # Test inference using signature runner
        batch_size = 1
        ref_input1 = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_input2 = np.random.normal(size=(batch_size, 5)).astype("float32")
        ref_inputs = [ref_input1, ref_input2]
        ref_output = _convert_to_numpy(model(ref_inputs))
        # Use signature runner with named inputs
        # (each keyword corresponds to a SignatureDef input name).
        signature_runner = interpreter.get_signature_runner("serving_default")
        sig_output = signature_runner(input_1=ref_input1, input_2=ref_input2)
        # sig_output should be a dict with output names
        self.assertIsInstance(sig_output, dict)
        self.assertGreater(
            len(sig_output), 0, "Should have at least one output"
        )
        # For single output, TFLite uses generic names like 'output_0'
        # Extract the single output value
        if len(sig_output) == 1:
            litert_output = list(sig_output.values())[0]
        else:
            litert_output = list(sig_output.values())
        self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
    def test_signature_def_with_multi_output_model(self):
        """Test that SignatureDef handles multi-output models correctly."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        # Create a multi-output model
        inputs = layers.Input(shape=(10,), name="input_layer")
        x = layers.Dense(32, activation="relu", name="shared_layer")(inputs)
        output1 = layers.Dense(1, activation="sigmoid", name="output_1")(x)
        output2 = layers.Dense(2, activation="softmax", name="output_2")(x)
        model = models.Model(
            inputs=inputs, outputs=[output1, output2], name="multi_output_model"
        )
        temp_filepath = os.path.join(
            self.get_temp_dir(), "multi_output_model.tflite"
        )
        # Export the model
        model.export(temp_filepath, format="litert")
        self.assertTrue(os.path.exists(temp_filepath))
        # Load and check SignatureDef
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        # Get SignatureDef information
        signature_defs = interpreter.get_signature_list()
        self.assertIn("serving_default", signature_defs)
        serving_sig = signature_defs["serving_default"]
        sig_inputs = serving_sig.get("inputs", [])
        sig_outputs = serving_sig.get("outputs", [])
        # Verify SignatureDef structure
        self.assertGreater(
            len(sig_inputs), 0, "Should have at least one input in SignatureDef"
        )
        self.assertEqual(
            len(sig_outputs), 2, "Should have 2 outputs in SignatureDef"
        )
        # Test inference using signature runner
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_outputs = _convert_to_numpy(model(ref_input))
        # Use signature runner
        signature_runner = interpreter.get_signature_runner("serving_default")
        sig_output = signature_runner(input_layer=ref_input)
        # sig_output should be a dict with output names
        self.assertIsInstance(sig_output, dict)
        self.assertEqual(len(sig_output), 2, "Should have 2 outputs")
        # Note: TFLite uses generic names like 'output_0', 'output_1' for
        # SignatureDef outputs. These don't match the Keras layer names
        # ('output_1', 'output_2') - this is expected. The names come from
        # TensorFlow's symbolic tracing, not from our exporter code.
        # Verify outputs match by position
        sig_output_values = list(sig_output.values())
        for i, ref_out in enumerate(ref_outputs):
            self.assertAllClose(
                ref_out, sig_output_values[i], atol=1e-4, rtol=1e-4
            )
def test_dict_input_adapter_creation(self):
"""Test that dict input adapter is created and works correctly."""
# Create a model with dictionary inputs
input1 = layers.Input(shape=(10,), name="x")
input2 = layers.Input(shape=(10,), name="y")
output = layers.Add()([input1, input2])
model = models.Model(inputs={"x": input1, "y": input2}, outputs=output)
temp_filepath = os.path.join(
self.get_temp_dir(), "dict_adapter_model.tflite"
)
# Export with verbose to verify adapter creation messages
model.export(temp_filepath, format="litert", verbose=True)
# Verify the file was created
self.assertTrue(os.path.exists(temp_filepath))
# Load and test the model
interpreter = LiteRTInterpreter(model_path=temp_filepath)
interpreter.allocate_tensors()
# Check input details - should have 2 inputs in list form
input_details = interpreter.get_input_details()
self.assertEqual(len(input_details), 2)
# Test inference
batch_size = 1
x_val = np.random.normal(size=(batch_size, 10)).astype("float32")
y_val = np.random.normal(size=(batch_size, 10)).astype("float32")
ref_output = _convert_to_numpy(
model(
{
"x": ops.convert_to_tensor(x_val),
"y": ops.convert_to_tensor(y_val),
}
)
)
# Set inputs as list (adapter converts list to dict internally)
_set_interpreter_inputs(interpreter, [x_val, y_val])
interpreter.invoke()
litert_output = _get_interpreter_outputs(interpreter)
self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
def test_dict_input_signature_inference(self):
"""Test automatic inference of dict input signatures."""
# Create a model with dictionary inputs (without calling it first)
input1 = layers.Input(shape=(5,), name="feature_a")
input2 = layers.Input(shape=(3,), name="feature_b")
concat = layers.Concatenate()([input1, input2])
output = layers.Dense(1)(concat)
model = models.Model(
inputs={"feature_a": input1, "feature_b": input2}, outputs=output
)
temp_filepath = os.path.join(
self.get_temp_dir(), "inferred_dict_model.tflite"
)
# Export without providing input_signature - should be inferred
model.export(temp_filepath, format="litert")
# Verify successful export
self.assertTrue(os.path.exists(temp_filepath))
# Load and verify structure
interpreter = LiteRTInterpreter(model_path=temp_filepath)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(len(input_details), 2)
# Verify shapes match expected
shapes = [tuple(d["shape"][1:]) for d in input_details]
self.assertIn((5,), shapes)
self.assertIn((3,), shapes)
def test_dict_input_with_custom_signature(self):
"""Test dict input export with custom input signature."""
# Create model with dict inputs
input1 = layers.Input(shape=(10,), name="input_x")
input2 = layers.Input(shape=(10,), name="input_y")
output = layers.Multiply()([input1, input2])
model = models.Model(
inputs={"input_x": input1, "input_y": input2}, outputs=output
)
temp_filepath = os.path.join(
self.get_temp_dir(), "dict_custom_sig_model.tflite"
)
# Provide custom dict input signature
input_signature = {
"input_x": layers.InputSpec(shape=(None, 10), dtype="float32"),
"input_y": layers.InputSpec(shape=(None, 10), dtype="float32"),
}
model.export(
temp_filepath, format="litert", input_signature=input_signature
)
# Verify export
self.assertTrue(os.path.exists(temp_filepath))
interpreter = LiteRTInterpreter(model_path=temp_filepath)
interpreter.allocate_tensors()
# Test inference
batch_size = 1
x_val = np.random.normal(size=(batch_size, 10)).astype("float32")
y_val = np.random.normal(size=(batch_size, 10)).astype("float32")
ref_output = _convert_to_numpy(
model(
{
"input_x": ops.convert_to_tensor(x_val),
"input_y": ops.convert_to_tensor(y_val),
}
)
)
_set_interpreter_inputs(interpreter, [x_val, y_val])
interpreter.invoke()
litert_output = _get_interpreter_outputs(interpreter)
self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
def test_dict_input_numerical_accuracy(self):
"""Test numerical accuracy of dict input models with complex ops."""
# Create a more complex model with dict inputs
input1 = layers.Input(shape=(20,), name="tokens")
input2 = layers.Input(shape=(20,), name="mask")
# Apply some transformations
x1 = layers.Dense(16, activation="relu")(input1)
x2 = layers.Dense(16, activation="relu")(input2)
# Combine
combined = layers.Multiply()([x1, x2])
output = layers.Dense(1, activation="sigmoid")(combined)
model = models.Model(
inputs={"tokens": input1, "mask": input2}, outputs=output
)
temp_filepath = os.path.join(
self.get_temp_dir(), "dict_numerical_model.tflite"
)
model.export(temp_filepath, format="litert")
# Test with multiple samples
batch_size = 1
tokens_val = np.random.normal(size=(batch_size, 20)).astype("float32")
mask_val = np.random.normal(size=(batch_size, 20)).astype("float32")
ref_output = _convert_to_numpy(
model(
{
"tokens": ops.convert_to_tensor(tokens_val),
"mask": ops.convert_to_tensor(mask_val),
}
)
)
interpreter = LiteRTInterpreter(model_path=temp_filepath)
interpreter.allocate_tensors()
_set_interpreter_inputs(interpreter, [tokens_val, mask_val])
interpreter.invoke()
litert_output = _get_interpreter_outputs(interpreter)
# Should have good numerical accuracy
self.assertAllClose(ref_output, litert_output, atol=1e-5, rtol=1e-5)
def test_dict_input_preserves_variable_sharing(self):
"""Test that adapter preserves variable sharing from original model."""
# Create model with shared layers
shared_dense = layers.Dense(8, activation="relu")
input1 = layers.Input(shape=(10,), name="branch_a")
input2 = layers.Input(shape=(10,), name="branch_b")
# Both inputs go through same shared layer
x1 = shared_dense(input1)
x2 = shared_dense(input2)
output = layers.Add()([x1, x2])
model = models.Model(
inputs={"branch_a": input1, "branch_b": input2}, outputs=output
)
# Train briefly to ensure weights are meaningful
model.compile(optimizer="adam", loss="mse")
x_train = {
"branch_a": np.random.normal(size=(5, 10)).astype("float32"),
"branch_b": np.random.normal(size=(5, 10)).astype("float32"),
}
y_train = np.random.normal(size=(5, 8)).astype("float32")
model.fit(x_train, y_train, epochs=1, verbose=0)
temp_filepath = os.path.join(
self.get_temp_dir(), "dict_shared_vars_model.tflite"
)
model.export(temp_filepath, format="litert")
# Verify export works and inference matches
interpreter = LiteRTInterpreter(model_path=temp_filepath)
interpreter.allocate_tensors()
batch_size = 1
a_val = np.random.normal(size=(batch_size, 10)).astype("float32")
b_val = np.random.normal(size=(batch_size, 10)).astype("float32")
ref_output = _convert_to_numpy(
model(
{
"branch_a": ops.convert_to_tensor(a_val),
"branch_b": ops.convert_to_tensor(b_val),
}
)
)
_set_interpreter_inputs(interpreter, [a_val, b_val])
interpreter.invoke()
litert_output = _get_interpreter_outputs(interpreter)
self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
def test_dict_input_multi_output_model(self):
"""Test dict input model with multiple outputs exports successfully."""
# Create model with dict inputs and multiple outputs
input1 = layers.Input(shape=(10,), name="feature_1")
input2 = layers.Input(shape=(10,), name="feature_2")
# Two output branches
output1 = layers.Dense(5, name="output_a")(input1)
output2 = layers.Dense(3, name="output_b")(input2)
model = models.Model(
inputs={"feature_1": input1, "feature_2": input2},
outputs=[output1, output2],
)
temp_filepath = os.path.join(
self.get_temp_dir(), "dict_multi_output_model.tflite"
)
# Main test: export should succeed with dict inputs + multi outputs
model.export(temp_filepath, format="litert")
# Verify file was created
self.assertTrue(os.path.exists(temp_filepath))
# Verify structure
interpreter = LiteRTInterpreter(model_path=temp_filepath)
interpreter.allocate_tensors()
# Should have 2 inputs (from dict)
input_details = interpreter.get_input_details()
self.assertEqual(len(input_details), 2)
# Should have 2 outputs
output_details = interpreter.get_output_details()
self.assertEqual(len(output_details), 2)
# Verify shapes
output_shapes = [tuple(d["shape"][1:]) for d in output_details]
self.assertIn((5,), output_shapes)
self.assertIn((3,), output_shapes)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/export/litert_test.py",
"license": "Apache License 2.0",
"lines": 1079,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/callbacks/orbax_checkpoint.py | import warnings
import numpy as np
from keras.src import backend
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.callbacks.monitor_callback import (
MonitorCallback, # For metric monitoring logic
)
from keras.src.saving import saving_lib
from keras.src.utils.module_utils import ocp
# Context and AsyncOptions are accessed through the lazy-loaded ocp module
# JAX monitoring compatibility shim: some older JAX releases do not expose
# `jax.monitoring.record_scalar`. Install a no-op stand-in so later calls
# cannot fail with AttributeError. If JAX is absent, do nothing.
try:
    import jax
except ImportError:
    pass
else:
    if not hasattr(jax.monitoring, "record_scalar"):
        jax.monitoring.record_scalar = lambda *args, **kwargs: None
def _get_state_tree(model):
    """Get the complete model state as a nested tree structure.

    On the JAX backend the native arrays are returned untouched for
    performance. On every other backend the state is materialized as numpy
    arrays, and numpy scalars are converted to plain Python scalars for
    Orbax compatibility.
    """
    if backend.backend() == "jax":
        # JAX: hand back native arrays directly — no conversion needed.
        return model.get_state_tree()

    state_tree = model.get_state_tree(value_format="numpy_array")

    def _to_python_scalar(value):
        # 0-d numpy arrays and numpy scalar types (e.g. np.float32) both
        # need to become plain Python scalars for Orbax serialization.
        if isinstance(value, np.ndarray) and value.ndim == 0:
            return value.item()
        if isinstance(value, np.generic):
            return value.item()
        return value

    return tree.map_structure(_to_python_scalar, state_tree)
@keras_export("keras.callbacks.OrbaxCheckpoint")
class OrbaxCheckpoint(MonitorCallback):
    """Callback to save and load model state using Orbax with a similar API to
    ModelCheckpoint.

    This callback saves the model's weights and optimizer state asynchronously
    using Orbax, allowing training to continue without blocking for I/O.

    **Multi-host Support**: When running in a multi-host distributed training
    environment with JAX backend, this callback automatically coordinates
    checkpointing across all hosts to ensure consistency and proper
    synchronization. Multi-host checkpointing is only supported on JAX.

    Example:

    ```python
    model.compile(loss=..., optimizer=..., metrics=['accuracy'])

    EPOCHS = 10
    checkpoint_dir = '/tmp/ckpt'
    orbax_checkpoint_callback = keras.callbacks.OrbaxCheckpoint(
        directory=checkpoint_dir,
        monitor='val_accuracy',
        mode='max',
        save_best_only=True)

    # Model is saved at the end of every epoch, if it's the best seen so far.
    model.fit(epochs=EPOCHS, callbacks=[orbax_checkpoint_callback])

    # Alternatively, save checkpoints every N batches -
    orbax_checkpoint_callback = keras.callbacks.OrbaxCheckpoint(
        directory=checkpoint_dir,
        save_freq=100)  # Save every 100 batches

    model.fit(epochs=EPOCHS, callbacks=[orbax_checkpoint_callback])
    ```

    Args:
        directory: path to the directory where to save the checkpoints.
        monitor: The metric name to monitor (e.g., 'val_loss').
        verbose: Verbosity mode, 0 or 1.
        save_best_only: if `save_best_only=True`, it only saves when the model
            is considered the "best" based on the monitored quantity.
        mode: one of {'auto', 'min', 'max'}. Used with `save_best_only`.
        save_freq: `'epoch'` or integer. Frequency to save checkpoints.
        max_to_keep: Integer, maximum number of recent checkpoints to keep.
            If None, keeps all. Defaults to 1.
        save_on_background: Boolean, whether to save asynchronously in the
            background. Defaults to True.
        initial_value_threshold: Floating point initial "best" value for the
            monitor, used with `save_best_only`.
        save_weights_only: Boolean. If True, only the model's variables
            (trainable and non-trainable) are checkpointed. If False, the
            serialized model config and assets are saved alongside the
            variables so the full model can be restored.
    """

    def __init__(
        self,
        directory,
        monitor="val_loss",
        verbose=0,
        save_best_only=False,
        mode="auto",
        save_freq="epoch",
        initial_value_threshold=None,
        max_to_keep=1,
        save_on_background=True,
        save_weights_only=False,
    ):
        # Ensure orbax is available before any other setup.
        ocp.initialize()
        # Initialize MonitorCallback for handling 'monitor', 'mode', 'best'
        # logic
        super().__init__(monitor, mode, initial_value_threshold)
        self.directory = directory
        self.verbose = verbose
        self.save_best_only = save_best_only
        self.save_freq = save_freq
        self.max_to_keep = max_to_keep
        self.save_on_background = save_on_background
        self.save_weights_only = save_weights_only
        # Bookkeeping for integer `save_freq` (batch-level saving).
        self._batches_seen_since_last_saving = 0
        self._last_batch_seen = 0
        self._current_epoch = 0  # Keep track of epoch
        self._total_batches_seen = 0  # Global batch counter for step tracking
        self._async_futures = []  # Track async save futures
        # Multi-host support
        self._multihost_initialized = self._is_multihost_initialized()
        if self.save_freq != "epoch" and not isinstance(self.save_freq, int):
            raise ValueError(
                f"Unrecognized save_freq: {self.save_freq}. "
                "Expected save_freq are 'epoch' or integer values"
            )
        # --- Orbax Checkpointer Setup (V1 API) ---
        policies = []
        if max_to_keep is not None:
            policies.append(
                ocp.training.preservation_policies.LatestN(max_to_keep)
            )
        # Use AnyPreservationPolicy to combine them, or use directly
        # if single policy
        preservation_policy = None
        if policies:
            if len(policies) == 1:
                preservation_policy = policies[0]
            else:
                preservation_policy = (
                    ocp.training.preservation_policies.AnyPreservationPolicy(
                        policies
                    )
                )
        # Create the V1 Checkpointer with direct parameter passing
        # Orbax will handle directory creation on all processes as needed
        # save_decision_policy is required for proper coordination of
        # rapid async saves
        self.checkpointer = ocp.training.Checkpointer(
            directory=directory,
            preservation_policy=preservation_policy,
            save_decision_policy=ocp.training.save_decision_policies.FixedIntervalPolicy(
                1
            ),
        )

    def _is_multihost_initialized(self):
        """Check if multi-host environment is initialized."""
        # Multi-host checkpointing is only supported on JAX backend
        if backend.backend() != "jax":
            return False
        multihost = ocp.multihost
        # Check if JAX distributed client is initialized
        # (indicates multihost setup)
        return multihost.is_jax_distributed_client_initialized()

    def _sync_processes(self, key=None):
        """Synchronize all processes across hosts.

        No-op on single-host setups. `key` names the barrier so distinct
        sync points do not interfere with each other.
        """
        if not self._multihost_initialized:
            return  # No-op for single host
        multihost = ocp.multihost
        sync_key = key or "orbax_checkpoint_sync"
        multihost.sync_global_processes(sync_key)

    def is_multihost_enabled(self):
        """Return True if multi-host checkpointing is enabled and initialized.

        This method can be used to check if the callback is operating in
        a multi-host distributed training environment. Multi-host checkpointing
        is only supported on JAX backend.

        Returns:
            bool: True if multi-host support is active, False otherwise.
        """
        return self._multihost_initialized

    def is_primary_host(self):
        """Return True if this process is the primary host in multi-host setup.

        In multi-host environments, only the primary host typically handles
        logging and coordination tasks. Multi-host checkpointing is only
        supported on JAX backend.

        Returns:
            bool: True if this is the primary host, False otherwise.
            Always returns True in single-host environments.
        """
        if not self._multihost_initialized:
            return True  # Single host is always primary
        multihost = ocp.multihost
        return multihost.is_primary_host()

    def _should_save_on_batch(self, batch):
        """Check if we should save on this batch.

        Only meaningful when `save_freq` is an integer; always False for
        `save_freq="epoch"`. Also maintains the global batch counter used
        as the Orbax step number.
        """
        if self.save_freq == "epoch":
            return False
        if batch <= self._last_batch_seen:  # New epoch.
            # Batch index reset: count the batches seen in the new epoch
            # so far (indices are 0-based, hence +1).
            add_batches = batch + 1
        else:
            add_batches = batch - self._last_batch_seen
        self._batches_seen_since_last_saving += add_batches
        self._last_batch_seen = batch
        self._total_batches_seen += add_batches
        if self._batches_seen_since_last_saving >= self.save_freq:
            self._batches_seen_since_last_saving = 0
            return True
        return False

    def _save_checkpoint(self, step, logs=None):
        """Save a checkpoint at the given step with multi-host coordination."""
        # --- Prepare Composite State (Backend-Agnostic) ---
        state_tree = _get_state_tree(self.model)
        # Save the nested state structures directly (preserving layer
        # names and structure)
        if self.save_weights_only:
            composite_state = {
                "trainable_variables": state_tree["trainable_variables"],
                "non_trainable_variables": state_tree[
                    "non_trainable_variables"
                ],
            }
        else:
            composite_state = state_tree
            # Include model configuration for full model restoration
            # Use saving_lib helper to properly handle shared objects
            config_json, _ = saving_lib._serialize_model_as_json(self.model)
            composite_state["model_config"] = config_json
        # Collect assets if saving full model (not just weights)
        assets_dict = None
        if not self.save_weights_only:
            assets_dict = saving_lib._save_assets_to_dict(self.model)
        # Use a single with statement. If context_options is empty,
        # Context() uses defaults.
        with ocp.Context():
            # Determine sync vs async based on save_on_background setting
            use_sync = not self.save_on_background
            # Always use checkpointables API for consistency
            # If no assets, just pass pytree alone
            if assets_dict is not None:
                payload = {
                    "pytree": composite_state,
                    "assets": assets_dict,
                }
            else:
                payload = {
                    "pytree": composite_state,
                }
            # Execute save based on sync/async mode
            if use_sync:
                self.checkpointer.save_checkpointables(step, payload)
            else:
                # Keep the future so wait_until_finished() can block on it.
                future = self.checkpointer.save_checkpointables_async(
                    step, payload
                )
                self._async_futures.append(future)

    def on_train_batch_end(self, batch, logs=None):
        """Possibly save a checkpoint at the end of a training batch.

        Only active when `save_freq` is an integer; honors
        `save_best_only` using the monitored metric from `logs`.
        """
        if self._should_save_on_batch(batch):
            # Handle save_best_only logic for batch-level saving
            should_save = True
            if self.save_best_only:
                current = logs.get(self.monitor) if logs else None
                if current is None:
                    warnings.warn(
                        f"Can save best model only with {self.monitor} "
                        f"available, skipping save at batch {batch}.",
                        stacklevel=2,
                    )
                    should_save = False
                elif not self._is_improvement(current, self.best):
                    should_save = False
                else:
                    # Update best value when there's improvement
                    self.best = current
            if should_save:
                # Use global batch count for Orbax save step
                step = self._total_batches_seen
                self._save_checkpoint(step=step, logs=logs)

    def on_epoch_end(self, epoch, logs=None):
        """Possibly save a checkpoint at the end of an epoch.

        Only active when `save_freq == "epoch"`; honors `save_best_only`
        using the monitored metric from `logs`.
        """
        self._current_epoch = epoch
        if self.monitor_op is None:
            self._set_monitor_op()  # From MonitorCallback
        # For save_freq="epoch", save at every epoch
        should_save = self.save_freq == "epoch"
        # Handle save_best_only logic
        if should_save and self.save_best_only:
            current = logs.get(self.monitor) if logs else None
            if current is None:
                warnings.warn(
                    f"Can save best model only with {self.monitor} available, "
                    f"skipping save at epoch {epoch}.",
                    stacklevel=2,
                )
                should_save = False
            elif not self._is_improvement(current, self.best):
                should_save = False
            else:
                # Update best value when there's improvement
                self.best = current
        if should_save:
            # Use epoch number as the step for Orbax save
            self._save_checkpoint(step=epoch, logs=logs)

    def on_train_end(self, logs=None):
        """Flush pending saves and release checkpointer resources."""
        # Close the Checkpointer - this waits for any pending async saves
        # to complete before closing
        try:
            self.checkpointer.close()
        except Exception:
            pass  # Ignore errors during cleanup
        # Multi-host synchronization: ensure all hosts complete cleanup
        self._sync_processes("checkpoint_cleanup")

    def wait_until_finished(self):
        """Wait for any in-progress checkpoint operations to complete.

        This method blocks until all asynchronous checkpoint save operations
        have completed across all hosts in a multi-host setup.
        """
        # Wait for all tracked async futures to complete
        for future in self._async_futures:
            future.result()  # Wait for completion
        self._async_futures.clear()  # Clear completed futures
        # Wait for any remaining async operations to complete on this host
        self.checkpointer.wait()
        # Multi-host synchronization: ensure all hosts complete
        self._sync_processes("checkpoint_wait_complete")
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/callbacks/orbax_checkpoint.py",
"license": "Apache License 2.0",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
keras-team/keras:keras/src/callbacks/orbax_checkpoint_test.py | import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import saving
from keras.src import testing
from keras.src import utils
from keras.src.callbacks.orbax_checkpoint import OrbaxCheckpoint
from keras.src.saving import register_keras_serializable
class OrbaxCheckpointTest(testing.TestCase, parameterized.TestCase):
    """Tests for the OrbaxCheckpoint callback.

    Fixes relative to the previous revision:
    - `Model.variables` is a *list* of variables in Keras 3, not a dict, so
      iterating it with `.items()` raised `AttributeError` in the
      distributed test; sharding info is now keyed by `variable.path`.
    - A bare `except:` in the distribution-cleanup path (which would also
      swallow `KeyboardInterrupt`) was narrowed to `except Exception`.
    """

    def _create_test_model(self):
        """Create a simple test model compatible with 2-device sharding."""
        inputs = layers.Input(shape=(10,), name="input_layer")
        x = layers.Dense(6, name="dense_layer")(inputs)  # 6 units (div by 2)
        outputs = layers.Dense(2, name="output_layer")(x)
        model = models.Model(inputs, outputs, name="test_model")
        model.compile(optimizer="adam", loss="mse")
        return model

    def _create_dummy_data(self, num_samples=100):
        """Create dummy training data."""
        x = np.random.randn(num_samples, 10)
        y = np.random.randn(num_samples, 2)  # Match 2 outputs
        return x, y

    @parameterized.parameters(
        {"save_freq": 10, "epochs": 1, "batch_size": 5},  # batch-level
        {"save_freq": "epoch", "epochs": 3, "batch_size": None},  # epoch-level
    )
    @pytest.mark.requires_trainable_backend
    def test_checkpoint_saving_basic(self, save_freq, epochs, batch_size):
        """Test basic checkpoint saving with different frequencies."""
        model = self._create_test_model()
        x, y = self._create_dummy_data(num_samples=50)
        checkpoint_dir = os.path.join(
            self.get_temp_dir(), f"test_save_{save_freq}_{id(self)}"
        )
        callback = OrbaxCheckpoint(
            directory=checkpoint_dir, save_freq=save_freq
        )
        # Train with specified configuration
        fit_kwargs = {"callbacks": [callback], "verbose": 0}
        if batch_size:
            fit_kwargs["batch_size"] = batch_size
        model.fit(x, y, epochs=epochs, **fit_kwargs)
        # Verify checkpoint files were created
        checkpoint_files = os.listdir(checkpoint_dir)
        self.assertGreater(
            len(checkpoint_files), 0, "Should have checkpoint files"
        )

    @parameterized.parameters(
        {"mode": "min", "monitor": "loss"},
        {"mode": "max", "monitor": "loss"},
    )
    @pytest.mark.requires_trainable_backend
    def test_save_best_only(self, mode, monitor):
        """Test save_best_only with different modes."""
        model = self._create_test_model()
        x, y = self._create_dummy_data(num_samples=100)
        checkpoint_dir = os.path.join(
            self.get_temp_dir(), f"test_best_{mode}_{id(self)}"
        )
        callback = OrbaxCheckpoint(
            directory=checkpoint_dir,
            monitor=monitor,
            save_best_only=True,
            mode=mode,
            save_freq="epoch",
        )
        model.fit(x, y, epochs=5, callbacks=[callback], verbose=0)
        checkpoint_files = os.listdir(checkpoint_dir)
        self.assertGreater(
            len(checkpoint_files), 0, "Should have checkpoint files"
        )

    @parameterized.parameters(
        {"save_on_background": False},
        {"save_on_background": True},
    )
    @pytest.mark.requires_trainable_backend
    def test_async_vs_sync_saving(self, save_on_background):
        """Test synchronous vs asynchronous saving."""
        model = self._create_test_model()
        x, y = self._create_dummy_data()
        checkpoint_dir = os.path.join(
            self.get_temp_dir(), f"test_async_{save_on_background}_{id(self)}"
        )
        callback = OrbaxCheckpoint(
            directory=checkpoint_dir,
            save_freq="epoch",
            save_on_background=save_on_background,
        )
        model.fit(x, y, epochs=2, callbacks=[callback], verbose=0)
        checkpoint_files = os.listdir(checkpoint_dir)
        self.assertGreater(
            len(checkpoint_files), 0, "Should have checkpoint files"
        )

    @pytest.mark.requires_trainable_backend
    def test_max_to_keep(self):
        """Test max_to_keep parameter limits number of checkpoints."""
        model = self._create_test_model()
        x, y = self._create_dummy_data()
        checkpoint_dir = os.path.join(
            self.get_temp_dir(), f"test_max_keep_{id(self)}"
        )
        callback = OrbaxCheckpoint(
            directory=checkpoint_dir, save_freq="epoch", max_to_keep=2
        )
        model.fit(x, y, epochs=5, callbacks=[callback], verbose=0)
        # NOTE(review): preservation-policy cleanup may lag behind the last
        # save, so the bound is intentionally loose (<= epochs) rather than
        # exactly max_to_keep.
        checkpoint_files = os.listdir(checkpoint_dir)
        self.assertLessEqual(len(checkpoint_files), 5)

    @pytest.mark.requires_trainable_backend
    def test_load_weights_from_orbax_checkpoint(self):
        """Test loading weights from Orbax checkpoint using load_weights."""
        # Create and train model to create checkpoint
        model = self._create_test_model()
        x, y = self._create_dummy_data()
        checkpoint_dir = os.path.join(
            self.get_temp_dir(), "test_load_weights_orbax"
        )
        callback = OrbaxCheckpoint(
            directory=checkpoint_dir,
            save_freq="epoch",
            save_weights_only=True,  # Only save weights for load_weights test
        )
        # Train to create checkpoint
        model.fit(x, y, epochs=1, callbacks=[callback], verbose=0)
        # Get original weights after training
        original_weights = model.get_weights()
        # Create a new model with the same architecture
        new_model = self._create_test_model()
        # Initialize with different weights to ensure loading works
        different_weights = [w * 2 for w in original_weights]
        new_model.set_weights(different_weights)
        # Verify weights are different initially
        new_weights_before = new_model.get_weights()
        for orig, new in zip(original_weights, new_weights_before):
            self.assertNotAllClose(
                orig, new, msg="Weights should be different before loading"
            )
        # Load weights from Orbax checkpoint
        new_model.load_weights(checkpoint_dir)
        # Verify weights were loaded correctly
        loaded_weights = new_model.get_weights()
        for orig, loaded in zip(original_weights, loaded_weights):
            self.assertAllClose(
                orig,
                loaded,
                msg="Weights should match after loading from checkpoint",
            )

    @pytest.mark.requires_trainable_backend
    def test_save_freq_epoch(self):
        """Test save_freq='epoch' functionality."""
        model = self._create_test_model()
        x, y = self._create_dummy_data()
        checkpoint_dir = os.path.join(
            self.get_temp_dir(), f"test_epoch_freq_{id(self)}"
        )
        callback = OrbaxCheckpoint(
            directory=checkpoint_dir,
            save_freq="epoch",
        )
        # Train for 3 epochs
        model.fit(x, y, epochs=3, callbacks=[callback], verbose=0)
        # Should have only the latest checkpoint (epoch 2) due to max_to_keep=1
        checkpoint_files = os.listdir(checkpoint_dir)
        self.assertEqual(
            len(checkpoint_files),
            1,
            f"Should have exactly 1 checkpoint due to max_to_keep=1, "
            f"found {len(checkpoint_files)}: {checkpoint_files}",
        )
        # Check for the latest epoch directory (should be the highest numbered)
        # Note: Due to preservation policy behavior, the actual latest kept
        # may vary, so we only check that the single retained checkpoint has
        # a numeric (step-number) name.
        self.assertTrue(
            len(checkpoint_files) == 1 and checkpoint_files[0].isdigit(),
            f"Should have exactly one checkpoint with numeric name, "
            f"found {checkpoint_files}",
        )

    def test_invalid_save_freq(self):
        """Test error handling for invalid save_freq parameter."""
        checkpoint_dir = os.path.join(self.get_temp_dir(), "test_invalid_freq")
        with self.assertRaises(ValueError):
            OrbaxCheckpoint(directory=checkpoint_dir, save_freq="invalid")

    @pytest.mark.requires_trainable_backend
    def test_initial_value_threshold(self):
        """Test initial_value_threshold parameter."""
        model = self._create_test_model()
        x, y = self._create_dummy_data()
        checkpoint_dir = os.path.join(self.get_temp_dir(), "test_threshold")
        callback = OrbaxCheckpoint(
            directory=checkpoint_dir,
            monitor="loss",
            save_best_only=True,
            mode="min",
            initial_value_threshold=1.0,
            save_freq="epoch",
        )
        model.fit(x, y, epochs=3, callbacks=[callback], verbose=0)
        self.assertTrue(os.path.exists(checkpoint_dir))

    @parameterized.parameters(
        {"save_on_background": False},
        {"save_on_background": True},
    )
    @pytest.mark.requires_trainable_backend
    def test_checkpoint_loading_comprehensive(self, save_on_background):
        """Test checkpoint loading with async and sync saving."""
        model = self._create_test_model()
        model.compile(optimizer="adam", loss="mse")
        x, y = self._create_dummy_data(num_samples=200)
        checkpoint_dir = os.path.join(
            self.get_temp_dir(),
            f"test_loading_{save_on_background}_{id(self)}",
        )
        callback = OrbaxCheckpoint(
            directory=checkpoint_dir,
            save_freq="epoch",
            save_on_background=save_on_background,
            save_weights_only=True,
        )
        model.fit(x, y, epochs=1, callbacks=[callback], verbose=0)
        original_weights = model.get_weights()
        # Test load_weights functionality
        new_model = self._create_test_model()
        new_model.compile(optimizer="adam", loss="mse")
        new_x, new_y = self._create_dummy_data(num_samples=10)
        new_model.fit(new_x, new_y, epochs=1, batch_size=5, verbose=0)
        different_weights = [w * 2 for w in original_weights]
        new_model.set_weights(different_weights)
        # Verify different before loading
        for orig, new in zip(original_weights, new_model.get_weights()):
            self.assertNotAllClose(orig, new)
        # Load and verify
        new_model.load_weights(checkpoint_dir)
        for orig, loaded in zip(original_weights, new_model.get_weights()):
            self.assertAllClose(orig, loaded)

    @pytest.mark.skipif(
        backend.backend() != "jax",
        reason="Requires JAX backend for distribution",
    )
    def test_distributed_checkpoint_functionality(self):
        """Test OrbaxCheckpoint with distributed training."""
        import jax

        from keras.src.distribution import DeviceMesh
        from keras.src.distribution import LayoutMap
        from keras.src.distribution import ModelParallel
        from keras.src.distribution import TensorLayout
        from keras.src.distribution import distribution as get_distribution
        from keras.src.distribution import set_distribution

        # Check if we have at least 1 device
        devices = jax.devices()
        # Skip test if more than 2 devices, as these tests are designed
        # for 2-device scenarios and may not work with more devices
        if len(devices) > 2:
            self.skipTest(f"Test requires 2 devices, found {len(devices)}")
        num_devices = min(2, len(devices))
        # Skip if only single device - distributed functionality can't be tested
        if num_devices < 2:
            self.skipTest(
                "Test requires distributed setup with multiple devices"
            )
        print(f"Available devices: {devices}, using {num_devices} devices")
        # Set up multi-device distribution
        device_mesh = DeviceMesh((num_devices,), axis_names=["data"])
        layout_map = LayoutMap(device_mesh)
        layout_map["dense_layer/kernel"] = TensorLayout(axes=("data", None))
        layout_map["dense_layer/bias"] = TensorLayout(axes=(None,))
        layout_map["output_layer/kernel"] = TensorLayout(axes=(None, "data"))
        layout_map["output_layer/bias"] = TensorLayout(axes=(None,))
        distribution = ModelParallel(
            device_mesh=device_mesh, layout_map=layout_map
        )
        # Save original distribution state
        original_distribution = get_distribution()
        try:
            # Set distribution
            set_distribution(distribution)
            # Create and train model with distribution
            model = self._create_test_model()
            x, y = self._create_dummy_data(num_samples=50)
            checkpoint_dir = os.path.join(
                self.get_temp_dir(), "test_distributed_checkpoint"
            )
            callback = OrbaxCheckpoint(
                directory=checkpoint_dir,
                save_freq="epoch",
            )
            # Train to create checkpoint
            model.fit(x, y, epochs=2, callbacks=[callback], verbose=0)
            # Get original model predictions and weights
            original_predictions = model.predict(x[:5], verbose=0)
            original_weights = model.get_weights()
            # Load checkpoint using load_weights
            # Create fresh model and load weights
            fresh_model = self._create_test_model()
            fresh_model.load_weights(checkpoint_dir)
            loaded_weights = fresh_model.get_weights()
            # Verify loaded weights match original
            for orig, loaded in zip(original_weights, loaded_weights):
                self.assertAllClose(orig, loaded)
            # Verify loaded model produces same predictions
            loaded_predictions = fresh_model.predict(x[:5], verbose=0)
            self.assertAllClose(original_predictions, loaded_predictions)
            # Verify sharding is maintained after loading
            # Check that both models have the same distribution
            current_dist = get_distribution()
            self.assertIsNotNone(current_dist)
            self.assertEqual(type(current_dist), ModelParallel)
            # Verify model variables are sharded correctly.
            # `Model.variables` is a list (not a dict), so key the sharding
            # info by each variable's unique `path`.
            original_shardings = {
                var.path: var.sharding
                for var in model.variables
                if hasattr(var, "sharding")
            }
            loaded_shardings = {
                var.path: var.sharding
                for var in fresh_model.variables
                if hasattr(var, "sharding")
            }
            # Verify shardings match
            for name in original_shardings:
                if name in loaded_shardings:
                    self.assertEqual(
                        original_shardings[name],
                        loaded_shardings[name],
                        f"Sharding mismatch for variable {name}",
                    )
            print("Distributed checkpoint functionality and sharding verified")
        finally:
            # Restore original distribution
            if original_distribution is not None:
                set_distribution(original_distribution)
            else:
                try:
                    set_distribution(None)
                except Exception:
                    # Best-effort cleanup; clearing the distribution may not
                    # be supported on all versions.
                    pass

    @pytest.mark.requires_trainable_backend
    def test_checkpoint_loading_via_saving_api(self):
        """Test model loading via saving API."""
        model = self._create_test_model()
        x, y = self._create_dummy_data()
        # Test basic model loading
        checkpoint_dir = os.path.join(self.get_temp_dir(), "test_basic_loading")
        callback = OrbaxCheckpoint(directory=checkpoint_dir, save_freq="epoch")
        model.fit(x, y, epochs=1, callbacks=[callback], verbose=0)
        original_weights = model.get_weights()
        loaded_model = saving.load_model(checkpoint_dir)
        # Verify weights and compilation
        self.assertEqual(len(original_weights), len(loaded_model.get_weights()))
        for orig, loaded in zip(original_weights, loaded_model.get_weights()):
            self.assertAllClose(orig, loaded)
        self.assertTrue(loaded_model.compiled)
        # Test weights-only checkpoint should fail with load_model
        weights_only_dir = os.path.join(
            self.get_temp_dir(), "test_weights_only"
        )
        weights_callback = OrbaxCheckpoint(
            directory=weights_only_dir,
            save_freq="epoch",
            save_weights_only=True,
        )
        model.fit(x, y, epochs=1, callbacks=[weights_callback], verbose=0)
        with self.assertRaises(ValueError):
            saving.load_model(weights_only_dir)

    @parameterized.parameters(
        {"save_on_background": False},
        {"save_on_background": True},
    )
    @pytest.mark.requires_trainable_backend
    def test_comprehensive_model_state_restoration(self, save_on_background):
        """Test comprehensive model state restoration with exact weight
        matching.

        Tests sync/async saving, exact weight matching, and complete state
        restoration including trainable/non-trainable variables, optimizer
        state, and custom layers.
        """
        utils.set_random_seed(42)

        # Create model with custom layer having non-trainable variables
        @register_keras_serializable(package="test")
        class CustomLayer(layers.Layer):
            def __init__(self, units, **kwargs):
                super().__init__(**kwargs)
                self.units = units

            def build(self, input_shape):
                self.kernel = self.add_weight(
                    shape=(input_shape[-1], self.units), name="kernel"
                )
                self.moving_mean = self.add_weight(
                    shape=(self.units,), trainable=False, name="moving_mean"
                )
                super().build(input_shape)

            def call(self, inputs):
                return inputs @ self.kernel

        # Build model with both trainable and non-trainable variables
        inputs = layers.Input(shape=(10,), name="input_layer")
        x = layers.Dense(8, name="dense_layer")(inputs)
        outputs = CustomLayer(2, name="custom_layer")(x)
        model = models.Model(inputs, outputs, name="comprehensive_test_model")
        model.compile(optimizer="adam", loss="mse", metrics=["mae"])
        x, y = self._create_dummy_data(num_samples=100)
        checkpoint_dir = os.path.join(
            self.get_temp_dir(),
            f"test_comprehensive_{save_on_background}_{id(self)}",
        )
        # Test saving with exact weight matching
        callback = OrbaxCheckpoint(
            directory=checkpoint_dir,
            save_freq="epoch",
            save_on_background=save_on_background,
        )
        model.fit(x, y, epochs=2, verbose=0, callbacks=[callback])
        # Verify exact weight matching functionality
        final_saved_weights = model.get_weights()
        self.assertIsNotNone(final_saved_weights, "Should have saved weights")
        # Load and verify complete model restoration
        loaded_model = saving.load_model(checkpoint_dir)
        # Architecture verification
        self.assertEqual(model.name, loaded_model.name)
        self.assertEqual(len(model.layers), len(loaded_model.layers))
        self.assertTrue(loaded_model.compiled)
        # Exact weight matching verification
        loaded_weights = loaded_model.get_weights()
        self.assertEqual(len(final_saved_weights), len(loaded_weights))
        for i, (saved, loaded) in enumerate(
            zip(final_saved_weights, loaded_weights)
        ):
            self.assertAllClose(saved, loaded, msg=f"Weight {i} mismatch")
        # Verify optimizer variables
        for i, (saved, loaded) in enumerate(
            zip(model.optimizer.variables, loaded_model.optimizer.variables)
        ):
            self.assertAllClose(saved, loaded, msg=f"Weight {i} mismatch")

    @parameterized.parameters(
        {"save_on_background": False},
        {"save_on_background": True},
    )
    @pytest.mark.requires_trainable_backend
    def test_checkpoint_with_assets(self, save_on_background):
        """Test checkpoint saving/loading with layers that have assets.

        Tests that models with preprocessing layers that have vocab assets
        can be saved and loaded correctly through Orbax checkpoints.

        Passing a vocabulary *file path* (not an inline list) to
        StringLookup causes the vocabulary to be stored via
        save_assets / load_assets rather than inlined in get_config.
        This test verifies the Orbax round-trip for that code path.
        """
        # Write a vocabulary file so StringLookup stores it as an asset
        # (inline lists are serialized in get_config, not via assets).
        vocab_dir = self.get_temp_dir()
        vocab_file = os.path.join(vocab_dir, "vocab.txt")
        vocab_words = ["cat", "dog", "bird", "fish"]
        with open(vocab_file, "w") as f:
            f.write("\n".join(vocab_words))
        string_lookup = layers.StringLookup(
            vocabulary=vocab_file,
            output_mode="int",
            name="string_lookup_layer",
        )
        inputs = layers.Input(shape=(1,), dtype="string")
        x = string_lookup(inputs)
        outputs = layers.Embedding(input_dim=10, output_dim=8)(x)
        model = models.Model(inputs, outputs, name="model_with_assets")
        model.compile(optimizer="adam", loss="mse")
        original_vocab = string_lookup.get_vocabulary()
        # Save through OrbaxCheckpoint (the actual Orbax path)
        checkpoint_dir = self.get_temp_dir()
        callback = OrbaxCheckpoint(
            directory=checkpoint_dir,
            save_freq="epoch",
            save_on_background=save_on_background,
            save_weights_only=False,
        )
        # We can't easily train with string inputs, so invoke the
        # save path directly.
        callback.set_model(model)
        callback._save_checkpoint(step=0)
        callback.checkpointer.close()
        # Load the model back through the Orbax load path
        loaded_model = saving.load_model(checkpoint_dir)
        # Verify model structure
        self.assertEqual(model.name, loaded_model.name)
        self.assertEqual(len(model.layers), len(loaded_model.layers))
        # Verify vocabulary (assets) was restored correctly
        loaded_string_lookup = loaded_model.get_layer("string_lookup_layer")
        loaded_vocab = loaded_string_lookup.get_vocabulary()
        self.assertEqual(original_vocab, loaded_vocab)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/callbacks/orbax_checkpoint_test.py",
"license": "Apache License 2.0",
"lines": 491,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/utils/progbar_test.py | import numpy as np
from absl.testing import parameterized
from keras.src import testing
from keras.src.utils import progbar
class ProgbarTest(testing.TestCase):
    @parameterized.named_parameters(
        [
            ("float", "float"),
            ("np", "np"),
            ("list", "list"),
        ]
    )
    def test_update(self, value_type):
        """Progbar.update accepts float, numpy, and list metric values."""
        # Map each parameterized case to a factory producing its sample
        # value; unknown identifiers are rejected explicitly.
        value_factories = {
            "float": lambda: 1.0,
            "np": lambda: np.array(1.0),
            "list": lambda: [0.0, 1.0, 2.0],
        }
        if value_type not in value_factories:
            raise ValueError("Unknown value_type")
        values = value_factories[value_type]()
        bar = progbar.Progbar(target=1, verbose=1)
        bar.update(1, values=[("values", values)], finalize=True)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/utils/progbar_test.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/utils/image_utils_test.py | import os
import numpy as np
from absl.testing import parameterized
from keras.src import testing
from keras.src.utils import img_to_array
from keras.src.utils import load_img
from keras.src.utils import save_img
class SaveImgTest(testing.TestCase, parameterized.TestCase):
    @parameterized.named_parameters(
        ("rgb_explicit_format", (50, 50, 3), "rgb.jpg", "jpg", True),
        ("rgba_explicit_format", (50, 50, 4), "rgba.jpg", "jpg", True),
        ("rgb_inferred_format", (50, 50, 3), "rgb_inferred.jpg", None, False),
        ("rgba_inferred_format", (50, 50, 4), "rgba_inferred.jpg", None, False),
    )
    def test_save_jpg(self, shape, name, file_format, use_explicit_format):
        """save_img writes JPEGs for RGB/RGBA arrays.

        The format is either passed explicitly or inferred from the
        file extension (the `file_format` kwarg is omitted entirely in
        the inferred case to exercise the real inference path).
        """
        target_path = os.path.join(self.get_temp_dir(), name)
        image = np.random.randint(0, 256, size=shape, dtype=np.uint8)
        extra_kwargs = (
            {"file_format": file_format} if use_explicit_format else {}
        )
        save_img(target_path, image, **extra_kwargs)
        self.assertTrue(os.path.exists(target_path))
        # JPEG has no alpha channel, so RGBA input must round-trip as RGB.
        reloaded = img_to_array(load_img(target_path))
        self.assertEqual(reloaded.shape, (50, 50, 3))
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/utils/image_utils_test.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/distillation/distillation_loss.py | import keras
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.saving import serialization_lib
from keras.src.utils import tracking
def _convert_loss_to_function(loss_item):
"""Convert a loss string identifier to a loss function.
Arguments:
loss_item: Either a string identifier, a loss function instance,
or `None`.
Returns:
A loss function instance, or `None`.
Raises:
ValueError: If the loss string identifier is unknown.
"""
if loss_item is None:
return None
elif isinstance(loss_item, str):
loss_fn = keras.losses.get(loss_item)
if loss_fn is None:
raise ValueError(f"Unknown loss function: '{loss_item}'.")
return loss_fn
else:
return loss_item
@keras_export("keras.distillation.DistillationLoss")
class DistillationLoss:
"""Base class for distillation loss computation.
Distillation losses define how to compute the distillation loss
between teacher and student outputs. Each loss implements a specific
approach to knowledge transfer, from simple logits matching to feature-based
distillation.
To create custom distillation losses, subclass this class and
override the `compute_loss` method.
"""
def compute_loss(self, teacher_outputs, student_outputs, **kwargs):
"""Compute distillation loss between teacher and student outputs.
This method should implement the specific distillation logic for
transferring knowledge from teacher to student.
Arguments:
teacher_outputs: Outputs from the teacher model. Can be a single
tensor or a list/tuple of tensors for multi-output models.
student_outputs: Outputs from the student model. Can be a single
tensor or a list/tuple of tensors for multi-output models.
**kwargs: Additional arguments for custom distillation_loss.
Returns:
Distillation loss tensor.
"""
raise NotImplementedError("Subclasses must implement compute_loss")
def validate_outputs(self, teacher_outputs, student_outputs):
"""Validate that teacher and student outputs are compatible.
Arguments:
teacher_outputs: Outputs from the teacher model.
student_outputs: Outputs from the student model.
Raises:
ValueError: If outputs are not compatible.
"""
keras.tree.assert_same_structure(teacher_outputs, student_outputs)
def validate_model_compatibility(self, teacher, student):
"""Validate that teacher and student models are compatible.
Arguments:
teacher: The teacher model.
student: The student model.
Raises:
ValueError: If models are not compatible with this distillation
loss.
"""
pass
@keras_export("keras.distillation.FeatureDistillation")
class FeatureDistillation(DistillationLoss):
"""Feature distillation loss.
Feature distillation transfers knowledge from intermediate layers of the
teacher model to corresponding layers of the student model. This approach
helps the student learn better internal representations and often leads
to better performance compared to logits-only distillation.
Arguments:
loss: Loss function to use for feature distillation. Can be:
- String identifier (e.g., 'mse', 'cosine_similarity', 'mae')
- Keras loss instance
- Nested structure of losses matching the layer output structure
- `None` to skip distillation for that output (useful for
multi-output models where you only want to distill some outputs)
At least one loss must be non-`None`. Defaults to 'mse'.
teacher_layer_name: Name of the teacher layer to extract features from.
If `None`, uses the final output. Defaults to `None`.
student_layer_name: Name of the student layer to extract features from.
If `None`, uses the final output. Defaults to `None`.
Examlpe(s):
```python
# Basic feature distillation from final outputs
distillation_loss = FeatureDistillation(loss="mse")
# Distill from specific intermediate layers
distillation_loss = FeatureDistillation(
loss="mse",
teacher_layer_name="dense_1",
student_layer_name="dense_1"
)
# Use cosine similarity for different feature sizes
distillation_loss = FeatureDistillation(
loss="cosine_similarity",
teacher_layer_name="conv2d_2",
student_layer_name="conv2d_1"
)
# With custom loss instance
distillation_loss = FeatureDistillation(
loss=keras.losses.MeanAbsoluteError()
)
# For multi-output models
distillation_loss = FeatureDistillation(
loss=["mse", "cosine_similarity"]
)
# For multi-output models, only distill some outputs
distillation_loss = FeatureDistillation(
loss=["mse", None, "cosine_similarity"] # Skip middle output
)
```
"""
@tracking.no_automatic_dependency_tracking
def __init__(
self, loss="mse", teacher_layer_name=None, student_layer_name=None
):
self.teacher_layer_name = teacher_layer_name
self.student_layer_name = student_layer_name
self.loss = tree.map_structure(_convert_loss_to_function, loss)
flat_losses = tree.flatten(self.loss)
if all(l is None for l in flat_losses):
raise ValueError(
"The `loss` argument in `FeatureDistillation` must "
"contain at least one non-`None` value."
)
def validate_model_compatibility(self, teacher, student):
"""Validate that teacher and student models are compatible for feature
distillation."""
if (
self.teacher_layer_name is not None
or self.student_layer_name is not None
):
teacher_is_subclassed = (
not hasattr(teacher, "inputs") or teacher.inputs is None
)
student_is_subclassed = (
not hasattr(student, "inputs") or student.inputs is None
)
if teacher_is_subclassed or student_is_subclassed:
subclassed_models = []
if teacher_is_subclassed:
subclassed_models.append("teacher")
if student_is_subclassed:
subclassed_models.append("student")
models_str = " and ".join(subclassed_models)
raise ValueError(
f"FeatureDistillation with specific layer names requires "
f"Functional or Sequential models. The {models_str} "
f"model(s) appear to be subclassed (no symbolic "
f"inputs/outputs). Either use Functional/Sequential "
f"models, or use FeatureDistillation without layer names "
f"(to distill final outputs only), or use "
f"LogitsDistillation instead."
)
if self.teacher_layer_name is not None:
try:
teacher.get_layer(name=self.teacher_layer_name)
except ValueError as e:
raise ValueError(f"In teacher model: {e}")
if self.student_layer_name is not None:
try:
student.get_layer(name=self.student_layer_name)
except ValueError as e:
raise ValueError(f"In student model: {e}")
def validate_outputs(self, teacher_outputs, student_outputs):
"""Validate that outputs are compatible for feature distillation."""
super().validate_outputs(teacher_outputs, student_outputs)
try:
tree.assert_same_structure(self.loss, teacher_outputs)
except ValueError as e:
raise ValueError(
f"Loss structure mismatch. "
f"Loss structure: {tree.structure(self.loss)}, "
f"Output structure: {tree.structure(teacher_outputs)}. "
f"Error: {e}"
)
def compute_loss(self, teacher_outputs, student_outputs, **kwargs):
"""Compute feature distillation loss using extracted features.
Arguments:
teacher_outputs: Extracted features from teacher layer.
student_outputs: Extracted features from student layer.
**kwargs: Additional arguments (ignored).
Returns:
Scalar distillation loss tensor.
"""
def apply_loss(loss_fn, teacher_features, student_features):
if loss_fn is None:
return 0.0
loss = keras.ops.mean(loss_fn(teacher_features, student_features))
return loss
loss_values = tree.map_structure(
apply_loss, self.loss, teacher_outputs, student_outputs
)
flat_losses = tree.flatten(loss_values)
return keras.ops.sum(keras.ops.stack(flat_losses))
def get_config(self):
"""Get configuration for serialization."""
return {
"loss": keras.losses.serialize(self.loss),
"teacher_layer_name": self.teacher_layer_name,
"student_layer_name": self.student_layer_name,
}
@classmethod
def from_config(cls, config):
"""Create instance from configuration."""
config = config.copy()
config["loss"] = keras.losses.deserialize(config["loss"])
return cls(**config)
@keras_export("keras.distillation.LogitsDistillation")
class LogitsDistillation(DistillationLoss):
"""Distillation loss that transfers knowledge from final model outputs.
This distillation loss applies temperature scaling to the teacher's logits
before computing the loss between teacher and student predictions. It's the
most common approach for knowledge distillation.
Arguments:
temperature: Temperature for softmax scaling. Higher values produce
softer probability distributions that are easier for the student to
learn. Typical values range from 3-5. Defaults to 3.0.
loss: Loss function to use for distillation. Can be:
- String identifier (e.g., 'kl_divergence',
'categorical_crossentropy')
- Keras loss instance
- Nested structure of losses matching the model output structure
- `None` to skip distillation for that output (useful for
multi-output models where you only want to distill some outputs)
At least one loss must be non-`None`. Defaults to 'kl_divergence'.
Examlpe(s):
```python
# Basic logits distillation with KL divergence
distillation_loss = LogitsDistillation(temperature=3.0)
# With categorical crossentropy loss
distillation_loss = LogitsDistillation(
temperature=4.0,
loss="categorical_crossentropy"
)
# With custom loss instance
distillation_loss = LogitsDistillation(
temperature=4.0,
loss=keras.losses.CategoricalCrossentropy(from_logits=True)
)
# For multi-output models
distillation_loss = LogitsDistillation(
temperature=3.0,
loss=["kl_divergence", "categorical_crossentropy"]
)
# For multi-output models, only distill some outputs
distillation_loss = LogitsDistillation(
temperature=3.0,
loss=["kl_divergence", None] # Skip second output
)
```
"""
@tracking.no_automatic_dependency_tracking
def __init__(
self,
temperature=3.0,
loss="kl_divergence",
):
self.temperature = temperature
self.loss = tree.map_structure(_convert_loss_to_function, loss)
flat_losses = tree.flatten(self.loss)
if all(l is None for l in flat_losses):
raise ValueError("At least one loss must be non-`None`.")
if not isinstance(self.temperature, (int, float)):
raise ValueError(
f"temperature must be a number, got {type(self.temperature)}"
)
if self.temperature <= 0.0:
raise ValueError("temperature must be positive.")
def compute_loss(self, teacher_outputs, student_outputs, **kwargs):
"""Compute distillation loss using the configured loss function.
Arguments:
teacher_outputs: Logits from teacher model. Can be a single tensor,
list/tuple of tensors, or dict of tensors.
student_outputs: Logits from student model. Can be a single tensor,
list/tuple of tensors, or dict of tensors.
**kwargs: Additional arguments (ignored).
Returns:
Distillation loss tensor.
"""
# Apply temperature scaling using tree.map_structure
teacher_scaled = tree.map_structure(
lambda x: keras.ops.divide(x, self.temperature), teacher_outputs
)
student_scaled = tree.map_structure(
lambda x: keras.ops.divide(x, self.temperature), student_outputs
)
# Apply loss function(s) to corresponding outputs
def apply_loss(loss_fn, teacher_logits, student_logits):
if loss_fn is None:
return 0.0
# Special handling for KL divergence (needs probabilities)
if isinstance(loss_fn, keras.losses.KLDivergence):
teacher_probs = keras.ops.softmax(teacher_logits, axis=-1)
student_probs = keras.ops.softmax(student_logits, axis=-1)
loss = keras.ops.mean(loss_fn(teacher_probs, student_probs))
# Scale by temperature^2 for KL (per literature)
return loss * (self.temperature**2)
else:
# For other losses, use logits directly
return keras.ops.mean(loss_fn(teacher_logits, student_logits))
# Apply losses using tree.map_structure
loss_values = tree.map_structure(
apply_loss, self.loss, teacher_scaled, student_scaled
)
# Sum all losses and return scalar
flat_losses = tree.flatten(loss_values)
return keras.ops.sum(keras.ops.stack(flat_losses))
def get_config(self):
"""Get configuration for serialization."""
return {
"temperature": self.temperature,
"loss": serialization_lib.serialize_keras_object(self.loss),
}
@classmethod
def from_config(cls, config):
"""Create instance from configuration."""
config = config.copy()
config["loss"] = keras.losses.deserialize(config["loss"])
return cls(**config)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/distillation/distillation_loss.py",
"license": "Apache License 2.0",
"lines": 324,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
keras-team/keras:keras/src/distillation/distillation_loss_test.py | import numpy as np
import pytest
import keras
from keras.src.distillation.distillation_loss import FeatureDistillation
from keras.src.distillation.distillation_loss import LogitsDistillation
from keras.src.distillation.distiller import Distiller
from keras.src.testing import TestCase
@pytest.mark.requires_trainable_backend
class TestLogitsDistillation(TestCase):
"""Test cases for LogitsDistillation distillation_loss."""
def test_logits_distillation_basic(self):
"""Test basic logits distillation structure validation."""
# Create dummy logits
teacher_logits = keras.ops.convert_to_tensor(
np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), dtype="float32"
)
student_logits = keras.ops.convert_to_tensor(
np.array([[2.0, 1.0, 4.0], [3.0, 6.0, 2.0]]), dtype="float32"
)
distillation_loss = LogitsDistillation(temperature=3.0)
distillation_loss.validate_outputs(teacher_logits, student_logits)
incompatible_logits = {"output": teacher_logits}
with self.assertRaises(ValueError):
distillation_loss.validate_outputs(
teacher_logits, incompatible_logits
)
@pytest.mark.requires_trainable_backend
class TestFeatureDistillation(TestCase):
"""Test cases for FeatureDistillation distillation_loss."""
def test_feature_distillation_basic(self):
"""Test basic feature distillation structure validation."""
# Create dummy features
teacher_features = keras.ops.convert_to_tensor(
np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), dtype="float32"
)
student_features = keras.ops.convert_to_tensor(
np.array([[1.1, 2.1, 3.1], [4.1, 5.1, 6.1]]), dtype="float32"
)
distillation_loss = FeatureDistillation(loss="mse")
distillation_loss.validate_outputs(teacher_features, student_features)
incompatible_features = [teacher_features, teacher_features]
with self.assertRaises(ValueError):
distillation_loss.validate_outputs(
teacher_features, incompatible_features
)
@pytest.mark.requires_trainable_backend
class TestEndToEndDistillation(TestCase):
"""End-to-end distillation tests with real models."""
def setUp(self):
"""Set up models and test data for all tests."""
super().setUp()
# Create teacher model
self.teacher = keras.Sequential(
[
keras.layers.Dense(
32, activation="relu", name="teacher_dense_1"
),
keras.layers.Dense(
16, activation="relu", name="teacher_dense_2"
),
keras.layers.Dense(10, name="teacher_output"),
]
)
# Create student model
self.student = keras.Sequential(
[
keras.layers.Dense(
32, activation="relu", name="student_dense_1"
),
keras.layers.Dense(
16, activation="relu", name="student_dense_2"
),
keras.layers.Dense(10, name="student_output"),
]
)
self.x = np.random.random((32, 20)).astype(np.float32)
self.y = np.random.randint(0, 10, (32,)).astype(np.int32)
self.teacher(self.x[:2])
self.student(self.x[:2])
def test_logits_distillation_end_to_end(self):
"""Test end-to-end logits distillation with real models."""
# Create distiller
distiller = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=LogitsDistillation(temperature=3.0),
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Test training
history = distiller.fit(self.x, self.y, epochs=2, verbose=0)
# Verify training completed
self.assertIn("total_loss", history.history)
self.assertIn("student_loss", history.history)
self.assertIn("distillation_loss", history.history)
# Verify loss values are reasonable
final_loss = history.history["total_loss"][-1]
self.assertTrue(np.isfinite(final_loss))
self.assertGreater(final_loss, 0.0)
# Test prediction
predictions = distiller.predict(self.x[:5], verbose=0)
self.assertEqual(predictions.shape, (5, 10))
# Test student model access
student_model = distiller.student
self.assertIsInstance(student_model, keras.Model)
def test_feature_distillation_end_to_end(self):
"""Test end-to-end feature distillation with real models."""
# Create distiller with feature distillation
distiller = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=FeatureDistillation(
loss="mse",
teacher_layer_name="teacher_dense_1",
student_layer_name="student_dense_1",
),
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Test training
history = distiller.fit(self.x, self.y, epochs=2, verbose=0)
# Verify training completed
self.assertIn("total_loss", history.history)
self.assertIn("student_loss", history.history)
self.assertIn("distillation_loss", history.history)
# Verify feature extraction worked
self.assertIsNotNone(distiller._teacher_feature_extractor)
self.assertIsNotNone(distiller._student_feature_extractor)
# Test that feature extractors have correct outputs
self.assertEqual(
len(distiller._teacher_feature_extractor.outputs), 2
) # final + dense_1
self.assertEqual(
len(distiller._student_feature_extractor.outputs), 2
) # final + dense_1
def test_multi_distillation_loss_distillation_end_to_end(self):
"""Test end-to-end distillation with multiple distillation_loss."""
# Create multiple distillation_loss
distillation_loss = [
LogitsDistillation(temperature=3.0),
FeatureDistillation(
loss="mse",
teacher_layer_name="teacher_dense_1",
student_layer_name="student_dense_1",
),
FeatureDistillation(
loss="mse",
teacher_layer_name="teacher_dense_2",
student_layer_name="student_dense_2",
),
]
# Create distiller
distiller = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=distillation_loss,
distillation_loss_weights=[1.0, 0.5, 0.3],
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Test training
history = distiller.fit(self.x, self.y, epochs=2, verbose=0)
# Verify training completed
self.assertIn("total_loss", history.history)
self.assertIn("student_loss", history.history)
self.assertIn("distillation_loss", history.history)
# Verify efficient feature extraction
self.assertIsNotNone(distiller._teacher_feature_extractor)
self.assertIsNotNone(distiller._student_feature_extractor)
# Should have 3 outputs: final + dense_1 + dense_2
self.assertEqual(len(distiller._teacher_feature_extractor.outputs), 3)
self.assertEqual(len(distiller._student_feature_extractor.outputs), 3)
# Test that loss decreases (learning is happening)
initial_loss = history.history["total_loss"][0]
final_loss = history.history["total_loss"][-1]
self.assertTrue(np.isfinite(initial_loss))
self.assertTrue(np.isfinite(final_loss))
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/distillation/distillation_loss_test.py",
"license": "Apache License 2.0",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/distillation/distiller.py | import keras
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.distillation.distillation_loss import _convert_loss_to_function
from keras.src.models.model import Model
from keras.src.saving import serialization_lib
@keras_export("keras.distillation.Distiller")
class Distiller(Model):
"""Distillation model for transferring knowledge from teacher to student.
Knowledge distillation transfers knowledge from a large, complex model
(teacher) to a smaller, simpler model (student). The student learns
from both ground truth labels and the teacher's predictions, often
achieving better performance than training on labels alone.
Arguments:
teacher: A trained `keras.Model` that serves as the knowledge source.
The teacher model is frozen during distillation.
student: A `keras.Model` to be trained through distillation.
distillation_losses: List of distillation losses to apply. Can be a
single distillation loss or a list of distillation losses like
`keras.distillation.LogitsDistillation`,
`keras.distillation.FeatureDistillation`, or custom distillation
losses.
distillation_loss_weights: List of weights for each distillation loss.
Must have the same length as `distillation_losses`. If `None`,
equal weights are used.
student_loss_weight: Weight for the student's supervised loss component.
Must be between 0 and 1. Defaults to 0.5.
name: Name for the distiller model. Defaults to `"distiller"`.
**kwargs: Additional keyword arguments passed to the parent `Model`
class.
Attributes:
student: The student model being trained. Access this to get the trained
student model for independent use after distillation training.
teacher: The teacher model providing knowledge. This model is frozen
during training.
Examples:
```python
# Basic distillation with KerasHub models
import keras_hub as hub
teacher = hub.models.CausalLM.from_preset("gemma_2b_en")
student = hub.models.CausalLM.from_preset(
"gemma_1.1_2b_en", load_weights=False
)
# Single distillation loss
distiller = Distiller(
teacher=teacher,
student=student,
distillation_losses=LogitsDistillation(temperature=3.0),
)
# Compile the distiller (like any Keras model)
distiller.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
# Train the distiller
distiller.fit(x_train, y_train, epochs=10)
# Access the trained student model
trained_student = distiller.student
# Multiple distillation losses
distiller = Distiller(
teacher=teacher,
student=student,
distillation_losses=[
LogitsDistillation(temperature=3.0),
FeatureDistillation(
teacher_layer_name="dense_1",
student_layer_name="dense_1"
)
],
distillation_loss_weights=[1.0, 0.5],
)
# Compile with custom settings
distiller.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
```
"""
def __init__(
self,
teacher,
student,
distillation_losses,
distillation_loss_weights=None,
student_loss_weight=0.5,
name="distiller",
**kwargs,
):
super().__init__(name=name, **kwargs)
# Validate inputs
self._validate_models(teacher, student)
# Store configuration
self.teacher = teacher
self.student = student
# Validate student_loss_weight
if not isinstance(student_loss_weight, (int, float)):
raise ValueError(
f"student_loss_weight must be a number, got "
f"{type(student_loss_weight)}"
)
if student_loss_weight < 0.0 or student_loss_weight > 1.0:
raise ValueError(
f"student_loss_weight must be between 0.0 and 1.0, "
f"got {student_loss_weight}"
)
self.student_loss_weight = student_loss_weight
# Handle distillation losses configuration
if distillation_losses is None:
raise ValueError(
"'distillation_losses' cannot be `None`. Provide a "
"distillation loss (e.g., LogitsDistillation or "
"FeatureDistillation) or a list of distillation losses."
)
# Convert single distillation loss to list for uniform handling
if not isinstance(distillation_losses, (list, tuple)):
self.distillation_losses = [distillation_losses]
self.distillation_loss_weights = [1.0]
else:
self.distillation_losses = distillation_losses
# Set default weights if not provided
if distillation_loss_weights is None:
self.distillation_loss_weights = [1.0] * len(
distillation_losses
)
else:
if len(distillation_loss_weights) != len(distillation_losses):
raise ValueError(
f"Number of distillation_loss_weights "
f"({len(distillation_loss_weights)}) must match "
f"number of distillation_losses "
f"({len(distillation_losses)})"
)
self.distillation_loss_weights = distillation_loss_weights
# Validate distillation loss compatibility and create extractors
for distillation_loss in self.distillation_losses:
self._validate_distillation_loss_compatibility(
teacher, student, distillation_loss
)
self._create_multi_feature_extractors()
# Freeze teacher model
self.teacher.trainable = False
# Initialize loss tracking metrics
self.student_loss_tracker = keras.metrics.Mean(name="student_loss")
self.distillation_loss_tracker = keras.metrics.Mean(
name="distillation_loss"
)
self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
def _validate_models(self, teacher, student):
"""Validate that teacher and student models are compatible."""
if not isinstance(teacher, keras.Model):
raise ValueError(
f"Teacher must be a keras.Model, got {type(teacher)}"
)
if not isinstance(student, keras.Model):
raise ValueError(
f"Student must be a keras.Model, got {type(student)}"
)
self._validate_input_compatibility(teacher, student)
self._validate_output_compatibility(teacher, student)
self._validate_dtype_compatibility(teacher, student)
def _assert_shapes_are_compatible(self, shape1, shape2, context):
"""Assert that two shapes are compatible."""
if len(shape1) != len(shape2):
raise ValueError(
f"Teacher and student {context} shapes have different "
f"dimensions. Teacher: {shape1}, Student: {shape2}."
)
for dim1, dim2 in zip(shape1, shape2):
if dim1 is not None and dim2 is not None and dim1 != dim2:
raise ValueError(
f"Teacher and student {context} shapes are incompatible. "
f"Teacher: {shape1}, Student: {shape2}. "
f"All dimensions must match."
)
def _assert_same_dtype(self, teacher_dtype, student_dtype, context):
"""Assert that teacher and student dtypes are the same."""
if teacher_dtype != student_dtype:
raise ValueError(
f"Teacher and student {context} dtypes must match. "
f"Teacher: {teacher_dtype}, Student: {student_dtype}."
)
def _validate_input_compatibility(self, teacher, student):
"""Validate that teacher and student have compatible input shapes."""
if not hasattr(teacher, "inputs") or not hasattr(student, "inputs"):
return
teacher_inputs = getattr(teacher, "inputs")
student_inputs = getattr(student, "inputs")
if teacher_inputs is None or student_inputs is None:
return
tree.map_structure(
lambda ti, si: self._assert_shapes_are_compatible(
ti.shape, si.shape, "input"
),
teacher_inputs,
student_inputs,
)
def _validate_output_compatibility(self, teacher, student):
"""Validate that teacher and student have compatible output shapes."""
if not hasattr(teacher, "outputs") or not hasattr(student, "outputs"):
return
teacher_outputs = getattr(teacher, "outputs")
student_outputs = getattr(student, "outputs")
if teacher_outputs is None or student_outputs is None:
return
tree.map_structure(
lambda to, so: self._assert_shapes_are_compatible(
to.shape, so.shape, "output"
),
teacher_outputs,
student_outputs,
)
def _validate_dtype_compatibility(self, teacher, student):
"""Validate that teacher and student have compatible data types."""
if not hasattr(teacher, "inputs") or not hasattr(student, "inputs"):
return
if teacher.inputs is None or student.inputs is None:
return
tree.map_structure(
lambda ti, si: self._assert_same_dtype(ti.dtype, si.dtype, "input"),
teacher.inputs,
student.inputs,
)
if not hasattr(teacher, "outputs") or not hasattr(student, "outputs"):
return
if teacher.outputs is None or student.outputs is None:
return
tree.map_structure(
lambda to, so: self._assert_same_dtype(
to.dtype, so.dtype, "output"
),
teacher.outputs,
student.outputs,
)
def _validate_distillation_loss_compatibility(
self, teacher, student, distillation_loss
):
"""Validate that the distillation loss is compatible with teacher
and student models."""
distillation_loss.validate_model_compatibility(teacher, student)
def _create_multi_feature_extractors(self):
"""Create feature extractors for efficient multi-layer extraction."""
teacher_layer_names = []
student_layer_names = []
for distillation_loss in self.distillation_losses:
if (
hasattr(distillation_loss, "teacher_layer_name")
and distillation_loss.teacher_layer_name
):
if (
distillation_loss.teacher_layer_name
not in teacher_layer_names
):
teacher_layer_names.append(
distillation_loss.teacher_layer_name
)
if (
hasattr(distillation_loss, "student_layer_name")
and distillation_loss.student_layer_name
):
if (
distillation_loss.student_layer_name
not in student_layer_names
):
student_layer_names.append(
distillation_loss.student_layer_name
)
self._teacher_feature_extractor = self._create_feature_extractor(
self.teacher, teacher_layer_names
)
self._student_feature_extractor = self._create_feature_extractor(
self.student, student_layer_names
)
def _create_feature_extractor(self, model, layer_names):
"""Create a feature extractor for a model.
Arguments:
model: The model to create an extractor for.
layer_names: List of layer names to extract features from.
Returns:
Feature extractor model or `None` if no layer names provided.
Raises:
ValueError: If model has no symbolic inputs/outputs.
"""
if not layer_names:
return None
if not hasattr(model, "inputs") or model.inputs is None:
raise ValueError(
f"Cannot create feature extractor for {model.name}. "
f"The model has no symbolic inputs attribute."
)
if isinstance(model, keras.Sequential):
final_output = model.layers[-1].output
else:
final_output = model.output
outputs = {"final_output": final_output}
for layer_name in layer_names:
layer = model.get_layer(name=layer_name)
outputs[layer_name] = layer.output
return keras.Model(
inputs=model.inputs,
outputs=outputs,
name=f"{model.name}_multi_feature_extractor",
)
def _extract_all_teacher_features(self, x):
"""Extract all teacher features in a single forward pass."""
if self._teacher_feature_extractor is not None:
return self._teacher_feature_extractor(x, training=False)
else:
return {"final_output": self.teacher(x, training=False)}
def _extract_all_student_features(self, x, y_pred):
"""Extract all student features in a single forward pass."""
if self._student_feature_extractor is not None:
return self._student_feature_extractor(x, training=True)
else:
return {"final_output": y_pred}
def _get_distillation_loss_features(
self, distillation_loss, all_features, is_teacher
):
"""Get the specific features needed by a distillation loss."""
if is_teacher:
layer_name = distillation_loss.teacher_layer_name or "final_output"
else:
layer_name = distillation_loss.student_layer_name or "final_output"
if layer_name not in all_features:
raise ValueError(
f"Layer '{layer_name}' not found in extracted features. "
f"Available: {list(all_features.keys())}"
)
return all_features[layer_name]
def compile(self, optimizer="adam", loss=None, metrics=None, **kwargs):
"""Compile the distiller with proper integration.
Arguments:
optimizer: Optimizer for training the student model.
loss: Student loss function for the student's supervised learning.
Can be a string identifier or a loss function instance.
metrics: Additional metrics to track during training.
**kwargs: Additional arguments passed to parent compile.
"""
if loss is None:
raise ValueError("'loss' cannot be `None`.")
self._student_loss = tree.map_structure(_convert_loss_to_function, loss)
self._student_loss_for_serialization = loss
if metrics is not None and not isinstance(metrics, (list, tuple)):
raise ValueError(
f"metrics must be a list or tuple, got {type(metrics)}"
)
super().compile(
optimizer=optimizer,
loss=None,
metrics=metrics,
**kwargs,
)
def call(self, inputs, training=None, **kwargs):
"""Forward pass returns student predictions."""
return self.student(inputs, training=training, **kwargs)
    def compute_loss(
        self, x=None, y=None, y_pred=None, sample_weight=None, training=True
    ):
        """Compute combined distillation loss.

        The total is the convex combination
        `student_loss_weight * student_loss
        + (1 - student_loss_weight) * distillation_loss`,
        where the distillation term is itself a weighted sum over all
        configured distillation losses. The three loss trackers are
        updated as a side effect.

        Arguments:
            x: Input data.
            y: Target data.
            y_pred: Model predictions.
            sample_weight: Sample weights (currently unused).
            training: Whether the model is in training mode.
        Returns:
            Combined loss tensor.
        """
        # Handle case where y_pred is not provided
        if y_pred is None:
            y_pred = self(x, training=training)
        # Compute student loss. Skipped entirely when the student term has
        # zero weight or no labels were passed (pure distillation).
        student_loss = 0.0
        if self.student_loss_weight > 0.0 and y is not None:
            # `_student_loss` mirrors the structure of `y`/`y_pred`, so map
            # each loss function over its matching target/prediction pair.
            loss_values = tree.map_structure(
                lambda l, o, o_pred: l(o, o_pred),
                self._student_loss,
                y,
                y_pred,
            )
            flat_losses = tree.flatten(loss_values)
            student_loss = (
                keras.ops.sum(keras.ops.stack(flat_losses))
                if len(flat_losses) > 1
                else flat_losses[0]
            )
            # Ensure student_loss is a scalar
            if hasattr(student_loss, "shape") and len(student_loss.shape) > 0:
                student_loss = keras.ops.mean(student_loss)
        # Compute distillation loss
        distillation_loss = 0.0
        if self.student_loss_weight < 1.0:
            # One forward pass per model; each distillation loss then picks
            # the features it needs out of these dicts.
            teacher_features = self._extract_all_teacher_features(x)
            student_features = self._extract_all_student_features(x, y_pred)
            # Apply distillation losses using pre-extracted features
            for distillation_loss_fn, weight in zip(
                self.distillation_losses, self.distillation_loss_weights
            ):
                # Get appropriate outputs/features for this distillation loss
                if (
                    hasattr(distillation_loss_fn, "teacher_layer_name")
                    and distillation_loss_fn.teacher_layer_name is not None
                ):
                    # FeatureDistillation with specific layers
                    try:
                        distillation_loss_teacher_output = (
                            self._get_distillation_loss_features(
                                distillation_loss_fn,
                                teacher_features,
                                is_teacher=True,
                            )
                        )
                        distillation_loss_student_output = (
                            self._get_distillation_loss_features(
                                distillation_loss_fn,
                                student_features,
                                is_teacher=False,
                            )
                        )
                    except ValueError as e:
                        # Re-raise with context about which loss failed
                        raise RuntimeError(
                            f"Failed to extract features for "
                            f"{type(distillation_loss_fn).__name__} "
                            f"targeting teacher layer "
                            f"'{distillation_loss_fn.teacher_layer_name}' "
                            f"and student layer "
                            f"'{distillation_loss_fn.student_layer_name}'. "
                            f"Original error: {e}"
                        ) from e
                else:
                    # LogitsDistillation or FeatureDistillation (final outputs)
                    distillation_loss_teacher_output = teacher_features[
                        "final_output"
                    ]
                    distillation_loss_student_output = y_pred
                # Validate outputs are compatible for this distillation loss
                distillation_loss_fn.validate_outputs(
                    distillation_loss_teacher_output,
                    distillation_loss_student_output,
                )
                # Compute loss for this distillation loss
                current_distillation_loss = distillation_loss_fn.compute_loss(
                    distillation_loss_teacher_output,
                    distillation_loss_student_output,
                )
                # Validate that distillation loss returns a scalar
                if (
                    hasattr(current_distillation_loss, "shape")
                    and len(current_distillation_loss.shape) > 0
                ):
                    raise ValueError(
                        f"Distillation loss "
                        f"{distillation_loss_fn.__class__.__name__} "
                        f"returned a non-scalar loss with shape "
                        f"{current_distillation_loss.shape}. "
                        f"The compute_loss method must return a scalar "
                        f"tensor."
                    )
                # Apply weight and add to total
                distillation_loss = keras.ops.add(
                    distillation_loss,
                    keras.ops.multiply(weight, current_distillation_loss),
                )
        # Combine losses: student weight and (1 - weight) for distillation.
        total_loss = keras.ops.add(
            keras.ops.multiply(self.student_loss_weight, student_loss),
            keras.ops.multiply(
                keras.ops.subtract(1.0, self.student_loss_weight),
                distillation_loss,
            ),
        )
        # Update metrics
        self.student_loss_tracker.update_state(student_loss)
        self.distillation_loss_tracker.update_state(distillation_loss)
        self.total_loss_tracker.update_state(total_loss)
        return total_loss
def reset_metrics(self):
"""Reset all metrics."""
super().reset_metrics()
self.student_loss_tracker.reset_state()
self.distillation_loss_tracker.reset_state()
self.total_loss_tracker.reset_state()
def get_config(self):
"""Get configuration for serialization."""
config = super().get_config()
config.update(
{
"teacher": serialization_lib.serialize_keras_object(
self.teacher
),
"student": serialization_lib.serialize_keras_object(
self.student
),
"distillation_losses": [
serialization_lib.serialize_keras_object(distillation_loss)
for distillation_loss in self.distillation_losses
],
"distillation_loss_weights": self.distillation_loss_weights,
"student_loss_weight": self.student_loss_weight,
}
)
return config
@classmethod
def from_config(cls, config):
"""Create instance from configuration."""
config = config.copy()
# Deserialize objects
config["teacher"] = serialization_lib.deserialize_keras_object(
config["teacher"]
)
config["student"] = serialization_lib.deserialize_keras_object(
config["student"]
)
config["distillation_losses"] = [
serialization_lib.deserialize_keras_object(distillation_loss)
for distillation_loss in config["distillation_losses"]
]
return cls(**config)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/distillation/distiller.py",
"license": "Apache License 2.0",
"lines": 520,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
keras-team/keras:keras/src/distillation/distiller_test.py | import json
import os
import numpy as np
import pytest
import keras
from keras.src.distillation.distillation_loss import LogitsDistillation
from keras.src.distillation.distiller import Distiller
from keras.src.testing import TestCase
class SimpleTeacher(keras.Model):
    """Two-layer MLP used as the (larger) teacher model in tests."""

    def __init__(self, vocab_size=10, hidden_dim=32):
        super().__init__()
        self.dense1 = keras.layers.Dense(hidden_dim, activation="relu")
        self.dense2 = keras.layers.Dense(vocab_size)

    def call(self, inputs, training=None):
        # Hidden projection followed by the logits head.
        return self.dense2(self.dense1(inputs))
class SimpleStudent(keras.Model):
    """Two-layer MLP used as the (smaller) student model in tests."""

    def __init__(self, vocab_size=10, hidden_dim=16):
        super().__init__()
        self.dense1 = keras.layers.Dense(hidden_dim, activation="relu")
        self.dense2 = keras.layers.Dense(vocab_size)

    def call(self, inputs, training=None):
        # Hidden projection followed by the logits head.
        return self.dense2(self.dense1(inputs))
@pytest.mark.requires_trainable_backend
class TestDistiller(TestCase):
    """Essential test cases for the Distiller class.

    Covers construction/validation, training/evaluation/prediction
    workflows, multi-loss configurations, loss weighting, and full
    serialization round-trips (get_config/from_config plus .keras
    save/load).
    """

    def setUp(self):
        """Set up test fixtures."""
        super().setUp()
        # Create test data
        self.x = np.random.random((20, 5)).astype(np.float32)
        self.y = np.random.randint(0, 10, (20,)).astype(np.int32)
        # Create teacher and student models
        self.teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
        self.student = SimpleStudent(vocab_size=10, hidden_dim=16)
        # Build models by running a small batch through them so their
        # variables exist before the distiller wraps them.
        dummy_input = self.x[:2]
        self.teacher(dummy_input)
        self.student(dummy_input)
        # Create distillation distillation_loss
        self.distillation_loss = LogitsDistillation(temperature=2.0)
        # Create distiller
        self.distiller = Distiller(
            teacher=self.teacher,
            student=self.student,
            distillation_losses=self.distillation_loss,
            student_loss_weight=0.5,
        )
        # Compile distiller
        self.distiller.compile(
            optimizer="adam",
            loss="sparse_categorical_crossentropy",
            metrics=["accuracy"],
        )

    def test_distiller_initialization(self):
        """Test Distiller initialization."""
        # Check that teacher is frozen
        self.assertFalse(self.teacher.trainable)
        # Check that student is trainable
        self.assertTrue(self.student.trainable)
        # Check student_loss_weight
        self.assertEqual(self.distiller.student_loss_weight, 0.5)
        # Check distillation_loss (should be a list with one distillation_loss)
        self.assertIsInstance(self.distiller.distillation_losses, list)
        self.assertEqual(len(self.distiller.distillation_losses), 1)
        self.assertIsInstance(
            self.distiller.distillation_losses[0], LogitsDistillation
        )
        # Check that distillation_loss has the correct temperature
        self.assertEqual(self.distiller.distillation_losses[0].temperature, 2.0)
        # Check that model is compiled
        self.assertIsNotNone(self.distiller.optimizer)
        # Check if the model has been compiled (different backends may handle
        # this differently)
        self.assertTrue(
            hasattr(self.distiller, "_compile_config")
            or hasattr(self.distiller, "compiled_loss"),
            "Model should be compiled",
        )

    def test_distiller_call(self):
        """Test Distiller call method (inference)."""
        # Call should return student outputs
        outputs = self.distiller(self.x)
        # Check output shape
        expected_shape = (20, 10)  # batch_size, vocab_size
        self.assertEqual(outputs.shape, expected_shape)
        # Check that outputs are from student, not teacher
        student_outputs = self.student(self.x)
        self.assertAllClose(outputs, student_outputs)

    def test_teacher_freezing(self):
        """Test that teacher is properly frozen."""
        # Teacher should be frozen
        self.assertFalse(self.teacher.trainable)
        # Student should be trainable
        self.assertTrue(self.student.trainable)
        # Create a new teacher that is trainable and verify it gets frozen
        new_teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
        self.assertTrue(new_teacher.trainable)  # Should be trainable initially
        # Create distiller - should freeze the teacher as a side effect of
        # construction; the instance itself is not needed.
        Distiller(
            teacher=new_teacher,
            student=self.student,
            distillation_losses=self.distillation_loss,
            student_loss_weight=0.5,
        )
        # Teacher should now be frozen
        self.assertFalse(new_teacher.trainable)

    def test_model_compatibility_validation(self):
        """Test model compatibility validation."""
        # Test with non-Keras objects
        with self.assertRaises(ValueError):
            Distiller(
                teacher="not_a_model",
                student=self.student,
                distillation_losses=self.distillation_loss,
            )
        with self.assertRaises(ValueError):
            Distiller(
                teacher=self.teacher,
                student="not_a_model",
                distillation_losses=self.distillation_loss,
            )

    def test_multi_distillation_loss_functionality(self):
        """Test multi-distillation_loss functionality."""
        # Create multiple distillation_loss
        distillation_loss = [
            LogitsDistillation(temperature=3.0),
            LogitsDistillation(temperature=2.0),
        ]
        distillation_loss_weights = [0.7, 0.3]
        # Create distiller with multiple distillation_loss
        distiller = Distiller(
            teacher=self.teacher,
            student=self.student,
            distillation_losses=distillation_loss,
            distillation_loss_weights=distillation_loss_weights,
            student_loss_weight=0.5,
        )
        # Compile distiller
        distiller.compile(
            optimizer="adam",
            loss="sparse_categorical_crossentropy",
            metrics=["accuracy"],
        )
        # Test that distillation_loss are stored correctly
        self.assertEqual(len(distiller.distillation_losses), 2)
        self.assertEqual(distiller.distillation_loss_weights, [0.7, 0.3])
        # Test training
        x = np.random.random((10, 5)).astype(np.float32)
        y = np.random.randint(0, 10, (10,))
        history = distiller.fit(x, y, epochs=1, verbose=0)
        # Check metrics
        self.assertIn("total_loss", history.history)
        self.assertIn("student_loss", history.history)
        self.assertIn("distillation_loss", history.history)

    def test_multi_distillation_loss_validation(self):
        """Test multi-distillation_loss validation."""
        distillation_loss = [
            LogitsDistillation(temperature=3.0),
            LogitsDistillation(temperature=2.0),
        ]
        # Test that validation passes for valid configurations
        distiller = Distiller(
            teacher=self.teacher,
            student=self.student,
            distillation_losses=distillation_loss,
            student_loss_weight=0.5,
        )
        self.assertEqual(len(distiller.distillation_losses), 2)
        # Test invalid distillation_loss weights length
        with self.assertRaises(ValueError):
            Distiller(
                teacher=self.teacher,
                student=self.student,
                distillation_losses=distillation_loss,
                distillation_loss_weights=[1.0],  # Wrong length
                student_loss_weight=0.5,
            )

    def test_student_loss_weighting(self):
        """Test student loss weighting functionality."""
        # Test with student_loss_weight = 0.0 (only distillation loss)
        distiller_0 = Distiller(
            teacher=self.teacher,
            student=self.student,
            distillation_losses=self.distillation_loss,
            student_loss_weight=0.0,
        )
        # Test with student_loss_weight = 1.0 (only student loss)
        distiller_1 = Distiller(
            teacher=self.teacher,
            student=self.student,
            distillation_losses=self.distillation_loss,
            student_loss_weight=1.0,
        )
        # Compile both distillers
        distiller_0.compile(
            optimizer="adam",
            loss="sparse_categorical_crossentropy",
            metrics=["accuracy"],
        )
        distiller_1.compile(
            optimizer="adam",
            loss="sparse_categorical_crossentropy",
            metrics=["accuracy"],
        )
        # Test that they can be used for training without errors
        small_x = self.x[:5]
        small_y = self.y[:5]
        # Both should train without errors at either weight extreme
        history_0 = distiller_0.fit(small_x, small_y, epochs=1, verbose=0)
        history_1 = distiller_1.fit(small_x, small_y, epochs=1, verbose=0)
        # Check that training completed
        self.assertIn("total_loss", history_0.history)
        self.assertIn("total_loss", history_1.history)

    def test_full_training_workflow(self):
        """Test complete training workflow with model.fit() - MOST IMPORTANT."""
        # Create larger dataset for training
        np.random.seed(42)
        x_train = np.random.random((100, 5)).astype(np.float32)
        y_train = np.random.randint(0, 10, (100,)).astype(np.int32)
        x_val = np.random.random((20, 5)).astype(np.float32)
        y_val = np.random.randint(0, 10, (20,)).astype(np.int32)
        # Create fresh models for training
        teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
        student = SimpleStudent(vocab_size=10, hidden_dim=16)
        # Build models to avoid JAX tracer issues
        dummy_input = x_train[:2]
        teacher(dummy_input)
        student(dummy_input)
        # Create distiller
        distiller = Distiller(
            teacher=teacher,
            student=student,
            distillation_losses=self.distillation_loss,
            student_loss_weight=0.5,
        )
        # Compile distiller
        distiller.compile(
            optimizer="adam",
            loss="sparse_categorical_crossentropy",
            metrics=["accuracy"],
        )
        # Train the model
        history = distiller.fit(
            x_train,
            y_train,
            validation_data=(x_val, y_val),
            epochs=3,
            batch_size=16,
            verbose=0,
        )
        # Check that training completed
        self.assertIn("total_loss", history.history)
        self.assertIn("val_total_loss", history.history)
        self.assertIn("student_loss", history.history)
        self.assertIn("distillation_loss", history.history)
        # Check that losses are finite
        for loss_name in ["total_loss", "student_loss", "distillation_loss"]:
            losses = history.history[loss_name]
            self.assertGreater(len(losses), 0)
            for loss in losses:
                self.assertTrue(np.isfinite(loss))
        # Check that the model can make predictions
        predictions = distiller.predict(x_val[:5], verbose=0)
        self.assertEqual(predictions.shape, (5, 10))  # batch_size, vocab_size
        # Check that student weights have changed (indicating learning).
        # NOTE: weights are snapshotted after the first fit; the comparison
        # below is against one more round of training.
        initial_weights = [w.numpy().copy() for w in student.trainable_weights]
        # Train a bit more
        distiller.fit(x_train[:10], y_train[:10], epochs=1, verbose=0)
        final_weights = [w.numpy() for w in student.trainable_weights]
        # At least some weights should have changed
        weights_changed = any(
            not np.allclose(initial, final, atol=1e-6)
            for initial, final in zip(initial_weights, final_weights)
        )
        self.assertTrue(
            weights_changed, "Student weights should change during training"
        )

    def test_evaluation_workflow(self):
        """Test evaluation workflow with model.evaluate()."""
        # Create dataset
        np.random.seed(42)
        x_test = np.random.random((30, 5)).astype(np.float32)
        y_test = np.random.randint(0, 10, (30,)).astype(np.int32)
        # Create fresh models
        teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
        student = SimpleStudent(vocab_size=10, hidden_dim=16)
        # Build models to avoid JAX tracer issues
        dummy_input = x_test[:2]
        teacher(dummy_input)
        student(dummy_input)
        # Create distiller
        distiller = Distiller(
            teacher=teacher,
            student=student,
            distillation_losses=self.distillation_loss,
            student_loss_weight=0.5,
        )
        # Compile distiller
        distiller.compile(
            optimizer="adam",
            loss="sparse_categorical_crossentropy",
            metrics=["accuracy"],
        )
        # Train briefly
        distiller.fit(x_test[:10], y_test[:10], epochs=1, verbose=0)
        # Evaluate the model
        results = distiller.evaluate(x_test, y_test, verbose=0)
        # Check that evaluation returns expected metrics
        self.assertIsInstance(results, list)
        self.assertGreater(len(results), 0)
        # All results should be finite
        for result in results:
            self.assertTrue(np.isfinite(result))

    def test_prediction_workflow(self):
        """Test prediction workflow with model.predict()."""
        # Create dataset
        np.random.seed(42)
        x_test = np.random.random((20, 5)).astype(np.float32)
        # Create fresh models
        teacher = SimpleTeacher(vocab_size=10, hidden_dim=32)
        student = SimpleStudent(vocab_size=10, hidden_dim=16)
        # Build models to avoid JAX tracer issues
        dummy_input = x_test[:2]
        teacher(dummy_input)
        student(dummy_input)
        # Create distiller (uncompiled: predict does not require compile)
        distiller = Distiller(
            teacher=teacher,
            student=student,
            distillation_losses=self.distillation_loss,
            student_loss_weight=0.5,
        )
        # Make predictions
        predictions = distiller.predict(x_test, verbose=0)
        # Check prediction shape
        self.assertEqual(predictions.shape, (20, 10))  # batch_size, vocab_size
        # Check that predictions are finite
        self.assertTrue(np.all(np.isfinite(predictions)))
        # Check predictions sum to reasonable values (not zeros/infinities)
        prediction_sums = np.sum(predictions, axis=1)
        self.assertTrue(np.all(np.isfinite(prediction_sums)))

    def test_distiller_serialization_and_saving(self):
        """Test Distiller serialization, saving, and loading."""
        # Use standard Sequential models for serialization testing
        teacher = keras.Sequential(
            [
                keras.layers.Dense(
                    32, activation="relu", name="teacher_dense_1"
                ),
                keras.layers.Dense(
                    16, activation="relu", name="teacher_dense_2"
                ),
                keras.layers.Dense(10, name="teacher_output"),
            ]
        )
        student = keras.Sequential(
            [
                keras.layers.Dense(
                    16, activation="relu", name="student_dense_1"
                ),
                keras.layers.Dense(
                    8, activation="relu", name="student_dense_2"
                ),
                keras.layers.Dense(10, name="student_output"),
            ]
        )
        # Create distiller with single distillation_loss
        distillation_loss = LogitsDistillation(
            temperature=3.0, loss="kl_divergence"
        )
        original_distiller = Distiller(
            teacher=teacher,
            student=student,
            distillation_losses=distillation_loss,
            student_loss_weight=0.7,
        )
        # Build the models by calling them
        x_test = np.random.random((2, 20)).astype(np.float32)
        _ = original_distiller(x_test)
        # Test get_config
        config = original_distiller.get_config()
        # Verify all components are in config
        required_keys = [
            "teacher",
            "student",
            "distillation_losses",
            "distillation_loss_weights",
            "student_loss_weight",
        ]
        for key in required_keys:
            self.assertIn(key, config, f"Missing key: {key}")
        # Test JSON serialization (config must be JSON-safe end to end)
        json_str = json.dumps(config)
        self.assertIsInstance(json_str, str)
        # Test from_config reconstruction
        reconstructed_distiller = Distiller.from_config(config)
        # Verify reconstruction
        self.assertEqual(reconstructed_distiller.student_loss_weight, 0.7)
        self.assertIsInstance(
            reconstructed_distiller.distillation_losses[0], LogitsDistillation
        )
        # Verify distillation_loss parameters
        self.assertEqual(
            reconstructed_distiller.distillation_losses[0].temperature, 3.0
        )
        # Test that reconstructed distiller can be used for inference
        reconstructed_output = reconstructed_distiller(x_test)
        self.assertEqual(reconstructed_output.shape, (2, 10))
        # Test model saving and loading (full integration test)
        temp_dir = self.get_temp_dir()
        model_path = os.path.join(temp_dir, "distiller_model.keras")
        # Compile original distiller
        original_distiller.compile(
            loss="sparse_categorical_crossentropy",
        )
        # Save the model
        original_distiller.save(model_path)
        # Load the model
        loaded_distiller = keras.models.load_model(model_path)
        # Verify loaded model works
        loaded_output = loaded_distiller(x_test)
        self.assertEqual(loaded_output.shape, (2, 10))
        # Verify parameters are preserved
        self.assertEqual(loaded_distiller.student_loss_weight, 0.7)
        # The core serialization functionality is working.
        # NOTE(review): this final assert is a no-op sentinel.
        self.assertTrue(True, "Distiller serialization test passed")
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/distillation/distiller_test.py",
"license": "Apache License 2.0",
"lines": 430,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/layers/core/reversible_embedding.py | import copy
import math
from keras.src import dtype_policies
from keras.src import layers
from keras.src import ops
from keras.src import quantizers
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.backend import set_keras_mask
from keras.src.quantizers.quantization_config import QuantizationConfig
from keras.src.quantizers.quantization_config import get_block_size_for_layer
from keras.src.quantizers.quantizers import dequantize_with_sz_map
@keras_export("keras.layers.ReversibleEmbedding")
class ReversibleEmbedding(layers.Embedding):
"""An embedding layer which can project backwards to the input dim.
This layer is an extension of `keras.layers.Embedding` for language models.
This layer can be called "in reverse" with `reverse=True`, in which case the
layer will linearly project from `output_dim` back to `input_dim`.
By default, the reverse projection will use the transpose of the
`embeddings` weights to project to `input_dim` (weights are "tied"). If
`tie_weights=False`, the model will use a separate, trainable variable for
reverse projection.
This layer has no bias terms.
Args:
input_dim: Integer. Size of the vocabulary,
i.e. maximum integer index + 1.
output_dim: Integer. Dimension of the dense embedding.
tie_weights: Boolean, whether or not the matrix for embedding and
the matrix for the `reverse` projection should share the same
weights.
embeddings_initializer: Initializer for the `embeddings`
matrix (see `keras.initializers`).
embeddings_regularizer: Regularizer function applied to
the `embeddings` matrix (see `keras.regularizers`).
embeddings_constraint: Constraint function applied to
the `embeddings` matrix (see `keras.constraints`).
mask_zero: Boolean, whether or not the input value 0 is a special
"padding" value that should be masked out.
reverse_dtype: The dtype for the reverse projection computation.
Defaults to the `compute_dtype` of the layer.
logit_soft_cap: If `logit_soft_cap` is set and `reverse=True`, the
output logits will be scaled by
`tanh(logits / logit_soft_cap) * logit_soft_cap`. This narrows the
range of output logits and can improve training.
**kwargs: other keyword arguments passed to `keras.layers.Embedding`,
including `name`, `trainable`, `dtype` etc.
Call arguments:
inputs: The tensor inputs to the layer.
reverse: Boolean. If `True` the layer will perform a linear projection
from `output_dim` to `input_dim`, instead of a normal embedding
call. Default to `False`.
Example:
```python
batch_size = 16
vocab_size = 100
hidden_dim = 32
seq_length = 50
# Generate random inputs.
token_ids = np.random.randint(vocab_size, size=(batch_size, seq_length))
embedding = keras.layers.ReversibleEmbedding(vocab_size, hidden_dim)
# Embed tokens to shape `(batch_size, seq_length, hidden_dim)`.
hidden_states = embedding(token_ids)
# Project hidden states to shape `(batch_size, seq_length, vocab_size)`.
logits = embedding(hidden_states, reverse=True)
```
References:
- [Vaswani et al., 2017](https://arxiv.org/abs/1706.03762)
- [Press and Wolf, 2016](https://arxiv.org/abs/1608.05859)
"""
    def __init__(
        self,
        input_dim,
        output_dim,
        tie_weights=True,
        embeddings_initializer="uniform",
        embeddings_regularizer=None,
        embeddings_constraint=None,
        mask_zero=False,
        reverse_dtype=None,
        logit_soft_cap=None,
        **kwargs,
    ):
        # Forward the standard embedding arguments to `layers.Embedding`;
        # only the reverse-projection options are stored here.
        super().__init__(
            input_dim,
            output_dim,
            embeddings_initializer=embeddings_initializer,
            embeddings_regularizer=embeddings_regularizer,
            embeddings_constraint=embeddings_constraint,
            mask_zero=mask_zero,
            **kwargs,
        )
        # Whether the reverse projection reuses (the transpose of) the
        # forward embedding matrix instead of a separate variable.
        self.tie_weights = tie_weights
        # Optional dtype for the reverse-mode matmul; `None` means the
        # layer's `compute_dtype` is used unchanged.
        self.reverse_dtype = reverse_dtype
        # Optional tanh soft cap applied to reverse-mode logits.
        self.logit_soft_cap = logit_soft_cap
def build(self, inputs_shape=None):
super().build(inputs_shape)
if not self.tie_weights and self.quantization_mode not in (
"int8",
"int4",
):
self.reverse_embeddings = self.add_weight(
shape=(self.output_dim, self.input_dim),
initializer=self.embeddings_initializer,
name="reverse_embeddings",
trainable=True,
)
def call(self, inputs, reverse=False):
if not reverse:
result = super().call(inputs)
mask = super().compute_mask(inputs)
if mask is not None:
set_keras_mask(result, mask)
return result
else:
if self.tie_weights:
kernel = ops.transpose(self.embeddings)
else:
kernel = self.reverse_embeddings
if self.reverse_dtype is not None:
inputs = ops.cast(inputs, self.reverse_dtype)
kernel = ops.cast(kernel, self.reverse_dtype)
logits = ops.matmul(inputs, kernel)
# Optionally soft-cap logits.
if self.logit_soft_cap is not None:
soft_cap = self.logit_soft_cap
logits = ops.multiply(
ops.tanh(ops.divide(logits, soft_cap)), soft_cap
)
return logits
def compute_mask(self, inputs, mask=None):
# Disable masking from super class, masking is done directly in call.
return None
def compute_output_shape(self, input_shape, reverse=False):
output_shape = list(input_shape)
if reverse:
output_shape[-1] = self.input_dim
else:
output_shape += [self.output_dim]
return output_shape
def compute_output_spec(self, inputs, reverse=False):
output_shape = list(inputs.shape)
if reverse:
output_shape[-1] = self.input_dim
else:
output_shape += [self.output_dim]
return KerasTensor(output_shape, dtype=self.compute_dtype)
def get_config(self):
config = super().get_config()
config.update(
{
"tie_weights": self.tie_weights,
"reverse_dtype": self.reverse_dtype,
"logit_soft_cap": self.logit_soft_cap,
}
)
return config
@property
def variable_serialization_spec(self):
# Avoid modifying the parent's spec.
_spec = copy.deepcopy(super().variable_serialization_spec)
if not self.tie_weights:
for mode, variable_spec in _spec.items():
variable_spec.append("reverse_embeddings")
if mode in ("int4", "int8"):
variable_spec.append("reverse_embeddings_scale")
if mode == "int4":
# reverse_embeddings_zero only exists for sub-channel
variable_spec.append("reverse_embeddings_zero")
return _spec
def quantized_build(self, embeddings_shape, mode, config=None):
if mode == "int8":
self._int8_build(embeddings_shape, config)
elif mode == "int4":
self._int4_build(embeddings_shape, config)
else:
raise self._quantization_mode_error(mode)
self._is_quantized = True
    def _int8_build(self, embeddings_shape, config=None):
        """Create int8 variables: quantized kernel(s) plus per-channel scales."""
        if embeddings_shape is None:
            embeddings_shape = (self.input_dim, self.output_dim)
        super()._int8_build(embeddings_shape=embeddings_shape)
        # Quantizer applied to the reverse-mode inputs at call time.
        self.inputs_quantizer = (
            QuantizationConfig.activation_quantizer_or_default(
                config, quantizers.AbsMaxQuantizer(axis=-1)
            )
        )
        if not self.tie_weights:
            # Untied reverse projection: int8 kernel of shape
            # (output_dim, input_dim) with one scale per vocabulary entry.
            self.reverse_embeddings = self.add_weight(
                name="reverse_embeddings",
                shape=(self.output_dim, self.input_dim),
                initializer="zeros",
                dtype="int8",
                trainable=False,
            )
            self.reverse_embeddings_scale = self.add_weight(
                name="reverse_embeddings_scale",
                shape=(self.input_dim,),
                initializer="ones",
                trainable=False,
            )
    def _int4_build(self, embeddings_shape, config=None):
        """Create int4 variables (values stored packed two-per-int8 byte).

        For untied weights this also allocates the reverse kernel, its
        scales, and — for grouped (sub-channel) quantization — the
        zero-point variable.
        """
        if embeddings_shape is None:
            embeddings_shape = (self.input_dim, self.output_dim)
        super()._int4_build(embeddings_shape=embeddings_shape, config=config)
        # Quantizer applied to the reverse-mode inputs at call time.
        self.inputs_quantizer = (
            QuantizationConfig.activation_quantizer_or_default(
                config, quantizers.AbsMaxQuantizer(axis=-1)
            )
        )
        if not self.tie_weights:
            packed_rows = (self.output_dim + 1) // 2  # ceil for odd dims
            # Two int4 values are packed into each int8 entry along axis 0.
            self.reverse_embeddings = self.add_weight(
                name="reverse_embeddings",
                shape=(packed_rows, self.input_dim),
                initializer="zeros",
                dtype="int8",
                trainable=False,
            )
            # Determine block_size from config or dtype_policy
            block_size = get_block_size_for_layer(self, config)
            if block_size is None or block_size == -1:
                # Per-channel: one scale per output unit (input_dim)
                reverse_scale_shape = (self.input_dim,)
            else:
                # Grouped: scale per group along output_dim (axis=0)
                n_groups = math.ceil(self.output_dim / block_size)
                reverse_scale_shape = (n_groups, self.input_dim)
            self.reverse_embeddings_scale = self.add_weight(
                name="reverse_embeddings_scale",
                shape=reverse_scale_shape,
                initializer="ones",
                trainable=False,
            )
            # Zero point for asymmetric grouped quantization
            if block_size is not None and block_size != -1:
                self.reverse_embeddings_zero = self.add_weight(
                    name="reverse_embeddings_zero",
                    shape=reverse_scale_shape,
                    initializer="zeros",
                    trainable=False,
                )
    def _int8_call(self, inputs, reverse=False):
        """int8 forward/reverse call.

        In reverse mode the matmul runs on the int8 kernel and the result
        is de-scaled by both the activation scale and the kernel scale.
        """
        if not reverse:
            return super()._int8_call(inputs)
        else:
            if self.tie_weights:
                # Tied: reuse the forward int8 matrix transposed.
                kernel = ops.transpose(self._embeddings)
                scale = ops.transpose(self.embeddings_scale)
            else:
                kernel = self.reverse_embeddings
                scale = self.reverse_embeddings_scale
            if self.inputs_quantizer:
                inputs, inputs_scale = self.inputs_quantizer(inputs)
            else:
                # No activation quantizer configured: use a neutral scale.
                inputs_scale = ops.ones((1,), dtype=self.compute_dtype)
            logits = ops.matmul(inputs, kernel)
            # De-scale outputs
            logits = ops.cast(logits, self.compute_dtype)
            logits = ops.divide(logits, ops.multiply(inputs_scale, scale))
            # Optionally soft-cap logits.
            if self.logit_soft_cap is not None:
                soft_cap = self.logit_soft_cap
                logits = ops.multiply(
                    ops.tanh(ops.divide(logits, soft_cap)), soft_cap
                )
            return logits
    def _int4_call(self, inputs, reverse=False):
        """int4 forward/reverse call.

        Reverse mode has three paths: per-channel (matmul then de-scale),
        and two sub-channel paths (tied/untied) that must dequantize the
        kernel before the matmul for correctness.
        """
        if not reverse:
            return super()._int4_call(inputs)
        else:
            block_size = getattr(self, "_int4_block_size", None)
            if self.tie_weights:
                embeddings = ops.transpose(self._embeddings)
                scale = self.embeddings_scale
                # For tied weights, scale shape is (input_dim,) or
                # (input_dim, n_groups). For per-channel, transpose scale.
                if block_size is None or block_size == -1:
                    scale = ops.transpose(scale)
            else:
                embeddings = self.reverse_embeddings
                scale = self.reverse_embeddings_scale
            # Recover individual int4 values from the packed int8 storage.
            unpacked_embeddings = quantizers.unpack_int4(
                embeddings, self.output_dim, axis=0
            )
            if self.inputs_quantizer:
                inputs, inputs_scale = self.inputs_quantizer(inputs)
            else:
                # No activation quantizer configured: use a neutral scale.
                inputs_scale = ops.ones((1,), dtype=self.compute_dtype)
            if block_size is None or block_size == -1:
                # Per-channel: do matmul then dequantize
                logits = ops.matmul(inputs, unpacked_embeddings)
                logits = ops.cast(logits, self.compute_dtype)
                logits = ops.divide(logits, ops.multiply(inputs_scale, scale))
            elif self.tie_weights:
                # Sub-channel with asymmetric quantization (tied weights)
                # Must dequantize embeddings before matmul for correctness
                # unpacked_embeddings shape: (output_dim, input_dim)
                # scale shape: (input_dim, n_groups)
                # embeddings_zero shape: (input_dim, n_groups)
                # g_idx shape: (output_dim,)
                # Transpose scale/zero for dequantization:
                # [input_dim, n_groups] -> [n_groups, input_dim]
                scale_t = ops.transpose(scale)
                zero_t = ops.transpose(self.embeddings_zero)
                float_embeddings = dequantize_with_sz_map(
                    ops.cast(unpacked_embeddings, self.compute_dtype),
                    scale_t,
                    zero_t,
                    self.g_idx,
                    group_axis=0,
                )
                # inputs shape: (batch, output_dim)
                # float_embeddings shape: (output_dim, input_dim)
                logits = ops.matmul(inputs, float_embeddings)
                logits = ops.divide(logits, inputs_scale)
            else:
                # Untied weights with asymmetric grouped quantization
                # Must dequantize embeddings before matmul for correctness
                # unpacked_embeddings shape: (output_dim, input_dim)
                # scale shape: (n_groups, input_dim)
                # reverse_embeddings_zero shape: (n_groups, input_dim)
                # g_idx shape: (output_dim,) - reuse from forward pass
                float_embeddings = dequantize_with_sz_map(
                    ops.cast(unpacked_embeddings, self.compute_dtype),
                    scale,
                    self.reverse_embeddings_zero,
                    self.g_idx,
                    group_axis=0,
                )
                # inputs shape: (batch, output_dim)
                # float_embeddings shape: (output_dim, input_dim)
                logits = ops.matmul(inputs, float_embeddings)
                logits = ops.divide(logits, inputs_scale)
            # Optionally soft-cap logits.
            if self.logit_soft_cap is not None:
                soft_cap = self.logit_soft_cap
                logits = ops.multiply(
                    ops.tanh(ops.divide(logits, soft_cap)), soft_cap
                )
            return logits
def quantize(self, mode=None, type_check=True, config=None):
if type_check and type(self) is not ReversibleEmbedding:
raise self._not_implemented_error(self.quantize)
self.quantization_config = config
embeddings_shape = (self.input_dim, self.output_dim)
if mode == "int8":
# Quantize `self._embeddings` to int8 and compute corresponding
# scale.
weight_quantizer = QuantizationConfig.weight_quantizer_or_default(
self.quantization_config, quantizers.AbsMaxQuantizer(axis=-1)
)
embeddings_value, embeddings_scale = weight_quantizer(
self._embeddings, to_numpy=True
)
embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
del self._embeddings
if not self.tie_weights:
reverse_weight_quantizer = (
QuantizationConfig.weight_quantizer_or_default(
self.quantization_config,
quantizers.AbsMaxQuantizer(axis=0),
)
)
reverse_embeddings_value, reverse_embeddings_scale = (
reverse_weight_quantizer(
self.reverse_embeddings, to_numpy=True
)
)
reverse_embeddings_scale = ops.squeeze(
reverse_embeddings_scale, axis=0
)
del self.reverse_embeddings
self.quantized_build(
embeddings_shape, mode, self.quantization_config
)
self._embeddings.assign(embeddings_value)
self.embeddings_scale.assign(embeddings_scale)
if not self.tie_weights:
self.reverse_embeddings.assign(reverse_embeddings_value)
self.reverse_embeddings_scale.assign(reverse_embeddings_scale)
elif mode == "int4":
from keras.src.quantizers.quantization_config import (
Int4QuantizationConfig,
)
block_size = None
if isinstance(self.quantization_config, Int4QuantizationConfig):
block_size = self.quantization_config.block_size
use_grouped = block_size is not None and block_size != -1
# Quantize forward embeddings
if not use_grouped:
# Per-channel quantization
weight_quantizer = (
QuantizationConfig.weight_quantizer_or_default(
self.quantization_config,
quantizers.AbsMaxQuantizer(
axis=-1,
value_range=(-8, 7),
output_dtype="int8",
),
)
)
embeddings_value, embeddings_scale = weight_quantizer(
self._embeddings, to_numpy=True
)
embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
else:
# Sub-channel quantization with asymmetric zero point
embeddings_t = ops.transpose(self._embeddings)
embeddings_value_t, scale_t, zero_t = (
quantizers.abs_max_quantize_grouped_with_zero_point(
embeddings_t,
block_size=block_size,
value_range=(-8, 7),
dtype="int8",
to_numpy=True,
)
)
# Transpose back to (input_dim, output_dim) layout
embeddings_value = ops.transpose(embeddings_value_t)
embeddings_scale = ops.transpose(scale_t)
embeddings_zero = ops.transpose(zero_t)
packed_embeddings_value, _, _ = quantizers.pack_int4(
embeddings_value, axis=-1
)
del self._embeddings
# Quantize reverse embeddings if not tied
if not self.tie_weights:
if not use_grouped:
reverse_weight_quantizer = (
QuantizationConfig.weight_quantizer_or_default(
self.quantization_config,
quantizers.AbsMaxQuantizer(
axis=0,
value_range=(-8, 7),
output_dtype="int8",
),
)
)
reverse_embeddings_value, reverse_embeddings_scale = (
reverse_weight_quantizer(
self.reverse_embeddings, to_numpy=True
)
)
reverse_embeddings_scale = ops.squeeze(
reverse_embeddings_scale, axis=0
)
else:
reverse_value, reverse_scale, reverse_zero = (
quantizers.abs_max_quantize_grouped_with_zero_point(
self.reverse_embeddings,
block_size=block_size,
value_range=(-8, 7),
dtype="int8",
to_numpy=True,
)
)
reverse_embeddings_value = reverse_value
reverse_embeddings_scale = reverse_scale
reverse_embeddings_zero = reverse_zero
packed_reverse_embeddings_value, _, _ = quantizers.pack_int4(
reverse_embeddings_value, axis=0
)
del self.reverse_embeddings
self.quantized_build(
embeddings_shape, mode, self.quantization_config
)
self._embeddings.assign(packed_embeddings_value)
self.embeddings_scale.assign(embeddings_scale)
if use_grouped:
self.embeddings_zero.assign(embeddings_zero)
if not self.tie_weights:
self.reverse_embeddings.assign(packed_reverse_embeddings_value)
self.reverse_embeddings_scale.assign(reverse_embeddings_scale)
if use_grouped:
self.reverse_embeddings_zero.assign(reverse_embeddings_zero)
else:
raise self._quantization_mode_error(mode)
# Set new dtype policy.
if self.dtype_policy.quantization_mode is None:
policy_name = mode
if mode == "int4":
# Include block_size in policy name for sub-channel quantization
block_size = get_block_size_for_layer(self, config)
block_size_value = -1 if block_size is None else block_size
policy_name = f"int4/{block_size_value}"
policy = dtype_policies.get(
f"{policy_name}_from_{self.dtype_policy.name}"
)
self.dtype_policy = policy
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/core/reversible_embedding.py",
"license": "Apache License 2.0",
"lines": 489,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
keras-team/keras:keras/src/layers/core/reversible_embedding_test.py | import math
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import saving
from keras.src import testing
from keras.src.quantizers.quantization_config import Int4QuantizationConfig
from keras.src.quantizers.quantization_config import Int8QuantizationConfig
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.testing import test_case
from keras.src.testing.test_utils import named_product
class ReversibleEmbeddingTest(test_case.TestCase):
@parameterized.named_parameters(
("int8", "int8", {"axis": -1}, {"axis": -1}),
(
"int4",
"int4",
{"axis": -1, "value_range": (-8, 7), "output_dtype": "int8"},
{"axis": -1},
),
("int8_weight_only", "int8", {"axis": -1}, None),
)
def test_reversible_embedding_quantize(
self, mode, weight_quantizer_args, activation_quantizer_args
):
"""Test ReversibleEmbedding quantization with QuantizationConfig."""
layer = layers.ReversibleEmbedding(
input_dim=10, output_dim=6, tie_weights=True
)
layer.build((None,))
weight_quantizer = AbsMaxQuantizer(**weight_quantizer_args)
if activation_quantizer_args is not None:
activation_quantizer = AbsMaxQuantizer(**activation_quantizer_args)
else:
activation_quantizer = None
if mode == "int8":
config = Int8QuantizationConfig(
weight_quantizer=weight_quantizer,
activation_quantizer=activation_quantizer,
)
elif mode == "int4":
# Custom quantizers require per-channel mode (block_size=None)
config = Int4QuantizationConfig(
weight_quantizer=weight_quantizer,
activation_quantizer=activation_quantizer,
block_size=None,
)
layer.quantize(mode, config=config)
if activation_quantizer_args is not None:
# Verify inputs_quantizer is set correctly
self.assertIsInstance(layer.inputs_quantizer, AbsMaxQuantizer)
else:
# Verify inputs_quantizer is None
self.assertIsNone(layer.inputs_quantizer)
# Verify reverse call works
x = np.random.random((2, 6)).astype("float32")
y = layer(x, reverse=True)
self.assertEqual(y.shape, (2, 10))
@parameterized.named_parameters(
("tie_weights", True),
("untie_weights", False),
)
@pytest.mark.requires_trainable_backend
def test_reversible_embedding_basics(self, tie_weights):
self.run_layer_test(
layers.ReversibleEmbedding,
init_kwargs={
"input_dim": 100,
"output_dim": 32,
"tie_weights": tie_weights,
"embeddings_initializer": "HeNormal",
"logit_soft_cap": 50,
},
input_data=np.random.randint(low=0, high=100, size=(4, 10)),
expected_output_shape=(4, 10, 32),
expected_num_trainable_weights=1 if tie_weights else 2,
)
@parameterized.named_parameters(
("tie_weights", True),
("untie_weights", False),
)
def test_saving(self, tie_weights):
input_data = np.random.randint(low=0, high=100, size=(4, 10))
model = models.Sequential(
[
layers.ReversibleEmbedding(
input_dim=100,
output_dim=32,
tie_weights=tie_weights,
)
]
)
path = os.path.join(self.get_temp_dir(), "model.keras")
model_output = model(input_data)
model.save(path)
restored_model = saving.load_model(path)
restored_output = restored_model(input_data)
self.assertAllClose(model_output, restored_output)
def test_correctness(self):
layer = layers.ReversibleEmbedding(input_dim=3, output_dim=2)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
out = layer(np.array(([2, 1, 0])))
self.assertAllClose(out, np.array([[3.0, 3.0], [2.0, 2.0], [0.0, 0.0]]))
layer = layers.ReversibleEmbedding(input_dim=3, output_dim=2)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
out = layer(np.array(([[1.0, 1.0]])), reverse=True)
self.assertAllClose(out, np.array([[0.0, 4.0, 6.0]]))
layer = layers.ReversibleEmbedding(
input_dim=3, output_dim=2, logit_soft_cap=5
)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
out = layer(np.array(([[1.0, 1.0]])), reverse=True)
self.assertAllClose(out, np.array([[0.0, 3.320184, 4.168273]]))
def test_reverse_dtype(self):
embedding = layers.ReversibleEmbedding(100, 16, reverse_dtype="float32")
input_data = ops.ones(shape=(4, 10, 16))
output_data = embedding(input_data, reverse=True)
self.assertEqual(output_data.shape, (4, 10, 100))
self.assertDType(output_data, "float32")
if backend.backend() == "torch":
import torch
if not torch.cuda.is_available():
self.skipTest("Torch CPU does not support float16")
embedding = layers.ReversibleEmbedding(100, 16, reverse_dtype="float16")
input_data = ops.ones(shape=(4, 10, 16))
output_data = embedding(input_data, reverse=True)
self.assertEqual(output_data.shape, (4, 10, 100))
self.assertDType(output_data, "float16")
@parameterized.named_parameters(
named_product(mode=("int4", "int8"), tie_weights=(False, True))
)
def test_quantize_int(self, mode, tie_weights):
layer = layers.ReversibleEmbedding(10, 16, tie_weights=tie_weights)
layer.build()
x = np.random.randint(0, 9, size=(64, 3))
x_reverse = np.random.uniform(size=(64, 16)).astype("float32")
y_float = layer(x)
y_reverse_float = layer(x_reverse, reverse=True)
layer.quantize(mode)
# Verify the dtype of the weights.
if not tie_weights:
# The reverse_embeddings's dtype is int8, despite the int4
# quantization, because we pack the int4 values into int8.
self.assertDType(layer.reverse_embeddings, "int8")
self.assertDType(
layer.reverse_embeddings_scale, layer.variable_dtype
)
# Verify the correctness of the outputs.
y_quantized = layer(x)
y_reverse_quantized = layer(x_reverse, reverse=True)
mse = ops.mean(ops.square(y_float - y_quantized))
mse_reverse = ops.mean(
ops.square(y_reverse_float - y_reverse_quantized)
)
self.assertLess(mse, 1e-3) # A weak correctness test
self.assertLess(mse_reverse, 1e-3) # A weak correctness test
# Check model save / load round-trip.
model = models.Sequential([layer])
temp_filepath = os.path.join(
self.get_temp_dir(), "quantized_model.keras"
)
model.save(temp_filepath)
new_model = saving.load_model(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
# Check weights-only save / load round-trip.
temp_filepath = os.path.join(
self.get_temp_dir(), "quantized_model.weights.h5"
)
model.save_weights(temp_filepath)
new_model = models.Sequential(
[layers.ReversibleEmbedding(10, 16, tie_weights=tie_weights)]
)
new_model.build((None, 3))
new_model.quantize(mode)
new_model.load_weights(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
@parameterized.named_parameters(
("int8_tie_weights", "int8_from_mixed_bfloat16", True, 0, 2),
("int8_untie_weights", "int8_from_mixed_bfloat16", False, 0, 4),
("int4_tie_weights", "int4_from_mixed_bfloat16", True, 0, 2),
("int4_untie_weights", "int4_from_mixed_bfloat16", False, 0, 4),
)
@pytest.mark.requires_trainable_backend
def test_quantize_dtype_argument(
self,
dtype,
tie_weights,
num_trainable_weights,
num_non_trainable_weights,
):
self.run_layer_test(
layers.ReversibleEmbedding,
init_kwargs={
"input_dim": 100,
"output_dim": 32,
"tie_weights": tie_weights,
"embeddings_initializer": "HeNormal",
"dtype": dtype,
},
input_data=np.random.randint(low=0, high=100, size=(4, 10)),
expected_output_shape=(4, 10, 32),
expected_num_trainable_weights=num_trainable_weights,
expected_num_non_trainable_weights=num_non_trainable_weights,
expected_num_non_trainable_variables=num_non_trainable_weights,
)
def test_reversible_embedding_int8_custom_quantizer(self):
"""
Test custom quantizer serialization for reversible embedding layer with
int8 quantization.
"""
# Setup
weight_range = (-20, 20)
config = Int8QuantizationConfig(
weight_quantizer=AbsMaxQuantizer(axis=-1, value_range=weight_range),
)
# Build & Quantize
layer = layers.ReversibleEmbedding(input_dim=100, output_dim=16)
layer.build(None)
layer.quantize("int8", config=config)
# Serialize & Deserialize
serialized = layer.get_config()
new_layer = layers.ReversibleEmbedding.from_config(serialized)
# Verify
self.assertIsInstance(
new_layer.quantization_config, Int8QuantizationConfig
)
quantizer = new_layer.quantization_config.weight_quantizer
self.assertIsInstance(quantizer, AbsMaxQuantizer)
self.assertAllEqual(quantizer.value_range, weight_range)
def test_masking(self):
layer = layers.ReversibleEmbedding(3, 2, mask_zero=True)
layer.build()
out = layer(np.array(([2, 1, 0])))
mask = backend.get_keras_mask(out)
self.assertAllClose(mask, np.array([True, True, False]))
out = layer(np.array(([[1.0, 2.0], [0.0, 0.0]])), reverse=True)
mask = backend.get_keras_mask(out)
self.assertIsNone(mask)
@parameterized.named_parameters(
named_product(
block_size=(64, 128, None, -1),
tie_weights=(True, False),
)
)
def test_int4_quantization_block_size(self, block_size, tie_weights):
"""Test int4 quantization with different block_size configurations."""
input_dim, output_dim = 100, 256
layer = layers.ReversibleEmbedding(
input_dim=input_dim, output_dim=output_dim, tie_weights=tie_weights
)
layer.build()
x = np.random.randint(0, input_dim, size=(4, 8))
x_reverse = np.random.random((4, output_dim)).astype("float32")
y_float = layer(x)
y_reverse_float = layer(x_reverse, reverse=True)
# Create config with specified block_size
config = Int4QuantizationConfig(block_size=block_size)
layer.quantize("int4", config=config)
# Verify block_size is stored
self.assertEqual(layer._int4_block_size, block_size)
# Verify embeddings_scale shape
if block_size is None or block_size == -1:
expected_scale_shape = (input_dim,)
else:
n_groups = math.ceil(output_dim / block_size)
expected_scale_shape = (input_dim, n_groups)
self.assertEqual(layer.embeddings_scale.shape, expected_scale_shape)
# Verify reverse_embeddings_scale shape if not tied
if not tie_weights:
if block_size is None or block_size == -1:
expected_reverse_scale_shape = (input_dim,)
else:
n_groups = math.ceil(output_dim / block_size)
expected_reverse_scale_shape = (n_groups, input_dim)
self.assertEqual(
layer.reverse_embeddings_scale.shape,
expected_reverse_scale_shape,
)
# Verify outputs are reasonable
y_quantized = layer(x)
y_reverse_quantized = layer(x_reverse, reverse=True)
mse = ops.mean(ops.square(y_float - y_quantized))
mse_reverse = ops.mean(
ops.square(y_reverse_float - y_reverse_quantized)
)
self.assertLess(mse, 1e-3)
self.assertLess(mse_reverse, 1e-2)
@parameterized.named_parameters(
named_product(
block_size=(64, 128, None),
tie_weights=(True, False),
)
)
def test_int4_block_size_serialization(self, block_size, tie_weights):
"""Test that block_size is preserved through serialization."""
input_dim, output_dim = 50, 128
layer = layers.ReversibleEmbedding(
input_dim=input_dim, output_dim=output_dim, tie_weights=tie_weights
)
layer.build()
config = Int4QuantizationConfig(block_size=block_size)
layer.quantize("int4", config=config)
# Get output before serialization
x = np.random.randint(0, input_dim, size=(2, 8))
y_before = layer(x)
# Save and load model to test full serialization roundtrip
model = models.Sequential([layer])
temp_filepath = os.path.join(
self.get_temp_dir(),
f"int4_block_size_rev_emb_model_{tie_weights}.keras",
)
model.save(temp_filepath)
loaded_model = saving.load_model(temp_filepath)
# Verify block_size is preserved
loaded_layer = loaded_model.layers[0]
self.assertIsInstance(
loaded_layer.quantization_config, Int4QuantizationConfig
)
self.assertEqual(
loaded_layer.quantization_config.block_size, block_size
)
# Verify reverse_embeddings_zero is preserved for untied grouped
if not tie_weights and block_size is not None:
self.assertTrue(hasattr(loaded_layer, "reverse_embeddings_zero"))
self.assertAllClose(
loaded_layer.reverse_embeddings_zero,
layer.reverse_embeddings_zero,
)
# Verify outputs match after deserialization
y_after = loaded_model(x)
self.assertAllClose(y_before, y_after)
@parameterized.named_parameters(
("tie_grouped", True, 64),
("tie_perchannel", True, None),
("untie_grouped", False, 64),
("untie_perchannel", False, None),
)
def test_int4_grouped_vs_perchannel_scale_shapes(
self, tie_weights, block_size
):
"""Test that grouped and per-channel have different scale shapes."""
input_dim, output_dim = 100, 256
layer = layers.ReversibleEmbedding(
input_dim=input_dim, output_dim=output_dim, tie_weights=tie_weights
)
layer.build()
config = Int4QuantizationConfig(block_size=block_size)
layer.quantize("int4", config=config)
if block_size is None or block_size == -1:
# Per-channel
expected_scale_shape = (input_dim,)
expected_reverse_scale_shape = (input_dim,)
else:
# Grouped
n_groups = math.ceil(output_dim / block_size)
expected_scale_shape = (input_dim, n_groups)
expected_reverse_scale_shape = (n_groups, input_dim)
self.assertEqual(layer.embeddings_scale.shape, expected_scale_shape)
if not tie_weights:
self.assertEqual(
layer.reverse_embeddings_scale.shape,
expected_reverse_scale_shape,
)
# Check reverse_embeddings_zero shape for grouped quantization
if block_size is not None and block_size != -1:
self.assertTrue(hasattr(layer, "reverse_embeddings_zero"))
self.assertEqual(
layer.reverse_embeddings_zero.shape,
expected_reverse_scale_shape,
)
else:
self.assertFalse(hasattr(layer, "reverse_embeddings_zero"))
@parameterized.named_parameters(
("grouped_block_4", 4),
("grouped_block_8", 8),
)
@pytest.mark.skipif(
testing.tensorflow_uses_gpu(), reason="Segfault on Tensorflow GPU"
)
def test_int4_subchannel_g_idx_created(self, block_size):
"""Test that g_idx is created for sub-channel int4 quantization."""
input_dim, output_dim = 10, 16
layer = layers.ReversibleEmbedding(
input_dim=input_dim, output_dim=output_dim
)
layer.build()
config = Int4QuantizationConfig(block_size=block_size)
layer.quantize("int4", config=config)
# Verify g_idx is created
self.assertTrue(hasattr(layer, "g_idx"))
# Verify g_idx shape (output_dim for embedding)
self.assertEqual(layer.g_idx.shape, (output_dim,))
# Verify g_idx values (should map each column to its group)
expected_g_idx = np.arange(output_dim) // block_size
self.assertAllClose(layer.g_idx, expected_g_idx)
@pytest.mark.skipif(
testing.tensorflow_uses_gpu(), reason="Segfault on Tensorflow GPU"
)
def test_int4_perchannel_no_g_idx(self):
"""Test that per-channel int4 does NOT create g_idx."""
layer = layers.ReversibleEmbedding(input_dim=10, output_dim=16)
layer.build()
config = Int4QuantizationConfig(block_size=None) # Per-channel
layer.quantize("int4", config=config)
# Verify g_idx is NOT created for per-channel
self.assertFalse(hasattr(layer, "g_idx"))
@pytest.mark.skipif(
testing.tensorflow_uses_gpu(), reason="Segfault on Tensorflow GPU"
)
def test_int4_subchannel_g_idx_serialization(self):
"""Test that g_idx is properly serialized and deserialized."""
input_dim, output_dim = 10, 16
block_size = 8
layer = layers.ReversibleEmbedding(
input_dim=input_dim, output_dim=output_dim
)
layer.build()
config = Int4QuantizationConfig(block_size=block_size)
layer.quantize("int4", config=config)
x = np.array([[1, 2, 3], [4, 5, 6]], dtype="int32")
y_before = layer(x)
g_idx_before = ops.convert_to_numpy(layer.g_idx)
# Save and load
model = models.Sequential([layer])
temp_filepath = os.path.join(
self.get_temp_dir(), "rev_embedding_int4_g_idx_model.keras"
)
model.save(temp_filepath)
loaded_model = saving.load_model(temp_filepath)
# Verify g_idx is preserved
loaded_layer = loaded_model.layers[0]
self.assertTrue(hasattr(loaded_layer, "g_idx"))
self.assertAllClose(loaded_layer.g_idx, g_idx_before)
# Verify outputs match
y_after = loaded_model(x)
self.assertAllClose(y_before, y_after)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/core/reversible_embedding_test.py",
"license": "Apache License 2.0",
"lines": 444,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/tree/torchtree_impl.py | from collections import defaultdict
from torch.utils import _pytree as torch_tree
def register_tree_node_class(cls):
torch_tree.register_pytree_node(
cls,
flatten_fn=lambda x: x.torchtree_flatten(),
unflatten_fn=cls.torchtree_unflatten,
serialized_type_name=f"{cls.__name__}",
flatten_with_keys_fn=lambda x: x.torchtree_flatten_with_keys(),
)
return cls
def _tree_is_leaf(tree, is_leaf=None):
if is_leaf is not None and is_leaf(tree):
return True
return torch_tree._get_node_type(tree) not in torch_tree.SUPPORTED_NODES
def _dict_to_ordered_dict(structure):
# We need to sort dict and defaultdict to ensure a deterministic order that
# that is consistent with other tree implementations.
def func(x):
if type(x) is dict:
return {k: x[k] for k in sorted(x.keys())}
elif type(x) is defaultdict:
return defaultdict(
x.default_factory,
{k: x[k] for k in sorted(x.keys())},
)
return None
def traverse_children():
children, treedef = torch_tree.tree_flatten(
structure,
is_leaf=lambda x: x is not structure,
)
if treedef.num_nodes == 1 and treedef.num_leaves == 1:
return structure
else:
return torch_tree.tree_unflatten(
[_dict_to_ordered_dict(c) for c in children],
treedef,
)
ret = func(structure)
if ret is None:
return traverse_children()
if isinstance(ret, type) and ret.__name__ == "MAP_TO_NONE":
return None
return ret
def is_nested(structure):
return not _tree_is_leaf(structure)
def traverse(func, structure, top_down=True):
def traverse_children():
children, treedef = torch_tree.tree_flatten(
structure,
is_leaf=lambda x: x is not structure,
)
if treedef.num_nodes == 1 and treedef.num_leaves == 1:
return structure
else:
return torch_tree.tree_unflatten(
[traverse(func, c, top_down=top_down) for c in children],
treedef,
)
structure = _dict_to_ordered_dict(structure)
if top_down:
ret = func(structure)
if ret is None:
return traverse_children()
else:
traversed_structure = traverse_children()
ret = func(traversed_structure)
if ret is None:
return traversed_structure
# Detect MAP_TO_NONE without tree_api import to avoid circular import.
if isinstance(ret, type) and ret.__name__ == "MAP_TO_NONE":
return None
return ret
def flatten(structure):
# We need to first sort dicts to ensure a deterministic order that is
# consistent with other tree implementations.
structure = _dict_to_ordered_dict(structure)
leaves, _ = torch_tree.tree_flatten(structure)
return leaves
def flatten_with_path(structure):
# We need to first sort dicts to ensure a deterministic order that is
# consistent with other tree implementations.
structure = _dict_to_ordered_dict(structure)
leaves_with_path, _ = torch_tree.tree_flatten_with_path(structure)
results = []
fields = []
for key, leaf in leaves_with_path:
for k in key:
if isinstance(k, torch_tree.GetAttrKey) and k.name not in fields:
fields.append(k.name)
fields = sorted(fields)
field_to_idx = {f: i for i, f in enumerate(fields)}
for key, leaf in leaves_with_path:
# Convert to a tuple of keys.
path = []
for k in key:
if isinstance(k, torch_tree.SequenceKey):
path.append(k.idx)
elif isinstance(k, torch_tree.MappingKey):
path.append(k.key)
elif isinstance(k, torch_tree.GetAttrKey):
path.append(field_to_idx[k.name])
results.append((tuple(path), leaf))
return results
def map_structure(func, *structures, none_is_leaf=True):
if not structures:
raise ValueError("Must provide at least one structure")
map_func = func
if not none_is_leaf:
def func_skipping_none(*args):
# Check if the reference entry (first one) is None
if args[0] is None:
if not all(s is None for s in args):
raise ValueError(
"Structure mismatch: some arguments are None, others "
f"are not. Received arguments: {args}."
)
return None
return func(*args)
map_func = func_skipping_none
return torch_tree.tree_map(map_func, *structures)
def map_structure_up_to(shallow_structure, func, *structures):
if not structures:
raise ValueError("Must provide at least one structure")
# Add check that `shallow_structure` really is the shallowest.
# Also only call `func` on `structures` and not `shallow_structure`.
def func_with_check_without_shallow_structure(shallow, *args):
if not _tree_is_leaf(shallow):
raise ValueError("Structures don't have the same nested structure.")
return func(*args)
return torch_tree.tree_map(
func_with_check_without_shallow_structure,
shallow_structure,
*structures,
)
def assert_same_structure(a, b):
def check(a_leaf, b_leaf):
if not _tree_is_leaf(a_leaf) or not _tree_is_leaf(b_leaf):
raise ValueError("Structures don't have the same nested structure.")
return None
torch_tree.tree_map(check, a, b)
def assert_same_paths(a, b):
a_paths = set([path for path, _ in flatten_with_path(a)])
b_paths = set([path for path, _ in flatten_with_path(b)])
if a_paths != b_paths:
msg = "`a` and `b` don't have the same paths."
a_diff = a_paths.difference(b_paths)
if a_diff:
msg += f"\nPaths in `a` missing in `b`:\n{a_diff}"
b_diff = b_paths.difference(a_paths)
if b_diff:
msg += f"\nPaths in `b` missing in `a`:\n{b_diff}"
raise ValueError(msg)
def pack_sequence_as(structure, flat_sequence):
# We need to first sort dicts to ensure a deterministic order that is
# consistent with other tree implementations.
structure = _dict_to_ordered_dict(structure)
_, treespec = torch_tree.tree_flatten(structure)
return torch_tree.tree_unflatten(flat_sequence, treespec)
def lists_to_tuples(structure):
def list_to_tuple(instance):
return tuple(instance) if isinstance(instance, list) else None
return traverse(list_to_tuple, structure, top_down=False)
def map_shape_structure(func, structure):
def is_shape_tuple(x):
return isinstance(x, (list, tuple)) and all(
isinstance(e, (int, type(None))) for e in x
)
# We need to first sort dicts to ensure a deterministic order that is
# consistent with other tree implementations.
structure = _dict_to_ordered_dict(structure)
return torch_tree.tree_map(func, structure, is_leaf=is_shape_tuple)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/tree/torchtree_impl.py",
"license": "Apache License 2.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
keras-team/keras:keras/src/quantizers/gptq_config_test.py | from keras.src import testing
from keras.src.quantizers.gptq_config import GPTQConfig
class TestGPTQConfig(testing.TestCase):
def test_invalid_weight_bits(self):
with self.assertRaisesRegex(ValueError, "Unsupported weight_bits"):
GPTQConfig(dataset=None, tokenizer=None, weight_bits=1)
with self.assertRaisesRegex(ValueError, "Unsupported weight_bits"):
GPTQConfig(dataset=None, tokenizer=None, weight_bits=5)
def test_invalid_num_samples(self):
with self.assertRaisesRegex(
ValueError, "num_samples must be a positive"
):
GPTQConfig(dataset=None, tokenizer=None, num_samples=0)
with self.assertRaisesRegex(
ValueError, "num_samples must be a positive"
):
GPTQConfig(dataset=None, tokenizer=None, num_samples=-1)
def test_invalid_sequence_length(self):
with self.assertRaisesRegex(
ValueError, "sequence_length must be a positive"
):
GPTQConfig(dataset=None, tokenizer=None, sequence_length=0)
with self.assertRaisesRegex(
ValueError, "sequence_length must be a positive"
):
GPTQConfig(dataset=None, tokenizer=None, sequence_length=-10)
def test_invalid_hessian_damping(self):
with self.assertRaisesRegex(
ValueError, "hessian_damping must be between"
):
GPTQConfig(dataset=None, tokenizer=None, hessian_damping=-0.1)
with self.assertRaisesRegex(
ValueError, "hessian_damping must be between"
):
GPTQConfig(dataset=None, tokenizer=None, hessian_damping=1.1)
def test_invalid_group_size(self):
with self.assertRaisesRegex(ValueError, "Invalid group_size"):
GPTQConfig(dataset=None, tokenizer=None, group_size=0)
with self.assertRaisesRegex(ValueError, "Invalid group_size"):
GPTQConfig(dataset=None, tokenizer=None, group_size=-2)
def test_dtype_policy_string(self):
config = GPTQConfig(
dataset=None, tokenizer=None, weight_bits=4, group_size=64
)
assert config.dtype_policy_string() == "gptq/4/64"
def test_gptq_config_serialization(self):
config = GPTQConfig(
dataset=None, tokenizer=None, weight_bits=4, group_size=64
)
serialized_config = config.get_config()
deserialized_config = GPTQConfig.from_config(serialized_config)
self.assertDictEqual(config.__dict__, deserialized_config.__dict__)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/quantizers/gptq_config_test.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/validation_test.py | import pytest
import tensorflow as tf
from keras.src import backend
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes import (
validation,
)
from keras.src.testing import test_case
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The test targets TensorFlow-specific ragged tensors.",
)
class DensifyBoundingBoxesTest(test_case.TestCase):
def test_densify_ragged_bounding_boxes_batched(self):
ragged_boxes = tf.ragged.constant(
[
[[0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4]],
[[0.5, 0.5, 0.6, 0.6]],
],
dtype=tf.float32,
)
ragged_labels = tf.ragged.constant(
[
[0, 1],
[2],
],
dtype=tf.int32,
)
bounding_boxes = {"boxes": ragged_boxes, "labels": ragged_labels}
max_boxes = 3
densified_data = validation.densify_bounding_boxes(
bounding_boxes.copy(), is_batched=True, max_boxes=max_boxes
)
densified_boxes = densified_data["boxes"]
densified_labels = densified_data["labels"]
self.assertEqual(densified_boxes.shape, (2, max_boxes, 4))
self.assertEqual(densified_labels.shape, (2, max_boxes))
expected_boxes = [
[[0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4], [0.0, 0.0, 0.0, 0.0]],
[[0.5, 0.5, 0.6, 0.6], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
]
expected_labels = [
[0, 1, -1],
[2, -1, -1],
]
self.assertAllClose(densified_boxes, expected_boxes)
self.assertAllEqual(densified_labels, expected_labels)
def test_densify_ragged_bounding_boxes_unbatched(self):
ragged_boxes = tf.ragged.constant(
[[0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4]],
dtype=tf.float32,
)
ragged_labels = tf.ragged.constant([[0], [1]], dtype=tf.int32)
bounding_boxes = {"boxes": ragged_boxes, "labels": ragged_labels}
max_boxes = 4
densified_data = validation.densify_bounding_boxes(
bounding_boxes.copy(), is_batched=False, max_boxes=max_boxes
)
densified_boxes = densified_data["boxes"]
densified_labels = densified_data["labels"]
self.assertEqual(densified_boxes.shape, (max_boxes, 4))
self.assertEqual(densified_labels.shape, (max_boxes, 1))
expected_boxes = [
[0.1, 0.1, 0.2, 0.2],
[0.3, 0.3, 0.4, 0.4],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
expected_labels = [[0], [1], [-1], [-1]]
self.assertAllClose(densified_boxes, expected_boxes)
self.assertAllEqual(densified_labels, expected_labels)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/validation_test.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/quantizers/gptq.py | import types
from keras.src import ops
from keras.src import quantizers
from keras.src.layers import Dense
from keras.src.layers import EinsumDense
from keras.src.ops import linalg
from keras.src.quantizers.gptq_config import GPTQConfig
from keras.src.quantizers.quantizers import GPTQQuantizer
from keras.src.quantizers.quantizers import compute_quantization_parameters
from keras.src.quantizers.quantizers import dequantize_with_zero_point
from keras.src.quantizers.quantizers import quantize_with_zero_point
def _stable_permutation(metric):
"""Return a stable permutation that sorts `metric` in descending order.
Uses an index-based jitter to break ties deterministically."""
n = ops.shape(metric)[0]
idx = ops.arange(0, n, dtype="int32")
# tiny jitter = (idx / n) * 1e-12 so it never flips a real strict ordering
jitter = ops.divide(ops.cast(idx, "float32"), ops.cast(n, "float32"))
metric_jittered = ops.add(metric, ops.multiply(jitter, 1e-12))
# argsort by negative to get descending
return ops.argsort(ops.negative(metric_jittered))
def gptq_quantize_matrix(
    weights_transpose,
    inv_hessian,
    *,
    blocksize=128,
    group_size=-1,
    activation_order=False,
    order_metric=None,
    compute_scale_zero=compute_quantization_parameters,
):
    """
    Implements the GPTQ error correction updates.

    For a single column update (column j):
        e = (w_j - q_j) / invH[j, j]
        W[:, j+1:] -= e * invH[j, j+1:]
    where:
    - w_j is the original column,
    - q_j is the quantized column,
    - invH is the inverse Hessian,
    - e is the propagated error term.

    Across entire blocks:
        W[:, future] -= E_block * invH[block, future]
    where:
    - E_block is the quantization error accumulated for the current block,
    - invH[block, future] denotes the cross-block slice of the inverse Hessian,
    - W[:, future] are the columns yet to be quantized.

    Args:
        weights_transpose: Transposed weight matrix [out_features, in_features]
            to quantize.
        inv_hessian: Inverse Hessian matrix [in_features, in_features] for
            error propagation.
        blocksize: Size of the blocks to process (default: 128).
        group_size: Size of the groups for parameter reuse
            (default: -1, no grouping).
        activation_order: Whether to apply activation-order permutation
            (default: False).
        order_metric: Metric for ordering features
            (default: None, uses 1 / diag(invH)).
        compute_scale_zero: Function to compute scale and zero for
            quantization.

    Returns:
        quantized_weights: Quantized weight matrix [out_features, in_features].
        scale: float32. Scale parameters for quantization
            [out_features, num_groups].
        zero: Zero-point parameters for quantization [out_features, num_groups].
        g_idx: float32. Group indices for each feature [in_features].
    """
    in_features = ops.shape(weights_transpose)[1]
    if activation_order:
        # Use 1 / diag(inverse_hessian) as importance proxy by default.
        if order_metric is None:
            order_metric = ops.reciprocal(
                ops.add(ops.diagonal(inv_hessian), 1e-12)
            )
        else:
            # sanitize provided metric
            order_metric = ops.cast(order_metric, "float32")
            order_metric = ops.where(
                ops.isfinite(order_metric),
                order_metric,
                ops.zeros_like(order_metric),
            )
        # Sort in descending order by importance
        perm = _stable_permutation(order_metric)
        # argsort of a permutation yields its inverse; used at the end to
        # restore the original column order.
        inv_perm = ops.argsort(perm)
        # Permute the weight columns and both axes of the inverse Hessian
        # so they stay consistent with each other.
        weights_transpose = ops.take(weights_transpose, perm, axis=1)
        inv_hessian = ops.take(
            ops.take(inv_hessian, perm, axis=0), perm, axis=1
        )
    else:
        perm = inv_perm = None
    # weights_buffer: [out_features, in_features]
    weights_buffer = weights_transpose
    # Buffer for the final quantized matrix: [out_features, in_features]
    quantized_weights_buffer = ops.zeros_like(weights_transpose, dtype="int32")
    # Per-group (scale, zero) tensors, appended in the order groups are
    # first encountered and concatenated after the main loop.
    scale_chunks = []
    zero_chunks = []
    # Compute effective group size
    effective_group = in_features if group_size == -1 else group_size
    # Process features in blocks
    for block_start in range(0, in_features, blocksize):
        block_end = min(block_start + blocksize, in_features)
        block_size = block_end - block_start
        # Block views
        # block_weights: [out_features, block_size]
        block_weights = weights_buffer[:, block_start:block_end]
        # block_error: [out_features, block_size]
        block_error = ops.zeros_like(block_weights)
        # block_inv_hessian: [block_size, block_size]
        block_inv_hessian = inv_hessian[
            block_start:block_end, block_start:block_end
        ]
        # Per-group cached params for reuse within the group
        cached_scale = None
        cached_zero = None
        cached_maxq = None
        cached_group_start = -1
        for block_idx in range(block_size):
            # Current global column index, represents the original column
            # in the weight matrix
            global_idx = block_start + block_idx
            # weight_column: [out_features,]
            weight_column = block_weights[:, block_idx]
            # Group-wise parameter reuse (compute once per group)
            if not effective_group == in_features:  # group_size != -1
                # Determine the group start index for the current column
                group_start = (global_idx // effective_group) * effective_group
                if group_start != cached_group_start:
                    # New group encountered, compute & cache params
                    # for this group
                    group_end = min(group_start + effective_group, in_features)
                    group_slice = weights_buffer[:, group_start:group_end]
                    cached_scale, cached_zero, cached_maxq = compute_scale_zero(
                        group_slice
                    )
                    # Store params once per group (in the order encountered).
                    scale_chunks.append(cached_scale)
                    zero_chunks.append(cached_zero)
                    cached_group_start = group_start
                scale, zero, maxq = cached_scale, cached_zero, cached_maxq
            else:
                # Single global group covering all columns.
                if cached_scale is None:
                    cached_scale, cached_zero, cached_maxq = compute_scale_zero(
                        weights_buffer
                    )
                    scale_chunks.append(cached_scale)
                    zero_chunks.append(cached_zero)
                    cached_group_start = 0
                scale, zero, maxq = cached_scale, cached_zero, cached_maxq
            # Quantize column and store it.
            # quantized_column: [out_features, 1]
            quantized_column = quantize_with_zero_point(
                ops.expand_dims(weight_column, 1), scale, zero, maxq
            )
            # Store quantized column in the buffer.
            quantized_weights_buffer = ops.slice_update(
                quantized_weights_buffer,
                (0, global_idx),
                ops.cast(quantized_column, "int32"),
            )
            # Dequantize column to compute error.
            # dequantized_col: [out_features,]
            dequantized_col = dequantize_with_zero_point(
                quantized_column, scale, zero
            )[:, 0]
            # Error feedback for remaining columns within the block
            # block_inv_hessian_diag: scalar
            current_block_influence = block_inv_hessian[block_idx, block_idx]
            # We divide by current_block_influence to get the
            # correct scaling of the error term.
            err = ops.divide(
                ops.subtract(weight_column, dequantized_col),
                current_block_influence,
            )
            # Record error for propagation to future blocks
            block_error = ops.slice_update(
                block_error, (0, block_idx), ops.expand_dims(err, 1)
            )
            # Update remaining columns in the current block
            # (those before the current column have already been quantized)
            # Propagate error to remaining columns in the block.
            if block_idx < block_size - 1:
                # update: [out_features, block_size - block_idx - 1]
                # Outer product of the error column with the corresponding
                # inverse-Hessian row segment.
                update = ops.matmul(
                    ops.expand_dims(err, 1),
                    ops.expand_dims(
                        block_inv_hessian[block_idx, block_idx + 1 :], 0
                    ),
                )
                # tail is a view of the remaining columns in the block
                # to be updated
                # tail: [out_features, block_size - block_idx - 1]
                tail = block_weights[:, block_idx + 1 :]
                block_weights = ops.slice_update(
                    block_weights,
                    (0, block_idx + 1),
                    ops.subtract(tail, update),
                )
        # Propagate block errors to future features (beyond the block)
        if block_end < in_features:
            # Total update for all future columns, based on the
            # accumulated error in this block. This is calculated
            # as the matrix product of the block_error and the
            # relevant slice of the inverse Hessian.
            # total_update: [out_features, in_features - block_end]
            total_update = ops.matmul(
                block_error, inv_hessian[block_start:block_end, block_end:]
            )
            # Update the remaining weights in the buffer. This is done
            # by subtracting the total_update from the remaining columns.
            weights_buffer = ops.concatenate(
                [
                    weights_buffer[:, :block_end],
                    ops.subtract(weights_buffer[:, block_end:], total_update),
                ],
                axis=1,
            )
    # Build group indices for each (possibly permuted) column
    # base_group = effective_group (int)
    base_group = effective_group
    # g_idx in permuted domain
    g_idx = ops.arange(0, in_features, dtype="int32")
    # NOTE(review): ops.divide is true division, so g_idx holds fractional
    # values (e.g. 1.5) rather than integer group ids; a consumer that
    # casts/floors to int recovers the group index — confirm downstream
    # usage relies on that.
    g_idx = ops.divide(g_idx, base_group)
    g_idx = ops.cast(g_idx, "float32")
    # Map group indices and quantized weights back to original column order
    if activation_order:
        g_idx = ops.take(g_idx, inv_perm, axis=0)
        quantized_weights_buffer = ops.take(
            quantized_weights_buffer, inv_perm, axis=1
        )
    # Concatenate recorded group params
    if len(scale_chunks) == 0:
        # Edge case: no groups recorded (empty input); fall back to whole matrix
        s, z, _ = compute_scale_zero(weights_transpose)
        scale = s
        zero = z
    else:
        # Groups appear in the order they were first encountered (i.e. the
        # permuted order when activation_order=True).
        scale = ops.concatenate(scale_chunks, axis=1)
        zero = ops.concatenate(zero_chunks, axis=1)
    return quantized_weights_buffer, scale, zero, g_idx
class GPTQ:
    """Per-layer GPTQ state: accumulates a Hessian estimate from calibration
    activations, then quantizes and error-corrects the layer's weights."""

    # NOTE(review): the default `config` is a single GPTQConfig instance
    # created once at import time (mutable default argument). Safe only as
    # long as callers never mutate it — confirm.
    def __init__(self, layer, config=GPTQConfig(tokenizer=None, dataset=None)):
        self.original_layer = layer
        self.num_samples = 0
        self.config = config
        self.quantizer = GPTQQuantizer(
            config, compute_dtype=layer.variable_dtype
        )
        # Explicitly handle each supported layer type
        if isinstance(layer, Dense) or (
            isinstance(layer, EinsumDense) and layer.kernel.ndim == 2
        ):
            # For a standard Dense layer, the dimensions are straightforward.
            self.kernel_shape = layer.kernel.shape
            # rows: [input_features]
            self.rows = self.kernel_shape[0]
            # columns: [output_features]
            self.columns = self.kernel_shape[1]
            self.layer = layer
        # Handle 3D EinsumDense layers (typically from attention blocks).
        elif isinstance(layer, EinsumDense) and layer.kernel.ndim == 3:
            # For EinsumDense, we determine the effective 2D dimensions.
            self.kernel_shape = layer.kernel.shape
            shape = list(self.kernel_shape)
            # Heuristic: treat the largest axis as the model dimension to
            # distinguish QKV projections from attention-output projections
            # — assumes d_model is strictly the largest axis.
            d_model_dim_index = shape.index(max(shape))
            if d_model_dim_index == 0:  # QKV projection case
                in_features, heads, head_dim = shape
                self.rows, self.columns = (
                    in_features,
                    ops.multiply(heads, head_dim),
                )
            elif d_model_dim_index in [1, 2]:  # Attention Output case
                heads, head_dim, out_features = shape
                self.rows, self.columns = (
                    ops.multiply(heads, head_dim),
                    out_features,
                )
            # Create a temporary object that holds a reshaped
            # 2D version of the kernel.
            self.layer = types.SimpleNamespace(
                kernel=ops.reshape(layer.kernel, (self.rows, self.columns)),
            )
        else:
            # Raise an error if the layer is not supported.
            raise TypeError(f"Unsupported layer type for GPTQ: {type(layer)}")
        # Running Hessian estimate over input features: [rows, rows].
        self.hessian = ops.zeros((self.rows, self.rows), dtype="float32")

    def update_hessian_with_batch(self, input_batch):
        """
        Updates the running average of the Hessian matrix with a new batch.

        This method computes the Hessian matrix for a given batch of input
        activations and updates the accumulated Hessian (`self.hessian`) using a
        numerically stable running average. This allows the Hessian to be
        computed over a large dataset without loading all samples into memory
        at once.

        The input tensor is first reshaped into a 2D matrix [num_samples,
        num_features] before the Hessian is calculated.

        Args:
            input_batch: A 2D or higher-dimensional tensor of input activations
                from a calibration batch.

        Raises:
            ValueError: If the feature dimension of the input tensor
                `input_batch` does not match the dimensions of the
                pre-initialized Hessian matrix `self.hessian`.
        """
        if input_batch is None:
            raise ValueError("Input tensor cannot be None.")
        if len(input_batch.shape) < 2:
            raise ValueError(
                "Input tensor must have rank >= 2 "
                f"(got rank {len(input_batch.shape)})."
            )
        if ops.size(input_batch) == 0:
            raise ValueError("Input tensor cannot be empty.")
        if len(input_batch.shape) > 2:
            # [batch, features]
            input_batch = ops.reshape(input_batch, (-1, input_batch.shape[-1]))
        x = ops.cast(input_batch, "float32")
        num_new_samples = ops.shape(x)[0]
        num_prev_samples = self.num_samples
        total_samples = ops.add(num_prev_samples, num_new_samples)
        if ops.shape(self.hessian)[0] != ops.shape(x)[-1]:
            raise ValueError(
                f"Hessian dimensions ({ops.shape(self.hessian)[0]}) do not "
                f"match input features ({ops.shape(x)[-1]})."
            )
        # gram_matrix: [features, features]
        gram_matrix = ops.matmul(ops.transpose(x), x)
        # Ensures numerical stability and symmetry in case of large floating
        # point activations.
        gram_matrix = ops.divide(
            ops.add(gram_matrix, ops.transpose(gram_matrix)), 2.0
        )
        # Decay previous mean and add current per-sample contribution
        # (factor 2/N)
        if self.num_samples > 0:
            self.hessian = ops.multiply(
                self.hessian, ops.divide(num_prev_samples, total_samples)
            )
        self.hessian = ops.add(
            self.hessian,
            ops.multiply(ops.divide(2.0, total_samples), gram_matrix),
        )
        # NOTE(review): parses as `(self.num_samples + batch) or 0`; the
        # trailing `or 0` only applies when the sum is falsy (zero), so it
        # is effectively a no-op guard.
        self.num_samples = self.num_samples + ops.shape(x)[0] or 0

    def quantize_and_correct_layer(
        self,
        blocksize=128,
    ):
        """
        Performs GPTQ quantization and correction on the layer's weights.

        This method implements the core logic of the "Optimal Brain Quant"
        (OBQ) method, as applied by GPTQ, to quantize the weights of a single
        layer. It iteratively quantizes blocks of weights and corrects for the
        quantization error by updating the remaining weights.

        The algorithm follows these main steps:
        1.  Initialization: It optionally reorders the weight columns based
            on activation magnitudes (`activation_order=True`) to protect more
            salient
            weights.
        2.  Hessian Modification: The Hessian matrix, pre-computed from
            calibration data, is dampened to ensure its invertibility and
            stability.
        3.  Iterative Quantization: The function iterates through the
            weight columns in blocks (`blocksize`). In each iteration, it:
            a.  Quantizes one column.
            b.  Calculates the quantization error.
            c.  Updates the remaining weights in the *current* block by
                distributing the error, using the inverse Hessian.
        4.  Block-wise Correction: After a block is quantized, the total
            error from that block is propagated to the *next* block of weights
            to be processed.
        5.  Finalization: The quantized weights are reordered back if
            `activation_order` was used, and the layer's weights are updated.

        This implementation is based on the official GPTQ paper and repository.
        For more details, see:
        - Paper: https://arxiv.org/abs/2210.17323
        - Original Code: https://github.com/IST-DASLab/gptq

        Args:
            blocksize: (int, optional) The size of the weight block to process
                at a time. Defaults to 128.
        """
        weights_matrix = ops.transpose(self.layer.kernel)
        # Dampen the Hessian for Stability
        hessian_diagonal = ops.diagonal(self.hessian)
        # "Dead" features: columns whose activations were never observed
        # (zero diagonal). Give them a unit diagonal so the Hessian stays
        # invertible.
        dead_diagonal = ops.equal(hessian_diagonal, 0.0)
        hessian_diagonal = ops.where(dead_diagonal, 1.0, hessian_diagonal)
        hessian_matrix = ops.add(
            self.hessian,
            ops.diag(
                ops.where(dead_diagonal, 1.0, ops.zeros_like(hessian_diagonal))
            ),
        )
        # Add dampening factor to the Hessian diagonal
        damping_factor = ops.multiply(
            self.config.hessian_damping, ops.mean(hessian_diagonal)
        )
        hessian_diagonal = ops.add(hessian_diagonal, damping_factor)
        # Replace the matrix diagonal with the dampened diagonal.
        hessian_matrix = ops.add(
            ops.subtract(
                hessian_matrix, ops.diag(ops.diagonal(hessian_matrix))
            ),
            ops.diag(hessian_diagonal),
        )
        # Compute the inverse Hessian, which is used for error correction
        inverse_hessian = linalg.inv(hessian_matrix)
        quantized, scale, zero, g_idx = gptq_quantize_matrix(
            weights_matrix,
            inv_hessian=inverse_hessian,
            blocksize=blocksize,
            group_size=self.config.group_size,
            activation_order=self.config.activation_order,
            # Feature-importance proxy: the dampened Hessian diagonal.
            order_metric=ops.diagonal(hessian_matrix),
            compute_scale_zero=self.quantizer.find_params,
        )
        quantized = ops.cast(
            quantized, self.original_layer.quantized_kernel.dtype
        )
        if self.config.weight_bits == 4:
            # For 4-bit weights, we need to pack them into bytes
            quantized, _, _ = quantizers.pack_int4(
                quantized, axis=0, dtype="uint8"
            )
        # Swap the full-precision kernel for the quantized parameters on
        # the original layer.
        del self.original_layer._kernel
        self.original_layer.quantized_kernel.assign(quantized)
        self.original_layer.kernel_scale.assign(scale)
        self.original_layer.kernel_zero.assign(zero)
        self.original_layer.g_idx.assign(g_idx)
        self.original_layer.is_gptq_calibrated = True

    def free(self):
        # Release the large per-layer buffers once quantization is done.
        del self.hessian
        del self.layer
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/quantizers/gptq.py",
"license": "Apache License 2.0",
"lines": 432,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
keras-team/keras:keras/src/quantizers/gptq_config.py | from keras.src.api_export import keras_export
from keras.src.quantizers.quantization_config import QuantizationConfig
@keras_export("keras.quantizers.GPTQConfig")
class GPTQConfig(QuantizationConfig):
    """Configuration class for the GPTQ (Gradient-based Post-Training
    Quantization) algorithm.

    GPTQ is a post-training quantization method that quantizes neural network
    weights to lower precision (e.g., 4-bit) while minimizing the impact on
    model accuracy. It works by analyzing the Hessian matrix of the loss
    function with respect to the weights and applying optimal quantization
    that preserves the most important weight values.

    **When to use GPTQ:**

    - You want to reduce model size and memory usage
    - You need faster inference on hardware that supports low-precision
      operations
    - You want to maintain model accuracy as much as possible
    - You have a pre-trained model that you want to quantize without
      retraining

    **How it works:**

    1. Uses calibration data to compute the Hessian matrix for each layer
    2. Applies iterative quantization with error correction
    3. Reorders weights based on activation importance (optional)
    4. Quantizes weights while minimizing quantization error

    **Example usage:**

    ```python
    from keras.quantizers import GPTQConfig
    from keras import Model

    # Create configuration for 4-bit quantization
    config = GPTQConfig(
        dataset=calibration_data,      # Your calibration dataset
        tokenizer=your_tokenizer,      # Tokenizer for text data
        weight_bits=4,                 # Quantize to 4 bits
        num_samples=128,               # Number of calibration samples
        sequence_length=512,           # Sequence length for each sample
        hessian_damping=0.01,          # Hessian stabilization factor
        group_size=128,                # Weight grouping for quantization
        symmetric=False,               # Use asymmetric quantization
        activation_order=True          # Reorder weights by importance
    )

    # Apply quantization to your model
    model = Model(...)  # Your pre-trained model
    model.quantize("gptq", config=config)

    # The model now has quantized weights and can be used for inference
    ```

    **Benefits:**

    - **Memory reduction**: 4-bit quantization reduces memory by ~8x compared
      to float32
    - **Faster inference**: Lower precision operations are faster on supported
      hardware
    - **Accuracy preservation**: Minimizes accuracy loss through optimal
      quantization
    - **No retraining required**: Works with pre-trained models

    **Advanced usage examples:**

    **Per-channel quantization (recommended for most cases):**
    ```python
    config = GPTQConfig(
        dataset=calibration_data,
        tokenizer=tokenizer,
        weight_bits=4,
        group_size=-1,  # -1 enables per-channel quantization
        symmetric=False
    )
    ```

    **Grouped quantization (for specific hardware requirements):**
    ```python
    config = GPTQConfig(
        dataset=calibration_data,
        tokenizer=tokenizer,
        weight_bits=4,
        group_size=64,  # 64 weights share the same scale factor
        symmetric=True  # Use symmetric quantization
    )
    ```

    **High-accuracy quantization with activation ordering:**
    ```python
    config = GPTQConfig(
        dataset=calibration_data,
        tokenizer=tokenizer,
        weight_bits=4,
        activation_order=True,  # Reorder weights by importance
        hessian_damping=0.005,  # Lower damping for more precise
                               # quantization
        num_samples=256         # More samples for better accuracy
    )
    ```

    **References:**

    - Original GPTQ paper: "GPTQ: Accurate Post-Training Quantization
      for Generative Pre-trained Transformers"
    - Implementation based on: https://github.com/IST-DASLab/gptq
    - Suitable for: Transformer models, large language models, and other
      deep neural networks

    **Note:** The quality of quantization depends heavily on the calibration
    dataset. Use representative data that covers the expected input
    distribution for best results.

    Args:
        dataset: The calibration dataset. It can be an iterable that yields
            strings or pre-tokenized numerical tensors (e.g., a list of
            strings, a generator, or a NumPy array). This data is used to
            analyze the model's activations.
        tokenizer: A `keras_nlp.Tokenizer` instance (or a similar callable)
            that is used to process the `dataset` if it contains strings.
        weight_bits: (int, optional) The number of bits to quantize weights to.
            Defaults to 4.
        num_samples: (int, optional) The number of calibration data samples to
            use from the dataset. Defaults to 128.
        per_channel: (bool, optional) If `True`, quantization parameters are
            computed per channel rather than for the whole tensor; consumed
            by the quantizer, not validated here. Defaults to `True`.
        sequence_length: (int, optional) The sequence length to use for each
            calibration sample. Defaults to 512.
        hessian_damping: (float, optional) The % of Hessian damping to use for
            stabilization during inverse calculation. Defaults to 0.01.
        group_size: (int, optional) The size of weight groups to quantize
            together. A `group_size` of -1 indicates per-channel quantization.
            Defaults to 128.
        symmetric: (bool, optional) If `True`, uses symmetric quantization.
            If `False`, uses asymmetric quantization. Defaults to `False`.
        activation_order: (bool, optional) If `True`, reorders weight columns
            based on activation magnitude, which can improve quantization
            accuracy. Defaults to `False`.
        quantization_layer_structure: (dict, optional) A dictionary defining the
            model's quantization structure. It should contain:
            - "pre_block_layers": list of layers to run before the first block.
            - "sequential_blocks": list of blocks to be quantized sequentially.
            If not provided, the model must implement
            `get_quantization_layer_structure`.
    """

    def __init__(
        self,
        dataset,
        tokenizer,
        *,
        weight_bits: int = 4,
        num_samples: int = 128,
        per_channel: bool = True,
        sequence_length: int = 512,
        hessian_damping: float = 0.01,
        group_size: int = 128,
        symmetric: bool = False,
        activation_order: bool = False,
        quantization_layer_structure: dict = None,
    ):
        super().__init__()
        # Validate every numeric knob up-front so misconfiguration fails
        # fast at construction time.
        if weight_bits not in [2, 3, 4, 8]:
            raise ValueError(
                f"Unsupported weight_bits {weight_bits}. "
                "Supported values are 2, 3, 4, and 8."
            )
        if num_samples <= 0:
            raise ValueError("num_samples must be a positive integer.")
        if sequence_length <= 0:
            raise ValueError("sequence_length must be a positive integer.")
        if hessian_damping < 0 or hessian_damping > 1:
            raise ValueError("hessian_damping must be between 0 and 1.")
        if group_size < -1 or group_size == 0:
            raise ValueError(
                "Invalid group_size. Supported values are -1 (whole-tensor) "
                "or a positive integer, "
                f"but got {group_size}."
            )
        self.dataset = dataset
        self.tokenizer = tokenizer
        self.num_samples = num_samples
        self.per_channel = per_channel
        self.sequence_length = sequence_length
        self.hessian_damping = hessian_damping
        self.weight_bits = weight_bits
        self.group_size = group_size
        self.symmetric = symmetric
        self.activation_order = activation_order
        self.quantization_layer_structure = quantization_layer_structure

    def get_config(self):
        """Returns the serializable config dict (calibration data excluded)."""
        return {
            # Dataset and Tokenizer are only required for a one-time
            # calibration and are not saved in the config.
            "dataset": None,
            "tokenizer": None,
            "weight_bits": self.weight_bits,
            "num_samples": self.num_samples,
            "per_channel": self.per_channel,
            "sequence_length": self.sequence_length,
            "hessian_damping": self.hessian_damping,
            "group_size": self.group_size,
            "symmetric": self.symmetric,
            "activation_order": self.activation_order,
            "quantization_layer_structure": self.quantization_layer_structure,
        }

    @classmethod
    def from_config(cls, config):
        """Recreates a config instance from `get_config()` output."""
        return cls(**config)

    @property
    def mode(self):
        """The quantization mode identifier for this config."""
        return "gptq"

    def dtype_policy_string(self):
        """Returns the dtype policy string for this configuration.

        Returns:
            A string representing the dtype policy, e.g. "gptq_4bit".
        """
        return f"gptq/{self.weight_bits}/{self.group_size}"
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/quantizers/gptq_config.py",
"license": "Apache License 2.0",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
keras-team/keras:keras/src/quantizers/gptq_core.py | import math
from contextlib import contextmanager
import numpy as np
from absl import logging
from keras.src import ops
from keras.src import utils as keras_utils
from keras.src.dtype_policies.dtype_policy import GPTQDTypePolicy
from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
from keras.src.layers import Dense
from keras.src.layers import EinsumDense
from keras.src.quantizers.gptq import GPTQ
from keras.src.quantizers.gptq_config import GPTQConfig
from keras.src.quantizers.utils import should_quantize_layer
@contextmanager
def stream_hessians(layers_map, gptq_objects):
    """Patch `layer.call` on each target layer so calibration activations
    feed the matching GPTQ object's running Hessian estimate.

    While the context is active, each patched `call`:
      1) pulls the layer input out of `*args`/`**kwargs`,
      2) flattens it to 2D `[-1, rows]` with `rows = gptq_objects[name].rows`,
      3) streams it into `gptq_objects[name].update_hessian_with_batch`,
      4) then defers to the original `call` and returns its result.

    The original `call` methods are restored on exit, even if an exception
    occurs. Space cost is O(d**2) per layer (the Hessian); no weights are
    modified — only GPTQ statistics are updated.

    Args:
        layers_map: Dict[str, Layer]. Logical layer names mapped to the
            Keras layers to patch during calibration; keys must match
            `gptq_objects`.
        gptq_objects: Dict[str, GPTQ]. Names mapped to GPTQ instances.

    Yields:
        None: Patches are live only within the `with` block; afterwards all
        layers are unpatched and safe to use normally.

    Example:
    ```python
    >>> with stream_hessians(layers_map, gptq_objects):
    ...     for sample in calibration_inputs:
    ...         if len(sample.shape) == 2:
    ...             sample = ops.expand_dims(sample, 0)
    ...         _ = block(sample)  # hooks update Hessians on-the-fly
    >>> # <- original layer.call methods restored here
    ```
    """
    saved_calls = {}

    def make_wrapper(name, wrapped_call):
        def wrapper(*args, **kwargs):
            tensor_in = args[0] if args else kwargs["inputs"]
            # Collapse all leading dims so the trailing axis matches the
            # number of input features expected by the layer's kernel;
            # this handles 3D/4D activations uniformly.
            feature_count = gptq_objects[name].rows
            flattened = ops.reshape(tensor_in, (-1, feature_count))
            gptq_objects[name].update_hessian_with_batch(flattened)
            return wrapped_call(*args, **kwargs)

        return wrapper

    try:
        for name, layer in layers_map.items():
            saved_calls[name] = layer.call
            layer.call = make_wrapper(name, layer.call)
        yield
    finally:
        for name, layer in layers_map.items():
            layer.call = saved_calls[name]
def get_dataloader(
    tokenizer,
    sequence_length,
    dataset,
    num_samples=128,
    *,
    strategy="strided",
    seed=42,
    stride=None,
    eos_id=None,
):
    """
    Prepares and chunks the calibration dataloader, repeating short datasets.

    All processing happens on the CPU.

    Args:
        tokenizer: The tokenizer to use for text splitting.
        sequence_length: The length of each input sequence.
        dataset: The dataset to sample from.
        num_samples: The number of samples to generate.
        strategy: The sampling strategy to use. Possible values are
            1. "strided": Samples are taken at regular intervals.
            2. "linspace": Samples are taken at evenly spaced intervals.
            3. "random": Samples are taken at random positions.
        seed: The random seed for reproducibility. Used by the "random"
            and "strided" strategies.
        stride: The stride length for "strided" sampling.
        eos_id: The end-of-sequence token ID.

    Returns:
        np.ndarray of shape (num_samples, 1, sequence_length), dtype int32.

    Raises:
        TypeError: If `dataset` is not an iterable of samples.
        ValueError: If the dataset is empty, cannot form a single sample,
            or `strategy` is unknown.
    """
    if not hasattr(dataset, "__iter__") or isinstance(dataset, (str, bytes)):
        raise TypeError(
            "The `dataset` argument must be an iterable (e.g., a list of "
            "strings, a generator, or a NumPy array). Got type: "
            f"{type(dataset).__name__}. Please pass the loaded dataset "
            "directly."
        )
    dataset_list = list(dataset)
    if not dataset_list:
        raise ValueError("Provided dataset is empty.")
    pieces = []
    if isinstance(dataset_list[0], str):
        # Text dataset: tokenize each document on CPU.
        for i, s in enumerate(dataset_list):
            toks = ops.convert_to_numpy(tokenizer.tokenize(s)).reshape(-1)
            pieces.append(toks)
            # avoid windows that span document boundaries
            if eos_id is not None and i < len(dataset_list) - 1:
                pieces.append(np.array([eos_id], dtype=np.int32))
    else:
        # Pre-tokenized dataset: flatten every sample to 1D int32.
        for s in dataset_list:
            toks = ops.convert_to_numpy(s).reshape(-1)
            pieces.append(toks.astype(np.int32, copy=False))
    all_tokens = (
        pieces[0].astype(np.int32, copy=False)
        if len(pieces) == 1
        else np.concatenate(pieces, axis=0).astype(np.int32, copy=False)
    )
    # Repeat short corpora so at least `num_samples` windows exist.
    required_tokens = num_samples * sequence_length
    if all_tokens.size < required_tokens:
        repeats = math.ceil(required_tokens / max(1, all_tokens.size))
        all_tokens = np.tile(all_tokens, repeats)
    max_start = all_tokens.size - sequence_length
    if max_start < 0:
        raise ValueError(
            f"Not enough tokens to form one sample of length {sequence_length} "
            f"(have {all_tokens.size})."
        )
    # Choose deterministic, well-spread starts by default
    if strategy == "random":
        rng = np.random.default_rng(seed)
        starts = rng.integers(
            0, max_start + 1, size=num_samples, dtype=np.int64
        )
    elif strategy == "linspace":
        # even coverage with no RNG
        starts = np.linspace(0, max_start, num_samples, dtype=np.int64)
    elif strategy == "strided":
        # stride chosen to cover the space roughly uniformly
        if stride is None:
            stride = max(1, (max_start + 1) // num_samples)
        # Offset derived deterministically from `seed`.
        # BUG FIX: the previous implementation used
        # `abs(hash(("gptq-calib", seed)))`, but Python's string hashing
        # is salted per process (PYTHONHASHSEED), so the "deterministic"
        # offset changed between runs. A seeded NumPy Generator produces
        # the same offset every run for a given seed.
        if max_start > 0:
            offset = int(
                np.random.default_rng(seed).integers(0, max_start + 1)
            )
        else:
            offset = 0
        starts = (offset + np.arange(num_samples, dtype=np.int64) * stride) % (
            max_start + 1
        )
    else:
        raise ValueError(f"Unknown strategy: {strategy}")
    # Gather contiguous windows
    # sliding_window_view avoids building a big index matrix
    windows = np.lib.stride_tricks.sliding_window_view(
        all_tokens, sequence_length
    )
    samples = windows[starts]  # (num_samples, sequence_length)
    return samples.astype(np.int32)[:, None, :]
def find_layers_in_block(block):
    """Collect every Dense/EinsumDense leaf layer inside a transformer block.

    Args:
        block: A Keras layer representing a transformer block.

    Returns:
        A dict mapping each matching layer's path to the layer itself.
    """
    matches = {}
    for candidate in block._flatten_layers():
        # A layer whose flattened view contains only itself is a leaf
        # (it has no sub-layers of its own).
        is_leaf = len(list(candidate._flatten_layers())) == 1
        if is_leaf and isinstance(candidate, (Dense, EinsumDense)):
            matches[candidate.path] = candidate
    return matches
def apply_gptq_layerwise(dataloader, config, structure, filters=None):
    """Applies GPTQ quantization layer-by-layer to a Keras model.

    This function uses the provided `structure` to identify pre-quantization
    layers and sequential blocks.

    The core logic operates as follows:
    1.  It processes the model sequentially, one block at a time. For each
        block, it uses temporary hooks to capture the input activations of
        each target layer during a forward pass with the calibration data.
    2.  These captured activations are used to compute the Hessian matrix for
        each layer's weights.
    3.  The GPTQ algorithm is then applied to each layer to find the optimal
        quantized weights that minimize the error introduced.
    4.  The output activations from the current block are then used as the
        input for the next block, ensuring that quantization errors are
        accounted for throughout the model.

    Args:
        dataloader: An iterable providing calibration data.
        config: A GPTQConfiguration object.
        structure: A dictionary with keys "pre_block_layers" and
            "sequential_blocks".
        filters: Optional filters to exclude layers from quantization.

    Raises:
        ValueError: If the function cannot automatically find an embedding
            layer or any transformer-like blocks to quantize within the model.
    """
    num_samples = config.num_samples
    logging.info("Starting model quantization...")
    pre_layers = structure.get("pre_block_layers", [])
    transformer_blocks = structure.get("sequential_blocks", [])
    if not transformer_blocks:
        raise ValueError(
            "No sequential blocks found in the provided structure to quantize."
        )
    # Initial inputs are the outputs of the pre-block layers
    inputs = []
    for batch in dataloader:
        batch = ops.convert_to_tensor(batch, dtype="int32")
        # Run each calibration batch through every pre-block layer so the
        # first sequential block sees the activations it would in a normal
        # forward pass.
        for layer in pre_layers:
            batch = layer(batch)
        inputs.append(batch)
    # Never index past the number of materialized calibration samples.
    num_samples = min(num_samples, len(inputs))
    progbar = keras_utils.Progbar(target=len(transformer_blocks))
    for block_idx, block in enumerate(transformer_blocks):
        logging.info(f"Quantizing Block {block_idx}")
        sub_layers_map = find_layers_in_block(block)
        # Filter out layers that are not quantized with GPTQ
        final_sub_layers_map = {}
        for name, layer in sub_layers_map.items():
            if not should_quantize_layer(layer, filters):
                continue
            final_sub_layers_map[name] = layer
        sub_layers_map = final_sub_layers_map
        if not sub_layers_map:
            logging.info(
                f" No quantizable layers found in block {block_idx}. Skipping."
            )
        else:
            logging.info(f"Found layers: {list(sub_layers_map.keys())}")
            gptq_objects = {
                name: GPTQ(layer, config)
                for name, layer in sub_layers_map.items()
            }
            # Forward the calibration samples through the block while the
            # patched `call` hooks accumulate each layer's Hessian.
            with stream_hessians(sub_layers_map, gptq_objects):
                for sample_idx in range(num_samples):
                    current_input = inputs[sample_idx]
                    if len(current_input.shape) == 2:
                        current_input = ops.expand_dims(current_input, axis=0)
                    _ = block(current_input)
            for name, gptq_object in gptq_objects.items():
                logging.info(f"Quantizing {name}...")
                gptq_object.quantize_and_correct_layer()
                gptq_object.free()
            del gptq_objects
        if block_idx < len(transformer_blocks) - 1:
            logging.info(f"Generating inputs for block {block_idx + 1}...")
            next_block_inputs = []
            for sample_idx in range(num_samples):
                current_input = inputs[sample_idx]
                if len(current_input.shape) == 2:
                    current_input = ops.expand_dims(current_input, axis=0)
                # NOTE(review): `[0]` takes the first element of the block's
                # output — either the hidden state of a tuple-returning
                # block, or it strips the batch dim of a single tensor
                # (re-added above on the next iteration); confirm for
                # custom block signatures.
                output = block(current_input)[0]
                next_block_inputs.append(output)
            inputs = next_block_inputs
        progbar.update(current=block_idx + 1)
    logging.info("Quantization process complete.")
def gptq_quantize(config, quantization_layer_structure, filters=None):
    """
    Quantizes the model using GPTQ.

    Args:
        config: The GPTQ configuration.
        quantization_layer_structure: A dictionary describing the model's
            layer structure for quantization.
        filters: Optional filters to exclude layers from quantization.

    Raises:
        ValueError: If the config lacks a dataset/tokenizer or no layer
            structure was provided.
    """
    has_calibration_inputs = (
        config.dataset is not None and config.tokenizer is not None
    )
    if not has_calibration_inputs:
        raise ValueError(
            "GPTQ quantization requires a dataset and a tokenizer. "
            "Please provide them in the `GPTQConfig`."
        )
    if quantization_layer_structure is None:
        raise ValueError(
            "For 'gptq' mode, a valid quantization structure must be provided "
            "either via `config.quantization_layer_structure` or by overriding "
            "`model.get_quantization_layer_structure(mode)`. The structure "
            "should be a dictionary with keys 'pre_block_layers' and "
            "'sequential_blocks'."
        )
    # Materialize all calibration data in one call; the result is a NumPy
    # array, so it can be sliced and reused freely afterwards.
    samples = get_dataloader(
        config.tokenizer,
        config.sequence_length,
        config.dataset,
        num_samples=config.num_samples,
    )
    calibration_samples = samples[: config.num_samples]
    apply_gptq_layerwise(
        calibration_samples,
        config,
        quantization_layer_structure,
        filters=filters,
    )
def get_group_size_for_layer(layer, config):
    """Determine the group size for GPTQ quantization.

    The group size can be specified either through the `config` argument
    or through the `dtype_policy` if it is of type `GPTQDTypePolicy`.
    The config argument is usually available when quantizing the layer
    via the `quantize` method. If the layer was deserialized from a
    saved model, the group size should be specified in the `dtype_policy`.

    Args:
        layer: The layer whose `dtype_policy` may carry the group size.
        config: An optional configuration object that may contain the
            `group_size` attribute.

    Returns:
        int. The determined group size for GPTQ quantization.

    Raises:
        ValueError: If the group size is not specified in either the
            `config` or the `dtype_policy`.
    """
    if config and isinstance(config, GPTQConfig):
        return config.group_size
    elif isinstance(layer.dtype_policy, GPTQDTypePolicy):
        return layer.dtype_policy.group_size
    elif isinstance(layer.dtype_policy, DTypePolicyMap):
        policy = layer.dtype_policy[layer.path]
        if not isinstance(policy, GPTQDTypePolicy):
            # This should never happen based on how we set the
            # quantization mode, but we check just in case.
            # Note: trailing space added so the two literals don't fuse
            # into "...`GPTQDTypePolicy`.Got:".
            raise ValueError(
                "Expected a `dtype_policy` of type `GPTQDTypePolicy`. "
                f"Got: {type(policy)}"
            )
        return policy.group_size
    else:
        # Trailing space added so the message doesn't read
        # "...must be specifiedeither...".
        raise ValueError(
            "For GPTQ quantization, the group_size must be specified "
            "either through a `dtype_policy` of type "
            "`GPTQDTypePolicy` or the `config` argument."
        )
def get_weight_bits_for_layer(layer, config):
    """Determine the number of weight bits for GPTQ quantization.

    The number of weight bits can be specified either through the `config`
    argument or through the `dtype_policy` if it is of type
    `GPTQDTypePolicy`.
    The config argument is usually available when quantizing the layer
    via the `quantize` method. If the layer was deserialized from a
    saved model, the weight bits should be specified in the `dtype_policy`.

    Args:
        layer: The layer whose `dtype_policy` may carry the weight bits.
        config: An optional configuration object that may contain the
            `weight_bits` attribute.

    Returns:
        int. The determined number of weight bits for GPTQ quantization.

    Raises:
        ValueError: If the weight bits is not specified in either the
            `config` or the `dtype_policy`.
    """
    if config and isinstance(config, GPTQConfig):
        return config.weight_bits
    elif isinstance(layer.dtype_policy, GPTQDTypePolicy):
        return layer.dtype_policy.weight_bits
    elif isinstance(layer.dtype_policy, DTypePolicyMap):
        policy = layer.dtype_policy[layer.path]
        if not isinstance(policy, GPTQDTypePolicy):
            # This should never happen based on how we set the
            # quantization mode, but we check just in case.
            # Note: trailing space added so the two literals don't fuse
            # into "...`GPTQDTypePolicy`.Got:".
            raise ValueError(
                "Expected a `dtype_policy` of type `GPTQDTypePolicy`. "
                f"Got: {type(policy)}"
            )
        return policy.weight_bits
    else:
        # Trailing space added so the message doesn't read
        # "...must be specifiedeither...".
        raise ValueError(
            "For GPTQ quantization, the weight_bits must be specified "
            "either through a `dtype_policy` of type "
            "`GPTQDTypePolicy` or the `config` argument."
        )
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/quantizers/gptq_core.py",
"license": "Apache License 2.0",
"lines": 380,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
keras-team/keras:keras/src/quantizers/gptq_core_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.quantizers.gptq_config import GPTQConfig
from keras.src.quantizers.gptq_core import get_dataloader
from keras.src.quantizers.gptq_core import gptq_quantize
VOCAB_SIZE = 100
class MockTokenizer:
    """A mock tokenizer that mimics the real API for testing."""

    def tokenize(self, text):
        # Map each character to an id in [0, VOCAB_SIZE) via its code point.
        joined = "".join(text)
        return [ord(ch) % VOCAB_SIZE for ch in joined]

    def __call__(self, text):
        # Calling the tokenizer is equivalent to `tokenize`.
        return self.tokenize(text)
class EmptyBlock(layers.Layer):
    """A block that contains no quantizable layers."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # LayerNormalization is not a GPTQ-quantizable layer type.
        self.ln = layers.LayerNormalization()

    def call(self, inputs):
        normalized = self.ln(inputs)
        return normalized
class TransformerBlock(layers.Layer):
    """A toy transformer block with a quantizable Dense layer."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # The Dense kernel is the weight GPTQ quantizes in these tests.
        self.dense = layers.Dense(128)

    def call(self, inputs):
        projected = self.dense(inputs)
        return projected
def _get_model_with_backbone(
    has_transformer_layers=True, embedding_name="embedding"
):
    """Creates a KerasHub-style model with a backbone.

    Args:
        has_transformer_layers: If False, the backbone has an empty
            `transformer_layers` list (used to test unsupported models).
        embedding_name: Attribute name under which the embedding layer is
            stored on the backbone.

    Returns:
        A built `Model` instance wrapping the backbone and a classifier.
    """
    class Backbone(layers.Layer):
        # Mimics a KerasHub backbone: an embedding attribute plus a list
        # of transformer layers, both closed over from the outer args.
        def __init__(self, vocab_size, embedding_dim=128, **kwargs):
            super().__init__(**kwargs)
            # Use direct assignment
            setattr(
                self,
                embedding_name,
                layers.Embedding(vocab_size, embedding_dim),
            )
            # Keep track of layers in a list for the call method
            self.transformer_layers = []
            if has_transformer_layers:
                self.transformer_layers.append(TransformerBlock())
        def call(self, inputs):
            x = getattr(self, embedding_name)(inputs)
            for layer in self.transformer_layers:
                x = layer(x)
            return x
    class Model(models.Model):
        def __init__(self, vocab_size, **kwargs):
            super().__init__(**kwargs)
            # Pass configuration directly
            self.backbone = Backbone(vocab_size=vocab_size)
            self.classifier = layers.Dense(1, activation="sigmoid")
        def call(self, inputs):
            x = self.backbone(inputs)
            x = layers.GlobalAveragePooling1D()(x)
            return self.classifier(x)
    model = Model(vocab_size=VOCAB_SIZE)
    rng = np.random.default_rng(seed=42)
    dummy_input = rng.normal(loc=0, scale=1, size=(2, 64)).astype(np.float32)
    # One forward pass builds all weights before the model is returned.
    _ = model(dummy_input)
    return model
def build_all_tokens_strings(dataset, tokenizer, eos_id=None):
    """Flatten a dataset into one int32 token stream.

    Tokenizes every sample and concatenates the results; when `eos_id`
    is given, it is inserted between samples (but not after the last).
    """
    last_index = len(dataset) - 1
    chunks = []
    for index, sample in enumerate(dataset):
        ids = np.asarray(tokenizer.tokenize(sample), dtype=np.int32)
        chunks.append(ids.reshape(-1))
        if eos_id is not None and index != last_index:
            chunks.append(np.array([eos_id], dtype=np.int32))
    return np.concatenate(chunks, axis=0).astype(np.int32, copy=False)
def sliding_windows(x, L):
    """Return all contiguous length-`L` windows of the 1-D array `x`."""
    return np.lib.stride_tricks.sliding_window_view(x, window_shape=L)
@pytest.mark.requires_trainable_backend
class TestGPTQCore(testing.TestCase):
    """Tests for `get_dataloader` sampling strategies and GPTQ wiring."""
    @parameterized.named_parameters(
        [("strided", "strided"), ("linspace", "linspace"), ("random", "random")]
    )
    def test_shape_and_dtype_strings(self, strategy):
        """Test the shape and dtype of the output for string inputs."""
        tok = MockTokenizer()
        dataset = ["a b c d e f g", "h i j k"]
        seq_len, n = 5, 7
        out = get_dataloader(
            tok, seq_len, dataset, num_samples=n, strategy=strategy, seed=123
        )
        # Samples come back as (num_samples, 1, sequence_length) int32.
        self.assertEqual(out.shape, (n, 1, seq_len))
        self.assertEqual(out.dtype, np.int32)
    @parameterized.named_parameters(
        [("strided", "strided"), ("linspace", "linspace"), ("random", "random")]
    )
    def test_shape_and_dtype_pretokenized(self, strategy):
        """Test the shape and dtype of the output for pre-tokenized inputs."""
        tok = MockTokenizer()
        # Pre-tokenized inputs; mixed shapes (1, L) and (L,)
        seqs = [
            np.array([[1, 2, 3, 4]], dtype=np.int64),
            np.array([5, 6], dtype=np.int64),
        ]
        tok = MockTokenizer()
        seq_len, n = 3, 4
        out = get_dataloader(
            tok, seq_len, seqs, num_samples=n, strategy=strategy, seed=7
        )
        self.assertEqual(out.shape, (n, 1, seq_len))
        self.assertEqual(out.dtype, np.int32)
    def test_strided_is_deterministic_for_same_args(self):
        """Identical arguments (incl. seed) must give identical samples."""
        tok = MockTokenizer()
        dataset = ["a b c d e", "f g h i j k"]
        out1 = get_dataloader(
            tok, 4, dataset, num_samples=6, strategy="strided", seed=99
        )
        out2 = get_dataloader(
            tok, 4, dataset, num_samples=6, strategy="strided", seed=99
        )
        self.assertTrue(ops.all(ops.equal(out1, out2)))
    def test_random_reproducibility_by_seed(self):
        """Same seed reproduces samples; a different seed changes them."""
        tok = MockTokenizer()
        dataset = ["a b c d e", "f g h i j k"]
        a = get_dataloader(
            tok, 4, dataset, num_samples=6, strategy="random", seed=123
        )
        b = get_dataloader(
            tok, 4, dataset, num_samples=6, strategy="random", seed=123
        )
        c = get_dataloader(
            tok, 4, dataset, num_samples=6, strategy="random", seed=124
        )
        self.assertTrue(ops.all(ops.equal(a, b)))
        self.assertFalse(ops.all(ops.equal(a, c)))
    def test_linspace_windows_match_expected(self):
        """Linspace strategy picks evenly spaced window start positions."""
        tok = MockTokenizer()
        dataset = ["aa bb cc dd", "ee ff gg"]
        seq_len, n = 3, 5
        eos_id = None
        all_tokens = build_all_tokens_strings(dataset, tok, eos_id=eos_id)
        max_start = all_tokens.size - seq_len
        # Reference: evenly spaced starts over the flattened token stream.
        expected_starts = np.linspace(0, max_start, n, dtype=np.int64)
        expected = sliding_windows(all_tokens, seq_len)[expected_starts]
        got = get_dataloader(
            tok, seq_len, dataset, num_samples=n, strategy="linspace"
        )
        self.assertTrue(
            ops.all(ops.equal(got[:, 0, :], expected.astype(np.int32)))
        )
    def test_strided_override_respected(self):
        """Tests that strided windows are disjoint and cover the input."""
        tok = MockTokenizer()
        # 20 tokens total
        # with seq_len=4 and stride=4, we expect disjoint chunks
        # in order (modulo offset)
        dataset = [" ".join([f"t{i}" for i in range(20)])]
        seq_len, n, stride = 4, 5, 4
        out = get_dataloader(
            tok,
            seq_len,
            dataset,
            num_samples=n,
            strategy="strided",
            stride=stride,
            seed=0,
        )
        # Validate that each sample is a contiguous run
        # of length seq_len from the flattened stream
        flat = build_all_tokens_strings(dataset, tok)
        for s in out[:, 0, :]:
            # Each window should appear as a slice in the flat stream
            # (This is a soft check; exact start positions depend on offset.)
            joined = " ".join(map(str, s.tolist()))
            self.assertIn(joined, " ".join(map(str, flat.tolist())))
    def test_eos_insertion_is_present_in_some_window_with_linspace(self):
        """EOS separators injected between samples show up in the windows."""
        tok = MockTokenizer()
        dataset = ["aa aa", "bb bb"]  # len = 5 + 1(EOS) + 5 = 11
        eos = 9999
        seq_len = 3
        n = 3
        out = get_dataloader(
            tok,
            seq_len,
            dataset,
            num_samples=n,
            strategy="linspace",
            eos_id=eos,
        )
        # linspace starts -> [0, 4, 8]; the middle window [4:7]
        # includes EOS at 5
        windows = out[:, 0, :]
        self.assertTrue(
            np.any(np.any(windows == eos, axis=1)),
            "Expected EOS to appear in at least one sampled window with "
            "linspace.",
        )
    def test_get_dataloader_error_scenarios(self):
        """Tests error cases for get_dataloader."""
        with pytest.raises(ValueError, match="Provided dataset is empty"):
            get_dataloader(
                tokenizer=MockTokenizer(),
                sequence_length=10,
                dataset=[],
                num_samples=10,
            )
        # Passing a dataset *name* (string) instead of data must be rejected.
        with self.assertRaisesRegex(
            TypeError,
            "The `dataset` argument must be an iterable.*Got type: str.*"
            "Please pass the loaded dataset directly.",
        ):
            get_dataloader(
                tokenizer=MockTokenizer(),
                sequence_length=10,
                dataset="wikitext2",
                num_samples=10,
            )
    def test_apply_gptq_on_multi_block_model(self):
        """Tests quantization on a model with multiple blocks."""
        model = models.Sequential(
            [
                layers.Embedding(VOCAB_SIZE, 128),
                TransformerBlock(),
                TransformerBlock(),
            ]
        )
        model.build(input_shape=(None, 10))
        layer_structure = {
            "pre_block_layers": [model.layers[0]],
            "sequential_blocks": [model.layers[1], model.layers[2]],
        }
        config = GPTQConfig(
            dataset=["test data"],
            tokenizer=MockTokenizer(),
            group_size=32,
            quantization_layer_structure=layer_structure,
        )
        model.quantize("gptq", config=config)
    @parameterized.named_parameters(
        (
            "no_embedding_layer",
            models.Sequential([layers.Dense(10)]),
            "For 'gptq' mode, a valid quantization structure must be provided",
        ),
        (
            "no_transformer_blocks",
            models.Sequential(
                [layers.Embedding(VOCAB_SIZE, 10), layers.Dense(10)]
            ),
            "For 'gptq' mode, a valid quantization structure must be provided",
        ),
        (
            "backbone_no_layers",
            _get_model_with_backbone(has_transformer_layers=False),
            "For 'gptq' mode, a valid quantization structure must be provided",
        ),
        (
            "backbone_no_embedding",
            _get_model_with_backbone(embedding_name="wrong_name"),
            "For 'gptq' mode, a valid quantization structure must be provided",
        ),
    )
    def test_apply_gptq_with_unsupported_architectures(
        self, model, error_message
    ):
        """Tests that quantize fails correctly for various unsupported
        model architectures."""
        if not model.built:
            model.build(input_shape=(None, 10))
        config = GPTQConfig(dataset=["test"], tokenizer=MockTokenizer())
        with self.assertRaisesRegex(ValueError, error_message):
            # We pass None as structure to trigger the error
            gptq_quantize(config, quantization_layer_structure=None)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/quantizers/gptq_core_test.py",
"license": "Apache License 2.0",
"lines": 273,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/quantizers/gptq_test.py | from collections.abc import Callable
import numpy as np
import pytest
from absl.testing import parameterized
import keras
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.quantizers.gptq import GPTQ
from keras.src.quantizers.gptq import _stable_permutation
from keras.src.quantizers.gptq import gptq_quantize_matrix
from keras.src.quantizers.gptq_config import GPTQConfig
from keras.src.quantizers.quantization_config import QuantizationConfig
from keras.src.quantizers.quantizers import dequantize_with_sz_map
from keras.src.quantizers.quantizers import dequantize_with_zero_point
from keras.src.quantizers.quantizers import quantize_with_zero_point
from keras.src.testing.test_utils import named_product
# Shared test hyperparameters for the GPTQ model-quantization tests.
VOCAB_SIZE = 1000
SEQ_LEN = 128
NUM_SAMPLES = 16
W_BITS = 4
NUM_CLASSES = 32
# Long calibration passage used as the raw-text dataset in the tests below.
CALIBRATION_TEXT = r"""
GPTQ (Generative Pre-trained Transformer Quantization) is an advanced
post-training quantization (PTQ) algorithm designed to compress large
language models with minimal accuracy degradation. It addresses the
challenge of reducing model size from high-precision formats like
FP16 to low-bit integers (e.g., INT4, INT3) without the need for
expensive retraining. The algorithm operates on a layer-by-layer basis,
treating the quantization of each weight matrix $W$ as a
reconstruction problem. Its objective is to find a quantized weight
matrix $\hat{W}$ that minimizes the mean squared error of the layer's
output, formulated as $\arg\min_{\hat{W}} \|WX - \hat{W}X\|_F^2$,
where $X$ is a set of calibration inputs. GPTQ's primary innovation
is its greedy, error-compensating quantization process, based on the
Optimal Brain Quantizer (OBQ) framework. It quantizes weights one by
one (or in small groups). After quantizing a single weight $w_q$ to
its discrete value $\hat{w}_q$, it introduces a quantization error of
$\delta = w_q - \hat{w}_q$. This error is then immediately compensated
for by updating all remaining, unquantized weights in the layer.
The update step is guided by second-order information, specifically
the inverse of the Hessian matrix ($\mathbf{H}^{-1}$) of the layer's
reconstruction loss. This inverse Hessian provides a measure of weight
saliency and inter-dependencies. The update applied to the remaining
weights is calculated based on $\delta$ and the corresponding entries
in $\mathbf{H}^{-1}$, effectively propagating the error to less
sensitive weights. This sequential compensation minimizes the
cumulative error across the entire layer, allowing GPTQ to maintain
high model fidelity, as measured by perplexity, even at aggressive
bit-rates.
"""
def _get_test_layer(layer_type, kernel_shape):
    """Build (and `build()`) a layer of the requested type for GPTQ tests.

    `layer_type` is "Dense", "EinsumDense", or anything else for a bare
    base `Layer` (used to exercise the unsupported-layer path).
    """
    if layer_type == "Dense":
        dense = layers.Dense(units=kernel_shape[1])
        dense.build(input_shape=(None, kernel_shape[0]))
        return dense
    if layer_type == "EinsumDense":
        einsum = layers.EinsumDense(
            equation="...h,hio->...io",
            output_shape=(kernel_shape[1], kernel_shape[2]),
        )
        einsum.build(input_shape=(None, kernel_shape[0]))
        return einsum
    # Any other type yields a bare, unsupported base Layer.
    return layers.Layer()
@pytest.mark.requires_trainable_backend
class GPTQTest(testing.TestCase):
    """Unit tests for the `GPTQ` quantizer object and its Hessian math."""
    def test_initialization_with_dense_layer(self):
        """GPTQ infers rows/columns and Hessian shape from a Dense kernel."""
        mock_layer = _get_test_layer("Dense", kernel_shape=(64, 128))
        gptq_instance = GPTQ(mock_layer)
        self.assertEqual(gptq_instance.rows, 64)
        self.assertEqual(gptq_instance.columns, 128)
        self.assertEqual(gptq_instance.hessian.shape, (64, 64))
    def test_initialization_with_einsumdense_3d(self):
        """A 3D EinsumDense kernel is flattened to (rows, heads*head_dim)."""
        mock_layer = _get_test_layer("EinsumDense", kernel_shape=(64, 4, 32))
        gptq_instance = GPTQ(mock_layer)
        self.assertEqual(gptq_instance.rows, 64)
        self.assertEqual(gptq_instance.columns, 4 * 32)
        self.assertEqual(gptq_instance.hessian.shape, (64, 64))
    def test_update_hessian(self):
        """Successive batches accumulate into num_samples and the Hessian."""
        dense = _get_test_layer("Dense", kernel_shape=(16, 32))
        dense_gptq = GPTQ(dense)
        rng = np.random.default_rng(seed=42)
        batch1 = rng.standard_normal(size=(8, 16)).astype("float32")
        dense_gptq.update_hessian_with_batch(batch1)
        self.assertEqual(dense_gptq.num_samples, 8)
        H1 = dense_gptq.hessian
        batch2 = rng.standard_normal(size=(4, 16)).astype("float32")
        dense_gptq.update_hessian_with_batch(batch2)
        self.assertEqual(dense_gptq.num_samples, 12)
        H2 = dense_gptq.hessian
        # A second batch must actually change the running Hessian.
        self.assertNotAllClose(H1, H2)
    def test_gptq_on_single_layer(self):
        """End-to-end quantization of one Dense layer, then `free()`."""
        rng = np.random.default_rng(seed=42)
        dense = _get_test_layer("Dense", kernel_shape=(16, 32))
        config = GPTQConfig(
            dataset=None,
            tokenizer=None,
            weight_bits=4,
            symmetric=False,
            group_size=-1,
        )
        dense.quantize("gptq", config=config)
        dense_gptq = GPTQ(
            dense,
            config,
        )
        calibration_data = rng.standard_normal(size=(128, 16)).astype("float32")
        dense_gptq.update_hessian_with_batch(calibration_data)
        dense_gptq.quantize_and_correct_layer()
        self.assertEqual(backend.standardize_dtype(dense.kernel.dtype), "uint8")
        # free() must drop references to the Hessian and the layer.
        dense_gptq.free()
        self.assertIsNone(getattr(dense_gptq, "hessian", None))
        self.assertIsNone(getattr(dense_gptq, "layer", None))
    def test_unsupported_layer_error(self):
        """Constructing GPTQ on a non-Dense/EinsumDense layer raises."""
        unsupported_layer = _get_test_layer("Unsupported", kernel_shape=None)
        with self.assertRaisesRegex(TypeError, "Unsupported layer type"):
            GPTQ(unsupported_layer)
    def test_update_hessian_invalid_input(self):
        """None, empty, and feature-mismatched batches are rejected."""
        rng = np.random.default_rng(seed=42)
        dense = _get_test_layer("Dense", kernel_shape=(16, 32))
        gptq_instance = GPTQ(dense)
        with self.assertRaisesRegex(ValueError, "cannot be None"):
            gptq_instance.update_hessian_with_batch(None)
        with self.assertRaisesRegex(ValueError, "cannot be empty"):
            gptq_instance.update_hessian_with_batch(np.empty((0, 16)))
        with self.assertRaisesRegex(ValueError, "match input features"):
            bad_input = rng.standard_normal(size=(8, 99))
            gptq_instance.update_hessian_with_batch(bad_input)
    def test_streaming_equals_big_batch(self):
        """Tests that streaming updates match big batch updates."""
        # dummy inputs
        x = ops.array(np.random.randn(100, 7), "float32")
        # One-shot hessian update
        layer_1 = layers.Dense(5, use_bias=False)
        layer_1.build(input_shape=(None, 7))
        g1 = GPTQ(layer_1)
        g1.update_hessian_with_batch(x)
        # Streamed hessian update
        layer_2 = layers.Dense(5, use_bias=False)
        layer_2.build(input_shape=(None, 7))
        g2 = GPTQ(layer_2)
        g2.update_hessian_with_batch(x[:50])
        g2.update_hessian_with_batch(x[50:])
        # Both the one-shot and streamed hessian updates should match
        self.assertAllClose(g1.hessian, g2.hessian, rtol=1e-6, atol=1e-6)
    def test_hessian_matches_closed_form(self):
        """Tests that the Hessian matches the closed-form solution."""
        x = ops.array(np.random.randn(128, 7), "float32")
        layer = layers.Dense(5, use_bias=False)
        layer.build((None, 7))
        g = GPTQ(layer)
        g.update_hessian_with_batch(x)
        # Closed form: H = (2/N) * X^T X.
        expected = ops.multiply(
            ops.divide(2.0, x.shape[0]), ops.matmul(ops.transpose(x), x)
        )
        self.assertAllClose(g.hessian, expected, rtol=1e-6, atol=1e-6)
    def test_higher_rank_inputs_are_reshaped(self):
        """Tests that higher-rank inputs are reshaped correctly."""
        # x: [batch, time, feat]
        x = ops.array(np.random.randn(10, 4, 7), "float32")
        x_flat = ops.reshape(x, (-1, ops.shape(x)[-1]))
        layer1 = layers.Dense(5, use_bias=False)
        layer1.build((None, 7))
        g1 = GPTQ(layer1)
        g1.update_hessian_with_batch(x)
        layer2 = layers.Dense(5, use_bias=False)
        layer2.build((None, 7))
        g2 = GPTQ(layer2)
        g2.update_hessian_with_batch(x_flat)
        self.assertAllClose(g1.hessian, g2.hessian, rtol=1e-6, atol=1e-6)
    def test_raises_on_feature_mismatch(self):
        """Batches whose last dim differs from the layer's input raise."""
        x = ops.array(np.random.randn(8, 7), "float32")
        layer = layers.Dense(5, use_bias=False)
        layer.build((None, 6))  # wrong in_features
        g = GPTQ(layer)
        with self.assertRaisesRegex(ValueError, "do not match input features"):
            g.update_hessian_with_batch(x)
        with self.assertRaisesRegex(ValueError, "cannot be None"):
            g.update_hessian_with_batch(None)
        with self.assertRaisesRegex(ValueError, "cannot be empty"):
            g.update_hessian_with_batch(
                ops.array(np.empty((0, 7), dtype="float32"))
            )
    def test_num_samples_accumulates_correctly(self):
        """Tests that the number of samples is accumulated correctly when
        streaming updates are used."""
        x = ops.array(np.random.randn(64, 7), "float32")
        layer = layers.Dense(5, use_bias=False)
        layer.build((None, 7))
        g = GPTQ(layer)
        g.update_hessian_with_batch(x[:5])
        g.update_hessian_with_batch(x[5:30])
        g.update_hessian_with_batch(x[30:])
        self.assertEqual(g.num_samples, 64)
    def test_numeric_stability_large_values(self):
        """Tests numeric stability of hessian update with large input values."""
        x = ops.multiply(ops.array(np.random.randn(32, 7), "float32"), 1e6)
        layer = layers.Dense(5, use_bias=False)
        layer.build((None, 7))
        g = GPTQ(layer)
        g.update_hessian_with_batch(x)
        # Should be finite and symmetric
        self.assertTrue(ops.all(ops.isfinite(g.hessian)))
        self.assertTrue(ops.all(ops.equal(g.hessian, ops.transpose(g.hessian))))
    def test_einsumdense_2d_kernel_hessian_shape(self):
        """A 2D EinsumDense kernel yields an in_features-sized Hessian."""
        x = layers.Input((7,))
        y = layers.EinsumDense("ab,bc->ac", output_shape=(5,))(x)
        model = keras.Model(x, y)
        einsum_dense_layer = next(
            l for l in model.layers if isinstance(l, layers.EinsumDense)
        )
        g = GPTQ(einsum_dense_layer)
        # should infer rows==7
        self.assertEqual(ops.shape(g.hessian), (7, 7))
    def test_einsumdense_3d_kernel_streaming_equals_big_batch(self):
        """Tests that streaming updates to the Hessian are equivalent to a big
        batch update."""
        # Construct a tiny attention-like einsum with 3D kernel
        x = layers.Input((7,))
        qkv = layers.EinsumDense("bf,fhk->bhk", output_shape=(2, 3))(
            x
        )  # heads=2, head_dim=3
        model = keras.Model(x, qkv)
        einsum_dense_layer = next(
            l for l in model.layers if isinstance(l, layers.EinsumDense)
        )
        x = ops.array(np.random.randn(50, 7), "float32")
        g1 = GPTQ(einsum_dense_layer)
        g1.update_hessian_with_batch(x)
        g2 = GPTQ(einsum_dense_layer)
        g2.update_hessian_with_batch(x[:20])
        g2.update_hessian_with_batch(x[20:])
        self.assertAllClose(g1.hessian, g2.hessian, rtol=1e-6, atol=1e-6)
    def test_identity_inv_hessian_matches_direct_quantization(self):
        """Tests that the matrix quantization without error correction
        matches the direct implementation."""
        in_features, out_features = 16, 8
        weights = ops.reshape(
            ops.linspace(
                -0.9, 1.1, in_features * out_features, dtype="float32"
            ),
            (in_features, out_features),
        )
        weights_transpose = ops.transpose(weights)
        # inverse_hessian = identity; no cross-feature correction
        # (since all off-diagonal elements are zero), which means
        # there is no interaction between different features
        inverse_hessian = ops.eye(in_features, dtype="float32")
        quantized_weights, scale_map, zero_map, g_idx = gptq_quantize_matrix(
            weights_transpose,
            inverse_hessian,
            blocksize=128,
            group_size=1,  # per-column quantization
            activation_order=False,
            compute_scale_zero=_compute_scale_zero,
        )
        dequantized_weights = dequantize_with_sz_map(
            quantized_weights, scale_map, zero_map, g_idx
        )
        # Compare function output with columnwise direct application
        # of quantization.
        out = ops.zeros_like(weights_transpose)
        for j in range(ops.shape(weights_transpose)[1]):
            column = weights_transpose[:, j : j + 1]
            scale, zero, maxq = _compute_scale_zero(column)
            quantized_col = quantize_with_zero_point(column, scale, zero, maxq)
            dequantized = dequantize_with_zero_point(quantized_col, scale, zero)
            out = ops.slice_update(
                out, (0, j), ops.expand_dims(dequantized[:, 0], 1)
            )
        self.assertAllClose(dequantized_weights, out, atol=1e-6)
    def test_activation_order_produces_equivalent_weights(self):
        """
        Tests that quantizing with `activation_order=True` yields the same
        final weights as `activation_order=False`, because the internal
        permutation should be undone.
        """
        # Set up shared inputs and a non-trivial permutation.
        in_features, out_features = 8, 6
        initial_weights = ops.array(
            np.random.randn(in_features, out_features), "float32"
        )
        # Generate a Hessian that creates a non-trivial permutation.
        hessian_diag = ops.random.shuffle(
            ops.linspace(10.0, 1.0, in_features, dtype="float32")
        )
        hessian_matrix = ops.diag(hessian_diag)
        # Sanity check: ensure the permutation is not the identity.
        perm = _stable_permutation(hessian_diag)
        self.assertFalse(ops.all(ops.equal(perm, ops.arange(in_features))))
        def create_and_quantize(use_activation_order):
            # Builds a fresh Dense layer with the shared weights/Hessian
            # and quantizes it with the given ordering flag.
            layer = layers.Dense(out_features, use_bias=False)
            layer.build((None, in_features))
            layer.set_weights([ops.copy(initial_weights)])
            config = GPTQConfig(
                dataset=None,
                tokenizer=None,
                group_size=-1,
                activation_order=use_activation_order,
            )
            layer.quantize("gptq", config=config)
            quantizer = GPTQ(layer, config)
            quantizer.hessian = hessian_matrix
            quantizer.quantize_and_correct_layer()
            return layer
        # Quantize two layers, one with and one without activation ordering.
        ordered_layer = create_and_quantize(use_activation_order=True)
        unordered_layer = create_and_quantize(use_activation_order=False)
        self.assertAllClose(
            ordered_layer.get_weights()[0],
            unordered_layer.get_weights()[0],
            msg="Weights should be identical as the permutation is undone.",
        )
def _compute_scale_zero(x, **_):
    """Per-column asymmetric int4 quantization parameters.

    scale = (max - min) / maxq, zero = round(-min / scale).
    Returns (scale, zero, maxq).
    """
    maxq = 15.0
    col_min = ops.min(x, axis=0, keepdims=True)
    col_max = ops.max(x, axis=0, keepdims=True)
    value_range = ops.subtract(col_max, col_min)
    scale = ops.divide(value_range, ops.add(maxq, 1e-8))
    neg_min = ops.negative(col_min)
    zero = ops.round(ops.divide(neg_min, ops.add(scale, 1e-8)))
    return scale, zero, maxq
def _get_sequence_classifier():
    """Transformer-based sequence classifier
    tokens -> Embedding -> Transformer -> GAP -> Dense(num_classes).
    """
    embed_dim = 32
    num_heads = 4
    ff_dim = 32
    class SimpleTransformerBlock(layers.Layer):
        # Standard pre-norm-free transformer block: MHA + FFN, each
        # followed by a residual add and LayerNormalization.
        def __init__(self, embed_dim, num_heads, ff_dim, **kwargs):
            super().__init__(**kwargs)
            self.att = layers.MultiHeadAttention(
                num_heads=num_heads, key_dim=embed_dim // num_heads
            )
            self.ffn = models.Sequential(
                [
                    layers.Dense(ff_dim, activation="relu"),
                    layers.Dense(embed_dim),
                ]
            )
            self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
            self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        def call(self, inputs):
            attention_output = self.att(inputs, inputs)
            out1 = self.layernorm1(inputs + attention_output)
            ffn_output = self.ffn(out1)
            return self.layernorm2(out1 + ffn_output)
    inputs = layers.Input(shape=(SEQ_LEN,), dtype="int32")
    x = layers.Embedding(VOCAB_SIZE, embed_dim)(inputs)
    x = SimpleTransformerBlock(embed_dim, num_heads, ff_dim)(x)
    x = layers.GlobalAveragePooling1D()(x)
    outputs = layers.Dense(NUM_CLASSES)(x)
    return models.Model(inputs, outputs)
def _get_simple_model():
    """A one-layer Sequential model used for error-path tests."""
    dense = layers.Dense(10, input_shape=(5,))
    return models.Sequential([dense])
def _mean_kl(p, q):
    """Mean KL divergence D_KL(P || Q) = sum(P * log(P / Q)) over last axis."""
    # Clip to a small epsilon for numerical stability before taking logs.
    eps = 1e-8
    p = ops.clip(p, eps, 1.0)
    q = ops.clip(q, eps, 1.0)
    log_ratio = ops.subtract(ops.log(p), ops.log(q))
    per_sample_kl = ops.sum(ops.multiply(p, log_ratio), axis=-1)
    return ops.mean(per_sample_kl)
def _top1_match_rate(a_logits, b_logits):
    """Calculates the top-1 match rate between two sets of logits.

    Formula: T = 1/N * sum(1{argmax(a_i) == argmax(b_i)})
    """
    preds_a = ops.argmax(a_logits, axis=-1)
    preds_b = ops.argmax(b_logits, axis=-1)
    return ops.mean(ops.equal(preds_a, preds_b))
# Named calibration datasets: raw string slices vs. pre-tokenized ids.
DATASETS = {
    "string_dataset": lambda: _string_dataset(
        CALIBRATION_TEXT, NUM_SAMPLES, SEQ_LEN
    ),
    "token_dataset": lambda: _token_dataset(NUM_SAMPLES, SEQ_LEN),
}
# GPTQConfig keyword overrides exercised by the combination tests.
CONFIGS = {
    "default": {},
    "per_channel": {"group_size": -1, "per_channel": True},
    "act_order": {"activation_order": True},
    "symmetric": {"symmetric": True},
    "group_wise": {"group_size": 8},
    "group_wise_act_order": {"group_size": 8, "activation_order": True},
    "symmetric_act_order": {"symmetric": True, "activation_order": True},
    "symmetric_per_channel": {"symmetric": True, "per_channel": True},
    "group_wise_symmetric_8bit": {
        "group_size": 8,
        "symmetric": True,
        "weight_bits": 8,
    },
}
def _pad_or_trim_1d(ids, length):
    """Right-pad with zeros or truncate a 1D int64 array to `length`."""
    flat = ops.ravel(ops.array(ids, "int64"))
    size = len(flat)
    if size >= length:
        return flat[:length]
    padding = ops.zeros(length - size, dtype=flat.dtype)
    return ops.concatenate([flat, padding])
def _char_tokenizer(vocab_size=VOCAB_SIZE, seq_len=SEQ_LEN):
    """Build a toy tokenizer closure.

    Strings are mapped to per-character ids modulo `vocab_size`; int
    arrays pass through unchanged. Output always has shape (1, seq_len).
    """
    def _tok(sample):
        if isinstance(sample, str):
            char_ids = np.fromiter(
                (ord(c) % vocab_size for c in sample), dtype=np.int64
            )
            token_ids = ops.convert_to_tensor(char_ids)
        else:
            token_ids = np.asarray(sample, dtype=np.int64)
        token_ids = _pad_or_trim_1d(token_ids, seq_len)
        return token_ids[None, :]
    # Expose a `.tokenize` attribute so the closure mimics the real API.
    _tok.tokenize = _tok
    return _tok
def _string_dataset(
    long_text, num_samples=NUM_SAMPLES, sequence_length=SEQ_LEN
):
    """Yields `num_samples` random fixed-length slices of `long_text`."""
    rng = np.random.default_rng(seed=0)
    span = max(1, len(long_text) - sequence_length)
    for _ in range(num_samples):
        offset = rng.integers(0, span) if span > 1 else 0
        yield long_text[offset : offset + sequence_length]
def _token_dataset(
    num_samples=NUM_SAMPLES, sequence_length=SEQ_LEN, vocab_size=VOCAB_SIZE
):
    """Yields random pre-tokenized samples of shape (1, sequence_length)."""
    rng = np.random.default_rng(seed=0)
    for _ in range(num_samples):
        sample = rng.integers(
            low=0, high=vocab_size, size=(1, sequence_length), dtype=np.int64
        )
        yield sample
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(
backend.backend() == "torch",
reason="torch gives low accuracy on CI, but works well locally",
)
class TestModelQuantization(testing.TestCase):
    """End-to-end GPTQ quantization tests on a tiny transformer classifier."""
    # Cartesian product of every calibration dataset with every GPTQ config.
    @parameterized.named_parameters(
        named_product(
            [
                {"testcase_name": dataset_id, "dataset": dataset}
                for dataset_id, dataset in DATASETS.items()
            ],
            [
                {"testcase_name": config_id, "config": config}
                for config_id, config in CONFIGS.items()
            ],
        )
    )
    def test_quantize_gptq_combinations(self, dataset, config):
        """Tests GPTQ quantization on a tiny transformer classifier.
        Validates classification performance of the quantized model
        with respect to the full-precision baseline.
        """
        rng = np.random.default_rng(seed=321)
        keras.utils.set_random_seed(123)
        # Build the calibration set.
        calibration_set = list(
            dataset() if isinstance(dataset, Callable) else dataset
        )
        self.assertNotEmpty(calibration_set)
        # Build classifier and tokenizer
        model = _get_sequence_classifier()
        tokenizer = _char_tokenizer(vocab_size=VOCAB_SIZE, seq_len=SEQ_LEN)
        # Build an eval batch drawn from the SAME distribution as calibration
        batch_size = min(8, len(calibration_set))
        eval_samples = [
            calibration_set[rng.integers(0, len(calibration_set))]
            for _ in range(batch_size)
        ]
        x_eval = ops.concatenate([tokenizer(s) for s in eval_samples], axis=0)
        # Baseline logits
        y_ref = model.predict(x_eval)
        # GPTQ needs to know which layers feed the sequential blocks.
        embedding_layer = model.layers[1]
        transformer_block = model.layers[2]
        layer_structure = {
            "pre_block_layers": [embedding_layer],
            "sequential_blocks": [transformer_block],
        }
        base_cfg = dict(
            dataset=calibration_set,
            tokenizer=tokenizer,
            weight_bits=W_BITS,
            num_samples=NUM_SAMPLES,
            sequence_length=SEQ_LEN,
            group_size=32,
            symmetric=False,
            activation_order=False,
            quantization_layer_structure=layer_structure,
        )
        # Per-case `config` entries override the shared baseline settings.
        gptq_cfg = GPTQConfig(**{**base_cfg, **config})
        # Quantize
        model.quantize("gptq", config=gptq_cfg)
        # Post-quant logits
        y_q = model.predict(x_eval)
        # The quantized model must mostly agree with the fp32 baseline.
        top1_match = _top1_match_rate(y_ref, y_q)
        p_ref, p_q = ops.softmax(y_ref), ops.softmax(y_q)
        kl = _mean_kl(p_ref, p_q)
        self.assertGreaterEqual(
            top1_match, 0.5, f"Top-1 agreement too low: {top1_match:.3f}"
        )
        self.assertLessEqual(kl, 0.30, f"KL divergence too high: {kl:.3f}")
    # Error paths: each case supplies an invalid mode/config combination and
    # the exception type/message `Model.quantize` must raise for it.
    @parameterized.named_parameters(
        {
            "testcase_name": "gptq_with_invalid_config_type",
            "mode": "gptq",
            "config": {"weight_bits": 4},
            "expected_exception": ValueError,
            "error_msg": "Argument `config` must be an instance of "
            "`QuantizationConfig`",
        },
        {
            "testcase_name": "gptq_with_none_config",
            "mode": "gptq",
            "config": None,
            "expected_exception": ValueError,
            "error_msg": "For GPTQ, you must pass a `GPTQConfig` object "
            "in the `config` argument.",
        },
        {
            "testcase_name": "gptq_with_base_quantization_config",
            "mode": "gptq",
            "config": QuantizationConfig(),
            "expected_exception": NotImplementedError,
            "error_msg": "Do not instantiate QuantizationConfig directly.",
        },
        {
            "testcase_name": "gptq_missing_structure",
            "mode": "gptq",
            "config": GPTQConfig(dataset=["a"], tokenizer=lambda x: x),
            "expected_exception": ValueError,
            "error_msg": "For mode='gptq', a valid quantization structure",
        },
    )
    def test_quantize_scenarios(
        self, mode, config, expected_exception, error_msg
    ):
        """Invalid GPTQ configurations must raise the documented errors."""
        model = _get_simple_model()
        with self.assertRaisesRegex(expected_exception, error_msg):
            model.quantize(mode, config=config)
    def test_gptq_filtering(self):
        """Tests that filters argument works for GPTQ."""
        model = _get_sequence_classifier()
        tokenizer = _char_tokenizer(vocab_size=VOCAB_SIZE, seq_len=SEQ_LEN)
        # Structure
        embedding_layer = model.layers[1]
        transformer_block = model.layers[2]
        layer_structure = {
            "pre_block_layers": [embedding_layer],
            "sequential_blocks": [transformer_block],
        }
        config = GPTQConfig(
            dataset=[np.zeros((1, SEQ_LEN), dtype="int32")],
            tokenizer=tokenizer,
            quantization_layer_structure=layer_structure,
            weight_bits=4,
            group_size=32,
        )
        target_layer = transformer_block.ffn.layers[0]
        # Callable filter: exclude exactly the target layer by name.
        def filter_fn(layer):
            return layer.name != target_layer.name
        model.quantize("gptq", config=config, filters=filter_fn)
        # Check that target_layer is NOT quantized.
        self.assertIsNone(getattr(target_layer, "quantization_mode", None))
        self.assertFalse(hasattr(target_layer, "quantized_kernel"))
        # Check that other dense layers ARE quantized.
        other_dense = transformer_block.ffn.layers[1]
        self.assertEqual(
            getattr(other_dense, "quantization_mode", None), "gptq"
        )
        self.assertTrue(hasattr(other_dense, "quantized_kernel"))
    def test_gptq_multi_filtering(self):
        """Tests that list of regex filters works for GPTQ."""
        model = _get_sequence_classifier()
        tokenizer = _char_tokenizer(vocab_size=VOCAB_SIZE, seq_len=SEQ_LEN)
        embedding_layer = model.layers[1]
        transformer_block = model.layers[2]
        layer_structure = {
            "pre_block_layers": [embedding_layer],
            "sequential_blocks": [transformer_block],
        }
        config = GPTQConfig(
            dataset=[np.zeros((1, SEQ_LEN), dtype="int32")],
            tokenizer=tokenizer,
            quantization_layer_structure=layer_structure,
            weight_bits=4,
            group_size=32,
        )
        layer0 = transformer_block.ffn.layers[0]
        layer1 = transformer_block.ffn.layers[1]
        # We want to quantize only layer0.
        filters = [f"^{layer0.name}$"]
        model.quantize("gptq", config=config, filters=filters)
        # Check that layer0 is quantized.
        self.assertEqual(getattr(layer0, "quantization_mode", None), "gptq")
        self.assertTrue(hasattr(layer0, "quantized_kernel"))
        # Check that layer1 is not quantized.
        self.assertIsNone(getattr(layer1, "quantization_mode", None))
        self.assertFalse(hasattr(layer1, "quantized_kernel"))
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/quantizers/gptq_test.py",
"license": "Apache License 2.0",
"lines": 606,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/layers/preprocessing/data_layer.py | import keras.src.backend
from keras.src import tree
from keras.src.layers.layer import Layer
from keras.src.random.seed_generator import SeedGenerator
from keras.src.utils import backend_utils
from keras.src.utils import jax_utils
from keras.src.utils import tracking
class DataLayer(Layer):
    """Layer designed for safe use in `tf.data` or `grain` pipeline.
    This layer overrides the `__call__` method to ensure that the correct
    backend is used and that computation is performed on the CPU.
    The `call()` method in subclasses should use `self.backend` ops. If
    randomness is needed, define both `seed` and `generator` in `__init__` and
    retrieve the running seed using `self._get_seed_generator()`. If the layer
    has weights in `__init__` or `build()`, use `convert_weight()` to ensure
    they are in the correct backend.
    **Note:** This layer and its subclasses only support a single input tensor.
    Examples:
    **Custom `DataLayer` subclass:**
    ```python
    from keras.src.layers.preprocessing.data_layer import DataLayer
    from keras.src.random import SeedGenerator
    class BiasedRandomRGBToHSVLayer(DataLayer):
        def __init__(self, seed=None, **kwargs):
            super().__init__(**kwargs)
            self.probability_bias = ops.convert_to_tensor(0.01)
            self.seed = seed
            self.generator = SeedGenerator(seed)
        def call(self, inputs):
            images_shape = self.backend.shape(inputs)
            batch_size = 1 if len(images_shape) == 3 else images_shape[0]
            seed = self._get_seed_generator(self.backend._backend)
            probability = self.backend.random.uniform(
                shape=(batch_size,),
                minval=0.0,
                maxval=1.0,
                seed=seed,
            )
            probability = self.backend.numpy.add(
                probability, self.convert_weight(self.probability_bias)
            )
            hsv_images = self.backend.image.rgb_to_hsv(inputs)
            return self.backend.numpy.where(
                probability[:, None, None, None] > 0.5,
                hsv_images,
                inputs,
            )
        def compute_output_shape(self, input_shape):
            return input_shape
    ```
    **Using as a regular Keras layer:**
    ```python
    import numpy as np
    x = np.random.uniform(size=(1, 16, 16, 3)).astype("float32")
    print(BiasedRandomRGBToHSVLayer()(x).shape) # (1, 16, 16, 3)
    ```
    **Using in a `tf.data` pipeline:**
    ```python
    import tensorflow as tf
    tf_ds = tf.data.Dataset.from_tensors(x)
    tf_ds = tf_ds.map(BiasedRandomRGBToHSVLayer())
    print([x.shape for x in tf_ds]) # [(1, 16, 16, 3)]
    ```
    **Using in a `grain` pipeline:**
    ```python
    import grain
    grain_ds = grain.MapDataset.source([x])
    grain_ds = grain_ds.map(BiasedRandomRGBToHSVLayer())
    print([x.shape for x in grain_ds]) # [(1, 16, 16, 3)]
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Backend wrapper that can be switched at call time (e.g. to
        # "tensorflow" when running inside a tf.data graph).
        self.backend = backend_utils.DynamicBackend()
        self._allow_non_tensor_positional_args = True
    def __call__(self, inputs, **kwargs):
        # Only the first leaf is inspected to decide which execution
        # environment we are in (single-input layer by contract).
        sample_input = tree.flatten(inputs)[0]
        if (
            not isinstance(sample_input, keras.KerasTensor)
            and backend_utils.in_tf_graph()
            and not jax_utils.is_in_jax_tracing_scope(sample_input)
        ):
            # We're in a TF graph, e.g. a tf.data pipeline.
            self.backend.set_backend("tensorflow")
            inputs = tree.map_structure(
                lambda x: self.backend.convert_to_tensor(
                    x, dtype=self.compute_dtype
                ),
                inputs,
            )
            # Inputs are already converted above, so temporarily disable the
            # base Layer's own argument conversion to avoid double handling.
            switch_convert_input_args = False
            if self._convert_input_args:
                self._convert_input_args = False
                switch_convert_input_args = True
            try:
                outputs = super().__call__(inputs, **kwargs)
            finally:
                # Always restore the layer to its native-backend state, even
                # if the call raised.
                self.backend.reset()
                if switch_convert_input_args:
                    self._convert_input_args = True
            return outputs
        elif (
            not isinstance(sample_input, keras.KerasTensor)
            and backend_utils.in_grain_data_pipeline()
        ):
            # We're in a Grain data pipeline. Force computation and data
            # placement to CPU.
            with keras.src.backend.device_scope("cpu"):
                return super().__call__(inputs, **kwargs)
        else:
            # Regular eager/symbolic call on the native backend.
            return super().__call__(inputs, **kwargs)
    @tracking.no_automatic_dependency_tracking
    def _get_seed_generator(self, backend=None):
        """Return a seed generator appropriate for the given backend name.

        Raises:
            ValueError: If the subclass did not define `seed` and `generator`.
        """
        if not hasattr(self, "seed") or not hasattr(self, "generator"):
            raise ValueError(
                "The `seed` and `generator` variable must be set in the "
                "`__init__` method before calling `_get_seed_generator()`."
            )
        if backend is None or backend == keras.backend.backend():
            return self.generator
        # Cache one generator per non-native backend so repeated calls in a
        # pipeline reuse the same random state.
        if not hasattr(self, "_backend_generators"):
            self._backend_generators = {}
        if backend in self._backend_generators:
            return self._backend_generators[backend]
        # NOTE(review): passes the DynamicBackend wrapper (not the `backend`
        # string) to SeedGenerator — confirm that is the intended argument.
        seed_generator = SeedGenerator(self.seed, backend=self.backend)
        self._backend_generators[backend] = seed_generator
        return seed_generator
    def convert_weight(self, weight):
        """Convert the weight if it is from a different backend."""
        if self.backend.name == keras.backend.backend():
            return weight
        else:
            # Round-trip through numpy to move the weight across backends.
            weight = keras.ops.convert_to_numpy(weight)
            return self.backend.convert_to_tensor(weight)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/preprocessing/data_layer.py",
"license": "Apache License 2.0",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
keras-team/keras:keras/src/layers/preprocessing/data_layer_test.py | import grain
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import testing
from keras.src.layers.preprocessing.data_layer import DataLayer
from keras.src.random import SeedGenerator
class RandomRGBToHSVLayer(DataLayer):
    """Converts each image in a batch to HSV with ~50% probability.

    Uses the dynamic-backend ops exposed by `DataLayer` so the same layer
    works eagerly, inside `tf.data`, and inside grain pipelines.
    """

    def __init__(self, data_format=None, seed=None, **kwargs):
        super().__init__(**kwargs)
        self.data_format = backend.standardize_data_format(data_format)
        # `seed` and `generator` are required by DataLayer._get_seed_generator.
        self.seed = seed
        self.generator = SeedGenerator(seed)

    def call(self, inputs):
        shape = self.backend.shape(inputs)
        # Unbatched (rank-3) input behaves like a batch of one image.
        if len(shape) == 3:
            num_images = 1
        else:
            num_images = shape[0]
        running_seed = self._get_seed_generator(self.backend._backend)
        coin_flips = self.backend.random.uniform(
            shape=(num_images,),
            minval=0.0,
            maxval=1.0,
            seed=running_seed,
        )
        converted = self.backend.image.rgb_to_hsv(
            inputs, data_format=self.data_format
        )
        # Per-image choice: HSV version when the draw exceeds 0.5, else RGB.
        return self.backend.numpy.where(
            coin_flips[:, None, None, None] > 0.5, converted, inputs
        )

    def compute_output_shape(self, input_shape):
        return input_shape
class DataLayerTest(testing.TestCase):
    """Exercises a DataLayer subclass eagerly and inside data pipelines."""
    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        """Standard layer checks for both image data formats."""
        self.run_layer_test(
            RandomRGBToHSVLayer,
            init_kwargs={
                "seed": 1337,
                "data_format": "channels_last",
            },
            input_shape=(1, 2, 2, 3),
            supports_masking=False,
            expected_output_shape=(1, 2, 2, 3),
        )
        self.run_layer_test(
            RandomRGBToHSVLayer,
            init_kwargs={
                "seed": 1337,
                "data_format": "channels_first",
            },
            input_shape=(1, 3, 2, 2),
            supports_masking=False,
            expected_output_shape=(1, 3, 2, 2),
        )
    def test_tf_data_compatibility(self):
        """The layer must run inside a tf.data `map` on any backend."""
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3)).astype("float32")
        else:
            input_data = np.random.random((2, 3, 8, 8)).astype("float32")
        layer = RandomRGBToHSVLayer(data_format=data_format, seed=1337)
        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        # Dtype and shape must survive the pipeline round trip.
        for output in ds.take(1):
            self.assertDType(output, "float32")
            self.assertEqual(list(output.shape), list(input_data.shape))
    def test_grain_compatibility(self):
        """The layer must run inside a grain `map` on any backend."""
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3)).astype("float32")
        else:
            input_data = np.random.random((2, 3, 8, 8)).astype("float32")
        layer = RandomRGBToHSVLayer(data_format=data_format, seed=1337)
        ds = grain.MapDataset.source(input_data).batch(2).map(layer)
        # Dtype and shape must survive the pipeline round trip.
        for output in ds[:1]:
            self.assertDType(output, "float32")
            self.assertEqual(list(output.shape), list(input_data.shape))
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/layers/preprocessing/data_layer_test.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/utils/grain_utils.py | from keras.src import backend
from keras.src import tree
def make_batch(values):
    """Stack a list of identically-structured elements into one batch.

    Every leaf across `values` is stacked along a new leading axis; the
    stacking is forced onto the CPU device.
    """
    from keras.src import ops

    if not values:
        raise ValueError("Cannot batch 0 values. Please file a bug.")

    def _stack_leaves(*leaves):
        return ops.stack(leaves)

    with backend.device_scope("cpu"):
        return tree.map_structure(_stack_leaves, *values)
def make_string_batch(values):
    """Stack a list of structures whose leaves may be Python strings.

    String leaves are stacked into a `tf.string` tensor on the tensorflow
    backend and returned untouched (as a tuple) otherwise; non-string
    leaves are stacked with `ops.stack`. Runs on the CPU device.
    """
    from keras.src import ops

    if not values:
        raise ValueError("Cannot batch 0 values. Please file a bug.")

    def _stack_leaves(*leaves):
        if not isinstance(leaves[0], str):
            return ops.stack(leaves)
        if backend.backend() == "tensorflow":
            import tensorflow as tf

            converted = [
                tf.convert_to_tensor(leaf, dtype=tf.string) for leaf in leaves
            ]
            return tf.stack(converted)
        # Non-TF backends have no native string tensor; pass through.
        return leaves

    with backend.device_scope("cpu"):
        return tree.map_structure(_stack_leaves, *values)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/utils/grain_utils.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
keras-team/keras:keras/src/backend/jax/core_test.py | import os
import jax
import jax.numpy as jnp
import numpy as np
import pytest
import keras
from keras.src import backend
from keras.src import testing
from keras.src.backend.config import is_nnx_enabled
if is_nnx_enabled():
from flax import nnx
from keras.src.backend.jax.core import NnxVariable
@pytest.mark.skipif(
    backend.backend() != "jax",
    reason="JAX backend specific test for core Variable integration with NNX.",
)
@pytest.mark.skipif(
    not is_nnx_enabled(),
    reason="Test requires NNX backend to be enabled by default for setup.",
)
class NnxVariableTest(testing.TestCase):
    """Integration tests for Keras JAX variables inside flax NNX modules."""
    # NOTE(review): unittest's fixture hook is `setUp`; confirm that the
    # runner in use actually invokes this lowercase `setup` method.
    def setup(self):
        super().setup()
        class NNXModel(nnx.Module):
            def __init__(self, rngs):
                self.linear = nnx.Linear(2, 3, rngs=rngs)
                # Use NnxVariable directly as KerasJaxVariable
                # might be JaxVariable if NNX is disabled globally.
                self.custom_variable = NnxVariable(jnp.ones((1, 3)))
            def __call__(self, x):
                return self.linear(x) + self.custom_variable
        self.nnx_model = NNXModel(rngs=nnx.Rngs(0))
        self.keras_nnx_model = keras.Sequential(
            [keras.layers.Dense(units=1, input_shape=(10,))]
        )
        self.single_dummy_input = np.random.rand(1, 10)
    def test_variable_in_nnx_module(self):
        """NnxVariable inside an nnx.Module carries NNX trace state."""
        self.assertTrue(hasattr(self.nnx_model.custom_variable, "_trace_state"))
        self.assertIsNotNone(self.nnx_model.custom_variable._trace_state)
        self.assertAllEqual(self.nnx_model.custom_variable.value, [[1, 1, 1]])
        self.assertTrue(
            isinstance(self.nnx_model.custom_variable, nnx.Variable)
        )
    def test_model_saving(self):
        """Saving and reloading a Keras model preserves its outputs."""
        path = os.path.join(self.get_temp_dir(), "model.keras")
        original_outputs = self.keras_nnx_model(self.single_dummy_input)
        self.keras_nnx_model.save(path, save_format="keras_v3")
        restored_model = keras.models.load_model(path)
        restored_outputs = restored_model(self.single_dummy_input)
        self.assertAllEqual(original_outputs, restored_outputs)
    def test_keras_variable_nnx_split_merge_sync(self):
        """nnx.split/merge round trip keeps `_value` and `value` in sync."""
        variable1 = keras.Variable(jnp.array(1.0))
        graphdef, state = nnx.split(variable1)
        state = jax.tree.map(lambda x: x + 1, state)
        variable2 = nnx.merge(graphdef, state)
        self.assertEqual(variable2._value, variable2.value)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/backend/jax/core_test.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/export/openvino.py | import warnings
from keras.src import backend
from keras.src import tree
from keras.src.export.export_utils import convert_spec_to_tensor
from keras.src.export.export_utils import get_input_signature
from keras.src.export.export_utils import make_tf_tensor_spec
from keras.src.export.saved_model import DEFAULT_ENDPOINT_NAME
from keras.src.export.saved_model import ExportArchive
from keras.src.utils import io_utils
def export_openvino(
    model, filepath, verbose=None, input_signature=None, **kwargs
):
    """Export the model as an OpenVINO IR artifact for inference.
    This method exports the model to the OpenVINO IR format,
    which includes two files:
    a `.xml` file containing the model structure and a `.bin` file
    containing the weights.
    The exported model contains only the forward pass
    (i.e., the model's `call()` method), and can be deployed with the
    OpenVINO Runtime for fast inference on CPU and other Intel hardware.
    Args:
        filepath: `str` or `pathlib.Path`. Path to the output `.xml` file.
            The corresponding `.bin` file will be saved alongside it.
        verbose: Optional `bool`. Whether to print a confirmation message
            after export. If `None`, it uses the default verbosity configured
            by the backend.
        input_signature: Optional. Specifies the shape and dtype of the
            model inputs. If not provided, it will be inferred.
        **kwargs: Additional keyword arguments.
    Example:
    ```python
    import keras
    # Define or load a Keras model
    model = keras.models.Sequential([
        keras.layers.Input(shape=(128,)),
        keras.layers.Dense(64, activation="relu"),
        keras.layers.Dense(10)
    ])
    # Export to OpenVINO IR
    model.export("model.xml", format="openvino")
    ```
    """
    # NOTE(review): `assert` is stripped under `python -O`; raising a
    # ValueError would make this input validation unconditional.
    assert filepath.endswith(".xml"), (
        "The OpenVINO export requires the filepath to end with '.xml'. "
        f"Got: {filepath}"
    )
    import openvino as ov
    import openvino.opset15 as ov_opset
    from keras.src.backend.openvino.core import OPENVINO_DTYPES
    from keras.src.backend.openvino.core import OpenVINOKerasTensor
    # With `verbose=None` the confirmation message is printed by default.
    actual_verbose = verbose if verbose is not None else True
    if input_signature is None:
        input_signature = get_input_signature(model)
    if backend.backend() == "openvino":
        import inspect
        # Replace every tensor leaf with an OpenVINO Parameter node so the
        # model call below traces directly into an ov.Model graph.
        def parameterize_inputs(inputs, prefix=""):
            if isinstance(inputs, (list, tuple)):
                return [
                    parameterize_inputs(e, f"{prefix}{i}")
                    for i, e in enumerate(inputs)
                ]
            elif isinstance(inputs, dict):
                return {k: parameterize_inputs(v, k) for k, v in inputs.items()}
            elif isinstance(inputs, OpenVINOKerasTensor):
                ov_type = OPENVINO_DTYPES[str(inputs.dtype)]
                ov_shape = list(inputs.shape)
                param = ov_opset.parameter(shape=ov_shape, dtype=ov_type)
                param.set_friendly_name(prefix)
                return OpenVINOKerasTensor(param.output(0))
            else:
                raise TypeError(f"Unknown input type: {type(inputs)}")
        if isinstance(input_signature, list) and len(input_signature) == 1:
            input_signature = input_signature[0]
        # Concrete sample tensors (dynamic dims replaced by 1) to trace with.
        sample_inputs = tree.map_structure(
            lambda x: convert_spec_to_tensor(x, replace_none_number=1),
            input_signature,
        )
        params = parameterize_inputs(sample_inputs)
        # Match the model's call arity: unpack when it takes several inputs.
        signature = inspect.signature(model.call)
        if len(signature.parameters) > 1 and isinstance(params, (list, tuple)):
            outputs = model(*params)
        else:
            outputs = model(params)
        parameters = [p.output.get_node() for p in tree.flatten(params)]
        results = [ov_opset.result(r.output) for r in tree.flatten(outputs)]
        ov_model = ov.Model(results=results, parameters=parameters)
        flat_specs = tree.flatten(input_signature)
        for ov_input, spec in zip(ov_model.inputs, flat_specs):
            # Respect the dynamic axes from the original input signature.
            dynamic_shape_dims = [
                -1 if dim is None else dim for dim in spec.shape
            ]
            dynamic_shape = ov.PartialShape(dynamic_shape_dims)
            ov_input.get_node().set_partial_shape(dynamic_shape)
    elif backend.backend() in ("tensorflow", "jax"):
        # Route through a concrete tf.function and let OpenVINO convert it.
        inputs = tree.map_structure(make_tf_tensor_spec, input_signature)
        decorated_fn = get_concrete_fn(model, inputs, **kwargs)
        ov_model = ov.convert_model(decorated_fn)
        set_names(ov_model, inputs)
    elif backend.backend() == "torch":
        import torch
        # Trace the torch module with concrete sample tensors.
        sample_inputs = tree.map_structure(
            lambda x: convert_spec_to_tensor(x, replace_none_number=1),
            input_signature,
        )
        sample_inputs = tuple(sample_inputs)
        if hasattr(model, "eval"):
            model.eval()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=torch.jit.TracerWarning)
            traced = torch.jit.trace(model, sample_inputs)
        ov_model = ov.convert_model(traced)
        set_names(ov_model, sample_inputs)
    else:
        raise NotImplementedError(
            "`export_openvino` is only compatible with OpenVINO, "
            "TensorFlow, JAX and Torch backends."
        )
    # Writes both the .xml topology and the sibling .bin weights file.
    ov.serialize(ov_model, filepath)
    if actual_verbose:
        io_utils.print_msg(f"Saved OpenVINO IR at '{filepath}'.")
def collect_names(structure):
    """Yield a flat sequence of input names for a nested input structure.

    A dict entry contributes its key when the value is a leaf, and is
    recursed into otherwise. A leaf object contributes its truthy `name`
    attribute when present, else the fallback name "input".
    """
    if isinstance(structure, dict):
        for key, value in structure.items():
            if not isinstance(value, (dict, list, tuple)):
                yield key
            else:
                yield from collect_names(value)
        return
    if isinstance(structure, (list, tuple)):
        for item in structure:
            yield from collect_names(item)
        return
    leaf_name = getattr(structure, "name", None)
    yield leaf_name if leaf_name else "input"
def set_names(model, inputs):
    """Apply names derived from `inputs` onto the OV model's input nodes."""
    for ov_input, friendly_name in zip(model.inputs, collect_names(inputs)):
        node = ov_input.get_node()
        node.set_friendly_name(friendly_name)
        ov_input.tensor.set_names({friendly_name})
def _check_jax_kwargs(kwargs):
kwargs = kwargs.copy()
if "is_static" not in kwargs:
kwargs["is_static"] = True
if "jax2tf_kwargs" not in kwargs:
kwargs["jax2tf_kwargs"] = {
"enable_xla": False,
"native_serialization": False,
}
if kwargs["is_static"] is not True:
raise ValueError(
"`is_static` must be `True` in `kwargs` when using the jax backend."
)
if kwargs["jax2tf_kwargs"]["enable_xla"] is not False:
raise ValueError(
"`enable_xla` must be `False` in `kwargs['jax2tf_kwargs']` "
"when using the jax backend."
)
if kwargs["jax2tf_kwargs"]["native_serialization"] is not False:
raise ValueError(
"`native_serialization` must be `False` in "
"`kwargs['jax2tf_kwargs']` when using the jax backend."
)
return kwargs
def get_concrete_fn(model, input_signature, **kwargs):
    """Build a concrete exported function for `model` via an ExportArchive."""
    current_backend = backend.backend()
    if current_backend == "jax":
        # jax exports require the fixed is_static / jax2tf settings.
        kwargs = _check_jax_kwargs(kwargs)
    archive = ExportArchive()
    archive.track_and_add_endpoint(
        DEFAULT_ENDPOINT_NAME, model, input_signature, **kwargs
    )
    if current_backend == "tensorflow":
        archive._filter_and_track_resources()
    return archive._get_concrete_fn(DEFAULT_ENDPOINT_NAME)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/export/openvino.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
keras-team/keras:keras/src/export/openvino_test.py | import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src import tree
from keras.src.export import openvino
from keras.src.saving import saving_lib
from keras.src.testing.test_utils import named_product
try:
import openvino as ov
except ImportError:
ov = None
class CustomModel(models.Model):
    """Minimal subclassed model that applies `layer_list` sequentially."""

    def __init__(self, layer_list):
        super().__init__()
        self.layer_list = layer_list

    def call(self, input):
        # Feed the activation through each layer in declaration order.
        x = input
        for sub_layer in self.layer_list:
            x = sub_layer(x)
        return x
def get_model(type="sequential", input_shape=(10,), layer_list=None):
    """Build a small test model of the requested flavor.

    `type` selects sequential/functional/subclass/lstm construction;
    unrecognized values fall through and return None, matching the
    original behavior.
    """
    if not layer_list:
        layer_list = [
            layers.Dense(10, activation="relu"),
            layers.BatchNormalization(),
            layers.Dense(1, activation="sigmoid"),
        ]
    if type == "sequential":
        return models.Sequential(layer_list)
    if type == "functional":
        x = inputs = tree.map_shape_structure(layers.Input, input_shape)
        for layer in layer_list:
            x = layer(x)
        return models.Model(inputs=inputs, outputs=x)
    if type == "subclass":
        return CustomModel(layer_list)
    if type == "lstm":
        # https://github.com/keras-team/keras/issues/21390
        def make_bilstm(merge_mode):
            # Each branch wraps a fresh 10-unit LSTM returning sequences.
            return layers.Bidirectional(
                layers.LSTM(
                    10,
                    kernel_initializer="he_normal",
                    return_sequences=True,
                    kernel_regularizer=None,
                ),
                merge_mode=merge_mode,
            )

        inputs = layers.Input((4, 10))
        hidden = make_bilstm("sum")(inputs)
        outputs = make_bilstm("concat")(hidden)
        return models.Model(inputs=inputs, outputs=outputs)
@pytest.mark.skipif(ov is None, reason="OpenVINO is not installed")
@pytest.mark.skipif(
    backend.backend() not in ("tensorflow", "openvino", "jax", "torch"),
    reason=(
        "`export_openvino` only currently supports"
        "the tensorflow, jax, torch and openvino backends."
    ),
)
@pytest.mark.skipif(testing.jax_uses_gpu(), reason="Leads to core dumps on CI")
@pytest.mark.skipif(
    testing.tensorflow_uses_gpu(), reason="Leads to core dumps on CI"
)
class ExportOpenVINOTest(testing.TestCase):
    """Round-trip tests: export a Keras model to IR and verify OV inference."""
    @parameterized.named_parameters(
        named_product(
            model_type=["sequential", "functional", "subclass", "lstm"]
        )
    )
    def test_standard_model_export(self, model_type):
        """Exported IR must reproduce the Keras model's outputs."""
        if model_type == "lstm":
            self.skipTest(
                "LSTM export not supported - unimplemented QR operation"
            )
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.xml")
        model = get_model(model_type)
        batch_size = 3
        if model_type == "lstm":
            ref_input = np.random.normal(size=(batch_size, 4, 10))
        else:
            ref_input = np.random.normal(size=(batch_size, 10))
        ref_input = ref_input.astype("float32")
        ref_output = model(ref_input)
        try:
            openvino.export_openvino(model, temp_filepath)
        except Exception as e:
            if "XlaCallModule" in str(e):
                self.skipTest("OpenVINO does not support XlaCallModule yet")
            raise e
        # Load and run inference with OpenVINO
        core = ov.Core()
        ov_model = core.read_model(temp_filepath)
        compiled_model = core.compile_model(ov_model, "CPU")
        ov_output = compiled_model([ref_input])[compiled_model.output(0)]
        self.assertAllClose(ref_output, ov_output)
        # The batch axis is dynamic: a larger batch must also run.
        larger_input = np.concatenate([ref_input, ref_input], axis=0)
        compiled_model([larger_input])
    @parameterized.named_parameters(
        named_product(struct_type=["tuple", "array", "dict"])
    )
    def test_model_with_input_structure(self, struct_type):
        """Export must handle tuple/list/dict-structured model inputs."""
        class TupleModel(models.Model):
            def call(self, inputs):
                x, y = inputs
                return ops.add(x, y)
        class ArrayModel(models.Model):
            def call(self, inputs):
                x = inputs[0]
                y = inputs[1]
                return ops.add(x, y)
        class DictModel(models.Model):
            def call(self, inputs):
                x = inputs["x"]
                y = inputs["y"]
                return ops.add(x, y)
        batch_size = 3
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        if struct_type == "tuple":
            model = TupleModel()
            ref_input = (ref_input, ref_input * 2)
        elif struct_type == "array":
            model = ArrayModel()
            ref_input = [ref_input, ref_input * 2]
        elif struct_type == "dict":
            model = DictModel()
            ref_input = {"x": ref_input, "y": ref_input * 2}
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.xml")
        ref_output = model(tree.map_structure(ops.convert_to_tensor, ref_input))
        try:
            openvino.export_openvino(model, temp_filepath)
        except Exception as e:
            if "XlaCallModule" in str(e):
                self.skipTest("OpenVINO does not support XlaCallModule yet")
            raise e
        # Load and run inference with OpenVINO
        core = ov.Core()
        ov_model = core.read_model(temp_filepath)
        compiled_model = core.compile_model(ov_model, "CPU")
        # OV takes a flat list of inputs regardless of the original nesting.
        if isinstance(ref_input, dict):
            ov_inputs = [ref_input[key] for key in ref_input.keys()]
        else:
            ov_inputs = list(ref_input)
        ov_output = compiled_model(ov_inputs)[compiled_model.output(0)]
        self.assertAllClose(ref_output, ov_output)
        # Test with keras.saving_lib
        temp_filepath = os.path.join(
            self.get_temp_dir(), "exported_model.keras"
        )
        saving_lib.save_model(model, temp_filepath)
        revived_model = saving_lib.load_model(
            temp_filepath,
            {
                "TupleModel": TupleModel,
                "ArrayModel": ArrayModel,
                "DictModel": DictModel,
            },
        )
        self.assertAllClose(ref_output, revived_model(ref_input))
        # A model revived from .keras must still be exportable.
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model2.xml")
        try:
            openvino.export_openvino(revived_model, temp_filepath)
        except Exception as e:
            if "XlaCallModule" in str(e):
                self.skipTest("OpenVINO does not support XlaCallModule yet")
            raise e
        # Dynamic batch: doubled batch size must still run.
        bigger_ref_input = tree.map_structure(
            lambda x: np.concatenate([x, x], axis=0), ref_input
        )
        if isinstance(bigger_ref_input, dict):
            bigger_ov_inputs = [
                bigger_ref_input[key] for key in bigger_ref_input.keys()
            ]
        else:
            bigger_ov_inputs = list(bigger_ref_input)
        compiled_model(bigger_ov_inputs)
    def test_model_with_multiple_inputs(self):
        """Export must handle a `call(self, x, y)` multi-argument model."""
        class TwoInputsModel(models.Model):
            def call(self, x, y):
                return x + y
            def build(self, y_shape, x_shape):
                self.built = True
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.xml")
        model = TwoInputsModel()
        batch_size = 3
        ref_input_x = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_input_y = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_output = model(ref_input_x, ref_input_y)
        try:
            openvino.export_openvino(model, temp_filepath)
        except Exception as e:
            if "XlaCallModule" in str(e):
                self.skipTest("OpenVINO does not support XlaCallModule yet")
            raise e
        # Load and run inference with OpenVINO
        core = ov.Core()
        ov_model = core.read_model(temp_filepath)
        compiled_model = core.compile_model(ov_model, "CPU")
        ov_output = compiled_model([ref_input_x, ref_input_y])[
            compiled_model.output(0)
        ]
        self.assertAllClose(ref_output, ov_output)
        # Dynamic batch: doubled batch size must still run.
        larger_input_x = np.concatenate([ref_input_x, ref_input_x], axis=0)
        larger_input_y = np.concatenate([ref_input_y, ref_input_y], axis=0)
        compiled_model([larger_input_x, larger_input_y])
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/export/openvino_test.py",
"license": "Apache License 2.0",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/trainers/data_adapters/grain_dataset_adapter.py | import itertools
import numpy as np
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
from keras.src.utils.module_utils import grain
from keras.src.utils.module_utils import tensorflow as tf
class GrainDatasetAdapter(DataAdapter):
"""Adapter that handles `grain.DataLoader`, `grain.MapDataset` and
`grain.IterDataset`.
"""
    def __init__(self, dataset):
        """Initialize the GrainDatasetAdapter.
        Args:
            dataset: A Grain dataset instance. Must be one of
                `grain.DataLoader`, `grain.MapDataset`, or `grain.IterDataset`.
        """
        if not isinstance(
            dataset, (grain.MapDataset, grain.IterDataset, grain.DataLoader)
        ):
            raise ValueError(
                "Expected `dataset` to be a grain.MapDataset, "
                "grain.IterDataset or grain.DataLoader. "
                f"Received: {dataset} of type {type(dataset)}"
            )
        self._dataset = dataset
        # Peek at a few leading batches to infer batch size and tensor spec.
        batch_size, output_signature = self._get_dataset_info(dataset)
        self._batch_size = batch_size
        self._output_signature = output_signature
        # TF-format signature cache; not populated here.
        self._output_tf_signature = None
    def _get_dataset_info(self, dataset):
        """Get the `batch_size` and `output_signature` from the dataset.
        We use a small list of batches to infer the `batch_size` and
        `output_signature`.
        """
        # NOTE(review): this draws elements from the dataset to inspect them;
        # presumably safe because Grain datasets are re-iterable — confirm.
        batches = list(
            itertools.islice(
                dataset, data_adapter_utils.NUM_BATCHES_FOR_TENSOR_SPEC
            )
        )
        output_signature = data_adapter_utils.get_keras_tensor_spec(batches)
        flat_output_signature = tree.flatten(output_signature)
        # Batch size is the leading dim of the first leaf; it may be None
        # when the inferred spec has a dynamic batch dimension.
        batch_size = flat_output_signature[0].shape[0]
        if batch_size is not None:
            batch_size = int(batch_size)
        return batch_size, output_signature
    def get_numpy_iterator(self):
        """Return a dataset whose tensor leaves are converted to numpy."""
        # Workaround for internal change in Grain which isn't a part of a
        # release yet.
        # TODO(abheesht17): Remove this after the next Grain release.
        try:
            from grain._src.python.shared_memory_array import (
                SharedMemoryArrayMetadata,
            )
        except ImportError:
            from grain._src.python.ipc.shared_memory_array import (
                SharedMemoryArrayMetadata,
            )
        def convert_to_numpy(x):
            # numpy arrays and shared-memory metadata pass through untouched.
            if isinstance(x, (np.ndarray, SharedMemoryArrayMetadata)):
                return x
            else:
                # Using `__array__` should handle `tf.Tensor`, `jax.np.ndarray`,
                # `torch.Tensor`, as well as any other tensor-like object that
                # has added numpy support.
                if hasattr(x, "__array__"):
                    if data_adapter_utils.is_torch_tensor(x):
                        x = x.cpu()
                    x = np.asarray(x)
                return x
        class ConvertToNumpy(grain.transforms.Map):
            def map(self, x):
                return tree.map_structure(
                    convert_to_numpy, x, none_is_leaf=False
                )
        if isinstance(self._dataset, (grain.MapDataset, grain.IterDataset)):
            dataset = self._dataset.map(ConvertToNumpy())
        else:
            # DataLoader has no `.map`; rebuild it from its private fields
            # with the conversion appended to its operation pipeline.
            # Instantiate a new `DataLoader`.
            dataset = grain.DataLoader(
                data_source=self._dataset._data_source,
                sampler=self._dataset._sampler,
                # Append `ConvertToNumpy`.
                operations=list(self._dataset._operations) + [ConvertToNumpy()],
                worker_count=self._dataset._multiprocessing_options.num_workers,
                worker_buffer_size=self._dataset._multiprocessing_options.per_worker_buffer_size,
                shard_options=self._dataset._shard_options,
                read_options=self._dataset._read_options,
                enable_profiling=self._dataset._multiprocessing_options.enable_profiling,
            )
        return dataset
    def get_jax_iterator(self):
        """Return a dataset whose sparse leaves are converted for JAX."""
        def convert_to_jax_compatible(x):
            # Only sparse tensors need conversion; dense leaves pass through.
            if data_adapter_utils.is_scipy_sparse(x):
                x = data_adapter_utils.scipy_sparse_to_jax_sparse(x)
            elif data_adapter_utils.is_tensorflow_sparse(x):
                x = data_adapter_utils.tf_sparse_to_jax_sparse(x)
            return x
        class ConvertToJaxCompatible(grain.transforms.Map):
            def map(self, x):
                return tree.map_structure(
                    convert_to_jax_compatible, x, none_is_leaf=False
                )
        if isinstance(self._dataset, (grain.MapDataset, grain.IterDataset)):
            dataset = self._dataset.map(ConvertToJaxCompatible())
        else:
            # DataLoader has no `.map`; rebuild it from its private fields
            # with the conversion appended to its operation pipeline.
            # Instantiate a new `DataLoader`.
            dataset = grain.DataLoader(
                data_source=self._dataset._data_source,
                sampler=self._dataset._sampler,
                # Append `ConvertToJaxCompatible`.
                operations=list(self._dataset._operations)
                + [ConvertToJaxCompatible()],
                worker_count=self._dataset._multiprocessing_options.num_workers,
                worker_buffer_size=self._dataset._multiprocessing_options.per_worker_buffer_size,
                shard_options=self._dataset._shard_options,
                read_options=self._dataset._read_options,
                enable_profiling=self._dataset._multiprocessing_options.enable_profiling,
            )
        return dataset
def get_tf_dataset(self):
    """Return a `tf.data.Dataset` view over the Grain dataset.

    Sparse leaves are converted to `tf.SparseTensor`, `None` leaves are
    replaced with empty `tf.experimental.Optional`s, and lists are
    converted to tuples because `tf.data.Dataset.from_generator` does
    not accept lists as outputs.
    """

    def convert_to_tf(x):
        if x is None:
            # `from_generator` cannot yield `None` directly.
            return tf.experimental.Optional.empty(None)
        if data_adapter_utils.is_scipy_sparse(x):
            x = data_adapter_utils.scipy_sparse_to_tf_sparse(x)
        elif data_adapter_utils.is_jax_sparse(x):
            x = data_adapter_utils.jax_sparse_to_tf_sparse(x)
        return x

    class ConvertToTF(grain.transforms.Map):
        def map(self, x):
            return tree.map_structure(convert_to_tf, x)

    # `tf.data.Dataset.from_generator` does not support lists as output.
    # We convert lists to tuples.
    class ListToTuple(grain.transforms.Map):
        def map(self, x):
            return tree.lists_to_tuples(x)

    if isinstance(self._dataset, (grain.MapDataset, grain.IterDataset)):
        dataset = self._dataset.map(ConvertToTF())
        dataset = dataset.map(ListToTuple())
    else:
        # Instantiate a new `DataLoader`.
        # Rebuild the loader with the same configuration plus the two
        # extra transform operations.
        dataset = grain.DataLoader(
            data_source=self._dataset._data_source,
            sampler=self._dataset._sampler,
            # Append `ConvertToTF` and `ListToTuple`.
            operations=list(self._dataset._operations)
            + [ConvertToTF(), ListToTuple()],
            worker_count=self._dataset._multiprocessing_options.num_workers,
            worker_buffer_size=self._dataset._multiprocessing_options.per_worker_buffer_size,
            shard_options=self._dataset._shard_options,
            read_options=self._dataset._read_options,
            enable_profiling=self._dataset._multiprocessing_options.enable_profiling,
        )
    # Cache the TF output signature: computing it requires mapping the
    # (possibly expensive) structure conversion only once.
    if self._output_tf_signature is None:
        self._output_tf_signature = tree.map_structure(
            data_adapter_utils.convert_to_tf_tensor_spec,
            self._output_signature,
        )
    return tf.data.Dataset.from_generator(
        lambda: dataset, output_signature=self._output_tf_signature
    )
def get_torch_dataloader(self):
import torch.utils.data as torch_data
class ConverterIterableDataset(torch_data.IterableDataset):
def __init__(self, iterable):
super().__init__()
self.iterable = iterable
def __iter__(self):
return iter(self.iterable)
# `batch_size=None` indicates that we should not re-batch
return torch_data.DataLoader(
ConverterIterableDataset(self._dataset), batch_size=None
)
@property
def builtin_prefetch(self):
    # The wrapped Grain pipeline handles its own prefetching, so Keras
    # should not add another prefetch layer on top.
    return True

@property
def num_batches(self):
    # Unknown: Grain datasets may be infinite or of unknown cardinality.
    return None

@property
def batch_size(self):
    # Inferred from the dataset's output signature; may be `None`.
    return self._batch_size

@property
def has_partial_batch(self):
    # Cannot be determined without iterating the dataset.
    return None

@property
def partial_batch_size(self):
    # Cannot be determined without iterating the dataset.
    return None
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/trainers/data_adapters/grain_dataset_adapter.py",
"license": "Apache License 2.0",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
keras-team/keras:keras/src/trainers/data_adapters/grain_dataset_adapter_test.py | import grain
import numpy as np
import tensorflow as tf
import torch
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.testing.test_utils import named_product
from keras.src.trainers.data_adapters import grain_dataset_adapter
class Range2DSource(grain.sources.RandomAccessDataSource):
    """Random-access source yielding `(1, 1)`-shaped integer arrays.

    Item `i` contains the single value `start + i`.
    """

    def __init__(self, start, stop):
        self.start = start
        self.stop = stop

    def __getitem__(self, idx):
        value = self.start + idx
        return np.array([value]).reshape(1, 1)

    def __len__(self):
        span = self.stop - self.start
        return span
class GrainDatasetAdapterTest(testing.TestCase):
    """Tests for `GrainDatasetAdapter` across dataset kinds and backends."""

    def _get_dataset(self, dataset_type, worker_count=0, num_threads=0):
        # 34 samples batched by 16 -> two full batches plus a final
        # partial batch of 2, letting tests assert partial-batch shapes.
        x = np.random.normal(size=(34, 4)).astype("float32")
        y = np.random.normal(size=(34, 2)).astype("float32")

        class MySource(grain.sources.RandomAccessDataSource):
            def __init__(self, x, y):
                self.x = x
                self.y = y

            def __getitem__(self, idx):
                return self.x[idx], self.y[idx]

            def __len__(self):
                return len(self.x)

        if dataset_type == "map_dataset":
            dataset = grain.MapDataset.source(MySource(x, y)).batch(
                batch_size=16
            )
        elif dataset_type == "iter_dataset":
            dataset = (
                grain.MapDataset.source(MySource(x, y))
                .to_iter_dataset()
                .batch(batch_size=16)
            )
        else:
            # The `DataLoader` flavor exercises the code path where the
            # adapter must rebuild the loader to append transforms.
            source = MySource(x, y)
            dataset = grain.DataLoader(
                data_source=source,
                operations=[grain.transforms.Batch(batch_size=16)],
                shard_options=grain.sharding.NoSharding(),
                sampler=grain.samplers.IndexSampler(
                    num_records=len(source), num_epochs=1
                ),
                worker_count=worker_count,
                read_options=grain.ReadOptions(num_threads=num_threads),
            )
        return dataset

    @parameterized.named_parameters(
        named_product(
            dataset_type=["map_dataset", "iter_dataset", "data_loader"]
        )
    )
    def test_basic_flow(self, dataset_type):
        dataset = self._get_dataset(dataset_type)
        adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)
        # Grain cardinality is unknown, so only `batch_size` is concrete.
        self.assertEqual(adapter.num_batches, None)
        self.assertEqual(adapter.batch_size, 16)
        self.assertEqual(adapter.has_partial_batch, None)
        self.assertEqual(adapter.partial_batch_size, None)

        # Pick the iterator flavor matching the active backend.
        if backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            expected_class = tf.Tensor
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
            expected_class = np.ndarray
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
            expected_class = torch.Tensor
        else:
            it = adapter.get_numpy_iterator()
            expected_class = np.ndarray

        for i, batch in enumerate(it):
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertIsInstance(bx, expected_class)
            self.assertIsInstance(by, expected_class)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertContainsExactSubsequence(str(bx.dtype), "float32")
            if i < 2:
                # Two full batches of 16...
                self.assertEqual(bx.shape, (16, 4))
                self.assertEqual(by.shape, (16, 2))
            else:
                # ...followed by the final partial batch of 2.
                self.assertEqual(bx.shape, (2, 4))
                self.assertEqual(by.shape, (2, 2))

    @parameterized.named_parameters(
        named_product(data_type=["list", "dict", "nested_list", "nested_dict"])
    )
    def test_nested_data(self, data_type):
        # Verify that arbitrarily nested batch structures survive the
        # per-backend conversion transforms.
        if data_type not in ("list", "dict", "nested_list", "nested_dict"):
            raise ValueError(
                "data_type must be one of 'list', 'dict', 'nested_list' or "
                f"'nested_dict'. Received: {data_type}"
            )

        class NestedSource(grain.sources.RandomAccessDataSource):
            def __init__(self, data_type):
                self.x = np.random.random((40, 4)).astype("float32")
                self.y = np.random.random((40, 2)).astype("float32")
                self.data_type = data_type

            def __len__(self):
                return len(self.x)

            def __getitem__(self, idx):
                x = self.x[idx]
                y = self.y[idx]
                if self.data_type == "list":
                    return x, y
                elif self.data_type == "dict":
                    return {"x": x, "y": y}
                elif self.data_type == "nested_list":
                    return x, (x, y)
                elif self.data_type == "nested_dict":
                    return {"data": {"x": x, "y": y}}

        dataset = grain.MapDataset.source(NestedSource(data_type)).batch(
            batch_size=4
        )
        adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)

        if backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            expected_class = tf.Tensor
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
            expected_class = np.ndarray
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
            expected_class = torch.Tensor
        else:
            it = adapter.get_numpy_iterator()
            expected_class = np.ndarray

        for batch in it:
            # Unpack according to the structure produced by the source.
            if data_type == "list":
                self.assertEqual(len(batch), 2)
                bx, by = batch
            elif data_type == "dict":
                self.assertEqual(len(batch), 2)
                bx, by = batch["x"], batch["y"]
            elif data_type == "nested_list":
                self.assertEqual(len(batch), 2)
                bx, (_, by) = batch
            elif data_type == "nested_dict":
                self.assertEqual(len(batch["data"]), 2)
                bx, by = batch["data"]["x"], batch["data"]["y"]
            self.assertIsInstance(bx, expected_class)
            self.assertIsInstance(by, expected_class)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertEqual(bx.shape, (4, 4))
            self.assertEqual(by.shape, (4, 2))

    def test_multiple_calling_on_iterators(self):
        # All iterator flavors must be obtainable from one adapter
        # without interfering with each other.
        dataset = self._get_dataset("iter_dataset")
        adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)
        numpy_it = adapter.get_numpy_iterator()
        jax_it = adapter.get_jax_iterator()
        tf_it = adapter.get_tf_dataset()
        torch_it = adapter.get_torch_dataloader()
        for it in (numpy_it, jax_it, tf_it, torch_it):
            for batch in it:
                self.assertEqual(len(batch), 2)
                bx, by = batch
                self.assertEqual(bx.dtype, by.dtype)

    def test_builtin_prefetch(self):
        dataset = grain.MapDataset.source(Range2DSource(0, 42))
        adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)
        self.assertTrue(adapter.builtin_prefetch)

    def test_num_batches(self):
        dataset = grain.MapDataset.source(Range2DSource(0, 42))
        adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)
        self.assertEqual(adapter.num_batches, None)

        # Test for Infinite Cardinality
        dataset = grain.MapDataset.source(Range2DSource(0, 42))
        dataset = dataset.repeat()
        adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)
        self.assertIsNone(adapter.num_batches)

        # Test for Unknown Cardinality
        dataset = dataset.filter(lambda x: True)
        adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset)
        self.assertIsNone(adapter.num_batches)

    def test_invalid_dataset_type(self):
        with self.assertRaisesRegex(
            ValueError,
            (
                r"Expected `dataset` to be a grain.MapDataset, "
                r"grain.IterDataset or grain.DataLoader. "
            ),
        ):
            grain_dataset_adapter.GrainDatasetAdapter(
                "This is not a grain.Dataset"
            )
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/trainers/data_adapters/grain_dataset_adapter_test.py",
"license": "Apache License 2.0",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/ops/ops_test.py | import inspect
from absl.testing import parameterized
try:
from keras.api import ops as api_ops_root
except ImportError:
from keras import ops as api_ops_root
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.ops.operation import Operation
from keras.src.testing.test_utils import named_product
from keras.src.utils.naming import to_snake_case
# Submodules of `keras.ops` whose public ops are checked for consistency.
OPS_MODULES = ("core", "image", "linalg", "math", "nn", "numpy")

# Canonical `self` and `name` parameters used to compare op class
# `__init__` signatures against the expected convention.
SELF_PARAMETER = inspect.Parameter(
    "self", inspect.Parameter.POSITIONAL_OR_KEYWORD
)
NAME_PARAMETER = inspect.Parameter(
    "name", inspect.Parameter.KEYWORD_ONLY, default=None
)
# Parameters with these names are known to always be static (non-tensors).
STATIC_PARAMETER_NAMES = frozenset(
    {"axis", "axes", "dtype", "shape", "newshape", "sparse", "ragged"}
)
def op_functions_and_classes(ops_module):
    """Enumerate pairs of op function and op classes in a module.

    Will return for instance `(ExpandDims, expand_dims)`, `(Sum, sum)`, ...

    Args:
        ops_module: the module to explore.

    Returns:
        iterable returning tuples with function and class pairs.
    """
    # Op classes whose snake-cased name differs from the function name.
    special_names = {
        "batch_norm": "batch_normalization",
        "rms_norm": "rms_normalization",
        "search_sorted": "searchsorted",
    }
    # Go through all symbols.
    for symbol_name in dir(ops_module):
        symbol = getattr(ops_module, symbol_name)
        # Only keep classes that extend `Operation`.
        if not isinstance(symbol, type) or Operation not in symbol.__mro__:
            continue
        # Infer the corresponding op function name, honoring exceptions.
        function_name = to_snake_case(symbol_name)
        function_name = special_names.get(function_name, function_name)
        # Some classes are abstract super classes shared by multiple ops
        # and have no matching function; skip those.
        function = getattr(ops_module, function_name, None)
        if function is not None:
            yield function, symbol
class OperationTest(testing.TestCase):
    """Consistency checks between op functions, op classes and backends."""

    @parameterized.named_parameters(named_product(module_name=OPS_MODULES))
    def test_class_function_consistency(self, module_name):
        ops_module = getattr(ops, module_name)
        if module_name in ("core", "math"):
            # `core` and `math` are not exported as their own module.
            api_ops_module = None
        else:
            api_ops_module = getattr(api_ops_root, module_name)
        for op_function, op_class in op_functions_and_classes(ops_module):
            name = op_function.__name__

            # ==== Check exports ====
            # - op should be exported as e.g. `keras.ops.numpy.sum`
            # - op should also be exported as e.g. `keras.ops.sum`
            if module_name != "image":
                # `image` ops are not exported at the top-level.
                self.assertIsNotNone(
                    getattr(api_ops_root, name, None),
                    f"Not exported as `keras.ops.{name}`",
                )
            if api_ops_module is not None:
                # `core` and `math` are not exported as their own module.
                self.assertIsNotNone(
                    getattr(api_ops_module, name, None),
                    f"Not exported as `keras.ops.{module_name}.{name}`",
                )

            # ==== Check handling of name in __init__ ====
            # - op class `__init__` should have a `name` parameter at the
            #   end, which should be keyword only and with a default value
            #   of `None`
            # - op class `__init__` should call `super().__init__(name=name)`
            if op_class.__init__ is Operation.__init__:
                # `name` is not keyword only in `Operation`, use this instead.
                class_init_signature = inspect.Signature(
                    [SELF_PARAMETER, NAME_PARAMETER]
                )
            else:
                class_init_signature = inspect.signature(op_class.__init__)
                # Check call to super.
                self.assertContainsSubsequence(
                    inspect.getsource(op_class.__init__),
                    "super().__init__(name=name)",
                    f"`{op_class.__name__}.__init__` is not calling "
                    "`super().__init__(name=name)`",
                )

            static_parameters = list(class_init_signature.parameters.values())
            # Remove `self`.
            static_parameters = static_parameters[1:]
            name_index = -1
            if static_parameters[-1].kind == inspect.Parameter.VAR_KEYWORD:
                # When there is a `**kwargs`, `name` appears before.
                name_index = -2
            # Verify `name` parameter is as expected.
            self.assertEqual(
                static_parameters[name_index],
                NAME_PARAMETER,
                f"The last parameter of `{op_class.__name__}.__init__` "
                "should be `name`, should be a keyword only, and should "
                "have a default value of `None`",
            )
            # Remove `name`, it's not part of the op signature.
            static_parameters.pop(name_index)

            # ==== Check static parameters ====
            # Static parameters are declared in the class' `__init__`.
            # Dynamic parameters are declared in the class' `call` method.
            # - they should all appear in the op signature with the same name
            # - they should have the same default value
            # - they should appear in the same order and usually with the
            #   dynamic parameters first, and the static parameters last.
            dynamic_parameters = list(
                inspect.signature(op_class.call).parameters.values()
            )[1:]  # Remove self
            op_signature = inspect.signature(op_function)
            for p in dynamic_parameters + static_parameters:
                # Check the same name appears in the op signature
                self.assertIn(
                    p.name,
                    op_signature.parameters,
                    f"Op function `{name}` is missing a parameter that is in "
                    f"op class `{op_class.__name__}`",
                )
                # Check default values are the same
                self.assertEqual(
                    p.default,
                    op_signature.parameters[p.name].default,
                    f"Default mismatch for parameter `{p.name}` between op "
                    f"function `{name}` and op class `{op_class.__name__}`",
                )
            dynamic_parameter_names = [p.name for p in dynamic_parameters]
            static_parameter_names = [p.name for p in static_parameters]

            # Check for obvious mistakes in parameters that were made dynamic
            # but should be static.
            for p in dynamic_parameters:
                self.assertNotIn(
                    p.name,
                    STATIC_PARAMETER_NAMES,
                    f"`{p.name}` should not be a dynamic parameter in op class "
                    f"`{op_class.__name__}` based on its name.",
                )
                self.assertNotIsInstance(
                    p.default,
                    (bool, str),
                    f"`{p.name}` should not be a dynamic parameter in op class "
                    f"`{op_class.__name__}` based on default `{p.default}`.",
                )

            # Check order of parameters.
            if name in (
                "fori_loop",
                "vectorized_map",
                "while_loop",
                "batch_normalization",
                "dot_product_attention",
                "average",
                "einsum",
                "full",
                "pad",
            ):
                # Loose case:
                # order of parameters is preserved but they are interspersed.
                op_dynamic_parameter_names = [
                    name
                    for name in op_signature.parameters.keys()
                    if name in dynamic_parameter_names
                ]
                self.assertEqual(
                    op_dynamic_parameter_names,
                    dynamic_parameter_names,
                    "Inconsistent dynamic parameter order for op "
                    f"function `{name}` and op class `{op_class.__name__}`",
                )
                op_static_parameter_names = [
                    name
                    for name in op_signature.parameters.keys()
                    if name in static_parameter_names
                ]
                self.assertEqual(
                    op_static_parameter_names,
                    static_parameter_names,
                    "Inconsistent static parameter order for op "
                    f"function `{name}` and op class `{op_class.__name__}`",
                )
            else:
                # Strict case:
                # dynamic parameters first and static parameters at the end.
                self.assertEqual(
                    list(op_signature.parameters.keys()),
                    dynamic_parameter_names + static_parameter_names,
                    "Inconsistent static parameter position for op "
                    f"function `{name}` and op class `{op_class.__name__}`",
                )

            # ==== Check compute_output_spec is implement ====
            # - op class should override Operation's `compute_output_spec`
            self.assertTrue(
                hasattr(op_class, "compute_output_spec")
                and op_class.compute_output_spec
                is not Operation.compute_output_spec,
                f"Op class `{op_class.__name__}` should override "
                "`compute_output_spec`",
            )

    @parameterized.named_parameters(named_product(module_name=OPS_MODULES))
    def test_backend_consistency(self, module_name):
        ops_module = getattr(ops, module_name)
        backend_ops_module = getattr(backend, module_name)
        for op_function, _ in op_functions_and_classes(ops_module):
            name = op_function.__name__
            if hasattr(ops_module, f"_{name}"):
                # For an op function `foo`, if there is a function named `_foo`,
                # that means we have a backend independent implementation.
                continue
            if name in ("view_as_complex", "view_as_real", "get_item"):
                # These ops have an inlined backend independent implementation.
                continue

            # ==== Check backend implementation ====
            # - op should have an implementation in every backend
            # - op implementation should have the same signature (same
            #   parameters, same order, same defaults)
            backend_op_function = getattr(backend_ops_module, name, None)
            if backend.backend() == "openvino" and backend_op_function is None:
                # Openvino is still missing a number of ops.
                continue
            self.assertIsNotNone(backend_op_function, f"Missing op `{name}`")
            if name == "multi_hot":
                # multi_hot has code to massage the input parameters before
                # calling the backend implementation, so the signature is
                # different on purpose.
                continue
            # Signature should match in every way.
            self.assertEqual(
                inspect.signature(backend_op_function),
                inspect.signature(op_function),
                f"Signature mismatch for `{name}`",
            )
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/ops/ops_test.py",
"license": "Apache License 2.0",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
keras-team/keras:keras/src/callbacks/monitor_callback.py | import warnings
from keras.src import ops
from keras.src.callbacks.callback import Callback
from keras.src.trainers import compile_utils
class MonitorCallback(Callback):
    """Base class for callbacks that monitor a quantity and evaluates
    improvements.

    This class provides common functionality for callbacks that monitor a
    metric during training to determine whether a condition has been met,
    such as improvement over time. It encapsulates logic for selecting
    the comparison operation based on a `monitor` value and `mode`, and
    computing whether a new value is an improvement.

    It is intended to be subclassed by other callbacks like `ModelCheckpoint`,
    `EarlyStopping`, or `ReduceLROnPlateau`, and is not meant to be used
    directly.

    Arguments:
        monitor: Quantity to be monitored. Defaults to `"val_loss"`.
        mode: One of `{"auto", "min", "max"}`. In `min` mode, training will aim
            to minimize the monitored quantity; in `'max'` mode it will aim to
            maximize it.; in `"auto"` mode, the direction is automatically
            inferred from the name of the monitored quantity. Defaults to
            `"auto"`.
        baseline: Floating point initial "best" value of the metric to be
            monitored. If `None` (default), the first monitored value will be
            used.
        min_delta: Minimum change in the monitored quantity to qualify as an
            improvement, i.e. an absolute change of less than min_delta, will
            count as no improvement. Defaults to `0`.

    Raises:
        ValueError: If `mode='auto'` is selected and the direction of the metric
            cannot be inferred.
    """

    def __init__(
        self,
        monitor="val_loss",
        mode="auto",
        baseline=None,
        min_delta=0,
    ):
        super().__init__()
        if mode not in ["auto", "min", "max"]:
            # Unknown modes are tolerated with a warning, not an error.
            warnings.warn(
                f"{self.__class__.__name__} mode '{mode}' is unknown, fallback "
                "to auto mode.",
                stacklevel=2,
            )
            mode = "auto"
        self.monitor = monitor
        self.mode = mode
        # `best` starts at the baseline; `None` means "use the first
        # monitored value as the initial best" (see `_is_improvement`).
        self.best = baseline
        self.min_delta = abs(min_delta)
        # Resolved lazily by `_set_monitor_op` because auto mode may need
        # `self.model.metrics`, which only exists once a model is set.
        self.monitor_op = None

    def _set_monitor_op(self):
        # Resolve `monitor_op` to `ops.less` (lower is better) or
        # `ops.greater` (higher is better).
        if self.mode == "min":
            self.monitor_op = ops.less
        elif self.mode == "max":
            self.monitor_op = ops.greater
        else:
            # Auto mode: infer the direction from the metric itself.
            metric_name = self.monitor.removeprefix("val_")
            if metric_name == "loss":
                self.monitor_op = ops.less
            if hasattr(self.model, "metrics"):
                # Flatten compiled metric containers into a single list.
                all_metrics = []
                for m in self.model.metrics:
                    if isinstance(
                        m,
                        (
                            compile_utils.CompileMetrics,
                            compile_utils.MetricsList,
                        ),
                    ):
                        all_metrics.extend(m.metrics)
                # A metric's `_direction` ("up"/other) overrides the
                # default set above for "loss".
                for m in all_metrics:
                    if m.name == metric_name:
                        if hasattr(m, "_direction"):
                            if m._direction == "up":
                                self.monitor_op = ops.greater
                            else:
                                self.monitor_op = ops.less
            if self.monitor_op is None:
                raise ValueError(
                    f"{self.__class__.__name__} callback received "
                    f"monitor={self.monitor}, but Keras isn't able to "
                    "automatically determine whether that metric should be "
                    "maximized or minimized. Pass `mode='max'` in order to "
                    "monitor based on the highest metric value, or pass "
                    "`mode='min'` in order to use the lowest value."
                )
        if self.monitor_op == ops.less:
            # Negate the delta so `_is_improvement` can use a single
            # formula for both directions.
            # NOTE(review): this flips the sign on every call — calling
            # `_set_monitor_op` twice would flip `min_delta` back to
            # positive; verify callers invoke this only once.
            self.min_delta *= -1

    def _is_improvement(self, monitor_value, reference_value):
        # No previous best recorded: any value counts as an improvement.
        if reference_value is None:
            return True
        return self.monitor_op(monitor_value - self.min_delta, reference_value)
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/callbacks/monitor_callback.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
keras-team/keras:keras/src/callbacks/monitor_callback_test.py | import numpy as np
import pytest
from keras.src import callbacks
from keras.src import layers
from keras.src import metrics
from keras.src import models
from keras.src import ops
from keras.src import testing
class MonitorCallbackTest(testing.TestCase):
    """Tests for `MonitorCallback`'s direction resolution and `min_delta`."""

    @pytest.mark.requires_trainable_backend
    def test_monitor_op_logic(self):
        x_train = np.random.random((10, 5))
        y_train = np.random.random((10, 1))
        x_test = np.random.random((10, 5))
        y_test = np.random.random((10, 1))
        model = models.Sequential(
            (
                layers.Dense(1, activation="relu"),
                layers.Dense(1, activation="relu"),
            )
        )
        model.compile(
            loss="mae",
            optimizer="adam",
            metrics=[
                "mse",
                "acc",
                "accuracy",
                "hinge",
                metrics.F1Score(name="f1_score"),
            ],
        )
        # (mode, monitored quantity, expected resolved direction)
        cases = [
            ("max", "val_mse", "max"),
            ("min", "val_loss", "min"),
            ("auto", "val_mse", "min"),
            ("auto", "loss", "min"),
            ("auto", "acc", "max"),
            ("auto", "val_accuracy", "max"),
            ("auto", "hinge", "min"),
            ("auto", "f1_score", "max"),
        ]
        for mode, monitor, expected_mode in cases:
            monitor_callback = callbacks.MonitorCallback(monitor, mode)
            monitor_callback.set_model(model)
            # Fit so the model's compiled metrics are built before the
            # callback tries to inspect them.
            model.fit(
                x_train,
                y_train,
                batch_size=5,
                validation_data=(x_test, y_test),
                epochs=2,
                verbose=0,
            )
            monitor_callback._set_monitor_op()
            if expected_mode == "max":
                monitor_op = ops.greater
            else:
                monitor_op = ops.less
            self.assertEqual(monitor_callback.monitor_op, monitor_op)

        # An unknown metric name cannot be resolved in auto mode.
        with self.assertRaises(ValueError):
            monitor = "unknown"
            monitor_callback = callbacks.MonitorCallback(monitor)
            monitor_callback.set_model(model)
            model.fit(
                x_train,
                y_train,
                batch_size=5,
                validation_data=(x_test, y_test),
                epochs=2,
                verbose=0,
            )
            monitor_callback._set_monitor_op()

    @pytest.mark.requires_trainable_backend
    def test_min_delta(self):
        monitor_callback = callbacks.MonitorCallback(mode="max", min_delta=0.5)
        monitor_callback._set_monitor_op()
        self.assertTrue(monitor_callback._is_improvement(0.75, 0))
        # A `None` reference always counts as an improvement.
        self.assertTrue(monitor_callback._is_improvement(0.5, None))
        self.assertFalse(monitor_callback._is_improvement(0.5, 0))
        self.assertFalse(monitor_callback._is_improvement(0.2, 0.5))
| {
"repo_id": "keras-team/keras",
"file_path": "keras/src/callbacks/monitor_callback_test.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
kovidgoyal/calibre:src/calibre/gui2/preferences/look_feel_tabs/font_selection_dialog.py | from qt.core import QDialog, QDialogButtonBox, QFontDatabase, QFontInfo, QHBoxLayout, QLabel, QListWidget, Qt, QVBoxLayout, QWidget, pyqtSignal
class FontSelectionDialog(QDialog):
    """Dialog to pick a font family and style with live size previews.

    Only smoothly scalable fonts are offered. The current selection is
    previewed at three sizes (minimum, base, maximum). On acceptance the
    selection is emitted via the `fontSelected` signal.
    """

    fontSelected = pyqtSignal(str, str)  # family, style

    def __init__(self, family: str = '', style: str = '', min_size=8, medium_size=12, max_size=24, parent=None):
        super().__init__(parent)
        if family:
            self.initial_family, self.initial_style = family, style
        else:
            # No initial font given: fall back to the dialog's own font.
            font = self.font()
            fi = QFontInfo(font)
            self.initial_family = fi.family()
            self.initial_style = fi.styleName()
        self.min_size = min_size
        self.medium_size = medium_size
        self.max_size = max_size
        self.setWindowTitle(_('Select font'))
        self._setup_ui()
        self._populate_families()
        self.families_list.setFocus(Qt.FocusReason.OtherFocusReason)
        self.resize(self.sizeHint())

    def _setup_ui(self):
        """Build the two selection lists, the preview area and the buttons."""
        main_layout = QVBoxLayout(self)

        # Top section: Font families and styles side by side
        lists_layout = QHBoxLayout()

        # Font families list
        families_layout = QVBoxLayout()
        families_label = QLabel(_('&Family:'))
        self.families_list = QListWidget()
        families_label.setBuddy(self.families_list)
        self.families_list.currentItemChanged.connect(self._on_family_changed)
        families_layout.addWidget(families_label)
        families_layout.addWidget(self.families_list)

        # Styles list
        styles_layout = QVBoxLayout()
        styles_label = QLabel(_('&Style:'))
        self.styles_list = QListWidget()
        styles_label.setBuddy(self.styles_list)
        self.styles_list.currentItemChanged.connect(self._on_style_changed)
        styles_layout.addWidget(styles_label)
        styles_layout.addWidget(self.styles_list)

        lists_layout.addLayout(families_layout, 2)
        lists_layout.addLayout(styles_layout, 1)
        main_layout.addLayout(lists_layout, stretch=20)

        # Preview area
        preview_group = QWidget()
        preview_layout = QVBoxLayout(preview_group)
        preview_layout.setContentsMargins(0, 10, 0, 10)
        preview_container = QWidget()
        self.preview_layout = QVBoxLayout(preview_container)
        self.preview_layout.setSpacing(10)
        self.preview_layout.setAlignment(Qt.AlignmentFlag.AlignTop)

        # Preview labels for different sizes.
        # Bug fix: previously the medium/large labels were constructed as
        # `QLabel(self.preview_small)`, which passes the small label as
        # the *parent* widget, not as the text. Use the sample text for
        # all three labels instead.
        sample_text = 'The quick brown fox jumps over the lazy dog'
        self.preview_small = QLabel(sample_text)
        self.preview_medium = QLabel(sample_text)
        self.preview_large = QLabel(sample_text)
        self.preview_layout.addWidget(self.preview_small)
        self.preview_layout.addWidget(self.preview_medium)
        self.preview_layout.addWidget(self.preview_large)
        preview_layout.addWidget(preview_container)
        main_layout.addWidget(preview_group)

        # OK/Cancel buttons
        button_box = QDialogButtonBox(QDialogButtonBox.StandardButton.Ok | QDialogButtonBox.StandardButton.Cancel)
        button_box.accepted.connect(self.accept)
        button_box.rejected.connect(self.reject)
        main_layout.addWidget(button_box)

    def _populate_families(self):
        '''Populate the families list with smoothly scalable fonts only'''
        self.families_list.clear()
        # Get all font families and filter for smoothly scalable ones.
        all_families = QFontDatabase.families()
        scalable_families = sorted(
            family for family in all_families
            if QFontDatabase.isSmoothlyScalable(family)
        )
        self.families_list.addItems(scalable_families)
        # Select the initial item if available
        if self.families_list.count() > 0:
            # Bug fix: the index of the initial family must be computed
            # *after* sorting. The previous code recorded the position in
            # enumeration order and then sorted the list, which selected
            # the wrong row whenever the two orders differed.
            try:
                idx = scalable_families.index(self.initial_family)
            except ValueError:
                idx = 0
            self.families_list.setCurrentRow(idx)
            self._on_family_changed(self.families_list.currentItem(), None)

    def _on_family_changed(self, current, previous):
        '''When a family is selected, populate the styles list'''
        if not current:
            self.styles_list.clear()
            return
        family = current.text()
        self.styles_list.clear()
        # Get all styles for this family
        styles = QFontDatabase.styles(family)
        idx = 0
        # Restore the initial style only when still on the initial family.
        if family == self.initial_family and self.initial_style in styles:
            idx = styles.index(self.initial_style)
        self.styles_list.addItems(styles)
        # Select first style if available
        if self.styles_list.count() > 0:
            self.styles_list.setCurrentRow(idx)
            self._update_preview()

    def _on_style_changed(self, current, previous):
        '''Update the preview when style changes'''
        self._update_preview()

    def _update_preview(self):
        '''Update the preview labels with the selected font'''
        family_item = self.families_list.currentItem()
        style_item = self.styles_list.currentItem()
        if not family_item or not style_item:
            return
        family = family_item.text()
        style = style_item.text()
        # Prefer a sample string appropriate for the font's writing system.
        systems = tuple(QFontDatabase.writingSystems(family))
        text = ''
        for s in systems:
            if (t := QFontDatabase.writingSystemSample(s)):
                text = t
                break

        def s(label, sz):
            # Apply the selected family/style at point size `sz`.
            font = QFontDatabase.font(family, style, int(sz))
            font.setPointSizeF(sz)
            label.setFont(font)
            label.setText('')
            if label is self.preview_small:
                prefix = _('Minimum size:')
            elif label is self.preview_medium:
                prefix = _('Base size:')
            else:
                prefix = _('Maximum size:')
            label.setText(prefix + ' ' + text)

        s(self.preview_small, self.min_size)
        s(self.preview_medium, self.medium_size)
        s(self.preview_large, self.max_size)

    def selected_font(self):
        '''Returns the selected font family and style as a tuple'''
        family_item = self.families_list.currentItem()
        style_item = self.styles_list.currentItem()
        if family_item and style_item:
            return family_item.text(), style_item.text()
        # Nothing selected in one of the lists.
        return None, None

    def get_font(self, size=None):
        '''Returns a QFont object for the selected family and style'''
        family, style = self.selected_font()
        if family and style:
            if size is None:
                size = self.medium_size
            return QFontDatabase.font(family, style, size)
        return None

    def accept(self):
        '''Override accept to emit signal with selected font'''
        family, style = self.selected_font()
        if family and style:
            self.fontSelected.emit(family, style)
        super().accept()
if __name__ == '__main__':
    # Manual smoke test: open the dialog and print the selection.
    from calibre.gui2 import Application
    app = Application()

    def show_dialog():
        dialog = FontSelectionDialog(min_size=10, medium_size=14, max_size=28)
        if dialog.exec() == QDialog.DialogCode.Accepted:
            family, style = dialog.selected_font()
            selected_font = dialog.get_font(16)
            print(f'Selected: {family} - {style}')
            print(f'Font: {selected_font.family()} {selected_font.styleName()} {selected_font.pointSize()}pt')

    show_dialog()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/gui2/preferences/look_feel_tabs/font_selection_dialog.py",
"license": "GNU General Public License v3.0",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/utils/translator/test_translator.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2026, Kovid Goyal <kovid at kovidgoyal.net>
import gettext
import io
import unittest
import zipfile
from calibre.utils.localization import available_translations, get_lc_messages_path
from calibre_extensions.translator import Translator
class TestTranslator(unittest.TestCase):
    """Runs the module-level comparison for every bundled translation."""

    def test_translator(self):
        # An empty payload exercises the NullTranslations code path.
        test_translator(self, 'und', b'')
        with zipfile.ZipFile(P('localization/locales.zip', allow_user_override=False), 'r') as zf:
            for lang in available_translations():
                mpath = get_lc_messages_path(lang)
                if mpath is not None:
                    data = zf.read(mpath + '/messages.mo')
                    test_translator(self, lang, data)
                    # The ISO language/country catalogs may be absent for
                    # some locales; skip those quietly.
                    for q in ('iso639.mo', 'iso3166.mo'):
                        try:
                            data = zf.read(mpath + '/' + q)
                        except KeyError:
                            continue
                        test_translator(self, lang, data, q)
def test_translator(self: TestTranslator, lang: str, data: bytes, q: str = 'messages.mo') -> None:
    '''Compare the native Translator extension with the pure-python gettext
    implementation on the same .mo *data*, asserting identical behavior for
    info(), charset(), plural forms, gettext, ngettext and pgettext.'''
    n = Translator(data) if data else Translator()
    o = gettext.GNUTranslations(io.BytesIO(data)) if data else gettext.NullTranslations()
    which = f'{lang} - {q}'
    self.assertEqual(o.info(), n.info(), f'info() not equal for language: {which}')
    self.assertEqual(o.charset(), n.charset(), f'charset() not equal for language: {which}')
    if hasattr(o, 'plural'):
        # Exercise the compiled plural-forms expression over a range of counts
        pf = o.info().get('plural-forms')
        for i in range(1, 100):
            self.assertEqual(o.plural(i), n.plural(i), f'plural({i}) not equal for language: {which} and plural-form: {pf}')
    if q == 'messages.mo':
        # Full message lookups only make sense for the main catalog
        og, ng = o.gettext, n.gettext
        for x in ('Add books', 'Series', 'Tags', 'Folder'):
            if lang == 'ar' and x == 'Folder':
                # this is a bug in the python stdlib implementation of gettext() where it
                # returns msgstr[plural(1)] instead of msgstr[0] for gettext().
                # In the Arabic translation plural(1) == 1 instead of 0
                continue
            self.assertEqual(og(x), ng(x), f'gettext({x!r}) not equal for language: {which}')
        og, ng = o.ngettext, n.ngettext
        for singular, plural in (('Series', 'Series'), ('Folder', 'Folders')):
            for i in range(1, 10):
                self.assertEqual(og(singular, plural, i), ng(singular, plural, i), f'ngettext({singular!r}, {plural!r}, {i}) not equal for language: {which}')
        og, ng = o.pgettext, n.pgettext
        for context, msg in (('edit book actions', 'Miscellaneous'), ('edit book file type', 'Miscellaneous')):
            self.assertEqual(og(context, msg), ng(context, msg), f'pgettext({context!r}, {msg!r}) not equal for language: {which}')
def find_tests():
    '''Return the unittest suite for this module.'''
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromTestCase(TestTranslator)
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/utils/translator/test_translator.py",
"license": "GNU General Public License v3.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
kovidgoyal/calibre:.github/workflows/macos_crash_report.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2024, Kovid Goyal <kovid at kovidgoyal.net>
import json
import posixpath
import sys
from collections import namedtuple
from collections.abc import Mapping
from datetime import datetime
from enum import Enum
from functools import cached_property
from typing import IO
# One stack frame: unsymbolicated frames carry image_base + image_offset,
# symbolicated ones carry symbol + symbol_offset (see UserModeCrashReport.frames)
Frame = namedtuple('Frame', 'image_name image_base image_offset symbol symbol_offset')
# A single CPU register as (name, integer value)
Register = namedtuple('Register', 'name value')
def surround(x: str, start: int, end: int) -> str:
if sys.stdout.isatty():
x = f'\033[{start}m{x}\033[{end}m'
return x
def cyan(x: str) -> str:
    '''Return *x* wrapped in the SGR codes for cyan foreground (96/39).'''
    return surround(x, 96, 39)
def bold(x: str) -> str:
    '''Return *x* wrapped in the SGR codes for bold (1/22).'''
    return surround(x, 1, 22)
class BugType(Enum):
    '''The "bug_type" codes macOS uses to classify .ips diagnostic reports.

    Member names are descriptive labels; values are the raw bug_type strings
    found in the first (metadata) line of a report file.
    '''
    WatchdogTimeout = '28'
    BasebandStats = '195'
    GPUEvent = '284'
    Sandbox = '187'
    TerminatingStackshot = '509'
    ServiceWatchdogTimeout = '29'
    Session = '179'
    LegacyStackshot = '188'
    MACorrelation = '197'
    iMessages = '189'
    log_power = '278'
    PowerLog = 'powerlog'
    DuetKnowledgeCollector2 = '58'
    BridgeRestore = '83'
    LegacyJetsam = '198'
    ExcResource_385 = '385'
    Modem = '199'
    Stackshot = '288'
    SystemInformation = 'system_profile'
    Jetsam_298 = '298'
    MemoryResource = '30'
    Bridge = '31'
    DifferentialPrivacy = 'diff_privacy'
    FirmwareIntegrity = '32'
    CoreAnalytics_33 = '33'
    AutoBugCapture = '34'
    EfiFirmwareIntegrity = '35'
    SystemStats = '36'
    AnonSystemStats = '37'
    Crash_9 = '9'
    Jetsam_98 = '98'
    LDCM = '100'
    Panic_10 = '10'
    Spin = '11'
    CLTM = '101'
    Hang = '12'
    Panic_110 = '110'
    ConnectionFailure = '13'
    MessageTracer = '14'
    LowBattery = '120'
    Siri = '201'
    ShutdownStall = '17'
    Panic_210 = '210'
    SymptomsCPUUsage = '202'
    AssumptionViolation = '18'
    CoreHandwriting = 'chw'
    IOMicroStackShot = '44'
    CoreAnalytics_211 = '211'
    SiriAppPrediction = '203'
    spin_45 = '45'
    PowerMicroStackshots = '220'
    BTMetadata = '212'
    SystemMemoryReset = '301'
    ResetCount = '115'
    AutoBugCapture_204 = '204'
    WifiCrashBinary = '221'
    MicroRunloopHang = '310'
    Rosetta = '213'
    glitchyspin = '302'
    System = '116'
    IOPowerSources = '141'
    PanicStats = '205'
    PowerLog_230 = '230'
    LongRunloopHang = '222'
    HomeProductsAnalytics = '311'
    DifferentialPrivacy_150 = '150'
    Rhodes = '214'
    ProactiveEventTrackerTransparency = '303'
    WiFi = '117'
    SymptomsCPUWakes = '142'
    SymptomsCPUUsageFatal = '206'
    Crash_109 = '109'
    ShortRunloopHang = '223'
    CoreHandwriting_231 = '231'
    ForceReset = '151'
    SiriAppSelection = '215'
    PrivateFederatedLearning = '304'
    Bluetooth = '118'
    SCPMotion = '143'
    HangSpin = '207'
    StepCount = '160'
    RTCTransparency = '224'
    DiagnosticRequest = '312'
    MemorySnapshot = '152'
    Rosetta_B = '216'
    AudioAccessory = '305'
    General = '119'
    HotSpotIOMicroSS = '144'
    GeoServicesTransparency = '233'
    MotionState = '161'
    AppStoreTransparency = '225'
    SiriSearchFeedback = '313'
    BearTrapReserved = '153'
    Portrait = '217'
    AWDMetricLog = 'metriclog'
    SymptomsIO = '145'
    SubmissionReserved = '170'
    WifiCrash = '209'
    Natalies = '162'
    SecurityTransparency = '226'
    BiomeMapReduce = '234'
    MemoryGraph = '154'
    MultichannelAudio = '218'
    honeybee_payload = '146'
    MesaReserved = '171'
    WifiSensing = '235'
    SiriMiss = '163'
    ExcResourceThreads_227 = '227'
    TestA = 'T01'
    NetworkUsage = '155'
    WifiReserved = '180'
    SiriActionPrediction = '219'
    honeybee_heartbeat = '147'
    ECCEvent = '172'
    KeyTransparency = '236'
    SubDiagHeartBeat = '164'
    ThirdPartyHang = '228'
    OSFault = '308'
    CoreTime = '156'
    WifiDriverReserved = '181'
    Crash_309 = '309'
    honeybee_issue = '148'
    CellularPerfReserved = '173'
    TestB = 'T02'
    StorageStatus = '165'
    SiriNotificationTransparency = '229'
    TestC = 'T03'
    CPUMicroSS = '157'
    AccessoryUpdate = '182'
    xprotect = '20'
    MultitouchFirmware = '149'
    MicroStackshot = '174'
    AppLaunchDiagnostics = '238'
    KeyboardAccuracy = '166'
    GPURestart = '21'
    FaceTime = '191'
    DuetKnowledgeCollector = '158'
    OTASUpdate = '183'
    ExcResourceThreads_327 = '327'
    ExcResource_22 = '22'
    DuetDB = '175'
    ThirdPartyHangDeveloper = '328'
    PrivacySettings = '167'
    GasGauge = '192'
    MicroStackShots = '23'
    BasebandCrash = '159'
    GPURestart_184 = '184'
    SystemWatchdogCrash = '409'
    FlashStatus = '176'
    SleepWakeFailure = '24'
    CarouselEvent = '168'
    AggregateD = '193'
    WakeupsMonitorViolation = '25'
    DifferentialPrivacy_50 = '50'
    ExcResource_185 = '185'
    UIAutomation = '177'
    ping = '26'
    SiriTransaction = '169'
    SURestore = '194'
    KtraceStackshot = '186'
    WirelessDiagnostics = '27'
    PowerLogLite = '178'
    SKAdNetworkAnalytics = '237'
    HangWorkflowResponsiveness = '239'
    CompositorClientHang = '243'
class CrashReportBase:
    '''A parsed macOS .ips diagnostic report: metadata plus payload.

    The payload is JSON for modern reports and plain text for legacy ones;
    _parse() sets self._is_json accordingly and, for JSON payloads, replaces
    self._data with the decoded object.
    '''

    def __init__(self, metadata: Mapping, data: str, filename: str | None = None):
        self.filename = filename
        self._metadata = metadata
        self._data = data
        self._parse()

    def _parse(self):
        self._is_json = False
        try:
            modified_data = self._data
            if '\n \n' in modified_data:
                # Some reports contain a stray "\n \n" run inside a JSON string
                # value that breaks decoding; splice the payload back together
                # before parsing. TODO confirm which report variants need this.
                modified_data, rest = modified_data.split('\n \n', 1)
                rest = '",' + rest.split('",', 1)[1]
                modified_data += rest
            self._data = json.loads(modified_data)
            self._is_json = True
        except json.decoder.JSONDecodeError:
            pass  # legacy text report: keep the raw text in self._data

    @cached_property
    def bug_type(self) -> BugType:
        return BugType(self.bug_type_str)

    @cached_property
    def bug_type_str(self) -> str:
        return self._metadata['bug_type']

    @cached_property
    def incident_id(self):
        return self._metadata.get('incident_id')

    @cached_property
    def timestamp(self) -> datetime:
        # The trailing timezone token is dropped before parsing — presumably
        # because strptime cannot handle Apple's suffix; TODO confirm.
        timestamp = self._metadata.get('timestamp')
        timestamp_without_timezone = timestamp.rsplit(' ', 1)[0]
        return datetime.strptime(timestamp_without_timezone, '%Y-%m-%d %H:%M:%S.%f')

    @cached_property
    def name(self) -> str:
        return self._metadata.get('name')

    def __repr__(self) -> str:
        filename = ''
        if self.filename:
            filename = f'FILENAME:{posixpath.basename(self.filename)} '
        # bug fix: the filename prefix was computed but never interpolated
        return f'<{self.__class__} {filename}TIMESTAMP:{self.timestamp}>'

    def __str__(self) -> str:
        filename = ''
        if self.filename:
            filename = self.filename
        # bug fix: include the (previously computed but unused) filename
        return cyan(f'{self.incident_id} {self.timestamp}\n{filename}\n\n')
class UserModeCrashReport(CrashReportBase):
    '''A crash report for a user-space process.

    Every accessor supports both the modern JSON payload and the legacy plain
    text payload; self._is_json (set by CrashReportBase._parse) selects the branch.
    '''

    def _parse_field(self, name: str) -> str:
        # Value of a "Header: value" line in a text report, or None if absent
        name += ':'
        for line in self._data.split('\n'):
            if line.startswith(name):
                field = line.split(name, 1)[1]
                field = field.strip()
                return field

    @cached_property
    def faulting_thread(self) -> int:
        # Index of the thread that triggered the crash
        if self._is_json:
            return self._data['faultingThread']
        else:
            return int(self._parse_field('Triggered by Thread'))

    @cached_property
    def frames(self) -> list[Frame]:
        '''Stack frames of the faulting thread.'''
        result = []
        if self._is_json:
            thread_index = self.faulting_thread
            images = self._data['usedImages']
            for frame in self._data['threads'][thread_index]['frames']:
                image = images[frame['imageIndex']]
                result.append(
                    Frame(image_name=image.get('path'), image_base=image.get('base'), symbol=frame.get('symbol'),
                          image_offset=frame.get('imageOffset'), symbol_offset=frame.get('symbolLocation')))
        else:
            in_frames = False
            for line in self._data.split('\n'):
                if in_frames:
                    splitted = line.split()
                    if len(splitted) == 0:
                        break  # blank line terminates the frame list
                    assert splitted[-2] == '+'
                    image_base = splitted[-3]
                    if image_base.startswith('0x'):
                        result.append(Frame(image_name=splitted[1], image_base=int(image_base, 16), symbol=None,
                                            image_offset=int(splitted[-1]), symbol_offset=None))
                    else:
                        # symbolicated
                        result.append(Frame(image_name=splitted[1], image_base=None, symbol=image_base,
                                            image_offset=None, symbol_offset=int(splitted[-1])))
                if line.startswith(f'Thread {self.faulting_thread} Crashed:'):
                    in_frames = True
        return result

    @cached_property
    def registers(self) -> list[Register]:
        '''CPU register state of the faulting thread.'''
        result = []
        if self._is_json:
            thread_state = self._data['threads'][self.faulting_thread]['threadState']
            # bug fix: the x (general purpose) registers used to be appended
            # twice — once by a dedicated "if 'x' in thread_state" pre-pass and
            # again by this loop. A single pass over threadState is sufficient.
            for name, value in thread_state.items():
                if name == 'x':
                    for j, reg_x in enumerate(value):
                        result.append(Register(name=f'x{j}', value=reg_x['value']))
                elif isinstance(value, dict):
                    result.append(Register(name=name, value=value['value']))
        else:
            in_frames = False
            for line in self._data.split('\n'):
                if in_frames:
                    splitted = line.split()
                    if len(splitted) == 0:
                        break
                    # Lines look like "x0: 0x1  x1: 0x2 ..." — name/value pairs
                    for i in range(0, len(splitted), 2):
                        register_name = splitted[i]
                        if not register_name.endswith(':'):
                            break
                        register_name = register_name[:-1]
                        register_value = int(splitted[i + 1], 16)
                        result.append(Register(name=register_name, value=register_value))
                if line.startswith(f'Thread {self.faulting_thread} crashed with ARM Thread State'):
                    in_frames = True
        return result

    @cached_property
    def exception_type(self):
        if self._is_json:
            return self._data['exception'].get('type')
        else:
            return self._parse_field('Exception Type')

    @cached_property
    def exception_subtype(self) -> str | None:
        if self._is_json:
            return self._data['exception'].get('subtype')
        else:
            return self._parse_field('Exception Subtype')

    @cached_property
    def application_specific_information(self) -> str | None:
        result = ''
        if self._is_json:
            asi = self._data.get('asi')
            if asi is None:
                return None
            return asi
        else:
            in_frames = False
            for line in self._data.split('\n'):
                if in_frames:
                    line = line.strip()
                    if len(line) == 0:
                        break
                    result += line + '\n'
                if line.startswith('Application Specific Information:'):
                    in_frames = True
        result = result.strip()
        if not result:
            return None
        return result

    def __str__(self) -> str:
        result = super().__str__()
        result += bold(f'Exception: {self.exception_type}\n')
        if self.exception_subtype:
            result += bold('Exception Subtype: ')
            result += f'{self.exception_subtype}\n'
        if self.application_specific_information:
            result += bold('Application Specific Information: ')
            result += str(self.application_specific_information)
            result += '\n'
        result += bold('Registers:')
        for i, register in enumerate(self.registers):
            if i % 4 == 0:
                result += '\n'
            result += f'{register.name} = 0x{register.value:016x} '.rjust(30)
        result += '\n\n'
        result += bold('Frames:\n')
        for frame in self.frames:
            image_base = '_HEADER'
            if frame.image_base is not None:
                image_base = f'0x{frame.image_base:x}'
            result += f'\t[{frame.image_name}] {image_base}'
            if frame.image_offset:
                result += f' + 0x{frame.image_offset:x}'
            if frame.symbol is not None:
                result += f' ({frame.symbol} + 0x{frame.symbol_offset:x})'
            result += '\n'
        return result
def get_crash_report_from_file(crash_report_file: IO) -> CrashReportBase:
    '''Parse one .ips file: the first line is JSON metadata, the rest is the report body.'''
    metadata = json.loads(crash_report_file.readline())
    body = crash_report_file.read()
    parser = CrashReportBase
    try:
        bug_type = BugType(metadata['bug_type'])
    except ValueError:
        pass  # unknown bug_type: fall back to the generic parser
    else:
        crash_like = {
            BugType.Crash_109: UserModeCrashReport,
            BugType.Crash_309: UserModeCrashReport,
            BugType.ExcResourceThreads_327: UserModeCrashReport,
            BugType.ExcResource_385: UserModeCrashReport,
        }
        parser = crash_like.get(bug_type, CrashReportBase)
    return parser(metadata, body, crash_report_file.name)
if __name__ == '__main__':
    # Parse and pretty-print the crash report given as the last CLI argument
    with open(sys.argv[-1]) as crash_file:
        print(get_crash_report_from_file(crash_file))
| {
"repo_id": "kovidgoyal/calibre",
"file_path": ".github/workflows/macos_crash_report.py",
"license": "GNU General Public License v3.0",
"lines": 389,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/db/tests/page_counts.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import io
from calibre.db.tests.base import BaseTest
class PageCountTest(BaseTest):
    '''Thin unittest wrappers around the module-level page-count test helpers.'''

    # shorthand used by the helper functions
    ae = BaseTest.assertEqual

    def test_page_count_in_db(self):
        test_page_count_in_db(self)

    def test_page_count(self):
        from calibre.library.page_count import test_page_count
        test_page_count(self)

    def test_line_counting(self):
        from calibre.library.page_count import test_line_counting
        test_line_counting(self)
def test_page_count_in_db(self: BaseTest) -> None:
    '''End-to-end check that the db automatically counts and stores pages:
    backlog scanning, re-count triggers on format add, format priorities,
    and forced re-scans. Uses the MaintainPageCounts tick_event/count_callback
    test hooks to synchronize with the background thread.'''
    from calibre.db.constants import Pages
    from calibre.library.page_count import CHARS_PER_PAGE
    from calibre.utils.podofo import sample_pdf_data
    # A bit over two pages of text, so it rounds up to 3 pages
    txt_data = ('a ' * (2*CHARS_PER_PAGE + 10)).encode()
    db = self.init_cache()
    # test schema upgrade marked all books as needing scan
    def status():
        return set(db.backend.execute('SELECT pages,needs_scan FROM books_pages_link'))
    self.ae(status(), {(0,1)})
    self.ae(db.pages_needs_scan((1,2,19)), {1,2})
    counted = []
    db.maintain_page_counts.count_callback = counted.append
    db.maintain_page_counts.tick_event.clear()
    db.queue_pages_scan()
    db.maintain_page_counts.tick_event.wait()
    self.assertFalse(counted)
    self.ae(status(), {(-1,0)})
    self.ae(db.field_for('pages', 1), -1)
    # test that adding a format queues
    def add_format(fmt, data):
        db.maintain_page_counts.tick_event.clear()
        db.add_format(1, fmt, io.BytesIO(data), replace=True)
        db.maintain_page_counts.tick_event.wait()
    add_format('txt', txt_data)
    self.ae(status(), {(3,0),(-1,0)})
    self.ae(1, len(counted))
    p = db.get_pages(1)
    self.ae(p, Pages(3, p.algorithm, 'TXT', len(txt_data), p.timestamp))
    self.ae(db.field_for('pages', 1), 3)
    self.ae(db.get_metadata(1).pages, 3)
    self.ae(db.get_proxy_metadata(1).pages, 3)
    # test that re-adding the same format does not re-count
    add_format('txt', txt_data)
    self.ae(status(), {(3,0),(-1,0)})
    self.ae(1, len(counted))
    # test that re-adding a lower priority format does not re-count
    add_format('epub', txt_data)
    self.ae(status(), {(3,0),(-1,0)})
    self.ae(1, len(counted))
    # test that adding a higher priority format does recount
    add_format('pdf', sample_pdf_data())
    self.ae(2, len(counted))
    self.assertTrue(counted[-1].endswith('.pdf'))
    self.ae(status(), {(1,0),(-1,0)})
    p = db.get_pages(1)
    self.ae(p, Pages(1, p.algorithm, 'PDF', len(sample_pdf_data()), p.timestamp))
    # test forced re-scan
    db.maintain_page_counts.tick_event.clear()
    db.queue_pages_scan(1, force=True)
    db.maintain_page_counts.tick_event.wait()
    self.ae(3, len(counted))
    # test full force
    db.maintain_page_counts.tick_event.clear()
    db.queue_pages_scan(force=True)
    db.maintain_page_counts.tick_event.wait()
    self.ae(4, len(counted))
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/db/tests/page_counts.py",
"license": "GNU General Public License v3.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
kovidgoyal/calibre:src/calibre/db/page_count.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import os
import sys
import weakref
from collections.abc import Iterator
from contextlib import suppress
from queue import Queue, ShutDown
from threading import Event, Thread, current_thread
from typing import TYPE_CHECKING
import apsw
from calibre.constants import cache_dir
from calibre.customize.ui import all_input_formats
from calibre.db.constants import Pages
from calibre.library.page_count import Server
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.config import prefs
from calibre.utils.date import utcnow
from calibre.utils.filenames import make_long_path_useable
if TYPE_CHECKING:
from calibre.db.cache import Cache
CacheRef = weakref.ref['Cache']
# Sentinel "page counts" stored when a real count could not be produced
NO_COUNTABLE_FORMATS, COUNT_FAILED, DRMED_FORMATS = -1, -2, -3
# yield so that main thread is not starved by GIL
YIELD_TIME = 0.01
class MaintainPageCounts(Thread):
    '''Daemon thread that computes and stores page counts for library books.

    Work arrives via queue_scan(): a non-zero book id counts that one book, a
    zero means "process the backlog of rows marked needs_scan". Actual counting
    is delegated to a worker process managed by calibre.library.page_count.Server.
    '''

    def __init__(self, db_new_api: Cache):
        super().__init__(name='MaintainPageCounts', daemon=True)
        self.shutdown_event = Event()
        # set after every processed queue item; used by tests to synchronize
        self.tick_event = Event()
        # test hook: called with the path of every format file actually counted
        self.count_callback = lambda fmt_file: None
        # weak reference so this thread does not keep the Cache alive
        self.dbref: CacheRef = weakref.ref(db_new_api)
        self.queue: Queue[int] = Queue()
        self.tdir = ''
        self.failure_log_path = os.path.join(cache_dir(), f'page-count-failures-{db_new_api.library_id}.txt')

    def log_failure(self, book_id: int, ex: str, tb: str, fmt: str = '') -> None:
        # Append a human-readable failure record; must never raise
        title = ''
        if db := self.dbref():
            with suppress(Exception):
                title = db.field_for('title', book_id)
        if title:
            title = repr(title)
        msg = f'[{utcnow().isoformat(" ")}] Failed to count pages for book {book_id} {fmt} {title}, with error: {ex}'
        with suppress(Exception), open(make_long_path_useable(self.failure_log_path), 'a') as f:
            print(msg, file=f)
            print(tb, file=f)
            print(file=f)

    def queue_scan(self, book_id: int = 0):
        # book_id == 0 means: scan the whole needs_scan backlog
        self.queue.put(book_id)

    def shutdown(self) -> None:
        self.shutdown_event.set()
        self.queue.shutdown(immediate=True)
        self.dbref = lambda: None

    def wait_for_worker_shutdown(self, timeout: float | None = None) -> None:
        # Because of a python bug Queue.shutdown() does not work if
        # current_thread() is not alive (for e.g. during interpreter shutdown)
        if current_thread().is_alive() and self.is_alive():
            self.join(timeout)

    def run(self):
        self.all_input_formats = {f.upper() for f in all_input_formats()}
        # Format priority: user's input_format_order, with a fixed set of
        # cheap/accurate formats forced to the front (lower key sorts first)
        self.sort_order = {fmt.upper(): i for i, fmt in enumerate(prefs['input_format_order'])}
        for i, fmt in enumerate(('PDF', 'CBZ', 'CBR', 'CB7', 'TXT', 'TEXT', 'MD', 'TEXTTILE', 'MARKDOWN', 'EPUB')):
            self.sort_order[fmt] = -1000 + i
        g = self.queue.get
        with Server() as server, TemporaryDirectory() as tdir:
            self.tdir = tdir
            while not self.shutdown_event.is_set():
                with suppress(apsw.ConnectionClosedError):
                    try:
                        book_id = g()
                    except ShutDown:
                        break
                    if book_id:
                        self.count_book_and_commit(book_id, server)
                    else:
                        self.do_backlog(server)
                self.tick_event.set()

    def do_backlog(self, server: Server) -> None:
        # Drain all books marked needs_scan, in batches
        while not self.shutdown_event.is_set():
            batch = tuple(self.get_batch())
            if not batch:
                break
            for book_id in batch:
                if self.shutdown_event.is_set():
                    break
                self.count_book_and_commit(book_id, server)
                self.shutdown_event.wait(YIELD_TIME)

    def get_batch(self, size: int = 100) -> Iterator[int]:
        ' Order results by book id to prioritise newer books '
        if db := self.dbref():
            with db.safe_read_lock:
                for rec in db.backend.execute(f'SELECT book FROM books_pages_link WHERE needs_scan=1 ORDER BY book DESC LIMIT {size}'):
                    yield rec[0]

    def count_book_and_commit(self, book_id: int, server: Server) -> Pages | None:
        # Count one book and persist the result; returns None when shutting
        # down or when the db has gone away
        if (db := self.dbref()) is None or self.shutdown_event.is_set():
            return
        try:
            pages = self.count_book(db, book_id, server)
        except Exception as e:
            import traceback
            traceback.print_exc()
            self.log_failure(book_id, str(e), traceback.format_exc())
            pages = Pages(COUNT_FAILED, 0, '', 0, utcnow())
        if pages is not None and not self.shutdown_event.is_set():
            db.set_pages(book_id, pages.pages, pages.algorithm, pages.format, pages.format_size)
        return pages

    def sort_key(self, fmt: str) -> int:
        # Unknown formats sort after all configured ones
        return self.sort_order.get(fmt, len(self.sort_order) + 100)

    def count_book(self, db: Cache, book_id: int, server: Server) -> Pages:
        with db.safe_read_lock:
            fmts = db._formats(book_id)
            pages = db._get_pages(book_id)
        # Only formats calibre can convert are countable; best format first
        fmts = sorted({f.upper() for f in fmts or ()} & self.all_input_formats, key=self.sort_key)
        if not fmts:
            return Pages(NO_COUNTABLE_FORMATS, 0, '', 0, utcnow())
        # Re-use a previous scan if the same format file is unchanged and was
        # produced by the current algorithm
        prev_scan_result = None
        if pages is not None:
            idx = -1
            with suppress(ValueError):
                idx = fmts.index(pages.format)
            if idx > -1:
                sz = -1
                with suppress(Exception):
                    sz = db.format_db_size(book_id, pages.format)
                if sz == pages.format_size and pages.algorithm == server.ALGORITHM and pages.pages != COUNT_FAILED:
                    prev_scan_result = pages
                    if idx == 0:
                        return pages
        cleanups = []
        try:
            has_drmed = False
            for fmt in fmts:
                fmt_file = os.path.join(self.tdir, 'book.' + fmt.lower())
                try:
                    db.copy_format_to(book_id, fmt, fmt_file)
                    cleanups.append(fmt_file)
                    fmt_size = os.path.getsize(fmt_file)
                except Exception as e:
                    import traceback
                    traceback.print_exc()
                    self.log_failure(book_id, str(e), traceback.format_exc())
                    continue
                try:
                    self.count_callback(fmt_file)
                    pages = server.count_pages(fmt_file)
                except Exception as e:
                    import traceback
                    traceback.print_exc()
                    self.log_failure(book_id, str(e), traceback.format_exc(), fmt=fmt)
                else:
                    # An int means success; otherwise a (error, traceback) tuple
                    if isinstance(pages, int):
                        return Pages(pages, server.ALGORITHM, fmt, fmt_size, utcnow())
                    if 'calibre.ebooks.DRMError:' in pages[1]:
                        print(f'Failed to count pages in book: {book_id} {fmt} because it is DRM locked', file=sys.stderr)
                        has_drmed = True
                    else:
                        self.log_failure(book_id, pages[0], pages[1], fmt=fmt)
                        print(f'Failed to count pages in book: {book_id} {fmt} with error:\n{pages[1]}', file=sys.stderr)
            return prev_scan_result or Pages(DRMED_FORMATS if has_drmed else COUNT_FAILED, 0, '', 0, utcnow())
        finally:
            for x in cleanups:
                with suppress(OSError):
                    os.remove(x)
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/db/page_count.py",
"license": "GNU General Public License v3.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/library/page_count.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import os
import subprocess
import sys
from concurrent.futures import Executor, ThreadPoolExecutor
from contextlib import closing, suppress
from math import ceil
from multiprocessing import Pipe
from operator import itemgetter
from lxml import etree
from calibre import detect_ncpus
from calibre.constants import iswindows
from calibre.ebooks.oeb.iterator.book import extract_book
from calibre.ebooks.oeb.polish.container import Container as ContainerBase
from calibre.ebooks.oeb.polish.parsing import decode_xml, parse
from calibre.ebooks.oeb.polish.pretty import NON_NAMESPACED_BLOCK_TAGS
from calibre.ebooks.oeb.polish.toc import get_toc
from calibre.ptempfile import TemporaryDirectory, override_base_dir
from calibre.utils.cleantext import clean_xml_chars
from calibre.utils.ipc import eintr_retry_call
from calibre.utils.logging import DevNull
from calibre_extensions.speedup import barename, get_num_of_significant_chars
if iswindows:
from multiprocessing.connection import PipeConnection as Connection
else:
from multiprocessing.connection import Connection
class SimpleContainer(ContainerBase):
    # Read-only use of the ebook container; tweak_mode presumably relaxes the
    # normalization done by the polish Container — TODO confirm
    tweak_mode = True
def count_pages_pdf(pathtoebook: str) -> int:
    '''Page count of a PDF via podofo, falling back to the pdfinfo tool.'''
    from calibre.utils.podofo import get_page_count
    try:
        return get_page_count(pathtoebook)
    except Exception:
        # podofo failed: shell out to the bundled pdfinfo instead
        from calibre.ebooks.metadata.pdf import get_tools
        from calibre.ebooks.pdf.pdftohtml import creationflags
        pdfinfo = get_tools()[0]
        with open(pathtoebook, 'rb') as src:
            output = subprocess.check_output([pdfinfo, '-'], stdin=src, creationflags=creationflags)
        for line in output.decode().splitlines():
            key, _, value = line.partition(':')
            if key == 'Pages':
                return int(value.strip())
def fname_ok_cb(fname):
    '''True if *fname* looks like a usable comic page inside an archive.'''
    from calibre.ebooks.metadata.archive import fname_ok
    from calibre.libunzip import comic_exts
    ext = fname.rpartition('.')[-1].lower()
    return fname_ok(fname) and ext in comic_exts
def count_pages_cbz(pathtoebook: str) -> int:
    '''Page count of a CBZ: the number of usable image entries.'''
    from calibre.utils.zipfile import ZipFile
    with closing(ZipFile(pathtoebook)) as archive:
        return len([name for name in archive.namelist() if fname_ok_cb(name)])
def count_pages_cbr(pathtoebook: str) -> int:
    '''Page count of a CBR: the number of usable image entries.'''
    from calibre.ebooks.metadata.archive import RAR
    with closing(RAR(pathtoebook)) as archive:
        return len([name for name in archive.namelist() if fname_ok_cb(name)])
def count_pages_cb7(pathtoebook: str) -> int:
    '''Page count of a CB7: the number of usable image entries.'''
    from calibre.ebooks.metadata.archive import SevenZip
    with closing(SevenZip(pathtoebook)) as archive:
        return len([name for name in archive.namelist() if fname_ok_cb(name)])
def get_length(root: etree.Element) -> int:
    ' Used for position/length display in the viewer '
    estimated_chars = get_line_count(root) * CHARS_PER_LINE
    return max(CHARS_PER_PAGE, estimated_chars)
# Layout model: a nominal page is LINES_PER_PAGE lines of CHARS_PER_LINE chars
CHARS_PER_LINE = 70
LINES_PER_PAGE = 36
CHARS_PER_PAGE = CHARS_PER_LINE * LINES_PER_PAGE
# Heading tags: (characters per rendered line, extra margin lines added when
# the heading has content) — larger headings fit fewer characters per line
head_map = {
    'h1': (30, 2),
    'h2': (40, 2),
    'h3': (50, 2),
    'h4': (60, 2),
    'h5': (70, 2),
    'h6': (70, 1),
}
default_head_map_value = CHARS_PER_LINE, 0
# Tags treated as starting new rendered lines; img/video excluded — presumably
# their contribution comes via get_num_of_significant_chars, TODO confirm.
# Upper-case variants included since parsed HTML may preserve tag case.
blocks = frozenset(NON_NAMESPACED_BLOCK_TAGS) - {'img', 'video'}
blocks |= frozenset({x.upper() for x in blocks})
def count_char(root: etree.Element) -> int:
    '''Total significant characters in *root*, descending only into children
    that are not themselves block-level tags.'''
    pending: list[etree.Element] = [root]
    total = 0
    while pending:
        node = pending.pop()
        total += get_num_of_significant_chars(node)
        for child in node.iterchildren():
            is_block = isinstance(child.tag, str) and barename(child.tag) in blocks
            if not is_block:
                pending.append(child)
    return total
def count_line(block_elem: etree.Element) -> int:
    '''Estimated number of rendered lines occupied by one block element.'''
    per_line, margin = head_map.get(barename(block_elem.tag), default_head_map_value)
    lines = ceil(count_char(block_elem) / per_line)
    return lines + margin if lines > 0 else lines
def get_line_count(document_root: etree.Element) -> int:
    '''Emulate lines rendering of the content to return the line count.'''
    # Visits every non-block tag twice and every other tag once
    return sum(
        count_line(node)
        for node in document_root.iterdescendants('*')
        if barename(node.tag) in blocks
    )
def get_page_count(root: etree.Element) -> int:
    '''Convert the estimated line count into a page count (at least one page).'''
    pages = ceil(get_line_count(root) / LINES_PER_PAGE)
    return max(1, pages)
def calculate_number_of_workers(names, in_process_container, max_workers):
    '''Pick a worker count: bounded by CPUs, *max_workers*, and reduced to one
    for trivially small jobs where parallelism is not worth the overhead.'''
    workers = min(detect_ncpus(), len(names))
    if max_workers:
        workers = min(workers, max_workers)
    if workers > 1:
        total_size = sum(os.path.getsize(in_process_container.name_path_map[n]) for n in names)
        if len(names) < 3 or total_size < 128 * 1024:
            workers = 1
    return workers
def decode(data: bytes) -> str:
    '''Decode raw markup bytes to text, normalized to NFC.'''
    text, _ = decode_xml(data, normalize_to_nfc=True)
    return text
def parse_xhtml(path: str):
    '''Parse the (X)HTML file at *path* into an lxml tree.'''
    with open(path, 'rb') as f:
        raw = f.read()
    return parse(raw, log=DevNull(), decoder=decode, force_html5_parse=False)
def process(path: str) -> int:
    '''Page count for a single spine item file.'''
    return get_page_count(parse_xhtml(path))
def count_pages_oeb(pathtoebook: str, tdir: str, executor: Executor | None = None) -> int:
    '''Count pages in any OEB-convertible book by extracting it into *tdir*
    and summing the per spine item estimates (optionally in *executor*).'''
    nulllog = DevNull()
    book_fmt, opfpath, input_fmt = extract_book(pathtoebook, tdir, log=nulllog, only_input_plugin=True)
    container = SimpleContainer(tdir, opfpath, nulllog)
    toc = get_toc(container, verify_destinations=False)
    # Trust an embedded page-list when it is substantial
    if page_list := getattr(toc, 'page_list', ()):
        page_numbers = frozenset(map(itemgetter('pagenum'), page_list))
        if len(page_numbers) > 50:
            return len(page_numbers)
    spine_names = {name for name, is_linear in container.spine_names}
    spine_paths = {container.get_file_path_for_processing(name, allow_modification=False) for name in spine_names}
    spine_paths = {p for p in spine_paths if os.path.isfile(p)}
    if executor is not None:
        return sum(executor.map(process, spine_paths))
    with ThreadPoolExecutor() as local_executor:
        return sum(local_executor.map(process, spine_paths))
def count_pages_txt(pathtoebook: str) -> int:
    '''Estimate the page count of a plain-text file from its character count.'''
    with open(pathtoebook, 'rb') as f:
        raw = f.read()
    text = raw.decode('utf-8', 'replace')
    # Wrap the text in a dummy element so the C helper can count it
    holder = etree.Element('r')
    holder.tail = clean_xml_chars(text)
    return ceil(get_num_of_significant_chars(holder) / CHARS_PER_PAGE)
def count_pages(pathtoebook: str, executor: Executor | None = None) -> int:
    '''Dispatch to the appropriate page counter based on file extension.'''
    ext = pathtoebook.rpartition('.')[-1].lower()
    if ext == 'pdf':
        return count_pages_pdf(pathtoebook)
    if ext == 'cbz':
        return count_pages_cbz(pathtoebook)
    if ext == 'cbr':
        return count_pages_cbr(pathtoebook)
    if ext == 'cb7':
        return count_pages_cb7(pathtoebook)
    if ext in ('txt', 'text', 'md', 'textile', 'markdown'):
        return count_pages_txt(pathtoebook)
    # Everything else goes through the generic OEB pipeline
    with TemporaryDirectory() as tdir:
        return count_pages_oeb(pathtoebook, tdir, executor=executor)
class Server:
    '''Manages a single long-lived worker process that counts pages.

    Requests are hex-encoded file paths written to the worker's stdin; results
    (an int page count or an (error, traceback) tuple) come back over a pipe.
    The worker is recycled after max_jobs_per_worker jobs to bound any
    resource growth in the worker process.
    '''

    # Version of the counting algorithm; stored alongside counts so stale
    # results can be detected — presumably bumped when heuristics change
    ALGORITHM = 3

    def __init__(self, max_jobs_per_worker: int = 2048):
        self.worker: subprocess.Popen | None = None
        self.tasks_run_by_worker = 0
        self.max_jobs_per_worker = max_jobs_per_worker

    def ensure_worker(self) -> None:
        # Start a worker if none exists, recycling one that has done too many jobs
        if self.worker is not None:
            if self.tasks_run_by_worker < self.max_jobs_per_worker:
                return
            self.shutdown_worker()
        self.read_pipe, write_pipe = Pipe(False)
        with write_pipe:
            cmd = f'from calibre.library.page_count import worker_main; worker_main({write_pipe.fileno()})'
            from calibre.utils.ipc.simple_worker import start_pipe_worker
            self.worker = start_pipe_worker(
                cmd, pass_fds=(write_pipe.fileno(),), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        self.tasks_run_by_worker = 0

    def shutdown_worker(self) -> None:
        # Close stdin to signal the worker to exit; kill it if it lingers
        if self.worker is not None:
            w, self.worker = self.worker, None
            self.read_pipe.close()
            w.stdin.close()
            if w.wait(1) is None:
                w.kill()
                w.wait()

    def count_pages(self, path: str) -> int | tuple[str, str]:
        '''Count pages of *path*; returns an int or (error, traceback) on failure.'''
        self.ensure_worker()
        # hex encoding keeps arbitrary paths single-line safe
        encoded_path = path.encode().hex() + os.linesep
        self.worker.stdin.write(encoded_path.encode())
        self.worker.stdin.flush()
        self.tasks_run_by_worker += 1
        try:
            return eintr_retry_call(self.read_pipe.recv)
        except Exception:
            # A broken pipe means a dead/hung worker; discard it
            self.shutdown_worker()
            raise

    def __enter__(self) -> Server:
        return self

    def __exit__(self, *a) -> None:
        self.shutdown_worker()
def serve_requests(pipe: Connection) -> None:
    '''Worker-process loop: read hex-encoded paths from stdin and send back
    page counts (or (error, traceback) tuples) over *pipe* until stdin closes.'''
    # NOTE(review): the executor is never shut down explicitly; the process
    # exits when stdin closes — confirm that is the intended lifecycle
    executor = ThreadPoolExecutor()
    for i, line in enumerate(sys.stdin):
        path = bytes.fromhex(line.rstrip()).decode()
        # Each job gets its own temp dir so stray temp files are cleaned up
        with TemporaryDirectory(suffix=f'.pc{i}') as base_tdir, override_base_dir(base_tdir):
            try:
                result = count_pages(path, executor)
            except Exception as e:
                import traceback
                result = str(e), traceback.format_exc()
        try:
            eintr_retry_call(pipe.send, result)
        except EOFError:
            break
def worker_main(pipe_fd: int) -> None:
    '''Entry point of the page-counting worker process.'''
    from calibre.utils.formatter import set_template_error_reporter
    set_template_error_reporter()
    with suppress(KeyboardInterrupt):
        with Connection(pipe_fd, False, True) as pipe:
            serve_requests(pipe)
def test_line_counting(self):
    '''Unit checks for the line estimation model (driven by PageCountTest).'''
    line = 'a ' * CHARS_PER_LINE
    h1_line = 'h ' * head_map['h1'][0]
    def t(doc: str, expected: int):
        root = parse(doc)
        self.assertEqual(expected, get_line_count(root), doc)
    # comments, <br> and inline spans do not add block lines; script is skipped
    t(f'<p><!--{line}-->{line}<br>{line}', 2)
    t(f'<body>{line}<script>{line}</script>', 1)
    t(f'<body>{line}<span>{line}<p>', 2)
    t(f'<body>{line}<body>{line}', 2)
    # h1 adds its margin lines on top of its (shorter) rendered lines
    t(f'<h1>{h1_line}<p>{line}', 4)
    t('<p>', 0), t('<h1>', 0)
    r = parse('<p><i>abc')
    self.assertEqual(count_char(r[1][0]), 3)
    # an <img> is counted as a large fixed number of significant characters
    r = parse('<p><img>')
    self.assertEqual(count_char(r[1][0]), 1000)
def test_page_count(self) -> None:
    '''Smoke-test the Server/worker pipeline on a few bundled sample books.'''
    test_line_counting(self)
    sample_files = (
        P('quick_start/eng.epub'), P('quick_start/swe.epub'), P('quick_start/fra.epub'),
        P('common-english-words.txt'))
    # max_jobs_per_worker=2 also exercises worker recycling
    with Server(max_jobs_per_worker=2) as server:
        for sample in sample_files:
            result = server.count_pages(sample)
            if not isinstance(result, int):
                raise AssertionError(f'Counting pages for {sample} failed with result: {result}')
def develop():
    '''Count pages of the files passed on the command line, timing each.'''
    import time
    executor = ThreadPoolExecutor()
    for path in sys.argv[1:]:
        started = time.monotonic()
        pages = count_pages(path, executor)
        print(path, f'{time.monotonic() - started:.2f}', pages, flush=True)
if __name__ == '__main__':
    # Manual testing: python page_count.py book1.epub book2.pdf ...
    develop()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/library/page_count.py",
"license": "GNU General Public License v3.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/gui2/library/bookshelf_view.py | #!/usr/bin/env python
# License: GPLv3
# Copyright: Andy C <achuongdev@gmail.com>, un_pogaz <un.pogaz@gmail.com>, Kovid Goyal <kovid@kovidgoyal.net>
# Imports {{{
import bisect
import math
import random
import struct
import weakref
from collections.abc import Iterable, Iterator
from contextlib import suppress
from functools import lru_cache, partial
from operator import attrgetter
from queue import LifoQueue, ShutDown
from threading import Event, RLock, Thread, current_thread
from time import monotonic
from typing import NamedTuple
from qt.core import (
QAbstractItemView,
QAbstractScrollArea,
QApplication,
QBrush,
QBuffer,
QColor,
QContextMenuEvent,
QEasingCurve,
QEvent,
QFont,
QFontDatabase,
QFontInfo,
QFontMetricsF,
QIcon,
QImage,
QItemSelection,
QItemSelectionModel,
QKeyEvent,
QKeySequence,
QLinearGradient,
QLocale,
QMenu,
QModelIndex,
QMouseEvent,
QObject,
QPainter,
QPainterPath,
QPaintEvent,
QPalette,
QParallelAnimationGroup,
QPen,
QPixmap,
QPixmapCache,
QPoint,
QPointF,
QPropertyAnimation,
QRect,
QRectF,
QResizeEvent,
QSize,
QSizeF,
QStyle,
Qt,
QTextLayout,
QTimer,
QWidget,
pyqtProperty,
pyqtSignal,
)
from xxhash import xxh3_64_intdigest
from calibre import fit_image
from calibre.db.cache import Cache
from calibre.ebooks.metadata import authors_to_string, rating_to_stars
from calibre.gui2 import config, gprefs, resolve_bookshelf_color
from calibre.gui2.library.alternate_views import (
ClickStartData,
cached_emblem,
double_click_action,
handle_enter_press,
handle_selection_click,
handle_selection_drag,
render_emblem,
selection_for_rows,
setup_dnd_interface,
)
from calibre.gui2.library.caches import CoverThumbnailCache, Thumbnailer
from calibre.gui2.library.models import BooksModel
from calibre.gui2.momentum_scroll import MomentumScrollMixin
from calibre.gui2.palette import dark_palette, light_palette
from calibre.utils.formatter import TEMPLATE_ERROR
from calibre.utils.icu import numeric_sort_key
from calibre.utils.img import resize_to_fit
from calibre.utils.iso8601 import UNDEFINED_DATE
from calibre.utils.localization import lang_map
from calibre_extensions.imageops import dominant_color
from calibre_extensions.progress_indicator import contrast_ratio, utf16_slice
# }}}
# Utility functions {{{
def random_from_id(book_id: int, limit: int = 21) -> int:
    'Deterministically derive a pseudo random integer in [0, limit) from book_id'
    # Hash an empty payload seeded by the book id, so the same book always
    # maps to the same value.
    digest = xxh3_64_intdigest(b'', seed=book_id)
    return digest % limit
def normalised_size(size_bytes: int) -> float:
    '''Map an ebook file size in bytes onto the unit interval [0, 1].

    The size is converted to an estimated page count (assuming a
    conservative ~1500 bytes per page), which is then normalised against a
    notional 2000 page maximum. Non-positive or missing sizes map to 0.0,
    and anything of 2000 estimated pages or more saturates at 1.0.
    '''
    # The truthiness check also guards against a missing (None) size
    if size_bytes and size_bytes > 0:
        estimated_pages = size_bytes // 1500
        # Use 1.0 so the return type is consistently float (previously this
        # could return the int 1 for very large files)
        return min(estimated_pages / 2000, 1.0)
    return 0.
def render_spine_text_as_pixmap(
    text: str, font: QFont, fm: QFontMetricsF, size: QSize, vertical_alignment: Qt.AlignmentFlag, downwards: bool,
    outline_width: float, device_pixel_ratio: float, text_color: QColor, outline_color: QColor,
) -> QPixmap:
    '''Render *text* rotated by 90 degrees (for display on a book spine) into
    a pixmap of the given *size*, optionally with an outline around the
    glyphs. Results are cached in QPixmapCache keyed on all the inputs.

    :param downwards: when True the text reads top-to-bottom, else bottom-to-top
    :param outline_width: > 0 draws outlined text via a QPainterPath
    '''
    ss = (QSizeF(size) * device_pixel_ratio).toSize()
    key = f'{font.key()}{ss.width()}{ss.height()}{int(vertical_alignment)}{int(downwards)}{outline_width}{text_color.rgb()}{outline_color.rgb()}{text}'
    if pmap := QPixmapCache.find(key):
        return pmap
    # Width/height are swapped: the text is laid out horizontally and the
    # painter is rotated below
    ans = QImage(ss.height(), ss.width(), QImage.Format_ARGB32_Premultiplied)
    ans.fill(Qt.GlobalColor.transparent)
    ans.setDevicePixelRatio(device_pixel_ratio)
    with QPainter(ans) as painter:
        painter.setRenderHint(QPainter.RenderHint.Antialiasing | QPainter.RenderHint.TextAntialiasing)
        painter.setFont(font)
        sz = ans.deviceIndependentSize().transposed()
        if downwards:
            painter.translate(sz.height(), 0)
            painter.rotate(90)
        else:
            painter.translate(0, sz.width())
            painter.rotate(-90)
        flags = vertical_alignment | Qt.AlignmentFlag.AlignHCenter | Qt.TextFlag.TextSingleLine
        if outline_width > 0:
            # Calculate text dimensions
            br = painter.boundingRect(QRectF(0, 0, sz.width(), sz.height()), flags, text)
            text_width = br.width()
            ascent, descent = fm.ascent(), fm.descent()
            # Calculate horizontal position for centering
            x = (sz.width() - text_width) / 2
            # Calculate vertical position based on alignment
            if vertical_alignment & Qt.AlignmentFlag.AlignTop:
                y = ascent
            elif vertical_alignment & Qt.AlignmentFlag.AlignBottom:
                y = sz.height() - descent
            else: # Default to center
                y = sz.height() / 2 + (ascent - descent) / 2
            # Create path for outlined text
            path = QPainterPath()
            path.setFillRule(Qt.FillRule.WindingFill)
            path.addText(x, y, font, text)
            # Path strokes are drawn centered on the given width, but we want
            # the full width as an outline around the text, so double it.
            painter.strokePath(path, QPen(outline_color, outline_width * 2,
                Qt.PenStyle.SolidLine, Qt.PenCapStyle.RoundCap, Qt.PenJoinStyle.RoundJoin))
            painter.fillPath(path, text_color)
        else:
            painter.setPen(text_color)
            painter.drawText(QRectF(QPointF(0, 0), sz), flags, text)
    pmap = QPixmap.fromImage(ans)
    QPixmapCache.insert(key, pmap)
    return pmap
# }}}
# Cover functions {{{
class WoodTheme(NamedTuple):
    '''Color palette used to render the wooden bookcase: the shelf boards,
    back panel, grain, knots and edge/bevel details.'''
    background: QColor
    # Main wood body gradient colors (top to bottom)
    wood_top: QColor
    wood_mid_light: QColor
    wood_mid_dark: QColor
    wood_bottom: QColor
    # Wood grain color
    grain_color: QColor
    grain_alpha_range: tuple[int, int]
    # Knot colors
    knot_color: QColor
    # Highlight and shadow
    highlight_color: QColor
    shadow_color: QColor
    # Edge colors
    edge_color: QColor
    end_grain_dark: QColor
    end_grain_light: QColor
    # Bevel colors
    bevel_light: QColor
    bevel_dark: QColor
    # Bookcase colors
    back_panel_base: QColor
    back_panel_dark: QColor
    side_panel_base: QColor
    side_panel_dark: QColor
    inner_shadow_color: QColor
    cavity_color: QColor
    @classmethod
    def light_theme(cls) -> WoodTheme:
        '''Palette for light mode.'''
        # Light oak/pine colors for light mode
        return WoodTheme(
            background=QColor(245, 245, 245),
            wood_top=QColor(210, 170, 125),
            wood_mid_light=QColor(190, 150, 105),
            wood_mid_dark=QColor(170, 130, 90),
            wood_bottom=QColor(150, 115, 75),
            grain_color=QColor(120, 80, 50),
            grain_alpha_range=(15, 40),
            knot_color=QColor(100, 65, 40, 50),
            highlight_color=QColor(255, 255, 255, 80),
            shadow_color=QColor(0, 0, 0, 30),
            edge_color=QColor(120, 85, 55),
            end_grain_dark=QColor(130, 95, 65),
            end_grain_light=QColor(170, 130, 95),
            bevel_light=QColor(230, 195, 160, 100),
            bevel_dark=QColor(100, 70, 45, 80),
            back_panel_base=QColor(160, 120, 80),
            back_panel_dark=QColor(130, 95, 60),
            side_panel_base=QColor(175, 135, 95),
            side_panel_dark=QColor(145, 105, 70),
            inner_shadow_color=QColor(60, 40, 25, 20),
            cavity_color=QColor(90, 60, 40),
        )
    @classmethod
    def dark_theme(cls) -> WoodTheme:
        '''Palette for dark mode.'''
        # Dark walnut/mahogany colors for dark mode
        return WoodTheme(
            background=QColor(30, 30, 35),
            wood_top=QColor(85, 55, 40),
            wood_mid_light=QColor(70, 45, 32),
            wood_mid_dark=QColor(55, 35, 25),
            wood_bottom=QColor(42, 28, 20),
            grain_color=QColor(30, 18, 12),
            grain_alpha_range=(20, 50),
            knot_color=QColor(25, 15, 10, 60),
            highlight_color=QColor(255, 220, 180, 35),
            shadow_color=QColor(0, 0, 0, 50),
            edge_color=QColor(35, 22, 15),
            end_grain_dark=QColor(30, 20, 14),
            end_grain_light=QColor(65, 42, 30),
            bevel_light=QColor(120, 85, 60, 70),
            bevel_dark=QColor(20, 12, 8, 90),
            back_panel_base=QColor(45, 30, 22),
            back_panel_dark=QColor(30, 20, 14),
            side_panel_base=QColor(55, 38, 28),
            side_panel_dark=QColor(38, 25, 18),
            inner_shadow_color=QColor(0, 0, 0, 30),
            cavity_color=QColor(20, 14, 10),
        )
class ColorTheme(NamedTuple):
    '''Colors used for spine text, group dividers and selection highlights.'''
    # Spine text/outline, chosen based on the spine background brightness
    text_color_for_dark_background: QColor
    text_color_for_light_background: QColor
    outline_color_for_dark_background: QColor
    outline_color_for_light_background: QColor
    # Divider colors
    divider_text_color: QColor
    divider_line_color: QColor
    divider_background_color: QColor
    # Selection highlight colors
    current_selected_color: QColor
    current_color: QColor
    selected_color: QColor
    @classmethod
    def _from_palette(cls, palette: QPalette) -> ColorTheme:
        # Shared base for light_theme()/dark_theme(). The divider
        # background/line colors are left as invalid QColors here and filled
        # in by the theme-specific constructors below.
        # Fixed: return annotation previously claimed dict[str, QColor],
        # but this returns a ColorTheme.
        return cls(
            text_color_for_dark_background=dark_palette().color(QPalette.ColorRole.WindowText),
            text_color_for_light_background=light_palette().color(QPalette.ColorRole.WindowText),
            outline_color_for_dark_background=QColor(0, 0, 0),
            outline_color_for_light_background=QColor(255, 255, 255),
            divider_text_color=palette.color(QPalette.ColorRole.WindowText),
            current_selected_color=palette.color(QPalette.ColorRole.LinkVisited),
            current_color=palette.color(QPalette.ColorRole.Mid),
            selected_color=palette.color(QPalette.ColorRole.Highlight),
            divider_background_color=QColor(),
            divider_line_color=QColor(),
        )
    @classmethod
    def light_theme(cls) -> ColorTheme:
        '''Theme based on the light palette.'''
        rslt = cls._from_palette(light_palette())
        return rslt._replace(
            divider_background_color=QColor(250, 250, 250),
            divider_line_color=QColor(74, 74, 106),
        )
    @classmethod
    def dark_theme(cls) -> ColorTheme:
        '''Theme based on the dark palette.'''
        rslt = cls._from_palette(dark_palette())
        return rslt._replace(
            divider_background_color=QColor(100, 100, 100),
            divider_line_color=QColor(180, 180, 182),
        )
def is_dark_theme(value=None):
    'Whether the bookshelf should render dark, honoring the per-view override.'
    # Local import: this function deliberately shares the global function's name
    from calibre.gui2 import is_dark_theme
    override = gprefs['bookshelf_theme_override'] if value is None else value
    if override == 'none':
        # No override configured: follow the application-wide theme
        return is_dark_theme()
    return override == 'dark'
def color_with_alpha(c: QColor, a: int) -> QColor:
    'Return a copy of *c* with its alpha channel replaced by *a*.'
    copied = QColor(c)  # never mutate the caller's color
    copied.setAlpha(a)
    return copied
class RenderCase:
    '''Renders and caches the static chrome of the bookcase: the wooden
    background (back panel), the shelf boards and the group divider.

    Pixmaps are cached and only re-rendered when geometry, theme or the
    relevant preferences change. All randomness is seeded, so renders are
    deterministic.
    '''
    # Created lazily by ensure_theme()
    dark_theme: WoodTheme
    light_theme: WoodTheme
    theme: WoodTheme

    def __init__(self):
        # Keys describing what each cached pixmap below was rendered for
        self.last_rendered_shelf_at = QRect(0, 0, 0, 0), False
        self.last_rendered_background_at = QRect(0, 0, 0, 0), False, False, {}
        self.last_rendered_divider_at = QRect(0, 0, 0, 0), False, 0, QColor()
        self.last_rendered_background = QPixmap()
        self.last_rendered_divider = QPixmap()
        self.shelf_cache: dict[int, QPixmap] = {}
        # Fixed seed so the grain pattern is stable across renders
        self.back_panel_grain = tuple(self.generate_grain_lines(count=80, seed=42))

    def generate_grain_lines(self, seed: int = 42, count: int = 60) -> Iterator[tuple[float, float, float, float, float]]:
        '''Yield deterministic pseudo-random wood grain parameters as
        (y_offset, thickness, alpha, wave_amplitude, wave_frequency) tuples.'''
        r = random.Random(seed)
        for _ in range(count):
            y_offset = r.uniform(-0.3, 0.3)
            thickness = r.uniform(0.5, 2.0)
            alpha = r.uniform(0, 1)
            wave_amplitude = r.uniform(0, 2)
            wave_frequency = r.uniform(0.01, 0.03)
            yield y_offset, thickness, alpha, wave_amplitude, wave_frequency

    def ensure_theme(self, is_dark: bool) -> None:
        # Instantiate the needed WoodTheme on first use and make it current
        attr = 'dark_theme' if is_dark else 'light_theme'
        if not hasattr(self, attr):
            setattr(self, attr, WoodTheme.dark_theme() if is_dark else WoodTheme.light_theme())
        self.theme = self.dark_theme if is_dark else self.light_theme

    def background_as_pixmap(self, width: int, height: int) -> QPixmap:
        '''The bookcase background, re-rendered only when size, theme or the
        custom background preferences change.'''
        rect = QRect(0, 0, width, height)
        is_dark = is_dark_theme()
        q = rect, is_dark, gprefs['bookshelf_use_custom_background'], gprefs['bookshelf_custom_background']
        if self.last_rendered_background_at == q:
            return self.last_rendered_background
        self.last_rendered_background_at = q
        self.ensure_theme(is_dark)
        ans = QImage(width, height, QImage.Format_ARGB32_Premultiplied)
        with QPainter(ans) as painter:
            painter.setRenderHint(QPainter.RenderHint.Antialiasing)
            if gprefs['bookshelf_use_custom_background']:
                self.draw_custom_background(painter, rect)
            else:
                self.draw_back_panel(painter, rect)
                # Add vertical grain for back panel (typical plywood back)
                self.draw_back_panel_grain(painter, rect)
                self.draw_cavity_shadows(painter, rect)
        self.last_rendered_background = QPixmap.fromImage(ans)
        return self.last_rendered_background

    def draw_custom_background(self, painter: QPainter, interior_rect: QRect) -> None:
        '''Fill the background with the user-configured texture, or failing
        that, the user-configured flat color.'''
        r, g, b = resolve_bookshelf_color(for_dark=is_dark_theme())
        if tex := resolve_bookshelf_color(for_dark=is_dark_theme(), which='texture'):
            from calibre.gui2.preferences.texture_chooser import texture_path
            if path := texture_path(tex):
                texture = QPixmap()
                if texture.load(path):
                    painter.fillRect(interior_rect, QBrush(texture))
                    return
        # No texture configured, or it failed to load: use the flat color
        painter.fillRect(interior_rect, QBrush(QColor(r, g, b)))

    def draw_back_panel(self, painter: QPainter, interior_rect: QRect) -> None:
        # Base gradient for back panel (slightly recessed look)
        back_gradient = QLinearGradient(interior_rect.left(), 0, interior_rect.right(), 0)
        back_gradient.setColorAt(0.0, self.theme.back_panel_dark)
        back_gradient.setColorAt(0.15, self.theme.back_panel_base)
        back_gradient.setColorAt(0.85, self.theme.back_panel_base)
        back_gradient.setColorAt(1.0, self.theme.back_panel_dark)
        painter.fillRect(interior_rect, back_gradient)

    def draw_back_panel_grain(self, painter: QPainter, rect: QRect) -> None:
        '''Draw subtle, slightly wavy vertical grain lines on the back panel.'''
        painter.save()
        painter.setClipRect(rect)
        r = random.Random(555)  # fixed seed: identical grain on every render
        min_alpha, max_alpha = self.theme.grain_alpha_range
        # Vertical grain lines
        for _ in range(50):
            x = rect.left() + r.randint(0, rect.width())
            alpha = r.randint(min_alpha // 2, max_alpha // 2)
            grain_color = color_with_alpha(self.theme.grain_color, alpha)
            pen = QPen(grain_color)
            pen.setWidthF(r.uniform(0.5, 1.5))
            painter.setPen(pen)
            # Slightly wavy vertical line
            y1 = rect.top()
            y2 = rect.bottom()
            wave = r.uniform(-3, 3)
            painter.drawLine(int(x), y1, int(x + wave), y2)
        painter.restore()

    def draw_cavity_shadows(self, painter: QPainter, cavity_rect: QRect) -> None:
        '''Darken the left and right edges to suggest the bookcase cavity.'''
        side_shadow_width = 20
        # Left side shadow
        left_shadow_gradient = QLinearGradient(cavity_rect.left(), 0, cavity_rect.left() + side_shadow_width, 0)
        left_shadow_gradient.setColorAt(0.0, self.theme.inner_shadow_color)
        left_shadow_gradient.setColorAt(1.0, color_with_alpha(self.theme.inner_shadow_color, 0))
        painter.fillRect(cavity_rect.x(), cavity_rect.y(), side_shadow_width, cavity_rect.height(), left_shadow_gradient)
        # Right side shadow
        right_shadow_gradient = QLinearGradient(cavity_rect.right() - side_shadow_width, 0, cavity_rect.right(), 0)
        right_shadow_gradient.setColorAt(0.0, color_with_alpha(self.theme.inner_shadow_color, 0))
        right_shadow_gradient.setColorAt(1.0, self.theme.inner_shadow_color)
        painter.fillRect(
            cavity_rect.right() - side_shadow_width, cavity_rect.y(), side_shadow_width, cavity_rect.height(), right_shadow_gradient)

    def shelf_as_pixmap(self, width: int, height: int, instance: int) -> QPixmap:
        '''A rendered shelf board. *instance* selects one of several grain
        variations so adjacent shelves do not look identical.'''
        rect = QRect(0, 0, width, height)
        is_dark = is_dark_theme()
        q = rect, is_dark
        if self.last_rendered_shelf_at != q:
            # Size or theme changed: every cached variation is stale
            self.shelf_cache.clear()
            self.last_rendered_shelf_at = q
        if ans := self.shelf_cache.get(instance):
            return ans
        self.ensure_theme(is_dark)
        ans = QImage(width, height, QImage.Format_ARGB32_Premultiplied)
        with QPainter(ans) as painter:
            painter.setRenderHint(QPainter.RenderHint.Antialiasing)
            self.draw_shelf_body(painter, rect)
            self.draw_wood_grain(painter, rect, tuple(self.generate_grain_lines(102 + instance)))
            self.draw_knots(painter, rect, seed=123 + instance)
            self.draw_top_highlight(painter, rect)
            self.draw_bottom_edge(painter, rect)
            self.draw_front_bevel(painter, rect)
            self.draw_edges(painter, rect)
        self.shelf_cache[instance] = p = QPixmap.fromImage(ans)
        return p

    def draw_shelf_body(self, painter, rect):
        # Base wood gradient
        wood_gradient = QLinearGradient(0, rect.top(), 0, rect.bottom())
        wood_gradient.setColorAt(0.0, self.theme.wood_top)  # Lighter top
        wood_gradient.setColorAt(0.3, self.theme.wood_mid_light)  # Mid tone
        wood_gradient.setColorAt(0.7, self.theme.wood_mid_dark)  # Darker
        wood_gradient.setColorAt(1.0, self.theme.wood_bottom)  # Darkest bottom
        painter.fillRect(rect, wood_gradient)

    def draw_wood_grain(self, painter, rect, grain_lines, alpha_multiplier: float = 1.0):
        '''Draw horizontal wavy grain lines, parameterized by grain_lines as
        produced by generate_grain_lines().'''
        painter.save()
        painter.setClipRect(rect)
        spacing = rect.height() / len(grain_lines)
        min_alpha, max_alpha = self.theme.grain_alpha_range
        for i, (y_offset, thickness, alpha_factor, wave_amp, wave_freq) in enumerate(grain_lines):
            alpha = int((min_alpha + alpha_factor * (max_alpha - min_alpha)) * alpha_multiplier)
            # Vary the grain color
            grain_color = QColor(self.theme.grain_color)
            grain_color.setAlpha(alpha)
            pen = QPen(grain_color)
            pen.setWidthF(thickness)
            painter.setPen(pen)
            # Calculate y position with offset
            base_y = rect.top() + i * spacing + y_offset * spacing
            # Sample the wavy grain line every 5 pixels, then join the
            # samples with straight segments. math is the module-level
            # import; previously it was re-imported inside this loop.
            points = []
            for x in range(rect.left(), rect.right(), 5):
                wave = wave_amp * math.sin(x * wave_freq + i)
                points.append((x, base_y + wave))
            for j in range(len(points) - 1):
                painter.drawLine(
                    int(points[j][0]), int(points[j][1]),
                    int(points[j + 1][0]), int(points[j + 1][1])
                )
        painter.restore()

    def draw_knots(self, painter: QPainter, rect: QRect, count: int = 3, seed: int = 123):
        '''Draw a few elliptical knots at deterministic pseudo-random positions.'''
        painter.save()
        painter.setClipRect(rect)
        r = random.Random(seed)
        for _ in range(count):
            knot_x = r.randint(rect.left() + 20, rect.right() - 20)
            knot_y = r.randint(rect.top() + 5, rect.bottom() - 5)
            knot_size = r.randint(3, 6)
            knot_gradient = QLinearGradient(knot_x - knot_size, knot_y, knot_x + knot_size, knot_y)
            knot_color_transparent = color_with_alpha(self.theme.knot_color, 0)
            knot_gradient.setColorAt(0.0, knot_color_transparent)
            knot_gradient.setColorAt(0.5, self.theme.knot_color)
            knot_gradient.setColorAt(1.0, knot_color_transparent)
            painter.setBrush(QBrush(knot_gradient))
            painter.setPen(Qt.PenStyle.NoPen)
            painter.drawEllipse(knot_x - knot_size, knot_y - knot_size // 2, knot_size * 2, knot_size)
        painter.restore()

    def draw_top_highlight(self, painter, rect):
        # Soft highlight fading downwards from the top edge
        highlight_gradient = QLinearGradient(0, rect.top(), 0, rect.top() + 8)
        highlight_gradient.setColorAt(0.0, self.theme.highlight_color)
        highlight_gradient.setColorAt(0.5, color_with_alpha(self.theme.highlight_color, self.theme.highlight_color.alpha() // 3))
        highlight_gradient.setColorAt(1.0, color_with_alpha(self.theme.highlight_color, 0))
        highlight_rect = QRect(rect.x(), rect.y(), rect.width(), 8)
        painter.fillRect(highlight_rect, highlight_gradient)

    def draw_bottom_edge(self, painter, rect):
        # Soft shadow deepening towards the bottom edge
        shadow_gradient = QLinearGradient(0, rect.bottom() - 6, 0, rect.bottom())
        shadow_gradient.setColorAt(0.0, color_with_alpha(self.theme.shadow_color, 0))
        shadow_gradient.setColorAt(0.7, self.theme.shadow_color)
        shadow_gradient.setColorAt(1.0, color_with_alpha(self.theme.shadow_color, self.theme.shadow_color.alpha() * 2))
        shadow_rect = QRect(rect.x(), rect.bottom() - 6, rect.width(), 6)
        painter.fillRect(shadow_rect, shadow_gradient)

    def draw_front_bevel(self, painter, rect):
        # Top chamfer line
        chamfer_pen = QPen(self.theme.bevel_light)
        chamfer_pen.setWidth(1)
        painter.setPen(chamfer_pen)
        painter.drawLine(rect.left(), rect.top() + 2, rect.right(), rect.top() + 2)
        # Bottom chamfer (darker)
        chamfer_pen.setColor(self.theme.bevel_dark)
        painter.setPen(chamfer_pen)
        painter.drawLine(rect.left(), rect.bottom() - 2, rect.right(), rect.bottom() - 2)

    def draw_edges(self, painter, rect):
        # Darker edge outline
        edge_pen = QPen(self.theme.edge_color)
        edge_pen.setWidth(1)
        painter.setPen(edge_pen)
        painter.setBrush(Qt.BrushStyle.NoBrush)
        painter.drawRect(rect)
        # Left end grain (slightly different tone)
        end_grain_width = 4
        left_end = QRect(rect.x(), rect.y(), end_grain_width, rect.height())
        end_gradient = QLinearGradient(left_end.left(), 0, left_end.right(), 0)
        end_gradient.setColorAt(0.0, self.theme.end_grain_dark)
        end_gradient.setColorAt(1.0, color_with_alpha(self.theme.end_grain_light, 0))
        painter.fillRect(left_end, end_gradient)
        # Right end grain
        right_end = QRect(rect.right() - end_grain_width, rect.y(), end_grain_width, rect.height())
        end_gradient = QLinearGradient(right_end.left(), 0, right_end.right(), 0)
        end_gradient.setColorAt(0.0, color_with_alpha(self.theme.end_grain_light, 0))
        end_gradient.setColorAt(1.0, self.theme.end_grain_dark)
        painter.fillRect(right_end, end_gradient)

    def divider_as_pixmap(self, width: int, height: int, divider_color: QColor, corner_radius: int = 0, offset: int = 0) -> QPixmap:
        '''A rounded rectangle used as the group divider. *offset* extends the
        rendered height (presumably so the divider can extend behind the
        shelf below -- see callers).'''
        rect = QRect(0, 0, width, height + offset)
        is_dark = is_dark_theme()
        q = rect, is_dark, corner_radius, divider_color
        if self.last_rendered_divider_at == q:
            return self.last_rendered_divider
        self.last_rendered_divider_at = q
        ans = QImage(width, height + offset, QImage.Format_ARGB32_Premultiplied)
        ans.fill(Qt.GlobalColor.transparent)
        with QPainter(ans) as painter:
            painter.setRenderHint(QPainter.RenderHint.Antialiasing)
            path = QPainterPath()
            # Extend the rect by corner_radius so the bottom corners are
            # clipped off, leaving only the top corners rounded
            path.addRoundedRect(
                QRectF(0, 0, width, height + offset + corner_radius), corner_radius, corner_radius,
            )
            painter.fillPath(path, divider_color)
        self.last_rendered_divider = p = QPixmap.fromImage(ans)
        return p
class ImageWithDominantColor(QImage):
    '''A QImage that lazily computes and caches its dominant color.'''
    # Cache for the dominant_color property
    _dominant_color: QColor | None = None
    # Fallback (saddle brown) used for null images or when dominant_color() fails
    DEFAULT_DOMINANT_COLOR = QColor('#8B4513')
    @property
    def dominant_color(self) -> QColor:
        # Compute once and cache; null images skip the dominant_color() call
        if self._dominant_color is not None:
            return self._dominant_color
        ans = self.DEFAULT_DOMINANT_COLOR if self.isNull() else dominant_color(self)
        if not ans.isValid():
            ans = self.DEFAULT_DOMINANT_COLOR
        self._dominant_color = ans
        return ans
class PixmapWithDominantColor(QPixmap):
    '''A QPixmap that carries the dominant color of its source image.'''
    dominant_color: QColor = QColor()
    @staticmethod
    def fromImage(img: QImage) -> PixmapWithDominantColor:
        # Wrap QPixmap.fromImage(), copying over the image's dominant color
        # (computing it first if img is a plain QImage)
        ans = PixmapWithDominantColor(QPixmap.fromImage(img))
        if not hasattr(img, 'dominant_color'):
            img = ImageWithDominantColor(img)
        ans.dominant_color = img.dominant_color
        return ans
@lru_cache(maxsize=2)
def default_cover_pixmap(width: int, height: int) -> PixmapWithDominantColor:
    # Pixmap shown for books without a cover, scaled to fit width x height.
    # I() resolves a calibre resource path. Cached since presumably only a
    # couple of distinct sizes are ever requested -- see callers.
    i = QImage(I('default_cover.png'))
    _, i = resize_to_fit(i, width, height)
    return PixmapWithDominantColor.fromImage(ImageWithDominantColor(i))
class ThumbnailerWithDominantColor(Thumbnailer):
    '''A Thumbnailer that also serializes/restores each cover's dominant
    color, prefixed to the image data as three native-format floats.'''
    thumbnail_class: type[QImage] = ImageWithDominantColor
    pixmap_class: type[QPixmap] = PixmapWithDominantColor
    def resize_to_fit(self, cover: QImage, width: int, height: int) -> ImageWithDominantColor:
        # Ensure the resized result can cache a dominant color
        ans = super().resize_to_fit(cover, width, height)
        if not isinstance(ans, ImageWithDominantColor):
            ans = ImageWithDominantColor(ans)
        return ans
    def serialize_img(self, x: ImageWithDominantColor, buf: QBuffer) -> bool:
        # Prefix the serialized image with the dominant color as r, g, b floats
        buf.write(struct.pack('@fff', x.dominant_color.redF(), x.dominant_color.greenF(), x.dominant_color.blueF()))
        return super().serialize_img(x, buf)
    def unserialize_img(self, buf: memoryview) -> ImageWithDominantColor:
        try:
            r, g, b = struct.unpack_from('@fff', buf)
        except Exception:
            # Truncated/corrupt cache entry: fall back to black
            r = g = b = 0
        dc = QColor()
        dc.setRedF(r), dc.setGreenF(g), dc.setBlueF(b)
        # The image data starts after the color prefix
        qimg = super().unserialize_img(buf[struct.calcsize('@fff'):])
        ans = ImageWithDominantColor(qimg)
        ans._dominant_color = dc
        return ans
def draw_pixmap_with_shadow(
    pixmap: QPixmap, opacity: float = 1.0, has_shadow: bool = True,
    shadow_color: QColor = QColor(0, 0, 0, 100), fill_color: QColor = QColor(Qt.GlobalColor.transparent),
) -> tuple[QPixmap, int]:
    ''' Draw a QPixmap with a nice drop shadow effect.

    Returns the composited pixmap and the margin (in pixels) added around
    the original pixmap to make room for the shadow (0 when has_shadow is
    False). fill_color is painted behind the pixmap before it is drawn with
    the given opacity.

    NOTE(review): the QColor default arguments are single instances created
    at function definition time; they are never mutated here so this is
    safe, but do not mutate them in future edits.
    '''
    # Create a larger image to accommodate the shadow
    shadow_blur = 10 if has_shadow else 0
    margin = shadow_blur * 2
    total_width, total_height = pixmap.width(), pixmap.height()
    if margin > 0:
        shadow_offset_x = shadow_offset_y = shadow_blur // 2
        total_width += margin * 2 + abs(shadow_offset_x)
        total_height += margin * 2 + abs(shadow_offset_y)
    # Create shadow image
    shadow_image = QImage(total_width, total_height, QImage.Format_ARGB32_Premultiplied)
    shadow_image.fill(Qt.GlobalColor.transparent)
    with QPainter(shadow_image) as shadow_painter:
        shadow_painter.setRenderHint(QPainter.RenderHint.Antialiasing | QPainter.RenderHint.SmoothPixmapTransform)
        shadow_painter.setPen(Qt.PenStyle.NoPen)
        if margin:
            # Draw the shadow shape (rounded rect or simple rect based on preference)
            shadow_rect = QRectF(
                margin + shadow_offset_x,
                margin + shadow_offset_y,
                pixmap.width(),
                pixmap.height()
            )
            # Draw multiple layers with decreasing opacity for blur effect
            for i in range(shadow_blur, 0, -1):
                alpha = int(shadow_color.alpha() * (1 - i / shadow_blur) * 0.5)
                blur_color = QColor(shadow_color.red(), shadow_color.green(),
                    shadow_color.blue(), alpha)
                shadow_painter.setBrush(blur_color)
                blur_rect = shadow_rect.adjusted(-i, -i, i, i)
                shadow_painter.drawRoundedRect(blur_rect, 3, 3)
        # Opaque backing so a translucent pixmap does not show the shadow through it
        shadow_painter.fillRect(QRect(margin, margin, pixmap.width(), pixmap.height()), fill_color)
        shadow_painter.setOpacity(opacity)
        shadow_painter.drawPixmap(margin, margin, pixmap)
    return QPixmap.fromImage(shadow_image), margin
class CachedCoverRenderer:
    '''Caches the scaled, drop-shadowed rendering of a single cover pixmap.'''
    def __init__(self, p: PixmapWithDominantColor) -> None:
        self.pixmap = p
        self.last_rendered_size = QSize()
        self.last_rendered_opacity = -1  # impossible value forces the first render
        self.last_rendered_pixmap = QPixmap()
        self.last_rendered_margin = 0
    # Setting a new source pixmap must reset all cached state, which is
    # exactly what __init__ does, so alias it
    set_pixmap = __init__
    def as_pixmap(self, size: QSize, opacity: float, parent: QWidget) -> tuple[QPixmap, int]:
        # Returns the rendered pixmap and the shadow margin (in device
        # independent pixels); re-renders only when size or opacity change
        if size == self.last_rendered_size and opacity == self.last_rendered_opacity:
            return self.last_rendered_pixmap, self.last_rendered_margin
        dpr = parent.devicePixelRatioF()
        # Render at physical resolution for crispness on high-DPI screens
        ss = (QSizeF(size) * dpr).toSize()
        pmap = self.pixmap.scaled(ss, transformMode=Qt.TransformationMode.SmoothTransformation)
        self.last_rendered_pixmap, self.last_rendered_margin = draw_pixmap_with_shadow(
            pmap, has_shadow=gprefs['bookshelf_shadow'], fill_color=self.pixmap.dominant_color, opacity=opacity)
        self.last_rendered_pixmap.setDevicePixelRatio(dpr)
        self.last_rendered_margin = int(self.last_rendered_margin / dpr)
        self.last_rendered_opacity = opacity
        self.last_rendered_size = size
        return self.last_rendered_pixmap, self.last_rendered_margin
# }}}
# Layout {{{
@lru_cache(maxsize=2)
def all_groupings() -> dict[str, str]:
    '''Map of supported grouping field names to the display name used for
    books that have no value for that field (empty string for authors).'''
    # NOTE(review): maxsize=2 for a zero-argument function; maxsize=1 would suffice
    return {
        'authors': '',
        'series': _('No series'),
        'tags': _('Untagged'),
        'publisher': _('No publisher'),
        'pubdate': _('Unpublished'),
        'timestamp': _('Unknown'),
        'rating': _('Unrated'),
        'languages': _('No language'),
    }
class LayoutConstraints(NamedTuple):
    '''Geometry (in pixels) used to lay out spines, shelves and dividers.'''
    min_spine_width: int = 15
    max_spine_width: int = 80
    default_spine_width: int = 40
    # Width a spine expands to on hover
    hover_expanded_width: int = 160
    spine_height: int = 200
    # Height of the shelf board under each row of books
    shelf_height: int = 20
    # Empty space above the books on each shelf
    shelf_gap: int = 20
    divider_width: int = 30
    # Horizontal space between adjacent items
    horizontal_gap: int = 2
    # Usable width of the view
    width: int = 0
    side_margin: int = 4
    @property
    def step_height(self) -> int:
        # Vertical distance from the top of one shelf row to the next
        return self.spine_height + self.shelf_height
def height_reduction_for_book_id(book_id: int) -> int:
    'Amount to shrink this book spine by when variable-height spines are enabled.'
    if not gprefs['bookshelf_variable_height']:
        return 0
    # Deterministic per-book, so layout is stable across repaints
    return random_from_id(book_id)
class ShelfItem(NamedTuple):
    '''A single item placed on a shelf: a book spine, or a group divider
    (identified by book_id == 0).'''
    start_x: int
    case_start_y: int
    width: int
    idx: int
    case_idx: int
    reduce_height_by: int = 0
    book_id: int = 0
    group_name: str = ''
    is_hover_expanded: bool = False

    @property
    def is_divider(self) -> bool:
        # Dividers are the only items without an associated book
        return self.book_id == 0

    def rect(self, lc: LayoutConstraints) -> QRect:
        # On-screen rectangle: inset from the left by the side margin and
        # shrunk from the top by the shelf gap plus any height reduction
        left = self.start_x + lc.side_margin
        top = self.case_start_y + self.reduce_height_by + lc.shelf_gap
        height = lc.spine_height - self.reduce_height_by - lc.shelf_gap
        return QRect(left, top, self.width, height)

    def contains(self, x: int, gap: int = 0) -> bool:
        # True if x falls inside this item, extended rightwards by gap
        right_edge = self.start_x + self.width + gap
        return self.start_x <= x < right_edge

    def overlap_length(self, X: ShelfItem) -> int:
        # Length of the horizontal overlap between this item and X
        a_start, a_end = X.start_x, X.start_x + X.width
        b_start, b_end = self.start_x, self.start_x + self.width
        return max(0, min(a_end, b_end) - max(a_start, b_start))
class CaseItem:
    '''One horizontal row of the bookcase: either a shelf board (items is
    None) or a row of ShelfItems (book spines and group dividers).'''
    start_y: int = 0
    width: int = 0  # horizontal extent currently occupied by items
    height: int = 0
    idx: int = 0  # index of this row within the bookcase
    items: list[ShelfItem] | None = None  # None for shelf boards
    expanded_item: ShelfItem | None = None  # set by shift_for_expanded_cover()
    def __init__(self, y: int = 0, height: int = 0, is_shelf: bool = False, idx: int = 0):
        self.start_y = y
        self.height = height
        self.idx = idx
        if not is_shelf:
            self.items = []
    def book_or_divider_at_xpos(self, x: int, lc: LayoutConstraints) -> ShelfItem | None:
        # Find the item under horizontal position x, preferring a
        # neighbouring hover-expanded item when it overlaps x (an expanded
        # neighbour can be drawn on top of the item that nominally owns x)
        if self.items:
            idx = bisect.bisect_right(self.items, x, key=attrgetter('start_x'))
            if idx > 0:
                candidate = self.items[idx-1]
                if candidate.contains(x, lc.horizontal_gap):
                    if candidate.is_hover_expanded:
                        return candidate
                    if candidate.idx and (prev := self.items[candidate.idx-1]).is_hover_expanded and prev.contains(x):
                        return prev
                    if idx < len(self.items) and (n := self.items[idx]).is_hover_expanded and n.contains(x):
                        return n
                    return candidate
        return None
    def book_or_divider_at_region(self, region: ShelfItem, lc: LayoutConstraints) -> ShelfItem | None:
        # Find the item that overlaps *region* the most, between the item at
        # region's start and its right neighbour
        if self.items:
            idx = bisect.bisect_right(self.items, region.start_x, key=attrgetter('start_x'))
            if idx > 0:
                candidate = self.items[idx-1]
                if candidate.contains(region.start_x, lc.horizontal_gap):
                    if idx < len(self.items):
                        nc = self.items[idx]
                        a, b = region.overlap_length(candidate), region.overlap_length(nc)
                        return candidate if a >= b else nc
                    return candidate
        return None
    def closest_book_to(self, idx: int) -> ShelfItem | None:
        # The book at idx, or failing that the nearest non-divider item,
        # searching outwards in both directions
        q = self.items[idx]
        if not q.is_divider:
            return q
        for delta in range(1, len(self.items)):
            for i in (idx + delta, idx - delta):
                if 0 <= i < len(self.items) and not (ans := self.items[i]).is_divider:
                    return ans
        return None
    def _get_x_for_item(self, width: int, lc: LayoutConstraints) -> int | None:
        # x position for a new item of the given width, or None when it does
        # not fit in the space remaining on this row
        x = (self.width + lc.horizontal_gap) if self.width else 0
        if x + width + lc.horizontal_gap > lc.width:
            return None
        return x
    def add_group_divider(self, group_name: str, lc: LayoutConstraints) -> bool:
        # Append a divider for group_name; returns False when there is no room
        if not group_name:
            return True
        if (x := self._get_x_for_item(lc.divider_width, lc)) is None:
            return False
        s = ShelfItem(start_x=x, group_name=group_name, width=lc.divider_width, case_start_y=self.start_y,
                idx=len(self.items), case_idx=self.idx)
        self.items.append(s)
        self.width = s.start_x + s.width
        return True
    def add_book(self, book_id: int, width: int, group_name: str, lc: LayoutConstraints) -> bool:
        # Append a book spine; returns False when there is no room on this row
        if (x := self._get_x_for_item(width, lc)) is None:
            return False
        s = ShelfItem(
            start_x=x, book_id=book_id, reduce_height_by=height_reduction_for_book_id(book_id),
            width=width, group_name=group_name, case_start_y=self.start_y, idx=len(self.items), case_idx=self.idx)
        self.items.append(s)
        self.width = s.start_x + s.width
        return True
    @property
    def is_shelf(self) -> bool:
        # Shelf boards are created with is_shelf=True and carry no items
        return self.items is None
    def shift_for_expanded_cover(self, shelf_item: ShelfItem, lc: LayoutConstraints, width: int) -> CaseItem:
        # Return a copy of this row with shelf_item expanded to *width*.
        # Depending on the 'bookshelf_hover' preference the neighbours are
        # either shifted aside to make room, or simply overlapped.
        # Returns self unchanged when no expansion is needed.
        if (extra := width - shelf_item.width) <= 0:
            return self
        ans = CaseItem(y=self.start_y, height=self.height, idx=self.idx)
        # Use free space at the right edge first, then distribute the rest
        space_at_right_edge = max(0, lc.width - self.width)
        left_shift = 0
        right_shift = min(space_at_right_edge, extra)
        extra -= right_shift
        if extra > 0:
            # Only shift a side if it has a few items to absorb the shift
            shift_left = shelf_item.idx > 2
            shift_right = shelf_item.idx < len(self.items) - 3
            if shift_left:
                if shift_right:
                    left_shift += extra // 2
                    right_shift += extra - left_shift
                else:
                    left_shift += extra
            else:
                right_shift += extra
        if gprefs['bookshelf_hover'] == 'shift':
            # Rebuild the row with items left of shelf_item shifted left and
            # items right of it shifted right
            for i, item in enumerate(self.items):
                if i < shelf_item.idx:
                    if left_shift:
                        item = item._replace(start_x=item.start_x - left_shift)
                elif i == shelf_item.idx:
                    item = ans.expanded_item = item._replace(start_x=item.start_x - left_shift, width=width, is_hover_expanded=True)
                elif right_shift:
                    item = item._replace(start_x=item.start_x + right_shift)
                ans.items.append(item)
                ans.width = item.start_x + item.width
        else:
            # Expand in place, overlapping neighbours instead of moving them
            ans.items = self.items[:]
            item = ans.items[shelf_item.idx]
            ans.items[shelf_item.idx] = ans.expanded_item = item._replace(start_x=item.start_x - left_shift, width=width, is_hover_expanded=True)
        return ans
def get_grouped_iterator(db: Cache, book_ids_iter: Iterable[int], field_name: str = '') -> Iterator[tuple[str, Iterable[int]]]:
    '''Group the given books by *field_name*.

    First yields ('', n) where n is the (maximum) number of groups, then one
    (group_display_name, book_ids) pair per non-empty group. Books inside a
    group keep the relative order they had in book_ids_iter; books with no
    value for the field are yielded last under a field-specific name.
    '''
    formatter = lambda x: x # noqa: E731
    fm = db.field_metadata
    sort_key = numeric_sort_key
    get_books_in_group = lambda group: db.books_for_field(field_name, group) # noqa: E731
    get_field_id_map = lambda: db.get_id_map(field_name) # noqa: E731
    # Remember the incoming order so it can be preserved inside each group
    sort_map = {book_id: i for i, book_id in enumerate(book_ids_iter)}
    all_book_ids = frozenset(sort_map)
    ungrouped_name = all_groupings().get(field_name, _('Unknown'))
    dt = fm.get(field_name, {}).get('datatype')
    match field_name:
        case '':
            # No grouping: a single anonymous group containing everything
            yield '', 0
            yield '', book_ids_iter
            return
        case 'authors':
            # Sort authors by their author-sort value, falling back to the name
            field_id_map = db.get_id_map('authors')
            author_sort_map = db.author_sorts()
            def gas(aid: int, au: str) -> str:
                try:
                    return author_sort_map[aid]
                except KeyError:
                    return au
            sort_key = {au: gas(aid, au) for aid, au in field_id_map.items()}.__getitem__
            get_field_id_map = lambda: field_id_map # noqa: E731
            del gas
        case 'languages':
            # Display translated language names, and sort by them
            lm = lang_map()
            formatter = lambda x: lm.get(x, x) # noqa: E731
            sort_key = lambda x: numeric_sort_key(formatter(x)) # noqa: E731
        case field_name if dt == 'rating':
            formatter = rating_to_stars
            sort_key = lambda x: -x # noqa: E731
            ungrouped_name = _('Unrated')
        case field_name if dt == 'datetime':
            df = fm[field_name].get('display', {}).get('date_format') or 'dd MMM yyyy'
            if 'd' in df:
                # Date format includes days, so group by (year, month)
                lsys = QLocale.system().monthName
                month_map = db.books_by_month(field=field_name, restrict_to_books=all_book_ids)
                get_books_in_group = month_map.__getitem__
                get_field_id_map = lambda: {x: x for x in month_map} # noqa: E731
                sort_key = lambda x: (-x[0], -x[1]) # noqa: E731
                formatter = lambda x: (f'{lsys(x[1], QLocale.FormatType.ShortFormat)} {x[0]}' if x[0] > UNDEFINED_DATE.year else ungrouped_name) # noqa: E731
            else:
                # Coarser date format: group by year only
                year_map = db.books_by_year(field=field_name, restrict_to_books=all_book_ids)
                get_books_in_group = year_map.__getitem__
                get_field_id_map = lambda: {x: x for x in year_map} # noqa: E731
                sort_key = lambda x: -x # noqa: E731
                formatter = lambda x: str(x) if x > UNDEFINED_DATE.year else ungrouped_name # noqa: E731
    field_id_map = get_field_id_map()
    yield '', len(field_id_map)
    seen = set()
    for group in sorted(field_id_map, key=lambda fid: sort_key(field_id_map[fid])):
        # A book can be in multiple groups (e.g. tags); it is only yielded
        # with the first group it appears in
        books_in_group = (get_books_in_group(group) & all_book_ids) - seen
        if books_in_group:
            seen |= books_in_group
            yield formatter(field_id_map[group]), sorted(books_in_group, key=sort_map.__getitem__)
    if ungrouped_name and (leftover := all_book_ids - seen):
        # Books with no value for the grouping field come last
        yield ungrouped_name, sorted(leftover, key=sort_map.__getitem__)
def base_log(f: float, b: float = 1) -> float:
    '''Map a fraction *f* in [0, 1] onto [0, 1] logarithmically.

    *b* controls the curvature: larger values compress the top of the
    range more strongly. *f* is clamped to [0, 1] before mapping, so
    base_log(0) == 0 and base_log(1) == 1 for any b.
    '''
    clamped = max(0, min(f, 1))
    return math.log(1 + clamped * b, b + 1)
def width_from_pages(pages: int, num_of_pages_for_max_width: int = 1500, logarithmic_factor: float = 2) -> float:
    '''Fraction of the maximum spine width for a book of *pages* pages.

    Books at or beyond *num_of_pages_for_max_width* pages map to 1.0; the
    response is logarithmic so thin books remain distinguishable.
    '''
    fraction_of_max = pages / num_of_pages_for_max_width
    return base_log(fraction_of_max, b=logarithmic_factor)
def width_from_size(sz: int, log_factor: float = 2) -> float:
    '''Fraction of the maximum spine width for a book of file size *sz* bytes.'''
    fraction_of_max = normalised_size(sz)
    return base_log(fraction_of_max, b=log_factor)
def get_spine_width(
    book_id: int, db: Cache, spine_size_template: str, template_cache: dict[str, str],
    lc: LayoutConstraints, cache: dict[int, int]
) -> int:
    '''
    Return the pixel width of the spine for *book_id* per *spine_size_template*,
    memoized in *cache*. Computed widths are clamped into
    [lc.min_spine_width, lc.max_spine_width]; anything unusable falls back to
    lc.default_spine_width.
    '''
    # Re-use a cached width only while it still fits the current constraints
    if (ans := cache.get(book_id)) is not None and lc.min_spine_width <= ans <= lc.max_spine_width:
        return ans
    def linear(f: float):
        # Map a fraction in [0, 1] linearly onto the allowed width range
        return lc.min_spine_width + int(max(0, min(f, 1)) * (lc.max_spine_width - lc.min_spine_width))
    ans = -1
    match spine_size_template:
        case '{pages}' | 'pages':
            pages = db.field_for('pages', book_id, 0)
            if pages > 0:
                ans = linear(width_from_pages(pages))
            else:
                # Page count not available (yet): fall back to file size
                ans = linear(width_from_size(db.field_for('size', book_id, 0)))
        case '{size}' | 'size':
            ans = linear(width_from_size(db.field_for('size', book_id, 0)))
        case '{random}' | 'random':
            # range: 0.25-0.75
            ans = linear((25+(random_from_id(book_id, limit=51)))/100)
        case '':
            ans = lc.default_spine_width
        case _:
            # First try to interpret the template as a literal fraction
            with suppress(Exception):
                if 0 <= (x := float(spine_size_template)) <= 1:
                    ans = linear(x)
            if ans < 0:
                # Otherwise evaluate it as a calibre template expected to
                # produce a number convertible to a fraction
                with suppress(Exception):
                    mi = db.get_proxy_metadata(book_id)
                    rslt = mi.formatter.safe_format(spine_size_template, mi, 'template error', mi, template_cache=template_cache)
                    ans = linear(float(rslt))
    if ans <= 0:
        ans = lc.default_spine_width
    cache[book_id] = ans
    return ans
class LayoutPayload(NamedTuple):
    '''Immutable snapshot of everything the layout worker needs for one run.'''
    # set when this layout run becomes stale and must be abandoned
    invalidate_event: Event
    layout_constraints: LayoutConstraints
    # field used to group books into labelled sections ('' disables grouping)
    group_field_name: str
    # model row number -> book id, in current sort order
    row_to_book_id: tuple[int, ...]
    # the following containers are filled in by the worker as it lays out books
    book_id_to_item_map: dict[int, ShelfItem]
    book_id_visual_order_map: dict[int, int]
    book_ids_in_visual_order: list[int]
    # minimum pixel height needed to render the spine text
    min_line_height: int
class BookCase(QObject):
    '''
    Holds the computed layout of books into shelves and runs the layout in a
    background thread. self.items is an alternating sequence of book-row and
    shelf-base CaseItem entries, ordered by y position. All shared state is
    guarded by self.lock.
    '''
    items: list[CaseItem]  # alternating book-row / shelf-base items, in y order
    layout_finished: bool = False  # True once every book has been laid out
    height: int = 0  # total laid-out height in pixels so far
    # emitted with (book_row_item, shelf_base_item) as each shelf is committed
    shelf_added = pyqtSignal(object, object)
    num_of_groups_changed = pyqtSignal()
    def __init__(self, parent: QObject = None):
        super().__init__(parent)
        self.worker: Thread | None = None  # lazily started layout thread
        self.row_to_book_id: tuple[int, ...] = ()
        self._book_id_to_row_map: dict[int, int] = {}
        self.book_id_visual_order_map: dict[int, int] = {}
        self.book_ids_in_visual_order: list[int] = []
        self.num_of_books_that_need_pages_counted = 0
        self.using_page_counts = False
        # LIFO so the most recent layout request is processed first
        self.queue: LifoQueue[LayoutPayload] = LifoQueue()
        self.lock = RLock()
        # set() on this tells any in-flight layout run to abandon its work
        self.current_invalidate_event = Event()
        self.spine_width_cache: dict[int, int] = {}
        self.num_of_groups = 0
        self.payload: LayoutPayload | None = None
        self.invalidate()
    def shutdown(self):
        '''Abort any in-flight layout, disconnect listeners and stop the worker.'''
        self.current_invalidate_event.set()
        self.current_invalidate_event = Event()
        with suppress(TypeError):
            self.num_of_groups_changed.disconnect()
        with suppress(TypeError):
            self.shelf_added.disconnect()
        if self.worker is not None:
            self.queue.shutdown(immediate=True)  # wakes the worker with ShutDown
            w, self.worker = self.worker, None
            if current_thread().is_alive() and w.is_alive():
                w.join()
    def clear_spine_width_cache(self):
        self.spine_width_cache = {}
    def shelf_with_ypos(self, y: int) -> CaseItem | None:
        ' Return the container of books or shelf that contains the specified y position '
        for shelf in self.iter_shelves_from_ypos(y):
            return shelf
        return None
    def iter_shelves_from_ypos(self, y: int) -> Iterator[CaseItem]:
        '''Yield items from the one containing *y* (if any) to the end.'''
        # NOTE(review): self.lock is held while this generator is suspended at
        # a yield; callers are expected to exhaust/close it promptly - confirm
        with self.lock:
            idx = bisect.bisect_right(self.items, y, key=attrgetter('start_y'))
            if idx > 0:
                candidate: CaseItem = self.items[idx-1]
                if y < candidate.start_y + candidate.height:
                    for i in range(idx-1, len(self.items)):
                        yield self.items[i]
    @property
    def current_height(self) -> int:
        # Height laid out so far, plus one step of headroom while still working
        with self.lock:
            ans = 0
            if self.items:
                ans = self.items[-1].start_y + self.items[-1].height
            if not self.layout_finished:
                ans += self.layout_constraints.step_height
            return ans
    @property
    def max_possible_height(self) -> int:
        # Worst-case estimate used to size the scrollbar before layout finishes
        with self.lock:
            if self.layout_finished or self.layout_constraints.width == 0:
                return self.current_height
            num_of_rows = (self.num_of_groups + len(self.row_to_book_id)) * self.layout_constraints.max_spine_width // self.layout_constraints.width
            return (num_of_rows + 1) * self.layout_constraints.step_height
    def invalidate(
        self, layout_constraints: LayoutConstraints = LayoutConstraints(),
        model: BooksModel | None = None, group_field_name: str = '', min_line_height: int = 0,
    ) -> None:
        '''Discard the current layout and queue up a fresh LayoutPayload.

        Pass *model* when the set of books to display has changed.
        '''
        with self.lock:
            self.current_invalidate_event.set()
            self.current_invalidate_event = Event()
            self.group_field_name = group_field_name
            self.items = []
            self.height = 0
            self.using_page_counts = False
            self.num_of_books_that_need_pages_counted = 0
            self.layout_constraints = layout_constraints
            self.book_id_visual_order_map: dict[int, int] = {}
            self.book_ids_in_visual_order = []
            self.book_id_to_item_map: dict[int, ShelfItem] = {}
            self.num_of_groups = 0
            if model is not None and (db := model.db) is not None:
                # implies set of books to display has changed
                self.row_to_book_id = db.data.index_to_id_map()
                self._book_id_to_row_map = {}
                self.dbref = weakref.ref(db)
            self.layout_finished = not bool(self.row_to_book_id)
            self.payload = LayoutPayload(
                self.current_invalidate_event, self.layout_constraints, self.group_field_name, self.row_to_book_id,
                self.book_id_to_item_map, self.book_id_visual_order_map, self.book_ids_in_visual_order,
                min_line_height)
    def ensure_layouting_is_current(self) -> None:
        '''Hand the pending payload (if any) to the worker, starting it lazily.'''
        with self.lock:
            if self.layout_constraints.width > 0 and self.payload is not None:
                if self.worker is None:
                    self.worker = Thread(target=self.layout_thread, name='BookCaseLayout', daemon=True)
                    self.worker.start()
                p, self.payload = self.payload, None
                self.queue.put(p)
    @property
    def book_id_to_row_map(self) -> dict[int, int]:
        # Inverse of row_to_book_id, built lazily
        if self.row_to_book_id and not self._book_id_to_row_map:
            self._book_id_to_row_map = {bid: r for r, bid in enumerate(self.row_to_book_id)}
        return self._book_id_to_row_map
    def layout_thread(self) -> None:
        '''Worker loop: process layout payloads until the queue is shut down.'''
        while True:
            try:
                x = self.queue.get()
            except ShutDown:
                break
            try:
                self.do_layout_in_worker(*x)
            except Exception:
                import traceback
                traceback.print_exc()
    def do_layout_in_worker(
        self, invalidate: Event, lc: LayoutConstraints, group_field_name: str, row_to_book_id: tuple[int, ...],
        book_id_to_item_map: dict[int, ShelfItem], book_id_visual_order_map: dict[int, int],
        book_ids_in_visual_order: list[int], min_line_height: int,
    ) -> None:
        '''Lay books out into shelves, committing one shelf at a time.

        Runs in the worker thread; checks *invalidate* frequently and bails
        out as soon as the payload is stale.
        '''
        if lc.width < lc.max_spine_width:
            return
        def commit_case_item(x: CaseItem) -> int:
            # Append the finished row of books plus its shelf base, notify
            # listeners and return the new total height
            with self.lock:
                if invalidate.is_set():
                    return self.height
                self.items.append(x)
                self.height += lc.spine_height
                self.items.append(CaseItem(idx=len(self.items), y=self.height, height=lc.shelf_height, is_shelf=True))
                self.height += lc.shelf_height
                self.shelf_added.emit(x, self.items[-1])
                return self.height
        current_case_item = CaseItem(height=lc.spine_height)
        mdb = self.dbref()
        if mdb is None or invalidate.is_set():
            return
        db = mdb.new_api
        start_with_divider = gprefs['bookshelf_start_with_divider']
        spine_size_template = db.pref('bookshelf_spine_size_template', get_default_from_defaults=True) or ''
        if gprefs['bookshelf_make_space_for_second_line']:
            author_template = db.pref('bookshelf_author_template', get_default_from_defaults=True) or ''
            if author_template.strip():
                min_line_height *= 2
        template_cache = {}
        group_iter = get_grouped_iterator(db, row_to_book_id, group_field_name)
        # First item from the iterator is ('', number_of_groups)
        _, num_of_groups = next(group_iter)
        with self.lock:
            if invalidate.is_set():
                return
            self.num_of_groups = num_of_groups
            self.num_of_groups_changed.emit()
        num_of_books_that_need_pages_counted = db.num_of_books_that_need_pages_counted()
        # Ensure there is enough width for the spine text
        min_width = min(max(min_line_height, lc.min_spine_width), lc.max_spine_width-1)
        lc = lc._replace(min_spine_width=min_width)
        add_group_dividers = gprefs['bookshelf_divider_style'] != 'hidden'
        for group_name, book_ids_in_group in group_iter:
            if invalidate.is_set():
                return
            if add_group_dividers and not current_case_item.add_group_divider(group_name, lc):
                # Divider does not fit on this shelf: start a new one
                y = commit_case_item(current_case_item)
                current_case_item = CaseItem(y=y, height=lc.spine_height, idx=len(self.items))
                current_case_item.add_group_divider(group_name, lc)
            for book_id in book_ids_in_group:
                if invalidate.is_set():
                    return
                try:
                    spine_width = get_spine_width(
                        book_id, db, spine_size_template, template_cache, lc, self.spine_width_cache)
                except Exception:
                    spine_width = lc.default_spine_width
                if not current_case_item.add_book(book_id, spine_width, group_name, lc):
                    # Shelf is full. If it ends with a dangling divider, move
                    # that divider to the start of the next shelf.
                    case_end_divider = ''
                    if current_case_item.items[-1].is_divider:
                        case_end_divider = current_case_item.items.pop(-1).group_name
                    y = commit_case_item(current_case_item)
                    current_case_item = CaseItem(y=y, height=lc.spine_height, idx=len(self.items))
                    if add_group_dividers:
                        if case_end_divider:
                            current_case_item.add_group_divider(case_end_divider, lc)
                        elif start_with_divider:
                            current_case_item.add_group_divider(group_name, lc)
                    if not current_case_item.add_book(book_id, spine_width, group_name, lc):
                        raise ValueError(
                            f'Failed to add a single book to a new shelf: {book_id=} {spine_width=} {lc.width=} {lc.max_spine_width=}')
                book_id_to_item_map[book_id] = current_case_item.items[-1]
                book_id_visual_order_map[book_id] = len(book_id_visual_order_map)
                book_ids_in_visual_order.append(book_id)
        if current_case_item.items:
            # Commit the final, possibly partially filled, shelf
            commit_case_item(current_case_item)
        with self.lock:
            if invalidate.is_set():
                return
            self.layout_finished = True
            self.num_of_books_that_need_pages_counted = num_of_books_that_need_pages_counted
            self.using_page_counts = spine_size_template in ('{pages}', 'pages')
            if len(self.items) > 1:
                self.shelf_added.emit(self.items[-2], self.items[-1])
            else:
                self.shelf_added.emit(None, None)
    def visual_row_cmp(self, a: int, b: int) -> int:
        ' Compares if a or b (book_row numbers) is visually before the other in left-to-right top-to-bottom order'
        try:
            a = self.row_to_book_id[a]
            b = self.row_to_book_id[b]
        except IndexError:
            # Fall back to plain row order when the rows are unknown
            return a - b
        return self.book_id_visual_order_map[a] - self.book_id_visual_order_map[b]
    def visual_selection_between(self, a: int, b: int) -> Iterator[int]:
        ' Return all book_rows visually from a to b in left to right top-to-bottom order '
        a = self.row_to_book_id[a]
        b = self.row_to_book_id[b]
        aidx = self.book_ids_in_visual_order.index(a)
        bidx = self.book_ids_in_visual_order.index(b)
        s, e = min(aidx, bidx), max(aidx, bidx)
        yield from map(self.book_id_to_row_map.__getitem__, self.book_ids_in_visual_order[s:e+1])
    def visual_neighboring_book(self, book_id: int, delta: int = 1, allow_wrap: bool = False, in_bound: bool = False) -> int:
        '''Return the book id *delta* positions away in visual order, or 0.

        With allow_wrap, the index wraps around the ends; with in_bound it is
        clamped to the valid range instead.
        '''
        idx = self.book_id_visual_order_map[book_id]
        nidx = idx + delta
        if allow_wrap:
            nidx = (nidx + len(self.book_ids_in_visual_order)) % len(self.book_ids_in_visual_order)
        if in_bound:
            # Fixed: was max(0, min(len(...)-1), nidx), where single-argument
            # min() raises TypeError; clamp nidx into the valid index range
            nidx = max(0, min(nidx, len(self.book_ids_in_visual_order)-1))
        if 0 <= nidx < len(self.book_ids_in_visual_order):
            return self.book_ids_in_visual_order[nidx]
        return 0
    def shelf_of_book(self, book_id: int) -> CaseItem | None:
        if si := self.book_id_to_item_map.get(book_id):
            return self.items[si.case_idx]
        return None
    def end_book_on_shelf_of(self, book_id: int, first: bool = False) -> int:
        # Book id at the start (first=True) or end of the shelf holding book_id
        if ci := self.shelf_of_book(book_id):
            return ci.items[0 if first else -1].book_id
        return 0
    def book_in_column_of(self, book_id: int, delta: int = 1, allow_wrap: bool = False, in_bound: bool = False) -> int:
        '''Return the book id in the same column, *delta* shelves away, or 0.'''
        if not (si := self.book_id_to_item_map.get(book_id)):
            return 0  # was a bare return; all other paths return an int
        if not (ci := self.shelf_of_book(book_id)):
            return 0
        # items alternates book-row / shelf-base, hence the factor of two
        shelf_idx = ci.idx // 2 + delta
        num_shelves = len(self.items) // 2
        if allow_wrap:
            shelf_idx = (shelf_idx + num_shelves) % num_shelves
        if in_bound:
            shelf_idx = max(0, min(shelf_idx, num_shelves-1))
        if shelf_idx < 0 or shelf_idx >= num_shelves:
            return 0
        target_shelf = self.items[shelf_idx * 2]
        if not (target_si := target_shelf.book_or_divider_at_region(si, self.layout_constraints)):
            return 0
        return ans.book_id if (ans := target_shelf.closest_book_to(target_si.idx)) else 0
# }}}
class ExpandedCover(QObject): # {{{
    '''
    Manages the hover "expanded cover" effect: after a debounce delay the
    hovered spine animates into a full cover thumbnail, shifting its
    neighbours on the shelf to make room.
    '''
    # emitted whenever the on-screen appearance changes (repaint needed)
    updated = pyqtSignal()
    def __init__(self, parent: BookshelfView):
        super().__init__(parent)
        self._opacity = 0  # backing store for the animated 'opacity' property
        self._size = QSize()  # backing store for the animated 'size' property
        self.is_showing_cover = False
        self.shelf_item: ShelfItem | None = None
        self.case_item: CaseItem | None = None
        # copy of case_item with neighbours shifted to fit the expanded cover
        self.modified_case_item: CaseItem | None = None
        self.cover_renderer: CachedCoverRenderer = CachedCoverRenderer(PixmapWithDominantColor())
        self.opacity_animation = a = QPropertyAnimation(self, b'opacity')
        a.setEasingCurve(QEasingCurve.Type.InOutCubic)
        a.setStartValue(0.3)
        a.setEndValue(1)
        self.size_animation = a = QPropertyAnimation(self, b'size')
        a.setEasingCurve(QEasingCurve.Type.OutCubic)
        # run the opacity and size animations in lock-step
        self.animation = a = QParallelAnimationGroup(self)
        a.addAnimation(self.opacity_animation)
        a.addAnimation(self.size_animation)
        # debounce hover events so rapid mouse moves do not start animations
        self.debounce_timer = t = QTimer(self)
        t.setInterval(120)
        t.timeout.connect(self.start)
        t.setSingleShot(True)
    @property
    def layout_constraints(self) -> LayoutConstraints:
        return self.parent().layout_constraints
    def shelf_item_hovered(self, case_item: CaseItem | None = None, shelf_item: ShelfItem | None = None) -> None:
        # Record the hovered item and (re)start the debounce timer
        self.pending_shelf_item, self.pending_case_item = shelf_item, case_item
        self.debounce_timer.start()
    def start(self) -> None:
        # Fired by the debounce timer: begin expanding the pending item if the
        # hovered book actually changed
        if getattr(self.pending_shelf_item, 'book_id', -1) == getattr(self.shelf_item, 'book_id', -1):
            self.pending_case_item = self.pending_shelf_item = None
            return
        self.invalidate()
        self.shelf_item, self.case_item = self.pending_shelf_item, self.pending_case_item
        self.pending_case_item = self.pending_shelf_item = None
        if self.shelf_item is not None:
            duration = 0 if config['disable_animations'] else gprefs['bookshelf_fade_time']
            if duration > 0:
                self.opacity_animation.setStartValue(gprefs['bookshelf_thumbnail_opacity'] / 100)
                self.opacity_animation.setDuration(duration)
                self.size_animation.setDuration(duration)
            lc = self.layout_constraints
            # starting size: the spine as currently drawn
            sz = QSize(self.shelf_item.width, lc.spine_height - self.shelf_item.reduce_height_by)
            self.modified_case_item = self.case_item
            pixmap, final_sz = self.parent().load_hover_cover(self.shelf_item)
            self.cover_renderer.set_pixmap(pixmap)
            self.size_animation.setStartValue(sz)
            self.size_animation.setEndValue(final_sz)
            self.is_showing_cover = True
            if duration > 0:
                self.animation.start()
            else:
                # animations disabled: jump straight to the final state
                self._opacity = 1
                self._size = final_sz
                self.shift_items()
                self.updated.emit()
    def invalidate(self) -> None:
        # Cancel any pending/ongoing expansion and reset all state
        self.shelf_item = self.case_item = self.modified_case_item = None
        self.animation.stop()
        self.debounce_timer.stop()
        self.is_showing_cover = False
    @pyqtProperty(float)
    def opacity(self) -> float:
        return self._opacity
    @opacity.setter
    def opacity(self, val: float) -> None:
        self._opacity = val
    @pyqtProperty(QSize)
    def size(self) -> QSize:
        return self._size
    @size.setter
    def size(self, val: QSize) -> None:
        self._size = val
        # re-shift neighbours on every animation frame as the cover grows
        self.shift_items()
        self.updated.emit()
    def shift_items(self) -> None:
        self.modified_case_item = self.case_item.shift_for_expanded_cover(
            self.shelf_item, self.layout_constraints, self.size.width())
    @property
    def expanded_cover_should_be_displayed(self) -> bool:
        return self.shelf_item is not None and self.modified_case_item is not None and self.is_showing_cover
    def modify_shelf_layout(self, case_item: CaseItem) -> CaseItem:
        # Substitute the shifted layout for the shelf containing the hover
        if self.expanded_cover_should_be_displayed and case_item is self.case_item:
            case_item = self.modified_case_item
        return case_item
    def is_expanded(self, book_id: int) -> bool:
        return self.expanded_cover_should_be_displayed and self.shelf_item.book_id == book_id
    def draw_expanded_cover(
        self, painter: QPainter, scroll_y: int, lc: LayoutConstraints, selection_highlight_color: QColor
    ) -> None:
        # Paint the expanded cover at its shifted position, with a selection
        # outline when a valid highlight color is supplied
        shelf_item = self.modified_case_item.items[self.shelf_item.idx]
        cover_rect = shelf_item.rect(lc)
        cover_rect.translate(0, -scroll_y)
        pmap, margin = self.cover_renderer.as_pixmap(cover_rect.size(), self.opacity, self.parent())
        painter.drawPixmap(cover_rect.topLeft() - QPoint(margin, margin), pmap)
        if selection_highlight_color.isValid():
            pen = QPen(selection_highlight_color)
            pen.setWidth(2)
            pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
            painter.setPen(pen)
            painter.setBrush(Qt.BrushStyle.NoBrush)
            painter.setOpacity(1.0)
            painter.drawRect(cover_rect)
# }}}
class SavedState(NamedTuple):
    '''Snapshot of the view's selection, used to save/restore selection state.'''
    # book id of the current (focused) book, 0/None semantics per caller
    current_book_id: int
    # ids of all selected books
    selected_book_ids: set[int]
@setup_dnd_interface
class BookshelfView(MomentumScrollMixin, QAbstractScrollArea):
    '''
    Enhanced bookshelf view displaying books as spines on shelves.
    This view provides an immersive browsing experience with sorting
    and grouping capabilities.
    '''
    files_dropped = pyqtSignal(object)
    books_dropped = pyqtSignal(object)
    # Dimensions
    layout_constraints: LayoutConstraints
    DIVIDER_LINE_WIDTH = 2 # Width of the gradient line in divider
    TEXT_MARGIN = 6  # padding in px around spine text
    EMBLEM_SIZE = 24  # maximum px size for top/bottom emblems
    EMBLEM_MARGIN = 2  # px gap between adjacent emblems
    def __init__(self, gui):
        '''Build the bookshelf view and all of its helper objects.'''
        super().__init__(gui)
        self.first_painted_at = 0  # monotonic timestamp of the first paint
        self.auto_scroll = True
        self.scroll_to_current_after_layout: bool = False
        self.theme: ColorTheme = None
        self.palette_changed()
        self.spine_font = self.default_spine_font = QFont(self.font())
        self.spine_font.setBold(True)
        self.divider_font = QFont(self.spine_font)
        self.base_font_size_pts = QFontInfo(self.spine_font).pointSizeF()
        self.outline_width = 0
        self.min_line_height = self.base_font_size_pts * 1.2
        self.gui = gui
        self._model: BooksModel | None = None
        self.context_menu: QMenu | None = None
        self.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAsNeeded)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)
        # Debounce resizes, since a resize forces a full (expensive) re-layout
        self.resize_debounce_timer = t = QTimer(self)
        t.timeout.connect(self.resize_debounced)
        t.setSingleShot(True), t.setInterval(200)
        # Periodically re-check for finished background page counting
        self.pages_count_update_check_timer = t = QTimer(self)
        t.timeout.connect(self.check_pages_count_update)
        t.setSingleShot(True), t.setInterval(2000)
        self.setMouseTracking(True)
        self.setFocusPolicy(Qt.FocusPolicy.StrongFocus)
        QApplication.instance().palette_changed.connect(self.palette_changed)
        # Ensure viewport receives mouse events
        self.viewport().setMouseTracking(True)
        self.viewport().setAttribute(Qt.WidgetAttribute.WA_MouseTracking, True)
        # Cover template caching
        self.template_inited = False
        self.emblem_rules = []
        self.template_is_empty = {}
        self.first_line_renderer = self.build_template_renderer('title', '{title}')
        self.second_line_renderer = self.build_template_renderer('authors', '')
        # Initialize drag and drop
        # NOTE(review): the class is also decorated with @setup_dnd_interface;
        # presumably the helper handles both class and instance forms - confirm
        setup_dnd_interface(self)
        self.bookcase = BookCase(self)
        # Queued connections: shelf layout happens on a worker thread
        self.bookcase.shelf_added.connect(self.on_shelf_layout_done, type=Qt.ConnectionType.QueuedConnection)
        self.bookcase.num_of_groups_changed.connect(self.update_scrollbar_ranges, type=Qt.ConnectionType.QueuedConnection)
        # Selection tracking
        self._selection_model: QItemSelectionModel = QItemSelectionModel(None, self)
        self._selection_model.selectionChanged.connect(self.update_viewport)
        self._selection_model.currentChanged.connect(self.on_current_changed)
        self.click_start_data: ClickStartData | None = None
        # Cover loading and caching
        self.expanded_cover = ExpandedCover(self)
        self.expanded_cover.updated.connect(self.update_viewport)
        self.layout_constraints = LayoutConstraints()
        self.layout_constraints = self.layout_constraints._replace(width=self.get_available_width())
        self.grouping_mode = ''
        self.refresh_settings()
        self.cover_cache = CoverThumbnailCache(
            name='bookshelf-thumbnail-cache', ram_limit=800,
            max_size=gprefs['bookshelf_disk_cache_size'], thumbnailer=ThumbnailerWithDominantColor(),
            thumbnail_size=self.thumbnail_size(), parent=self, version=2,
        )
        self.cover_cache.rendered.connect(self.update_viewport, type=Qt.ConnectionType.QueuedConnection)
    def calculate_shelf_geometry(self) -> None:
        '''Derive spine/shelf pixel sizes from prefs and screen geometry.'''
        lc = self.layout_constraints
        if (h := gprefs['bookshelf_height']) < 120 or h > 1200:
            # Pref outside the sane range: derive a height from the largest screen
            screen_height = 0
            for screen in QApplication.instance().screens():
                if screen.availableSize().height() > screen.availableSize().width() * 1.5:
                    # portrait-oriented screen: use its width as effective height
                    screen_height = max(screen_height, screen.availableSize().width())
                else:
                    screen_height = max(screen_height, screen.availableSize().height())
            h = max(100 + lc.shelf_height, screen_height // 3)
        lc = lc._replace(spine_height=h - lc.shelf_height, width=self.get_available_width())
        # Keep aspect ratio of spines
        default = LayoutConstraints()
        hr = lc.spine_height / default.spine_height
        lc = lc._replace(
            min_spine_width=math.ceil(default.min_spine_width * hr),
            max_spine_width=math.ceil(default.max_spine_width * hr),
            default_spine_width=math.ceil(default.default_spine_width * hr),
            hover_expanded_width=math.ceil(default.hover_expanded_width * hr)
        )
        self.layout_constraints = lc
def thumbnail_size(self) -> tuple[int, int]:
lc = self.layout_constraints
dpr = self.devicePixelRatioF()
sz = QSizeF(lc.max_spine_width * dpr, lc.spine_height * dpr).toSize()
return sz.width(), sz.height()
# Templates rendering methods
def init_template(self, db):
'''Initialize templates and database settings.'''
if not db:
return
if self.template_inited and self.dbref() == db.new_api:
return
def db_pref(key):
return db.new_api.pref(key, get_default_from_defaults=True)
title = db_pref('bookshelf_title_template') or ''
self.first_line_renderer = self.build_template_renderer('title', title)
authors = db_pref('bookshelf_author_template') or ''
self.second_line_renderer = self.build_template_renderer('authors', authors)
self.template_inited = True
self.emblem_rules = db_pref('bookshelf_icon_rules') or []
    def build_template_renderer(self, column_name: str, template: str) -> partial[str]:
        '''Return a callable(book_id) -> str that renders *template* for *column_name*.'''
        self.template_is_empty[column_name] = not template.strip()
        return partial(self.render_template, column_name, template)
    def render_template(self, column_name: str, template: str, book_id: int) -> str:
        '''Render *template* for *book_id*, with fast paths for common templates.'''
        if not (db := self.dbref()):
            return ''
        self.init_template(db)
        if self.template_is_empty[column_name]:
            return ''
        if not (m := self.model()):
            return ''
        # Fast paths: resolve trivial single-field templates without invoking
        # the full template engine
        match template:
            case '{title}':
                return db.field_for('title', book_id)
            case '{author_sort}':
                return db.field_for('author_sort', book_id)
            case '{author}' | '{authors}':
                return authors_to_string(db.field_for('authors', book_id))
            case '{sort}' | '{title_sort}':
                return db.field_for('sort', book_id)
        mi = db.get_proxy_metadata(book_id)
        rslt = mi.formatter.safe_format(
            template, mi, TEMPLATE_ERROR, mi, column_name=column_name, template_cache=m.bookshelf_template_cache)
        return rslt or ''
def render_emblem(self, book_id: int) -> str:
if not (m := self.model()):
return
db = m.db.new_api
self.init_template(db)
if not self.emblem_rules:
return ''
mi = None
for i, (kind, column, rule) in enumerate(self.emblem_rules):
icon_name, mi = render_emblem(book_id, rule, i, m.bookshelf_emblem_cache, mi, db, m.formatter, m.bookshelf_template_cache, column_name='bookshelf')
if icon_name:
return icon_name
return ''
    def refresh_settings(self):
        '''Refresh the gui and render settings.'''
        self.template_inited = False
        s = gprefs['bookshelf_font']
        if s and s.get('family'):
            self.spine_font = QFontDatabase.font(s['family'], s['style'], int(self.base_font_size_pts))
            self.spine_font.setPointSizeF(self.base_font_size_pts)
        else:
            self.spine_font = self.default_spine_font
        # Font may have changed: drop all cached font/metric objects
        self.get_sized_font.cache_clear()
        self.get_text_metrics.cache_clear()
        self.min_font_size = max(0.1, min(gprefs['bookshelf_min_font_multiplier'], 1)) * self.base_font_size_pts
        self.max_font_size = max(1, min(gprefs['bookshelf_max_font_multiplier'], 3)) * self.base_font_size_pts
        _, fm, _ = self.get_sized_font(self.min_font_size)
        self.outline_width = float(max(0, min(gprefs['bookshelf_outline_width'], 5)))
        self.min_line_height = math.ceil(fm.height() + self.outline_width * 2)
        self.calculate_shelf_geometry()
        self.palette_changed()
        if hasattr(self, 'cover_cache'):
            # cover_cache does not exist yet when called during __init__
            self.cover_cache.set_thumbnail_size(*self.thumbnail_size())
            self.cover_cache.set_disk_cache_max_size(gprefs['bookshelf_disk_cache_size'])
        self.update_ram_cache_size()
        self.invalidate(clear_spine_width_cache=True)
def palette_changed(self):
self.setPalette(dark_palette() if is_dark_theme() else light_palette())
self.theme = ColorTheme.dark_theme() if is_dark_theme() else ColorTheme.light_theme()
if gprefs['bookshelf_use_custom_colors']:
values = {}
valid = frozenset(self.theme._fields)
for k, v in gprefs['bookshelf_custom_colors']['dark' if is_dark_theme() else 'light'].items():
if k in valid and v and (c := QColor(v)).isValid():
values[k] = c
self.theme = self.theme._replace(**values)
def view_is_visible(self) -> bool:
'''Return if the bookshelf view is visible.'''
with suppress(AttributeError):
return self.gui.bookshelf_view_button.is_visible
return False
    def shutdown(self):
        '''Stop timers and background machinery before the view is destroyed.'''
        self.resize_debounce_timer.stop()
        self.pages_count_update_check_timer.stop()
        self.cover_cache.shutdown()
        self.bookcase.shutdown()
        self.expanded_cover.invalidate()
    def setModel(self, model: BooksModel | None) -> None:
        '''Set the model for this view.'''
        # model signal name -> handler method name on this view
        signals = {
            'dataChanged': 'model_data_changed', 'rowsInserted': 'model_rows_changed',
            'rowsRemoved': 'model_rows_changed', 'modelReset': 'model_reset',
        }
        if self._model is not None:
            # disconnect from the outgoing model first
            for s, tgt in signals.items():
                getattr(self._model, s).disconnect(getattr(self, tgt))
        self._model = model
        self.selectionModel().setModel(model)
        if model is not None:
            # Create selection model for sync
            for s, tgt in signals.items():
                getattr(self._model, s).connect(getattr(self, tgt))
        self.invalidate(set_of_books_changed=True, clear_spine_width_cache=True)
    def model(self) -> BooksModel | None:
        '''Return the model.'''
        return self._model
    def selectionModel(self) -> QItemSelectionModel:
        '''Return the selection model (required for AlternateViews integration).'''
        return self._selection_model
    def model_data_changed(self, top_left, bottom_right, roles):
        '''Handle model data changes: book metadata changed, so repaint.'''
        self.update_viewport()
    def model_rows_changed(self, parent, first, last):
        '''Handle model row changes: set of books changed, so re-layout.'''
        self.invalidate(set_of_books_changed=True)
    def model_reset(self):
        '''Handle model reset: re-layout from scratch.'''
        self.invalidate(set_of_books_changed=True)
    def dbref(self) -> Cache:
        '''Return the current database (new-style API).'''
        if m := self.model():
            return m.db.new_api
        # no model set yet: fall back to the GUI's current database
        return self.gui.current_db.new_api
    def book_id_from_row(self, row: int) -> int | None:
        '''Return the book id at this row.'''
        with suppress(Exception):
            return self.bookcase.row_to_book_id[row]
        return None
    def row_from_book_id(self, book_id: int) -> int | None:
        '''Return the row index for this book id, or None if unknown.'''
        return self.bookcase.book_id_to_row_map.get(book_id)
    @property
    def has_transient_scrollbar(self) -> bool:
        # Transient (overlay) scrollbars do not consume layout width
        return self.style().styleHint(QStyle.StyleHint.SH_ScrollBar_Transient, widget=self) != 0
    def resizeEvent(self, ev: QResizeEvent) -> None:
        # Re-layout is expensive, so only act once the resize settles
        self.resize_debounce_timer.start()
        return super().resizeEvent(ev)
def resize_debounced(self) -> None:
if self.layout_constraints.width != (new_width := self.get_available_width()) and new_width > 20:
self.layout_constraints = self.layout_constraints._replace(width=new_width)
self.invalidate()
    def update_scrollbar_ranges(self):
        '''Update scrollbar ranges based on the current shelf layouts.'''
        # Use the worst-case estimate so the scrollbar is usable before layout
        # completes; it is corrected once layout finishes
        total_height = self.bookcase.max_possible_height
        viewport_height = self.viewport().height()
        self.verticalScrollBar().setRange(0, max(0, total_height - viewport_height))
        self.verticalScrollBar().setPageStep(viewport_height)
        self.verticalScrollBar().setSingleStep(self.layout_constraints.step_height)
        self.update_ram_cache_size()
    def get_available_width(self) -> int:
        '''Width in px available for laying out spines, margins excluded.'''
        # We always layout assuming scrollbar takes up space unless it is a
        # transient scrollbar. This means when all books fit in the viewport there
        # will be some extra space on the right. This is an acceptable
        # compromise since, layouting is expensive and we cannot know if the
        # scrollbar is needed till we do layouting once.
        sw = 0 if self.has_transient_scrollbar else self.verticalScrollBar().width()
        return self.width() - (2 * self.layout_constraints.side_margin) - sw
    def invalidate(
        self, set_of_books_changed: bool = False, clear_spine_width_cache: bool = False,
    ) -> None:
        '''Throw away the current layout and schedule a fresh one.

        Pass set_of_books_changed when the displayed books (not just geometry)
        changed; clear_spine_width_cache when spine sizing inputs changed.
        '''
        if clear_spine_width_cache:
            self.bookcase.clear_spine_width_cache()
        self.bookcase.invalidate(
            self.layout_constraints, model=self.model() if set_of_books_changed else None,
            group_field_name=self.grouping_mode, min_line_height=self.min_line_height)
        if set_of_books_changed:
            self.expanded_cover.invalidate()
        self.update_scrollbar_ranges()
        self.update_viewport()
    def check_for_pages_update(self):
        '''Start polling for completed page counts when spine widths need them.'''
        # If there are a lot of books with pages yet to be counted, re-layout
        # once all have been counted
        if self.bookcase.num_of_books_that_need_pages_counted > 10 and self.bookcase.using_page_counts:
            self.pages_count_update_check_timer.start()
def check_pages_count_update(self):
if (db := self.dbref()):
num_of_books_that_need_pages_counted = db.new_api.num_of_books_that_need_pages_counted()
if num_of_books_that_need_pages_counted:
self.pages_count_update_check_timer.start()
else:
self.invalidate(clear_spine_width_cache=True)
    def on_shelf_layout_done(self, books: CaseItem | None, shelf: CaseItem | None) -> None:
        '''Queued slot run as each shelf is committed by the layout worker.'''
        if self.view_is_visible():
            if self.bookcase.layout_finished:
                self.update_scrollbar_ranges()
                self.check_for_pages_update()
                if self.scroll_to_current_after_layout:
                    self.scroll_to_current_after_layout = False
                    if (idx := self.currentIndex()).isValid():
                        self.scrollTo(idx)
            if books is not None and shelf is not None:
                y = books.start_y
                height = books.height + shelf.height
                # repaint only when the new shelf intersects the visible area
                r = self.viewport().rect()
                r.moveTop(self.verticalScrollBar().value())
                if self.bookcase.layout_finished or r.intersects(QRect(r.left(), y, r.width(), height)):
                    self.update_viewport()
    @property
    def shelves_per_screen(self) -> int:
        '''Number of shelf rows needed to fill the viewport (at least 1).'''
        viewport_height = self.viewport().height()
        lc = self.layout_constraints
        return max(1, math.ceil(viewport_height / lc.step_height))
    def update_ram_cache_size(self):
        '''Size the thumbnail RAM cache to a multiple of one screenful of covers.'''
        if hasattr(self, 'cover_cache'):
            lc = self.layout_constraints
            books_per_shelf = self.get_available_width() / lc.min_spine_width
            lm = gprefs['bookshelf_cache_size_multiple'] * books_per_shelf * self.shelves_per_screen
            self.cover_cache.set_ram_limit(max(0, int(lm)))
# Paint and Drawing methods
    def shown(self):
        '''Called when this view becomes active.'''
        if db := self.dbref():
            # kick off background page counting for spine-width calculations
            db.queue_pages_scan()
        QPixmapCache.setCacheLimit(max(QPixmapCache.cacheLimit(), 20 * 1024))
        self.bookcase.ensure_layouting_is_current()
    def update_viewport(self):
        '''Update viewport only if the bookshelf view is visible.'''
        if not self.view_is_visible():
            return
        self.viewport().update()
    def draw_emblems(self, painter: QPainter, item: ShelfItem, scroll_y: int) -> tuple[int, int]:
        '''Draw marked/on-device/custom emblems for *item*'s book.

        Emblems are collected into four position buckets (above/below the
        spine, or at its top/bottom) and drawn centered horizontally. Returns
        (top_size, bottom_size): vertical px consumed at the top and bottom of
        the spine, so the caller can shrink the text area accordingly.
        '''
        book_id = item.book_id
        above, below = [], []
        top, bottom = [], []
        top_size = bottom_size = 0
        if m := self.model():
            from calibre.gui2.ui import get_gui
            db = m.db
            marked = db.data.get_marked(book_id)
            if marked:
                below.append(m.marked_icon if marked == 'true' else m.marked_text_icon_for(marked))
            db = db.new_api
            device_connected = get_gui().device_connected is not None
            on_device = device_connected and db.field_for('ondevice', book_id)
            if on_device:
                # stack into 'above' if 'below' is already occupied
                which = above if below else below
                which.append(cached_emblem(0, m.bookshelf_bitmap_cache, ':ondevice'))
            custom = self.render_emblem(book_id)
            if custom:
                # user-configurable placement for rule-based emblems
                match gprefs['bookshelf_emblem_position']:
                    case 'above':
                        which = above
                    case 'below':
                        which = below
                    case 'top':
                        which = top
                    case 'bottom':
                        which = bottom
                    case _:
                        which = above if below and not above else below
                if (icon := cached_emblem(0, m.bookshelf_bitmap_cache, custom)) is not None:
                    which.append(icon)
        def draw_horizontal(emblems: list[QIcon], position: str) -> None:
            # Draw one bucket of emblems in a centered horizontal row
            nonlocal top_size, bottom_size
            if not emblems:
                return
            gap = self.EMBLEM_MARGIN
            max_width = (item.width - gap) // len(emblems)
            lc = self.layout_constraints
            match position:
                case 'above':
                    max_height = lc.shelf_gap
                case 'below':
                    max_height = lc.shelf_height
                case 'top' | 'bottom':
                    max_height = self.EMBLEM_SIZE
            # emblems are square: side limited by both width and height budget
            sz = min(max_width, max_height)
            width = sz
            if len(emblems) > 1:
                width += gap + sz
            x = max(0, (item.width - width) // 2) + item.start_x + lc.side_margin
            y = item.case_start_y - scroll_y
            match position:
                case 'above':
                    y += lc.shelf_gap + item.reduce_height_by - sz
                case 'below':
                    y += lc.spine_height
                case 'top':
                    y += lc.shelf_gap + item.reduce_height_by + self.EMBLEM_MARGIN
                    top_size = sz + self.EMBLEM_MARGIN
                case 'bottom':
                    y += lc.spine_height - sz - self.EMBLEM_MARGIN
                    bottom_size = sz + self.EMBLEM_MARGIN
            for ic in emblems:
                p = ic.pixmap(sz, sz)
                painter.drawPixmap(QPoint(x, y), p)
                x += sz + gap
        draw_horizontal(above, 'above')
        draw_horizontal(below, 'below')
        draw_horizontal(top, 'top')
        draw_horizontal(bottom, 'bottom')
        return top_size, bottom_size
    def paintEvent(self, ev: QPaintEvent) -> None:
        '''Paint the bookshelf view.'''
        if not self.view_is_visible():
            return
        if not self.first_painted_at:
            # Remember when the first paint happened; do_paint() uses this to
            # skip expensive book drawing shortly after a resize.
            self.first_painted_at = monotonic()
        # Make sure shelf layout matches the current model/geometry before drawing
        self.bookcase.ensure_layouting_is_current()
        with QPainter(self.viewport()) as painter:
            self.do_paint(painter)
    def do_paint(self, painter: QPainter) -> None:
        '''Draw the visible portion of the bookcase: background, shelf bases,
        book spines, inline dividers and, last, the hover-expanded cover.'''
        painter.setRenderHint(QPainter.RenderHint.Antialiasing | QPainter.RenderHint.SmoothPixmapTransform)
        # Get visible area
        scroll_y = self.verticalScrollBar().value()
        viewport_rect = self.viewport().rect()
        visible_rect = viewport_rect.translated(0, scroll_y)
        hovered_item: ShelfItem | None = None
        sm = self.selectionModel()
        current_row = sm.currentIndex().row()
        # Partition visible shelves into shelf-base rows and book-bearing shelves
        shelf_bases, shelves = [], []
        for shelf in self.bookcase.iter_shelves_from_ypos(scroll_y):
            if shelf.start_y > visible_rect.bottom():
                break
            if shelf.is_shelf:
                shelf_bases.append(shelf)
                continue
            # modify_shelf_layout returns a different object when this shelf
            # contains the hover-expanded book; remember that fact
            nshelf = self.expanded_cover.modify_shelf_layout(shelf)
            shelves.append((nshelf, shelf is not nshelf))
        if not hasattr(self, 'case_renderer'):
            self.case_renderer = RenderCase()
        painter.drawPixmap(
            QPoint(0, 0), self.case_renderer.background_as_pixmap(viewport_rect.width(), viewport_rect.height()),
        )
        n = self.shelves_per_screen
        for base in shelf_bases:
            self.draw_shelf_base(painter, base, scroll_y, self.width(), base.idx % n)
        # While a resize is being debounced (and shortly after first paint),
        # draw only the case/shelves to keep resizing responsive
        if self.resize_debounce_timer.isActive() and monotonic() - self.first_painted_at < 2:
            return
        for shelf, has_expanded in shelves:
            # Draw books and inline dividers on it
            if has_expanded:
                hovered_item = shelf.expanded_item
            for item in shelf.items:
                if item.is_divider:
                    self.draw_inline_divider(painter, item, scroll_y)
                    continue
                if item is not shelf.expanded_item:
                    # Draw a book spine at this position
                    # Suppress emblems on spines whose center lies under the
                    # expanded cover when emblems are drawn 'above' it
                    should_draw_emblems = not has_expanded or gprefs['bookshelf_hover'] != 'above' \
                        or (item.start_x + (item.width / 2) < hovered_item.start_x) \
                        or (item.start_x + (item.width / 2) > hovered_item.start_x + hovered_item.width)
                    row = self.bookcase.book_id_to_row_map[item.book_id]
                    self.draw_spine(painter, item, scroll_y, sm.isRowSelected(row), row == current_row, should_draw_emblems)
        # Expanded cover is drawn last so it overlays neighboring spines
        if hovered_item is not None:
            row = self.bookcase.book_id_to_row_map[hovered_item.book_id]
            is_selected, is_current = sm.isRowSelected(row), row == current_row
            self.expanded_cover.draw_expanded_cover(
                painter, scroll_y, self.layout_constraints, self.selection_highlight_color(is_selected, is_current),
            )
            self.draw_emblems(painter, hovered_item, scroll_y)
def draw_shelf_base(self, painter: QPainter, shelf: ShelfItem, scroll_y: int, width: int, instance: int) -> None:
p = self.case_renderer.shelf_as_pixmap(width, self.layout_constraints.shelf_height, instance)
shelf_rect = QRect(0, shelf.start_y, width, self.layout_constraints.shelf_height)
shelf_rect.translate(0, -scroll_y)
painter.drawPixmap(QPoint(0, shelf.start_y - scroll_y), p)
    def draw_selection_highlight(self, painter: QPainter, spine_rect: QRect, color: QColor) -> None:
        '''Draw a rectangular highlight border around a spine in the given color.'''
        painter.save()
        pen = QPen(color)
        # Keep the border inside the inter-spine gap so neighbors aren't overdrawn
        gap = min(4, self.layout_constraints.horizontal_gap // 2)
        pen.setWidth(2 * gap)
        pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
        painter.setPen(pen)
        painter.setBrush(Qt.BrushStyle.NoBrush)
        painter.setOpacity(1.0)
        # Inset by the gap so the thick pen stays within the spine rectangle
        painter.drawRect(spine_rect.adjusted(gap, gap, -gap, -gap))
        painter.restore()
    @lru_cache(maxsize=128)
    def get_sized_font(self, sz: float = 9, for_divider: bool = False) -> tuple[QFont, QFontMetricsF, QFontInfo]:
        '''Return a copy of the spine (or divider) font at the given point size,
        along with its metrics and font info. Results are memoized.

        NOTE(review): lru_cache on an instance method keys on self and keeps
        the instance alive for the cache's lifetime — presumably acceptable
        for a long-lived view; confirm.
        '''
        font = QFont(self.divider_font if for_divider else self.spine_font)
        font.setPointSizeF(sz)
        return font, QFontMetricsF(font), QFontInfo(font)
    @lru_cache(maxsize=4096)
    def get_text_metrics(
        self, first_line: str, second_line: str = '', sz: QSize = QSize(), allow_wrap: bool = False,
        outline_width: float = 0, for_divider: bool = False,
    ) -> tuple[str, str, QFont, QFontMetricsF, bool]:
        '''Fit one or two lines of text into the given size, adjusting font
        size and eliding/wrapping as needed.

        Returns (first_line, second_line, font, metrics, was_wrapped) where
        was_wrapped is True only when first_line was split into two lines by
        the wrapping path. Memoized, so arguments must stay hashable.
        '''
        width, height = sz.width(), sz.height()
        font, fm, fi = self.get_sized_font(self.base_font_size_pts, for_divider=for_divider)
        extra_height = outline_width * 2  # stroke width above and below
        if allow_wrap and not second_line and first_line and fm.boundingRect(first_line).width() > width and height >= 2 * self.min_line_height:
            # rather than reducing font size if there is available space, wrap to two lines
            font2, fm2, fi2 = font, fm, fi
            # Shrink until two wrapped lines fit vertically
            while math.ceil(2 * (fm2.height() + extra_height)) > height:
                font2, fm2, fi2 = self.get_sized_font(font2.pointSizeF() - 0.5, for_divider=for_divider)
            if fm2.boundingRect(first_line).width() >= width:  # two line font size is larger than one line font size
                font, fm, fi = font2, fm2, fi2
                has_third_line = False
                # Use QTextLayout to find the natural line-break position
                layout = QTextLayout(first_line, font)
                layout.beginLayout()
                fl = layout.createLine()
                fl.setLineWidth(width)
                sl = layout.createLine()
                if sl.isValid():
                    sl.setLineWidth(width)
                    has_third_line = layout.createLine().isValid()
                layout.endLayout()
                if sl.isValid():
                    # utf16_slice because QTextLayout positions are UTF-16 offsets
                    second_line = utf16_slice(first_line, sl.textStart())
                    if has_third_line:
                        second_line = fm.elidedText(second_line, Qt.TextElideMode.ElideRight, width)
                    return utf16_slice(first_line, 0, fl.textLength()), second_line, font, fm, True
        # First adjust font size so that lines fit vertically
        # Use height() rather than lineSpacing() as it allows for slightly
        # larger font sizes
        if math.ceil(fm.height() + extra_height) < height:
            # Grow font while it still fits
            while font.pointSizeF() < self.max_font_size:
                q, qm, qi = self.get_sized_font(font.pointSizeF() + 1, for_divider=for_divider)
                if math.ceil(qm.height() + extra_height) < height:
                    font, fm = q, qm
                else:
                    break
        else:
            # Shrink font until it fits; give up entirely if two lines can't fit
            while math.ceil(fm.height() + extra_height) > height:
                nsz = font.pointSizeF()
                if nsz < self.min_font_size and second_line:
                    return '', '', font, fm, False
                font, fm, fi = self.get_sized_font(nsz - 0.5, for_divider=for_divider)
        # Now reduce the font size as much as needed to fit within width
        text = first_line
        if second_line and fm.boundingRect(first_line).width() < fm.boundingRect(second_line).width():
            text = second_line
        while fi.pointSizeF() > self.min_font_size and fm.boundingRect(text).width() > width:
            font, fm, fi = self.get_sized_font(font.pointSizeF() - 1, for_divider=for_divider)
        if fi.pointSizeF() <= self.min_font_size:
            # At minimum size, elide instead of shrinking further
            first_line = fm.elidedText(first_line, Qt.TextElideMode.ElideRight, width)
            if second_line:
                second_line = fm.elidedText(second_line, Qt.TextElideMode.ElideRight, width)
        return first_line, second_line, font, fm, False
    def draw_inline_divider(self, painter: QPainter, divider: ShelfItem, scroll_y: int):
        '''Draw an inline group divider with its group name written vertically and a gradient line.'''
        lc = self.layout_constraints
        rect = divider.rect(lc).translated(0, -scroll_y)
        # Rect in the rotated (vertical-text) coordinate system, centered on origin
        divider_rect = QRect(-rect.height() // 2, -rect.width() // 2, rect.height(), rect.width())
        text_right = gprefs['bookshelf_divider_text_right']
        def draw_rounded_divider(corner_radius: int, offset: int):
            # Draws the divider body with rounded top corners via cached pixmap
            p = self.case_renderer.divider_as_pixmap(rect.width(), rect.height(), self.theme.divider_background_color, corner_radius, offset)
            painter.drawPixmap(rect.adjusted(0, -offset, 0, 0), p)
        match gprefs['bookshelf_divider_style']:
            case 'block':
                painter.fillRect(rect, self.theme.divider_background_color)
            case 'gravestone':
                radius = rect.width() // 2
                offset = radius // 4
                draw_rounded_divider(radius, offset)
            case 'rounded_corner':
                radius = rect.width() // 4
                offset = radius // 3
                draw_rounded_divider(radius, offset)
        # Bottom margin
        text_rect = divider_rect.adjusted(
            0 if text_right else self.TEXT_MARGIN,
            0,
            -self.TEXT_MARGIN if text_right else 0,
            0,
        )
        elided_text, _, font, _, _ = self.get_text_metrics(divider.group_name, '', text_rect.size(), for_divider=True)
        painter.save()
        painter.setFont(font)
        painter.setPen(self.theme.divider_text_color)
        # Rotate painter so text runs vertically along the divider
        painter.translate(rect.left() + rect.width() // 2, rect.top() + rect.height() // 2)
        painter.rotate(90 if gprefs['bookshelf_up_to_down'] else -90)
        alignment = Qt.AlignmentFlag.AlignRight if text_right else Qt.AlignmentFlag.AlignLeft
        sized_rect = painter.drawText(text_rect, alignment | Qt.AlignmentFlag.AlignVCenter, elided_text)
        # Calculate line dimensions
        line_rect = text_rect.adjusted(sized_rect.width(), 0, 0, 0)
        overflow = (line_rect.height() - self.DIVIDER_LINE_WIDTH) // 2
        line_rect.adjust(0, overflow, 0, -overflow)
        # Draw vertical gradient line if long enough
        if line_rect.width() > 8:
            if text_right:
                line_rect.translate(-sized_rect.width(), 0)
            color1 = self.theme.divider_line_color.toRgb()
            color2 = color1.toRgb()
            color1.setAlphaF(0.0)  # Transparent at top/bottom
            color2.setAlphaF(0.75)  # Visible in middle
            # NOTE(review): both gradient points use line_rect.left() as the y
            # coordinate; harmless for a purely horizontal gradient but looks
            # like it was meant to be line_rect.top() — confirm.
            gradient = QLinearGradient(
                QPointF(line_rect.left(), line_rect.left()),
                QPointF(line_rect.left() + line_rect.width(), line_rect.left()),
            )
            gradient.setColorAt(0, color1)
            gradient.setColorAt(0.5, color2)
            gradient.setColorAt(1, color1)
            painter.save()
            painter.setPen(Qt.PenStyle.NoPen)
            painter.setBrush(QBrush(gradient))
            painter.drawRect(line_rect)
            painter.restore()
        painter.restore()
    def default_cover_pixmap(self) -> PixmapWithDominantColor:
        '''Return the module-level default cover pixmap, sized for the
        hover-expanded cover at the current device pixel ratio.
        (Intentionally shadows and delegates to the module-level function
        of the same name.)'''
        lc = self.layout_constraints
        sz = (QSizeF(lc.hover_expanded_width, lc.spine_height) * self.devicePixelRatioF()).toSize()
        return default_cover_pixmap(sz.width(), sz.height())
    def draw_spine(
        self, painter: QPainter, spine: ShelfItem, scroll_y: int, is_selected: bool, is_current: bool,
        should_draw_emblems: bool,
    ):
        '''Draw a single book spine: gradient background, cover thumbnail
        overlay, selection highlight, emblems and rotated title.'''
        lc = self.layout_constraints
        spine_rect = spine.rect(lc).translated(0, -scroll_y)
        thumbnail = self.cover_cache.thumbnail_as_pixmap(spine.book_id)
        if thumbnail is None:  # not yet rendered
            # Fall back to the theme background color until the thumbnail
            # rendering completes
            self.case_renderer.ensure_theme(is_dark_theme())
            spine_color = self.case_renderer.theme.background
        else:
            if thumbnail.isNull():
                thumbnail = self.default_cover_pixmap()
            spine_color = thumbnail.dominant_color
            if not spine_color.isValid():
                spine_color = self.default_cover_pixmap().dominant_color
        if is_selected or is_current:
            spine_color = spine_color.lighter(120)
        # Draw spine background with gradient (darker edges, lighter center)
        self.draw_spine_background(painter, spine_rect, spine_color)
        # Draw cover thumbnail overlay
        self.draw_spine_cover(painter, spine_rect, thumbnail)
        # Draw selection highlight around the spine
        color = self.selection_highlight_color(is_selected, is_current)
        if color.isValid():
            self.draw_selection_highlight(painter, spine_rect, color)
        top_emblem_size = bottom_emblem_size = 0
        if should_draw_emblems:
            top_emblem_size, bottom_emblem_size = self.draw_emblems(painter, spine, scroll_y)
        # Draw title (rotated vertically)
        self.draw_spine_title(painter, spine_rect, spine_color, spine.book_id, top_emblem_size, bottom_emblem_size)
def selection_highlight_color(self, is_selected: bool, is_current: bool) -> QColor:
if is_current and is_selected:
return self.theme.current_selected_color
if is_current:
return self.theme.current_color
if is_selected:
return self.theme.selected_color
return QColor()
    def draw_spine_background(self, painter: QPainter, rect: QRect, spine_color: QColor):
        '''Draw spine background with gradient (darker edges, lighter center).'''
        painter.save()
        painter.setOpacity(1.0)
        # Horizontal gradient: dark at both edges, full color at the center
        gradient = QLinearGradient(QPointF(rect.topLeft()), QPointF(rect.topRight()))
        gradient.setColorAt(0, spine_color.darker(115))
        gradient.setColorAt(0.5, spine_color)
        gradient.setColorAt(1, spine_color.darker(115))
        painter.fillRect(rect, QBrush(gradient))
        # Add subtle vertical gradient for depth
        vertical_gradient = QLinearGradient(QPointF(rect.topLeft()), QPointF(rect.bottomLeft()))
        vertical_gradient.setColorAt(0, QColor(255, 255, 255, 20))  # Slight highlight at top
        vertical_gradient.setColorAt(1, QColor(0, 0, 0, 30))  # Slight shadow at bottom
        painter.fillRect(rect, QBrush(vertical_gradient))
        painter.restore()
    def draw_spine_title(
        self, painter: QPainter, rect: QRect, spine_color: QColor, book_id: int,
        top_emblem_size: int, bottom_emblem_size: int,
    ) -> None:
        '''Draw vertically the title on the spine.'''
        first_line, second_line = self.first_line_renderer(book_id), self.second_line_renderer(book_id)
        margin = self.TEXT_MARGIN
        def calculate_rects(has_two_lines: bool) -> tuple[QRect, QRect]:
            # Split the spine into two side-by-side columns for two-line text,
            # shrinking for emblems drawn above/below the title
            if has_two_lines:
                first_rect = QRect(rect.left(), rect.top() + margin, rect.width() // 2, rect.height() - 2*margin)
                second_rect = first_rect.translated(first_rect.width(), 0)
                if gprefs['bookshelf_up_to_down']:
                    first_rect, second_rect = second_rect, first_rect
            else:
                first_rect = QRect(rect.left(), rect.top() + margin, rect.width(), rect.height() - 2*margin)
                second_rect = QRect()
            if top_emblem_size:
                first_rect.adjust(0, top_emblem_size, 0, 0)
                if has_two_lines:
                    second_rect.adjust(0, top_emblem_size, 0, 0)
            if bottom_emblem_size:
                first_rect.adjust(0, 0, 0, -bottom_emblem_size)
                if has_two_lines:
                    second_rect.adjust(0, 0, 0, -bottom_emblem_size)
            return first_rect, second_rect
        first_rect, second_rect = calculate_rects(bool(second_line))
        # transposed() because the text is rotated 90 degrees on the spine
        nfl, nsl, font, fm, was_wrapped = self.get_text_metrics(
            first_line, second_line, first_rect.transposed().size(), allow_wrap=True,
            outline_width=self.outline_width)
        if not nfl and not nsl:  # two lines dont fit
            # Retry with just the first line using the spine's full width
            second_line = ''
            first_rect = QRect(rect.left(), first_rect.top(), rect.width(), first_rect.height())
            nfl, nsl, font, fm, _ = self.get_text_metrics(
                first_line, second_line, first_rect.transposed().size(), outline_width=self.outline_width)
        elif was_wrapped:
            # The single title was wrapped onto two lines; recompute rects
            first_rect, second_rect = calculate_rects(True)
        first_line, second_line, = nfl, nsl
        # Determine text color based on spine background brightness
        text_color, outline_color = self.get_contrasting_text_color(spine_color)
        def draw_text(text: str, rect: QRect, alignment: Qt.AlignmentFlag) -> None:
            # Render to a pixmap so the rotated, outlined text can be cached
            pixmap = render_spine_text_as_pixmap(
                text, font, fm, rect.transposed().size(), alignment, gprefs['bookshelf_up_to_down'],
                self.outline_width, self.devicePixelRatioF(), text_color, outline_color)
            painter.drawPixmap(rect.topLeft(), pixmap)
        if second_line:
            draw_text(first_line, first_rect, Qt.AlignmentFlag.AlignBottom)
            draw_text(second_line, second_rect, Qt.AlignmentFlag.AlignTop)
        else:
            draw_text(first_line, first_rect, Qt.AlignmentFlag.AlignVCenter)
    def draw_spine_cover(self, painter: QPainter, rect: QRect, thumbnail: PixmapWithDominantColor) -> None:
        '''Overlay the cover thumbnail on the spine, honoring the configured
        thumbnail style ('none', 'crops', 'edge', or full) and opacity.'''
        match gprefs['bookshelf_thumbnail']:
            case 'none':
                return
            # Adjust size
            case 'crops':
                thumbnail = thumbnail.copy(0, 0, rect.width(), thumbnail.height())
            case 'edge':
                # Only a narrow strip (20% of the spine, min 10px) at the edge
                width = round(max(10, rect.width() * 0.2))
                thumbnail = thumbnail.copy(0, 0, width, thumbnail.height())
                rect = QRect(rect.x(), rect.y(), width, rect.height())
        # Draw with opacity
        painter.save()
        painter.setOpacity(gprefs['bookshelf_thumbnail_opacity'] / 100)
        # Temporarily apply our DPR so drawPixmap scales correctly, then restore
        dpr = thumbnail.devicePixelRatioF()
        thumbnail.setDevicePixelRatio(self.devicePixelRatioF())
        painter.drawPixmap(rect, thumbnail)
        thumbnail.setDevicePixelRatio(dpr)
        painter.restore()
# Cover integration methods
    def load_hover_cover(self, si: ShelfItem) -> tuple[PixmapWithDominantColor, QSize]:
        '''Load and scale the full cover for the hover-expanded view of the
        given shelf item. Returns the pixmap and its final (logical) size.'''
        lc = self.layout_constraints
        cover_img = self.dbref().cover(si.book_id, as_image=True)
        dpr = self.devicePixelRatioF()
        final_sz = QSize(lc.hover_expanded_width, lc.spine_height - si.reduce_height_by)
        # Physical pixel size at the current device pixel ratio
        sz = (QSizeF(final_sz) * dpr).toSize()
        if cover_img is None or cover_img.isNull():
            cover_pixmap = self.default_cover_pixmap()
            resize_needed, nw, nh = fit_image(cover_pixmap.width(), cover_pixmap.height(), sz.width(), sz.height())
            if resize_needed:
                cover_pixmap = PixmapWithDominantColor(
                    cover_pixmap.scaled(int(nw), int(nh), Qt.AspectRatioMode.IgnoreAspectRatio, Qt.TransformationMode.SmoothTransformation))
        else:
            _, cover_img = resize_to_fit(cover_img, sz.width(), sz.height())
            cover_pixmap = PixmapWithDominantColor.fromImage(cover_img)
        tb = self.cover_cache.thumbnail_as_pixmap(si.book_id)
        if tb and tb.dominant_color.isValid():
            # ensure that the hover color is the same as the thumbnail/spine
            cover_pixmap.dominant_color = tb.dominant_color
        # Convert back to logical size after aspect-preserving resize
        final_sz = (QSizeF(cover_pixmap.size()) / dpr).toSize()
        return cover_pixmap, final_sz
def get_contrasting_text_color(self, background_color: QColor) -> tuple[QColor, QColor]:
if not background_color or not background_color.isValid():
return self.theme.text_color_for_light_background, self.theme.outline_color_for_light_background
if (contrast_ratio(background_color, self.theme.text_color_for_dark_background)
> contrast_ratio(background_color, self.theme.text_color_for_light_background)):
return self.theme.text_color_for_dark_background, self.theme.outline_color_for_dark_background
return self.theme.text_color_for_light_background, self.theme.outline_color_for_light_background
# Selection methods (required for AlternateViews integration)
def select_rows(self, rows: Iterable[int], using_ids: bool = False) -> None:
if not (m := self.model()):
return
if using_ids:
row_indices = []
for book_id in rows:
if (row := self.row_from_book_id(book_id)) is not None:
row_indices.append(row)
rows = row_indices
sel = selection_for_rows(m, rows)
sm = self.selectionModel()
sm.select(sel, QItemSelectionModel.SelectionFlag.ClearAndSelect | QItemSelectionModel.SelectionFlag.Rows)
    def selectAll(self):
        '''Select every row in the model.'''
        m = self.model()
        sm = self.selectionModel()
        # NOTE(review): with an empty model this builds a range ending at
        # index(-1, 0) — presumably Qt treats the invalid range as an empty
        # selection; confirm.
        sel = QItemSelection(m.index(0, 0), m.index(m.rowCount(QModelIndex())-1, 0))
        sm.select(sel, QItemSelectionModel.SelectionFlag.ClearAndSelect | QItemSelectionModel.SelectionFlag.Rows)
    def set_current_row(self, row):
        '''Make the given row current without changing the selection.'''
        sm = self.selectionModel()
        sm.setCurrentIndex(self.model().index(row, 0), QItemSelectionModel.SelectionFlag.NoUpdate)
    def set_database(self, newdb, stage=0):
        '''Point the view at a new database. Called in stages by the GUI;
        only stage 0 does any work here.'''
        if stage == 0:
            self.grouping_mode = newdb.new_api.pref('bookshelf_grouping_mode', '')
            # Clear caches when database changes
            self.template_inited = False
            self.cover_cache.set_database(newdb)
            self.invalidate(set_of_books_changed=True, clear_spine_width_cache=True)
    def set_context_menu(self, menu: QMenu):
        '''Store the context menu shown by contextMenuEvent().'''
        self.context_menu = menu
    def populate_group_by_menu(self, grouping_menu: QMenu) -> None:
        '''Rebuild the "Group by" menu with checkable actions for the
        available grouping fields, sorted by display name.'''
        grouping_menu.clear()
        fm = self.gui.current_db.new_api.field_metadata
        def add(field: str, name: str) -> None:
            # One checkable action per grouping field; the active mode is checked
            action = grouping_menu.addAction(name)
            action.setCheckable(True)
            action.setChecked(self.grouping_mode == field)
            action.triggered.connect(partial(self.set_grouping_mode, field))
        add('', _('Ungrouped'))
        grouping_menu.addSeparator()
        # Map of field -> sort key, used to list fields by display name
        cf = {}
        for field, m in fm.custom_field_metadata(include_composites=False).items():
            if m['is_category'] or m['datatype'] == 'datetime':
                cf[field] = numeric_sort_key(m['name'])
        for k in all_groupings():
            cf[k] = numeric_sort_key(fm[k]['name'])
        for k in sorted(cf, key=cf.get):
            add(k, fm[k]['name'])
    def contextMenuEvent(self, ev: QContextMenuEvent):
        '''Show the configured context menu, if any, at the event position.'''
        if self.context_menu:
            self.context_menu.popup(ev.globalPos())
            ev.accept()
def set_grouping_mode(self, mode: str):
'''Set the grouping mode and refresh display.'''
if mode != self.grouping_mode:
self.grouping_mode = mode
self.dbref().set_pref('bookshelf_grouping_mode', mode)
self.invalidate()
def get_selected_ids(self) -> list[int]:
return [self.book_id_from_row(index.row()) for index in self.selectionModel().selectedRows() if index.isValid()]
    def current_book_state(self) -> SavedState:
        '''Get current book state for restoration.'''
        sm = self.selectionModel()
        r = sm.currentIndex().row()
        current_book_id = 0
        if r > -1:
            # row_to_book_id may be stale during relayout; fall back to 0
            with suppress(Exception):
                current_book_id = self.bookcase.row_to_book_id[r]
        selected_rows = (index.row() for index in sm.selectedRows())
        selected_book_ids = set()
        with suppress(Exception):
            selected_book_ids = {self.bookcase.row_to_book_id[r] for r in selected_rows}
        return SavedState(current_book_id, selected_book_ids)
    def restore_current_book_state(self, state: SavedState) -> None:
        '''Restore selection and current row from a previously saved state.'''
        m = self.model()
        if not state or not m:
            return
        id_to_index = m.db.data.safe_id_to_index
        selected_rows = set(map(id_to_index, state.selected_book_ids))
        # safe_id_to_index returns -1 for missing ids; drop them
        selected_rows.discard(-1)
        # Temporarily disable auto-scroll unless layout has finished, so
        # restoring selection doesn't jump the viewport mid-layout
        orig_auto_scroll, self.auto_scroll = self.auto_scroll, self.bookcase.layout_finished
        if selected_rows:
            self.select_rows(selected_rows)
        if (row := id_to_index(state.current_book_id)) > -1:
            self.set_current_row(row)
        elif not self.currentIndex().isValid():
            self.set_current_row(0)
        self.auto_scroll = orig_auto_scroll
        if not self.bookcase.layout_finished and self.auto_scroll:
            # Defer scrolling to the current book until layout completes
            self.scroll_to_current_after_layout = True
    def marked_changed(self, old_marked: set[int], current_marked: set[int]):
        '''Slot called when the set of marked books changes; the arguments
        are unused here, a full repaint is enough.'''
        # Refresh display if marked books changed
        self.update_viewport()
def rows_for_merge(self, resolved=True):
ans = []
seen = set()
for idx in self.selectionModel().selectedRows():
row = idx.row()
if row not in seen:
seen.add(row)
ans.append(row)
return ans
# Mouse and keyboard events {{{
    def keyPressEvent(self, ev: QKeyEvent) -> None:
        '''Keyboard navigation over the visual (shelf) layout: arrows move
        between neighboring books, Page Up/Down move by screens, Home/End
        move within a shelf (or the whole case with Ctrl), Space selects.'''
        if handle_enter_press(self, ev, has_edit_cell=False):
            return
        if ev.matches(QKeySequence.StandardKey.SelectAll):
            self.selectAll()
            ev.accept()
            return
        if (key := ev.key()) not in (
            Qt.Key.Key_Left, Qt.Key.Key_Right, Qt.Key.Key_Up, Qt.Key.Key_Down, Qt.Key.Key_PageDown,
            Qt.Key.Key_PageUp, Qt.Key.Key_Home, Qt.Key.Key_End, Qt.Key.Key_Space,
        ):
            return super().keyPressEvent(ev)
        if not self.bookcase.book_ids_in_visual_order or not (m := self.model()):
            return
        ev.accept()
        target_book_id = 0
        current_row = self.selectionModel().currentIndex().row()
        try:
            current_book_id = self.bookcase.row_to_book_id[current_row]
        except Exception:
            # No valid current book: start navigation from the first book
            current_book_id = self.bookcase.book_ids_in_visual_order[0]
        has_ctrl = bool(ev.modifiers() & Qt.KeyboardModifier.ControlModifier)
        has_shift = bool(ev.modifiers() & Qt.KeyboardModifier.ShiftModifier)
        # Default selection behavior for Ctrl / unmodified keys; individual
        # keys below override these
        ctrl_action = QItemSelectionModel.SelectionFlag.Toggle
        no_mods_action = QItemSelectionModel.SelectionFlag.ClearAndSelect
        match key:
            case Qt.Key.Key_Space:
                target_book_id = current_book_id
                no_mods_action = QItemSelectionModel.SelectionFlag.Select
            case Qt.Key.Key_Left:
                target_book_id = self.bookcase.visual_neighboring_book(current_book_id, delta=-1)
                ctrl_action = QItemSelectionModel.SelectionFlag.NoUpdate
            case Qt.Key.Key_Right:
                target_book_id = self.bookcase.visual_neighboring_book(current_book_id, delta=1)
                ctrl_action = QItemSelectionModel.SelectionFlag.NoUpdate
            case Qt.Key.Key_Up:
                target_book_id = self.bookcase.book_in_column_of(current_book_id, delta=-1)
                ctrl_action = QItemSelectionModel.SelectionFlag.NoUpdate
            case Qt.Key.Key_Down:
                target_book_id = self.bookcase.book_in_column_of(current_book_id, delta=1)
                ctrl_action = QItemSelectionModel.SelectionFlag.NoUpdate
            case Qt.Key.Key_PageUp:
                target_book_id = self.bookcase.book_in_column_of(current_book_id, delta=-self.shelves_per_screen, in_bound=True)
            case Qt.Key.Key_PageDown:
                target_book_id = self.bookcase.book_in_column_of(current_book_id, delta=self.shelves_per_screen, in_bound=True)
            case Qt.Key.Key_Home:
                if has_ctrl:
                    # Ctrl+Home: jump to the very first book
                    target_book_id = self.bookcase.book_ids_in_visual_order[0]
                    has_ctrl = False
                else:
                    target_book_id = self.bookcase.end_book_on_shelf_of(current_book_id, first=True)
            case Qt.Key.Key_End:
                if has_ctrl:
                    # Ctrl+End: jump to the very last book
                    target_book_id = self.bookcase.book_ids_in_visual_order[-1]
                    has_ctrl = False
                else:
                    target_book_id = self.bookcase.end_book_on_shelf_of(current_book_id, first=False)
        if target_book_id <= 0:
            return
        target_index = m.index(self.bookcase.book_id_to_row_map[target_book_id], 0)
        sm = self.selectionModel()
        if has_shift:
            # Shift extends the selection in visual order
            handle_selection_click(self, target_index, self.bookcase.visual_row_cmp, self.selection_between)
        elif has_ctrl:
            sm.setCurrentIndex(target_index, QItemSelectionModel.SelectionFlag.Rows | ctrl_action)
        else:
            sm.setCurrentIndex(target_index, QItemSelectionModel.SelectionFlag.Rows | no_mods_action)
        self.scrollTo(target_index)
        self.update_viewport()
    def scrollTo(self, index: QModelIndex, hint: QAbstractItemView.ScrollHint = QAbstractItemView.ScrollHint.EnsureVisible) -> None:
        '''Scroll the viewport so the shelf containing the indexed book is
        positioned according to the given hint.'''
        si = self.bookcase.book_id_to_item_map.get(self.book_id_from_row(index.row()))
        if si is None:
            return
        viewport_height = self.viewport().height()
        shelf_height = self.layout_constraints.step_height
        # `y` is the desired offset of the book's shelf from the viewport top
        match hint:
            case QAbstractItemView.ScrollHint.PositionAtTop:
                y = 0
            case QAbstractItemView.ScrollHint.PositionAtBottom:
                y = max(0, viewport_height - shelf_height)
            case QAbstractItemView.ScrollHint.PositionAtCenter:
                y = max(0, (viewport_height - shelf_height)//2)
            case QAbstractItemView.ScrollHint.EnsureVisible:
                top = si.case_start_y - self.verticalScrollBar().value()
                if top >= 0 and top + shelf_height <= viewport_height:
                    # Already fully visible, nothing to do
                    return
                y = 0 if top < 0 else max(0, viewport_height - shelf_height)
        self.verticalScrollBar().setValue(si.case_start_y - y)
        self.update_viewport()
    def on_current_changed(self, current: QModelIndex, previous: QModelIndex) -> None:
        '''Keep the current book visible when the current index changes,
        unless auto-scroll is suppressed (e.g. during user interaction).'''
        if self.auto_scroll and self.view_is_visible() and current.isValid():
            self.scrollTo(current)
    def selection_between(self, a: QModelIndex, b: QModelIndex) -> QItemSelection:
        '''Return a selection spanning all books between two indices in
        visual (shelf) order, not model order.'''
        if m := self.model():
            return selection_for_rows(m, self.bookcase.visual_selection_between(a.row(), b.row()))
        return QItemSelection()
    def handle_mouse_move_event(self, ev: QMouseEvent):
        '''Drive drag-selection while a button is held, otherwise update the
        hover-expanded cover under the pointer.'''
        ev.accept()
        if ev.buttons() & Qt.MouseButton.LeftButton:
            handle_selection_drag(self, self.indexAt(ev.pos()), self.click_start_data, self.bookcase.visual_row_cmp, self.selection_between)
            return
        if gprefs['bookshelf_hover'] == 'none':
            return
        pos = ev.pos()
        case_item, _, shelf_item = self.item_at_position(pos.x(), pos.y())
        if shelf_item is not None and not shelf_item.is_divider:
            self.expanded_cover.shelf_item_hovered(case_item, shelf_item)
        else:
            # No book under the pointer: clear the hover state
            self.expanded_cover.shelf_item_hovered()
    def currentIndex(self):
        '''Return the selection model's current index.'''
        return self.selectionModel().currentIndex()
    def handle_mouse_press_event(self, ev: QMouseEvent) -> None:
        '''Handle left/right button presses on books: update the current row
        and selection, honoring Ctrl (toggle) and Shift (range) modifiers.'''
        if ev.button() not in (Qt.MouseButton.LeftButton, Qt.MouseButton.RightButton) or not (index := self.indexAt(ev.pos())).isValid():
            return
        orig_auto_scroll, self.auto_scroll = self.auto_scroll, False  # prevent scrolling while user is interacting
        sm = self.selectionModel()
        flags = QItemSelectionModel.SelectionFlag.Rows
        modifiers = ev.modifiers()
        if ev.button() == Qt.MouseButton.RightButton:
            modifiers = Qt.KeyboardModifier.NoModifier  # no extended selection with right button
        if modifiers & Qt.KeyboardModifier.ControlModifier:
            # Toggle selection
            sm.setCurrentIndex(index, flags | QItemSelectionModel.SelectionFlag.Toggle)
        else:
            if not modifiers & Qt.KeyboardModifier.ShiftModifier:
                sm.setCurrentIndex(index, flags | QItemSelectionModel.SelectionFlag.ClearAndSelect)
            # Record click data so a subsequent drag can extend the selection
            self.click_start_data = handle_selection_click(self, index, self.bookcase.visual_row_cmp, self.selection_between)
        ev.accept()
        self.auto_scroll = orig_auto_scroll
    def handle_mouse_release_event(self, ev: QMouseEvent) -> None:
        '''End any in-progress drag-selection.'''
        self.click_start_data = None
    def mouseDoubleClickEvent(self, ev: QMouseEvent) -> None:
        '''Handle mouse double-click events on the viewport.'''
        index = self.indexAt(ev.pos())
        # Abort any in-progress drag-selection
        self.click_start_data = None
        if index.isValid() and (row := index.row()) >= 0:
            # Set as current row first
            self.set_current_row(row)
            ev.accept()
            double_click_action(index)
    def viewportEvent(self, ev: QEvent) -> bool:
        '''Intercept viewport Leave events to clear the hover-expanded cover;
        everything else is delegated to the base class.'''
        if ev.type() == QEvent.Type.Leave:
            # Clear hover when mouse leaves viewport
            self.expanded_cover.invalidate()
            self.update_viewport()
            ev.accept()
            return True
        return super().viewportEvent(ev)
# }}}
    def item_at_position(self, x: int, y: int) -> tuple[CaseItem|None, CaseItem|None, ShelfItem|None]:
        '''Hit-test viewport coordinates, returning (shelf, hover-modified
        shelf, item) or (None, None, None) when nothing is there.'''
        scroll_y = self.verticalScrollBar().value()
        content_y = y + scroll_y
        lc = self.layout_constraints
        # Translate to content coordinates relative to the shelf interior
        x -= lc.side_margin
        if (shelf := self.bookcase.shelf_with_ypos(content_y)) is not None:
            # Use the hover-modified layout so hit-testing matches what is drawn
            modshelf = self.expanded_cover.modify_shelf_layout(shelf)
            if (item := modshelf.book_or_divider_at_xpos(x, lc)) is not None:
                return shelf, modshelf, item
        return None, None, None
def book_id_at_position(self, x: int, y: int) -> int:
_, _, shelf_item = self.item_at_position(x, y)
if shelf_item is not None and not shelf_item.is_divider:
return shelf_item.book_id
return -1
def book_row_at_position(self, x: int, y: int) -> int:
' Find which book is at the given position. x, y are in viewport coordinates '
book_id = self.book_id_at_position(x, y)
if book_id > 0:
if (row := self.row_from_book_id(book_id)) is not None:
return row
return -1
    def indexAt(self, pos: QPoint) -> QModelIndex:
        '''Return the model index of the book at the given viewport position,
        or an invalid index when nothing is there.'''
        if (m := self.model()):
            row = self.book_row_at_position(pos.x(), pos.y())
            if row >= 0 and (ans := m.index(row, 0)).isValid():
                return ans
        return QModelIndex()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/gui2/library/bookshelf_view.py",
"license": "GNU General Public License v3.0",
"lines": 2309,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/gui2/momentum_scroll.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
from collections import deque
from typing import NamedTuple
from qt.core import (
QAbstractScrollArea,
QElapsedTimer,
QGroupBox,
QHBoxLayout,
QLabel,
QListView,
QMainWindow,
QSlider,
QStringListModel,
Qt,
QTimer,
QVBoxLayout,
QWheelEvent,
QWidget,
)
from calibre.gui2 import Application
def same_sign(a: float, b: float) -> bool:
    '''Return True if a and b point in the same direction.

    Zero is treated as compatible with either sign, so the result is True
    whenever one of the values is zero.
    '''
    return a == 0 or b == 0 or (a > 0) == (b > 0)
class ScrollSample(NamedTuple):
    '''Store a scroll sample with timestamp for velocity calculation.'''
    # Scroll deltas (in pixels) carried by one wheel event
    delta_x: float
    delta_y: float
    # Milliseconds from the scroller's monotonic QElapsedTimer
    timestamp: int
class MomentumSettings(NamedTuple):
    '''Tunable parameters controlling momentum (kinetic) scrolling.'''
    # Deceleration factor (0-1, lower = longer coast)
    friction: float = 0.04
    min_velocity: float = 0.5  # Minimum velocity before stopping
    max_velocity: float = 100  # maximum velocity to prevent runaway scrolling
    boost_factor: float = 1.2  # how much to speed up scrolling
    velocity_scale: float = 0.9  # Scale factor for initial velocity
    timer_interval_ms: int = int(1000/120)  # 120 FPS update rate
    # Time to wait after ScrollEnd to see if system momentum arrives
    momentum_detection_delay_ms: int = 50
    # Whether to enable momentum in the specified axis, defers to Qt handling
    # of wheelevents when false
    enable_x: bool = True
    enable_y: bool = True
    # How much to scale scroll amounts by
    x_multiplier: float = 1
    y_multiplier: float = 1
    # Synthesize momentum for mouse wheels or trackpads that send NoScrollPhase events
    synthesize_without_gestures: bool = False
class MomentumScroller:
'''
Handles momentum/kinetic scrolling for Qt scroll areas.
Behavior by platform/device:
- macOS trackpad: Uses system-provided momentum (ScrollMomentum phase)
- Linux trackpad: Has phases but sometimes no momentum, so we synthesize it when needed
- Mouse wheel (all platforms): No phases, we synthesize momentum if enabled in settings
'''
    def __init__(self, scroll_area: QAbstractScrollArea, settings: MomentumSettings = MomentumSettings()):
        '''Set up momentum state and timers for the given scroll area.

        :param scroll_area: the QAbstractScrollArea whose scrollbars are driven
        :param settings: tunable momentum parameters (shared default is safe,
            MomentumSettings is an immutable NamedTuple)
        '''
        self.settings = settings
        self.scroll_area = scroll_area
        # True once the platform has ever delivered a ScrollMomentum event —
        # in that case we never synthesize our own momentum
        self.seen_momentum_event = False
        self.synthetic_momentum_already_used = False
        # Velocity tracking
        self.velocity_x = 0.0
        self.velocity_y = 0.0
        # Accumulated sub-pixel scroll amounts
        self.accumulated_x = 0.0
        self.accumulated_y = 0.0
        # Sample history for calculating velocity
        self.samples: deque[ScrollSample] = deque(maxlen=20)
        # Timing
        self.elapsed_timer = QElapsedTimer()
        self.elapsed_timer.start()
        # Animation timer for synthetic momentum
        self.momentum_timer = QTimer()
        self.momentum_timer.timeout.connect(self._update_momentum)
        # Timer to detect if system momentum is coming
        self.momentum_detection_timer = QTimer()
        self.momentum_detection_timer.setSingleShot(True)
        self.momentum_detection_timer.timeout.connect(self._start_synthetic_momentum)
        # State tracking
        self._in_scroll_gesture = False
        self._last_scroll_end_time = 0
    def handle_wheel_event(self, event: QWheelEvent) -> bool:
        '''
        Process a wheel event, respecting system momentum phases when available.
        Returns True if the event was handled.
        '''
        dx, dy = self._get_delta(event)
        dx *= self.settings.boost_factor
        dy *= self.settings.boost_factor
        current_time = self.elapsed_timer.elapsed()
        match event.phase():
            case Qt.ScrollPhase.NoScrollPhase:
                # typically generated by mouse wheel and on windows
                if not self.settings.synthesize_without_gestures:
                    # Defer to Qt's default wheel handling
                    return False
                self.samples.append(ScrollSample(dx, dy, current_time))
                self.momentum_timer.stop()
                self._last_scroll_end_time = current_time
                self._do_scroll(dx, dy)
                # Restart the momentum animation from the new samples
                self.momentum_timer.start(self.settings.timer_interval_ms)
            case Qt.ScrollPhase.ScrollBegin:
                # User started a new scroll gesture
                self._in_scroll_gesture = True
                self.accumulated_x = 0
                self.accumulated_y = 0
                self.samples.clear()
                # Stop any ongoing synthetic momentum
                self.momentum_timer.stop()
                self.momentum_detection_timer.stop()
            case Qt.ScrollPhase.ScrollUpdate:
                # Active scrolling - record sample and apply delta
                self.samples.append(ScrollSample(dx, dy, current_time))
                self._do_scroll(dx, dy)
            case Qt.ScrollPhase.ScrollEnd:
                # User lifted fingers
                self._in_scroll_gesture = False
                self._last_scroll_end_time = current_time
                if not self.seen_momentum_event:
                    if self.synthetic_momentum_already_used:
                        # We know this device never sends system momentum
                        self.start_momentum_timer()
                    else:
                        # Wait briefly to see if system momentum events arrive
                        # If they do, we'll use those; if not, we synthesize
                        self.momentum_detection_timer.start(self.settings.momentum_detection_delay_ms)
            case Qt.ScrollPhase.ScrollMomentum:
                # System-provided momentum (macOS)
                self.seen_momentum_event = True
                self.momentum_detection_timer.stop()
                self.momentum_timer.stop()
                self._do_scroll(dx, dy)
        return True
def _start_synthetic_momentum(self):
    '''
    Called after ScrollEnd if no system momentum arrived.
    Start our own momentum animation.
    '''
    if not self.seen_momentum_event:
        # Remember that synthesis was used, so the next ScrollEnd can start
        # momentum immediately without the detection delay.
        self.synthetic_momentum_already_used = True
        self.start_momentum_timer()
        # Run one step immediately so momentum begins without waiting for
        # the first timer tick.
        self._update_momentum()
def start_momentum_timer(self):
    # (Re)start the repeating tick that drives synthetic momentum.
    self.momentum_timer.start(self.settings.timer_interval_ms)
def _get_delta(self, event: QWheelEvent) -> tuple[float, float]:
    '''Extract the (dx, dy) scroll delta from a wheel event.'''
    px = event.pixelDelta()
    if not px.isNull():
        # Trackpads report precise pixel deltas; use them directly.
        return float(px.x()), float(px.y())
    # Mouse wheels report angle deltas: 120 units equal one scroll step.
    ang = event.angleDelta()
    hbar = self.scroll_area.horizontalScrollBar()
    vbar = self.scroll_area.verticalScrollBar()
    step_x = hbar.singleStep() if hbar else 1
    step_y = vbar.singleStep() if vbar else 1
    return ang.x() / 120.0 * step_x, ang.y() / 120.0 * step_y
def _trim_old_samples(self, current_time: int, window_ms: int = 150):
    '''Discard samples whose timestamp falls outside the sliding window.'''
    oldest_allowed = current_time - window_ms
    samples = self.samples
    while samples and samples[0].timestamp < oldest_allowed:
        samples.popleft()
def _calculate_gesture_velocity(self) -> tuple[float, float]:
    '''Estimate the gesture velocity from the recorded scroll samples.'''
    scale = self.settings.velocity_scale
    if len(self.samples) < 2:
        # Zero or one sample: a single delta is the best available guess.
        if not self.samples:
            return 0.0, 0.0
        only = self.samples[0]
        return only.delta_x * scale, only.delta_y * scale
    # Recency-weighted average: weights grow linearly from 1.0 at the
    # oldest sample to 2.0 at the newest, so recent motion dominates.
    first_time = self.samples[0].timestamp
    span = max(self.samples[-1].timestamp - first_time, 1)
    total_dx = total_dy = total_weight = 0.0
    for s in self.samples:
        w = 1.0 + (s.timestamp - first_time) / span
        total_dx += s.delta_x * w
        total_dy += s.delta_y * w
        total_weight += w
    if total_weight <= 0:
        return 0.0, 0.0
    return total_dx / total_weight * scale, total_dy / total_weight * scale
def _clamp_velocity(self, velocity: float) -> float:
    '''Limit a velocity component to the configured maximum magnitude.'''
    limit = self.settings.max_velocity
    return max(-limit, min(velocity, limit))
def _accumulate_velocity_from_samples(self) -> None:
    '''
    Calculate velocity from recent scroll samples and add to existing velocity.
    This creates the cumulative effect where repeated swipes increase speed.
    '''
    if not self.samples:
        return
    # Only samples recorded shortly before the gesture ended contribute.
    self._trim_old_samples(self._last_scroll_end_time)
    if not self.samples:
        return
    # Calculate new gesture velocity, then consume the samples.
    new_vx, new_vy = self._calculate_gesture_velocity()
    self.samples.clear()
    # Check direction compatibility and accumulate:
    # same direction - add velocities (clamped);
    # opposite direction - the new velocity takes over (unclamped).
    if same_sign(self.velocity_x, new_vx):
        self.velocity_x = self._clamp_velocity(self.velocity_x + new_vx)
    else:
        self.velocity_x = new_vx
    if same_sign(self.velocity_y, new_vy):
        self.velocity_y = self._clamp_velocity(self.velocity_y + new_vy)
    else:
        self.velocity_y = new_vy
def _update_momentum(self):
    '''Called by timer to apply synthetic momentum scrolling.'''
    # Fold freshly recorded gesture samples into the velocity first, so
    # repeated flicks accumulate speed.
    self._accumulate_velocity_from_samples()
    # Apply friction: the per-tick decay multiplier is (1 - friction),
    # with friction clamped into [0, 1].
    f = 1 - max(0, min(self.settings.friction, 1))
    self.velocity_x *= f
    self.velocity_y *= f
    # Stop once both components drop below the minimum threshold.
    if max(abs(self.velocity_x), abs(self.velocity_y)) < self.settings.min_velocity:
        self._stop_momentum()
        return
    # Apply the scroll
    self._do_scroll(self.velocity_x, self.velocity_y)
def _do_scroll(self, dx: float, dy: float):
    '''Apply scroll delta to the scroll area, carrying sub-pixel remainders.'''
    # Accumulate sub-pixel amounts
    self.accumulated_x += dx
    self.accumulated_y += dy
    # Snapshot the full (float) accumulated amounts to scroll by
    scroll_x = self.accumulated_x
    scroll_y = self.accumulated_y
    # Keep only the fractional remainder for the next call
    self.accumulated_x -= int(scroll_x)
    self.accumulated_y -= int(scroll_y)
    # Apply to scrollbars.
    # NOTE(review): the applied amount is int(scroll * multiplier) while
    # only int(scroll) was removed from the accumulator, so with a
    # multiplier != 1 the fractional carry is not exact — confirm intended.
    h_bar = self.scroll_area.horizontalScrollBar()
    v_bar = self.scroll_area.verticalScrollBar()
    if scroll_x != 0 and h_bar:
        h_bar.setValue(h_bar.value() - int(scroll_x * self.settings.x_multiplier))
    if scroll_y != 0 and v_bar:
        v_bar.setValue(v_bar.value() - int(scroll_y * self.settings.y_multiplier))
def _stop_momentum(self):
    '''Halt the momentum animation and reset all accumulated state.'''
    self.momentum_timer.stop()
    self.samples.clear()
    self.velocity_x = self.velocity_y = 0
    self.accumulated_x = self.accumulated_y = 0
def stop(self):
    '''Public method to stop any ongoing momentum scrolling.'''
    self._in_scroll_gesture = False
    self.momentum_detection_timer.stop()
    self._stop_momentum()
class MomentumScrollMixin:
    '''
    Mixin class to add momentum scrolling to any QAbstractScrollArea subclass.
    Automatically uses system momentum on macOS, synthesizes on Linux/Windows.
    Usage:
        class MyListView(MomentumScrollMixin, QListView):
            pass
    '''
    # Created lazily on the first wheel event; the class-level None values
    # double as "not yet initialized" markers.
    _momentum_scroller: MomentumScroller | None = None
    _momentum_settings: MomentumSettings | None = None

    def _ensure_momentum_scroller(self):
        # Lazy construction keeps the mixin free until the first scroll.
        if self._momentum_scroller is None:
            self._momentum_scroller = MomentumScroller(self, self._momentum_settings or MomentumSettings())

    def default_wheel_event_handler(self, event: QWheelEvent):
        # Fall through to the widget's normal wheel handling.
        super().wheelEvent(event)

    def wheelEvent(self, event: QWheelEvent):
        self._ensure_momentum_scroller()
        # Bypass momentum for axes that are disabled in the settings.
        # NOTE(review): the gating inspects angleDelta() even for trackpad
        # events that report pixelDelta() — confirm this is sufficient.
        if (not self._momentum_scroller.settings.enable_x and event.angleDelta().x() != 0) or (
                not self._momentum_scroller.settings.enable_y and event.angleDelta().y() != 0):
            return self.default_wheel_event_handler(event)
        if not self._momentum_scroller.handle_wheel_event(event):
            return self.default_wheel_event_handler(event)
        event.accept()

    def stopMomentumScroll(self):
        '''Stop any ongoing momentum scrolling.'''
        if self._momentum_scroller:
            self._momentum_scroller.stop()

    def update_momentum_scroll_settings(self, **kw) -> None:
        # Settings are an immutable NamedTuple; swap in a modified copy.
        self._ensure_momentum_scroller()
        self._momentum_scroller.settings = self._momentum_scroller.settings._replace(**kw)
# Demo {{{
if __name__ == '__main__':
    import sys

    class MomentumListView(MomentumScrollMixin, QListView):
        '''QListView with momentum scrolling enabled.'''
        # Fixed: the mixin must precede QListView in the MRO (as shown in
        # the MomentumScrollMixin docstring). With QListView first, its
        # wheelEvent shadowed the mixin's and momentum never activated.
        pass

    class DemoWindow(QMainWindow):
        '''Small demo window with a momentum list view and live tuning.'''

        def __init__(self):
            super().__init__()
            self.setWindowTitle('Momentum Scrolling Demo')
            self.setMinimumSize(400, 600)
            central = QWidget()
            self.setCentralWidget(central)
            layout = QVBoxLayout(central)
            # Info box
            info_box = QGroupBox('Scroll Behavior')
            info_layout = QVBoxLayout(info_box)
            info_layout.addWidget(QLabel(
                '• <b>macOS trackpad</b>: Native system momentum\n'
                '• <b>Linux trackpad</b>: Synthetic momentum (phases, no system momentum)\n'
                '• <b>Mouse wheel</b>: Synthetic momentum (no phases)'
            ))
            layout.addWidget(info_box)
            # Create list view with momentum scrolling
            self.list_view = MomentumListView()
            items = [f'Item {i} - Scroll me!' for i in range(1, 501)]
            model = QStringListModel(items)
            self.list_view.setModel(model)
            layout.addWidget(self.list_view, 1)
            # Tuning controls
            tuning_box = QGroupBox('Synthetic Momentum Tuning')
            tuning_layout = QVBoxLayout(tuning_box)
            # Friction slider
            friction_row = QHBoxLayout()
            friction_row.addWidget(QLabel('Friction:'))
            self.friction_slider = QSlider(Qt.Orientation.Horizontal)
            self.friction_slider.setRange(0, 100)
            # Fixed: MomentumSettings is a NamedTuple, so reading the field
            # on the class yields the field descriptor, not the default
            # value; instantiate to read the default friction.
            self.friction_slider.setValue(int(MomentumSettings().friction * 100))
            self.friction_slider.valueChanged.connect(self._update_friction)
            friction_row.addWidget(self.friction_slider)
            self.friction_label = QLabel('0.92')
            self.friction_label.setMinimumWidth(35)
            friction_row.addWidget(self.friction_label)
            tuning_layout.addLayout(friction_row)
            # Velocity scale slider
            velocity_row = QHBoxLayout()
            velocity_row.addWidget(QLabel('Velocity: '))
            self.velocity_slider = QSlider(Qt.Orientation.Horizontal)
            self.velocity_slider.setRange(20, 200)
            self.velocity_slider.setValue(80)
            self.velocity_slider.valueChanged.connect(self._update_velocity)
            velocity_row.addWidget(self.velocity_slider)
            self.velocity_label = QLabel('0.80')
            self.velocity_label.setMinimumWidth(35)
            velocity_row.addWidget(self.velocity_label)
            tuning_layout.addLayout(velocity_row)
            layout.addWidget(tuning_box)

        def _update_friction(self, value):
            friction = value / 100
            # Fixed: was f'{friction:value}%' — ':value' is not a valid
            # format spec and raised ValueError; use two decimals to match
            # the velocity label and the initial '0.92' text.
            self.friction_label.setText(f'{friction:.2f}')
            if self.list_view._momentum_scroller:
                self.list_view._momentum_scroller.settings = self.list_view._momentum_scroller.settings._replace(friction=friction)

        def _update_velocity(self, value):
            velocity = value / 100.0
            self.velocity_label.setText(f'{velocity:.2f}')
            if self.list_view._momentum_scroller:
                self.list_view._momentum_scroller.settings = self.list_view._momentum_scroller.settings._replace(velocity_scale=velocity)

    app = Application([])
    window = DemoWindow()
    window.show()
    sys.exit(app.exec())
# }}}
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/gui2/momentum_scroll.py",
"license": "GNU General Public License v3.0",
"lines": 357,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/gui2/preferences/look_feel_tabs/bookshelf_view.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2025, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from contextlib import suppress
from functools import lru_cache, partial
from qt.core import QDialog, QDialogButtonBox, QFontInfo, QIcon, QInputDialog, QLabel, Qt, QTabWidget, QTextBrowser, QTimer, QVBoxLayout, pyqtSignal
from calibre.gui2 import gprefs
from calibre.gui2.dialogs.confirm_delete import confirm
from calibre.gui2.dialogs.template_dialog import TemplateDialog
from calibre.gui2.preferences import AbortCommit, LazyConfigWidgetBase
from calibre.gui2.preferences.look_feel_tabs import RulesSetting
from calibre.gui2.preferences.look_feel_tabs.bookshelf_view_ui import Ui_bookshelf_tab as Ui_Form
from calibre.gui2.widgets2 import ColorButton
from calibre.utils.filenames import make_long_path_useable
class LogViewer(QDialog):  # {{{
    '''Read-only viewer for the page-count failures log, with a Clear button.'''

    def __init__(self, path: str, text: str, parent=None):
        super().__init__(parent)
        self.log_path = path
        self.setWindowTitle(_('Log of page count failures'))
        self.l = l = QVBoxLayout(self)
        self.la = la = QLabel(_('The log is stored at: {}').format(path))
        la.setWordWrap(True)
        l.addWidget(la)
        self.text = t = QTextBrowser(self)
        t.setPlainText(text)
        l.addWidget(t)
        self.bb = bb = QDialogButtonBox(QDialogButtonBox.StandardButton.Close)
        bb.rejected.connect(self.reject)
        l.addWidget(bb)
        self.clear_button = b = self.bb.addButton(_('&Clear'), QDialogButtonBox.ButtonRole.ResetRole)
        b.clicked.connect(self.clear_log)
        b.setIcon(QIcon.ic('trash.png'))
        self.resize(600, 500)

    def clear_log(self):
        # Confirmation is remembered via the 'clear_log_count' key.
        if not confirm('<p>'+_('The log for page count failures will be <b>permanently deleted</b>! Are you sure?'), 'clear_log_count', self):
            return
        # The log file may already be gone; that is fine.
        with suppress(FileNotFoundError):
            os.remove(make_long_path_useable(self.log_path))
        self.text.setPlainText('')
# }}}
class BookshelfTab(QTabWidget, LazyConfigWidgetBase, Ui_Form):
    '''Preferences tab controlling the appearance of the bookshelf view.'''

    changed_signal = pyqtSignal()
    restart_now = pyqtSignal()
    recount_updated = pyqtSignal(object)

    def __init__(self, parent=None):
        # Font choice is tracked outside the register() machinery as a dict.
        self.current_font_choice = gprefs.defaults['bookshelf_font'].copy()
        super().__init__(parent)

    def restore_defaults(self):
        super().restore_defaults()
        self.current_font_choice = gprefs.defaults['bookshelf_font'].copy()
        self.update_font_display()
        self.populate_custom_color_theme(use_defaults=True)

    def update_font_display(self):
        '''Show the chosen font as "family - style", or blank for default.'''
        text = ''
        s = self.current_font_choice
        if s.get('family'):
            text = s['family'] + ' - ' + (s.get('style') or '')
        self.bookshelf_font_display.setText(text)

    def initialize(self):
        super().initialize()
        s = gprefs['bookshelf_font'] or gprefs.defaults['bookshelf_font']
        self.current_font_choice = s.copy()
        self.update_font_display()
        self.populate_custom_color_theme()

    def commit(self, *args):
        import re
        tp = self.opt_bookshelf_spine_size_template.text()
        # Warn about templates that look like they return text rather than a
        # number in [0, 1]: any {field} other than the special templates,
        # without a width_from_pages() call.
        if tp not in ('{pages}', '{random}', '{size}') and re.match(r'\{[^}]+\}', tp) is not None and 'width_from_pages(' not in tp:
            if not confirm(_(
                'The template used for spine size must return a number between 0 and 1. The template'
                ' {0} is unlikely to do so. Are you sure?').format(tp), 'confirm-pages-template', parent=self):
                raise AbortCommit('abort')
        # Persist custom colors as {theme: {key: color}}.
        newval = {}
        for t, v in self.color_buttons.items():
            newval[t] = d = {}
            for k, b in v.items():
                d[k] = b.color
        gprefs['bookshelf_custom_colors'] = newval
        gprefs['bookshelf_font'] = self.current_font_choice.copy()
        return super().commit(*args)

    def genesis(self, gui):
        '''One-time setup: register all preferences and wire up the controls.'''
        self.gui = gui
        db = self.gui.current_db
        r = self.register
        r('bookshelf_icon_rules', {}, setting=RulesSetting)
        r('bookshelf_shadow', gprefs)
        r('bookshelf_variable_height', gprefs)
        r('bookshelf_fade_time', gprefs)
        r('bookshelf_up_to_down', gprefs)
        r('bookshelf_height', gprefs)
        r('bookshelf_make_space_for_second_line', gprefs)
        r('bookshelf_min_font_multiplier', gprefs)
        r('bookshelf_max_font_multiplier', gprefs)
        r('bookshelf_outline_width', gprefs)
        r('bookshelf_divider_text_right', gprefs)
        r('bookshelf_start_with_divider', gprefs)
        r('bookshelf_divider_style', gprefs, choices=[
            (_('Simple text'), 'text'),
            (_('Block'), 'block'),
            (_('Rounded corners'), 'rounded_corner'),
            (_('Gravestone'), 'gravestone'),
            (_('Hidden'), 'hidden'),
        ])
        r('bookshelf_thumbnail_opacity', gprefs)
        r('bookshelf_thumbnail', gprefs, choices=[
            (_('Full'), 'full'),
            (_('Cropped'), 'crops'),
            (_('Edge'), 'edge'),
            (_('Disable'), 'none'),
        ])
        self.opt_bookshelf_thumbnail.setToolTip(_('''\
<p><i>Full</i> - shows the full cover on the spine.
<p><i>Cropped</i> - shows only as much of the cover as will fit on the spine.
<p><i>Edge</i> - same as <i>Cropped</i> except only part of the spine is covered, the rest is a solid color.
<p><i>Disable</i> - The spine will be only the dominant color from the cover.'''))
        r('bookshelf_hover', gprefs, choices=[
            (_('Shift books on the shelf to make room'), 'shift'),
            (_('Above other books on the shelf'), 'above'),
            (_('Disable'), 'none'),
        ])
        # These templates are per-library settings, stored in database prefs.
        r('bookshelf_title_template', db.prefs)
        r('bookshelf_author_template', db.prefs)
        r('bookshelf_spine_size_template', db.prefs)
        r('bookshelf_theme_override', gprefs, choices=[
            (_('Inherit global setting'), 'none'),
            (_('Light'), 'light'),
            (_('Dark'), 'dark'),
        ])
        r('bookshelf_use_custom_background', gprefs)
        self.background_box.link_config('bookshelf_custom_background')
        self.config_cache.link(
            self.gui.bookshelf_view.cover_cache,
            'bookshelf_disk_cache_size', 'bookshelf_cache_size_multiple',
        )
        self.opt_bookshelf_spine_size_template.setToolTip(_('''
<p>The template used to calculate a width for the displayed spine.
The template must evaluate to a decimal number between 0.0 and 1.0, which will be used to set the width of the books spine.
An empty template means a fixed spine size for all books.
<p>The special template {2} calculates the number of pages in the book and uses that. Note that
the page size calculation happens in the background, so until the count is completed, the
book size is used as a proxy.
<p>The special template {0} uses the book size to estimate a spine size.
The special template {1} uses a random size.
You can also use a number between 0.0 and 1.0 to pick a fixed size.
<p>Note that this setting is per-library, which means that you have to set it again for every
different calibre library you use.''').format('{size}', '{random}', '{pages}'))
        self.template_title_button.clicked.connect(partial(self.edit_template_button, self.opt_bookshelf_title_template, _('Edit template for title')))
        self.template_author_button.clicked.connect(partial(self.edit_template_button, self.opt_bookshelf_author_template, _('Edit template for author')))
        self.template_pages_button.clicked.connect(partial(self.edit_template_button, self.opt_bookshelf_spine_size_template, _('Edit template for book size')))
        self.use_pages_button.clicked.connect(self.use_pages)
        self.recount_button.clicked.connect(self.recount_pages)
        self.show_log_button.clicked.connect(self.show_log)
        self._recount_button_txt = self.recount_button.text()
        # Queued connection: the count can be emitted from a timer callback.
        self.recount_updated.connect(self.update_recount_txt, type=Qt.ConnectionType.QueuedConnection)
        self.recount_timer = t = QTimer(self)
        t.setInterval(1000)  # 1 second
        t.timeout.connect(self.count_scan_needed)
        self.count_scan_needed()
        r('bookshelf_use_custom_colors', gprefs)
        self.restore_defaults_colors_button.clicked.connect(self.restore_defaults_colors)
        self.color_buttons = {}
        layout_map = {
            'light': self.custom_colors_light_layout,
            'dark': self.custom_colors_dark_layout,
        }
        for theme, layout in layout_map.items():
            self.color_buttons[theme] = theme_map = {}
            # Fixed: renamed the loop variable from `r` to `row` — the
            # original reused `r`, clobbering the `self.register` alias above.
            for row, (k, v) in enumerate(self.color_label_map().items()):
                theme_map[k] = b = ColorButton(parent=self)
                l = QLabel(v, self)
                l.setBuddy(b)
                # NOTE(review): insertRow(row, label, field) is called with
                # the ColorButton in the label position — confirm the
                # button-left layout is intentional.
                layout.insertRow(row, b, l)
                b.color_changed.connect(self.changed_signal)
        self.change_font_button.clicked.connect(self.change_font)
        self.restore_default_font_button.clicked.connect(self.restore_font)

    def change_font(self):
        '''Open the font selection dialog and store the chosen family/style.'''
        from calibre.gui2.preferences.look_feel_tabs.font_selection_dialog import FontSelectionDialog
        s = self.current_font_choice
        # Preview sizes derive from this widget's font size scaled by the
        # configured min/max multipliers.
        medium = QFontInfo(self.font()).pointSizeF()
        mins = gprefs['bookshelf_min_font_multiplier'] * medium
        maxs = gprefs['bookshelf_max_font_multiplier'] * medium
        d = FontSelectionDialog(
            family=s.get('family') or '', style=s.get('style') or '', parent=self,
            min_size=mins, medium_size=medium, max_size=maxs)
        if d.exec() == QDialog.DialogCode.Accepted:
            family, style = d.selected_font()
            self.current_font_choice = {'family': family, 'style': style}
            self.update_font_display()
            self.changed_signal.emit()

    def restore_font(self):
        self.current_font_choice = gprefs.defaults['bookshelf_font'].copy()
        self.update_font_display()
        self.changed_signal.emit()

    def lazy_initialize(self):
        # Start polling for pending page-count work only once the tab is shown.
        self.recount_timer.start()

    def show_log(self) -> None:
        '''Display the page-count failures log in a viewer dialog.'''
        db = self.gui.current_db.new_api
        path = db.page_count_failures_log_path
        txt = ''
        with suppress(FileNotFoundError), open(make_long_path_useable(path)) as f:
            txt = f.read()
        LogViewer(path, txt, self).exec()

    def recount_pages(self) -> None:
        '''Queue a background re-scan of page counts, after confirmation.'''
        # confirm() with extra_button returns (ok, extra_button_clicked).
        ok, force = confirm(_(
            'This will cause calibre to rescan all books in your library and update page counts, where changed.'
            ' The scanning happens in the background and can take up to an hour per thousand books'
            ' depending on the size of the books and the power of your computer. This is'
            ' typically never needed and is present mainly to aid debugging and testing. Are you sure?'),
            'confirm-pages-recount', parent=self, extra_button=_('Re-count &unchanged as well'))
        if ok:
            db = self.gui.current_db.new_api
            db.mark_for_pages_recount()
            db.queue_pages_scan(force=force)
            self.gui.library_view.model().zero_page_cache.clear()
            self.gui.bookshelf_view.invalidate()
            self.count_scan_needed()

    def count_scan_needed(self) -> None:
        # Emits via a queued connection; see genesis().
        if db := self.gui.current_db:
            self.recount_updated.emit(db.new_api.num_of_books_that_need_pages_counted())

    def update_recount_txt(self, count) -> None:
        '''Append the pending-recount count to the recount button label.'''
        msg = self._recount_button_txt
        if count > 0:
            msg += ' ({})'.format(_('pending recount: {}').format(count))
        self.recount_button.setText(msg)

    def edit_template_button(self, line_edit, title):
        '''Open the template editor seeded with up to 10 selected books.'''
        rows = self.gui.library_view.selectionModel().selectedRows()
        mi = None
        db = self.gui.current_db.new_api
        if rows:
            ids = list(map(self.gui.library_view.model().id, rows))
            mi = []
            for bk in ids[0:min(10, len(ids))]:
                mi.append(db.get_proxy_metadata(bk))
        t = TemplateDialog(self, line_edit.text(), mi=mi, fm=db.field_metadata)
        t.setWindowTitle(title)
        if t.exec():
            line_edit.setText(t.rule[1])

    def use_pages(self):
        '''Pick a column (e.g. from the Count Pages plugin) and fill the spine
        size template with a width_from_pages() call on it.'''
        fm = self.gui.current_db.new_api.field_metadata
        keys = sorted((k for k in fm.all_field_keys() if fm[k].get('name')), key=lambda k: fm[k].get('name').lower())
        names = ['{} ({})'.format(fm[k]['name'], k) for k in keys]
        try:
            # Pre-select the conventional Count Pages column if present.
            # Fixed: search the display-name list; the original searched
            # `keys`, which never contains the formatted name, so the
            # pre-selection always fell back to index 0.
            idx = names.index('{} ({})'.format(_('Pages'), '#pages'))
        except ValueError:
            idx = 0
        item, ok = QInputDialog.getItem(self, _('Choose a column for pages'), _(
            'Choose a column from which to get the page count for the book, such as generated by the Count Pages plugin'),
            names, idx)
        if item and ok and item in names:
            key = keys[names.index(item)]
            template = f'{{{key}:width_from_pages()}}'
            self.opt_bookshelf_spine_size_template.setText(template)

    @lru_cache(maxsize=2)
    def color_label_map(self) -> dict[str, str]:
        '''Map of custom color preference keys to their UI labels.'''
        # NOTE: lru_cache on a method keys on self and keeps the instance
        # alive; maxsize=2 bounds it, and this tab is effectively a
        # singleton, so this is acceptable here.
        return {
            'text_color_for_light_background': _('Text on &light spine background'),
            'text_color_for_dark_background': _('Text on &dark spine background'),
            'outline_color_for_light_background': _('&Outline on light spine background'),
            'outline_color_for_dark_background': _('Outli&ne on dark spine background'),
            'divider_background_color': _('Divider &background'),
            'divider_line_color': _('&Line on the divider'),
            'divider_text_color': _('Text on the &divider'),
            'current_color': _('The &current book highlight'),
            'selected_color': _('The &selected books highlight'),
            'current_selected_color': _('&The current and selected book highlight'),
        }

    def populate_custom_color_theme(self, use_defaults=False):
        '''Load saved (or default) custom colors into the color buttons.'''
        from calibre.gui2.library.bookshelf_view import ColorTheme
        default = {
            'light': ColorTheme.light_theme()._asdict(),
            'dark': ColorTheme.dark_theme()._asdict(),
        }
        configs = (gprefs.defaults if use_defaults else gprefs)['bookshelf_custom_colors']
        for theme in default:
            for k in self.color_label_map():
                b = self.color_buttons[theme][k]
                # Avoid emitting changed_signal during programmatic updates.
                b.blockSignals(True)
                b.special_default_color = default[theme][k].name()
                b.color = configs[theme].get(k)
                b.blockSignals(False)

    def restore_defaults_colors(self):
        # Setting color to None makes each button fall back to its default.
        for v in self.color_buttons.values():
            for b in v.values():
                b.color = None

    def refresh_gui(self, gui):
        gui.bookshelf_view.refresh_settings()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/gui2/preferences/look_feel_tabs/bookshelf_view.py",
"license": "GNU General Public License v3.0",
"lines": 288,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/gui2/actions/llm_book.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
from functools import partial
from calibre.gui2 import error_dialog
from calibre.gui2.actions import InterfaceAction
class LLMBookAction(InterfaceAction):

    name = 'Discuss book with AI'
    action_spec = (_('Discuss book with AI'), 'ai.png', _('Ask AI about books'), 'Ctrl+Alt+A')
    action_type = 'current'
    action_add_menu = True
    dont_add_to = frozenset(('context-menu-device', 'toolbar-device', 'menubar-device'))

    def genesis(self):
        '''Wire up the toolbar action and its dropdown menu.'''
        self.qaction.triggered.connect(self.ask_ai)
        self.ask_action = self.menuless_qaction
        self.ask_menu = self.qaction.menu()
        self.ask_menu.aboutToShow.connect(self.about_to_show_menu)

    def initialization_complete(self):
        # Keep the viewer's AI action shortcut in sync with this action's.
        self.gui.iactions['View'].llm_action.setShortcut(self.menuless_qaction.shortcut())

    def about_to_show_menu(self):
        '''Rebuild the dropdown with all configured AI actions, sorted by name.'''
        from calibre.utils.icu import primary_sort_key
        from calibre.gui2.dialogs.llm_book import current_actions
        menu = self.ask_menu
        menu.clear()
        for action in sorted(current_actions(), key=lambda a: primary_sort_key(a.human_name)):
            menu.addAction(action.human_name).triggered.connect(partial(self.ask_ai_with_action, action))

    def ask_ai_with_action(self, action=None):
        '''Open the AI chat dialog for the selected books, optionally
        activating a specific preconfigured action.'''
        from calibre.gui2.dialogs.llm_book import LLMBookDialog
        selection = self.gui.library_view.selectionModel().selectedRows()
        if not selection:
            error_dialog(self.gui, _('Cannot ask AI'), _('No book selected')).exec()
            return
        model = self.gui.library_view.model()
        db = model.db
        metadata = [db.new_api.get_metadata(db.id(index.row())) for index in selection]
        dialog = LLMBookDialog(metadata, parent=self.gui)
        if action is not None:
            dialog.llm.activate_action(action)
        dialog.exec()

    def ask_ai(self):
        self.ask_ai_with_action()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/gui2/actions/llm_book.py",
"license": "GNU General Public License v3.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
kovidgoyal/calibre:src/calibre/ai/lm_studio/backend.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Ali Sheikhizadeh (Al00X) <al00x@outlook.com> <https://al00x.com>
# Based on code Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import json
import posixpath
from collections.abc import Iterable, Iterator
from functools import lru_cache
from typing import Any, NamedTuple
from urllib.parse import urlparse, urlunparse
from urllib.request import Request
from calibre.ai import ChatMessage, ChatMessageType, ChatResponse
from calibre.ai.lm_studio import LMStudioAI
from calibre.ai.prefs import pref_for_provider
from calibre.ai.utils import chat_with_error_handler, develop_text_chat, download_data, read_streaming_response
module_version = 1
def pref(key: str, defval: Any = None) -> Any:
    # Convenience wrapper: read a setting scoped to the LM Studio provider.
    return pref_for_provider(LMStudioAI.name, key, defval)
def is_ready_for_use() -> bool:
    # The provider is usable once a text model has been configured.
    return bool(pref('text_model'))
class Model(NamedTuple):
    '''A model available on the LM Studio server.'''
    id: str     # model identifier as reported by the server
    owner: str  # value of "owned_by"; LM Studio models default to 'local'

    @classmethod
    def from_dict(cls, x: dict[str, Any]) -> 'Model':
        # Fixed: the return annotation must be a string — without
        # "from __future__ import annotations" a bare "Model" is evaluated
        # while the class body is still being built, raising NameError on
        # module import. Also construct via cls() for subclass friendliness.
        return cls(id=x['id'], owner=x.get('owned_by', 'local'))
def api_url(path: str = '', use_api_url: str | None = None) -> str:
base = (pref('api_url') if use_api_url is None else use_api_url) or LMStudioAI.DEFAULT_URL
purl = urlparse(base)
# LM Studio typically mounts endpoints under /v1
base_path = purl.path or '/v1'
if not base_path.endswith('/v1'):
base_path = posixpath.join(base_path, 'v1')
if path:
path = posixpath.join(base_path, path)
else:
path = base_path
purl = purl._replace(path=path)
return urlunparse(purl)
@lru_cache(8)
def get_available_models(use_api_url: str | None = None) -> dict[str, Model]:
    '''Fetch the models from the server (GET /v1/models), keyed by model id.

    Best-effort: any network or parse failure yields an empty dict instead
    of raising. NOTE(review): the lru_cache also caches such failures for
    the process lifetime, so a server started afterwards will not be seen
    for the same cache key — confirm this is acceptable.
    '''
    # LM Studio mimics OpenAI: GET /v1/models
    url = api_url('models', use_api_url)
    ans = {}
    try:
        data = json.loads(download_data(url))
        if 'data' in data:
            for m in data['data']:
                model = Model.from_dict(m)
                ans[model.id] = model
    except Exception:
        pass  # deliberate: treat any failure as "no models available"
    return ans
def does_model_exist_locally(model_id: str, use_api_url: str | None = None) -> bool:
    # Membership test against the (cached) server model list; defensive
    # even though get_available_models already swallows errors.
    try:
        return model_id in get_available_models(use_api_url)
    except Exception:
        return False
def human_readable_model_name(model_id: str) -> str:
    # LM Studio model ids are already human readable; return unchanged.
    return model_id
def config_widget():
    # Imported lazily so Qt is only loaded when the user opens configuration.
    from calibre.ai.lm_studio.config import ConfigWidget
    return ConfigWidget()
def save_settings(config_widget):
    # Persistence is delegated to the widget itself.
    config_widget.save_settings()
def for_assistant(self: ChatMessage) -> dict[str, Any]:
    '''Convert a ChatMessage into the OpenAI-style message dict the server expects.'''
    return dict(role=self.type.value, content=self.query)
def chat_request(data: dict[str, Any], url_override: str | None = None) -> Request:
    '''Build a POST Request for the /v1/chat/completions endpoint.'''
    payload = json.dumps(data).encode('utf-8')
    return Request(
        api_url('chat/completions', url_override),
        data=payload,
        headers={'Content-Type': 'application/json'},
        method='POST',
    )
def text_chat_implementation(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    '''Stream a chat completion from LM Studio, yielding ChatResponse chunks.'''
    model_id = use_model or pref('text_model')
    temperature = pref('temperature', 0.7)
    data = {
        'model': model_id,
        'messages': [for_assistant(m) for m in messages],
        'stream': True,
        'temperature': temperature,
    }
    rq = chat_request(data)
    # NOTE: the loop variable reuses the name `data`; the request payload is
    # no longer needed once `rq` has been built.
    for data in read_streaming_response(rq, LMStudioAI.name):
        for choice in data.get('choices', []):
            d = choice.get('delta', {})
            content = d.get('content')
            role = d.get('role')
            if content:
                # Streaming deltas usually omit `role` after the first
                # chunk; default to 'assistant' in that case.
                yield ChatResponse(content=content, type=ChatMessageType(role or 'assistant'), plugin_name=LMStudioAI.name)
        if 'usage' in data:
            # The final chunk carries usage stats; surface provider metadata.
            yield ChatResponse(has_metadata=True, provider='LM Studio', model=data.get('model', model_id), plugin_name=LMStudioAI.name)
def text_chat(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    # Public entry point: failures are converted into error responses by
    # chat_with_error_handler rather than raising.
    yield from chat_with_error_handler(text_chat_implementation(messages, use_model))
def develop(use_model: str = '', msg: str = '') -> None:
    '''Interactive command-line chat for development/testing of this backend.'''
    m = (ChatMessage(msg),) if msg else ()
    develop_text_chat(text_chat, use_model, messages=m)
if __name__ == '__main__':
    # Allow running this module directly for an interactive test chat.
    develop()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/lm_studio/backend.py",
"license": "GNU General Public License v3.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/ai/lm_studio/config.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Ali Sheikhizadeh (Al00X) <al00x@outlook.com> <https://al00x.com>
# Based on code Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
from functools import partial
from typing import Any
from qt.core import QComboBox, QCompleter, QDoubleSpinBox, QFormLayout, QHBoxLayout, QLabel, QLineEdit, QListView, QPushButton, QSpinBox, Qt, QWidget
from calibre.ai.lm_studio import LMStudioAI
from calibre.ai.prefs import pref_for_provider, set_prefs_for_provider
from calibre.ai.utils import configure, plugin_for_name
from calibre.gui2 import error_dialog
from calibre.gui2.widgets import BusyCursor
pref = partial(pref_for_provider, LMStudioAI.name)
class ConfigWidget(QWidget):
def __init__(self, parent: QWidget | None = None):
super().__init__(parent)
l = QFormLayout(self)
l.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.ExpandingFieldsGrow)
la = QLabel('<p>' + _('LM Studio allows you to run AI models locally. Start the LM Studio server (usually on port 1234) and ensure a model is loaded.'))
la.setWordWrap(True)
l.addRow(la)
self.api_url_edit = a = QLineEdit()
a.setClearButtonEnabled(True)
a.setPlaceholderText(_('The LM Studio URL, defaults to {}').format(LMStudioAI.DEFAULT_URL))
l.addRow(_('LM Studio &URL:'), a)
a.setText(pref('api_url') or '')
self.timeout_sb = t = QSpinBox(self)
t.setRange(15, 600)
t.setSingleStep(1)
t.setSuffix(_(' seconds'))
t.setValue(pref('timeout', 120))
l.addRow(_('&Timeout:'), t)
self.temp_sb = temp = QDoubleSpinBox(self)
temp.setRange(0.0, 2.0)
temp.setSingleStep(0.1)
temp.setValue(pref('temperature', 0.7))
temp.setToolTip(_('Controls randomness. 0 is deterministic, higher is more creative.'))
l.addRow(_('T&emperature:'), temp)
# --- Model selector field (ComboBox dropdown) ---
w = QWidget()
h = QHBoxLayout(w)
h.setContentsMargins(0, 0, 0, 0)
self.model_combo = mc = QComboBox(w)
mc.setEditable(True)
mc.setInsertPolicy(QComboBox.NoInsert)
mc.setView(QListView(mc))
mc.setSizeAdjustPolicy(QComboBox.AdjustToContentsOnFirstShow)
completer = QCompleter(mc)
completer.setCaseSensitivity(Qt.CaseInsensitive)
mc.setCompleter(completer)
saved_model = pref('text_model') or ''
if saved_model:
mc.addItem(saved_model)
mc.setCurrentText(saved_model)
else:
mc.setCurrentText('')
self.refresh_btn = rb = QPushButton(_('&Refresh models'))
rb.clicked.connect(self.refresh_models)
h.addWidget(mc, stretch=10)
h.addWidget(rb)
l.addRow(_('&Model:'), w)
self.model_status = ms = QLabel('')
ms.setWordWrap(True)
ms.setTextInteractionFlags(Qt.TextInteractionFlag.TextSelectableByMouse)
l.addRow('', ms)
# Store last loaded models for tooltip
self._last_models: list[str] = []
mc.activated.connect(self._on_model_selected)
def refresh_models(self):
with BusyCursor():
try:
plugin = plugin_for_name(LMStudioAI.name)
backend = plugin.builtin_live_module
models_dict = backend.get_available_models(self.api_url)
keys = list(models_dict.keys()) if models_dict else []
self._last_models = keys
self.model_combo.blockSignals(True)
self.model_combo.clear()
for k in keys:
self.model_combo.addItem(k)
# If the current combo is empty and models exist, select the first one
if not self.model_combo.currentText() and keys:
self.model_combo.setCurrentText(keys[0])
# Restore previous selection if it exists in new list
current_text = (pref('text_model') or '').strip()
if current_text and current_text in keys:
self.model_combo.setCurrentText(current_text)
self.model_combo.blockSignals(False)
if keys:
display_count = 3
sample = ', '.join(keys[:display_count])
msg = _('Found {} models. e.g.: {}').format(len(keys), sample)
if len(keys) > display_count:
msg += _(' (and more)')
self.model_status.setText(msg)
self.model_status.setToolTip(', '.join(keys)) # Full list in tooltip
else:
self.model_status.setText(_('No models found. Ensure a model is loaded in LM Studio.'))
self.model_status.setToolTip('')
except Exception as e:
self.model_status.setText(_('Connection failed: {}').format(str(e)))
self.model_status.setToolTip('')
@property
def api_url(self) -> str:
return self.api_url_edit.text().strip()
@property
def text_model(self) -> str:
    """The model identifier currently shown in the combo box, stripped."""
    chosen = self.model_combo.currentText()
    return chosen.strip()
@property
def settings(self) -> dict[str, Any]:
    """Collect the provider settings from the widgets, as stored in prefs.

    The API URL key is only present when the user actually entered one.
    """
    result: dict[str, Any] = {
        'text_model': self.text_model,
        'timeout': self.timeout_sb.value(),
        'temperature': self.temp_sb.value(),
    }
    if u := self.api_url:
        result['api_url'] = u
    return result
@property
def is_ready_for_use(self) -> bool:
    """True once a model has been specified; nothing else is mandatory."""
    if self.text_model:
        return True
    return False
def validate(self) -> bool:
    """Check the form is complete, showing an error dialog when it is not."""
    if self.text_model:
        return True
    error_dialog(self, _('No model specified'), _('You must specify a model ID.'), show=True)
    return False
def save_settings(self):
    """Persist the current UI settings for the LM Studio provider."""
    set_prefs_for_provider(LMStudioAI.name, self.settings)
def _on_model_selected(self, index: int):
    """Echo the user's combo-box choice in the status label."""
    chosen = self.model_combo.itemText(index)
    self.model_status.setText(_('Selected model: {0}').format(chosen))
if __name__ == '__main__':
    # Launch the provider configuration dialog standalone for development.
    configure(LMStudioAI.name)
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/lm_studio/config.py",
"license": "GNU General Public License v3.0",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/gui2/dialogs/llm_book.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
from collections.abc import Iterator
from functools import lru_cache
from typing import Any
from qt.core import QAbstractItemView, QDialog, QDialogButtonBox, QLabel, QListWidget, QListWidgetItem, QSize, Qt, QUrl, QVBoxLayout, QWidget
from calibre.ai import ChatMessage, ChatMessageType
from calibre.db.cache import Cache
from calibre.ebooks.metadata.book.base import Metadata
from calibre.gui2 import Application, gprefs
from calibre.gui2.llm import ActionData, ConverseWidget, LLMActionsSettingsWidget, LLMSettingsDialogBase, LocalisedResults
from calibre.gui2.ui import get_gui
from calibre.gui2.widgets2 import Dialog
from calibre.library.comments import comments_as_markdown
from calibre.utils.icu import primary_sort_key
from calibre.utils.localization import ngettext
from polyglot.binary import from_hex_unicode
def format_book_for_query(book: Metadata, is_first: bool, num_books: int) -> str:
    """Build a plain-text description of *book* for inclusion in the AI prompt.

    Title and authors are always included; other fields only when enabled via
    get_allowed_fields(). *is_first* and *num_books* control the
    "first book"/"next book" phrasing when several books are discussed.
    """
    which = '' if num_books < 2 else ('first ' if is_first else 'next ')
    ans = f'The {which}book is: {book.title} by {book.format_authors()}.'
    # Fields still to be described; title/authors are already in the header.
    left = get_allowed_fields() - {'title', 'authors'}
    if 'series' in left:
        ans += f' It is in the series: {book.series}.'
        left.discard('series'), left.discard('series_index')
    if 'tags' in left:
        ans += ' It is tagged with the following tags:' + book.format_tags() + '.'
        left.discard('tags')
    comments = []
    fields = []
    for field in left:
        if book.is_null(field):
            continue
        m = book.metadata_for_field(field)
        if field == 'comments':
            comments.append(comments_as_markdown(book.get(field)))
        elif m.get('datatype') == 'comments':
            # Long-text custom columns can hold plain text, markdown or HTML;
            # only HTML-ish content needs conversion to markdown.
            ctype = m.get('display', {}).get('interpret_as') or 'html'
            match ctype:
                case 'long-text' | 'short-text' | 'markdown':
                    comments.append(book.get(field))
                case _:
                    comments.append(comments_as_markdown(book.get(field)))
        else:
            # NOTE(review): the unpacking below assumes format_field() returns
            # a (display name, value) pair — confirm against Metadata.format_field.
            fields.append(book.format_field(field))
    if fields:
        ans += ' It has the following additional metadata.'
        for name, val in fields:
            ans += f'\n{name}: {val}'
    if comments:
        ans += '\nSome notes about this book:\n' + '\n'.join(comments)
    return ans
def format_books_for_query(books: list[Metadata]) -> str:
    """Build the introductory prompt text describing every selected book."""
    plural = len(books) > 1
    intro = 'I wish to discuss the following books. ' if plural else 'I wish to discuss the following book. '
    parts = [intro]
    total = len(books)
    for idx, bk in enumerate(books):
        parts.append(format_book_for_query(bk, idx == 0, total) + '\n---------------\n\n')
    return ''.join(parts)
def get_allowed_fields() -> set[str]:
    """Union of allowed standard (global pref) and custom (per-library pref) fields."""
    custom = set(get_current_db().pref('llm-book-allowed-custom-fields') or ())
    standard = set(gprefs.get('llm-book-allowed-standard-fields') or ())
    return standard | custom
class Action(ActionData):
    """Quick action specialized for book discussions."""

    def prompt_text(self, books: list[Metadata]) -> str:
        """Expand the prompt template for the given selection of books."""
        single = len(books) < 2
        first = books[0]
        return self.prompt_template.format(
            books_word='book' if single else 'books',
            is_are='is' if single else 'are',
            title=first.title, authors=first.format_authors(), series=first.series or '',
        )
@lru_cache(2)
def default_actions() -> tuple[Action, ...]:
    """The built-in quick actions, constructed once and cached.

    Only the human-readable names are translated; the prompt templates are
    sent to the AI verbatim (in English).
    """
    return (
        Action('summarize', _('Summarize'), 'Provide a concise summary of the previously described {books_word}.'),
        Action('chapters', _('Chapters'), 'Provide a chapter by chapter summary of the previously described {books_word}.'),
        Action('read_next', _('Read next'), 'Suggest some good books to read after the previously described {books_word}.'),
        Action('universe', _('Universe'), 'Describe the fictional universe the previously described {books_word} {is_are} set in.'
               ' Outline major plots, themes and characters in the universe.'),
        Action('series', ngettext('Series', 'Series', 1), 'Give the series the previously described {books_word} {is_are} in.'
               ' List all the books in the series, in both published and internal chronological order.'
               ' Also describe any prominent spin-off series.')
    )
def read_next_action() -> Action:
    """Return the built-in 'read_next' quick action, raising KeyError if absent."""
    matches = [ac for ac in default_actions() if ac.name == 'read_next']
    if not matches:
        raise KeyError('No read next action could be found')
    return matches[0]
def current_actions(include_disabled=False) -> Iterator[Action]:
    """Yield the quick actions as configured in preferences."""
    stored = gprefs.get('llm_book_quick_actions') or {}
    return Action.unserialize(stored, default_actions(), include_disabled)
class LLMSettingsWidget(LLMActionsSettingsWidget):
    """Quick-action settings tab for the book AI dialog."""

    # Help text shown in the action editor; the positional placeholders are
    # filled with the literal template-variable names so they survive _().
    action_edit_help_text = '<p>' + _(
        'The prompt is a template. The expression {0} will be replaced by "book"'
        ' when there is only a single book being discussed and "books" otherwise.'
        ' Similarly {1} becomes "is" or "are", as needed. {2}, {3}, {4} are replaced '
        ' by the title, authors and series of the first book, respectively.'
    ).format('{books_word}', '{is_are}', '{title}', '{authors}', '{series}')

    def get_actions_from_prefs(self) -> Iterator[ActionData]:
        """Yield all configured actions, including disabled ones, for editing."""
        yield from current_actions(include_disabled=True)

    def set_actions_in_prefs(self, s: dict[str, Any]) -> None:
        """Store the serialized action configuration in the global prefs."""
        gprefs.set('llm_book_quick_actions', s)

    def create_custom_widgets(self) -> Iterator[tuple[str, QWidget]]:
        # Extra (label, widget) rows placed above the actions list.
        yield '', LocalisedResults()
def get_current_db() -> Cache:
    """Return the new-API database cache, honoring a develop()-time override.

    develop() sets ``get_current_db.ans`` so the dialog can run without a GUI.
    """
    override = getattr(get_current_db, 'ans', None)
    if override:
        return override.new_api
    return get_gui().current_db.new_api
class MetadataSettings(QWidget):
    """Settings tab letting the user choose which metadata fields go to the AI."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.l = l = QVBoxLayout(self)
        la = QLabel(_('Select which metadata fields to send to the AI from the selected books. Note that title and authors are always sent.'))
        la.setWordWrap(True)
        l.addWidget(la)
        self.list_widget = lw = QListWidget(self)
        lw.setSelectionMode(QAbstractItemView.SelectionMode.NoSelection)
        lw.itemClicked.connect(self.toggle_item)
        l.addWidget(lw)
        db = get_current_db()
        fm = db.field_metadata
        allowed = get_allowed_fields()
        # One checkable row per displayable field, sorted by localized label.
        for field_name in sorted(fm.displayable_field_keys(), key=lambda n: primary_sort_key(fm[n]['label'])):
            # Always-sent or meaningless fields are not offered for selection.
            if field_name in ('title', 'authors', 'author_sort', 'sort', 'id', 'uuid'):
                continue
            fd = fm[field_name]
            item = QListWidgetItem(fd['name'], lw)
            item.setToolTip(field_name)
            item.setFlags(Qt.ItemFlag.ItemIsEnabled)
            item.setCheckState(Qt.CheckState.Checked if field_name in allowed else Qt.CheckState.Unchecked)
            # The lookup name is stored on the item for use by commit().
            item.setData(Qt.ItemDataRole.UserRole, field_name)
        bb = QDialogButtonBox(self)
        bb.addButton(_('Select &all'), QDialogButtonBox.ButtonRole.ActionRole).clicked.connect(self.select_all)
        bb.addButton(_('Select &none'), QDialogButtonBox.ButtonRole.ActionRole).clicked.connect(self.select_none)
        l.addWidget(bb)

    def __iter__(self):
        # Iterate over every QListWidgetItem in the field list.
        lw = self.list_widget
        return (lw.item(r) for r in range(lw.count()))

    def select_all(self):
        """Check every field."""
        for item in self:
            item.setCheckState(Qt.CheckState.Checked)

    def select_none(self):
        """Uncheck every field."""
        for item in self:
            item.setCheckState(Qt.CheckState.Unchecked)

    def toggle_item(self, item):
        # Clicking anywhere on a row flips its check state.
        item.setCheckState(
            Qt.CheckState.Unchecked if item.checkState() == Qt.CheckState.Checked else Qt.CheckState.Checked)

    def commit(self) -> bool:
        """Persist checked fields: standard ones globally, custom (#…) per-library."""
        allowed_standard = set()
        allowed_custom = set()
        for item in self:
            if item.checkState() == Qt.CheckState.Checked:
                f = item.data(Qt.ItemDataRole.UserRole)
                if f.startswith('#'):
                    allowed_custom.add(f)
                else:
                    allowed_standard.add(f)
        gprefs.set('llm-book-allowed-standard-fields', sorted(allowed_standard))
        db = get_current_db()
        db.set_pref('llm-book-allowed-custom-fields', sorted(allowed_custom))
        return True
class LLMSettingsDialog(LLMSettingsDialogBase):
    """AI settings dialog for the book discussion feature."""

    def __init__(self, parent=None):
        super().__init__(title=_('AI Settings'), name='llm-book-settings-dialog', prefs=gprefs, parent=parent)

    def custom_tabs(self) -> Iterator[tuple[str, str, QWidget]]:
        # (icon name, tab title, widget) triples added after the provider tab.
        yield 'config.png', _('&Actions'), LLMSettingsWidget(self)
        yield 'metadata.png', _('&Metadata'), MetadataSettings(self)
class LLMPanel(ConverseWidget):
NOTE_TITLE = _('AI Assistant Discussion')
def __init__(self, books: list[Metadata], parent: QWidget | None = None):
self.books = books
super().__init__(parent, add_close_button=True)
def settings_dialog(self) -> QDialog:
return LLMSettingsDialog(self)
def handle_chat_link(self, qurl: QUrl) -> bool:
match qurl.host():
case self.quick_action_hostname:
name = from_hex_unicode(qurl.path().strip('/'))
for ac in current_actions():
if ac.name == name:
self.activate_action(ac)
break
return True
return False
def activate_action(self, action: Action) -> None:
self.start_api_call(self.prompt_text_for_action(action))
def choose_action_message(self) -> str:
msg = '<p>'
if len(self.books) > 1:
msg += _('{0} books selected, starting with: <i>{1}</i>').format(len(self.books), self.books[0].title)
else:
msg += _('Selected book: <i>{}</i>').format(self.books[0].title)
msg += self.quick_actions_as_html(current_actions())
msg += '<p>' + _('Or, type a question to the AI below, for example:') + '<br>'
msg += '<i>Discuss the literary influences in this book</i>'
return msg
ready_message = choose_action_message
def create_initial_messages(self, action_prompt: str, **kwargs: Any) -> Iterator[ChatMessage]:
context_header = format_books_for_query(self.books)
context_header += ' When you answer the questions use markdown formatting for the answers wherever possible.'
if len(self.books) > 1:
context_header += ' If any of the specified books are unknown to you, instead of answering the following'
' questions, just say the books are unknown.'
else:
context_header += ' If the specified book is unknown to you instead of answering the following questions'
' just say the book is unknown.'
if language_instruction := self.get_language_instruction():
context_header += ' ' + language_instruction
yield ChatMessage(context_header, type=ChatMessageType.system)
yield ChatMessage(action_prompt)
def prompt_text_for_action(self, action: Action) -> str:
return action.prompt_text(self.books)
class LLMBookDialog(Dialog):
    """Top-level dialog hosting an LLMPanel for the selected books."""

    def __init__(self, books: list[Metadata], parent: QWidget | None = None):
        self.books = books
        super().__init__(
            name='llm-book-dialog', title=_('Ask AI about {}').format(books[0].title) if len(books) < 2 else _(
                'Ask AI about {} books').format(len(books)),
            parent=parent, default_buttons=QDialogButtonBox.StandardButton.Close)

    def setup_ui(self):
        l = QVBoxLayout(self)
        l.setContentsMargins(0, 0, 0, 0)
        self.llm = llm = LLMPanel(self.books, parent=self)
        self.llm.close_requested.connect(self.accept)
        # Disconnect AI response callbacks when the dialog goes away.
        self.finished.connect(self.llm.cleanup_on_close)
        l.addWidget(llm)
        # The panel supplies its own close button, so hide the standard box.
        self.bb.setVisible(False)
        self.llm.result_display.input.setFocus(Qt.FocusReason.OtherFocusReason)

    def sizeHint(self):
        return QSize(600, 750)
def develop():
    """Run the dialog standalone for development, against the default library."""
    from calibre.library import db
    # Override get_current_db() since no calibre GUI is running.
    get_current_db.ans = db()
    app = Application([])
    LLMBookDialog([Metadata('The Trials of Empire', ['Richard Swan'])]).exec()
    del app
if __name__ == '__main__':
    # Development entry point.
    develop()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/gui2/dialogs/llm_book.py",
"license": "GNU General Public License v3.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/gui2/llm.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import textwrap
from collections.abc import Iterator
from html import escape
from itertools import count
from threading import Thread
from typing import Any, NamedTuple
from qt.core import (
QAbstractItemView,
QApplication,
QCheckBox,
QDateTime,
QDialog,
QDialogButtonBox,
QEvent,
QFormLayout,
QGroupBox,
QHBoxLayout,
QIcon,
QLabel,
QLineEdit,
QListWidget,
QListWidgetItem,
QLocale,
QPlainTextEdit,
QPushButton,
QSize,
QSizePolicy,
Qt,
QTabWidget,
QTextBrowser,
QUrl,
QVBoxLayout,
QWidget,
pyqtSignal,
sip,
)
from calibre.ai import AICapabilities, ChatMessage, ChatMessageType, ChatResponse
from calibre.ai.config import ConfigureAI
from calibre.ai.prefs import plugin_for_purpose
from calibre.ai.prefs import prefs as aiprefs
from calibre.ai.utils import ContentType, StreamedResponseAccumulator, response_to_html
from calibre.customize import AIProviderPlugin
from calibre.gui2 import error_dialog, safe_open_url
from calibre.gui2.chat_widget import Button, ChatWidget, Header
from calibre.gui2.dialogs.confirm_delete import confirm
from calibre.gui2.widgets2 import Dialog
from calibre.utils.icu import primary_sort_key
from calibre.utils.localization import ui_language_as_english
from calibre.utils.logging import ERROR, WARN
from calibre.utils.short_uuid import uuid4
from polyglot.binary import as_hex_unicode
prompt_sep = '\n\n------\n\n'
reasoning_icon = 'reports.png'
def for_display_to_human(self: ChatMessage, is_initial_query: bool = False, content_type: ContentType = ContentType.unknown) -> str:
    """Render a chat message as HTML for the chat view; system messages are hidden.

    For the initial user query the first prompt separator is collapsed to a
    plain blank line so internal structure is not shown to the user.
    """
    if self.type is ChatMessageType.system:
        return ''
    text = self.query
    if is_initial_query:
        sep_at = text.find(prompt_sep)
        if sep_at > -1:
            text = text[:sep_at] + '\n\n' + text[sep_at + len(prompt_sep):]
    return response_to_html(text, content_type=content_type)
def show_reasoning(reasoning: str, parent: QWidget | None = None):
    """Show the AI's reasoning text in a simple modal dialog."""
    d = QDialog(parent)
    l = QVBoxLayout(d)
    b = QTextBrowser(d)
    b.setPlainText(reasoning)
    l.addWidget(b)
    d.setWindowTitle(_('Reasoning used by AI'))
    d.setWindowIcon(QIcon.ic(reasoning_icon))
    bb = QDialogButtonBox(QDialogButtonBox.StandardButton.Close, d)
    # Fix: QDialogButtonBox does not auto-connect to its parent dialog, so the
    # Close button previously did nothing (only Esc/titlebar closed the dialog).
    bb.rejected.connect(d.reject)
    l.addWidget(bb)
    d.resize(600, 500)
    d.exec()
class ConversationHistory:
    """Accumulates the messages, streaming state and cost of one AI conversation."""

    def __init__(self):
        self.accumulator = StreamedResponseAccumulator()
        self.items: list[ChatMessage] = []
        self.model_used = ''           # model id of the last completed response
        self.api_call_active = False   # True while a streaming call is in flight
        self.current_response_completed = True
        self.cost = 0.                 # accumulated cost reported by the provider
        self.response_count = 0        # number of completed responses
        self.currency = ''             # currency of `cost`, '' when unknown/free

    def __iter__(self) -> Iterator[ChatMessage]:
        return iter(self.items)

    def reverse_iter(self) -> Iterator[ChatMessage]:
        return reversed(self.items)

    def __len__(self) -> int:
        return len(self.items)

    def __bool__(self) -> bool:
        return bool(self.items)

    def append(self, x: ChatMessage) -> None:
        self.items.append(x)

    def copy(self, upto: int | None = None) -> ConversationHistory:
        """Return a copy of the messages (optionally only the first *upto*).

        Note: only the messages and model are copied, not the streaming
        accumulator or the cost counters.
        """
        ans = ConversationHistory()
        ans.model_used = self.model_used
        if upto is None:
            ans.items = list(self.items)
        else:
            ans.items = self.items[:upto]
        return ans

    def only(self, message_index: int) -> ConversationHistory:
        """A history containing just the message at *message_index*."""
        ans = self.copy(message_index + 1)
        ans.items = [ans.items[-1]]
        return ans

    def at(self, x: int) -> ChatMessage:
        return self.items[x]

    def new_api_call(self) -> None:
        """Reset streaming state at the start of an API call."""
        self.accumulator = StreamedResponseAccumulator()
        self.current_response_completed = False
        self.api_call_active = True

    def finalize_response(self) -> None:
        """Fold the streamed response into the message list and update cost stats."""
        self.current_response_completed = True
        self.api_call_active = False
        self.accumulator.finalize()
        self.items.extend(self.accumulator)
        self.response_count += 1
        if self.accumulator.metadata.has_metadata:
            self.model_used = self.accumulator.metadata.model
            self.cost += self.accumulator.metadata.cost
            self.currency = self.accumulator.metadata.currency

    def format_llm_note(self, assistant_name: str, title: str = '') -> str:
        '''
        Formats a conversation history into a standardized, self-contained note
        entry: a header with timestamp, the last assistant response, and (for
        multi-message conversations) a full transcript. Returns '' when there
        is nothing to record.
        '''
        if not self:
            return ''
        # The "main" response is the most recent assistant message.
        main_response = ''
        for message in self.reverse_iter():
            if message.from_assistant:
                main_response = message.query.strip()
                break
        if not main_response:
            return ''
        timestamp = QLocale.system().toString(QDateTime.currentDateTime(), QLocale.FormatType.ShortFormat)
        sep = '―――'
        title = title or _('AI Assistant Note')
        header = f'{sep} {title} ({timestamp}) {sep}'
        if len(self) == 1:
            return f'{header}\n\n{main_response}'
        # Full transcript: user and assistant turns only (system messages skipped).
        record_lines = []
        for message in self:
            match message.type:
                case ChatMessageType.user:
                    role = _('You')
                case ChatMessageType.assistant:
                    role = assistant_name
                case _:
                    continue
            content = message.query.strip()
            entry = f'{role}: {content}'
            record_lines.append(entry)
        record_body = '\n\n'.join(record_lines)
        record_header = f'{sep} {_("Conversation record")} {sep}'
        return (
            f'{header}\n\n{main_response}\n\n'
            f'{record_header}\n\n{record_body}'
        )
class ConverseWidget(QWidget):
    """Base chat widget for conversing with an AI provider.

    Owns a ConversationHistory, runs provider calls on a worker thread and
    marshals streamed responses back to the GUI thread via the
    ``response_received`` signal. Subclasses implement the "Subclass API"
    section at the bottom to supply prompts, settings and link handling.
    """

    # (api call number, ChatResponse | None); None marks end-of-stream.
    response_received = pyqtSignal(int, object)
    close_requested = pyqtSignal()

    def __init__(self, parent=None, add_close_button=False):
        super().__init__(parent)
        self.counter = count(start=1)  # monotonically numbers API calls
        # Per-widget random hostnames namespace the internal link URLs so
        # clicks can be routed without colliding with real links.
        self.hid = hid = uuid4().lower()
        self.configure_ai_hostname = f'{hid}.config.calibre'
        self.copy_hostname = f'{hid}.copy.calibre'
        self.quick_action_hostname = f'{hid}.quick.calibre'
        self.reasoning_hostname = f'{hid}.reasoning.calibre'
        self.current_api_call_number = 0
        self.session_cost = 0.0
        self.session_cost_currency = ''
        self.update_ai_provider_plugin()
        self.clear_current_conversation()
        self.layout = QVBoxLayout(self)
        self.layout.setContentsMargins(5, 5, 5, 5)
        self.result_display = rd = ChatWidget(self, _('Type a question to the AI'))
        rd.link_clicked.connect(self.on_chat_link_clicked)
        rd.input_from_user.connect(self.run_custom_prompt)
        self.layout.addWidget(rd)
        self.response_actions_layout = QHBoxLayout()
        self.response_buttons = {}
        self.add_buttons()
        self.response_actions_layout.addStretch()
        self.layout.addLayout(self.response_actions_layout)
        footer_layout = QHBoxLayout()
        self.settings_button = QPushButton(QIcon.ic('config.png'), _('Se&ttings'))
        self.settings_button.setCursor(Qt.CursorShape.PointingHandCursor)
        self.settings_button.setSizePolicy(QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed)
        self.settings_button.clicked.connect(self.show_settings)
        self.api_usage_label = QLabel('')
        footer_layout.addWidget(self.settings_button)
        footer_layout.addStretch()
        footer_layout.addWidget(self.api_usage_label)
        if add_close_button:
            self.close_button = b = QPushButton(QIcon.ic('close.png'), _('&Close'))
            b.setCursor(Qt.CursorShape.PointingHandCursor)
            b.setSizePolicy(QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed)
            b.clicked.connect(self.close_requested)
            footer_layout.addWidget(b)
        self.layout.addLayout(footer_layout)
        # Queued so worker-thread emits are delivered on the GUI thread.
        self.response_received.connect(self.on_response_from_ai, type=Qt.ConnectionType.QueuedConnection)
        self.show_initial_message()
        self.update_cost()

    def setFocus(self, reason) -> None:
        self.result_display.setFocus(reason)

    def language_instruction(self):
        """Instruction asking the model to reply in the UI language."""
        lang = ui_language_as_english()
        return f'If you can speak in {lang}, then respond in {lang}.'

    def quick_actions_as_html(self, actions) -> str:
        """Render the quick actions as a block of internal links."""
        actions = sorted(actions, key=lambda a: primary_sort_key(a.human_name))
        if not actions:
            return ''
        ans = []
        for action in actions:
            hn = action.human_name.replace(' ', '\xa0')
            ans.append(f'''<a title="{self.prompt_text_for_action(action)}"
            href="http://{self.quick_action_hostname}/{as_hex_unicode(action.name)}"
            style="text-decoration: none">{hn}</a>''')
        links = '\xa0\xa0\xa0 '.join(ans)
        return f'<h3>{_("Quick actions")}</h3> {links}'

    def show_settings(self):
        self.settings_dialog().exec()
        # The user may have switched providers in the dialog.
        self.update_ai_provider_plugin()
        self.update_ui_state()

    def update_ai_provider_plugin(self):
        self.ai_provider_plugin = plugin_for_purpose(AICapabilities.text_to_text)

    @property
    def is_ready_for_use(self) -> bool:
        p = self.ai_provider_plugin
        return p is not None and p.is_ready_for_use

    def show_initial_message(self):
        if self.is_ready_for_use:
            msg = self.ready_message()
        else:
            # NOTE(review): this anchor tag is never closed with </a> — confirm
            # the renderer tolerates it.
            msg = f'<a href="http://{self.configure_ai_hostname}">{_("First, configure an AI provider")}'
        self.result_display.show_message(msg)

    def run_custom_prompt(self, prompt: str) -> None:
        if prompt := prompt.strip():
            self.start_api_call(prompt)

    @property
    def assistant_name(self) -> str:
        return self.ai_provider_plugin.human_readable_model_name(self.conversation_history.model_used) or _('Assistant')

    def show_ai_conversation(self):
        """Re-render the whole conversation, including any in-flight response."""
        self.result_display.clear()
        assistant = self.assistant_name
        is_initial_query = True
        content_type = self.conversation_history.accumulator.content_type
        for i, message in enumerate(self.conversation_history):
            content_for_display = for_display_to_human(message, is_initial_query, content_type)
            if message.type is ChatMessageType.user:
                is_initial_query = False
            if not content_for_display:
                continue
            header = Header()
            is_response = False
            if message.from_assistant:
                is_response = True
                # Per-response buttons: subclass extras, then copy, then reasoning.
                buttons = tuple(self.per_response_buttons(i, message))
                buttons += (
                    Button('edit-copy.png', f'http://{self.copy_hostname}/{i}', _(
                        'Copy this specific response to the clipboard')),
                )
                if message.reasoning:
                    buttons += (Button(reasoning_icon, f'http://{self.reasoning_hostname}/{i}', _(
                        'Show the reasoning behind this response from the AI')),)
                header = Header(assistant, buttons)
            self.result_display.add_block(content_for_display, header, is_response)
        if self.conversation_history.api_call_active:
            # Show the partial streamed response, or the reasoning while the
            # model is still "thinking".
            a = self.conversation_history.accumulator
            has_content = bool(a.all_content)
            content_for_display = for_display_to_human(ChatMessage(a.all_content or a.all_reasoning))
            activity = _('answering') if has_content else _('thinking')
            if not has_content:
                content_for_display = '<i>' + content_for_display + '</i>'
            self.result_display.add_block(
                content_for_display, Header(_('{assistant} {activity}').format(
                    assistant=assistant, activity=activity) + '…'), is_response=True)
        self.result_display.re_render()
        self.scroll_to_bottom()

    def get_language_instruction(self) -> str:
        if aiprefs()['llm_localized_results'] != 'always':
            return ''
        return self.language_instruction()

    def scroll_to_bottom(self) -> None:
        self.result_display.scroll_to_bottom()

    def start_api_call(self, action_prompt: str, **kwargs: Any) -> None:
        """Append the prompt to the conversation and kick off a worker thread."""
        if not self.is_ready_for_use:
            self.show_error(f'''<b>{_('AI provider not configured.')}</b> <a href="http://{self.configure_ai_hostname}">{_(
                'Configure AI provider')}</a>''', is_critical=False)
            return
        if err := self.ready_to_start_api_call():
            self.show_error(f"<b>{_('Error')}:</b> {err}", is_critical=True)
            return
        if self.conversation_history:
            self.conversation_history.append(ChatMessage(action_prompt))
        else:
            for msg in self.create_initial_messages(action_prompt, **kwargs):
                self.conversation_history.append(msg)
        # The call number lets stale worker emissions be ignored.
        self.current_api_call_number = next(self.counter)
        self.conversation_history.new_api_call()
        Thread(name='LLMAPICall', daemon=True, target=self.do_api_call, args=(
            self.conversation_history.copy(), self.current_api_call_number, self.ai_provider_plugin)).start()
        self.update_ui_state()

    def do_api_call(
        self, conversation_history: ConversationHistory, current_api_call_number: int, ai_plugin: AIProviderPlugin
    ) -> None:
        """Worker-thread body: stream responses and emit them to the GUI thread."""
        try:
            for res in ai_plugin.text_chat(conversation_history.items, conversation_history.model_used):
                if sip.isdeleted(self):
                    return
                self.response_received.emit(current_api_call_number, res)
            if not sip.isdeleted(self):
                # None signals end of the stream.
                self.response_received.emit(current_api_call_number, None)
        except RuntimeError:
            pass  # when self gets deleted between call to sip.isdeleted and next statement

    def on_response_from_ai(self, current_api_call_number: int, r: ChatResponse | None) -> None:
        """GUI-thread handler for streamed responses; ignores stale calls."""
        if current_api_call_number != self.current_api_call_number:
            return
        if r is None:
            self.conversation_history.finalize_response()
            self.update_cost()
        elif r.exception is not None:
            self.show_error(f'''{_('Talking to AI failed with error:')} {escape(str(r.exception))}''', details=r.error_details, is_critical=True)
        else:
            self.conversation_history.accumulate(r) if False else self.conversation_history.accumulator.accumulate(r)
        self.update_ui_state()

    def show_error(self, html: str, is_critical: bool = False, details: str = '') -> None:
        # Errors abandon the current conversation.
        self.clear_current_conversation()
        level = ERROR if is_critical else WARN
        self.result_display.show_message(html, details, level)

    def clear_current_conversation(self) -> None:
        self.conversation_history = ConversationHistory()

    def update_ui_state(self) -> None:
        """Refresh the chat display and enable/disable the response buttons."""
        if self.conversation_history:
            self.show_ai_conversation()
        elif msg := self.choose_action_message():
            self.result_display.show_message(msg)
        else:
            self.show_initial_message()
        has_responses = self.conversation_history.response_count > 0
        for b in self.response_buttons.values():
            b.setEnabled(has_responses)

    def update_cost(self):
        """Update the queries/cost label; cost resets when the currency changes."""
        h = self.conversation_history
        if self.session_cost_currency != h.currency:
            self.session_cost = 0
            self.session_cost_currency = h.currency
        if self.session_cost_currency:
            self.session_cost += h.cost
            cost = _('free')
            if self.session_cost:
                cost = f'{self.session_cost:.6f}'.rstrip('0').rstrip('.') + f' {self.session_cost_currency}'
            self.api_usage_label.setText(f'{_("Queries:")} {self.current_api_call_number} @ {cost}')
        else:
            self.api_usage_label.setText(f'{_("Queries:")} {self.current_api_call_number}')

    def get_conversation_history_for_specific_response(self, message_index: int) -> ConversationHistory | None:
        """History containing just the assistant message at *message_index*, or None."""
        if not (0 <= message_index < len(self.conversation_history)):
            return None
        ans = self.conversation_history.at(message_index)
        if not ans.from_assistant:
            return None
        return self.conversation_history.only(message_index)

    def show_reasoning(self, message_index: int) -> None:
        h = self.get_conversation_history_for_specific_response(message_index)
        m = h.at(len(h)-1)
        if m.reasoning:
            show_reasoning(m.reasoning, self)

    def copy_specific_note(self, message_index: int) -> None:
        history_for_record = self.get_conversation_history_for_specific_response(message_index)
        text = history_for_record.format_llm_note(self.assistant_name, self.NOTE_TITLE)
        if text:
            QApplication.instance().clipboard().setText(text)

    def copy_to_clipboard(self) -> None:
        text = self.conversation_history.format_llm_note(self.assistant_name, self.NOTE_TITLE)
        if text:
            QApplication.instance().clipboard().setText(text)

    def on_chat_link_clicked(self, qurl: QUrl):
        """Route internal (per-widget hostname) links; open everything else."""
        if qurl.scheme() not in ('http', 'https'):
            return
        match qurl.host():
            case self.copy_hostname:
                index = int(qurl.path().strip('/'))
                self.copy_specific_note(index)
                return
            case self.reasoning_hostname:
                index = int(qurl.path().strip('/'))
                self.show_reasoning(index)
                return
            case self.configure_ai_hostname:
                self.show_settings()
                return
        if self.handle_chat_link(qurl):
            return
        safe_open_url(qurl)

    def set_all_inputs_enabled(self, enabled):
        # NOTE(review): self.quick_actions_layout is not created anywhere in
        # this class — presumably a subclass provides it; confirm before use.
        for i in range(self.quick_actions_layout.count()):
            widget = self.quick_actions_layout.itemAt(i).widget()
            if widget:
                widget.setEnabled(enabled)
        self.result_display.set_input_enabled(enabled)

    def add_button(self, icon: str, text: str, tooltip: str) -> QPushButton:
        """Add a (initially disabled) button to the response actions row."""
        b = QPushButton(QIcon.ic(icon), text, self)
        b.setToolTip(tooltip)
        b.setEnabled(False)
        self.response_buttons[text] = b
        self.response_actions_layout.addWidget(b)
        return b

    # Subclass API {{{
    NOTE_TITLE = ''

    def add_buttons(self) -> None:
        self.add_button('edit-clear.png', _('&New chat'), _('Start a new conversation')).clicked.connect(
            self.start_new_conversation)
        self.add_button('edit-copy.png', _('&Copy'), _('Copy this conversation to the clipboard')).clicked.connect(
            self.copy_to_clipboard)

    def per_response_buttons(self, msgnum: int, msg: ChatMessage) -> Iterator[Button]:
        # Default: no extra per-response buttons (empty generator).
        if False:
            yield Button()

    def settings_dialog(self) -> QDialog:
        raise NotImplementedError('implement in subclass')

    def handle_chat_link(self, qurl: QUrl) -> bool:
        raise NotImplementedError('implement in subclass')

    def create_initial_messages(self, action_prompt: str, **kwargs: Any) -> Iterator[ChatMessage]:
        raise NotImplementedError('implement in sub class')

    def ready_message(self) -> str:
        return _('Select text in the book to begin.')

    def choose_action_message(self) -> str:
        raise NotImplementedError('implement in sub class')

    def prompt_text_for_action(self, action) -> str:
        raise NotImplementedError('implement in sub class')

    def start_new_conversation(self) -> None:
        self.clear_current_conversation()
        self.update_ui_state()

    def ready_to_start_api_call(self) -> str:
        # Return a non-empty error message to veto the call.
        return ''

    def cleanup_on_close(self) -> None:
        self.response_received.disconnect(self.on_response_from_ai)
    # }}}
class ActionData(NamedTuple):
    """A quick action: identifier, display name and the prompt template it expands."""
    name: str
    human_name: str
    prompt_template: str
    is_builtin: bool = True
    is_disabled: bool = False

    @property
    def as_custom_action_dict(self) -> dict[str, Any]:
        """Serialized form under which custom actions are stored in preferences."""
        return {'disabled': self.is_disabled, 'title': self.human_name, 'prompt_template': self.prompt_template}

    @classmethod
    def unserialize(cls, p: dict[str, Any], default_actions: tuple[ActionData, ...], include_disabled=False) -> Iterator[ActionData]:
        """Yield the actions described by the prefs dict *p*.

        Built-in actions come first, with their disabled flag taken from the
        'disabled_default_actions' entry; user-defined actions follow.
        Disabled actions are skipped unless *include_disabled* is true.
        """
        disabled_names = p.get('disabled_default_actions', ())
        for builtin in default_actions:
            candidate = builtin._replace(is_disabled=builtin.name in disabled_names)
            if include_disabled or not candidate.is_disabled:
                yield candidate
        for title, spec in p.get('custom_actions', {}).items():
            candidate = cls(f'custom-{title}', title, spec['prompt_template'], is_builtin=False, is_disabled=spec['disabled'])
            if include_disabled or not candidate.is_disabled:
                yield candidate
class ActionEditDialog(QDialog):
    """Dialog for creating or editing a custom quick action (name + prompt)."""

    def __init__(self, help_text: str, action: ActionData | None=None, parent=None):
        super().__init__(parent)
        self.setWindowTitle(_('Edit Quick action') if action else _('Add Quick action'))
        self.layout = QFormLayout(self)
        self.layout.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.ExpandingFieldsGrow)
        self.name_edit = QLineEdit(self)
        self.prompt_edit = QPlainTextEdit(self)
        self.prompt_edit.setMinimumHeight(100)
        self.layout.addRow(_('Name:'), self.name_edit)
        self.layout.addRow(_('Prompt:'), self.prompt_edit)
        self.help_label = la = QLabel(help_text)
        la.setWordWrap(True)
        self.layout.addRow(la)
        self.button_box = QDialogButtonBox(QDialogButtonBox.StandardButton.Ok | QDialogButtonBox.StandardButton.Cancel)
        self.layout.addWidget(self.button_box)
        self.button_box.accepted.connect(self.accept)
        self.button_box.rejected.connect(self.reject)
        if action is not None:
            self.name_edit.setText(action.human_name)
            self.prompt_edit.setPlainText(action.prompt_template)
        # Event filter implements Enter-key navigation/acceptance (see below).
        self.name_edit.installEventFilter(self)
        self.prompt_edit.installEventFilter(self)

    def sizeHint(self) -> QSize:
        ans = super().sizeHint()
        ans.setWidth(max(500, ans.width()))
        return ans

    def eventFilter(self, obj, event):
        # Enter in the name field moves to the prompt; Ctrl+Enter in the
        # prompt accepts the dialog.
        if event.type() == QEvent.Type.KeyPress:
            if obj is self.name_edit and event.key() in (Qt.Key.Key_Return, Qt.Key.Key_Enter):
                self.prompt_edit.setFocus()
                return True
            if obj is self.prompt_edit and event.key() in (Qt.Key.Key_Return, Qt.Key.Key_Enter):
                if event.modifiers() & Qt.KeyboardModifier.ControlModifier:
                    self.accept()
                    return True
        return super().eventFilter(obj, event)

    def get_action(self) -> ActionData:
        """Build an ActionData from the current field values."""
        title = self.name_edit.text().strip()
        return ActionData(f'custom-{title}', title, self.prompt_edit.toPlainText().strip(), is_builtin=False)

    def accept(self) -> None:
        # Validate before closing: both name and prompt are required.
        ac = self.get_action()
        if not ac.human_name:
            return error_dialog(self, _('No name specified'), _('You must specify a name for the Quick action'), show=True)
        if not ac.prompt_template:
            return error_dialog(self, _('No prompt specified'), _('You must specify a prompt for the Quick action'), show=True)
        super().accept()
class LocalisedResults(QCheckBox):
    """Checkbox controlling whether AI responses are requested in the UI language."""

    def __init__(self):
        super().__init__(_('Ask the AI to respond in the current language'))
        self.setToolTip('<p>' + _(
            'Ask the AI to respond in the current calibre user interface language. Note that how well'
            ' this works depends on the individual model being used. Different models support'
            ' different languages.'))

    def load_settings(self):
        """Initialize the checkbox from the AI preferences."""
        self.setChecked(aiprefs()['llm_localized_results'] == 'always')

    def commit(self) -> bool:
        """Persist the checkbox state back to the AI preferences."""
        aiprefs()['llm_localized_results'] = 'always' if self.isChecked() else 'never'
        return True
class LLMActionsSettingsWidget(QWidget):

    """Base widget for configuring AI "Quick actions".

    Shows subclass-provided settings widgets above a checkable,
    drag-reorderable list of actions with Add/Edit/Remove buttons.
    Subclasses implement the "Subclass API" methods at the bottom to
    load/store the actions.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setMinimumWidth(550)
        self.layout = QVBoxLayout(self)
        api_model_layout = QFormLayout()
        api_model_layout.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.ExpandingFieldsGrow)
        self.custom_widgets = []
        for (title, w) in self.create_custom_widgets():
            if title:
                api_model_layout.addRow(title, w)
            else:
                api_model_layout.addRow(w)
            self.custom_widgets.append(w)
        self.layout.addLayout(api_model_layout)
        self.qa_gb = gb = QGroupBox(_('&Quick actions:'), self)
        self.layout.addWidget(gb)
        gb.l = l = QVBoxLayout(gb)
        self.actions_list = QListWidget(self)
        # Allow the user to reorder actions by drag and drop.
        self.actions_list.setDragDropMode(QAbstractItemView.DragDropMode.InternalMove)
        l.addWidget(self.actions_list)
        actions_button_layout = QHBoxLayout()
        self.add_button = QPushButton(QIcon.ic('plus.png'), _('&Add'))
        self.edit_button = QPushButton(QIcon.ic('modified.png'), _('&Edit'))
        self.remove_button = QPushButton(QIcon.ic('minus.png'), _('&Remove'))
        actions_button_layout.addWidget(self.add_button)
        actions_button_layout.addWidget(self.edit_button)
        actions_button_layout.addWidget(self.remove_button)
        actions_button_layout.addStretch(100)
        l.addLayout(actions_button_layout)
        self.add_button.clicked.connect(self.add_action)
        self.edit_button.clicked.connect(self.edit_action)
        self.remove_button.clicked.connect(self.remove_action)
        self.actions_list.itemDoubleClicked.connect(self.edit_action)
        self.load_settings()
        self.actions_list.setFocus()

    def load_settings(self):
        # Reload both the custom settings widgets and the actions list.
        for w in self.custom_widgets:
            w.load_settings()
        self.load_actions_from_prefs()

    def action_as_item(self, ac: ActionData) -> QListWidgetItem:
        # Append *ac* to the list; the checkbox reflects its enabled state.
        item = QListWidgetItem(ac.human_name, self.actions_list)
        item.setData(Qt.ItemDataRole.UserRole, ac)
        item.setCheckState(Qt.CheckState.Unchecked if ac.is_disabled else Qt.CheckState.Checked)
        item.setToolTip(textwrap.fill(ac.prompt_template))
        # Bugfix: the method is annotated as returning the item but did not.
        return item

    def load_actions_from_prefs(self):
        self.actions_list.clear()
        for ac in sorted(self.get_actions_from_prefs(), key=lambda ac: primary_sort_key(ac.human_name)):
            self.action_as_item(ac)

    def add_action(self):
        dialog = ActionEditDialog(self.action_edit_help_text, parent=self)
        if dialog.exec() == QDialog.DialogCode.Accepted:
            action = dialog.get_action()
            if action.human_name and action.prompt_template:
                self.action_as_item(action)

    def edit_action(self):
        item = self.actions_list.currentItem()
        if not item:
            return
        action = item.data(Qt.ItemDataRole.UserRole)
        if action.is_builtin:
            return error_dialog(self, _('Cannot edit'), _(
                'Cannot edit builtin actions. Instead uncheck this action and create a new action with the same name.'), show=True)
        dialog = ActionEditDialog(self.action_edit_help_text, action, parent=self)
        if dialog.exec() == QDialog.DialogCode.Accepted:
            new_action = dialog.get_action()
            if new_action.human_name and new_action.prompt_template:
                item.setText(new_action.human_name)
                item.setData(Qt.ItemDataRole.UserRole, new_action)

    def remove_action(self):
        item = self.actions_list.currentItem()
        if not item:
            return
        action = item.data(Qt.ItemDataRole.UserRole)
        if action.is_builtin:
            return error_dialog(self, _('Cannot remove'), _(
                'Cannot remove builtin actions. Instead simply uncheck it to prevent it from showing up as a button.'), show=True)
        if item and confirm(
            _('Remove the {} action?').format(item.text()), 'confirm_remove_llm_action',
            confirm_msg=_('&Show this confirmation again'), parent=self,
        ):
            self.actions_list.takeItem(self.actions_list.row(item))

    def commit(self) -> bool:
        # Persist the custom widgets first, then the actions: builtin
        # actions only record whether they are disabled, custom actions
        # are stored fully.
        for w in self.custom_widgets:
            if not w.commit():
                return False
        disabled_defaults = []
        custom_actions = {}
        for i in range(self.actions_list.count()):
            item = self.actions_list.item(i)
            action: ActionData = item.data(Qt.ItemDataRole.UserRole)
            action = action._replace(is_disabled=item.checkState() == Qt.CheckState.Unchecked)
            if action.is_builtin:
                if action.is_disabled:
                    disabled_defaults.append(action.name)
            else:
                custom_actions[action.human_name] = action.as_custom_action_dict
        s = {}
        if disabled_defaults:
            s['disabled_default_actions'] = disabled_defaults
        if custom_actions:
            s['custom_actions'] = custom_actions
        self.set_actions_in_prefs(s)
        return True

    # Subclass API {{{
    action_edit_help_text = ''

    def get_actions_from_prefs(self) -> Iterator[ActionData]:
        raise NotImplementedError('implement in sub class')

    def set_actions_in_prefs(self, s: dict[str, Any]) -> None:
        raise NotImplementedError('implement in sub class')

    def create_custom_widgets(self) -> Iterator[tuple[str, QWidget]]:
        # Yields (row title, widget) pairs; an empty title spans the row.
        raise NotImplementedError('implement in sub class')
    # }}}
class LLMSettingsDialogBase(Dialog):

    """Tabbed AI settings dialog: provider configuration plus subclass-provided tabs."""

    def __init__(self, name, prefs, title='', parent=None):
        super().__init__(title=title or _('AI Settings'), name=name, prefs=prefs, parent=parent)

    def custom_tabs(self) -> Iterator[tuple[str, str, QWidget]]:
        # Subclasses yield (icon name, title, widget) tuples for extra tabs.
        # The `if False` makes this a generator that yields nothing by default.
        if False:
            yield 'icon', 'title', QWidget()

    def setup_ui(self):
        l = QVBoxLayout(self)
        self.tabs = tabs = QTabWidget(self)
        self.ai_config = ai = ConfigureAI(parent=self)
        tabs.addTab(ai, QIcon.ic('ai.png'), _('AI &Provider'))
        for (icon, title, widget) in self.custom_tabs():
            tabs.addTab(widget, QIcon.ic(icon), title)
        # Jump past the provider tab when the provider is already configured.
        tabs.setCurrentIndex(1 if self.ai_config.is_ready_for_use else 0)
        l.addWidget(tabs)
        l.addWidget(self.bb)

    def accept(self):
        # Commit every tab; stop at (and show) the first tab that fails.
        for i in range(self.tabs.count()):
            w = self.tabs.widget(i)
            if not w.commit():
                self.tabs.setCurrentWidget(w)
                return
        super().accept()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/gui2/llm.py",
"license": "GNU General Public License v3.0",
"lines": 654,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:setup/git_hooks.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, un_pogaz <un.pogaz@gmail.com>
import os
from contextlib import suppress
from typing import NamedTuple
from setup import Command
# Script written into .git/hooks/<name>; it locates the repository root from
# its own path and runs the corresponding setup/git_*.py file via calibre-debug.
HOOK_TEMPLATE = '''\
#!/usr/bin/env -S calibre-debug -e -- --
# File generated by calibre "setup.py git_hooks"
import os
import runpy
import sys
base = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.argv[0] = os.path.basename({file!r})
runpy.run_path(os.path.join(base, 'setup', {file!r}), run_name='__main__')
'''


class Hook(NamedTuple):
    # A git hook: its git name, the setup/ script implementing it, and
    # whether it is installed when no --name option is given.
    name: str
    file: str
    default: bool = True


HOOKS = {h.name:h for h in (
    Hook('post-checkout', 'git_post_checkout_hook.py'),
    Hook('post-rewrite', 'git_post_rewrite_hook.py'),
    Hook('pre-commit', 'git_pre_commit_hook.py'),
    # disable by default, because except Kovid, nobody can run this hook
    Hook('commit-msg', 'git_commit_msg_hook.py', False),
)}
# Comma separated default hook names, and human readable list of all hooks.
DEFAULT = ','.join(sorted(h.name for h in HOOKS.values() if h.default))
AVAILABLES = ', '.join(sorted(h for h in HOOKS))
class GitHooks(Command):

    """setup.py command that installs/uninstalls calibre's git hooks.

    Fixes in this revision: the user visible messages spelt
    'unistall'/'unistalled'/'unistallation' -- corrected to
    'uninstall'/'uninstalled'/'uninstallation'.
    """

    description = 'Install/uninstall git hooks'

    def add_options(self, parser):
        parser.add_option('-n', '--name', default=DEFAULT,
            help='Name(s) of the hook to install, separated by commas. '
            f'Default: "{DEFAULT}". Hooks available: {AVAILABLES}')
        parser.add_option('-u', '--uninstall', default=False, action='store_true',
            help='Uninstall the selected hooks')
        parser.add_option('-f', '--force', default=False, action='store_true',
            help='Force the operations on the hooks')

    def run(self, opts):
        # Normalize and validate the requested hook names, then dispatch
        # to install() or uninstall().
        self.force = opts.force
        self.names = []
        invalid = []
        for candidate in sorted(c.strip().lower() for c in opts.name.split(',')):
            if not candidate:
                continue
            if candidate not in HOOKS:
                invalid.append(candidate)
            else:
                self.names.append(candidate)
        if invalid:
            self.info('Info: The following hook names are not recognized:', ', '.join(invalid))
        if not self.names:
            self.info('No supported hook names recognized.')
            return
        if opts.uninstall:
            self.uninstall()
        else:
            self.install()

    def _parse_template(self, hook_name):
        # Return the hook's destination path inside .git/hooks and the
        # generated script content for it.
        base_hooks = os.path.join(os.path.dirname(self.SRC), '.git', 'hooks')
        hook = HOOKS[hook_name]
        path = self.j(base_hooks, hook.name)
        script = HOOK_TEMPLATE.format(file=hook.file)
        return path, script

    def install(self):
        self.info('Installing the hooks:', ', '.join(self.names))
        for candidate in self.names:
            path, script = self._parse_template(candidate)
            if self.e(path):
                with open(path, 'rb') as f:
                    previous = f.read().decode('utf-8')
                msg = f'{candidate}: a non-calibre hook is installed.'
                if previous == script:
                    # Identical hook already present, nothing to do.
                    self.info(f'{candidate}: installed.')
                    continue
                elif self.force:
                    self.info(msg, 'Force installation.')
                else:
                    self.info(msg, 'Skip installation.')
                    continue
            self.info(f'{candidate}: installed.')
            with suppress(OSError):
                os.remove(path)  # remove if symlink
            with open(path, 'wb') as f:
                f.write(script.encode('utf-8'))
            try:
                os.chmod(path, 0o744, follow_symlinks=False)
            except NotImplementedError:  # old python on windows
                os.chmod(path, 0o744)

    def uninstall(self):
        self.info('Uninstalling the hooks:', ', '.join(self.names))
        for candidate in self.names:
            path, script = self._parse_template(candidate)
            if not self.e(path):
                self.info(f'{candidate}: no hook to uninstall.')
                continue
            with open(path, 'rb') as f:
                previous = f.read().decode('utf-8')
            msg = f'{candidate}: a non-calibre hook is installed.'
            if previous == script:
                self.info(f'{candidate}: uninstalled.')
            elif self.force:
                self.info(msg, 'Force uninstallation.')
            else:
                # Do not delete hooks we did not write, unless forced.
                self.info(msg, 'Skip uninstallation.')
                continue
            os.remove(path)
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "setup/git_hooks.py",
"license": "GNU General Public License v3.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/ai/ollama/backend.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import datetime
import http
import json
import posixpath
from collections.abc import Iterable, Iterator, Sequence
from contextlib import suppress
from functools import lru_cache
from typing import Any, NamedTuple
from urllib.parse import urlparse, urlunparse
from urllib.request import Request
from calibre.ai import ChatMessage, ChatMessageType, ChatResponse, ResultBlocked
from calibre.ai.ollama import OllamaAI
from calibre.ai.prefs import pref_for_provider
from calibre.ai.utils import chat_with_error_handler, develop_text_chat, download_data, opener
module_version = 1  # needed for live updates


def pref(key: str, defval: Any = None) -> Any:
    # Read a setting for the Ollama provider from the AI prefs.
    return pref_for_provider(OllamaAI.name, key, defval)


def is_ready_for_use() -> bool:
    # The plugin is usable once a text model has been configured.
    return bool(pref('text_model'))
def headers() -> tuple[tuple[str, str]]:
    """Static HTTP headers sent with every Ollama API request."""
    # Only the JSON content type; any user configured headers are merged in
    # elsewhere from the 'headers' pref.
    return (('Content-Type', 'application/json'),)
class Model(NamedTuple):
    # See https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models
    name: str  # human readable name
    id: str  # model identifier used in API calls
    family: str
    families: Sequence[str]
    modified_at: datetime.datetime
    can_think: bool  # whether the model supports "thinking" output

    @classmethod
    def from_dict(cls, x: dict[str, Any], details: dict[str, Any]) -> 'Model':
        # Build a Model from the /api/tags entry *x* plus the /api/show
        # response *details* for the same model. Return annotation quoted:
        # Model is not yet bound while the class body executes.
        d = x.get('details', {})
        return Model(
            name=x['name'], id=x['model'], family=d.get('family', ''), families=d.get('families', ()),
            modified_at=datetime.datetime.fromisoformat(x['modified_at']), can_think='thinking' in details['capabilities'],
        )
def api_url(path: str = '', use_api_url: str | None = None) -> str:
    # Join *path* onto the configured (or default) Ollama base URL,
    # preserving any base path component present in the configured URL.
    ans = (pref('api_url') if use_api_url is None else use_api_url) or OllamaAI.DEFAULT_URL
    purl = urlparse(ans)
    base_path = purl.path or '/'
    if path:
        path = posixpath.join(base_path, path)
        purl = purl._replace(path=path)
    return urlunparse(purl)
@lru_cache(8)
def get_available_models(use_api_url: str | None = None, headers: Sequence[tuple[str, str]] | None = None) -> dict[str, Model]:
    # Query the Ollama server for its locally installed models, fetching
    # per-model capability details via /api/show. Results are cached per
    # (url, headers) combination.
    ans = {}
    o = opener()
    if headers is None:
        headers = pref('headers') or ()
    for model in json.loads(download_data(api_url('api/tags', use_api_url), headers=headers))['models']:
        rq = Request(api_url('api/show', use_api_url), headers=dict(headers), data=json.dumps({'model': model['model']}).encode(), method='POST')
        with o.open(rq) as f:
            details = json.loads(f.read())
        e = Model.from_dict(model, details)
        ans[e.id] = e
    return ans
def does_model_exist_locally(model_id: str, use_api_url: str | None = None, headers: Sequence[tuple[str, str]] | None = None) -> bool:
    """Return True if the Ollama server reports *model_id* as installed."""
    # Any failure to reach or parse the server is treated as "not found".
    with suppress(Exception):
        return model_id in get_available_models(use_api_url, headers)
    return False
def config_widget():
    # Imported lazily so Qt is only loaded when the user opens configuration.
    from calibre.ai.ollama.config import ConfigWidget
    return ConfigWidget()


def save_settings(config_widget):
    config_widget.save_settings()


def human_readable_model_name(model_id: str) -> str:
    # Map a model id to its display name, falling back to the id itself.
    if m := get_available_models().get(model_id):
        model_id = m.name
    return model_id


@lru_cache(2)
def model_choice_for_text() -> Model:
    # The model configured for text tasks. Raises KeyError if the
    # configured model is no longer present on the server.
    return get_available_models()[pref('text_model')]
def chat_request(data: dict[str, Any], model: Model) -> Request:
    # Build a streaming POST request to /api/chat, enabling "thinking"
    # output for models that support it.
    data['stream'] = True
    if model.can_think:
        data['think'] = True
    return Request(
        api_url('api/chat'), data=json.dumps(data).encode('utf-8'),
        headers=dict(headers()), method='POST')


def for_assistant(self: ChatMessage) -> dict[str, Any]:
    # Convert a ChatMessage into the {role, content} dict the Ollama chat
    # API expects; only plain conversational message types are supported.
    if self.type not in (ChatMessageType.assistant, ChatMessageType.system, ChatMessageType.user, ChatMessageType.developer):
        raise ValueError(f'Unsupported message type: {self.type}')
    return {'role': self.type.value, 'content': self.query}
def as_chat_responses(d: dict[str, Any], model: Model) -> Iterator[ChatResponse]:
    # Translate one streamed chat event *d* into zero or more ChatResponses.
    msg = d['message']
    content = msg['content']
    # The final event has done=True and carries metadata such as done_reason.
    has_metadata = d['done']
    if has_metadata and (dr := d['done_reason']) != 'stop':
        yield ChatResponse(exception=ResultBlocked(custom_message=_('Result was blocked for reason: {}').format(dr)))
        return
    reasoning = msg.get('thinking') or ''
    if has_metadata or content or reasoning:
        yield ChatResponse(
            type=ChatMessageType.assistant, reasoning=reasoning, content=content, has_metadata=has_metadata, model=model.id, plugin_name=OllamaAI.name)
def read_streaming_response(rq: Request) -> Iterator[dict[str, Any]]:
    # Ollama streams newline-delimited JSON objects (not SSE); yield each
    # decoded line. Raises on non-200 responses, including any body text
    # that could be read, to aid debugging.
    with opener().open(rq, timeout=pref('timeout', 120)) as response:
        if response.status != http.HTTPStatus.OK:
            details = ''
            with suppress(Exception):
                details = response.read().decode('utf-8', 'replace')
            raise Exception(f'Reading from {OllamaAI.name} failed with HTTP response status: {response.status} and body: {details}')
        for raw_line in response:
            yield json.loads(raw_line)
def text_chat_implementation(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    # https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion
    # Doesnt use SSE
    if use_model:
        model = get_available_models()[use_model]
    else:
        model = model_choice_for_text()
    data = {
        'model': model.id,
        'messages': [for_assistant(m) for m in messages],
    }
    rq = chat_request(data, model)
    for datum in read_streaming_response(rq):
        for res in as_chat_responses(datum, model):
            yield res
            # Stop processing this event's responses once an error is seen.
            if res.exception:
                break


def text_chat(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    # Public entry point: as text_chat_implementation, but network/HTTP
    # failures are converted into error ChatResponse objects.
    yield from chat_with_error_handler(text_chat_implementation(messages, use_model))
def develop(use_model: str = '', msg: str = '') -> None:
    # calibre-debug -c 'from calibre.ai.ollama.backend import develop; develop()'
    # Interactive helper for manually exercising the chat API.
    m = (ChatMessage(msg),) if msg else ()
    develop_text_chat(text_chat, use_model, messages=m)


if __name__ == '__main__':
    develop()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/ollama/backend.py",
"license": "GNU General Public License v3.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/ai/ollama/config.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
from functools import partial
from qt.core import QFormLayout, QLabel, QLineEdit, QPlainTextEdit, QSpinBox, QWidget
from calibre.ai.ollama import OllamaAI
from calibre.ai.prefs import pref_for_provider, set_prefs_for_provider
from calibre.ai.utils import configure, plugin_for_name
from calibre.gui2 import error_dialog
from calibre.gui2.widgets import BusyCursor
pref = partial(pref_for_provider, OllamaAI.name)
class ConfigWidget(QWidget):

    """Configuration UI for the Ollama provider: server URL, request
    timeout, text model name and extra HTTP headers.

    Fix in this revision: the "No model specified" error message read
    "You specify a model..." -- corrected to "You must specify a model...".
    """

    def __init__(self, parent: QWidget | None = None):
        super().__init__(parent)
        l = QFormLayout(self)
        l.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.ExpandingFieldsGrow)
        la = QLabel('<p>'+_(
            'Ollama allows you to run AI models locally on your own hardware. Once you have it running and properly'
            ' setup, fill in the fields below to have calibre use it as the AI provider.'
        ))
        la.setWordWrap(True)
        la.setOpenExternalLinks(True)
        l.addRow(la)
        self.api_url_edit = a = QLineEdit()
        a.setClearButtonEnabled(True)
        a.setPlaceholderText(_('The Ollama URL, defaults to {}').format(OllamaAI.DEFAULT_URL))
        a.setToolTip(_('Enter the URL of the machine running your Ollama server, for example: {}').format(
            'https://my-ollama-server.com:11434'))
        l.addRow(_('Ollama &URL:'), a)
        a.setText(pref('api_url') or '')
        self.timeout_sb = t = QSpinBox(self)
        t.setRange(15, 600), t.setSingleStep(1), t.setSuffix(_(' seconds'))
        t.setValue(pref('timeout', 120))
        l.addRow(_('&Timeout:'), t)
        self.text_model_edit = lm = QLineEdit(self)
        lm.setClearButtonEnabled(True)
        lm.setToolTip(_(
            'Enter the name of the model to use for text based tasks.'
        ))
        lm.setPlaceholderText(_('Enter name of model to use'))
        l.addRow(_('Model for &text tasks:'), lm)
        lm.setText(pref('text_model') or '')
        self.headers_edit = he = QPlainTextEdit(self)
        he.setPlaceholderText(_('HTTP headers to send to Ollama, one per line'))
        l.addRow(_('HTTP &Headers:'), he)
        he.setPlainText('\n'.join(f'{k}: {v}' for (k, v) in pref('headers') or ()))
        he.setToolTip('<p>' + _(
            'A list of HTTP headers to send with every request to the Ollama API.'
            ' Add a new header per line in the format: Header-Name: Value'
        ))

    def does_model_exist_locally(self, model_name: str) -> bool:
        # Ask the backend (via the live update module) whether the server
        # has this model installed.
        if not model_name:
            return False
        plugin = plugin_for_name(OllamaAI.name)
        return plugin.builtin_live_module.does_model_exist_locally(model_name, self.api_url, self.headers)

    def available_models(self) -> list[str]:
        plugin = plugin_for_name(OllamaAI.name)
        return sorted(plugin.builtin_live_module.get_available_models(self.api_url, self.headers), key=lambda x: x.lower())

    @property
    def text_model(self) -> str:
        return self.text_model_edit.text().strip()

    @property
    def timeout(self) -> int:
        return self.timeout_sb.value()

    @property
    def api_url(self) -> str:
        return self.api_url_edit.text().strip()

    @property
    def headers(self) -> tuple[tuple[str, str], ...]:
        # Parse "Name: Value" lines, ignoring blank or malformed ones.
        ans = []
        for line in self.headers_edit.toPlainText().splitlines():
            if line := line.strip():
                k, sep, v = line.partition(':')
                k, v = k.strip(), v.strip()
                if k and v:
                    ans.append((k, v))
        return tuple(ans)

    @property
    def settings(self) -> dict[str, str]:
        # Only persist the optional keys when they have a value.
        ans = {
            'text_model': self.text_model, 'timeout': self.timeout,
        }
        if url := self.api_url:
            ans['api_url'] = url
        if headers := self.headers:
            ans['headers'] = headers
        return ans

    @property
    def is_ready_for_use(self) -> bool:
        return bool(self.text_model)

    def validate(self) -> bool:
        if not self.text_model:
            error_dialog(self, _('No model specified'), _('You must specify a model to use for text based tasks.'), show=True)
            return False
        with BusyCursor():
            exists = self.does_model_exist_locally(self.text_model)
        if not exists:
            with BusyCursor():
                try:
                    avail = self.available_models()
                except Exception:
                    import traceback
                    det_msg = _('Failed to get list of available models with error:') + '\n' + traceback.format_exc()
                else:
                    det_msg = _('Available models:') + '\n' + '\n'.join(avail)
            error_dialog(self, _('No matching model'), _(
                'No model named {} found in Ollama. Click "Show details" to see a list of available models.').format(
                self.text_model), show=True, det_msg=det_msg)
            return False
        return True

    def save_settings(self):
        set_prefs_for_provider(OllamaAI.name, self.settings)
if __name__ == '__main__':
    # Launch a standalone configuration dialog for this provider.
    configure(OllamaAI.name)
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/ollama/config.py",
"license": "GNU General Public License v3.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/ai/openai/backend.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import datetime
import json
import os
from collections.abc import Iterable, Iterator, Sequence
from functools import lru_cache
from operator import attrgetter
from typing import Any, NamedTuple
from urllib.request import Request
from calibre.ai import ChatMessage, ChatMessageType, ChatResponse, NoAPIKey, PromptBlocked
from calibre.ai.openai import OpenAI
from calibre.ai.prefs import decode_secret, pref_for_provider
from calibre.ai.utils import chat_with_error_handler, develop_text_chat, get_cached_resource, read_streaming_response
from calibre.constants import cache_dir
module_version = 1  # needed for live updates
MODELS_URL = 'https://api.openai.com/v1/models'
CHAT_URL = 'https://api.openai.com/v1/responses'


def pref(key: str, defval: Any = None) -> Any:
    # Read a setting for the OpenAI provider from the AI prefs.
    return pref_for_provider(OpenAI.name, key, defval)


def api_key() -> str:
    # The stored (encoded) API key, or a falsy value when unset.
    return pref('api_key')


def is_ready_for_use() -> bool:
    return bool(api_key())


def decoded_api_key() -> str:
    # Decode the stored API key, raising NoAPIKey when none is configured.
    ans = api_key()
    if not ans:
        raise NoAPIKey('API key required for OpenAI')
    return decode_secret(ans)
@lru_cache(2)
def headers() -> tuple[tuple[str, str], ...]:
    # Standard headers for authenticated JSON requests. NOTE: the local
    # name intentionally shadows the module-level api_key() function here.
    api_key = decoded_api_key()
    return (
        ('Authorization', f'Bearer {api_key}'),
        ('Content-Type', 'application/json'),
    )
class Model(NamedTuple):
    # See https://platform.openai.com/docs/api-reference/models/retrieve
    id: str
    id_parts: Sequence[str]  # id split on '-' (annotation fixed from invalid Sequence[str, ...])
    created: datetime.datetime
    version: float  # numeric second id component, 0 when not parseable

    @classmethod
    def from_dict(cls, x: dict[str, object]) -> 'Model':
        # Return annotation quoted: Model is not bound during class creation.
        id_parts = tuple(x['id'].split('-'))
        try:
            version = float(id_parts[1])
        except Exception:
            version = 0
        return Model(id=x['id'], created=datetime.datetime.fromtimestamp(x['created'], datetime.UTC), id_parts=id_parts, version=version)

    @property
    def is_preview(self) -> bool:
        return 'preview' in self.id_parts
def parse_models_list(entries: list[dict[str, Any]]) -> dict[str, Model]:
    """Convert raw API catalog entries into a mapping of model id -> Model."""
    parsed = (Model.from_dict(entry) for entry in entries)
    return {model.id: model for model in parsed}
@lru_cache(2)
def get_available_models() -> dict[str, Model]:
    # Fetch (with on-disk caching) the list of models available to this key.
    api_key = decoded_api_key()
    cache_loc = os.path.join(cache_dir(), 'ai', f'{OpenAI.name}-models-v1.json')
    data = get_cached_resource(cache_loc, MODELS_URL, headers=(('Authorization', f'Bearer {api_key}'),))
    return parse_models_list(json.loads(data)['data'])
def find_models_matching_name(name: str) -> Iterator[str]:
    """Yield ids of available models whose id contains *name* (case-insensitive).

    Bugfix: this previously read ``model.name``, but this provider's Model
    has no ``name`` field (its fields are id, id_parts, created, version),
    so every call raised AttributeError. Match against the model id, which
    is what OpenAI uses as the human readable identifier here (see
    human_readable_model_name below).
    """
    needle = name.strip().lower()
    for model in get_available_models().values():
        if needle in model.id.strip().lower():
            yield model.id
def config_widget():
    # Imported lazily so Qt is only loaded when the user opens configuration.
    from calibre.ai.openai.config import ConfigWidget
    return ConfigWidget()


def save_settings(config_widget):
    config_widget.save_settings()


def human_readable_model_name(model_id: str) -> str:
    # OpenAI model ids are already human readable.
    return model_id
@lru_cache(2)
def newest_gpt_models() -> dict[str, Model]:
    """Pick the most recently created GPT model for each capability tier.

    'mini' variants count as the medium tier, 'nano' as low, everything
    else as high. NOTE(review): raises IndexError if any tier is empty for
    this account -- confirm that cannot happen in practice.
    """
    tiers = {'high': [], 'medium': [], 'low': []}
    for model in get_available_models().values():
        parts = model.id_parts  # the id split on '-' (set in from_dict)
        if parts[0] != 'gpt' or len(parts) < 2:
            continue
        if 'mini' in parts:
            tier = 'medium'
        elif 'nano' in parts:
            tier = 'low'
        else:
            tier = 'high'
        tiers[tier].append(model)
    return {tier: sorted(models, key=attrgetter('created'))[-1] for tier, models in tiers.items()}
@lru_cache(2)
def model_choice_for_text() -> Model:
    """Return the preferred GPT model for text based tasks.

    Bugfix: the config widget (calibre.ai.openai.config) stores the user's
    choice under the 'model_choice_strategy' key, but this function read
    'model_strategy', so the user's setting was ignored. Read the config
    widget's key first, falling back to the legacy key for compatibility.
    """
    m = newest_gpt_models()
    strategy = pref('model_choice_strategy', pref('model_strategy', 'medium'))
    return m.get(strategy, m['medium'])
def reasoning_effort():
    """Map the configured reasoning strategy to an API 'effort' level."""
    strategy = pref('reasoning_strategy', 'auto')
    effort_by_strategy = {
        'none': 'minimal', 'auto': 'medium', 'low': 'low', 'medium': 'medium', 'high': 'high',
    }
    # Unknown strategies fall back to medium effort.
    return effort_by_strategy.get(strategy, 'medium')
def chat_request(data: dict[str, Any], model: Model) -> Request:
    # See https://platform.openai.com/docs/api-reference/responses/create
    data['model'] = model.id
    data['stream'] = True
    if pref('allow_web_searches', True):
        # Let the model use OpenAI's built-in web search tool.
        data.setdefault('tools', []).append({'type': 'web_search'})
    data['reasoning'] = {
        'effort': reasoning_effort(),
        'summary': 'auto'
    }
    return Request(
        CHAT_URL, data=json.dumps(data).encode('utf-8'),
        headers=dict(headers()), method='POST')
def for_assistant(self: ChatMessage) -> dict[str, Any]:
    # Convert a ChatMessage into the {role, content} dict the API expects;
    # only plain conversational message types are supported.
    if self.type not in (ChatMessageType.assistant, ChatMessageType.system, ChatMessageType.user, ChatMessageType.developer):
        raise ValueError(f'Unsupported message type: {self.type}')
    return {'role': self.type.value, 'content': self.query}
def as_chat_responses(d: dict[str, Any], model: Model) -> Iterator[ChatResponse]:
    """Translate one streamed Responses-API event into ChatResponse objects.

    See https://platform.openai.com/docs/api-reference/responses/object
    """
    # TODO(review): the parsing below targets the chat-completions event
    # shape ('choices'/'delta'), not Responses-API stream events, and is
    # disabled until it is finished. Bugfix: a leftover debug print of the
    # raw event (print(1111111111, d)) was removed here.
    if True:
        return
    content = ''
    for choice in d['choices']:
        content += choice['delta'].get('content', '')
        if (fr := choice['finish_reason']) and fr != 'stop':
            yield ChatResponse(exception=PromptBlocked(custom_message=_('Result was blocked for reason: {}').format(fr)))
            return
    has_metadata = False
    if u := d.get('usage'):
        u  # TODO: implement costing
        has_metadata = True
    if has_metadata or content:
        yield ChatResponse(
            id=d['id'],
            type=ChatMessageType.assistant, content=content, has_metadata=has_metadata, model=model.id, plugin_name=OpenAI.name)
def text_chat_implementation(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    # See https://platform.openai.com/docs/guides/text?api-mode=responses
    if use_model:
        model = get_available_models()[use_model]
    else:
        model = model_choice_for_text()
    # The Responses API can continue from a previous response id: find the
    # most recent message carrying one and send only messages from that
    # point on, together with previous_response_id.
    previous_response_id = ''
    messages = mcon = tuple(messages)
    for i, m in enumerate(reversed(messages)):
        if m.response_id:
            previous_response_id = m.response_id
            idx = len(mcon) - 1 - i
            messages = mcon[idx:]
            break
    data = {
        'input': [for_assistant(m) for m in messages],
    }
    if previous_response_id:
        data['previous_response_id'] = previous_response_id
    rq = chat_request(data, model)
    for datum in read_streaming_response(rq, OpenAI.name):
        for res in as_chat_responses(datum, model):
            yield res
            # Stop processing this event's responses once an error is seen.
            if res.exception:
                break


def text_chat(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    # Public entry point: network/HTTP failures are converted into error
    # ChatResponse objects by the shared error handler.
    yield from chat_with_error_handler(text_chat_implementation(messages, use_model))
def develop(use_model: str = '', msg: str = '') -> None:
    # calibre-debug -c 'from calibre.ai.openai.backend import develop; develop()'
    # Interactive helper for manually exercising the chat API.
    m = (ChatMessage(msg),) if msg else ()
    develop_text_chat(text_chat, use_model, messages=m)


if __name__ == '__main__':
    develop()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/openai/backend.py",
"license": "GNU General Public License v3.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/ai/openai/config.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
from functools import partial
from qt.core import QCheckBox, QFormLayout, QLabel, QLineEdit, QWidget
from calibre.ai.openai import OpenAI
from calibre.ai.prefs import decode_secret, encode_secret, pref_for_provider, set_prefs_for_provider
from calibre.ai.utils import configure, model_choice_strategy_config_widget, reasoning_strategy_config_widget
from calibre.gui2 import error_dialog
pref = partial(pref_for_provider, OpenAI.name)
class ConfigWidget(QWidget):

    """Configuration UI for the OpenAI provider.

    Fixes in this revision: the "allow web searches" checkbox was created
    but never added to the layout, so the setting could not be changed
    from the UI; and the validate() error message referred to GitHub AI
    (a copy/paste from the GitHub provider's config).
    """

    def __init__(self, parent: QWidget | None = None):
        super().__init__(parent)
        l = QFormLayout(self)
        l.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.ExpandingFieldsGrow)
        la = QLabel('<p>'+_(
            'You have to create an account at {0}, then generate an <i>API key</i>.'
            ' Then, buy some credits. Finally, <a href="{1}">{1}</a>'
            '. OpenAI models cannot be used free of charge via this plugin.'
            ' OpenAI <a href="{2}">claims not to store prompts</a> and other data and not use it'
            ' for training except for a small period of retention to prevent abuse and misuse.'
        ).format(
            '<a href="https://platform.openai.com">OpenAI</a>',
            'https://platform.openai.com/settings/organization/general',
            'https://platform.openai.com/docs/guides/your-data',
        ))
        la.setWordWrap(True)
        la.setOpenExternalLinks(True)
        l.addRow(la)
        self.api_key_edit = a = QLineEdit(self)
        a.setPlaceholderText(_('An API key is required'))
        l.addRow(_('API &key:'), a)
        if key := pref('api_key'):
            a.setText(decode_secret(key))
        self.model_strategy = ms = model_choice_strategy_config_widget(pref('model_choice_strategy', 'medium'), self)
        l.addRow(_('Model &choice strategy:'), ms)
        self._allow_web_searches = aws = QCheckBox(_('Allow &searching the web when generating responses'))
        aws.setChecked(pref('allow_web_searches', True))
        aws.setToolTip(_('If enabled, OpenAI will use web searches to return accurate and up-to-date information for queries, where possible'))
        # Bugfix: the checkbox was created but never placed in the layout.
        l.addRow(aws)
        self.reasoning_strat = rs = reasoning_strategy_config_widget(pref('reasoning_strategy', 'auto'), self)
        l.addRow(_('&Reasoning effort:'), rs)

    @property
    def api_key(self) -> str:
        return self.api_key_edit.text().strip()

    @property
    def model_choice_strategy(self) -> str:
        return self.model_strategy.currentData()

    @property
    def reasoning_strategy(self) -> str:
        return self.reasoning_strat.currentData()

    @property
    def allow_web_searches(self) -> bool:
        return self._allow_web_searches.isChecked()

    @property
    def settings(self) -> dict[str, str]:
        ans = {
            'api_key': encode_secret(self.api_key), 'model_choice_strategy': self.model_choice_strategy,
            'reasoning_strategy': self.reasoning_strategy, 'allow_web_searches': self.allow_web_searches,
        }
        return ans

    @property
    def is_ready_for_use(self) -> bool:
        return bool(self.api_key)

    def validate(self) -> bool:
        if not self.is_ready_for_use:
            # Bugfix: the message previously referred to GitHub AI.
            error_dialog(self, _('No API key'), _('You must supply an API key to use OpenAI.'), show=True)
            return False
        return True

    def save_settings(self):
        set_prefs_for_provider(OpenAI.name, self.settings)
if __name__ == '__main__':
    # Launch a standalone configuration dialog for this provider.
    configure(OpenAI.name)
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/openai/config.py",
"license": "GNU General Public License v3.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
kovidgoyal/calibre:src/calibre/ai/github/backend.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import datetime
import json
import os
from collections.abc import Iterable, Iterator
from functools import lru_cache
from typing import Any, NamedTuple
from urllib.request import Request
from calibre.ai import AICapabilities, ChatMessage, ChatMessageType, ChatResponse, NoAPIKey, ResultBlocked
from calibre.ai.github import GitHubAI
from calibre.ai.prefs import decode_secret, pref_for_provider
from calibre.ai.utils import chat_with_error_handler, develop_text_chat, get_cached_resource, read_streaming_response
from calibre.constants import cache_dir
module_version = 1  # needed for live updates
MODELS_URL = 'https://models.github.ai/catalog/models'
CHAT_URL = 'https://models.github.ai/inference/chat/completions'
API_VERSION = '2022-11-28'  # GitHub REST API version header value


def pref(key: str, defval: Any = None) -> Any:
    # Read a setting for the GitHub AI provider from the AI prefs.
    return pref_for_provider(GitHubAI.name, key, defval)


def api_key() -> str:
    # The stored (encoded) personal access token, or a falsy value when unset.
    return pref('api_key')


def is_ready_for_use() -> bool:
    return bool(api_key())


def decoded_api_key() -> str:
    # Decode the stored token, raising NoAPIKey when none is configured.
    ans = api_key()
    if not ans:
        raise NoAPIKey('Personal access token required for GitHub AI')
    return decode_secret(ans)
@lru_cache(2)
def headers() -> tuple[tuple[str, str], ...]:
    # Headers for authenticated GitHub Models API requests. NOTE: the local
    # name intentionally shadows the module-level api_key() function here.
    api_key = decoded_api_key()
    return (
        ('Authorization', f'Bearer {api_key}'),
        ('X-GitHub-Api-Version', API_VERSION),
        ('Accept', 'application/vnd.github+json'),
        ('Content-Type', 'application/json'),
    )
class Model(NamedTuple):
    # A model entry from the GitHub Models catalog (MODELS_URL above).
    # (Previous comment pointed at the Google AI model docs -- copy/paste.)
    name: str
    id: str
    url: str  # catalog page for the model
    description: str
    version: str
    context_length: int  # max input tokens, 0 when unknown
    output_token_limit: int  # max output tokens, 0 when unknown
    capabilities: AICapabilities
    thinking: bool  # whether the model advertises a 'reasoning' capability
    publisher: str

    @classmethod
    def from_dict(cls, x: dict[str, object]) -> 'Model':
        # Return annotation quoted: Model is not bound during class creation.
        mid = x['id']
        caps = AICapabilities.none
        if 'embedding' in x['capabilities'] or 'embeddings' in x['supported_output_modalities']:
            caps |= AICapabilities.embedding
        else:
            # NOTE(review): these only check that the modality lists are
            # non-empty, not that they contain 'text' -- confirm intended.
            input_has_text = x['supported_input_modalities']
            output_has_text = x['supported_output_modalities']
            if input_has_text:
                if output_has_text:
                    caps |= AICapabilities.text_to_text
        return Model(
            name=x['name'], id=mid, description=x.get('summary', ''), version=x['version'],
            context_length=int(x['limits']['max_input_tokens'] or 0), publisher=x['publisher'],
            output_token_limit=int(x['limits']['max_output_tokens'] or 0),
            capabilities=caps, url=x['html_url'], thinking='reasoning' in x['capabilities'],
        )
def parse_models_list(entries: list[dict[str, Any]]) -> dict[str, Model]:
    """Convert raw catalog entries into a mapping of model id -> Model."""
    return {m.id: m for m in (Model.from_dict(entry) for entry in entries)}
@lru_cache(2)
def get_available_models() -> dict[str, Model]:
    # Download (or load from the on-disk cache) the GitHub models catalog
    # and parse it; the parsed result is memoised for the process lifetime.
    cache_loc = os.path.join(cache_dir(), 'ai', f'{GitHubAI.name}-models-v1.json')
    data = get_cached_resource(cache_loc, MODELS_URL)
    return parse_models_list(json.loads(data))
def find_models_matching_name(name: str) -> Iterator[str]:
    """Yield ids of all models whose display name contains *name* (case-insensitive)."""
    needle = name.strip().lower()
    for model in get_available_models().values():
        haystack = model.name.strip().lower()
        if needle in haystack:
            yield model.id
def config_widget():
    """Instantiate the Qt configuration widget for this provider."""
    from calibre.ai.github.config import ConfigWidget
    return ConfigWidget()
def save_settings(config_widget):
    """Persist the settings from a widget created by config_widget()."""
    config_widget.save_settings()
def human_readable_model_name(model_id: str) -> str:
    """Return the display name for *model_id*, or the id itself when unknown."""
    model = get_available_models().get(model_id)
    return model.name if model is not None else model_id
@lru_cache(2)
def newest_gpt_models() -> dict[str, Model]:
    # Pick the newest non-preview OpenAI GPT model for each quality tier:
    # 'high' (plain gpt), 'medium' (-mini) and 'low' (-nano).
    high, medium, low = [], [], []

    def get_date(model: Model) -> datetime.date:
        # Model versions are ISO dates; anything unparseable sorts oldest.
        try:
            return datetime.date.fromisoformat(model.version)
        except Exception:
            return datetime.date(2000, 1, 1)

    for model in get_available_models().values():
        if model.publisher == 'OpenAI' and '(preview)' not in model.name and (idp := model.id.split('/')[-1].split('-')) and 'gpt' in idp:
            which = high
            if 'mini' in model.id.split('-'):
                which = medium
            elif 'nano' in model.id.split('-'):
                which = low
            which.append(model)
    # NOTE(review): raises IndexError if the catalog lacks any of the three
    # tiers -- confirm the catalog always provides all of them.
    return {
        'high': sorted(high, key=get_date)[-1],
        'medium': sorted(medium, key=get_date)[-1],
        'low': sorted(low, key=get_date)[-1],
    }
@lru_cache(2)
def model_choice_for_text() -> Model:
    """Pick the GPT model matching the user's configured quality strategy.

    Falls back to the 'medium' tier when nothing (or an unknown tier) is
    configured.
    """
    m = newest_gpt_models()
    # The config widget stores this preference as 'model_choice_strategy'
    # (see calibre.ai.github.config.ConfigWidget.settings); reading the old
    # 'model_strategy' key always fell back to the default tier.
    return m.get(pref('model_choice_strategy', 'medium'), m['medium'])
def chat_request(data: dict[str, Any], model: Model) -> Request:
    """Build a streaming chat-completions POST request for GitHub models.

    Note: mutates *data* in place to enable streaming with usage reporting.
    """
    data['stream'] = True
    data['stream_options'] = {'include_usage': True}
    body = json.dumps(data).encode('utf-8')
    return Request(CHAT_URL, data=body, headers=dict(headers()), method='POST')
def for_assistant(self: ChatMessage) -> dict[str, Any]:
    """Serialize a ChatMessage into the GitHub chat-completions wire format."""
    allowed = (ChatMessageType.assistant, ChatMessageType.system, ChatMessageType.user, ChatMessageType.developer)
    if self.type not in allowed:
        raise ValueError(f'Unsupported message type: {self.type}')
    return {'role': self.type.value, 'content': self.query}
def as_chat_responses(d: dict[str, Any], model: Model) -> Iterator[ChatResponse]:
    # Translate one streamed response chunk into zero or more ChatResponse
    # objects. See https://docs.github.com/en/rest/models/inference
    content = ''
    for choice in d['choices']:
        content += choice['delta'].get('content', '')
        # Any finish reason other than a normal 'stop' means the result was
        # blocked; report it and stop processing this chunk.
        if (fr := choice['finish_reason']) and fr != 'stop':
            yield ChatResponse(exception=ResultBlocked(custom_message=_('Result was blocked for reason: {}').format(fr)))
            return
    has_metadata = False
    if u := d.get('usage'):
        u  # no-op placeholder; TODO: implement costing from the usage data
        has_metadata = True
    if has_metadata or content:
        yield ChatResponse(
            type=ChatMessageType.assistant, content=content, has_metadata=has_metadata, model=model.id, plugin_name=GitHubAI.name)
def text_chat_implementation(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    # Stream a chat completion from GitHub models, yielding ChatResponse
    # objects as chunks arrive.
    # https://docs.github.com/en/rest/models/inference
    if use_model:
        model = get_available_models()[use_model]
    else:
        model = model_choice_for_text()
    data = {
        'model': model.id,
        'messages': [for_assistant(m) for m in messages],
    }
    rq = chat_request(data, model)
    for datum in read_streaming_response(rq, GitHubAI.name):
        for res in as_chat_responses(datum, model):
            yield res
            if res.exception:
                # Stop consuming this chunk once an error response is seen.
                break
def text_chat(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    """Public chat entry point: the implementation wrapped with error handling."""
    yield from chat_with_error_handler(text_chat_implementation(messages, use_model))
def develop(use_model: str = '', msg: str = '') -> None:
    # Manual test helper. Run with:
    # calibre-debug -c 'from calibre.ai.github.backend import develop; develop()'
    m = (ChatMessage(msg),) if msg else ()
    develop_text_chat(text_chat, use_model, messages=m)


if __name__ == '__main__':
    develop()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/github/backend.py",
"license": "GNU General Public License v3.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/ai/github/config.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
from collections.abc import Sequence
from functools import partial
from qt.core import QFormLayout, QHBoxLayout, QLabel, QLineEdit, QWidget
from calibre.ai.github import GitHubAI
from calibre.ai.prefs import decode_secret, encode_secret, pref_for_provider, set_prefs_for_provider
from calibre.ai.utils import configure, model_choice_strategy_config_widget, plugin_for_name
from calibre.gui2 import error_dialog
# Shortcut to read preferences scoped to the GitHub AI provider.
pref = partial(pref_for_provider, GitHubAI.name)
class ConfigWidget(QWidget):
    # Configuration UI for the GitHub AI provider: personal access token,
    # model choice strategy and an optional explicit text model.

    def __init__(self, parent: QWidget | None = None):
        super().__init__(parent)
        l = QFormLayout(self)
        l.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.ExpandingFieldsGrow)
        la = QLabel('<p>'+_(
            'You have to create an account at {0}, then generate a <a href="{1}">Personal access token</a>'
            ' with the <code>models:read</code> permission.'
            ' After that, you can use the GitHub AI services a limited number of times a day for free.'
            ' For more extensive use, you will need to setup <a href="{2}">GitHub models billing</a>.'
        ).format(
            '<a href="https://github.com">GitHub</a>',
            'https://docs.github.com/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens',
            'https://docs.github.com/billing/concepts/product-billing/github-models',
        ))
        la.setWordWrap(True)
        la.setOpenExternalLinks(True)
        l.addRow(la)
        self.api_key_edit = a = QLineEdit(self)
        a.setPlaceholderText(_('A personal access token is required'))
        l.addRow(_('Access &token:'), a)
        # The stored token is obfuscated; decode it for display.
        if key := pref('api_key'):
            a.setText(decode_secret(key))
        self.model_strategy = ms = model_choice_strategy_config_widget(pref('model_choice_strategy', 'medium'), self)
        l.addRow(_('Model &choice strategy:'), ms)
        self.text_model_edit = lm = QLineEdit(self)
        lm.setClearButtonEnabled(True)
        lm.setToolTip(_(
            'Enter a name of the model to use for text based tasks.'
            ' If not specified, one is chosen automatically.'
        ))
        lm.setPlaceholderText(_('Optionally, enter name of model to use'))
        self.browse_label = la = QLabel(f'<a href="https://github.com/marketplace?type=models">{_("Browse")}</a>')
        tm = QWidget()
        la.setOpenExternalLinks(True)
        h = QHBoxLayout(tm)
        h.setContentsMargins(0, 0, 0, 0)
        h.addWidget(lm), h.addWidget(la)
        l.addRow(_('Model for &text tasks:'), tm)
        # Remember the previously configured model so its id need not be
        # resolved again when the name is unchanged (see model_ids_for_name).
        self.initial_text_model = pm = pref('text_model') or {'name': '', 'id': ''}
        if pm:
            lm.setText(pm['name'])

    @property
    def api_key(self) -> str:
        # The access token as currently typed, unencoded.
        return self.api_key_edit.text().strip()

    @property
    def model_choice_strategy(self) -> str:
        return self.model_strategy.currentData()

    @property
    def settings(self) -> dict[str, object]:
        # Values to persist. Note that 'text_model' holds a nested dict, so
        # the value type is object rather than str.
        name = self.text_model_edit.text().strip()
        ans = {
            'api_key': encode_secret(self.api_key), 'model_choice_strategy': self.model_choice_strategy,
        }
        if name:
            ans['text_model'] = {'name': name, 'id': self.model_ids_for_name(name)[0]}
        return ans

    @property
    def is_ready_for_use(self) -> bool:
        return bool(self.api_key)

    def model_ids_for_name(self, name: str) -> Sequence[str]:
        # Resolve a human-entered model name to catalog model ids, avoiding
        # a catalog lookup when the name matches the stored configuration.
        if name and name == self.initial_text_model['name']:
            return (self.initial_text_model['id'],)
        plugin = plugin_for_name(GitHubAI.name)
        return tuple(plugin.builtin_live_module.find_models_matching_name(name))

    def validate(self) -> bool:
        # True only when a token is present and any entered model name
        # resolves to exactly one catalog entry.
        if not self.is_ready_for_use:
            error_dialog(self, _('No API key'), _('You must supply a Personal access token to use GitHub AI.'), show=True)
            return False
        if (name := self.text_model_edit.text().strip()) and name:
            num = len(self.model_ids_for_name(name))
            if num == 0:
                error_dialog(self, _('No matching model'), _('No model named {} found on GitHub').format(name), show=True)
                return False
            if num > 1:
                error_dialog(self, _('Ambiguous model name'), _('The name {} matches more than one model on GitHub').format(name), show=True)
                return False
        return True

    def save_settings(self):
        set_prefs_for_provider(GitHubAI.name, self.settings)
if __name__ == '__main__':
    # Show this configuration widget standalone for manual testing.
    configure(GitHubAI.name)
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/github/config.py",
"license": "GNU General Public License v3.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/ai/google/backend.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
# Google studio account management: https://aistudio.google.com/usage
# Docs:
# Text generation: https://ai.google.dev/gemini-api/docs/text-generation#rest
# Image generation with gemini: https://ai.google.dev/gemini-api/docs/image-generation#rest
# Image generation with imagen: https://ai.google.dev/gemini-api/docs/imagen#rest
# TTS: https://ai.google.dev/gemini-api/docs/speech-generation#rest
import json
import os
from collections.abc import Iterable, Iterator
from functools import lru_cache
from typing import Any, NamedTuple
from urllib.request import Request
from calibre.ai import (
AICapabilities,
ChatMessage,
ChatMessageType,
ChatResponse,
Citation,
NoAPIKey,
PromptBlocked,
PromptBlockReason,
ResultBlocked,
ResultBlockReason,
WebLink,
)
from calibre.ai.google import GoogleAI
from calibre.ai.prefs import decode_secret, pref_for_provider
from calibre.ai.utils import chat_with_error_handler, develop_text_chat, get_cached_resource, read_streaming_response
from calibre.constants import cache_dir
module_version = 1  # needed for live updates
API_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta'
MODELS_URL = f'{API_BASE_URL}/models?pageSize=500'  # list-models endpoint; large page size to fetch everything at once
def pref(key: str, defval: Any = None) -> Any:
    """Read a preference scoped to the Google AI provider."""
    provider = GoogleAI.name
    return pref_for_provider(provider, key, defval)
def api_key() -> str:
    """Return the stored (still encoded) API key, if any."""
    return pref('api_key')
def is_ready_for_use() -> bool:
    """True when an API key has been configured."""
    return True if api_key() else False
def decoded_api_key() -> str:
    """Return the decoded API key, raising NoAPIKey when unset."""
    encoded = api_key()
    if encoded:
        return decode_secret(encoded)
    raise NoAPIKey('API key required for Google AI')
class Price(NamedTuple):
    """Per-token price with an optional two-tier threshold.

    When the token count is at or below ``threshold`` the ``below`` rate is
    applied to all tokens, otherwise the ``above`` rate is.
    """
    above: float
    threshold: int = 0
    below: float = 0

    def get_cost(self, num_tokens: int) -> float:
        """Return the total cost for *num_tokens* tokens."""
        if num_tokens <= self.threshold:
            rate = self.below
        else:
            rate = self.above
        return rate * num_tokens
class Pricing(NamedTuple):
    # Full price sheet for one Gemini model (USD per token, see get_cost).
    input: Price
    output: Price
    caching: Price
    caching_storage: Price
    google_search: Price = Price(35/1e3, 1500)  # search-grounding price; see the pricing docs
    input_audio: Price | None = None  # audio-token input price, when the model supports audio input

    def get_cost_for_input_token_modality(self, mtc: dict[str, int]) -> float:
        # mtc is one entry of usageMetadata.promptTokenDetails, e.g.
        # {'modality': 'AUDIO', 'tokenCount': 123}.
        cost = self.input
        if mtc['modality'] == 'AUDIO' and self.input_audio:
            cost = self.input_audio
        return cost.get_cost(mtc['tokenCount'])

    def get_cost(self, usage_metadata: dict[str, int]) -> tuple[float, str]:
        # Compute (cost, currency) from a generateContent usageMetadata dict.
        prompt_tokens = usage_metadata['promptTokenCount']
        cached_tokens = usage_metadata.get('cachedContentTokenCount', 0)
        input_tokens = prompt_tokens - cached_tokens  # non-cached prompt tokens
        output_tokens = usage_metadata['totalTokenCount'] - prompt_tokens
        if ptd := usage_metadata.get('promptTokenDetails', ()):
            # A per-modality breakdown is available: price each modality separately.
            input_cost = 0
            for mtc in ptd:
                input_cost += self.get_cost_for_input_token_modality(mtc)
        else:
            input_cost = self.input.get_cost(input_tokens)
        return input_cost + self.caching.get_cost(cached_tokens) + self.output.get_cost(output_tokens), 'USD'
@lru_cache(2)
def get_model_costs() -> dict[str, Pricing]:
    # Static pricing table, keyed by model id.
    # Source: https://ai.google.dev/gemini-api/docs/pricing
    return {
        'models/gemini-2.5-pro': Pricing(
            input=Price(2.5/1e6, 200_000, 1.25/1e6),
            output=Price(15/1e6, 200_000, 10/1e6),
            caching=Price(0.25/1e6, 200_000, 0.125/1e6),
            caching_storage=Price(4.5/1e6),
        ),
        'models/gemini-2.5-flash': Pricing(
            input=Price(0.3/1e6),
            output=Price(2.5/1e6),
            caching=Price(0.03/1e6),
            caching_storage=Price(1/1e6),
            input_audio=Price(1/1e6),
        ),
        'models/gemini-2.5-flash-lite': Pricing(
            input=Price(0.1/1e6),
            input_audio=Price(0.3/1e6),
            output=Price(0.4/1e6),
            caching=Price(0.01/1e6),
            caching_storage=Price(1/1e6),
        ),
    }
class Model(NamedTuple):
    # Metadata for one Google AI model.
    # See https://ai.google.dev/api/models#Model
    name: str
    id: str
    slug: str
    description: str
    version: str
    context_length: int
    output_token_limit: int
    capabilities: AICapabilities
    family: str  # e.g. 'gemini' or 'imagen'; '' when unrecognised
    family_version: float  # e.g. 2.5; 0 when unknown
    name_parts: tuple[str, ...]
    thinking: bool
    pricing: Pricing | None

    @classmethod
    def from_dict(cls, x: dict[str, object]) -> Model:
        # Build a Model from one entry of the models-list API response.
        caps = AICapabilities.text_to_text
        mid = x['name']
        if 'embedContent' in x['supportedGenerationMethods']:
            caps |= AICapabilities.embedding
        # Derive family and version from the id, e.g.
        # 'models/gemini-2.5-flash' -> family 'gemini', version 2.5.
        family, family_version = '', 0
        name_parts = mid.rpartition('/')[-1].split('-')
        if len(name_parts) > 1:
            family, fv = name_parts[:2]
            try:
                family_version = float(fv)
            except Exception:
                family = ''
        match family:
            case 'imagen':
                caps |= AICapabilities.text_to_image
            case 'gemini':
                if family_version >= 2.5:
                    caps |= AICapabilities.text_and_image_to_image
                if 'tts' in name_parts:
                    caps |= AICapabilities.tts
        pmap = get_model_costs()
        return Model(
            name=x['displayName'], id=mid, description=x.get('description', ''), version=x['version'],
            context_length=int(x['inputTokenLimit']), output_token_limit=int(x['outputTokenLimit']),
            capabilities=caps, family=family, family_version=family_version, name_parts=tuple(name_parts),
            slug=mid, thinking=x.get('thinking', False), pricing=pmap.get(mid),
        )

    def get_cost(self, usage_metadata: dict[str, int]) -> tuple[float, str]:
        # (cost, currency); (0, '') when no pricing is known for this model.
        if self.pricing is None:
            return 0, ''
        return self.pricing.get_cost(usage_metadata)
def parse_models_list(entries: dict[str, Any]) -> dict[str, Model]:
    """Parse the decoded models-list JSON into a mapping of model id -> Model.

    *entries* is the full response object; the model entries live under its
    'models' key. (The previous annotation claimed a bare list, which did
    not match the subscripting below.)
    """
    ans = {}
    for entry in entries['models']:
        e = Model.from_dict(entry)
        ans[e.id] = e
    return ans
@lru_cache(2)
def get_available_models() -> dict[str, Model]:
    # Fetch (or load from the on-disk cache) the Google models list and
    # parse it; the parsed result is memoised for the process lifetime.
    api_key = decoded_api_key()
    cache_loc = os.path.join(cache_dir(), 'ai', f'{GoogleAI.name}-models-v1.json')
    data = get_cached_resource(cache_loc, MODELS_URL, headers=(('X-goog-api-key', api_key),))
    return parse_models_list(json.loads(data))
def config_widget():
    """Instantiate the Qt configuration widget for this provider."""
    from calibre.ai.google.config import ConfigWidget
    return ConfigWidget()
def save_settings(config_widget):
    """Persist the settings from a widget created by config_widget()."""
    config_widget.save_settings()
def human_readable_model_name(model_id: str) -> str:
    """Return the display name for *model_id*, or the id itself when unknown."""
    model = get_available_models().get(model_id)
    return model.name if model is not None else model_id
@lru_cache(8)
def gemini_models(version: float = 0) -> dict[str, Model]:
    # Map quality tiers to Gemini models of family *version* (default: the
    # newest available): 'high' -> pro, 'medium' -> flash, 'low' ->
    # flash-lite. Preview models are excluded.
    models = {}
    for m in get_available_models().values():
        if m.family and 'preview' not in m.name_parts:
            fm = models.setdefault(m.family, {})
            fm.setdefault(m.family_version, []).append(m)
    # NOTE(review): raises KeyError when the catalog has no non-preview
    # gemini models -- confirm that cannot happen in practice.
    gemini = models['gemini']
    version = version or max(gemini)
    ans = {}
    for m in gemini[version]:
        if m.name_parts[-1] == 'pro':
            ans['high'] = m
        elif m.name_parts[-1] == 'flash':
            ans['medium'] = m
        elif m.name_parts[-2:] == ('flash', 'lite'):
            ans['low'] = m
    return ans
def model_choice_for_text() -> Model:
    """Pick the Gemini model for text tasks per the configured quality strategy.

    Falls back to the 'medium' (flash) tier when nothing or an unknown tier
    is configured.
    """
    m = gemini_models()
    # The config widget stores this preference as 'model_choice_strategy'
    # (see calibre.ai.google.config.ConfigWidget.settings); reading the old
    # 'model_strategy' key always fell back to the default tier.
    return m.get(pref('model_choice_strategy', 'medium')) or m['medium']
def chat_request(data: dict[str, Any], model: Model, streaming: bool = True) -> Request:
    """Build a generateContent POST request for *model*.

    When *streaming* is True the SSE streaming endpoint is used instead of
    the one-shot endpoint.
    """
    endpoint = ':streamGenerateContent?alt=sse' if streaming else ':generateContent'
    url = f'{API_BASE_URL}/{model.slug}{endpoint}'
    headers = {
        'X-goog-api-key': decoded_api_key(),
        'Content-Type': 'application/json',
    }
    return Request(url, data=json.dumps(data).encode('utf-8'), headers=headers, method='POST')
def thinking_budget(m: Model) -> int | None:
    # Map the configured reasoning strategy to a Gemini thinkingBudget token
    # count, or None when the model cannot think at all.
    # https://ai.google.dev/gemini-api/docs/thinking#set-budget
    if not m.thinking:
        return None
    # (min, max) thinking budget per model tier.
    limits = 0, 24576
    if 'pro' in m.name_parts:
        limits = 128, 32768
    elif 'lite' in m.name_parts:
        limits = 512, 24576
    match pref('reasoning_strategy', 'auto'):
        case 'auto':
            return -1  # -1 lets the model decide its own budget
        case 'none':
            # Pro models cannot disable thinking entirely; use their minimum.
            return limits[0] if 'pro' in m.name_parts else 0
        case 'low':
            return max(limits[0], int(0.2 * limits[1]))
        case 'medium':
            return max(limits[0], int(0.5 * limits[1]))
        case 'high':
            return max(limits[0], int(0.8 * limits[1]))
    return None
def for_assistant(self: ChatMessage) -> dict[str, Any]:
    """Serialize a ChatMessage into a Gemini content part."""
    return {'text': self.query}
def block_reason(block_reason: str) -> PromptBlockReason:
    """Map Gemini's promptFeedback blockReason string to a PromptBlockReason."""
    mapping = {
        'SAFETY': PromptBlockReason.safety,
        'BLOCKLIST': PromptBlockReason.blocklist,
        'PROHIBITED_CONTENT': PromptBlockReason.prohibited_content,
        'IMAGE_SAFETY': PromptBlockReason.unsafe_image_generated
    }
    return mapping.get(block_reason.upper(), PromptBlockReason.unknown)
def result_block_reason(block_reason: str) -> ResultBlockReason:
    """Map a Gemini finishReason string to a ResultBlockReason.

    See https://ai.google.dev/api/generate-content#FinishReason
    """
    mapping = {
        'MAX_TOKENS': ResultBlockReason.max_tokens,
        'SAFETY': ResultBlockReason.safety,
        'RECITATION': ResultBlockReason.recitation,
        'LANGUAGE': ResultBlockReason.unsupported_language,
        'BLOCKLIST': ResultBlockReason.blocklist,
        'PROHIBITED_CONTENT': ResultBlockReason.prohibited_content,
        'SPII': ResultBlockReason.personally_identifiable_info,
        'MALFORMED_FUNCTION_CALL': ResultBlockReason.malformed_function_call,
        'IMAGE_SAFETY': ResultBlockReason.unsafe_image_generated,
        'UNEXPECTED_TOOL_CALL': ResultBlockReason.unexpected_tool_call,
        'TOO_MANY_TOOL_CALLS': ResultBlockReason.too_many_tool_calls,
    }
    return mapping.get(block_reason.upper(), ResultBlockReason.unknown)
def as_chat_responses(d: dict[str, Any], model: Model) -> Iterator[ChatResponse]:
    # Translate one generateContent response chunk into ChatResponse objects.
    # See https://ai.google.dev/api/generate-content#generatecontentresponse
    if pf := d.get('promptFeedback'):
        # The whole prompt was rejected; nothing else to process.
        if br := pf.get('blockReason'):
            yield ChatResponse(exception=PromptBlocked(block_reason(br)))
            return
    grounding_chunks, grounding_supports = [], []
    for c in d['candidates']:
        has_metadata = False
        cost, currency = 0, ''
        if fr := c.get('finishReason'):
            if fr == 'STOP':
                # Normal completion: the final chunk carries usage metadata
                # from which the cost is computed.
                has_metadata = True
                cost, currency = model.get_cost(d['usageMetadata'])
            else:
                yield ChatResponse(exception=ResultBlocked(result_block_reason(fr)))
                return
        content = c['content']
        # Accumulate web-search grounding data across chunks; it is only
        # turned into citations on the final (metadata) chunk.
        if gm := c.get('groundingMetadata'):
            grounding_chunks.extend(gm['groundingChunks'])
            grounding_supports.extend(gm['groundingSupports'])
        citations, web_links = [], []
        if has_metadata:
            for x in grounding_chunks:
                if w := x.get('web'):
                    web_links.append(WebLink(**w))
                else:
                    # Placeholder keeps indices in groundingChunkIndices valid.
                    web_links.append(WebLink())
            for s in grounding_supports:
                if links := tuple(i for i in s['groundingChunkIndices'] if web_links[i]):
                    seg = s['segment']
                    citations.append(Citation(
                        links, start_offset=seg.get('startIndex', 0), end_offset=seg.get('endIndex', 0), text=seg.get('text', '')))
        role = ChatMessageType.user if 'user' == content.get('role') else ChatMessageType.assistant
        # Split the parts into visible content and (optional) reasoning text.
        content_parts = []
        reasoning_parts = []
        reasoning_details = []
        for part in content['parts']:
            if text := part.get('text'):
                (reasoning_parts if part.get('thought') else content_parts).append(text)
            if ts := part.get('thoughtSignature'):
                reasoning_details.append({'signature': ts})
        yield ChatResponse(
            type=role, content=''.join(content_parts), reasoning=''.join(reasoning_parts),
            reasoning_details=tuple(reasoning_details), has_metadata=has_metadata, model=model.id,
            cost=cost, plugin_name=GoogleAI.name, currency=currency, citations=citations, web_links=web_links,
        )
def text_chat_implementation(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    # Stream a Gemini chat completion, yielding ChatResponse objects.
    # See https://ai.google.dev/gemini-api/docs/text-generation
    if use_model:
        model = get_available_models()[use_model]
    else:
        model = model_choice_for_text()
    # System messages go into system_instruction, everything else into contents.
    contents = []
    system_instructions = []
    for m in messages:
        d = system_instructions if m.type is ChatMessageType.system else contents
        d.append(for_assistant(m))
    data = {
        # See https://ai.google.dev/api/generate-content#v1beta.GenerationConfig
        'generationConfig': {
            'thinkingConfig': {
                'includeThoughts': True,
            },
        },
    }
    if (tb := thinking_budget(model)) is not None:
        data['generationConfig']['thinkingConfig']['thinkingBudget'] = tb
    if system_instructions:
        data['system_instruction'] = {'parts': system_instructions}
    if contents:
        data['contents'] = [{'parts': contents}]
    if pref('allow_web_searches', True):
        # Enable Google search grounding.
        data['tools'] = [{'google_search': {}}]
    rq = chat_request(data, model)
    for datum in read_streaming_response(rq, GoogleAI.name):
        for res in as_chat_responses(datum, model):
            yield res
            if res.exception:
                # Stop consuming this chunk once an error response is seen.
                break
def text_chat(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    """Public chat entry point: the implementation wrapped with error handling."""
    yield from chat_with_error_handler(text_chat_implementation(messages, use_model))
def develop(use_model: str = '', msg: str = '') -> None:
    # Manual test helper. Run with:
    # calibre-debug -c 'from calibre.ai.google.backend import develop; develop()'
    print('\n'.join(f'{k}:{m.id}' for k, m in gemini_models().items()))
    m = (ChatMessage(msg),) if msg else ()
    develop_text_chat(text_chat, ('models/' + use_model) if use_model else '', messages=m)


if __name__ == '__main__':
    develop()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/google/backend.py",
"license": "GNU General Public License v3.0",
"lines": 335,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/ai/google/config.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
from functools import partial
from qt.core import QCheckBox, QFormLayout, QLabel, QLineEdit, QWidget
from calibre.ai.google import GoogleAI
from calibre.ai.prefs import decode_secret, encode_secret, pref_for_provider, set_prefs_for_provider
from calibre.ai.utils import configure, model_choice_strategy_config_widget, reasoning_strategy_config_widget
from calibre.gui2 import error_dialog
# Shortcut to read preferences scoped to the Google AI provider.
pref = partial(pref_for_provider, GoogleAI.name)
class ConfigWidget(QWidget):
    # Configuration UI for the Google AI provider: API key, model choice
    # strategy, web-search permission and reasoning effort.

    def __init__(self, parent: QWidget | None = None):
        super().__init__(parent)
        l = QFormLayout(self)
        l.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.ExpandingFieldsGrow)
        la = QLabel('<p>'+_(
            'You have to create an account at {0}, then generate an'
            ' API key. After that, you can use the Google AI services a limited number of times a day for free.'
            ' For more extensive use, you will need to setup a <a href="{1}">Google Cloud billing account</a>.'
            ' Note that Google will use your prompts for their training data unless you setup the billing account.'
            # BUGFIX: the href attribute was missing its closing quote,
            # producing a broken link.
            ' <a href="{2}">Pricing details</a> for different models.'
        ).format(
            '<a href="https://aistudio.google.com/">Google AI Studio</a>',
            'https://aistudio.google.com/usage', 'https://ai.google.dev/gemini-api/docs/pricing',
        ))
        la.setWordWrap(True)
        la.setOpenExternalLinks(True)
        l.addRow(la)
        self.api_key_edit = a = QLineEdit(self)
        a.setPlaceholderText(_('An API key is required to use Google AI'))
        l.addRow(_('API &key:'), a)
        # The stored key is obfuscated; decode it for display.
        if key := pref('api_key'):
            a.setText(decode_secret(key))
        self.model_strategy = ms = model_choice_strategy_config_widget(pref('model_choice_strategy', 'medium'), self)
        l.addRow(_('Model &choice strategy:'), ms)
        self._allow_web_searches = aws = QCheckBox(_('Allow &searching the web when generating responses'))
        aws.setChecked(pref('allow_web_searches', True))
        aws.setToolTip('<p>' + _('If enabled, Gemini will use Google Web searches to return accurate and up-to-date information for queries, where possible'))
        # BUGFIX: the checkbox was created and configured but never added to
        # the layout, making the setting invisible to users.
        l.addRow(aws)
        self.reasoning_strat = rs = reasoning_strategy_config_widget(pref('reasoning_strategy'), self)
        l.addRow(_('&Reasoning effort:'), rs)

    @property
    def api_key(self) -> str:
        # The API key as currently typed, unencoded.
        return self.api_key_edit.text().strip()

    @property
    def model_choice_strategy(self) -> str:
        return self.model_strategy.currentData()

    @property
    def reasoning_strategy(self) -> str:
        return self.reasoning_strat.currentData()

    @property
    def allow_web_searches(self) -> bool:
        return self._allow_web_searches.isChecked()

    @property
    def settings(self) -> dict[str, object]:
        # Values to persist. Note that 'allow_web_searches' is a bool, so
        # the value type is object rather than str.
        return {
            'api_key': encode_secret(self.api_key), 'model_choice_strategy': self.model_choice_strategy,
            'reasoning_strategy': self.reasoning_strategy, 'allow_web_searches': self.allow_web_searches,
        }

    @property
    def is_ready_for_use(self) -> bool:
        return bool(self.api_key)

    def validate(self) -> bool:
        # An API key is the only hard requirement.
        if self.is_ready_for_use:
            return True
        error_dialog(self, _('No API key'), _('You must supply an API key to use Google AI.'), show=True)
        return False

    def save_settings(self):
        set_prefs_for_provider(GoogleAI.name, self.settings)
if __name__ == '__main__':
    # Show this configuration widget standalone for manual testing.
    configure(GoogleAI.name)
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/google/config.py",
"license": "GNU General Public License v3.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
kovidgoyal/calibre:src/calibre/gui2/chat_widget.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
from html import escape
from math import ceil
from typing import NamedTuple
from qt.core import (
QFont,
QFontInfo,
QFrame,
QHBoxLayout,
QIcon,
QPalette,
QSize,
QSizePolicy,
Qt,
QTextBrowser,
QTextEdit,
QToolButton,
QUrl,
QVBoxLayout,
QWidget,
pyqtSignal,
)
from calibre.utils.config_base import tweaks
from calibre.utils.logging import INFO, WARN
from calibre.utils.resources import get_image_path
class Browser(QTextBrowser):
    # Read-only rich text view that displays the chat transcript.

    def __init__(self, parent: QWidget = None):
        super().__init__(parent)
        # Links are handled by the owning widget via the anchorClicked signal.
        self.setOpenLinks(False)
        self.setMinimumHeight(150)
        self.setFrameShape(QFrame.Shape.NoFrame)
        self.setContentsMargins(0, 0, 0, 0)
        self.document().setDocumentMargin(0)
        self.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)
        # Let users grow/shrink the chat font via the tweak.
        if tweaks['change_ai_chat_font_size_by']:
            font = QFont(self.font())
            font.setPixelSize(QFontInfo(font).pixelSize() + tweaks['change_ai_chat_font_size_by'])
            self.setFont(font)

    def sizeHint(self) -> QSize:
        return QSize(600, 500)

    def setHtml(self, html: str) -> None:
        super().setHtml(html)
        # setHtml() resets the document, so re-apply the zero margin.
        self.document().setDocumentMargin(0)

    def scroll_to_bottom(self) -> None:
        self.verticalScrollBar().setValue(self.verticalScrollBar().maximum())
class Button(NamedTuple):
    # An icon hyperlink rendered inside a chat block header.
    icon: str  # image file name, resolved via get_image_path()
    link: str  # href emitted when the button is activated
    tooltip: str = ''

    def as_html(self, line_spacing: int) -> str:
        # Render as an <img> wrapped in a link, sized slightly smaller than
        # the header line so it aligns with the title text.
        path = get_image_path(self.icon)
        url = QUrl.fromLocalFile(path).toString(QUrl.ComponentFormattingOption.FullyEncoded)
        sz = line_spacing - 2
        return f'''<a style="text-decoration: none" href="{escape(self.link)}" title="{escape(self.tooltip)}"
><img height="{sz}" width="{sz}" src="{url}"></a>'''
class Header(NamedTuple):
    # Optional title plus action buttons shown above a chat block.
    title: str = ''
    buttons: tuple[Button, ...] = ()

    def as_html(self, line_spacing: int) -> str:
        # Render as a single-row table with the title on the left and the
        # buttons right-aligned; falls back to a plain div without buttons.
        # NOTE(review): the <b><i> tags are never closed; Qt's rich text
        # engine appears to tolerate this -- confirm intentional.
        links = '\xa0\xa0'.join(b.as_html(line_spacing) for b in self.buttons)
        title = '<b><i>' + escape(self.title)
        if links:
            return f'''<table width="100%" cellpadding="0" cellspacing="0"><tr><td>{title}\xa0</td>
<td style="text-align: right">{links}</td></tr></table>'''
        return f'<div>{title}</div>'
class InputEdit(QTextEdit):
    # Multi-line input that grows with its content up to a maximum height
    # and emits returnPressed when the user hits Enter (or Ctrl+Enter).

    returnPressed = pyqtSignal()

    def __init__(self, parent: QWidget = None, placeholder_text: str = ''):
        super().__init__(parent)
        self.setPlaceholderText(placeholder_text)
        # Vertical space consumed by the frame and margins, independent of content.
        self.height_for_frame = 2 * self.frameWidth() + self.contentsMargins().top() + self.contentsMargins().bottom()
        self.line_height = ceil(self.fontMetrics().lineSpacing())
        # Grow up to three text lines by default; see set_max_height().
        self.maximum_height = 3 * self.line_height + self.height_for_frame
        self.min_height = self.line_height + self.height_for_frame
        self.textChanged.connect(self.adjust_height)
        self.adjust_height()

    def calculate_single_line_height(self) -> int:
        line_height = self.fontMetrics().lineSpacing()
        return ceil(line_height + self.height_for_frame)

    def adjust_height(self) -> None:
        # Fit the widget to its document, clamped to [min_height, maximum_height].
        doc_height = ceil(self.document().size().height())
        self.setFixedHeight(max(self.min_height, min(doc_height + self.height_for_frame, self.maximum_height)))
        self.ensureCursorVisible()

    def set_max_height(self, val: int) -> None:
        self.maximum_height = val
        self.adjust_height()

    def keyPressEvent(self, event):
        # Plain Enter or Ctrl+Enter sends; other modifier combinations fall
        # through to the default handler (inserting a newline).
        if event.key() in (Qt.Key.Key_Return, Qt.Key.Key_Enter):
            mods = event.modifiers() & (
                Qt.KeyboardModifier.ShiftModifier | Qt.KeyboardModifier.ControlModifier |
                Qt.KeyboardModifier.AltModifier | Qt.KeyboardModifier.MetaModifier)
            if mods in (Qt.KeyboardModifier.NoModifier, Qt.KeyboardModifier.ControlModifier):
                self.returnPressed.emit()
                return
        super().keyPressEvent(event)

    @property
    def value(self) -> str:
        return self.toPlainText()

    @value.setter
    def value(self, val: str) -> None:
        self.setPlainText(val)
class Input(QWidget):
    # Input row: the growing text edit plus a send button.

    send_requested = pyqtSignal()

    def __init__(self, parent: QWidget = None, placeholder_text: str = ''):
        super().__init__(parent)
        l = QHBoxLayout(self)
        l.setContentsMargins(0, 0, 0, 0)
        self.text_input = ti = InputEdit(self, placeholder_text)
        # Enter in the text edit is equivalent to clicking the send button.
        ti.returnPressed.connect(self.send_requested)
        l.addWidget(ti)
        self.send_button = b = QToolButton(self)
        b.setIcon(QIcon.ic('send.png'))
        b.setToolTip(_('Send query to AI'))
        b.clicked.connect(self.send_requested)
        l.addWidget(b, alignment=Qt.AlignmentFlag.AlignCenter)

    def setFocus(self, reason) -> None:
        # Forward focus to the text edit.
        self.text_input.setFocus(reason)

    @property
    def value(self) -> str:
        return self.text_input.value

    @value.setter
    def value(self, val: str) -> None:
        self.text_input.value = val

    def set_max_height(self, val: int) -> None:
        self.text_input.set_max_height(val)
class ChatWidget(QWidget):
link_clicked = pyqtSignal(QUrl)
input_from_user = pyqtSignal(str)
def __init__(self, parent: QWidget = None, placeholder_text: str = '', disclaimer_text: str | None = None):
super().__init__(parent)
if disclaimer_text is None:
disclaimer_text = _(
'AI generated answers can be inaccurate, please verify any answers before acting on them.')
self.disclaimer_text = disclaimer_text
l = QVBoxLayout(self)
l.setContentsMargins(0, 0, 0, 0)
self.browser = b = Browser(self)
b.anchorClicked.connect(self.link_clicked)
l.addWidget(b)
self.input = iw = Input(parent=self, placeholder_text=placeholder_text)
iw.send_requested.connect(self.on_input)
l.addWidget(iw)
self.blocks: list[str] = []
self.current_message = ''
pal = self.palette()
self.response_color = pal.color(QPalette.ColorRole.Window).name()
self.base_color = pal.color(QPalette.ColorRole.Base).name()
self.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)
self.line_spacing = self.browser.fontMetrics().lineSpacing()
def setFocus(self, reason) -> None:
self.input.setFocus(reason)
def wrap_content_in_padding_table(self, html: str, background_color: str = '') -> str:
style = f'style="background-color: {background_color}"' if background_color else ''
return f'''<table width="100%" {style} cellpadding="2"><tr><td>{html}</td></tr></table>'''
# API {{{
def add_block(self, body_html: str, header: Header = Header(), is_response: bool = False) -> None:
self.current_message = ''
html = ''
if header.title or header.buttons:
html += f'<div>{header.as_html(self.line_spacing)}</div>'
html += f'<div>{body_html}</div>'
bg = self.response_color if is_response else self.base_color
self.blocks.append(self.wrap_content_in_padding_table(html, bg))
def replace_last_block(self, body_html: str, header: Header = Header(), is_response: bool = False) -> None:
if self.blocks:
del self.blocks[-1]
self.add_block(body_html, header, is_response)
def show_message(self, msg_html: str, details: str = '', level: int = INFO) -> None:
self.blocks = []
style = ''
if level == WARN:
style += 'color: orange;'
elif level > WARN:
style += 'color: red;'
html = f'<div style="{style}">{msg_html}</div>'
if details:
html += f"<pre>{_('Details:')}\n{escape(details)}</pre>"
self.current_message = self.wrap_content_in_padding_table(html)
self.re_render()
def clear(self) -> None:
self.current_message = ''
self.blocks = []
self.re_render()
def set_input_enabled(self, enabled: bool) -> None:
self.input.setEnabled(enabled)
def scroll_to_bottom(self) -> None:
self.browser.scroll_to_bottom()
# }}}
def resizeEvent(self, ev) -> None:
super().resizeEvent(ev)
self.input.set_max_height(ceil(self.height() * 0.25))
def re_render(self) -> None:
if self.current_message:
self.browser.setHtml(self.current_message)
else:
html = '\n\n'.join(self.blocks)
html += self.wrap_content_in_padding_table(f'<p><i>{escape(self.disclaimer_text)}</i></p>')
self.browser.setHtml(html)
    def on_input(self) -> None:
        # Emit the typed text to listeners and clear the box for the next message.
        text = self.input.value
        self.input.value = ''
        self.input_from_user.emit(text)
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/gui2/chat_widget.py",
"license": "GNU General Public License v3.0",
"lines": 203,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/ai/utils.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import datetime
import http
import json
import os
import re
import tempfile
from collections.abc import Callable, Iterable, Iterator, Sequence
from contextlib import suppress
from enum import Enum, auto
from functools import lru_cache
from threading import Thread
from typing import Any
from urllib.error import HTTPError, URLError
from urllib.request import ProxyHandler, Request, build_opener
from calibre import get_proxies
from calibre.ai import ChatMessage, ChatMessageType, ChatResponse, Citation, WebLink
from calibre.constants import __version__
from calibre.customize import AIProviderPlugin
from calibre.customize.ui import available_ai_provider_plugins
def atomic_write(path, data):
    '''
    Atomically write data (str or bytes) to path, creating parent
    directories as needed. The data is written to a temporary file in the
    same directory and then renamed over the destination, so readers never
    see a partially written file.
    '''
    mode = 'w' if isinstance(data, str) else 'wb'
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with tempfile.NamedTemporaryFile(mode, delete=False, dir=os.path.dirname(path)) as f:
        f.write(data)
    # Rename only after the temp file has been closed: replacing a file that
    # is still open fails on Windows.
    os.replace(f.name, path)
def opener(user_agent=f'calibre {__version__}'):
    # Build a urllib opener that honors calibre's proxy settings and
    # identifies itself with the given User-Agent string.
    handler = ProxyHandler(get_proxies(debug=False))
    op = build_opener(handler)
    op.addheaders = [('User-agent', user_agent)]
    return op
def download_data(url: str, headers: Sequence[tuple[str, str]] = ()) -> bytes:
    # Fetch url via the proxy-aware opener, sending any extra headers, and
    # return the raw response body.
    o = opener()
    for header in headers:
        o.addheaders.append(header)
    with o.open(url) as response:
        return response.read()
def update_cached_data(path: str, url: str, headers: Sequence[tuple[str, str]] = ()) -> None:
    # Refresh the on-disk cache at path with the latest data from url.
    atomic_write(path, download_data(url, headers))
def schedule_update_of_cached_data(path: str, url: str, headers: Sequence[tuple[str, str]] = ()) -> None:
    # Kick off a background refresh of the cached file, at most once a day.
    # A missing/unreadable file counts as stale.
    try:
        mtime = os.path.getmtime(path)
    except OSError:
        mtime = 0
    age = datetime.datetime.now() - datetime.datetime.fromtimestamp(mtime)
    if age < datetime.timedelta(days=1):
        return
    Thread(daemon=True, name='AIDataDownload', target=update_cached_data, args=(path, url, headers)).start()
def get_cached_resource(path: str, url: str, headers: Sequence[tuple[str, str]] = ()) -> bytes:
    '''Return cached bytes for url, downloading and caching on a miss.

    On a cache hit a background refresh is scheduled so the cache stays
    reasonably fresh without blocking the caller.
    '''
    try:
        with open(path, 'rb') as f:
            cached = f.read()
    except OSError:
        fresh = download_data(url, headers)
        atomic_write(path, fresh)
        return fresh
    schedule_update_of_cached_data(path, url, headers)
    return cached
def _read_response(buffer: str) -> Iterator[dict[str, Any]]:
if not buffer.startswith('data: '):
return
buffer = buffer[6:].rstrip()
if buffer == '[DONE]':
return
yield json.loads(buffer)
def read_streaming_response(rq: Request, provider_name: str = 'AI provider', timeout: int = 120) -> Iterator[dict[str, Any]]:
    # Stream server-sent events from an HTTP endpoint, yielding each decoded
    # JSON event. Events are accumulated until a blank separator line, then
    # parsed by _read_response(); a trailing unterminated event is flushed at EOF.
    with opener().open(rq, timeout=timeout) as response:
        # NOTE(review): urllib openers normally raise HTTPError for non-2xx
        # statuses, so this check is likely belt-and-braces — confirm.
        if response.status != http.HTTPStatus.OK:
            details = ''
            with suppress(Exception):
                details = response.read().decode('utf-8', 'replace')
            raise Exception(f'Reading from {provider_name} failed with HTTP response status: {response.status} and body: {details}')
        buffer = ''
        for raw_line in response:
            line = raw_line.decode('utf-8')
            if line.strip() == '':
                # Blank line terminates the current event.
                if buffer:
                    yield from _read_response(buffer)
                    buffer = ''
            else:
                buffer += line
        # Flush any final event not followed by a blank line.
        yield from _read_response(buffer)
def chat_with_error_handler(it: Iterable[ChatResponse]) -> Iterator[ChatResponse]:
    # Wrap a chat-response stream so that network/HTTP failures are converted
    # into a final ChatResponse carrying the exception and a human-readable
    # details string, instead of propagating out of the generator.
    try:
        yield from it
    except HTTPError as e:
        try:
            details = e.fp.read().decode('utf-8', 'replace')
        except Exception:
            details = ''
        # Many providers return a JSON body {"error": {"message": ...}};
        # prefer that message when present.
        try:
            error_json = json.loads(details)
            details = error_json.get('error', {}).get('message', details)
        except Exception:
            pass
        yield ChatResponse(exception=e, error_details=details)
    except URLError as e:
        yield ChatResponse(exception=e, error_details=f'Network error: {e.reason}')
    except Exception as e:
        import traceback
        yield ChatResponse(exception=e, error_details=traceback.format_exc())
class ContentType(Enum):
    # How the text of an AI response should be interpreted when rendering.
    unknown = auto()
    markdown = auto()
# NOTE(review): prefix for internally generated reference-link anchors; the
# consumers of this constant are not visible in this file chunk.
ref_link_prefix = 'calibre-link-'
def add_citation(text: str, citation: Citation, web_links: Sequence[WebLink], escaped_titles: Sequence[str]) -> str:
    # Embed one citation into text as markdown. A citation with a single link
    # turns the cited span itself into an inline link; multiple links are
    # appended after the span as numbered superscript references.
    if len(citation.links) == 1:
        wl = web_links[citation.links[0]]
        escaped_title = escaped_titles[citation.links[0]]
        return (
            text[:citation.start_offset] +
            f'[{text[citation.start_offset:citation.end_offset]}]({wl.uri} "{escaped_title}")' +
            text[citation.end_offset:])
    citation_links = []
    for i, link_num in enumerate(citation.links):
        wl = web_links[link_num]
        title = escaped_titles[link_num]
        # 1-based numbering for the superscript markers.
        citation_links.append(f'[{i+1}]({wl.uri} "{title}")')
    return text[:citation.end_offset] + '<sup>' + ', '.join(citation_links) + '</sup>' + text[citation.end_offset:]
def add_citations(text: str, metadata: ChatMessage) -> str:
    '''Embed all citation links from metadata into text as markdown.'''
    citations, web_links = metadata.citations, metadata.web_links
    if not (citations and web_links):
        return text
    escaped_titles = tuple(wl.title.replace('"', r'\"') for wl in web_links)
    # Apply citations from the end of the text backwards so that earlier
    # offsets stay valid as the text grows.
    for c in sorted(citations, key=lambda c: c.end_offset, reverse=True):
        if c.links:
            text = add_citation(text, c, web_links, escaped_titles)
    return text
class StreamedResponseAccumulator:
    '''Accumulates streamed ChatResponse chunks into a final ChatMessage.'''
    def __init__(self):
        # Both start as the same empty string; str is immutable so sharing is safe.
        self.all_reasoning = self.all_content = ''
        self.all_reasoning_details: list[dict[str, Any]] = []
        self.metadata = ChatResponse()
        self.messages: list[ChatMessage] = []
        self.response_id: str = ''
    @property
    def content_type(self) -> ContentType:
        # Responses with citations are markdown, since add_citations() emits
        # markdown link syntax.
        return ContentType.markdown if self.metadata.citations else ContentType.unknown
    def __iter__(self) -> Iterator[ChatMessage]:
        return iter(self.messages)
    def accumulate(self, m: ChatResponse) -> None:
        # Fold one streamed chunk into the running state.
        if m.has_metadata:
            self.metadata = m
        if m.reasoning:
            self.all_reasoning += m.reasoning
            self.all_reasoning_details.extend(m.reasoning_details)
        if m.content:
            self.all_content += m.content
        if m.id:
            self.response_id = m.id
    def finalize(self) -> None:
        # Convert the accumulated stream into a single assistant message with
        # citations embedded into the content.
        self.messages.append(ChatMessage(
            type=ChatMessageType.assistant, query=add_citations(self.all_content, self.metadata), reasoning=self.all_reasoning,
            reasoning_details=tuple(self.all_reasoning_details), response_id=self.response_id,
        ))
@lru_cache(2)
def markdown_patterns(detect_code: bool = False) -> dict[re.Pattern[str], float]:
ans = {re.compile(pat): score for pat, score in {
# Check for Markdown headers (# Header, ## Subheader, etc.)
r'(?m)^#{1,6}\s+.+$': 0.15,
# Check for Markdown two part links and footnotes [..]:
r'(?m)^\[\.+?\]: ': 0.15,
# Check for bold (**text**)
r'\*\*.+?\*\*': 0.05,
# Check for italics (*text*)
r'\*[^*\n]+\*': 0.05,
# Check for unordered lists
r'(?m)^[\s]*[-*+][\s]+.+$': 0.1,
# Check for ordered lists
r'(?m)^[\s]*\d+\.[\s]+.+$': 0.1,
# Check for blockquotes
r'(?m)^[\s]*>[\s]*.+$': 0.1,
# Check for links ([text](url))
r'\[.+?\]\(.+?\)': 0.15,
# Check for tables
r'\|.+\|[\s]*\n\|[\s]*[-:]+[-|\s:]+[\s]*\n': 0.1,
}.items()}
if detect_code:
# Check for inline code (`code`)
ans[re.compile(r'`[^`\n]+`')] = 0.1
# Check for code blocks (```code```)
ans[re.compile(r'```[\s\S]*?```')] = 0.2 # very markdown specific
return ans
def is_probably_markdown(text: str, threshold: float = -1, detect_code: bool = False) -> bool:
    '''Heuristically decide whether text looks like Markdown.

    Each matching pattern adds its score; return True once the accumulated
    score reaches the threshold. A negative threshold selects a default
    (higher when code patterns are also considered).
    '''
    if threshold < 0:
        threshold = 0.4 if detect_code else 0.2
    if not text:
        return False
    score = 0
    # Bug fix: pass detect_code through so the inline/fenced code patterns
    # actually participate. Previously markdown_patterns() was always called
    # with its default, so code markers were never checked even though the
    # threshold was raised for them.
    for pattern, pscore in markdown_patterns(detect_code).items():
        if pattern.search(text) is not None:
            score += pscore
            if score >= threshold:
                return True
    return False
@lru_cache(64)
def response_to_html(text: str, content_type: ContentType = ContentType.unknown, detect_code: bool = False) -> str:
    '''Convert an AI response to HTML.

    A known content type (currently only markdown) is rendered as Markdown;
    an unknown one is sniffed with is_probably_markdown() and falls back to
    escaped plain text with <br> line breaks.
    '''
    # Bug fix: compare the *argument* against the sentinel. The old code
    # tested `ContentType is ContentType.unknown` (the class vs a member),
    # which is always False, so every response was rendered as Markdown.
    if content_type is ContentType.unknown:
        is_markdown = is_probably_markdown(text, detect_code=detect_code)
    else:
        is_markdown = True
    if is_markdown:
        from calibre.ebooks.txt.processor import create_markdown_object
        md = create_markdown_object(('tables', 'footnotes'))
        return md.convert(text)
    from html import escape
    return escape(text).replace('\n', '<br>')
def develop_text_chat(
    text_chat: Callable[[Iterable[ChatMessage], str], Iterator[ChatResponse]], use_model: str = '',
    messages: Sequence[ChatMessage] = (),
):
    '''Development helper: run a streaming chat and print content, reasoning,
    citations and cost metadata to stdout.'''
    acc = StreamedResponseAccumulator()
    messages = messages or (
        ChatMessage(type=ChatMessageType.system, query='You are William Shakespeare.'),
        ChatMessage('Write twenty lines on my supremely beautiful wife. Assume she has honey gold skin and a brilliant smile.')
    )
    for x in text_chat(messages, use_model):
        if x.exception:
            # Bug fix: parenthesize the conditional suffix. Previously the
            # whole concatenation was the conditional's "true" branch, so an
            # error without details raised SystemExit('') and the exception
            # text was lost.
            raise SystemExit(str(x.exception) + ((': ' + x.error_details) if x.error_details else ''))
        acc.accumulate(x)
        if x.content:
            print(end=x.content, flush=True)
    acc.finalize()
    print()
    if acc.all_reasoning:
        print('Reasoning:')
        print(acc.all_reasoning.strip())
        print()
    if acc.metadata.citations:
        print('Response with citations inline:')
        print(acc.messages[-1].query.strip())
    if acc.metadata.has_metadata:
        x = acc.metadata
        print(f'\nCost: {x.cost} {x.currency} Provider: {x.provider!r} Model: {x.model!r}')
    messages = list(messages)
    messages.extend(acc.messages)
    print('Messages:')
    from pprint import pprint
    for msg in messages:
        pprint(msg)
def plugin_for_name(plugin_name: str) -> AIProviderPlugin:
    '''Return the available AI provider plugin with the given name.

    Raises KeyError when no such plugin is available.
    '''
    matches = (p for p in available_ai_provider_plugins() if p.name == plugin_name)
    ans = next(matches, None)
    if ans is None:
        raise KeyError(f'No plugin named {plugin_name} is available')
    return ans
def configure(plugin_name: str, parent: Any = None) -> None:
    # Show a modal configuration dialog for the named AI provider plugin and
    # persist its settings when the user accepts with valid input.
    from qt.core import QDialog, QDialogButtonBox, QVBoxLayout
    from calibre.gui2 import ensure_app
    ensure_app(headless=False)
    plugin = plugin_for_name(plugin_name)
    cw = plugin.config_widget()
    class D(QDialog):
        def accept(self):
            # Keep the dialog open while the config widget reports invalid settings.
            if not cw.validate():
                return
            super().accept()
    d = D(parent=parent)
    l = QVBoxLayout(d)
    l.addWidget(cw)
    bb = QDialogButtonBox(QDialogButtonBox.StandardButton.Ok | QDialogButtonBox.StandardButton.Cancel)
    bb.accepted.connect(d.accept)
    bb.rejected.connect(d.reject)
    l.addWidget(bb)
    d.resize(d.sizeHint())
    if d.exec() == QDialog.DialogCode.Accepted:
        plugin.save_settings(cw)
def reasoning_strategy_config_widget(current_val: str = 'auto', parent: Any = None) -> Any:
    # Build a combo box for choosing the reasoning effort, pre-selecting
    # current_val (falling back to the first entry if it is unknown).
    from qt.core import QComboBox
    rs = QComboBox(parent)
    rs.addItem(_('Automatic'), 'auto')
    rs.addItem(_('Medium'), 'medium')
    rs.addItem(_('High'), 'high')
    rs.addItem(_('Low'), 'low')
    rs.addItem(_('No reasoning'), 'none')
    rs.setCurrentIndex(max(0, rs.findData(current_val)))
    rs.setToolTip('<p>'+_(
        'Select how much "reasoning" AI does when answering queries. More reasoning leads to'
        ' better quality responses at the cost of increased cost and reduced speed.'))
    return rs
def model_choice_strategy_config_widget(current_val: str = 'medium', parent: Any = None) -> Any:
    # Build a combo box for choosing the model selection strategy,
    # pre-selecting current_val (falling back to the first entry if unknown).
    from qt.core import QComboBox
    ms = QComboBox(parent)
    ms.addItem(_('Cheap and fastest'), 'low')
    ms.addItem(_('Medium'), 'medium')
    ms.addItem(_('High quality, expensive and slower'), 'high')
    ms.setCurrentIndex(max(0, ms.findData(current_val)))
    ms.setToolTip('<p>' + _(
        'The model choice strategy controls how a model to query is chosen. Cheaper and faster models give lower'
        ' quality results.'
    ))
    return ms
def find_tests() -> 'unittest.TestSuite':
    '''Return a unittest suite with this module's tests.

    Bug fix: the previous ``-> None`` annotation was wrong — the function
    returns the loaded TestSuite. The annotation is quoted because unittest
    is imported lazily inside the function.
    '''
    import unittest
    class TestAIUtils(unittest.TestCase):
        def test_ai_response_accumulator(self):
            a = StreamedResponseAccumulator()
            a.accumulate(ChatResponse('an initial msg'))
            a.accumulate(ChatResponse('. more text.'))
            a.accumulate(ChatResponse(has_metadata=True, citations=[
                Citation([0], 3, 3 + len('initial')),
                Citation([0, 1], 3 + len('initial '), 3 + len('initial msg'))
            ], web_links=[WebLink('link1', 'dest1'), WebLink('link2', 'dest2')]
            ))
            a.finalize()
            self.assertEqual(a.messages[-1].query, 'an [initial](dest1 "link1") msg<sup>[1](dest1 "link1"), [2](dest2 "link2")</sup>. more text.')
    return unittest.defaultTestLoader.loadTestsFromTestCase(TestAIUtils)
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/utils.py",
"license": "GNU General Public License v3.0",
"lines": 304,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/ai/prefs.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
from collections.abc import Iterator
from copy import deepcopy
from functools import lru_cache
from typing import Any
from calibre.ai import AICapabilities
from calibre.customize import AIProviderPlugin
from calibre.customize.ui import available_ai_provider_plugins
from calibre.utils.config import JSONConfig
from calibre.utils.icu import primary_sort_key
from polyglot.binary import as_hex_unicode, from_hex_unicode
@lru_cache(2)
def prefs() -> JSONConfig:
    # Singleton accessor (via lru_cache) for the AI settings store.
    ans = JSONConfig('ai', permissions=0o600)  # make readable only by user as it stores secrets
    ans.defaults['providers'] = {}
    ans.defaults['purpose_map'] = {}
    ans.defaults['llm_localized_results'] = 'never'
    return ans
def pref_for_provider(name: str, key: str, defval: Any = None) -> Any:
    # Read one setting for the named provider, returning defval when the
    # provider or key is absent. The broad except deliberately treats any
    # missing/malformed config as "use the default".
    try:
        return prefs()['providers'][name][key]
    except Exception:
        return defval
def set_prefs_for_provider(name: str, pref_map: dict[str, Any]) -> None:
    # Replace all stored settings for the named provider. A deep copy is
    # stored so later mutation of pref_map does not alter the config.
    conf = prefs()
    providers = conf['providers']
    providers[name] = deepcopy(pref_map)
    # Re-assign via set() so the config object notices the change and persists it.
    conf.set('providers', providers)
def plugins_for_purpose(purpose: AICapabilities) -> Iterator[AIProviderPlugin]:
    # Yield available plugins whose capabilities are a superset of purpose,
    # in name-sorted order.
    plugins = sorted(available_ai_provider_plugins(), key=lambda p: primary_sort_key(p.name))
    for plugin in plugins:
        if plugin.capabilities & purpose == purpose:
            yield plugin
def plugin_for_purpose(purpose: AICapabilities) -> AIProviderPlugin | None:
    # Return the user's configured plugin for this purpose, falling back to a
    # sensible default when none is configured; None when nothing is compatible.
    compatible_plugins = {p.name: p for p in plugins_for_purpose(purpose)}
    q = prefs()['purpose_map'].get(purpose.purpose, '')
    if ans := compatible_plugins.get(q):
        return ans
    if compatible_plugins:
        from calibre.ai.google import GoogleAI
        # Prefer Google for text to text as it gives us 1500 free web searches per day
        if purpose == AICapabilities.text_to_text:
            for name, p in compatible_plugins.items():
                if name == GoogleAI.name:
                    return p
        # Otherwise fall back to the first compatible plugin (name order).
        return next(iter(compatible_plugins.values()))
    return None
def encode_secret(text: str) -> str:
    # Obfuscate (not encrypt) a secret for storage in the JSON config.
    return as_hex_unicode(text)
def decode_secret(text: str) -> str:
    # Inverse of encode_secret().
    return from_hex_unicode(text)
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/prefs.py",
"license": "GNU General Public License v3.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
kovidgoyal/calibre:src/calibre/ai/config.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
from qt.core import QComboBox, QDialog, QGroupBox, QHBoxLayout, QLabel, QStackedLayout, QVBoxLayout, QWidget
from calibre.ai import AICapabilities
from calibre.ai.prefs import plugin_for_purpose, plugins_for_purpose, prefs
from calibre.gui2 import Application, error_dialog
class ConfigureAI(QWidget):
    '''Composite widget that lets the user pick an AI provider capable of the
    given purpose and configure it via the provider's own config widget.'''
    def __init__(self, purpose: AICapabilities = AICapabilities.text_to_text, parent: QWidget | None = None):
        super().__init__(parent)
        plugins = tuple(plugins_for_purpose(purpose))
        self.available_plugins = plugins
        self.purpose = purpose
        self.plugin_config_widgets: tuple[QWidget, ...] = tuple(p.config_widget() for p in plugins)
        v = QVBoxLayout(self)
        self.gb = QGroupBox(self)
        # One stacked page per provider's config widget.
        self.stack = s = QStackedLayout(self.gb)
        for pc in self.plugin_config_widgets:
            pc.setParent(self)
            s.addWidget(pc)
        if len(plugins) > 1:
            # Multiple providers: show a combo to switch between their pages.
            self.provider_combo = pcb = QComboBox(self)
            pcb.addItems([p.name for p in plugins])
            la = QLabel(_('AI &provider:'))
            la.setBuddy(pcb)
            h = QHBoxLayout()
            h.addWidget(la), h.addWidget(pcb), h.addStretch()
            v.addLayout(h)
            pcb.currentIndexChanged.connect(self.stack.setCurrentIndex)
            # Pre-select the provider currently configured for this purpose.
            idx = pcb.findText(getattr(plugin_for_purpose(self.purpose), 'name', ''))
            pcb.setCurrentIndex(max(0, idx))
        elif len(plugins) == 1:
            self.gb.setTitle(_('Configure AI provider: {}').format(plugins[0].name))
        else:
            self.none_label = la = QLabel(_('No AI providers found that have the capabilities: {}. Make sure you have not'
                ' disabled some AI provider plugins').format(purpose))
            s.addWidget(la)
        v.addWidget(self.gb)
    @property
    def is_ready_for_use(self) -> bool:
        # True when the selected provider reports it is fully configured.
        if not self.available_plugins:
            return False
        return self.plugin_config_widgets[self.current_idx].is_ready_for_use
    @property
    def current_idx(self) -> int:
        # Index of the selected provider (0 when there is no combo box).
        if len(self.available_plugins) < 2:
            return 0
        return self.provider_combo.currentIndex()
    def validate(self) -> bool:
        # Show an error dialog and return False when settings are unusable.
        if not self.available_plugins:
            error_dialog(self, _('No AI providers'), self.none_label.text(), show=True)
            return False
        return self.plugin_config_widgets[self.current_idx].validate()
    def commit(self) -> bool:
        # Persist the selected provider's settings and record it as the
        # preferred provider for this purpose.
        # NOTE(review): validate() is effectively called twice here (once via
        # self.validate() and once via w.validate()) — looks redundant; confirm.
        if not self.validate():
            return False
        idx = self.current_idx
        p, w = self.available_plugins[idx], self.plugin_config_widgets[idx]
        if not w.validate():
            return False
        p.save_settings(w)
        pmap = prefs()['purpose_map']
        pmap[self.purpose.purpose] = p.name
        prefs().set('purpose_map', pmap)
        return True
if __name__ == '__main__':
    # Manual test harness: show the ConfigureAI widget in a bare dialog.
    app = Application([])
    d = QDialog()
    v = QVBoxLayout(d)
    w = ConfigureAI(parent=d)
    v.addWidget(w)
    d.exec()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/config.py",
"license": "GNU General Public License v3.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
kovidgoyal/calibre:src/calibre/ai/open_router/config.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import datetime
import textwrap
from functools import partial
from typing import TYPE_CHECKING, Any
from qt.core import (
QAbstractItemView,
QAbstractListModel,
QCheckBox,
QComboBox,
QDialog,
QDialogButtonBox,
QFormLayout,
QHBoxLayout,
QIcon,
QLabel,
QLineEdit,
QListView,
QLocale,
QModelIndex,
QPushButton,
QSize,
QSortFilterProxyModel,
QSplitter,
Qt,
QTextBrowser,
QUrl,
QVBoxLayout,
QWidget,
pyqtSignal,
)
from calibre.ai import AICapabilities
from calibre.ai.open_router import OpenRouterAI
from calibre.ai.prefs import decode_secret, encode_secret, pref_for_provider, set_prefs_for_provider
from calibre.ai.utils import configure, reasoning_strategy_config_widget
from calibre.customize.ui import available_ai_provider_plugins
from calibre.ebooks.txt.processor import create_markdown_object
from calibre.gui2 import error_dialog, gprefs, safe_open_url
from calibre.gui2.widgets2 import Dialog
from calibre.utils.date import qt_from_dt
from calibre.utils.icu import primary_sort_key
# Convenience partial: read a preference scoped to the OpenRouter provider.
pref = partial(pref_for_provider, OpenRouterAI.name)
if TYPE_CHECKING:
    from calibre.ai.open_router.backend import Model as AIModel
class Model(QWidget):
    '''Shows the currently selected model name with a button to change it.

    Emits select_model(model_id, for_text) when the user clicks Change.'''
    select_model = pyqtSignal(str, bool)
    def __init__(self, for_text: bool = True, parent: QWidget | None = None):
        super().__init__(parent)
        l = QHBoxLayout(self)
        l.setContentsMargins(0, 0, 0, 0)
        self.for_text = for_text
        # Stored pref is a (model_id, model_name) pair; empty id means automatic.
        self.model_id, self.model_name = pref(
            'text_model' if for_text else 'text_to_image_model', ('', _('Automatic')))
        self.la = la = QLabel(self.model_name)
        self.setToolTip(_('The model to use for text related tasks') if for_text else _(
            'The model to use for generating images from text'))
        self.setToolTip(self.toolTip() + '\n\n' + _(
            'If not specified an appropriate model is chosen automatically.\n'
            'See the option for "Model choice strategy" to control how models are automatically chosen.'))
        self.b = b = QPushButton(_('&Change'))
        b.setToolTip(_('Choose a model'))
        l.addWidget(la), l.addWidget(b)
        b.clicked.connect(self._select_model)
    def set(self, model_id: str, model_name: str) -> None:
        # Update the current selection and its display label.
        self.model_id, self.model_name = model_id, model_name
        self.la.setText(self.model_name)
    def _select_model(self):
        # Ask the owner (via signal) to show the model chooser dialog.
        self.select_model.emit(self.model_id, self.for_text)
class ModelsModel(QAbstractListModel):
    '''List model over all OpenRouter models matching the given capabilities.

    Roles: DisplayRole -> model name, UserRole -> the model object,
    UserRole+1 -> the precomputed sort key tuple for that row.'''
    def __init__(self, capabilities, parent: QWidget | None = None):
        super().__init__(parent)
        # Locate the live backend module of the OpenRouter plugin.
        for plugin in available_ai_provider_plugins():
            if plugin.name == OpenRouterAI.name:
                self.backend = plugin.builtin_live_module
                break
        else:
            raise ValueError('Could not find OpenRouterAI plugin')
        self.all_models_map = self.backend.get_available_models()
        # Keep only models whose capabilities are a superset of what is needed.
        self.all_models = tuple(filter(
            lambda m: capabilities & m.capabilities == capabilities, self.all_models_map.values()))
        self.sorts = tuple(primary_sort_key(m.name) for m in self.all_models)
    def generate_sorts(self, *sorts):
        # Precompute a sort key tuple per model from the given key functions.
        self.sorts = tuple(tuple(f(m) for f in sorts) for m in self.all_models)
    def rowCount(self, parent):
        return len(self.all_models)
    def data(self, index, role):
        try:
            m = self.all_models[index.row()]
        except IndexError:
            return None
        if role == Qt.ItemDataRole.DisplayRole:
            return m.name
        if role == Qt.ItemDataRole.UserRole:
            return m
        if role == Qt.ItemDataRole.UserRole + 1:
            return self.sorts[index.row()]
        return None
class ProxyModels(QSortFilterProxyModel):
    '''Filter/sort proxy over ModelsModel, with pluggable filter predicates
    and sort key functions.'''
    def __init__(self, capabilities, parent=None):
        super().__init__(parent)
        self.source_model = ModelsModel(capabilities, self)
        self.source_model.generate_sorts(lambda x: primary_sort_key(x.name))
        self.setSourceModel(self.source_model)
        self.filters = []
        # Sorting is driven by the precomputed key tuples exposed at UserRole+1.
        self.setSortRole(Qt.ItemDataRole.UserRole+1)
    def filterAcceptsRow(self, source_row: int, source_parent) -> bool:
        # A row is shown only when every active filter predicate accepts it.
        try:
            m = self.source_model.all_models[source_row]
        except IndexError:
            return False
        for f in self.filters:
            if not f(m):
                return False
        return True
    def lessThan(self, left, right):
        return left.data(self.sortRole()) < right.data(self.sortRole())
    def set_filters(self, *filters):
        self.filters = filters
        self.invalidate()
    def set_sorts(self, *sorts):
        self.source_model.generate_sorts(*sorts)
        self.invalidate()
    def index_for_model_id(self, model_id: str) -> QModelIndex:
        # Bug fix: the return annotation used to be ``QModelIndex()`` — a
        # *call* that instantiated a throwaway object at definition time —
        # instead of the type itself.
        for i in range(self.rowCount(QModelIndex())):
            ans = self.index(i, 0)
            if ans.data(Qt.ItemDataRole.UserRole).id == model_id:
                return ans
        return QModelIndex()
class ModelDetails(QTextBrowser):
    '''Read-only pane showing help text or details for the selected model.'''
    def __init__(self, parent=None):
        super().__init__(parent)
        # Handle link clicks ourselves so they open in the system browser.
        self.setOpenLinks(False)
        self.anchorClicked.connect(self.open_link)
        self.show_help()
    def show_help(self):
        # Generic guidance shown when no model is selected.
        self.setText(f'''
    <p>{_('Pick an AI model to use. Generally, newer models are more capable but also more expensive.')}</p>
    <p>{_('By default, an appropriate AI model is chosen automatically based on the query being made.'
    ' By picking a model explicitly, you have more control over this process.')}</p>
    <p>{_('Another criterion to look for is if the model is <i>moderated</i> (that is, its output is filtered by the provider).')}</p>
    ''')
    def show_model_details(self, m: AIModel):
        # Render pricing, creation date, moderation status etc. for one model.
        if m.pricing.is_free:
            price = f"<b>{_('Free')}</b>"
        else:
            def fmt(p: float) -> str:
                # Format a dollar amount, dropping a trailing ".00".
                ans = f'$ {p:.2f}'
                ans = ans.removesuffix('.00')
                return ans
            price = ''
            # Pricing is stored per token/image; display per million/thousand.
            if m.pricing.input_token:
                price += f'{fmt(m.pricing.input_token * 1e6)}/M {_("input tokens")} '
            if m.pricing.output_token:
                price += f'{fmt(m.pricing.output_token * 1e6)}/M {_("output tokens")} '
            if m.pricing.image:
                price += f'$ {fmt(m.pricing.image * 1e3)}/K {_("input images")} '
        md = create_markdown_object(extensions=())
        created = qt_from_dt(m.created).date()
        html = f'''
    <h2>{m.name}</h2>
    <div>{md.convert(m.description)}</div>
    <h2>{_('Price')}</h2>
    <p>{price}</p>
    <h2>{_('Details')}</h2>
    <p>{_('Created:')} {QLocale.system().toString(created, QLocale.FormatType.ShortFormat)}<br>
    {_('Content moderated:')} {_('yes') if m.is_moderated else _('no')}<br>
    {_('Context length:')} {QLocale.system().toString(m.context_length)}<br>
    {_('Identifier:')} {m.id}<br>
    {_('See the model on')} <a href="https://openrouter.ai/{m.slug}">OpenRouter.ai</a>
    </p>
    '''
        self.setText(html)
    def sizeHint(self):
        return QSize(350, 500)
    def open_link(self, url: QUrl):
        # Relative links are treated as OpenRouter site paths.
        if url.host() == '':
            url = 'https://openrouter.ai/' + url.path().lstrip('/')
        safe_open_url(url)
class SortLoc(QComboBox):
    '''Combo box selecting one sort criterion for the model list.'''
    def __init__(self, initial='', parent=None):
        super().__init__(parent)
        # First entry is empty, meaning "no sort at this position".
        self.addItem('', '')
        self.addItem(_('Newest'), 'newest')
        self.addItem(_('Cheapest'), 'cheapest')
        self.addItem(_('Name'), 'name')
        self.addItem(_('Oldest'), 'oldest')
        self.addItem(_('Most expensive'), 'expensive')
        if (idx := self.findData(initial)) > -1:
            self.setCurrentIndex(idx)
    @property
    def sort_key(self) -> str:
        # The persisted identifier of the selected criterion.
        return self.currentData()
    @property
    def sort_key_func(self):
        # Return a key function implementing the selected criterion; the
        # fallback constant key leaves relative order unchanged.
        match self.sort_key:
            case 'oldest':
                return lambda x: x.created
            case 'newest':
                now = datetime.datetime.now(datetime.UTC)
                return lambda x: now - x.created
            case 'cheapest':
                return lambda x: x.pricing.output_token
            case 'expensive':
                return lambda x: -x.pricing.output_token
            case 'name':
                return lambda x: primary_sort_key(x.name)
        return lambda x: ''
class ChooseModel(Dialog):
    '''Dialog for browsing, filtering, sorting and selecting an OpenRouter model.

    Bug fix: the ``model_id`` property getter contained an unreachable leftover
    statement after its final ``return``; it has been removed.
    '''
    def __init__(
        self, model_id: str = '', capabilities: AICapabilities = AICapabilities.text_to_text, parent: QWidget | None = None
    ):
        self.capabilities = capabilities
        super().__init__(title=_('Choose an AI model'), name='open-router-choose-model', parent=parent)
        self.model_id = model_id
    def sizeHint(self):
        return QSize(700, 500)
    @property
    def model_id(self) -> str:
        # Identifier of the currently selected model; empty means automatic.
        ci = self.models.currentIndex()
        if ci.isValid():
            return ci.data(Qt.ItemDataRole.UserRole).id
        return ''
    @model_id.setter
    def model_id(self, val):
        self.models.setCurrentIndex(self.models.model().index_for_model_id(val))
    @property
    def model_name(self) -> str:
        # Display name of the currently selected model, if any.
        idx = self.models.currentIndex()
        if idx.isValid():
            return idx.data(Qt.ItemDataRole.DisplayRole)
        return ''
    def setup_ui(self):
        l = QVBoxLayout(self)
        # Filter controls: text search plus free/unmoderated toggles, with
        # their last state remembered in gprefs.
        self.only_free = of = QCheckBox(_('Only &free'))
        of.setChecked(bool(gprefs.get('openrouter-filter-only-free')))
        of.toggled.connect(self.update_filters)
        self.only_unmoderated = ou = QCheckBox(_('Only &unmoderated'))
        ou.setChecked(bool(gprefs.get('openrouter-filter-only-unmoderated')))
        ou.toggled.connect(self.update_filters)
        self.search = f = QLineEdit(self)
        f.setPlaceholderText(_('Search for models by name'))
        f.textChanged.connect(self.update_filters)
        f.setClearButtonEnabled(True)
        h = QHBoxLayout()
        h.addWidget(f), h.addWidget(of), h.addWidget(ou)
        l.addLayout(h)
        # Up to three chained sort criteria.
        h = QHBoxLayout()
        la = QLabel(_('S&ort by:'))
        h.addWidget(la)
        sorts = tuple(gprefs.get('openrouter-model-sorts') or ('newest', 'cheapest', 'name')) + ('', '', '')
        self.sorts = tuple(SortLoc(loc, self) for loc in sorts[:3])
        for s in self.sorts:
            h.addWidget(s)
            if s is not self.sorts[-1]:
                h.addWidget(QLabel(' ' + _('and') + ' '))
            s.currentIndexChanged.connect(self.update_sorts)
        la.setBuddy(self.sorts[0])
        h.addStretch()
        l.addLayout(h)
        # Model list on the left, details pane on the right.
        self.splitter = s = QSplitter(self)
        l.addWidget(s)
        self.models = m = QListView(self)
        m.setSelectionMode(QAbstractItemView.SelectionMode.SingleSelection)
        self.proxy_model = pm = ProxyModels(self.capabilities, m)
        m.setModel(pm)
        s.addWidget(m)
        self.details = d = ModelDetails(self)
        s.addWidget(d)
        m.selectionModel().currentChanged.connect(self.current_changed)
        b = self.bb.addButton(_('Clear choice'), QDialogButtonBox.ButtonRole.ActionRole)
        b.setIcon(QIcon.ic('trash.png'))
        b.clicked.connect(lambda : setattr(self, 'model_id', ''))
        b.setToolTip(_('Let the AI model be chosen dynamically based on the query being made'))
        h = QHBoxLayout()
        self.counts = QLabel('')
        h.addWidget(self.counts), h.addStretch(), h.addWidget(self.bb)
        l.addLayout(h)
        self.update_filters()
        self.update_sorts()
    def current_changed(self):
        # Show details for the newly selected model, or help text when none.
        idx = self.models.selectionModel().currentIndex()
        if idx.isValid():
            model = idx.data(Qt.ItemDataRole.UserRole)
            self.details.show_model_details(model)
        else:
            self.details.show_help()
    def update_sorts(self):
        # Apply the chained sort criteria and remember them for next time.
        self.proxy_model.set_sorts(*(s.sort_key_func for s in self.sorts))
        gprefs.set('openrouter-model-sorts', tuple(s.sort_key for s in self.sorts))
        self.proxy_model.sort(0, Qt.SortOrder.AscendingOrder)
    def update_filters(self):
        # Rebuild the filter predicates from the UI state and refresh counts.
        filters = []
        text = self.search.text().strip()
        if text:
            search_tokens = text.lower().split()
            def model_matches(m):
                # Every search token must be a substring of some name token.
                name_tokens = m.name.lower().split()
                for tok in search_tokens:
                    for q in name_tokens:
                        if tok in q:
                            break
                    else:
                        return False
                return True
            filters.append(model_matches)
        with gprefs:
            gprefs.set('openrouter-filter-only-free', self.only_free.isChecked())
            gprefs.set('openrouter-filter-only-unmoderated', self.only_unmoderated.isChecked())
        if self.only_free.isChecked():
            filters.append(lambda m: m.pricing.is_free)
        if self.only_unmoderated.isChecked():
            filters.append(lambda m: not m.is_moderated)
        self.proxy_model.set_filters(*filters)
        num_showing = self.proxy_model.rowCount(QModelIndex())
        total = self.proxy_model.sourceModel().rowCount(QModelIndex())
        if num_showing == total:
            self.counts.setText(_('{} models').format(num_showing))
        else:
            self.counts.setText(_('{0} of {1} models').format(num_showing, total))
        self.current_changed()
class ConfigWidget(QWidget):
    '''Configuration widget for the OpenRouter AI provider.'''
    def __init__(self, parent: QWidget | None = None):
        super().__init__(parent)
        l = QFormLayout(self)
        l.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.ExpandingFieldsGrow)
        la = QLabel('<p>'+_(
            'You have to create an account at {0}, then generate an'
            ' API key and purchase a token amount of credits. After that, you can use any'
            ' <a href="{1}">AI model</a> you like, including free ones. Your requests are'
            ' sent to remote providers via OpenRouter.ai, see the <a href="{2}">privacy policy</a>.'
        ).format('<a href="https://openrouter.ai">OpenRouter.ai</a>', 'https://openrouter.ai/rankings',
            'https://openrouter.ai/docs/features/privacy-and-logging'))
        la.setWordWrap(True)
        la.setOpenExternalLinks(True)
        l.addRow(la)
        self.api_key_edit = a = QLineEdit(self)
        a.setPlaceholderText(_('An API key is required to use OpenRouter'))
        l.addRow(_('API &key:'), a)
        # Stored keys are obfuscated; decode for display.
        if key := pref('api_key'):
            a.setText(decode_secret(key))
        self.model_strategy = ms = QComboBox(self)
        l.addRow(_('Model &choice strategy:'), ms)
        ms.addItem(_('Free only'), 'free-only')
        ms.addItem(_('Free or paid'), 'free-or-paid')
        ms.addItem(_('High quality'), 'native')
        if strat := pref('model_choice_strategy', 'free-or-paid'):
            ms.setCurrentIndex(max(0, ms.findData(strat)))
        ms.setToolTip('<p>' + _(
            'The model choice strategy controls how a model to query is chosen when no specific'
            ' model is specified. The choices are:<ul>\n'
            '<li><b>Free only</b> - Only uses free models. Can lead to lower quality/slower'
            ' results, with some rate limiting as well. Prefers unmoderated models where possible. If no free models'
            ' are available, will fail with an error.\n'
            '<li><b>Free or paid</b> - Like Free only, but fallback to non-free models if no free ones are available.\n'
            '<li><b>High quality</b> - Automatically choose a model based on the query, for best possible'
            " results, regardless of cost. Uses OpenRouter's own automatic model selection."
        ))
        # NOTE(review): this checkbox is created but never added to the layout,
        # and its value is not included in `settings` below — looks like an
        # unfinished feature; confirm intent before wiring it up.
        self._allow_web_searches = aws = QCheckBox(_('Allow &searching the web when generating responses'))
        aws.setChecked(pref('allow_web_searches', False))
        aws.setToolTip('<p>' + _(
            'If enabled, OpenRouter will use Exa.ai web searches to return accurate and up-to-date'
            ' information for queries, where possible. This adds about two cents to the cost of every request.'))
        self.reasoning_strat = rs = reasoning_strategy_config_widget(pref('reasoning_strategy', 'auto'), self)
        l.addRow(_('&Reasoning effort:'), rs)
        self.data_retention = dr = QCheckBox(_('Allow usage of providers that &store prompts'), self)
        dr.setToolTip(textwrap.fill(_(
            'Some AI providers might store your prompts, usually to use as data for training.'
            ' When disabled, such providers will not be used. This may prevent usage of some models.')))
        dr.setChecked(pref('data_collection', 'deny') == 'allow')
        l.addRow(dr)
        self.text_model = tm = Model(parent=self)
        tm.select_model.connect(self.select_model)
        l.addRow(_('Model for &text tasks:'), tm)
    def select_model(self, model_id: str, for_text: bool) -> None:
        # Open the chooser dialog and, on accept, update the originating widget.
        model_choice_target: Model = self.sender()
        caps = AICapabilities.text_to_text if for_text else AICapabilities.text_to_image
        d = ChooseModel(model_id, caps, self)
        if d.exec() == QDialog.DialogCode.Accepted:
            model_choice_target.set(d.model_id, d.model_name)
    @property
    def api_key(self) -> str:
        return self.api_key_edit.text().strip()
    @property
    def model_choice_strategy(self) -> str:
        return self.model_strategy.currentData()
    @property
    def reasoning_strategy(self) -> str:
        return self.reasoning_strat.currentData()
    @property
    def data_collection(self) -> str:
        return 'allow' if self.data_retention.isChecked() else 'deny'
    @property
    def settings(self) -> dict[str, Any]:
        # Snapshot of all settings to persist; the API key is obfuscated.
        ans = {'api_key': encode_secret(self.api_key), 'model_choice_strategy': self.model_choice_strategy,
               'reasoning_strategy': self.reasoning_strategy, 'data_collection': self.data_collection}
        if self.text_model.model_id:
            ans['text_model'] = (self.text_model.model_id, self.text_model.model_name)
        return ans
    @property
    def is_ready_for_use(self) -> bool:
        return bool(self.api_key)
    def validate(self) -> bool:
        # An API key is the only hard requirement.
        if self.is_ready_for_use:
            return True
        error_dialog(self, _('No API key'), _(
            'You must supply an API key to use OpenRouter. Remember to also buy a few credits, even if you'
            ' plan on using only free models.'), show=True)
        return False
    def save_settings(self):
        set_prefs_for_provider(OpenRouterAI.name, self.settings)
if __name__ == '__main__':
    # Manual test: show this provider's configuration dialog standalone.
    configure(OpenRouterAI.name)
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/open_router/config.py",
"license": "GNU General Public License v3.0",
"lines": 414,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/ai/open_router/backend.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import datetime
import json
import os
import re
from collections.abc import Iterable, Iterator, Sequence
from functools import lru_cache
from typing import Any, NamedTuple
from urllib.request import Request
from calibre.ai import AICapabilities, ChatMessage, ChatMessageType, ChatResponse, NoAPIKey, NoFreeModels
from calibre.ai.open_router import OpenRouterAI
from calibre.ai.prefs import decode_secret, pref_for_provider
from calibre.ai.utils import chat_with_error_handler, develop_text_chat, get_cached_resource, read_streaming_response
from calibre.constants import cache_dir
module_version = 1 # needed for live updates
MODELS_URL = 'https://openrouter.ai/api/v1/models'
def pref(key: str, defval: Any = None) -> Any:
    """Read an OpenRouter-specific preference value, with a default."""
    provider = OpenRouterAI.name
    return pref_for_provider(provider, key, defval)
@lru_cache(2)
def get_available_models() -> dict[str, Model]:
    """Fetch (and cache) the list of models available on OpenRouter.

    The raw API response is also cached on disk so repeated runs do not
    hit the network unnecessarily.
    """
    cache_file = os.path.join(cache_dir(), 'ai', f'{OpenRouterAI.name}-models-v1.json')
    raw = get_cached_resource(cache_file, MODELS_URL)
    return parse_models_list(json.loads(raw))
def human_readable_model_name(model_id: str) -> str:
    """Map a model id to its display name, falling back to the id itself."""
    model = get_available_models().get(model_id)
    if model is None:
        return model_id
    return model.name_without_creator_preserving_case
class Pricing(NamedTuple):
    """Per-model pricing information. All values are in credits per token/request/unit."""
    input_token: float = 0  # cost per input token
    output_token: float = 0  # cost per output token
    request: float = 0  # per API request
    image: float = 0  # per image
    web_search: float = 0  # per web search
    internal_reasoning: float = 0  # cost per internal reasoning token
    input_cache_read: float = 0  # cost per cached input token read
    input_cache_write: float = 0  # cost per cached input token write

    @classmethod
    def from_dict(cls, x: dict[str, str]) -> 'Pricing':
        """Build a Pricing instance from the raw OpenRouter pricing mapping.

        'prompt' and 'completion' must be present; everything else defaults to 0.
        """
        def cost(key: str) -> float:
            return float(x.get(key, 0))

        return cls(
            input_token=float(x['prompt']),
            output_token=float(x['completion']),
            request=cost('request'),
            image=cost('image'),
            web_search=cost('web_search'),
            internal_reasoning=cost('internal_reasoning'),
            input_cache_read=cost('input_cache_read'),
            input_cache_write=cost('input_cache_write'),
        )

    @property
    def is_free(self) -> bool:
        """True iff no pricing component is positive."""
        largest = max(self)
        return largest == 0
class Model(NamedTuple):
    """A single model available via the OpenRouter API."""
    name: str  # display name, e.g. 'OpenAI: GPT-5 (free)'
    id: str  # API identifier used in chat requests
    slug: str  # canonical slug from the API
    created: datetime.datetime  # creation time (from_dict parses the API's Unix timestamp)
    description: str
    context_length: int  # maximum context window, in tokens
    pricing: Pricing
    parameters: tuple[str, ...]  # request parameters this model supports
    is_moderated: bool  # whether the top provider moderates content
    capabilities: AICapabilities
    tokenizer: str

    @property
    def creator(self) -> str:
        """Lower-cased creator prefix of the display name (text before the ':')."""
        return self.name.partition(':')[0].lower()

    @property
    def family(self) -> str:
        """Lower-cased first word after the creator prefix, '' when absent.

        E.g. 'Google: Gemini 2.5 Flash' -> 'gemini'.
        """
        parts = self.name.split(':')
        if len(parts) > 1:
            return parts[1].strip().partition(' ')[0].lower()
        return ''

    @property
    def name_without_creator(self) -> str:
        """Lower-cased variant of name_without_creator_preserving_case."""
        return self.name_without_creator_preserving_case.lower()

    @property
    def name_without_creator_preserving_case(self) -> str:
        """Display name with the creator prefix and any trailing ' (free)' removed."""
        return re.sub(r' \(free\)$', '', self.name.partition(':')[-1].strip()).strip()

    @classmethod
    def from_dict(cls, x: dict[str, object]) -> 'Model':
        """Build a Model from one entry of the OpenRouter /models response."""
        arch = x['architecture']
        capabilities = AICapabilities.none
        # Only text input is supported by calibre; capabilities depend on the
        # output modalities the model advertises.
        if 'text' in arch['input_modalities']:
            if 'text' in arch['output_modalities']:
                capabilities |= AICapabilities.text_to_text
            if 'image' in arch['output_modalities']:
                capabilities |= AICapabilities.text_to_image
        return Model(
            name=x['name'], id=x['id'], created=datetime.datetime.fromtimestamp(x['created'], datetime.UTC),
            description=x['description'], context_length=x['context_length'], slug=x['canonical_slug'],
            parameters=tuple(x['supported_parameters']), pricing=Pricing.from_dict(x['pricing']),
            is_moderated=x['top_provider']['is_moderated'], tokenizer=arch['tokenizer'],
            capabilities=capabilities,
        )
def parse_models_list(entries: dict[str, Any]) -> dict[str, Model]:
    """Convert the raw /models API response into a mapping of model id -> Model.

    If the response contains duplicate ids, the last one wins (matching
    plain dict insertion semantics).
    """
    parsed = (Model.from_dict(entry) for entry in entries['data'])
    return {model.id: model for model in parsed}
def config_widget():
    """Instantiate the Qt configuration widget for this provider.

    Imported lazily so that merely loading this backend module does not
    pull in Qt.
    """
    from calibre.ai.open_router.config import ConfigWidget
    return ConfigWidget()
def save_settings(config_widget):
    """Persist the settings currently shown in *config_widget*."""
    saver = config_widget.save_settings
    saver()
def api_key() -> str:
    """The stored (still encoded) OpenRouter API key.

    NOTE(review): may actually be None when no key has been set, despite the
    annotation — callers test it for truthiness.
    """
    stored = pref('api_key')
    return stored
def is_ready_for_use() -> bool:
    """True when an API key has been configured."""
    key = api_key()
    return bool(key)
@lru_cache(64)
def free_model_choice(
    capabilities: AICapabilities = AICapabilities.text_to_text, allow_paid: bool = False
) -> tuple[Model, ...]:
    """Pick one preferred model per major model family.

    Free models are preferred; when none exist and allow_paid is True, the
    paid fallbacks are returned sorted by output token cost, otherwise
    NoFreeModels is raised.
    """
    gemini_free, gemini_paid = [], []
    deep_seek_free, deep_seek_paid = [], []
    grok_free, grok_paid = [], []
    gpt5_free, gpt5_paid = [], []
    gpt_oss_free, gpt_oss_paid = [], []
    claude_free, claude_paid = [], []

    def only(*model_groups: list[Model], sort_key=lambda m: m.created, reverse=True) -> Iterator[Model]:
        # Yield the best (newest, by default) model from each non-empty group.
        # Note: sorts the caller's lists in place.
        for models in model_groups:
            if models:
                models.sort(key=sort_key, reverse=reverse)
                yield models[0]

    for model in get_available_models().values():
        # NOTE(review): this condition never references `model`, so it is
        # loop-invariant — it looks like it was meant to test
        # model.capabilities. Confirm before relying on this function for
        # anything other than the default text_to_text capabilities.
        if AICapabilities.text_to_text & capabilities != capabilities:
            continue
        # Bucket well-known families into free/paid lists
        match model.creator:
            case 'google':
                if model.family == 'gemini':
                    gemini_free.append(model) if model.pricing.is_free else gemini_paid.append(model)
            case 'deepseek':
                deep_seek_free.append(model) if model.pricing.is_free else deep_seek_paid.append(model)
            case 'openai':
                n = model.name_without_creator
                if n.startswith('gpt-5'):
                    gpt5_free.append(model) if model.pricing.is_free else gpt5_paid.append(model)
                elif n.startswith('gpt-oss'):
                    gpt_oss_free.append(model) if model.pricing.is_free else gpt_oss_paid.append(model)
            case 'anthropic':
                if model.family == 'claude':
                    claude_free.append(model) if model.pricing.is_free else claude_paid.append(model)
            case 'xai':
                if model.family == 'grok' and 'code fast' not in model.name_without_creator:
                    grok_free.append(model) if model.pricing.is_free else grok_paid.append(model)
    free = tuple(only(gemini_free, gpt5_free, grok_free, gpt_oss_free, claude_free, deep_seek_free))
    if free:
        return free
    if not allow_paid:
        raise NoFreeModels(_('No free models were found for text to text generation'))
    # NOTE(review): gpt_oss_paid is absent from the paid fallback below —
    # confirm whether that is deliberate.
    return tuple(sorted(only(gemini_paid, gpt5_paid, grok_paid, claude_paid, deep_seek_paid), key=lambda m: m.pricing.output_token))
def model_choice_for_text() -> Iterator[Model]:
    """Yield models to try for a text chat, in priority order.

    A user-pinned model takes precedence; otherwise models are chosen
    according to the configured model choice strategy.
    """
    # model_name from the preference is intentionally unused here; only the
    # id is needed to look the model up.
    model_id, model_name = pref('text_model', ('', ''))
    if m := get_available_models().get(model_id):
        yield m
        return
    match pref('model_choice_strategy', 'free-or-paid'):
        case 'free-or-paid':
            yield from free_model_choice(allow_paid=True)
        case 'free-only':
            yield from free_model_choice(allow_paid=False)
        case _:
            # 'high-quality' and any unrecognized value: let OpenRouter's own
            # router pick a model
            yield get_available_models()['openrouter/auto']
def decoded_api_key() -> str:
    """Return the decrypted API key, raising NoAPIKey when none is configured."""
    encoded = api_key()
    if not encoded:
        raise NoAPIKey('API key required for OpenRouter')
    return decode_secret(encoded)
def chat_request(data: dict[str, Any], url='https://openrouter.ai/api/v1/chat/completions') -> Request:
    """Build an authenticated POST request carrying *data* as a JSON body."""
    payload = json.dumps(data).encode('utf-8')
    headers = {
        'Authorization': f'Bearer {decoded_api_key()}',
        'Content-Type': 'application/json',
        'HTTP-Referer': 'https://calibre-ebook.com',
        'X-Title': 'calibre',
    }
    return Request(url, data=payload, headers=headers, method='POST')
def for_assistant(self: 'ChatMessage') -> dict[str, Any]:
    """Convert a ChatMessage into the dict shape the OpenRouter API expects."""
    msg: dict[str, Any] = {'role': self.type.value, 'content': self.query}
    if self.reasoning_details:
        msg['reasoning_details'] = self.reasoning_details
    return msg
def add_websearch_if_desired(data: dict[str, Any], models: Sequence[Model]) -> None:
    """Enable OpenRouter's web search plugin if the user has opted in.

    NOTE(review): *models* is currently unused — presumably kept for
    call-site symmetry or future per-model gating; confirm.
    """
    # https://openrouter.ai/docs/features/web-search
    if pref('allow_web_searches', False):
        data['plugins'].append({'id': 'web'})
def text_chat_implementation(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    """Stream a chat completion from OpenRouter.

    Yields ChatResponse objects for every content/reasoning delta received and
    a final metadata response carrying cost/provider/model once usage
    information arrives.
    """
    if use_model:
        models = ()
        model_id = use_model
    else:
        models = tuple(model_choice_for_text())
        if not models:
            models = (get_available_models()['openrouter/auto'],)
        model_id = models[0].id
    # Sanitize the stored preference: anything unexpected means deny
    data_collection = pref('data_collection', 'deny')
    if data_collection not in ('allow', 'deny'):
        data_collection = 'deny'
    data = {
        'model': model_id,
        'plugins': [],
        'messages': [for_assistant(m) for m in messages],
        'usage': {'include': True},
        'stream': True,
        'reasoning': {'enabled': True},
        'provider': {'data_collection': data_collection},
    }
    if len(models) > 1:
        # Fallback models for OpenRouter to try, in order
        data['models'] = [m.id for m in models[1:]]
    s = pref('reasoning_strategy')
    match s:
        case 'low' | 'medium' | 'high':
            data['reasoning']['effort'] = s
        case _:
            # NOTE(review): this disables reasoning for any other value,
            # including 'auto' — confirm that is the intended behavior.
            data['reasoning']['enabled'] = False
    add_websearch_if_desired(data, models)
    rq = chat_request(data)
    # Note: `data` is rebound below to each streamed response chunk
    for data in read_streaming_response(rq, OpenRouterAI.name):
        for choice in data['choices']:
            d = choice['delta']
            c = d.get('content') or ''
            r = d.get('reasoning') or ''
            rd = d.get('reasoning_details') or ()
            role = d.get('role') or 'assistant'
            if c or r or rd:
                yield ChatResponse(content=c, reasoning=r, reasoning_details=rd, type=ChatMessageType(role), plugin_name=OpenRouterAI.name)
        # Usage arrives once per stream; surface it as a metadata response
        if u := data.get('usage'):
            yield ChatResponse(
                cost=float(u['cost'] or 0), currency=_('credits'), provider=data.get('provider') or '',
                model=data.get('model') or '', has_metadata=True, plugin_name=OpenRouterAI.name
            )
def text_chat(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    """Stream a chat conversation, converting errors into error responses."""
    responses = text_chat_implementation(messages, use_model)
    yield from chat_with_error_handler(responses)
def develop(msg: str = '', use_model: str = ''):
    """Interactive development helper: run a chat session from the command line."""
    # calibre-debug -c 'from calibre.ai.open_router.backend import *; develop()'
    m = (ChatMessage(msg),) if msg else ()
    develop_text_chat(text_chat, use_model, messages=m)
if __name__ == '__main__':
    # Manual smoke test: run a development chat session against OpenRouter.
    develop()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/ai/open_router/backend.py",
"license": "GNU General Public License v3.0",
"lines": 236,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/gui2/viewer/llm.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net> and Amir Tehrani
import string
from collections.abc import Iterator
from functools import lru_cache
from typing import Any
from qt.core import QDialog, QUrl, QVBoxLayout, QWidget, pyqtSignal
from calibre.ai import ChatMessage, ChatMessageType
from calibre.ebooks.metadata import authors_to_string
from calibre.gui2 import Application, error_dialog
from calibre.gui2.chat_widget import Button
from calibre.gui2.llm import ActionData, ConverseWidget, LLMActionsSettingsWidget, LLMSettingsDialogBase, LocalisedResults, prompt_sep
from calibre.gui2.viewer.config import vprefs
from calibre.gui2.viewer.highlights import HighlightColorCombo
from calibre.utils.localization import ui_language_as_english
from polyglot.binary import from_hex_unicode
class Action(ActionData):
    """A quick action the user can trigger on the current book/selection."""

    @property
    def uses_selected_text(self) -> bool:
        """True when the prompt template references the {selected} field."""
        # NOTE: the loop rebinds `_` locally, shadowing the gettext builtin;
        # harmless here since nothing in the loop is translated.
        for _, fname, _, _ in string.Formatter().parse(self.prompt_template):
            if fname == 'selected':
                return True
        return False

    def prompt_text(self, selected_text: str = '') -> str:
        """Build the final prompt for this action.

        When the selection looks like a single word, some actions switch to a
        word-oriented prompt variant.
        """
        # Heuristic: treat short, space-free selections as a single word
        probably_has_multiple_words = len(selected_text) > 20 or ' ' in selected_text
        pt = self.prompt_template
        what = 'Text to analyze: ' if probably_has_multiple_words else 'Word to analyze: '
        if not probably_has_multiple_words:
            match self.name:
                case 'explain':
                    pt = 'Explain the meaning, etymology and common usages of the following word in simple, easy to understand language. {selected}'
                case 'define':
                    pt = 'Explain the meaning and common usages of the following word. {selected}'
                case 'translate':
                    pt = 'Translate the following word into the language {language}. {selected}'
        selected_text = (prompt_sep + what + selected_text) if selected_text else ''
        return pt.format(selected=selected_text, language=ui_language_as_english()).strip()
@lru_cache(2)
def default_actions() -> tuple[Action, ...]:
    """The built-in quick actions, used when the user has not customized any."""
    return (
        Action('explain', _('Explain'), 'Explain the following text in simple, easy to understand language. {selected}'),
        Action('define', _('Define'), 'Identify and define any technical or complex terms in the following text. {selected}'),
        Action('summarize', _('Summarize'), 'Provide a concise summary of the following text. {selected}'),
        Action('points', _('Key points'), 'Extract the key points from the following text as a bulleted list. {selected}'),
        Action('grammar', _('Fix grammar'), 'Correct any grammatical errors in the following text and provide the corrected version. {selected}'),
        Action('translate', _('Translate'), 'Translate the following text into the language {language}. {selected}'),
    )
def current_actions(include_disabled=False) -> Iterator[Action]:
    """The user's configured quick actions, falling back to the defaults."""
    stored = vprefs.get('llm_quick_actions') or {}
    return Action.unserialize(stored, default_actions(), include_disabled)
class LLMSettingsDialog(LLMSettingsDialogBase):
    """Viewer-specific AI settings dialog, persisted in the viewer preferences."""

    def __init__(self, parent=None):
        super().__init__(title=_('AI Settings'), name='llm-settings-dialog', prefs=vprefs, parent=parent)

    def custom_tabs(self) -> Iterator[tuple[str, str, QWidget]]:
        """Yield (icon, tab title, widget) for extra settings tabs."""
        yield 'config.png', _('Actions and &highlights'), LLMSettingsWidget(self)
class LLMPanel(ConverseWidget):
    """Chat panel in the viewer for conversing with an AI about the current
    book and/or the currently selected text."""

    # Emitted with (note_text, highlight_style_name) when the user wants to
    # save a conversation as a note on the current highlight.
    add_note_requested = pyqtSignal(str, str)

    def __init__(self, parent=None):
        super().__init__(parent)
        # Hostname used in internal chat links for the per-response save button
        self.save_note_hostname = f'{self.hid}.save.calibre'
        # Selection the current conversation is "latched" onto; retained even
        # if the visible selection changes mid-conversation
        self.latched_conversation_text = ''
        self.current_selected_text = ''
        self.book_title = ''
        self.book_authors = ''

    def add_buttons(self):
        """Add the 'Save as note' button ahead of the base class buttons."""
        self.add_button('save.png', _('&Save as note'), _('Save this conversation as a note on the current highlight')).clicked.connect(self.save_as_note)
        super().add_buttons()

    def update_book_metadata(self, metadata):
        """Record title/authors of the current book for use in prompts."""
        self.book_title = metadata.get('title', '')
        authors = metadata.get('authors', [])
        self.book_authors = authors_to_string(authors)

    def activate_action(self, action: Action) -> None:
        """Kick off an API call for the given quick action."""
        self.start_api_call(self.prompt_text_for_action(action), uses_selected_text=action.uses_selected_text)

    def settings_dialog(self) -> QDialog:
        """The settings dialog shown for this panel."""
        return LLMSettingsDialog(self)

    def update_with_text(self, text: str) -> None:
        """React to a selection change, starting a new conversation when the
        selection differs from the latched one."""
        self.current_selected_text = text
        self.update_ai_provider_plugin()
        if not text:
            if self.conversation_history:
                # preserve the current conversation even though selection is gone
                return
            self.latched_conversation_text = ''
            self.update_ui_state()
            return
        start_new_convo = False
        if text != self.latched_conversation_text:
            start_new_convo = True
        if start_new_convo:
            self.latched_conversation_text = text
            self.clear_current_conversation()
        self.update_ui_state()

    def per_response_buttons(self, msgnum, msg):
        """Yield the save button shown under each AI response."""
        yield Button('save.png', f'http://{self.save_note_hostname}/{msgnum}', _(
            'Save this specific response as the note'))

    def create_initial_messages(self, action_prompt: str, **kwargs: Any) -> Iterator[ChatMessage]:
        """Yield a system context message (when book metadata is known)
        followed by the user prompt."""
        selected_text = self.latched_conversation_text if kwargs.get('uses_selected_text') else ''
        if self.book_title:
            context_header = f'I am currently reading the book: {self.book_title}'
            if self.book_authors:
                context_header += f' by {self.book_authors}'
            if selected_text:
                context_header += '. I have some questions about content from this book.'
            else:
                context_header += '. I have some questions about this book.'
            context_header += ' When you answer the questions use markdown formatting for the answers wherever possible.'
            if language_instruction := self.get_language_instruction():
                context_header += ' ' + language_instruction
            yield ChatMessage(context_header, type=ChatMessageType.system)
        yield ChatMessage(action_prompt)

    def choose_action_message(self) -> str:
        """HTML shown before a conversation starts: selection preview + quick actions."""
        msg = ''
        if self.latched_conversation_text:
            st = self.latched_conversation_text
            if len(st) > 200:
                st = st[:200] + '…'
            msg = f"<h3>{_('Selected text')}</h3><i>{st}</i>"
        msg += self.quick_actions_as_html(current_actions())
        msg += '<p>' + _('Or, type a question to the AI below, for example:') + '<br>'
        msg += '<i>Summarize this book.</i>'
        return msg

    def prompt_text_for_action(self, action) -> str:
        """The full prompt for *action*, applied to the latched selection."""
        return action.prompt_text(self.latched_conversation_text)

    def save_as_note(self):
        """Save the whole conversation as a note on the current highlight."""
        if self.conversation_history.response_count > 0 and self.latched_conversation_text:
            if not self.current_selected_text:
                return error_dialog(self, _('No selected text'), _('Cannot save note as there is currently no selected text'), show=True)
            self.add_note_requested.emit(
                self.conversation_history.format_llm_note(self.assistant_name),
                vprefs.get('llm_highlight_style', ''))

    def save_specific_note(self, message_index: int) -> None:
        """Save the conversation up to a single response as a note."""
        if not self.current_selected_text:
            return error_dialog(self, _('No selected text'), _('Cannot save note as there is currently no selected text'), show=True)
        history_for_record = self.get_conversation_history_for_specific_response(message_index)
        self.add_note_requested.emit(
            history_for_record.format_llm_note(self.assistant_name), vprefs.get('llm_highlight_style', ''))

    def handle_chat_link(self, qurl: QUrl) -> bool:
        """Dispatch clicks on internal chat links; True when handled."""
        match qurl.host():
            case self.save_note_hostname:
                index = int(qurl.path().strip('/'))
                self.save_specific_note(index)
                return True
            case self.quick_action_hostname:
                name = from_hex_unicode(qurl.path().strip('/'))
                for ac in current_actions():
                    if ac.name == name:
                        self.activate_action(ac)
                        break
                return True
        return False

    def start_new_conversation(self) -> None:
        """Reset the latched selection before starting a fresh conversation."""
        self.latched_conversation_text = ''
        super().start_new_conversation()

    def ready_to_start_api_call(self) -> str:
        """Empty string when ready; otherwise a message explaining why not."""
        if self.latched_conversation_text:
            return ''
        return _('No text is selected for this conversation.')
# Settings {{{
class HighlightWidget(HighlightColorCombo):
    """Combo box for choosing which highlight style AI notes are saved with."""

    def load_settings(self) -> None:
        """Restore the previously chosen highlight style, if any."""
        if hsn := vprefs.get('llm_highlight_style'):
            self.highlight_style_name = hsn

    def commit(self) -> bool:
        """Persist the chosen highlight style; always succeeds."""
        selected_internal_name = self.currentData()
        vprefs.set('llm_highlight_style', selected_internal_name)
        return True
class LLMSettingsWidget(LLMActionsSettingsWidget):
    """Settings tab for the viewer's AI quick actions and highlight style."""

    action_edit_help_text = '<p>' + _(
        'The prompt is a template. If you want the prompt to operate on the currently selected'
        ' text, add <b>{0}</b> to the end of the prompt.'
    ).format('{selected}')

    def get_actions_from_prefs(self) -> Iterator[ActionData]:
        """All configured actions, including disabled ones, for editing."""
        yield from current_actions(include_disabled=True)

    def set_actions_in_prefs(self, s: dict[str, Any]) -> None:
        """Store the serialized quick-action configuration."""
        vprefs.set('llm_quick_actions', s)

    def create_custom_widgets(self) -> Iterator[tuple[str, QWidget]]:
        """Yield (label, widget) pairs for extra rows in the settings form."""
        yield _('&Highlight style:'), HighlightWidget(self)
        yield '', LocalisedResults()
# }}}
def develop(show_initial_messages: bool = False):
    """Manual test harness: show the LLM panel in a bare dialog.

    With show_initial_messages=True, a canned conversation is rendered so the
    widget's message display can be inspected without contacting an AI.
    """
    app = Application([])
    # return LLMSettingsDialog().exec()
    d = QDialog()
    l = QVBoxLayout(d)
    l.setContentsMargins(0, 0, 0, 0)
    llm = LLMPanel(d)
    llm.update_with_text('developing my thoughts on the AI apocalypse')
    h = llm.conversation_history
    if show_initial_messages:
        h.model_used = 'google/gemini-2.5-flash-image-preview:free'
        h.append(ChatMessage('Testing rendering of conversation widget'))
        h.append(ChatMessage('This is a reply from the LLM', type=ChatMessageType.assistant))
        h.append(ChatMessage('Another query from the user'))
        h.append(
            ChatMessage('''\
Nisi nec libero. Cras magna ipsum, scelerisque et, tempor eget, gravida nec, lacus.
Fusce eros nisi, ullamcorper blandit, ultricies eget, elementum eget, pede.
Phasellus id risus vitae nisl ullamcorper congue. Proin est.
Sed eleifend odio sed leo. Mauris tortor turpis, dignissim vel, ornare ac, ultricies quis, magna.
Phasellus lacinia, augue ac dictum tempor, nisi felis ornare magna, eu vehicula tellus enim eu neque.
Fusce est eros, sagittis eget, interdum a, ornare suscipit, massa. Sed vehicula elementum ligula.
Aliquam erat volutpat. Donec odio. Quisque nunc. Integer cursus feugiat magna.
Fusce ac elit ut elit aliquam suscipit. Duis leo est, interdum nec, varius in. ''', type=ChatMessageType.assistant))
        h.response_count = 2
        llm.show_ai_conversation()
    llm.update_ui_state()
    l.addWidget(llm)
    d.exec()
    # Keep the Application alive until the dialog closes, then release it
    del app
if __name__ == '__main__':
    # Manual smoke test for the viewer LLM panel.
    develop()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/gui2/viewer/llm.py",
"license": "GNU General Public License v3.0",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/gui2/actions/column_tooltips.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2022, Charles Haley
#
from qt.core import QDialogButtonBox, Qt, QVBoxLayout
from calibre.gui2 import error_dialog, safe_open_url
from calibre.gui2.actions import InterfaceAction
from calibre.gui2.dialogs.template_dialog import TemplateDialog
from calibre.gui2.widgets2 import Dialog, HTMLDisplay
def column_template_placeholder_text():
    """Placeholder shown in the template editor when no tooltip template exists yet."""
    return _(
        'Notes:\n'
        '* The template global variable "{0}" contains the column lookup name.\n'
        '* The global variable "{1}" contains the original tooltip text').format('column_lookup_name',
            'original_text')
class ToolTipDialog(Dialog):
    """Simple dialog that renders a (possibly HTML) tooltip at full size."""

    def __init__(self, title, prefs):
        super().__init__(title, 'show_tooltip_dialog',
            prefs=prefs,
            default_buttons=QDialogButtonBox.StandardButton.Ok)

    def setup_ui(self):
        """Lay out the HTML display over the button box."""
        l = QVBoxLayout(self)
        d = self.display = HTMLDisplay()
        l.addWidget(d)
        l.addWidget(self.bb)
        # Open clicked links in the system browser instead of navigating
        d.anchor_clicked.connect(safe_open_url)

    def set_html(self, tt_text):
        """Set the tooltip markup to display."""
        self.display.setHtml(tt_text)
class ColumnTooltipsAction(InterfaceAction):
    """GUI action to define/edit per-column tooltip templates and to show a
    cell's tooltip in a resizable dialog."""

    name = 'Column tooltips'
    action_spec = (_('Column tooltips'), 'edit_input.png',
                   _('Define a custom tooltip for values in a column'), ())
    action_type = 'current'
    action_add_menu = True
    action_menu_clone_qaction = _('Edit/define column tooltip')
    # Tooltip templates only make sense for the library, not devices
    dont_add_to = frozenset(('context-menu-device', 'menubar-device'))

    def genesis(self):
        """Wire up the main action and its submenu entry."""
        self.qaction.triggered.connect(self.show_template_editor)
        m = self.qaction.menu()
        ac = self.create_menu_action(m, 'tooltip_in_dialog_box', _('Show item tooltip in a dialog'),
                icon='dialog_information.png', triggered=self.show_tooltip_in_dialog, shortcut=None)
        m.addAction(ac)

    def check_errors(self, only_one_row=False):
        """Validate the current view/selection.

        Returns (view, current_index, column_lookup_name, selected_rows) or a
        tuple of four Nones after showing an error dialog.
        """
        view = self.gui.current_view()
        if view is not self.gui.library_view:
            # NOTE(review): this dialog uses .exec() while the others use
            # show=True — confirm the inconsistency is intentional.
            error_dialog(self.gui, _('No library view available'),
                _("You can't set custom tooltips for books on the device.")).exec()
            return (None, None, None, None)
        idx = view.currentIndex()
        if not idx.isValid():
            error_dialog(self.gui, _('No column selected'),
                _('A column (cell) must be selected'), show=True)
            return (None, None, None, None)
        column = view.model().column_map[idx.column()]
        rows = view.selectionModel().selectedRows()
        if not rows:
            error_dialog(self.gui, _('No books selected'),
                _('At least one book must be selected'), show=True)
            return (None, None, None, None)
        if only_one_row and len(rows) != 1:
            error_dialog(self.gui, _('Only one book'),
                _('Only one book can be selected'), show=True)
            return (None, None, None, None)
        return view, idx, column, rows

    def show_tooltip_in_dialog(self):
        """Show the current cell's tooltip text in a ToolTipDialog."""
        view, idx, column, rows = self.check_errors(only_one_row=True)
        if view is None:
            return
        from calibre.gui2.ui import get_gui
        db = get_gui().current_db.new_api
        fm = db.field_metadata.get(column)
        col_name = fm['name']
        d = ToolTipDialog(
            _('Tooltip for column {name}, row {row_num}').format(name=col_name, row_num=rows[0].row()+1),
            prefs=db.backend.prefs)
        d.set_html(idx.data(Qt.ItemDataRole.ToolTipRole))
        d.exec()

    def show_template_editor(self):
        """Open the template editor for the selected column's tooltip template."""
        # NOTE: rebinding `_` here shadows the gettext builtin for the rest of
        # this method; no translated strings are used below, so it is harmless.
        view, _, column, rows = self.check_errors()
        if view is None:
            return
        mi = []
        db = view.model().db
        for row in rows:
            if row.isValid():
                mi.append(db.new_api.get_proxy_metadata(db.data.index_to_id(row.row())))
        if mi:
            tt_dict = db.new_api.pref('column_tooltip_templates', {})
            # NOTE(review): this appends metadata for the first N *library*
            # rows (index i), not the selected rows which were already added
            # above — presumably to give the template dialog extra preview
            # books; confirm it is intentional.
            for i in range(min(len(rows), 10)):
                mi.append(db.new_api.get_proxy_metadata(db.data.index_to_id(i)))
            template = tt_dict.get(column, '')
            text_is_placeholder = False
            if not template:
                # No template defined yet: show help text as a placeholder
                text_is_placeholder = True
                template = column_template_placeholder_text()
            d = TemplateDialog(self.gui, template, mi=mi, text_is_placeholder=text_is_placeholder)
            if d.exec():
                tt_dict[column] = d.rule[1]
                db.new_api.set_pref('column_tooltip_templates', tt_dict)

    def location_selected(self, loc):
        """Enable the action only when the library view is active."""
        enabled = loc == 'library'
        self.qaction.setEnabled(enabled)
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/gui2/actions/column_tooltips.py",
"license": "GNU General Public License v3.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/calibre:src/calibre/utils/tts/piper.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import atexit
import json
import os
import sys
from collections.abc import Callable
from functools import partial
from queue import Queue
from threading import Lock, Thread
from typing import Any, NamedTuple
from calibre.constants import ismacos, iswindows
from calibre_extensions import piper
DEFAULT_LENGTH_SCALE = 1.0
DEFAULT_NOISE_SCALE = 0.667
DEFAULT_NOISE_W_SCALE = 0.8
class VoiceConfig(NamedTuple):
    """Parsed piper voice configuration, passed to the piper C extension."""
    espeak_voice_name: str  # espeak-ng voice used for phonemization
    sample_rate: int  # output sample rate in Hz
    phoneme_id_map: dict[int, list[int]]  # code point of phoneme's first char -> model phoneme ids
    length_scale: float  # speech rate scaling — larger presumably means slower; confirm against the model
    noise_scale: float  # inference noise parameter
    noise_w: float  # inference noise-width parameter
    num_speakers: int  # number of speakers the model supports
    sentence_delay: float = 0  # extra pause between sentences, in seconds
    normalize_volume: bool = False  # whether the extension should normalize output volume
def translate_voice_config(x: Any) -> VoiceConfig:
    """Convert a parsed piper voice JSON config dict into a VoiceConfig.

    Missing sections or keys fall back to sane defaults, so a partial
    config does not crash.
    """
    phoneme_id_map: dict[int, list[int]] = {}
    for s, pids in x.get('phoneme_id_map', {}).items():
        if s:
            # Key phonemes by the code point of their first character
            phoneme_id_map.setdefault(ord(s[0]), []).extend(map(int, pids))
    # The 'inference' section may be absent (or null); previously that
    # crashed g() below with an AttributeError on None
    inf = x.get('inference') or {}

    def g(d, prop, defval):
        # dict.get() with VoiceConfig as a sentinel: distinguishes a missing
        # key from one that is explicitly present
        ans = d.get(prop, VoiceConfig)
        if ans is VoiceConfig:
            ans = defval
        return ans

    return VoiceConfig(
        espeak_voice_name=x.get('espeak', {}).get('voice') or 'en-us',
        sample_rate=int(g(x.get('audio', {}), 'sample_rate', 22050)),
        phoneme_id_map=phoneme_id_map,
        length_scale=float(g(inf, 'length_scale', DEFAULT_LENGTH_SCALE)),
        noise_scale=float(g(inf, 'noise_scale', DEFAULT_NOISE_SCALE)),
        noise_w=float(g(inf, 'noise_w', DEFAULT_NOISE_W_SCALE)),
        num_speakers=int(g(x, 'num_speakers', 1)),
    )
def load_voice_config(path: str) -> VoiceConfig:
    """Read and parse a piper voice JSON config file."""
    with open(path, 'rb') as cfg_file:
        raw = json.load(cfg_file)
    return translate_voice_config(raw)
def espeak_data_dir() -> str:
    """Path to the espeak-ng data directory, or '' to use the default.

    Frozen (installed) builds ship the data alongside the binaries; when
    running from source, CALIBRE_ESPEAK_DATA_DIR can point to it.
    """
    if not getattr(sys, 'frozen', False):
        return os.environ.get('CALIBRE_ESPEAK_DATA_DIR', '')
    if iswindows:
        return os.path.join(os.path.dirname(sys.executables_location), 'share', 'espeak-ng-data')
    if ismacos:
        return os.path.join(os.path.dirname(sys.frameworks_dir), 'Resources', 'espeak-ng-data')
    # Linux frozen builds
    return os.path.join(sys.executables_location, 'share', 'espeak-ng-data')
def create_voice_config(config_path: str, length_scale_multiplier: float = 0, sentence_delay: float = 0.2) -> VoiceConfig:
    """Load a voice config, applying a speed multiplier and sentence delay.

    length_scale_multiplier is clamped to [-1, 1] and mapped linearly to a
    length-scale factor in [2, 0.1] (never below 0.1).
    """
    cfg = load_voice_config(config_path)
    m = max(0.1, 1 + -1 * max(-1, min(length_scale_multiplier, 1)))  # maps -1 to 1 to 2 to 0.1
    cfg = cfg._replace(sentence_delay=sentence_delay, length_scale=cfg.length_scale * m)
    return cfg
def set_voice(config_path: str, model_path: str, length_scale_multiplier: float = 0, sentence_delay: float = 0.2) -> None:
    """Load a voice config and install it (with the model) into the piper extension."""
    voice_cfg = create_voice_config(config_path, length_scale_multiplier, sentence_delay)
    piper.set_voice(voice_cfg, model_path)
class SynthesisResult(NamedTuple):
    """One chunk of synthesized PCM audio, delivered to the result callback."""
    utterance_id: Any  # opaque id that was passed to Piper.synthesize()
    bytes_per_sample: int  # 2 when 16-bit samples were requested, else 4
    audio_data: bytes  # raw PCM sample data
    num_samples: int  # number of samples in audio_data
    sample_rate: int  # in Hz
    is_last: bool  # True for the final chunk of the utterance
def simple_test():
    """Sanity check that the espeak-ng data and the piper extension work.

    Raises AssertionError when the data directory is broken or phonemization
    produces nothing.
    """
    d = espeak_data_dir()
    if d and not os.path.exists(os.path.join(d, 'voices')):
        raise AssertionError(f'{d} does not contain espeak-ng data')
    piper.initialize(d)
    piper.set_espeak_voice_by_name('en-us')
    if not piper.phonemize('simple test'):
        raise AssertionError('No phonemes returned by phonemize()')
class Piper(Thread):
    """Background worker thread that runs piper speech synthesis.

    Commands are queued tagged with a "voice id"; bumping the id (via
    increment_voice_id/cancel/set_voice) invalidates all previously queued
    commands, which the worker then silently drops.
    """

    def __init__(self):
        piper.initialize(espeak_data_dir())
        Thread.__init__(self, name='PiperSynth', daemon=True)
        self.commands = Queue()
        self.as_16bit_samples = True
        self._voice_id = 0  # guarded by self.lock; bumped to cancel stale work
        self.lock = Lock()
        self.result_callback = lambda *a: None
        self.start()

    @property
    def voice_id(self) -> int:
        """Current voice id, read under the lock."""
        with self.lock:
            ans = self._voice_id
        return ans

    def increment_voice_id(self) -> int:
        """Bump the voice id, invalidating all queued commands; returns the new id."""
        with self.lock:
            self._voice_id += 1
            ans = self._voice_id
        return ans

    def run(self):
        """Worker loop: execute queued commands, dropping stale ones.

        A (vid, None) entry is the shutdown sentinel. Exceptions from a
        command are reported through result_callback as (None, exc, tb).
        """
        while True:
            voice_id, cmd = self.commands.get(True)
            if cmd is None:
                break
            if voice_id != self.voice_id:
                # Stale command from before the last cancel/set_voice
                continue
            try:
                cmd()
            except Exception as e:
                import traceback
                self.result_callback(None, e, traceback.format_exc())

    def shutdown(self):
        """Cancel pending work, stop the worker thread and wait for it."""
        vid = self.increment_voice_id()
        self.commands.put((vid, None))
        self.join()

    def set_voice(
        self, result_callback: Callable[[SynthesisResult, Exception|None, str|None], None],
        config_path: str, model_path: str, length_scale_multiplier: float = 0, sentence_delay: float = 0.2,
        as_16bit_samples: bool = True,
    ) -> int:
        """Queue loading of a new voice; returns its output sample rate.

        Also cancels any in-flight synthesis (by bumping the voice id) and
        installs the callback that will receive SynthesisResult chunks.
        """
        vid = self.increment_voice_id()
        self.result_callback = result_callback
        self.as_16bit_samples = as_16bit_samples
        cfg = create_voice_config(config_path, length_scale_multiplier, sentence_delay)
        self.commands.put((vid, partial(self._set_voice, cfg, model_path)))
        return cfg.sample_rate

    def _set_voice(self, cfg, model_path):
        # Runs on the worker thread
        piper.set_voice(cfg, model_path)

    def cancel(self) -> None:
        """Abandon any queued/in-flight synthesis and mute further callbacks."""
        self.increment_voice_id()
        self.result_callback = lambda *a: None

    def synthesize(self, utterance_id: Any, text: str) -> None:
        """Queue synthesis of *text*; results arrive via result_callback."""
        vid = self.voice_id
        self.commands.put((vid, partial(self._synthesize, vid, utterance_id, text)))

    def _synthesize(self, voice_id: int, utterance_id: Any, text: str) -> None:
        # Runs on the worker thread: stream chunks from the piper extension
        # until the utterance finishes or the voice id changes (cancellation).
        piper.start(text)
        bytes_per_sample = 2 if self.as_16bit_samples else 4
        while True:
            audio_data, num_samples, sample_rate, is_last = piper.next(self.as_16bit_samples)
            if self.voice_id == voice_id:
                self.result_callback(SynthesisResult(utterance_id, bytes_per_sample, audio_data, num_samples, sample_rate, is_last), None, None)
            else:
                # Cancelled mid-utterance: stop delivering chunks
                break
            if is_last:
                break
# Lazily created process-wide Piper synthesizer singleton
_global_piper_instance = None


def global_piper_instance() -> Piper:
    """Return the process-wide Piper instance, creating it on first call.

    Registers its shutdown with atexit so the worker thread is joined
    cleanly at interpreter exit.
    """
    global _global_piper_instance
    if _global_piper_instance is None:
        _global_piper_instance = Piper()
        atexit.register(_global_piper_instance.shutdown)
    return _global_piper_instance
def global_piper_instance_if_exists() -> Piper | None:
    """The singleton Piper instance if one was created, else None (never creates one)."""
    return _global_piper_instance
def play_wav_data(wav_data: bytes):
    """Play in-memory WAV data via Qt multimedia, blocking until playback ends."""
    from qt.core import QAudioOutput, QBuffer, QByteArray, QCoreApplication, QIODevice, QMediaPlayer, QUrl
    app = QCoreApplication([])
    m = QMediaPlayer()
    ao = QAudioOutput(m)
    m.setAudioOutput(ao)
    qbuffer = QBuffer()
    qbuffer.setData(QByteArray(wav_data))
    qbuffer.open(QIODevice.OpenModeFlag.ReadOnly)
    # The fake file name gives Qt a hint about the container format
    m.setSourceDevice(qbuffer, QUrl.fromLocalFile('piper.wav'))
    # Quit the event loop when playback finishes; log other status changes
    m.mediaStatusChanged.connect(
        lambda status: app.quit() if status == QMediaPlayer.MediaStatus.EndOfMedia else print(m.playbackState(), status)
    )
    m.errorOccurred.connect(lambda e, s: (print(e, s, file=sys.stderr), app.quit()))
    m.play()
    app.exec()
def play_pcm_data(pcm_data, sample_rate):
    """Wrap raw PCM samples in a WAV header and play them, blocking until done."""
    from calibre_extensions.ffmpeg import wav_header_for_pcm_data
    header = wav_header_for_pcm_data(len(pcm_data), sample_rate)
    play_wav_data(header + pcm_data)
def develop():
    """Manual test: synthesize a couple of sentences and play the result.

    Requires the en_US-libritts-high voice model to be present in the piper
    cache directory.
    """
    from calibre.gui2.tts.piper import piper_cache_dir
    p = global_piper_instance()
    model_path = os.path.join(piper_cache_dir(), 'en_US-libritts-high.onnx')
    q = Queue()

    def synthesized(*args):
        # Forward results from the worker thread to this (main) thread
        q.put(args)

    sample_rate = p.set_voice(synthesized, model_path+'.json', model_path, sentence_delay=0.3)
    p.synthesize(1, 'Testing speech synthesis with piper. A second sentence.')
    all_data = []
    while (args := q.get()):
        sr, exc, tb = args
        if exc is not None:
            print(tb, file=sys.stderr, flush=True)
            print(exc, file=sys.stderr, flush=True)
            raise SystemExit(1)
        all_data.append(sr.audio_data)
        print(f'Got {len(sr.audio_data)} bytes of audio data', flush=True)
        if sr.is_last:
            break
    play_pcm_data(b''.join(all_data), sample_rate)
if __name__ == '__main__':
    # Manual smoke test for piper speech synthesis.
    develop()
| {
"repo_id": "kovidgoyal/calibre",
"file_path": "src/calibre/utils/tts/piper.py",
"license": "GNU General Public License v3.0",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/kitty:kittens/command_palette/main.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2024, Kovid Goyal <kovid at kovidgoyal.net>
import sys
from functools import partial
from typing import Any
from kitty.fast_data_types import add_timer, get_boss
from kitty.typing_compat import BossType
from ..tui.handler import result_handler
def collect_keys_data(opts: Any) -> dict[str, Any]:
    """Collect all keybinding data from options into a JSON-serializable dict.

    Returns a dict with:

    - ``modes``: mode name -> category title -> list of binding dicts
      (keys: ``key``, ``action``, ``action_display``, ``definition``,
      ``help``, ``long_help``). The default keyboard mode uses the name ``''``.
    - ``mouse``: flat list of mouse binding dicts.
    - ``mode_order`` / ``category_order``: explicit orderings, since JSON
      consumers may not preserve map insertion order.
    """
    from kitty.actions import get_all_actions, groups
    from kitty.options.utils import KeyDefinition
    from kitty.types import Shortcut
    # Build action->group and action->help lookups
    action_to_group: dict[str, str] = {}
    action_to_help: dict[str, str] = {}
    action_to_long_help: dict[str, str] = {}
    for group_key, actions in get_all_actions().items():
        for action in actions:
            action_to_group[action.name] = groups[group_key]
            action_to_help[action.name] = action.short_help
            action_to_long_help[action.name] = action.long_help
    modes: dict[str, dict[str, list[dict[str, str]]]] = {}

    def as_sc(k: 'Any', v: KeyDefinition) -> Shortcut:
        # Sequence bindings (e.g. kitty_mod+x > y) carry their own key chain
        if v.is_sequence:
            return Shortcut((v.trigger,) + v.rest)
        return Shortcut((k,))

    for mode_name, mode in opts.keyboard_modes.items():
        categories: dict[str, list[dict[str, str]]] = {}
        for key, defns in mode.keymap.items():
            # Use last non-duplicate definition
            seen: set[tuple[Any, ...]] = set()
            uniq: list[KeyDefinition] = []
            for d in reversed(defns):
                uid = d.unique_identity_within_keymap
                if uid not in seen:
                    seen.add(uid)
                    uniq.append(d)
            for d in uniq:
                sc = as_sc(key, d)
                key_repr = sc.human_repr(opts.kitty_mod)
                action_repr = d.human_repr()
                # Determine category from first word of action definition
                action_name = d.definition.split()[0] if d.definition else 'no_op'
                category = action_to_group.get(action_name, 'Miscellaneous')
                help_text = action_to_help.get(action_name, '')
                long_help = action_to_long_help.get(action_name, '')
                categories.setdefault(category, []).append({
                    'key': key_repr,
                    'action': action_name,
                    'action_display': action_repr,
                    'definition': d.definition or action_name,
                    'help': help_text,
                    'long_help': long_help,
                })
        # Sort within categories
        for cat in categories:
            categories[cat].sort(key=lambda b: b['key'])
        # Order categories by the groups order
        ordered: dict[str, list[dict[str, str]]] = {}
        for group_title in groups.values():
            if group_title in categories:
                ordered[group_title] = categories.pop(group_title)
        # Add any remaining
        for cat_name, binds in sorted(categories.items()):
            ordered[cat_name] = binds
        modes[mode_name] = ordered
    # Move push_keyboard_mode <name> bindings from the default mode into the
    # respective keyboard mode's section so they appear alongside its shortcuts.
    if '' in modes:
        new_default_cats: dict[str, list[dict[str, str]]] = {}
        for cat_name, bindings in modes[''].items():
            keep: list[dict[str, str]] = []
            for b in bindings:
                if b['action'] == 'push_keyboard_mode':
                    parts = b['definition'].split()
                    target = parts[1] if len(parts) > 1 else ''
                    if target and target in modes:
                        # Insert 'Enter mode' as the first category of the target mode
                        if 'Enter mode' not in modes[target]:
                            new_target: dict[str, list[dict[str, str]]] = {'Enter mode': [b]}
                            new_target.update(modes[target])
                            modes[target] = new_target
                        else:
                            modes[target]['Enter mode'].append(b)
                        continue
                keep.append(b)
            if keep:
                new_default_cats[cat_name] = keep
        modes[''] = new_default_cats
    # Add unmapped actions (actions with no keyboard shortcut).
    # Collect all action names that already appear in a binding.
    mapped_actions: set[str] = set()
    for mode_cats in modes.values():
        for bindings in mode_cats.values():
            for b in bindings:
                mapped_actions.add(b['action'])
    default_mode_cats = modes.setdefault('', {})
    for group_key, actions in get_all_actions().items():
        category = groups[group_key]
        for action in actions:
            if action.name not in mapped_actions:
                default_mode_cats.setdefault(category, []).append({
                    'key': '',
                    'action': action.name,
                    'action_display': action.name,
                    'definition': action.name,
                    'help': action.short_help,
                    'long_help': action.long_help,
                })
    # Re-sort each category: mapped entries (non-empty key) by key first,
    # then unmapped entries (empty key) sorted by action name.
    for cat in default_mode_cats:
        default_mode_cats[cat].sort(key=lambda b: (b['key'] == '', b['key'] or b['action']))
    # Re-order default_mode_cats by groups ordering (adding unmapped actions may
    # have appended new categories at the end, breaking the established order).
    reordered: dict[str, list[dict[str, str]]] = {}
    for group_title in groups.values():
        if group_title in default_mode_cats:
            reordered[group_title] = default_mode_cats[group_title]
    for cat_name, binds in default_mode_cats.items():
        if cat_name not in reordered:
            reordered[cat_name] = binds
    modes[''] = reordered
    # Emit explicit mode and category ordering since JSON maps lose insertion order
    mode_order = list(modes.keys())
    category_order: dict[str, list[str]] = {}
    for mode_name, cats in modes.items():
        category_order[mode_name] = list(cats.keys())
    # Mouse mappings
    # NOTE(review): `action` here appears to be used directly as a string;
    # confirm against the representation of opts.mousemap values.
    mouse: list[dict[str, str]] = []
    for event, action in opts.mousemap.items():
        key_repr = event.human_repr(opts.kitty_mod)
        mouse.append({'key': key_repr, 'action': action, 'action_display': action, 'help': '', 'long_help': ''})
    mouse.sort(key=lambda b: b['key'])
    return {
        'modes': modes,
        'mouse': mouse,
        'mode_order': mode_order,
        'category_order': category_order,
    }
def main(args: list[str]) -> None:
    # This kitten has no standalone CLI mode; it is only launched via a mapping.
    raise SystemExit('This kitten must be used only from a kitty.conf mapping')
def callback(target_window_id: int, action: str, timer_id: int | None) -> None:
    """Timer callback: run *action* in the context of the originating window."""
    b = get_boss()
    b.combine(action, b.window_id_map.get(target_window_id))
@result_handler(has_ready_notification=True)
def handle_result(args: list[str], data: dict[str, Any], target_window_id: int, boss: BossType) -> None:
    """Schedule the action chosen in the palette for execution."""
    action = data.get('action') if data else None
    if not action:
        return
    # Run the action after an event loop tick so the palette overlay is closed
    add_timer(partial(callback, target_window_id, action), 0, False)
# CLI metadata consumed by kitty's documentation/CLI framework below.
help_text = 'Browse and trigger keyboard shortcuts and actions'
usage = ''
# Empty options spec; stored as a callable (.format) per kitty convention
OPTIONS = r'''
'''.format
if __name__ == '__main__':
    main(sys.argv)
elif __name__ == '__doc__':
    # kitty imports this module with __name__ == '__doc__' to collect CLI docs
    cd = sys.cli_docs  # type: ignore
    cd['usage'] = usage
    cd['options'] = OPTIONS
    cd['help_text'] = help_text
    cd['short_desc'] = help_text
| {
"repo_id": "kovidgoyal/kitty",
"file_path": "kittens/command_palette/main.py",
"license": "GNU General Public License v3.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/kitty:kitty_tests/command_palette.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2024, Kovid Goyal <kovid at kovidgoyal.net>
from . import BaseTest
class TestCommandPalette(BaseTest):
    """Tests for the data-collection half of the command palette kitten.

    Every test exercises :func:`collect_keys_data` against the default
    option set produced by ``self.set_options()``.
    """

    def test_collect_keys_data(self):
        """The returned structure has all required top-level and per-binding fields."""
        from kittens.command_palette.main import collect_keys_data
        from kitty.actions import groups
        opts = self.set_options()
        data = collect_keys_data(opts)
        self.assertIn('modes', data)
        self.assertIn('mouse', data)
        self.assertIn('', data['modes'], 'Default keyboard mode should be present')
        default_mode = data['modes']['']
        # Should have at least some categories
        self.assertTrue(len(default_mode) > 0, 'Should have at least one category')
        # All category names should be from the known groups
        known_titles = set(groups.values())
        for cat_name in default_mode:
            self.assertIn(cat_name, known_titles, f'Unknown category: {cat_name}')
        # Each category should have bindings with required fields
        for cat_name, bindings in default_mode.items():
            self.assertIsInstance(bindings, list)
            for b in bindings:
                self.assertIn('key', b)
                self.assertIn('action', b)
                self.assertIn('action_display', b)
                self.assertIn('definition', b)
                self.assertIn('help', b)
                self.assertIn('long_help', b)
                self.assertIsInstance(b['key'], str)
                self.assertIsInstance(b['action'], str)
                # key may be empty for unmapped actions; action must always be non-empty
                self.assertTrue(len(b['action']) > 0)
        # Mouse mappings
        self.assertIsInstance(data['mouse'], list)
        for b in data['mouse']:
            self.assertIn('key', b)
            self.assertIn('action', b)
            self.assertIn('action_display', b)

    def test_collect_keys_categories_ordered(self):
        """Category order in the default mode follows the groups dict."""
        from kittens.command_palette.main import collect_keys_data
        from kitty.actions import groups
        opts = self.set_options()
        data = collect_keys_data(opts)
        default_mode = data['modes']['']
        cat_names = list(default_mode.keys())
        group_titles = list(groups.values())
        # Categories should appear in the same order as defined in groups
        indices = []
        for cat in cat_names:
            if cat in group_titles:
                indices.append(group_titles.index(cat))
        self.ae(indices, sorted(indices), 'Categories should be ordered according to groups dict')

    def test_collect_keys_bindings_sorted(self):
        """Mapped bindings precede unmapped ones within each category."""
        from kittens.command_palette.main import collect_keys_data
        opts = self.set_options()
        data = collect_keys_data(opts)
        # Within each category, mapped entries (non-empty key) come first sorted by key,
        # then unmapped entries (empty key) sorted by action name.
        for cat_name, bindings in data['modes'][''].items():
            seen_unmapped = False
            for b in bindings:
                if b['key'] == '':
                    seen_unmapped = True
                elif seen_unmapped:
                    self.fail(
                        f'In category {cat_name!r}, mapped binding {b!r} follows an unmapped one'
                    )

    def test_collect_keys_has_help_text(self):
        from kittens.command_palette.main import collect_keys_data
        opts = self.set_options()
        data = collect_keys_data(opts)
        # At least some bindings should have help text
        has_help = False
        for cat_name, bindings in data['modes'][''].items():
            for b in bindings:
                if b['help']:
                    has_help = True
                    break
            if has_help:
                break
        self.assertTrue(has_help, 'At least some bindings should have help text')

    def test_ordering_arrays_present(self):
        """mode_order/category_order mirror the keys of the modes mapping."""
        from kittens.command_palette.main import collect_keys_data
        opts = self.set_options()
        data = collect_keys_data(opts)
        # mode_order should list all modes
        self.assertIn('mode_order', data)
        self.assertIsInstance(data['mode_order'], list)
        self.ae(set(data['mode_order']), set(data['modes'].keys()))
        # category_order should list categories for each mode
        self.assertIn('category_order', data)
        self.assertIsInstance(data['category_order'], dict)
        for mode_name in data['modes']:
            self.assertIn(mode_name, data['category_order'])
            self.ae(
                set(data['category_order'][mode_name]),
                set(data['modes'][mode_name].keys()),
                f'category_order for mode {mode_name!r} should match modes keys',
            )

    def test_always_includes_unmapped_actions(self):
        from kittens.command_palette.main import collect_keys_data
        opts = self.set_options()
        data = collect_keys_data(opts)
        # Unmapped actions (empty key) are always included
        found_unmapped = False
        for cats in data['modes'].values():
            for bindings in cats.values():
                for b in bindings:
                    if b['key'] == '':
                        found_unmapped = True
                        # Unmapped actions must still have action and definition
                        self.assertTrue(len(b['action']) > 0)
                        self.assertTrue(len(b['definition']) > 0)
                        break
        self.assertTrue(found_unmapped, 'Expected at least one unmapped action to always be present')

    def test_unmapped_actions_sorted_order(self):
        from kittens.command_palette.main import collect_keys_data
        opts = self.set_options()
        data = collect_keys_data(opts)
        # In each category, mapped bindings (non-empty key) should come before unmapped ones
        for cat_name, bindings in data['modes'].get('', {}).items():
            seen_unmapped = False
            for b in bindings:
                if b['key'] == '':
                    seen_unmapped = True
                elif seen_unmapped:
                    self.fail(
                        f'In category {cat_name!r}, mapped binding {b!r} follows an unmapped one'
                    )
| {
"repo_id": "kovidgoyal/kitty",
"file_path": "kitty_tests/command_palette.py",
"license": "GNU General Public License v3.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
kovidgoyal/kitty:kittens/choose_files/main.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import sys
from typing import Any
from kitty.conf.types import Definition
from kitty.constants import appname
from kitty.simple_cli_definitions import CONFIG_HELP, CompletionSpec
from kitty.typing_compat import BossType
from ..tui.handler import result_handler
# Configuration definition for the choose-files kitten. The Definition object
# collects option and key-mapping declarations that kitty's conf machinery
# turns into parsed options and generated documentation.
definition = Definition(
    '!kittens.choose_files',
)

agr = definition.add_group
egr = definition.end_group
opt = definition.add_option
map = definition.add_map
mma = definition.add_mouse_map

agr('scanning', 'Filesystem scanning')  # {{{

opt('show_hidden', 'last', choices=('last', 'yes', 'y', 'true', 'no', 'n', 'false'), long_text='''
Whether to show hidden files. The default value of :code:`last` means remember the last
used value. This setting can be toggled within the program.''')

opt('sort_by_last_modified', 'last', choices=('last', 'yes', 'y', 'true', 'no', 'n', 'false'), long_text='''
Whether to sort the list of entries by last modified, instead of name. Note that sorting only applies
before any query is entered. Once a query is entered entries are sorted by their matching score.
The default value of :code:`last` means remember the last
used value. This setting can be toggled within the program.''')

opt('respect_ignores', 'last', choices=('last', 'yes', 'y', 'true', 'no', 'n', 'false'), long_text='''
Whether to respect .gitignore and .ignore files and the :opt:`ignore` setting.
The default value of :code:`last` means remember the last used value.
This setting can be toggled within the program.''')

opt('+ignore', '', add_to_default=False, long_text='''
An ignore pattern to ignore matched files. Uses the same syntax as :code:`.gitignore` files (see :code:`man gitignore`).
Anchored patterns match with respect to whatever directory is currently being displayed.
Can be specified multiple times to use multiple patterns. Note that every pattern
has to be checked against every file, so use sparingly.
''')

egr()  # }}}

agr('appearance', 'Appearance')  # {{{

opt('show_preview', 'last', choices=('last', 'yes', 'y', 'true', 'no', 'n', 'false'), long_text='''
Whether to show a preview of the current file/directory. The default value of :code:`last` means remember the last
used value. This setting can be toggled within the program.''')

opt('pygments_style', 'default', long_text='''
The pygments color scheme to use for syntax highlighting of file previews. See :link:`pygments
builtin styles <https://pygments.org/styles/>` for a list of schemes.
This sets the colors used for light color schemes, use :opt:`dark_pygments_style` to change the
colors for dark color schemes.
''')

opt('dark_pygments_style', 'github-dark', long_text='''
The pygments color scheme to use for syntax highlighting with dark colors. See :link:`pygments
builtin styles <https://pygments.org/styles/>` for a list of schemes.
This sets the colors used for dark color schemes, use :opt:`pygments_style` to change the
colors for light color schemes.''')

opt('cache_size', '0.5', option_type='positive_float', long_text='''
The maximum size of the disk cache, in gigabytes, used for previews. Zero or negative values
mean no limit.
''')

opt('syntax_aliases', 'pyj:py pyi:py recipe:py', ctype='strdict_ _:', option_type='syntax_aliases',
    long_text='''
File extension aliases for syntax highlight. For example, to syntax highlight
:file:`file.xyz` as :file:`file.abc` use a setting of :code:`xyz:abc`.
Multiple aliases must be separated by spaces.
''')

opt('video_preview', 'width=480 fps=10 duration=5', long_text='''
Control how videos are sampled for previewing. The width controls
the size of the generated thumbnail from the video. Duration controls
how long the generated thumbnail plays for, in seconds. Note that when
changing these you should also use the :code:`--clear-cache` flag
otherwise it will not affect already cached previews.
''')

opt('+previewer', '', long_text='''
Specify an arbitrary program based preview generator. The syntax is::

    pattern program arguments...

Here, pattern can be used to match file names or mimetypes. For example:
:code:`name:*.doc` matches files with the extension :code:`.doc`. Similarly,
:code:`mime:image/*` matches all image files. :code:`program` can be any
executable program in PATH. It will be run with the supplied arguments. The last argument
will be the path to the file for which a preview must be generated.
Can be specified multiple times to setup different previewers for different types of files.
Note that previewers specified using this option take precedence over the builtin
previewers.

The command must output preview data to STDOUT, as a JSON object:

.. code-block:: json

    {
        "lines": ["line1", "line2", "..."],
        "image": "absolute path to generated image preview",
        "title_extra": "some text to show on the first line",
    }

The lines can contain SGR formatting escape codes and will be displayed as is at the
top of the preview panel. The image is optional and must be in one of the JPEG, PNG, GIF, WEBP, APNG
formats.
''')

egr()  # }}}

agr('shortcuts', 'Keyboard shortcuts')  # {{{

map('Quit', 'quit esc quit')
map('Quit', 'quit ctrl+c quit')
map('Accept current result', 'accept enter accept')
map('Select current result', 'select shift+enter select', long_text='''
When selecting multiple files, this will add the current file to the list of selected files.
You can also toggle the selected status of a file by holding down the :kbd:`Ctrl` key and clicking on
it. Similarly, the :kbd:`Alt` key can be held to click and extend the range of selected files.
''')
map('Type file name', 'typename ctrl+enter typename', long_text='''
Type a file name/path rather than filtering the list of existing files.
Useful when specifying a file or directory name for saving that does not yet exist.
When choosing existing directories, will accept the directory whose
contents are being currently displayed as the choice.
Does not work when selecting files to open rather than to save.
''')
map('Modify file name', 'modifyname alt+enter modifyname', long_text='''
Modify the name of an existing file and select it for saving.
Useful when specifying a file or directory name for saving that does not yet exist,
but is based on an existing file name.
Does not work when selecting files to open rather than to save.
''')
map('Next result', 'next_result down next 1')
map('Previous result', 'prev_result up next -1')
map('Left result', 'left_result left next left')
map('Right result', 'right_result right next right')
map('First result on screen', 'first_result_on_screen home next first_on_screen')
map('Last result on screen', 'last_result_on_screen end next last_on_screen')
# Fixed copy/paste slip: these two previously reused the
# first_result_on_screen/last_result_on_screen map names from the two
# mappings above, despite performing different actions.
map('First result', 'first_result ctrl+home next first')
map('Last result', 'last_result ctrl+end next last')
map('Change to currently selected dir', 'cd_current tab cd .')
map('Change to parent directory', 'cd_parent shift+tab cd ..')
map('Change to root directory', 'cd_root ctrl+/ cd /')
map('Change to home directory', 'cd_home ctrl+~ cd ~')
map('Change to home directory', 'cd_home ctrl+` cd ~')
map('Change to home directory', 'cd_home ctrl+shift+` cd ~')
map('Change to temp directory', 'cd_tmp ctrl+t cd /tmp')
map('Next filter', 'next_filter ctrl+f 1')
map('Previous filter', 'prev_filter alt+f -1')
map('Toggle showing dotfiles', 'toggle_dotfiles alt+h toggle dotfiles')
map('Toggle showing ignored files', 'toggle_ignorefiles alt+i toggle ignorefiles')
map('Toggle sorting by dates', 'toggle_sort_by_dates alt+d toggle sort_by_dates')
map('Toggle showing preview', 'toggle_preview alt+p toggle preview')

egr()  # }}}
def main(args: list[str]) -> None:
    # No standalone CLI mode; invoked via `kitten choose-files`.
    raise SystemExit('This must be run as kitten choose-files')
def relative_path_if_possible(path: str, base: str) -> str:
    """Return *path* expressed relative to *base* when it lies under it.

    Falls back to returning *path* unchanged when either argument is empty
    or *path* is not located inside *base*.
    """
    if not base or not path:
        return path
    from pathlib import Path
    try:
        return str(Path(path).relative_to(Path(base)))
    except ValueError:
        # path is not a descendant of base
        return path
@result_handler(has_ready_notification=True)
def handle_result(args: list[str], data: dict[str, Any], target_window_id: int, boss: BossType) -> None:
    """Paste the chosen paths into the window that launched the chooser.

    Paths are made relative to the window's working directory when possible,
    and shell-quoted when pasting at a shell prompt.
    """
    import shlex
    from kitty.utils import shlex_split
    selected: list[str] = data.get('paths', [])
    if not selected:
        boss.ring_bell_if_allowed()
        return
    window = boss.window_id_map.get(target_window_id)
    if window is None:
        boss.ring_bell_if_allowed()
        return
    base = window.cwd_of_child
    pieces = []
    for p in selected:
        if base:
            p = relative_path_if_possible(p, base)
        # At a shell prompt, quote anything that would split into multiple words
        if window.at_prompt and len(tuple(shlex_split(p))) > 1:
            p = shlex.quote(p)
        pieces.append(p)
    sep = ' ' if window.at_prompt else '\n'
    window.paste_text(sep.join(pieces))
# CLI metadata consumed by kitty's CLI/documentation framework below.
usage = '[directory to start choosing files in]'
# OPTIONS is stored as a callable (.format) per kitty convention; the first
# .format call below fills in the shared {config_help} boilerplate.
OPTIONS = '''
--mode
type=choices
choices=file,files,save-file,dir,save-dir,dirs,save-files
default=file
The type of object(s) to select


--file-filter
type=list
A list of filters to restrict the displayed files. Can be either mimetypes, or glob style patterns. Can be specified multiple times.
The syntax is :code:`type:expression:Descriptive Name`.
For example: :code:`mime:image/png:Images` and :code:`mime:image/gif:Images` and :code:`glob:*.[tT][xX][Tt]:Text files`.
Note that glob patterns are case-sensitive. The mimetype specification is treated as a glob expressions as well, so you can,
for example, use :code:`mime:text/*` to match all text files. The first filter in the list will be applied by default. Use a filter
such as :code:`glob:*:All` to match all files. Note that filtering only applies to files, not directories.


--suggested-save-file-name
A suggested name when picking a save file.


--suggested-save-file-path
Path to an existing file to use as the save file.


--title
Window title to use for this chooser


--display-title
type=bool-set
Show the window title at the top, useful when this kitten is used in an
OS window without a title bar.


--override -o
type=list
Override individual configuration options, can be specified multiple times.
Syntax: :italic:`name=value`.


--config
type=list
completion=type:file ext:conf group:"Config files" kwds:none,NONE
{config_help}


--write-output-to
Path to a file to which the output is written in addition to STDOUT.


--output-format
choices=text,json,shell,shell-relative
default=text
The format in which to write the output. The :code:`text` format is absolute paths separated by newlines.
The :code:`shell` format is quoted absolute paths separated by spaces, quoting is done only if needed. The
:code:`shell-relative` format is the same as :code:`shell` except it returns paths relative to the starting
directory. Note that when invoked from a mapping, this option is ignored,
and either text or shell format is used automatically based on whether the cursor is at a shell prompt or not.


--write-pid-to
Path to a file to which to write the process ID (PID) of this process.


--clear-cache
type=bool-set
Clear the caches used by this kitten.
'''.format(config_help=CONFIG_HELP.format(conf_name='choose-files', appname=appname)).format
help_text = '''\
Select one or more files, quickly, using fuzzy finding, by typing just a few characters from
the file name. Browse matching files, using the arrow keys to navigate matches and press :kbd:`Enter`
to select. The :kbd:`Tab` key can be used to change to a sub-folder. See the :doc:`online docs </kittens/choose-files>`
for full details.
'''


if __name__ == '__main__':
    main(sys.argv)
elif __name__ == '__doc__':
    # kitty imports this module with __name__ == '__doc__' to collect CLI docs
    cd = sys.cli_docs  # type: ignore
    cd['usage'] = usage
    cd['options'] = OPTIONS
    cd['help_text'] = help_text
    cd['short_desc'] = 'Choose files, fast'
    cd['args_completion'] = CompletionSpec.from_string('type:directory')
elif __name__ == '__conf__':
    # and with __name__ == '__conf__' to obtain the options definition
    sys.options_definition = definition  # type: ignore
| {
"repo_id": "kovidgoyal/kitty",
"file_path": "kittens/choose_files/main.py",
"license": "GNU General Public License v3.0",
"lines": 236,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
kovidgoyal/kitty:kittens/desktop_ui/main.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import sys
from kitty.conf.types import Definition
# Configuration definition for the desktop-ui kitten.
definition = Definition(
    # NOTE(review): this name looks like a copy/paste slip from the
    # choose_files kitten — should it be '!kittens.desktop_ui'? Confirm
    # against kitty's conf generation before changing.
    '!kittens.choose_files',
)

agr = definition.add_group
egr = definition.end_group
opt = definition.add_option
map = definition.add_map
mma = definition.add_mouse_map

agr('Appearance')
opt('color_scheme', 'no-preference', choices=('no-preference', 'dark', 'light'), long_text='''\
The color scheme for your system. This sets the initial value of the color scheme. It can be changed subsequently
by using :code:`kitten desktop-ui color-scheme`.
''')
opt('accent_color', 'cyan', long_text='The RGB accent color for your system, can be specified as a color name or in hexadecimal format.')
opt('contrast', 'normal', choices=('normal', 'high'), long_text='The preferred contrast level.')
opt('file_chooser_size', '', long_text='''
The size in lines and columns of the file chooser popup window. By default it is full screen. For example:
:code:`file_chooser_size 25 80` will cause the popup to be of size 25 lines and 80 columns. Note that if you
use this option, depending on the compositor you are running, the popup window may not be properly modal.
''')
opt('+file_chooser_kitty_conf', '',
    long_text='Path to config file to use for kitty when drawing the file chooser window. Can be specified multiple times. By default, the'
    ' normal kitty.conf is used. Relative paths are resolved with respect to the kitty config directory.'
)
opt('+file_chooser_kitty_override', '', long_text='Override individual kitty configuration options, for the file chooser window.'
    ' Can be specified multiple times. Syntax: :italic:`name=value`. For example: :code:`font_size=20`.'
)
egr()
def main(args: list[str]) -> None:
    # No standalone CLI mode; invoked via `kitten desktop-ui`.
    raise SystemExit('This must be run as kitten desktop-ui')
if __name__ == '__main__':
    main(sys.argv)
elif __name__ == '__conf__':
    # kitty imports this module with __name__ == '__conf__' to obtain the
    # options definition for config parsing and documentation generation.
    sys.options_definition = definition  # type: ignore
| {
"repo_id": "kovidgoyal/kitty",
"file_path": "kittens/desktop_ui/main.py",
"license": "GNU General Public License v3.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
kovidgoyal/kitty:kitty_tests/panels.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
import shlex
import subprocess
def r(msg: str, cmdline: str) -> None:
    """Interactively offer to run one manual panel test.

    Prompts the tester, and on a yes answer runs ``kitten`` with *cmdline*.
    Ctrl-C at the prompt aborts the whole test run; Ctrl-C during the
    kitten run just moves on to the next test.
    """
    try:
        answer = input('Test ' + msg + '? (y/n): ').lower()
    except KeyboardInterrupt:
        raise SystemExit(1)
    if answer not in ('y', 'yes'):
        return
    try:
        subprocess.run(['kitten'] + shlex.split(cmdline))
    except KeyboardInterrupt:
        pass
if __name__ == '__main__':
    # Each prompt tells the human tester what to verify by eye.
    # (Fixed typo in the first message: "transpareny" -> "transparency".)
    r('top panel check transparency, no input focus, margins and struts',
      'panel -o background_opacity=0.2 --edge=top --lines=2 --margin-left=50 --margin-right=100')
    r('bottom panel, check struts', 'panel -o background_opacity=0.2 --edge=bottom --lines=2 --margin-left=100 --margin-right=50')
    r('left panel, check struts', 'panel -o background_opacity=0.2 --edge=left --columns=2 --margin-top=50 --margin-bottom=100')
    r('right panel, check struts', 'panel -o background_opacity=0.2 --edge=right --columns=2 --margin-top=50 --margin-bottom=100')
    r('background, check transparency and margins and no input focus',
      'panel -o background_opacity=0.2 --edge=background --margin-top=50 --margin-bottom=50 --margin-left=100 --margin-right=100')
    r('quake, check transparency and focus on show/re-show', 'quick-access-terminal')
| {
"repo_id": "kovidgoyal/kitty",
"file_path": "kitty_tests/panels.py",
"license": "GNU General Public License v3.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
labmlai/annotated_deep_learning_paper_implementations:labml_nn/transformers/flash/test.py | """
### Test Flash Attention Implementation
This is the code to test and measure performance of our flash attention implementation
"""
import torch
import triton
from labml import logger, monit
from labml_nn.transformers.flash import attention
HI_PRES_TORCH = torch.float32  # dtype used for the high-precision reference softmax
@torch.no_grad()
def _calc_abs_rel_error(a: torch.Tensor, b: torch.Tensor, atol=1e-2):
"""
#### Calculate absolute and relative error for reporting
"""
d = (a - b).abs()
max_abs = d.max()
d = (d - atol).clamp(min=0)
d = d / b.abs()
max_rel = d.max()
return max_abs.cpu().item(), max_rel.cpu().item()
def test_fwd_bwd(batch_size, n_heads, k_heads, q_seq_len, kv_seq_len, d_head, causal, dtype, device):
    """
    #### Compare our implementation with naive PyTorch attention

    Builds random Q/K/V (GQA layout: `n_heads` query heads sharing `k_heads`
    key/value heads), computes reference attention in plain PyTorch, then
    checks the Triton kernel's forward output and all three input gradients
    against it. Requires a CUDA device.
    """
    with monit.section(f'Init {q_seq_len} {kv_seq_len} {d_head}'):
        torch.manual_seed(20)
        q = (torch.empty((batch_size, n_heads, q_seq_len, d_head),
                         dtype=dtype, device=device).normal_(mean=0.0, std=0.5).requires_grad_())
        k = (torch.empty((batch_size, k_heads, kv_seq_len, d_head),
                         dtype=dtype, device=device).normal_(mean=0.0, std=0.5).requires_grad_())
        v = (torch.empty((batch_size, k_heads, kv_seq_len, d_head),
                         dtype=dtype, device=device).normal_(mean=0.0, std=0.5).requires_grad_())
        sm_scale = d_head ** -0.5
        d_out = torch.randn_like(q)
        # reference implementation
        # Lower-triangular mask used for causal attention
        mask = torch.tril(torch.ones((q_seq_len, kv_seq_len), device=device, dtype=torch.bool))
    torch.cuda.synchronize()
    with monit.section('Pytorch'):
        # Group query heads with their shared KV head: view q as
        # (batch, k_heads, groups, q_len, d) and broadcast k over the groups axis
        p = torch.matmul(q.view(batch_size, k_heads, -1, q_seq_len, d_head),
                         k.transpose(2, 3)[:, :, None, :, :]) * sm_scale
        if causal:
            p[:, :, :, ~mask] = float("-inf")
        # Softmax in float32 for numerical stability, then back to the test dtype
        p = torch.softmax(p.to(HI_PRES_TORCH), dim=-1).to(dtype)
        ref_out = torch.matmul(p, v[:, :, None, :, :])
        ref_out = ref_out.view(q.shape)
        ref_out.backward(d_out)
        # Stash reference gradients and clear them so the Triton pass starts fresh
        ref_dv, v.grad = v.grad.clone(), None
        ref_dk, k.grad = k.grad.clone(), None
        ref_dq, q.grad = q.grad.clone(), None
    torch.cuda.synchronize()
    with monit.section('Triton'):
        assert q.dtype == dtype
        tri_out = attention(q, k, v, causal, sm_scale).to(dtype)
        monit.progress(0.5)
        tri_out.backward(d_out)
        monit.progress(0.9)
        tri_dv, v.grad = v.grad.clone(), None  # type: ignore
        tri_dk, k.grad = k.grad.clone(), None  # type: ignore
        tri_dq, q.grad = q.grad.clone(), None  # type: ignore
    torch.cuda.synchronize()
    with monit.section('Test') as s:
        # compare
        passed = True
        if not torch.allclose(tri_out, ref_out, atol=1e-2, rtol=0.):
            abs_err, rel_err = _calc_abs_rel_error(ref_out, tri_out)
            logger.log(('[FAILED]', logger.Text.danger), f' Out mismatch {abs_err} {rel_err}')
            passed = False
        # Gradients tolerate a larger relative error than the forward output
        rtol = 1e-1
        if not torch.allclose(tri_dq, ref_dq, atol=1e-2, rtol=rtol):
            abs_err, rel_err = _calc_abs_rel_error(ref_dq, tri_dq)
            logger.log(('[FAILED]', logger.Text.danger), f' dQ mismatch {abs_err} {rel_err}')
            passed = False
        if not torch.allclose(tri_dv, ref_dv, atol=1e-2, rtol=rtol):
            abs_err, rel_err = _calc_abs_rel_error(ref_dv, tri_dv)
            logger.log(('[FAILED]', logger.Text.danger), f' dV mismatch {abs_err} {rel_err}')
            passed = False
        if not torch.allclose(tri_dk, ref_dk, atol=1e-2, rtol=rtol):
            abs_err, rel_err = _calc_abs_rel_error(ref_dk, tri_dk)
            logger.log(('[FAILED]', logger.Text.danger), f' dK mismatch {abs_err} {rel_err}')
            passed = False
        if passed:
            logger.log('[PASSED]', logger.Text.success)
            s.success = True
        else:
            s.success = False
    torch.cuda.synchronize()
def _perf_triton_fn(*, device, dtype, batch_size, k_heads, n_groups, seq_len, d_head, causal):
    """
    Build a zero-argument closure that runs our flash attention forward pass
    on freshly sampled random tensors (GQA layout: queries have
    ``k_heads * n_groups`` heads, keys/values have ``k_heads``).
    """
    tensor_args = dict(dtype=dtype, device=device, requires_grad=True)
    queries = torch.randn((batch_size, k_heads * n_groups, seq_len, d_head), **tensor_args)
    keys = torch.randn((batch_size, k_heads, seq_len, d_head), **tensor_args)
    values = torch.randn((batch_size, k_heads, seq_len, d_head), **tensor_args)
    scale = d_head ** -0.5

    def run():
        return attention(queries, keys, values, causal, scale)

    return run
def _perf_flash(*, batch_size, k_heads, n_groups, seq_len, d_head, causal, device, dtype):
    """
    Build a zero-argument closure that runs the original flash-attn kernel.

    Note that flash-attn expects tensors in (batch, seq, heads, d_head)
    layout, unlike our implementation.
    """
    q_shape = (batch_size, seq_len, k_heads * n_groups, d_head)
    kv_shape = (batch_size, seq_len, k_heads, d_head)
    queries = torch.randn(q_shape, dtype=dtype, device=device, requires_grad=True)
    keys = torch.randn(kv_shape, dtype=dtype, device=device, requires_grad=True)
    values = torch.randn(kv_shape, dtype=dtype, device=device, requires_grad=True)
    from flash_attn import flash_attn_func

    def run():
        return flash_attn_func(queries, keys, values, causal=causal)

    return run
def measure_performance(name, fn, *, batch_size, k_heads, n_groups, seq_len, d_head, causal, is_bwd: bool):
    """
    ### Measure the speed

    Benchmarks `fn` with `triton.testing.do_bench` and logs runtime and TFLOPS.
    """
    if is_bwd:
        # Run the forward once, then time only the backward pass.
        out = fn()
        grad = torch.randn_like(out)
        fn = lambda: out.backward(grad, retain_graph=True)
    ms = triton.testing.do_bench(fn)
    # Each of the two matmuls (QK^T and PV) costs 2 * M * N * K flops.
    flops_per_matmul = 2.0 * batch_size * k_heads * n_groups * seq_len * seq_len * d_head
    total_flops = 2 * flops_per_matmul
    if causal:
        # Causal masking halves the work.
        total_flops *= 0.5
    if is_bwd:
        total_flops *= 2.5  # 2.0(bwd) + 0.5(recompute)
    tf_ps = total_flops * 1e-12 / (ms * 1e-3)
    logger.log((f'{name}', logger.Text.key), ': ', f'{ms :,.1f}ms', ' ', f'{tf_ps :,.2f}TFps')
def main():
    """Run correctness tests and then benchmark against flash-attn."""
    device = torch.device('cuda:0')
    torch.cuda.set_device(device)
    dtype = torch.float16
    # only works on post-Ampere GPUs right now
    for case in [(1, 4, 1, 2048, 2048, 128, True),
                 (16, 32, 8, 2001, 4001, 128, False),
                 (4, 32, 8, 2048, 1024, 128, False),
                 (4, 32, 8, 2001, 4001, 128, True)]:
        test_fwd_bwd(*case, dtype=dtype, device=device)
    _conf = {
        'batch_size': 16,
        'k_heads': 8,
        'n_groups': 4,
        'seq_len': 2048,
        'd_head': 128,
    }
    for _causal in [False, True]:
        for is_bwd in [False, True]:
            logger.log(f'{"Causal" if _causal else "Non-causal"} {" Backward" if is_bwd else ""}', logger.Text.title)
            # Benchmark the reference and our implementation on the same config.
            for impl_name, builder in [('flash', _perf_flash), ('triton', _perf_triton_fn)]:
                measure_performance(impl_name,
                                    builder(causal=_causal, device=device, dtype=dtype, **_conf),
                                    is_bwd=is_bwd, causal=_causal, **_conf)


if __name__ == "__main__":
    main()
| {
"repo_id": "labmlai/annotated_deep_learning_paper_implementations",
"file_path": "labml_nn/transformers/flash/test.py",
"license": "MIT License",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
labmlai/annotated_deep_learning_paper_implementations:labml_nn/helpers/datasets.py | import random
from pathlib import PurePath, Path
from typing import List, Callable, Dict, Optional
from torchvision import datasets, transforms
import torch
from labml import lab
from labml import monit
from labml.configs import BaseConfigs
from labml.configs import aggregate, option
from labml.utils.download import download_file
from torch.utils.data import DataLoader
from torch.utils.data import IterableDataset, Dataset
def _mnist_dataset(is_train, transform):
    # Download MNIST (if needed) into the project data path and load one split.
    root = str(lab.get_data_path())
    return datasets.MNIST(root, train=is_train, download=True, transform=transform)
class MNISTConfigs(BaseConfigs):
    """
    Configurable MNIST data set.
    Arguments:
        dataset_name (str): name of the data set, ``MNIST``
        dataset_transforms (torchvision.transforms.Compose): image transformations
        train_dataset (torchvision.datasets.MNIST): training dataset
        valid_dataset (torchvision.datasets.MNIST): validation dataset
        train_loader (torch.utils.data.DataLoader): training data loader
        valid_loader (torch.utils.data.DataLoader): validation data loader
        train_batch_size (int): training batch size
        valid_batch_size (int): validation batch size
        train_loader_shuffle (bool): whether to shuffle training data
        valid_loader_shuffle (bool): whether to shuffle validation data
    """
    # The name keys the `aggregate(...)` registration at the bottom of this section
    dataset_name: str = 'MNIST'
    dataset_transforms: transforms.Compose
    train_dataset: datasets.MNIST
    valid_dataset: datasets.MNIST
    train_loader: DataLoader
    valid_loader: DataLoader
    train_batch_size: int = 64
    # Validation can use a larger batch since no gradients are kept
    valid_batch_size: int = 1024
    train_loader_shuffle: bool = True
    valid_loader_shuffle: bool = False
@option(MNISTConfigs.dataset_transforms)
def mnist_transforms():
    """Default MNIST transforms: tensor conversion + normalization."""
    # Mean/std are the commonly used MNIST statistics.
    normalize = transforms.Normalize((0.1307,), (0.3081,))
    return transforms.Compose([transforms.ToTensor(), normalize])
@option(MNISTConfigs.train_dataset)
def mnist_train_dataset(c: MNISTConfigs):
    """MNIST training split with the configured transforms."""
    transform = c.dataset_transforms
    return _mnist_dataset(True, transform)
@option(MNISTConfigs.valid_dataset)
def mnist_valid_dataset(c: MNISTConfigs):
    """MNIST validation split with the configured transforms."""
    transform = c.dataset_transforms
    return _mnist_dataset(False, transform)
@option(MNISTConfigs.train_loader)
def mnist_train_loader(c: MNISTConfigs):
    """Data loader over the MNIST training split."""
    return DataLoader(c.train_dataset,
                      batch_size=c.train_batch_size, shuffle=c.train_loader_shuffle)
@option(MNISTConfigs.valid_loader)
def mnist_valid_loader(c: MNISTConfigs):
    """Data loader over the MNIST validation split."""
    return DataLoader(c.valid_dataset,
                      batch_size=c.valid_batch_size, shuffle=c.valid_loader_shuffle)
# Register all MNIST option functions under the ``MNIST`` dataset name, so
# setting ``dataset_name`` selects the whole group at once.
aggregate(MNISTConfigs.dataset_name, 'MNIST',
          (MNISTConfigs.dataset_transforms, 'mnist_transforms'),
          (MNISTConfigs.train_dataset, 'mnist_train_dataset'),
          (MNISTConfigs.valid_dataset, 'mnist_valid_dataset'),
          (MNISTConfigs.train_loader, 'mnist_train_loader'),
          (MNISTConfigs.valid_loader, 'mnist_valid_loader'))
def _cifar_dataset(is_train, transform):
    # Download CIFAR-10 (if needed) into the project data path and load one split.
    root = str(lab.get_data_path())
    return datasets.CIFAR10(root, train=is_train, download=True, transform=transform)
class CIFAR10Configs(BaseConfigs):
    """
    Configurable CIFAR 10 data set.
    Arguments:
        dataset_name (str): name of the data set, ``CIFAR10``
        dataset_transforms (torchvision.transforms.Compose): image transformations
        train_dataset (torchvision.datasets.CIFAR10): training dataset
        valid_dataset (torchvision.datasets.CIFAR10): validation dataset
        train_loader (torch.utils.data.DataLoader): training data loader
        valid_loader (torch.utils.data.DataLoader): validation data loader
        train_batch_size (int): training batch size
        valid_batch_size (int): validation batch size
        train_loader_shuffle (bool): whether to shuffle training data
        valid_loader_shuffle (bool): whether to shuffle validation data
    """
    # The name keys the aggregate registration at the bottom of this section
    dataset_name: str = 'CIFAR10'
    dataset_transforms: transforms.Compose
    train_dataset: datasets.CIFAR10
    valid_dataset: datasets.CIFAR10
    train_loader: DataLoader
    valid_loader: DataLoader
    train_batch_size: int = 64
    # Validation can use a larger batch since no gradients are kept
    valid_batch_size: int = 1024
    train_loader_shuffle: bool = True
    valid_loader_shuffle: bool = False
# Use the module-level `option` decorator (imported above) for consistency
# with the MNIST section; `CIFAR10Configs.calc` is the older spelling.
@option(CIFAR10Configs.dataset_transforms)
def cifar10_transforms():
    """Default CIFAR-10 transforms: tensor conversion + normalization to [-1, 1]."""
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
# `option` for consistency with the MNIST section above.
@option(CIFAR10Configs.train_dataset)
def cifar10_train_dataset(c: CIFAR10Configs):
    """CIFAR-10 training split with the configured transforms."""
    return _cifar_dataset(True, c.dataset_transforms)
# `option` for consistency with the MNIST section above.
@option(CIFAR10Configs.valid_dataset)
def cifar10_valid_dataset(c: CIFAR10Configs):
    """CIFAR-10 validation split with the configured transforms."""
    return _cifar_dataset(False, c.dataset_transforms)
# `option` for consistency with the MNIST section above.
@option(CIFAR10Configs.train_loader)
def cifar10_train_loader(c: CIFAR10Configs):
    """Data loader over the CIFAR-10 training split."""
    return DataLoader(c.train_dataset,
                      batch_size=c.train_batch_size,
                      shuffle=c.train_loader_shuffle)
# `option` for consistency with the MNIST section above.
@option(CIFAR10Configs.valid_loader)
def cifar10_valid_loader(c: CIFAR10Configs):
    """Data loader over the CIFAR-10 validation split."""
    return DataLoader(c.valid_dataset,
                      batch_size=c.valid_batch_size,
                      shuffle=c.valid_loader_shuffle)
# Use the module-level `aggregate` (as in the MNIST section above) to register
# all CIFAR-10 options under the ``CIFAR10`` dataset name.
aggregate(CIFAR10Configs.dataset_name, 'CIFAR10',
          (CIFAR10Configs.dataset_transforms, 'cifar10_transforms'),
          (CIFAR10Configs.train_dataset, 'cifar10_train_dataset'),
          (CIFAR10Configs.valid_dataset, 'cifar10_valid_dataset'),
          (CIFAR10Configs.train_loader, 'cifar10_train_loader'),
          (CIFAR10Configs.valid_loader, 'cifar10_valid_loader'))
class TextDataset:
    """Text dataset with train/valid/test splits and a token vocabulary.

    The vocabulary (``stoi``/``itos``) is either supplied by the caller or
    built by scanning the train + valid text.
    """
    itos: List[str]  # index -> token
    stoi: Dict[str, int]  # token -> index
    n_tokens: int  # vocabulary size
    train: str  # raw training text
    valid: str  # raw validation text
    # Tokens pre-seeded into the vocabulary; subclasses may override
    standard_tokens: List[str] = []
    @staticmethod
    def load(path: PurePath):
        """Read the whole file into a string."""
        with open(str(path), 'r') as f:
            return f.read()
    def __init__(self, path: PurePath, tokenizer: Callable, train: str, valid: str, test: str, *,
                 n_tokens: Optional[int] = None,
                 stoi: Optional[Dict[str, int]] = None,
                 itos: Optional[List[str]] = None):
        """Store the splits and build (or adopt) the vocabulary.

        If any of `n_tokens`/`stoi`/`itos` is given, all three must be and
        they are used as-is; otherwise the vocabulary is built from the
        train + valid text (tokens are sorted so ids are deterministic).
        """
        self.test = test
        self.valid = valid
        self.train = train
        self.tokenizer = tokenizer
        self.path = path
        if n_tokens or stoi or itos:
            assert stoi and itos and n_tokens
            self.n_tokens = n_tokens
            self.stoi = stoi
            self.itos = itos
        else:
            # Start from the standard tokens, then append new tokens in sorted order
            self.n_tokens = len(self.standard_tokens)
            self.stoi = {t: i for i, t in enumerate(self.standard_tokens)}
            with monit.section("Tokenize"):
                tokens = self.tokenizer(self.train) + self.tokenizer(self.valid)
                tokens = sorted(list(set(tokens)))
            for t in monit.iterate("Build vocabulary", tokens):
                self.stoi[t] = self.n_tokens
                self.n_tokens += 1
            # Inverse mapping: itos[stoi[t]] == t
            self.itos = [''] * self.n_tokens
            for t, n in self.stoi.items():
                self.itos[n] = t
    def text_to_i(self, text: str) -> torch.Tensor:
        """Encode `text` as a tensor of token ids; unknown tokens are dropped."""
        tokens = self.tokenizer(text)
        return torch.tensor([self.stoi[s] for s in tokens if s in self.stoi], dtype=torch.long)
    def __repr__(self):
        # Sizes in millions of characters, plus the source path
        return f'{len(self.train) / 1_000_000 :,.2f}M, {len(self.valid) / 1_000_000 :,.2f}M - {str(self.path)}'
class SequentialDataLoader(IterableDataset):
    """Stateful sequential loader for language modelling.

    The token stream is reshaped to `(steps, batch_size)` so that consecutive
    batches continue the same sequences; targets are inputs shifted by one.
    """
    def __init__(self, *, text: str, dataset: TextDataset,
                 batch_size: int, seq_len: int):
        self.seq_len = seq_len
        data = dataset.text_to_i(text)
        # Trim to a whole number of columns, then lay out as (steps, batch_size)
        n_batch = data.shape[0] // batch_size
        data = data.narrow(0, 0, n_batch * batch_size)
        data = data.view(batch_size, -1).t().contiguous()
        self.data = data
    def __len__(self):
        return self.data.shape[0] // self.seq_len
    def __iter__(self):
        # Iteration is stateful: `idx` tracks the current row offset
        self.idx = 0
        return self
    def __next__(self):
        if self.idx >= self.data.shape[0] - 1:
            raise StopIteration()
        # The final chunk may be shorter than `seq_len`
        seq_len = min(self.seq_len, self.data.shape[0] - 1 - self.idx)
        i = self.idx + seq_len
        data = self.data[self.idx: i]
        target = self.data[self.idx + 1: i + 1]
        self.idx = i
        return data, target
    def __getitem__(self, idx):
        # Random access; NOTE(review): `idx` is a row offset here, not a
        # chunk index like `__len__` suggests — confirm against callers.
        seq_len = min(self.seq_len, self.data.shape[0] - 1 - idx)
        i = idx + seq_len
        data = self.data[idx: i]
        target = self.data[idx + 1: i + 1]
        return data, target
class SequentialUnBatchedDataset(Dataset):
    """Map-style dataset of fixed-length chunks; batching is left to the loader."""
    def __init__(self, *, text: str, dataset: TextDataset,
                 seq_len: int,
                 is_random_offset: bool = True):
        # When enabled, each access jitters the chunk start for augmentation
        self.is_random_offset = is_random_offset
        self.seq_len = seq_len
        self.data = dataset.text_to_i(text)
    def __len__(self):
        # -1 because the target is the input shifted by one token
        return (self.data.shape[0] - 1) // self.seq_len
    def __getitem__(self, idx):
        start = idx * self.seq_len
        assert start + self.seq_len + 1 <= self.data.shape[0]
        if self.is_random_offset:
            # Offset is bounded so the slice (and its shifted target) stay in range
            start += random.randint(0, min(self.seq_len - 1, self.data.shape[0] - (start + self.seq_len + 1)))
        end = start + self.seq_len
        data = self.data[start: end]
        target = self.data[start + 1: end + 1]
        return data, target
class TextFileDataset(TextDataset):
    """A `TextDataset` loaded from a single text file, split 90/10 train/valid."""
    standard_tokens = []

    def __init__(self, path: PurePath, tokenizer: Callable, *,
                 url: Optional[str] = None,
                 filter_subset: Optional[int] = None):
        path = Path(path)
        # Download the file first if it is missing and a URL was given.
        if not path.exists():
            if not url:
                raise FileNotFoundError(str(path))
            download_file(url, path)
        with monit.section("Load data"):
            text = self.load(path)
        if filter_subset:
            # Optionally keep only a prefix of the text (for quick experiments)
            text = text[:filter_subset]
        split_at = int(len(text) * .9)
        super().__init__(path, tokenizer, text[:split_at], text[split_at:], '')
def _test_tiny_shakespeare():
    """Smoke test: download tiny-shakespeare and build a character-level dataset."""
    from labml import lab
    url = 'https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt'
    # Character tokenizer: a string becomes a list of its characters
    _ = TextFileDataset(lab.get_data_path() / 'tiny_shakespeare.txt', list, url=url)


if __name__ == '__main__':
    _test_tiny_shakespeare()
| {
"repo_id": "labmlai/annotated_deep_learning_paper_implementations",
"file_path": "labml_nn/helpers/datasets.py",
"license": "MIT License",
"lines": 242,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
labmlai/annotated_deep_learning_paper_implementations:labml_nn/helpers/device.py | import torch
from labml.configs import BaseConfigs, hyperparams, option
class DeviceInfo:
    """Resolves a concrete `torch.device` from the CUDA preferences.

    Falls back to CPU when CUDA is disabled or unavailable, and to the last
    available GPU when the requested index is out of range.
    """

    def __init__(self, *,
                 use_cuda: bool,
                 cuda_device: int):
        self.use_cuda = use_cuda
        self.cuda_device = cuda_device
        self.cuda_count = torch.cuda.device_count()
        self.is_cuda = self.use_cuda and torch.cuda.is_available()
        if self.is_cuda:
            # Clamp an out-of-range index to the last available device
            index = self.cuda_device if self.cuda_device < self.cuda_count else self.cuda_count - 1
            self.device = torch.device('cuda', index)
        else:
            self.device = torch.device('cpu')

    def __str__(self):
        if not self.is_cuda:
            return "CPU"
        if self.cuda_device < self.cuda_count:
            return f"GPU:{self.cuda_device} - {torch.cuda.get_device_name(self.cuda_device)}"
        # Requested index was clamped; show both the actual and requested index
        return (f"GPU:{self.cuda_count - 1}({self.cuda_device}) "
                f"- {torch.cuda.get_device_name(self.cuda_count - 1)}")
class DeviceConfigs(BaseConfigs):
    r"""
    This is a configurable module to get a single device to train model on.
    It can pick up CUDA devices and it will fall back to CPU if they are not available.
    It has other small advantages such as being able to view the
    actual device name on configurations view of
    `labml app <https://github.com/labmlai/labml/tree/master/app>`_
    Arguments:
        cuda_device (int): The CUDA device number. Defaults to ``0``.
        use_cuda (bool): Whether to use CUDA devices. Defaults to ``True``.
    """
    cuda_device: int = 0
    use_cuda: bool = True
    # Computed by the option functions below
    device_info: DeviceInfo
    device: torch.device
    def __init__(self):
        # `device` is the primary config: evaluating `DeviceConfigs()` yields it
        super().__init__(_primary='device')
@option(DeviceConfigs.device)
def _device(c: DeviceConfigs):
    """The torch device resolved from the device info."""
    info = c.device_info
    return info.device
# Show these in the configurations view but don't treat them as hyper-parameters
hyperparams(DeviceConfigs.cuda_device, DeviceConfigs.use_cuda,
            is_hyperparam=False)
@option(DeviceConfigs.device_info)
def _device_info(c: DeviceConfigs):
    """Resolve device information from the configuration flags."""
    return DeviceInfo(use_cuda=c.use_cuda, cuda_device=c.cuda_device)
| {
"repo_id": "labmlai/annotated_deep_learning_paper_implementations",
"file_path": "labml_nn/helpers/device.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
labmlai/annotated_deep_learning_paper_implementations:labml_nn/helpers/metrics.py | import dataclasses
from abc import ABC
import torch
from labml import tracker
class StateModule:
    """Base class for objects with per-epoch state (e.g. metrics).

    The trainer owns the state objects: it calls :meth:`create_state` once and
    injects the state back with :meth:`set_state` before each use.
    """
    def __init__(self):
        pass
    def create_state(self) -> any:
        """Create a fresh state object for this module."""
        raise NotImplementedError
    def set_state(self, data: any):
        """Attach a previously created state object."""
        raise NotImplementedError
    def on_epoch_start(self):
        """Called at the start of every epoch."""
        raise NotImplementedError
    def on_epoch_end(self):
        """Called at the end of every epoch."""
        raise NotImplementedError
class Metric(StateModule, ABC):
    """A state module that can also report its value to the tracker."""
    def track(self):
        """Log the metric's current value (no-op by default)."""
        pass
@dataclasses.dataclass
class AccuracyState:
    """Running tally of evaluated samples and correct predictions."""
    samples: int = 0
    correct: int = 0

    def reset(self):
        """Zero both counters (called at the start of an epoch)."""
        self.correct = 0
        self.samples = 0
class Accuracy(Metric):
    """Classification accuracy over logits, skipping `ignore_index` targets."""
    data: AccuracyState

    def __init__(self, ignore_index: int = -1):
        super().__init__()
        self.ignore_index = ignore_index

    def __call__(self, output: torch.Tensor, target: torch.Tensor):
        # Flatten to (N, n_classes) logits and (N,) labels
        output = output.view(-1, output.shape[-1])
        target = target.view(-1)
        pred = output.argmax(dim=-1)
        # Only positions whose target is not the ignored label are counted
        valid = target != self.ignore_index
        self.data.correct += (pred.eq(target) & valid).sum().item()
        self.data.samples += valid.sum().item()

    def create_state(self):
        return AccuracyState()

    def set_state(self, data: any):
        self.data = data

    def on_epoch_start(self):
        self.data.reset()

    def on_epoch_end(self):
        self.track()

    def track(self):
        # Guard against division by zero before anything was accumulated
        if self.data.samples == 0:
            return
        tracker.add("accuracy.", self.data.correct / self.data.samples)
class AccuracyDirect(Accuracy):
    """Accuracy for outputs that are already class indices (no argmax).

    NOTE(review): unlike the parent, this does not skip `ignore_index`
    targets — confirm that is intended at the call sites.
    """
    data: AccuracyState

    def __call__(self, output: torch.Tensor, target: torch.Tensor):
        output, target = output.view(-1), target.view(-1)
        self.data.samples += len(target)
        self.data.correct += output.eq(target).sum().item()
| {
"repo_id": "labmlai/annotated_deep_learning_paper_implementations",
"file_path": "labml_nn/helpers/metrics.py",
"license": "MIT License",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
labmlai/annotated_deep_learning_paper_implementations:labml_nn/helpers/optimizer.py | from typing import Tuple
import torch
from labml import tracker
from labml.configs import BaseConfigs, option, meta_config
class OptimizerConfigs(BaseConfigs):
    r"""
    This creates a configurable optimizer.
    Arguments:
        learning_rate (float): Learning rate of the optimizer. Defaults to ``0.01``.
        momentum (float): Momentum of the optimizer. Defaults to ``0.5``.
        parameters: Model parameters to optimize.
        d_model (int): Embedding size of the model (for Noam optimizer).
        betas (Tuple[float, float]): Betas for Adam optimizer. Defaults to ``(0.9, 0.999)``.
        eps (float): Epsilon for Adam/RMSProp optimizers. Defaults to ``1e-8``.
        step_factor (int): Step factor for Noam optimizer. Defaults to ``1024``.
    Also there is a better (more options) implementation in ``labml_nn``.
    `We recommend using that <https://nn.labml.ai/optimizers/configs.html>`_.
    """
    optimizer: torch.optim.Adam
    learning_rate: float = 0.01
    momentum: float = 0.5
    parameters: any
    # Only needed by the 'Noam' option
    d_model: int
    betas: Tuple[float, float] = (0.9, 0.999)
    eps: float = 1e-8
    step_factor: int = 1024
    def __init__(self):
        # `optimizer` is the primary config: evaluating the instance yields it
        super().__init__(_primary='optimizer')
# `parameters` is an implementation detail, not a loggable hyper-parameter
meta_config(OptimizerConfigs.parameters)
@option(OptimizerConfigs.optimizer, 'SGD')
def sgd_optimizer(c: OptimizerConfigs):
    """Plain SGD with momentum."""
    return torch.optim.SGD(c.parameters, lr=c.learning_rate, momentum=c.momentum)
@option(OptimizerConfigs.optimizer, 'Adam')
def adam_optimizer(c: OptimizerConfigs):
    """Adam with the configured learning rate, betas and epsilon."""
    return torch.optim.Adam(c.parameters,
                            lr=c.learning_rate, betas=c.betas, eps=c.eps)
class NoamOpt:
    """Optimizer wrapper applying the Noam (inverse-sqrt warmup) LR schedule."""

    def __init__(self, model_size: int, learning_rate: float, warmup: int, step_factor: int, optimizer):
        self.model_size = model_size
        self.learning_rate = learning_rate
        self.warmup = warmup
        self.step_factor = step_factor
        self.optimizer = optimizer
        self._rate = 0

    def rate(self, step):
        """Learning rate at (fractional) `step` of the schedule."""
        # Linear warmup term vs. inverse-sqrt decay term; take whichever is lower
        warm = step * self.warmup ** (-1.5)
        decay = step ** (-0.5)
        factor = self.model_size ** (-0.5) * min(decay, warm)
        return self.learning_rate * factor

    def step(self):
        # Derive the schedule position from the tracker's global step
        new_rate = self.rate(tracker.get_global_step() / self.step_factor)
        for group in self.optimizer.param_groups:
            group['lr'] = new_rate
        self._rate = new_rate
        self.optimizer.step()

    def zero_grad(self):
        self.optimizer.zero_grad()
@option(OptimizerConfigs.optimizer, 'Noam')
def noam_optimizer(c: OptimizerConfigs):
    """Adam wrapped with the Noam schedule; the wrapper drives the LR."""
    # lr=0.0 is a placeholder: `NoamOpt.step` overwrites it every step
    inner = torch.optim.Adam(c.parameters, lr=0.0, betas=c.betas, eps=c.eps)
    return NoamOpt(c.d_model, 1, 2000, c.step_factor, inner)
def _test_noam_optimizer():
    """Plot the Noam learning-rate schedule for a few model-size/warmup settings."""
    import matplotlib.pyplot as plt
    import numpy as np

    # Fixed: `NoamOpt.__init__` takes 5 arguments (model_size, learning_rate,
    # warmup, step_factor, optimizer); the old calls passed only 4 and raised
    # a TypeError. `step_factor` and `optimizer` are unused by `rate`, so
    # dummy values are fine here.
    opts = [NoamOpt(512, 1, 4000, 1, None),
            NoamOpt(512, 1, 8000, 1, None),
            NoamOpt(2048, 1, 2000, 1, None)]
    plt.plot(np.arange(1, 20000), [[opt.rate(i) for opt in opts] for i in range(1, 20000)])
    # Legend labels are `model_size:warmup`; fixed the third label to match
    # the actual option (2048, 2000) above.
    plt.legend(["512:4000", "512:8000", "2048:2000"])
    plt.title("Optimizer")
    plt.show()


if __name__ == '__main__':
    _test_noam_optimizer()
| {
"repo_id": "labmlai/annotated_deep_learning_paper_implementations",
"file_path": "labml_nn/helpers/optimizer.py",
"license": "MIT License",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
labmlai/annotated_deep_learning_paper_implementations:labml_nn/helpers/schedule.py | from typing import Tuple, List
class Schedule:
    """A value that varies as a function of the training progress `x`."""

    def __call__(self, x):
        raise NotImplementedError()


class Flat(Schedule):
    """A constant schedule: the same value everywhere."""

    def __init__(self, value):
        self.__value = value

    def __call__(self, x):
        # `x` is ignored
        return self.__value

    def __str__(self):
        return f"Schedule({self.__value})"


class Dynamic(Schedule):
    """A schedule whose value can be changed externally via `update`."""

    def __init__(self, value):
        self.__value = value

    def __call__(self, x):
        return self.__value

    def update(self, value):
        """Replace the current value."""
        self.__value = value

    def __str__(self):
        return "Dynamic"


class Piecewise(Schedule):
    """
    ## Piecewise schedule
    """

    def __init__(self, endpoints: List[Tuple[float, float]], outside_value: float = None):
        """
        ### Initialize

        `endpoints` is a list of `(x, y)` pairs sorted by `x`. Values between
        endpoints are linearly interpolated; `x` outside the covered range
        maps to `outside_value`.
        """
        # `(x, y)` pairs should be sorted
        xs = [pt[0] for pt in endpoints]
        assert xs == sorted(xs)
        self._endpoints = endpoints
        self._outside_value = outside_value

    def __call__(self, x):
        """
        ### Find `y` for given `x`
        """
        # Walk consecutive endpoint pairs and interpolate inside the segment
        for (x1, y1), (x2, y2) in zip(self._endpoints, self._endpoints[1:]):
            if x1 <= x < x2:
                t = float(x - x1) / (x2 - x1)
                return y1 + t * (y2 - y1)
        # `x` falls outside every segment
        return self._outside_value

    def __str__(self):
        endpoints = ", ".join([f"({e[0]}, {e[1]})" for e in self._endpoints])
        return f"Schedule[{endpoints}, {self._outside_value}]"


class RelativePiecewise(Piecewise):
    """Piecewise schedule whose x-coordinates are fractions of `total_steps`."""

    def __init__(self, relative_endpoits: List[Tuple[float, float]], total_steps: int):
        absolute = []
        for rel_x, y in relative_endpoits:
            step = int(total_steps * rel_x)
            assert step >= 0
            absolute.append((step, y))
        # Past the final endpoint, hold its value
        super().__init__(absolute, outside_value=relative_endpoits[-1][1])
| {
"repo_id": "labmlai/annotated_deep_learning_paper_implementations",
"file_path": "labml_nn/helpers/schedule.py",
"license": "MIT License",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
labmlai/annotated_deep_learning_paper_implementations:labml_nn/helpers/trainer.py | import signal
import typing
from typing import Dict, List, Callable
from typing import Optional, Tuple, Any, Collection
import torch.optim
import torch.optim
import torch.utils.data
import torch.utils.data
from labml import tracker, logger, monit
from labml.configs import BaseConfigs, meta_config, option
from labml.internal.monitor import Loop
from labml.logger import Text
from torch import nn
from .device import DeviceConfigs
from .metrics import StateModule
class TrainingLoopIterator(Collection):
    """Iterator yielding step numbers for the training loop.

    With a fixed `step`, yields `start, start + step, ...` while below
    `total`. With `step=None`, advances an internal counter until `total`
    and yields the tracker's global step instead.
    """

    def __init__(self, start: int, total: int, step: Optional[int]):
        self.start = start
        self.total = total
        self.step = step
        self.i = None

    def __iter__(self):
        self.i = None
        return self

    def __next__(self):
        if self.step is None:
            self.i = 0 if self.i is None else self.i + 1
        else:
            self.i = self.start if self.i is None else self.i + self.step
        if self.i >= self.total:
            raise StopIteration()
        # With no explicit step, the position comes from the global tracker
        if self.step is None:
            return tracker.get_global_step()
        return self.i

    def __len__(self) -> int:
        if self.step is None:
            return self.total
        return (self.total - self.start) // self.step

    def __contains__(self, x: object) -> bool:
        # Required by `Collection`; membership is never actually used
        return False
class TrainingLoop:
    """Iterable over training steps with periodic logging and SIGINT handling.

    Wraps a `labml` monitored loop: flushes tracker stats and starts new log
    lines at the configured step intervals, and can optionally delay a
    keyboard interrupt until the current iteration completes.
    """
    _iter: Optional[TrainingLoopIterator]
    __loop: Loop
    __signal_received: Optional[Tuple[Any, Any]]
    def __init__(self, *,
                 loop_count: int,
                 loop_step: Optional[int],
                 log_new_line_interval: int,
                 log_write_interval: int,
                 is_loop_on_interrupt: bool):
        self.__loop_count = loop_count
        self.__loop_step = loop_step
        self.__log_new_line_interval = log_new_line_interval
        self.__log_write_interval = log_write_interval
        self.__last_write_step = 0
        self.__last_new_line_step = 0
        self.__last_save_step = 0
        self.__signal_received = None
        self.__is_loop_on_interrupt = is_loop_on_interrupt
        self._iter = None
    def __iter__(self):
        # (Re)start from the tracker's current global step
        self._iter = TrainingLoopIterator(tracker.get_global_step(),
                                          self.__loop_count,
                                          self.__loop_step)
        self.__loop = monit.loop(typing.cast(Collection, self._iter))
        iter(self.__loop)
        try:
            self.old_handler = signal.signal(signal.SIGINT, self.__handler)
        except ValueError:
            # signal.signal only works in the main thread; skip elsewhere
            pass
        return self
    @property
    def idx(self):
        """Loop progress as an epoch-like index (position / loop step)."""
        if not self._iter:
            return 0
        if not self._iter.i:
            return 0
        if self.__loop_step is None:
            return self._iter.i
        return self._iter.i / self.__loop_step
    def __finish(self):
        # Restore the original SIGINT handler and flush pending stats
        try:
            signal.signal(signal.SIGINT, self.old_handler)
        except ValueError:
            pass
        tracker.save()
        tracker.new_line()
    def __next__(self):
        # A delayed interrupt stops the loop at the iteration boundary
        if self.__signal_received is not None:
            logger.log('\nKilling Loop.', Text.danger)
            monit.finish_loop()
            self.__finish()
            raise StopIteration("SIGINT")
        try:
            global_step = next(self.__loop)
        except StopIteration as e:
            self.__finish()
            raise e
        tracker.set_global_step(global_step)
        # Periodically flush tracked stats and break the log line
        if global_step - self.__last_write_step >= self.__log_write_interval:
            tracker.save()
            self.__last_write_step = global_step
        if global_step - self.__last_new_line_step >= self.__log_new_line_interval:
            tracker.new_line()
            self.__last_new_line_step = global_step
        return global_step
    def __handler(self, sig, frame):
        # Pass second interrupt without delaying
        if self.__signal_received is not None:
            logger.log('\nSIGINT received twice. Stopping...', Text.danger)
            self.old_handler(*self.__signal_received)
            return
        if self.__is_loop_on_interrupt:
            # Store the interrupt signal for later
            self.__signal_received = (sig, frame)
            logger.log('\nSIGINT received. Delaying KeyboardInterrupt.', Text.danger)
        else:
            self.__finish()
            logger.log('Killing loop...', Text.danger)
            self.old_handler(sig, frame)
    def __str__(self):
        return "LabTrainingLoop"
class TrainingLoopConfigs(BaseConfigs):
    r"""
    This is a configurable training loop. You can extend this class for your configurations
    if it involves a training loop.
    >>> for step in conf.training_loop:
    >>>     ...
    Arguments:
        loop_count (int): Total number of steps. Defaults to ``10``.
        loop_step (int): Number of steps to increment per iteration. Defaults to ``1``.
        log_new_line_interval (int): The interval (in steps) to print a new line to the screen.
            Defaults to ``1``.
        log_write_interval (int): The interval (in steps) to call :func:`labml.tracker.save`.
            Defaults to ``1``.
        is_loop_on_interrupt (bool): Whether to handle keyboard interrupts and wait until a iteration is complete.
            Defaults to ``False``.
    """
    loop_count: int = 10
    loop_step: int = 1
    log_new_line_interval: int = 1
    log_write_interval: int = 1
    is_loop_on_interrupt: bool = False
    # Built by `_loop_configs` below from the values above
    training_loop: TrainingLoop
@option(TrainingLoopConfigs.training_loop)
def _loop_configs(c: TrainingLoopConfigs):
    """Build the training loop from the configuration values."""
    kwargs = dict(loop_count=c.loop_count,
                  loop_step=c.loop_step,
                  log_new_line_interval=c.log_new_line_interval,
                  log_write_interval=c.log_write_interval,
                  is_loop_on_interrupt=c.is_loop_on_interrupt)
    return TrainingLoop(**kwargs)
# Loop/logging settings are implementation details, not hyper-parameters
meta_config(TrainingLoopConfigs.loop_step,
            TrainingLoopConfigs.loop_count,
            TrainingLoopConfigs.log_new_line_interval,
            TrainingLoopConfigs.log_write_interval,
            TrainingLoopConfigs.is_loop_on_interrupt)
class ModeState:
    """Tracks training/optimization flags with nested, revertible overrides."""

    def __init__(self):
        self._rollback_stack = []
        self.is_train = False
        self.is_optimize = False

    def _enter(self, mode: Dict[str, any]):
        # Record the current values so `_exit` can restore them; `None`
        # entries mean "leave this flag unchanged".
        previous = {}
        for name, value in mode.items():
            if value is None:
                continue
            previous[name] = getattr(self, name)
            setattr(self, name, value)
        self._rollback_stack.append(previous)
        return len(self._rollback_stack)

    def _exit(self, n: int):
        # Overrides must be exited in LIFO order
        assert n == len(self._rollback_stack)
        previous = self._rollback_stack.pop()
        for name, value in previous.items():
            setattr(self, name, value)

    def update(self, *,
               is_train: Optional[bool] = None,
               is_optimize: Optional[bool] = None):
        """Context manager that temporarily overrides the given flags."""
        return Mode(self,
                    is_train=is_train,
                    is_optimize=is_optimize)
class Mode:
    """Context manager that applies a temporary `ModeState` override."""

    def __init__(self, mode: ModeState, **kwargs: any):
        self.mode = mode
        # Keep only the flags that are actually being overridden
        self.update = {k: v for k, v in kwargs.items() if v is not None}
        self.idx = -1

    def __enter__(self):
        self.idx = self.mode._enter(self.update)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.mode._exit(self.idx)
class Trainer:
    """Runs `step` over a data loader, one (partial) epoch chunk per call.

    An epoch is split into `inner_iterations` chunks so training and
    validation can be interleaved; `BatchIndex` tracks the position.
    """
    def __init__(self, *,
                 name: str,
                 mode: ModeState,
                 data_loader: torch.utils.data.DataLoader,
                 inner_iterations: int,
                 state_modules: List[StateModule],
                 is_track_time: bool,
                 step: Callable[[any, 'BatchIndex'], None]):
        self.is_track_time = is_track_time
        self.mode = mode
        self.name = name
        self.step = step
        self.state_modules = state_modules
        self.__iterable = None
        # One state object per module; re-injected before every chunk
        self.__states = [sm.create_state() for sm in self.state_modules]
        self.inner_iterations = inner_iterations
        self.data_loader = data_loader
        self._batch_index = BatchIndex(len(self.data_loader), self.inner_iterations)
    def set_data_loader(self, data_loader: torch.utils.data.DataLoader):
        """Swap the data loader and restart the iteration state."""
        self.data_loader = data_loader
        self._batch_index = BatchIndex(len(data_loader), self.inner_iterations)
        self.__iterable = None
    def __call__(self):
        # Re-attach the per-module states before running
        for sm, s in zip(self.state_modules, self.__states):
            sm.set_state(s)
        if self.__iterable is None or self._batch_index.completed:
            # Start a fresh epoch
            self.__iterable = iter(self.data_loader)
            self._batch_index.reset(len(self.data_loader), self.inner_iterations)
            for sm in self.state_modules:
                sm.on_epoch_start()
        # Gradients are only needed while training
        with torch.set_grad_enabled(self.mode.is_train):
            self.__iterate()
        if self._batch_index.completed:
            for sm in self.state_modules:
                sm.on_epoch_end()
    def __iterate(self):
        # Run batches until the current inner-iteration chunk is complete
        with monit.section(self.name, is_partial=True, is_track=self.is_track_time):
            if self._batch_index.idx == 0:
                monit.progress(0)
            while not self._batch_index.iteration_completed:
                batch = next(self.__iterable)
                self.step(batch, self._batch_index)
                self._batch_index.step()
                monit.progress(self._batch_index.epoch_progress)
        self._batch_index.step_inner()
class BatchIndex:
    """Tracks the position within an epoch that is split into inner iterations."""
    idx: int  # batches processed so far in this epoch
    total: int  # number of batches per epoch
    iteration: int  # inner iterations completed
    total_iterations: int  # inner iterations per epoch

    def __init__(self, total: int, total_iterations: int):
        self.total = total
        self.total_iterations = total_iterations

    def is_interval(self, interval: int):
        """Whether the current batch lies on an `interval` boundary (or is last)."""
        if interval <= 0:
            return False
        if self.idx + 1 == self.total:
            return True
        return (self.idx + 1) % interval == 0

    @property
    def is_last(self):
        return self.idx + 1 == self.total

    @property
    def completed(self):
        return self.iteration >= self.total_iterations

    @property
    def iteration_completed(self):
        # // is important so that the last step happens on the last iteration
        return self.idx >= (self.iteration + 1) * self.total // self.total_iterations

    @property
    def epoch_progress(self):
        return self.idx / self.total

    def step(self):
        self.idx += 1

    def step_inner(self):
        self.iteration += 1

    def reset(self, total: int, total_iterations: int):
        self.idx = 0
        self.iteration = 0
        self.total = total
        self.total_iterations = total_iterations
class TrainValidConfigs(TrainingLoopConfigs):
    r"""
    This is a configurable module that you can extend for experiments that involve a
    training and validation datasets (i.e. most DL experiments).
    Arguments:
        epochs (int): Number of epochs to train on. Defaults to ``10``.
        train_loader (torch.utils.data.DataLoader): Training data loader.
        valid_loader (torch.utils.data.DataLoader): Training data loader.
        inner_iterations (int): Number of times to switch between training and validation
            within an epoch. Defaults to ``1``.
    You can override ``init``, ``step`` functions. There is also a ``sample`` function
    that you can override to generate samples ever time it switches between training and validation.
    """
    state_modules: List[StateModule]
    mode: ModeState
    epochs: int = 10
    trainer: Trainer
    validator: Trainer
    train_loader: torch.utils.data.DataLoader
    valid_loader: torch.utils.data.DataLoader
    # One loop iteration per epoch (see `_data_loop_count`)
    loop_count = '_data_loop_count'
    loop_step = None
    inner_iterations: int = 1
    is_track_time: bool = False
    def init(self):
        """Hook for one-time setup before the loop starts."""
        pass
    def step(self, batch: Any, batch_idx: BatchIndex):
        """Process one batch; must be implemented by subclasses."""
        raise NotImplementedError
    def run_step(self):
        """Run one epoch, alternating sampling, training and validation."""
        for i in range(self.inner_iterations):
            with tracker.namespace('sample'):
                self.sample()
            with self.mode.update(is_train=True):
                with tracker.namespace('train'):
                    self.trainer()
            if self.validator:
                with tracker.namespace('valid'):
                    self.validator()
            tracker.save()
    def run(self):
        with monit.section("Initialize"):
            self.init()
            # Touch lazily-computed configs so they're built before the loop
            _ = self.validator
            _ = self.trainer
        for _ in self.training_loop:
            self.run_step()
    def sample(self):
        """Hook to generate samples between training and validation."""
        pass
@option(TrainValidConfigs.trainer)
def _default_trainer(c: TrainValidConfigs):
    """Trainer over the training data loader."""
    return Trainer(name='Train', mode=c.mode,
                   data_loader=c.train_loader,
                   inner_iterations=c.inner_iterations,
                   state_modules=c.state_modules,
                   is_track_time=c.is_track_time,
                   step=c.step)
@option(TrainValidConfigs.validator)
def _default_validator(c: TrainValidConfigs):
    """Trainer over the validation data loader."""
    return Trainer(name='Valid', mode=c.mode,
                   data_loader=c.valid_loader,
                   inner_iterations=c.inner_iterations,
                   state_modules=c.state_modules,
                   is_track_time=c.is_track_time,
                   step=c.step)
@option(TrainValidConfigs.loop_count)
def _data_loop_count(c: TrainValidConfigs):
    """The training loop runs once per epoch."""
    return c.epochs
class SimpleTrainValidConfigs(TrainValidConfigs):
    r"""
    This is a configurable module that works for many standard DL experiments.
    Arguments:
        model: A PyTorch model.
        optimizer: A PyTorch optimizer to update model.
        device: The device to train the model on. This defaults to a configurable device
        loss_function: A function to calculate the loss. This should accept ``model_output, target`` as
            arguments.
        update_batches (int): Number of batches to accumulate before taking an optimizer step.
            Defaults to ``1``.
        log_save_batches (int): How often to call :func:`labml.tracker.save`.
    """
    optimizer: torch.optim.Adam
    model: nn.Module
    device: torch.device = DeviceConfigs()
    loss_func: nn.Module
    update_batches: int = 1
    log_save_batches: int = 1
    state_modules: List[StateModule] = []
    def init(self):
        pass
    def step(self, batch: Any, batch_idx: BatchIndex):
        """One train/valid step: forward, loss, and (in training) backward + optimize."""
        # Toggle train/eval mode (affects dropout, batch-norm, etc.)
        self.model.train(self.mode.is_train)
        data, target = batch[0].to(self.device), batch[1].to(self.device)
        if self.mode.is_train:
            # Global step counts samples, not batches
            tracker.add_global_step(len(data))
        with monit.section("model"):
            output = self.model(data)
        loss = self.loss_func(output, target)
        tracker.add("loss.", loss)
        if self.mode.is_train:
            with monit.section('backward'):
                loss.backward()
            # Gradient accumulation: only step every `update_batches` batches
            if batch_idx.is_interval(self.update_batches):
                with monit.section('optimize'):
                    self.optimizer.step()
                self.optimizer.zero_grad()
            if batch_idx.is_interval(self.log_save_batches):
                tracker.save()
# Mark `update_batches` as a meta config option (not part of the
# experiment's tracked hyper-parameters) — presumably; confirm against
# labml's meta_config semantics.
meta_config(SimpleTrainValidConfigs.update_batches,
            )
@option(SimpleTrainValidConfigs.optimizer)
def _default_optimizer(c: SimpleTrainValidConfigs):
    """Default optimizer: a configurable `OptimizerConfigs` bound to the model."""
    # Local import avoids a module-level circular dependency.
    from .optimizer import OptimizerConfigs
    opt_conf = OptimizerConfigs()
    opt_conf.parameters = c.model.parameters()
    return opt_conf
| {
"repo_id": "labmlai/annotated_deep_learning_paper_implementations",
"file_path": "labml_nn/helpers/trainer.py",
"license": "MIT License",
"lines": 402,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/core/tests/unit_tests/callbacks/test_handle_event.py | """Tests for handle_event and _ahandle_event_for_handler fallback behavior.
Covers the NotImplementedError fallback from on_chat_model_start to on_llm_start.
Handlers must declare `serialized` and `messages` as explicit positional args
(not *args) — see on_chat_model_start docstring for details.
See: https://github.com/langchain-ai/langchain/issues/31576
"""
from typing import Any
from unittest.mock import MagicMock
import pytest
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_core.callbacks.manager import (
_ahandle_event_for_handler,
handle_event,
)
from langchain_core.messages import BaseMessage, HumanMessage
class _FallbackChatHandler(BaseCallbackHandler):
    """Handler that correctly declares the required args but raises NotImplementedError.

    This triggers the fallback to on_llm_start, as documented.
    """

    def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        **kwargs: Any,
    ) -> None:
        # Raising NotImplementedError is the documented signal for the
        # callback manager to retry via on_llm_start.
        raise NotImplementedError

    def on_llm_start(self, *args: Any, **kwargs: Any) -> None:
        pass
class _FallbackChatHandlerAsync(BaseCallbackHandler):
    """Async-compatible handler; raises NotImplementedError for on_chat_model_start."""

    # Ask the async dispatcher to run this handler inline — presumably
    # instead of offloading it; confirm against _ahandle_event_for_handler.
    run_inline = True

    def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        **kwargs: Any,
    ) -> None:
        # Triggers the documented fallback to on_llm_start.
        raise NotImplementedError

    def on_llm_start(self, *args: Any, **kwargs: Any) -> None:
        pass
def test_handle_event_chat_model_start_fallback_to_llm_start() -> None:
    """on_chat_model_start raises NotImplementedError → falls back to on_llm_start."""
    handler = _FallbackChatHandler()
    handler.on_llm_start = MagicMock()  # type: ignore[method-assign]
    handle_event(
        [handler],
        "on_chat_model_start",
        "ignore_chat_model",
        {"name": "test"},
        [[HumanMessage(content="hello")]],
    )
    handler.on_llm_start.assert_called_once()
def test_handle_event_other_event_not_implemented_logs_warning() -> None:
    """Non-chat_model_start events that raise NotImplementedError log a warning."""

    class _Handler(BaseCallbackHandler):
        def on_llm_start(self, *args: Any, **kwargs: Any) -> None:
            raise NotImplementedError

    # Must not raise — the manager is expected to log a warning instead.
    handle_event(
        [_Handler()],
        "on_llm_start",
        "ignore_llm",
        {"name": "test"},
        ["prompt"],
    )
@pytest.mark.asyncio
async def test_ahandle_event_chat_model_start_fallback_to_llm_start() -> None:
    """Async: on_chat_model_start NotImplementedError falls back to on_llm_start."""
    handler = _FallbackChatHandlerAsync()
    handler.on_llm_start = MagicMock()  # type: ignore[method-assign]
    await _ahandle_event_for_handler(
        handler,
        "on_chat_model_start",
        "ignore_chat_model",
        {"name": "test"},
        [[HumanMessage(content="hello")]],
    )
    handler.on_llm_start.assert_called_once()
@pytest.mark.asyncio
async def test_ahandle_event_other_event_not_implemented_logs_warning() -> None:
    """Async: non-chat_model_start events log warning on NotImplementedError."""

    class _Handler(BaseCallbackHandler):
        run_inline = True

        def on_llm_start(self, *args: Any, **kwargs: Any) -> None:
            raise NotImplementedError

    # Must complete without raising; the warning path is exercised.
    await _ahandle_event_for_handler(
        _Handler(),
        "on_llm_start",
        "ignore_llm",
        {"name": "test"},
        ["prompt"],
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/callbacks/test_handle_event.py",
"license": "MIT License",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/anthropic/scripts/check_version.py | """Check version consistency between `pyproject.toml` and `_version.py`.
This script validates that the version defined in pyproject.toml matches the
`__version__` variable in `langchain_anthropic/_version.py`. Intended for use as a
pre-commit hook to prevent version mismatches.
"""
import re
import sys
from pathlib import Path
def get_pyproject_version(pyproject_path: Path) -> str | None:
"""Extract version from `pyproject.toml`."""
content = pyproject_path.read_text(encoding="utf-8")
match = re.search(r'^version\s*=\s*"([^"]+)"', content, re.MULTILINE)
return match.group(1) if match else None
def get_version_py_version(version_path: Path) -> str | None:
"""Extract `__version__` from `_version.py`."""
content = version_path.read_text(encoding="utf-8")
match = re.search(r'^__version__\s*=\s*"([^"]+)"', content, re.MULTILINE)
return match.group(1) if match else None
def main() -> int:
    """Validate version consistency.

    Returns 0 when both version sources exist and agree, 1 otherwise.
    """
    script_dir = Path(__file__).parent
    package_dir = script_dir.parent
    pyproject_path = package_dir / "pyproject.toml"
    version_path = package_dir / "langchain_anthropic" / "_version.py"

    # Both files must exist before we try to parse them.
    for required in (pyproject_path, version_path):
        if not required.exists():
            print(f"Error: {required} not found")  # noqa: T201
            return 1

    pyproject_version = get_pyproject_version(pyproject_path)
    version_py_version = get_version_py_version(version_path)

    if pyproject_version is None:
        print("Error: Could not find version in pyproject.toml")  # noqa: T201
        return 1
    if version_py_version is None:
        print("Error: Could not find __version__ in langchain_anthropic/_version.py")  # noqa: T201
        return 1

    if pyproject_version != version_py_version:
        print("Error: Version mismatch detected!")  # noqa: T201
        print(f" pyproject.toml: {pyproject_version}")  # noqa: T201
        print(f" langchain_anthropic/_version.py: {version_py_version}")  # noqa: T201
        return 1

    print(f"Version check passed: {pyproject_version}")  # noqa: T201
    return 0
# Script entry point (used as a pre-commit hook); exit code signals the result.
if __name__ == "__main__":
    sys.exit(main())
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/anthropic/scripts/check_version.py",
"license": "MIT License",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/partners/openrouter/langchain_openrouter/chat_models.py | """OpenRouter chat models."""
from __future__ import annotations
import json
import warnings
from collections.abc import AsyncIterator, Callable, Iterator, Mapping, Sequence
from operator import itemgetter
from typing import Any, Literal, cast
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import (
LanguageModelInput,
ModelProfile,
ModelProfileRegistry,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
LangSmithParams,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
HumanMessage,
HumanMessageChunk,
InvalidToolCall,
SystemMessage,
SystemMessageChunk,
ToolCall,
ToolMessage,
ToolMessageChunk,
is_data_content_block,
)
from langchain_core.messages.ai import (
InputTokenDetails,
OutputTokenDetails,
UsageMetadata,
)
from langchain_core.messages.block_translators.openai import (
convert_to_openai_data_block,
)
from langchain_core.messages.tool import tool_call_chunk
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
make_invalid_tool_call,
parse_tool_call,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import from_env, get_pydantic_field_names, secret_from_env
from langchain_core.utils.function_calling import (
convert_to_json_schema,
convert_to_openai_tool,
)
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from typing_extensions import Self
from langchain_openrouter.data._profiles import _PROFILES
# Static registry of model profiles bundled with the package data.
_MODEL_PROFILES = cast("ModelProfileRegistry", _PROFILES)

# LangChain-internal kwargs that must not be forwarded to the SDK.
_INTERNAL_KWARGS = frozenset({"ls_structured_output_format"})
def _get_default_model_profile(model_name: str) -> ModelProfile:
    """Return a copy of the registry profile for `model_name` (empty if unknown)."""
    profile = _MODEL_PROFILES.get(model_name)
    # Copy so callers can mutate the result without touching the registry.
    return dict(profile) if profile else {}
class ChatOpenRouter(BaseChatModel):
    """OpenRouter chat model integration.

    OpenRouter is a unified API that provides access to hundreds of models from
    multiple providers (OpenAI, Anthropic, Google, Meta, etc.).

    ???+ info "Setup"
        Install `langchain-openrouter` and set environment variable
        `OPENROUTER_API_KEY`.

        ```bash
        pip install -U langchain-openrouter
        ```

        ```bash
        export OPENROUTER_API_KEY="your-api-key"
        ```

    ??? info "Key init args — completion params"

        | Param | Type | Description |
        | ----- | ---- | ----------- |
        | `model` | `str` | Model name, e.g. `'openai/gpt-4o-mini'`. |
        | `temperature` | `float | None` | Sampling temperature. |
        | `max_tokens` | `int | None` | Max tokens to generate. |

    ??? info "Key init args — client params"

        | Param | Type | Description |
        | ----- | ---- | ----------- |
        | `api_key` | `str | None` | OpenRouter API key. |
        | `base_url` | `str | None` | Base URL for API requests. |
        | `timeout` | `int | None` | Timeout in milliseconds. |
        | `app_url` | `str | None` | App URL for attribution. |
        | `app_title` | `str | None` | App title for attribution. |
        | `max_retries` | `int` | Max retries (default `2`). Set to `0` to disable. |

    ??? info "Instantiate"

        ```python
        from langchain_openrouter import ChatOpenRouter

        model = ChatOpenRouter(
            model="anthropic/claude-sonnet-4-5",
            temperature=0,
            # api_key="...",
            # openrouter_provider={"order": ["Anthropic"]},
        )
        ```

    See https://openrouter.ai/docs for platform documentation.
    """

    client: Any = Field(default=None, exclude=True)
    """Underlying SDK client (`openrouter.OpenRouter`)."""

    openrouter_api_key: SecretStr | None = Field(
        alias="api_key",
        default_factory=secret_from_env("OPENROUTER_API_KEY", default=None),
    )
    """OpenRouter API key."""

    openrouter_api_base: str | None = Field(
        default_factory=from_env("OPENROUTER_API_BASE", default=None),
        alias="base_url",
    )
    """OpenRouter API base URL. Maps to SDK `server_url`."""

    app_url: str | None = Field(
        default_factory=from_env(
            "OPENROUTER_APP_URL",
            default="https://docs.langchain.com/oss",
        ),
    )
    """Application URL for OpenRouter attribution.

    Maps to `HTTP-Referer` header.

    See https://openrouter.ai/docs/app-attribution for details.
    """

    app_title: str | None = Field(
        default_factory=from_env("OPENROUTER_APP_TITLE", default="langchain"),
    )
    """Application title for OpenRouter attribution.

    Maps to `X-Title` header.

    See https://openrouter.ai/docs/app-attribution for details.
    """

    request_timeout: int | None = Field(default=None, alias="timeout")
    """Timeout for requests in milliseconds. Maps to SDK `timeout_ms`."""

    max_retries: int = 2
    """Maximum number of retries.

    Controls the retry backoff window via the SDK's `max_elapsed_time`.
    Set to `0` to disable retries.
    """

    model_name: str = Field(alias="model")
    """The name of the model, e.g. `'anthropic/claude-sonnet-4-5'`."""

    temperature: float | None = None
    """Sampling temperature."""

    max_tokens: int | None = None
    """Maximum number of tokens to generate."""

    max_completion_tokens: int | None = None
    """Maximum number of completion tokens to generate."""

    top_p: float | None = None
    """Nucleus sampling parameter."""

    frequency_penalty: float | None = None
    """Frequency penalty for generation."""

    presence_penalty: float | None = None
    """Presence penalty for generation."""

    seed: int | None = None
    """Random seed for reproducibility."""

    stop: list[str] | str | None = Field(default=None, alias="stop_sequences")
    """Default stop sequences."""

    n: int = Field(default=1, ge=1)
    """Number of chat completions to generate for each prompt."""

    streaming: bool = False
    """Whether to stream the results or not."""

    model_kwargs: dict[str, Any] = Field(default_factory=dict)
    """Any extra model parameters for the OpenRouter API."""

    reasoning: dict[str, Any] | None = None
    """Reasoning settings to pass to OpenRouter.

    Controls how many tokens the model allocates for internal chain-of-thought
    reasoning.

    Accepts an `openrouter.components.OpenResponsesReasoningConfig` or an
    equivalent dict.

    Supported keys:

    - `effort`: Controls reasoning token budget.
        Values: `'xhigh'`, `'high'`, `'medium'`, `'low'`, `'minimal'`, `'none'`.
    - `summary`: Controls verbosity of the reasoning summary returned in the
        response.
        Values: `'auto'`, `'concise'`, `'detailed'`.

    Example: `{"effort": "high", "summary": "auto"}`

    See https://openrouter.ai/docs/guides/best-practices/reasoning-tokens
    """

    openrouter_provider: dict[str, Any] | None = None
    """Provider preferences to pass to OpenRouter.

    Example: `{"order": ["Anthropic", "OpenAI"]}`
    """

    route: str | None = None
    """Route preference for OpenRouter, e.g. `'fallback'`."""

    plugins: list[dict[str, Any]] | None = None
    """Plugins configuration for OpenRouter."""

    # Allow construction by either field name or alias (e.g. `model` / `model_name`).
    model_config = ConfigDict(populate_by_name=True)
    @model_validator(mode="before")
    @classmethod
    def build_extra(cls, values: dict[str, Any]) -> Any:
        """Build extra kwargs from additional params that were passed in.

        Unknown constructor kwargs are moved into `model_kwargs` (with a
        warning); declared fields passed via `model_kwargs` are rejected.
        """
        all_required_field_names = get_pydantic_field_names(cls)
        extra = values.get("model_kwargs", {})
        # Iterate over a copy since we pop from `values` while looping.
        for field_name in list(values):
            if field_name in extra:
                msg = f"Found {field_name} supplied twice."
                raise ValueError(msg)
            if field_name not in all_required_field_names:
                warnings.warn(
                    f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended.""",
                    stacklevel=2,
                )
                extra[field_name] = values.pop(field_name)
        # Declared fields must be passed explicitly, not via model_kwargs.
        invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
        if invalid_model_kwargs:
            msg = (
                f"Parameters {invalid_model_kwargs} should be specified explicitly. "
                f"Instead they were passed in as part of `model_kwargs` parameter."
            )
            raise ValueError(msg)
        values["model_kwargs"] = extra
        return values
    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        """Validate configuration and build the SDK client.

        Raises:
            ValueError: If no API key is set, or `n > 1` with streaming.
            ImportError: If the `openrouter` SDK is not installed.
        """
        if not (self.openrouter_api_key and self.openrouter_api_key.get_secret_value()):
            msg = "OPENROUTER_API_KEY must be set."
            raise ValueError(msg)
        if self.n > 1 and self.streaming:
            msg = "n must be 1 when streaming."
            raise ValueError(msg)
        # Only build a client if one was not injected (e.g. for testing).
        if not self.client:
            try:
                import openrouter  # noqa: PLC0415
                from openrouter.utils import (  # noqa: PLC0415
                    BackoffStrategy,
                    RetryConfig,
                )
            except ImportError as e:
                msg = (
                    "Could not import the `openrouter` Python SDK. "
                    "Please install it with: pip install openrouter"
                )
                raise ImportError(msg) from e
            client_kwargs: dict[str, Any] = {
                "api_key": self.openrouter_api_key.get_secret_value(),
            }
            if self.openrouter_api_base:
                client_kwargs["server_url"] = self.openrouter_api_base
            # Attribution headers (see class field docs).
            if self.app_url:
                client_kwargs["http_referer"] = self.app_url
            if self.app_title:
                client_kwargs["x_title"] = self.app_title
            if self.request_timeout is not None:
                client_kwargs["timeout_ms"] = self.request_timeout
            if self.max_retries > 0:
                # max_retries scales the total backoff window, not a retry
                # count — the SDK retries until max_elapsed_time (ms) elapses.
                client_kwargs["retry_config"] = RetryConfig(
                    strategy="backoff",
                    backoff=BackoffStrategy(
                        initial_interval=500,
                        max_interval=60000,
                        exponent=1.5,
                        max_elapsed_time=self.max_retries * 150_000,
                    ),
                    retry_connection_errors=True,
                )
            self.client = openrouter.OpenRouter(**client_kwargs)
        return self
    @model_validator(mode="after")
    def _set_model_profile(self) -> Self:
        """Set model profile if not overridden."""
        # `profile` is presumably declared on BaseChatModel — confirm.
        if self.profile is None:
            self.profile = _get_default_model_profile(self.model_name)
        return self
    #
    # Serializable class method overrides
    #
    @property
    def lc_secrets(self) -> dict[str, str]:
        """A map of constructor argument names to secret ids (env var names)."""
        return {"openrouter_api_key": "OPENROUTER_API_KEY"}
    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by LangChain."""
        # Always serializable; secrets are masked via `lc_secrets`.
        return True
    #
    # BaseChatModel method overrides
    #
    @property
    def _llm_type(self) -> str:
        """Return type of chat model (identifier used in tracing/serialization)."""
        return "openrouter-chat"
@property
def _identifying_params(self) -> dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model_name,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"streaming": self.streaming,
"reasoning": self.reasoning,
"openrouter_provider": self.openrouter_provider,
"route": self.route,
"model_kwargs": self.model_kwargs,
}
    def _get_ls_params(
        self,
        stop: list[str] | None = None,
        **kwargs: Any,
    ) -> LangSmithParams:
        """Get standard params for tracing."""
        params = self._get_invocation_params(stop=stop, **kwargs)
        ls_params = LangSmithParams(
            ls_provider="openrouter",
            ls_model_name=params.get("model", self.model_name),
            ls_model_type="chat",
            ls_temperature=params.get("temperature", self.temperature),
        )
        if ls_max_tokens := params.get("max_tokens", self.max_tokens):
            ls_params["ls_max_tokens"] = ls_max_tokens
        # Precedence: explicit arg > invocation params > configured default;
        # always normalized to a list for tracing.
        if ls_stop := stop or params.get("stop", None) or self.stop:
            ls_params["ls_stop"] = ls_stop if isinstance(ls_stop, list) else [ls_stop]
        return ls_params
    def _generate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Generate a chat completion for the given messages."""
        if self.streaming:
            # Delegate to the streaming path and aggregate the chunks.
            stream_iter = self._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return generate_from_stream(stream_iter)
        message_dicts, params = self._create_message_dicts(messages, stop)
        # Per-call kwargs override the configured defaults.
        params = {**params, **kwargs}
        _strip_internal_kwargs(params)
        sdk_messages = _wrap_messages_for_sdk(message_dicts)
        response = self.client.chat.send(messages=sdk_messages, **params)
        return self._create_chat_result(response)
    async def _agenerate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: AsyncCallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Asynchronously generate a chat completion for the given messages."""
        if self.streaming:
            # Delegate to the async streaming path and aggregate the chunks.
            stream_iter = self._astream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return await agenerate_from_stream(stream_iter)
        message_dicts, params = self._create_message_dicts(messages, stop)
        # Per-call kwargs override the configured defaults.
        params = {**params, **kwargs}
        _strip_internal_kwargs(params)
        sdk_messages = _wrap_messages_for_sdk(message_dicts)
        response = await self.client.chat.send_async(messages=sdk_messages, **params)
        return self._create_chat_result(response)
    def _stream(  # noqa: C901
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Stream chat completion chunks from the OpenRouter API.

        Yields:
            One `ChatGenerationChunk` per SDK stream event with choices.

        Raises:
            ValueError: If the stream carries an error event.
        """
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs, "stream": True}
        _strip_internal_kwargs(params)
        sdk_messages = _wrap_messages_for_sdk(message_dicts)
        default_chunk_class: type[BaseMessageChunk] = AIMessageChunk
        for chunk in self.client.chat.send(messages=sdk_messages, **params):
            chunk_dict = chunk.model_dump(by_alias=True)
            if not chunk_dict.get("choices"):
                # Events without choices are either errors or keep-alives.
                if error := chunk_dict.get("error"):
                    msg = (
                        f"OpenRouter API returned an error during streaming: "
                        f"{error.get('message', str(error))} "
                        f"(code: {error.get('code', 'unknown')})"
                    )
                    raise ValueError(msg)
                continue
            choice = chunk_dict["choices"][0]
            message_chunk = _convert_chunk_to_message_chunk(
                chunk_dict, default_chunk_class
            )
            generation_info: dict[str, Any] = {}
            # NOTE(review): the metadata block below is nested under the
            # finish_reason check per the "final chunk" comment — confirm.
            if finish_reason := choice.get("finish_reason"):
                generation_info["finish_reason"] = finish_reason
                # Include response-level metadata on the final chunk
                response_model = chunk_dict.get("model")
                generation_info["model_name"] = response_model or self.model_name
                if system_fingerprint := chunk_dict.get("system_fingerprint"):
                    generation_info["system_fingerprint"] = system_fingerprint
                if native_finish_reason := choice.get("native_finish_reason"):
                    generation_info["native_finish_reason"] = native_finish_reason
                if response_id := chunk_dict.get("id"):
                    generation_info["id"] = response_id
                if created := chunk_dict.get("created"):
                    generation_info["created"] = int(created)
                if object_ := chunk_dict.get("object"):
                    generation_info["object"] = object_
            logprobs = choice.get("logprobs")
            if logprobs:
                generation_info["logprobs"] = logprobs
            if generation_info:
                generation_info["model_provider"] = "openrouter"
                message_chunk = message_chunk.model_copy(
                    update={
                        "response_metadata": {
                            **message_chunk.response_metadata,
                            **generation_info,
                        }
                    }
                )
            # Subsequent chunks reuse the concrete chunk class we just saw.
            default_chunk_class = message_chunk.__class__
            generation_chunk = ChatGenerationChunk(
                message=message_chunk, generation_info=generation_info or None
            )
            if run_manager:
                run_manager.on_llm_new_token(
                    generation_chunk.text,
                    chunk=generation_chunk,
                    logprobs=logprobs,
                )
            yield generation_chunk
    async def _astream(  # noqa: C901
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: AsyncCallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        """Asynchronously stream chat completion chunks from the OpenRouter API.

        Yields:
            One `ChatGenerationChunk` per SDK stream event with choices.

        Raises:
            ValueError: If the stream carries an error event.
        """
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs, "stream": True}
        _strip_internal_kwargs(params)
        sdk_messages = _wrap_messages_for_sdk(message_dicts)
        default_chunk_class: type[BaseMessageChunk] = AIMessageChunk
        async for chunk in await self.client.chat.send_async(
            messages=sdk_messages, **params
        ):
            chunk_dict = chunk.model_dump(by_alias=True)
            if not chunk_dict.get("choices"):
                # Events without choices are either errors or keep-alives.
                if error := chunk_dict.get("error"):
                    msg = (
                        f"OpenRouter API returned an error during streaming: "
                        f"{error.get('message', str(error))} "
                        f"(code: {error.get('code', 'unknown')})"
                    )
                    raise ValueError(msg)
                continue
            choice = chunk_dict["choices"][0]
            message_chunk = _convert_chunk_to_message_chunk(
                chunk_dict, default_chunk_class
            )
            generation_info: dict[str, Any] = {}
            # NOTE(review): the metadata block below is nested under the
            # finish_reason check per the "final chunk" comment — confirm.
            if finish_reason := choice.get("finish_reason"):
                generation_info["finish_reason"] = finish_reason
                # Include response-level metadata on the final chunk
                response_model = chunk_dict.get("model")
                generation_info["model_name"] = response_model or self.model_name
                if system_fingerprint := chunk_dict.get("system_fingerprint"):
                    generation_info["system_fingerprint"] = system_fingerprint
                if native_finish_reason := choice.get("native_finish_reason"):
                    generation_info["native_finish_reason"] = native_finish_reason
                if response_id := chunk_dict.get("id"):
                    generation_info["id"] = response_id
                if created := chunk_dict.get("created"):
                    generation_info["created"] = int(created)  # UNIX timestamp
                if object_ := chunk_dict.get("object"):
                    generation_info["object"] = object_
            logprobs = choice.get("logprobs")
            if logprobs:
                generation_info["logprobs"] = logprobs
            if generation_info:
                generation_info["model_provider"] = "openrouter"
                message_chunk = message_chunk.model_copy(
                    update={
                        "response_metadata": {
                            **message_chunk.response_metadata,
                            **generation_info,
                        }
                    }
                )
            # Subsequent chunks reuse the concrete chunk class we just saw.
            default_chunk_class = message_chunk.__class__
            generation_chunk = ChatGenerationChunk(
                message=message_chunk, generation_info=generation_info or None
            )
            if run_manager:
                await run_manager.on_llm_new_token(
                    token=generation_chunk.text,
                    chunk=generation_chunk,
                    logprobs=logprobs,
                )
            yield generation_chunk
    #
    # Internal methods
    #
    @property
    def _default_params(self) -> dict[str, Any]:  # noqa: C901, PLR0912
        """Get the default parameters for calling OpenRouter API.

        Only explicitly-set (non-None) fields are included so the API's own
        defaults apply otherwise; `model_kwargs` entries are merged in first.
        """
        params: dict[str, Any] = {
            "model": self.model_name,
            "stream": self.streaming,
            **self.model_kwargs,
        }
        if self.temperature is not None:
            params["temperature"] = self.temperature
        if self.max_tokens is not None:
            params["max_tokens"] = self.max_tokens
        if self.max_completion_tokens is not None:
            params["max_completion_tokens"] = self.max_completion_tokens
        if self.top_p is not None:
            params["top_p"] = self.top_p
        if self.frequency_penalty is not None:
            params["frequency_penalty"] = self.frequency_penalty
        if self.presence_penalty is not None:
            params["presence_penalty"] = self.presence_penalty
        if self.seed is not None:
            params["seed"] = self.seed
        # n == 1 is the API default, so it is only sent when larger.
        if self.n > 1:
            params["n"] = self.n
        if self.stop is not None:
            params["stop"] = self.stop
        # OpenRouter-specific params
        if self.reasoning is not None:
            params["reasoning"] = self.reasoning
        if self.openrouter_provider is not None:
            params["provider"] = self.openrouter_provider
        if self.route is not None:
            params["route"] = self.route
        if self.plugins is not None:
            params["plugins"] = self.plugins
        return params
def _create_message_dicts(
self, messages: list[BaseMessage], stop: list[str] | None
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
params = self._default_params
if stop is not None:
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
    def _create_chat_result(self, response: Any) -> ChatResult:  # noqa: C901, PLR0912
        """Create a `ChatResult` from an OpenRouter SDK response.

        Raises:
            ValueError: If the response carries an error payload or no choices.
        """
        # The SDK returns Pydantic models; normalize to a plain dict.
        if not isinstance(response, dict):
            response = response.model_dump(by_alias=True)
        if error := response.get("error"):
            msg = (
                f"OpenRouter API returned an error: "
                f"{error.get('message', str(error))} "
                f"(code: {error.get('code', 'unknown')})"
            )
            raise ValueError(msg)
        generations = []
        token_usage = response.get("usage") or {}
        choices = response.get("choices", [])
        if not choices:
            msg = (
                "OpenRouter API returned a response with no choices. "
                "This may indicate a problem with the request or model availability."
            )
            raise ValueError(msg)
        # Extract top-level response metadata
        response_model = response.get("model")
        system_fingerprint = response.get("system_fingerprint")
        for res in choices:
            message = _convert_dict_to_message(res["message"])
            if token_usage and isinstance(message, AIMessage):
                message.usage_metadata = _create_usage_metadata(token_usage)
                # Surface OpenRouter cost data in response_metadata
                if "cost" in token_usage:
                    message.response_metadata["cost"] = token_usage["cost"]
                if "cost_details" in token_usage:
                    message.response_metadata["cost_details"] = token_usage[
                        "cost_details"
                    ]
            if isinstance(message, AIMessage):
                if system_fingerprint:
                    message.response_metadata["system_fingerprint"] = system_fingerprint
                if native_finish_reason := res.get("native_finish_reason"):
                    message.response_metadata["native_finish_reason"] = (
                        native_finish_reason
                    )
            generation_info: dict[str, Any] = {
                "finish_reason": res.get("finish_reason"),
            }
            if "logprobs" in res:
                generation_info["logprobs"] = res["logprobs"]
            gen = ChatGeneration(
                message=message,
                generation_info=generation_info,
            )
            generations.append(gen)
        llm_output: dict[str, Any] = {
            "model_name": response_model or self.model_name,
        }
        if response_id := response.get("id"):
            llm_output["id"] = response_id
        if created := response.get("created"):
            llm_output["created"] = int(created)
        if object_ := response.get("object"):
            llm_output["object"] = object_
        return ChatResult(generations=generations, llm_output=llm_output)
    def bind_tools(
        self,
        tools: Sequence[dict[str, Any] | type[BaseModel] | Callable | BaseTool],
        *,
        tool_choice: dict | str | bool | None = None,
        strict: bool | None = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, AIMessage]:
        """Bind tool-like objects to this chat model.

        Args:
            tools: A list of tool definitions to bind to this chat model.
                Supports any tool definition handled by
                `langchain_core.utils.function_calling.convert_to_openai_tool`.
            tool_choice: Which tool to require the model to call.
            strict: If `True`, model output is guaranteed to exactly match the
                JSON Schema provided in the tool definition.
                If `None`, the `strict` argument will not be passed to
                the model.
            **kwargs: Any additional parameters.
        """
        formatted_tools = [
            convert_to_openai_tool(tool, strict=strict) for tool in tools
        ]
        if tool_choice is not None and tool_choice:
            # Normalize the accepted forms ("any", a tool name, or True) to
            # the OpenAI-style tool_choice payload.
            if tool_choice == "any":
                tool_choice = "required"
            if isinstance(tool_choice, str) and (
                tool_choice not in ("auto", "none", "required")
            ):
                tool_choice = {"type": "function", "function": {"name": tool_choice}}
            if isinstance(tool_choice, bool):
                # True means "force the single bound tool".
                if len(tools) > 1:
                    msg = (
                        "tool_choice can only be True when there is one tool. Received "
                        f"{len(tools)} tools."
                    )
                    raise ValueError(msg)
                tool_name = formatted_tools[0]["function"]["name"]
                tool_choice = {
                    "type": "function",
                    "function": {"name": tool_name},
                }
            kwargs["tool_choice"] = tool_choice
        return super().bind(tools=formatted_tools, **kwargs)
    def with_structured_output(  # type: ignore[override]
        self,
        schema: dict | type[BaseModel] | None = None,
        *,
        method: Literal["function_calling", "json_schema"] = "function_calling",
        include_raw: bool = False,
        strict: bool | None = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, dict | BaseModel]:
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema: The output schema as a Pydantic class, TypedDict, JSON Schema,
                or OpenAI function schema.
            method: The method for steering model generation.
            include_raw: If `True` then both the raw model response and the
                parsed model response will be returned.
            strict: If `True`, model output is guaranteed to exactly match the
                JSON Schema provided in the schema definition.
                If `None`, the `strict` argument will not be passed to
                the model.
            **kwargs: Any additional parameters.

        Returns:
            A `Runnable` that takes same inputs as a `BaseChatModel`.
        """
        # Runtime guard for a value outside the declared Literal type.
        if method == "json_mode":
            warnings.warn(
                "Unrecognized structured output method 'json_mode'. "
                "Defaulting to 'json_schema' method.",
                stacklevel=2,
            )
            method = "json_schema"
        is_pydantic_schema = _is_pydantic_class(schema)
        if method == "function_calling":
            if schema is None:
                msg = (
                    "schema must be specified when method is 'function_calling'. "
                    "Received None."
                )
                raise ValueError(msg)
            # Force the model to call the schema-derived tool, then parse
            # its arguments as the structured output.
            formatted_tool = convert_to_openai_tool(schema)
            tool_name = formatted_tool["function"]["name"]
            llm = self.bind_tools(
                [schema],
                tool_choice=tool_name,
                strict=strict,
                ls_structured_output_format={
                    "kwargs": {"method": "function_calling", "strict": strict},
                    "schema": formatted_tool,
                },
                **kwargs,
            )
            if is_pydantic_schema:
                output_parser: OutputParserLike = PydanticToolsParser(
                    tools=[schema],  # type: ignore[list-item]
                    first_tool_only=True,  # type: ignore[list-item]
                )
            else:
                output_parser = JsonOutputKeyToolsParser(
                    key_name=tool_name, first_tool_only=True
                )
        elif method == "json_schema":
            if schema is None:
                msg = (
                    "schema must be specified when method is 'json_schema'. "
                    "Received None."
                )
                raise ValueError(msg)
            # Use the provider's native JSON-schema response_format.
            json_schema = convert_to_json_schema(schema)
            schema_name = json_schema.get("title", "")
            json_schema_spec: dict[str, Any] = {
                "name": schema_name,
                "schema": json_schema,
            }
            if strict is not None:
                json_schema_spec["strict"] = strict
            response_format = {
                "type": "json_schema",
                "json_schema": json_schema_spec,
            }
            ls_format_info = {
                "kwargs": {"method": "json_schema", "strict": strict},
                "schema": json_schema,
            }
            llm = self.bind(
                response_format=response_format,
                ls_structured_output_format=ls_format_info,
                **kwargs,
            )
            output_parser = (
                PydanticOutputParser(pydantic_object=schema)  # type: ignore[type-var, arg-type]
                if is_pydantic_schema
                else JsonOutputParser()
            )
        else:
            msg = (
                f"Unrecognized method argument. Expected one of 'function_calling' "
                f"or 'json_schema'. Received: '{method}'"
            )
            raise ValueError(msg)
        if include_raw:
            # Return {"raw": ..., "parsed": ..., "parsing_error": ...};
            # parsing failures are captured rather than raised.
            parser_assign = RunnablePassthrough.assign(
                parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
            )
            parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
            parser_with_fallback = parser_assign.with_fallbacks(
                [parser_none], exception_key="parsing_error"
            )
            return RunnableMap(raw=llm) | parser_with_fallback
        return llm | output_parser
def _is_pydantic_class(obj: Any) -> bool:
    """Return `True` if `obj` is a class deriving from Pydantic `BaseModel`."""
    if not isinstance(obj, type):
        return False
    return is_basemodel_subclass(obj)
def _strip_internal_kwargs(params: dict[str, Any]) -> None:
    """Drop LangChain-internal keys the OpenRouter SDK would reject.

    Mutates `params` in place; keys that are absent are ignored.
    """
    for internal_key in _INTERNAL_KWARGS:
        if internal_key in params:
            del params[internal_key]
def _has_file_content_blocks(message_dicts: list[dict[str, Any]]) -> bool:
"""Return `True` if any message dict contains a `file` content block."""
for msg in message_dicts:
content = msg.get("content")
if isinstance(content, list):
for block in content:
if isinstance(block, dict) and block.get("type") == "file":
return True
return False
def _wrap_messages_for_sdk(
    message_dicts: list[dict[str, Any]],
) -> list[dict[str, Any]] | list[Any]:
    """Wrap message dicts as SDK Pydantic models when file blocks are present.

    The OpenRouter Python SDK (v0.6.0) omits `file` from its
    `ChatMessageContentItem` discriminated union, so its Pydantic validation
    rejects file content blocks even though the OpenRouter API accepts them.
    Building the SDK message classes via `model_construct` skips validation
    while still serializing to the correct JSON payload.

    When no file blocks are detected, the input list is returned untouched so
    the normal (validated) code path stays in effect.

    Args:
        message_dicts: Message dicts produced by `_convert_message_to_dict`.

    Returns:
        The original list when no file blocks are present (or the SDK is not
        importable), otherwise a list of SDK Pydantic model instances.
    """
    if not _has_file_content_blocks(message_dicts):
        return message_dicts
    try:
        from openrouter import components  # noqa: PLC0415
    except ImportError:
        return message_dicts
    sdk_class_by_role: dict[str, type[BaseModel]] = {
        "user": components.UserMessage,
        "system": components.SystemMessage,
        "assistant": components.AssistantMessage,
        "tool": components.ToolResponseMessage,
        "developer": components.DeveloperMessage,
    }
    converted: list[Any] = []
    for message in message_dicts:
        sdk_cls = sdk_class_by_role.get(message.get("role", ""))
        if sdk_cls is None:
            # Unknown role — pass dict through and hope for the best.
            converted.append(message)
            continue
        payload = {key: value for key, value in message.items() if key != "role"}
        converted.append(sdk_cls.model_construct(**payload))
    return converted
#
# Type conversion helpers
#
def _convert_video_block_to_openrouter(block: dict[str, Any]) -> dict[str, Any]:
"""Convert a LangChain video content block to OpenRouter's `video_url` format.
Args:
block: A LangChain `VideoContentBlock`.
Returns:
A dict in OpenRouter's `video_url` format.
Raises:
ValueError: If no video source is provided.
"""
if "url" in block:
return {"type": "video_url", "video_url": {"url": block["url"]}}
if "base64" in block or block.get("source_type") == "base64":
base64_data = block["data"] if "source_type" in block else block["base64"]
mime_type = block.get("mime_type", "video/mp4")
return {
"type": "video_url",
"video_url": {"url": f"data:{mime_type};base64,{base64_data}"},
}
msg = "Video block must have either 'url' or 'base64' data."
raise ValueError(msg)
def _convert_file_block_to_openrouter(block: dict[str, Any]) -> dict[str, Any]:
"""Convert a LangChain file content block to OpenRouter's `file` format.
OpenRouter accepts files as::
{"type": "file", "file": {"filename": "...", "file_data": "..."}}
where `file_data` is either a public URL or a `data:` URI.
Args:
block: A LangChain file content block.
Returns:
A dict in OpenRouter's `file` format.
Raises:
ValueError: If the block contains neither a URL, base64 data, nor a
file ID.
"""
file: dict[str, str] = {}
# --- resolve file_data ---------------------------------------------------
if "url" in block:
file["file_data"] = block["url"]
elif block.get("source_type") == "base64" or "base64" in block:
base64_data = block["data"] if "source_type" in block else block["base64"]
mime_type = block.get("mime_type", "application/octet-stream")
file["file_data"] = f"data:{mime_type};base64,{base64_data}"
elif block.get("source_type") == "id" or "file_id" in block:
msg = "OpenRouter does not support file IDs."
raise ValueError(msg)
else:
msg = "File block must have either 'url' or 'base64' data."
raise ValueError(msg)
# --- resolve filename ----------------------------------------------------
if filename := block.get("filename"):
file["filename"] = filename
elif ((extras := block.get("extras")) and "filename" in extras) or (
(extras := block.get("metadata")) and "filename" in extras
):
file["filename"] = extras["filename"]
return {"type": "file", "file": file}
def _format_message_content(content: Any) -> Any:
"""Format message content for OpenRouter API.
Converts LangChain data content blocks to the expected format.
Args:
content: The message content (string or list of content blocks).
Returns:
Formatted content suitable for the OpenRouter API.
"""
if content and isinstance(content, list):
formatted: list = []
for block in content:
if isinstance(block, dict) and is_data_content_block(block):
if block.get("type") == "video":
formatted.append(_convert_video_block_to_openrouter(block))
elif block.get("type") == "file":
formatted.append(_convert_file_block_to_openrouter(block))
else:
formatted.append(convert_to_openai_data_block(block))
else:
formatted.append(block)
return formatted
return content
def _convert_message_to_dict(message: BaseMessage) -> dict[str, Any]:  # noqa: C901, PLR0912
    """Convert a LangChain message to an OpenRouter-compatible dict payload.

    Handles role mapping, multimodal content formatting, tool call
    serialization, and reasoning content preservation for multi-turn
    conversations.

    Args:
        message: The LangChain message.

    Returns:
        A dict suitable for the OpenRouter chat API `messages` parameter.

    Raises:
        TypeError: If `message` is not one of the supported message types.
    """
    message_dict: dict[str, Any]
    if isinstance(message, ChatMessage):
        # Custom roles are passed through verbatim.
        message_dict = {"role": message.role, "content": message.content}
    elif isinstance(message, HumanMessage):
        # User content may contain multimodal blocks that need conversion
        # to OpenRouter's wire format.
        message_dict = {
            "role": "user",
            "content": _format_message_content(message.content),
        }
    elif isinstance(message, AIMessage):
        message_dict = {"role": "assistant", "content": message.content}
        # Filter out non-text blocks from list content; fall back to ""
        # when filtering removed everything.
        if isinstance(message.content, list):
            text_blocks = [
                block
                for block in message.content
                if isinstance(block, dict) and block.get("type") == "text"
            ]
            message_dict["content"] = text_blocks or ""
        if message.tool_calls or message.invalid_tool_calls:
            # Serialize parsed tool calls, then append invalid ones (whose
            # raw argument strings are preserved as-is).
            message_dict["tool_calls"] = [
                _lc_tool_call_to_openrouter_tool_call(tc) for tc in message.tool_calls
            ] + [
                _lc_invalid_tool_call_to_openrouter_tool_call(tc)
                for tc in message.invalid_tool_calls
            ]
            # Send null (not "" / []) content alongside tool calls.
            if message_dict["content"] == "" or (
                isinstance(message_dict["content"], list)
                and not message_dict["content"]
            ):
                message_dict["content"] = None
        elif "tool_calls" in message.additional_kwargs:
            # Tool calls stored raw in additional_kwargs (already in API
            # format) are forwarded unchanged.
            message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
            if message_dict["content"] == "" or (
                isinstance(message_dict["content"], list)
                and not message_dict["content"]
            ):
                message_dict["content"] = None
        # Preserve reasoning content for multi-turn conversations (e.g.
        # tool-calling loops). OpenRouter stores reasoning in "reasoning" and
        # optional structured details in "reasoning_details".
        if "reasoning_content" in message.additional_kwargs:
            message_dict["reasoning"] = message.additional_kwargs["reasoning_content"]
        if "reasoning_details" in message.additional_kwargs:
            message_dict["reasoning_details"] = message.additional_kwargs[
                "reasoning_details"
            ]
    elif isinstance(message, SystemMessage):
        message_dict = {"role": "system", "content": message.content}
    elif isinstance(message, ToolMessage):
        message_dict = {
            "role": "tool",
            "content": message.content,
            "tool_call_id": message.tool_call_id,
        }
    else:
        msg = f"Got unknown type {message}"
        raise TypeError(msg)
    # A "name" in additional_kwargs applies to every message type.
    if "name" in message.additional_kwargs:
        message_dict["name"] = message.additional_kwargs["name"]
    return message_dict
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:  # noqa: C901
    """Convert an OpenRouter API response message dict to a LangChain message.

    Extracts tool calls, reasoning content, and maps roles to the appropriate
    LangChain message type (`HumanMessage`, `AIMessage`, `SystemMessage`,
    `ToolMessage`, or `ChatMessage`).

    Args:
        _dict: The message dictionary from the API response.

    Returns:
        The corresponding LangChain message.

    Raises:
        ValueError: If the message is missing the `role` field entirely.
    """
    id_ = _dict.get("id")
    role = _dict.get("role")
    if role == "user":
        return HumanMessage(content=_dict.get("content", ""))
    if role == "assistant":
        # Normalize null content to "" so callers can assume a string.
        content = _dict.get("content", "") or ""
        additional_kwargs: dict = {}
        # OpenRouter returns reasoning text in "reasoning" and optional
        # structured reasoning in "reasoning_details".
        if reasoning := _dict.get("reasoning"):
            additional_kwargs["reasoning_content"] = reasoning
        if reasoning_details := _dict.get("reasoning_details"):
            additional_kwargs["reasoning_details"] = reasoning_details
        tool_calls = []
        invalid_tool_calls = []
        if raw_tool_calls := _dict.get("tool_calls"):
            for raw_tool_call in raw_tool_calls:
                try:
                    tool_calls.append(parse_tool_call(raw_tool_call, return_id=True))
                except Exception as e:  # noqa: BLE001, PERF203
                    # Malformed tool calls (e.g. unparsable arguments) are
                    # kept as invalid tool calls rather than dropped.
                    invalid_tool_calls.append(
                        make_invalid_tool_call(raw_tool_call, str(e))
                    )
        return AIMessage(
            content=content,
            id=id_,
            additional_kwargs=additional_kwargs,
            tool_calls=tool_calls,
            invalid_tool_calls=invalid_tool_calls,
            response_metadata={"model_provider": "openrouter"},
        )
    if role == "system":
        return SystemMessage(content=_dict.get("content", ""))
    if role == "tool":
        additional_kwargs = {}
        if "name" in _dict:
            additional_kwargs["name"] = _dict["name"]
        return ToolMessage(
            content=_dict.get("content", ""),
            tool_call_id=_dict.get("tool_call_id"),
            additional_kwargs=additional_kwargs,
        )
    if role is None:
        msg = (
            f"OpenRouter response message is missing the 'role' field. "
            f"Message keys: {list(_dict.keys())}"
        )
        raise ValueError(msg)
    # Present-but-unknown roles degrade gracefully to a generic ChatMessage.
    warnings.warn(
        f"Unrecognized message role '{role}' from OpenRouter. "
        f"Falling back to ChatMessage.",
        stacklevel=2,
    )
    return ChatMessage(content=_dict.get("content", ""), role=role)
def _convert_chunk_to_message_chunk(  # noqa: C901, PLR0911, PLR0912
    chunk: Mapping[str, Any], default_class: type[BaseMessageChunk]
) -> BaseMessageChunk:
    """Convert a streaming chunk dict to a LangChain message chunk.

    Args:
        chunk: The streaming chunk dictionary.
        default_class: Message chunk class to fall back on when the delta
            carries no explicit role (continuation deltas typically omit it).

    Returns:
        The LangChain message chunk.
    """
    choice = chunk["choices"][0]
    _dict = choice.get("delta", {})
    role = cast("str", _dict.get("role"))
    content = cast("str", _dict.get("content") or "")
    additional_kwargs: dict = {}
    tool_call_chunks: list = []
    if raw_tool_calls := _dict.get("tool_calls"):
        for rtc in raw_tool_calls:
            try:
                tool_call_chunks.append(
                    tool_call_chunk(
                        name=rtc["function"].get("name"),
                        args=rtc["function"].get("arguments"),
                        id=rtc.get("id"),
                        index=rtc["index"],
                    )
                )
            except (KeyError, TypeError, AttributeError):  # noqa: PERF203
                # A malformed fragment is dropped (with a warning) rather
                # than aborting the whole stream.
                warnings.warn(
                    f"Skipping malformed tool call chunk during streaming: "
                    f"unexpected structure in {rtc!r}.",
                    stacklevel=2,
                )
    # Each branch accepts either an explicit role on this delta or the
    # caller-provided default class (role is absent on most deltas).
    if role == "user" or default_class == HumanMessageChunk:
        return HumanMessageChunk(content=content)
    if role == "assistant" or default_class == AIMessageChunk:
        if reasoning := _dict.get("reasoning"):
            additional_kwargs["reasoning_content"] = reasoning
        if reasoning_details := _dict.get("reasoning_details"):
            additional_kwargs["reasoning_details"] = reasoning_details
        usage_metadata = None
        response_metadata: dict[str, Any] = {"model_provider": "openrouter"}
        if usage := chunk.get("usage"):
            usage_metadata = _create_usage_metadata(usage)
            # Surface OpenRouter cost data in response_metadata
            if "cost" in usage:
                response_metadata["cost"] = usage["cost"]
            if "cost_details" in usage:
                response_metadata["cost_details"] = usage["cost_details"]
        return AIMessageChunk(
            content=content,
            additional_kwargs=additional_kwargs,
            tool_call_chunks=tool_call_chunks,  # type: ignore[arg-type]
            usage_metadata=usage_metadata,  # type: ignore[arg-type]
            response_metadata=response_metadata,
        )
    if role == "system" or default_class == SystemMessageChunk:
        return SystemMessageChunk(content=content)
    if role == "tool" or default_class == ToolMessageChunk:
        return ToolMessageChunk(
            content=content, tool_call_id=_dict.get("tool_call_id", "")
        )
    if role:
        # Present-but-unknown role: degrade to a generic ChatMessageChunk.
        warnings.warn(
            f"Unrecognized streaming chunk role '{role}' from OpenRouter. "
            f"Falling back to ChatMessageChunk.",
            stacklevel=2,
        )
        return ChatMessageChunk(content=content, role=role)
    if default_class is ChatMessageChunk:
        return ChatMessageChunk(content=content, role=role or "")
    return default_class(content=content)  # type: ignore[call-arg]
def _lc_tool_call_to_openrouter_tool_call(tool_call: ToolCall) -> dict[str, Any]:
"""Convert a LangChain ``ToolCall`` to an OpenRouter tool call dict.
Serializes `args` (a dict) via `json.dumps`.
"""
return {
"type": "function",
"id": tool_call["id"],
"function": {
"name": tool_call["name"],
"arguments": json.dumps(tool_call["args"], ensure_ascii=False),
},
}
def _lc_invalid_tool_call_to_openrouter_tool_call(
invalid_tool_call: InvalidToolCall,
) -> dict[str, Any]:
"""Convert a LangChain `InvalidToolCall` to an OpenRouter tool call dict.
Unlike the valid variant, `args` is already a raw string (not a dict) and
is passed through as-is.
"""
return {
"type": "function",
"id": invalid_tool_call["id"],
"function": {
"name": invalid_tool_call["name"],
"arguments": invalid_tool_call["args"],
},
}
def _create_usage_metadata(token_usage: dict[str, Any]) -> UsageMetadata:
"""Create usage metadata from OpenRouter token usage response.
OpenRouter may return token counts as floats rather than ints, so all
values are explicitly cast to int.
Args:
token_usage: Token usage dict from the API response.
Returns:
Usage metadata with input/output token details.
"""
input_tokens = int(
token_usage.get("prompt_tokens") or token_usage.get("input_tokens") or 0
)
output_tokens = int(
token_usage.get("completion_tokens") or token_usage.get("output_tokens") or 0
)
total_tokens = int(token_usage.get("total_tokens") or input_tokens + output_tokens)
input_details_dict = (
token_usage.get("prompt_tokens_details")
or token_usage.get("input_tokens_details")
or {}
)
output_details_dict = (
token_usage.get("completion_tokens_details")
or token_usage.get("output_tokens_details")
or {}
)
cache_read = input_details_dict.get("cached_tokens")
cache_creation = input_details_dict.get("cache_write_tokens")
input_token_details: dict = {
"cache_read": int(cache_read) if cache_read is not None else None,
"cache_creation": int(cache_creation) if cache_creation is not None else None,
}
reasoning_tokens = output_details_dict.get("reasoning_tokens")
output_token_details: dict = {
"reasoning": int(reasoning_tokens) if reasoning_tokens is not None else None,
}
usage_metadata: UsageMetadata = {
"input_tokens": input_tokens,
"output_tokens": output_tokens,
"total_tokens": total_tokens,
}
filtered_input = {k: v for k, v in input_token_details.items() if v is not None}
if filtered_input:
usage_metadata["input_token_details"] = InputTokenDetails(**filtered_input) # type: ignore[typeddict-item]
filtered_output = {k: v for k, v in output_token_details.items() if v is not None}
if filtered_output:
usage_metadata["output_token_details"] = OutputTokenDetails(**filtered_output) # type: ignore[typeddict-item]
return usage_metadata
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openrouter/langchain_openrouter/chat_models.py",
"license": "MIT License",
"lines": 1174,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/partners/openrouter/scripts/check_imports.py | """Script to check imports of given Python files."""
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
    # SourceFileLoader.load_module() has been deprecated since Python 3.4
    # in favor of the spec-based import machinery, so load each file via
    # importlib.util instead.
    import importlib.util

    files = sys.argv[1:]
    has_failure = False
    for file in files:
        try:
            spec = importlib.util.spec_from_file_location("x", file)
            if spec is None or spec.loader is None:
                msg = f"Could not create a module spec for {file!r}"
                raise ImportError(msg)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
        except Exception:  # noqa: PERF203, BLE001
            # Record the failure but keep checking the remaining files so
            # one report covers everything.
            has_failure = True
            print(file)  # noqa: T201
            traceback.print_exc()
            print()  # noqa: T201
    # Non-zero exit signals CI that at least one file failed to import.
    sys.exit(1 if has_failure else 0)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openrouter/scripts/check_imports.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/partners/openrouter/tests/integration_tests/test_chat_models.py | """Integration tests for `ChatOpenRouter` chat model."""
from __future__ import annotations
import pytest
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from pydantic import BaseModel, Field
from langchain_openrouter.chat_models import ChatOpenRouter
def test_basic_invoke() -> None:
    """Basic `invoke` returns content and tags the provider."""
    llm = ChatOpenRouter(model="openai/gpt-4o-mini", temperature=0)
    result = llm.invoke("Say 'hello' and nothing else.")
    assert result.content
    assert result.response_metadata.get("model_provider") == "openrouter"
def test_streaming() -> None:
    """Streamed chunks accumulate into a non-empty `AIMessageChunk`."""
    llm = ChatOpenRouter(model="openai/gpt-4o-mini", temperature=0)
    accumulated: BaseMessageChunk | None = None
    for piece in llm.stream("Say 'hello' and nothing else."):
        accumulated = piece if accumulated is None else accumulated + piece
    assert isinstance(accumulated, AIMessageChunk)
    assert accumulated.content
def test_tool_calling() -> None:
    """The model emits a tool call when bound to a tool schema."""

    class GetWeather(BaseModel):
        """Get the current weather in a given location."""

        location: str = Field(description="The city and state")

    llm = ChatOpenRouter(model="openai/gpt-4o-mini", temperature=0)
    bound = llm.bind_tools([GetWeather])
    result = bound.invoke("What's the weather in San Francisco?")
    assert result.tool_calls
def test_structured_output() -> None:
    """`with_structured_output` parses the response into the schema."""

    class Joke(BaseModel):
        """A joke."""

        setup: str = Field(description="The setup of the joke")
        punchline: str = Field(description="The punchline of the joke")

    llm = ChatOpenRouter(model="openai/gpt-4o-mini", temperature=0)
    chain = llm.with_structured_output(Joke)
    joke = chain.invoke("Tell me a joke about programming")
    assert isinstance(joke, Joke)
    assert joke.setup
    assert joke.punchline
@pytest.mark.xfail(reason="Depends on reasoning model availability on OpenRouter.")
def test_reasoning_content() -> None:
    """A reasoning-enabled model still returns answer content."""
    llm = ChatOpenRouter(
        model="openai/o3-mini",
        reasoning={"effort": "low"},
    )
    result = llm.invoke("What is 2 + 2?")
    assert result.content
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openrouter/tests/integration_tests/test_chat_models.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/openrouter/tests/integration_tests/test_compile.py | """Test compilation of integration tests."""
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
    """No-op test so a `compile`-marked run succeeds without real tests.

    Used for compiling integration tests without running any real tests.
    """
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openrouter/tests/integration_tests/test_compile.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/openrouter/tests/integration_tests/test_standard.py | """Standard integration tests for `ChatOpenRouter`."""
from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openrouter.chat_models import ChatOpenRouter
# Default model exercised by the standard integration-test suite.
MODEL_NAME = "openai/gpt-4o-mini"
class TestChatOpenRouter(ChatModelIntegrationTests):
    """Standard integration-test suite run against `ChatOpenRouter`."""

    @property
    def chat_model_class(self) -> type[ChatOpenRouter]:
        """Chat model class under test."""
        return ChatOpenRouter

    @property
    def chat_model_params(self) -> dict:
        """Constructor kwargs for the model instance used in tests."""
        return {"model": MODEL_NAME, "temperature": 0}

    @property
    def returns_usage_metadata(self) -> bool:
        """Usage-metadata assertions are skipped for now."""
        return False

    @property
    def supports_json_mode(self) -> bool:
        return False

    @property
    def supports_image_inputs(self) -> bool:
        return True

    @property
    def supports_image_urls(self) -> bool:
        return True

    @property
    def supports_video_inputs(self) -> bool:
        return True

    @property
    def model_override_value(self) -> str:
        """Alternate model used by model-override tests."""
        return "openai/gpt-4o"
# Audio-capable model used as the base for multimodal tests.
AUDIO_MODEL = "google/gemini-2.5-flash"
# Reasoning-capable model used for reasoning-output tests.
REASONING_MODEL = "openai/o3-mini"
class TestChatOpenRouterMultiModal(ChatModelIntegrationTests):
    """Audio-input and reasoning-output coverage for `ChatOpenRouter`.

    The base model is audio-capable; reasoning checks construct a separate
    reasoning-model instance.
    """

    @property
    def chat_model_class(self) -> type[ChatOpenRouter]:
        """Chat model class under test."""
        return ChatOpenRouter

    @property
    def chat_model_params(self) -> dict:
        """Constructor kwargs for the audio-capable base model."""
        return {"model": AUDIO_MODEL, "temperature": 0}

    @property
    def returns_usage_metadata(self) -> bool:
        """Usage-metadata assertions are skipped for now."""
        return False

    @property
    def supports_json_mode(self) -> bool:
        return False

    @property
    def supports_image_inputs(self) -> bool:
        return True

    @property
    def supports_image_urls(self) -> bool:
        return True

    @property
    def supports_audio_inputs(self) -> bool:
        return True

    @property
    def supports_video_inputs(self) -> bool:
        return True

    @property
    def model_override_value(self) -> str:
        """Alternate model used by model-override tests."""
        return "openai/gpt-4o"

    def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
        """Invoke a reasoning model to exercise reasoning token tracking."""
        llm = ChatOpenRouter(
            model=REASONING_MODEL,
            reasoning={"effort": "medium"},
        )
        prompt = (
            "Explain the relationship between the 2008/9 economic crisis and "
            "the startup ecosystem in the early 2010s"
        )
        if not stream:
            return llm.invoke(prompt)
        accumulated: AIMessageChunk | None = None
        for piece in llm.stream(prompt):
            accumulated = piece if accumulated is None else accumulated + piece  # type: ignore[assignment]
        assert accumulated is not None
        return accumulated
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openrouter/tests/integration_tests/test_standard.py",
"license": "MIT License",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.