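# Keep everything on CPU: hide CUDA devices and pin JAX to its CPU platform
# before jax and torch are imported, so results don't depend on local GPUs.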
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ["JAX_PLATFORMS"] = "cpu"

import pytest
import jax
import jax.numpy as jnp
import numpy as np
import torch

from quantization import quantization_types
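
# These tests only run inference through the quantizers, so autograd
# bookkeeping is disabled globally.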
torch.set_grad_enabled(False)

backends = {"jax": jnp, "numpy": np, "torch": torch}
scales = (0.5, 1.0, 1.5)


@pytest.fixture(params=backends.values(), ids=backends.keys())
def backend(request):
    """Fixture to parameterize over all backends."""
    return request.param


def _uint8_range(backend):
    """Full uint8 code range [0, 255] on the given backend."""
    return backend.arange(256, dtype=backend.uint8)


def _float_range(backend):
    """101 evenly spaced float32 values in [-4, 4] on the given backend."""
    return backend.linspace(-4, 4, 101, dtype=backend.float32)


# The helpers above stay callable as plain functions (used with an explicit
# backend in the cross-backend and compile tests below); wrapping them makes
# the same logic available as fixtures, with `backend` resolved by pytest.
uint8_range = pytest.fixture(_uint8_range)
float_range = pytest.fixture(_float_range)


@pytest.fixture(params=quantization_types)
def cls(request):
    """Fixture to parameterize over all quantization classes."""
    return request.param


@pytest.fixture
def default_instance(cls):
    """Default-constructed instance of each quantization class."""
    return cls()


@pytest.fixture(params=scales)
def scale(request):
    """Fixture to parameterize over different scale values."""
    return request.param


@pytest.fixture
def instance(cls, scale):
    """Instance of each quantization class, swept over all scale values."""
    return cls(scale=scale)
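

# Illustrative only, not part of the suite: a minimal sketch of the interface
# these tests expect, assuming a simple linear scheme (the real classes come
# from `quantization` and may differ). Codes 0..255 map to
# scale * (code - 128) / 128, so the dequantize -> quantize roundtrip is exact.
class _ReferenceLinearQuantizer:
    def __init__(self, scale=1.0):
        self.scale = scale

    def nonlinearity(self, x):
        # identity placeholder: a purely linear scheme applies no companding
        return x

    def inv_nonlinearity(self, y):
        return y

    def quantize(self, x):
        # numpy-only for brevity, unlike the backend-agnostic classes under test
        x = np.clip(self.nonlinearity(x) / self.scale, -1.0, 127.0 / 128.0)
        return np.round(x * 128.0 + 128.0).astype(np.uint8)

    def dequantize(self, q):
        y = (q.astype(np.float32) - 128.0) / 128.0
        return self.inv_nonlinearity(self.scale * y).astype(np.float32)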


def test_interface(cls, default_instance):
    """Test that all quantization classes and instances have the expected methods."""
    assert isinstance(default_instance, cls)
    classattrs = ()
    classmethods = ()
    instanceattrs = ("scale",)
    instancemethods = ("quantize", "dequantize", "nonlinearity", "inv_nonlinearity")
    interface = {
        cls: (classattrs, classmethods),
        default_instance: (instanceattrs, instancemethods),
    }
    for obj, (attrs, methods) in interface.items():
        for attr in attrs + methods:
            assert hasattr(obj, attr)
        for method in methods:
            assert callable(getattr(obj, method))


def test_quantize_returns_uint8(default_instance, float_range):
    """Test that quantize returns uint8 values."""
    quantized = default_instance.quantize(float_range)
    # string comparison sidesteps dtype-object differences between backends
    assert "uint8" in str(quantized.dtype).lower()


def test_dequantize_returns_float32(default_instance, uint8_range):
    """Test that dequantize returns float32 values."""
    dequantized = default_instance.dequantize(uint8_range)
    assert "float32" in str(dequantized.dtype).lower()


def test_roundtrip_consistency(instance, uint8_range):
    """Test that dequantize -> quantize is lossless."""
    # Only this direction can be exact: quantize -> dequantize is lossy by
    # construction, but every uint8 code must survive a decode/encode cycle.
    dequantized = instance.dequantize(uint8_range)
    requantized = instance.quantize(dequantized)
    np.testing.assert_array_equal(uint8_range, requantized)


def test_cross_backend_consistency(instance):
    """Test that dequantization agrees across all backends up to float rounding."""
    results = {
        name: instance.dequantize(_uint8_range(backend))
        for name, backend in backends.items()
    }
    baseline_name = next(iter(backends))
    baseline_result = results[baseline_name]
    for name, result in results.items():
        if name == baseline_name:
            continue
        np.testing.assert_array_almost_equal(
            result,
            baseline_result,
            err_msg=f"{name} result doesn't match {baseline_name}",
        )


def test_jax_jit(instance):
    """Test that the JAX backend works when the quantization methods are jitted."""
    data = _uint8_range(jnp)

    def roundtrip(x):
        return instance.quantize(instance.dequantize(x))

    nojit = roundtrip(data)
    yesjit = jax.jit(roundtrip)(data)
    np.testing.assert_array_equal(yesjit, nojit)


def test_torch_compile(instance):
    """Test that the PyTorch backend works when the quantization methods are compiled."""
    # torch.compile requires PyTorch >= 2.0
    data = _uint8_range(torch)

    def roundtrip(x):
        return instance.quantize(instance.dequantize(x))

    eager = roundtrip(data)
    compiled = torch.compile(roundtrip)(data)
    np.testing.assert_array_equal(compiled, eager)
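

# Example invocation (file name hypothetical):
#     pytest test_quantization.py -k "roundtrip and torch" -v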