|
|
""" |
|
|
数据加载器实现 |
|
|
Data loader implementation for emotion and physiological state data |
|
|
""" |
|
|
|
|
|
import torch |
|
|
from torch.utils.data import DataLoader as TorchDataLoader, random_split |
|
|
from typing import Union, Tuple, Optional, List, Dict, Any |
|
|
from pathlib import Path |
|
|
import numpy as np |
|
|
import pandas as pd |
|
|
from loguru import logger |
|
|
|
|
|
from .dataset import EmotionDataset |
|
|
from .preprocessor import DataPreprocessor |
|
|
from .synthetic_generator import SyntheticDataGenerator |
|
|
from .gpu_preload_loader import GPUPreloadDataLoader, GPUPreloadDataLoaderFactory |
|
|
|
|
|
class DataLoaderFactory: |
|
|
""" |
|
|
数据加载器工厂类 |
|
|
Factory class for creating data loaders |
|
|
""" |
|
|
|
|
|
def __init__(self, config: Optional[Dict[str, Any]] = None): |
|
|
""" |
|
|
初始化数据加载器工厂 |
|
|
|
|
|
Args: |
|
|
config: 配置字典 |
|
|
""" |
|
|
self.config = config or self._get_default_config() |
|
|
|
|
|
def _get_default_config(self) -> Dict[str, Any]: |
|
|
"""获取默认配置""" |
|
|
return { |
|
|
'batch_size': 32, |
|
|
'num_workers': 4, |
|
|
'pin_memory': True, |
|
|
'shuffle': True, |
|
|
'drop_last': False, |
|
|
'train_split': 0.7, |
|
|
'val_split': 0.15, |
|
|
'test_split': 0.15, |
|
|
'normalize_features': True, |
|
|
'normalize_labels': False, |
|
|
'seed': 42 |
|
|
} |
|
|
|
|
|
def create_data_loaders( |
|
|
self, |
|
|
data_path: Optional[Union[str, Path]] = None, |
|
|
data: Optional[Union[np.ndarray, pd.DataFrame]] = None, |
|
|
split_ratio: Optional[Tuple[float, float, float]] = None, |
|
|
**kwargs |
|
|
) -> Tuple['DataLoader', 'DataLoader', 'DataLoader']: |
|
|
""" |
|
|
创建训练、验证和测试数据加载器 |
|
|
|
|
|
Args: |
|
|
data_path: 数据文件路径 |
|
|
data: 数据数组或DataFrame |
|
|
split_ratio: 训练/验证/测试分割比例 |
|
|
**kwargs: 其他参数 |
|
|
|
|
|
Returns: |
|
|
训练、验证、测试数据加载器的元组 |
|
|
""" |
|
|
|
|
|
dataset = self._load_dataset(data_path, data, **kwargs) |
|
|
|
|
|
|
|
|
train_dataset, val_dataset, test_dataset = self._split_dataset( |
|
|
dataset, split_ratio |
|
|
) |
|
|
|
|
|
|
|
|
train_loader = self._create_dataloader( |
|
|
train_dataset, shuffle=True, **self.config |
|
|
) |
|
|
|
|
|
val_loader = self._create_dataloader( |
|
|
val_dataset, shuffle=False, **self.config |
|
|
) |
|
|
|
|
|
test_loader = self._create_dataloader( |
|
|
test_dataset, shuffle=False, **self.config |
|
|
) |
|
|
|
|
|
logger.info(f"Created data loaders:") |
|
|
logger.info(f" Train: {len(train_dataset)} samples, {len(train_loader)} batches") |
|
|
logger.info(f" Val: {len(val_dataset)} samples, {len(val_loader)} batches") |
|
|
logger.info(f" Test: {len(test_dataset)} samples, {len(test_loader)} batches") |
|
|
|
|
|
return train_loader, val_loader, test_loader |
|
|
|
|
|
def create_single_loader( |
|
|
self, |
|
|
data_path: Optional[Union[str, Path]] = None, |
|
|
data: Optional[Union[np.ndarray, pd.DataFrame]] = None, |
|
|
mode: str = 'train', |
|
|
**kwargs |
|
|
) -> 'DataLoader': |
|
|
""" |
|
|
创建单个数据加载器 |
|
|
|
|
|
Args: |
|
|
data_path: 数据文件路径 |
|
|
data: 数据数组或DataFrame |
|
|
mode: 模式 ('train', 'val', 'test', 'predict') |
|
|
**kwargs: 其他参数 |
|
|
|
|
|
Returns: |
|
|
数据加载器 |
|
|
""" |
|
|
|
|
|
config = self.config.copy() |
|
|
if mode == 'train': |
|
|
config['shuffle'] = True |
|
|
else: |
|
|
config['shuffle'] = False |
|
|
|
|
|
|
|
|
dataset = self._load_dataset(data_path, data, **kwargs) |
|
|
|
|
|
|
|
|
loader = self._create_dataloader(dataset, **config) |
|
|
|
|
|
logger.info(f"Created {mode} loader: {len(dataset)} samples, {len(loader)} batches") |
|
|
|
|
|
return loader |
|
|
|
|
|
def create_synthetic_loaders( |
|
|
self, |
|
|
num_samples: int = 1000, |
|
|
split_ratio: Optional[Tuple[float, float, float]] = None, |
|
|
**kwargs |
|
|
) -> Tuple['DataLoader', 'DataLoader', 'DataLoader']: |
|
|
""" |
|
|
创建合成数据的数据加载器 |
|
|
|
|
|
Args: |
|
|
num_samples: 样本数量 |
|
|
split_ratio: 训练/验证/测试分割比例 |
|
|
**kwargs: 其他参数 |
|
|
|
|
|
Returns: |
|
|
训练、验证、测试数据加载器的元组 |
|
|
""" |
|
|
|
|
|
generator = SyntheticDataGenerator(num_samples=num_samples) |
|
|
data, labels = generator.generate_data() |
|
|
|
|
|
|
|
|
combined_data = np.hstack([data, labels]) |
|
|
|
|
|
|
|
|
return self.create_data_loaders( |
|
|
data=combined_data, |
|
|
split_ratio=split_ratio, |
|
|
**kwargs |
|
|
) |
|
|
|
|
|
def _load_dataset( |
|
|
self, |
|
|
data_path: Optional[Union[str, Path]] = None, |
|
|
data: Optional[Union[np.ndarray, pd.DataFrame]] = None, |
|
|
**kwargs |
|
|
) -> EmotionDataset: |
|
|
""" |
|
|
加载数据集 |
|
|
|
|
|
Args: |
|
|
data_path: 数据文件路径 |
|
|
data: 数据数组或DataFrame |
|
|
**kwargs: 其他参数 |
|
|
|
|
|
Returns: |
|
|
数据集 |
|
|
""" |
|
|
|
|
|
default_label_columns = ['ai_delta_p', 'ai_delta_a', 'ai_delta_d'] |
|
|
|
|
|
if data_path is not None: |
|
|
dataset = EmotionDataset( |
|
|
data=data_path, |
|
|
label_columns=kwargs.get('label_columns', default_label_columns), |
|
|
normalize_features=self.config['normalize_features'], |
|
|
normalize_labels=self.config['normalize_labels'], |
|
|
**kwargs |
|
|
) |
|
|
elif data is not None: |
|
|
dataset = EmotionDataset( |
|
|
data=data, |
|
|
label_columns=kwargs.get('label_columns', default_label_columns), |
|
|
normalize_features=self.config['normalize_features'], |
|
|
normalize_labels=self.config['normalize_labels'], |
|
|
**kwargs |
|
|
) |
|
|
else: |
|
|
raise ValueError("Either data_path or data must be provided") |
|
|
|
|
|
return dataset |
|
|
|
|
|
def _split_dataset( |
|
|
self, |
|
|
dataset: EmotionDataset, |
|
|
split_ratio: Optional[Tuple[float, float, float]] = None |
|
|
) -> Tuple[EmotionDataset, EmotionDataset, EmotionDataset]: |
|
|
""" |
|
|
分割数据集 |
|
|
|
|
|
Args: |
|
|
dataset: 原始数据集 |
|
|
split_ratio: 分割比例 (train, val, test) |
|
|
|
|
|
Returns: |
|
|
训练、验证、测试数据集的元组 |
|
|
""" |
|
|
if split_ratio is None: |
|
|
split_ratio = ( |
|
|
self.config['train_split'], |
|
|
self.config['val_split'], |
|
|
self.config['test_split'] |
|
|
) |
|
|
|
|
|
|
|
|
if abs(sum(split_ratio) - 1.0) > 1e-6: |
|
|
raise ValueError(f"Split ratios must sum to 1.0, got {sum(split_ratio)}") |
|
|
|
|
|
|
|
|
total_size = len(dataset) |
|
|
train_size = int(total_size * split_ratio[0]) |
|
|
val_size = int(total_size * split_ratio[1]) |
|
|
test_size = total_size - train_size - val_size |
|
|
|
|
|
|
|
|
torch.manual_seed(self.config['seed']) |
|
|
np.random.seed(self.config['seed']) |
|
|
|
|
|
|
|
|
train_dataset, val_dataset, test_dataset = random_split( |
|
|
dataset, [train_size, val_size, test_size] |
|
|
) |
|
|
|
|
|
return train_dataset, val_dataset, test_dataset |
|
|
|
|
|
def _create_dataloader( |
|
|
self, |
|
|
dataset: EmotionDataset, |
|
|
shuffle: bool = True, |
|
|
**config |
|
|
) -> 'DataLoader': |
|
|
""" |
|
|
创建数据加载器 |
|
|
|
|
|
Args: |
|
|
dataset: 数据集 |
|
|
shuffle: 是否打乱数据 |
|
|
**config: 配置参数 |
|
|
|
|
|
Returns: |
|
|
数据加载器 |
|
|
""" |
|
|
|
|
|
num_workers = config.get('num_workers', self.config['num_workers']) |
|
|
import platform |
|
|
if platform.system() == 'Windows': |
|
|
num_workers = 0 |
|
|
|
|
|
return TorchDataLoader( |
|
|
dataset, |
|
|
batch_size=int(config.get('batch_size', self.config['batch_size'])), |
|
|
shuffle=shuffle, |
|
|
num_workers=num_workers, |
|
|
pin_memory=config.get('pin_memory', self.config['pin_memory']) and torch.cuda.is_available(), |
|
|
drop_last=config.get('drop_last', self.config['drop_last']) |
|
|
) |
|
|
|
|
|
class DataLoader: |
|
|
""" |
|
|
数据加载器包装类 |
|
|
Wrapper class for data loading functionality |
|
|
|
|
|
支持两种数据加载模式: |
|
|
1. 标准模式: 使用PyTorch DataLoader,逐batch从CPU传输到GPU |
|
|
2. GPU预加载模式: 一次性将所有数据加载到GPU,消除传输开销(适用于小数据集) |
|
|
""" |
|
|
|
|
|
def __init__(self, config: Optional[Dict[str, Any]] = None): |
|
|
""" |
|
|
初始化数据加载器 |
|
|
|
|
|
Args: |
|
|
config: 配置字典 |
|
|
""" |
|
|
self.factory = DataLoaderFactory(config) |
|
|
self.config = self.factory.config |
|
|
|
|
|
|
|
|
self.preload_config = self.config.get('preload_to_gpu', {}) |
|
|
self.use_gpu_preload = self.preload_config.get('enabled', False) |
|
|
|
|
|
if self.use_gpu_preload: |
|
|
logger.info("✓ GPU预加载模式已启用") |
|
|
logger.info(f" 预加载批次大小: {self.preload_config.get('batch_size', 4096)}") |
|
|
logger.info(f" 应用到验证集: {self.preload_config.get('apply_to_validation', True)}") |
|
|
else: |
|
|
logger.info("使用标准DataLoader模式") |
|
|
|
|
|
def get_train_loader( |
|
|
self, |
|
|
data_path: Optional[Union[str, Path]] = None, |
|
|
data: Optional[Union[np.ndarray, pd.DataFrame]] = None, |
|
|
**kwargs |
|
|
) -> Union['DataLoader', GPUPreloadDataLoader]: |
|
|
""" |
|
|
获取训练数据加载器 |
|
|
|
|
|
Args: |
|
|
data_path: 数据文件路径 |
|
|
data: 数据数组或DataFrame |
|
|
**kwargs: 其他参数 |
|
|
|
|
|
Returns: |
|
|
训练数据加载器(标准DataLoader或GPU预加载DataLoader) |
|
|
""" |
|
|
|
|
|
if self.use_gpu_preload and data_path is not None: |
|
|
logger.info("创建GPU预加载训练数据加载器") |
|
|
gpu_batch_size = self.preload_config.get('batch_size', 4096) |
|
|
|
|
|
|
|
|
gpu_loader_config = { |
|
|
'batch_size': gpu_batch_size, |
|
|
'shuffle': True, |
|
|
'normalize_features': self.config.get('normalize_features', True), |
|
|
'normalize_labels': self.config.get('normalize_labels', False), |
|
|
'input_dim': self.preload_config.get('input_dim'), |
|
|
'output_dim': self.preload_config.get('output_dim'), |
|
|
} |
|
|
|
|
|
factory = GPUPreloadDataLoaderFactory() |
|
|
return factory.create_train_loader( |
|
|
data_path=data_path, |
|
|
**gpu_loader_config, |
|
|
**kwargs |
|
|
) |
|
|
|
|
|
|
|
|
return self.factory.create_single_loader( |
|
|
data_path=data_path, |
|
|
data=data, |
|
|
mode='train', |
|
|
**kwargs |
|
|
) |
|
|
|
|
|
def get_val_loader( |
|
|
self, |
|
|
data_path: Optional[Union[str, Path]] = None, |
|
|
data: Optional[Union[np.ndarray, pd.DataFrame]] = None, |
|
|
**kwargs |
|
|
) -> Union['DataLoader', GPUPreloadDataLoader]: |
|
|
""" |
|
|
获取验证数据加载器 |
|
|
|
|
|
Args: |
|
|
data_path: 数据文件路径 |
|
|
data: 数据数组或DataFrame |
|
|
**kwargs: 其他参数 |
|
|
|
|
|
Returns: |
|
|
验证数据加载器(标准DataLoader或GPU预加载DataLoader) |
|
|
""" |
|
|
|
|
|
if self.use_gpu_preload and self.preload_config.get('apply_to_validation', True) and data_path is not None: |
|
|
logger.info("创建GPU预加载验证数据加载器") |
|
|
gpu_batch_size = self.preload_config.get('batch_size', 4096) |
|
|
|
|
|
|
|
|
gpu_loader_config = { |
|
|
'batch_size': gpu_batch_size, |
|
|
'shuffle': False, |
|
|
'normalize_features': self.config.get('normalize_features', True), |
|
|
'normalize_labels': self.config.get('normalize_labels', False), |
|
|
'input_dim': self.preload_config.get('input_dim'), |
|
|
'output_dim': self.preload_config.get('output_dim'), |
|
|
} |
|
|
|
|
|
factory = GPUPreloadDataLoaderFactory() |
|
|
return factory.create_val_loader( |
|
|
data_path=data_path, |
|
|
**gpu_loader_config, |
|
|
**kwargs |
|
|
) |
|
|
|
|
|
|
|
|
return self.factory.create_single_loader( |
|
|
data_path=data_path, |
|
|
data=data, |
|
|
mode='val', |
|
|
**kwargs |
|
|
) |
|
|
|
|
|
def get_test_loader( |
|
|
self, |
|
|
data_path: Optional[Union[str, Path]] = None, |
|
|
data: Optional[Union[np.ndarray, pd.DataFrame]] = None, |
|
|
**kwargs |
|
|
) -> Union['DataLoader', GPUPreloadDataLoader]: |
|
|
""" |
|
|
获取测试数据加载器 |
|
|
|
|
|
Args: |
|
|
data_path: 数据文件路径 |
|
|
data: 数据数组或DataFrame |
|
|
**kwargs: 其他参数 |
|
|
|
|
|
Returns: |
|
|
测试数据加载器(标准DataLoader或GPU预加载DataLoader) |
|
|
""" |
|
|
|
|
|
if self.use_gpu_preload and self.preload_config.get('apply_to_validation', True) and data_path is not None: |
|
|
logger.info("创建GPU预加载测试数据加载器") |
|
|
gpu_batch_size = self.preload_config.get('batch_size', 4096) |
|
|
|
|
|
|
|
|
gpu_loader_config = { |
|
|
'batch_size': gpu_batch_size, |
|
|
'shuffle': False, |
|
|
'normalize_features': self.config.get('normalize_features', True), |
|
|
'normalize_labels': self.config.get('normalize_labels', False), |
|
|
'input_dim': self.preload_config.get('input_dim'), |
|
|
'output_dim': self.preload_config.get('output_dim'), |
|
|
} |
|
|
|
|
|
factory = GPUPreloadDataLoaderFactory() |
|
|
return factory.create_test_loader( |
|
|
data_path=data_path, |
|
|
**gpu_loader_config, |
|
|
**kwargs |
|
|
) |
|
|
|
|
|
|
|
|
return self.factory.create_single_loader( |
|
|
data_path=data_path, |
|
|
data=data, |
|
|
mode='test', |
|
|
**kwargs |
|
|
) |
|
|
|
|
|
def get_predict_loader( |
|
|
self, |
|
|
data_path: Optional[Union[str, Path]] = None, |
|
|
data: Optional[Union[np.ndarray, pd.DataFrame]] = None, |
|
|
**kwargs |
|
|
) -> 'DataLoader': |
|
|
""" |
|
|
获取预测数据加载器 |
|
|
|
|
|
Args: |
|
|
data_path: 数据文件路径 |
|
|
data: 数据数组或DataFrame |
|
|
**kwargs: 其他参数 |
|
|
|
|
|
Returns: |
|
|
预测数据加载器 |
|
|
""" |
|
|
return self.factory.create_single_loader( |
|
|
data_path=data_path, |
|
|
data=data, |
|
|
mode='predict', |
|
|
**kwargs |
|
|
) |
|
|
|
|
|
def get_all_loaders( |
|
|
self, |
|
|
data_path: Optional[Union[str, Path]] = None, |
|
|
data: Optional[Union[np.ndarray, pd.DataFrame]] = None, |
|
|
split_ratio: Optional[Tuple[float, float, float]] = None, |
|
|
**kwargs |
|
|
) -> Tuple['DataLoader', 'DataLoader', 'DataLoader']: |
|
|
""" |
|
|
获取所有数据加载器 |
|
|
|
|
|
Args: |
|
|
data_path: 数据文件路径 |
|
|
data: 数据数组或DataFrame |
|
|
split_ratio: 分割比例 |
|
|
**kwargs: 其他参数 |
|
|
|
|
|
Returns: |
|
|
训练、验证、测试数据加载器的元组 |
|
|
""" |
|
|
return self.factory.create_data_loaders( |
|
|
data_path=data_path, |
|
|
data=data, |
|
|
split_ratio=split_ratio, |
|
|
**kwargs |
|
|
) |
|
|
|
|
|
def get_synthetic_loaders( |
|
|
self, |
|
|
num_samples: int = 1000, |
|
|
split_ratio: Optional[Tuple[float, float, float]] = None, |
|
|
**kwargs |
|
|
) -> Tuple['DataLoader', 'DataLoader', 'DataLoader']: |
|
|
""" |
|
|
获取合成数据加载器 |
|
|
|
|
|
Args: |
|
|
num_samples: 样本数量 |
|
|
split_ratio: 分割比例 |
|
|
**kwargs: 其他参数 |
|
|
|
|
|
Returns: |
|
|
训练、验证、测试数据加载器的元组 |
|
|
""" |
|
|
return self.factory.create_synthetic_loaders( |
|
|
num_samples=num_samples, |
|
|
split_ratio=split_ratio, |
|
|
**kwargs |
|
|
) |
|
|
|
|
|
def create_data_loader( |
|
|
config: Optional[Dict[str, Any]] = None, |
|
|
**kwargs |
|
|
) -> DataLoader: |
|
|
""" |
|
|
创建数据加载器的便捷函数 |
|
|
|
|
|
Args: |
|
|
config: 配置字典 |
|
|
**kwargs: 配置参数 |
|
|
|
|
|
Returns: |
|
|
数据加载器实例 |
|
|
""" |
|
|
if config is None: |
|
|
config = {} |
|
|
|
|
|
|
|
|
final_config = {**config, **kwargs} |
|
|
|
|
|
return DataLoader(final_config) |
|
|
|
|
|
def load_data_from_config(config_path: Union[str, Path]) -> Tuple['DataLoader', 'DataLoader', 'DataLoader']: |
|
|
""" |
|
|
从配置文件加载数据 |
|
|
|
|
|
Args: |
|
|
config_path: 配置文件路径 |
|
|
|
|
|
Returns: |
|
|
训练、验证、测试数据加载器的元组 |
|
|
""" |
|
|
import yaml |
|
|
|
|
|
with open(config_path, 'r') as f: |
|
|
config = yaml.safe_load(f) |
|
|
|
|
|
|
|
|
data_config = config.get('data', {}) |
|
|
|
|
|
|
|
|
loader = create_data_loader(data_config.get('dataloader', {})) |
|
|
|
|
|
|
|
|
train_path = data_config.get('train_data_path') |
|
|
val_path = data_config.get('val_data_path') |
|
|
test_path = data_config.get('test_data_path') |
|
|
|
|
|
if train_path and val_path and test_path: |
|
|
|
|
|
train_loader = loader.get_train_loader(data_path=train_path) |
|
|
val_loader = loader.get_val_loader(data_path=val_path) |
|
|
test_loader = loader.get_test_loader(data_path=test_path) |
|
|
else: |
|
|
|
|
|
data_path = train_path or val_path or test_path |
|
|
if data_path is None: |
|
|
raise ValueError("No data path found in config") |
|
|
|
|
|
train_loader, val_loader, test_loader = loader.get_all_loaders( |
|
|
data_path=data_path |
|
|
) |
|
|
|
|
|
return train_loader, val_loader, test_loader |
|
|
|
|
|
|
|
|
class DataAugmentation: |
|
|
""" |
|
|
数据增强类 |
|
|
Data augmentation strategies |
|
|
""" |
|
|
|
|
|
def __init__(self, config: Optional[Dict[str, Any]] = None): |
|
|
""" |
|
|
初始化数据增强 |
|
|
|
|
|
Args: |
|
|
config: 配置字典 |
|
|
""" |
|
|
self.config = config or {} |
|
|
self.noise_std = self.config.get('noise_std', 0.01) |
|
|
self.mixup_alpha = self.config.get('mixup_alpha', 0.2) |
|
|
self.enabled = self.config.get('enabled', False) |
|
|
|
|
|
def add_gaussian_noise(self, features: torch.Tensor) -> torch.Tensor: |
|
|
""" |
|
|
添加高斯噪声 |
|
|
|
|
|
Args: |
|
|
features: 特征张量 |
|
|
|
|
|
Returns: |
|
|
添加噪声后的特征张量 |
|
|
""" |
|
|
if not self.enabled: |
|
|
return features |
|
|
|
|
|
noise = torch.randn_like(features) * self.noise_std |
|
|
return features + noise |
|
|
|
|
|
def mixup_data( |
|
|
self, |
|
|
features: torch.Tensor, |
|
|
labels: torch.Tensor, |
|
|
alpha: Optional[float] = None |
|
|
) -> Tuple[torch.Tensor, torch.Tensor, float]: |
|
|
""" |
|
|
Mixup数据增强 |
|
|
|
|
|
Args: |
|
|
features: 特征张量 |
|
|
labels: 标签张量 |
|
|
alpha: Beta分布参数 |
|
|
|
|
|
Returns: |
|
|
混合后的特征、标签和lambda值 |
|
|
""" |
|
|
if not self.enabled: |
|
|
return features, labels, 1.0 |
|
|
|
|
|
if alpha is None: |
|
|
alpha = self.mixup_alpha |
|
|
|
|
|
if alpha > 0: |
|
|
lam = np.random.beta(alpha, alpha) |
|
|
else: |
|
|
lam = 1 |
|
|
|
|
|
batch_size = features.size(0) |
|
|
index = torch.randperm(batch_size) |
|
|
|
|
|
mixed_features = lam * features + (1 - lam) * features[index, :] |
|
|
mixed_labels = lam * labels + (1 - lam) * labels[index, :] |
|
|
|
|
|
return mixed_features, mixed_labels, lam |
|
|
|
|
|
def random_feature_dropout(self, features: torch.Tensor, dropout_rate: float = 0.1) -> torch.Tensor: |
|
|
""" |
|
|
随机特征丢弃 |
|
|
|
|
|
Args: |
|
|
features: 特征张量 |
|
|
dropout_rate: 丢弃率 |
|
|
|
|
|
Returns: |
|
|
丢弃特征后的张量 |
|
|
""" |
|
|
if not self.enabled: |
|
|
return features |
|
|
|
|
|
mask = torch.rand_like(features) > dropout_rate |
|
|
return features * mask.float() |