repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/exp/mixed_precision.py | """Mixed precision related functions."""
from functools import partial
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import jmp
from imgx import model
from imgx.model import CONFIG_NAME_TO_MODEL_CLS_NAME
def get_mixed_precision_policy(use_mp: bool) -> jmp.Policy:
    """Build the general mixed precision policy.

    Parameters and outputs always stay in full precision; compute runs in
    half precision only when mixed precision is enabled.

    Args:
        use_mp: use mixed precision if True.

    Returns:
        Policy instance.
    """
    compute_dtype = jmp.half_dtype() if use_mp else jnp.float32
    return jmp.Policy(
        compute_dtype=compute_dtype,
        param_dtype=jnp.float32,
        output_dtype=jnp.float32,
    )
def get_mixed_precision_policy_for_normalization(use_mp: bool) -> jmp.Policy:
    """Build the mixed precision policy for normalization layers.

    Normalization statistics are computed and stored in full precision for
    numerical stability; only the output is cast to half precision when
    mixed precision is enabled.

    Args:
        use_mp: use mixed precision if True.

    Returns:
        Policy instance.
    """
    output_dtype = jmp.half_dtype() if use_mp else jnp.float32
    return jmp.Policy(
        compute_dtype=jnp.float32,
        param_dtype=jnp.float32,
        output_dtype=output_dtype,
    )
def select_tree(
    pred: jnp.ndarray, a: "chex.ArrayTree", b: "chex.ArrayTree"
) -> "chex.ArrayTree":
    """Selects a pytree based on the given predicate.

    Replace jmp.select_tree as it used jax.tree_multimap
    which has been deprecated.

    Args:
        pred: boolean scalar array.
        a: values returned where pred is True.
        b: values returned where pred is False.

    Returns:
        Selected tree, with the same structure as a and b.

    Raises:
        ValueError: if pred dtype or shape is wrong.
    """
    if not (pred.ndim == 0 and pred.dtype == jnp.bool_):
        raise ValueError("expected boolean scalar")
    # jax.tree_map is itself deprecated in recent JAX releases;
    # jax.tree_util.tree_map is the stable spelling of the same function.
    return jax.tree_util.tree_map(partial(jax.lax.select, pred), a, b)
def set_mixed_precision_policy(use_mp: bool, model_name: str) -> None:
    """Set mixed precision policy for networks.

    Registers policies per module class via ``hk.mixed_precision.set_policy``,
    so it affects modules instantiated afterwards.

    Args:
        use_mp: use mixed precision if True.
        model_name: name of the model.

    Raises:
        ValueError: if the model name is unknown.
    """
    # assign mixed precision policies to modules
    # for norms, use the full precision for stability
    mp_policy = get_mixed_precision_policy(use_mp)
    mp_norm_policy = get_mixed_precision_policy_for_normalization(use_mp)
    # the order we call `set_policy` doesn't matter, when a method on a
    # class is called the policy for that class will be applied, or it will
    # inherit the policy from its parent module.
    hk.mixed_precision.set_policy(hk.BatchNorm, mp_norm_policy)
    hk.mixed_precision.set_policy(hk.GroupNorm, mp_norm_policy)
    hk.mixed_precision.set_policy(hk.LayerNorm, mp_norm_policy)
    hk.mixed_precision.set_policy(hk.InstanceNorm, mp_norm_policy)
    if model_name not in CONFIG_NAME_TO_MODEL_CLS_NAME:
        raise ValueError(f"Unknown model name {model_name}.")
    # resolve the model class by name and apply the general policy to it
    model_cls_name = CONFIG_NAME_TO_MODEL_CLS_NAME[model_name]
    hk.mixed_precision.set_policy(getattr(model, model_cls_name), mp_policy)
| 2,956 | 29.173469 | 78 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/imgx/exp/eval.py | """Module for building evaluation functions."""
import json
from functools import partial
from pathlib import Path
from typing import Callable, Dict, Iterable, Optional, Tuple
import chex
import haiku as hk
import jax
import numpy as np
import pandas as pd
from jax import numpy as jnp
from omegaconf import DictConfig
from imgx import IMAGE, LABEL, UID
from imgx.datasets import (
DIR_TFDS_PROCESSED_MAP,
IMAGE_SPACING_MAP,
NUM_CLASSES_MAP,
)
from imgx.datasets.preprocess import save_segmentation_prediction
from imgx.datasets.util import unpad
from imgx.device import unshard
from imgx.diffusion.gaussian_diffusion import GaussianDiffusion
from imgx.exp.mixed_precision import set_mixed_precision_policy
from imgx.exp.model import build_diffusion_model, build_vision_model
from imgx.math_util import logits_to_mask
from imgx.metric import (
aggregated_surface_distance,
centroid_distance,
class_proportion,
dice_score,
iou,
normalized_surface_dice_from_distances,
)
from imgx.metric.centroid import get_coordinate_grid
def get_jit_segmentation_metrics(
    mask_pred: jnp.ndarray, mask_true: jnp.ndarray, spacing: jnp.ndarray
) -> Dict[str, jnp.ndarray]:
    """Calculate jittable segmentation metrics.

    nanmean is used for aggregation so classes absent from a sample
    (yielding NaN metrics) do not skew the means.

    Args:
        mask_true: shape = (batch, ..., num_classes).
        mask_pred: shape = (batch, ..., num_classes).
        spacing: spacing of pixel/voxels along each dimension, (3,).

    Returns:
        Dict of metrics, each value is of shape (batch,).
    """
    chex.assert_equal_shape([mask_pred, mask_true])
    metrics = {}

    # binary dice, shape (batch, num_classes)
    dice_bc = dice_score(mask_pred=mask_pred, mask_true=mask_true)
    for cls_idx in range(dice_bc.shape[-1]):
        metrics[f"binary_dice_score_class_{cls_idx}"] = dice_bc[:, cls_idx]
    metrics["mean_binary_dice_score"] = jnp.nanmean(dice_bc, axis=1)
    metrics["mean_binary_dice_score_without_background"] = jnp.nanmean(
        dice_bc[:, 1:], axis=1
    )

    # IoU, shape (batch, num_classes)
    iou_bc = iou(mask_pred=mask_pred, mask_true=mask_true)
    for cls_idx in range(iou_bc.shape[-1]):
        metrics[f"iou_class_{cls_idx}"] = iou_bc[:, cls_idx]
    metrics["mean_iou"] = jnp.nanmean(iou_bc, axis=1)
    metrics["mean_iou_without_background"] = jnp.nanmean(iou_bc[:, 1:], axis=1)

    # centroid distance, shape (batch, num_classes); the coordinate grid
    # covers the spatial axes only (drop batch and class axes)
    coord_grid = get_coordinate_grid(shape=mask_pred.shape[1:-1])
    centroid_bc = centroid_distance(
        mask_pred=mask_pred,
        mask_true=mask_true,
        grid=coord_grid,
        spacing=spacing,
    )
    for cls_idx in range(centroid_bc.shape[-1]):
        metrics[f"centroid_dist_class_{cls_idx}"] = centroid_bc[:, cls_idx]
    metrics["mean_centroid_dist"] = jnp.nanmean(centroid_bc, axis=1)
    metrics["mean_centroid_dist_without_background"] = jnp.nanmean(
        centroid_bc[:, 1:], axis=1
    )

    # per-class volume proportion for both prediction and ground truth
    for mask, mask_name in ((mask_pred, "pred"), (mask_true, "label")):
        prop_bc = class_proportion(mask)
        for cls_idx in range(prop_bc.shape[-1]):
            metrics[f"class_{cls_idx}_proportion_{mask_name}"] = prop_bc[
                :, cls_idx
            ]
    return metrics
def get_non_jit_segmentation_metrics(
    mask_pred: jnp.ndarray,
    mask_true: jnp.ndarray,
    spacing: Optional[jnp.ndarray],
) -> Dict[str, jnp.ndarray]:
    """Calculate non-jittable (surface-based) segmentation metrics for batch.

    nanmean is used for aggregation so classes absent from a sample
    (yielding NaN metrics) do not skew the means.

    Args:
        mask_pred: (batch, w, h, d, num_classes)
        mask_true: (batch, w, h, d, num_classes)
        spacing: spacing of pixel/voxels along each dimension.

    Returns:
        Dict of metrics, each value is of shape (batch,).
    """
    chex.assert_equal_shape([mask_pred, mask_true])
    metrics = {}
    # (3, batch, num_classes), one leading row per aggregation:
    #   0 - mean surface distance
    #   1 - hausdorff distance (95th percentile)
    #   2 - normalised surface dice
    agg_sd_bc = aggregated_surface_distance(
        mask_pred=np.array(mask_pred),
        mask_true=np.array(mask_true),
        agg_fns=[
            np.mean,
            partial(np.percentile, q=95),
            normalized_surface_dice_from_distances,
        ],
        num_args=[1, 1, 2],
        spacing=spacing,
    )
    for cls_idx in range(agg_sd_bc.shape[-1]):
        metrics[f"mean_surface_dist_class_{cls_idx}"] = agg_sd_bc[0, :, cls_idx]
        metrics[f"hausdorff_dist_class_{cls_idx}"] = agg_sd_bc[1, :, cls_idx]
        metrics[f"surface_dice_class_{cls_idx}"] = agg_sd_bc[2, :, cls_idx]
    agg_names = ["mean_surface_dist", "hausdorff_dist", "surface_dice"]
    # mean over all classes
    for agg_idx, agg_name in enumerate(agg_names):
        metrics[f"mean_{agg_name}"] = np.nanmean(
            agg_sd_bc[agg_idx, ...], axis=-1
        )
    # mean over foreground classes only (class 0 is background)
    for agg_idx, agg_name in enumerate(agg_names):
        metrics[f"mean_{agg_name}_without_background"] = np.nanmean(
            agg_sd_bc[agg_idx, :, 1:], axis=-1
        )
    return metrics
def batch_segmentation_evaluation(
    input_dict: Dict[str, jnp.ndarray],
    model: hk.Module,
    spacing: jnp.ndarray,
    num_classes: int,
) -> Tuple[Dict[str, jnp.ndarray], jnp.ndarray]:
    """Evaluate segmentation predictions for one batch.

    Args:
        input_dict: input data having image and label.
        model: network instance.
        spacing: spacing of pixel/voxels along each dimension.
        num_classes: number of classes including background.

    Returns:
        - metrics, each metric value has shape (batch, ).
        - logits.
    """
    # add a trailing channel axis, (batch, ..., 1)
    image = input_dict[IMAGE][..., None]
    # (batch, ..., num_classes)
    logits = model(image=image, is_train=False)
    # hard prediction and one-hot ground truth, (batch, ..., num_classes)
    mask_pred = logits_to_mask(logits, axis=-1)
    mask_true = jax.nn.one_hot(
        input_dict[LABEL], num_classes=num_classes, axis=-1
    )
    metrics = get_jit_segmentation_metrics(
        mask_pred=mask_pred, mask_true=mask_true, spacing=spacing
    )
    return metrics, logits
def batch_diffusion_evaluation(
    input_dict: Dict[str, jnp.ndarray],
    spacing: jnp.ndarray,
    num_classes: int,
    diffusion_model: GaussianDiffusion,
) -> Tuple[Dict[str, jnp.ndarray], jnp.ndarray]:
    """Evaluate predictions from diffusion model.

    Sampling starts from noise drawn by ``diffusion_model.noise_sample``,
    and the prediction at every sampling step is evaluated.

    Args:
        input_dict: input data having image and label.
        spacing: spacing of pixel/voxels along each dimension.
        num_classes: number of classes including background.
        diffusion_model: model for sampling.

    Returns:
        - metrics for all time steps, each metric value has shape (batch, ).
        - logits for all time steps.
    """
    # (batch, ..., 1)
    image = jnp.expand_dims(input_dict[IMAGE], axis=-1)
    # (batch, ..., num_classes)
    mask_true = jax.nn.one_hot(
        input_dict[LABEL], num_classes=num_classes, axis=-1
    )
    # initial noisy sample, (batch, ..., num_classes)
    x_t = diffusion_model.noise_sample(shape=mask_true.shape, dtype=image.dtype)
    # per-step predictions stacked along a new trailing step axis,
    # (batch, ..., num_classes, num_timesteps_sample)
    x_start = jnp.stack(
        list(diffusion_model.sample_mask_progressive(image=image, x_t=x_t)),
        axis=-1,
    )
    # evaluate
    # (batch, ..., num_classes, num_timesteps_sample)
    mask_pred = logits_to_mask(x_start, axis=-2)
    # vmap maps the metric function over the trailing step axis; mask_true
    # and spacing are bound via partial so they are shared across steps
    scalars = jax.vmap(
        partial(
            get_jit_segmentation_metrics,
            mask_true=mask_true,
            spacing=spacing,
        ),
        in_axes=-1,
        out_axes=-1,
    )(mask_pred)
    return scalars, x_start
def build_batch_eval_fn(
    config: DictConfig,
) -> Callable:
    """Build the per-batch evaluation function from config.

    Args:
        config: entire config.

    Returns:
        Evaluate function.

    Raises:
        ValueError: if config is wrong or not supported.
    """
    if not hasattr(config.model, "name"):
        # fixed message grammar: previously read "Config does have model name."
        raise ValueError("Config does not have model name.")
    set_mixed_precision_policy(
        use_mp=config.training.mixed_precision.use, model_name=config.model.name
    )
    # image spacing and class count are per-dataset properties
    data_config = config.data
    dataset_name = data_config.name
    spacing = jnp.array(IMAGE_SPACING_MAP[dataset_name])
    num_classes = NUM_CLASSES_MAP[dataset_name]
    task_config = config.task
    vision_model = build_vision_model(
        data_config=data_config,
        task_config=task_config,
        model_config=config.model,
    )
    task_name = task_config["name"]
    if task_name == "segmentation":
        return partial(
            batch_segmentation_evaluation,
            model=vision_model,
            spacing=spacing,
            num_classes=num_classes,
        )
    if task_name == "diffusion":
        # wrap the vision model into a diffusion sampler
        diffusion_model = build_diffusion_model(
            model=vision_model,
            diffusion_config=task_config["diffusion"],
        )
        return partial(
            batch_diffusion_evaluation,
            spacing=spacing,
            num_classes=num_classes,
            diffusion_model=diffusion_model,
        )
    raise ValueError(f"Unknown task {task_name}.")
def get_non_jit_segmentation_metrics_per_step(
    mask_pred: jnp.ndarray,
    mask_true: jnp.ndarray,
    spacing: Optional[jnp.ndarray],
) -> Dict[str, jnp.ndarray]:
    """Calculate non-jittable segmentation metrics per sampling step.

    The step axis is iterated with a plain Python loop because VMAP
    requires jittable functions.

    Args:
        mask_pred: (batch, w, h, d, num_classes, num_steps)
        mask_true: (batch, w, h, d, num_classes)
        spacing: spacing of pixel/voxels along each dimension.

    Returns:
        Metrics dict, each value stacks the per-step values in its last axis.

    Raises:
        ValueError: if mask_pred does not have six dimensions.
    """
    if mask_pred.ndim != 6:
        raise ValueError(
            "mask_pred should have shape "
            "(batch, w, h, d, num_classes, num_timesteps_sample) ,"
            f"got {mask_pred.shape}."
        )
    per_step = [
        get_non_jit_segmentation_metrics(
            mask_pred=mask_pred[..., step],
            mask_true=mask_true,
            spacing=spacing,
        )
        for step in range(mask_pred.shape[-1])
    ]
    return {
        key: np.stack([scalars[key] for scalars in per_step], axis=-1)
        for key in per_step[0]
    }
def dataset_segmentation_evaluation(  # pylint:disable=R0912,R0915
    evaluate_pmap: Callable,
    params: hk.Params,
    state: hk.State,
    rng: jnp.ndarray,
    batch_iterator: Iterable[Dict[str, chex.ArrayTree]],
    num_steps: int,
    is_diffusion: bool,
    spacing: Optional[jnp.ndarray],
    out_dir: Optional[Path],
    tfds_dir: Path,
    save_predictions: bool,
) -> Dict:
    """Get predictions and perform evaluations of a data set.

    Args:
        evaluate_pmap: forward function to call.
        params: model parameters.
        state: model state, EMA or not.
        rng: random key.
        batch_iterator: iterator of a data set.
        num_steps: number of steps.
        is_diffusion: the method is diffusion or not.
        spacing: spacing of pixel/voxels along each dimension.
        out_dir: output directory for metrics and predictions,
            if None, no files will be saved.
        tfds_dir: directory saving preprocessed images and labels.
        save_predictions: if True, save predicted masks.

    Returns:
        Dict of metrics averaged over all samples in the data set,
        plus the total number of evaluated images.
    """
    lst_df_scalar = []
    for _ in range(num_steps):
        batch = next(batch_iterator)  # type: ignore[call-overload]
        # get UID and parse to string
        uids = batch.pop(UID)
        uids = uids.reshape(-1)  # remove shard axis
        uids = [
            x.decode("utf-8") if isinstance(x, bytes) else x
            for x in uids.tolist()
        ]
        # non diffusion
        #   logits (num_shards, batch, w, h, d, num_classes)
        #   metrics (num_shards, batch)
        # diffusion
        #   logits (num_shards, batch, w, h, d, num_classes, num_timesteps)
        #   metrics (num_shards, batch, num_timesteps)
        # arrays are across all devices
        batch = jax.lax.stop_gradient(batch)
        (scalars, logits), _ = evaluate_pmap(params, state, rng, batch)
        label = batch[LABEL]
        # put on cpu to free accelerator memory before the non-jit metrics
        device_cpu = jax.devices("cpu")[0]
        scalars = jax.device_put(scalars, device_cpu)
        logits = jax.device_put(logits, device_cpu)
        label = jax.device_put(label, device_cpu)
        # remove shard axis
        # array are on device 0
        scalars = unshard(scalars)
        logits = unshard(logits)
        label = unshard(label)
        # remove padded examples
        # NOTE(review): padding is assumed to carry integer uid 0 (not "0")
        # after decoding — confirm against the dataset pipeline.
        if 0 in uids:
            # the batch was not complete, padded with zero
            num_samples = uids.index(0)
            uids = uids[:num_samples]
            scalars = unpad(scalars, num_samples)
            logits = unpad(logits, num_samples)
            label = unpad(label, num_samples)
        if is_diffusion:
            # class axis is second to last, last axis indexes sampling steps
            num_classes = logits.shape[-2]
            mask_true = jax.nn.one_hot(label, num_classes=num_classes, axis=-1)
            mask_pred = logits_to_mask(logits, axis=-2)
            scalars_non_jit = get_non_jit_segmentation_metrics_per_step(
                mask_pred=mask_pred,
                mask_true=mask_true,
                spacing=spacing,
            )
        else:
            num_classes = logits.shape[-1]
            mask_true = jax.nn.one_hot(label, num_classes=num_classes, axis=-1)
            scalars_non_jit = get_non_jit_segmentation_metrics(
                mask_pred=logits_to_mask(logits, axis=-1),
                mask_true=mask_true,
                spacing=spacing,
            )
        scalars = {**scalars, **scalars_non_jit}
        # for diffusion separate metrics per step; the unsuffixed key keeps
        # the value from the last sampling step
        if is_diffusion:
            scalars_flatten = {}
            for k, v in scalars.items():
                for i in range(v.shape[-1]):
                    scalars_flatten[f"{k}_step_{i}"] = v[..., i]
                scalars_flatten[k] = v[..., -1]
            scalars = scalars_flatten
        # save output
        if save_predictions and (out_dir is not None):
            if is_diffusion:
                # one sub-directory per sampling step
                for i in range(logits.shape[-1]):
                    mask_pred = np.array(
                        jnp.argmax(logits[..., i], axis=-1), dtype=int
                    )
                    save_segmentation_prediction(
                        preds=mask_pred,
                        uids=uids,
                        out_dir=out_dir / f"step_{i}",
                        tfds_dir=tfds_dir,
                    )
            else:
                mask_pred = np.array(jnp.argmax(logits, axis=-1), dtype=int)
                save_segmentation_prediction(
                    preds=mask_pred,
                    uids=uids,
                    out_dir=out_dir,
                    tfds_dir=tfds_dir,
                )
        # save metrics
        # NOTE(review): jax.tree_map is deprecated in newer JAX releases;
        # jax.tree_util.tree_map is the stable name.
        scalars = jax.tree_map(lambda x: np.asarray(x).tolist(), scalars)
        scalars["uid"] = uids
        lst_df_scalar.append(pd.DataFrame(scalars))
    # assemble metrics
    df_scalar = pd.concat(lst_df_scalar)
    df_scalar = df_scalar.sort_values("uid")
    if out_dir is not None:
        df_scalar.to_csv(out_dir / "metrics_per_sample.csv", index=False)
    # average over samples in the dataset (pandas mean skips NaN by default)
    scalars = df_scalar.drop("uid", axis=1).mean().to_dict()
    scalars["num_images_in_total"] = len(df_scalar)
    if out_dir is not None:
        with open(out_dir / "mean_metrics.json", "w", encoding="utf-8") as f:
            json.dump(scalars, f, sort_keys=True, indent=4)
    return scalars
def build_dataset_eval_fn(config: DictConfig) -> Callable:
    """Return a function evaluating a whole data set.

    Args:
        config: entire config.

    Returns:
        A function.

    Raises:
        ValueError: if data set in unknown.
    """
    is_diffusion = config["task"]["name"] == "diffusion"
    dataset_name = config.data.name
    voxel_spacing = jnp.array(IMAGE_SPACING_MAP[dataset_name])
    processed_tfds_dir = DIR_TFDS_PROCESSED_MAP[dataset_name]
    return partial(
        dataset_segmentation_evaluation,
        is_diffusion=is_diffusion,
        spacing=voxel_spacing,
        tfds_dir=processed_tfds_dir,
    )
| 16,503 | 31.746032 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_model_basic.py | """Test basic functions for model."""
import chex
import jax
from absl.testing import parameterized
from chex._src import fake
from imgx.model.basic import sinusoidal_positional_embedding
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None:  # pylint: disable=invalid-name
    """Fake multi-devices.

    chex pretends there are two CPU devices so multi-device code paths
    are exercised in these tests.
    """
    fake.set_n_cpu_devices(2)
class TestSinusoidalPositionalEmbedding(chex.TestCase):
    """Test the function sinusoidal_positional_embedding."""
    @chex.variants(
        with_jit=True, without_jit=True, with_device=True, without_device=True
    )
    @parameterized.named_parameters(
        ("case 1", 2, 4, 5),
        (
            "case 2",
            3,
            8,
            10000,
        ),
    )
    def test_shapes(self, batch_size: int, dim: int, max_period: int) -> None:
        """Test output shapes under different device condition.

        Args:
            batch_size: batch size.
            dim: embedding dimension, assume to be evenly divided by two.
            max_period: controls the minimum frequency of the embeddings.
        """
        rng = jax.random.PRNGKey(0)
        # scalar positions, one per batch element
        x = jax.random.uniform(
            rng,
            shape=(batch_size,),
        )
        # dim and max_period are static so the function can be jitted
        out = self.variant(
            sinusoidal_positional_embedding, static_argnums=(1, 2)
        )(
            x,
            dim=dim,
            max_period=max_period,
        )
        # embedding maps (batch,) -> (batch, dim)
        chex.assert_shape(out, (batch_size, dim))
| 1,478 | 27.442308 | 78 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_diffusion_gaussian.py | """Test Gaussian diffusion related classes and functions."""
from typing import Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from absl.testing import parameterized
from chex._src import fake
from imgx.diffusion.gaussian_diffusion import (
DiffusionBetaSchedule,
DiffusionModelOutputType,
DiffusionModelVarianceType,
DiffusionSpace,
GaussianDiffusion,
extract_and_expand,
)
from imgx.model import Unet3dTime
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None:  # pylint: disable=invalid-name
    """Fake multi-devices.

    chex pretends there are two CPU devices so multi-device code paths
    are exercised in these tests.
    """
    fake.set_n_cpu_devices(2)
class TestExtractAndExpand(chex.TestCase):
    """Test extract_and_expand."""
    @chex.variants(without_jit=True, with_device=True, without_device=True)
    @parameterized.named_parameters(
        (
            "1d",
            1,
        ),
        (
            "2d",
            2,
        ),
        (
            "3d",
            3,
        ),
    )
    def test_shapes(
        self,
        ndim: int,
    ) -> None:
        """Test output shape.

        Args:
            ndim: number of dimensions.
        """
        batch_size = 2
        betas = jnp.array([0, 0.2, 0.5, 1.0])
        num_timesteps = len(betas)
        rng = jax.random.PRNGKey(0)
        # one random time step per batch element
        t = jax.random.randint(
            rng, shape=(batch_size,), minval=0, maxval=num_timesteps
        )
        got = self.variant(extract_and_expand)(arr=betas, t=t, ndim=ndim)
        # gathered values keep the batch axis and gain (ndim - 1)
        # broadcastable singleton axes
        expected_shape = (batch_size,) + (1,) * (ndim - 1)
        chex.assert_shape(got, expected_shape)
class TestGaussianDiffusion(chex.TestCase):
    """Test the class GaussianDiffusion."""
    # hyper-parameters shared by all test methods below
    batch_size = 2
    # unet
    in_channels = 1
    num_classes = 2
    num_channels = (1, 2)
    num_timesteps = 5
    num_timesteps_beta = 1001
    beta_schedule = DiffusionBetaSchedule.QUADRADIC
    beta_start = 0.0001
    beta_end = 0.02
    x_limit = 1.0
    use_ddim = False
    @chex.variants(without_jit=True)
    def test_attributes(
        self,
    ) -> None:
        """Test attribute shape."""
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward() -> GaussianDiffusion:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion
        gd = forward()
        # every beta-schedule derived buffer has one entry per time step
        chex.assert_shape(gd.betas, (self.num_timesteps,))
        chex.assert_shape(gd.alphas_cumprod, (self.num_timesteps,))
        chex.assert_shape(gd.alphas_cumprod_prev, (self.num_timesteps,))
        chex.assert_shape(gd.alphas_cumprod_next, (self.num_timesteps,))
        chex.assert_shape(gd.sqrt_alphas_cumprod, (self.num_timesteps,))
        chex.assert_shape(
            gd.sqrt_one_minus_alphas_cumprod, (self.num_timesteps,)
        )
        chex.assert_shape(
            gd.log_one_minus_alphas_cumprod, (self.num_timesteps,)
        )
        chex.assert_shape(gd.sqrt_recip_alphas_cumprod, (self.num_timesteps,))
        chex.assert_shape(
            gd.sqrt_recip_alphas_cumprod_minus_one, (self.num_timesteps,)
        )
        chex.assert_shape(gd.posterior_mean_coeff_start, (self.num_timesteps,))
        chex.assert_shape(gd.posterior_mean_coeff_t, (self.num_timesteps,))
        chex.assert_shape(gd.posterior_variance, (self.num_timesteps,))
        chex.assert_shape(
            gd.posterior_log_variance_clipped, (self.num_timesteps,)
        )
    @chex.all_variants
    @parameterized.named_parameters(
        ("1d", (2,)),
        ("2d", (2, 3)),
        ("3d", (2, 3, 4)),
    )
    def test_q_mean_log_variance(
        self,
        in_shape: Tuple[int, ...],
    ) -> None:
        """Test output shape.

        Args:
            in_shape: input shape.
        """
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            x_start: jnp.ndarray, t: jnp.ndarray
        ) -> Tuple[jnp.ndarray, jnp.ndarray]:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.q_mean_log_variance(x_start=x_start, t=t)
        rng = jax.random.PRNGKey(0)
        rng_start, rng_t = jax.random.split(rng, num=2)
        dummy_x_start = jax.random.uniform(
            rng_start, shape=(self.batch_size, *in_shape)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        got_mean, got_log_var = forward(x_start=dummy_x_start, t=dummy_t)
        # log variance is broadcastable: batch axis kept, other axes are 1
        expanded_shape = (dummy_x_start.shape[0],) + (1,) * (
            dummy_x_start.ndim - 1
        )
        chex.assert_shape(got_mean, dummy_x_start.shape)
        chex.assert_shape(got_log_var, expanded_shape)
    @chex.all_variants
    @parameterized.named_parameters(
        ("1d", (2,)),
        ("2d", (2, 3)),
        ("3d", (2, 3, 4)),
    )
    def test_q_sample(
        self,
        in_shape: Tuple[int, ...],
    ) -> None:
        """Test output shape.

        Args:
            in_shape: input shape.
        """
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            x_start: jnp.ndarray, noise: jnp.ndarray, t: jnp.ndarray
        ) -> jnp.ndarray:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.q_sample(x_start=x_start, noise=noise, t=t)
        rng = jax.random.PRNGKey(0)
        rng_start, rng_noise, rng_t = jax.random.split(rng, num=3)
        dummy_x_start = jax.random.uniform(
            rng_start, shape=(self.batch_size, *in_shape)
        )
        dummy_noise = jax.random.uniform(
            rng_noise, shape=(self.batch_size, *in_shape)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        got = forward(x_start=dummy_x_start, noise=dummy_noise, t=dummy_t)
        chex.assert_shape(got, dummy_x_start.shape)
    @chex.all_variants
    @parameterized.named_parameters(
        ("1d", (2,)),
        ("2d", (2, 3)),
        ("3d", (2, 3, 4)),
    )
    def test_q_posterior_mean_variance(
        self,
        in_shape: Tuple[int, ...],
    ) -> None:
        """Test output shape.

        Args:
            in_shape: input shape.
        """
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            x_start: jnp.ndarray, x_t: jnp.ndarray, t: jnp.ndarray
        ) -> Tuple[jnp.ndarray, jnp.ndarray]:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.q_posterior_mean_variance(
                x_start=x_start, x_t=x_t, t=t
            )
        rng_start = jax.random.PRNGKey(0)
        rng_start, rng_x_t, rng_t = jax.random.split(rng_start, num=3)
        dummy_x_start = jax.random.uniform(
            rng_start, shape=(self.batch_size, *in_shape)
        )
        dummy_x_t = jax.random.uniform(
            rng_x_t, shape=(self.batch_size, *in_shape)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        got_mean, got_log_var = forward(
            x_start=dummy_x_start, x_t=dummy_x_t, t=dummy_t
        )
        expanded_shape = (dummy_x_start.shape[0],) + (1,) * (
            dummy_x_start.ndim - 1
        )
        chex.assert_shape(got_mean, dummy_x_start.shape)
        chex.assert_shape(got_log_var, expanded_shape)
    @chex.all_variants
    @parameterized.product(
        in_shape=[
            (2,),
            (2, 3),
            (2, 3, 4),
        ],
        model_out_type=[
            DiffusionModelOutputType.X_START,
            DiffusionModelOutputType.X_PREVIOUS,
            DiffusionModelOutputType.EPSILON,
        ],
        model_var_type=[
            DiffusionModelVarianceType.FIXED_SMALL,
            DiffusionModelVarianceType.FIXED_LARGE,
            DiffusionModelVarianceType.LEARNED,
            DiffusionModelVarianceType.LEARNED_RANGE,
        ],
    )
    def test_p_mean_variance(
        self,
        in_shape: Tuple[int, ...],
        model_out_type: DiffusionModelOutputType,
        model_var_type: DiffusionModelVarianceType,
    ) -> None:
        """Test output shape.

        Args:
            in_shape: input shape.
            model_out_type: define model output meaning.
            model_var_type: define p(x_{t-1} | x_t) variance.
        """
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            model_out: jnp.ndarray, x_t: jnp.ndarray, t: jnp.ndarray
        ) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=model_out_type,
                model_var_type=model_var_type,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.p_mean_variance(model_out=model_out, x_t=x_t, t=t)
        rng_out = jax.random.PRNGKey(0)
        rng_out, rng_x_t, rng_t = jax.random.split(rng_out, num=3)
        num_out_channels = self.num_classes
        if model_var_type in [
            DiffusionModelVarianceType.LEARNED,
            DiffusionModelVarianceType.LEARNED_RANGE,
        ]:
            # learned-variance models output mean and variance channels
            num_out_channels *= 2
        model_out_shape = (self.batch_size, *in_shape, num_out_channels)
        dummy_model_out = jax.random.uniform(
            rng_out,
            shape=model_out_shape,
        )
        dummy_x_t = jax.random.uniform(
            rng_x_t, shape=(self.batch_size, *in_shape, self.num_classes)
        )
        # for t = 0, x_prev is not well-defined
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=1, maxval=self.num_timesteps
        )
        (
            got_x_start,
            got_model_mean,
            got_model_log_variance,
        ) = forward(model_out=dummy_model_out, x_t=dummy_x_t, t=dummy_t)
        expanded_shape = (dummy_x_t.shape[0],) + (1,) * (dummy_x_t.ndim - 1)
        assert (~jnp.isnan(got_x_start)).all()
        chex.assert_shape(got_x_start, dummy_x_t.shape)
        chex.assert_shape(got_model_mean, dummy_x_t.shape)
        if model_var_type in [
            DiffusionModelVarianceType.FIXED_SMALL,
            DiffusionModelVarianceType.FIXED_LARGE,
        ]:
            # variances are extended
            chex.assert_shape(got_model_log_variance, expanded_shape)
        else:
            chex.assert_shape(got_model_log_variance, dummy_x_t.shape)
        # check value range
        chex.assert_scalar_in(
            jnp.min(got_x_start).item(), -self.x_limit, self.x_limit
        )
        chex.assert_scalar_in(
            jnp.max(got_x_start).item(), -self.x_limit, self.x_limit
        )
    @chex.all_variants
    @parameterized.named_parameters(
        ("1d", (2,)),
        ("2d", (2, 3)),
        ("3d", (2, 3, 4)),
    )
    def test_p_sample(
        self,
        in_shape: Tuple[int, ...],
    ) -> None:
        """Test output shape.

        Args:
            in_shape: input shape.
        """
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            model_out: jnp.ndarray, x_t: jnp.ndarray, t: jnp.ndarray
        ) -> Tuple[jnp.ndarray, jnp.ndarray]:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.p_sample(model_out=model_out, x_t=x_t, t=t)
        rng_out = jax.random.PRNGKey(0)
        rng_out, rng_x_t, rng_t = jax.random.split(rng_out, num=3)
        model_out_shape = (self.batch_size, *in_shape)
        dummy_model_out = jax.random.uniform(
            rng_out,
            shape=model_out_shape,
        )
        dummy_x_t = jax.random.uniform(
            rng_x_t, shape=(self.batch_size, *in_shape)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        got_sample, got_x_start_pred = forward(
            model_out=dummy_model_out, x_t=dummy_x_t, t=dummy_t
        )
        chex.assert_shape(got_sample, dummy_x_t.shape)
        chex.assert_shape(got_x_start_pred, dummy_x_t.shape)
        # check value range
        chex.assert_scalar_in(
            jnp.min(got_sample).item(), -self.x_limit, self.x_limit
        )
        chex.assert_scalar_in(
            jnp.max(got_sample).item(), -self.x_limit, self.x_limit
        )
    @chex.all_variants
    def test_p_sample_mask(
        self,
    ) -> None:
        """Test output shape."""
        in_shape = (2, 3, 4)
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            image: jnp.ndarray,
            x_t: jnp.ndarray,
        ) -> jnp.ndarray:
            """Forward function for p_sample_mask.

            Args:
                image: (batch, w, h, d, in_channels).
                x_t: (batch, w, h, d, num_classes).

            Returns:
                p_sample_mask output.
            """
            # real (small) UNet here, unlike the plain hk.Module stubs above,
            # because sample_mask actually calls the model
            model = Unet3dTime(
                in_shape=in_shape,
                in_channels=self.in_channels + self.num_classes,
                out_channels=self.num_classes,
                num_channels=self.num_channels,
                num_timesteps=self.num_timesteps,
            )
            diffusion = GaussianDiffusion(
                model=model,
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.sample_mask(
                image=image,
                x_t=x_t,
            )
        rng_image = jax.random.PRNGKey(0)
        rng_image, rng_x_t = jax.random.split(rng_image)
        image_shape = (self.batch_size, *in_shape, self.in_channels)
        dummy_image = jax.random.uniform(
            rng_image,
            shape=image_shape,
        )
        dummy_x_t = jax.random.uniform(
            rng_x_t, shape=(self.batch_size, *in_shape, self.num_classes)
        )
        got_sample = forward(
            image=dummy_image,
            x_t=dummy_x_t,
        )
        chex.assert_shape(got_sample, dummy_x_t.shape)
    @chex.all_variants
    @parameterized.named_parameters(
        ("1d", (2,)),
        ("2d", (2, 3)),
        ("3d", (2, 3, 4)),
    )
    def test_variational_lower_bound(
        self,
        in_shape: Tuple[int, ...],
    ) -> None:
        """Test output shape.

        Args:
            in_shape: input shape.
        """
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            model_out: jnp.ndarray,
            x_start: jnp.ndarray,
            x_t: jnp.ndarray,
            t: jnp.ndarray,
        ) -> jnp.ndarray:
            diffusion = GaussianDiffusion(
                model=hk.Module(),
                num_timesteps=self.num_timesteps,
                num_timesteps_beta=self.num_timesteps_beta,
                beta_schedule=self.beta_schedule,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
                model_out_type=DiffusionModelOutputType.X_START,
                model_var_type=DiffusionModelVarianceType.FIXED_LARGE,
                x_limit=self.x_limit,
                x_space=DiffusionSpace.SCALED_PROBS,
                use_ddim=self.use_ddim,
            )
            return diffusion.variational_lower_bound(
                model_out=model_out, x_start=x_start, x_t=x_t, t=t
            )
        rng_out = jax.random.PRNGKey(0)
        rng_out, rng_x_start, rng_x_t, rng_t = jax.random.split(rng_out, num=4)
        model_out_shape = (self.batch_size, *in_shape)
        dummy_model_out = jax.random.uniform(
            rng_out,
            shape=model_out_shape,
        )
        dummy_x_start = jax.random.uniform(
            rng_x_start, shape=(self.batch_size, *in_shape)
        )
        dummy_x_t = jax.random.uniform(
            rng_x_t, shape=(self.batch_size, *in_shape)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        got = forward(
            model_out=dummy_model_out,
            x_start=dummy_x_start,
            x_t=dummy_x_t,
            t=dummy_t,
        )
        # VLB is reduced to one scalar per batch element
        chex.assert_shape(got, (self.batch_size,))
| 19,693 | 33.250435 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_loss_cross_entropy.py | """Test dice loss functions."""
import chex
import jax
import numpy as np
from absl.testing import parameterized
from chex._src import fake
from imgx.loss import mean_cross_entropy, mean_focal_loss
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None: # pylint: disable=invalid-name
    """Fake two CPU devices so this module's tests see a multi-device JAX."""
    fake.set_n_cpu_devices(2)
class TestCrossEntropyLoss(chex.TestCase):
    """Test mean_cross_entropy."""
    # Softmax probabilities for relative logits (0, -1, -2); each per-pixel
    # logit row in the cases below normalises to one of these values.
    prob_0 = 1 / (1 + np.exp(-1) + np.exp(-2))
    prob_1 = np.exp(-1) / (1 + np.exp(-1) + np.exp(-2))
    prob_2 = np.exp(-2) / (1 + np.exp(-1) + np.exp(-2))
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "1d",
            np.array([[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0], [0.0, -1.0, -2.0]]]),
            np.array([[2, 1, 0]]),
            # Expected loss: mean of -log(prob of true class) per pixel.
            np.mean(
                -np.log(
                    [prob_2, prob_1, prob_0],
                )
            ),
        ),
        (
            "2d",
            np.array(
                [
                    [
                        [[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
                        [[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[2, 1], [0, 1]]]),
            np.mean(
                -np.log(
                    [prob_2, prob_1, prob_1, prob_2],
                )
            ),
        ),
    )
    def test_values(
        self,
        logits: np.ndarray,
        targets: np.ndarray,
        expected: np.ndarray,
    ) -> None:
        """Test cross entropy loss values.
        Args:
            logits: unscaled prediction, of shape (..., num_classes).
            targets: values are integers, of shape (...).
            expected: expected output.
        """
        num_classes = logits.shape[-1]
        # (batch, ..., num_classes)
        mask_true = jax.nn.one_hot(targets, num_classes=num_classes, axis=-1)
        got = self.variant(mean_cross_entropy)(
            logits=logits,
            mask_true=mask_true,
        )
        chex.assert_trees_all_close(got, expected)
class TestFocalLoss(chex.TestCase):
    """Test mean_focal_loss."""
    # Softmax probabilities for relative logits (0, -1, -2); each per-pixel
    # logit row in the cases below normalises to one of these values.
    prob_0 = 1 / (1 + np.exp(-1) + np.exp(-2))
    prob_1 = np.exp(-1) / (1 + np.exp(-1) + np.exp(-2))
    prob_2 = np.exp(-2) / (1 + np.exp(-1) + np.exp(-2))
    @chex.all_variants
    @parameterized.named_parameters(
        (
            # gamma=0 reduces focal loss to plain cross entropy.
            "1d-gamma=0.0",
            np.array([[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0], [0.0, -1.0, -2.0]]]),
            np.array([[2, 1, 0]]),
            0.0,
            np.mean(
                -np.log(
                    [prob_2, prob_1, prob_0],
                )
            ),
        ),
        (
            "2d-gamma=0.0",
            np.array(
                [
                    [
                        [[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
                        [[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[2, 1], [0, 1]]]),
            0.0,
            np.mean(
                -np.log(
                    [prob_2, prob_1, prob_1, prob_2],
                )
            ),
        ),
        (
            # gamma>0 down-weights well-classified pixels by (1-p)^gamma.
            "1d-gamma=1.2",
            np.array([[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0], [0.0, -1.0, -2.0]]]),
            np.array([[2, 1, 0]]),
            1.2,
            np.mean(
                np.array(
                    [
                        -((1 - p) ** 1.2) * np.log(p)
                        for p in [prob_2, prob_1, prob_0]
                    ],
                )
            ),
        ),
        (
            "2d-gamma=1.2",
            np.array(
                [
                    [
                        [[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
                        [[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[2, 1], [0, 1]]]),
            1.2,
            np.mean(
                np.array(
                    [
                        -((1 - p) ** 1.2) * np.log(p)
                        for p in [prob_2, prob_1, prob_1, prob_2]
                    ],
                )
            ),
        ),
    )
    def test_values(
        self,
        logits: np.ndarray,
        targets: np.ndarray,
        gamma: float,
        expected: np.ndarray,
    ) -> None:
        """Test focal loss values.
        Args:
            logits: unscaled prediction, of shape (..., num_classes).
            targets: values are integers, of shape (...).
            gamma: adjust class imbalance, 0 is equivalent to cross entropy.
            expected: expected output.
        """
        num_classes = logits.shape[-1]
        # (batch, ..., num_classes)
        mask_true = jax.nn.one_hot(targets, num_classes=num_classes, axis=-1)
        got = self.variant(mean_focal_loss)(
            logits=logits,
            mask_true=mask_true,
            gamma=gamma,
        )
        chex.assert_trees_all_close(got, expected)
| 5,017 | 27.511364 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_train_state.py | """Test TrainState and related functions."""
from pathlib import Path
from typing import Dict
import chex
import jax.numpy as jnp
import jax.random
import jmp
import pytest
from chex._src import fake
from imgx.device import broadcast_to_local_devices
from imgx.exp import train_state
def setUpModule() -> None: # pylint: disable=invalid-name
    """Fake two CPU devices so this module's tests see a multi-device JAX."""
    fake.set_n_cpu_devices(2)
@pytest.fixture(name="dummy_train_state_dict")
def get_dummy_train_state_dict() -> Dict[str, jnp.ndarray]:
    """A dummy dict for train state attribute values.
    Shapes are arbitrary and the same PRNG key is reused on purpose: the
    values only need to round-trip through save/restore, not be independent.
    Returns:
        A dict of dummy values.
    """
    key = jax.random.PRNGKey(0)
    return {
        "params": jax.random.uniform(key, (3, 5)),
        "network_state": jax.random.uniform(key, (4, 5)),
        "opt_state": jax.random.uniform(key, (5, 5)),
        "global_step": jnp.array(0, dtype=jnp.int32),
        "rng": jax.random.PRNGKey(0),
        "ema_params": jax.random.uniform(key, (3, 3)),
        "ema_network_state": jax.random.uniform(key, (3, 4)),
    }
def test_save_restore_array_tree(
    tmp_path: Path, dummy_train_state_dict: chex.ArrayTree
) -> None:
    """Round-trip an array tree through save/restore and compare.

    Args:
        tmp_path: fixture for temp path.
        dummy_train_state_dict: dummy data to save.
    """
    save_dir = tmp_path / "ckpt"
    save_dir.mkdir()
    # Persist the tree, reload it, and require an exact match.
    train_state.save_array_tree(save_dir, dummy_train_state_dict)
    loaded = train_state.restore_array_tree(save_dir)
    chex.assert_trees_all_equal(dummy_train_state_dict, loaded)
@pytest.mark.parametrize(
    "loss_scale_type",
    [
        "NoOpLossScale",
        "StaticLossScale",
        "DynamicLossScale",
    ],
)
def test_save_restore_ckpt(
    loss_scale_type: str, tmp_path: Path, dummy_train_state_dict: chex.ArrayTree
) -> None:
    """Test by saving and restoring.
    Args:
        loss_scale_type: NoOpLossScale, StaticLossScale, DynamicLossScale.
        tmp_path: fixture for temp path.
        dummy_train_state_dict: dummy data to save.
    """
    ckpt_dir = tmp_path / "ckpt"
    ckpt_dir.mkdir()
    # Replicate each array across (faked) local devices, as in training.
    train_state_dict = jax.tree_map(
        broadcast_to_local_devices, dummy_train_state_dict
    )
    if loss_scale_type == "NoOpLossScale":
        # NoOpLossScale takes no initial scale value.
        loss_scale = jmp.NoOpLossScale()
    else:
        scale = jmp.half_dtype()(2**15)
        loss_scale = getattr(jmp, loss_scale_type)(scale)
        loss_scale = broadcast_to_local_devices(loss_scale)
    dummy_train_state = train_state.TrainState(  # type: ignore[call-arg]
        params=train_state_dict["params"],
        network_state=train_state_dict["network_state"],
        opt_state=train_state_dict["opt_state"],
        loss_scale=loss_scale,
        global_step=train_state_dict["global_step"],
        rng=train_state_dict["rng"],
        ema_params=train_state_dict["ema_params"],
        ema_network_state=train_state_dict["ema_network_state"],
    )
    train_state.save_ckpt(dummy_train_state, ckpt_dir)
    restored_train_state = train_state.restore_ckpt(ckpt_dir)
    if loss_scale_type == "DynamicLossScale":
        dummy_loss_scale = dummy_train_state.loss_scale
        restored_loss_scale = restored_train_state.loss_scale
        # Exclude loss_scale from the tree comparison by overwriting both
        # sides with a sentinel; its fields are then compared one by one.
        dummy_train_state.loss_scale = -1
        restored_train_state.loss_scale = -1
        chex.assert_trees_all_equal(dummy_train_state, restored_train_state)
        chex.assert_trees_all_equal(
            dummy_loss_scale.loss_scale, restored_loss_scale.loss_scale
        )
        chex.assert_trees_all_equal(
            dummy_loss_scale.counter, restored_loss_scale.counter
        )
        chex.assert_trees_all_equal(
            dummy_loss_scale.period, restored_loss_scale.period
        )
        chex.assert_trees_all_equal(
            dummy_loss_scale.factor, restored_loss_scale.factor
        )
    else:
        chex.assert_trees_all_equal(dummy_train_state, restored_train_state)
| 3,895 | 30.934426 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_exp_model.py | """Test mixed precision related functions in factory."""
import haiku as hk
import pytest
from omegaconf import DictConfig
from imgx.exp.model import build_vision_model
from imgx.model import MODEL_CLS_NAME_TO_CONFIG_NAME, SUPPORTED_VISION_MODELS
# Minimal task configuration: a segmentation task plus the diffusion
# hyper-parameters needed when models are built for the diffusion task.
DUMMY_TASK_CONFIG = {
    "name": "segmentation",
    "diffusion": {
        "num_timesteps": 4,
        "num_timesteps_sample": 20,
        # Variance (beta) schedule of the forward diffusion process.
        "beta": {
            "beta_schedule": "linear",
            "beta_start": 0.0001,
            "beta_end": 0.02,
        },
        "model_out_type": "x_start",
        "model_var_type": "fixed_large",
        "x_space": "scaled_probs",
        "x_limit": 0.0,
    },
}
# Minimal per-model configuration; tiny channel counts keep model
# construction fast in tests.
DUMMY_MODEL_CONFIG = {
    "remat": False,
    "unet3d": {
        "num_channels": [1, 2, 4],
    },
    "unet3d_slice": {
        "num_channels": [1, 2, 4],
    },
    "unet3d_time": {
        "num_channels": [1, 2, 4],
    },
    "unet3d_slice_time": {
        "num_channels": [1, 2, 4],
    },
}
@hk.testing.transform_and_run()
@pytest.mark.parametrize(
    "model_class",
    SUPPORTED_VISION_MODELS,
    ids=SUPPORTED_VISION_MODELS,
)
def test_build_vision_model(model_class: str) -> None:
    """Smoke test: build every supported vision model without error.
    Args:
        model_class: name of model class.
    """
    data_config = {
        "name": "male_pelvic_mr",
    }
    data_config = DictConfig(data_config)
    model_config = DictConfig(DUMMY_MODEL_CONFIG)
    task_config = DictConfig(DUMMY_TASK_CONFIG)
    # Model classes whose config name ends with "_time" are only built under
    # the diffusion task.
    if model_class.endswith("_time"):
        task_config["name"] = "diffusion"
    model_config["name"] = MODEL_CLS_NAME_TO_CONFIG_NAME[model_class]
    build_vision_model(
        data_config=data_config,
        task_config=task_config,
        model_config=model_config,
    )
| 1,733 | 24.5 | 77 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_dataset_augmentation.py | """Test function for data augmentation."""
from typing import Tuple
import chex
import jax
import jax.numpy as jnp
import numpy as np
from absl.testing import parameterized
from chex._src import fake
from imgx import IMAGE, LABEL
from imgx.datasets import FOREGROUND_RANGE
from imgx.datasets.augmentation import (
batch_apply_affine_to_grid,
batch_get_random_affine_matrix,
batch_random_affine_transform,
batch_resample_image_label,
get_affine_matrix,
get_rotation_matrix,
get_scaling_matrix,
get_translation_matrix,
)
from imgx.metric.centroid import get_coordinate_grid
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None: # pylint: disable=invalid-name
    """Fake two CPU devices so this module's tests see a multi-device JAX."""
    fake.set_n_cpu_devices(2)
class TestDeterministicAffineMatrix(chex.TestCase):
    """Test deterministic affine matrix construction helpers."""
    # Constants for the 30-degree rotation cases below.
    sin_30 = 0.5
    cos_30 = np.sqrt(3) / 2
    # |(1, 1)| - rotating the all-ones vector by 30 degrees lands at
    # sqrt(2) * (cos 75, sin 75) since (1, 1) is at 45 degrees.
    sqrt2 = np.sqrt(2)
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "2d - 30 degrees rotation",
            np.asarray([np.pi / 6]),
            np.asarray(
                [
                    [cos_30, -sin_30, 0.0],
                    [sin_30, cos_30, 0.0],
                    [0.0, 0.0, 1.0],
                ]
            ),
            np.asarray(
                [
                    [sqrt2 * np.cos(75 / 180 * np.pi)],
                    [sqrt2 * np.sin(75 / 180 * np.pi)],
                    [1.0],
                ]
            ),
        ),
        (
            "3d - no rotation",
            np.asarray([0.0, 0.0, 0.0]),
            np.asarray(
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
            np.asarray([[1.0], [1.0], [1.0], [1.0]]),
        ),
        (
            "3d - x axis - 30 degrees rotation",
            np.asarray([np.pi / 6, 0.0, 0.0]),
            np.asarray(
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, cos_30, -sin_30, 0.0],
                    [0.0, sin_30, cos_30, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
            np.asarray(
                [
                    [1.0],
                    [sqrt2 * np.cos(75 / 180 * np.pi)],
                    [sqrt2 * np.sin(75 / 180 * np.pi)],
                    [1.0],
                ]
            ),
        ),
        (
            "3d - y axis - 30 degrees rotation",
            np.asarray([0.0, np.pi / 6, 0.0]),
            np.asarray(
                [
                    [cos_30, 0.0, sin_30, 0.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [-sin_30, 0.0, cos_30, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
            np.asarray(
                [
                    [sqrt2 * np.sin(75 / 180 * np.pi)],
                    [1.0],
                    [sqrt2 * np.cos(75 / 180 * np.pi)],
                    [1.0],
                ]
            ),
        ),
        (
            "3d - z axis - 30 degrees rotation",
            np.asarray([0.0, 0.0, np.pi / 6]),
            np.asarray(
                [
                    [cos_30, -sin_30, 0.0, 0.0],
                    [sin_30, cos_30, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
            np.asarray(
                [
                    [sqrt2 * np.cos(75 / 180 * np.pi)],
                    [sqrt2 * np.sin(75 / 180 * np.pi)],
                    [1.0],
                    [1.0],
                ]
            ),
        ),
    )
    def test_rotation(
        self,
        radians: np.ndarray,
        expected_affine_matrix: np.ndarray,
        expected_rotated_vector: np.ndarray,
    ) -> None:
        """Test affine matrix values and rotated unit vector.
        Args:
            radians: values correspond to yz, xz, xy planes.
            expected_affine_matrix: expected affine matrix.
            expected_rotated_vector: expected rotated vector.
        """
        # Homogeneous coordinates: the trailing 1 is the homogeneous entry.
        if len(radians) > 1:
            vector = jnp.array([[1.0], [1.0], [1.0], [1.0]])
        else:
            vector = jnp.array([[1.0], [1.0], [1.0]])
        got_affine_matrix = self.variant(get_rotation_matrix)(
            radians=radians,
        )
        chex.assert_trees_all_close(got_affine_matrix, expected_affine_matrix)
        got_rotated_vector = jnp.matmul(got_affine_matrix, vector)
        chex.assert_trees_all_close(got_rotated_vector, expected_rotated_vector)
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "2d - shift",
            np.asarray([-1.0, -2.0]),
            np.asarray(
                [
                    [1.0, 0.0, -1.0],
                    [0.0, 1.0, -2.0],
                    [0.0, 0.0, 1.0],
                ]
            ),
            np.asarray([[0.0], [-1.0], [1.0]]),
        ),
        (
            "3d - no shift",
            np.asarray([0.0, 0.0, 0.0]),
            np.asarray(
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
            np.asarray([[1.0], [1.0], [1.0], [1.0]]),
        ),
        (
            "3d - shift x axis",
            np.asarray([1.0, 0.0, 0.0]),
            np.asarray(
                [
                    [1.0, 0.0, 0.0, 1.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
            np.asarray(
                [
                    [2.0],
                    [1.0],
                    [1.0],
                    [1.0],
                ]
            ),
        ),
        (
            "3d - shift y axis",
            np.asarray([0.0, 1.0, 0.0]),
            np.asarray(
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 1.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
            np.asarray(
                [
                    [1.0],
                    [2.0],
                    [1.0],
                    [1.0],
                ]
            ),
        ),
        (
            "3d - shift z axis",
            np.asarray([0.0, 0.0, 1.0]),
            np.asarray(
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 1.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
            np.asarray(
                [
                    [1.0],
                    [1.0],
                    [2.0],
                    [1.0],
                ]
            ),
        ),
    )
    def test_translation(
        self,
        shifts: np.ndarray,
        expected_affine_matrix: np.ndarray,
        expected_rotated_vector: np.ndarray,
    ) -> None:
        """Test affine matrix values and translated unit vector.
        Args:
            shifts: correspond to each axis shift.
            expected_affine_matrix: expected affine matrix.
            expected_rotated_vector: expected rotated vector.
        """
        # All-ones homogeneous vector, sized to match the matrix.
        vector = jnp.ones(shape=(len(shifts) + 1, 1))
        got_affine_matrix = self.variant(get_translation_matrix)(
            shifts=shifts,
        )
        chex.assert_trees_all_close(got_affine_matrix, expected_affine_matrix)
        got_rotated_vector = jnp.matmul(got_affine_matrix, vector)
        chex.assert_trees_all_close(got_rotated_vector, expected_rotated_vector)
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "2d - scale",
            np.asarray([2.0, 3.0]),
            np.asarray(
                [
                    [2.0, 0.0, 0.0],
                    [0.0, 3.0, 0.0],
                    [0.0, 0.0, 1.0],
                ]
            ),
            np.asarray([[2.0], [3.0], [1.0]]),
        ),
        (
            "3d - no scale",
            np.asarray([1.0, 1.0, 1.0]),
            np.asarray(
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
            np.asarray([[1.0], [1.0], [1.0], [1.0]]),
        ),
        (
            "3d - scale x axis",
            np.asarray([2.0, 1.0, 1.0]),
            np.asarray(
                [
                    [2.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
            np.asarray(
                [
                    [2.0],
                    [1.0],
                    [1.0],
                    [1.0],
                ]
            ),
        ),
        (
            "3d - scale y axis",
            np.asarray([1.0, 2.0, 1.0]),
            np.asarray(
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, 2.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
            np.asarray(
                [
                    [1.0],
                    [2.0],
                    [1.0],
                    [1.0],
                ]
            ),
        ),
        (
            "3d - scale z axis",
            np.asarray([1.0, 1.0, 2.0]),
            np.asarray(
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 2.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
            np.asarray(
                [
                    [1.0],
                    [1.0],
                    [2.0],
                    [1.0],
                ]
            ),
        ),
    )
    def test_scaling(
        self,
        scales: np.ndarray,
        expected_affine_matrix: np.ndarray,
        expected_rotated_vector: np.ndarray,
    ) -> None:
        """Test affine matrix values and scaled unit vector.
        Args:
            scales: correspond to each axis scaling.
            expected_affine_matrix: expected affine matrix.
            expected_rotated_vector: expected rotated vector.
        """
        vector = jnp.ones(shape=(len(scales) + 1, 1))
        got_affine_matrix = self.variant(get_scaling_matrix)(
            scales=scales,
        )
        chex.assert_trees_all_close(got_affine_matrix, expected_affine_matrix)
        got_rotated_vector = jnp.matmul(got_affine_matrix, vector)
        chex.assert_trees_all_close(got_rotated_vector, expected_rotated_vector)
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "2d - rotate - scale - shift",
            np.asarray([np.pi / 2]),
            np.asarray([-1.0, 1.0]),
            np.asarray([0.8, 1.2]),
            np.asarray(
                [
                    [0.0, -0.8, -1.0],
                    [1.2, 0.0, 1.0],
                    [0.0, 0.0, 1.0],
                ],
            ),
            np.asarray([[-1.8], [2.2]]),
        ),
    )
    def test_affine(
        self,
        radians: np.ndarray,
        shifts: np.ndarray,
        scales: np.ndarray,
        expected_affine_matrix: np.ndarray,
        expected_rotated_vector: np.ndarray,
    ) -> None:
        """Test composed affine matrix values and transformed unit vector.
        Args:
            radians: correspond to rotate around each axis.
            shifts: correspond to each axis shift.
            scales: correspond to each axis scaling.
            expected_affine_matrix: expected affine matrix.
            expected_rotated_vector: expected rotated vector.
        """
        vector = jnp.ones(shape=(len(scales) + 1, 1))
        got_affine_matrix = self.variant(get_affine_matrix)(
            radians=radians,
            shifts=shifts,
            scales=scales,
        )
        chex.assert_trees_all_close(
            got_affine_matrix, expected_affine_matrix, atol=1e-6
        )
        # Drop the homogeneous row before applying the matrix, so the
        # expected vector is in plain (non-homogeneous) coordinates.
        got_rotated_vector = jnp.matmul(got_affine_matrix[:-1, :], vector)
        chex.assert_trees_all_close(got_rotated_vector, expected_rotated_vector)
class TestRandomAffineMatrix(chex.TestCase):
    """Test random affine matrix sampling."""
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "2d - batch size 1",
            1,
            np.asarray(
                [
                    0.088,
                ]
            ),
            np.asarray([20, 4]),
            np.asarray([0.15, 0.15]),
            (3, 3),
        ),
        (
            "2d - batch size 2",
            2,
            np.asarray(
                [
                    0.088,
                ]
            ),
            np.asarray([20, 4]),
            np.asarray([0.15, 0.15]),
            (3, 3),
        ),
        (
            "3d - batch size 2",
            2,
            np.asarray([0.088, 0.088, 0.088]),
            np.asarray([20, 20, 4]),
            np.asarray([0.15, 0.15, 0.15]),
            (4, 4),
        ),
    )
    def test_values(
        self,
        batch_size: int,
        max_rotation: np.ndarray,
        max_translation: np.ndarray,
        max_scaling: np.ndarray,
        expected_shape: Tuple,
    ) -> None:
        """Test affine matrix values.
        Test affine matrix shapes, and test random seed impact.
        Args:
            batch_size: number of samples in batch.
            max_rotation: maximum rotation in radians.
            max_translation: maximum translation in pixel/voxels.
            max_scaling: maximum scaling difference in pixel/voxels.
            expected_shape: expected shape of affine matrix.
        """
        # Tile the per-sample limits to a leading batch axis.
        max_rotation = np.tile(max_rotation[None, ...], (batch_size, 1))
        max_translation = np.tile(max_translation[None, ...], (batch_size, 1))
        max_scaling = np.tile(max_scaling[None, ...], (batch_size, 1))
        # check output shape
        key1 = jax.random.PRNGKey(1)
        got1 = self.variant(batch_get_random_affine_matrix)(
            key=key1,
            max_rotation=max_rotation,
            min_translation=-max_translation,
            max_translation=max_translation,
            max_scaling=max_scaling,
        )
        chex.assert_shape(got1, (batch_size, *expected_shape))
        # if batch size > 1, each affine matrix should be different
        if batch_size > 1:
            diff = jnp.sum(jnp.abs(got1[1, ...] - got1[0, ...])).item()
            chex.assert_scalar_positive(diff)
        # same seed should provide same values
        got2 = self.variant(batch_get_random_affine_matrix)(
            key=key1,
            max_rotation=max_rotation,
            min_translation=-max_translation,
            max_translation=max_translation,
            max_scaling=max_scaling,
        )
        chex.assert_trees_all_equal(got1, got2)
        # different seeds should provide different values
        key3 = jax.random.PRNGKey(3)
        got3 = self.variant(batch_get_random_affine_matrix)(
            key=key3,
            max_rotation=max_rotation,
            min_translation=-max_translation,
            max_translation=max_translation,
            max_scaling=max_scaling,
        )
        diff = jnp.sum(jnp.abs(got1 - got3)).item()
        chex.assert_scalar_positive(diff)
class TestApplyAffineMatrix(chex.TestCase):
    """Test batch_apply_affine_to_grid."""
    @chex.all_variants
    @parameterized.parameters(4, 1)
    def test_values(
        self,
        batch_size: int,
    ) -> None:
        """Test transformed grid values.
        Args:
            batch_size: number of samples in batch.
        """
        # Coordinate grid of shape (n=2, d1=3, d2=2): one channel per axis.
        grid = np.asarray(
            [
                # x
                [
                    [0.0, 0.0],
                    [1.0, 1.0],
                    [2.0, 2.0],
                ],
                # y
                [
                    [0.0, 1.0],
                    [0.0, 1.0],
                    [0.0, 1.0],
                ],
            ],
        )
        affine_matrix = np.asarray(
            [
                [2.0, 1.0, 0.0],
                [0.0, 3.0, 0.0],
                [0.0, 0.0, 1.0],
            ],
        )
        # Expected values show the affine applied around the grid centre:
        # shift to centre, transform, shift back.
        expected = np.asarray(
            [
                # shift -> 2x+y -> shift back
                [
                    [-1.5, -0.5],
                    [0.5, 1.5],
                    [2.5, 3.5],
                ],
                # shift -> 3y -> shift back
                [
                    [-1.0, 2.0],
                    [-1.0, 2.0],
                    [-1.0, 2.0],
                ],
            ],
        )
        batch_affine_matrix = np.tile(
            affine_matrix[None, ...], (batch_size, 1, 1)
        )
        batch_expected = np.tile(
            expected[None, ...], (batch_size,) + (1,) * len(expected.shape)
        )
        got = self.variant(batch_apply_affine_to_grid)(
            grid=grid,
            affine_matrix=batch_affine_matrix,
        )
        chex.assert_trees_all_equal(got, batch_expected)
class TestResample(chex.TestCase):
    """Test batch_resample_image_label."""
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "2d - batch",
            np.asarray(
                [
                    [
                        [2.0, 1.0, 0.0],
                        [0.0, 3.0, 4.0],
                    ],
                    [
                        [2.0, 1.0, 0.0],
                        [0.0, 3.0, 4.0],
                    ],
                ],
            ),
            np.asarray(
                [
                    [
                        [2.0, 1.0, 0.0],
                        [0.0, 3.0, 4.0],
                    ],
                    [
                        [2.0, 1.0, 0.0],
                        [0.0, 3.0, 4.0],
                    ],
                ],
            ),
            np.asarray(
                [
                    # first image, un changed
                    [
                        # x axis
                        [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]],
                        # y axis
                        [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
                    ],
                    # second image, changed
                    # (0.4, 0) x-axis linear interpolation
                    # (0, 0.6) y-axis linear interpolation
                    # (0.4, 1.6) x/y-axis linear interpolation
                    # (1.0, 3.0) out of boundary
                    [
                        # x axis
                        [[0.4, 0.0, 0.4], [1.0, 1.0, 1.0]],
                        # y axis
                        [[0.0, 0.6, 1.6], [0.0, 3.0, 2.0]],
                    ],
                ]
            ),  # (batch=2, n=2, d1=2, d2=3)
            np.asarray(
                [
                    [
                        [2.0, 1.0, 0.0],
                        [0.0, 3.0, 4.0],
                    ],
                    [
                        [1.2, 1.4, 1.68],
                        [0.0, 0.0, 4.0],
                    ],
                ],
            ),
            np.asarray(
                [
                    [
                        [2.0, 1.0, 0.0],
                        [0.0, 3.0, 4.0],
                    ],
                    [
                        [2.0, 1.0, 0.0],
                        [0.0, 0.0, 4.0],
                    ],
                ],
            ),
        ),
    )
    def test_shapes(
        self,
        image: np.ndarray,
        label: np.ndarray,
        grid: np.ndarray,
        expected_image: np.ndarray,
        expected_label: np.ndarray,
    ) -> None:
        """Test resampled image and label values.
        Args:
            image: input image batch.
            label: input label batch.
            grid: batch of grid with affine applied.
            expected_image: expected image.
            expected_label: expected label.
        """
        input_dict = {IMAGE: image, LABEL: label}
        got = self.variant(batch_resample_image_label)(
            input_dict=input_dict,
            grid=grid,
        )
        # Image values are interpolated; label values stay on the original
        # value set - inferred from the expected arrays above.
        expected = {IMAGE: expected_image, LABEL: expected_label}
        chex.assert_trees_all_close(got, expected)
class TestRandomAffineTransformation(chex.TestCase):
    """Test batch_random_affine_transform."""
    @chex.all_variants
    @parameterized.product(
        (
            {
                "max_rotation": np.asarray([0.088, 0.088, 0.088]),
                "max_translation": np.asarray([2, 3, 1]),
                "max_scaling": np.asarray([0.05, 0.05, 0.05]),
                "image_shape": (8, 12, 6),
            },
            {
                "max_rotation": np.asarray([0.088]),
                "max_translation": np.asarray([2, 3]),
                "max_scaling": np.asarray([0.05, 0.05]),
                "image_shape": (8, 12),
            },
        ),
        batch_size=[4, 1],
    )
    def test_shapes(
        self,
        batch_size: int,
        max_rotation: np.ndarray,
        max_translation: np.ndarray,
        max_scaling: np.ndarray,
        image_shape: Tuple,
    ) -> None:
        """Test output shapes and that labels stay binary.
        Args:
            batch_size: number of samples in batch.
            max_rotation: maximum rotation in radians.
            max_translation: maximum translation in pixel/voxels.
            max_scaling: maximum scaling difference in pixel/voxels.
            image_shape: image spatial shape.
        """
        key = jax.random.PRNGKey(0)
        grid = get_coordinate_grid(shape=image_shape)
        image = jax.random.uniform(
            key=key, shape=(batch_size, *image_shape), minval=0, maxval=1
        )
        label = jax.random.uniform(
            key=key, shape=(batch_size, *image_shape), minval=0, maxval=1
        )
        # Threshold at the mean to get a binary (0/1) float label.
        label = jnp.asarray(label > jnp.mean(label), dtype=np.float32)
        input_dict = {
            IMAGE: image,
            LABEL: label,
            FOREGROUND_RANGE: jnp.zeros((len(image_shape), 2)),
        }
        got = self.variant(batch_random_affine_transform)(
            key=key,
            input_dict=input_dict,
            grid=grid,
            max_rotation=max_rotation,
            max_translation=max_translation,
            max_scaling=max_scaling,
        )
        # check shapes
        assert len(got) == 2
        chex.assert_shape(got[IMAGE], (batch_size, *image_shape))
        chex.assert_shape(got[LABEL], (batch_size, *image_shape))
        # check label remains boolean
        assert jnp.unique(got[LABEL]).size == jnp.unique(label).size
| 23,132 | 29.081925 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_loss_dice.py | """Test dice loss functions."""
import chex
import jax
import numpy as np
from absl.testing import parameterized
from chex._src import fake
from imgx.loss import mean_dice_loss
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None: # pylint: disable=invalid-name
    """Fake two CPU devices so this module's tests see a multi-device JAX."""
    fake.set_n_cpu_devices(2)
class TestDiceLoss(chex.TestCase):
    """Test mean_dice_loss."""
    # Softmax probabilities for relative logits (0, -1, -2); each per-pixel
    # logit row in the cases below normalises to one of these values.
    prob_0 = 1 / (1 + np.exp(-1) + np.exp(-2))
    prob_1 = np.exp(-1) / (1 + np.exp(-1) + np.exp(-2))
    prob_2 = np.exp(-2) / (1 + np.exp(-1) + np.exp(-2))
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "1d-with-background",
            np.array([[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0], [0.0, -1.0, -2.0]]]),
            np.array([[2, 1, 0]]),
            True,
            # Per-class dice loss 1 - 2*intersection/(pred_sum + true_sum),
            # averaged over classes.
            np.mean(
                np.array(
                    [
                        [
                            1 - 2 * prob_0 / (3 * prob_0 + 1),
                            1 - 2 * prob_1 / (3 * prob_1 + 1),
                            1 - 2 * prob_2 / (3 * prob_2 + 1),
                        ]
                    ],
                )
            ),
        ),
        (
            "1d-without-background",
            np.array([[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0], [0.0, -1.0, -2.0]]]),
            np.array([[2, 1, 0]]),
            False,
            np.mean(
                np.array(
                    [
                        [
                            1 - 2 * prob_1 / (3 * prob_1 + 1),
                            1 - 2 * prob_2 / (3 * prob_2 + 1),
                        ]
                    ],
                )
            ),
        ),
        (
            "1d-without-and-miss-background",
            np.array([[[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]]]),
            np.array([[2, 1]]),
            False,
            np.mean(
                np.array(
                    [
                        [
                            1 - 2 * prob_1 / (2 * prob_1 + 1),
                            1 - 2 * prob_2 / (2 * prob_2 + 1),
                        ]
                    ],
                )
            ),
        ),
        (
            "2d-with-background",
            np.array(
                [
                    [
                        [[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
                        [[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[2, 1], [0, 1]]]),
            True,
            np.mean(
                np.array(
                    [
                        [
                            1 - 2 * prob_1 / (3 * prob_0 + prob_1 + 1),
                            1
                            - 2
                            * (prob_1 + prob_2)
                            / (prob_0 + 2 * prob_1 + prob_2 + 2),
                            1 - 2 * prob_2 / (prob_1 + 3 * prob_2 + 1),
                        ]
                    ],
                )
            ),
        ),
        (
            "2d-without-background",
            np.array(
                [
                    [
                        [[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
                        [[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[2, 1], [0, 1]]]),
            False,
            np.mean(
                np.array(
                    [
                        [
                            1
                            - 2
                            * (prob_1 + prob_2)
                            / (prob_0 + 2 * prob_1 + prob_2 + 2),
                            1 - 2 * prob_2 / (prob_1 + 3 * prob_2 + 1),
                        ]
                    ],
                )
            ),
        ),
        (
            # Class 2 is absent from the targets; its dice term is excluded
            # from the expected mean - inferred from the values below.
            "2d-with-empty-class-and-background",
            np.array(
                [
                    [
                        [[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
                        [[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[0, 1], [0, 1]]]),
            True,
            np.mean(
                np.array(
                    [
                        [
                            1
                            - 2 * (prob_0 + prob_1) / (3 * prob_0 + prob_1 + 2),
                            1
                            - 2
                            * (prob_1 + prob_2)
                            / (prob_0 + 2 * prob_1 + prob_2 + 2),
                        ]
                    ],
                )
            ),
        ),
        (
            "2d-with-empty-class-without-background",
            np.array(
                [
                    [
                        [[2.0, 1.0, 0.0], [0.0, -1.0, -2.0]],
                        [[1.0, 2.0, 0.0], [1.0, -1.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[0, 1], [0, 1]]]),
            False,
            np.array(
                1 - 2 * (prob_1 + prob_2) / (prob_0 + 2 * prob_1 + prob_2 + 2),
            ),
        ),
    )
    def test_values(
        self,
        logits: np.ndarray,
        targets: np.ndarray,
        include_background: bool,
        expected: np.ndarray,
    ) -> None:
        """Test dice loss values.
        Args:
            logits: unscaled prediction, of shape (..., num_classes).
            targets: values are integers, of shape (...).
            include_background: include background as a separate class.
            expected: expected output.
        """
        num_classes = logits.shape[-1]
        # (batch, ..., num_classes)
        mask_true = jax.nn.one_hot(targets, num_classes=num_classes, axis=-1)
        got = self.variant(mean_dice_loss)(
            logits=logits,
            mask_true=mask_true,
            include_background=include_background,
        )
        chex.assert_trees_all_close(got, expected)
| 5,992 | 29.42132 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_diffusion_variance_schedule.py | """Test Gaussian diffusion related classes and functions."""
import chex
import jax.numpy as jnp
from absl.testing import parameterized
from chex._src import fake
from imgx.diffusion.variance_schedule import (
DiffusionBetaSchedule,
downsample_beta_schedule,
get_beta_schedule,
)
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None: # pylint: disable=invalid-name
    """Fake two CPU devices so this module's tests see a multi-device JAX."""
    fake.set_n_cpu_devices(2)
class TestGetBetaSchedule(chex.TestCase):
    """Test get_beta_schedule."""
    @parameterized.product(
        num_timesteps=[1, 4],
        beta_schedule=[
            DiffusionBetaSchedule.LINEAR,
            DiffusionBetaSchedule.QUADRADIC,
            DiffusionBetaSchedule.COSINE,
            DiffusionBetaSchedule.WARMUP10,
            DiffusionBetaSchedule.WARMUP50,
        ],
    )
    def test_shapes(
        self,
        num_timesteps: int,
        beta_schedule: DiffusionBetaSchedule,
    ) -> None:
        """Test output shape and endpoint values for every schedule."""
        beta_start = 0.0
        beta_end = 0.2
        got = get_beta_schedule(
            num_timesteps=num_timesteps,
            beta_schedule=beta_schedule,
            beta_start=beta_start,
            beta_end=beta_end,
        )
        chex.assert_shape(got, (num_timesteps,))
        # Both endpoints are pinned; with a single timestep only the start
        # value exists.
        assert got[0] == beta_start
        if num_timesteps > 1:
            chex.assert_trees_all_close(got[-1], beta_end)
class TestDownsampleBetaSchedule(chex.TestCase):
    """Test downsample_beta_schedule."""
    @parameterized.named_parameters(
        ("same", 10, 10),
        ("downsample 11 to 6", 11, 6),
        ("downsample 101 to 5", 101, 5),
        ("downsample to two", 10, 2),
    )
    def test_values(
        self,
        num_timesteps: int,
        num_timesteps_to_keep: int,
    ) -> None:
        """Test output values and shapes."""
        betas = jnp.linspace(0.0, 1.0, num_timesteps)
        got = downsample_beta_schedule(
            betas, num_timesteps, num_timesteps_to_keep
        )
        chex.assert_shape(got, (num_timesteps_to_keep,))
        # Downsampling must preserve both endpoints of the schedule.
        chex.assert_trees_all_close(got[0], betas[0])
        chex.assert_trees_all_close(got[-1], betas[-1])
| 2,208 | 27.320513 | 60 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_exp_mixed_precision.py | """Test mixed precision related functions in factory."""
import haiku as hk
import pytest
from imgx import model
from imgx.exp.mixed_precision import set_mixed_precision_policy
from imgx.model import MODEL_CLS_NAME_TO_CONFIG_NAME
from imgx.model import __all__ as all_model_classes
@pytest.mark.parametrize(
    "model_class",
    all_model_classes,
    ids=all_model_classes,
)
def test_set_mixed_precision_policy(model_class: str) -> None:
    """Set a mixed precision policy for every supported model class.
    Policies are global haiku state keyed by class, hence the explicit
    clean-up below.
    Args:
        model_class: name of model class.
    """
    set_mixed_precision_policy(True, MODEL_CLS_NAME_TO_CONFIG_NAME[model_class])
    # clear policy, otherwise impact other tests
    hk.mixed_precision.clear_policy(hk.BatchNorm)
    hk.mixed_precision.clear_policy(hk.GroupNorm)
    hk.mixed_precision.clear_policy(hk.LayerNorm)
    hk.mixed_precision.clear_policy(hk.InstanceNorm)
    hk.mixed_precision.clear_policy(getattr(model, model_class))
| 938 | 31.37931 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_metric_surface_distance.py | """Test loss functions."""
from functools import partial
from typing import Callable, List, Tuple, Union
import chex
import jax
import numpy as np
from absl.testing import parameterized
from imgx.metric.surface_distance import (
aggregated_surface_distance,
average_surface_distance,
get_binary_mask_bounding_box,
get_mask_edges,
get_surface_distance,
hausdorff_distance,
normalized_surface_dice,
normalized_surface_dice_from_distances,
)
def create_spherical_seg_3d(
    radius: float,
    centre: Tuple[int, int, int],
    shape: Tuple[int, int, int],
) -> np.ndarray:
    """Return a binary 3D image containing a filled sphere.

    Voxels inside the sphere are 1, all others 0.
    https://github.com/Project-MONAI/MONAI/blob/dev/tests/test_surface_distance.py

    Args:
        radius: radius of sphere (in terms of number of voxels, can be partial)
        centre: location of sphere centre.
        shape: shape of image to create.
    """
    # Open grids of signed offsets from the centre along each axis.
    off0, off1, off2 = np.ogrid[
        -centre[0] : shape[0] - centre[0],
        -centre[1] : shape[1] - centre[1],
        -centre[2] : shape[2] - centre[2],
    ]
    # Compare squared distances so no per-voxel sqrt is needed.
    inside = off0 * off0 + off1 * off1 + off2 * off2 <= radius * radius
    return inside.astype(np.int32)
def create_circle_seg_2d(
    radius: float,
    centre: Tuple[int, int],
    shape: Tuple[int, int],
) -> np.ndarray:
    """Return a binary 2D image containing a filled circle.

    Pixels inside the circle are 1, all others 0.

    Args:
        radius: radius of sphere (in terms of number of pixels, can be partial)
        centre: location of sphere centre.
        shape: shape of image to create.
    """
    # Open grids of signed offsets from the centre along each axis.
    rows, cols = np.ogrid[
        -centre[0] : shape[0] - centre[0],
        -centre[1] : shape[1] - centre[1],
    ]
    # Compare squared distances so no per-pixel sqrt is needed.
    inside = rows * rows + cols * cols <= radius * radius
    return inside.astype(np.int32)
class TestBBox(chex.TestCase):
    """Test get_binary_mask_bounding_box."""
    @parameterized.named_parameters(
        (
            "1d-int",
            np.array([0, 1, 0, 1, 0]),
            np.array([1]),
            np.array([4]),
        ),
        (
            "1d-bool",
            np.array([False, True, False, True, False]),
            np.array([1]),
            np.array([4]),
        ),
        (
            "1d-all-true",
            np.array([True, True, True, True, True]),
            np.array([0]),
            np.array([5]),
        ),
        (
            # Empty masks yield -1 sentinels for both bounds.
            "1d-all-false",
            np.array([False, False, False, False, False]),
            np.array([-1]),
            np.array([-1]),
        ),
        (
            "2d-1x5",
            np.array([[0, 1, 0, 1, 0]]),
            np.array([0, 1]),
            np.array([1, 4]),
        ),
        (
            "2d-2x5",
            np.array([[0, 1, 0, 1, 0], [1, 1, 0, 1, 0]]),
            np.array([0, 0]),
            np.array([2, 4]),
        ),
        (
            "2d-2x5-all-false",
            np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]),
            np.array([-1, -1]),
            np.array([-1, -1]),
        ),
    )
    def test_values(
        self,
        mask: np.ndarray,
        expected_bbox_min: np.ndarray,
        expected_bbox_max: np.ndarray,
    ) -> None:
        """Test bounding box values.
        Args:
            mask: binary mask with only spatial axes.
            expected_bbox_min: expected bounding box min, inclusive.
            expected_bbox_max: expected bounding box max, exclusive.
        """
        got_bbox_min, got_bbox_max = get_binary_mask_bounding_box(
            mask=mask,
        )
        chex.assert_trees_all_close(got_bbox_min, expected_bbox_min)
        chex.assert_trees_all_close(got_bbox_max, expected_bbox_max)
class TestMaskEdge(chex.TestCase):
    """Test get_mask_edges.

    NOTE(review): the expected edge arrays are smaller than the 7x7 input
    masks (5x5 or 4x3), so get_mask_edges appears to crop its outputs to a
    common bounding box of the two masks — confirm in the implementation.
    """
    @parameterized.named_parameters(
        (
            "2d-same-smaller",
            create_circle_seg_2d(radius=2, centre=(4, 4), shape=(7, 7)),
            create_circle_seg_2d(radius=2, centre=(4, 4), shape=(7, 7)),
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
        ),
        (
            "2d-diff-smaller",
            create_circle_seg_2d(radius=2, centre=(4, 4), shape=(7, 7)),
            create_circle_seg_2d(radius=1, centre=(4, 4), shape=(7, 7)),
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
            np.array(
                [
                    [False, False, False, False, False],
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                    [False, False, False, False, False],
                ]
            ),
        ),
        (
            "2d-shift",
            create_circle_seg_2d(radius=1, centre=(4, 4), shape=(7, 7)),
            create_circle_seg_2d(radius=1, centre=(3, 4), shape=(7, 7)),
            np.array(
                [
                    [False, False, False],
                    [False, True, False],
                    [True, False, True],
                    [False, True, False],
                ]
            ),
            np.array(
                [
                    [False, True, False],
                    [True, False, True],
                    [False, True, False],
                    [False, False, False],
                ]
            ),
        ),
        # both masks empty -> both edges empty (same shape as the input)
        (
            "2d-zero",
            np.zeros((5, 5)),
            np.zeros((5, 5)),
            np.zeros((5, 5), dtype=np.bool_),
            np.zeros((5, 5), dtype=np.bool_),
        ),
    )
    def test_values(
        self,
        mask_pred: np.ndarray,
        mask_true: np.ndarray,
        expected_edge_pred: np.ndarray,
        expected_edge_true: np.ndarray,
    ) -> None:
        """Test return values.
        Args:
            mask_pred: the predicted binary mask.
            mask_true: the ground truth binary mask.
            expected_edge_pred: the predicted binary edge.
            expected_edge_true: the ground truth binary edge.
        """
        got_edge_pred, got_edge_true = get_mask_edges(
            mask_pred=mask_pred,
            mask_true=mask_true,
        )
        chex.assert_trees_all_close(got_edge_pred, expected_edge_pred)
        chex.assert_trees_all_close(got_edge_true, expected_edge_true)
class TestSurfaceDistance(chex.TestCase):
    """Test surface_distance related functions.

    Covers NaN/zero sanity checks, exact distance values for small 2D
    edge maps, aggregated distances, average/Hausdorff distances and the
    normalized surface dice.
    """
    @parameterized.product(
        ndims=[2, 3],
        func=[
            partial(average_surface_distance, spacing=None),
            partial(hausdorff_distance, percentile=100, spacing=None),
        ],
    )
    def test_nan_distance(
        self,
        ndims: int,
        func: Callable,
    ) -> None:
        """Test surface distance functions return nan given empty masks.
        Args:
            ndims: number of spatial dimensions.
            func: function to test.
        """
        batch = 2
        num_classes = 4
        # build dummy input having non-zero edges
        shape = (batch,) + (num_classes,) * ndims
        mask_true = np.zeros(shape)
        for i in range(num_classes):
            mask_true[:, i, ...] = i
        mask_true = np.array(
            jax.nn.one_hot(
                x=mask_true,
                num_classes=num_classes,
                axis=-1,
            )
        )
        # all-zero mask: the distance is undefined whenever either side
        # (or both) is empty, so all three combinations must be NaN
        mask_pred = np.zeros_like(mask_true)
        got = func(
            mask_pred=mask_pred,
            mask_true=mask_true,
        )
        assert np.isnan(got).all()
        got = func(
            mask_pred=mask_true,
            mask_true=mask_pred,
        )
        assert np.isnan(got).all()
        got = func(
            mask_pred=mask_pred,
            mask_true=mask_pred,
        )
        assert np.isnan(got).all()
    @parameterized.product(
        ndims=[2, 3],
        func=[
            partial(average_surface_distance, spacing=None),
            partial(hausdorff_distance, percentile=100, spacing=None),
        ],
    )
    def test_zero_distance(
        self,
        ndims: int,
        func: Callable,
    ) -> None:
        """Test surface distance functions return zero given same inputs.
        Args:
            ndims: number of spatial dimensions.
            func: function to test.
        """
        batch = 2
        num_classes = 4
        # build dummy input having non-zero edges
        shape = (batch,) + (num_classes,) * ndims
        mask_true = np.zeros(shape)
        for i in range(num_classes):
            mask_true[:, i, ...] = i
        mask_true = np.array(
            jax.nn.one_hot(
                x=mask_true,
                num_classes=num_classes,
                axis=-1,
            )
        )
        got = func(mask_pred=mask_true, mask_true=mask_true)
        expected = np.zeros((batch, num_classes))
        assert np.array_equal(got, expected)
    @parameterized.named_parameters(
        (
            "2d-4x3",
            np.array(
                [
                    [False, False, False],
                    [False, True, False],
                    [True, False, True],
                    [False, True, False],
                ]
            ),
            np.array(
                [
                    [False, True, False],
                    [True, False, True],
                    [False, True, False],
                    [False, False, False],
                ]
            ),
            (1.0, 1.0),
            np.array([1.0, 1.0, 1.0, 1.0]),
        ),
        (
            "2d-5x5",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            ),
            (1.0, 1.0),
            np.array(
                [
                    0.0,
                    0.0,
                    0.0,
                    np.sqrt(2),
                    np.sqrt(2),
                    np.sqrt(2),
                    np.sqrt(2),
                    2.0,
                ]
            ),
        ),
        (
            "2d-5x5-heterogeneous-1",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            ),
            (1.0, 2.0),
            np.array(
                [
                    0.0,
                    0.0,
                    0.0,
                    np.sqrt(5),
                    np.sqrt(5),
                    2.0,  # via x axis it's shorter
                    2.0,  # via x axis it's shorter
                    2.0,
                ]
            ),
        ),
        (
            "2d-5x5-heterogeneous-2",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            ),
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
            (1.0, 2.0),
            np.array(
                [
                    0.0,
                    0.0,
                    0.0,
                    2.0,  # via x axis it's shorter
                ]
            ),
        ),
        (
            "2d-6x5",
            np.array(
                [
                    [False, True, True, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            ),
            np.array(
                [
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
            (1.0, 1.0),
            np.array(
                [
                    np.sqrt(10),
                    3.0,
                    np.sqrt(10),
                    np.sqrt(8),
                    np.sqrt(8),
                    np.sqrt(2),
                    np.sqrt(2),
                    0.0,
                ]
            ),
        ),
    )
    def test_surface_distance(
        self,
        edge_pred: np.ndarray,
        edge_true: np.ndarray,
        spacing: Tuple[float, ...],
        expected: np.ndarray,
    ) -> None:
        """Test get_surface_distance with accurate expected values.
        Args:
            edge_pred: the predicted binary edge.
            edge_true: the ground truth binary edge.
            spacing: spacing of pixel/voxels along each dimension.
            expected: surface distance, 1D array of len = edge size.
        """
        got = get_surface_distance(
            edge_pred=edge_pred,
            edge_true=edge_true,
            spacing=spacing,
        )
        assert np.array_equal(got, expected)
    @parameterized.named_parameters(
        (
            # distances from pred to true
            # 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
            # distances from true to pred
            # 0.0, 0.0, 0.0, 2.0
            "2d-5x5-heterogeneous-asymmetric-1",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            ),
            (1.0, 2.0),
            False,
            [
                partial(np.percentile, q=0),
                partial(np.percentile, q=100),
            ],
            [1, 1],
            np.array(
                [
                    0.0,
                    np.sqrt(5),
                ]
            ),
        ),
        (
            # distances from pred to true
            # 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
            # distances from true to pred
            # 0.0, 0.0, 0.0, 2.0
            "2d-5x5-heterogeneous-asymmetric-2",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            ),
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
            (1.0, 2.0),
            False,
            [
                partial(np.percentile, q=0),
                partial(np.percentile, q=100),
            ],
            [1, 1],
            np.array(
                [
                    0.0,
                    2.0,
                ]
            ),
        ),
        (
            # distances from pred to true
            # 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
            # distances from true to pred
            # 0.0, 0.0, 0.0, 2.0
            "2d-5x5-heterogeneous-symmetric",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            ),
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
            (1.0, 2.0),
            True,
            [
                partial(np.percentile, q=0),
                partial(np.percentile, q=100),
                normalized_surface_dice_from_distances,
            ],
            [1, 1, 2],
            np.array(
                [
                    0.0,
                    np.sqrt(5.0),
                    0.5,
                ]
            ),
        ),
    )
    def test_agg_surface_distance(
        self,
        mask_pred: np.ndarray,
        mask_true: np.ndarray,
        spacing: Tuple[float, ...],
        symmetric: bool,
        agg_funcs: Union[Callable, List[Callable]],
        num_args: Union[int, List[int]],
        expected: np.ndarray,
    ) -> None:
        """Test aggregated_surface_distance with accurate expected values.
        Args:
            mask_pred: predictions, without batch and class axes.
            mask_true: targets, without batch and class axes.
            spacing: spacing of pixel/voxels along each dimension.
            symmetric: the distance is symmetric to (pred, true) means swapping
                the masks provides the same value.
            agg_funcs: a function or a list of functions
                to aggregate a list of distances.
            num_args: a int or a list of ints, corresponding to number of
                arguments for agg_fn_list.
            expected: surface distance, 1D array of len = edge size.
        """
        # [None, ..., None] adds the batch and class axes expected by the API
        got = aggregated_surface_distance(
            mask_pred=mask_pred[None, ..., None],
            mask_true=mask_true[None, ..., None],
            agg_fns=agg_funcs,
            num_args=num_args,
            spacing=spacing,
            symmetric=symmetric,
        )
        assert np.array_equal(got[:, 0, 0], expected)
    @parameterized.named_parameters(
        (
            "monai_3d_example1",
            create_spherical_seg_3d(
                radius=33, centre=(19, 33, 22), shape=(99, 99, 99)
            ),
            create_spherical_seg_3d(
                radius=33, centre=(20, 33, 22), shape=(99, 99, 99)
            ),
            (1.0, 1.0, 1.0),
            False,
            0.3483278807706289,
        ),
        (
            "monai_3d_example2",
            create_spherical_seg_3d(
                radius=20, centre=(20, 33, 22), shape=(99, 99, 99)
            ),
            create_spherical_seg_3d(
                radius=40, centre=(20, 33, 22), shape=(99, 99, 99)
            ),
            (1.0, 1.0, 1.0),
            False,
            12.040033513150455,
        ),
    )
    def test_average_surface_distance(
        self,
        mask_pred: np.ndarray,
        mask_true: np.ndarray,
        spacing: Tuple[float, ...],
        symmetric: bool,
        expected: float,
    ) -> None:
        """Test average_surface_distance with accurate expected values.
        https://github.com/Project-MONAI/MONAI/blob/dev/tests/test_surface_distance.py
        Args:
            mask_pred: predictions, without batch and class axes.
            mask_true: targets, without batch and class axes.
            spacing: spacing of pixel/voxels along each dimension.
            symmetric: the distance is symmetric to (pred, true) means swapping
                the masks provides the same value.
            expected: expected value.
        """
        got = average_surface_distance(
            mask_pred=mask_pred[None, ..., None],
            mask_true=mask_true[None, ..., None],
            spacing=spacing,
            symmetric=symmetric,
        )
        assert np.isclose(np.mean(got), expected)
    @parameterized.named_parameters(
        (
            "monai_3d_example1",
            create_spherical_seg_3d(
                radius=20, centre=(20, 20, 20), shape=(99, 99, 99)
            ),
            create_spherical_seg_3d(
                radius=20, centre=(10, 20, 20), shape=(99, 99, 99)
            ),
            (1.0, 1.0, 1.0),
            False,
            10,
        ),
        (
            "2d_same_center_diff_radii",
            create_circle_seg_2d(radius=10, centre=(50, 50), shape=(99, 99)),
            create_circle_seg_2d(radius=20, centre=(50, 50), shape=(99, 99)),
            (1.0, 1.0),
            False,
            10,
        ),
        (
            "2d_diff_centers_same_radius",
            create_circle_seg_2d(radius=20, centre=(50, 51), shape=(99, 99)),
            create_circle_seg_2d(radius=20, centre=(50, 50), shape=(99, 99)),
            (1.0, 1.0),
            False,
            1,
        ),
        (
            "2d_diff_centers_diff_radii_asymmetric1",
            create_circle_seg_2d(radius=5, centre=(60, 50), shape=(99, 99)),
            create_circle_seg_2d(radius=10, centre=(50, 50), shape=(99, 99)),
            (1.0, 1.0),
            False,
            5,
        ),
        (
            "2d_diff_centers_diff_radii_asymmetric2",
            create_circle_seg_2d(radius=10, centre=(50, 50), shape=(99, 99)),
            create_circle_seg_2d(radius=5, centre=(60, 50), shape=(99, 99)),
            (1.0, 1.0),
            False,
            15,
        ),
        # symmetric cases: swapping the masks yields the same value
        (
            "2d_diff_centers_diff_radii_symmetric1",
            create_circle_seg_2d(radius=10, centre=(50, 50), shape=(99, 99)),
            create_circle_seg_2d(radius=5, centre=(60, 50), shape=(99, 99)),
            (1.0, 1.0),
            True,
            15,
        ),
        (
            "2d_diff_centers_diff_radii_symmetric2",
            create_circle_seg_2d(radius=5, centre=(60, 50), shape=(99, 99)),
            create_circle_seg_2d(radius=10, centre=(50, 50), shape=(99, 99)),
            (1.0, 1.0),
            True,
            15,
        ),
    )
    def test_max_hausdorff_distance(
        self,
        mask_pred: np.ndarray,
        mask_true: np.ndarray,
        spacing: Tuple[float, ...],
        symmetric: bool,
        expected: float,
    ) -> None:
        """Test hausdorff_distance with 100 percentile.
        Some test cases come from
        https://github.com/Project-MONAI/MONAI/blob/dev/tests/test_hausdorff_distance.py
        Args:
            mask_pred: predictions, without batch and class axes.
            mask_true: targets, without batch and class axes.
            spacing: spacing of pixel/voxels along each dimension.
            symmetric: the distance is symmetric to (pred, true) means swapping
                the masks provides the same value.
            expected: expected value.
        """
        got = hausdorff_distance(
            mask_pred=mask_pred[None, ..., None],
            mask_true=mask_true[None, ..., None],
            percentile=100,
            spacing=spacing,
            symmetric=symmetric,
        )
        assert np.isclose(np.mean(got), expected)
    @parameterized.named_parameters(
        (
            # distances from pred to true
            # 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
            # distances from true to pred
            # 0.0, 0.0, 0.0, 2.0
            "2d-5x5-heterogeneous-1mm",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            ),
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
            (1.0, 2.0),
            1.0,
            np.array(
                [
                    0.5,
                ]
            ),
        ),
        (
            # distances from pred to true
            # 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
            # distances from true to pred
            # 0.0, 0.0, 0.0, 2.0
            "2d-5x5-heterogeneous-1.5mm",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            ),
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
            (1.0, 2.0),
            1.5,
            np.array(
                [
                    0.5,
                ]
            ),
        ),
        (
            # distances from pred to true
            # 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
            # distances from true to pred
            # 0.0, 0.0, 0.0, 2.0
            "2d-5x5-heterogeneous-2mm",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            ),
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
            (1.0, 2.0),
            2.0,
            np.array(
                [
                    10.0 / 12.0,
                ]
            ),
        ),
        (
            # distances from pred to true
            # 0.0, 0.0, 0.0, np.sqrt(5), np.sqrt(5), 2.0, 2.0, 2.0
            # distances from true to pred
            # 0.0, 0.0, 0.0, 2.0
            "2d-5x5-heterogeneous-2.24mm",
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                    [False, False, False, False, False],
                    [False, False, False, False, False],
                ]
            ),
            np.array(
                [
                    [False, False, True, False, False],
                    [False, True, False, True, False],
                    [True, False, False, False, True],
                    [False, True, False, True, False],
                    [False, False, True, False, False],
                ]
            ),
            (1.0, 2.0),
            2.24,  # sqrt(5) = 2.236...
            np.array(
                [
                    1.0,
                ]
            ),
        ),
    )
    def test_normalized_surface_dice(
        self,
        mask_pred: np.ndarray,
        mask_true: np.ndarray,
        spacing: Tuple[float, ...],
        tolerance_mm: float,
        expected: float,
    ) -> None:
        """Test normalized_surface_dice with accurate expected values.
        https://github.com/Project-MONAI/MONAI/blob/dev/tests/test_surface_distance.py
        Args:
            mask_pred: predictions, without batch and class axes.
            mask_true: targets, without batch and class axes.
            spacing: spacing of pixel/voxels along each dimension.
            tolerance_mm: tolerance value to consider surface being overlapping.
            expected: expected value.
        """
        got = normalized_surface_dice(
            mask_pred=mask_pred[None, ..., None],
            mask_true=mask_true[None, ..., None],
            spacing=spacing,
            tolerance_mm=tolerance_mm,
        )
        assert np.isclose(np.mean(got), expected)
| 31,405 | 31.444215 | 88 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_metric_dice.py | """Test dice score metric related functions."""
import chex
import jax
import numpy as np
from absl.testing import parameterized
from chex._src import fake
from imgx.metric import dice_score, iou
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None:  # pylint: disable=invalid-name
    """Fake multi-devices.

    Makes chex report 2 CPU devices so multi-device code paths can be
    exercised on a single host.
    """
    fake.set_n_cpu_devices(2)
class TestDiceScore(chex.TestCase):
    """Test dice_score.

    Each case provides soft per-class predictions, integer targets and the
    expected per-class dice values.
    """
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "1d",
            np.array([[[0.2, 0.3, 0.5], [0.0, 1.0, 0.0]]]),
            np.array([[2, 1]]),
            np.array(
                [
                    [
                        0.0,  # no target on background
                        2.0 / 2.3,
                        1.0 / 1.5,
                    ]
                ],
            ),
        ),
        (
            "2d",
            np.array(
                [
                    [
                        [[0.2, 0.3, 0.5], [0.0, 1.0, 0.0]],
                        [[0.9, 0.0, 0.1], [0.5, 0.1, 0.4]],
                    ],
                ]
            ),
            np.array([[[2, 1], [0, 1]]]),
            np.array(
                [
                    [
                        1.8 / 2.6,
                        2.2 / 3.4,
                        1.0 / 2.0,
                    ]
                ],
            ),
        ),
    )
    def test_values(
        self,
        mask_pred: np.ndarray,
        targets: np.ndarray,
        expected: np.ndarray,
    ) -> None:
        """Test dice score values.
        Args:
            mask_pred: unscaled prediction, of shape (..., num_classes).
            targets: values are integers, of shape (...).
            expected: expected output.
        """
        num_classes = mask_pred.shape[-1]
        # (batch, ..., num_classes)
        mask_true = jax.nn.one_hot(targets, num_classes=num_classes, axis=-1)
        got = self.variant(dice_score)(
            mask_pred=mask_pred,
            mask_true=mask_true,
        )
        chex.assert_trees_all_close(got, expected)
class TestIOU(chex.TestCase):
    """Test iou.

    Each case provides soft per-class predictions, integer targets and the
    expected per-class intersection-over-union values.
    """
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "1d",
            np.array([[[0.2, 0.3, 0.5], [0.0, 1.0, 0.0]]]),
            np.array([[2, 1]]),
            np.array(
                [
                    [
                        0.0,  # no target on background
                        1.0 / 1.3,
                        0.5 / 1.0,
                    ]
                ],
            ),
        ),
        (
            "2d",
            np.array(
                [
                    [
                        [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
                        [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[2, 1], [0, 1]]]),
            np.array(
                [
                    [
                        0.5,
                        0.5,
                        1.0,
                    ]
                ],
            ),
        ),
        # class 2 is absent from both prediction and target, so its IOU
        # is undefined and reported as NaN
        (
            "2d-nan",
            np.array(
                [
                    [
                        [[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]],
                        [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
                    ],
                ]
            ),
            np.array([[[0, 1], [0, 1]]]),
            np.array(
                [
                    [
                        1.0 / 3.0,
                        1.0 / 3.0,
                        np.nan,
                    ]
                ],
            ),
        ),
    )
    def test_values(
        self,
        mask_pred: np.ndarray,
        targets: np.ndarray,
        expected: np.ndarray,
    ) -> None:
        """Test IOU values.
        Args:
            mask_pred: soft mask, of shape (..., num_classes).
            targets: values are integers, of shape (...).
            expected: expected output.
        """
        num_classes = mask_pred.shape[-1]
        # (batch, ..., num_classes)
        mask_true = jax.nn.one_hot(targets, num_classes=num_classes, axis=-1)
        got = self.variant(iou)(
            mask_pred=mask_pred,
            mask_true=mask_true,
        )
        chex.assert_trees_all_close(got, expected)
| 4,360 | 25.430303 | 77 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_model_unet_3d_time.py | """Test Unet related classes and functions."""
from typing import Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from absl.testing import parameterized
from chex._src import fake
from imgx.model import Unet3dSliceTime, Unet3dTime
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None:  # pylint: disable=invalid-name
    """Fake multi-devices.

    Makes chex report 2 CPU devices so multi-device code paths can be
    exercised on a single host.
    """
    fake.set_n_cpu_devices(2)
class TestTimeUnet3d(chex.TestCase):
    """Test the classes Unet3dTime and Unet3dSliceTime."""
    # shared network/test configuration
    batch_size = 2
    in_channels = 1
    out_channels = 2
    num_timesteps = 4
    @parameterized.product(
        (
            {
                "in_shape": (15, 16, 17),
                "kernel_size": 3,
                "scale_factor": 2,
            },
            {
                "in_shape": (13, 14, 15),
                "kernel_size": 5,
                "scale_factor": 1,
            },
            {
                "in_shape": (29, 30, 31),
                "kernel_size": 5,
                "scale_factor": 2,
            },
            {
                "in_shape": (53, 54, 55),
                "kernel_size": 5,
                "scale_factor": 3,
            },
        ),
        model_cls=[Unet3dTime, Unet3dSliceTime],
    )
    def test_output_shape(
        self,
        in_shape: Tuple[int, int, int],
        kernel_size: int,
        scale_factor: int,
        model_cls: hk.Module,
    ) -> None:
        """Test UNet3D output shape.
        Args:
            in_shape: input shape
            scale_factor: convolution stride for down-sampling/up-sampling.
            kernel_size: convolution kernel size, the value(s) should be odd.
            model_cls: model to be tested.
        """
        channels = (2, 4, 2)
        @hk.testing.transform_and_run()
        def forward(
            x: jnp.ndarray,
            t: jnp.ndarray,
        ) -> jnp.ndarray:
            """Forward function of Unet.
            Args:
                x: input.
                t: time.
            Returns:
                Network prediction.
            """
            net = model_cls(
                in_shape=in_shape,
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                num_channels=channels,
                kernel_size=kernel_size,
                scale_factor=scale_factor,
                num_timesteps=self.num_timesteps,
            )
            return net(x, t)
        rng = jax.random.PRNGKey(0)
        rng, rng_t = jax.random.split(rng)
        dummy_image = jax.random.uniform(
            rng, shape=(self.batch_size, *in_shape, self.in_channels)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        out = forward(dummy_image, dummy_t)
        # spatial shape is preserved; only the channel axis changes
        chex.assert_shape(out, (self.batch_size, *in_shape, self.out_channels))
    @chex.all_variants
    @parameterized.named_parameters(
        (
            "Unet3dTime",
            Unet3dTime,
        ),
        ("Unet3dSliceTime", Unet3dSliceTime),
    )
    def test_output_shape_variants(
        self,
        model_cls: hk.Module,
    ) -> None:
        """Test UNet3D output shape under different device variants.
        Args:
            model_cls: model to be tested.
        """
        kernel_size = 3
        scale_factor = 2
        in_shape = (14, 15, 16)
        channels = (2, 4)
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            x: jnp.ndarray,
            t: jnp.ndarray,
        ) -> jnp.ndarray:
            """Forward function of Unet.
            Args:
                x: input.
                t: time.
            Returns:
                Network prediction.
            """
            net = model_cls(
                in_shape=in_shape,
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                num_channels=channels,
                kernel_size=kernel_size,
                scale_factor=scale_factor,
                num_timesteps=self.num_timesteps,
            )
            return net(x, t)
        rng = jax.random.PRNGKey(0)
        rng, rng_t = jax.random.split(rng)
        dummy_image = jax.random.uniform(
            rng, shape=(self.batch_size, *in_shape, self.in_channels)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        out = forward(dummy_image, dummy_t)
        chex.assert_shape(out, (self.batch_size, *in_shape, self.out_channels))
    @parameterized.named_parameters(
        (
            "Unet3dTime",
            Unet3dTime,
        ),
        ("Unet3dSliceTime", Unet3dSliceTime),
    )
    def test_output_real_shape(
        self,
        model_cls: hk.Module,
    ) -> None:
        """Test UNet3D output shape with real setting.
        Args:
            model_cls: model to be tested.
        """
        kernel_size = 3
        scale_factor = 2
        # realistic image size; channels are kept tiny to keep the test fast
        in_shape = (256, 256, 48)
        channels = (2, 2, 2, 2)
        @hk.testing.transform_and_run()
        def forward(
            x: jnp.ndarray,
            t: jnp.ndarray,
        ) -> jnp.ndarray:
            """Forward function of Unet.
            Args:
                x: input.
                t: time.
            Returns:
                Network prediction.
            """
            net = model_cls(
                in_shape=in_shape,
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                num_channels=channels,
                kernel_size=kernel_size,
                scale_factor=scale_factor,
                num_timesteps=self.num_timesteps,
            )
            return net(x, t)
        rng = jax.random.PRNGKey(0)
        rng, rng_t = jax.random.split(rng)
        dummy_image = jax.random.uniform(
            rng, shape=(self.batch_size, *in_shape, self.in_channels)
        )
        dummy_t = jax.random.randint(
            rng_t, shape=(self.batch_size,), minval=0, maxval=self.num_timesteps
        )
        out = forward(dummy_image, dummy_t)
        chex.assert_shape(out, (self.batch_size, *in_shape, self.out_channels))
| 6,305 | 27.278027 | 80 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_exp_eval.py | """Test functions in imgx.exp.eval."""
import chex
import jax
import numpy as np
from chex._src import fake
from imgx.exp.eval import (
get_jit_segmentation_metrics,
get_non_jit_segmentation_metrics,
)
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None:  # pylint: disable=invalid-name
    """Fake multi-devices.

    Makes chex report 2 CPU devices so multi-device code paths can be
    exercised on a single host.
    """
    fake.set_n_cpu_devices(2)
class TestGetSegmentationMetrics(chex.TestCase):
    """Tests for the segmentation-metric helpers in imgx.exp.eval."""
    # shared shapes/spacing for both tests
    batch = 2
    num_classes = 3
    spatial_shape = (4, 5, 6)
    spacing = np.array((0.2, 0.5, 1.0))
    mask_shape = (batch, *spatial_shape, num_classes)
    @chex.all_variants
    def test_jit_shapes(self) -> None:
        """Each jit-compatible metric returns one value per batch element."""
        rng_pred, rng_true = jax.random.split(jax.random.PRNGKey(0))
        pred = jax.random.uniform(rng_pred, shape=self.mask_shape)
        true = jax.random.uniform(rng_true, shape=self.mask_shape)
        metrics = self.variant(get_jit_segmentation_metrics)(
            pred, true, self.spacing
        )
        for value in metrics.values():
            chex.assert_shape(value, (self.batch,))
    @chex.variants(without_jit=True, with_device=True, without_device=True)
    def test_nonjit_shapes(self) -> None:
        """Each non-jit metric returns one value per batch element."""
        rng_pred, rng_true = jax.random.split(jax.random.PRNGKey(0))
        pred = jax.random.uniform(rng_pred, shape=self.mask_shape)
        true = jax.random.uniform(rng_true, shape=self.mask_shape)
        metrics = self.variant(get_non_jit_segmentation_metrics)(
            pred, true, self.spacing
        )
        for value in metrics.values():
            chex.assert_shape(value, (self.batch,))
| 1,741 | 29.561404 | 75 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_model_unet_3d.py | """Test Unet related classes and functions."""
from typing import Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from absl.testing import parameterized
from chex._src import fake
from imgx.model import Unet3d, Unet3dSlice
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None:  # pylint: disable=invalid-name
    """Fake multi-devices.

    Makes chex report 2 CPU devices so multi-device code paths can be
    exercised on a single host.
    """
    fake.set_n_cpu_devices(2)
class TestUnet3d(chex.TestCase):
    """Test the class Unet3d and Unet3dSlice."""
    # shared network/test configuration
    batch_size = 2
    in_channels = 1
    out_channels = 2
    @parameterized.product(
        (
            {
                "in_shape": (15, 16, 17),
                "kernel_size": 3,
                "scale_factor": 2,
            },
            {
                "in_shape": (13, 14, 15),
                "kernel_size": 5,
                "scale_factor": 1,
            },
            {
                "in_shape": (29, 30, 31),
                "kernel_size": 5,
                "scale_factor": 2,
            },
            {
                "in_shape": (53, 54, 55),
                "kernel_size": 5,
                "scale_factor": 3,
            },
        ),
        model_cls=[Unet3d, Unet3dSlice],
    )
    def test_output_shape(
        self,
        in_shape: Tuple[int, int, int],
        kernel_size: int,
        scale_factor: int,
        model_cls: hk.Module,
    ) -> None:
        """Test output shape.
        Args:
            in_shape: input shape
            scale_factor: convolution stride for down-sampling/up-sampling.
            kernel_size: convolution kernel size, the value(s) should be odd.
            model_cls: model to be tested.
        """
        channels = (2, 4, 2)
        @hk.testing.transform_and_run()
        def forward(
            x: jnp.ndarray,
        ) -> jnp.ndarray:
            """Forward function of Unet.
            Args:
                x: input.
            Returns:
                Network prediction.
            """
            net = model_cls(
                in_shape=in_shape,
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                num_channels=channels,
                kernel_size=kernel_size,
                scale_factor=scale_factor,
            )
            return net(x)
        rng = jax.random.PRNGKey(0)
        dummy_image = jax.random.uniform(
            rng, shape=(self.batch_size, *in_shape, self.in_channels)
        )
        out = forward(dummy_image)
        # spatial shape is preserved; only the channel axis changes
        chex.assert_shape(out, (self.batch_size, *in_shape, self.out_channels))
    @chex.all_variants
    @parameterized.named_parameters(
        ("Unet3d", Unet3d),
        (
            "Unet3dSlice",
            Unet3dSlice,
        ),
    )
    def test_output_shape_variants(
        self,
        model_cls: hk.Module,
    ) -> None:
        """Test UNet3D output shape under different device variants.
        Args:
            model_cls: model to be tested.
        """
        kernel_size = 3
        scale_factor = 2
        in_shape = (14, 15, 16)
        channels = (2, 4)
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def forward(
            x: jnp.ndarray,
        ) -> jnp.ndarray:
            """Forward function of Unet.
            Args:
                x: input.
            Returns:
                Network prediction.
            """
            net = model_cls(
                in_shape=in_shape,
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                num_channels=channels,
                kernel_size=kernel_size,
                scale_factor=scale_factor,
            )
            return net(x)
        rng = jax.random.PRNGKey(0)
        dummy_image = jax.random.uniform(
            rng, shape=(self.batch_size, *in_shape, self.in_channels)
        )
        out = forward(dummy_image)
        chex.assert_shape(out, (self.batch_size, *in_shape, self.out_channels))
    @parameterized.named_parameters(
        ("Unet3d", Unet3d),
        (
            "Unet3dSlice",
            Unet3dSlice,
        ),
    )
    def test_output_real_shape(
        self,
        model_cls: hk.Module,
    ) -> None:
        """Test UNet3D output shape with real setting.
        Args:
            model_cls: model to be tested.
        """
        kernel_size = 3
        scale_factor = 2
        # realistic image size; channels are kept tiny to keep the test fast
        in_shape = (256, 256, 48)
        channels = (2, 2, 2, 2)
        @hk.testing.transform_and_run()
        def forward(
            x: jnp.ndarray,
        ) -> jnp.ndarray:
            """Forward function of Unet.
            Args:
                x: input.
            Returns:
                Network prediction.
            """
            net = model_cls(
                in_shape=in_shape,
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                num_channels=channels,
                kernel_size=kernel_size,
                scale_factor=scale_factor,
            )
            return net(x)
        rng = jax.random.PRNGKey(0)
        dummy_image = jax.random.uniform(
            rng, shape=(self.batch_size, *in_shape, self.in_channels)
        )
        out = forward(dummy_image)
        chex.assert_shape(out, (self.batch_size, *in_shape, self.out_channels))
| 5,355 | 25.646766 | 79 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/data/test_dataset_iterator.py | """Test image data iterators."""
from typing import Tuple
import chex
import haiku as hk
import jax
import numpy as np
import SimpleITK as sitk # noqa: N813
from absl.testing import parameterized
from chex._src import fake
from omegaconf import DictConfig
from imgx import IMAGE, LABEL, UID
from imgx.datasets import (
AMOS_CT,
DIR_TFDS_PROCESSED_MAP,
IMAGE_SHAPE_MAP,
MALE_PELVIC_MR,
NUM_CLASSES_MAP,
Dataset,
)
from imgx.datasets.iterator import get_image_tfds_dataset
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None:  # pylint: disable=invalid-name
    """Fake multi-devices.

    Makes chex report 2 CPU devices so multi-device code paths can be
    exercised on a single host.
    """
    fake.set_n_cpu_devices(2)
class TestImageIterator(chex.TestCase):
    """Test image iterators."""
    @chex.variants(without_jit=True, with_device=True, without_device=True)
    @parameterized.named_parameters(
        ("AMOS CT", AMOS_CT),
        ("Male Pelvic MR", MALE_PELVIC_MR),
    )
    def test_output_shape_variants(
        self,
        dataset_name: str,
    ) -> None:
        """Test iterator output shape under different device variants.
        Dataset num_valid_steps is tested too.
        Args:
            dataset_name: dataset name.
        """
        num_devices_per_replica = jax.local_device_count()
        batch_size = 2
        batch_size_per_replica = 1
        # limit the dataset size so num_valid_steps can be checked exactly
        max_num_samples = 3
        image_shape = IMAGE_SHAPE_MAP[dataset_name]
        config = DictConfig(
            {
                "seed": 0,
                "training": {
                    "num_devices_per_replica": 1,
                    "batch_size": batch_size,
                    "batch_size_per_replica": batch_size_per_replica,
                    "mixed_precision": {
                        "use": False,
                    },
                },
                "data": {
                    "max_num_samples": max_num_samples,
                    # per-dataset settings are keyed by the dataset's name
                    dataset_name: {
                        "data_augmentation": {
                            "max_rotation": [0.088, 0.088, 0.088],
                            "max_translation": [20, 20, 4],
                            "max_scaling": [0.15, 0.15, 0.15],
                        },
                    },
                },
            }
        )
        @hk.testing.transform_and_run(jax_transform=self.variant)
        def get_batch() -> (
            Tuple[Dataset, chex.ArrayTree, chex.ArrayTree, chex.ArrayTree]
        ):
            """Get one batch for iterator.
            Returns:
                Training batch
                Validation batch
                Test batch.
            """
            ds = get_image_tfds_dataset(
                dataset_name=dataset_name,
                config=config,
            )
            return (
                ds,
                next(ds.train_iter),
                next(ds.valid_iter),
                next(ds.test_iter),
            )
        dataset, train_batch, valid_batch, test_batch = get_batch()
        assert dataset.num_valid_steps == int(
            np.ceil(max_num_samples / batch_size)
        )
        for i, batch in enumerate([train_batch, valid_batch, test_batch]):
            chex.assert_shape(
                batch[IMAGE],
                (
                    num_devices_per_replica,
                    batch_size // num_devices_per_replica,
                    *image_shape,
                ),
            )
            chex.assert_shape(
                batch[LABEL],
                (
                    num_devices_per_replica,
                    batch_size // num_devices_per_replica,
                    *image_shape,
                ),
            )
            # only the validation/test batches carry sample UIDs,
            # the training batch (i == 0) does not
            if i == 0:
                assert UID not in batch
            else:
                chex.assert_shape(
                    batch[UID],
                    (
                        num_devices_per_replica,
                        batch_size // num_devices_per_replica,
                    ),
                )
class TestImageShape(chex.TestCase):
    """Test the data loader shapes."""
    @parameterized.named_parameters(
        ("AMOS CT", AMOS_CT),
        ("Male Pelvic MR", MALE_PELVIC_MR),
    )
    def test_shape(
        self,
        dataset_name: str,
    ) -> None:
        """Test the data loader shapes.
        Args:
            dataset_name: dataset name.
        """
        image_shape = IMAGE_SHAPE_MAP[dataset_name]
        num_devices_per_replica = jax.local_device_count()
        batch_size = 2
        batch_size_per_replica = 1
        # the batch is sharded across devices, so it must divide evenly
        assert batch_size % num_devices_per_replica == 0
        config = DictConfig(
            {
                "seed": 0,
                "training": {
                    "num_devices_per_replica": 1,
                    "batch_size": batch_size,
                    "batch_size_per_replica": batch_size_per_replica,
                    "mixed_precision": {
                        "use": False,
                    },
                },
                "data": {
                    "max_num_samples": 4,
                    # key the per-dataset settings by the actual dataset name,
                    # consistent with TestImageIterator; a literal
                    # "dataset_name" key would never match a real dataset.
                    dataset_name: {
                        "data_augmentation": {
                            "max_rotation": [0.088, 0.088, 0.088],
                            "max_translation": [20, 20, 4],
                            "max_scaling": [0.15, 0.15, 0.15],
                        },
                    },
                },
            }
        )
        dataset = get_image_tfds_dataset(
            dataset_name,
            config,
        )
        batch_size_per_replica = batch_size // num_devices_per_replica
        for it in [dataset.train_iter, dataset.valid_iter, dataset.test_iter]:
            batch = next(it)
            chex.assert_shape(
                batch[IMAGE],
                (
                    num_devices_per_replica,
                    batch_size_per_replica,
                    *image_shape,
                ),
            )
            chex.assert_shape(
                batch[LABEL],
                (
                    num_devices_per_replica,
                    batch_size_per_replica,
                    *image_shape,
                ),
            )
    # in AMOS not all images have all labels, even without resampling
    @parameterized.named_parameters(
        ("Male Pelvic MR", MALE_PELVIC_MR),
    )
    def test_labels(
        self,
        dataset_name: str,
    ) -> None:
        """Test all mask labels have all classes.
        Args:
            dataset_name: dataset name.
        """
        mask_paths = list(
            DIR_TFDS_PROCESSED_MAP[dataset_name].glob(
                "*_mask_preprocessed.nii.gz"
            )
        )
        err_paths = []
        for path in mask_paths:
            volume = sitk.ReadImage(path)
            arr = sitk.GetArrayFromImage(volume)
            # every class (including background) must appear in the mask
            if np.unique(arr).size != NUM_CLASSES_MAP[dataset_name]:
                err_paths.append(path.name)
        if len(err_paths) > 0:
            raise ValueError(
                f"{err_paths} have less than {NUM_CLASSES_MAP[dataset_name]} "
                f"classes including background."
            )
| 7,123 | 29.444444 | 78 | py |
CPM-Live | CPM-Live-master/cpm-live/cpmbee_translator.py | from typing import Dict
from cpm_live.generation.bee import CPMBeeBeamSearch
from cpm_live.models import CPMBeeTorch, CPMBeeConfig
from cpm_live.tokenizers import CPMBeeTokenizer
import torch
import spacy
import re
def is_chinese(ch: str):
    """Return True if *ch* falls in the CJK Unified Ideographs block."""
    return "\u4e00" <= ch <= "\u9fff"
def is_english(ch: str):
    """Return True if *ch* is an ASCII English letter.

    ``str.isalpha`` alone is True for any Unicode letter (including CJK
    ideographs), which defeated the Chinese/English distinction, so the
    check is restricted to the ASCII range.
    """
    return ch.isascii() and ch.isalpha()
class Translator:
    """Chinese/English translator built on a CPM-Bee checkpoint.

    Long inputs are cut into sentence-bounded chunks, translated in
    mini-batches with beam search, and re-assembled.  For English inputs,
    spaCy NER shields PRODUCT entities from the model by replacing them
    with ``<unk_N>`` placeholders that are restored afterwards.
    """

    def __init__(self, ckpt_path, batch_size=8):
        """Load the CPM-Bee model and supporting pipelines.

        Args:
            ckpt_path: path to the model state-dict checkpoint.
            batch_size: number of text chunks translated per forward pass.
        """
        config = CPMBeeConfig.from_json_file("config/cpm-bee-10b.json")
        self.tokenizer = CPMBeeTokenizer()
        model = CPMBeeTorch(config=config)
        model.load_state_dict(torch.load(ckpt_path))
        model.cuda()
        self._beam_search = CPMBeeBeamSearch(
            model=model,
            tokenizer=self.tokenizer,
        )
        self._batch_size = batch_size
        self._nlp_eng = spacy.load("en_core_web_trf")
        # self._nlp_chn = spacy.load("zh_core_web_trf")

    def _auto_cut(self, text: str):
        """Split ``text`` into chunks, closing a chunk only at a sentence
        ender once the per-punctuation minimum length has been reached."""
        # value = minimum chunk length before this punctuation may cut
        CUT_TABLE = {
            ".": 100,
            "?": 100,
            "!": 100,
            "。": 48,
            "?": 48,
            "!": 48,
        }
        st = 0
        sub_text = []
        while st < len(text):
            ed = st
            while ed + 1 < len(text) and (
                text[ed] not in CUT_TABLE or ed < st + CUT_TABLE[text[ed]]
            ):
                ed += 1
            sub_text.append(text[st : ed + 1])
            st = ed + 1
        return sub_text

    def _remove_entity(self, nlp: spacy.language.Language, text: str):
        """Replace PRODUCT entities in ``text`` with ``<unk_N>`` placeholders.

        Returns:
            Escaped text with placeholders, and a mapping from placeholder
            token back to the original entity string.
        """
        doc = nlp(text)
        ent_spans = []
        for ent in doc.ents:
            if ent.label_ in ["PRODUCT"]:
                ent_spans.append((ent.start_char, ent.end_char))
        # Fix: sort in place. The original called sorted() and discarded the
        # result, so out-of-order spans would have corrupted the slicing.
        ent_spans.sort(key=lambda x: x[0])
        sub_text = []
        ent_map = {}
        unk_map = {}
        p = 0
        for ent_s, ent_e in ent_spans:
            sub_text.append(self.tokenizer.escape(text[p:ent_s]))
            ent = text[ent_s:ent_e]
            if ent not in ent_map:
                ent_map[ent] = len(ent_map)
                unk_map["<unk_{}>".format(ent_map[ent])] = ent
            sub_text.append("<unk_{}>".format(ent_map[ent]))
            p = ent_e
        sub_text.append(self.tokenizer.escape(text[p:]))
        return "".join(sub_text), unk_map

    def _replace_entity(self, text: str, table: Dict[str, str]):
        """Substitute ``<unk_N>`` placeholders in ``text`` back to entities."""
        ret = []
        for token in self.tokenizer.tokenize(text):
            if token.is_special and token.token in table:
                # (dead 'the '-stripping code removed: it computed a local
                # that was never used, since the lookup used token.token)
                ret.append(table[token.token])
            else:
                ret.append(token.token)
        return "".join(ret)

    def to_chn(self, text: str) -> str:
        """Translate English ``text`` to Chinese, preserving PRODUCT names."""
        text, replace_table = self._remove_entity(self._nlp_eng, text)
        sub_text = []
        for line in text.split("\n"):
            sub_text.extend(self._auto_cut(line))
            sub_text.append("")  # empty sentinel marks the original newline
        ret = ["\n" for _ in range(len(sub_text))]
        curr_batch = []
        curr_batch_idx = []
        for i, t in enumerate(sub_text):
            if len(t) == 0:
                ret[i] = "\n"
            else:
                curr_batch.append(t)
                curr_batch_idx.append(i)
                if len(curr_batch) >= self._batch_size:
                    inference_results = self._beam_search.generate(
                        [{"document": doc, "task": "英翻中", "<ans>": ""} for doc in curr_batch],
                        max_length=180,
                        repetition_penalty=1.0,
                    )
                    for idx, res in zip(curr_batch_idx, inference_results):
                        ret[idx] = self._replace_entity(res["<ans>"], replace_table)
                    curr_batch = []
                    curr_batch_idx = []
        # flush the final partial batch
        if len(curr_batch) > 0:
            inference_results = self._beam_search.generate(
                [{"document": doc, "task": "英翻中", "<ans>": ""} for doc in curr_batch],
                max_length=180,
                repetition_penalty=1.0,
            )
            for idx, res in zip(curr_batch_idx, inference_results):
                ret[idx] = self._replace_entity(res["<ans>"], replace_table)
            curr_batch = []
            curr_batch_idx = []
        return "".join(ret)

    def to_eng(self, text: str):
        """Translate Chinese ``text`` to English, re-spacing the output."""
        text = self.tokenizer.escape(text)
        # insert a space between a non-ASCII char and a following letter
        text = re.sub(r"([^\x00-\x7F])([a-zA-Z])", r"\1 \2", text)
        sub_text = []
        for line in text.split("\n"):
            sub_text.extend(self._auto_cut(line))
            sub_text.append("")  # empty sentinel marks the original newline
        ret = ["\n" for _ in range(len(sub_text))]
        curr_batch = []
        curr_batch_idx = []
        for i, t in enumerate(sub_text):
            if len(t) == 0:
                ret[i] = "\n"
            else:
                curr_batch.append(t)
                curr_batch_idx.append(i)
                if len(curr_batch) >= self._batch_size:
                    inference_results = self._beam_search.generate(
                        [{"document": doc, "task": "中翻英", "<ans>": ""} for doc in curr_batch],
                        max_length=180,
                        repetition_penalty=1.0,
                    )
                    for idx, res in zip(curr_batch_idx, inference_results):
                        ret[idx] = self.tokenizer.unescape(res["<ans>"])
                    curr_batch = []
                    curr_batch_idx = []
        # flush the final partial batch
        if len(curr_batch) > 0:
            inference_results = self._beam_search.generate(
                [{"document": doc, "task": "中翻英", "<ans>": ""} for doc in curr_batch],
                max_length=180,
                repetition_penalty=1.0,
            )
            for idx, res in zip(curr_batch_idx, inference_results):
                ret[idx] = self.tokenizer.unescape(res["<ans>"])
            curr_batch = []
            curr_batch_idx = []
        # join translated sentences with a single space, except after newlines
        is_newline = True
        for i in range(len(ret)):
            if ret[i] == "\n":
                is_newline = True
            elif is_newline:
                is_newline = False
            else:
                ret[i] = " " + ret[i]
        return "".join(ret)
def main():
    """Demo: translate the CPM-Live risk statement from Chinese to English."""
    # NOTE(review): placeholder path — replace with a real checkpoint.
    translator = Translator("path/to/model")
    print(
        translator.to_eng(
            """考虑到机器学习模型的“黑盒”本质,模型有可能在不受控的情况下输出包括但不限于虚假信息、错误政治言论、偏见与歧视性话语、对不良行为的煽动与暗示等内容。CPM-Live虽已对相关训练数据进行数据清洗,但仍有可能具有不限于如下所示使用风险。用户使用CPM-Live相关资源前,需明确本节涉及的相关风险,并在使用过程中承担全部风险与责任。
侵犯个人隐私。模型有可能直接或经引导后产生涉及个人隐私的内容。
侵犯内容版权。模型有可能直接或经引导后产生与其他出版物相同、相似的内容。
产生虚假信息。模型有可能直接或经引导后产生不符合事实或客观规律的虚假信息。用户不应故意使用与引导模型制作虚假内容。
产生政治敏感内容。模型有可能直接或经引导后产生与政策、法规等相关的政治敏感内容。
产生偏见与歧视性话语。模型有可能直接或经引导后产生包括但不限于性别、种族等方面的偏见与歧视性话语。
产生对不良行为的煽动与暗示。模型有可能直接或经引导后产生对于违法犯罪等不良行为的煽动与暗示。
产生个体伤害言论。模型有可能直接或经引导后产生对个体进行伤害的言论,如对个人的诋毁、打击言论或鼓励个体进行自我伤害行为的言论等。
"""
        )
    )
if __name__ == "__main__":
main()
| 6,854 | 32.768473 | 180 | py |
CPM-Live | CPM-Live-master/cpm-live/setup.py | from setuptools import setup, find_packages
# Package metadata and dependencies for the cpm_live toolkit.
setup(
    name="cpm_live",
    version="0.1.0",
    author="OpenBMB",
    author_email="openbmb@gmail.com",
    description="Toolkit for CPM-Live",
    packages=find_packages(),
    install_requires=[
        "torch>=1.10",
        "bmtrain>=0.1.8",
        "jieba",
        "tqdm",
        "tensorboard",
        # numpy was listed twice (unpinned and ">=1.21.0"); keep only the
        # constrained requirement.
        "numpy>=1.21.0",
    ],
    # ship the bundled vocabulary files with the package
    package_data={"cpm_live": ["vocabs/*.txt"]},
)
| 452 | 20.571429 | 48 | py |
CPM-Live | CPM-Live-master/cpm-live/pretrain_cpm_bee.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from typing import Any, Dict, List, Union
import torch
import bmtrain as bmt
import os
from cpm_live.arguments import get_args
from cpm_live.models import CPMBee, CPMBeeConfig
from cpm_live.tokenizers import CPMBeeTokenizer
from cpm_live.utils import allgather_objects, LogManager
from cpm_live.training_tasks.bee import MixedDataset
def get_tokenizer(args):
    """Build the CPM-Bee tokenizer (``args`` is kept for API symmetry)."""
    return CPMBeeTokenizer()
def get_model(args):
    """Build CPM-Bee from ``args.model_config`` and initialise its weights.

    Weights come from the checkpoint at ``args.load`` when given, otherwise
    from random (distributed) initialisation.
    """
    model = CPMBee(CPMBeeConfig.from_json_file(args.model_config))
    if args.load is None:
        bmt.init_parameters(model)
    else:
        bmt.load(model, args.load)
    return model
def get_optimizer(args, model):
    """Build an Adam optimizer with CPU-offloaded states.

    When resuming (``args.load`` set) and a saved optimizer state exists,
    each rank reloads its own per-rank state file.
    """
    optimizer = bmt.optim.AdamOffloadOptimizer(
        model.parameters(), weight_decay=args.weight_decay
    )
    if args.load is not None:
        # Probe the rank-0 file only: if it exists, all ranks are assumed to
        # have their own ".rank-<r>.opt" file from the same checkpoint.
        if os.path.exists(os.path.join(args.save, args.save_name + (".rank-%d.opt" % 0))):
            # optimizer state exists
            states = torch.load(
                os.path.join(args.save, args.save_name + (".rank-%d.opt" % bmt.rank()))
            )
            optimizer.load_state_dict(states)
    return optimizer
def get_learning_rate_scheduler(args, optimizer):
    """Build a Noam warmup/decay schedule resuming from ``args.start_step``.

    ``args.lr_decay_iters`` defaults to ``args.train_iters`` and is written
    back onto ``args`` so later readers see the resolved value.
    """
    if args.lr_decay_iters is None:
        args.lr_decay_iters = args.train_iters
    return bmt.lr_scheduler.Noam(
        optimizer,
        start_lr=args.lr,
        warmup_iter=args.warmup_iters,
        end_iter=args.lr_decay_iters,
        num_iter=args.start_step,
    )
def setup_model_and_optimizer(args):
    """Assemble model, tokenizer, optimizer, LR schedule and loss-scale manager.

    Returns:
        (tokenizer, model, optimizer, lr_scheduler, optim_manager)
    """
    model = get_model(args)
    tokenizer = get_tokenizer(args)
    bmt.synchronize()
    optimizer = get_optimizer(args, model)
    lr_scheduler = get_learning_rate_scheduler(args, optimizer)
    bmt.synchronize()
    # dynamic loss scaling for mixed-precision: doubles every 512 clean steps
    optim_manager = bmt.optim.OptimManager(
        loss_scale=args.loss_scale,
        loss_scale_factor=2,
        loss_scale_steps=512,
    )
    optim_manager.add_optimizer(optimizer, lr_scheduler)
    return tokenizer, model, optimizer, lr_scheduler, optim_manager
def initialize():
    """Parse args, start the distributed context, and prepare the save dir."""
    # Offset the rendezvous port — presumably to avoid clashing with another
    # job using the launcher-provided MASTER_PORT; confirm with deployment.
    os.environ["MASTER_PORT"] = str(int(os.environ["MASTER_PORT"]) + 2333)
    args = get_args(pretrain=True)
    bmt.init_distributed(seed=args.seed)
    if args.save is not None:
        os.makedirs(args.save, exist_ok=True)
    return args
def see_memory(detail=False):
    """Report CUDA memory usage and reset the peak-allocation counter.

    Returns the full ``torch.cuda.memory_summary`` string when ``detail``
    is True, otherwise a ``(allocated_gb, peak_allocated_gb)`` tuple
    rounded to two decimals.
    """
    gib = 1024 * 1024 * 1024
    if detail:
        report = torch.cuda.memory_summary()
    else:
        report = (
            round(torch.cuda.memory_allocated() / gib, 2),
            round(torch.cuda.max_memory_allocated() / gib, 2),
        )
    torch.cuda.reset_peak_memory_stats()
    return report
def add_mem_time(info, mem_usage, tim_usage):
    """Record a memory and wall-clock snapshot under key ``info``."""
    torch.cuda.synchronize()
    mem_usage[info], tim_usage[info] = see_memory(), time.time()
    return mem_usage, tim_usage
class LossSpikeDetector:
    """Detect sudden per-task loss increases and dump context to a log file.

    A spike is any task whose loss exceeds three times its previously seen
    value.  The two most recent data batches are retained so the batch that
    triggered a spike can be written alongside the loss values.
    """

    def __init__(self, log_path: str) -> None:
        self._last_loss: Dict[str, float] = {}
        self._last_data: List[Any] = [None]
        self._log_path = log_path

    def update_data(self, data: Any):
        """Remember ``data`` as the latest batch (keeps at most two)."""
        self._last_data.append(data)
        if len(self._last_data) > 2:
            self._last_data = self._last_data[-2:]

    def update_loss(self, iteration: int, loss_map: Dict[str, float]):
        """Compare ``loss_map`` against history and log any spikes."""
        spikes = [
            {"prev": self._last_loss[task], "curr": loss, "task": task}
            for task, loss in loss_map.items()
            if task in self._last_loss and loss > self._last_loss[task] * 3
        ]
        for task, loss in loss_map.items():
            self._last_loss[task] = float(loss)
        if spikes:
            self._write_log(iteration, self._last_data[-1], spikes)

    def _write_log(self, iteration: int, data: Any, result: List[Dict[str, Any]]):
        """Append the spike report and the offending batch to the log file."""
        with open(self._log_path, "a", encoding="utf-8") as fp:
            fp.write("=" * 20)
            fp.write("\nloss spike at {}\n".format(iteration))
            fp.write("{}\n".format(json.dumps(result, indent=4, ensure_ascii=False)))
            fp.write("data: \n")
            for d in data:
                fp.write("{}\n".format(json.dumps(d, indent=4, ensure_ascii=False)))
            fp.write("\n\n")
def pretrain(
    args,
    tokenizer: CPMBeeTokenizer,
    model: CPMBee,
    optimizer: bmt.optim.AdamOffloadOptimizer,
    lr_scheduler: bmt.lr_scheduler.WarmupLRScheduler,
    optim_manager: bmt.optim.OptimManager,
):
    """Run the CPM-Bee pretraining loop.

    Resumes from ``args.start_step`` (model/optimizer/dataset states),
    streams batches from MixedDataset, and periodically logs per-task
    losses, inspects parameters and writes checkpoints.  TensorBoard and
    the text log are written by rank 0 only.
    """
    average_time = bmt.utils.AverageRecorder()
    # -100 marks positions excluded from the loss
    loss_func = bmt.loss.FusedCrossEntropy(ignore_index=-100)
    start_step = args.start_step
    lsd = LossSpikeDetector("debug/spile.%d.log" % bmt.rank())
    if args.tensorboard is not None and bmt.rank() == 0:
        from torch.utils.tensorboard import SummaryWriter
        import distutils.version  # noqa: F401
        if not os.path.exists(args.tensorboard):
            os.makedirs(args.tensorboard)
        writer = SummaryWriter(log_dir=args.tensorboard)
    if args.log_dir is not None and bmt.rank() == 0:
        log_mgr = LogManager(args.log_dir)
    global_token_pass = 0.0
    global_world_size = bmt.world_size()
    dataloader = MixedDataset(
        args.dataset, args.batch_size, args.max_length, tokenizer, max_depth=8
    )
    if os.path.exists(os.path.join(args.save, args.save_name + ("-%d.data.pt" % start_step))):
        # load dataset states if exists
        dataset_states = torch.load(
            os.path.join(args.save, args.save_name + ("-%d.data.pt" % start_step))
        )
        missing = dataloader.load_state_dict(dataset_states)
        if len(missing) > 0:
            bmt.print_rank("Missing keys when loading dataset states: ", missing)
    dataloader.start()
    try:
        for iteration, data in enumerate(dataloader):
            # continue global step counting from the resume point
            iteration = iteration + start_step + 1
            assert data["inputs"].shape[0] == args.batch_size
            # move the numpy batch onto the GPU with the dtypes the model expects
            input_ids = torch.from_numpy(data["inputs"]).cuda().to(torch.int32)
            input_ids_sub = torch.from_numpy(data["inputs_sub"]).cuda().to(torch.int32)
            input_length = torch.from_numpy(data["length"]).cuda().to(torch.int32)
            input_context = torch.from_numpy(data["context"]).cuda().bool()
            input_sample_ids = torch.from_numpy(data["sample_ids"]).cuda().to(torch.int32)
            input_num_segments = torch.from_numpy(data["num_segments"]).cuda().to(torch.int32)
            input_segment_ids = torch.from_numpy(data["segment_ids"]).cuda().to(torch.int32)
            input_segment_rel_offset = (
                torch.from_numpy(data["segment_rel_offset"]).cuda().to(torch.int32)
            )
            input_segment_rel = torch.from_numpy(data["segment_rel"]).cuda().to(torch.int32)
            input_span = torch.from_numpy(data["spans"]).cuda().to(torch.int32)
            targets = torch.from_numpy(data["target"]).cuda().to(torch.int32)
            ext_table_ids = torch.from_numpy(data["ext_ids"]).cuda().to(torch.int32)
            ext_table_sub = torch.from_numpy(data["ext_sub"]).cuda().to(torch.int32)
            task_ids = torch.from_numpy(data["task_ids"]).cuda().to(torch.int32)
            task_names = data["task_names"]
            lsd.update_data(data["raw_data"])
            # =========== reset gradients
            optim_manager.zero_grad()
            # torch.cuda.empty_cache()
            mem_usage = {}
            tim_usage = {}
            mem_usage, tim_usage = add_mem_time("init", mem_usage, tim_usage)
            # =========== forward
            logits, _ = model(
                input_ids,
                input_ids_sub,
                input_length,
                input_context,
                input_sample_ids,
                input_num_segments,
                input_segment_ids,
                input_segment_rel_offset,
                input_segment_rel,
                input_span,
                ext_table_ids,
                ext_table_sub,
            )
            loss = loss_func(logits.view(-1, logits.size(-1)), targets.view(-1))
            global_loss = bmt.sum_loss(loss).item()
            mem_usage, tim_usage = add_mem_time("forward", mem_usage, tim_usage)
            # =========== backward
            optim_manager.backward(loss)
            mem_usage, tim_usage = add_mem_time("backward", mem_usage, tim_usage)
            # =========== optimizer step
            current_stream = torch.cuda.current_stream()
            # some reduce ops of distributed parameter were launched on load stream
            current_stream.wait_stream(bmt.config['load_stream'])
            grad_norm = optim_manager.clip_grad_norm(optimizer.param_groups, max_norm=1.0)
            optim_manager.step()
            mem_usage, tim_usage = add_mem_time("optim", mem_usage, tim_usage)
            # ==========
            iteration_time = tim_usage["optim"] - tim_usage["init"]
            average_time.record(iteration_time)
            # compute per-task losses by masking targets to each task id
            with torch.no_grad():
                task_num = len(task_names)
                targets_tmp = targets.expand(task_num, -1, -1)
                task = torch.arange(task_num, dtype=torch.int32, device="cuda")[:, None, None]
                targets_tmp = torch.where(
                    task_ids == task,
                    targets_tmp,
                    torch.scalar_tensor(-100, dtype=torch.int32, device="cuda"),
                )
                task_loss_map: Dict[str, float] = {}
                for i in range(task_num):
                    task_loss = loss_func(
                        logits.view(-1, logits.size(-1)), targets_tmp[i, :].view(-1)
                    )
                    # global_task_loss = float(bmt.sum_loss(task_loss).item())
                    task_loss_map[task_names[i]] = task_loss.item()
                # average the per-task losses across all ranks
                gatherd_task_loss_map: List[Dict[str, float]] = allgather_objects(task_loss_map)
                global_task_loss_map: Dict[str, Union[List[float], float]] = {}
                for local_task_loss_map in gatherd_task_loss_map:
                    for task_name, task_loss in local_task_loss_map.items():
                        if task_name not in global_task_loss_map:
                            global_task_loss_map[task_name] = []
                        global_task_loss_map[task_name].append(task_loss)
                task_loss_map = {}
                for task_name in sorted(list(global_task_loss_map.keys())):
                    avg_loss = sum(global_task_loss_map[task_name]) / len(
                        global_task_loss_map[task_name]
                    )
                    task_loss_map[task_name] = avg_loss
            # token-throughput accounting (fraction of max_length actually used)
            local_total_rate = torch.Tensor([input_length.float().mean() / args.max_length]).cuda()
            local_total_rate = bmt.sum_loss(local_total_rate).item()
            global_token_pass += (
                global_world_size * local_total_rate * args.max_length * args.batch_size
            )
            avg_time = average_time.value
            lsd.update_loss(iteration, task_loss_map)
            train_info = {
                "time": tim_usage["init"],
                "iteration": iteration,
                "loss": global_loss,
                "lr": lr_scheduler.current_lr,
                "lr_scale": int(optim_manager.loss_scale),
                "time_usage": tim_usage,
                "mem_usage": mem_usage,
                "avg_time": avg_time,
                "token_max": local_total_rate,
                "token_pass": global_token_pass,
                "throughout": args.max_length * args.batch_size * local_total_rate / avg_time,
                "grad_norm": grad_norm.item(),
                "mask_max": ((targets >= 0).sum(-1).float().mean() / args.max_length).item(),
                "num_gpus": global_world_size,
                "task_loss": task_loss_map,
            }
            bmt.print_rank(
                (
                    "| Iter: {:6d} | loss: {:.4f} | lr: {:.4e}, scale: {:10.4f} | time: {:.4f} |"
                    + " token/max: {:.4f} | mask/max: {:.4f} | grad_norm: {:.4f}"
                ).format(
                    iteration,
                    global_loss,
                    lr_scheduler.current_lr,
                    int(optim_manager.loss_scale),
                    avg_time,
                    input_length.float().mean() / args.max_length,
                    (targets >= 0).sum(-1).float().mean() / args.max_length,
                    grad_norm,
                )
            )
            bmt.print_rank(
                "| "
                + " | ".join(
                    [
                        "{} loss: {:.4f}".format(task_name, loss)
                        for task_name, loss in task_loss_map.items()
                    ]
                )
            )
            if iteration % args.inspect_iters == 0:
                model_inspect = bmt.inspect.inspect_model(model, "*")
                bmt.print_rank(bmt.inspect.format_summary(model_inspect))
                train_info["model_inspect"] = model_inspect
            # write log here
            if args.log_dir is not None and bmt.rank() == 0:
                log_mgr.write(**train_info)
            if args.tensorboard is not None and bmt.rank() == 0:
                writer.add_scalar("Loss/train", global_loss, iteration)
                writer.add_scalar("Optimizer/lr", lr_scheduler.current_lr, iteration)
                writer.add_scalar("Optimizer/scale", optim_manager.loss_scale, iteration)
                writer.add_scalar("Optimizer/grad_norm", grad_norm.item(), iteration)
                for task_name, loss in task_loss_map.items():
                    writer.add_scalar("Loss/train/{}".format(task_name), loss, iteration)
            # periodic checkpoint: model + per-rank optimizer + dataset state
            if args.save is not None and iteration % args.save_iters == 0:
                bmt.save(model, os.path.join(args.save, args.save_name + ("-%d.pt" % iteration)))
                torch.save(
                    optimizer.state_dict(),
                    os.path.join(args.save, args.save_name + (".rank-%d.opt" % bmt.rank())),
                )
                all_states = dataloader.state_dict()
                if bmt.rank() == 0:
                    # rank 0 writes the dataloader state
                    torch.save(
                        all_states,
                        os.path.join(args.save, args.save_name + ("-%d.data.pt" % iteration)),
                    )
                del all_states
    finally:
        # always stop the background dataset workers
        dataloader.close()
    bmt.save(model, os.path.join(args.save, args.save_name + ".pt"))
bmt.save(model, os.path.join(args.save, args.save_name + ".pt"))
def main():
    """Entry point: initialise the distributed run and start pretraining."""
    args = initialize()
    tokenizer, model, optimizer, lr_scheduler, optim_manager = setup_model_and_optimizer(args)
    pretrain(args, tokenizer, model, optimizer, lr_scheduler, optim_manager)
if __name__ == "__main__":
main()
| 15,523 | 38.805128 | 99 | py |
CPM-Live | CPM-Live-master/cpm-live/finetune_cpm_bee.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Dict, List, Union
import torch
import bmtrain as bmt
import os
from opendelta import LoraModel
from cpm_live.arguments import get_args
from cpm_live.models import CPMBee, CPMBeeConfig
from cpm_live.tokenizers import CPMBeeTokenizer
from cpm_live.utils import allgather_objects
from cpm_live.training_tasks.bee import FinetuneDataset
def get_tokenizer(args):
    """Build the CPM-Bee tokenizer (``args`` is kept for API symmetry)."""
    return CPMBeeTokenizer()
def get_model(args):
    """Build CPM-Bee, load/initialise weights, and optionally attach LoRA.

    With ``args.use_delta``, LoRA adapters are inserted on the attention
    query/value projections and all non-adapter parameters are frozen.
    """
    config = CPMBeeConfig.from_json_file(args.model_config)
    model = CPMBee(config)
    if args.load is not None:
        bmt.load(model, args.load)
    else:
        bmt.init_parameters(model)
    # insert LoRA
    if args.use_delta:
        delta_model = LoraModel(
            backbone_model=model, modified_modules=["project_q", "project_v"], backend="bmt"
        )
        # train only the delta (LoRA) parameters; freeze the backbone
        delta_model.freeze_module(exclude=["deltas"], set_state_dict=True)
        delta_model.log()
    return model
def get_optimizer(args, model):
    """Create an Adam optimizer with CPU-offloaded optimizer states."""
    return bmt.optim.AdamOffloadOptimizer(
        model.parameters(), weight_decay=args.weight_decay
    )
def get_learning_rate_scheduler(args, optimizer):
    """Build a Noam warmup/decay schedule resuming from ``args.start_step``.

    ``args.lr_decay_iters`` defaults to ``args.train_iters`` and is written
    back onto ``args`` so later readers see the resolved value.
    """
    if args.lr_decay_iters is None:
        args.lr_decay_iters = args.train_iters
    return bmt.lr_scheduler.Noam(
        optimizer,
        start_lr=args.lr,
        warmup_iter=args.warmup_iters,
        end_iter=args.lr_decay_iters,
        num_iter=args.start_step,
    )
def setup_model_and_optimizer(args):
    """Assemble model, tokenizer, optimizer, LR schedule and loss-scale manager.

    Returns:
        (tokenizer, model, optimizer, lr_scheduler, optim_manager)
    """
    model = get_model(args)
    tokenizer = get_tokenizer(args)
    bmt.synchronize()
    optimizer = get_optimizer(args, model)
    lr_scheduler = get_learning_rate_scheduler(args, optimizer)
    bmt.synchronize()
    # dynamic loss scaling for mixed-precision: doubles every 512 clean steps
    optim_manager = bmt.optim.OptimManager(
        loss_scale=args.loss_scale,
        loss_scale_factor=2,
        loss_scale_steps=512,
    )
    optim_manager.add_optimizer(optimizer, lr_scheduler)
    return tokenizer, model, optimizer, lr_scheduler, optim_manager
def initialize():
    """Parse finetuning args, start the distributed context, make save dir."""
    args = get_args(finetune=True)
    bmt.init_distributed(seed=args.seed)
    if args.save is not None:
        os.makedirs(args.save, exist_ok=True)
    return args
def see_memory(detail=False):
    """Report CUDA memory usage and reset the peak-allocation counter.

    Returns the full ``torch.cuda.memory_summary`` string when ``detail``
    is True, otherwise a ``(allocated_gb, peak_allocated_gb)`` tuple
    rounded to two decimals.
    """
    gib = 1024 * 1024 * 1024
    if detail:
        report = torch.cuda.memory_summary()
    else:
        report = (
            round(torch.cuda.memory_allocated() / gib, 2),
            round(torch.cuda.max_memory_allocated() / gib, 2),
        )
    torch.cuda.reset_peak_memory_stats()
    return report
def add_mem_time(info, mem_usage, tim_usage):
    """Record a memory and wall-clock snapshot under key ``info``."""
    torch.cuda.synchronize()
    mem_usage[info], tim_usage[info] = see_memory(), time.time()
    return mem_usage, tim_usage
def evaluation(model, args, tokenizer, loss_func):
    """Compute the mean loss over the eval dataset (batch size 1, no grads).

    ``None`` batches are replaced by the previous batch with their loss
    zeroed — presumably so every rank performs the same number of
    forward passes / collectives; confirm against FinetuneDataset.
    """
    bmt.print_rank("evaluation begins...")
    eval_dataloader = FinetuneDataset(
        args.eval_dataset,
        1,
        args.max_length,
        tokenizer,
        max_depth=8,
        task_name=args.task_name,
        drop_last=args.drop_last,
    )
    eval_losses = []
    last_data = None
    with torch.no_grad():
        for iteration, data in enumerate(eval_dataloader):
            iteration = iteration + 1
            skip_this_batch = False
            if data is None:
                if last_data is None:
                    raise RuntimeError(
                        "Dataset is too small, please use a smaller batch size or sequence length!"
                    )
                # replay the previous batch; its loss is zeroed below
                data = last_data
                skip_this_batch = True
            else:
                last_data = data
            # move the numpy batch onto the GPU with the expected dtypes
            input_ids = torch.from_numpy(data["inputs"]).cuda().to(torch.int32)
            input_ids_sub = torch.from_numpy(data["inputs_sub"]).cuda().to(torch.int32)
            input_length = torch.from_numpy(data["length"]).cuda().to(torch.int32)
            input_context = torch.from_numpy(data["context"]).cuda().bool()
            input_sample_ids = torch.from_numpy(data["sample_ids"]).cuda().to(torch.int32)
            input_num_segments = torch.from_numpy(data["num_segments"]).cuda().to(torch.int32)
            input_segment_ids = torch.from_numpy(data["segment_ids"]).cuda().to(torch.int32)
            input_segment_rel_offset = (
                torch.from_numpy(data["segment_rel_offset"]).cuda().to(torch.int32)
            )
            input_segment_rel = torch.from_numpy(data["segment_rel"]).cuda().to(torch.int32)
            input_span = torch.from_numpy(data["spans"]).cuda().to(torch.int32)
            targets = torch.from_numpy(data["target"]).cuda().to(torch.int32)
            ext_table_ids = torch.from_numpy(data["ext_ids"]).cuda().to(torch.int32)
            ext_table_sub = torch.from_numpy(data["ext_sub"]).cuda().to(torch.int32)
            # ===========
            mem_usage = {}
            tim_usage = {}
            mem_usage, tim_usage = add_mem_time("init", mem_usage, tim_usage)
            # =========== forward only
            logits, _ = model(
                input_ids,
                input_ids_sub,
                input_length,
                input_context,
                input_sample_ids,
                input_num_segments,
                input_segment_ids,
                input_segment_rel_offset,
                input_segment_rel,
                input_span,
                ext_table_ids,
                ext_table_sub,
            )
            loss = loss_func(logits.view(-1, logits.size(-1)), targets.view(-1))
            if skip_this_batch:
                loss = loss * 0
            eval_losses.append(bmt.sum_loss(loss))
    overall_loss = torch.stack(eval_losses).mean().item()
    return overall_loss
def finetune(
    args,
    tokenizer: CPMBeeTokenizer,
    model: CPMBee,
    optimizer: bmt.optim.AdamOffloadOptimizer,
    lr_scheduler: bmt.lr_scheduler.WarmupLRScheduler,
    optim_manager: bmt.optim.OptimManager,
):
    """Run the finetuning loop with periodic evaluation and early stopping.

    Saves the best model (lowest eval loss) as ``<save_name>-best.pt`` and
    stops after ``args.early_stop_patience`` consecutive non-improving
    evaluations.  TensorBoard is written by rank 0 only.
    """
    average_time = bmt.utils.AverageRecorder()
    # -100 marks positions excluded from the loss
    loss_func = bmt.loss.FusedCrossEntropy(ignore_index=-100)
    if args.tensorboard is not None and bmt.rank() == 0:
        from torch.utils.tensorboard import SummaryWriter
        import distutils.version  # noqa: F401
        if not os.path.exists(args.tensorboard):
            os.makedirs(args.tensorboard)
        writer = SummaryWriter(log_dir=args.tensorboard)
    best_eval_loss, eval_loss_increase = 1e9, 0
    global_token_pass = 0.0
    global_steps = 0
    global_world_size = bmt.world_size()
    dataloader = FinetuneDataset(
        args.dataset,
        args.batch_size,
        args.max_length,
        tokenizer,
        max_depth=8,
        task_name=args.task_name,
        drop_last=args.drop_last,
    )
    for epoch in range(args.epoch):
        epoch = epoch + 1
        last_data = None
        for iteration, data in enumerate(dataloader):
            iteration = iteration + 1
            global_steps = global_steps + 1
            skip_this_batch = False
            if data is None:
                if last_data is None:
                    raise RuntimeError(
                        "Dataset is too small, please use a smaller batch size or sequence length!"
                    )
                data = last_data  # use last data
                skip_this_batch = True
            else:
                last_data = data
            # move the numpy batch onto the GPU with the expected dtypes
            input_ids = torch.from_numpy(data["inputs"]).cuda().to(torch.int32)
            input_ids_sub = torch.from_numpy(data["inputs_sub"]).cuda().to(torch.int32)
            input_length = torch.from_numpy(data["length"]).cuda().to(torch.int32)
            input_context = torch.from_numpy(data["context"]).cuda().bool()
            input_sample_ids = torch.from_numpy(data["sample_ids"]).cuda().to(torch.int32)
            input_num_segments = torch.from_numpy(data["num_segments"]).cuda().to(torch.int32)
            input_segment_ids = torch.from_numpy(data["segment_ids"]).cuda().to(torch.int32)
            input_segment_rel_offset = (
                torch.from_numpy(data["segment_rel_offset"]).cuda().to(torch.int32)
            )
            input_segment_rel = torch.from_numpy(data["segment_rel"]).cuda().to(torch.int32)
            input_span = torch.from_numpy(data["spans"]).cuda().to(torch.int32)
            targets = torch.from_numpy(data["target"]).cuda().to(torch.int32)
            ext_table_ids = torch.from_numpy(data["ext_ids"]).cuda().to(torch.int32)
            ext_table_sub = torch.from_numpy(data["ext_sub"]).cuda().to(torch.int32)
            task_ids = torch.from_numpy(data["task_ids"]).cuda().to(torch.int32)
            task_names = data["task_names"]
            # =========== reset gradients
            optim_manager.zero_grad()
            mem_usage = {}
            tim_usage = {}
            mem_usage, tim_usage = add_mem_time("init", mem_usage, tim_usage)
            # =========== forward
            logits, _ = model(
                input_ids,
                input_ids_sub,
                input_length,
                input_context,
                input_sample_ids,
                input_num_segments,
                input_segment_ids,
                input_segment_rel_offset,
                input_segment_rel,
                input_span,
                ext_table_ids,
                ext_table_sub,
            )
            loss = loss_func(logits.view(-1, logits.size(-1)), targets.view(-1))
            if skip_this_batch:
                # replayed batch: keep collectives in step but contribute nothing
                loss = loss * 0
            mem_usage, tim_usage = add_mem_time("forward", mem_usage, tim_usage)
            # =========== backward
            optim_manager.backward(loss)
            mem_usage, tim_usage = add_mem_time("backward", mem_usage, tim_usage)
            # =========== optimizer step
            grad_norm = optim_manager.clip_grad_norm(optimizer.param_groups, max_norm=1.0)
            optim_manager.step()
            mem_usage, tim_usage = add_mem_time("optim", mem_usage, tim_usage)
            # ==========
            iteration_time = tim_usage["optim"] - tim_usage["init"]
            average_time.record(iteration_time)
            # compute per-task losses by masking targets to each task id
            with torch.no_grad():
                task_num = len(task_names)
                targets_tmp = targets.expand(task_num, -1, -1)
                task = torch.arange(task_num, dtype=torch.int32, device="cuda")[:, None, None]
                targets_tmp = torch.where(
                    task_ids == task,
                    targets_tmp,
                    torch.scalar_tensor(-100, dtype=torch.int32, device="cuda"),
                )
                task_loss_map: Dict[str, float] = {}
                if not skip_this_batch:
                    for i in range(task_num):
                        task_loss = loss_func(
                            logits.view(-1, logits.size(-1)), targets_tmp[i, :].view(-1)
                        )
                        task_loss_map[task_names[i]] = task_loss.item()
                # average the per-task losses across all ranks
                gatherd_task_loss_map: List[Dict[str, float]] = allgather_objects(task_loss_map)
                global_task_loss_map: Dict[str, Union[List[float], float]] = {}
                for local_task_loss_map in gatherd_task_loss_map:
                    for task_name, task_loss in local_task_loss_map.items():
                        if task_name not in global_task_loss_map:
                            global_task_loss_map[task_name] = []
                        global_task_loss_map[task_name].append(task_loss)
                task_loss_map = {}
                for task_name in sorted(list(global_task_loss_map.keys())):
                    avg_loss = sum(global_task_loss_map[task_name]) / len(
                        global_task_loss_map[task_name]
                    )
                    task_loss_map[task_name] = avg_loss
            # token-throughput accounting (fraction of max_length actually used)
            local_total_rate = torch.Tensor([input_length.float().mean() / args.max_length]).cuda()
            local_total_rate = bmt.sum_loss(local_total_rate).item()
            global_token_pass += (
                global_world_size * local_total_rate * args.max_length * args.batch_size
            )
            avg_time = average_time.value
            train_info = {
                "time": tim_usage["init"],
                "epoch": epoch,
                "iteration": iteration,
                "loss": task_loss_map[args.task_name],
                "lr": lr_scheduler.current_lr,
                "lr_scale": int(optim_manager.loss_scale),
                "time_usage": tim_usage,
                "mem_usage": mem_usage,
                "avg_time": avg_time,
                "token_max": local_total_rate,
                "token_pass": global_token_pass,
                "throughout": args.max_length * args.batch_size * local_total_rate / avg_time,
                "grad_norm": grad_norm.item(),
                "mask_max": ((targets >= 0).sum(-1).float().mean() / args.max_length).item(),
                "num_gpus": global_world_size,
                "task_loss": task_loss_map,
            }
            bmt.print_rank(
                (
                    "| Epoch: {:3d} | Iter: {:6d} | loss: {:.4f} "
                    + "| lr: {:.4e}, scale: {:10.4f} | time: {:.4f} |"
                    + " token/max: {:.4f} | mask/max: {:.4f} | grad_norm: {:.4f}"
                ).format(
                    epoch,
                    iteration,
                    task_loss_map[args.task_name],
                    lr_scheduler.current_lr,
                    int(optim_manager.loss_scale),
                    avg_time,
                    input_length.float().mean() / args.max_length,
                    (targets >= 0).sum(-1).float().mean() / args.max_length,
                    grad_norm,
                )
            )
            bmt.print_rank(
                "| "
                + " | ".join(
                    [
                        "{} loss: {:.4f}".format(task_name, loss)
                        for task_name, loss in task_loss_map.items()
                    ]
                )
            )
            if iteration % args.inspect_iters == 0:
                model_inspect = bmt.inspect.inspect_model(model, "*")
                bmt.print_rank(bmt.inspect.format_summary(model_inspect))
                train_info["model_inspect"] = model_inspect
            # write log here
            if args.tensorboard is not None and bmt.rank() == 0:
                writer.add_scalar("Loss/train", task_loss_map[args.task_name], global_steps)
                for task_name, loss in task_loss_map.items():
                    writer.add_scalar("Loss/train/{}".format(task_name), loss, global_steps)
            # evaluation
            if global_steps % args.eval_interval == 0:
                eval_loss = evaluation(model, args, tokenizer, loss_func)
                if args.tensorboard is not None and bmt.rank() == 0:
                    writer.add_scalar("Loss/eval", eval_loss, global_steps)
                if eval_loss < best_eval_loss:
                    best_eval_loss = eval_loss
                    eval_loss_increase = 0
                    if args.save is not None:
                        bmt.save(model, os.path.join(args.save, args.save_name + "-best.pt"))
                else:
                    eval_loss_increase += 1
                bmt.print_rank(
                    "| Eval loss: {:.4f} | Increase: {:2d}".format(eval_loss, eval_loss_increase)
                )
                # early stop after N consecutive non-improving evaluations
                if eval_loss_increase == args.early_stop_patience:
                    bmt.print_rank(
                        "Eval loss has increased {:d} times, the finetune loop early stopped."
                        .format(eval_loss_increase)
                    )
                    return
    # end of finetune
def main():
    """Entry point: initialise the distributed run and start finetuning."""
    args = initialize()
    tokenizer, model, optimizer, lr_scheduler, optim_manager = setup_model_and_optimizer(args)
    finetune(args, tokenizer, model, optimizer, lr_scheduler, optim_manager)
if __name__ == "__main__":
main()
| 16,454 | 37.900709 | 99 | py |
CPM-Live | CPM-Live-master/cpm-live/text_generation.py | from cpm_live.generation.bee import CPMBeeBeamSearch
from cpm_live.models import CPMBeeTorch, CPMBeeConfig
from cpm_live.tokenizers import CPMBeeTokenizer
import torch
if __name__ == "__main__":
    # One fill-in-the-blank sample; the model is asked to fill "<mask_0>".
    samples = [
        {"document": "今天天气是真的<mask_0>", "<ans>": {"<mask_0>": ""}},
    ]
    model_config = CPMBeeConfig.from_json_file("config/cpm-bee-10b.json")
    checkpoint_file = "path/to/checkpoint.pt"
    tokenizer = CPMBeeTokenizer()
    model = CPMBeeTorch(config=model_config)
    model.load_state_dict(torch.load(checkpoint_file))
    model.cuda()
    # Decode with beam search.
    searcher = CPMBeeBeamSearch(
        model=model,
        tokenizer=tokenizer,
    )
    outputs = searcher.generate(samples, max_length=100)
    for generated in outputs:
        print(generated)
| 778 | 26.821429 | 71 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/dataset/distributed_dataset.py | # coding=utf-8
# Copyright 2020 The OpenBMB team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import struct
from typing import List, Optional, Set
import torch
import bisect
import bmtrain as bmt
import json
from .serializer import Serializer, PickleSerializer
import random
import string
import time
def _random_string():
return "".join(random.choices(string.ascii_uppercase + string.digits, k=8))
_DEFAULT_BLOCK_SIZE = 16 << 20


class FileInfo:
    """Metadata record for one data file inside a dataset directory.

    A file occupies blocks ``[block_begin, block_end)`` of the dataset's
    global block space; ``mask`` marks a file that should be skipped.
    """

    # serialization order for state_dict / JSON round-trips
    _KEYS = (
        "file_name",
        "block_begin",
        "block_end",
        "nbytes",
        "nlines",
        "mask",
        "block_size",
    )

    def __init__(
        self,
        file_name: str = "",
        block_begin: int = 0,
        block_end: int = 0,
        nbytes: int = 0,
        nlines: int = 0,
        mask: bool = False,
        block_size: int = _DEFAULT_BLOCK_SIZE,
    ) -> None:
        self.file_name = file_name
        self.block_begin = block_begin
        self.block_end = block_end
        self.nbytes = nbytes
        self.nlines = nlines
        self.mask = mask
        self.block_size = block_size

    def state_dict(self):
        """Return the metadata as a plain dict keyed by attribute name."""
        return {key: getattr(self, key) for key in self._KEYS}

    def load_state_dict(self, d):
        """Restore metadata from a dict produced by ``state_dict``."""
        for key in self._KEYS:
            setattr(self, key, d[key])

    def dumps(self) -> str:
        """Serialize this record to a single-line JSON string."""
        return json.dumps(self.state_dict())

    def loads(self, data: str) -> "FileInfo":
        """Deserialize from a JSON string and return ``self`` for chaining."""
        self.load_state_dict(json.loads(data))
        return self

    def dump(self, fp: io.TextIOWrapper) -> "FileInfo":
        """Write the JSON form to an open text file and return ``self``."""
        fp.write(self.dumps())
        return self

    def load(self, fp: io.TextIOWrapper) -> "FileInfo":
        """Read the JSON form from an open text file and return ``self``."""
        self.loads(fp.read())
        return self
def _read_info_list(meta_path: str) -> List[FileInfo]:
    """Read the dataset meta file and return its `FileInfo` records.

    Retries forever with a 10-second backoff on I/O errors. The result list
    is rebuilt from scratch on every attempt, so a failure in the middle of
    one read cannot leave stale entries that get duplicated on the retry.

    Args:
        meta_path: Path to the ``meta.bin`` file (one JSON record per line).

    Returns:
        The parsed list of ``FileInfo`` records, in file order.
    """
    while True:
        # BUGFIX: the accumulator must be reset per attempt; accumulating it
        # across attempts duplicated records after a partial read failure.
        info: List[FileInfo] = []
        try:
            with open(meta_path, "r", encoding="utf-8") as f:
                for line in f.readlines():
                    line = line.strip()
                    if len(line) > 0:
                        info.append(FileInfo().loads(line))
            return info
        except Exception as e:
            print("Error: reading info list in _read_info_list!,meta_path={path}, err={err}".
                  format(path=meta_path, err=str(e)))
            time.sleep(10)
def _write_info_list(meta_path: str, info: List[FileInfo]):
    """Atomically rewrite the meta file.

    The records are first written to a random temp name in the same directory
    and then renamed over ``meta_path``, so readers never observe a partially
    written meta file. Retries forever with a 10-second backoff on errors.
    """
    base_path = os.path.dirname(meta_path)
    tmp_path = os.path.join(base_path, ".meta.bin.%s" % _random_string())
    while True:
        try:
            with open(tmp_path, "w", encoding="utf-8") as f:
                for record in info:
                    f.write(record.dumps() + "\n")
            # rename within one directory is atomic on POSIX filesystems
            os.rename(tmp_path, meta_path)
            return
        except Exception:
            print("Error: writing info list!")
            time.sleep(10)
def _filtered_range(
begin: int, end: int, rank: int, world_size: int, filter_set: Optional[Set[int]] = None
):
begin = begin + (rank + (world_size - (begin % world_size))) % world_size
if filter_set is not None:
return [i for i in range(begin, end, world_size) if i in filter_set]
else:
return [i for i in range(begin, end, world_size)]
# for some bugs that may exist in hdfs
class SafeFile:
    """File wrapper that reopens and reseeks after transient I/O errors.

    Every successful operation records the current offset, so if a later call
    fails (e.g. on a flaky network filesystem) the file is reopened at the
    last known-good position and the operation is retried.
    """

    def __init__(self, fname, mode):
        self.fname = None
        self.mode = None
        self._fp = None
        self.open_file(fname, mode)

    def read(self, size=-1):
        """Read up to ``size`` units, retrying after a reopen on failure."""
        if self._fp is None:
            raise RuntimeError("Dataset is closed")
        while True:
            try:
                res = self._fp.read(size)
                self.offset = self._fp.tell()
                return res
            except Exception as e:
                print("Error {}: reading blocks in read {}!".format(e, self.fname))
                self.open_file(self.fname, self.mode, self.offset)

    def tell(self):
        """Return the current offset, retrying after a reopen on failure."""
        if self._fp is None:
            raise RuntimeError("Dataset is closed")
        while True:
            try:
                res = self._fp.tell()
                self.offset = res
                return res
            except Exception as e:
                print("Error {}: reading blocks in tell {}!".format(e, self.fname))
                self.open_file(self.fname, self.mode, self.offset)

    def seek(self, offset, whence=0):
        """Seek like ``file.seek``, retrying after a reopen on failure."""
        if self._fp is None:
            raise RuntimeError("Dataset is closed")
        while True:
            try:
                res = self._fp.seek(offset, whence)
                self.offset = self._fp.tell()
                return res
            except Exception as e:
                print("Error {}: reading blocks in seek {}!".format(e, self.fname))
                self.open_file(self.fname, self.mode, self.offset)

    def close(self):
        """Close the underlying file, swallowing errors; safe to call twice."""
        if self._fp is not None:
            try:
                self._fp.close()
            except Exception:
                pass
            self._fp = None

    def open_file(self, fname, mode, offset=None):
        """(Re)open ``fname`` in ``mode``; seek to ``offset`` if given.

        Retries forever with a 10-second backoff on failure; raises only when
        the file does not exist at all.
        """
        while True:
            if not os.path.exists(fname):
                raise RuntimeError("Dataset does not exist")
            try:
                self.fname = fname
                self.mode = mode
                self._fp = open(fname, mode)
                if offset is not None:
                    self._fp.seek(offset, io.SEEK_SET)
                self.offset = self._fp.tell()
                return
            except Exception as e:
                print("Error {}: reading blocks in open_file {}!".format(e, self.fname))
                time.sleep(10)
class DistributedDataset:
    """Open dataset in readonly mode.

    `DistributedDataset` is used to read datasets in a distributed manner.
    Data in this dataset will be distributed evenly in blocks to each worker
    in the distributed communicator: worker `rank` owns every block whose id
    is congruent to `rank` modulo `world_size`.

    **Note** When all data has been read, reading the dataset again will revert
    back to the first data (a new "epoch" starts automatically, up to
    `max_repeat_times` epochs when that is set).

    Args:
        path (str): Path to dataset.
        rank (int): Rank in distributed communicator. See: bmtrain.rank()
        world_size (int): Total workers in distributed communicator. See: bmtrain.world_size()
        serializer: Record serializer; defaults to pickle.
        max_repeat_times (int): Maximum number of passes over the data; None means unlimited.
        shuffle (bool): Shuffle the per-rank block order each epoch.

    Example:
        >>> dataset = DistributedDataset("/path/to/dataset")
        >>> for i in range(10):
        ...     dataset.read()
    """  # noqa: E501

    def __init__(
        self,
        path: str,
        rank: int = 0,
        world_size: int = 1,
        serializer: Optional[Serializer] = None,
        max_repeat_times: Optional[int] = None,
        shuffle: bool = True,
    ) -> None:
        # config
        self._path = path
        self._rank = rank
        self._world_size = world_size
        self._max_repeat_times = max_repeat_times
        self._repeat_times = 0
        self._shuffle = shuffle
        if serializer is None:
            serializer = PickleSerializer()
        self.serializer = serializer
        # dataset meta
        self._unused_block: List[int] = []   # block ids owned by this rank, not yet read
        self._file_info: List[FileInfo] = []
        self._file_ends: List[int] = []      # cumulative block_end per file, for bisect lookup
        self._total_blocks = 0
        self._nbytes = 0
        self._nlines = 0
        # states
        self._curr_block = None              # block currently being read, None between blocks
        self._fp = None                      # SafeFile over the current data file
        # cache
        self._last_mod_time = 0
        self._curr_fname = None
        self._update_states(fast_skip=False)
        self._repeat_times += 1

    def _update_states(self, fast_skip: bool = True):
        """Re-read ``meta.bin`` and reconcile block bookkeeping with it.

        New files appended to the dataset contribute fresh blocks; files whose
        ``mask`` flag changed cause a rebuild of the unused-block list. Raises
        if previously seen file entries changed incompatibly.
        """
        meta_path = os.path.join(self._path, "meta.bin")
        while True:
            try:
                mod_time = os.stat(meta_path).st_mtime
                break
            except Exception as e:
                print("Error: reading info list in DistributedDataset._update_states, "
                      "meta_path={path}, err={err}!".format(path=meta_path, err=str(e)))
                time.sleep(10)
        # NOTE(review): self._last_mod_time is initialized to 0 and never
        # updated, so this mtime fast path never triggers — confirm whether an
        # assignment `self._last_mod_time = mod_time` was intended here.
        if self._last_mod_time < mod_time:
            # file changed
            pass
        else:
            if fast_skip:
                return
        info: List[FileInfo] = []
        if os.path.exists(meta_path):
            info = _read_info_list(meta_path)
        old_len = len(self._file_info)
        if old_len > len(info):
            raise RuntimeError("Dataset meta file: changed unexpectly")
        mask_changed = False
        for i in range(old_len):
            # previously known files must be byte-stable except for the mask flag
            if self._file_info[i].file_name != info[i].file_name:
                raise RuntimeError("Dataset meta file: changed unexpectly")
            if self._file_info[i].block_begin != info[i].block_begin:
                raise RuntimeError("Dataset meta file: changed unexpectly")
            if self._file_info[i].block_end != info[i].block_end:
                raise RuntimeError("Dataset meta file: changed unexpectly")
            if self._file_info[i].mask != info[i].mask:
                mask_changed = True
        # NOTE(review): this indexes info[0] without checking for an empty
        # list — an empty/missing meta.bin would raise IndexError here; confirm
        # meta.bin is guaranteed non-empty at this point.
        if info[0].block_begin != 0:
            raise RuntimeError("Dataset meta file: block error (0)")
        for i in range(len(info) - 1):
            # blocks of consecutive files must tile the global block space
            if info[i].block_end != info[i + 1].block_begin:
                raise RuntimeError("Dataset meta file: block error (%d)" % (i + 1))
        if (old_len == len(info) and not mask_changed) and fast_skip:
            # fast skip
            return
        if len(info) > 0:
            total_blocks = info[-1].block_end
            self._nbytes = 0
            self._nlines = 0
            for v in info:
                self._nbytes += v.nbytes
                self._nlines += v.nlines
        else:
            total_blocks = 0
            self._nbytes = 0
            self._nlines = 0
        if total_blocks > 0:
            unused_block_set = set(self._unused_block)
            nw_unused_block: List[int] = []
            for i in range(len(info)):
                v = info[i]
                if not v.mask:
                    if i < old_len:
                        # known file: keep only blocks that were still unread
                        nw_unused_block.extend(
                            _filtered_range(
                                v.block_begin,
                                v.block_end,
                                self._rank,
                                self._world_size,
                                unused_block_set,
                            )
                        )
                    else:
                        # newly appended file: all of this rank's blocks are fresh
                        nw_unused_block.extend(
                            _filtered_range(
                                v.block_begin, v.block_end, self._rank, self._world_size
                            )
                        )
            # re-shuffle unused blocks
            if self._shuffle:
                random.shuffle(nw_unused_block)
            self._unused_block = nw_unused_block
            self._file_ends = []
            for v in info:
                self._file_ends.append(v.block_end)
        else:
            self._unused_block = []
            self._file_ends = []
        self._total_blocks = total_blocks
        self._file_info = info
        assert len(self._file_ends) == len(self._file_info)

    def _mask_file(self, f: FileInfo):
        """Drop all pending blocks that belong to file ``f`` (e.g. it vanished)."""
        self._unused_block = [
            block_id
            for block_id in self._unused_block
            if block_id < f.block_begin or block_id >= f.block_end
        ]

    def _get_block_file(self, block_id: int):
        """Return the ``FileInfo`` of the file containing ``block_id``."""
        # find block in which file
        file_idx = bisect.bisect_right(self._file_ends, block_id)
        return self._file_info[file_idx]

    def _prepare_new_epoch(self):
        """Refill the unused-block list for a new pass; raise EOFError at the repeat limit."""
        if self._max_repeat_times is not None:
            if self._repeat_times >= self._max_repeat_times:
                raise EOFError("End of dataset")
        nw_unused_block: List[int] = []
        for v in self._file_info:
            if not v.mask:
                nw_unused_block.extend(
                    _filtered_range(v.block_begin, v.block_end, self._rank, self._world_size)
                )
        if self._shuffle:
            random.shuffle(nw_unused_block)
        self._unused_block = nw_unused_block
        self._repeat_times += 1

    def _get_next_block(self):
        """Pop and return the next block id to read, starting a new epoch if needed."""
        self._update_states()
        if len(self._unused_block) == 0:
            self._prepare_new_epoch()
            if len(self._unused_block) == 0:
                raise RuntimeError("Empty dataset {}".format(self._path))
        mn_block: int = self._unused_block.pop()
        return mn_block

    def _state_dict(self):
        """Local (per-rank) read state: pending block ids plus current position."""
        self._update_states()
        num_unused_block = len(self._unused_block)
        if (self._fp is not None) and (self._curr_block is not None):
            curr_block = self._curr_block
            # offset of the read cursor inside the current block
            curr_f = self._get_block_file(curr_block)
            inblock_offset = self._fp.tell() - (curr_block - curr_f.block_begin) * curr_f.block_size
        else:
            curr_block = -1
            inblock_offset = 0
        return {
            "states": torch.tensor(self._unused_block, dtype=torch.long, device="cpu"),
            "block": torch.tensor(
                [curr_block, inblock_offset, num_unused_block, self._repeat_times],
                dtype=torch.long,
                device="cpu",
            ),
        }

    def state_dict(self):
        """Returns a state dict representing the read states of the dataset.

        With ``world_size > 1`` the per-rank states are all-gathered so every
        rank returns the full global state (shape: ``(world_size, ...)``).

        Example:
            >>> state = dataset.state_dict()
            >>> dataset.load_state_dict(state)
        """
        self._update_states()
        num_unused_block = len(self._unused_block)
        if (self._fp is not None) and (self._curr_block is not None):
            curr_block = self._curr_block
            curr_f = self._get_block_file(curr_block)
            inblock_offset = self._fp.tell() - (curr_block - curr_f.block_begin) * curr_f.block_size
        else:
            curr_block = -1
            inblock_offset = 0
        with torch.no_grad():
            if self._world_size > 1:
                # pad each rank's pending-block list to the global maximum so
                # all_gather sees equal-sized tensors; -1 marks padding
                gpu_num_unused_block = torch.tensor([num_unused_block], dtype=torch.long).cuda()
                max_unused_blocks = (
                    bmt.distributed.all_reduce(gpu_num_unused_block, op="max").cpu().item()
                )
                gpu_states = torch.full((max_unused_blocks,), -1, dtype=torch.long).cuda()
                gpu_states[:num_unused_block] = torch.tensor(
                    self._unused_block, dtype=torch.long
                ).cuda()
                gpu_block = torch.tensor(
                    [curr_block, inblock_offset, num_unused_block, self._repeat_times],
                    dtype=torch.long,
                ).cuda()
                global_states = bmt.distributed.all_gather(
                    gpu_states
                ).cpu()  # (world_size, max_unused_blocks)
                global_block = bmt.distributed.all_gather(gpu_block).cpu()  # (world_size, 4)
                return {"states": global_states, "block": global_block}
            else:
                return {
                    "states": torch.tensor([self._unused_block], dtype=torch.long, device="cpu"),
                    "block": torch.tensor(
                        [[curr_block, inblock_offset, num_unused_block, self._repeat_times]],
                        dtype=torch.long,
                        device="cpu",
                    ),
                }

    def load_state_dict(self, state, strict: bool = True):
        """Load dataset state.

        Args:
            state (dict): dataset state dict.
            strict (bool): If `strict` is True, world size needs to be the same as when exported.

        Example:
            >>> state = dataset.state_dict()
            >>> dataset.load_state_dict(state)
        """
        block_states: torch.LongTensor = state["states"]
        block_info: torch.LongTensor = state["block"]
        if block_states.size(0) != self._world_size:
            if strict:
                raise ValueError(
                    "world_size changed (%d -> %d)" % (state["block"].size(0), self._world_size)
                )
            else:
                # world size changed: drop the file cursor and redistribute all
                # still-unread blocks onto the new rank layout
                self._curr_block = None
                self._fp = None
                self._curr_fname = None
                self._repeat_times = int(block_info[0, 3].item())
                # re-shuffle unused blocks
                nw_unused_block: List[int] = []
                for i in range(block_states.size(0)):
                    # filter blocks that are not in this rank
                    num_unused_blocks: int = int(block_info[i, 2].item())
                    nw_unused_block.extend(
                        [
                            block_id
                            for block_id in block_states[i, :num_unused_blocks].tolist()
                            if block_id % self._world_size == self._rank
                        ]
                    )
                if self._shuffle:
                    random.shuffle(nw_unused_block)
                self._unused_block = nw_unused_block
        else:
            # same world size: restore this rank's cursor and pending blocks
            curr_block, inblock_offset, num_unused_blocks, self._repeat_times = block_info[
                self._rank
            ].tolist()
            if curr_block == -1:
                self._curr_block = None
            else:
                while True:
                    try:
                        self._curr_block = curr_block
                        f_info = self._get_block_file(self._curr_block)
                        self._open_file(
                            f_info.file_name,
                            (self._curr_block - f_info.block_begin)
                            * f_info.block_size
                            + inblock_offset,
                        )
                        self._unused_block = block_states[self._rank, :num_unused_blocks].tolist()
                        break
                    except Exception:
                        print("Error: reading block!")
                        time.sleep(10)
        # end
        self._update_states()

    def _get_file_path(self, fname):
        """Absolute path of data file ``fname`` inside the dataset directory."""
        return os.path.join(self._path, fname)

    def _open_file(self, fname, offset):
        """Position the shared file handle at ``offset`` of ``fname``, reopening if needed."""
        if self._curr_fname != fname:
            if self._fp is not None:
                self._fp.close()
                self._curr_fname = None
            # self._fp = open(self._get_file_path(fname), "rb")
            self._fp = SafeFile(self._get_file_path(fname), "rb")
            self._curr_fname = fname
        else:
            assert self._fp is not None, "Unexpected error"
        self._fp.seek(offset, io.SEEK_SET)  # move to block

    def read(self):
        """Read a piece of data from dataset.

        Workers in different ranks will read different data.

        Record format inside a block: a 0x1F magic byte, a 4-byte length, then
        the serialized payload; a 0x00 byte marks end-of-block.
        """
        if self._curr_block is None:
            next_block_id = self._get_next_block()
            f_info = self._get_block_file(next_block_id)
            try:
                self._open_file(
                    f_info.file_name,
                    (next_block_id - f_info.block_begin) * f_info.block_size,
                )
                self._curr_block = next_block_id
            except FileNotFoundError:
                # the data file disappeared: drop its blocks and keep going
                print("ERR: reading again!")
                self._mask_file(f_info)
                return self.read()  # read again
        if self._fp is None:
            raise RuntimeError("Dataset is not initialized")
        MAGIC = self._fp.read(1)
        if MAGIC == b"\x1F":
            # correct
            size = struct.unpack("I", self._fp.read(4))[0]
            data = self._fp.read(size)
            return self.serializer.deserialize(data)
        elif MAGIC == b"\x00":
            # end of block
            self._curr_block = None
            return self.read()  # read next block
        else:
            raise ValueError("Invalid magic header")

    @property
    def nbytes(self):
        # total payload bytes across all files, per the meta file
        return self._nbytes
class SimpleDataset(DistributedDataset):
    """Single-worker, single-epoch iterable view over a dataset directory."""

    def __init__(
        self,
        path: str,
        serializer: Optional[Serializer] = None,
        shuffle: bool = True,
    ) -> None:
        # rank 0 of a world of size 1, exactly one pass over the data
        super().__init__(
            path,
            0,
            1,
            serializer=serializer,
            max_repeat_times=1,
            shuffle=shuffle,
        )

    def __iter__(self):
        """Yield records until the single epoch is exhausted."""
        while True:
            try:
                item = self.read()
            except EOFError:
                # reset the epoch counter so the dataset can be iterated again
                self._repeat_times = 0
                return
            yield item

    def __len__(self):
        """Number of records, as recorded in the dataset meta file."""
        return self._nlines
class DatasetWriter:
    """Append-only writer that packs serialized records into fixed-size blocks.

    Record layout: one 0x1F magic byte, a 4-byte native-endian length, then
    the payload. Unused space at the tail of each block is zero-filled; the
    leading 0x00 byte of that padding marks end-of-block for readers.
    """

    def __init__(self, fname: str, block_size: int, serializer: Optional[Serializer] = None):
        self._fname = fname
        self._block_size = block_size
        self._fp = open(self._fname, "wb")
        self._inblock_offset = 0
        self._nbytes = 0
        self._nlines = 0
        self._nblocks = 1
        self.serializer = PickleSerializer() if serializer is None else serializer

    def write(self, data):
        """Write a piece of data into dataset.

        Args:
            data (Any): Serialization will be done using pickle.

        Example:
            >>> writer.write( "anything you want" )
        """
        payload = self.serializer.serialize(data)
        payload = struct.pack("I", len(payload)) + payload
        # +2 reserves room for the 0x1F magic byte and the block end marker
        if self._inblock_offset + 2 + len(payload) > self._block_size:
            # zero-fill the rest of the current block and start a new one
            self._fp.write(
                b"\x00" * (self._block_size - self._inblock_offset)
            )  # fill the remaining space with 0
            self._inblock_offset = 0
            self._nblocks += 1
            # we go to the next block
            if self._inblock_offset + 2 + len(payload) > self._block_size:
                raise ValueError("data is larger than block size")
        self._nbytes += len(payload)
        self._nlines += 1
        self._inblock_offset += 1 + len(payload)
        self._fp.write(b"\x1F")
        self._fp.write(payload)

    @property
    def nbytes(self):
        """Total serialized bytes written (excluding magic bytes and padding)."""
        return self._nbytes

    @property
    def nblocks(self):
        """Number of blocks this file occupies, including the current one."""
        return self._nblocks

    @property
    def nlines(self):
        """Number of records written so far."""
        return self._nlines

    def close(self):
        """Pad the final block with zeros and close the file; idempotent."""
        if not self._fp.closed:
            self._fp.write(b"\x00" * (self._block_size - self._inblock_offset))
            self._fp.close()
class DatasetBuilder:
    """Context manager that adds one new data file to a dataset directory.

    On clean exit the file's record is appended to ``meta.bin`` atomically;
    on error the partially written data file is removed and the meta file is
    left untouched.
    """

    def __init__(
        self,
        path: str,
        dbname: str,
        block_size=_DEFAULT_BLOCK_SIZE,
        serializer: Optional[Serializer] = None,
    ) -> None:
        self._block_size = block_size
        self._path = path
        self._dbname = dbname
        self.serializer = PickleSerializer() if serializer is None else serializer
        if not os.path.exists(self._path):
            os.makedirs(self._path)
        # refuse duplicate file names within the same dataset
        meta_path = os.path.join(self._path, "meta.bin")
        existing: List[FileInfo] = []
        if os.path.exists(meta_path):
            existing = _read_info_list(meta_path)
        for record in existing:
            if record.file_name == dbname:
                raise ValueError("Dataset name exists")
        self._db_path = os.path.join(self._path, self._dbname)
        if os.path.exists(self._db_path):
            raise ValueError("File exists `%s`" % self._db_path)

    def __enter__(self):
        self._writer = DatasetWriter(self._db_path, self._block_size, self.serializer)
        return self._writer

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if self._writer is None:
            raise RuntimeError("Unexpected call to __exit__")
        self._writer.close()
        if exc_type is not None:
            # failure: discard the partial data file, keep meta unchanged
            print("Error while writing file")
            if os.path.exists(self._db_path):
                os.unlink(self._db_path)
        else:
            # success: append this file's record after the last known block
            meta_path = os.path.join(self._path, "meta.bin")
            records: List[FileInfo] = []
            if os.path.exists(meta_path):
                records = _read_info_list(meta_path)
            first_block = records[-1].block_end if records else 0
            records.append(
                FileInfo(
                    self._dbname,
                    first_block,
                    first_block + self._writer.nblocks,
                    self._writer.nbytes,
                    self._writer.nlines,
                    False,
                    self._block_size,
                )
            )
            # atomic write to meta file
            _write_info_list(meta_path, records)
        self._writer = None
def build_dataset(
    path: str,
    dbname: str,
    block_size: int = _DEFAULT_BLOCK_SIZE,
    serializer: Optional[Serializer] = None,
):
    """Open the dataset in write mode and return a builder context manager.

    Args:
        path (str): Path to dataset.
        dbname (str): The name of the file to which the data will be written. The `dbname` needs to be unique in this `dataset`.
        block_size (int): Size of each block in bytes. All files in the same dataset should have the same block size. Default: 16MB
        serializer: Record serializer; defaults to pickle.

    Example:
        >>> with build_dataset("/path/to/dataset", "data_part_1") as writer:
        ...     for i in range(10):
        ...         writer.write( { "anything you want" } )
    """  # noqa: E501
    return DatasetBuilder(path, dbname, block_size=block_size, serializer=serializer)
| 25,959 | 32.758127 | 131 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/models/bee.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple
from typing_extensions import TypedDict
import torch
from ..utils import Config
from ..layers import Encoder, EmbeddingExt, BucketPositionBias
import bmtrain as bmt
from ..utils.gradient_shrink import gradient_shrink
class CPMBeeInferenceState(TypedDict):
    """Incremental-decoding cache carried between successive `inference` calls."""

    # positions of all previously processed tokens
    buffer_position: torch.Tensor
    # context flags of all previously processed tokens
    buffer_context: torch.Tensor
    # in-context sample ids of all previously processed tokens
    buffer_sample_ids: torch.Tensor
    # per-token segment counts of all previously processed tokens
    buffer_num_segments: torch.Tensor
    # segment ids of all previously processed tokens
    buffer_segments: torch.Tensor
    # per-layer (key, value) tensors cached by the encoder
    buffer: List[Tuple[torch.Tensor, torch.Tensor]]
class CPMBeeConfig(Config):
    """Hyper-parameter container for the CPM-Bee architecture."""

    def __init__(
        self,
        vocab_size=30720,
        dim_model=4096,
        num_heads=64,
        dim_head=64,
        dim_ff=10240,
        num_layers=32,
        dropout_p=0.0,
        position_bias_num_buckets=256,
        position_bias_num_segment_buckets=256,
        position_bias_max_distance=2048,
        eps=1e-6,
        half: bool = True,
        mask_modules: Optional[List[Tuple[bool, bool]]] = None,
    ):
        super().__init__()
        # transformer geometry
        self.vocab_size = vocab_size
        self.dim_model = dim_model
        self.num_heads = num_heads
        self.dim_head = dim_head
        self.dim_ff = dim_ff
        self.num_layers = num_layers
        # relative position-bias buckets
        self.position_bias_num_buckets = position_bias_num_buckets
        self.position_bias_num_segment_buckets = position_bias_num_segment_buckets
        self.position_bias_max_distance = position_bias_max_distance
        # regularization / numerics
        self.dropout_p = dropout_p
        self.eps = eps
        self.dtype = torch.half if half else torch.float
        # per-layer (attention, ffn) skip flags; None keeps every module
        self.mask_modules = mask_modules
class CPMBee(bmt.DistributedModule):
    """CPM-Bee transformer: tied input/output embedding, bucketed relative
    position bias, and a multi-layer self-attention encoder."""

    def __init__(self, config: CPMBeeConfig):
        super().__init__()
        self.encoder = Encoder(
            num_layers=config.num_layers,
            dim_model=config.dim_model,
            dim_ff=config.dim_ff,
            num_heads=config.num_heads,
            dim_head=config.dim_head,
            dtype=config.dtype,
            eps=config.eps,
            dropout_p=config.dropout_p,
            mask_modules=config.mask_modules,
        )
        # embedding is reused as the output projection (see `projection` calls)
        self.input_embedding = EmbeddingExt(
            vocab_size=config.vocab_size,
            embedding_size=config.dim_model,
            dtype=config.dtype,
            init_std=0.02,
        )
        self.position_bias = BucketPositionBias(
            num_heads=config.num_heads,
            num_buckets=config.position_bias_num_buckets,
            num_segment_bucket=config.position_bias_num_segment_buckets,
            max_distance=config.position_bias_max_distance,
            dtype=config.dtype,
        )

    def forward(
        self,
        input: torch.Tensor,  # (batch, seqlen) int32
        input_sub: torch.Tensor,  # (batch, seqlen) int32
        length: torch.Tensor,  # (batch) int32
        context: torch.Tensor,  # (batch, seqlen) bool
        sample_ids: torch.Tensor,  # (batch, seq_len) int32
        num_segments: torch.Tensor,  # (batch, seq_len) int32
        segment: torch.Tensor,  # (batch, seqlen) int32
        segment_rel_offset: torch.Tensor,  # (batch, seq_len) int32
        segment_rel: torch.Tensor,  # (batch, num_segment_bucket) int32
        span: torch.Tensor,  # (batch, seqlen) int32
        ext_table_ids: torch.Tensor,  # (ext_table_size) int32
        ext_table_sub: torch.Tensor,  # (ext_table_size) int32
    ):
        """Full-sequence training forward pass.

        Returns:
            (logits, hidden_states): logits over the vocabulary (extended by
            `ext_table_ids`) and the encoder's last hidden states.
        """
        batch = input.size(0)
        seqlen = input.size(1)
        # processing masks and position bias bucket
        with torch.no_grad():
            device = input.device
            # calc segment bucket
            # flatten the (segment_i, segment_j) pair into an index into
            # segment_rel; pairs crossing samples/spans are zeroed to keep
            # torch.gather in range
            segment_rel_2d = torch.masked_fill(
                segment[:, :, None] * num_segments[:, :, None]
                + segment[:, None, :]
                + segment_rel_offset[:, :, None],
                ~(
                    (sample_ids[:, :, None] == sample_ids[:, None, :])
                    & (span[:, None, :] == span[:, :, None])
                ),  # not in the same span or sample
                0,  # avoid torch.gather overflow
            ).view(batch, seqlen * seqlen)
            segment_bucket = torch.gather(
                input=segment_rel,
                dim=1,
                index=segment_rel_2d.long(),
            ).view(batch, seqlen, seqlen)
            segment_bucket.masked_fill_(
                ~(
                    (sample_ids[:, :, None] == sample_ids[:, None, :])
                    & (span[:, None, :] == span[:, :, None])
                ),  # not in the same span or sample
                1,  # bucket is used for in-context samples
            )
            # directional mask: position j may attend to i only when i <= j
            directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(
                seqlen, device=device
            ).view(-1, 1)
            # sample mask: shared (sample_id == 0) tokens or same-sample tokens
            sample_mask_2d = (sample_ids[:, :, None] == 0) | (
                sample_ids[:, :, None] == sample_ids[:, None, :]
            )
            # context mask: context tokens are visible bidirectionally,
            # generated tokens only causally
            attention_mask = context[:, None, :] | (
                context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
            )
            # span mask
            attention_mask = (
                attention_mask & sample_mask_2d & (span[:, None, :] == span[:, :, None])
            )
            # length mask: hide padding beyond each sequence's length
            mask_1d = (
                torch.arange(seqlen, device=device)[None, :].repeat(batch, 1) < length[:, None]
            )
            attention_mask = (
                mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
            )
            position = torch.arange(seqlen, device=device).expand(batch, seqlen)
        hidden_states = self.input_embedding(input, input_sub)
        position_bias = self.position_bias(position, position, segment_bucket)
        hidden_states = self.encoder(hidden_states, attention_mask, position_bias)
        # tied output projection, extended with the out-of-vocab ext table
        ext_table = self.input_embedding(ext_table_ids, ext_table_sub)
        logits = self.input_embedding.projection(hidden_states, ext_table)
        return logits, hidden_states

    def inference(
        self,
        input: torch.Tensor,  # (batch, len_q) int32
        input_sub: torch.Tensor,  # (batch, len_q) int32
        position: torch.Tensor,  # (batch, len_q) int32
        context: torch.Tensor,  # (batch, len_q) bool
        sample_ids: torch.Tensor,  # (batch, len_q) int32
        num_segments: torch.Tensor,  # (batch, len_q) int32
        segment: torch.Tensor,  # (batch, len_q) int32
        segment_rel_offset: torch.Tensor,  # (batch, len_q) int32
        segment_rel: torch.Tensor,  # (batch, num_segment_bucket) int32
        ext_table_ids: torch.Tensor,  # (ext_table_size) int32
        ext_table_sub: torch.Tensor,  # (ext_table_size) int32
        past_key_values: Optional[CPMBeeInferenceState] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, CPMBeeInferenceState]:
        """Incremental decoding step with a key/value cache.

        Processes only the new `len_q` tokens against the concatenation of
        cached and new tokens, and returns the updated cache. Unlike
        `forward`, there is no span mask here — cached tokens are
        distinguished only per sample.
        """
        with torch.no_grad():
            if past_key_values is None:
                # first step: the "buffer" is just the current query tokens
                present_position = position
                present_context = context
                present_sample_ids = sample_ids
                present_num_segments = num_segments
                present_segments = segment
                present_buffer = None
            else:
                # append the new tokens' metadata to the cached buffers
                present_position = torch.cat([past_key_values["buffer_position"], position], dim=-1)
                present_context = torch.cat([past_key_values["buffer_context"], context], dim=-1)
                present_sample_ids = torch.cat(
                    [past_key_values["buffer_sample_ids"], sample_ids], dim=-1
                )
                present_num_segments = torch.cat(
                    [past_key_values["buffer_num_segments"], num_segments], dim=-1
                )
                present_segments = torch.cat([past_key_values["buffer_segments"], segment], dim=-1)
                present_buffer = past_key_values["buffer"]
            batch = input.size(0)
            len_q = input.size(1)
            len_buffer = present_position.size(1)
            # same bucket construction as in `forward`, but query x buffer shaped
            segment_rel_2d = torch.masked_fill(
                segment[:, :, None] * num_segments[:, :, None]
                + present_segments[:, None, :]
                + segment_rel_offset[:, :, None],
                ~(
                    (sample_ids[:, :, None] == present_sample_ids[:, None, :])
                ),  # not in the same sample
                0,  # avoid torch.gather overflow
            ).view(batch, len_q * len_buffer)
            segment_bucket = torch.gather(
                input=segment_rel,
                dim=1,
                index=segment_rel_2d.long(),
            ).view(batch, len_q, len_buffer)
            segment_bucket.masked_fill_(
                ~(
                    (sample_ids[:, :, None] == present_sample_ids[:, None, :])
                ),  # not in the same span or sample
                1,  # bucket is used for in-context samples
            )
            # directional mask
            directional_mask_2d = present_position[:, None, :] <= position[:, :, None]
            # sample mask
            sample_mask_2d = (sample_ids[:, :, None] == 0) | (
                sample_ids[:, :, None] == present_sample_ids[:, None, :]
            )
            # context mask
            attention_mask = present_context[:, None, :] | (
                context[:, :, None].logical_not()
                & directional_mask_2d.view(batch, len_q, len_buffer)
            )
            # span mask
            attention_mask = attention_mask & sample_mask_2d
            # length mask: buffer slots with num_segments == 0 are padding
            mask_1d = present_num_segments != 0
            attention_mask = mask_1d.view(batch, 1, len_buffer) & attention_mask
        # NOTE(review): gradient_shrink is applied here (inside no_grad) but
        # not in `forward` — presumably intentional for finetuning setups;
        # confirm against the training scripts.
        hidden_states = gradient_shrink(self.input_embedding(input, input_sub))
        position_bias = gradient_shrink(
            self.position_bias(position, present_position, segment_bucket)
        )
        hidden_states, present_key_values = self.encoder(
            hidden_states,
            attention_mask,
            position_bias,
            True,
            present_buffer,
        )
        ext_table = gradient_shrink(self.input_embedding(ext_table_ids, ext_table_sub))
        logits = self.input_embedding.projection(hidden_states, ext_table)
        return (
            logits,
            hidden_states,
            {
                "buffer_position": present_position,
                "buffer_context": present_context,
                "buffer_sample_ids": present_sample_ids,
                "buffer_num_segments": present_num_segments,
                "buffer_segments": present_segments,
                "buffer": present_key_values,
            },
        )
| 11,433 | 38.157534 | 100 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/models/ant.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple
import torch
from ..utils import Config
from ..layers import Encoder, Embedding, SegmentPositionEmbedding
import bmtrain as bmt
class CPMAntConfig(Config):
    """Hyper-parameter container for the CPM-Ant architecture."""

    def __init__(
        self,
        vocab_size=30720,
        dim_model=4096,
        num_heads=64,
        dim_head=64,
        dim_ff=10240,
        num_layers=32,
        dropout_p=0.0,
        position_bias_num_buckets=512,
        position_bias_max_distance=2048,
        eps=1e-6,
        half: bool = True,
        prompt_types: int = 32,
        prompt_length: int = 32,
        segment_types: int = 32,
        mask_modules: Optional[List[Tuple[bool, bool]]] = None,
        **kwargs,
    ):
        # NOTE: extra keyword arguments are accepted but ignored
        super().__init__()
        # prompt / segment vocabularies
        self.prompt_types = prompt_types
        self.prompt_length = prompt_length
        self.segment_types = segment_types
        # transformer geometry
        self.vocab_size = vocab_size
        self.dim_model = dim_model
        self.num_heads = num_heads
        self.dim_head = dim_head
        self.dim_ff = dim_ff
        self.num_layers = num_layers
        # relative position-bias buckets
        self.position_bias_num_buckets = position_bias_num_buckets
        self.position_bias_max_distance = position_bias_max_distance
        # regularization / numerics
        self.dropout_p = dropout_p
        self.eps = eps
        self.dtype = torch.half if half else torch.float
        # per-layer (attention, ffn) skip flags; None keeps every module
        self.mask_modules = mask_modules
class CPMAnt(bmt.DistributedModule):
def __init__(self, config: CPMAntConfig):
super().__init__()
self.encoder = Encoder(
num_layers=config.num_layers,
dim_model=config.dim_model,
dim_ff=config.dim_ff,
num_heads=config.num_heads,
dim_head=config.dim_head,
dtype=config.dtype,
eps=config.eps,
dropout_p=config.dropout_p,
mask_modules=config.mask_modules,
)
self.prompt_embedding = Embedding(
vocab_size=config.prompt_types * config.prompt_length,
embedding_size=config.dim_model,
dtype=config.dtype,
init_std=0.02,
)
self.segment_embedding = Embedding(
vocab_size=config.segment_types,
embedding_size=config.dim_model,
dtype=config.dtype,
init_std=0.02,
)
self.input_embedding = Embedding(
vocab_size=config.vocab_size,
embedding_size=config.dim_model,
dtype=config.dtype,
init_std=0.02,
)
self.position_bias = SegmentPositionEmbedding(
num_heads=config.num_heads,
num_segments=config.segment_types,
num_buckets=config.position_bias_num_buckets,
max_distance=config.position_bias_max_distance,
bidirectional=True,
dtype=config.dtype,
)
self.prompt_length = config.prompt_length
def forward(
self,
input: torch.Tensor, # (batch, seqlen)
length: torch.Tensor, # (batch)
context: torch.Tensor, # (batch, seqlen)
position: torch.Tensor, # (batch, seqlen)
segment: torch.Tensor, # (batch, seqlen)
span: torch.Tensor, # (batch, seqlen)
):
batch = input.size(0)
seqlen = input.size(1)
input_prompt = input[:, : self.prompt_length].contiguous()
input_ids = input[:, self.prompt_length :].contiguous()
prompt_states = self.prompt_embedding(input_prompt)
hidden_states = self.input_embedding(input_ids)
segment_states = self.segment_embedding(segment)
hidden_states = torch.cat([prompt_states, hidden_states], 1) + segment_states
with torch.no_grad():
device = input.device
directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(
seqlen, device=device
).view(-1, 1)
attention_mask = context[:, None, :] | (
context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
)
attention_mask = attention_mask & (span[:, None, :] == span[:, :, None])
mask_1d = (
torch.arange(seqlen, device=device)[None, :].repeat(batch, 1) < length[:, None]
)
attention_mask = (
mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
)
position_bias = self.position_bias(position, position, segment, segment)
hidden_states = self.encoder(hidden_states, attention_mask, position_bias)
logits = self.input_embedding.projection(hidden_states)
return logits, hidden_states
    def inference(
        self,
        input: torch.Tensor, # (batch, seqlen)
        length: torch.Tensor, # (batch)
        context: torch.Tensor, # (batch, seqlen)
        position: torch.Tensor, # (batch, seqlen)
        segment: torch.Tensor, # (batch, seqlen)
        span: torch.Tensor, # (batch, seqlen)
        past_key_values=None, # num_layers * 2 * (batch, num_heads, seqlen, dim_head)
    ):
        """Incremental decoding step with an optional key/value cache.

        On the first call (``past_key_values is None``) the whole
        prompt-prefixed sequence is embedded; subsequent calls embed only the
        newly appended token(s).  Returns
        ``(logits, hidden_states, present_key_values)``.
        """
        batch = input.size(0)
        if past_key_values is None:
            # First step: embed prompt + tokens, start with an empty cache.
            past_length = 0
            past_key_values = tuple([None] * self.encoder.num_layers)
            input_prompt = input[:, : self.prompt_length].contiguous()
            input_ids = input[:, self.prompt_length :].contiguous()
            prompt_states = self.prompt_embedding(input_prompt)
            hidden_states = self.input_embedding(input_ids)
            segment_states = self.segment_embedding(segment)
            hidden_states = torch.cat([prompt_states, hidden_states], 1) + segment_states
        else:
            # Later steps: only the new token(s); reuse the last segment state.
            past_length = past_key_values[0][0].size(-2)
            segment_states = self.segment_embedding(segment)
            hidden_states = self.input_embedding(input) + segment_states[:, -1:, :]
        seqlen = past_length + input.size(1)
        with torch.no_grad():
            device = input.device
            # directional_mask_2d[i, j] is True when j <= i (causal order).
            directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(
                seqlen, device=device
            ).view(-1, 1)
            # Context tokens attend bidirectionally; others causally.
            attention_mask = context[:, None, :] | (
                context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
            )
            attention_mask = attention_mask & (span[:, None, :] == span[:, :, None])
            # mask for left padding: positions are counted from the right so
            # the first (seqlen - length) columns are padding.
            mask_1d = (
                torch.tensor(list(range(seqlen))[::-1], device=device)[None, :].repeat(batch, 1)
                < length[:, None]
            )
            attention_mask = (
                mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
            )
        position_bias = self.position_bias(position, position, segment, segment)
        # Keep only the query rows for the new (uncached) positions.
        attention_mask = attention_mask[:, past_length:, :]
        position_bias = position_bias[:, :, past_length:, :]
        hidden_states, present_key_values = self.encoder(
            hidden_states, attention_mask, position_bias, True, past_key_values
        )
        logits = self.input_embedding.projection(hidden_states)
        return logits, hidden_states, present_key_values
| 7,842 | 35.47907 | 96 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/models/bee_torch.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from ..native_layers import Encoder, EmbeddingExt, BucketPositionBias
from .bee import CPMBeeConfig, CPMBeeInferenceState
class CPMBeeTorch(torch.nn.Module):
    """Pure-PyTorch CPM-Bee model: embeddings, transformer encoder and a
    bucketed relative position bias over structured (tree-shaped) inputs."""

    def __init__(self, config: CPMBeeConfig):
        super().__init__()
        self.encoder = Encoder(
            num_layers=config.num_layers,
            dim_model=config.dim_model,
            dim_ff=config.dim_ff,
            num_heads=config.num_heads,
            dim_head=config.dim_head,
            dtype=config.dtype,
            eps=config.eps,
            dropout_p=config.dropout_p,
            mask_modules=config.mask_modules,
        )
        # Embedding with support for an extension table of out-of-vocab
        # tokens (second "sub" index).
        self.input_embedding = EmbeddingExt(
            vocab_size=config.vocab_size,
            embedding_size=config.dim_model,
            dtype=config.dtype,
            init_std=0.02,
        )
        self.position_bias = BucketPositionBias(
            num_heads=config.num_heads,
            num_buckets=config.position_bias_num_buckets,
            num_segment_bucket=config.position_bias_num_segment_buckets,
            max_distance=config.position_bias_max_distance,
            dtype=config.dtype,
        )
    def forward(
        self,
        input: torch.Tensor, # (batch, seqlen) int32
        input_sub: torch.Tensor, # (batch, seqlen) int32
        length: torch.Tensor, # (batch) int32
        context: torch.Tensor, # (batch, seqlen) bool
        sample_ids: torch.Tensor, # (batch, seq_len) int32
        num_segments: torch.Tensor, # (batch, seq_len) int32
        segment: torch.Tensor, # (batch, seqlen) int32
        segment_rel_offset: torch.Tensor, # (batch, seq_len) int32
        segment_rel: torch.Tensor, # (batch, num_segment_bucket) int32
        span: torch.Tensor, # (batch, seqlen) int32
        ext_table_ids: torch.Tensor, # (ext_table_size) int32
        ext_table_sub: torch.Tensor, # (ext_table_size) int32
    ):
        """Full pass over a packed batch that may contain several in-context
        samples per row.  Returns ``(logits, hidden_states)``."""
        batch = input.size(0)
        seqlen = input.size(1)
        # processing masks and position bias bucket
        with torch.no_grad():
            device = input.device
            # calc segment bucket
            # Flattened (i, j) index into each row's segment relation table;
            # pairs from different samples/spans are zeroed first so the
            # gather below cannot read out of range.
            segment_rel_2d = torch.masked_fill(
                segment[:, :, None] * num_segments[:, :, None]
                + segment[:, None, :]
                + segment_rel_offset[:, :, None],
                ~(
                    (sample_ids[:, :, None] == sample_ids[:, None, :])
                    & (span[:, None, :] == span[:, :, None])
                ), # not in the same span or sample
                0, # avoid torch.gather overflow
            ).view(batch, seqlen * seqlen)
            segment_bucket = torch.gather(
                input=segment_rel,
                dim=1,
                index=segment_rel_2d.long(),
            ).view(batch, seqlen, seqlen)
            segment_bucket.masked_fill_(
                ~(
                    (sample_ids[:, :, None] == sample_ids[:, None, :])
                    & (span[:, None, :] == span[:, :, None])
                ), # not in the same span or sample
                1, # bucket is used for in-context samples
            )
            # directional mask: True when key position <= query position.
            directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(
                seqlen, device=device
            ).view(-1, 1)
            # sample mask: sample 0 (the main instance) may attend to all
            # samples; in-context samples only attend within themselves.
            sample_mask_2d = (sample_ids[:, :, None] == 0) | (
                sample_ids[:, :, None] == sample_ids[:, None, :]
            )
            # context mask: context tokens attend bidirectionally, others
            # causally.
            attention_mask = context[:, None, :] | (
                context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
            )
            # span mask
            attention_mask = (
                attention_mask & sample_mask_2d & (span[:, None, :] == span[:, :, None])
            )
            # length mask: drop padding past each row's true length.
            mask_1d = (
                torch.arange(seqlen, device=device)[None, :].repeat(batch, 1) < length[:, None]
            )
            attention_mask = (
                mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
            )
            position = torch.arange(seqlen, device=device).expand(batch, seqlen)
        hidden_states = self.input_embedding(input, input_sub)
        position_bias = self.position_bias(position, position, segment_bucket)
        hidden_states = self.encoder(hidden_states, attention_mask, position_bias)
        # Logits cover the base vocabulary plus the batch extension table.
        ext_table = self.input_embedding(ext_table_ids, ext_table_sub)
        logits = self.input_embedding.projection(hidden_states, ext_table)
        return logits, hidden_states
    def inference(
        self,
        input: torch.Tensor, # (batch, len_q) int32
        input_sub: torch.Tensor, # (batch, len_q) int32
        position: torch.Tensor, # (batch, len_q) int32
        context: torch.Tensor, # (batch, len_q) bool
        sample_ids: torch.Tensor, # (batch, len_q) int32
        num_segments: torch.Tensor, # (batch, len_q) int32
        segment: torch.Tensor, # (batch, len_q) int32
        segment_rel_offset: torch.Tensor, # (batch, len_q) int32
        segment_rel: torch.Tensor, # (batch, num_segment_bucket) int32
        ext_table_ids: torch.Tensor, # (ext_table_size) int32
        ext_table_sub: torch.Tensor, # (ext_table_size) int32
        past_key_values: Optional[CPMBeeInferenceState] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, CPMBeeInferenceState]:
        """Incremental decoding step.  All per-token metadata of previous
        steps is kept in ``past_key_values`` and concatenated with the new
        query tokens to rebuild masks and relative buckets."""
        with torch.no_grad():
            if past_key_values is None:
                # First step: the "present" buffers are just the new tokens.
                present_position = position
                present_context = context
                present_sample_ids = sample_ids
                present_num_segments = num_segments
                present_segments = segment
                present_buffer = None
            else:
                # Append the new tokens' metadata to the cached buffers.
                present_position = torch.cat([past_key_values["buffer_position"], position], dim=-1)
                present_context = torch.cat([past_key_values["buffer_context"], context], dim=-1)
                present_sample_ids = torch.cat(
                    [past_key_values["buffer_sample_ids"], sample_ids], dim=-1
                )
                present_num_segments = torch.cat(
                    [past_key_values["buffer_num_segments"], num_segments], dim=-1
                )
                present_segments = torch.cat([past_key_values["buffer_segments"], segment], dim=-1)
                present_buffer = past_key_values["buffer"]
            batch = input.size(0)
            len_q = input.size(1)
            len_buffer = present_position.size(1)
            # Index of each (query, buffered-key) pair into segment_rel;
            # cross-sample pairs are zeroed to keep gather in range.
            segment_rel_2d = torch.masked_fill(
                segment[:, :, None] * num_segments[:, :, None]
                + present_segments[:, None, :]
                + segment_rel_offset[:, :, None],
                ~(
                    (sample_ids[:, :, None] == present_sample_ids[:, None, :])
                ), # not in the same sample
                0, # avoid torch.gather overflow
            ).view(batch, len_q * len_buffer)
            segment_bucket = torch.gather(
                input=segment_rel,
                dim=1,
                index=segment_rel_2d.long(),
            ).view(batch, len_q, len_buffer)
            segment_bucket.masked_fill_(
                ~(
                    (sample_ids[:, :, None] == present_sample_ids[:, None, :])
                ), # not in the same span or sample
                1, # bucket is used for in-context samples
            )
            # directional mask: keys may only precede (or equal) the query.
            directional_mask_2d = present_position[:, None, :] <= position[:, :, None]
            # sample mask
            sample_mask_2d = (sample_ids[:, :, None] == 0) | (
                sample_ids[:, :, None] == present_sample_ids[:, None, :]
            )
            # context mask
            attention_mask = present_context[:, None, :] | (
                context[:, :, None].logical_not()
                & directional_mask_2d.view(batch, len_q, len_buffer)
            )
            # span mask
            attention_mask = attention_mask & sample_mask_2d
            # length mask: buffered positions with num_segments == 0 are
            # treated as padding.
            mask_1d = present_num_segments != 0
            attention_mask = mask_1d.view(batch, 1, len_buffer) & attention_mask
        hidden_states = self.input_embedding(input, input_sub)
        position_bias = self.position_bias(position, present_position, segment_bucket)
        hidden_states, present_key_values = self.encoder(
            hidden_states,
            attention_mask,
            position_bias,
            True,
            present_buffer,
        )
        ext_table = self.input_embedding(ext_table_ids, ext_table_sub)
        logits = self.input_embedding.projection(hidden_states, ext_table)
        return (
            logits,
            hidden_states,
            {
                "buffer_position": present_position,
                "buffer_context": present_context,
                "buffer_sample_ids": present_sample_ids,
                "buffer_num_segments": present_num_segments,
                "buffer_segments": present_segments,
                "buffer": present_key_values,
            },
        )
| 9,873 | 39.970954 | 100 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/models/__init__.py | from .ant import CPMAntConfig, CPMAnt
from .bee import CPMBeeConfig, CPMBee
from .ant_torch import CPMAntTorch
from .bee_torch import CPMBeeTorch
| 146 | 28.4 | 37 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/models/ant_torch.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..native_layers import Encoder, Embedding, SegmentPositionEmbedding
from .ant import CPMAntConfig
class CPMAntTorch(torch.nn.Module):
    """Pure-PyTorch CPM-Ant model: prompt/word/segment embeddings, a
    transformer encoder and a segment-aware relative position bias."""

    def __init__(self, config: CPMAntConfig):
        super().__init__()
        self.encoder = Encoder(
            num_layers=config.num_layers,
            dim_model=config.dim_model,
            dim_ff=config.dim_ff,
            num_heads=config.num_heads,
            dim_head=config.dim_head,
            dtype=config.dtype,
            eps=config.eps,
            dropout_p=config.dropout_p,
            mask_modules=config.mask_modules,
        )
        # One prompt embedding row per (task, prompt position) pair.
        self.prompt_embedding = Embedding(
            vocab_size=config.prompt_types * config.prompt_length,
            embedding_size=config.dim_model,
            dtype=config.dtype,
            init_std=0.02,
        )
        self.segment_embedding = Embedding(
            vocab_size=config.segment_types,
            embedding_size=config.dim_model,
            dtype=config.dtype,
            init_std=0.02,
        )
        self.input_embedding = Embedding(
            vocab_size=config.vocab_size,
            embedding_size=config.dim_model,
            dtype=config.dtype,
            init_std=0.02,
        )
        self.position_bias = SegmentPositionEmbedding(
            num_heads=config.num_heads,
            num_segments=config.segment_types,
            num_buckets=config.position_bias_num_buckets,
            max_distance=config.position_bias_max_distance,
            bidirectional=True,
            dtype=config.dtype,
        )
        self.prompt_length = config.prompt_length
    def forward(
        self,
        input: torch.Tensor, # (batch, seqlen)
        length: torch.Tensor, # (batch)
        context: torch.Tensor, # (batch, seqlen)
        position: torch.Tensor, # (batch, seqlen)
        segment: torch.Tensor, # (batch, seqlen)
        span: torch.Tensor, # (batch, seqlen)
    ):
        """Full (non-incremental) pass over a prompt-prefixed sequence.

        The first ``self.prompt_length`` ids of ``input`` index the prompt
        embedding table; the rest index the word embedding table.  Returns
        ``(logits, hidden_states)``.
        """
        batch = input.size(0)
        seqlen = input.size(1)
        # Split prompt pseudo-tokens from ordinary token ids and embed each.
        input_prompt = input[:, : self.prompt_length].contiguous()
        input_ids = input[:, self.prompt_length :].contiguous()
        prompt_states = self.prompt_embedding(input_prompt)
        hidden_states = self.input_embedding(input_ids)
        segment_states = self.segment_embedding(segment)
        hidden_states = torch.cat([prompt_states, hidden_states], 1) + segment_states
        # Build the boolean attention mask; no gradients are needed for it.
        with torch.no_grad():
            device = input.device
            # directional_mask_2d[i, j] is True when j <= i (causal order).
            directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(
                seqlen, device=device
            ).view(-1, 1)
            # Context tokens attend bidirectionally; others causally.
            attention_mask = context[:, None, :] | (
                context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
            )
            # Restrict attention to token pairs within the same span.
            attention_mask = attention_mask & (span[:, None, :] == span[:, :, None])
            # Mask out positions at or beyond each sequence's length.
            mask_1d = (
                torch.arange(seqlen, device=device)[None, :].repeat(batch, 1) < length[:, None]
            )
            attention_mask = (
                mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
            )
        position_bias = self.position_bias(position, position, segment, segment)
        hidden_states = self.encoder(hidden_states, attention_mask, position_bias)
        logits = self.input_embedding.projection(hidden_states)
        return logits, hidden_states
    def inference(
        self,
        input: torch.Tensor, # (batch, seqlen)
        length: torch.Tensor, # (batch)
        context: torch.Tensor, # (batch, seqlen)
        position: torch.Tensor, # (batch, seqlen)
        segment: torch.Tensor, # (batch, seqlen)
        span: torch.Tensor, # (batch, seqlen)
        past_key_values=None, # num_layers * 2 * (batch, num_heads, seqlen, dim_head)
    ):
        """Incremental decoding step with an optional key/value cache.

        On the first call the whole prompt-prefixed sequence is embedded;
        subsequent calls embed only the newly appended token(s).  Returns
        ``(logits, hidden_states, present_key_values)``.
        """
        batch = input.size(0)
        if past_key_values is None:
            # First step: embed prompt + tokens, start with an empty cache.
            past_length = 0
            past_key_values = tuple([None] * self.encoder.num_layers)
            input_prompt = input[:, : self.prompt_length].contiguous()
            input_ids = input[:, self.prompt_length :].contiguous()
            prompt_states = self.prompt_embedding(input_prompt)
            hidden_states = self.input_embedding(input_ids)
            segment_states = self.segment_embedding(segment)
            hidden_states = torch.cat([prompt_states, hidden_states], 1) + segment_states
        else:
            # Later steps: only the new token(s); reuse the last segment state.
            past_length = past_key_values[0][0].size(-2)
            segment_states = self.segment_embedding(segment)
            hidden_states = self.input_embedding(input) + segment_states[:, -1:, :]
        seqlen = past_length + input.size(1)
        with torch.no_grad():
            device = input.device
            directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(
                seqlen, device=device
            ).view(-1, 1)
            attention_mask = context[:, None, :] | (
                context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
            )
            attention_mask = attention_mask & (span[:, None, :] == span[:, :, None])
            # mask for left padding: positions are counted from the right so
            # the first (seqlen - length) columns are padding.
            mask_1d = (
                torch.tensor(list(range(seqlen))[::-1], device=device)[None, :].repeat(batch, 1)
                < length[:, None]
            )
            attention_mask = (
                mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
            )
        position_bias = self.position_bias(position, position, segment, segment)
        # Keep only the query rows for the new (uncached) positions.
        attention_mask = attention_mask[:, past_length:, :]
        position_bias = position_bias[:, :, past_length:, :]
        hidden_states, present_key_values = self.encoder(
            hidden_states, attention_mask, position_bias, True, past_key_values
        )
        logits = self.input_embedding.projection(hidden_states)
        return logits, hidden_states, present_key_values
| 6,552 | 37.547059 | 96 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/training_tasks/ant/pretrain.py | # coding=utf-8
# Copyright 2020 The OpenBMB team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.utils.data as data
import random
import numpy as np
class CPMAntPretrainDataset(data.Dataset):
    """Streaming pretraining dataset for CPM-Ant.

    Pulls raw packed samples from ``ctx`` (a reader exposing ``read()``),
    builds per-segment masking targets according to each segment's task id,
    and prepends a task-specific prompt segment of ``prompt_length``
    pseudo-tokens.
    """

    def __init__(self, ctx, max_length=1024, prompt_length=32, tokenizer=None):
        self.ctx = ctx
        # Total model input length: raw-context budget plus the prompt.
        self.max_length = max_length + prompt_length
        self.prompt_length = prompt_length
        self.tokenizer = tokenizer

    def __len__(self):
        # NOTE(review): relies on the underlying reader supporting len();
        # __iter__ below streams forever regardless — confirm with callers.
        return len(self.ctx)

    @property
    def dataset(self):
        """The underlying raw-sample reader."""
        return self.ctx

    def __get_item_data(self, raw_data):
        """Decode one packed raw sample into model inputs.

        Layout of ``raw_data``:
        ``[global_task, n_segment, (len, type, task) * n_segment, token ids...]``.
        Returns ``(inp, tgt, length, context, position, segment, task)``, or a
        tuple of seven ``None`` when the sample exceeds the context budget.
        """
        global_task = raw_data[0]
        n_segment = raw_data[1]
        len_info = n_segment * 3 + 2
        segment_len = raw_data[2:len_info:3]
        segment_type = raw_data[3:len_info:3]
        segment_task = raw_data[4:len_info:3]
        ctx = raw_data[len_info:]
        if ctx.shape[0] > self.max_length - self.prompt_length:
            # Too long for the context budget: signal rejection to __iter__.
            return None, None, None, None, None, None, None
        len_ctx = min(ctx.shape[0], self.max_length - self.prompt_length)
        context_inp = np.full(len_ctx, True)
        position_inp = np.arange(len_ctx, dtype=np.int64)
        segment_inp = np.full(len_ctx, 0, dtype=np.int64)
        task_inp = np.full(len_ctx, 0, dtype=np.int64)
        tgt = np.full(len_ctx, -100, dtype=np.int64)
        # for each segment
        segment_begin = 0
        for i in range(n_segment):
            segment_end = segment_begin + segment_len[i]
            task = segment_task[i]
            # generate target
            if task == 0:
                # Random in-segment token masking (MLM-style).
                num_mask = random.randint(1, segment_len[i] - 1)
                mask_idx = (
                    np.random.choice(segment_len[i] - 1, num_mask, replace=False) + segment_begin
                )
                context_inp[mask_idx + 1] = False
                assert segment_type[i] == 1
            elif task == 1:
                # Mask a random-length suffix of the segment.
                num_mask = random.randint(1, segment_len[i] - 1)
                context_inp[segment_end - num_mask : segment_end] = False
                assert segment_type[i] == 2
            elif task == 3:
                # Generation task: everything after the first token is target.
                if segment_type[i] == 2:
                    context_inp[1:] = False
            elif task == 4:
                if segment_type[i] == 3:
                    context_inp[1:] = False
            task_inp[segment_begin:segment_end] = task
            segment_inp[segment_begin:segment_end] = segment_type[i]
            # Target is the next token wherever it is masked (non-context);
            # -100 positions are ignored by the loss.
            tgt[segment_begin : segment_end - 1] = np.where(
                context_inp[segment_begin + 1 : segment_end],
                -100,
                ctx[segment_begin + 1 : segment_end],
            )
            segment_begin = segment_end
        # prepend prompt segment
        context_inp = np.concatenate((np.full(self.prompt_length, True), context_inp))
        position_inp = np.concatenate(
            (
                np.arange(self.prompt_length, dtype=np.int64),
                position_inp + self.prompt_length,
            )
        )
        segment_inp = np.concatenate((np.full(self.prompt_length, 0, dtype=np.int64), segment_inp))
        task_inp = np.concatenate((np.full(self.prompt_length, 0, dtype=np.int64), task_inp))
        tgt = np.concatenate((np.full(self.prompt_length, -100, dtype=np.int64), tgt))
        # Prompt ids are offset by task so each task owns its own prompt rows.
        inp = np.concatenate(
            (
                np.arange(self.prompt_length, dtype=np.int64) + self.prompt_length * global_task,
                ctx,
            )
        )
        return inp, tgt, inp.shape[0], context_inp, position_inp, segment_inp, task_inp

    def __iter__(self):
        while True:
            ctx = self.ctx.read()
            (
                th_ctx,
                th_tgt,
                len_ctx,
                context_ctx,
                position_ctx,
                segment_ctx,
                task_ctx,
            ) = self.__get_item_data(ctx)
            if th_ctx is None:
                # Over-long sample was rejected by __get_item_data; skip it
                # instead of yielding a tuple of Nones (which would crash
                # downstream collation).
                continue
            yield th_ctx, th_tgt, len_ctx, context_ctx, position_ctx, segment_ctx, task_ctx
| 4,499 | 37.135593 | 99 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/training_tasks/bee/pretrain.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import multiprocessing
import os
from queue import Empty
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
from typing_extensions import TypedDict
from ...dataset import DistributedDataset
from ...tokenizers import CPMBeeTokenizer
from ...utils.config import load_dataset_config
import numpy as np
import time
from numpy.typing import NDArray
import torch
import bmtrain as bmt
import importlib.machinery
import importlib.util
import types
import random
class _MixedDatasetConfig(TypedDict):
    """Configuration plus runtime state for one dataset in the mixture."""

    weight: float
    path: str
    transforms: Union[List[Dict[str, Any]], str]
    task_name: str
    dataset_name: str
    incontext_weight: List[float]
    lines: int
    dataset: DistributedDataset
# A CPM-Bee input is an arbitrarily nested dict whose leaves are strings.
CPMBeeInputType = Union[str, Dict[str, "CPMBeeInputType"]]
class _DictTree(TypedDict):
    """Node of the tree built from a nested input dict during tokenization."""

    value: str
    children: List["_DictTree"]
    depth: int
    segment_id: int
    need_predict: bool
class _PrevExtTableStates(TypedDict):
    """Extension-token tables carried over between in-context samples."""

    ext_table: Dict[int, str]
    token_id_table: Dict[str, Dict[int, int]]
class _TransformFuncDict(TypedDict):
    """Cache entry for a transform script loaded from disk."""

    loader: importlib.machinery.SourceFileLoader
    module: types.ModuleType
    last_m: float
# Signature of a user transform: (data, num_incontext_samples, rng) -> data.
_TransformFunction = Callable[[CPMBeeInputType, int, random.Random], CPMBeeInputType]
class CPMBeeBatch(TypedDict):
    """One packed training batch as numpy arrays plus bookkeeping lists."""

    inputs: NDArray[np.int32]
    inputs_sub: NDArray[np.int32]
    length: NDArray[np.int32]
    context: NDArray[np.bool_]
    sample_ids: NDArray[np.int32]
    num_segments: NDArray[np.int32]
    segment_ids: NDArray[np.int32]
    segment_rel_offset: NDArray[np.int32]
    segment_rel: NDArray[np.int32]
    spans: NDArray[np.int32]
    target: NDArray[np.int32]
    ext_ids: NDArray[np.int32]
    ext_sub: NDArray[np.int32]
    task_ids: NDArray[np.int32]
    task_names: List[str]
    raw_data: List[Any]
def rel_to_bucket(n_up: int, n_down: int, max_depth: int = 8):
    """Map an (up, down) tree relation to a position-bias bucket id.

    The identity relation stays at bucket 0; every other relation is shifted
    by one because bucket 1 is reserved for in-context samples.
    """
    bucket = n_up * max_depth + n_down
    return bucket if bucket == 0 else bucket + 1
def convert_data_to_id(
    tokenizer: CPMBeeTokenizer,
    data: Any,
    prev_ext_states: Optional[_PrevExtTableStates] = None,
    shuffle_answer: bool = True,
    max_depth: int = 8,
):
    """Flatten a nested input dict into token ids plus segment metadata.

    Builds a tree whose nodes are the dict keys and leaves are the string
    values, assigns each node a segment id, tokenizes every segment, and
    encodes the pairwise tree relations into ``segment_rel`` buckets.
    Returns ``(ids, id_subs, context, segs, segment_rel, num_segments,
    ext_table_states)``.
    """
    root: _DictTree = {
        "value": "<root>",
        "children": [],
        "depth": 0,
        "segment_id": 0,
        "need_predict": False,
    }
    segments = [root]
    def _build_dict_tree(data: CPMBeeInputType, depth: int, need_predict: bool) -> List[_DictTree]:
        # Recursively turn the nested dict into _DictTree nodes, assigning
        # segment ids in visit order.
        if isinstance(data, dict):
            ret_list: List[_DictTree] = []
            curr_items = list(data.items())
            if need_predict and shuffle_answer:
                # Randomize answer-field order so the model does not learn it.
                access_idx = np.arange(len(curr_items))
                np.random.shuffle(access_idx)
                curr_items = [curr_items[idx] for idx in access_idx]
            for k, v in curr_items:
                child_info: _DictTree = {
                    "value": k,
                    "children": [],
                    "depth": depth,
                    "segment_id": len(segments),
                    "need_predict": False,  # only leaves are contexts
                }
                segments.append(child_info)
                child_info["children"] = _build_dict_tree(
                    v, depth + 1, need_predict or (depth == 1 and k == "<ans>")
                )  # elements in <root>.<ans>
                ret_list.append(child_info)
            return ret_list
        else:
            assert isinstance(data, str), "Invalid data {}".format(data)
            ret: _DictTree = {
                "value": data,
                "children": [],
                "depth": depth,
                "segment_id": len(segments),
                "need_predict": need_predict,
            }
            segments.append(ret)
            return [ret]
    root["children"] = _build_dict_tree(data, 1, False)
    num_segments = len(segments)
    segment_rel = np.zeros((num_segments * num_segments,), dtype=np.int32)
    def _build_segment_rel(node: _DictTree) -> List[Tuple[int, int]]:
        # Fill segment_rel with bucketed ancestor/descendant distances for
        # every pair of segments that share this node as common ancestor.
        ret: List[Tuple[int, int]] = [(node["segment_id"], node["depth"])]
        for child in node["children"]:
            sub = _build_segment_rel(child)
            for seg_id_1, depth_1 in sub:
                for seg_id_2, depth_2 in ret:
                    n_up = min(depth_1 - node["depth"], max_depth - 1)
                    n_down = min(depth_2 - node["depth"], max_depth - 1)
                    segment_rel[seg_id_1 * num_segments + seg_id_2] = rel_to_bucket(
                        n_up, n_down, max_depth=max_depth
                    )
                    segment_rel[seg_id_2 * num_segments + seg_id_1] = rel_to_bucket(
                        n_down, n_up, max_depth=max_depth
                    )
            ret.extend(sub)
        return ret
    _build_segment_rel(root)
    input_ids: List[int] = []
    input_id_subs: List[int] = []
    segment_bound: List[Tuple[int, int]] = []
    ext_table: Dict[int, str] = {}
    token_id_table: Dict[str, Dict[int, int]] = {}
    if prev_ext_states is not None:
        # Continue the extension tables from a previous in-context sample.
        ext_table = prev_ext_states["ext_table"]
        token_id_table = prev_ext_states["token_id_table"]
    for seg in segments:
        tokens, ext_table = tokenizer.encode(seg["value"], ext_table)
        token_id_subs = []
        reid_token_ids = []
        for idx in tokens:
            if idx in ext_table:
                # unk or special token: re-map it to a (base id, sub id) pair
                # so the vocabulary stays fixed.
                token = ext_table[idx]
                if token.startswith("<") and token.endswith(">"):
                    # special token
                    if "_" in token:
                        token_name = token[1:-1].split("_", maxsplit=1)[0]
                    else:
                        token_name = token[1:-1]
                    token_name = "<{}>".format(token_name)
                else:
                    token_name = "<unk>"
                if token_name not in token_id_table:
                    token_id_table[token_name] = {}
                if idx not in token_id_table[token_name]:
                    token_id_table[token_name][idx] = len(token_id_table[token_name])
                if token_name not in tokenizer.encoder:
                    raise ValueError("Invalid token {}".format(token))
                reid_token_ids.append(tokenizer.encoder[token_name])
                token_id_subs.append(token_id_table[token_name][idx])
            else:
                reid_token_ids.append(idx)
                token_id_subs.append(0)
        tokens = [tokenizer.bos_id] + reid_token_ids
        token_id_subs = [0] + token_id_subs
        if not seg["need_predict"]:
            tokens = tokens + [tokenizer.eos_id]
            token_id_subs = token_id_subs + [0]
        else:
            # no eos
            pass
        begin = len(input_ids)
        input_ids.extend(tokens)
        input_id_subs.extend(token_id_subs)
        end = len(input_ids)
        segment_bound.append((begin, end))
    ids = np.array(input_ids, dtype=np.int32)
    id_subs = np.array(input_id_subs, dtype=np.int32)
    segs = np.zeros((ids.shape[0],), dtype=np.int32)
    context = np.zeros((ids.shape[0],), dtype=np.int8)
    for i, (begin, end) in enumerate(segment_bound):
        if not segments[i]["need_predict"]:
            # Context (non-predicted) tokens are marked with 1.
            context[begin:end] = 1
        segs[begin:end] = i
    curr_ext_table_states: _PrevExtTableStates = {
        "ext_table": ext_table,
        "token_id_table": token_id_table,
    }
    return ids, id_subs, context, segs, segment_rel, num_segments, curr_ext_table_states
def _dataset_identity(c: _MixedDatasetConfig):
    """Unique key of a dataset config: ``<task_name>.<dataset_name>``."""
    task_name = c["task_name"]
    dataset_name = c["dataset_name"]
    return "{}.{}".format(task_name, dataset_name)
class _MixedDatasetBatchPacker:
    def __init__(
        self,
        batch_size: int,
        max_length: int,
        tokenizer: CPMBeeTokenizer,
        max_depth: int = 16,
    ) -> None:
        """Buffers tokenized instances and packs them into fixed-size batches.

        ``max_length`` caps each packed row; ``max_depth`` bounds the tree
        depth used when bucketing segment relations.
        """
        self._batch_size = batch_size
        self._max_length = max_length
        self._max_depth = max_depth
        self.tokenizer = tokenizer
        # Cache of loaded transform scripts, keyed by script path.
        self._transform_func_table: Dict[str, _TransformFuncDict] = {}
        # Per-instance buffers; index i of each list belongs to instance i.
        self._inputs: List[NDArray[np.int32]] = []
        self._inputs_sub: List[NDArray[np.int32]] = []
        self._context: List[NDArray[np.int8]] = []
        self._sample_ids: List[NDArray[np.int32]] = []
        self._segments: List[NDArray[np.int32]] = []
        self._num_segments: List[NDArray[np.int32]] = []
        self._segment_rel_offset: List[NDArray[np.int32]] = []
        self._segment_rel: List[NDArray[np.int32]] = []
        self._spans: List[List[int]] = []
        self._task_ids: List[List[str]] = []
        self._raw_data: List[List[Any]] = []
    def __len__(self):
        # Number of instances currently buffered, not yet packed into a batch.
        return len(self._inputs)
def apply_transform(
self,
data: CPMBeeInputType,
transform: Union[Dict[str, Any], Callable[[CPMBeeInputType], CPMBeeInputType], None],
) -> CPMBeeInputType:
if transform is None:
return data
if not isinstance(transform, dict):
# transform function
return transform(data)
mapping_list: List[Tuple[str, str]] = []
def _walk_transform_dict(data: Union[Dict[str, Any], str], prefix: str = ""):
if isinstance(data, dict):
for k, v in data.items():
if len(prefix) > 0:
_walk_transform_dict(v, prefix + "." + k)
else:
_walk_transform_dict(v, k)
else:
assert isinstance(data, str), "Invalid transform {}".format(data)
mapping_list.append((prefix, data))
_walk_transform_dict(transform)
expanded_mapping_list: List[Tuple[str, Any]] = []
def _expand_mapping(
data: CPMBeeInputType, stars: List[str], path: List[str], target: List[str]
):
if len(path) == 0:
num_stars = 0
for it in target:
if it == "*":
num_stars += 1
if num_stars != len(stars):
raise ValueError("Invalid transform {}".format(".".join(target)))
nw_tgt = []
num_stars = 0
for it in target:
if it == "*":
nw_tgt.append(stars[num_stars])
num_stars += 1
else:
nw_tgt.append(it)
expanded_mapping_list.append((".".join(nw_tgt), data))
else:
if not isinstance(data, dict):
raise ValueError("Invalid data {}".format(data))
if path[0] == "*":
for k, v in data.items():
_expand_mapping(v, stars + [k], path[1:], target)
else:
_expand_mapping(data[path[0]], stars, path[1:], target)
# expand mapping list
for tgt, src in mapping_list:
if src.startswith("$"):
# copy from src
_expand_mapping(data, [], src[1:].split("."), tgt.split("."))
else:
if "*" in tgt:
raise ValueError("Constant value is not allowed to have `*` in prefix")
expanded_mapping_list.append((tgt, src))
ret = {}
for tgt, val in expanded_mapping_list:
tgt = tgt.split(".")
cur = ret
while len(tgt) > 1:
cur = cur[tgt[0]]
tgt = tgt[1:]
cur[tgt[0]] = val
return ret
    def data_to_id(
        self,
        data: Any,
        prev_ext_states: Optional[_PrevExtTableStates] = None,
        shuffle_answer: bool = True,
    ):
        """Tokenize one nested sample with this packer's tokenizer/depth.

        Thin wrapper around :func:`convert_data_to_id`.
        """
        return convert_data_to_id(
            self.tokenizer, data, prev_ext_states, shuffle_answer, self._max_depth
        )
    def _ensure_transform_function(
        self, module_name: str, transform_script_path: str
    ) -> _TransformFunction:
        """Load (and hot-reload) the ``transform`` function from a script.

        The script is cached per path; it is re-executed whenever its mtime
        advances, so edits on disk take effect without a restart.  If the
        module defines no ``transform``, a stub that raises is returned.
        """
        module_name = "cpm_live.transforms.{}".format(module_name)
        if transform_script_path not in self._transform_func_table:
            loader = importlib.machinery.SourceFileLoader(module_name, transform_script_path)
            spec = importlib.util.spec_from_loader(loader.name, loader)
            if spec is None:
                raise RuntimeError("spec is none! {}".format(module_name))
            mod = importlib.util.module_from_spec(spec)
            # last_m = 0 forces the first exec_module below.
            self._transform_func_table[transform_script_path] = {
                "loader": loader,
                "module": mod,
                "last_m": 0,
            }
        transform_script_info = self._transform_func_table[transform_script_path]
        curr_m_time = float(
            transform_script_info["loader"].path_stats(transform_script_path)["mtime"]
        )
        if curr_m_time > transform_script_info["last_m"]:
            # Script changed on disk: re-execute it in the cached module.
            transform_script_info["last_m"] = curr_m_time
            transform_script_info["loader"].exec_module(transform_script_info["module"])
        transform_func = getattr(transform_script_info["module"], "transform", None)
        if transform_func is None:
            def _empty_transform_func(data: CPMBeeInputType, num_sample: int, r: random.Random):
                raise NotImplementedError(
                    "Transform func for dataset {} not implemented".format(module_name)
                )
            return _empty_transform_func
        else:
            return transform_func
    def build_instance(self, config: _MixedDatasetConfig):
        """Read one main sample plus in-context samples from a dataset.

        Draws the number of in-context samples from ``incontext_weight``,
        applies the dataset's transform, tokenizes everything with a shared
        extension table, and concatenates the samples into one instance
        (sample id 0 is the main sample, i + 1 the i-th in-context sample).
        """
        _sample_weight = np.array(config["incontext_weight"], dtype=np.float32)
        _sample_weight = _sample_weight / _sample_weight.sum()
        num_incontext = np.random.choice(_sample_weight.shape[0], p=_sample_weight)
        ds = config["dataset"]
        transforms = config["transforms"]
        if isinstance(transforms, str):
            # Transform is a script path: load it, retrying on failure so a
            # bad deploy does not kill the training loop.
            while True:
                try:
                    if not os.path.exists(transforms):
                        raise RuntimeError(
                            "transform script file {} not exists".format(transforms)
                        )
                    # load transform script
                    transform_func = self._ensure_transform_function(
                        _dataset_identity(config), transforms
                    )
                    seed = random.random()
                    break
                except Exception as e:
                    print(e)
                    time.sleep(10)
            def _transform(data: CPMBeeInputType):
                # Same seed for main and in-context samples of this instance.
                r = random.Random(seed)
                return transform_func(data, num_incontext, r)
            transform = _transform
        elif len(transforms) == 0:
            transform = None
        else:
            transform = transforms[np.random.choice(len(transforms))]
        raw_data = {}
        # Keep reading until a sample fits within max_length.
        while True:
            inp = ds.read()
            inp = self.apply_transform(inp, transform)
            (
                input_ids,
                input_id_subs,
                context,
                segment_ids,
                segment_rel,
                n_segments,
                table_states,
            ) = self.data_to_id(inp)
            if input_ids.shape[0] > self._max_length:
                # too long
                continue
            input_ids = input_ids[: self._max_length]
            context = context[: self._max_length]
            segment_ids = segment_ids[: self._max_length]
            raw_data["input"] = inp
            raw_data["samples"] = []
            break
        sample_ids = np.zeros(input_ids.shape, dtype=np.int32)
        segment_rel_offset = np.zeros(input_ids.shape, dtype=np.int32)
        num_segments = np.full(input_ids.shape, n_segments, dtype=np.int32)
        for i in range(num_incontext):
            if input_ids.shape[0] >= self._max_length:
                # early break
                break
            sample = ds.read()
            sample = self.apply_transform(sample, transform)
            # table_states is threaded through so extension tokens are shared
            # across the main and in-context samples.
            (
                sample_input_ids,
                sample_id_subs,
                _,
                sample_segments,
                sample_rel,
                n_segments,
                table_states,
            ) = self.data_to_id(sample, table_states)
            if input_ids.shape[0] + sample_input_ids.shape[0] > self._max_length:
                # too long, break
                break
            raw_data["samples"].append(sample)
            input_ids = np.concatenate([input_ids, sample_input_ids], axis=0)
            input_id_subs = np.concatenate([input_id_subs, sample_id_subs], axis=0)
            # In-context samples are always context (nothing to predict).
            context = np.concatenate(
                [context, np.ones(sample_input_ids.shape, dtype=np.int8)], axis=0
            )
            segment_ids = np.concatenate([segment_ids, sample_segments], axis=0)
            # Each sample's relations live at its own offset in segment_rel.
            segment_rel_offset = np.concatenate(
                [
                    segment_rel_offset,
                    np.full(sample_input_ids.shape, segment_rel.shape[0], dtype=np.int32),
                ],
                axis=0,
            )
            segment_rel = np.concatenate([segment_rel, sample_rel], axis=0)
            sample_ids = np.concatenate(
                [sample_ids, np.full(sample_input_ids.shape, i + 1, dtype=np.int32)], axis=0
            )
            num_segments = np.concatenate(
                [num_segments, np.full(sample_input_ids.shape, n_segments, dtype=np.int32)], axis=0
            )
        return (
            input_ids,
            input_id_subs,
            context,
            segment_ids,
            segment_rel_offset,
            segment_rel,
            sample_ids,
            num_segments,
            raw_data,
        )
    def pack_batch(self, force: bool = False) -> CPMBeeBatch:
        """Pack the first ``batch_size`` queued instances into one dense batch.

        Consumes the head of every per-instance queue (``self._inputs`` etc.)
        and right-pads each field to ``self._max_length``. Also builds the
        next-token prediction targets and the batch-level ext-vocab table.

        Args:
            force: if True, allow packing fewer than ``self._batch_size``
                instances (flushes the remaining tail); otherwise raise when
                not enough instances are queued.

        Returns:
            A ``CPMBeeBatch`` dict of numpy arrays plus ``task_names`` and
            ``raw_data`` lists.

        Raises:
            RuntimeError: fewer than ``self._batch_size`` instances are queued
                and ``force`` is False.
        """
        # pack batch
        if len(self._inputs) < self._batch_size:
            if not force:
                raise RuntimeError("Batch insufficient")
            batch_size = len(self._inputs)
        else:
            batch_size = self._batch_size
        # Allocate zero-padded per-token fields of shape (batch, max_length).
        inputs = np.zeros((batch_size, self._max_length), dtype=np.int32)
        inputs_sub = np.zeros((batch_size, self._max_length), dtype=np.int32)
        context = np.zeros((batch_size, self._max_length), dtype=np.int8)
        sample_ids = np.zeros((batch_size, self._max_length), dtype=np.int32)
        segments = np.zeros((batch_size, self._max_length), dtype=np.int32)
        num_segments = np.zeros((batch_size, self._max_length), dtype=np.int32)
        segment_rel_offset = np.zeros((batch_size, self._max_length), dtype=np.int32)
        # -100 marks positions with no target — presumably the loss's
        # ignore_index; confirm against the training loop.
        tgt = np.full((batch_size, self._max_length), -100, dtype=np.int32)
        # segment_rel is padded to the widest instance in this batch.
        max_rel = 0
        for i in range(batch_size):
            max_rel = max(max_rel, self._segment_rel[i].shape[0])
        segment_rel = np.zeros((batch_size, max_rel), dtype=np.int32)
        spans = np.zeros((batch_size, self._max_length), dtype=np.int32)
        length = np.zeros((batch_size,), dtype=np.int32)
        task_ids = np.zeros((batch_size, self._max_length), dtype=np.int32)
        # Collect the distinct task names of this batch and give each a
        # batch-local integer id.
        all_task_names: Set[str] = set()
        for i in range(batch_size):
            for task_name in self._task_ids[i]:
                all_task_names.add(task_name)
        task_names: List[str] = list(all_task_names)
        task_name_to_id = {name: i for i, name in enumerate(task_names)}
        # (token_id, sub_id) -> batch-local ext-table slot.
        batch_ext_table_map: Dict[Tuple[int, int], int] = {}
        batch_ext_table_ids: List[int] = []
        batch_ext_table_sub: List[int] = []
        raw_data_list: List[Any] = []
        for i in range(batch_size):
            instance_length = self._inputs[i].shape[0]
            rel_size = self._segment_rel[i].shape[0]
            inputs[i, :instance_length] = self._inputs[i]
            inputs_sub[i, :instance_length] = self._inputs_sub[i]
            context[i, :instance_length] = self._context[i]
            sample_ids[i, :instance_length] = self._sample_ids[i]
            segments[i, :instance_length] = self._segments[i]
            num_segments[i, :instance_length] = self._num_segments[i]
            segment_rel_offset[i, :instance_length] = self._segment_rel_offset[i]
            segment_rel[i, :rel_size] = self._segment_rel[i]
            # Label each token range with its span id and task id;
            # self._spans[i] holds cumulative span end offsets.
            span_begin = 0
            for span_id, (span_end, task_name) in enumerate(zip(self._spans[i], self._task_ids[i])):
                spans[i, span_begin:span_end] = span_id
                task_ids[i, span_begin:span_end] = task_name_to_id[task_name]
                span_begin = span_end
            length[i] = instance_length
            raw_data_list.extend(self._raw_data[i])
            for j in range(instance_length):
                idx, idx_sub = self._inputs[i][j], self._inputs_sub[i][j]
                tgt_idx = idx
                if idx_sub > 0:
                    # need to be in ext table
                    if (idx, idx_sub) not in batch_ext_table_map:
                        batch_ext_table_map[(idx, idx_sub)] = len(batch_ext_table_map)
                        batch_ext_table_ids.append(idx)
                        batch_ext_table_sub.append(idx_sub)
                    # Ext tokens are addressed past the base vocabulary.
                    tgt_idx = batch_ext_table_map[(idx, idx_sub)] + self.tokenizer.vocab_size
                # Next-token target: token j is the label of position j-1,
                # only where position j-1 is not context.
                if j > 1 and context[i, j - 1] == 0:
                    if idx != self.tokenizer.bos_id:
                        tgt[i, j - 1] = tgt_idx
                    else:
                        # A bos at j ends the previous answer span.
                        tgt[i, j - 1] = self.tokenizer.eos_id
            if context[i, instance_length - 1] == 0:
                tgt[i, instance_length - 1] = self.tokenizer.eos_id
        if len(batch_ext_table_map) == 0:
            # placeholder
            batch_ext_table_ids.append(0)
            batch_ext_table_sub.append(1)
        # Drop the consumed instances from every queue.
        self._inputs = self._inputs[batch_size:]
        self._inputs_sub = self._inputs_sub[batch_size:]
        self._context = self._context[batch_size:]
        self._sample_ids = self._sample_ids[batch_size:]
        self._segments = self._segments[batch_size:]
        self._num_segments = self._num_segments[batch_size:]
        self._segment_rel_offset = self._segment_rel_offset[batch_size:]
        self._segment_rel = self._segment_rel[batch_size:]
        self._spans = self._spans[batch_size:]
        self._task_ids = self._task_ids[batch_size:]
        self._raw_data = self._raw_data[batch_size:]
        return {
            "inputs": inputs,
            "inputs_sub": inputs_sub,
            "length": length,
            "context": context > 0,
            "sample_ids": sample_ids,
            "num_segments": num_segments,
            "segment_ids": segments,
            "segment_rel_offset": segment_rel_offset,
            "segment_rel": segment_rel,
            "spans": spans,
            "target": tgt,
            "ext_ids": np.array(batch_ext_table_ids, dtype=np.int32),
            "ext_sub": np.array(batch_ext_table_sub, dtype=np.int32),
            "task_ids": task_ids,
            "task_names": task_names,
            "raw_data": raw_data_list,
        }
    def add_data(self, config: _MixedDatasetConfig) -> Optional[CPMBeeBatch]:
        """Build one instance from ``config`` and pack it into the queue.

        The instance is merged into the open queued instance with the
        smallest remaining space that still fits it (best-fit bin packing);
        if none fits, a new queued instance is opened.

        Returns:
            A packed ``CPMBeeBatch`` once more than ``self._batch_size``
            instances are queued, otherwise None.
        """
        (
            input_ids,
            input_id_subs,
            context,
            segment_ids,
            segment_rel_offset,
            segment_rel,
            sample_ids,
            num_segments,
            raw_data,
        ) = self.build_instance(config)
        # add to batch
        # Best-fit search: pick the queued instance with the least leftover
        # room that can still hold the new tokens.
        best_fit: Union[None, int] = None
        best_fit_space: Union[None, int] = None
        for i in range(len(self._inputs)):
            space = self._max_length - self._inputs[i].shape[0]
            if input_ids.shape[0] <= space:
                if best_fit_space is None:
                    best_fit = i
                    best_fit_space = space
                elif best_fit_space > space:
                    best_fit = i
                    best_fit_space = space
        if best_fit is None:
            # add a new instance
            self._inputs.append(input_ids)
            self._inputs_sub.append(input_id_subs)
            self._context.append(context)
            self._sample_ids.append(sample_ids)
            self._segments.append(segment_ids)
            self._num_segments.append(num_segments)
            self._segment_rel_offset.append(segment_rel_offset)
            self._segment_rel.append(segment_rel)
            self._spans.append([input_ids.shape[0]])
            self._task_ids.append([config["task_name"]])
            self._raw_data.append([raw_data])
        else:
            # add to existing instance
            self._inputs[best_fit] = np.concatenate([self._inputs[best_fit], input_ids], axis=0)
            self._inputs_sub[best_fit] = np.concatenate(
                [self._inputs_sub[best_fit], input_id_subs], axis=0
            )
            self._context[best_fit] = np.concatenate([self._context[best_fit], context], axis=0)
            self._sample_ids[best_fit] = np.concatenate(
                [self._sample_ids[best_fit], sample_ids], axis=0
            )
            self._segments[best_fit] = np.concatenate(
                [self._segments[best_fit], segment_ids], axis=0
            )
            self._num_segments[best_fit] = np.concatenate(
                [self._num_segments[best_fit], num_segments], axis=0
            )
            # Offsets of the appended instance are shifted by the rel rows
            # already stored for this queued instance.
            self._segment_rel_offset[best_fit] = np.concatenate(
                [
                    self._segment_rel_offset[best_fit],
                    segment_rel_offset + self._segment_rel[best_fit].shape[0],
                ],
                axis=0,
            )
            self._segment_rel[best_fit] = np.concatenate(
                [self._segment_rel[best_fit], segment_rel], axis=0
            )
            self._spans[best_fit].append(self._inputs[best_fit].shape[0])
            self._task_ids[best_fit].append(config["task_name"])
            self._raw_data[best_fit].append(raw_data)
        # Note the strict >: at least one instance is kept queued so future
        # data can still be best-fit packed.
        if len(self._inputs) > self._batch_size:
            return self.pack_batch()
        else:
            # not ready
            return None
class _MixedDatasetConfigMananger:
    """Watches a dataset-mix config file and reloads it when its mtime grows.

    (The "Mananger" spelling is kept as-is; external code may reference it.)
    """

    def __init__(self, config_path: str) -> None:
        self._config_path: str = config_path
        # Parsed config; None until the first successful load.
        self._config: Union[List[_MixedDatasetConfig], None] = None
        # mtime of the last successfully loaded config file.
        self._last_m = 0

    def changed(self):
        """Return True iff a newer config file was loaded since the last call.

        Retries forever with a 30 s backoff while the file cannot be stat'ed;
        returns False when the file is unchanged or the new content fails to
        parse (keeping the previously loaded config).
        """
        while True:
            try:
                m_time = os.stat(self._config_path).st_mtime
                if m_time > self._last_m:
                    # try to load new config
                    try:
                        self._config = load_dataset_config(self._config_path)
                    except Exception as e:
                        # failed to load config
                        print(
                            "Error: load new config in changed, "
                            "self._config_path={path}, err={err}"
                            .format(path=self._config_path, err=str(e))
                        )
                        return False
                    # new config loaded
                    self._last_m = m_time
                    return True
                return False
            except Exception as e:
                print("Error: reading info list in _MixedDatasetConfigMananger.changed!, "
                      "self._config_path={path}, err={err}"
                      .format(path=self._config_path, err=str(e)))
                time.sleep(30)

    def get_config(self) -> List[_MixedDatasetConfig]:
        """Return the current config, loading it on first use.

        Raises:
            RuntimeError: the config could not be loaded at all.
        """
        if self._config is None:
            if not self.changed():
                raise RuntimeError("Failed to load config")
            if self._config is None:
                raise RuntimeError("Failed to load config")
        return self._config
def _mixed_dataset_process(
    config_path: str,
    q_cmd: multiprocessing.Queue,
    q_cmd_out: multiprocessing.Queue,
    q_data: multiprocessing.Queue,
    rank: int,
    world_size: int,
    packer: _MixedDatasetBatchPacker,
):
    """Worker-process loop that mixes datasets and emits packed batches.

    Runs in a child process (see ``MixedDataset``). It

    * hot-reloads the dataset-mix config whenever ``config_path`` changes;
    * serves commands from ``q_cmd`` (``start``/``stop``/``state_dict``/
      ``load_state_dict``), replying on ``q_cmd_out``;
    * repeatedly samples a dataset with probability proportional to
      ``weight * lines``, feeds one record to ``packer``, and pushes any
      completed batch onto ``q_data``;
    * drains ``q_data`` on exit so the parent's queue feeder can shut down.

    Args:
        config_path: path to the dataset-mix config file.
        q_cmd: command request queue (parent -> worker).
        q_cmd_out: command response queue (worker -> parent).
        q_data: bounded queue of packed batches consumed by the parent.
        rank: this worker's rank for ``DistributedDataset`` sharding.
        world_size: total number of ranks.
        packer: batch packer (transferred to this process by pickling).
    """
    # ignore SIGINT
    import signal

    signal.signal(signal.SIGINT, signal.SIG_IGN)
    config_base_path = os.path.dirname(os.path.abspath(config_path))

    def _convert_to_abs_path(transform_path: str):
        # Relative dataset/transform paths are resolved against the config dir.
        if transform_path.startswith("/"):
            return transform_path
        else:
            return os.path.join(config_base_path, transform_path)

    def _build_sample_weights(config: List[_MixedDatasetConfig]):
        # Sampling probability of a dataset is proportional to weight * lines.
        if len(config) == 0:
            return np.array([], dtype=np.float32)
        weights = [c["weight"] * c["lines"] for c in config]
        weights = np.array(weights, dtype=np.float32)
        sm_weight = weights.sum()
        if sm_weight > 0:
            weights = weights / sm_weight
            return weights
        else:
            raise RuntimeError("Empty datasets")

    cfg_mgr = _MixedDatasetConfigMananger(config_path)
    config = cfg_mgr.get_config()

    # Open every configured dataset and fill in defaults.
    for c in config:
        ds = DistributedDataset(
            _convert_to_abs_path(c["path"]),
            rank,
            world_size,
        )
        c["lines"] = ds._nlines
        c["dataset"] = ds
        if "weight" not in c:
            c["weight"] = 1.0
        if "transforms" not in c:
            c["transforms"] = []
        elif isinstance(c["transforms"], str):
            c["transforms"] = _convert_to_abs_path(c["transforms"])
        if "incontext_weight" not in c:
            c["incontext_weight"] = [1.0]
    weights = _build_sample_weights(config)

    should_stop = False
    should_start = False
    while not should_stop:
        # update config first
        if cfg_mgr.changed():
            path_ds_map: Dict[str, _MixedDatasetConfig] = {}
            nw_path_set: Set[str] = set()

            # load new config
            nw_config = cfg_mgr.get_config()

            # build path -> dataset map
            for c in config:
                path_ds_map[_dataset_identity(c)] = c

            # add new datasets
            for c in nw_config:
                if _dataset_identity(c) in path_ds_map:
                    # update values only
                    if "weight" in c:
                        path_ds_map[_dataset_identity(c)]["weight"] = c["weight"]
                    # BUGFIX: this previously tested `"transform" in c`, which
                    # never matched the actual config key "transforms", so
                    # transform updates were silently dropped on hot reload.
                    if "transforms" in c:
                        if isinstance(c["transforms"], str):
                            path_ds_map[_dataset_identity(c)]["transforms"] = _convert_to_abs_path(
                                c["transforms"]
                            )
                        else:
                            path_ds_map[_dataset_identity(c)]["transforms"] = c["transforms"]
                    if "incontext_weight" in c:
                        path_ds_map[_dataset_identity(c)]["incontext_weight"] = c[
                            "incontext_weight"
                        ]
                else:
                    # new dataset
                    ds = DistributedDataset(
                        _convert_to_abs_path(c["path"]),
                        rank,
                        world_size,
                    )
                    c["lines"] = ds._nlines
                    c["dataset"] = ds
                    if "weight" not in c:
                        c["weight"] = 1.0
                    if "transforms" not in c:
                        c["transforms"] = []
                    elif isinstance(c["transforms"], str):
                        c["transforms"] = _convert_to_abs_path(c["transforms"])
                    if "incontext_weight" not in c:
                        c["incontext_weight"] = [1.0]
                    path_ds_map[_dataset_identity(c)] = c
                nw_path_set.add(_dataset_identity(c))

            # remove unused datasets
            for c in config:
                if _dataset_identity(c) not in nw_path_set:
                    del path_ds_map[_dataset_identity(c)]

            # Rebuild config in the new file's order.
            config: List[_MixedDatasetConfig] = []
            for c in nw_config:
                config.append(path_ds_map[_dataset_identity(c)])
            del path_ds_map
            del nw_path_set
            del nw_config

            weights = _build_sample_weights(config)

        # get cmds — drain the command queue without blocking.
        while True:
            try:
                cmd = q_cmd.get_nowait()
            except Empty:
                break
            if cmd == "stop":
                should_stop = True
                q_cmd_out.put(True)
                break
            elif cmd == "state_dict":
                ret = OrderedDict()
                for c in config:
                    ds_name = _dataset_identity(c)
                    ret[ds_name] = c["dataset"]._state_dict()
                q_cmd_out.put(ret)
            elif cmd == "load_state_dict":
                # The payload follows the command on the same queue.
                state_dict = q_cmd.get()
                missing = []
                for c in config:
                    ds_name = _dataset_identity(c)
                    if ds_name in state_dict:
                        c["dataset"].load_state_dict(state_dict[ds_name], strict=False)
                    else:
                        # new dataset
                        missing.append(ds_name)
                q_cmd_out.put(missing)
            elif cmd == "start":
                should_start = True
                q_cmd_out.put(True)
            else:
                raise RuntimeError("Unknown command: {}".format(cmd))
        if should_stop:
            break

        if not should_start:
            # wait for start cmd
            time.sleep(1)
            continue

        if len(config) == 0:
            # no dataset available
            time.sleep(1)
            continue

        if q_data.full():
            # queue full
            time.sleep(1)
            continue

        # sample a dataset
        ds_id: int = 0
        while True:
            ds_id = np.random.choice(weights.shape[0], p=weights)
            if config[ds_id]["dataset"]._nlines != config[ds_id]["lines"]:
                # dataset size changed — refresh line counts and weights,
                # then resample.
                for c in config:
                    c["lines"] = c["dataset"]._nlines
                weights = _build_sample_weights(config)
                continue
            else:
                break

        batch = packer.add_data(config[ds_id])
        if batch is not None:
            # new batch comming
            q_data.put(batch)

    # clean queue
    while True:
        try:
            q_data.get_nowait()
        except Empty:
            break
class MixedDataset:
    """Parent-side handle for the mixed-dataset worker process.

    Spawns ``_mixed_dataset_process`` in a child process and talks to it over
    three queues: commands in, command replies out, and a bounded (maxsize=1)
    data queue of packed batches.
    """

    def __init__(
        self,
        config_path: str,
        batch_size: int,
        max_length: int,
        tokenizer: CPMBeeTokenizer,
        max_depth: int = 16,
    ) -> None:
        self._q_cmd = multiprocessing.Queue()
        self._q_cmd_out = multiprocessing.Queue()
        # maxsize=1 keeps the worker at most one batch ahead.
        self._q_data = multiprocessing.Queue(maxsize=1)
        self._packer = _MixedDatasetBatchPacker(batch_size, max_length, tokenizer, max_depth)
        self._p = multiprocessing.Process(
            target=_mixed_dataset_process,
            args=(
                config_path,
                self._q_cmd,
                self._q_cmd_out,
                self._q_data,
                bmt.rank(),
                bmt.world_size(),
                self._packer,
            ),
        )
        self._p.start()
        self._closed = False

    def close(self):
        """Ask the worker to stop and join it (idempotent)."""
        if not self._closed:
            self._closed = True
            self._q_cmd.put("stop")
            assert self._q_cmd_out.get(), "Failed to stop process"
            self._p.join()

    @property
    def closed(self):
        # True once close() has completed.
        return self._closed

    def start(self):
        """Tell the worker to begin producing batches."""
        self._q_cmd.put("start")
        return self._q_cmd_out.get()

    def state_dict(self):
        """Collect per-dataset read state, gathered across all ranks.

        With a single rank, each entry's "states"/"block" tensors gain a
        leading world dim in place; otherwise they are padded to the global
        max and all-gathered via bmt so every rank returns the same dict.
        """
        self._q_cmd.put("state_dict")
        states = self._q_cmd_out.get()
        if not isinstance(states, OrderedDict):
            raise RuntimeError("Invalid state dict {}".format(states))
        if bmt.world_size() == 1:
            for val in states.values():
                val["states"].unsqueeze_(0)
                val["block"].unsqueeze_(0)
            return states

        ret = OrderedDict()
        for k, v in states.items():
            num_unused_block = v["states"].size(0)
            gpu_num_unused_block = torch.tensor([num_unused_block], dtype=torch.long).cuda()
            max_unused_blocks = (
                bmt.distributed.all_reduce(gpu_num_unused_block, op="max").cpu().item()
            )
            if max_unused_blocks == 0:
                # keep at least one slot so all_gather has a nonempty tensor
                max_unused_blocks = 1
            # Pad this rank's states to the global max with -1 sentinels.
            gpu_states = torch.full((max_unused_blocks,), -1, dtype=torch.long).cuda()
            gpu_states[:num_unused_block] = v["states"].cuda()

            gpu_block = v["block"].cuda()
            global_states = bmt.distributed.all_gather(
                gpu_states
            ).cpu()  # (world_size, max_unused_blocks)
            global_block = bmt.distributed.all_gather(gpu_block).cpu()  # (world_size, 4)
            ret[k] = {"states": global_states, "block": global_block}
        return ret

    def load_state_dict(self, data: OrderedDict, strict: bool = False):
        """Restore per-dataset read state inside the worker.

        Returns the names of datasets missing from ``data``; raises when
        ``strict`` is True and any are missing.
        """
        self._q_cmd.put("load_state_dict")
        self._q_cmd.put(data)
        missing = self._q_cmd_out.get()
        if strict:
            if len(missing) > 0:
                raise RuntimeError("Missing dataset state: {}".format(missing))
        return missing

    def get(self) -> CPMBeeBatch:
        """Block until the worker produces the next packed batch."""
        ret: CPMBeeBatch = self._q_data.get()  # type: ignore
        if not isinstance(ret, dict):
            raise RuntimeError("Invalid data {}".format(ret))
        return ret

    def __iter__(self):
        # Infinite iterator over batches.
        while True:
            yield self.get()

    def __del__(self):
        # Best-effort cleanup of the worker process.
        if not self.closed:
            try:
                self.close()
            except Exception:
                pass
| 38,408 | 35.860845 | 100 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/embedding.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import math
import torch.nn.functional as F
from .position_embedding import RotaryEmbedding
from typing import Optional
class Embedding(torch.nn.Module):
    """Token embedding scaled by 1/sqrt(dim) with a tied output projection."""

    def __init__(
        self,
        vocab_size: int,
        embedding_size: int,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
    ):
        super().__init__()
        self.dim_model = embedding_size
        # Uninitialized weight; the caller is responsible for initialization.
        self.weight = torch.nn.parameter.Parameter(
            torch.empty(vocab_size, embedding_size, dtype=dtype)
        )

    def forward(self, ids: torch.Tensor):
        """Look up embeddings for ``ids``.

        Args:
            ids (:obj:`torch.Tensor` of shape ``(batch_size, seq_len)``): Token indices.

        Return:
            :obj:`torch.Tensor` of shape ``(batch_size, seq_len, embedding_size)``: Scaled embeddings.
        """
        scale = math.sqrt(self.dim_model)
        return F.embedding(ids, self.weight) / scale

    def projection(self, x: torch.Tensor):
        """Map hidden states back to vocabulary logits with the tied weight.

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_model)``): Hidden states.

        Returns:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, vocab_output_size)``: Logits.
        """
        scale = math.sqrt(self.dim_model)
        return F.linear(x / scale, self.weight)
class EmbeddingExt(torch.nn.Module):
    """Token embedding with rotary position information, plus a tied output
    projection that can be extended by an external vocabulary table."""

    def __init__(
        self,
        vocab_size: int,
        embedding_size: int,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
        distance_scale: int = 16,
    ):
        super().__init__()
        self.dim_model = embedding_size
        self.rotary_emb = RotaryEmbedding(
            dim=embedding_size, distance_scale=distance_scale, dtype=dtype
        )
        # Uninitialized weight; the caller is responsible for initialization.
        self.weight = torch.nn.parameter.Parameter(
            torch.empty(vocab_size, embedding_size, dtype=dtype),
        )

    def forward(self, ids: torch.Tensor, ids_sub: torch.Tensor):
        """Embed ``ids`` (scaled by 1/sqrt(dim_model)) and rotate by ``ids_sub``.

        Args:
            ids (:obj:`torch.Tensor` of shape ``(batch_size, seq_len)``): Token indices.
            ids_sub (:obj:`torch.Tensor` of shape ``(batch_size)``): Subscript positions.

        Return:
            :obj:`torch.Tensor` of shape ``(batch_size, seq_len, embedding_size)``: Rotated embeddings.
        """
        scale = math.sqrt(self.dim_model)
        token_embeds = F.embedding(ids, self.weight) / scale
        return self.rotary_emb(token_embeds, ids_sub)

    def projection(self, x: torch.Tensor, ext_table: Optional[torch.Tensor] = None):
        """Map hidden states to vocabulary logits with the tied weight.

        When ``ext_table`` is given, its logits are appended on the last dim;
        note they are computed without the 1/sqrt(dim_model) scaling (as in
        the original implementation).

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_model)``): Hidden states.
            ext_table (:obj:`torch.Tensor` of shape ``(ext_table_size, dim_model)``): Extra vocab rows.

        Returns:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, vocab_size + ext_table_size)``: Logits.
        """
        scale = math.sqrt(self.dim_model)
        logits = F.linear(x / scale, self.weight)
        if ext_table is None:
            return logits
        ext_logits = F.linear(x, ext_table)
        return torch.cat([logits, ext_logits], dim=-1)
| 4,165 | 36.531532 | 151 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/position_embedding.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Union
import torch
import torch.nn.functional as F
class SegmentPositionEmbedding(torch.nn.Module):
    """Relative position bias combining segment-pair buckets with T5-style
    relative-position buckets for tokens within the same segment."""

    def __init__(
        self,
        num_heads,
        num_segments=1,
        num_buckets=32,
        max_distance=128,
        bidirectional=False,
        dtype=torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
    ):
        super().__init__()

        self.num_heads = num_heads
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        self.bidirectional = bidirectional
        self.num_segments = num_segments

        # One bias row per (query_segment, key_segment) pair plus one per
        # relative-position bucket.
        self.relative_attention_bias = torch.nn.parameter.Parameter(
            torch.empty(num_segments * num_segments + num_buckets, num_heads, dtype=dtype)
        )

    def forward(
        self,
        key_pos: torch.Tensor,
        query_pos: torch.Tensor,
        key_segment: torch.Tensor,
        query_segment: torch.Tensor,
    ):
        """Compute a (batch, num_heads, len_q, len_k) additive attention bias.

        Same-segment pairs use relative-position buckets; cross-segment pairs
        use a bucket indexed by the (query_segment, key_segment) pair.
        """
        with torch.no_grad():

            batch = key_pos.size(0)
            keylen = key_pos.size(1)
            querylen = query_pos.size(1)

            assert key_pos.size(0) == query_pos.size(0)
            assert keylen == key_segment.size(1) and querylen == query_segment.size(1)

            key_pos = key_pos.view(batch, -1, keylen)
            query_pos = query_pos.view(batch, querylen, -1)
            key_segment = key_segment.view(batch, -1, keylen)
            query_segment = query_segment.view(batch, querylen, -1)

            relative_position_bucket = self._segment_relative_position_bucket(
                query_segment, key_segment
            )
            # Offset so segment buckets do not overlap the relative-position
            # bucket range.
            relative_position_bucket = relative_position_bucket + self.num_buckets

            # b*q*k
            absolute_position_bucket = self._position_bucket(
                torch.arange(keylen, dtype=torch.int32, device=relative_position_bucket.device)[
                    None, :
                ]
                - torch.arange(querylen, dtype=torch.int32, device=relative_position_bucket.device)[
                    :, None
                ],
                bidirectional=self.bidirectional,
                num_buckets=self.num_buckets,
                max_distance=self.max_distance,
            )
            # Same-segment pairs fall back to the position bucket.
            relative_position_bucket = torch.where(
                (key_segment == query_segment),
                absolute_position_bucket[None, :, :],
                relative_position_bucket,
            )
            # (batch, len_q, len_k)

        # (batch, len_q, len_k, num_heads)
        embeds = F.embedding(relative_position_bucket, self.relative_attention_bias)
        # (batch, num_heads, len_q, len_k)
        embeds = embeds.permute(0, 3, 1, 2).contiguous()
        return embeds

    def _segment_relative_position_bucket(self, query_segment, key_segment):
        # Flat index into the num_segments x num_segments table.
        return query_segment * self.num_segments + key_segment

    def _position_bucket(
        self, relative_position, bidirectional=True, num_buckets=32, max_distance=128
    ):
        # T5-style bucketing: exact buckets for small distances, log-spaced
        # buckets up to max_distance beyond that.
        relative_buckets = 0
        if bidirectional:
            num_buckets //= 2
            relative_buckets = (relative_position > 0).to(torch.int32) * num_buckets
            relative_position = torch.abs(relative_position)
        else:
            relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        relative_postion_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.int32)
        relative_postion_if_large = torch.min(
            relative_postion_if_large,
            torch.full_like(relative_postion_if_large, num_buckets - 1),
        )
        relative_buckets += torch.where(
            is_small, relative_position.to(torch.int32), relative_postion_if_large
        )
        return relative_buckets
class BucketPositionBias(torch.nn.Module):
    """Relative position bias driven by precomputed segment buckets.

    Pairs with ``rel_buckets == 0`` (same segment — presumably; confirm with
    the caller that builds ``rel_buckets``) use position-distance buckets,
    others use their precomputed segment bucket.
    """

    def __init__(
        self,
        num_heads: int,
        num_buckets: int = 32,
        num_segment_bucket: int = 32,
        max_distance: int = 128,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
    ) -> None:
        super().__init__()

        self.num_heads = num_heads
        self.num_buckets = num_buckets
        self.num_segment_bucket = num_segment_bucket
        self.max_distance = max_distance

        self.relative_attention_bias = torch.nn.parameter.Parameter(
            torch.empty(num_buckets + num_segment_bucket, num_heads, dtype=dtype)
        )

    def forward(
        self,
        query_pos: torch.Tensor,  # (batch, len_q)
        key_pos: torch.Tensor,  # (batch, len_k)
        rel_buckets: torch.Tensor,  # (batch, len_q, len_k)
    ):
        """Compute a (batch, num_heads, len_q, len_k) additive attention bias."""
        with torch.no_grad():

            batch = key_pos.size(0)
            keylen = key_pos.size(1)
            querylen = query_pos.size(1)

            assert key_pos.size(0) == query_pos.size(0)
            assert (
                rel_buckets.size(0) == batch
                and rel_buckets.size(1) == querylen
                and rel_buckets.size(2) == keylen
            )

            # Offset so segment buckets do not overlap the relative-position
            # bucket range (rel_buckets is 1-based here).
            relative_position_bucket = rel_buckets - 1 + self.num_buckets

            # b*q*k
            inner_segment_bucket = self._position_bucket(
                key_pos[..., None, :] - query_pos[..., :, None],
                num_buckets=self.num_buckets,
                max_distance=self.max_distance,
            )
            relative_position_bucket = torch.where(
                rel_buckets == 0,
                inner_segment_bucket,
                relative_position_bucket,
            )
            # (batch, len_q, len_k)

        # (batch, len_q, len_k, num_heads)
        embeds = F.embedding(relative_position_bucket, self.relative_attention_bias)
        # (batch, num_heads, len_q, len_k)
        embeds = embeds.permute(0, 3, 1, 2).contiguous()
        return embeds

    def _position_bucket(self, relative_position, num_buckets=32, max_distance=128):
        # Bidirectional T5-style bucketing: exact buckets for small distances,
        # log-spaced buckets up to max_distance beyond that.
        relative_buckets = 0
        num_buckets //= 2
        relative_buckets = (relative_position > 0).to(torch.int32) * num_buckets
        relative_position = torch.abs(relative_position)
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        relative_postion_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.int32)
        relative_postion_if_large = torch.min(
            relative_postion_if_large,
            torch.full_like(relative_postion_if_large, num_buckets - 1),
        )
        relative_buckets += torch.where(
            is_small, relative_position.to(torch.int32), relative_postion_if_large
        )
        return relative_buckets
class RotaryEmbedding(torch.nn.Module):
    """Rotary position embedding (RoPE).

    Rotates pairs of feature dimensions by an angle proportional to the
    position, so dot products between rotated vectors depend on relative
    position.

    Args:
        dim: feature dimension (must be even).
        base: base of the inverse-frequency geometric progression.
        distance_scale: multiplier applied to positions before rotation.
        dtype: dtype of the cached inverse frequencies.
        device: device for the cached inverse frequencies. Defaults to
            ``"cuda"`` for backward compatibility; pass ``"cpu"`` (or any
            device) to use the module without a GPU — previously the device
            was hard-coded to CUDA.
    """

    def __init__(
        self,
        dim,
        base=10000,
        distance_scale: Union[int, float] = 1,
        dtype: torch.dtype = torch.half,
        device: Union[str, torch.device] = "cuda",
    ):
        super().__init__()
        # Inverse frequency for each rotated dimension pair.
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim)
        )
        inv_freq = inv_freq.to(dtype)
        self.distance_scale = distance_scale
        self.dtype = dtype
        # Plain attribute (not a buffer) to keep state_dict unchanged.
        self.inv_freq = inv_freq

    def forward(self, x: torch.Tensor, x_pos: torch.Tensor):
        """
        Args:
            x (:obj:`torch.Tensor` of shape ``(..., dim)``): Inputs.
            x_pos (:obj:`torch.Tensor` of shape ``(...)``): Positions of inputs.
        """
        x_pos = x_pos * self.distance_scale
        freqs = x_pos[..., None].to(self.dtype) * self.inv_freq[None, :]  # (..., dim/2)

        # the same implementation as sat
        emb = torch.cat((freqs, freqs), dim=-1)  # (..., dim)
        emb_cos = emb.cos()  # (..., dim)
        emb_sin = emb.sin()  # (..., dim)

        # Pairwise rotation of (x[i], x[i + dim/2]).
        rotate_x = torch.cat(
            [-x[..., x.size(-1) // 2 :], x[..., : x.size(-1) // 2]], dim=-1
        )  # (..., dim)

        return x * emb_cos + rotate_x * emb_sin
| 8,848 | 34.681452 | 100 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/feedforward.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from .linear import Linear
class DenseGatedACT(torch.nn.Module):
    """Gated-GELU up-projection: ``GELU(W0 x) * (W1 x)``."""

    def __init__(
        self,
        dim_in: int,
        dim_ff: int,
        dtype=torch.half,
    ):
        super().__init__()
        # Gate branch.
        self.w_0 = Linear(
            dim_in=dim_in,
            dim_out=dim_ff,
            dtype=dtype,
            scale_before=False,
        )
        # Value branch.
        self.w_1 = Linear(
            dim_in=dim_in,
            dim_out=dim_ff,
            dtype=dtype,
            scale_before=False,
        )
        self.act = torch.nn.GELU()

    def forward(self, x: torch.Tensor):
        """Apply the gated nonlinear up-projection.

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_in)``): Input features.

        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, dim_ff)``: Gated features.
        """
        gate = self.act(self.w_0(x))
        value = self.w_1(x)
        return gate * value
class FeedForward(torch.nn.Module):
    r"""Gated feed-forward block: up-project with ``DenseGatedACT``, optional
    dropout, then down-project back to ``dim_model``.

    Args:
        dim_model (int): input/output dimension.
        dim_ff (int): hidden (middle) dimension.
        dtype (optional): Defaults to torch.half.
        dropout_p (float, optional): dropout probability; None disables dropout.
    """

    def __init__(
        self,
        dim_model: int,
        dim_ff: int,
        dtype=torch.half,
        dropout_p: Optional[float] = None,
    ):
        super().__init__()
        self.w_in = DenseGatedACT(
            dim_in=dim_model,
            dim_ff=dim_ff,
            dtype=dtype,
        )
        self.dropout = torch.nn.Dropout(dropout_p) if dropout_p is not None else None
        self.w_out = Linear(
            dim_in=dim_ff,
            dim_out=dim_model,
            dtype=dtype,
            scale_before=False,
        )

    def forward(self, x: torch.Tensor):
        """Apply the feed-forward transform.

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_in)``): Input features.

        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, dim_out)``: Output features.
        """
        hidden = self.w_in(x)
        if self.dropout is not None:
            hidden = self.dropout(hidden)
        return self.w_out(hidden)
| 3,676 | 29.38843 | 176 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/layernorm.py | import torch
@torch.jit.script  # type: ignore
def rms_layernorm(hidden: torch.Tensor, weight: torch.Tensor, eps: float):
    """RMS-normalize ``hidden`` over its last dim, then scale by ``weight``.

    The mean square is accumulated in float32 for numerical stability and the
    normalized result is cast back to the input dtype before the gain.
    """
    input_dtype = hidden.dtype
    mean_square = hidden.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
    normalized = (hidden * torch.rsqrt(mean_square + eps)).to(input_dtype)
    return normalized * weight
class LayerNorm(torch.nn.Module):
    """Root-mean-square layer normalization with a learnable gain vector."""

    def __init__(
        self,
        dim_norm: int,
        dtype: torch.dtype = torch.half,
        eps: float = 1e-6,
        init_var: float = 1.0,
    ):
        super().__init__()
        gain = torch.full((dim_norm,), init_var, dtype=dtype)
        self.weight = torch.nn.parameter.Parameter(gain)
        self.dim_norm = dim_norm
        self.eps = eps

    def forward(self, x: torch.Tensor):
        """RMS-normalize the trailing dimension of ``x``.

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch_size, seq_len, dim_norm)``): Input to normalize.

        Return:
            :obj:`torch.Tensor` of shape ``(batch_size, seq_len, dim_norm)``: Normalized output.
        """
        assert x.size(-1) == self.dim_norm
        return rms_layernorm(x, self.weight, self.eps)
| 1,156 | 29.447368 | 122 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/linear.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import math
import torch.nn.functional as F
class Linear(torch.nn.Module):
    """Bias-free linear layer with a 1/sqrt(dim_in) scale factor.

    Depending on ``scale_before`` the factor is applied to the input before
    the matmul or to the output after it — mathematically equivalent but
    numerically different in low precision, hence the explicit branch.
    """

    def __init__(
        self,
        dim_in: int,
        dim_out: int,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
        scale_before: bool = False,
    ):
        super().__init__()
        self.dim_in = self.in_features = dim_in
        self.dim_out = self.out_features = dim_out
        self.scale_before = scale_before
        # Uninitialized weight; the caller is responsible for initialization.
        self.weight = torch.nn.parameter.Parameter(torch.empty((dim_out, dim_in), dtype=dtype))

    def forward(self, x: torch.Tensor):
        """Compute ``y = (x @ W.T) / sqrt(dim_in)``.

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_in)``): Input features.

        Returns:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, dim_out)``: Transformed output.
        """
        scale = math.sqrt(self.dim_in)
        if self.scale_before:
            return F.linear(x / scale, self.weight)
        out = F.linear(x, self.weight)
        return out / scale
| 1,721 | 32.115385 | 109 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/transformer.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Optional, List, Tuple
from .blocks import TransformerBlock
from .layernorm import LayerNorm
class Encoder(torch.nn.Module):
    """A stack of ``num_layers`` transformer blocks followed by a final RMS layernorm.

    Args:
        num_layers (int): number of layers.
        dim_model (int): main dimension of modules in transformer blocks.
        dim_ff (int): dim_ff used in :py:class:`model_center.layer.FeedForward`.
        num_heads (int): num_heads used in :py:class:`model_center.layer.Attention`.
        dim_head (int): dim_head used in :py:class:`model_center.layer.Attention`.
        dtype (optional): Defaults to torch.half.
        eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-6.
        dropout_p (float, optional): Defaults to 0.
        mask_modules (optional): per-layer ``(mask_att, mask_ffn)`` flags that
            disable the attention / feed-forward sub-block of that layer.
    """  # noqa: E501

    def __init__(
        self,
        num_layers: int,
        dim_model: int,
        dim_ff: int,
        num_heads: int,
        dim_head: int,
        dtype: torch.dtype = torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = None,
        mask_modules: Optional[List[Tuple[bool, bool]]] = None,
    ):
        super().__init__()
        self.num_layers = num_layers

        if mask_modules is None:
            # No masking requested: every layer keeps both sub-blocks.
            mask_modules = [(False, False)] * num_layers
        else:
            assert (
                len(mask_modules) == num_layers
            ), "The total number of masks should equal to num_layers"
            for mask_module in mask_modules:
                assert (
                    len(mask_module) == 2
                ), "For encoder, each mask should be (mask_att, mask_ffn)"

        self.layers = torch.nn.ModuleList(
            [
                TransformerBlock(
                    dim_model=dim_model,
                    dim_ff=dim_ff,
                    num_heads=num_heads,
                    dim_head=dim_head,
                    dtype=dtype,
                    eps=eps,
                    dropout_p=dropout_p,
                    mask_att=flags[0],
                    mask_ffn=flags[1],
                )
                for flags in mask_modules
            ]
        )
        self.output_layernorm = LayerNorm(dim_norm=dim_model, dtype=dtype, eps=eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        position_bias: torch.Tensor,
        use_cache: bool = False,
        past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
    ):
        """Run the full encoder stack.

        Args:
            hidden_states (:obj:`torch.Tensor` of shape ``(batch, seq_enc, dim_model)``): Input of encoder, might be the embedding of a batch of sequences.
            attention_mask (:obj:`torch.Tensor` of shape ``(batch, seq_enc, seq_enc)``): Avoid invalid areas to participate in the calculation.
            position_bias (:obj:`torch.Tensor` of shape ``(num_heads, seq_enc, seq_enc)``): Provides position information to attention mechanism.
            use_cache (bool): when True, run without gradients and also return per-layer key/value caches.
            past_key_values: per-layer ``(key, value)`` caches from previous decoding steps, or None.
        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_enc, dim_model)``: The encoder output
            (plus the list of per-layer caches when ``use_cache`` is True).
        """  # noqa: E501
        if not use_cache:
            for block in self.layers:
                hidden_states = block(hidden_states, attention_mask, position_bias)
            return self.output_layernorm(hidden_states)

        # Incremental decoding path: gradient-free, collects per-layer KV caches.
        with torch.no_grad():
            current_key_values = []
            for idx, block in enumerate(self.layers):
                past = past_key_values[idx] if past_key_values else None
                hidden_states, present = block(
                    hidden_states,
                    attention_mask,
                    position_bias,
                    past_key_value=past,
                    use_cache=use_cache,
                )
                current_key_values.append(present)
            hidden_states = self.output_layernorm(hidden_states)
            return hidden_states, current_key_values
| 4,852 | 37.515873 | 155 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/attention.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
import math
from .linear import Linear
class Attention(torch.nn.Module):
    """Multi-head attention with an additive position bias and bias-free projections."""

    def __init__(
        self,
        dim_model: int,
        num_heads: int,
        dim_head: int,
        dtype: torch.dtype = torch.half,
        dropout_p: Optional[float] = None,
    ) -> None:
        super().__init__()

        self.dim_model = dim_model
        self.num_heads = num_heads
        self.dim_head = dim_head

        inner_dim = self.num_heads * self.dim_head
        self.project_q = Linear(self.dim_model, inner_dim, dtype=dtype)
        self.project_k = Linear(self.dim_model, inner_dim, dtype=dtype)
        self.project_v = Linear(self.dim_model, inner_dim, dtype=dtype)
        self.attention_out = Linear(inner_dim, self.dim_model, dtype=dtype)

        self.softmax = torch.nn.Softmax(dim=-1)
        self.dropout = torch.nn.Dropout(p=dropout_p) if dropout_p is not None else None

    def forward(
        self,
        hidden_q: torch.Tensor,
        hidden_kv: torch.Tensor,
        attention_mask: torch.BoolTensor,
        position_bias: torch.Tensor,
        use_cache: bool = False,
        past_kv: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ):
        """
        Args:
            hidden_q (:obj:`torch.Tensor` of shape ``(batch, len_q, dim_model)``): Query-side hidden states.
            hidden_kv (:obj:`torch.Tensor` of shape ``(batch, len_k, dim_model)``): Key/value-side hidden states.
            attention_mask (:obj:`torch.Tensor` of shape ``(batch, len_q, len_k)``): Used to avoid performing attention on padding token indices.
            position_bias (:obj:`torch.Tensor` of shape ``(num_heads, len_q, len_k)`` or ``(1, num_heads, len_q, len_k)``): Additive positional bias.
            use_cache (bool): when True, also return the (possibly extended) key/value cache.
            past_kv: cached ``(key, value)`` from previous decoding steps, concatenated before the new keys/values.
        Return:
            :obj:`torch.Tensor` of shape ``(batch, len_q, dim_model)``: The attention output.
        """  # noqa: E501
        batch_size = hidden_q.size(0)
        len_q = hidden_q.size(1)
        len_k = hidden_kv.size(1)

        # Project and split heads: (b, len, n_h * d_h) -> (b, n_h, len, d_h).
        q = (
            self.project_q(hidden_q)
            .view(batch_size, len_q, self.num_heads, self.dim_head)
            .permute(0, 2, 1, 3)
        )
        k = (
            self.project_k(hidden_kv)
            .view(batch_size, len_k, self.num_heads, self.dim_head)
            .permute(0, 2, 1, 3)
        )
        v = (
            self.project_v(hidden_kv)
            .view(batch_size, len_k, self.num_heads, self.dim_head)
            .permute(0, 2, 1, 3)
        )

        if past_kv is not None:
            # Prepend cached keys/values along the sequence axis.
            k = torch.cat([past_kv[0], k], dim=-2)
            v = torch.cat([past_kv[1], v], dim=-2)
            len_k = k.size(-2)

        # (b, n_h, len_q, d_h) @ (b, n_h, d_h, len_k) -> (b, n_h, len_q, len_k)
        score = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.dim_head)
        score = score + position_bias

        mask = attention_mask.view(batch_size, 1, len_q, len_k)
        score = torch.masked_fill(
            score,
            mask == False,  # noqa: E712
            torch.scalar_tensor(float("-inf"), device=score.device, dtype=score.dtype),
        )
        score = self.softmax(score)
        # Zero masked positions again after the softmax so they contribute
        # nothing to the weighted sum below.
        score = torch.masked_fill(
            score,
            mask == False,  # noqa: E712
            torch.scalar_tensor(0, device=score.device, dtype=score.dtype),
        )
        if self.dropout is not None:
            score = self.dropout(score)

        # (b, n_h, len_q, len_k) @ (b, n_h, len_k, d_h) -> (b, n_h, len_q, d_h)
        score = torch.matmul(score, v)
        # Merge heads back: (b, n_h, len_q, d_h) -> (b, len_q, n_h * d_h).
        score = (
            score.permute(0, 2, 1, 3)
            .contiguous()
            .view(batch_size, len_q, self.num_heads * self.dim_head)
        )
        score = self.attention_out(score)

        if use_cache:
            return score, (k, v)
        return score
| 4,604 | 38.025424 | 191 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/native_layers/blocks.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from .layernorm import LayerNorm
from .attention import Attention
from .feedforward import FeedForward
class SelfAttentionBlock(torch.nn.Module):
    """The whole self-attention block. A sequence of operations. Consists of layernorm, self-attention, optional dropout and a scaled residual connection.

    Args:
        dim_model (int): main dimension of modules in transformer blocks.
        num_heads (int): num_heads used in :py:class:`model_center.layer.Attention`.
        dim_head (int): dim_head used in :py:class:`model_center.layer.Attention`.
        dtype (optional): Defaults to torch.half.
        eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-6.
        dropout_p (float, optional): Defaults to 0.
    """  # noqa: E501

    def __init__(
        self,
        dim_model: int,
        num_heads: int,
        dim_head: int,
        dtype=torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = None,
    ):
        super().__init__()
        # Pre-norm layout: normalization is applied before the attention.
        self.layernorm_before_attention = LayerNorm(
            dim_model,
            dtype=dtype,
            eps=eps,
        )
        self.self_attention = Attention(
            dim_model=dim_model,
            num_heads=num_heads,
            dim_head=dim_head,
            dtype=dtype,
            dropout_p=dropout_p,
        )
        if dropout_p:
            self.dropout = torch.nn.Dropout(dropout_p)
        else:
            self.dropout = None

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        position_bias: Optional[torch.Tensor] = None,
        use_cache: bool = False,
        past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ):
        """
        Args:
            hidden_states (:obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``): Input of self-attention block. It can be the embedding of a batch of sequences.
            attention_mask (:obj:`torch.Tensor` of shape ``(batch, seq_self, seq_self)``): Avoid invalid areas to participate in the calculation.
            position_bias (:obj:`torch.Tensor` of shape ``(num_heads, seq_self, seq_self)``): Provide positional information to self-attention block.
            use_cache (bool): when True, also return the updated ``(key, value)`` cache from the attention.
            past_key_value: cached ``(key, value)`` tensors from previous decoding steps, or None.
        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``: The output of attention block.
        """  # noqa: E501
        x = self.layernorm_before_attention(hidden_states)
        x = self.self_attention(x, x, attention_mask, position_bias, use_cache, past_key_value)
        if use_cache:
            # Attention returned (output, (key, value)); keep the cache for the caller.
            x, current_key_value = x
        else:
            current_key_value = None
        if self.dropout is not None:
            x = self.dropout(x)
        # Scaled residual: the sum is divided by 1.05 (part of the model's
        # training recipe — presumably for activation stability; do not change
        # without matching the trained checkpoints).
        hidden_states = (hidden_states + x) / 1.05
        if use_cache:
            return hidden_states, current_key_value
        else:
            return hidden_states
class FFNBlock(torch.nn.Module):
    """Pre-norm feed-forward sub-block: layernorm -> FFN -> (dropout) -> scaled residual.

    Args:
        dim_model (int): main dimension of modules in transformer blocks.
        dim_ff (int): dim_ff used in :py:class:`model_center.layer.FeedForward`.
        dtype (optional): Defaults to torch.half.
        eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-6.
        dropout_p (float, optional): Defaults to 0.
    """  # noqa: E501

    def __init__(
        self,
        dim_model: int,
        dim_ff: int,
        dtype=torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = 0,
    ):
        super().__init__()
        self.layernorm_before_ffn = LayerNorm(
            dim_model,
            dtype=dtype,
            eps=eps,
        )
        self.ffn = FeedForward(
            dim_model,
            dim_ff,
            dtype=dtype,
            dropout_p=dropout_p,
        )
        self.dropout = torch.nn.Dropout(dropout_p) if dropout_p else None

    def forward(
        self,
        hidden_states: torch.Tensor,
    ):
        """
        Args:
            hidden_states (:obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``): Hidden states before feed forward layer.
        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``: The output of feed-forward block
        """  # noqa: E501
        out = self.ffn(self.layernorm_before_ffn(hidden_states))
        if self.dropout is not None:
            out = self.dropout(out)
        # Scaled residual connection (sum divided by 1.05, matching the
        # attention block's residual scaling).
        return (hidden_states + out) / 1.05
class TransformerBlock(torch.nn.Module):
    """The whole transformer block. A sequence of operations. Consists of a self-attention block and a feed-forward block, either of which can be masked out entirely.

    Args:
        dim_model (int): main dimension of modules in transformer blocks.
        dim_ff (int): dim_ff used in :py:class:`model_center.layer.FeedForward`.
        num_heads (int): num_heads used in :py:class:`model_center.layer.Attention`.
        dim_head (int): dim_head used in :py:class:`model_center.layer.Attention`.
        dtype (optional): Defaults to torch.half.
        eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-6.
        dropout_p (float, optional): Defaults to 0.
        mask_att (bool, optional): skip (do not even construct) the self-attention sub-block. Defaults to False.
        mask_ffn (bool, optional): skip (do not even construct) the feed-forward sub-block. Defaults to False.
    """  # noqa: E501

    def __init__(
        self,
        dim_model: int,
        dim_ff: int,
        num_heads: int,
        dim_head: int,
        dtype=torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = None,
        mask_att: bool = False,
        mask_ffn: bool = False,
    ):
        super().__init__()
        self.mask_att = mask_att
        self.mask_ffn = mask_ffn

        # Sub-blocks are only instantiated when not masked, so masked layers
        # carry no parameters at all.
        if not self.mask_att:
            self.self_att = SelfAttentionBlock(
                dim_model=dim_model,
                num_heads=num_heads,
                dim_head=dim_head,
                dtype=dtype,
                eps=eps,
                dropout_p=dropout_p,
            )
        if not self.mask_ffn:
            self.ffn = FFNBlock(
                dim_model=dim_model,
                dim_ff=dim_ff,
                dtype=dtype,
                eps=eps,
                dropout_p=dropout_p,
            )

    def forward(
        self,
        self_hidden_states: torch.Tensor,
        self_attention_mask: torch.Tensor,
        self_position_bias: Optional[torch.Tensor] = None,
        use_cache: bool = False,
        past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ):
        """
        Args:
            self_hidden_states (:obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``): Input of transformer block(self-attention block). It can be the raw embedding of a batch of sequences.
            self_attention_mask (:obj:`torch.Tensor` of shape ``(batch, seq_self, seq_self)``): Avoid invalid areas to participate in the calculation of self-attention.
            self_position_bias (:obj:`torch.Tensor` of shape ``(num_heads, seq_self, seq_self)``): Provide positional information to self-attention block.
            use_cache (bool): when True, also return the attention's ``(key, value)`` cache (None if the attention is masked).
            past_key_value: cached ``(key, value)`` tensors from previous decoding steps, or None.
        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``: The output of transformer block.
        """  # noqa: E501
        # (batch, dim_model, seq_self)
        current_key_value = None
        if not self.mask_att:
            hidden_states = self.self_att(
                self_hidden_states,
                attention_mask=self_attention_mask,
                position_bias=self_position_bias,
                use_cache=use_cache,
                past_key_value=past_key_value,
            )
            if use_cache:
                hidden_states, current_key_value = hidden_states
        else:
            # Attention masked out: pass the input straight through.
            hidden_states = self_hidden_states

        # (batch, dim_model, seq_self)
        if not self.mask_ffn:
            hidden_states = self.ffn(hidden_states)

        if use_cache:
            return hidden_states, current_key_value
        else:
            return hidden_states
| 8,723 | 34.036145 | 198 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/embedding.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
import bmtrain as bmt
import math
import torch.nn.functional as F
from .position_embedding import RotaryEmbedding
class Embedding(bmt.DistributedModule):
    """Token embedding with tied input/output weights; lookups are scaled by ``1/sqrt(dim)``."""

    def __init__(
        self,
        vocab_size: int,
        embedding_size: int,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
    ):
        super().__init__()

        self.dim_model = embedding_size
        self.weight = bmt.DistributedParameter(
            torch.empty(vocab_size, embedding_size, dtype=dtype),
            init_method=bmt.ParameterInitializer(
                torch.nn.init.normal_, mean=init_mean, std=init_std
            ),
        )

    def forward(self, ids: torch.Tensor):
        """
        Args:
            ids (:obj:`torch.Tensor` of shape ``(batch_size, seq_len)``): Indices of input sequence tokens.
        Return:
            :obj:`torch.Tensor` of shape ``(batch_size, seq_len, embedding_size)``: The embedding output.
        """  # noqa: E501
        scale = math.sqrt(self.dim_model)
        return F.embedding(ids, self.weight) / scale

    def projection(self, x: torch.Tensor):
        """
        Projection based on embedding's weight. For example, embedding maps vocab_size to embed_size; projection maps embed_size back to vocab_size.

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_model)``): Input of projection
        Returns:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, vocab_output_size)``: The projection output.
        """  # noqa: E501
        return F.linear(x / math.sqrt(self.dim_model), self.weight)
class EmbeddingExt(bmt.DistributedModule):
    """Token embedding combined with rotary position encoding applied to the lookup output."""

    def __init__(
        self,
        vocab_size: int,
        embedding_size: int,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
        distance_scale: int = 16,
    ):
        super().__init__()

        self.dim_model = embedding_size
        self.rotary_emb = RotaryEmbedding(
            dim=embedding_size, distance_scale=distance_scale, dtype=dtype
        )
        self.weight = bmt.DistributedParameter(
            torch.empty(vocab_size, embedding_size, dtype=dtype),
            init_method=bmt.ParameterInitializer(
                torch.nn.init.normal_, mean=init_mean, std=init_std
            ),
        )

    def forward(self, ids: torch.Tensor, ids_sub: torch.Tensor):
        """
        Args:
            ids (:obj:`torch.Tensor` of shape ``(batch_size, seq_len)``): Indices of input sequence tokens.
            ids_sub (:obj:`torch.Tensor` of shape ``(batch_size)``): Subscript of input sequence tokens.
        Return:
            :obj:`torch.Tensor` of shape ``(batch_size, seq_len, embedding_size)``: The embedding output.
        """  # noqa: E501
        scaled = F.embedding(ids, self.weight) / math.sqrt(self.dim_model)
        return self.rotary_emb(scaled, ids_sub)

    def projection(self, x: torch.Tensor, ext_table: Optional[torch.Tensor] = None):
        """
        Projection based on embedding's weight. For example, embedding maps vocab_size to embed_size; projection maps embed_size back to vocab_size.

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_model)``): Input of projection
            ext_table (:obj:`torch.Tensor` of shape ``(ext_table_size, dim_model)``): Ext vocab table.
        Returns:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, vocab_size + ext_table_size)``: The projection output.
        """  # noqa: E501
        logits = F.linear(x / math.sqrt(self.dim_model), self.weight)
        if ext_table is None:
            return logits
        # Extended-vocabulary logits are computed without the 1/sqrt(dim) scale,
        # matching the original implementation.
        return torch.cat([logits, F.linear(x, ext_table)], dim=-1)
| 4,458 | 36.788136 | 151 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/position_embedding.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Union
import torch
import bmtrain as bmt
import torch.nn.functional as F
class SegmentPositionEmbedding(bmt.DistributedModule):
    """Learned relative attention bias combining segment-pair buckets with
    T5-style relative-position buckets.

    Positions within the same segment use the relative-position buckets
    (first ``num_buckets`` rows of the table); cross-segment pairs use one
    bucket per (query_segment, key_segment) pair, offset by ``num_buckets``.
    """

    def __init__(
        self,
        num_heads: int,
        num_segments: int = 1,
        num_buckets: int = 32,
        max_distance: int = 128,
        bidirectional: bool = False,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
    ):
        super().__init__()

        self.num_heads = num_heads
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        self.bidirectional = bidirectional
        self.num_segments = num_segments

        # One row per segment pair plus one row per relative-position bucket.
        self.relative_attention_bias = bmt.DistributedParameter(
            torch.empty(num_segments * num_segments + num_buckets, num_heads, dtype=dtype),
            init_method=bmt.ParameterInitializer(
                torch.nn.init.normal_, mean=init_mean, std=init_std
            ),
        )

    def forward(
        self,
        key_pos: torch.Tensor,
        query_pos: torch.Tensor,
        key_segment: torch.Tensor,
        query_segment: torch.Tensor,
    ):
        # Bucket computation is index arithmetic only — no gradients needed.
        with torch.no_grad():

            batch = key_pos.size(0)
            keylen = key_pos.size(1)
            querylen = query_pos.size(1)

            assert key_pos.size(0) == query_pos.size(0)
            assert keylen == key_segment.size(1) and querylen == query_segment.size(1)

            # Reshape for broadcasting: queries along dim 1, keys along dim 2.
            key_pos = key_pos.view(batch, -1, keylen)
            query_pos = query_pos.view(batch, querylen, -1)
            key_segment = key_segment.view(batch, -1, keylen)
            query_segment = query_segment.view(batch, querylen, -1)

            relative_position_bucket = self._segment_relative_position_bucket(
                query_segment, key_segment
            )
            # Offset segment buckets so they do not overlap the
            # relative-position bucket range [0, num_buckets).
            relative_position_bucket = relative_position_bucket + self.num_buckets

            # b*q*k
            absolute_position_bucket = self._position_bucket(
                torch.arange(keylen, dtype=torch.int32, device=relative_position_bucket.device)[
                    None, :
                ]
                - torch.arange(querylen, dtype=torch.int32, device=relative_position_bucket.device)[
                    :, None
                ],
                bidirectional=self.bidirectional,
                num_buckets=self.num_buckets,
                max_distance=self.max_distance,
            )
            # Same-segment pairs fall back to the relative-position buckets.
            relative_position_bucket = torch.where(
                (key_segment == query_segment),
                absolute_position_bucket[None, :, :],
                relative_position_bucket,
            )
            # (batch, len_q, len_k)

        # (batch, len_q, len_k, num_heads)
        embeds = F.embedding(relative_position_bucket, self.relative_attention_bias)
        # (batch, num_heads, len_q, len_k)
        embeds = embeds.permute(0, 3, 1, 2).contiguous()
        return embeds

    def _segment_relative_position_bucket(self, query_segment, key_segment):
        # Flatten the (query_segment, key_segment) pair to a single bucket id.
        return query_segment * self.num_segments + key_segment

    def _position_bucket(
        self, relative_position, bidirectional=True, num_buckets=32, max_distance=128
    ):
        # T5-style bucketing: exact buckets for small distances, logarithmic
        # buckets up to max_distance beyond that.
        relative_buckets = 0
        if bidirectional:
            # Half of the buckets encode the sign of the offset.
            num_buckets //= 2
            relative_buckets = (relative_position > 0).to(torch.int32) * num_buckets
            relative_position = torch.abs(relative_position)
        else:
            relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        relative_postion_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.int32)
        # Clamp so distances beyond max_distance share the last bucket.
        relative_postion_if_large = torch.min(
            relative_postion_if_large,
            torch.full_like(relative_postion_if_large, num_buckets - 1),
        )
        relative_buckets += torch.where(
            is_small, relative_position.to(torch.int32), relative_postion_if_large
        )
        return relative_buckets
class BucketPositionBias(bmt.DistributedModule):
    """Learned relative attention bias driven by caller-supplied segment buckets.

    ``rel_buckets == 0`` marks within-segment pairs, which are re-bucketed by
    T5-style relative positions; non-zero entries are used directly (shifted by
    ``num_buckets - 1`` so the two ranges do not collide).
    """

    def __init__(
        self,
        num_heads: int,
        num_buckets: int = 32,
        num_segment_bucket: int = 32,
        max_distance: int = 128,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
    ) -> None:
        super().__init__()

        self.num_heads = num_heads
        self.num_buckets = num_buckets
        self.num_segment_bucket = num_segment_bucket
        self.max_distance = max_distance

        # One row per relative-position bucket plus one per segment bucket.
        self.relative_attention_bias = bmt.DistributedParameter(
            torch.empty(num_buckets + num_segment_bucket, num_heads, dtype=dtype),
            init_method=bmt.ParameterInitializer(
                torch.nn.init.normal_, mean=init_mean, std=init_std
            ),
        )

    def forward(
        self,
        query_pos: torch.Tensor,  # (batch, len_q)
        key_pos: torch.Tensor,  # (batch, len_k)
        rel_buckets: torch.Tensor,  # (batch, len_q, len_k)
    ):
        # Bucket computation is index arithmetic only — no gradients needed.
        with torch.no_grad():
            batch = key_pos.size(0)
            keylen = key_pos.size(1)
            querylen = query_pos.size(1)

            assert key_pos.size(0) == query_pos.size(0)
            assert (
                rel_buckets.size(0) == batch
                and rel_buckets.size(1) == querylen
                and rel_buckets.size(2) == keylen
            )

            # Shift caller-supplied segment buckets past the
            # relative-position bucket range so the two do not overlap.
            relative_position_bucket = rel_buckets - 1 + self.num_buckets

            # b*q*k
            inner_segment_bucket = self._position_bucket(
                key_pos[..., None, :] - query_pos[..., :, None],
                num_buckets=self.num_buckets,
                max_distance=self.max_distance,
            )
            # rel_buckets == 0 marks within-segment pairs: use position buckets.
            relative_position_bucket = torch.where(
                rel_buckets == 0,
                inner_segment_bucket,
                relative_position_bucket,
            )
            # (batch, len_q, len_k)

        # (batch, len_q, len_k, num_heads)
        embeds = F.embedding(relative_position_bucket, self.relative_attention_bias)
        # (batch, num_heads, len_q, len_k)
        embeds = embeds.permute(0, 3, 1, 2).contiguous()
        return embeds

    def _position_bucket(self, relative_position, num_buckets=32, max_distance=128):
        # T5-style bidirectional bucketing: half the buckets encode the sign,
        # exact buckets for small distances, logarithmic beyond.
        relative_buckets = 0
        num_buckets //= 2
        relative_buckets = (relative_position > 0).to(torch.int32) * num_buckets
        relative_position = torch.abs(relative_position)
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        relative_postion_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.int32)
        # Clamp so distances beyond max_distance share the last bucket.
        relative_postion_if_large = torch.min(
            relative_postion_if_large,
            torch.full_like(relative_postion_if_large, num_buckets - 1),
        )
        relative_buckets += torch.where(
            is_small, relative_position.to(torch.int32), relative_postion_if_large
        )
        return relative_buckets
class RotaryEmbedding(bmt.DistributedModule):
    """Rotary position embedding (RoPE) applied to an input tensor.

    NOTE(review): ``inv_freq`` is created on ``device="cuda"`` unconditionally,
    so constructing this module requires a CUDA device — confirm before using
    on CPU-only setups.
    """

    def __init__(
        self,
        dim,
        base=10000,
        distance_scale: Union[int, float] = 1,
        dtype: torch.dtype = torch.half,
    ):
        super().__init__()
        # Standard RoPE inverse frequencies: base^(-2i/dim) for i in [0, dim/2).
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, device="cuda", dtype=torch.float32) / dim)
        )
        inv_freq = inv_freq.to(dtype)
        self.distance_scale = distance_scale
        self.dtype = dtype
        # Plain tensor attribute (not a registered buffer/parameter).
        self.inv_freq = inv_freq

    def forward(self, x: torch.Tensor, x_pos: torch.Tensor):
        """
        Args:
            x (:obj:`torch.Tensor` of shape ``(..., dim)``): Inputs.
            x_pos (:obj:`torch.Tensor` of shape ``(...)``): Positions of inputs.
        """
        x_pos = x_pos * self.distance_scale
        freqs = x_pos[..., None].to(self.dtype) * self.inv_freq[None, :]  # (..., dim/2)

        # the same implementation as sat
        emb = torch.cat((freqs, freqs), dim=-1)  # (..., dim)
        emb_cos = emb.cos()  # (..., dim)
        emb_sin = emb.sin()  # (..., dim)

        # Pair each element with the one half-dim away (negated) to realize
        # the 2D rotation.
        rotate_x = torch.cat(
            [-x[..., x.size(-1) // 2 :], x[..., : x.size(-1) // 2]], dim=-1
        )  # (..., dim)

        return x * emb_cos + rotate_x * emb_sin
| 9,197 | 35.070588 | 100 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/feedforward.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
import bmtrain as bmt
from .linear import Linear
class DenseGatedACT(bmt.DistributedModule):
    """Gated-GELU input projection: ``GELU(w_0 x) * (w_1 x)``."""

    def __init__(
        self,
        dim_in: int,
        dim_ff: int,
        dtype=torch.half,
    ):
        super().__init__()

        self.w_0 = Linear(
            dim_in=dim_in,
            dim_out=dim_ff,
            dtype=dtype,
            scale_before=False,
        )
        self.w_1 = Linear(
            dim_in=dim_in,
            dim_out=dim_ff,
            dtype=dtype,
            scale_before=False,
        )
        self.act = torch.nn.GELU()

    def forward(self, x: torch.Tensor):
        """Transform an input tensor from one feature space to another via a
        gated nonlinear operation.

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_in)``): Tensor that will be subject to nonlinear operations.
        Return:
            out (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_ff)``)
        """  # noqa: E501
        gate = self.act(self.w_0(x))
        return gate * self.w_1(x)
class FeedForward(bmt.DistributedModule):
    r"""FeedForward module: gated-GELU up-projection, optional dropout, then a
    down-projection back to the model dimension.

    Args:
        dim_model (int): input/output dimension.
        dim_ff (int): middle (hidden) dimension.
        dtype (optional): Defaults to torch.half.
        dropout_p (float, optional): dropout probability applied between the
            two projections. Defaults to None (no dropout).
    """  # noqa: E501

    def __init__(
        self,
        dim_model: int,
        dim_ff: int,
        dtype=torch.half,
        dropout_p: Optional[float] = None,
    ):
        super().__init__()

        self.w_in = DenseGatedACT(
            dim_in=dim_model,
            dim_ff=dim_ff,
            dtype=dtype,
        )
        self.dropout = torch.nn.Dropout(dropout_p) if dropout_p is not None else None
        self.w_out = Linear(
            dim_in=dim_ff,
            dim_out=dim_model,
            dtype=dtype,
            scale_before=False,
        )

    def forward(self, x: torch.Tensor):
        """
        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_in)``): The input of feed-forward module.
        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, dim_out)``: The output of feed-forward module.
        """  # noqa: E501
        hidden = self.w_in(x)
        if self.dropout is not None:
            hidden = self.dropout(hidden)
        return self.w_out(hidden)
| 3,770 | 29.658537 | 176 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/layernorm.py | import torch
import bmtrain as bmt
@torch.jit.script  # type: ignore
def rms_layernorm(hidden: torch.Tensor, weight: torch.Tensor, eps: float):
    """Scale ``hidden`` by the reciprocal root-mean-square of its last
    dimension (computed in fp32 for stability), then multiply by ``weight``."""
    input_dtype = hidden.dtype
    mean_square = hidden.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
    normed = (hidden * torch.rsqrt(mean_square + eps)).to(input_dtype)
    return normed * weight
class LayerNorm(bmt.DistributedModule):
    """RMS LayerNorm: no mean subtraction and no bias, only a learned scale.

    Args:
        dim_norm (int): size of the normalized (last) dimension.
        dtype (optional): parameter dtype. Defaults to torch.half.
        eps (float, optional): numerical-stability epsilon. Defaults to 1e-6.
        init_var (float, optional): initial value of the scale weight. Defaults to 1.0.
    """

    def __init__(
        self,
        dim_norm: int,
        dtype: torch.dtype = torch.half,
        eps: float = 1e-6,
        init_var: float = 1.0,
    ):
        super().__init__()

        self.eps = eps
        self.dim_norm = dim_norm
        self.weight = bmt.DistributedParameter(torch.full((dim_norm,), init_var, dtype=dtype))

    def forward(self, x: torch.Tensor):
        """
        Args:
            x (:obj:`torch.Tensor` of shape ``(batch_size, seq_len, dim_norm)``): Input tensor that need to be normalized.
        Return:
            :obj:`torch.Tensor` of shape ``(batch_size, seq_len, dim_norm)``: The layernorm output.
        """  # noqa: E501
        assert x.size(-1) == self.dim_norm
        return rms_layernorm(x, self.weight, self.eps)
| 1,180 | 29.282051 | 122 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/linear.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import bmtrain as bmt
import math
import torch.nn.functional as F
class Linear(bmt.DistributedModule):
    """Bias-free linear layer whose output is scaled by ``1/sqrt(dim_in)``."""

    def __init__(
        self,
        dim_in: int,
        dim_out: int,
        dtype: torch.dtype = torch.half,
        init_mean: float = 0.0,
        init_std: float = 1,
        scale_before: bool = False,
    ):
        super().__init__()
        self.dim_in = self.in_features = dim_in
        self.dim_out = self.out_features = dim_out
        self.scale_before = scale_before

        self.weight = bmt.DistributedParameter(
            torch.empty((dim_out, dim_in), dtype=dtype),
            init_method=bmt.ParameterInitializer(
                torch.nn.init.normal_, mean=init_mean, std=init_std
            ),
        )

    def forward(self, x: torch.Tensor):
        """Apply the projection ``y = (x @ W^T) / sqrt(dim_in)``, with the
        scale applied before or after the matmul per ``self.scale_before``.

        Args:
            x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_in)``): The input of linear layer
        Returns:
            :obj:`torch.Tensor` of shape ``(batch, seq_len, dim_out)``: The output of the linear transform y.
        """  # noqa: E501
        scale = math.sqrt(self.dim_in)
        if self.scale_before:
            return F.linear(x / scale, self.weight)
        return F.linear(x, self.weight) / scale
| 1,901 | 31.793103 | 109 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/transformer.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import bmtrain as bmt
from typing import Optional, List, Tuple
from .blocks import TransformerBlock
from .layernorm import LayerNorm
class Encoder(bmt.DistributedModule):
    """Layers of encoder transformer blocks plus a final layernorm.
    Args:
        num_layers (int): number of layers.
        dim_model (int): main dimension of modules in transformer blocks.
        dim_ff (int): dim_ff used in :py:class:`model_center.layer.FeedForward`.
        num_heads (int): num_heads used in :py:class:`model_center.layer.Attention`.
        dim_head (int): dim_head used in :py:class:`model_center.layer.Attention`.
        dtype (optional): Defaults to torch.half.
        eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-6.
        dropout_p (float, optional): Defaults to 0.
        mask_modules (optional): per-layer (mask_att, mask_ffn) flags; a masked
            sub-module is skipped entirely inside its TransformerBlock.
    """  # noqa: E501
    def __init__(
        self,
        num_layers: int,
        dim_model: int,
        dim_ff: int,
        num_heads: int,
        dim_head: int,
        dtype: torch.dtype = torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = None,
        mask_modules: Optional[List[Tuple[bool, bool]]] = None,
    ):
        super().__init__()
        self.num_layers = num_layers
        # Validate (or default) the per-layer sub-module masking configuration.
        if mask_modules is not None:
            assert (
                len(mask_modules) == num_layers
            ), "The total number of masks should equal to num_layers"
            for mask_module in mask_modules:
                assert (
                    len(mask_module) == 2
                ), "For encoder, each mask should be (mask_att, mask_ffn)"
        else:
            mask_modules = [(False, False)] * num_layers
        # Each block is wrapped in a CheckpointBlock for activation checkpointing
        # under bmtrain's distributed pipeline.
        self.layers = bmt.TransformerBlockList(
            [
                bmt.CheckpointBlock(
                    TransformerBlock(
                        dim_model=dim_model,
                        dim_ff=dim_ff,
                        num_heads=num_heads,
                        dim_head=dim_head,
                        dtype=dtype,
                        eps=eps,
                        dropout_p=dropout_p,
                        mask_att=mask_modules[ith][0],
                        mask_ffn=mask_modules[ith][1],
                    )
                )
                for ith in range(num_layers)
            ]
        )
        self.output_layernorm = LayerNorm(dim_norm=dim_model, dtype=dtype, eps=eps)
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        position_bias: torch.Tensor,
        use_cache: bool = False,
        past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
    ):
        """
        Args:
            hidden-states (:obj:`torch.Tensor` of shape ``(batch, seq_enc, dim_model)``): Input of encoder, might be the embedding of a batch of sequences.
            attention_mask (:obj:`torch.Tensor` of shape ``(batch, seq_enc, seq_enc)``): Avoid invalid areas to participate in the calculation
            position_bias(:obj:`torch.Tensor` of shape ``(num_heads, seq_enc, seq_enc)``) Provides position information to attention mechanism.
            use_cache (bool): when True, also collect each layer's (key, value)
                cache for incremental decoding.
            past_key_values: per-layer caches from previous decoding steps.
        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_enc, dim_model)``: The encoder output.
        """  # noqa: E501
        if not use_cache:
            # Fast path: run the fused/checkpointed block list in one call.
            hidden_states = self.layers(hidden_states, attention_mask, position_bias)
            hidden_states = self.output_layernorm(hidden_states)
            return hidden_states
        else:
            # Incremental decoding path: iterate layer by layer so each layer's
            # (key, value) cache can be threaded in and collected.
            with torch.no_grad():
                current_key_values = []
                for i, module in enumerate(self.layers):
                    hidden_states = module(
                        hidden_states,
                        attention_mask,
                        position_bias,
                        past_key_value=past_key_values[i] if past_key_values else None,
                        use_cache=use_cache,
                    )
                    if use_cache:
                        # Block returns (hidden_states, current_key_value) when caching.
                        current_key_values.append(hidden_states[1])
                        hidden_states = hidden_states[0]
                hidden_states = self.output_layernorm(hidden_states)
                if use_cache:
                    return hidden_states, current_key_values
                else:
                    return hidden_states
| 4,948 | 37.664063 | 155 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/attention.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
import bmtrain as bmt
import math
from .linear import Linear
class Attention(bmt.DistributedModule):
    """Multi-head attention with an externally supplied additive position bias.

    Args:
        dim_model (int): hidden size of the model.
        num_heads (int): number of attention heads.
        dim_head (int): dimension of each attention head.
        dtype (torch.dtype, optional): parameter dtype. Defaults to torch.half.
        dropout_p (float, optional): attention dropout probability; None disables dropout.
    """

    def __init__(
        self,
        dim_model: int,
        num_heads: int,
        dim_head: int,
        dtype: torch.dtype = torch.half,
        dropout_p: Optional[float] = None,
    ) -> None:
        super().__init__()
        self.dim_model = dim_model
        self.num_heads = num_heads
        self.dim_head = dim_head
        # Separate q/k/v projections plus the output projection.
        self.project_q = Linear(self.dim_model, self.num_heads * self.dim_head, dtype=dtype)
        self.project_k = Linear(self.dim_model, self.num_heads * self.dim_head, dtype=dtype)
        self.project_v = Linear(self.dim_model, self.num_heads * self.dim_head, dtype=dtype)
        self.attention_out = Linear(self.num_heads * self.dim_head, self.dim_model, dtype=dtype)
        self.softmax = torch.nn.Softmax(dim=-1)
        if dropout_p is not None:
            self.dropout = torch.nn.Dropout(p=dropout_p)
        else:
            self.dropout = None

    def forward(
        self,
        hidden_q: torch.Tensor,
        hidden_kv: torch.Tensor,
        attention_mask: torch.BoolTensor,
        position_bias: torch.Tensor,
        use_cache: bool = False,
        past_kv: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ):
        """This model inherits from bmt.DistributedModule.
        Args:
            hidden_q (:obj:`torch.Tensor` of shape ``(batch, len_q, dim_model)``): Indices of input sequence tokens. It will be embedded by model's internal embedding lookup matrix.
            hidden_kv (:obj:`torch.Tensor` of shape ``(batch, len_k, dim_model)``): Length of input sequence before padding.
            attention_mask (:obj:`torch.Tensor` of shape ``(batch, len_q, len_k)``): Used to avoid performing attention on padding token indices; True/1 marks valid positions.
            position_bias(:obj:`torch.Tensor` of shape ``(num_heads, len_q, len_k)`` or ``(1, num_heads, len_k, len_q)``): Provide positional information about tensor `key_value` and `query`.
            use_cache (bool): when True, also return ``(h_k, h_v)`` for incremental decoding.
            past_kv: cached ``(key, value)`` tensors from previous decoding steps.
        Return:
            out (:obj:`torch.Tensor` of shape ``(batch, len_q, dim_model)``): The attention output.
        """  # noqa: E501
        batch_size = hidden_q.size(0)
        len_q = hidden_q.size(1)
        len_k = hidden_kv.size(1)
        # Scale q and k each by dim_head**-0.25 so that q @ k^T carries the usual
        # 1/sqrt(dim_head) factor while keeping intermediates small (fp16-friendly).
        h_q = self.project_q(hidden_q) / math.sqrt(math.sqrt(self.dim_head))
        h_k = self.project_k(hidden_kv) / math.sqrt(math.sqrt(self.dim_head))
        h_v = self.project_v(hidden_kv)
        # (batch, len, n_heads * d_head) -> (batch, n_heads, len, d_head)
        h_q = h_q.view(batch_size, len_q, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
        h_k = h_k.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
        h_v = h_v.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
        if past_kv is not None:
            # Prepend cached keys/values for incremental decoding.
            h_k = torch.cat([past_kv[0], h_k], dim=-2)
            h_v = torch.cat([past_kv[1], h_v], dim=-2)
            len_k = h_k.size(-2)
        # (b, n_h, len_q, d_h) @ (b, n_h, d_h, len_k) -> (b, n_h, len_q, len_k)
        score = torch.matmul(
            h_q, h_k.transpose(-1, -2)
        )  # 1/sqrt(dim_head) was already folded into h_q and h_k above
        score = score + position_bias
        # Compute the inverted mask once and reuse it. torch.logical_not gives the
        # same result as the old `mask == False` for both bool and 0/1 integer
        # masks (avoids the E712 elementwise comparison against a Python bool).
        invalid_mask = torch.logical_not(attention_mask.view(batch_size, 1, len_q, len_k))
        score = torch.masked_fill(
            score,
            invalid_mask,
            torch.scalar_tensor(float("-inf"), device=score.device, dtype=score.dtype),
        )
        score = self.softmax(score)
        # Re-mask after softmax: a row whose positions are all masked is all-NaN
        # (softmax over all -inf), so the masked entries must be zeroed again.
        score = torch.masked_fill(
            score,
            invalid_mask,
            torch.scalar_tensor(0, device=score.device, dtype=score.dtype),
        )
        if self.dropout is not None:
            score = self.dropout(score)
        # (b, n_h, len_q, len_k) @ (b, n_h, len_k, d_h) -> (b, n_h, len_q, d_h)
        score = torch.matmul(score, h_v)
        # Merge heads back into (batch, len_q, n_heads * d_head).
        score = score.view(batch_size, self.num_heads, len_q, self.dim_head).permute(0, 2, 1, 3)
        score = score.contiguous().view(batch_size, len_q, self.num_heads * self.dim_head)
        score = self.attention_out(score)
        if use_cache:
            return score, (h_k, h_v)
        else:
            return score
| 4,800 | 38.677686 | 191 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/layers/blocks.py | # coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
import bmtrain as bmt
from .layernorm import LayerNorm
from .attention import Attention
from .feedforward import FeedForward
class SelfAttentionBlock(bmt.DistributedModule):
    """The whole cross-attention block. A sequence of operation. Consists of layernorm, self-attention and residual connection.
    Args:
        dim_model (int): main dimension of modules in transformer blocks.
        num_heads (int): num_heads used in :py:class:`model_center.layer.Attention`.
        dim_head (int): dim_head used in :py:class:`model_center.layer.Attention`.
        dtype (optional): Defaults to torch.half.
        eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-5.
        dropout_p (float, optional): Defaults to 0.
    """  # noqa: E501
    def __init__(
        self,
        dim_model: int,
        num_heads: int,
        dim_head: int,
        dtype=torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = None,
    ):
        super().__init__()
        # Pre-LayerNorm architecture: normalize before the attention.
        self.layernorm_before_attention = LayerNorm(
            dim_model,
            dtype=dtype,
            eps=eps,
        )
        self.self_attention = Attention(
            dim_model=dim_model,
            num_heads=num_heads,
            dim_head=dim_head,
            dtype=dtype,
            dropout_p=dropout_p,
        )
        # NOTE: truthiness check — dropout_p of 0 or None both disable this layer.
        if dropout_p:
            self.dropout = torch.nn.Dropout(dropout_p)
        else:
            self.dropout = None
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        position_bias: Optional[torch.Tensor] = None,
        use_cache: bool = False,
        past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ):
        """
        Args:
            hidden_states (:obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``): Input of self-attention block. It can be the embedding of a batch of sequences.
            attention_mask (:obj:`torch.Tensor` of shape ``(batch, seq_self, seq_self)``): Avoid invalid areas to participate in the calculation.
            position_bias (:obj:`torch.Tensor` of shape ``(num_heads, seq_self, seq_self)``): Provide positional information to self-attention block.
            use_cache (bool): when True, also return the attention's (key, value) cache.
            past_key_value: cached (key, value) from previous decoding steps.
        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``: The output of attention block.
        """  # noqa: E501
        x = self.layernorm_before_attention(hidden_states)
        x = self.self_attention(x, x, attention_mask, position_bias, use_cache, past_key_value)
        if use_cache:
            # Attention returns (output, (h_k, h_v)) when caching is enabled.
            x, current_key_value = x
        else:
            current_key_value = None
        if self.dropout is not None:
            x = self.dropout(x)
        # Down-scaled residual connection; the 1.05 divisor is a model-specific
        # stabilization used consistently across blocks in this file.
        hidden_states = (hidden_states + x) / 1.05
        if use_cache:
            return hidden_states, current_key_value
        else:
            return hidden_states
class FFNBlock(torch.nn.Module):
    """The whole feed-forward block: layernorm, feed-forward, dropout and a
    down-scaled residual connection.
    Args:
        dim_model (int): main dimension of modules in transformer blocks.
        dim_ff (int): dim_ff used in :py:class:`model_center.layer.FeedForward`.
        dtype (optional): Defaults to torch.half.
        eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-5.
        dropout_p (float, optional): Defaults to 0.
    """  # noqa: E501
    def __init__(
        self,
        dim_model: int,
        dim_ff: int,
        dtype=torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = 0,
    ):
        super().__init__()
        # Pre-LayerNorm: normalize before the feed-forward sub-layer.
        self.layernorm_before_ffn = LayerNorm(
            dim_model,
            dtype=dtype,
            eps=eps,
        )
        self.ffn = FeedForward(
            dim_model,
            dim_ff,
            dtype=dtype,
            dropout_p=dropout_p,
        )
        # Truthiness check: dropout_p in (None, 0) disables the extra dropout.
        self.dropout = torch.nn.Dropout(dropout_p) if dropout_p else None
    def forward(
        self,
        hidden_states: torch.Tensor,
    ):
        """
        Args:
            hidden_states (:obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``): Hidden states before feed forward layer.
        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``: The output of feed-forward block
        """  # noqa: E501
        residual = hidden_states
        out = self.ffn(self.layernorm_before_ffn(hidden_states))
        if self.dropout is not None:
            out = self.dropout(out)
        # Same down-scaled residual connection as the attention block.
        return (residual + out) / 1.05
class TransformerBlock(torch.nn.Module):
    """The whole transformer block. A sequence of operation. Consists of self-attention block[, cross-attention block] and feed-forward block.
    Args:
        dim_model (int): main dimension of modules in transformer blocks.
        dim_ff (int): dim_ff used in :py:class:`model_center.layer.FeedForward`.
        num_heads (int): num_heads used in :py:class:`model_center.layer.Attention`.
        dim_head (int): dim_head used in :py:class:`model_center.layer.Attention`.
        dtype (optional): Defaults to torch.half.
        eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-5.
        dropout_p (float, optional): Defaults to 0.
        mask_att (bool, optional): skip the self-attention sub-block entirely.
        mask_ffn (bool, optional): skip the feed-forward sub-block entirely.
    """  # noqa: E501
    def __init__(
        self,
        dim_model: int,
        dim_ff: int,
        num_heads: int,
        dim_head: int,
        dtype=torch.half,
        eps: float = 1e-6,
        dropout_p: Optional[float] = None,
        mask_att: bool = False,
        mask_ffn: bool = False,
    ):
        super().__init__()
        self.mask_att = mask_att
        self.mask_ffn = mask_ffn
        # A masked sub-block is not even constructed, saving parameters.
        if not self.mask_att:
            self.self_att = SelfAttentionBlock(
                dim_model=dim_model,
                num_heads=num_heads,
                dim_head=dim_head,
                dtype=dtype,
                eps=eps,
                dropout_p=dropout_p,
            )
        if not self.mask_ffn:
            self.ffn = FFNBlock(
                dim_model=dim_model,
                dim_ff=dim_ff,
                dtype=dtype,
                eps=eps,
                dropout_p=dropout_p,
            )
    def forward(
        self,
        self_hidden_states: torch.Tensor,
        self_attention_mask: torch.Tensor,
        self_position_bias: Optional[torch.Tensor] = None,
        use_cache: bool = False,
        past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ):
        """
        Args:
            self_hidden_states (:obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``): Input of transformer block(self-attention block). It can be the raw embedding of a batch of sequences.
            self_attention_mask (:obj:`torch.Tensor` of shape ``(batch, seq_self, seq_self)``): Avoid invalid areas to participate in the calculation of self-attention.
            self_position_bias (:obj:`torch.Tensor` of shape ``(num_heads, seq_self, seq_self)``): Provide positional information to self-attention block.
            use_cache (bool): when True, also return the attention's (key, value) cache.
            past_key_value: cached (key, value) from previous decoding steps.
        Return:
            :obj:`torch.Tensor` of shape ``(batch, seq_self, dim_model)``: The output of transformer block.
        """  # noqa: E501
        # (batch, dim_model, seq_self)
        current_key_value = None
        if not self.mask_att:
            hidden_states = self.self_att(
                self_hidden_states,
                attention_mask=self_attention_mask,
                position_bias=self_position_bias,
                use_cache=use_cache,
                past_key_value=past_key_value,
            )
            if use_cache:
                # SelfAttentionBlock returns (hidden, cache) when caching.
                hidden_states, current_key_value = hidden_states
        else:
            # Attention masked out: pass the input straight through.
            hidden_states = self_hidden_states
        # (batch, dim_model, seq_self)
        if not self.mask_ffn:
            hidden_states = self.ffn(hidden_states)
        if use_cache:
            return hidden_states, current_key_value
        else:
            return hidden_states
| 8,751 | 34.008 | 198 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/utils/export.py | import os
import time
import functools
import torch
import bmtrain as bmt
import json
from cpm_live.models import CPMBee
from .log import logger
from typing import List, Optional
def rename_if_exists(file_path):
    """Move an existing file at ``file_path`` aside to a timestamped backup.

    If the path does not exist this is a no-op. On failure the error is logged
    as a warning and swallowed (best-effort behavior preserved).

    Args:
        file_path: path that is about to be (re)written.
    """
    if not os.path.exists(file_path):
        return
    timestamp = time.strftime('%Y%m%d%H%M%S')
    file_dir, file_name = os.path.split(file_path)
    file_root, file_ext = os.path.splitext(file_name)
    new_file_name = f"{file_root}_bak_{timestamp}{file_ext}"
    new_file_path = os.path.join(file_dir, new_file_name)
    try:
        os.rename(file_path, new_file_path)
        logger.info(f"File '{file_name}' already exists. Renamed to '{new_file_name}'")
    except OSError as e:
        # os.rename raises OSError; narrowed from the old blanket Exception.
        # logging.Logger.warn is a deprecated alias — use warning().
        logger.warning(
            "rename file failed,file_path={file_path}, new_file_path={new_file_path},err={err}"
            .format(file_path=file_path, new_file_path=new_file_path, err=str(e)))
def rename_if_exists_decorator(func):
    """Wrap *func* so any existing file at its first argument is backed up first.

    The wrapped function must take the target path as its first positional
    argument; the original signature and return value are preserved.
    """
    @functools.wraps(func)
    def wrapped(file_path, *args, **kwargs):
        # Move any pre-existing file at file_path out of the way before writing.
        rename_if_exists(file_path)
        return func(file_path, *args, **kwargs)
    return wrapped
@rename_if_exists_decorator
def bmt_save(file_path: str, model: CPMBee, export_files: Optional[List[str]] = None):
    # Save the distributed model via bmtrain; any pre-existing file is backed
    # up first by the decorator. The written path is recorded in export_files
    # (when provided) so callers can track everything exported this run.
    bmt.save(model, file_path)
    if export_files is not None:
        export_files.append(file_path)
@rename_if_exists_decorator
def torch_save(file_path: str, obj: object, export_files: Optional[List[str]] = None):
    # torch.save with automatic backup of any pre-existing file (decorator) and
    # optional bookkeeping of the written path in export_files.
    torch.save(obj, file_path)
    if export_files is not None:
        export_files.append(file_path)
@rename_if_exists_decorator
def json_save(file_path: str, obj: object, export_files: Optional[List[str]] = None):
    # JSON-serialize obj to file_path with automatic backup of any pre-existing
    # file (decorator) and optional bookkeeping in export_files.
    with open(file_path, "w") as data_f:
        json.dump(obj, data_f)
    if export_files is not None:
        export_files.append(file_path)
| 1,820 | 30.947368 | 95 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/utils/object.py | import bmtrain as bmt
import pickle
import torch
def allgather_objects(obj):
    """All-gather an arbitrary picklable Python object across bmtrain ranks.

    Returns a list with one entry per rank (this rank's contribution included).
    Objects are pickled, padded to the longest byte length across ranks, and
    exchanged as uint8 tensors over the GPU collective.
    """
    if bmt.world_size() == 1:
        # Single-process run: nothing to exchange.
        return [obj]
    with torch.no_grad():
        data_bytes: bytes = pickle.dumps(obj)
        data_length: int = len(data_bytes)
        # First all-gather the byte lengths so every rank can pad to the max.
        gpu_data_length = torch.tensor([data_length], device="cuda", dtype=torch.long)
        gathered_length = bmt.distributed.all_gather(gpu_data_length).view(-1).cpu()
        max_data_length = gathered_length.max().item()
        gpu_data_bytes = torch.zeros(max_data_length, dtype=torch.uint8, device="cuda")
        # NOTE(review): torch.ByteStorage.from_buffer is deprecated in newer
        # torch releases — consider torch.frombuffer if upgrading.
        byte_storage = torch.ByteStorage.from_buffer(data_bytes)
        gpu_data_bytes[:data_length] = torch.ByteTensor(byte_storage)
        gathered_data = bmt.distributed.all_gather(gpu_data_bytes).cpu()
        ret = []
        for i in range(gathered_data.size(0)):
            # Strip each rank's padding back off before unpickling.
            data_bytes = gathered_data[i, : gathered_length[i].item()].numpy().tobytes()
            ret.append(pickle.loads(data_bytes))
        return ret
| 994 | 33.310345 | 88 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/utils/data_utils.py | import torch
def pad(orig_items, key, padding_value=0, padding_side="left"):
    """Collate the ``item[key]`` tensors of *orig_items* into one padded batch.

    Each ``item[key]`` is a tensor with a leading batch dimension of 1 (or a
    list of such tensors, which is flattened into extra rows). Rows shorter
    than the longest one are filled with *padding_value* on *padding_side*
    ("left" or "right").
    """
    first = orig_items[0][key]
    if isinstance(first, list):
        assert isinstance(first[0], torch.Tensor)
        items = [{key: t} for item in orig_items for t in item[key]]
    else:
        assert isinstance(first, torch.Tensor)
        items = orig_items
    batch_size = len(items)
    shape = items[0][key].shape
    dim = len(shape)
    assert dim <= 3
    # NOTE(review): for dim == 3 this measures shape[-1] but fills along dim -2,
    # mirroring the original behavior — confirm intended for 3-D inputs.
    lengths = [item[key].shape[-1] for item in items]
    max_length = max(lengths)
    dtype = items[0][key].dtype
    # 1-D tensors, and 2-D tensors that already share one length, need no padding.
    if dim == 1 or (dim == 2 and max_length == min(lengths)):
        return torch.cat([item[key] for item in items], dim=0)
    if dim == 2:
        out_shape = (batch_size, max_length)
    else:
        out_shape = (batch_size, max_length, shape[-1])
    tensor = torch.zeros(out_shape, dtype=dtype) + padding_value
    for i, item in enumerate(items):
        row = item[key][0].clone()
        n = len(row)
        if padding_side == "left":
            tensor[i, -n:] = row
        else:
            tensor[i, :n] = row
    return tensor
| 1,550 | 33.466667 | 94 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/utils/gradient_shrink.py | import torch
class OpGradientShrink(torch.autograd.Function):
    """Identity in the forward pass; scales the gradient by ``alpha`` backward."""

    @staticmethod
    def forward(ctx, x: torch.Tensor, alpha: float):
        # Stash the scale for the backward pass; the value itself passes through.
        ctx.alpha = alpha
        return x

    @staticmethod
    def backward(ctx, grad_output):
        # No gradient flows to alpha, hence the trailing None.
        return ctx.alpha * grad_output, None


def gradient_shrink(x: torch.Tensor, alpha: float = 0.1):
    """Return ``x`` unchanged while shrinking its backward gradient by ``alpha``."""
    return OpGradientShrink.apply(x, alpha)
| 382 | 21.529412 | 57 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/generation/bee.py | from typing import Any, Dict, List, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from .generation_utils import BeamHypotheses, apply_repetition_penalty
from ..tokenizers.bee import CPMBeeTokenizer
from ..models.bee import CPMBee
from ..training_tasks.bee.pretrain import convert_data_to_id
from ..utils import pad
class CPMBeeGeneration:
    """Base class for CPM-Bee generation: packs structured input dicts into
    model tensors, runs a subclass-provided ``_decode``, and writes the decoded
    answers back into each input dict's ``<ans>`` slots."""

    def __init__(self, model: CPMBee, tokenizer: CPMBeeTokenizer):
        model.eval()
        self.model = model
        self.tokenizer = tokenizer
    # NOTE(review): mutable default [] for in_context_samples — only iterated,
    # never mutated here, so it is harmless, but unidiomatic.
    def _convert_to_tensors(self, data: Any, in_context_samples: List[Any] = []):
        answer_placeholders = []
        def _put_placeholder(data: Any, path: List[str] = []):
            # Recursively replace every leaf under <ans> with a numbered
            # placeholder token, recording its path for later write-back.
            if isinstance(data, dict):
                ret = {}
                for k, v in data.items():
                    ret[k] = _put_placeholder(v, path + [k])
                return ret
            else:
                answer_placeholders.append(path)
                return "<ans_{}>".format(len(answer_placeholders))
        data["<ans>"] = _put_placeholder(data["<ans>"])
        (
            input_ids,
            input_id_subs,
            context,
            segment_ids,
            segment_rel,
            n_segments,
            table_states,
        ) = convert_data_to_id(self.tokenizer, data, shuffle_answer=False, max_depth=8)
        # Map each <ans> sub-token id back to its placeholder number.
        sub_ans_map: Dict[int, int] = {}
        for fake_id, token_sub in table_states["token_id_table"]["<ans>"].items():
            token = table_states["ext_table"][fake_id]
            if token.startswith("<ans_") and token.endswith(">"):
                ans_id = int(token[5:-1])
                sub_ans_map[token_sub] = ans_id
        # Split tokens: context==0 <ans> markers become prediction targets;
        # everything in context stays as model input.
        tmp_input_ids = []
        tmp_input_sub = []
        tmp_input_seg = []
        predict_segments: List[Tuple[int, int]] = []
        for i in range(input_ids.shape[0]):
            if context[i] == 0:
                if input_ids[i] == self.tokenizer.encoder["<ans>"]:
                    # is ans
                    # (segment_id, ans_id)
                    predict_segments.append((segment_ids[i], sub_ans_map[input_id_subs[i]]))
            else:
                tmp_input_ids.append(input_ids[i])
                tmp_input_sub.append(input_id_subs[i])
                tmp_input_seg.append(segment_ids[i])
        if len(predict_segments) == 0:
            raise ValueError("No answer to predict")
        input_ids = np.array(tmp_input_ids, dtype=np.int32)
        input_id_subs = np.array(tmp_input_sub, dtype=np.int32)
        context = np.full_like(tmp_input_ids, 1, dtype=np.int8)
        segment_ids = np.array(tmp_input_seg, dtype=np.int32)
        sample_ids = np.zeros(input_ids.shape, dtype=np.int32)
        segment_rel_offset = np.zeros(input_ids.shape, dtype=np.int32)
        num_segments = np.full(input_ids.shape, n_segments, dtype=np.int32)
        # Append each in-context sample with its own sample id and shifted
        # segment-relation offset.
        for i, sample in enumerate(in_context_samples):
            (
                sample_input_ids,
                sample_id_subs,
                _,
                sample_segments,
                sample_rel,
                n_segments,
                table_states,
            ) = convert_data_to_id(self.tokenizer, sample, table_states, max_depth=8)
            input_ids = np.concatenate([input_ids, sample_input_ids], axis=0)
            input_id_subs = np.concatenate([input_id_subs, sample_id_subs], axis=0)
            context = np.concatenate(
                [context, np.ones(sample_input_ids.shape, dtype=np.int8)], axis=0
            )
            segment_ids = np.concatenate([segment_ids, sample_segments], axis=0)
            segment_rel_offset = np.concatenate(
                [
                    segment_rel_offset,
                    np.full(sample_input_ids.shape, segment_rel.shape[0], dtype=np.int32),
                ],
                axis=0,
            )
            segment_rel = np.concatenate([segment_rel, sample_rel], axis=0)
            sample_ids = np.concatenate(
                [sample_ids, np.full(sample_input_ids.shape, i + 1, dtype=np.int32)], axis=0
            )
            num_segments = np.concatenate(
                [num_segments, np.full(sample_input_ids.shape, n_segments, dtype=np.int32)], axis=0
            )
        input_pos = np.arange(input_ids.shape[0], dtype=np.int32)
        return (
            input_ids,
            input_id_subs,
            input_pos,
            context,
            segment_ids,
            segment_rel_offset,
            segment_rel,
            sample_ids,
            num_segments,
            predict_segments,
            answer_placeholders,
            table_states["ext_table"],
            table_states["token_id_table"],
        )
    def _process_list(self, data_list: List[Any]):
        # Convert every input dict to tensors, build the shared batch-level
        # extension-token table, and pad everything into CUDA batch tensors.
        pack_tensor = []
        other_info = []
        segment_rel_pack = []
        batch_ext_table_map: Dict[Tuple[int, int], int] = {}
        batch_ext_table_ids: List[int] = []
        batch_ext_table_sub: List[int] = []
        for data in data_list:
            (
                input_ids,
                input_id_subs,
                input_pos,
                context,
                segment_ids,
                segment_rel_offset,
                segment_rel,
                sample_ids,
                num_segments,
                predict_segments,
                answer_placeholders,
                ext_table,
                token_id_table,
            ) = self._convert_to_tensors(data, [])
            # Build the reverse table: batch-level extended vocab id -> token text.
            rev_ext_table: Dict[int, str] = {}
            for token, mp in token_id_table.items():
                if token == "<ans>":
                    continue
                token_id = self.tokenizer.encoder[token]
                for fake_id, token_sub in mp.items():
                    if token_sub > 0:
                        # Sub-tokens get fresh ids past vocab_size, deduplicated
                        # across the whole batch.
                        if (token_id, token_sub) not in batch_ext_table_map:
                            batch_ext_table_map[(token_id, token_sub)] = (
                                len(batch_ext_table_ids) + self.tokenizer.vocab_size
                            )
                            batch_ext_table_ids.append(token_id)
                            batch_ext_table_sub.append(token_sub)
                        rev_ext_table[batch_ext_table_map[(token_id, token_sub)]] = ext_table[
                            fake_id
                        ]
                    else:
                        rev_ext_table[token_id] = ext_table[fake_id]
            pack_tensor.append(
                {
                    "input": torch.from_numpy(input_ids).unsqueeze(0),
                    "input_sub": torch.from_numpy(input_id_subs).unsqueeze(0),
                    "input_pos": torch.from_numpy(input_pos).unsqueeze(0),
                    "context": torch.from_numpy(context).unsqueeze(0),
                    "sample_idx": torch.from_numpy(sample_ids).unsqueeze(0),
                    "num_segments": torch.from_numpy(num_segments).unsqueeze(0),
                    "segment": torch.from_numpy(segment_ids).unsqueeze(0),
                    "segment_rel_offset": torch.from_numpy(segment_rel_offset).unsqueeze(0),
                }
            )
            segment_rel_pack.append(torch.from_numpy(segment_rel))
            other_info.append(
                {
                    "predict_segments": predict_segments,
                    "answer_placeholders": answer_placeholders,
                    "ext_table": rev_ext_table,
                }
            )
        keys = set(pack_tensor[0].keys())
        padded = {}
        for key in keys:
            padded[key] = pad(pack_tensor, key).cuda()
        # segment_rel rows vary per sample; zero-pad to the longest.
        max_num_rels = 0
        for rel in segment_rel_pack:
            max_num_rels = max(max_num_rels, rel.size(0))
        padded_rels = torch.zeros(len(segment_rel_pack), max_num_rels, dtype=torch.int32)
        for i, rel in enumerate(segment_rel_pack):
            padded_rels[i, : rel.size(0)] = rel
        padded["segment_rel"] = padded_rels.cuda()
        padded["batch_ext_table_ids"] = torch.tensor(
            batch_ext_table_ids, dtype=torch.int32, device="cuda"
        )
        padded["batch_ext_table_sub"] = torch.tensor(
            batch_ext_table_sub, dtype=torch.int32, device="cuda"
        )
        return padded, other_info
    def generate(self, data_list, **kwargs):
        """Decode answers for each dict in *data_list* and fill its ``<ans>``
        slots in place; unanswered placeholders are set to None."""
        model_inputs, other_info = self._process_list(data_list)
        with torch.inference_mode():
            result_ids = self._decode(model_inputs, other_info, **kwargs)
        for sent_id, result in enumerate(result_ids):
            # Group decoded token ids by their answer placeholder number.
            ans_result_map: Dict[int, List[int]] = {}
            for raw_word_id, ans_id in result:
                if ans_id not in ans_result_map:
                    ans_result_map[ans_id] = []
                ans_result_map[ans_id].append(raw_word_id)
            answer_placeholders = other_info[sent_id]["answer_placeholders"]
            ext_table = other_info[sent_id]["ext_table"]
            data = data_list[sent_id]
            for ans_id, token_ids in ans_result_map.items():
                if token_ids[-1] == self.tokenizer.eos_id:
                    token_ids = token_ids[:-1]
                text = self.tokenizer.decode(token_ids, ext_table)
                # Walk the recorded path to write the text back into <ans>.
                path = answer_placeholders[ans_id - 1]
                if len(path) > 0:
                    p = data["<ans>"]
                    for part in path[:-1]:
                        p = p[part]
                    p[path[-1]] = text
                else:
                    data["<ans>"] = text
            # Placeholders that produced no output are reset to None.
            for ans_id in range(len(answer_placeholders)):
                if (ans_id + 1) not in ans_result_map:
                    path = answer_placeholders[ans_id]
                    p = data["<ans>"]
                    for part in path[:-1]:
                        p = p[part]
                    p[path[-1]] = None
        return data_list
    def _decode(self, model_inputs, other_info, **kwargs):
        # Decoding strategy is supplied by subclasses (e.g. beam search).
        raise NotImplementedError("_decode is not implemented.")
class CPMBeeBeamSearch(CPMBeeGeneration):
    """Beam-search decoding strategy for CPM-Bee."""

    def _decode(
        self,
        model_inputs,
        other_info,
        beam_size=3,
        max_length=100,
        repetition_penalty=1.0,
        repetition_window=None,
    ):
        """
        Beam search
        Args:
            model_inputs (dict): input ids.
            beam_size (int, optional, defaults to 3): beam size of beam search.
            generate_length (int, optional, defaults to 100): maximum generation length.
            repetition_penalty (float, optional, defaults to 1.0): repetition penalty coefficient, 1.0 means no penalty.
            repetition_window (int, optional, defaults to None): window size of repetition penalty, None means that all output tokens are penalized.
        """  # noqa: E501
        # generate_length + 1 for EOS token
        max_length += 1
        # expand dimmension: replicate every batch row beam_size times so each
        # beam runs as an independent batch element.
        batch_size = model_inputs["input"].size(0)
        input: torch.Tensor = (
            model_inputs["input"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        input_sub: torch.Tensor = (
            model_inputs["input_sub"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        input_pos: torch.Tensor = (
            model_inputs["input_pos"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        context: torch.Tensor = (
            model_inputs["context"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        sample_ids: torch.Tensor = (
            model_inputs["sample_idx"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        num_segments: torch.Tensor = (
            model_inputs["num_segments"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        segment: torch.Tensor = (
            model_inputs["segment"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        segment_rel_offset: torch.Tensor = (
            model_inputs["segment_rel_offset"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        segment_rel: torch.Tensor = (
            model_inputs["segment_rel"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        ext_table_ids: torch.Tensor = model_inputs["batch_ext_table_ids"]
        ext_table_sub: torch.Tensor = model_inputs["batch_ext_table_sub"]
        # CPU copies for fast scalar lookups inside the Python beam loop.
        ext_table_ids_cpu = ext_table_ids.cpu()
        ext_table_sub_cpu = ext_table_sub.cpu()
        done = [False for _ in range(batch_size)]
        # Only beam 0 starts live; the rest get -1e9 so the first topk step
        # does not pick identical candidates from duplicated beams.
        beam_scores = torch.zeros((batch_size, beam_size), dtype=torch.float, device=input.device)
        beam_scores[:, 1:] = -1e9
        beam_scores = beam_scores.view(-1)
        # generated hypotheses
        generated_hyps = [
            BeamHypotheses(beam_size, max_length, length_penalty=1, early_stopping=False)
            for _ in range(batch_size)
        ]
        pred_start_index = input.size(-1)
        # Prefill: encode the full prompt once to populate the KV cache.
        _, _, past_key_values = self.model.inference(
            input=input,
            input_sub=input_sub,
            position=input_pos,
            context=context,
            sample_ids=sample_ids,
            num_segments=num_segments,
            segment=segment,
            segment_rel_offset=segment_rel_offset,
            segment_rel=segment_rel,
            ext_table_ids=ext_table_ids,
            ext_table_sub=ext_table_sub,
            past_key_values=None,
        )
        # Per-beam decoding state: which answer slot is being filled ("idx"),
        # tokens emitted so far ("ans"), and the token/segment/position to feed
        # at the next step.
        beam_states = []
        for sent_id in range(batch_size):
            instance_beam_states = []
            for beam_id in range(beam_size):
                instance_beam_states.append(
                    {
                        "idx": 0,
                        "ans": [],
                        "nx_token_id": self.tokenizer.bos_id,
                        "nx_token_sub": 0,
                        "nx_segment_id": other_info[sent_id]["predict_segments"][0][0],
                        "nx_position": 0,
                    }
                )
            beam_states.append(instance_beam_states)
        for i in range(max_length + 1):
            # Gather the next-step token for every (sentence, beam) pair.
            tmp_input = []
            tmp_input_sub = []
            tmp_position = []
            tmp_segment = []
            for sent_id in range(batch_size):
                for beam_id in range(beam_size):
                    tmp_input.append(beam_states[sent_id][beam_id]["nx_token_id"])
                    tmp_input_sub.append(beam_states[sent_id][beam_id]["nx_token_sub"])
                    tmp_position.append(beam_states[sent_id][beam_id]["nx_position"])
                    tmp_segment.append(beam_states[sent_id][beam_id]["nx_segment_id"])
            with torch.no_grad():
                input = torch.cat(
                    [
                        input,
                        torch.tensor(tmp_input, dtype=torch.int32, device="cuda").view(
                            batch_size * beam_size, 1
                        ),
                    ],
                    dim=-1,
                )
                # Single-token incremental step against the KV cache.
                logits, _, past_key_values = self.model.inference(
                    input=input[:, -1:],
                    input_sub=torch.tensor(tmp_input_sub, dtype=torch.int32, device="cuda").view(
                        batch_size * beam_size, 1
                    ),
                    position=torch.tensor(tmp_position, dtype=torch.int32, device="cuda").view(
                        batch_size * beam_size, 1
                    ),
                    context=torch.ones(
                        batch_size * beam_size, dtype=torch.bool, device="cuda"
                    ).view(batch_size * beam_size, 1),
                    sample_ids=torch.zeros(
                        batch_size * beam_size, dtype=torch.int32, device="cuda"
                    ).view(batch_size * beam_size, 1),
                    num_segments=num_segments[:, -1:],
                    segment=torch.tensor(tmp_segment, dtype=torch.int32, device="cuda").view(
                        batch_size * beam_size, 1
                    ),
                    segment_rel_offset=segment_rel_offset[:, -1:],
                    segment_rel=segment_rel,
                    ext_table_ids=ext_table_ids,
                    ext_table_sub=ext_table_sub,
                    past_key_values=past_key_values,
                )
                logits = logits[:, -1, :]
            # skip all steps when we are done with each sentence
            if all(done):
                break
            for sent_id in range(batch_size):
                if self.tokenizer.unk_id not in other_info[sent_id]["ext_table"]:
                    # unk is not allowed, mask unk
                    logits[
                        sent_id * beam_size : (sent_id + 1) * beam_size, self.tokenizer.unk_id
                    ] = -10000
                # Mask extended-vocab slots that belong to other sentences in
                # the batch.
                ext_ids = set()
                for v in other_info[sent_id]["ext_table"].keys():
                    ext_ids.add(v)
                for ext_id in range(
                    self.tokenizer.vocab_size, self.tokenizer.vocab_size + ext_table_ids.size(0)
                ):
                    if ext_id not in ext_ids:
                        logits[sent_id * beam_size : (sent_id + 1) * beam_size, ext_id] = -10000
            apply_repetition_penalty(
                logits,
                batch_size,
                beam_size,
                input,
                repetition_penalty,
                pred_start_index,
                input.size(-1) - 1,
                repetition_window,
            )
            scores = F.log_softmax(logits, dim=-1)
            next_scores = scores + beam_scores[:, None].expand_as(
                scores
            )  # (batch_size * beam_size, vocab_size)
            # re-organize to group the beam together (we are keeping top hypothesis accross beams)
            next_scores = next_scores.view(batch_size, -1)  # (batch_size, beam_size * vocab_size)
            # 2*beam_size candidates so enough survive after EOS terminations.
            next_scores, next_words = torch.topk(
                next_scores, 2 * beam_size, dim=1, largest=True, sorted=True
            )
            assert next_scores.size() == next_words.size() == (batch_size, 2 * beam_size)
            next_beam_states = []
            for sent_id in range(batch_size):
                # if we are done with this sentence
                done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(
                    next_scores[sent_id].max().item(), i
                )
                if done[sent_id]:
                    next_beam_states.append(
                        [
                            (
                                {
                                    "idx": 0,
                                    "ans": [],
                                    "nx_token_id": 0,
                                    "nx_token_sub": 0,
                                    "nx_segment_id": 0,
                                    "nx_position": 0,
                                },
                                0,
                                0,
                            )
                        ]
                        * beam_size
                    )  # pad the batch
                    continue
                # next sentence beam content
                next_instance_beam_states = []
                # next words for this sentence
                for idx, value in zip(next_words[sent_id], next_scores[sent_id]):
                    # get beam and word IDs
                    beam_id = torch.div(idx, scores.size(-1), rounding_mode="floor").item()
                    word_id = (idx % scores.size(-1)).item()
                    curr_info = beam_states[sent_id][beam_id]
                    # end of sentence, or next word
                    if (
                        word_id == self.tokenizer.eos_id
                        and (curr_info["idx"] + 1 == len(other_info[sent_id]["predict_segments"]))
                    ) or i == max_length:
                        # Last answer slot finished (or length limit hit):
                        # commit the hypothesis.
                        generated_hyps[sent_id].add(
                            beam_states[sent_id][beam_id]["ans"]
                            + [
                                (
                                    word_id,
                                    other_info[sent_id]["predict_segments"][curr_info["idx"]][1],
                                )
                            ],
                            value.item(),
                        )
                    elif word_id == self.tokenizer.eos_id:
                        # EOS of an intermediate answer slot: advance to the
                        # next predict segment and restart from BOS.
                        next_instance_beam_states.append(
                            (
                                {
                                    "idx": curr_info["idx"] + 1,
                                    "ans": curr_info["ans"]
                                    + [
                                        (
                                            word_id,
                                            other_info[sent_id]["predict_segments"][
                                                curr_info["idx"]
                                            ][1],
                                        )
                                    ],
                                    "nx_token_id": self.tokenizer.bos_id,
                                    "nx_token_sub": 0,
                                    "nx_segment_id": other_info[sent_id]["predict_segments"][
                                        curr_info["idx"] + 1
                                    ][0],
                                    "nx_position": 0,
                                },
                                value.item(),
                                sent_id * beam_size + beam_id,
                            )
                        )
                    else:
                        # Regular token: translate batch-extended vocab ids back
                        # to their underlying (token_id, token_sub) pair.
                        raw_word_id = word_id
                        word_id_sub = 0
                        if word_id >= self.tokenizer.vocab_size:
                            word_id -= self.tokenizer.vocab_size
                            word_id_sub = int(ext_table_sub_cpu[word_id].item())
                            word_id = int(ext_table_ids_cpu[word_id].item())
                        next_instance_beam_states.append(
                            (
                                {
                                    "idx": curr_info["idx"],
                                    "ans": curr_info["ans"]
                                    + [
                                        (
                                            raw_word_id,
                                            other_info[sent_id]["predict_segments"][
                                                curr_info["idx"]
                                            ][1],
                                        )
                                    ],
                                    "nx_token_id": word_id,
                                    "nx_token_sub": word_id_sub,
                                    "nx_segment_id": curr_info["nx_segment_id"],
                                    "nx_position": curr_info["nx_position"] + 1,
                                },
                                value.item(),
                                sent_id * beam_size + beam_id,
                            )
                        )
                    # the beam for next step is full
                    if len(next_instance_beam_states) == beam_size:
                        break
                # update next beam content
                assert len(next_instance_beam_states) == 0 if i == max_length else beam_size
                next_beam_states.append(next_instance_beam_states)
            # we have reached the last step
            if i == max_length:
                break
            # sanity check / prepare next batch: reorder inputs, scores and the
            # KV cache so row k holds the parent of new beam k.
            beam_reorder_idx = []
            beam_new_scores = []
            beam_states = []
            for sent_id in range(batch_size):
                instance_beam_states = []
                for beam_id in range(beam_size):
                    state, value, beam_idx = next_beam_states[sent_id][beam_id]
                    beam_reorder_idx.append(beam_idx)
                    beam_new_scores.append(value)
                    instance_beam_states.append(state)
                beam_states.append(instance_beam_states)
            input = input[beam_reorder_idx, :]
            beam_scores = torch.tensor(beam_new_scores, dtype=torch.float, device=input.device)
            for kw in past_key_values.keys():
                if kw == "buffer":
                    buf_list = past_key_values[kw]
                    nw_buf_list = []
                    for buf in buf_list:
                        if buf is None:
                            nw_buf_list.append((None, None))
                        else:
                            k_buf, v_buf = buf
                            nw_buf_list.append(
                                (k_buf[beam_reorder_idx, :], v_buf[beam_reorder_idx, :])
                            )
                    past_key_values[kw] = nw_buf_list
                else:
                    past_key_values[kw] = past_key_values[kw][beam_reorder_idx, :]
        # select the best hypotheses
        results = []
        for sent_id, hypotheses in enumerate(generated_hyps):
            best_hyp = max(hypotheses.hyp, key=lambda x: x[0])[1]
            results.append(best_hyp)
        return results
| 26,159 | 40.52381 | 148 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/generation/ant.py | import torch
import torch.nn.functional as F
from .generation_utils import BeamHypotheses, apply_repetition_penalty, top_k_top_p_filtering
from ..utils import pad
class CPMAntGeneration:
    """Base class for CPM-Ant text generation.

    Tokenizes and batches prompt texts, then delegates the actual decoding
    strategy (beam search, sampling, ...) to a subclass-provided ``_decode``.
    """
    def __init__(self, model, tokenizer, prompt_length=32):
        # Generation never trains the model, so switch to eval mode once here.
        model.eval()
        self.model = model
        self.tokenizer = tokenizer
        self.prompt_length = prompt_length
    def _convert_to_tensors(self, input_text, task_id=2):
        """Encode one text into the model's input dict of 1 x L int tensors."""
        model_inputs = {}
        input_ids = [self.tokenizer.bos_id] + self.tokenizer.encode(input_text)
        # Drop unknown tokens entirely instead of feeding unk ids to the model.
        input_ids = [j for j in input_ids if j != self.tokenizer.unk_id]
        # Prompt positions use a reserved id range offset by task_id * prompt_length.
        model_inputs["input"] = [
            x + self.prompt_length * task_id for x in range(self.prompt_length)
        ] + input_ids
        model_inputs["length"] = len(model_inputs["input"])
        model_inputs["position"] = list(range(len(model_inputs["input"])))
        model_inputs["span"] = [0] * len(model_inputs["input"])
        model_inputs["context"] = [True] * len(model_inputs["input"])
        # Segment 0 marks prompt positions, segment 2 the user text.
        model_inputs["segment"] = [0] * self.prompt_length + [2] * len(input_ids)
        for key in model_inputs:
            model_inputs[key] = torch.tensor(model_inputs[key]).int().unsqueeze(0)
        return model_inputs
    def _process_texts(self, text_list):
        """Tensorize every text and left-pad them into one batch on the GPU."""
        input_tensors = list(map(self._convert_to_tensors, text_list))
        keys = set(input_tensors[0].keys())
        padded = {}
        for key in keys:
            # NOTE(review): .cuda() hard-codes GPU placement — confirm a GPU is
            # always available wherever this class is used.
            padded[key] = pad(input_tensors, key, padding_side='left').cuda()
        return padded
    def generate(self, text_list, **kwargs):
        """Generate continuations for ``text_list``; kwargs go to ``_decode``."""
        model_inputs = self._process_texts(text_list)
        with torch.inference_mode():
            result = self._decode(model_inputs, **kwargs)
        return result
    def _decode(self, model_inputs, **kwargs):
        # Subclasses implement the concrete decoding strategy.
        raise NotImplementedError("_decode is not implemented.")
class CPMAntBeamSearch(CPMAntGeneration):
    """Beam-search decoder for CPM-Ant."""

    def _decode(
        self,
        model_inputs,
        beam_size=3,
        max_length=100,
        repetition_penalty=1.0,
        repetition_window=None,
        **kwargs
    ):
        """
        Beam search

        Args:
            model_inputs (dict): input ids.
            beam_size (int, optional, defaults to 3): beam size of beam search.
            generate_length (int, optional, defaults to 100): maximum generation length.
            repetition_penalty (float, optional, defaults to 1.0): repetition penalty coefficient, 1.0 means no penalty.
            repetition_window (int, optional, defaults to None): window size of repetition penalty, None means that all output tokens are penalized.
        """  # noqa: E501
        # generate_length + 1 for EOS token
        max_length += 1

        # expand each example `beam_size` times along a new beam dimension,
        # then flatten to (batch_size * beam_size, ...)
        batch_size = model_inputs["input"].size(0)
        input = (
            model_inputs["input"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        length = (
            model_inputs["length"]
            .unsqueeze(1)
            .expand(batch_size, beam_size)
            .contiguous()
            .view(
                batch_size * beam_size,
            )
        )
        context = (
            model_inputs["context"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        position = (
            model_inputs["position"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        segment = (
            model_inputs["segment"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )
        span = (
            model_inputs["span"]
            .unsqueeze(1)
            .expand(batch_size, beam_size, -1)
            .contiguous()
            .view(batch_size * beam_size, -1)
        )

        done = [False for _ in range(batch_size)]

        # only beam 0 starts alive; the rest get -1e9 so the first topk
        # does not pick `beam_size` copies of the same continuation
        beam_scores = torch.zeros((batch_size, beam_size), dtype=torch.float, device=input.device)
        beam_scores[:, 1:] = -1e9
        beam_scores = beam_scores.view(-1)

        # generated hypotheses
        generated_hyps = [
            BeamHypotheses(beam_size, max_length, length_penalty=1, early_stopping=False)
            for _ in range(batch_size)
        ]

        pred_start_index = input.size(-1)
        past_key_values = None
        for i in range(max_length + 1):
            if i == 0:
                logits, _, past_key_values = self.model.inference(
                    input=input,
                    length=length,
                    context=context,
                    position=position,
                    segment=segment,
                    span=span,
                    past_key_values=past_key_values,
                )
            else:
                # incremental decoding: only the newest token is fed in
                logits, _, past_key_values = self.model.inference(
                    input=input[:, -1:],
                    length=length,
                    context=context,
                    position=position,
                    segment=segment,
                    span=span,
                    past_key_values=past_key_values,
                )

            # skip all steps when we are done with each sentence
            if all(done):
                break

            # (batch * beam, seqlen, model_dim)
            logits = logits[:, -1, :]

            if i == 0:
                # never end or break line on the very first generated token
                logits[:, self.tokenizer.eos_id] = -float("inf")
                logits[:, self.tokenizer.newline_id] = -float("inf")

            apply_repetition_penalty(
                logits,
                batch_size,
                beam_size,
                input,
                repetition_penalty,
                pred_start_index,
                input.size(-1) - 1,
                repetition_window,
            )
            scores = F.log_softmax(logits, dim=-1)

            next_scores = scores + beam_scores[:, None].expand_as(
                scores
            )  # (batch_size * beam_size, vocab_size)

            # re-organize to group the beam together (we are keeping top hypothesis accross beams)
            next_scores = next_scores.view(batch_size, -1)  # (batch_size, beam_size * vocab_size)
            # 2 * beam_size candidates so the beam stays full even if some are EOS
            next_scores, next_words = torch.topk(
                next_scores, 2 * beam_size, dim=1, largest=True, sorted=True
            )

            assert next_scores.size() == next_words.size() == (batch_size, 2 * beam_size)
            next_batch_beam = []

            for sent_id in range(batch_size):
                # if we are done with this sentence
                done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(
                    next_scores[sent_id].max().item(), i
                )
                if done[sent_id]:
                    next_batch_beam.extend(
                        [(0, self.tokenizer.pad_id, 0)] * beam_size
                    )  # pad the batch
                    continue

                # next sentence beam content
                next_sent_beam = []

                # next words for this sentence
                for idx, value in zip(next_words[sent_id], next_scores[sent_id]):
                    # get beam and word IDs
                    beam_id = torch.div(idx, scores.size(-1), rounding_mode="floor")
                    word_id = idx % scores.size(-1)

                    # end of sentence, or next word
                    if word_id == self.tokenizer.eos_id or i == max_length:
                        generated_hyps[sent_id].add(
                            input[sent_id * beam_size + beam_id, pred_start_index:]
                            .clone()
                            .cpu()
                            .tolist(),
                            value.item(),
                        )
                    else:
                        next_sent_beam.append((value, word_id, sent_id * beam_size + beam_id))

                    # the beam for next step is full
                    if len(next_sent_beam) == beam_size:
                        break

                # update next beam content
                # bug fix: this assert previously parsed as
                # `(len(next_sent_beam) == 0) if i == max_length else beam_size`,
                # which is always truthy for i < max_length (beam_size != 0) and
                # therefore never checked anything; parenthesize the intended RHS.
                assert len(next_sent_beam) == (0 if i == max_length else beam_size)
                if len(next_sent_beam) == 0:
                    next_sent_beam = [(0, self.tokenizer.pad_id, 0)] * beam_size  # pad the batch
                next_batch_beam.extend(next_sent_beam)
                assert len(next_batch_beam) == beam_size * (sent_id + 1)

            # we have reached the last step
            if i == max_length:
                break

            # sanity check / prepare next batch
            assert len(next_batch_beam) == batch_size * beam_size
            beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
            beam_words = input.new([x[1] for x in next_batch_beam])
            beam_idx = length.new([x[2] for x in next_batch_beam]).long()

            # re-order batch and internal states
            input = input[beam_idx, :]
            past_key_values = [list(each) if each is not None else each for each in past_key_values]  # type: ignore # noqa: E501
            for key_value_layer in past_key_values:
                if key_value_layer is not None:
                    key_value_layer[0] = key_value_layer[0][beam_idx]
                    key_value_layer[1] = key_value_layer[1][beam_idx]

            # update input ids
            input = torch.cat([input, beam_words.unsqueeze(1)], dim=-1)
            length += 1
            context = torch.cat(
                [context, torch.ones((context.size(0), 1), dtype=torch.int, device=context.device)],
                dim=-1,
            )
            position = torch.cat([position, position[:, -1:] + 1], dim=-1)
            segment = torch.cat(
                [segment, segment[:, -1:]], dim=-1
            )  # segment id always the same as the previous token
            span = torch.cat([span, span[:, -1:]], dim=-1)

        # select the best hypotheses
        results = []
        for i, hypotheses in enumerate(generated_hyps):
            best_hyp = max(hypotheses.hyp, key=lambda x: x[0])[1]
            results.append(best_hyp)

        result_text = list(map(self.tokenizer.decode, results))
        return result_text
class CPMAntRandomSampling(CPMAntGeneration):
    """Top-k / top-p (nucleus) random-sampling decoder for CPM-Ant."""
    def _decode(
        self,
        model_inputs,
        max_length=100,
        top_k=0,
        top_p=0.9,
        temperature=0.9,
        repetition_penalty=1.0,
        repetition_window=None,
        **kwargs
    ):
        """
        Top-k and top-p sampling.
        Args:
            model_inputs (dict): input ids
            generate_length (int, optional, defaults to 100): maximum generation length
            top_k (int, optional, defaults to 0): keep only top k tokens with highest probability. 0 means keeping all tokens.
            top_p (int, optional, defaults to 0.9): keep the top tokens with cumulative probability >= top_p.
            temperature (int, optional, defaults to 0.9): the value that can cool down the logits distribution.
            repetition_penalty (float, optional, defaults to 1.0): repetition penalty coefficient, 1.0 means no penalty.
            repetition_window (int, optional, defaults to None): window size of repetition penalty, None means that all output tokens are penalized.
        """  # noqa: E501
        # generate_length + 1 for EOS token
        max_length += 1
        input = model_inputs["input"]
        length = model_inputs["length"]
        context = model_inputs["context"]
        position = model_inputs["position"]
        segment = model_inputs["segment"]
        span = model_inputs["span"]
        batch_size = input.size(0)
        # everything before this column is prompt/context, not generated output
        pred_start_index = input.size(-1)
        past_key_values = None
        done = [False for _ in range(batch_size)]
        results = [None for _ in range(batch_size)]
        for i in range(max_length):
            if i == 0:
                # first step: run the full prompt to build the KV cache
                logits, _, past_key_values = self.model.inference(
                    input=input,
                    length=length,
                    context=context,
                    position=position,
                    segment=segment,
                    span=span,
                    past_key_values=past_key_values,
                )
            else:
                # incremental decoding: only the newest token is fed in
                logits, _, past_key_values = self.model.inference(
                    input=input[:, -1:],
                    length=length,
                    context=context,
                    position=position,
                    segment=segment,
                    span=span,
                    past_key_values=past_key_values,
                )
            logits = logits[:, -1, :]
            if i == 0:
                # never end or break line on the very first generated token
                logits[:, self.tokenizer.eos_id] = -float("inf")
                logits[:, self.tokenizer.newline_id] = -float("inf")
            apply_repetition_penalty(
                logits,
                batch_size,
                1,
                input,
                repetition_penalty,
                pred_start_index,
                input.size(-1) - 1,
                repetition_window,
            )
            logits = logits / temperature
            logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
            probs = F.softmax(logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)
            for idx in range(batch_size):
                if not done[idx] and (
                    next_token[idx].item() == self.tokenizer.eos_id or i == max_length - 1
                ):
                    done[idx] = True
                    # recorded output excludes the EOS token itself (next_token
                    # has not been appended to `input` yet at this point)
                    results[idx] = input[idx, pred_start_index:].clone().cpu().tolist()  # type: ignore # noqa: E501
            if sum(done) == batch_size:
                break
            # update input ids
            input = torch.cat([input, next_token], dim=-1)
            length += 1
            context = torch.cat(
                [context, torch.ones((context.size(0), 1), dtype=torch.int, device=context.device)],
                dim=-1,
            )
            position = torch.cat([position, position[:, -1:] + 1], dim=-1)
            segment = torch.cat(
                [segment, segment[:, -1:]], dim=-1
            )  # segment id always the same as the previous token
            span = torch.cat([span, span[:, -1:]], dim=-1)
        result_text = list(map(self.tokenizer.decode, results))
        return result_text
| 14,654 | 36.966321 | 148 | py |
CPM-Live | CPM-Live-master/cpm-live/cpm_live/generation/generation_utils.py | import torch
import torch.nn.functional as F
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float("inf")):
    """Mask logits outside the top-k set and/or the top-p probability mass.

    Masked positions are set (in place) to ``filter_value``; the (possibly
    reshaped) logits tensor is returned. ``top_k == 0`` / ``top_p == 0.0``
    disable the respective filter.

    Adapted from the HuggingFace conversational-AI example:
    https://medium.com/huggingface/how-to-build-a-state-of-the-art-conversational-ai-with-transfer-learning-2d818ac26313
    """
    if top_k > 0:
        # everything strictly below the k-th best logit gets filtered
        kth_best = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_best] = filter_value

    n_rows = logits.size()[0]
    if top_p > 0.0:
        logits = logits.view(n_rows, -1).contiguous()
        for row in range(len(logits)):
            desc_logits, desc_idx = torch.sort(logits[row].view(-1), descending=True)
            cum_probs = torch.cumsum(F.softmax(desc_logits, dim=-1), dim=-1)
            # drop tokens once the cumulative probability exceeds top_p ...
            drop_mask = cum_probs > top_p
            # ... but shift right so the first token crossing the threshold stays
            drop_mask[..., 1:] = drop_mask[..., :-1].clone()
            drop_mask[..., 0] = 0
            logits[row][desc_idx[drop_mask]] = filter_value
        logits = logits.view(n_rows, -1).contiguous()

    return logits
def apply_repetition_penalty(
    logits,
    batch_size,
    num_beams,
    prev_output_tokens,
    repetition_penalty,
    start_idx=None,
    end_idx=None,
    window_size=None,
):
    """Penalize (in place) the logits of tokens already present in the output.

    CTRL-style penalty (https://arxiv.org/abs/1909.05858): positive logits are
    divided by the coefficient, negative ones multiplied, so repeated tokens
    always become less likely. Only ``prev_output_tokens[start_idx:end_idx+1]``
    (optionally further restricted to the last ``window_size`` positions) is
    penalized; with ``start_idx``/``end_idx`` unset the whole row is used.
    """
    # only conduct repetition penalty for the output
    assert repetition_penalty >= 1, "repetition penalty coefficient should >= 1"
    total_rows = batch_size * num_beams
    for row in range(total_rows):
        if start_idx is None or end_idx is None:
            penalized = prev_output_tokens[row].tolist()
        elif end_idx < start_idx:
            penalized = []
        elif window_size:
            lo = max(start_idx, end_idx + 1 - window_size)
            penalized = prev_output_tokens[row][lo : end_idx + 1].tolist()
        else:
            penalized = prev_output_tokens[row][start_idx : end_idx + 1].tolist()
        for token in set(penalized):
            if logits[row, token] < 0:
                logits[row, token] *= repetition_penalty
            else:
                logits[row, token] /= repetition_penalty
class BeamHypotheses:
    def __init__(self, n_hyp, max_len, length_penalty, early_stopping):
        """
        Keep the ``n_hyp`` best finished hypotheses found so far for one sentence.
        """
        self.max_len = max_len
        self.length_penalty = length_penalty
        self.early_stopping = early_stopping
        self.n_hyp = n_hyp
        self.hyp = []
        self.worst_score = 1e9

    def __len__(self):
        """
        Number of hypotheses currently stored.
        """
        return len(self.hyp)

    def add(self, hyp, sum_logprobs):
        """
        Insert a hypothesis (length-normalized score), evicting the worst one
        when the list is over capacity.
        """
        score = sum_logprobs / len(hyp) ** self.length_penalty
        at_capacity = len(self) >= self.n_hyp
        if at_capacity and score <= self.worst_score:
            return
        self.hyp.append((score, hyp))
        if len(self) > self.n_hyp:
            ranked = sorted((s, pos) for pos, (s, _) in enumerate(self.hyp))
            del self.hyp[ranked[0][1]]
            self.worst_score = ranked[1][0]
        else:
            self.worst_score = min(score, self.worst_score)

    def is_done(self, best_sum_logprobs, cur_len):
        """
        If there are enough hypotheses and that none of the hypotheses being generated
        can become better than the worst one in the heap, then we are done with this sentence.
        """
        if len(self) < self.n_hyp:
            return False
        if self.early_stopping:
            return True
        return self.worst_score >= best_sum_logprobs / cur_len**self.length_penalty
| 4,382 | 37.787611 | 122 | py |
LOSTIN | LOSTIN-main/GNN-supernode/inference.py | import torch
from torch_geometric.data import DataLoader
import torch.optim as optim
import torch.nn.functional as F
from gnn import GNN
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch_geometric.utils import degree
from tqdm import tqdm
import argparse
import time
import numpy as np
import json
import operator
from functools import reduce
import ARMA
import film
import gat
import pna
import pan
import sage
import sgn
import unet
import rgcn
#import ggnn
### importing OGB
from dataset_pyg import PygGraphPropPredDataset
from evaluate import Evaluator
# Module-level loss functions; train() picks one per batch based on `task_type`.
cls_criterion = torch.nn.BCEWithLogitsLoss()  # binary (multi-task) classification
reg_criterion = torch.nn.MSELoss()  # regression
#reg_criterion=torch.nn.SmoothL1Loss(reduction='mean', beta=1.0)
def train(model, device, loader, optimizer, task_type):
    """Run one training epoch over `loader`, updating `model` in place."""
    model.train()

    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = batch.to(device)

        # skip degenerate batches: a single node, or every node assigned to graph 0
        if batch.x.shape[0] == 1 or batch.batch[-1] == 0:
            continue
        pred = model(batch)
        optimizer.zero_grad()
        # NaN targets mark unlabeled entries; NaN != NaN filters them out
        is_labeled = batch.y == batch.y
        criterion = cls_criterion if "classification" in task_type else reg_criterion
        loss = criterion(pred.to(torch.float32)[is_labeled], batch.y.to(torch.float32)[is_labeled])
        loss.backward()
        optimizer.step()
def eval(model, device, loader, evaluator):
    """Evaluate `model` on `loader`; returns (metric dict, y_true, y_pred).

    Note: intentionally keeps the existing name even though it shadows the
    builtin `eval` — callers throughout this file rely on it.
    """
    model.eval()
    true_chunks = []
    pred_chunks = []

    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = batch.to(device)

        # single-node batches are skipped, mirroring train()
        if batch.x.shape[0] == 1:
            continue
        with torch.no_grad():
            pred = model(batch)
        true_chunks.append(batch.y.view(pred.shape).detach().cpu())
        pred_chunks.append(pred.detach().cpu())

    y_true = torch.cat(true_chunks, dim=0).numpy()
    y_pred = torch.cat(pred_chunks, dim=0).numpy()

    return evaluator.eval({"y_true": y_true, "y_pred": y_pred}), y_true, y_pred
def main():
    """CLI entry point: load a trained GNN checkpoint and dump test-set
    true/predicted values to a JSON file named after the run settings."""
    # Training settings
    parser = argparse.ArgumentParser(description='GNN baselines on ogbgmol* data with Pytorch Geometrics')
    parser.add_argument('--device', type=int, default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--gnn', type=str, default='gin-virtual',
                        help='GNN gin, gin-virtual, or gcn, or gcn-virtual (default: gin-virtual)')
    parser.add_argument('--num_layer', type=int, default=5,
                        help='number of GNN message passing layers (default: 5)')
    parser.add_argument('--drop_ratio', type=float, default=0.5,
                        help='dropout ratio (default: 0.5)')
    parser.add_argument('--emb_dim', type=int, default=300,
                        help='dimensionality of hidden units in GNNs (default: 300)')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='input batch size for training (default: 32)')
    parser.add_argument('--dataset', type=str, default="ogbg-molhiv",
                        help='dataset name (default: ogbg-molhiv)')
    args = parser.parse_args()
    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    ### automatic dataloading and splitting
    dataset = PygGraphPropPredDataset(name = args.dataset)
    split_idx = dataset.get_idx_split()
    ### automatic evaluator. takes dataset name as input
    evaluator = Evaluator(args.dataset)
    test_loader = DataLoader(dataset[split_idx["test"]], batch_size=args.batch_size, shuffle=False, num_workers = 0)
    # Instantiate the requested architecture; all variants share the same
    # task-count / depth / width / dropout hyper-parameters.
    if args.gnn == 'gin':
        model = GNN(gnn_type = 'gin', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device)
    elif args.gnn == 'gin-virtual':
        model = GNN(gnn_type = 'gin', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device)
    elif args.gnn == 'gcn':
        model = GNN(gnn_type = 'gcn', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device)
    elif args.gnn == 'gcn-virtual':
        model = GNN(gnn_type = 'gcn', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device)
    elif args.gnn == 'arma':
        model = ARMA.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'film':
        model = film.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'sgn':
        model = sgn.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'sage':
        model = sage.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'gat':
        model = gat.Net(heads=8, num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'pna':
        # PNA needs the in-degree histogram of the training split up front
        deg = torch.zeros(30, dtype=torch.long)
        train_dataset = dataset[split_idx["train"]]
        for data in train_dataset:
            d = degree(data.edge_index[1], num_nodes=data.num_nodes, dtype=torch.long)
            deg += torch.bincount(d, minlength=deg.numel())
        model = pna.Net(deg=deg, num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'pan':
        model = pan.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'unet':
        model= unet.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'rgcn':
        model = rgcn.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    elif args.gnn == 'ggnn':
        # NOTE(review): `import ggnn` is commented out at the top of this file,
        # so selecting --gnn ggnn raises NameError here — confirm intent.
        model = ggnn.Net(num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio).to(device)
    else:
        raise ValueError('Invalid GNN type')
    optimizer = optim.Adam(model.parameters(), lr=0.0005)
    scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.8, patience=10,min_lr=0.00001)
    # Restore model/optimizer state saved during training.
    PATH='model/'+args.dataset + '_'+ args.gnn+ '_layer_'+ str(args.num_layer)+'_model.pt'
    checkpoint = torch.load(PATH)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    loss = checkpoint['loss']
    test_perf, t_true, t_pred = eval(model, device, test_loader, evaluator)
    # Flatten the (num_graphs, num_tasks) arrays into plain lists for JSON.
    test_true_value=reduce(operator.add, t_true.tolist())
    test_pred_value=reduce(operator.add, t_pred.tolist())
    f = open('inf_'+args.dataset + '_'+ args.gnn+ '_layer_'+ str(args.num_layer)+ '.json', 'w')
    result=dict(test_true=test_true_value, test_pred=test_pred_value)
    json.dump(result, f)
    f.close()
if __name__ == "__main__":
    main()  # standard CLI entry point
| 7,700 | 43.514451 | 183 | py |
LOSTIN | LOSTIN-main/GNN-supernode/node_encoder.py | import torch
from features import get_node_feature_dims, get_edge_feature_dims
# Per-feature vocabulary sizes used to size the embedding tables below.
full_node_feature_dims = get_node_feature_dims()
full_edge_feature_dims = get_edge_feature_dims()
class NodeEncoder(torch.nn.Module):
    """Embed integer node features by summing one embedding table per feature column."""

    def __init__(self, emb_dim):
        super(NodeEncoder, self).__init__()
        self.node_embedding_list = torch.nn.ModuleList()
        for feature_dim in full_node_feature_dims:
            table = torch.nn.Embedding(feature_dim, emb_dim)
            torch.nn.init.xavier_uniform_(table.weight.data)
            self.node_embedding_list.append(table)

    def forward(self, x):
        # Feature column 0 is intentionally skipped (its lookup was commented
        # out in the original implementation).
        return sum(
            self.node_embedding_list[col](x[:, col]) for col in range(1, x.shape[1])
        )
class EdgeEncoder(torch.nn.Module):
    """Embed integer edge features by summing one embedding table per feature column."""

    def __init__(self, emb_dim):
        super(EdgeEncoder, self).__init__()
        self.edge_embedding_list = torch.nn.ModuleList()
        for feature_dim in full_edge_feature_dims:
            table = torch.nn.Embedding(feature_dim, emb_dim)
            torch.nn.init.xavier_uniform_(table.weight.data)
            self.edge_embedding_list.append(table)

    def forward(self, edge_attr):
        # Every edge-feature column participates in the sum.
        return sum(
            self.edge_embedding_list[col](edge_attr[:, col])
            for col in range(edge_attr.shape[1])
        )
if __name__ == '__main__':
    # Smoke test: embed node/edge features of one graph from the dataset.
    from dataset_pyg import PygGraphPropPredDataset
    dataset = PygGraphPropPredDataset(name = 'node_embedding_area')
    node_enc = NodeEncoder(2)
    edge_enc = EdgeEncoder(5)
    print(node_enc(dataset[1].x))
    print(edge_enc(dataset[1].edge_attr))
| 1,815 | 28.770492 | 90 | py |
LOSTIN | LOSTIN-main/GNN-supernode/test_evaluation.py | import torch
from torch_geometric.loader import DataLoader
from torch.utils.data import TensorDataset
import torch.optim as optim
import torch.nn.functional as F
from gnn import GNN
from torch.optim.lr_scheduler import ReduceLROnPlateau
from tqdm import tqdm
import argparse
import time
import numpy as np
import json
import operator
from functools import reduce
### importing OGB
from dataset_pyg import PygGraphPropPredDataset
from evaluate import Evaluator
# Module-level loss functions; train() picks one per batch based on `task_type`.
cls_criterion = torch.nn.BCEWithLogitsLoss()  # binary (multi-task) classification
reg_criterion = torch.nn.MSELoss()  # regression
#reg_criterion=torch.nn.SmoothL1Loss(reduction='mean', beta=1.0)
def gen_batch_dat(batch, graphs):
    """Materialize node/edge data for `batch` by looking up each selected graph.

    `batch.graph_selection[idx]` indexes into `graphs`; the chosen graphs'
    node features, edge indices and edge attributes are concatenated and a
    `batch.batch` graph-assignment vector is built. Mutates and returns
    `batch`. Assumes `batch.y` is non-empty.
    """
    edge_indices, edge_attrs, node_feats, assignments = [], [], [], []
    for idx in range(len(batch.y)):
        graph = graphs[int(batch.graph_selection[idx])]
        edge_indices.append(graph.edge_index)
        edge_attrs.append(graph.edge_attr)
        node_feats.append(graph.x)
        # every node of this graph belongs to example `idx`
        assignments.append(idx + torch.zeros(len(graph.x)))
    # single concatenation per field instead of re-concatenating inside the
    # loop, which copied all previous tensors on every iteration (O(n^2))
    batch.edge_index = torch.cat(edge_indices, 1)
    batch.edge_attr = torch.cat(edge_attrs, 0)
    batch.x = torch.cat(node_feats, 0)
    batch.batch = torch.cat(assignments, 0).to(torch.long)
    return batch
def train(model, device, loader, optimizer, task_type, graphs):
    """One training epoch; batches are materialized via gen_batch_dat first."""
    model.train()
    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = gen_batch_dat(batch, graphs).to(device)
        # skip degenerate batches: a single node, or every node assigned to graph 0
        if batch.x.shape[0] == 1 or batch.batch[-1] == 0:
            continue
        pred = model(batch)
        optimizer.zero_grad()
        # NaN targets mark unlabeled entries; NaN != NaN filters them out
        is_labeled = batch.y == batch.y
        criterion = cls_criterion if "classification" in task_type else reg_criterion
        loss = criterion(pred.to(torch.float32)[is_labeled], batch.y.to(torch.float32)[is_labeled])
        loss.backward()
        optimizer.step()
def eval(model, device, loader, evaluator, graphs):
    """Evaluate `model` on `loader`; returns (metric dict, y_true, y_pred).

    Note: intentionally keeps the existing name even though it shadows the
    builtin `eval` — callers throughout this file rely on it.
    """
    model.eval()
    true_chunks = []
    pred_chunks = []
    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = gen_batch_dat(batch, graphs).to(device)
        # single-node batches are skipped, mirroring train()
        if batch.x.shape[0] == 1:
            continue
        with torch.no_grad():
            pred = model(batch)
        true_chunks.append(batch.y.view(pred.shape).detach().cpu())
        pred_chunks.append(pred.detach().cpu())
    y_true = torch.cat(true_chunks, dim=0).numpy()
    y_pred = torch.cat(pred_chunks, dim=0).numpy()
    return evaluator.eval({"y_true": y_true, "y_pred": y_pred}), y_true, y_pred
def main():
    """CLI entry point: restore a trained GIN/GCN checkpoint and evaluate it on
    fixed-size slices of the dataset, one per Verilog design, saving the true
    and predicted delays as .npy files."""
    # Training settings
    parser = argparse.ArgumentParser(description='GNN baselines on ogbgmol* data with Pytorch Geometrics')
    parser.add_argument('--device', type=int, default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--gnn', type=str, default='gin-virtual',
                        help='GNN gin, gin-virtual, or gcn, or gcn-virtual (default: gin-virtual)')
    parser.add_argument('--drop_ratio', type=float, default=0.5,
                        help='dropout ratio (default: 0.5)')
    parser.add_argument('--num_layer', type=int, default=10,
                        help='number of GNN message passing layers (default: 5)')
    parser.add_argument('--emb_dim', type=int, default=8,
                        help='dimensionality of hidden units in GNNs (default: 300)')
    parser.add_argument('--virtual_emb_dim', type=int, default=25,
                        help='dimensionality of hidden units of virtual node in GNNs (default: 25)')
    parser.add_argument('--batch_size', type=int, default=128,
                        help='input batch size for training (default: 32)')
    parser.add_argument('--epochs', type=int, default=20,
                        help='number of epochs to train (default: 300)')
    parser.add_argument('--num_workers', type=int, default=0,
                        help='number of workers (default: 0)')
    parser.add_argument('--dataset', type=str, default="pita_delay",
                        help='dataset name (default: ogbg-molhiv)')
    parser.add_argument('--ckpt', type=str, default="TBD",
                        help='checkpoint file path')
    parser.add_argument('--feature', type=str, default="full",
                        help='full feature or simple feature')
    parser.add_argument('--filename', type=str, default="",
                        help='filename to output result (default: )')
    args = parser.parse_args()
    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    ### automatic dataloading and splitting
    dataset = PygGraphPropPredDataset(name = args.dataset)
    if args.feature == 'full':
        pass
    elif args.feature == 'simple':
        print('using simple feature')
        # only retain the top two node/edge features
        dataset.data.x = dataset.data.x[:,:2]
        dataset.data.edge_attr = dataset.data.edge_attr[:,:2]
    ### automatic evaluator. takes dataset name as input
    evaluator = Evaluator(args.dataset)
    verilog_list = ['adder', 'arbiter', 'bar', 'div', 'log2', 'max', 'multiplier', 'sin', 'sqrt', 'square', 'voter']
    graphs = dataset.graphs
    if args.gnn == 'gin':
        model = GNN(gnn_type = 'gin', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device)
    elif args.gnn == 'gin-virtual':
        model = GNN(gnn_type = 'gin', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device)
    elif args.gnn == 'gcn':
        model = GNN(gnn_type = 'gcn', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device)
    elif args.gnn == 'gcn-virtual':
        model = GNN(gnn_type = 'gcn', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device)
    else:
        raise ValueError('Invalid GNN type')
    # Restore the trained weights; optimizer/scheduler are created for API
    # compatibility but never stepped in this script.
    ckpt = args.ckpt
    ckpt_path = f'model/{ckpt}.pt' # args['ckpt']
    model.load_state_dict(torch.load(ckpt_path)['model_state_dict'])
    optimizer = optim.Adam(model.parameters(), lr=0.0005)
    scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.8, patience=10,min_lr=0.00001)
    # NOTE(review): these accumulators are never used in this script.
    valid_curve = []
    test_curve = []
    train_curve = []
    test_predict_value= []
    test_true_value= []
    valid_predict_value= []
    valid_true_value= []
    for idx, verilog in enumerate(verilog_list):
        # each design occupies a 300000-sample stride of the dataset; only the
        # first 3000 samples per design are evaluated here — confirm the stride
        # against the dataset builder.
        data = DataLoader(dataset[300000*idx:(300000*idx+3000)], batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
        test_perf, t_true, t_pred = eval(model, device, data, evaluator, graphs)
        print("Done with evaluation design:", verilog)
        print(test_perf)
        np.save(f'delays/{verilog}_true.npy', t_true)
        np.save(f'delays/{verilog}_pred.npy', t_pred)
if __name__ == "__main__":
    main()  # standard CLI entry point
| 7,667 | 40.225806 | 183 | py |
LOSTIN | LOSTIN-main/GNN-supernode/evaluate.py | from sklearn.metrics import roc_auc_score, average_precision_score
import pandas as pd
import os
import numpy as np
# torch is optional: it is only used to convert tensor inputs to numpy in
# _parse_and_check_input, so fall back to None when it is not installed.
try:
    import torch
except ImportError:
    torch = None
class Evaluator:
    def __init__(self, name):
        # `name` must match a column of master.csv (stored next to this file),
        # which holds per-dataset metadata: task count and evaluation metric.
        self.name = name
        meta_info = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col = 0)
        if not self.name in meta_info:
            print(self.name)  # NOTE(review): stray debug print — consider removing
            error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
            error_mssg += 'Available datasets are as follows:\n'
            error_mssg += '\n'.join(meta_info.keys())
            raise ValueError(error_mssg)
        # e.g. num_tasks=1, eval_metric in {'rocauc','ap','rmse','rmae','acc','F1'}
        self.num_tasks = int(meta_info[self.name]['num tasks'])
        self.eval_metric = meta_info[self.name]['eval metric']
def _parse_and_check_input(self, input_dict):
if self.eval_metric == 'rocauc' or self.eval_metric == 'ap' or self.eval_metric == 'rmse' or self.eval_metric == 'rmae' or self.eval_metric == 'acc':
if not 'y_true' in input_dict:
raise RuntimeError('Missing key of y_true')
if not 'y_pred' in input_dict:
raise RuntimeError('Missing key of y_pred')
y_true, y_pred = input_dict['y_true'], input_dict['y_pred']
'''
y_true: numpy ndarray or torch tensor of shape (num_graph, num_tasks)
y_pred: numpy ndarray or torch tensor of shape (num_graph, num_tasks)
'''
# converting to torch.Tensor to numpy on cpu
if torch is not None and isinstance(y_true, torch.Tensor):
y_true = y_true.detach().cpu().numpy()
if torch is not None and isinstance(y_pred, torch.Tensor):
y_pred = y_pred.detach().cpu().numpy()
## check type
if not (isinstance(y_true, np.ndarray) and isinstance(y_true, np.ndarray)):
raise RuntimeError('Arguments to Evaluator need to be either numpy ndarray or torch tensor')
if not y_true.shape == y_pred.shape:
raise RuntimeError('Shape of y_true and y_pred must be the same')
if not y_true.ndim == 2:
raise RuntimeError('y_true and y_pred mush to 2-dim arrray, {}-dim array given'.format(y_true.ndim))
if not y_true.shape[1] == self.num_tasks:
raise RuntimeError('Number of tasks for {} should be {} but {} given'.format(self.name, self.num_tasks, y_true.shape[1]))
return y_true, y_pred
elif self.eval_metric == 'F1':
if not 'seq_ref' in input_dict:
raise RuntimeError('Missing key of seq_ref')
if not 'seq_pred' in input_dict:
raise RuntimeError('Missing key of seq_pred')
seq_ref, seq_pred = input_dict['seq_ref'], input_dict['seq_pred']
if not isinstance(seq_ref, list):
raise RuntimeError('seq_ref must be of type list')
if not isinstance(seq_pred, list):
raise RuntimeError('seq_pred must be of type list')
if len(seq_ref) != len(seq_pred):
raise RuntimeError('Length of seq_true and seq_pred should be the same')
return seq_ref, seq_pred
else:
raise ValueError('Undefined eval metric %s ' % (self.eval_metric))
def eval(self, input_dict):
if self.eval_metric == 'rocauc':
y_true, y_pred = self._parse_and_check_input(input_dict)
return self._eval_rocauc(y_true, y_pred)
if self.eval_metric == 'ap':
y_true, y_pred = self._parse_and_check_input(input_dict)
return self._eval_ap(y_true, y_pred)
elif self.eval_metric == 'rmse':
y_true, y_pred = self._parse_and_check_input(input_dict)
return self._eval_rmse(y_true, y_pred)
elif self.eval_metric == 'rmae':
y_true, y_pred = self._parse_and_check_input(input_dict)
return self._eval_rmae(y_true, y_pred)
elif self.eval_metric == 'acc':
y_true, y_pred = self._parse_and_check_input(input_dict)
return self._eval_acc(y_true, y_pred)
elif self.eval_metric == 'F1':
seq_ref, seq_pred = self._parse_and_check_input(input_dict)
return self._eval_F1(seq_ref, seq_pred)
else:
raise ValueError('Undefined eval metric %s ' % (self.eval_metric))
@property
def expected_input_format(self):
desc = '==== Expected input format of Evaluator for {}\n'.format(self.name)
if self.eval_metric == 'rocauc' or self.eval_metric == 'ap':
desc += '{\'y_true\': y_true, \'y_pred\': y_pred}\n'
desc += '- y_true: numpy ndarray or torch tensor of shape (num_graph, num_task)\n'
desc += '- y_pred: numpy ndarray or torch tensor of shape (num_graph, num_task)\n'
desc += 'where y_pred stores score values (for computing AUC score),\n'
desc += 'num_task is {}, and '.format(self.num_tasks)
desc += 'each row corresponds to one graph.\n'
desc += 'nan values in y_true are ignored during evaluation.\n'
elif self.eval_metric == 'rmse':
desc += '{\'y_true\': y_true, \'y_pred\': y_pred}\n'
desc += '- y_true: numpy ndarray or torch tensor of shape (num_graph, num_task)\n'
desc += '- y_pred: numpy ndarray or torch tensor of shape (num_graph, num_task)\n'
desc += 'where num_task is {}, and '.format(self.num_tasks)
desc += 'each row corresponds to one graph.\n'
desc += 'nan values in y_true are ignored during evaluation.\n'
elif self.eval_metric == 'rmae':
desc += '{\'y_true\': y_true, \'y_pred\': y_pred}\n'
desc += '- y_true: numpy ndarray or torch tensor of shape (num_graph, num_task)\n'
desc += '- y_pred: numpy ndarray or torch tensor of shape (num_graph, num_task)\n'
desc += 'where num_task is {}, and '.format(self.num_tasks)
desc += 'each row corresponds to one graph.\n'
desc += 'nan values in y_true are ignored during evaluation.\n'
elif self.eval_metric == 'acc':
desc += '{\'y_true\': y_true, \'y_pred\': y_pred}\n'
desc += '- y_true: numpy ndarray or torch tensor of shape (num_node, num_task)\n'
desc += '- y_pred: numpy ndarray or torch tensor of shape (num_node, num_task)\n'
desc += 'where y_pred stores predicted class label (integer),\n'
desc += 'num_task is {}, and '.format(self.num_tasks)
desc += 'each row corresponds to one graph.\n'
elif self.eval_metric == 'F1':
desc += '{\'seq_ref\': seq_ref, \'seq_pred\': seq_pred}\n'
desc += '- seq_ref: a list of lists of strings\n'
desc += '- seq_pred: a list of lists of strings\n'
desc += 'where seq_ref stores the reference sequences of sub-tokens, and\n'
desc += 'seq_pred stores the predicted sequences of sub-tokens.\n'
else:
raise ValueError('Undefined eval metric %s ' % (self.eval_metric))
return desc
@property
def expected_output_format(self):
desc = '==== Expected output format of Evaluator for {}\n'.format(self.name)
if self.eval_metric == 'rocauc':
desc += '{\'rocauc\': rocauc}\n'
desc += '- rocauc (float): ROC-AUC score averaged across {} task(s)\n'.format(self.num_tasks)
elif self.eval_metric == 'ap':
desc += '{\'ap\': ap}\n'
desc += '- ap (float): Average Precision (AP) score averaged across {} task(s)\n'.format(self.num_tasks)
elif self.eval_metric == 'rmse':
desc += '{\'rmse\': rmse}\n'
desc += '- rmse (float): root mean squared error averaged across {} task(s)\n'.format(self.num_tasks)
elif self.eval_metric == 'rmae':
desc += '{\'rmae\': rmae}\n'
desc += '- rmae (float): root mean squared error averaged across {} task(s)\n'.format(self.num_tasks)
elif self.eval_metric == 'acc':
desc += '{\'acc\': acc}\n'
desc += '- acc (float): Accuracy score averaged across {} task(s)\n'.format(self.num_tasks)
elif self.eval_metric == 'F1':
desc += '{\'F1\': F1}\n'
desc += '- F1 (float): F1 score averaged over samples.\n'
else:
raise ValueError('Undefined eval metric %s ' % (self.eval_metric))
return desc
def _eval_rocauc(self, y_true, y_pred):
'''
compute ROC-AUC averaged across tasks
'''
rocauc_list = []
for i in range(y_true.shape[1]):
#AUC is only defined when there is at least one positive data.
if np.sum(y_true[:,i] == 1) > 0 and np.sum(y_true[:,i] == 0) > 0:
# ignore nan values
is_labeled = y_true[:,i] == y_true[:,i]
rocauc_list.append(roc_auc_score(y_true[is_labeled,i], y_pred[is_labeled,i]))
if len(rocauc_list) == 0:
raise RuntimeError('No positively labeled data available. Cannot compute ROC-AUC.')
return {'rocauc': sum(rocauc_list)/len(rocauc_list)}
def _eval_ap(self, y_true, y_pred):
'''
compute Average Precision (AP) averaged across tasks
'''
ap_list = []
for i in range(y_true.shape[1]):
#AUC is only defined when there is at least one positive data.
if np.sum(y_true[:,i] == 1) > 0 and np.sum(y_true[:,i] == 0) > 0:
# ignore nan values
is_labeled = y_true[:,i] == y_true[:,i]
ap = average_precision_score(y_true[is_labeled,i], y_pred[is_labeled,i])
ap_list.append(ap)
if len(ap_list) == 0:
raise RuntimeError('No positively labeled data available. Cannot compute Average Precision.')
return {'ap': sum(ap_list)/len(ap_list)}
def _eval_rmse(self, y_true, y_pred):
'''
compute RMSE score averaged across tasks
'''
rmse_list = []
for i in range(y_true.shape[1]):
# ignore nan values
is_labeled = y_true[:,i] == y_true[:,i]
rmse_list.append(np.sqrt(((y_true[is_labeled] - y_pred[is_labeled])**2).mean()))
return {'rmse': sum(rmse_list)/len(rmse_list)}
def _eval_rmae(self, y_true, y_pred):
'''
compute RMAE score averaged across tasks
'''
error = np.sum(np.abs((y_true - y_pred) / y_true))
rmae = error / len(y_true)
return {'rmae': rmae}
def _eval_acc(self, y_true, y_pred):
acc_list = []
for i in range(y_true.shape[1]):
is_labeled = y_true[:,i] == y_true[:,i]
correct = y_true[is_labeled,i] == y_pred[is_labeled,i]
acc_list.append(float(np.sum(correct))/len(correct))
return {'acc': sum(acc_list)/len(acc_list)}
def _eval_F1(self, seq_ref, seq_pred):
# '''
# compute F1 score averaged over samples
# '''
precision_list = []
recall_list = []
f1_list = []
for l, p in zip(seq_ref, seq_pred):
label = set(l)
prediction = set(p)
true_positive = len(label.intersection(prediction))
false_positive = len(prediction - label)
false_negative = len(label - prediction)
if true_positive + false_positive > 0:
precision = true_positive / (true_positive + false_positive)
else:
precision = 0
if true_positive + false_negative > 0:
recall = true_positive / (true_positive + false_negative)
else:
recall = 0
if precision + recall > 0:
f1 = 2 * precision * recall / (precision + recall)
else:
f1 = 0
precision_list.append(precision)
recall_list.append(recall)
f1_list.append(f1)
return {'precision': np.average(precision_list),
'recall': np.average(recall_list),
'F1': np.average(f1_list)}
if __name__ == '__main__':
    # Smoke-test the Evaluator on toy inputs, one block per supported metric.
    # F1 / sub-token sequence case (ogbg-code).
    evaluator = Evaluator('ogbg-code')
    print(evaluator.expected_input_format)
    print(evaluator.expected_output_format)
    seq_ref = [['tom', 'is'], ['he'], ['he'], ['hey', 'fea', 'he'], ['alpha'], ['fe4qfq', 'beta'], ['aa']]
    seq_pred = [['tom', 'is'], ['he'], ['he'], ['hey', 'he', 'fea'], ['alpha'], ['beta', 'fe4qfq'], ['aa']] # [['tom', 'is'] , ['he'], ['the', 'he'], ['hey', 'fea', 'he'], ['alpha'], ['beta', 'fe4qfq', 'c', 'fe4qf'], ['']]
    input_dict = {'seq_ref': seq_ref, 'seq_pred': seq_pred}
    result = evaluator.eval(input_dict)
    print(result)
    # exit(-1)
    # Multi-task binary classification with torch tensors (AP metric).
    evaluator = Evaluator('ogbg-molpcba')
    print(evaluator.expected_input_format)
    print(evaluator.expected_output_format)
    y_true = torch.tensor(np.random.randint(2, size = (100,128)))
    y_pred = torch.tensor(np.random.randn(100,128))
    input_dict = {'y_true': y_true, 'y_pred': y_pred}
    result = evaluator.eval(input_dict)
    print(result)
    # Single-task binary classification (ROC-AUC metric).
    evaluator = Evaluator('ogbg-molhiv')
    print(evaluator.expected_input_format)
    print(evaluator.expected_output_format)
    y_true = torch.tensor(np.random.randint(2, size = (100,1)))
    y_pred = torch.tensor(np.random.randn(100,1))
    input_dict = {'y_true': y_true, 'y_pred': y_pred}
    result = evaluator.eval(input_dict)
    print(result)
    ### rmse case
    evaluator = Evaluator('ogbg-mollipo')
    print(evaluator.expected_input_format)
    print(evaluator.expected_output_format)
    y_true = np.random.randn(100,1)
    y_pred = np.random.randn(100,1)
    input_dict = {'y_true': y_true, 'y_pred': y_pred}
    result = evaluator.eval(input_dict)
    print(result)
    ### acc
    evaluator = Evaluator('ogbg-ppa')
    print(evaluator.expected_input_format)
    print(evaluator.expected_output_format)
    y_true = np.random.randint(5, size = (100,1))
    y_pred = np.random.randint(5, size = (100,1))
    input_dict = {'y_true': y_true, 'y_pred': y_pred}
    result = evaluator.eval(input_dict)
    print(result)
| 14,336 | 40.677326 | 222 | py |
LOSTIN | LOSTIN-main/GNN-supernode/read_graph_pyg.py | import pandas as pd
import torch
from torch_geometric.data import Data
import os.path as osp
import numpy as np
from read_graph_raw import read_csv_graph_raw
from tqdm import tqdm
def read_graph_pyg(raw_dir, add_inverse_edge = False, additional_node_files = None, additional_edge_files = None, binary = False):
    """Read raw CSV graphs and convert them into PyG ``Data`` objects.

    Args:
        raw_dir: directory containing the raw CSV graph files.
        add_inverse_edge: forwarded to ``read_csv_graph_raw``.
        additional_node_files / additional_edge_files: optional lists of extra
            per-node / per-edge attribute file names (default: none).
        binary: NOTE(review) accepted but unused here — read_csv_graph_raw is
            not given this flag; confirm whether it should be forwarded.

    Returns:
        (pyg_graph_list, graph_info_container): the converted graphs and, per
        entry of the raw super-node arrays, a ``Data`` holding the super-node
        features and the graph-selection indices.
    """
    # BUG FIX: the original used mutable default arguments ([]), which are
    # shared across calls; use None sentinels instead (backward compatible).
    if additional_node_files is None:
        additional_node_files = []
    if additional_edge_files is None:
        additional_edge_files = []
    graph_list, super_node, graph_selection = read_csv_graph_raw(raw_dir, add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files)
    pyg_graph_list = []
    graph_info_container = []
    for i in range(len(super_node)):
        data = Data()
        data.super_node = torch.from_numpy(super_node[i])
        data.graph_selection = torch.from_numpy(graph_selection[i])
        graph_info_container.append(data)
    print('Converting graphs into PyG objects...')
    for graph in tqdm(graph_list):
        g = Data()
        g.__num_nodes__ = graph['num_nodes']
        g.edge_index = torch.from_numpy(graph['edge_index'])
        # Free the raw dict entries as we copy them into the Data object.
        del graph['num_nodes']
        del graph['edge_index']
        if graph['edge_feat'] is not None:
            g.edge_attr = torch.from_numpy(graph['edge_feat'])
            del graph['edge_feat']
        if graph['node_feat'] is not None:
            g.x = torch.from_numpy(graph['node_feat'])
            del graph['node_feat']
        for key in additional_node_files:
            g[key] = torch.from_numpy(graph[key])
            del graph[key]
        for key in additional_edge_files:
            g[key] = torch.from_numpy(graph[key])
            del graph[key]
        pyg_graph_list.append(g)
    return pyg_graph_list, graph_info_container
if __name__ == '__main__':
pass | 1,697 | 29.321429 | 185 | py |
LOSTIN | LOSTIN-main/GNN-supernode/main_gnn.py | import torch
from torch_geometric.loader import DataLoader
import torch.optim as optim
import torch.nn.functional as F
from gnn import GNN
from torch.optim.lr_scheduler import ReduceLROnPlateau
from tqdm import tqdm
import argparse
import time
import numpy as np
import json
import operator
from functools import reduce
### importing OGB
from dataset_pyg import PygGraphPropPredDataset
from evaluate import Evaluator
# Loss functions shared by train(): BCE-with-logits for (multi-task) binary
# classification tasks, MSE for regression tasks.
cls_criterion = torch.nn.BCEWithLogitsLoss()
reg_criterion = torch.nn.MSELoss()
#reg_criterion=torch.nn.SmoothL1Loss(reduction='mean', beta=1.0)
def gen_batch_dat(batch, graphs):
    """Fill ``batch`` with node/edge tensors gathered from ``graphs``.

    For each label in ``batch.y``, the graph indexed by
    ``batch.graph_selection[idx]`` is looked up in ``graphs`` and its
    ``edge_index``/``edge_attr``/``x`` tensors are concatenated; a
    node-to-graph assignment vector is built as ``batch.batch``.

    Args:
        batch: object with ``y`` and ``graph_selection`` tensors; its
            ``edge_index``, ``edge_attr``, ``x`` and ``batch`` attributes are
            overwritten in place.
        graphs: indexable collection of graph objects with ``edge_index``,
            ``edge_attr`` and ``x`` tensors.

    Returns:
        The same ``batch`` object, mutated.
    """
    # PERF FIX: the original torch.cat-ed inside the loop (O(n^2) copying);
    # accumulate in lists and concatenate once at the end instead.
    edge_indices = []
    edge_attrs = []
    xs = []
    assignments = []
    for idx in range(len(batch.y)):
        g = graphs[int(batch.graph_selection[idx])]
        edge_indices.append(g.edge_index)
        edge_attrs.append(g.edge_attr)
        xs.append(g.x)
        # Every node of this graph belongs to slot `idx` of the mini-batch.
        assignments.append(idx + torch.zeros(len(g.x)))
    batch.edge_index = torch.cat(edge_indices, 1)
    batch.edge_attr = torch.cat(edge_attrs, 0)
    batch.x = torch.cat(xs, 0)
    batch.batch = torch.cat(assignments, 0).to(torch.long)
    return batch
def train(model, device, loader, optimizer, task_type, graphs):
    """Run one training epoch over ``loader``.

    Skips degenerate batches (a single node, or a single graph in the batch,
    which would break BatchNorm). nan targets are masked out of the loss.
    """
    model.train()
    use_cls_loss = "classification" in task_type
    criterion = cls_criterion if use_cls_loss else reg_criterion
    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = gen_batch_dat(batch, graphs).to(device)
        if batch.x.shape[0] == 1 or batch.batch[-1] == 0:
            continue
        pred = model(batch)
        optimizer.zero_grad()
        # ignore nan targets (unlabeled) when computing training loss
        # (nan != nan, so this mask drops them).
        is_labeled = batch.y == batch.y
        loss = criterion(pred.to(torch.float32)[is_labeled],
                         batch.y.to(torch.float32)[is_labeled])
        loss.backward()
        optimizer.step()
def eval(model, device, loader, evaluator, graphs):
    """Run inference over ``loader`` and score it with ``evaluator``.

    NOTE: this module-level name shadows the builtin ``eval``; it is kept
    unchanged because main() calls it by this name.

    Returns:
        (metrics_dict, y_true, y_pred) with y_true/y_pred as numpy arrays.
    """
    model.eval()
    true_chunks = []
    pred_chunks = []
    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = gen_batch_dat(batch, graphs).to(device)
        # Skip single-node batches (mirrors the guard used in train()).
        if batch.x.shape[0] == 1:
            continue
        with torch.no_grad():
            pred = model(batch)
        true_chunks.append(batch.y.view(pred.shape).detach().cpu())
        pred_chunks.append(pred.detach().cpu())
    y_true = torch.cat(true_chunks, dim = 0).numpy()
    y_pred = torch.cat(pred_chunks, dim = 0).numpy()
    return evaluator.eval({"y_true": y_true, "y_pred": y_pred}), y_true, y_pred
def main():
    """CLI entry point: build the dataset and model, train, validate.

    Saves the best-validation checkpoint under model/ and writes the
    validation curve, predictions and targets to a JSON file.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='GNN baselines on ogbgmol* data with Pytorch Geometrics')
    parser.add_argument('--device', type=int, default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--gnn', type=str, default='gin-virtual',
                        help='GNN gin, gin-virtual, or gcn, or gcn-virtual (default: gin-virtual)')
    parser.add_argument('--drop_ratio', type=float, default=0.5,
                        help='dropout ratio (default: 0.5)')
    parser.add_argument('--num_layer', type=int, default=10,
                        help='number of GNN message passing layers (default: 5)')
    parser.add_argument('--emb_dim', type=int, default=8,
                        help='dimensionality of hidden units in GNNs (default: 300)')
    parser.add_argument('--virtual_emb_dim', type=int, default=25,
                        help='dimensionality of hidden units of virtual node in GNNs (default: 25)')
    parser.add_argument('--batch_size', type=int, default=128,
                        help='input batch size for training (default: 32)')
    parser.add_argument('--epochs', type=int, default=20,
                        help='number of epochs to train (default: 300)')
    parser.add_argument('--num_workers', type=int, default=0,
                        help='number of workers (default: 0)')
    parser.add_argument('--dataset', type=str, default="pita_area",
                        help='dataset name (default: ogbg-molhiv)')
    parser.add_argument('--feature', type=str, default="full",
                        help='full feature or simple feature')
    parser.add_argument('--filename', type=str, default="",
                        help='filename to output result (default: )')
    args = parser.parse_args()
    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    ### automatic dataloading and splitting
    dataset = PygGraphPropPredDataset(name = args.dataset)
    if args.feature == 'full':
        pass
    elif args.feature == 'simple':
        print('using simple feature')
        # only retain the top two node/edge features
        dataset.data.x = dataset.data.x[:,:2]
        dataset.data.edge_attr = dataset.data.edge_attr[:,:2]
    ### automatic evaluator. takes dataset name as input
    evaluator = Evaluator(args.dataset)
    # [‘adder’, ‘arbiter’, ‘bar’, ‘div’, ‘log2’, ‘max’] / [‘multiplier’, ‘sin’, ‘sqrt’, ‘square’, ‘voter’]
    # NOTE(review): the split boundaries and ratios below are hard-coded for a
    # specific dataset size (3.3M samples) — confirm before reusing.
    data_0, dataset_test_0 = dataset[0:1800000], dataset[1800000:3300000]
    dataset_ratio = [660000, 165000, 975000]
    dataset_train, dataset_valid, dataset_test_1 = torch.utils.data.random_split(data_0, dataset_ratio)
    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    valid_loader = DataLoader(dataset_valid, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    # test_loader = DataLoader(dataset_test_0 + dataset_test_1, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    graphs = dataset.graphs
    # Select the architecture variant requested on the command line.
    if args.gnn == 'gin':
        model = GNN(gnn_type = 'gin', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device)
    elif args.gnn == 'gin-virtual':
        model = GNN(gnn_type = 'gin', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device)
    elif args.gnn == 'gcn':
        model = GNN(gnn_type = 'gcn', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device)
    elif args.gnn == 'gcn-virtual':
        model = GNN(gnn_type = 'gcn', num_tasks = dataset.num_tasks, num_layer = args.num_layer, emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device)
    else:
        raise ValueError('Invalid GNN type')
    optimizer = optim.Adam(model.parameters(), lr=0.0005)
    # NOTE(review): scheduler is created but never stepped in this loop.
    scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.8, patience=10,min_lr=0.00001)
    valid_curve = []
    test_curve = []
    train_curve = []
    test_predict_value= []
    test_true_value= []
    valid_predict_value= []
    valid_true_value= []
    for epoch in range(1, args.epochs + 1):
        print("=====Epoch {}".format(epoch))
        print('Training...')
        train(model, device, train_loader, optimizer, dataset.task_type, graphs)
        print('Evaluating...')
        # train_perf, _, _ = eval(model, device, train_loader, evaluator, graphs)
        valid_perf, v_true, v_pred= eval(model, device, valid_loader, evaluator, graphs)
        # test_perf, t_true, t_pred = eval(model, device, test_loader, evaluator, graphs)
        print({'Validation': valid_perf})
        # train_curve.append(train_perf[dataset.eval_metric])
        valid_curve.append(valid_perf[dataset.eval_metric])
        # test_curve.append(test_perf[dataset.eval_metric])
        # test_predict_value.append(reduce(operator.add, t_pred.tolist()))
        valid_predict_value.append(reduce(operator.add, v_pred.tolist()))
        # test_loss=test_perf[dataset.eval_metric]
        valid_loss=valid_perf[dataset.eval_metric]
        # Checkpoint whenever the metric reaches a new minimum.
        # NOTE(review): this treats the metric as lower-is-better, yet
        # best_val_epoch below uses argmax for classification — confirm the
        # intended direction for classification datasets.
        if valid_loss<=np.min(np.array(valid_curve)):
            PATH='model/1_'+args.dataset + '_'+ args.gnn+ '_layer_'+ str(args.num_layer)+'_model.pt'
            torch.save({'epoch': epoch,
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'loss': valid_loss
                        }, PATH)
            # test_true_value=reduce(operator.add, t_true.tolist())
            valid_true_value=reduce(operator.add, v_true.tolist())
    if 'classification' in dataset.task_type:
        best_val_epoch = np.argmax(np.array(valid_curve))
    else:
        best_val_epoch = np.argmin(np.array(valid_curve))
    print('Finished training!')
    print('Best validation score: {}'.format(valid_curve[best_val_epoch]))
    # Dump the validation curve and best-epoch predictions/targets to JSON.
    f = open('1_'+args.dataset + '_'+ args.gnn+ '_layer_'+ str(args.num_layer)+ '.json', 'w')
    result=dict(val=valid_curve[best_val_epoch],
                valid_pred=valid_predict_value,
                valid_true=valid_true_value,
                valid_curve=valid_curve)
    json.dump(result, f)
    f.close()
    if not args.filename == '':
        torch.save({'Val': valid_curve[best_val_epoch]}, args.filename)
if __name__ == "__main__":
    # Script entry point.
    main()
| 9,703 | 41.375546 | 183 | py |
LOSTIN | LOSTIN-main/GNN-supernode/gnn.py | import torch
from torch_geometric.nn import MessagePassing,BatchNorm
from torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool, GlobalAttention, Set2Set
import torch.nn.functional as F
from torch_geometric.nn.inits import uniform
from torch.nn import Sequential, ReLU, Linear, ModuleList
from conv import GNN_node, GNN_node_Virtualnode
from torch_scatter import scatter_mean
class GNN(torch.nn.Module):
    """Graph-level prediction model: a node-embedding GNN followed by a
    pooling layer and a linear readout head."""

    def __init__(self, num_tasks, num_layer = 5, emb_dim = 300,
                    gnn_type = 'gin', virtual_node = True, residual = False, drop_ratio = 0.5, JK = "sum", graph_pooling = "sum"):
        '''
            num_tasks (int): number of labels to be predicted
            virtual_node (bool): whether to add virtual node or not
            num_layer (int): number of message-passing layers (must be >= 2)
            emb_dim (int): node embedding dimensionality
            gnn_type (str): 'gin' or 'gcn'
            residual (bool): add residual connections between layers
            drop_ratio (float): dropout probability
            JK (str): jumping-knowledge mode, 'last' or 'sum'
            graph_pooling (str): 'sum', 'mean', 'max', 'attention' or 'set2set'
        '''
        super(GNN, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        self.emb_dim = emb_dim
        self.num_tasks = num_tasks
        self.graph_pooling = graph_pooling
        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")
        ### GNN to generate node embeddings
        if virtual_node:
            self.gnn_node = GNN_node_Virtualnode(num_layer, emb_dim, JK = JK, drop_ratio = drop_ratio, residual = residual, gnn_type = gnn_type)
        else:
            self.gnn_node = GNN_node(num_layer, emb_dim, JK = JK, drop_ratio = drop_ratio, residual = residual, gnn_type = gnn_type)
        ### Pooling function to generate whole-graph embeddings
        if self.graph_pooling == "sum":
            self.pool = global_add_pool
        elif self.graph_pooling == "mean":
            self.pool = global_mean_pool
        elif self.graph_pooling == "max":
            self.pool = global_max_pool
        elif self.graph_pooling == "attention":
            self.pool = GlobalAttention(gate_nn = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), torch.nn.Linear(2*emb_dim, 1)))
        elif self.graph_pooling == "set2set":
            # set2set doubles the embedding width of the pooled output.
            self.pool = Set2Set(emb_dim, processing_steps = 2)
        else:
            raise ValueError("Invalid graph pooling type.")
        # Readout head. NOTE(review): only graph_pred_linear[2] is used in
        # forward() — the first two linears and graph_norm are dead weights
        # kept for the commented-out deeper head below.
        self.graph_pred_linear=ModuleList()
        self.graph_norm=ModuleList()
        if graph_pooling == "set2set":
            self.graph_pred_linear.append(Linear(2*emb_dim, 2*emb_dim))
            self.graph_pred_linear.append(Linear(2*emb_dim, emb_dim))
            self.graph_pred_linear.append(Linear(emb_dim, self.num_tasks))
            self.graph_norm.append(BatchNorm(2*emb_dim))
            self.graph_norm.append(BatchNorm(emb_dim))
        else:
            self.graph_pred_linear.append(Linear(emb_dim, 2*emb_dim))
            self.graph_pred_linear.append(Linear(2*emb_dim, emb_dim))
            self.graph_pred_linear.append(Linear(emb_dim, self.num_tasks))
            self.graph_norm.append(BatchNorm(2*emb_dim))
            self.graph_norm.append(BatchNorm(emb_dim))

    def forward(self, batched_data):
        """Embed nodes, pool to a graph embedding, and apply the readout.

        NOTE(review): for graph_pooling == 'set2set' the pooled embedding is
        2*emb_dim wide but is fed into graph_pred_linear[2], which expects
        emb_dim inputs — confirm set2set is actually supported here.
        """
        h_node = self.gnn_node(batched_data)
        h_graph = self.pool(h_node, batched_data.batch)
        ### final predictions
        #h_graph = self.graph_pred_linear[0](h_graph)
        #h_graph = self.graph_norm[0](h_graph)
        #h_graph = F.dropout(F.relu(h_graph), self.drop_ratio, training = self.training)
        #h_graph = self.graph_pred_linear[1](h_graph)
        #h_graph = self.graph_norm[1](h_graph)
        #h_graph = F.dropout(F.relu(h_graph), self.drop_ratio, training = self.training)
        return self.graph_pred_linear[2](h_graph)
if __name__ == '__main__':
GNN(num_tasks = 10) | 3,673 | 39.822222 | 188 | py |
LOSTIN | LOSTIN-main/GNN-supernode/conv.py | import torch
from torch_geometric.nn import MessagePassing
import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool, global_add_pool
from node_encoder import NodeEncoder,EdgeEncoder
from torch_geometric.utils import degree
import math
### GIN convolution along the graph structure
class GINConv(MessagePassing):
    """GIN convolution with edge features folded into the messages.

    NOTE(review): message()/update() parameter names (x_j, edge_attr) are
    resolved reflectively by MessagePassing.propagate — do not rename them.
    """
    def __init__(self, emb_dim):
        '''
            emb_dim (int): node embedding dimensionality
        '''
        super(GINConv, self).__init__(aggr = "add")
        # 2-layer MLP applied after neighborhood aggregation.
        self.mlp = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), torch.nn.Linear(2*emb_dim, emb_dim))
        # Learnable epsilon weight for the central node, initialised to 0.
        self.eps = torch.nn.Parameter(torch.Tensor([0]))
        self.edge_encoder = EdgeEncoder(emb_dim = emb_dim)

    def forward(self, x, edge_index, edge_attr):
        edge_embedding = self.edge_encoder(edge_attr)
        # (1 + eps) * x_i + sum_j relu(x_j + e_ij), then the MLP.
        out = self.mlp((1 + self.eps) *x + self.propagate(edge_index, x=x, edge_attr=edge_embedding))
        return out

    def message(self, x_j, edge_attr):
        # Message from neighbor j: its embedding shifted by the edge embedding.
        return F.relu(x_j + edge_attr)

    def update(self, aggr_out):
        # Identity update: aggregation result is used as-is.
        return aggr_out
### GCN convolution along the graph structure
class GCNConv(MessagePassing):
    """GCN convolution with edge features and symmetric degree normalization,
    plus a learnable self-loop ("root") term.

    NOTE(review): message() parameter names (x_j, edge_attr, norm) are
    resolved reflectively by MessagePassing.propagate — do not rename them.
    """
    def __init__(self, emb_dim):
        super(GCNConv, self).__init__(aggr='add')
        self.linear = torch.nn.Linear(emb_dim, emb_dim)
        # Single embedding row used as the learnable self-loop contribution.
        self.root_emb = torch.nn.Embedding(1, emb_dim)
        self.edge_encoder = EdgeEncoder(emb_dim = emb_dim)

    def forward(self, x, edge_index, edge_attr):
        x = self.linear(x)
        edge_embedding = self.edge_encoder(edge_attr)
        row, col = edge_index
        #edge_weight = torch.ones((edge_index.size(1), ), device=edge_index.device)
        # +1 accounts for the implicit self-loop added via root_emb below.
        deg = degree(row, x.size(0), dtype = x.dtype) + 1
        deg_inv_sqrt = deg.pow(-0.5)
        deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
        # Symmetric normalization: 1/sqrt(deg_i * deg_j) per edge.
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
        return self.propagate(edge_index, x=x, edge_attr = edge_embedding, norm=norm) + F.relu(x + self.root_emb.weight) * 1./deg.view(-1,1)

    def message(self, x_j, edge_attr, norm):
        return norm.view(-1, 1) * F.relu(x_j + edge_attr)

    def update(self, aggr_out):
        # Identity update: aggregation result is used as-is.
        return aggr_out
### GNN to generate node embedding
class GNN_node(torch.nn.Module):
    """
    Stack of GIN/GCN message-passing layers producing node embeddings.

    Output:
        node representations
    """
    def __init__(self, num_layer, emb_dim, drop_ratio = 0.5, JK = "last", residual = False, gnn_type = 'gin'):
        '''
            emb_dim (int): node embedding dimensionality
            num_layer (int): number of GNN message passing layers
            drop_ratio (float): dropout probability applied after each layer
            JK (str): jumping-knowledge mode, 'last' or 'sum'
            residual (bool): add residual connections between layers
            gnn_type (str): 'gin' or 'gcn'
        '''
        super(GNN_node, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual
        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")
        self.node_encoder = NodeEncoder(emb_dim)
        ###List of GNNs
        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()
        for layer in range(num_layer):
            if gnn_type == 'gin':
                self.convs.append(GINConv(emb_dim))
            elif gnn_type == 'gcn':
                self.convs.append(GCNConv(emb_dim))
            else:
                raise ValueError('Undefined GNN type called {}'.format(gnn_type))
            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))

    def forward(self, batched_data):
        """Encode node features, run num_layer conv+BN+dropout layers, and
        combine the per-layer embeddings according to self.JK."""
        x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch
        ### computing input node embedding
        h_list = [self.node_encoder(x)]
        for layer in range(self.num_layer):
            h = self.convs[layer](h_list[layer], edge_index, edge_attr)
            h = self.batch_norms[layer](h)
            if layer == self.num_layer - 1:
                #remove relu for the last layer
                h = F.dropout(h, self.drop_ratio, training = self.training)
            else:
                h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)
            if self.residual:
                h += h_list[layer]
            h_list.append(h)
        ### Different implementations of Jk-concat
        if self.JK == "last":
            node_representation = h_list[-1]
        elif self.JK == "sum":
            # Sums h_list[0..num_layer-1]; note this includes the input
            # encoding but excludes the final layer's output.
            node_representation = 0
            for layer in range(self.num_layer):
                node_representation += h_list[layer]
        return node_representation
### Virtual GNN to generate node embedding
class GNN_node_Virtualnode(torch.nn.Module):
    """
    Stack of GIN/GCN layers with a per-graph virtual (super) node that
    exchanges messages with all graph nodes at every layer.

    Output:
        node representations
    """
    def __init__(self, num_layer, emb_dim, drop_ratio = 0.5, JK = "last", residual = False, gnn_type = 'gin'):
        '''
            emb_dim (int): node embedding dimensionality
            num_layer (int): number of GNN message passing layers
            drop_ratio (float): dropout probability applied after each layer
            JK (str): jumping-knowledge mode, 'last' or 'sum'
            residual (bool): add residual connections between layers
            gnn_type (str): 'gin' or 'gcn'
        '''
        super(GNN_node_Virtualnode, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual
        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")
        self.node_encoder = NodeEncoder(emb_dim)
        ### set the initial virtual node embedding to 0.
        self.virtualnode_embedding = torch.nn.Embedding(1, emb_dim)
        torch.nn.init.constant_(self.virtualnode_embedding.weight.data, 0)
        # Projects the 25-dim super-node features into the embedding space.
        # NOTE(review): output width is hard-coded to 8, and the result is
        # added to an emb_dim-wide embedding in forward() — this only works
        # when emb_dim == 8 (the script default); confirm before changing
        # emb_dim.
        self.supernode_layer = torch.nn.Linear(25, 8)
        ### List of GNNs
        self.convs = torch.nn.ModuleList()
        ### batch norms applied to node embeddings
        self.batch_norms = torch.nn.ModuleList()
        ### List of MLPs to transform virtual node at every layer
        self.mlp_virtualnode_list = torch.nn.ModuleList()
        for layer in range(num_layer):
            if gnn_type == 'gin':
                self.convs.append(GINConv(emb_dim))
            elif gnn_type == 'gcn':
                self.convs.append(GCNConv(emb_dim))
            else:
                raise ValueError('Undefined GNN type called {}'.format(gnn_type))
            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
        for layer in range(num_layer - 1):
            self.mlp_virtualnode_list.append(torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), \
                torch.nn.Linear(2*emb_dim, emb_dim), torch.nn.BatchNorm1d(emb_dim), torch.nn.ReLU()))

    def forward(self, batched_data):
        x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch
        ### virtual node embeddings for graphs
        # One virtual node per graph, initialised from the zero embedding plus
        # the projected super-node features.
        virtualnode_data = self.supernode_layer(batched_data.super_node.to(torch.float))
        virtualnode_embedding = self.virtualnode_embedding(torch.zeros(batch[-1].item() + 1).to(edge_index.dtype).to(edge_index.device))
        virtualnode_embedding += virtualnode_data
        h_list = [self.node_encoder(x)]
        for layer in range(self.num_layer):
            ### add message from virtual nodes to graph nodes
            h_list[layer] = h_list[layer] + virtualnode_embedding[batch]
            ### Message passing among graph nodes
            h = self.convs[layer](h_list[layer], edge_index, edge_attr)
            h = self.batch_norms[layer](h)
            if layer == self.num_layer - 1:
                #remove relu for the last layer
                h = F.dropout(h, self.drop_ratio, training = self.training)
            else:
                h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)
            if self.residual:
                h = h + h_list[layer]
            h_list.append(h)
            ### update the virtual nodes
            if layer < self.num_layer - 1:
                ### add message from graph nodes to virtual nodes
                virtualnode_embedding_temp = global_add_pool(h_list[layer], batch) + virtualnode_embedding
                ### transform virtual nodes using MLP
                if self.residual:
                    virtualnode_embedding = virtualnode_embedding + F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
                else:
                    virtualnode_embedding = F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
        ### Different implementations of Jk-concat
        if self.JK == "last":
            node_representation = h_list[-1]
        elif self.JK == "sum":
            # Sums h_list[0..num_layer-1]; note this includes the input
            # encoding but excludes the final layer's output.
            node_representation = 0
            for layer in range(self.num_layer):
                node_representation += h_list[layer]
        return node_representation
if __name__ == "__main__":
    # This module only defines GNN layers; no standalone behavior.
    pass
| 8,910 | 35.520492 | 182 | py |
LOSTIN | LOSTIN-main/GNN-supernode/dataset_pyg.py | from torch_geometric.data import InMemoryDataset
import pandas as pd
import shutil, os
import os.path as osp
import torch
import numpy as np
from read_graph_pyg import read_graph_pyg
class PygGraphPropPredDataset(InMemoryDataset):
def __init__(self, name, root = 'dataset', transform=None, pre_transform = None, meta_dict = None):
'''
- name (str): name of the dataset
- root (str): root directory to store the dataset folder
- transform, pre_transform (optional): transform/pre-transform graph objects
- meta_dict: dictionary that stores all the meta-information about data. Default is None,
but when something is passed, it uses its information. Useful for debugging for external contributers.
'''
self.name = name ## original name, e.g., ogbg-molhiv
if meta_dict is None:
self.dir_name = '_'.join(name.split('-'))
# check if previously-downloaded folder exists.
# If so, use that one.
if osp.exists(osp.join(root, self.dir_name + '_pyg')):
self.dir_name = self.dir_name + '_pyg'
self.original_root = root
self.root = osp.join(root, self.dir_name)
master = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col = 0)
if not self.name in master:
error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
error_mssg += 'Available datasets are as follows:\n'
error_mssg += '\n'.join(master.keys())
raise ValueError(error_mssg)
self.meta_info = master[self.name]
else:
self.dir_name = meta_dict['dir_path']
self.original_root = ''
self.root = meta_dict['dir_path']
self.meta_info = meta_dict
self.download_name = self.meta_info['download_name'] ## name of downloaded file, e.g., tox21
self.num_tasks = int(self.meta_info['num tasks'])
self.eval_metric = self.meta_info['eval metric']
self.task_type = self.meta_info['task type']
self.__num_classes__ = int(self.meta_info['num classes'])
self.binary = self.meta_info['binary'] == 'True'
super(PygGraphPropPredDataset, self).__init__(self.root, transform, pre_transform)
self.graphs = torch.load(self.processed_paths[0])
self.data, self.slices = torch.load(self.processed_paths[1])
def get_idx_split(self, split_type = None):
if split_type is None:
split_type = self.meta_info['split']
path = osp.join(self.root, 'split', split_type)
# short-cut if split_dict.pt exists
if os.path.isfile(os.path.join(path, 'split_dict.pt')):
return torch.load(os.path.join(path, 'split_dict.pt'))
train_idx = pd.read_csv(osp.join(path, 'train.csv.gz'), compression='gzip', header = None).values.T[0]
valid_idx = pd.read_csv(osp.join(path, 'valid.csv.gz'), compression='gzip', header = None).values.T[0]
test_idx = pd.read_csv(osp.join(path, 'test.csv.gz'), compression='gzip', header = None).values.T[0]
return {'train': torch.tensor(train_idx, dtype = torch.long), 'valid': torch.tensor(valid_idx, dtype = torch.long), 'test': torch.tensor(test_idx, dtype = torch.long)}
    @property
    def num_classes(self):
        # Number of target classes, as parsed from the dataset meta info
        # ('num classes' column) in __init__.
        return self.__num_classes__
@property
def raw_file_names(self):
if self.binary:
return ['data.npz']
else:
file_names = ['edge']
if self.meta_info['has_node_attr'] == 'True':
file_names.append('node-feat')
if self.meta_info['has_edge_attr'] == 'True':
file_names.append('edge-feat')
return [file_name + '.csv.gz' for file_name in file_names]
    @property
    def processed_file_names(self):
        # NOTE(review): returns a tuple of two names -- one file for the PyG
        # data list and one for the collated graph info (see process()).
        return 'geometric_data_processed.pt', 'graph_info_processed.pt'
    def process(self):
        """Convert raw graph files into PyG objects, attach labels, and save.

        Writes the raw data list to processed_paths[0] and the collated
        graph-info tensors to processed_paths[1].
        """
        ### read pyg graph list
        add_inverse_edge = self.meta_info['add_inverse_edge'] == 'True'
        if self.meta_info['additional node files'] == 'None':
            additional_node_files = []
        else:
            additional_node_files = self.meta_info['additional node files'].split(',')
        if self.meta_info['additional edge files'] == 'None':
            additional_edge_files = []
        else:
            additional_edge_files = self.meta_info['additional edge files'].split(',')
        # NOTE(review): this unpacks two lists from read_graph_pyg -- confirm
        # the helper in use returns (data_list, g_info_list) and not a single
        # list as some versions do.
        data_list, g_info_list = read_graph_pyg(self.raw_dir, add_inverse_edge = add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files, binary=self.binary)
        if self.task_type == 'subtoken prediction':
            # Subtoken targets are lists of whitespace-separated tokens,
            # attached directly (no tensor conversion).
            graph_label_notparsed = pd.read_csv(osp.join(self.raw_dir, 'graph-label.csv.gz'), compression='gzip', header = None).values
            graph_label = [str(graph_label_notparsed[i][0]).split(' ') for i in range(len(graph_label_notparsed))]
            for i, g in enumerate(data_list):
                g.y = graph_label[i]
        else:
            if self.binary:
                graph_label = np.load(osp.join(self.raw_dir, 'graph-label.npz'))['graph_label']
            else:
                graph_label = pd.read_csv(osp.join(self.raw_dir, 'graph-label.csv.gz'), compression='gzip', header = None).values
            has_nan = np.isnan(graph_label).any()
            # NOTE(review): labels go onto g_info_list here, but the subtoken
            # branch above attaches them to data_list -- confirm intended.
            for i, g in enumerate(g_info_list):
                if 'classification' in self.task_type:
                    if has_nan:
                        # Keep float dtype so NaN labels survive for masking.
                        g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.float32)
                    else:
                        g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.long)
                else:
                    g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.float32)
        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]
        # g_data, g_slices = self.collate(data_list)
        data, slices = self.collate(g_info_list)
        self.slices = slices
        print('Saving...')
        torch.save(data_list, self.processed_paths[0])
        torch.save((data, slices), self.processed_paths[1])
| 6,385 | 41.291391 | 212 | py |
LOSTIN | LOSTIN-main/GNN-LSTM/main_gnn_customized_delay.py | ### Libraries
import numpy as np
import argparse
from tqdm import tqdm
import matplotlib.pyplot as plts
import pandas as pd
import torch
# Preliminaries
# torchtext 0.6.0
from torchtext.data import Field, TabularDataset, BucketIterator
# Models
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn import ReLU, Linear, BatchNorm1d, ModuleList
# Training
import torch.optim as optim
# Evaluation
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
# graph loading dependency
from torch_geometric.data import DataLoader
from dataset_pyg import PygGraphPropPredDataset
from gnn import GNN
### dir
data_folder = 'lstm/data_area'
destination_folder = 'model_ckt/area'
# Hybrid model
class Hybridmodel(nn.Module):
    """Hybrid regressor: LSTM over the synthesis-flow tokens + GIN over the
    design graph, concatenated and fed through a 3-layer MLP head that
    outputs one scalar per sample.
    """
    def __init__(self, input_dim, emb_dim, hidden_dim=64, graph_emb=11):
        super(Hybridmodel, self).__init__()
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.lstm = nn.LSTM(input_size=emb_dim, hidden_size=hidden_dim, num_layers=2,
                            batch_first=True, bidirectional=False)
        # GNN producing one graph_emb-dimensional vector per design graph.
        self.gmodel = GNN(gnn_type = 'gin', num_tasks = 1, num_layer = 2, emb_dim = graph_emb, drop_ratio = 0.5, virtual_node = False)
        self.linear=ModuleList()
        self.linear.append(Linear(hidden_dim + graph_emb, 100))
        self.linear.append(Linear(100,100))
        self.linear.append(Linear(100,1))
        self.norm=ModuleList()
        self.norm.append(BatchNorm1d(100))
        self.norm.append(BatchNorm1d(100))
    def forward(self, text, text_len, graph_batch):
        # text: (batch, seq) token ids; text_len: per-sample lengths (CPU).
        text_emb = self.embedding(text)
        packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False)
        packed_output, _ = self.lstm(packed_input)
        output, _ = pad_packed_sequence(packed_output, batch_first=True)
        # Use the last time step of the LSTM output as the flow embedding.
        out = output[:, -1, :]
        g_emb = self.gmodel(graph_batch)
        # NOTE(review): text[:,0]-7 maps the first token (presumably a design
        # id) to a row of the graph embeddings; the constant 7 looks like a
        # vocab offset for special/other tokens -- confirm against the vocab.
        combined_emb = torch.cat((out, g_emb[text[:,0]-7]),1)
        flow_fea=F.relu(self.linear[0](combined_emb))
        flow_fea=self.norm[0](flow_fea)
        flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
        flow_fea=F.relu(self.linear[1](flow_fea))
        flow_fea=self.norm[1](flow_fea)
        flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
        flow_out=self.linear[2](flow_fea)
        # Squeeze (batch, 1) -> (batch,) to match the label shape.
        flow_out = torch.squeeze(flow_out, 1)
        return flow_out
# Save Functions
def save_checkpoint(save_path, model, optimizer, valid_loss):
    """Persist model and optimizer state together with the validation loss.

    Args:
        save_path: destination file path; if None, saving is skipped.
        model: nn.Module whose state_dict is saved.
        optimizer: optimizer whose state_dict is saved.
        valid_loss: validation loss recorded alongside the weights.
    """
    # Identity comparison with None is the correct idiom (was `== None`).
    if save_path is None:
        return
    state_dict = {'model_state_dict': model.state_dict(),
                  'optimizer_state_dict': optimizer.state_dict(),
                  'valid_loss': valid_loss}
    torch.save(state_dict, save_path)
    print(f'Model saved to ==> {save_path}')
def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):
    """Persist loss histories and their global-step positions.

    Args:
        save_path: destination file path; if None, saving is skipped.
        train_loss_list: average training losses, one per evaluation.
        valid_loss_list: average validation losses, one per evaluation.
        global_steps_list: global step at which each evaluation happened.
    """
    # Identity comparison with None is the correct idiom (was `== None`).
    if save_path is None:
        return
    state_dict = {'train_loss_list': train_loss_list,
                  'valid_loss_list': valid_loss_list,
                  'global_steps_list': global_steps_list}
    torch.save(state_dict, save_path)
    print(f'Model saved to ==> {save_path}')
# Training Function
def training(model, device,
             optimizer,
             train_loader,
             valid_loader,
             graph_loader,
             num_epochs,
             eval_every,
             args,
             criterion = nn.MSELoss(),
             file_path = destination_folder,
             best_valid_loss = float("Inf"), best_train_loss = float("Inf")):
    """Train the hybrid model with periodic validation and checkpointing.

    A checkpoint (plus metric histories) is written whenever the combined
    train+valid loss improves on the best seen so far.

    Args:
        eval_every: number of optimizer steps between validation passes.
        criterion: regression loss; MSE by default.
    """
    # initialize running values
    running_loss = 0.0
    valid_running_loss = 0.0
    global_step = 0
    train_loss_list = []
    valid_loss_list = []
    global_steps_list = []
    # read graphs
    # NOTE(review): this loop keeps only the LAST batch yielded by
    # graph_loader -- correct only if all design graphs fit into a single
    # batch (batch_size=32 in main). Confirm.
    for graph_batch in graph_loader:
        graph_batch = graph_batch.to(device)
    # training loop
    model.train()
    for epoch in range(num_epochs):
        for ((flow, flow_len), labels), _ in tqdm(train_loader, desc="Iteration"):
            labels = labels.to(device)
            flow = flow.to(device)
            flow_len = flow_len.to("cpu")  # lengths stay on CPU
            output = model(flow, flow_len, graph_batch)
            loss = criterion(output, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # update running values
            running_loss += loss.item()
            global_step += 1
            # evaluation step
            if global_step % eval_every == 0:
                model.eval()
                with torch.no_grad():
                    # validation loop
                    for ((flow, flow_len), labels), _ in valid_loader:
                        labels = labels.to(device)
                        flow = flow.to(device)
                        flow_len = flow_len.to("cpu")
                        output = model(flow, flow_len, graph_batch)
                        loss = criterion(output, labels)
                        valid_running_loss += loss.item()
                # evaluation
                average_train_loss = running_loss / eval_every
                average_valid_loss = valid_running_loss / len(valid_loader)
                train_loss_list.append(average_train_loss)
                valid_loss_list.append(average_valid_loss)
                global_steps_list.append(global_step)
                # resetting running values
                running_loss = 0.0
                valid_running_loss = 0.0
                model.train()
                # print progress
                print('Epoch [{}/{}], Step [{}/{}], Train Loss: {:.4f}, Valid Loss: {:.4f}'
                      .format(epoch+1, num_epochs, global_step, num_epochs*len(train_loader),
                              average_train_loss, average_valid_loss))
                # checkpoint when the combined train+valid loss improves
                if best_valid_loss + best_train_loss > average_valid_loss + average_train_loss:
                    best_valid_loss = average_valid_loss
                    best_train_loss = average_train_loss
                    save_checkpoint(file_path + '/model_batch_'+str(args.batch_size)+'.pt', model, optimizer, best_valid_loss)
                    save_metrics(file_path + '/metrics_batch_'+str(args.batch_size)+'.pt', train_loss_list, valid_loss_list, global_steps_list)
    save_metrics(file_path + '/metrics_batch_'+str(args.batch_size)+'.pt', train_loss_list, valid_loss_list, global_steps_list)
    print('Finished Training!')
def main():
    """Parse arguments, build data pipelines and train the hybrid model."""
    # arguments
    parser = argparse.ArgumentParser(description='Customized model for flow perf prediction')
    parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)')
    parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train (default: 300)')
    parser.add_argument('--emb_dim', type=int, default=10, help='dimensionality of input embedding of transformations')
    parser.add_argument('--graph_emb', type=int, default=32, help='dimensionality of hidden units in GNNs')
    args = parser.parse_args()
    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    # Load graphs
    pyg_dataset = PygGraphPropPredDataset(name = 'vgraph')
    graph_loader = DataLoader(pyg_dataset, batch_size=32, shuffle=False)
    # Fields (torchtext 0.6 legacy API)
    delay_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
    flow_field = Field(lower=True, include_lengths=True, batch_first=True)
    fields = [ ('flow', flow_field), ('delay', delay_field)]
    # TabularDataset
    print('Data loading ...')
    train, valid, test = TabularDataset.splits(path=data_folder, train='train.csv', validation='valid.csv', test='test.csv',
                                               format='CSV', fields=fields, skip_header=True)
    # Iterators
    train_iter = BucketIterator(train, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    valid_iter = BucketIterator(valid, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    #test_iter = BucketIterator(test, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    # Vocabulary
    # NOTE(review): specials_first=False changes the token index layout;
    # presumably the model's text[:,0]-7 graph lookup relies on it -- confirm.
    flow_field.build_vocab(train, min_freq=1, specials_first = False)
    learning_rate=2e-3
    weight_decay=2e-6
    model = Hybridmodel(input_dim=len(flow_field.vocab), emb_dim=args.emb_dim, graph_emb=args.graph_emb).to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    training(model = model, device = device, optimizer = optimizer, args = args,\
             train_loader = train_iter, valid_loader = valid_iter, graph_loader = graph_loader, eval_every = len(train_iter), \
             num_epochs=args.epochs)
# Script entry point.
if __name__ == "__main__":
    main()
LOSTIN | LOSTIN-main/GNN-LSTM/node_encoder.py | import torch
from features import get_node_feature_dims, get_edge_feature_dims
full_node_feature_dims = get_node_feature_dims()
full_edge_feature_dims = get_edge_feature_dims()
class NodeEncoder(torch.nn.Module):
    """Sum of per-feature embeddings for integer node features.

    One embedding table per node-feature dimension (sizes from
    full_node_feature_dims); the outputs are summed into one emb_dim vector
    per node.
    """
    def __init__(self, emb_dim):
        super(NodeEncoder, self).__init__()
        self.node_embedding_list = torch.nn.ModuleList()
        for i, dim in enumerate(full_node_feature_dims):
            emb = torch.nn.Embedding(dim, emb_dim)
            torch.nn.init.xavier_uniform_(emb.weight.data)
            self.node_embedding_list.append(emb)
    def forward(self, x):
        x_embedding = 0
        #x_embedding = self.node_embedding_list[0](x[:,0])
        # NOTE(review): feature column 0 is deliberately excluded (the
        # commented-out line above once embedded it) -- presumably an id-like
        # field; confirm against the feature definitions.
        for i in range(1, x.shape[1]):
            x_embedding += self.node_embedding_list[i](x[:,i])
            #x_embedding = torch.cat((x_embedding, self.node_embedding_list[i](x[:,i])),1)
        return x_embedding
class EdgeEncoder(torch.nn.Module):
    """Sum of per-feature embeddings for integer edge attributes.

    One Xavier-initialized embedding table per edge-feature dimension
    (sizes from full_edge_feature_dims); outputs are summed per edge.
    """
    def __init__(self, emb_dim):
        super(EdgeEncoder, self).__init__()
        self.edge_embedding_list = torch.nn.ModuleList()
        for dim in full_edge_feature_dims:
            table = torch.nn.Embedding(dim, emb_dim)
            torch.nn.init.xavier_uniform_(table.weight.data)
            self.edge_embedding_list.append(table)
    def forward(self, edge_attr):
        total = 0
        for idx in range(edge_attr.shape[1]):
            total = total + self.edge_embedding_list[idx](edge_attr[:, idx])
        return total
if __name__ == '__main__':
    # Quick smoke test: encode the nodes/edges of the first two graphs.
    from dataset_pyg import PygGraphPropPredDataset
    dataset = PygGraphPropPredDataset(name = 'vgraph')
    node_enc = NodeEncoder(2)
    edge_enc = EdgeEncoder(5)
    print(node_enc(dataset[1].x).shape)
    print(node_enc(dataset[0].x).shape)
    print(edge_enc(dataset[1].edge_attr).shape[1])
| 1,857 | 28.967742 | 90 | py |
LOSTIN | LOSTIN-main/GNN-LSTM/read_graph_pyg.py | import pandas as pd
import torch
from torch_geometric.data import Data
import os.path as osp
import numpy as np
from read_graph_raw import read_csv_graph_raw
from tqdm import tqdm
def read_graph_pyg(raw_dir, add_inverse_edge = False, additional_node_files = [], additional_edge_files = [], binary = False):
    """Read raw csv graphs and convert each into a PyG Data object.

    Args:
        raw_dir: directory containing the raw graph files.
        add_inverse_edge: forwarded to the raw csv reader.
        additional_node_files / additional_edge_files: extra per-node /
            per-edge attribute files to attach on each Data object.
        binary: accepted for interface compatibility (unused here).
    """
    raw_graphs = read_csv_graph_raw(raw_dir, add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files)
    converted = []
    print('Converting graphs into PyG objects...')
    for raw in tqdm(raw_graphs):
        data = Data()
        data.__num_nodes__ = raw.pop('num_nodes')
        data.edge_index = torch.from_numpy(raw.pop('edge_index'))
        edge_feat = raw.pop('edge_feat')
        if edge_feat is not None:
            data.edge_attr = torch.from_numpy(edge_feat)
        node_feat = raw.pop('node_feat')
        if node_feat is not None:
            data.x = torch.from_numpy(node_feat)
        # Attach any extra node/edge attribute arrays under their file keys.
        for key in additional_node_files + additional_edge_files:
            data[key] = torch.from_numpy(raw.pop(key))
        converted.append(data)
    return converted
# No standalone behavior; module is import-only.
if __name__ == '__main__':
    pass
LOSTIN | LOSTIN-main/GNN-LSTM/main_gnn_customized_inference.py | ### Libraries
# torchtext 0.6.0
import numpy as np
import argparse
from tqdm import tqdm
# Libraries
import matplotlib.pyplot as plts
import pandas as pd
import torch
# Preliminaries
from torchtext.data import Field, TabularDataset, BucketIterator
# Models
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn import ReLU, Linear, BatchNorm1d, ModuleList
# Training
import torch.optim as optim
# graph loading dependency
from torch_geometric.data import DataLoader
from dataset_pyg import PygGraphPropPredDataset
from gnn import GNN
# Hybridmodel model
class Hybridmodel(nn.Module):
    """Inference-time variant of the hybrid LSTM+GNN regressor.

    Differs from the training script's class in that the GNN type and
    depth are configurable and dropout is 0.2.
    """
    def __init__(self, input_dim, emb_dim, hidden_dim=64, graph_emb=11, model_name='gin', num_layer=5):
        super(Hybridmodel, self).__init__()
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.lstm = nn.LSTM(input_size=emb_dim, hidden_size=hidden_dim, num_layers=2,
                            batch_first=True, bidirectional=False)
        # Configurable GNN backbone ('gin' or 'gcn'); one vector per graph.
        self.gmodel = GNN(gnn_type = model_name, num_tasks = 1, num_layer = num_layer, emb_dim = graph_emb, drop_ratio = 0.2, virtual_node = False)
        self.linear=ModuleList()
        self.linear.append(Linear(hidden_dim + graph_emb, 100))
        self.linear.append(Linear(100,100))
        self.linear.append(Linear(100,1))
        self.norm=ModuleList()
        self.norm.append(BatchNorm1d(100))
        self.norm.append(BatchNorm1d(100))
    def forward(self, text, text_len, graph_batch):
        # text: (batch, seq) token ids; text_len: per-sample lengths (CPU).
        text_emb = self.embedding(text)
        packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False)
        packed_output, _ = self.lstm(packed_input)
        output, _ = pad_packed_sequence(packed_output, batch_first=True)
        out = output[:, -1, :]
        g_emb = self.gmodel(graph_batch)
        # NOTE(review): text[:,0]-7 selects the design's graph embedding by
        # the first token id minus a vocab offset of 7 -- confirm.
        combined_emb = torch.cat((out, g_emb[text[:,0]-7]),1)
        flow_fea=F.relu(self.linear[0](combined_emb))
        flow_fea=self.norm[0](flow_fea)
        flow_fea=F.dropout(flow_fea,p=0.2,training=self.training)
        flow_fea=F.relu(self.linear[1](flow_fea))
        flow_fea=self.norm[1](flow_fea)
        flow_fea=F.dropout(flow_fea,p=0.2,training=self.training)
        flow_out=self.linear[2](flow_fea)
        flow_out = torch.squeeze(flow_out, 1)
        return flow_out
def load_checkpoint(load_path, model, optimizer, device):
    """Restore model and optimizer state from a checkpoint file.

    Args:
        load_path: checkpoint path; if None, loading is skipped.
        model: nn.Module to restore in place.
        optimizer: optimizer to restore in place.
        device: map_location for torch.load (e.g. 'cpu' or a torch.device).

    Returns:
        The stored validation loss, or None when load_path is None.
    """
    # Identity comparison with None is the correct idiom (was `== None`);
    # the early exit now returns None explicitly.
    if load_path is None:
        return None
    state_dict = torch.load(load_path, map_location=device)
    print(f'Model loaded from <== {load_path}')
    model.load_state_dict(state_dict['model_state_dict'])
    optimizer.load_state_dict(state_dict['optimizer_state_dict'])
    return state_dict['valid_loss']
def main():
    """Load a trained hybrid model, run inference on the test split and
    dump per-sample predictions and relative errors to a csv file."""
    # arguments
    parser = argparse.ArgumentParser(description='LSTM baseline for flow perf prediction')
    parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)')
    parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train')
    parser.add_argument('--emb_dim', type=int, default=20, help='dimensionality of hidden units in GNNs')
    parser.add_argument('--graph_emb', type=int, default=32, help='dimensionality of hidden units in GNNs')
    parser.add_argument('--dest_folder', type=str, default='model_ckt/area', help='Destination folder that saves the model')
    parser.add_argument('--data_folder', type=str, default='lstm/data_area', help='The folder that saves the data')
    parser.add_argument('--model_name', type=str, default='gin', help='GNN model name')
    parser.add_argument('--num_layer', type=int, default=5, help='GNN model name')
    args = parser.parse_args()
    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    # Load graphs
    pyg_dataset = PygGraphPropPredDataset(name = 'vgraph')
    graph_loader = DataLoader(pyg_dataset, batch_size=32, shuffle=False)
    # Fields (torchtext 0.6 legacy API)
    area_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
    flow_field = Field(lower=True, include_lengths=True, batch_first=True)
    fields = [ ('flow', flow_field), ('area', area_field)]
    print("Loading data ...")
    # TabularDataset
    train, valid, test = TabularDataset.splits(path=args.data_folder, train='train.csv', validation='valid.csv', test='test.csv',
                                               format='CSV', fields=fields, skip_header=True)
    # Iterators; test iterator is unsorted so rows keep their file order.
    train_iter = BucketIterator(train, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    #valid_iter = BucketIterator(valid, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    test_iter = BucketIterator(test, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=False, sort_within_batch=False)
    # Vocabulary built from the training split so token ids match training.
    flow_field.build_vocab(train, min_freq=1, specials_first = False)
    learning_rate=2e-3
    weight_decay=2e-6
    model = Hybridmodel(input_dim=len(flow_field.vocab), emb_dim=args.emb_dim, graph_emb=args.graph_emb, model_name=args.model_name, num_layer=args.num_layer).to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    load_checkpoint(args.dest_folder + '/model_sum_'+args.model_name+str(args.num_layer)+'_batch_32.pt', model, optimizer, device)
    y_pred = []
    y_true = []
    relative_error = []
    flow_l = []
    design = []
    # NOTE(review): as in training, only the last graph batch is kept.
    for graph_batch in graph_loader:
        graph_batch = graph_batch.to(device)
    model.eval()
    with torch.no_grad():
        for ((flow, flow_len), labels), _ in tqdm(test_iter, desc="Iteration"):
            labels = labels.to(device)
            flow = flow.to(device)
            flow_len = flow_len.to("cpu")
            output = model(flow, flow_len, graph_batch)
            y_pred.extend(output.tolist())
            y_true.extend(labels.tolist())
            # Per-sample relative absolute error |pred - label| / label.
            rmae = np.abs(np.divide(np.subtract(output.tolist(), labels.tolist()), labels.tolist()))
            relative_error.extend(rmae)
            flow_l.extend(flow_len.tolist())
            design.extend((flow[:,0]-7).tolist())
    output = pd.DataFrame({'design_name':design, 'flow_length':flow_l, 'labels': y_true, 'prediction': y_pred, 'relative error': relative_error})
    output.to_csv('inference_'+args.dest_folder.split('/')[1]+'_'+args.model_name+str(args.num_layer)+'.csv',index=False)
    print(np.mean(relative_error))
# Script entry point.
if __name__ == "__main__":
    main()
LOSTIN | LOSTIN-main/GNN-LSTM/gnn.py | import torch
from torch_geometric.nn import MessagePassing,BatchNorm
from torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool, GlobalAttention, Set2Set
import torch.nn.functional as F
from torch_geometric.nn.inits import uniform
from torch.nn import Sequential, ReLU, Linear, ModuleList
from conv import GNN_node, GNN_node_Virtualnode
from torch_scatter import scatter_mean
class GNN(torch.nn.Module):
    """Graph-level GNN: node-embedding stack, graph pooling, and a final
    linear projection producing one emb_dim vector per graph.
    """
    def __init__(self, num_tasks, num_layer = 5, emb_dim = 100,
                 gnn_type = 'gin', virtual_node = True, residual = False, drop_ratio = 0.5, JK = "last", graph_pooling = "sum"):
        '''
            num_tasks (int): number of labels to be predicted
            virtual_node (bool): whether to add virtual node or not
        '''
        super(GNN, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        self.emb_dim = emb_dim
        # NOTE(review): num_tasks is stored but never used in forward().
        self.num_tasks = num_tasks
        self.graph_pooling = graph_pooling
        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")
        ### GNN to generate node embeddings
        if virtual_node:
            self.gnn_node = GNN_node_Virtualnode(num_layer, emb_dim, JK = JK, drop_ratio = drop_ratio, residual = residual, gnn_type = gnn_type)
        else:
            self.gnn_node = GNN_node(num_layer, emb_dim, JK = JK, drop_ratio = drop_ratio, residual = residual, gnn_type = gnn_type)
        ### Pooling function to generate whole-graph embeddings
        if self.graph_pooling == "sum":
            self.pool = global_add_pool
        elif self.graph_pooling == "mean":
            self.pool = global_mean_pool
        elif self.graph_pooling == "max":
            self.pool = global_max_pool
        elif self.graph_pooling == "attention":
            self.pool = GlobalAttention(gate_nn = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), torch.nn.Linear(2*emb_dim, 1)))
        elif self.graph_pooling == "set2set":
            self.pool = Set2Set(emb_dim, processing_steps = 2)
        else:
            raise ValueError("Invalid graph pooling type.")
        self.graph_pred_linear=ModuleList()
        # NOTE(review): graph_norm is created but never populated or used.
        self.graph_norm=ModuleList()
        # set2set doubles the pooled dimension, hence the 2*emb_dim input.
        if graph_pooling == "set2set":
            self.graph_pred_linear.append(Linear(2*emb_dim, emb_dim))
        else:
            self.graph_pred_linear.append(Linear(emb_dim, emb_dim))
    def forward(self, batched_data):
        h_node = self.gnn_node(batched_data)
        h_graph = self.pool(h_node, batched_data.batch)
        return self.graph_pred_linear[0](h_graph)
# Smoke test: construct a default GNN when run directly.
if __name__ == '__main__':
    GNN(num_tasks = 10)
LOSTIN | LOSTIN-main/GNN-LSTM/conv.py | import torch
from torch_geometric.nn import MessagePassing
import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool, global_add_pool
from node_encoder import NodeEncoder,EdgeEncoder
from torch_geometric.utils import degree
import math
### GIN convolution along the graph structure
class GINConv(MessagePassing):
    """GIN convolution with edge features and add aggregation."""
    def __init__(self, emb_dim):
        '''
            emb_dim (int): node embedding dimensionality
        '''
        super(GINConv, self).__init__(aggr = "add")
        # A single linear layer is used here instead of the usual 2-layer
        # GIN MLP (commented out below).
        #self.mlp = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), torch.nn.Linear(2*emb_dim, emb_dim))
        self.mlp = torch.nn.Sequential(torch.nn.Linear(emb_dim, emb_dim))
        # Learnable epsilon weighting the central node (GIN-eps variant).
        self.eps = torch.nn.Parameter(torch.Tensor([0]))
        self.edge_encoder = EdgeEncoder(emb_dim = emb_dim)
    def forward(self, x, edge_index, edge_attr):
        edge_embedding = self.edge_encoder(edge_attr)
        out = self.mlp((1 + self.eps) *x + self.propagate(edge_index, x=x, edge_attr=edge_embedding))
        return out
    def message(self, x_j, edge_attr):
        # Message = relu(neighbor embedding + edge embedding).
        return F.relu(x_j + edge_attr)
    def update(self, aggr_out):
        return aggr_out
### GCN convolution along the graph structure
class GCNConv(MessagePassing):
    """GCN-style convolution with edge embeddings and symmetric degree
    normalization; a learned "root" embedding plays the self-loop role."""
    def __init__(self, emb_dim):
        super(GCNConv, self).__init__(aggr='add')
        self.linear = torch.nn.Linear(emb_dim, emb_dim)
        # Embedding for the implicit self-loop ("root") contribution.
        self.root_emb = torch.nn.Embedding(1, emb_dim)
        self.edge_encoder = EdgeEncoder(emb_dim = emb_dim)
    def forward(self, x, edge_index, edge_attr):
        x = self.linear(x)
        edge_embedding = self.edge_encoder(edge_attr)
        row, col = edge_index
        #edge_weight = torch.ones((edge_index.size(1), ), device=edge_index.device)
        # +1 accounts for the implicit self-loop in the normalization.
        deg = degree(row, x.size(0), dtype = x.dtype) + 1
        deg_inv_sqrt = deg.pow(-0.5)
        deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
        # Aggregated neighbor messages plus the 1/deg-weighted root term.
        return self.propagate(edge_index, x=x, edge_attr = edge_embedding, norm=norm) + F.relu(x + self.root_emb.weight) * 1./deg.view(-1,1)
    def message(self, x_j, edge_attr, norm):
        return norm.view(-1, 1) * F.relu(x_j + edge_attr)
    def update(self, aggr_out):
        return aggr_out
### GNN to generate node embedding
class GNN_node(torch.nn.Module):
    """
    Stack of message-passing layers (GIN or GCN) with batch norm and
    dropout, producing per-node representations.

    Output:
        node representations
    """
    def __init__(self, num_layer, emb_dim, drop_ratio = 0.5, JK = "last", residual = False, gnn_type = 'gin'):
        '''
            emb_dim (int): node embedding dimensionality
            num_layer (int): number of GNN message passing layers
        '''
        super(GNN_node, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual
        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")
        self.node_encoder = NodeEncoder(emb_dim)
        ###List of GNNs
        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()
        for layer in range(num_layer):
            if gnn_type == 'gin':
                self.convs.append(GINConv(emb_dim))
            elif gnn_type == 'gcn':
                self.convs.append(GCNConv(emb_dim))
            else:
                raise ValueError('Undefined GNN type called {}'.format(gnn_type))
            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
    def forward(self, batched_data):
        x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch
        ### computing input node embedding
        h_list = [self.node_encoder(x)]
        for layer in range(self.num_layer):
            h = self.convs[layer](h_list[layer], edge_index, edge_attr)
            h = self.batch_norms[layer](h)
            if layer == self.num_layer - 1:
                #remove relu for the last layer
                h = F.dropout(h, self.drop_ratio, training = self.training)
            else:
                h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)
            if self.residual:
                h += h_list[layer]
            h_list.append(h)
        ### Different implementations of Jk-concat
        if self.JK == "last":
            node_representation = h_list[-1]
        elif self.JK == "sum":
            # NOTE(review): this sums h_list[0..num_layer-1], omitting the
            # final layer output h_list[num_layer] -- confirm intended.
            node_representation = 0
            for layer in range(self.num_layer):
                node_representation += h_list[layer]
        return node_representation
### Virtual GNN to generate node embedding
class GNN_node_Virtualnode(torch.nn.Module):
    """
    Like GNN_node, but with one virtual node per graph that exchanges
    messages with all graph nodes between layers.

    Output:
        node representations
    """
    def __init__(self, num_layer, emb_dim, drop_ratio = 0.5, JK = "last", residual = False, gnn_type = 'gin'):
        '''
            emb_dim (int): node embedding dimensionality
        '''
        super(GNN_node_Virtualnode, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual
        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")
        self.node_encoder = NodeEncoder(emb_dim)
        ### set the initial virtual node embedding to 0.
        self.virtualnode_embedding = torch.nn.Embedding(1, emb_dim)
        torch.nn.init.constant_(self.virtualnode_embedding.weight.data, 0)
        ### List of GNNs
        self.convs = torch.nn.ModuleList()
        ### batch norms applied to node embeddings
        self.batch_norms = torch.nn.ModuleList()
        ### List of MLPs to transform virtual node at every layer
        self.mlp_virtualnode_list = torch.nn.ModuleList()
        for layer in range(num_layer):
            if gnn_type == 'gin':
                self.convs.append(GINConv(emb_dim))
            elif gnn_type == 'gcn':
                self.convs.append(GCNConv(emb_dim))
            else:
                raise ValueError('Undefined GNN type called {}'.format(gnn_type))
            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
        for layer in range(num_layer - 1):
            self.mlp_virtualnode_list.append(torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), \
                                             torch.nn.Linear(2*emb_dim, emb_dim), torch.nn.BatchNorm1d(emb_dim), torch.nn.ReLU()))
    def forward(self, batched_data):
        x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch
        ### virtual node embeddings for graphs
        # One zero-initialized virtual node per graph in the batch
        # (batch[-1] + 1 graphs).
        virtualnode_embedding = self.virtualnode_embedding(torch.zeros(batch[-1].item() + 1).to(edge_index.dtype).to(edge_index.device))
        h_list = [self.node_encoder(x)]
        for layer in range(self.num_layer):
            ### add message from virtual nodes to graph nodes
            h_list[layer] = h_list[layer] + virtualnode_embedding[batch]
            ### Message passing among graph nodes
            h = self.convs[layer](h_list[layer], edge_index, edge_attr)
            h = self.batch_norms[layer](h)
            if layer == self.num_layer - 1:
                #remove relu for the last layer
                h = F.dropout(h, self.drop_ratio, training = self.training)
            else:
                h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)
            if self.residual:
                h = h + h_list[layer]
            h_list.append(h)
            ### update the virtual nodes
            if layer < self.num_layer - 1:
                ### add message from graph nodes to virtual nodes
                virtualnode_embedding_temp = global_add_pool(h_list[layer], batch) + virtualnode_embedding
                ### transform virtual nodes using MLP
                if self.residual:
                    virtualnode_embedding = virtualnode_embedding + F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
                else:
                    virtualnode_embedding = F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
        ### Different implementations of Jk-concat
        if self.JK == "last":
            node_representation = h_list[-1]
        elif self.JK == "sum":
            # NOTE(review): sums h_list[0..num_layer-1], omitting the final
            # layer output h_list[num_layer] -- confirm intended.
            node_representation = 0
            for layer in range(self.num_layer):
                node_representation += h_list[layer]
        return node_representation
# No standalone behavior; module is import-only.
if __name__ == "__main__":
    pass
| 8,791 | 35.481328 | 182 | py |
LOSTIN | LOSTIN-main/GNN-LSTM/main_gnn_customized_area.py | ### Libraries
import numpy as np
import argparse
from tqdm import tqdm
import matplotlib.pyplot as plts
import pandas as pd
import torch
# Preliminaries
# torchtext 0.6.0
from torchtext.data import Field, TabularDataset, BucketIterator
# Models
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn import ReLU, Linear, BatchNorm1d, ModuleList
# Training
import torch.optim as optim
# Evaluation
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
# graph loading dependency
from torch_geometric.data import DataLoader
from dataset_pyg import PygGraphPropPredDataset
from gnn import GNN
### dir
data_folder = 'lstm/data_area'
destination_folder = 'model_ckt/area'
# Hybrid model
class Hybridmodel(nn.Module):
    """Hybrid regressor for area prediction: LSTM over the synthesis-flow
    tokens + GIN over the design graph, concatenated into an MLP head that
    outputs one scalar per sample. (Mirrors the delay-script model.)
    """
    def __init__(self, input_dim, emb_dim, hidden_dim=64, graph_emb=11):
        super(Hybridmodel, self).__init__()
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.lstm = nn.LSTM(input_size=emb_dim, hidden_size=hidden_dim, num_layers=2,
                            batch_first=True, bidirectional=False)
        # GNN producing one graph_emb-dimensional vector per design graph.
        self.gmodel = GNN(gnn_type = 'gin', num_tasks = 1, num_layer = 2, emb_dim = graph_emb, drop_ratio = 0.5, virtual_node = False)
        self.linear=ModuleList()
        self.linear.append(Linear(hidden_dim + graph_emb, 100))
        self.linear.append(Linear(100,100))
        self.linear.append(Linear(100,1))
        self.norm=ModuleList()
        self.norm.append(BatchNorm1d(100))
        self.norm.append(BatchNorm1d(100))
    def forward(self, text, text_len, graph_batch):
        # text: (batch, seq) token ids; text_len: per-sample lengths (CPU).
        text_emb = self.embedding(text)
        packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False)
        packed_output, _ = self.lstm(packed_input)
        output, _ = pad_packed_sequence(packed_output, batch_first=True)
        out = output[:, -1, :]
        g_emb = self.gmodel(graph_batch)
        # NOTE(review): text[:,0]-7 selects the design's graph embedding by
        # the first token id minus a vocab offset of 7 -- confirm.
        combined_emb = torch.cat((out, g_emb[text[:,0]-7]),1)
        flow_fea=F.relu(self.linear[0](combined_emb))
        flow_fea=self.norm[0](flow_fea)
        flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
        flow_fea=F.relu(self.linear[1](flow_fea))
        flow_fea=self.norm[1](flow_fea)
        flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
        flow_out=self.linear[2](flow_fea)
        flow_out = torch.squeeze(flow_out, 1)
        return flow_out
# Save Functions
def save_checkpoint(save_path, model, optimizer, valid_loss):
    """Persist model and optimizer state together with the validation loss.

    Args:
        save_path: destination file path; if None, saving is skipped.
        model: nn.Module whose state_dict is saved.
        optimizer: optimizer whose state_dict is saved.
        valid_loss: validation loss recorded alongside the weights.
    """
    # Identity comparison with None is the correct idiom (was `== None`).
    if save_path is None:
        return
    state_dict = {'model_state_dict': model.state_dict(),
                  'optimizer_state_dict': optimizer.state_dict(),
                  'valid_loss': valid_loss}
    torch.save(state_dict, save_path)
    print(f'Model saved to ==> {save_path}')
def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):
    """Persist loss histories and their global-step positions.

    Args:
        save_path: destination file path; if None, saving is skipped.
        train_loss_list: average training losses, one per evaluation.
        valid_loss_list: average validation losses, one per evaluation.
        global_steps_list: global step at which each evaluation happened.
    """
    # Identity comparison with None is the correct idiom (was `== None`).
    if save_path is None:
        return
    state_dict = {'train_loss_list': train_loss_list,
                  'valid_loss_list': valid_loss_list,
                  'global_steps_list': global_steps_list}
    torch.save(state_dict, save_path)
    print(f'Model saved to ==> {save_path}')
# Training Function
def training(model, device,
             optimizer,
             train_loader,
             valid_loader,
             graph_loader,
             num_epochs,
             eval_every,
             args,
             criterion = nn.MSELoss(),
             file_path = destination_folder,
             best_valid_loss = float("Inf"), best_train_loss = float("Inf")):
    """Train the hybrid model, validating every `eval_every` steps and
    checkpointing whenever the combined train+valid loss improves.

    NOTE(review): the `criterion` and `file_path` defaults are evaluated once
    at definition time; `file_path` binds the module-level
    `destination_folder` defined earlier in this file.
    """
    # initialize running values
    running_loss = 0.0
    valid_running_loss = 0.0
    global_step = 0
    train_loss_list = []
    valid_loss_list = []
    global_steps_list = []
    # read graphs
    # NOTE(review): this loop only keeps the *last* batch yielded by
    # graph_loader — it assumes the loader yields a single batch containing
    # every design graph (batch_size covers the whole dataset). Confirm.
    for graph_batch in graph_loader:
        graph_batch = graph_batch.to(device)
    # training loop
    model.train()
    for epoch in range(num_epochs):
        for ((flow, flow_len), labels), _ in tqdm(train_loader, desc="Iteration"):
            labels = labels.to(device)
            flow = flow.to(device)
            # pack_padded_sequence inside the model requires CPU lengths.
            flow_len = flow_len.to("cpu")
            output = model(flow, flow_len, graph_batch)
            loss = criterion(output, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # update running values
            running_loss += loss.item()
            global_step += 1
            # evaluation step
            if global_step % eval_every == 0:
                model.eval()
                with torch.no_grad():
                    # validation loop
                    for ((flow, flow_len), labels), _ in valid_loader:
                        labels = labels.to(device)
                        flow = flow.to(device)
                        flow_len = flow_len.to("cpu")
                        output = model(flow, flow_len, graph_batch)
                        loss = criterion(output, labels)
                        valid_running_loss += loss.item()
                # evaluation
                average_train_loss = running_loss / eval_every
                average_valid_loss = valid_running_loss / len(valid_loader)
                train_loss_list.append(average_train_loss)
                valid_loss_list.append(average_valid_loss)
                global_steps_list.append(global_step)
                # resetting running values
                running_loss = 0.0
                valid_running_loss = 0.0
                model.train()
                # print progress
                print('Epoch [{}/{}], Step [{}/{}], Train Loss: {:.4f}, Valid Loss: {:.4f}'
                      .format(epoch+1, num_epochs, global_step, num_epochs*len(train_loader),
                              average_train_loss, average_valid_loss))
                # checkpoint: keep the model with the best combined
                # train + validation loss seen so far.
                if best_valid_loss + best_train_loss > average_valid_loss + average_train_loss:
                    best_valid_loss = average_valid_loss
                    best_train_loss = average_train_loss
                    save_checkpoint(file_path + '/model_batch_'+str(args.batch_size)+'.pt', model, optimizer, best_valid_loss)
                    save_metrics(file_path + '/metrics_batch_'+str(args.batch_size)+'.pt', train_loss_list, valid_loss_list, global_steps_list)
    # save the final metric curves regardless of checkpointing
    save_metrics(file_path + '/metrics_batch_'+str(args.batch_size)+'.pt', train_loss_list, valid_loss_list, global_steps_list)
    print('Finished Training!')
def main():
    """Train the hybrid LSTM+GNN QoR predictor on the flow/area dataset.

    Loads the design graphs via PygGraphPropPredDataset, the flow/label CSVs
    via torchtext, builds the model, and delegates to training().
    """
    # arguments
    parser = argparse.ArgumentParser(description='Customized model for flow perf prediction')
    parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)')
    # fixed: help text previously claimed "default: 300" while the default is 1
    parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train (default: 1)')
    parser.add_argument('--emb_dim', type=int, default=20, help='dimensionality of input embedding of transformations')
    parser.add_argument('--graph_emb', type=int, default=32, help='dimensionality of hidden units in GNNs')
    args = parser.parse_args()
    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    # Load graphs — one loader over all designs, consumed inside training().
    pyg_dataset = PygGraphPropPredDataset(name = 'vgraph')
    graph_loader = DataLoader(pyg_dataset, batch_size=32, shuffle=False)
    # Fields: 'flow' is the tokenized command sequence, 'area' the regression target.
    area_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
    flow_field = Field(lower=True, include_lengths=True, batch_first=True)
    fields = [ ('flow', flow_field), ('area', area_field)]
    # TabularDataset
    print('Data loading ...')
    train, valid, test = TabularDataset.splits(path=data_folder, train='train.csv', validation='valid.csv', test='test.csv',
                                               format='CSV', fields=fields, skip_header=True)
    # Iterators — sorted by flow length for efficient sequence packing.
    train_iter = BucketIterator(train, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    valid_iter = BucketIterator(valid, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    #test_iter = BucketIterator(test, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    # Vocabulary built from the training split only.
    flow_field.build_vocab(train, min_freq=1, specials_first = False)
    learning_rate = 2e-3
    weight_decay = 2e-6
    model = Hybridmodel(input_dim=len(flow_field.vocab), emb_dim=args.emb_dim, graph_emb=args.graph_emb).to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    training(model=model, device=device, optimizer=optimizer,
             train_loader=train_iter, valid_loader=valid_iter, graph_loader=graph_loader,
             eval_every=len(train_iter), num_epochs=args.epochs, args=args)
if __name__ == "__main__":
main() | 9,153 | 36.670782 | 148 | py |
LOSTIN | LOSTIN-main/GNN-LSTM/dataset_pyg.py | from torch_geometric.data import InMemoryDataset
import pandas as pd
import shutil, os
import os.path as osp
import torch
import numpy as np
from read_graph_pyg import read_graph_pyg
class PygGraphPropPredDataset(InMemoryDataset):
    """OGB-style in-memory PyG dataset driven by a local `master.csv` of
    dataset meta-information (task type, file layout, split, etc.)."""
    def __init__(self, name, root = 'dataset', transform=None, pre_transform = None, meta_dict = None):
        '''
            - name (str): name of the dataset
            - root (str): root directory to store the dataset folder
            - transform, pre_transform (optional): transform/pre-transform graph objects
            - meta_dict: dictionary that stores all the meta-information about data. Default is None,
                    but when something is passed, it uses its information. Useful for debugging for external contributers.
        '''
        self.name = name ## original name, e.g., ogbg-molhiv
        if meta_dict is None:
            self.dir_name = '_'.join(name.split('-'))
            # check if previously-downloaded folder exists.
            # If so, use that one.
            if osp.exists(osp.join(root, self.dir_name + '_pyg')):
                self.dir_name = self.dir_name + '_pyg'
            self.original_root = root
            self.root = osp.join(root, self.dir_name)
            # master.csv lives next to this module and holds one column per
            # dataset name with its meta-information rows.
            master = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col = 0)
            if not self.name in master:
                error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
                error_mssg += 'Available datasets are as follows:\n'
                error_mssg += '\n'.join(master.keys())
                raise ValueError(error_mssg)
            self.meta_info = master[self.name]
        else:
            # Caller supplied the meta-information directly (debug path).
            self.dir_name = meta_dict['dir_path']
            self.original_root = ''
            self.root = meta_dict['dir_path']
            self.meta_info = meta_dict
        self.download_name = self.meta_info['download_name'] ## name of downloaded file, e.g., tox21
        self.num_tasks = int(self.meta_info['num tasks'])
        self.eval_metric = self.meta_info['eval metric']
        self.task_type = self.meta_info['task type']
        self.__num_classes__ = int(self.meta_info['num classes'])
        self.binary = self.meta_info['binary'] == 'True'
        # super().__init__ triggers process() if no processed file exists,
        # then the processed tensors are loaded from disk here.
        super(PygGraphPropPredDataset, self).__init__(self.root, transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])
    def get_idx_split(self, split_type = None):
        """Return {'train','valid','test'} index tensors for the configured
        (or explicitly requested) split."""
        if split_type is None:
            split_type = self.meta_info['split']
        path = osp.join(self.root, 'split', split_type)
        # short-cut if split_dict.pt exists
        if os.path.isfile(os.path.join(path, 'split_dict.pt')):
            return torch.load(os.path.join(path, 'split_dict.pt'))
        train_idx = pd.read_csv(osp.join(path, 'train.csv.gz'), compression='gzip', header = None).values.T[0]
        valid_idx = pd.read_csv(osp.join(path, 'valid.csv.gz'), compression='gzip', header = None).values.T[0]
        test_idx = pd.read_csv(osp.join(path, 'test.csv.gz'), compression='gzip', header = None).values.T[0]
        return {'train': torch.tensor(train_idx, dtype = torch.long), 'valid': torch.tensor(valid_idx, dtype = torch.long), 'test': torch.tensor(test_idx, dtype = torch.long)}
    @property
    def num_classes(self):
        # Exposed via a property because the base class also defines
        # num_classes; this returns the value read from the meta-info.
        return self.__num_classes__
    @property
    def raw_file_names(self):
        # Raw files depend on the storage format: a single .npz for binary
        # datasets, otherwise gzipped CSVs per component.
        if self.binary:
            return ['data.npz']
        else:
            file_names = ['edge']
            if self.meta_info['has_node_attr'] == 'True':
                file_names.append('node-feat')
            if self.meta_info['has_edge_attr'] == 'True':
                file_names.append('edge-feat')
            return [file_name + '.csv.gz' for file_name in file_names]
    @property
    def processed_file_names(self):
        return 'geometric_data_processed.pt'
    def process(self):
        """Read the raw graph files, attach graph labels, and save the
        collated (data, slices) pair to the processed path."""
        ### read pyg graph list
        add_inverse_edge = self.meta_info['add_inverse_edge'] == 'True'
        if self.meta_info['additional node files'] == 'None':
            additional_node_files = []
        else:
            additional_node_files = self.meta_info['additional node files'].split(',')
        if self.meta_info['additional edge files'] == 'None':
            additional_edge_files = []
        else:
            additional_edge_files = self.meta_info['additional edge files'].split(',')
        data_list = read_graph_pyg(self.raw_dir, add_inverse_edge = add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files, binary=self.binary)
        if self.task_type == 'subtoken prediction':
            # Labels are space-separated token lists rather than tensors.
            graph_label_notparsed = pd.read_csv(osp.join(self.raw_dir, 'graph-label.csv.gz'), compression='gzip', header = None).values
            graph_label = [str(graph_label_notparsed[i][0]).split(' ') for i in range(len(graph_label_notparsed))]
            for i, g in enumerate(data_list):
                g.y = graph_label[i]
        else:
            if self.binary:
                graph_label = np.load(osp.join(self.raw_dir, 'graph-label.npz'))['graph_label']
            else:
                graph_label = pd.read_csv(osp.join(self.raw_dir, 'graph-label.csv.gz'), compression='gzip', header = None).values
            has_nan = np.isnan(graph_label).any()
            for i, g in enumerate(data_list):
                if 'classification' in self.task_type:
                    # NaN labels force float storage even for classification.
                    if has_nan:
                        g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.float32)
                    else:
                        g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.long)
                else:
                    g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.float32)
        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]
        data, slices = self.collate(data_list)
        # NOTE(review): self.data is deliberately not assigned here (see the
        # commented-out line); __init__ reloads both tensors from disk after
        # process() returns, so only the on-disk copy matters.
        # self.data = data
        self.slices = slices
        print('Saving...')
        torch.save((data, slices), self.processed_paths[0])
if __name__ == '__main__':
    # Smoke-test: build the 'vgraph' dataset and print basic shapes.
    pyg_dataset = PygGraphPropPredDataset(name = 'vgraph')
    print(pyg_dataset.num_classes)
    print(pyg_dataset[0])
    #print(pyg_dataset[1].x)
    #print(pyg_dataset.slices)
    # Imported lazily so the module can be used without the loader installed.
    from torch_geometric.loader import DataLoader
    loader = DataLoader(pyg_dataset, batch_size=32, shuffle=False)
    # Iterate all batches and print each batch summary and its label count.
    for batch in loader:
        #print(batch.edge_index[:,2781])
        print(batch)
        #print(batch.y)
        print(len(batch.y))
        #break
| 6,691 | 38.364706 | 199 | py |
LOSTIN | LOSTIN-main/LSTM/LSTM_inference.py | ### Libraries
# torchtext 0.6.0
import numpy as np
import argparse
from tqdm import tqdm
# Libraries
import matplotlib.pyplot as plts
import pandas as pd
import torch
# Preliminaries
from torchtext.data import Field, TabularDataset, BucketIterator
# Models
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn import ReLU, Linear, BatchNorm1d, ModuleList
# Training
import torch.optim as optim
# LSTM model
class LSTM(nn.Module):
    """Two-layer LSTM regressor mapping a tokenized synthesis flow to one
    scalar QoR value via a small MLP head."""
    def __init__(self, input_dim, emb_dim, hidden_dim=128):
        # input_dim: vocabulary size; emb_dim: token embedding width;
        # hidden_dim: LSTM hidden size feeding the MLP head.
        super(LSTM, self).__init__()
        self.embedding = nn.Embedding(input_dim, emb_dim)
        # Two separate single-layer LSTMs, chained in forward() — the second
        # is initialized with the first layer's final (h, c) state.
        self.lstm_1 = nn.LSTM(input_size=emb_dim, hidden_size=hidden_dim, num_layers=1,
                            batch_first=True, bidirectional=False)
        self.lstm_2 = nn.LSTM(input_size=hidden_dim, hidden_size=hidden_dim, num_layers=1,
                            batch_first=True, bidirectional=False)
        # MLP regression head: hidden_dim -> 30 -> 30 -> 1 with BatchNorm
        # after the first two layers.
        self.linear=ModuleList()
        self.linear.append(Linear(hidden_dim,30))
        self.linear.append(Linear(30,30))
        self.linear.append(Linear(30,1))
        self.norm=ModuleList()
        self.norm.append(BatchNorm1d(30))
        self.norm.append(BatchNorm1d(30))
    def forward(self, text, text_len):
        """text: (batch, seq) int token ids; text_len: lengths on CPU.
        Returns a (batch,) tensor of predictions."""
        text_emb = self.embedding(text)
        # Pack so the LSTM skips pad positions; enforce_sorted=False allows
        # unsorted batches.
        packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False)
        packed_output, (h1, c1) = self.lstm_1(packed_input)
        packed_output, _ = self.lstm_2(packed_output, (h1, c1))
        output, _ = pad_packed_sequence(packed_output, batch_first=True)
        # NOTE(review): output[:, -1, :] reads the last *padded* timestep; for
        # sequences shorter than the batch max this is zero padding rather
        # than the sample's own final state — confirm intended.
        out = output[:, -1, :]
        #print(output.shape)
        #print(out)
        #flow_fea = F.dropout(out,p=0.5,training=self.training)
        flow_fea=F.relu(self.linear[0](out))
        flow_fea=self.norm[0](flow_fea)
        #flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
        flow_fea=F.relu(self.linear[1](flow_fea))
        flow_fea=self.norm[1](flow_fea)
        flow_fea=F.dropout(flow_fea,p=0.2,training=self.training)
        flow_out=self.linear[2](flow_fea)
        # (batch, 1) -> (batch,)
        flow_out = torch.squeeze(flow_out, 1)
        return flow_out
# Save and Load Functions
def save_checkpoint(save_path, model, optimizer, valid_loss):
    """Persist model/optimizer state plus the best validation loss.

    Args:
        save_path: destination file path; if None, saving is skipped.
        model: torch.nn.Module whose state_dict is saved.
        optimizer: optimizer whose state_dict is saved.
        valid_loss: validation loss recorded alongside the states.
    """
    if save_path is None:  # fixed: `== None` -> `is None` (PEP 8)
        return
    state_dict = {'model_state_dict': model.state_dict(),
                  'optimizer_state_dict': optimizer.state_dict(),
                  'valid_loss': valid_loss}
    torch.save(state_dict, save_path)
    print(f'Model saved to ==> {save_path}')
def load_checkpoint(load_path, model, optimizer, device):
    """Restore model/optimizer state saved by save_checkpoint.

    Args:
        load_path: checkpoint path; if None, nothing is loaded.
        model, optimizer: objects whose state_dicts are restored in place.
        device: map_location target for torch.load (e.g. 'cpu').

    Returns:
        The stored validation loss, or None when load_path is None.
    """
    if load_path is None:  # fixed: `== None` -> `is None` (PEP 8)
        return
    state_dict = torch.load(load_path, map_location=device)
    print(f'Model loaded from <== {load_path}')
    model.load_state_dict(state_dict['model_state_dict'])
    optimizer.load_state_dict(state_dict['optimizer_state_dict'])
    return state_dict['valid_loss']
def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):
    """Persist the training/validation loss curves and their global steps.

    Args:
        save_path: destination file path; if None, saving is skipped.
        train_loss_list, valid_loss_list: loss values per evaluation point.
        global_steps_list: global step at each evaluation point.
    """
    if save_path is None:  # fixed: `== None` -> `is None` (PEP 8)
        return
    state_dict = {'train_loss_list': train_loss_list,
                  'valid_loss_list': valid_loss_list,
                  'global_steps_list': global_steps_list}
    torch.save(state_dict, save_path)
    # fixed: message previously said "Model saved" for a metrics file
    print(f'Metrics saved to ==> {save_path}')
def load_metrics(load_path, device='cpu'):
    """Load the loss curves saved by save_metrics.

    Args:
        load_path: metrics file path; if None, nothing is loaded.
        device: map_location for torch.load. Previously this function read a
            module-level `device` that is never defined at module scope
            (NameError on any call); it is now an explicit parameter
            defaulting to CPU.

    Returns:
        (train_loss_list, valid_loss_list, global_steps_list), or None when
        load_path is None.
    """
    if load_path is None:  # fixed: `== None` -> `is None` (PEP 8)
        return
    state_dict = torch.load(load_path, map_location=device)
    # fixed: message previously said "Model loaded" for a metrics file
    print(f'Metrics loaded from <== {load_path}')
    return state_dict['train_loss_list'], state_dict['valid_loss_list'], state_dict['global_steps_list']
def main():
    """Run the trained LSTM on the test split and dump per-sample
    predictions and relative errors to a CSV."""
    # arguments
    parser = argparse.ArgumentParser(description='LSTM baseline for flow perf prediction')
    parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)')
    # NOTE(review): help text says "default: 300" but the defaults are 1 and
    # 20 respectively — stale help strings.
    parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train (default: 300)')
    parser.add_argument('--emb_dim', type=int, default=20, help='dimensionality of hidden units in GNNs (default: 300)')
    parser.add_argument('--dest_folder', type=str, default='data_area', help='Destination folder that saves the model')
    args = parser.parse_args()
    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    # Fields: 'flow' is the tokenized command sequence, 'area' the target.
    area_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
    flow_field = Field(lower=True, include_lengths=True, batch_first=True)
    fields = [ ('flow', flow_field), ('area', area_field)]
    print("Loading data ...")
    # TabularDataset
    train, valid, test = TabularDataset.splits(path=args.dest_folder, train='train.csv', validation='valid.csv', test='test.csv',
                                           format='CSV', fields=fields, skip_header=True)
    # Iterators — test iterator is unsorted so output order matches the CSV.
    train_iter = BucketIterator(train, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    #valid_iter = BucketIterator(valid, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    test_iter = BucketIterator(test, batch_size=args.batch_size, device=device, sort=False, sort_within_batch=False)
    # Vocabulary must be rebuilt exactly as at training time so the saved
    # embedding indices line up.
    flow_field.build_vocab(train, min_freq=1, specials_first = False)
    learning_rate=2e-3
    weight_decay=2e-6
    model = LSTM(input_dim=len(flow_field.vocab),emb_dim=args.emb_dim).to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    load_checkpoint(args.dest_folder + '/model.pt', model, optimizer, device)
    y_pred = []
    y_true = []
    relative_error = []
    flow_l = []
    design = []
    model.eval()
    with torch.no_grad():
        for ((flow, flow_len), labels), _ in tqdm(test_iter, desc="Iteration"):
            labels = labels.to(device)
            flow = flow.to(device)
            # pack_padded_sequence requires CPU lengths.
            flow_len = flow_len.to("cpu")
            output = model(flow, flow_len)
            y_pred.extend(output.tolist())
            y_true.extend(labels.tolist())
            # relative MAE per sample: |pred - label| / label
            rmae = np.abs(np.divide(np.subtract(output.tolist(), labels.tolist()), labels.tolist()))
            relative_error.extend(rmae)
            flow_l.extend(flow_len.tolist())
            # First token id minus 7 — presumably maps the leading design
            # token to a design index; TODO confirm against the vocabulary.
            design.extend((flow[:,0]-7).tolist())
    output = pd.DataFrame({'design_name':design, 'flow_length':flow_l, 'labels': y_true, 'prediction': y_pred, 'relative error': relative_error})
    output.to_csv('lstm_'+args.dest_folder+'.csv',index=False)
    # Mean relative error over the whole test split.
    print(np.mean(relative_error))
if __name__ == "__main__":
main() | 6,857 | 33.29 | 149 | py |
LOSTIN | LOSTIN-main/LSTM/LSTM_area.py | ### Libraries
# torchtext 0.6.0
import numpy as np
import argparse
from tqdm import tqdm
# Libraries
import matplotlib.pyplot as plts
import pandas as pd
import torch
# Preliminaries
from torchtext.data import Field, TabularDataset, BucketIterator
# Models
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn import ReLU, Linear, BatchNorm1d, ModuleList
# Training
import torch.optim as optim
# Evaluation
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
destination_folder = 'data_area'
# LSTM model
class LSTM(nn.Module):
    """Two-layer LSTM regressor mapping a tokenized synthesis flow to one
    scalar (area) value via a small MLP head."""
    def __init__(self, input_dim, emb_dim, hidden_dim=128):
        # input_dim: vocabulary size; emb_dim: token embedding width;
        # hidden_dim: LSTM hidden size feeding the MLP head.
        super(LSTM, self).__init__()
        self.embedding = nn.Embedding(input_dim, emb_dim)
        # Two separate single-layer LSTMs, chained in forward() — the second
        # is initialized with the first layer's final (h, c) state.
        self.lstm_1 = nn.LSTM(input_size=emb_dim, hidden_size=hidden_dim, num_layers=1,
                            batch_first=True, bidirectional=False)
        self.lstm_2 = nn.LSTM(input_size=hidden_dim, hidden_size=hidden_dim, num_layers=1,
                            batch_first=True, bidirectional=False)
        # MLP regression head: hidden_dim -> 30 -> 30 -> 1 with BatchNorm
        # after the first two layers.
        self.linear=ModuleList()
        self.linear.append(Linear(hidden_dim,30))
        self.linear.append(Linear(30,30))
        self.linear.append(Linear(30,1))
        self.norm=ModuleList()
        self.norm.append(BatchNorm1d(30))
        self.norm.append(BatchNorm1d(30))
    def forward(self, text, text_len):
        """text: (batch, seq) int token ids; text_len: lengths on CPU.
        Returns a (batch,) tensor of predictions."""
        text_emb = self.embedding(text)
        # Pack so the LSTM skips pad positions; enforce_sorted=False allows
        # unsorted batches.
        packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False)
        packed_output, (h1, c1) = self.lstm_1(packed_input)
        packed_output, _ = self.lstm_2(packed_output, (h1, c1))
        output, _ = pad_packed_sequence(packed_output, batch_first=True)
        # NOTE(review): output[:, -1, :] reads the last *padded* timestep; for
        # sequences shorter than the batch max this is zero padding rather
        # than the sample's own final state — confirm intended.
        out = output[:, -1, :]
        #print(output.shape)
        #print(out)
        #flow_fea = F.dropout(out,p=0.5,training=self.training)
        flow_fea=F.relu(self.linear[0](out))
        flow_fea=self.norm[0](flow_fea)
        #flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
        flow_fea=F.relu(self.linear[1](flow_fea))
        flow_fea=self.norm[1](flow_fea)
        flow_fea=F.dropout(flow_fea,p=0.2,training=self.training)
        flow_out=self.linear[2](flow_fea)
        # (batch, 1) -> (batch,)
        flow_out = torch.squeeze(flow_out, 1)
        return flow_out
# Save and Load Functions
def save_checkpoint(save_path, model, optimizer, valid_loss):
    """Persist model/optimizer state plus the best validation loss.

    Args:
        save_path: destination file path; if None, saving is skipped.
        model: torch.nn.Module whose state_dict is saved.
        optimizer: optimizer whose state_dict is saved.
        valid_loss: validation loss recorded alongside the states.
    """
    if save_path is None:  # fixed: `== None` -> `is None` (PEP 8)
        return
    state_dict = {'model_state_dict': model.state_dict(),
                  'optimizer_state_dict': optimizer.state_dict(),
                  'valid_loss': valid_loss}
    torch.save(state_dict, save_path)
    print(f'Model saved to ==> {save_path}')
def load_checkpoint(load_path, model, optimizer, map_location=None):
    """Restore model/optimizer state saved by save_checkpoint.

    Args:
        load_path: checkpoint path; if None, nothing is loaded.
        model, optimizer: objects whose state_dicts are restored in place.
        map_location: optional torch.load map_location (e.g. 'cpu') so a
            GPU-saved checkpoint can be restored on CPU; the default None
            preserves the original behavior.

    Returns:
        The stored validation loss, or None when load_path is None.
    """
    if load_path is None:  # fixed: `== None` -> `is None` (PEP 8)
        return
    state_dict = torch.load(load_path, map_location=map_location)
    print(f'Model loaded from <== {load_path}')
    model.load_state_dict(state_dict['model_state_dict'])
    optimizer.load_state_dict(state_dict['optimizer_state_dict'])
    return state_dict['valid_loss']
def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):
    """Persist the training/validation loss curves and their global steps.

    Args:
        save_path: destination file path; if None, saving is skipped.
        train_loss_list, valid_loss_list: loss values per evaluation point.
        global_steps_list: global step at each evaluation point.
    """
    if save_path is None:  # fixed: `== None` -> `is None` (PEP 8)
        return
    state_dict = {'train_loss_list': train_loss_list,
                  'valid_loss_list': valid_loss_list,
                  'global_steps_list': global_steps_list}
    torch.save(state_dict, save_path)
    # fixed: message previously said "Model saved" for a metrics file
    print(f'Metrics saved to ==> {save_path}')
def load_metrics(load_path, map_location=None):
    """Load the loss curves saved by save_metrics.

    Args:
        load_path: metrics file path; if None, nothing is loaded.
        map_location: optional torch.load map_location; default None keeps
            the original behavior.

    Returns:
        (train_loss_list, valid_loss_list, global_steps_list), or None when
        load_path is None.
    """
    if load_path is None:  # fixed: `== None` -> `is None` (PEP 8)
        return
    state_dict = torch.load(load_path, map_location=map_location)
    # fixed: message previously said "Model loaded" for a metrics file
    print(f'Metrics loaded from <== {load_path}')
    return state_dict['train_loss_list'], state_dict['valid_loss_list'], state_dict['global_steps_list']
# Training Function
def training(model, device,
             optimizer,
             train_loader,
             valid_loader,
             num_epochs,
             eval_every,
             criterion = nn.MSELoss(),
             file_path = destination_folder,
             best_valid_loss = float("Inf"), best_train_loss = float("Inf")):
    """Train the LSTM area model, validating every `eval_every` steps and
    checkpointing when the combined train+valid loss improves.

    NOTE(review): the `criterion` and `file_path` defaults are evaluated once
    at definition time; `file_path` binds the module-level
    `destination_folder` ('data_area').
    """
    # initialize running values
    running_loss = 0.0
    valid_running_loss = 0.0
    global_step = 0
    train_loss_list = []
    valid_loss_list = []
    global_steps_list = []
    # training loop
    model.train()
    for epoch in range(num_epochs):
        for ((flow, flow_len), labels), _ in tqdm(train_loader, desc="Iteration"):
            labels = labels.to(device)
            flow = flow.to(device)
            # pack_padded_sequence inside the model requires CPU lengths.
            flow_len = flow_len.to("cpu")
            output = model(flow, flow_len)
            loss = criterion(output, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # update running values
            running_loss += loss.item()
            global_step += 1
            # evaluation step
            if global_step % eval_every == 0:
                model.eval()
                with torch.no_grad():
                    # validation loop
                    for ((flow, flow_len), labels), _ in valid_loader:
                        labels = labels.to(device)
                        flow = flow.to(device)
                        flow_len = flow_len.to("cpu")
                        output = model(flow, flow_len)
                        loss = criterion(output, labels)
                        valid_running_loss += loss.item()
                # evaluation
                average_train_loss = running_loss / eval_every
                average_valid_loss = valid_running_loss / len(valid_loader)
                train_loss_list.append(average_train_loss)
                valid_loss_list.append(average_valid_loss)
                global_steps_list.append(global_step)
                # resetting running values
                running_loss = 0.0
                valid_running_loss = 0.0
                model.train()
                # print progress
                print('Epoch [{}/{}], Step [{}/{}], Train Loss: {:.4f}, Valid Loss: {:.4f}'
                      .format(epoch+1, num_epochs, global_step, num_epochs*len(train_loader),
                              average_train_loss, average_valid_loss))
                # checkpoint: keep the model with the best combined
                # train + validation loss seen so far.
                if best_valid_loss + best_train_loss > average_valid_loss + average_train_loss:
                    best_valid_loss = average_valid_loss
                    best_train_loss = average_train_loss
                    save_checkpoint(file_path + '/model.pt', model, optimizer, best_valid_loss)
                    save_metrics(file_path + '/metrics.pt', train_loss_list, valid_loss_list, global_steps_list)
    # save the final metric curves regardless of checkpointing
    save_metrics(file_path + '/metrics.pt', train_loss_list, valid_loss_list, global_steps_list)
    print('Finished Training!')
def main():
    """Train the LSTM baseline on the area dataset (data_area CSVs)."""
    # arguments
    parser = argparse.ArgumentParser(description='LSTM baseline for flow perf prediction')
    parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)')
    # NOTE(review): help text says "default: 300" but the defaults are 1 and
    # 20 respectively — stale help strings.
    parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train (default: 300)')
    parser.add_argument('--emb_dim', type=int, default=20, help='dimensionality of hidden units in GNNs (default: 300)')
    args = parser.parse_args()
    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    # Fields: 'flow' is the tokenized command sequence, 'area' the target.
    area_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
    flow_field = Field(lower=True, include_lengths=True, batch_first=True)
    fields = [ ('flow', flow_field), ('area', area_field)]
    # TabularDataset
    train, valid, test = TabularDataset.splits(path=destination_folder, train='train.csv', validation='valid.csv', test='test.csv',
                                           format='CSV', fields=fields, skip_header=True)
    # Iterators — sorted by flow length for efficient sequence packing.
    train_iter = BucketIterator(train, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    valid_iter = BucketIterator(valid, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    #test_iter = BucketIterator(test, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    # Vocabulary built from the training split only.
    flow_field.build_vocab(train, min_freq=1, specials_first = False)
    learning_rate=2e-3
    weight_decay=2e-6
    model = LSTM(input_dim=len(flow_field.vocab),emb_dim=args.emb_dim).to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    training(model = model, device = device, optimizer = optimizer, \
         train_loader = train_iter, valid_loader = valid_iter, eval_every = len(train_iter), \
         num_epochs=args.epochs)
if __name__ == "__main__":
main() | 9,043 | 34.328125 | 148 | py |
LOSTIN | LOSTIN-main/LSTM/LSTM_delay.py | ### Libraries
# torchtext 0.6.0
import numpy as np
import argparse
from tqdm import tqdm
# Libraries
import matplotlib.pyplot as plts
import pandas as pd
import torch
# Preliminaries
from torchtext.data import Field, TabularDataset, BucketIterator
# Models
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn import ReLU, Linear, BatchNorm1d, ModuleList
# Training
import torch.optim as optim
# Evaluation
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
destination_folder = 'data_delay'
# LSTM model
class LSTM(nn.Module):
    """Two-layer LSTM regressor mapping a tokenized synthesis flow to one
    scalar (delay) value via a small MLP head."""
    def __init__(self, input_dim, emb_dim, hidden_dim=128):
        # input_dim: vocabulary size; emb_dim: token embedding width;
        # hidden_dim: LSTM hidden size feeding the MLP head.
        super(LSTM, self).__init__()
        self.embedding = nn.Embedding(input_dim, emb_dim)
        # Two separate single-layer LSTMs, chained in forward() — the second
        # is initialized with the first layer's final (h, c) state.
        self.lstm_1 = nn.LSTM(input_size=emb_dim, hidden_size=hidden_dim, num_layers=1,
                            batch_first=True, bidirectional=False)
        self.lstm_2 = nn.LSTM(input_size=hidden_dim, hidden_size=hidden_dim, num_layers=1,
                            batch_first=True, bidirectional=False)
        # MLP regression head: hidden_dim -> 30 -> 30 -> 1 with BatchNorm
        # after the first two layers.
        self.linear=ModuleList()
        self.linear.append(Linear(hidden_dim,30))
        self.linear.append(Linear(30,30))
        self.linear.append(Linear(30,1))
        self.norm=ModuleList()
        self.norm.append(BatchNorm1d(30))
        self.norm.append(BatchNorm1d(30))
    def forward(self, text, text_len):
        """text: (batch, seq) int token ids; text_len: lengths on CPU.
        Returns a (batch,) tensor of predictions."""
        text_emb = self.embedding(text)
        # Pack so the LSTM skips pad positions; enforce_sorted=False allows
        # unsorted batches.
        packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False)
        packed_output, (h1, c1) = self.lstm_1(packed_input)
        packed_output, _ = self.lstm_2(packed_output, (h1, c1))
        output, _ = pad_packed_sequence(packed_output, batch_first=True)
        # NOTE(review): output[:, -1, :] reads the last *padded* timestep; for
        # sequences shorter than the batch max this is zero padding rather
        # than the sample's own final state — confirm intended.
        out = output[:, -1, :]
        #print(output.shape)
        #print(out)
        #flow_fea = F.dropout(out,p=0.5,training=self.training)
        flow_fea=F.relu(self.linear[0](out))
        flow_fea=self.norm[0](flow_fea)
        #flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
        flow_fea=F.relu(self.linear[1](flow_fea))
        flow_fea=self.norm[1](flow_fea)
        flow_fea=F.dropout(flow_fea,p=0.2,training=self.training)
        flow_out=self.linear[2](flow_fea)
        # (batch, 1) -> (batch,)
        flow_out = torch.squeeze(flow_out, 1)
        return flow_out
# Save and Load Functions
def save_checkpoint(save_path, model, optimizer, valid_loss):
    """Persist model/optimizer state plus the best validation loss.

    Args:
        save_path: destination file path; if None, saving is skipped.
        model: torch.nn.Module whose state_dict is saved.
        optimizer: optimizer whose state_dict is saved.
        valid_loss: validation loss recorded alongside the states.
    """
    if save_path is None:  # fixed: `== None` -> `is None` (PEP 8)
        return
    state_dict = {'model_state_dict': model.state_dict(),
                  'optimizer_state_dict': optimizer.state_dict(),
                  'valid_loss': valid_loss}
    torch.save(state_dict, save_path)
    print(f'Model saved to ==> {save_path}')
def load_checkpoint(load_path, model, optimizer, map_location=None):
    """Restore model/optimizer state saved by save_checkpoint.

    Args:
        load_path: checkpoint path; if None, nothing is loaded.
        model, optimizer: objects whose state_dicts are restored in place.
        map_location: optional torch.load map_location (e.g. 'cpu') so a
            GPU-saved checkpoint can be restored on CPU; the default None
            preserves the original behavior.

    Returns:
        The stored validation loss, or None when load_path is None.
    """
    if load_path is None:  # fixed: `== None` -> `is None` (PEP 8)
        return
    state_dict = torch.load(load_path, map_location=map_location)
    print(f'Model loaded from <== {load_path}')
    model.load_state_dict(state_dict['model_state_dict'])
    optimizer.load_state_dict(state_dict['optimizer_state_dict'])
    return state_dict['valid_loss']
def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):
    """Persist the training/validation loss curves and their global steps.

    Args:
        save_path: destination file path; if None, saving is skipped.
        train_loss_list, valid_loss_list: loss values per evaluation point.
        global_steps_list: global step at each evaluation point.
    """
    if save_path is None:  # fixed: `== None` -> `is None` (PEP 8)
        return
    state_dict = {'train_loss_list': train_loss_list,
                  'valid_loss_list': valid_loss_list,
                  'global_steps_list': global_steps_list}
    torch.save(state_dict, save_path)
    # fixed: message previously said "Model saved" for a metrics file
    print(f'Metrics saved to ==> {save_path}')
def load_metrics(load_path, map_location=None):
    """Load the loss curves saved by save_metrics.

    Args:
        load_path: metrics file path; if None, nothing is loaded.
        map_location: optional torch.load map_location; default None keeps
            the original behavior.

    Returns:
        (train_loss_list, valid_loss_list, global_steps_list), or None when
        load_path is None.
    """
    if load_path is None:  # fixed: `== None` -> `is None` (PEP 8)
        return
    state_dict = torch.load(load_path, map_location=map_location)
    # fixed: message previously said "Model loaded" for a metrics file
    print(f'Metrics loaded from <== {load_path}')
    return state_dict['train_loss_list'], state_dict['valid_loss_list'], state_dict['global_steps_list']
# Training Function
def training(model, device,
             optimizer,
             train_loader,
             valid_loader,
             num_epochs,
             eval_every,
             criterion = nn.MSELoss(),
             file_path = destination_folder,
             best_valid_loss = float("Inf"),best_train_loss = float("Inf")):
    """Train the LSTM delay model, validating every `eval_every` steps and
    checkpointing when the combined train+valid loss improves.

    NOTE(review): the `criterion` and `file_path` defaults are evaluated once
    at definition time; `file_path` binds the module-level
    `destination_folder` ('data_delay').
    """
    # initialize running values
    running_loss = 0.0
    valid_running_loss = 0.0
    global_step = 0
    train_loss_list = []
    valid_loss_list = []
    global_steps_list = []
    # training loop
    model.train()
    for epoch in range(num_epochs):
        for ((flow, flow_len), labels), _ in tqdm(train_loader, desc="Iteration"):
            labels = labels.to(device)
            flow = flow.to(device)
            # pack_padded_sequence inside the model requires CPU lengths.
            flow_len = flow_len.to("cpu")
            output = model(flow, flow_len)
            loss = criterion(output, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # update running values
            running_loss += loss.item()
            global_step += 1
            # evaluation step
            if global_step % eval_every == 0:
                model.eval()
                with torch.no_grad():
                    # validation loop
                    for ((flow, flow_len), labels), _ in valid_loader:
                        labels = labels.to(device)
                        flow = flow.to(device)
                        flow_len = flow_len.to("cpu")
                        output = model(flow, flow_len)
                        loss = criterion(output, labels)
                        valid_running_loss += loss.item()
                # evaluation
                average_train_loss = running_loss / eval_every
                average_valid_loss = valid_running_loss / len(valid_loader)
                train_loss_list.append(average_train_loss)
                valid_loss_list.append(average_valid_loss)
                global_steps_list.append(global_step)
                # resetting running values
                running_loss = 0.0
                valid_running_loss = 0.0
                model.train()
                # print progress
                print('Epoch [{}/{}], Step [{}/{}], Train Loss: {:.4f}, Valid Loss: {:.4f}'
                      .format(epoch+1, num_epochs, global_step, num_epochs*len(train_loader),
                              average_train_loss, average_valid_loss))
                # checkpoint: keep the model with the best combined
                # train + validation loss seen so far.
                if best_valid_loss + best_train_loss > average_valid_loss + average_train_loss:
                    best_valid_loss = average_valid_loss
                    best_train_loss = average_train_loss
                    save_checkpoint(file_path + '/model.pt', model, optimizer, best_valid_loss)
                    save_metrics(file_path + '/metrics.pt', train_loss_list, valid_loss_list, global_steps_list)
    # save the final metric curves regardless of checkpointing
    save_metrics(file_path + '/metrics.pt', train_loss_list, valid_loss_list, global_steps_list)
    print('Finished Training!')
def main():
    """Train the LSTM baseline on the delay dataset (data_delay CSVs)."""
    # arguments
    parser = argparse.ArgumentParser(description='LSTM baseline for flow perf prediction')
    parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)')
    # NOTE(review): help text says "default: 300" but the defaults are 1 and
    # 20 respectively — stale help strings.
    parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train (default: 300)')
    parser.add_argument('--emb_dim', type=int, default=20, help='dimensionality of hidden units in GNNs (default: 300)')
    args = parser.parse_args()
    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    # Fields: 'flow' is the tokenized command sequence, 'delay' the target.
    delay_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
    flow_field = Field(lower=True, include_lengths=True, batch_first=True)
    fields = [ ('flow', flow_field), ('delay', delay_field)]
    # TabularDataset
    train, valid, test = TabularDataset.splits(path=destination_folder, train='train.csv', validation='valid.csv', test='test.csv',
                                           format='CSV', fields=fields, skip_header=True)
    # Iterators — sorted by flow length for efficient sequence packing.
    train_iter = BucketIterator(train, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    valid_iter = BucketIterator(valid, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    #test_iter = BucketIterator(test, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
    # Vocabulary built from the training split only.
    flow_field.build_vocab(train, min_freq=1, specials_first = False)
    learning_rate=2e-3
    weight_decay=2e-6
    model = LSTM(input_dim=len(flow_field.vocab),emb_dim=args.emb_dim).to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    training(model = model, device = device, optimizer = optimizer, \
         train_loader = train_iter, valid_loader = valid_iter, eval_every = len(train_iter), \
         num_epochs=args.epochs)
if __name__ == "__main__":
main() | 9,049 | 34.490196 | 148 | py |
LOSTIN | LOSTIN-main/CNN/cnn_data_gen.py | import utils
import pandas as pd
import numpy as np
import argparse
import pprint as pp
from os import listdir
from os.path import isfile, join
import torch
from torch.utils.data import TensorDataset, DataLoader
def _encode_flow_dataset(flow_df, label_df, num_flows, flow_len, base_row,
                         dataset_x, dataset_y):
    """Append one (1, 26, 7) one-hot encoded sample per synthesis flow.

    Row 0 of each sample holds the circuit statistics (``base_row``); rows
    1..flow_len one-hot encode the ABC synthesis command applied at each step.

    Args:
        flow_df: DataFrame whose column 0 holds ';'-separated command flows.
        label_df: DataFrame whose column 0 holds the ground-truth labels.
        num_flows: number of rows of flow_df/label_df to encode.
        flow_len: number of commands in each flow (10, 15, 20 or 25).
        base_row: length-7 list [inputs, outputs, nodes, levels, edges,
            area, delay] describing the unoptimized circuit.
        dataset_x: list receiving the encoded feature arrays (mutated).
        dataset_y: list receiving single-element label lists (mutated).

    Raises:
        NotImplementedError: if a flow contains an unknown command.
    """
    # Column index of the one-hot slot for each synthesis command.
    cmd_index = {'b': 0, 'rf': 1, 'rfz': 2, 'rw': 3, 'rwz': 4,
                 'resub': 5, 'resub -z': 6}
    for row in range(num_flows):
        commands = flow_df[0][row].split(';')
        data = np.zeros([1, 26, 7])
        data[0][0] = base_row
        for step in range(flow_len):
            cmd = commands[step]
            if cmd not in cmd_index:
                raise NotImplementedError
            data[0][step + 1][cmd_index[cmd]] = 1.0
        dataset_x.append(data)
        dataset_y.append([label_df[0][row]])


def main(args):
    """Build and save the CNN training dataset from EPFL benchmark circuits.

    For each of the 11 benchmark circuits, runs ABC once to collect baseline
    statistics, then encodes 50k flows of length 10 and 15 and 100k flows of
    length 20 and 25 (3.3M samples total) together with their ground-truth
    labels, and saves everything as a torch TensorDataset.

    Bug fix: the previous implementation reused the loop variable ``i`` for
    the circuit input count, the outer enumerate index AND the inner flow
    index, so the first feature of every sample stored the flow index
    instead of the circuit's input count. Distinct names fix this.

    Args:
        args: dict with keys 'key' ('area' or 'delay') and 'dataset'
            (output directory for the saved .pt file).
    """
    ff_10 = pd.read_csv('flow_10.csv', header=None)
    ff_15 = pd.read_csv('flow_15.csv', header=None)
    ff_20 = pd.read_csv('flow_20.csv', header=None)
    ff_25 = pd.read_csv('flow_25.csv', header=None)
    keyword = args['key']
    label_dir = 'dataset-ground-truth'
    label_list = [f for f in listdir(label_dir) if isfile(join(label_dir, f))]
    verilog_list = ['div', 'max', 'multiplier', 'sin', 'square', 'voter', 'adder', 'arbiter', 'bar', 'log2', 'sqrt']
    stat_list = []
    dataset_x = []
    dataset_y = []
    # Collect all of the data from abc first.
    for verilog in verilog_list:
        v_file = f'epfl/{verilog}.v'
        stat = utils.run_abc(v_file, '')
        delay, area, edge, nd, lev, n_in, n_out = utils.get_cnn_metrics(stat)
        stat_list.append((delay, area, edge, nd, lev, n_in, n_out))
    print("Acquired all of the data from abc!")
    # Main loop: encode every flow length for every circuit.
    for idx, verilog in enumerate(verilog_list):
        print("Begin processing the data for the verilog file: ", verilog)
        delay, area, edge, nd, lev, n_in, n_out = stat_list[idx]
        # Locate the four ground-truth label files for this circuit.
        label_file_10, label_file_15, label_file_20, label_file_25 = '', '', '', ''
        for f in label_list:
            if (keyword in f) and (verilog in f):
                if '10' in f:
                    label_file_10 = f
                elif '15' in f:
                    label_file_15 = f
                elif '20' in f:
                    label_file_20 = f
                elif '25' in f:
                    label_file_25 = f
        print("Label 10 file: ", label_file_10)
        print("Label 15 file: ", label_file_15)
        print("Label 20 file: ", label_file_20)
        print("Label 25 file: ", label_file_25)
        label_10 = pd.read_csv(f'{label_dir}/{label_file_10}', header=None)
        label_15 = pd.read_csv(f'{label_dir}/{label_file_15}', header=None)
        label_20 = pd.read_csv(f'{label_dir}/{label_file_20}', header=None)
        label_25 = pd.read_csv(f'{label_dir}/{label_file_25}', header=None)
        base_row = [n_in, n_out, nd, lev, edge, area, delay]
        _encode_flow_dataset(ff_10, label_10, 50000, 10, base_row, dataset_x, dataset_y)
        print("Completed processing for flow-length 10")
        _encode_flow_dataset(ff_15, label_15, 50000, 15, base_row, dataset_x, dataset_y)
        print("Completed processing for flow-length 15")
        _encode_flow_dataset(ff_20, label_20, 100000, 20, base_row, dataset_x, dataset_y)
        print("Completed processing for flow-length 20")
        _encode_flow_dataset(ff_25, label_25, 100000, 25, base_row, dataset_x, dataset_y)
        print("Completed processing for flow-length 25")
    tensor_x = torch.Tensor(dataset_x)
    tensor_y = torch.Tensor(dataset_y)
    my_dataset = TensorDataset(tensor_x, tensor_y)
    dir_upper, dir_lower = args['dataset'], args['key']
    torch.save(my_dataset, f'{dir_upper}/{dir_lower}.pt')
# CLI entry point: choose which ground-truth label to encode (area or delay)
# and where to save the generated dataset, then build it.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Parser')
    parser.add_argument('--key', help='Select area / delay', default='area')
    parser.add_argument('--dataset', help='the save directory of dataset', default='cnn_dataset')
    args = vars(parser.parse_args())
    main(args)
| 7,046 | 33.208738 | 116 | py |
LOSTIN | LOSTIN-main/CNN/train_cnn.py | import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data as Data
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
import argparse
# Keras -> PyTorch Implementation
# Modification: Classification -> Regression
# Keras -> PyTorch port; the classification head was replaced with a single
# regression output.
class CNN_Regression(nn.Module):
    """CNN regressor over (N, 1, 26, 7) encoded synthesis-flow tensors."""

    def __init__(self):
        """Build the two conv stages and the two-layer regression head."""
        super(CNN_Regression, self).__init__()
        # NOTE: attribute creation order is kept so seeded initialisation
        # and saved state dicts stay compatible.
        self.conv_1 = nn.Conv2d(1, 32, (1, 2))
        self.conv_2 = nn.Conv2d(32, 64, (1, 2))
        self.pool = nn.MaxPool2d(1, 1)
        self.dropout_1 = nn.Dropout(0.3)
        self.fc_1 = nn.Linear(64 * 26 * 5, 64)
        self.dropout_2 = nn.Dropout(0.4)
        self.fc_2 = nn.Linear(64, 1)

    def forward(self, x):
        """Return a (N, 1) regression prediction for input x of shape (N, 1, 26, 7)."""
        features = self.conv_1(x)
        features = self.conv_2(F.elu(features))
        features = self.pool(F.elu(features))
        features = self.dropout_1(features)
        flat = torch.flatten(features, start_dim=1)
        hidden = self.dropout_2(F.selu(self.fc_1(flat)))
        return self.fc_2(hidden)
def RMAE(output, target):
    """Relative mean absolute error: sum of |target - output| / target,
    averaged over the leading (batch) dimension of `output`."""
    relative_error = torch.abs((target - output) / target)
    return relative_error.sum() / len(output)
def _mean_rmae(model, loader, device):
    """Average of per-batch RMAE of `model` over `loader`, without gradients."""
    total, num_batches = 0, 0
    for data_batch, label_batch in loader:
        if torch.cuda.is_available():
            data_batch = data_batch.to(device)
            label_batch = label_batch.to(device)
        with torch.no_grad():
            total += RMAE(model(data_batch), label_batch)
        num_batches += 1
    return total / num_batches


def main(args):
    """Train CNN_Regression on the encoded synthesis-flow dataset.

    Loads `cnn_dataset/<data>.pt`, splits it into train/valid/test, trains
    for 20 epochs with RMSprop + MSE loss, reports RMAE after every epoch
    and checkpoints the model whenever the test RMAE reaches a new minimum.

    Fixes: the three copy-pasted evaluation loops are factored into
    `_mean_rmae`; evaluation now runs with `model.eval()` so the dropout
    layers are disabled (previously dropout stayed active during eval);
    `model.forward(...)` replaced with the idiomatic `model(...)`.

    Args:
        args: dict with keys 'device' (CUDA index), 'data' (dataset key,
            e.g. 'area') and 'ckpt' (checkpoint output directory).
    """
    model = CNN_Regression()
    optimizer = torch.optim.RMSprop(model.parameters(), lr=0.05)
    criterion = torch.nn.MSELoss()
    epoches = 20
    batch_size = 32
    data_key = args['data']
    data_set = torch.load(f'cnn_dataset/{data_key}.pt')
    device = torch.device("cuda:" + str(args['device'])) if torch.cuda.is_available() else torch.device("cpu")
    # First 1.8M samples are re-split into train/valid/extra-test; the
    # remaining 1.5M samples form a fixed held-out test set.
    data_0, dataset_test_0 = TensorDataset(*data_set[0:1800000]), TensorDataset(*data_set[1800000:3300000])
    dataset_ratio = [660000, 165000, 975000]
    dataset_train, dataset_valid, dataset_test_1 = torch.utils.data.random_split(data_0, dataset_ratio)
    data_set_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
    data_set_valid = DataLoader(dataset_valid, batch_size=batch_size, shuffle=True)
    data_set_test = DataLoader(dataset_test_0 + dataset_test_1, batch_size=batch_size, shuffle=True)
    checkpoint_path = args['ckpt']
    test_accuracy_curve = []
    if torch.cuda.is_available():
        print("CUDA is available! Running on GPU")
        model = model.to(device)
    else:
        print("Running on CPU")
    # Training routine
    for epoch_idx in range(epoches):
        print("Training Epoch #: ", epoch_idx + 1)
        print()
        model.train()
        for idx, (data_batch, label_batch) in enumerate(data_set_train):
            if torch.cuda.is_available():
                data_batch = data_batch.to(device)
                label_batch = label_batch.to(device)
            out = model(data_batch)
            loss = criterion(out, label_batch)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if idx % 200 == 0:
                print("RMAE: ", RMAE(out, label_batch))
        print("Training done! Now evaluating..")
        print()
        model.eval()
        train_sum = _mean_rmae(model, data_set_train, device)
        print("Training Result: ", train_sum, "%")
        valid_sum = _mean_rmae(model, data_set_valid, device)
        print("Validation Result: ", valid_sum, "%")
        test_sum = _mean_rmae(model, data_set_test, device)
        test_accuracy_curve.append(test_sum)
        print("Testing Result: ", test_sum, "%")
        # Checkpoint on a new best (lowest) test RMAE; the current value is
        # already in the curve, so <= also covers ties and the first epoch.
        if test_sum <= min(test_accuracy_curve):
            torch.save({'model_state_dict': model.state_dict(),
                        'train_accuracy': train_sum,
                        'valid_accuracy': valid_sum,
                        'test_accuracy': test_sum,
                        }, f'{checkpoint_path}/{data_key}_result.pth')
# CLI entry point: pick the CUDA device, dataset key and checkpoint
# directory, then run training.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='this is just parser - nothing special')
    parser.add_argument('--device', default='0')
    parser.add_argument('--data', help='dataset file', default='area')
    parser.add_argument('--ckpt', help='output checkpoint path', default='cnn_checkpoints')
    args = vars(parser.parse_args())
    main(args)
| 5,484 | 31.264706 | 110 | py |
scalene | scalene-master/test/testpyt.py | # -*- coding: utf-8 -*-
import random
import torch
class DynamicNet(torch.nn.Module):
    """Fully connected network whose depth is re-sampled on every forward pass."""

    def __init__(self, D_in, H, D_out):
        """Create the input, (reused) middle, and output linear layers."""
        super(DynamicNet, self).__init__()
        self.input_linear = torch.nn.Linear(D_in, H)
        self.middle_linear = torch.nn.Linear(H, H)
        self.output_linear = torch.nn.Linear(H, D_out)

    def forward(self, x):
        """Apply the input layer, then the middle layer 0-3 random times,
        then the output layer.

        Because PyTorch rebuilds the computation graph dynamically on each
        pass, normal Python control flow (the random-length loop below) is
        fine, and reusing `middle_linear` several times in one graph is
        perfectly safe.
        """
        hidden = torch.relu(self.input_linear(x))
        extra_layers = random.randint(0, 3)
        for _ in range(extra_layers):
            hidden = torch.relu(self.middle_linear(hidden))
        return self.output_linear(hidden)
# Module-level training script: runs on import/execution (profiler workload).
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs
# (random data, so the printed loss values differ from run to run).
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Construct our model by instantiating the class defined above
model = DynamicNet(D_in, H, D_out)
# Construct our loss function and an Optimizer. Training this strange model with
# vanilla stochastic gradient descent is tough, so we use momentum
# (reduction='sum' makes the loss scale with the batch size).
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
for t in range(500):
    # Forward pass: Compute predicted y by passing x to the model
    y_pred = model(x)
    # Compute and print loss
    loss = criterion(y_pred, y)
    if t % 100 == 99:
        # Report progress every 100 iterations.
        print(t, loss.item())
    # Zero gradients, perform a backward pass, and update the weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
| 2,339 | 34.454545 | 85 | py |
scalene | scalene-master/test/torchtest.py | import torch
import math
def torchtest():
    """Fit a cubic polynomial to sin(x) by manual gradient descent.

    Serves as a large CPU/GPU compute-and-memory workload (5M-element
    tensors, 2000 iterations) for profiling.
    """
    dtype = torch.float
    # Fix: fall back to the CPU when CUDA is unavailable (previously this
    # unconditionally requested cuda:0 and crashed on CPU-only machines).
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # `q` is never read afterwards; presumably retained to exercise an
    # extra large allocation for the profiler -- TODO confirm before removing.
    q = torch.linspace(-math.pi, math.pi, 5000000, device=device, dtype=dtype)
    x = torch.linspace(-math.pi, math.pi, 5000000, device=device, dtype=dtype)
    y = torch.sin(x)
    # Coefficients of y = a + b x + c x^2 + d x^3, updated manually below,
    # so requires_grad=True to have autograd populate their .grad fields.
    a = torch.randn((), device=device, dtype=dtype, requires_grad=True)
    b = torch.randn((), device=device, dtype=dtype, requires_grad=True)
    c = torch.randn((), device=device, dtype=dtype, requires_grad=True)
    d = torch.randn((), device=device, dtype=dtype, requires_grad=True)
    learning_rate = 1e-6
    for t in range(2000):
        # Forward pass: compute predicted y using operations on Tensors.
        y_pred = a + b * x + c * x ** 2 + d * x ** 3
        # NOTE: the signed residual sum (not squared error) is used here;
        # the squared-error variant is kept below for reference.
        # loss = (y_pred - y).pow(2).sum()
        loss = (y_pred - y).sum()
        if t % 100 == 99:
            print(t, loss.item())
        # Autograd computes a.grad .. d.grad for the loss above.
        loss.backward()
        # Manual SGD step, outside autograd tracking.
        with torch.no_grad():
            a -= learning_rate * a.grad
            b -= learning_rate * b.grad
            c -= learning_rate * c.grad
            d -= learning_rate * d.grad
            # Manually zero the gradients after updating weights.
            a.grad = None
            b.grad = None
            c.grad = None
            d.grad = None
    print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')
# Run the benchmark immediately when the module is executed.
torchtest()
| 2,736 | 41.765625 | 85 | py |
scalene | scalene-master/test/testtf.py | import tensorflow as tf
from time import perf_counter
def config():
    """Pin TensorFlow's inter- and intra-op thread pools to 16 threads."""
    num_threads = 16
    for apply_limit in (
        tf.config.threading.set_inter_op_parallelism_threads,
        tf.config.threading.set_intra_op_parallelism_threads,
    ):
        apply_limit(num_threads)
def run_benchmark():
    """Train a small MNIST classifier for 5 epochs and print the wall time.

    NOTE(review): `config()` above is defined but not called here, so the
    thread limits are not applied unless a caller invokes it first.
    """
    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Scale pixel values into [0, 1].
    x_train, x_test = x_train / 255.0, x_test / 255.0
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10)
    ])
    # Sanity forward pass on one sample (raw logits, since no softmax layer).
    predictions = model(x_train[:1]).numpy()
    print("predictions", predictions)
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    model.compile(optimizer='adam',
                  loss=loss_fn,
                  metrics=['accuracy'])
    # Timed section covers both training and the final evaluation.
    t0 = perf_counter()
    model.fit(x_train, y_train, epochs=5)
    model.evaluate(x_test, y_test, verbose=2)
    dt = perf_counter() - t0
    print(f"Total time: {dt}")
# Run the benchmark when the module is executed.
run_benchmark()
| 1,116 | 26.925 | 77 | py |
XFL | XFL-master/python/common/evaluation/metrics.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
from typing import List, Optional
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import confusion_matrix, roc_curve
from torch.nn import BCELoss
from common.utils.algo_utils import (BiClsAccuracy, BiClsAuc, BiClsF1, BiClsKS,
BiClsPrecision, BiClsRecall)
from algorithm.core.metrics import get_metric
from common.utils.logger import logger
from multiprocessing import Pool, cpu_count
def cumulative_gain_curve(y_true, y_score, pos_label=None):
    """Adapted from skplot package. Add some simplifications and modifications
    `skplot` github: https://github.com/reiinakano/scikit-plot
    This function generates the points necessary to plot the Cumulative Gain

    Note: This implementation is restricted to the binary classification task.

    Args:
        y_true (array-like, shape (n_samples)): True labels of the data.

        y_score (array-like, shape (n_samples)): Target scores, can either be
            probability estimates of the positive class, confidence values, or
            non-thresholded measure of decisions (as returned by
            decision_function on some classifiers).

        pos_label (int or str, default=None): Label considered as positive and
            others are considered negative. Defaults to 1 when not given.

    Returns:
        percentages (numpy.ndarray): An array containing the X-axis values for
            plotting the Cumulative Gains chart.

        gains (numpy.ndarray): An array containing the Y-axis values for one
            curve of the Cumulative Gains chart.
    """
    y_true, y_score = np.asarray(y_true), np.asarray(y_score)
    # Fix: the pos_label argument was previously ignored (1 was always
    # used); keep 1 as the default so existing callers behave the same.
    if pos_label is None:
        pos_label = 1
    # make y_true a boolean vector
    y_true = (y_true == pos_label)
    # Walk the samples from the highest score downwards.
    sorted_indices = np.argsort(y_score)[::-1]
    y_true = y_true[sorted_indices]
    gains = np.cumsum(y_true)
    percentages = np.arange(start=1, stop=len(y_true) + 1)
    # Normalize: fraction of positives captured vs fraction of samples seen.
    gains = gains / float(np.sum(y_true))
    percentages = percentages / float(len(y_true))
    # Prepend the (0, 0) origin of the curve.
    gains = np.insert(gains, 0, [0])
    percentages = np.insert(percentages, 0, [0])
    return percentages, gains
class CommonMetrics:
    """Shared helpers for formatting, computing and persisting metric reports."""

    @staticmethod
    def to_str(metrics_dict: dict, presicion: int = 6):
        """Format every metric value as a fixed-point string with `presicion` decimals."""
        return dict(
            (name, f"{value:.{presicion}f}") for name, value in metrics_dict.items()
        )

    @staticmethod
    def _calc_metrics(
        metrics: dict,
        labels: list,
        val_predicts: list,
        lossfunc_name: str = None,
        loss: float = None,
        dataset_type: str = "val",
    ) -> dict:
        """Evaluate each metric on (labels, val_predicts), optionally
        prepending the precomputed loss, and log a formatted copy.

        Returns:
            dict of raw (unformatted) metric values.
        """
        results = {}
        if lossfunc_name is not None:
            results[lossfunc_name] = loss
        for name, metric_fn in metrics.items():
            results[name] = metric_fn(labels, val_predicts)
        metrics_str = CommonMetrics.to_str(results)
        logger.info(f"Metrics on {dataset_type} dataset: {metrics_str}")
        return results

    @staticmethod
    def save_metric_csv(
        metrics_output: dict,
        output_config: dict,
        global_epoch: int,
        local_epoch: int = None,
        dataset_type: str = "val",
    ) -> None:
        """Append one row of formatted metrics to the configured CSV report.

        A header row is written first when the file does not exist yet.
        """
        metrics_str = CommonMetrics.to_str(metrics_output)
        metric_dir = output_config.get("path", "")
        if not os.path.exists(metric_dir):
            os.makedirs(metric_dir)
        file_name = output_config.get("metric_" + dataset_type)["name"]
        output_file = os.path.join(metric_dir, file_name)
        # Nested training reports both counters as "local/global".
        epoch = f"{local_epoch}/{global_epoch}" if local_epoch else f"{global_epoch}"
        write_header = not os.path.exists(output_file)
        with open(output_file, 'a') as f:
            if write_header:
                header = "local_epoch/global_epoch" if local_epoch else "global_epoch"
                f.write("%s,%s\n" % (header, ','.join(metrics_str.keys())))
            f.write("%s,%s\n" % (epoch, ','.join(metrics_str.values())))
class BiClsMetric:
    """Binary-classification metric collector with optional loss reporting.

    Configured metric/loss names are resolved against `metric_functions_map`;
    results accumulate in `self.metrics` and can be appended to a CSV.
    """

    def __init__(self, epoch, output_file=None, metric_config={}, lossfunc_config={}):
        """Instantiate the configured loss and metric callables.

        Args:
            epoch: epoch number recorded in the CSV row.
            output_file: path of the CSV report written by save().
            metric_config: iterable of metric names ("acc", "auc", ...).
            lossfunc_config: dict whose first key names the loss function;
                remaining items matching the loss constructor's parameters
                are forwarded to it.

        Raises:
            NotImplementedError: for an unknown loss or metric name.
        """
        # NOTE(review): the "BCEWithLogitsLoss" key maps to torch's BCELoss
        # (which expects probabilities, not logits) -- confirm intended.
        self.metric_functions_map = {
            "BCEWithLogitsLoss": BCELoss,
            "acc": BiClsAccuracy,
            "precision": BiClsPrecision,
            "recall": BiClsRecall,
            "f1_score": BiClsF1,
            "auc": BiClsAuc,
            "ks": BiClsKS
        }
        self.metric_functions = {}
        self.metrics = {}
        self.epoch = epoch
        self.output_file = output_file
        # Only the first key of lossfunc_config selects the loss function.
        if len(lossfunc_config):
            loss_function = list(lossfunc_config.keys())[0]
        else:
            loss_function = None
        if loss_function:
            if loss_function not in self.metric_functions_map:
                raise NotImplementedError(
                    "Loss function {} is not supported in this model.".format(loss_function))
            func = self.metric_functions_map[loss_function]
            # Forward only the config entries that the loss constructor accepts.
            method_args = inspect.getfullargspec(func).args
            defined_args = {}
            for (key, value) in lossfunc_config.items():
                if key in method_args:
                    defined_args[key] = value
            self.metric_functions[loss_function] = func(**defined_args)
        for metric_function in metric_config:
            # Legacy combined name: expand into separate "auc" and "ks".
            if metric_function == "auc_ks":
                logger.warning('metric "auc_ks" in config will be deprecated in future version, '
                               'please use "auc" and "ks" separately.')
                defined_args = {}
                for _ in ["auc", "ks"]:
                    func = self.metric_functions_map[_]
                    self.metric_functions[_] = func(**defined_args)
                continue
            # "decision_table" is handled elsewhere, not as a scalar metric.
            if metric_function == "decision_table":
                continue
            elif metric_function not in self.metric_functions_map:
                raise NotImplementedError(
                    "Metric function {} is not supported in this model.".format(metric_function))
            func = self.metric_functions_map[metric_function]
            defined_args = {}
            self.metric_functions[metric_function] = func(**defined_args)

    def calc_metrics(self, y_true: np.array, y_pred: np.array):
        """Evaluate all configured metrics and store the scalars in self.metrics.

        Confusion-matrix metrics binarize predictions at 0.5; auc/ks are
        computed from the ROC curve; the loss is evaluated on the raw
        probabilities (no sigmoid applied here).
        """
        fpr, tpr, _ = roc_curve(y_true, y_pred)
        cm = confusion_matrix(y_true, y_pred > 0.5)
        for metric_function in self.metric_functions:
            if metric_function in ("acc", "precision", "recall", "f1_score"):
                self.metrics[metric_function] = self.metric_functions[metric_function](
                    cm).item()
            elif metric_function in ("auc", "ks"):
                self.metrics[metric_function] = self.metric_functions[metric_function](
                    tpr, fpr).item()
            elif metric_function == "BCEWithLogitsLoss":
                self.metrics[metric_function] = self.metric_functions[metric_function](torch.tensor(y_pred),
                                                                                       torch.tensor(y_true)).item()

    def __repr__(self):
        """Return 'epoch: E, name: value, ...' with 6 significant digits."""
        output = ["epoch: %d" % self.epoch]
        for k, v in self.metrics.items():
            output.append("%s: %.6g" % (k, v))
        return ', '.join(output)

    def save(self):
        """Append one CSV row (header written for a new file).

        Columns follow metric_functions_map order; metrics that were not
        computed are left as empty cells.
        """
        if os.path.exists(self.output_file):
            with open(self.output_file, 'a') as f:
                features = []
                for k in self.metric_functions_map:
                    if k in self.metrics:
                        features.append("%.6g" % self.metrics[k])
                    else:
                        features.append("")
                f.write("%d,%s\n" % (self.epoch, ','.join(features)))
        else:
            with open(self.output_file, 'w') as f:
                f.write("%s,%s\n" % ("epoch", ','.join(
                    [_ for _ in self.metric_functions_map])))
                features = []
                for k in self.metric_functions_map:
                    if k in self.metrics:
                        features.append("%.6g" % self.metrics[k])
                    else:
                        features.append("")
                f.write("%d,%s\n" % (self.epoch, ','.join(features)))
class RegressionMetric:
    """Collects regression metrics for one epoch and appends them to a CSV."""

    def __init__(self, epoch, output_file=None, metric_config={}):
        """Resolve each configured metric name via get_metric.

        Args:
            epoch: epoch number recorded in the CSV row.
            output_file: path of the CSV report (used by save()).
            metric_config: iterable of metric names to evaluate.
        """
        self.metrics = {}
        self.epoch = epoch
        self.output_file = output_file
        self.metric_functions = {name: get_metric(name) for name in metric_config}

    def calc_metrics(self, y_true: np.array, y_pred: np.array):
        """Evaluate every configured metric and store the results in self.metrics."""
        for name, metric_fn in self.metric_functions.items():
            self.metrics[name] = metric_fn(y_true, y_pred)

    def __repr__(self):
        """Return 'epoch: E, name: value, ...' with 6 significant digits."""
        parts = ["epoch: %d" % self.epoch]
        parts.extend("%s: %.6g" % item for item in self.metrics.items())
        return ', '.join(parts)

    def save(self, met):
        """Append one CSV row of metric values `met`, writing a header first
        when the file does not exist yet."""
        values = ["%.6g" % met[name] for name in met]
        is_new = not os.path.exists(self.output_file)
        with open(self.output_file, 'w' if is_new else 'a') as f:
            if is_new:
                f.write("%s,%s\n" % ("epoch", ','.join(met)))
            f.write("%d,%s\n" % (self.epoch, ','.join(values)))
class ThresholdCutter:
    """Sweep classification thresholds and track the best KS statistic.

    `bst_threshold`/`bst_score` hold the threshold with the highest KS seen
    so far; `metrics` accumulates the per-threshold curve for save().
    """

    def __init__(self, output_file=None):
        # Best threshold / KS found so far (updated by the cut_* methods).
        self.bst_threshold = 0.5
        self.bst_score = 0
        # self.default_threshold = [0.1, 0.2, 0.3, 0.35, 0.4, 0.45, 0.46, 0.47, 0.48, 0.49, 0.5,
        #                           0.51, 0.52, 0.53, 0.54, 0.55, 0.6, 0.65, 0.7, 0.8, 0.9]
        # Percentiles 100..0 used to derive candidate thresholds.
        self.default_percentile = np.arange(100, -1, -1)
        self.output_file = output_file
        # NOTE: cut_by_value appends local variables into these lists *by
        # name* (via locals()), so the keys must match the local names there.
        self.metrics = {
            "threshold": [],
            "tn": [],
            "fp": [],
            "fn": [],
            "tp": [],
            "tpr": [],
            "fpr": [],
            "ks": []
        }

    def sim_cut_by_value(self, y_true, y_pred):
        """Fast tpr/fpr/ks curve via sklearn's roc_curve.

        Replaces self.metrics with a dict of arrays/lists (a different
        schema from the per-threshold lists built by cut_by_value) and does
        NOT update bst_threshold / bst_score. When there are many distinct
        scores, the curve is downsampled to ~99 evenly spaced points.
        """
        fpr, tpr, thresholds = roc_curve(y_true, y_pred)
        # Trim the final curve point (the (1, 1) endpoint at the lowest
        # threshold) from all three arrays.
        fpr, tpr, thresholds = fpr[:-1], tpr[:-1], thresholds[:-1]
        ks_curve = tpr - fpr
        # Clamp negative KS values to 0.
        ks_curve = np.where(ks_curve > 0, ks_curve, 0)
        # shrink output size
        probs = np.unique(y_pred)
        if len(probs) < len(self.default_percentile):
            # Few distinct scores: keep the full (already short) curve.
            self.metrics = {
                'tpr': tpr,
                'fpr': fpr,
                'ks': ks_curve,
                'threshold': thresholds
            }
            return
        # Downsample: keep the points at 1%, 2%, ..., 99% of the curve.
        cuts = np.arange(0.01, 1, 0.01)
        size = thresholds.size
        index_list = [int(size * cut) for cut in cuts]
        if index_list[-1] >= size:
            index_list = index_list[:-1]
        thresholds = [thresholds[idx] for idx in index_list]
        ks_curve = [ks_curve[idx] for idx in index_list]
        tpr = [tpr[idx] for idx in index_list]
        fpr = [fpr[idx] for idx in index_list]
        self.metrics = {
            'tpr': tpr,
            'fpr': fpr,
            'ks': ks_curve,
            'threshold': thresholds
        }
        return

    def cut_by_value(self, y_true: np.array, y_pred: np.array, values: List = None):
        """Evaluate the confusion matrix at each candidate threshold,
        recording per-threshold stats and tracking the best KS.

        When `values` is None, thresholds come from default_percentile of
        y_pred; with few distinct scores, the scores themselves (descending)
        are used and default_percentile is rebuilt from their rank
        percentiles (note the len(y_pred) - 1 denominator).
        """
        probs = np.unique(y_pred)
        logger.info("num of probs: %d." % len(probs))
        if values is None:
            if len(probs) < len(self.default_percentile):
                # logger.warning("ks points %d less than the default num: %d." % (len(probs),
                #                                                                len(self.default_percentile)))
                values = np.array(sorted(probs, reverse=True))
                self.default_percentile = np.array(
                    [sum(y_pred < _) / (len(y_pred) - 1) * 100 for _ in values])
            else:
                values = np.percentile(y_pred, self.default_percentile)
        # - Threshold, TP, FN, FP, TN, TPR, FPR, KS
        for threshold in values:
            # NOTE(review): with labels=[1, 0], sklearn's ravel() yields
            # [TP, FN, FP, TN], so these names are swapped relative to the
            # default ordering (tn holds TP, tp holds TN, etc.); the stored
            # 'tpr'/'fpr' are therefore specificity and miss rate. Their
            # difference still equals TPR - FPR, so ks and bst_threshold
            # are unaffected -- confirm whether the column naming is intended.
            tn, fp, fn, tp = confusion_matrix(
                y_true, y_pred >= threshold, labels=[1, 0]).ravel()
            if tp + fn > 0:
                tpr = tp / (tp + fn)
            else:
                tpr = np.nan
            if tn + fp > 0:
                fpr = fp / (tn + fp)
            else:
                fpr = np.nan
            # tpr and fpr are scalars here, so np.max is a no-op; NaNs
            # propagate through the comparison below (ks > bst is False).
            ks = max(np.max(tpr - fpr), 0)
            # Append the locals named by the metric keys (threshold, tn, ...).
            for metric in self.metrics:
                self.metrics[metric].append(locals()[metric])
            if ks > self.bst_score:
                self.bst_score = ks
                self.bst_threshold = threshold

    def cut_by_index(self, y_true: np.array, y_pred: np.array):
        """Pick the ROC-curve point with the highest (clamped) KS and update
        bst_threshold / bst_score if it beats the current best."""
        fpr, tpr, thresholds = roc_curve(y_true, y_pred)
        ks_curve = tpr - fpr
        ks_curve = np.where(ks_curve > 0, ks_curve, 0)
        idx = np.argmax(ks_curve)
        ks = ks_curve[idx]
        if ks > self.bst_score:
            self.bst_score = ks
            self.bst_threshold = thresholds[idx]

    def save(self):
        """Write the accumulated per-threshold metrics to output_file as CSV.

        Assumes the metrics lists are percentile-aligned with
        default_percentile (i.e. populated by cut_by_value).
        """
        df = pd.DataFrame(self.metrics)
        df["top_percentile"] = 100 - self.default_percentile
        df.to_csv(self.output_file, header=True,
                  index=False, float_format='%.6g')
class DecisionTable:
    """Binned decision table (score-band report) for binary classification.

    Buckets predictions into `bins` intervals (equal frequency or equal
    width) and tabulates per-bin and cumulative positive/negative counts.
    Column labels are Chinese runtime strings; roughly: 区间 = interval,
    样本数 = samples, 负样本数/正样本数 = negatives/positives,
    占比 = ratio, 累计 = cumulative.
    """

    def __init__(self, conf):
        """Read method/bins/type from `conf` and validate them.

        Args:
            conf: dict with optional keys "method" ("equal_frequency" |
                "equal_width"), "bins" (int > 1) and "type"
                ("score_card" rounds interval edges to integers).
        """
        self.method = conf.get("method", "equal_frequency")
        self.bins = conf.get("bins", 5)
        self.type = conf.get("type")
        self._check_params()
        self.stats = pd.DataFrame()

    def _check_params(self):
        """Raise if the configured binning method or bin count is invalid."""
        if self.method not in ("equal_frequency", "equal_width"):
            raise NotImplementedError(
                "decision table: method '{}' is not implemented.".format(self.method))
        if self.bins <= 1:
            raise ValueError(
                "decision table: bins ({}) must be greater than 1.".format(self.bins))

    def fit(self, y_true: np.array, y_pred: np.array):
        """Build the per-bin statistics table from labels and predictions."""
        df = pd.DataFrame({"label": y_true, "pred": y_pred})
        n = len(set(y_pred))
        # Cannot form more bins than there are distinct predictions.
        if n <= self.bins:
            logger.info("metric::decision table::number of unique values in the prediction (%d) is less than "
                        "the bins (%d), set the bins=%d." % (n, self.bins, n))
            self.bins = n
        if self.method == "equal_frequency":
            groups = pd.qcut(y_pred, self.bins, duplicates='drop', precision=3)
        elif self.method == "equal_width":
            groups = pd.cut(y_pred, self.bins, right=True,
                            duplicates='drop', precision=3)
        # Score cards use integer interval boundaries.
        if self.type == "score_card":
            groups = [pd.Interval(int(_.left), int(
                _.right), _.closed) for _ in groups]
        df["区间"] = groups
        # Per-bin counts (total / negatives / positives) and within-bin rates.
        self.stats["样本数"] = df.groupby("区间").size()
        self.stats["负样本数"] = df.groupby(
            "区间")["label"].agg(lambda x: sum(x == 0))
        self.stats["正样本数"] = df.groupby(
            "区间")["label"].agg(lambda x: sum(x == 1))
        self.stats["区间内负样本占比"] = self.stats["负样本数"] / self.stats["样本数"]
        self.stats["区间内正样本占比"] = self.stats["正样本数"] / self.stats["样本数"]
        # Bin shares of the whole population / of all negatives / positives.
        self.stats["样本占比"] = self.stats["样本数"] / self.stats["样本数"].sum()
        self.stats["负样本占比"] = self.stats["负样本数"] / self.stats["负样本数"].sum()
        self.stats["正样本占比"] = self.stats["正样本数"] / self.stats["正样本数"].sum()
        # Cumulative counts and the derived capture/concentration ratios.
        self.stats["累计总样本数"] = self.stats["样本数"].cumsum()
        self.stats["累计负样本数"] = self.stats["负样本数"].cumsum()
        self.stats["累计正样本数"] = self.stats["正样本数"].cumsum()
        self.stats["累计负样本/负样本总数"] = self.stats["累计负样本数"] / \
            self.stats["负样本数"].sum()
        self.stats["累计正样本/正样本总数"] = self.stats["累计正样本数"] / \
            self.stats["正样本数"].sum()
        self.stats["累计负样本/累计总样本"] = self.stats["累计负样本数"] / self.stats["累计总样本数"]
        self.stats["累计正样本/累计总样本"] = self.stats["累计正样本数"] / self.stats["累计总样本数"]
        self.stats["累计样本数/总样本数"] = self.stats["累计总样本数"] / \
            self.stats["样本数"].sum()
        self.stats["累计正样本占比/累计总样本占比"] = self.stats["正样本占比"].cumsum() / \
            self.stats["样本占比"].cumsum()
        # Render all ratio columns as "xx.xx%" strings, keeping NaNs as NaN.
        for _ in ["区间内负样本占比", "区间内正样本占比", "样本占比", "负样本占比", "正样本占比",
                  "累计负样本/负样本总数", "累计正样本/正样本总数", "累计负样本/累计总样本", "累计正样本/累计总样本",
                  "累计样本数/总样本数", "累计正样本占比/累计总样本占比"]:
            self.stats[_] = np.where(
                self.stats[_].isnull(),
                np.nan,
                self.stats[_].apply(lambda x: "%.2f%%" % (x * 100))
            )
        self.stats = self.stats.reset_index()
        self.stats["区间"] = self.stats["区间"].apply(str)

    def save(self, file_name):
        """Write the decision table to `file_name` as CSV."""
        self.stats.to_csv(file_name, header=True,
                          index=False, float_format='%.2g')
class LiftGainCalculator():
    """Compute cumulative-gain and lift curves from model predictions."""

    def __init__(self, output_file=None, step=0.001):
        """Store the output path and the sampling step of the curve."""
        self.output_file = output_file
        self.step = step
        self.num_proc = 4

    @staticmethod
    def _pred_thres(pred: np.array, thres: float):
        """Binarize predictions at `thres` (1 where pred >= thres)."""
        return (pred >= thres).astype(np.int32)

    def cal_lift_gain(self, label: np.array, pred: np.array):
        """Build the percentage / cumulative-gain / lift table in self.metrics."""
        cut_points = np.arange(self.step, 1, self.step)
        percentages, gains = cumulative_gain_curve(label, pred)
        logger.info(
            f"Length of percentages before pruning: {percentages.size}")
        lifts = gains / percentages
        # Keep only the curve points closest to each step fraction.
        total = percentages.size
        keep = [int(total * cut) for cut in cut_points]
        if keep[-1] >= total:
            keep = keep[:-1]
        percentages = [percentages[k] for k in keep]
        gains = [gains[k] for k in keep]
        lifts = [lifts[k] for k in keep]
        logger.info(f"Length of percentages after pruning: {len(percentages)}")
        self.metrics = pd.DataFrame(
            {
                'percentage_data': percentages,
                'cum_gain': gains,
                'lift': lifts
            }
        )

    def save(self, file_name):
        """Write the curve table to `file_name` as CSV."""
        self.metrics.to_csv(
            file_name, header=True,
            index=False, float_format="%.2g"
        )
class ClusteringMetric:
    """Clustering quality metrics."""

    def __init__(self):
        pass

    @staticmethod
    def calc_dbi(dist_table, cluster_dist):
        """Davies-Bouldin-style index over cluster scatter and separations.

        Args:
            dist_table: per-cluster scatter values (torch scalars; NaN
                entries are skipped).
            cluster_dist: squared pairwise separations, consumed in
                (i, j != i) iteration order.

        Returns:
            The index, or np.nan when fewer than two clusters exist.
        """
        if len(dist_table) == 1:
            return np.nan
        pair_idx = 0
        worst_per_cluster = []
        for i in range(len(dist_table)):
            ratios = []
            for j in range(len(dist_table)):
                if j == i:
                    continue
                ratios.append(
                    (dist_table[i] + dist_table[j]) / (cluster_dist[pair_idx] ** 0.5))
                pair_idx += 1
            valid = [r for r in ratios if ~torch.isnan(r)]
            worst_per_cluster.append(max(valid) if valid else np.nan)
        if not worst_per_cluster:
            return np.nan
        return np.nansum(worst_per_cluster) / len(worst_per_cluster)
| 20,407 | 37.946565 | 115 | py |
XFL | XFL-master/python/common/crypto/one_time_pad/one_time_add.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from copy import deepcopy
from typing import List, Union
import numpy as np
import torch
from .component import OneTimePadCiphertext, OneTimePadContext, OneTimeKey
class OneTimeAdd(object):
"""Provide encrypt and decrypt method for one-time-add algorithm
"""
    @staticmethod
    def context(modulus_exp: int = 128,
                data_type: str = "torch.Tensor"):
        """Factory for a OneTimePadContext with the given modulus exponent
        and payload data type (see the component module)."""
        return OneTimePadContext(modulus_exp, data_type)
    @staticmethod
    def ciphertext(data: Union[list, np.ndarray, bytes],
                   context_: OneTimePadContext):
        """Factory wrapping `data` in a OneTimePadCiphertext bound to `context_`."""
        return OneTimePadCiphertext(data, context_)
    @staticmethod
    def _xcrypt(context_: OneTimePadContext,
                data: Union[np.ndarray, torch.Tensor],
                one_time_key: List[np.ndarray],
                is_addition: Union[List[bool], bool] = True,
                is_decrypt: bool = False) -> Union[List[OneTimePadCiphertext], OneTimePadCiphertext]:
        """ Function for implementing encryption and decryption.

        Encryption encodes `data` as fixed-point integers (scaled by
        context_.scalar, truncated, reduced mod context_.modulus) and then
        adds/subtracts each key in `one_time_key`; decryption applies the
        inverse key operations to already-encoded integers and converts back
        to floats.

        is_addition: same length as one_time_key, means to add or to subtract the key. default to True;
        if is_decrypt is False, return a numpy array of integers;
        if is_decrypt is True, return a numpy array of float numbers.
        """
        # A single bool applies to every key; otherwise one flag per key.
        if isinstance(is_addition, (bool, int)):
            is_addition = [is_addition] * len(one_time_key)
        elif len(is_addition) != len(one_time_key):
            raise ValueError(
                f"Length of is_additon ({len(is_addition)}) and one_time_key ({len(one_time_key)}) not match.")
        # Promote 0-dim (scalar) arrays to 1-element arrays; restored below.
        if data.shape == ():
            zero_shape = True
            data = np.array([data])
        else:
            zero_shape = False
        # uint64 for a 64-bit modulus, Python big-ints (object) otherwise.
        dtype = np.uint64 if context_.modulus_exp == 64 else object
        if not is_decrypt:
            # Fixed-point encode. NOTE(review): astype("int") is the
            # platform default integer (int32 on some Windows builds) --
            # confirm the scaled values fit before the mod.
            if dtype == np.uint64:
                out = np.mod(np.trunc(data * context_.scalar).astype("int"),
                             context_.modulus).astype(dtype)
            else:
                out = np.mod(np.trunc(data * context_.scalar),
                             context_.modulus).astype(dtype)
        else:
            out = deepcopy(data)
        for i in range(len(one_time_key)):
            # Bool arithmetic: nonzero exactly when is_addition[i] XOR
            # is_decrypt, i.e. decryption inverts each key's direction.
            if is_addition[i] - is_decrypt:
                if context_.modulus_exp == 64:
                    # No explicit mod: relies on uint64 arithmetic wrapping
                    # mod 2**64 (assumes context_.modulus == 2**64 here --
                    # TODO confirm against OneTimePadContext).
                    out = out + one_time_key[i]
                else:
                    out = np.mod(
                        out + one_time_key[i], context_.modulus).astype(object)
            else:
                if context_.modulus_exp == 64:
                    out = out - one_time_key[i]
                else:
                    out = np.mod(
                        out - one_time_key[i], context_.modulus).astype(object)
        if is_decrypt:
            # Map residues above modulus/2 back to negative numbers, then
            # undo the fixed-point scaling.
            out = out.astype(object)
            idx = np.where(out > context_.modulus // 2)
            out[idx] -= context_.modulus
            out /= context_.scalar
        if zero_shape:
            out = np.array(out[0])
        return out
@classmethod
def encrypt(cls,
context_: OneTimePadContext,
data: Union[np.ndarray, torch.Tensor],
one_time_key: OneTimeKey,
is_addition: Union[List[bool], bool] = True,
serialized: bool = False) -> Union[OneTimePadCiphertext, bytes]:
"""Encrypt the data to a ciphertext
Args:
context_ (OneTimePadContext): see OneTimePadContext.
data (Union[np.ndarray, torch.Tensor]): plaintext to encrypt.
one_time_key (OneTimeKey): a key for addition/subtraction, or a list of keys,
the ciphertext is plaintext +/- key[0] +/- key[1] +/- key[2] +/- ...
is_addition (Union[List[bool], bool], optional): same length as one_time_key, means to add or to subtract the key.
Defaults to True.
serialized (bool, optional): it is convenient to set it to true if the ciphertext needs to
be sent by the network right after the encryption. Defaults to False.
Raises:
ValueError: if shape of data is different from shape of one_time_key or one_time_key[0].
Warnings:
if context_.data_type is different from the type of data, which means the type of plaintext
after decryption will be different from the type of plaintext before encryption.
Returns:
Union[OneTimePadCiphertext, bytes]: if serialized is False, return OneTimePadCiphertext,
else return pickled ciphertext(numpy.ndarray of integers).
"""
if isinstance(one_time_key.value, np.ndarray):
one_time_key = [one_time_key.value]
else:
one_time_key = one_time_key.value
if data.shape != one_time_key[0].shape:
raise ValueError(
f"Input data's shape {data.shape} and one_time_key's shape {one_time_key[0].shape} not match.")
if not isinstance(data, context_.data_type) and not isinstance(data, np.float64):
warnings.warn(
f"Input data type {type(data)} and context_.data_type {context_.data_type} are different.")
if isinstance(data, torch.Tensor):
data = data.numpy()
out = cls._xcrypt(context_, data, one_time_key, is_addition, False)
if not serialized:
out = OneTimePadCiphertext(out, context_)
else:
out = OneTimePadContext.serialize(out)
return out
@classmethod
def decrypt(cls,
context_: OneTimePadContext,
ciphertext: Union[OneTimePadCiphertext, bytes],
one_time_key: OneTimeKey,
is_addition: Union[List[bool], bool] = True) -> Union[np.ndarray, torch.Tensor]:
"""Decrypt the ciphertext to a plaintext
Args:
context_ (OneTimePadContext): see OneTimePadContext.
ciphertext (Union[OneTimePadCiphertext, bytes]): result of cls.encrypt(...) method.
one_time_key (OneTimeKey): the same as it is in cls.encrypt(...).
is_addition (Union[List[bool], bool]): the same as it is in cls.encrypt(...).
Raises:
ValueError: if the shape of ciphertext.data is different from the shape of one_time_key.
Returns:
Union[np.ndarray, torch.Tensor]: numpy.ndarray or torch.Tensor of float32, depend on context_.data_type
"""
if isinstance(one_time_key.value, np.ndarray):
one_time_key = [one_time_key.value]
else:
one_time_key = one_time_key.value
if isinstance(ciphertext, bytes):
ciphertext = OneTimePadContext.deserialize(ciphertext)
if ciphertext.data.shape != one_time_key[0].shape:
raise ValueError(
f"Input ciphertext's shape {ciphertext.data.shape} and one_time_key's shape {one_time_key[0].shape} not match.")
out = cls._xcrypt(context_, ciphertext.data,
one_time_key, is_addition, True)
if context_.data_type == np.ndarray:
out = out.astype(np.float32)
else:
out = torch.from_numpy(out.astype(np.float32))
return out
| 7,887 | 40.298429 | 128 | py |
XFL | XFL-master/python/common/crypto/one_time_pad/component.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from copy import deepcopy
from typing import Union
import numpy as np
class OneTimePadContext(object):
    """Shared parameters of the one-time-pad (one-time-add) scheme.

    The context fixes the ciphertext modulus ``2**modulus_exp``, the
    fixed-point encoding scalar ``2**(modulus_exp // 2)`` and the plaintext
    container type (numpy array or torch tensor), so every party using the
    same context produces compatible ciphertexts.
    """
    def __init__(self,
                 modulus_exp: int = 64,
                 data_type: str = "torch.Tensor"):
        """Create a context.

        Args:
            modulus_exp (int, optional): exponent (base 2) of the modulus.
                Ciphertexts consist of integers modulo ``2**modulus_exp``.
                Defaults to 64.
            data_type (str, optional): plaintext type, supports
                "torch.Tensor" and "numpy.ndarray", or "torch" and "numpy"
                for short. Defaults to "torch.Tensor".
        Raises:
            ValueError: if modulus_exp not in [64, 128].
            ValueError: if data_type not in
                ["numpy.ndarray", "numpy", "torch.Tensor", "torch"].
        """
        if modulus_exp not in [64, 128]:
            raise ValueError(f"Supported modulus_exps are 64 and 128, got {modulus_exp}.")
        if data_type not in ["numpy.ndarray", "numpy", "torch.Tensor", "torch"]:
            raise ValueError(f"Supported data_types are 'numpy.ndarray', 'numpy', 'torch.Tensor', 'torch', got {data_type}.")
        # Half of the modulus bits encode the fraction; the other half are
        # integer headroom for additions before wrap-around.
        if modulus_exp == 128:
            self.__exp = 64
        else:
            self.__exp = 32
        self.__modulus_exp = modulus_exp
        self.__scalar = 1 << self.__exp
        self.__modulus = 1 << modulus_exp
        if "numpy" in data_type:
            self.__data_type = np.ndarray
        elif "torch" in data_type:
            # Imported lazily so numpy-only deployments never load torch.
            import torch
            self.__data_type = torch.Tensor
        self.__security_strength = modulus_exp
        self.__encode_method = "fixed-point arithmetic"
    @property
    def exp(self):
        """Number of fractional bits of the fixed-point encoding."""
        return self.__exp
    @property
    def modulus_exp(self):
        """Exponent (base 2) of the ciphertext modulus."""
        return self.__modulus_exp
    @property
    def scalar(self):
        """Fixed-point scaling factor ``2**exp``."""
        return self.__scalar
    @property
    def modulus(self):
        """Ciphertext modulus ``2**modulus_exp``."""
        return self.__modulus
    @property
    def data_type(self):
        """Plaintext container type (``np.ndarray`` or ``torch.Tensor``)."""
        return self.__data_type
    @property
    def security_strength(self):
        """Nominal security strength in bits (equals modulus_exp)."""
        return self.__security_strength
    @property
    def encode_method(self):
        """Human-readable name of the plaintext encoding."""
        return self.__encode_method
    def __str__(self) -> str:
        out = "scalar: 1 << {}, modulus: 1 << {}, data_type: {}, security_strength: {}, encode_method: {}"
        out = out.format(self.exp, self.modulus_exp, self.data_type, self.security_strength, self.encode_method)
        return out
    def __eq__(self, other: object) -> bool:
        """Contexts are interchangeable iff they share the same modulus."""
        # FIX: return NotImplemented for foreign types instead of raising
        # AttributeError inside the comparison.
        if not isinstance(other, OneTimePadContext):
            return NotImplemented
        return self.__modulus_exp == other.modulus_exp
    @staticmethod
    def serialize(data: object) -> bytes:
        """Pickle ``data`` for network transmission."""
        return pickle.dumps(data)
    @staticmethod
    def deserialize(data: bytes) -> object:
        """Inverse of :meth:`serialize`.

        FIX: the original return annotation was the builtin ``any`` (a
        function, not a type); ``object`` is the sound "anything" type.

        SECURITY NOTE: ``pickle.loads`` can execute arbitrary code from the
        payload; only deserialize data received from trusted parties.
        """
        return pickle.loads(data)
class OneTimeKey(object):
    """A one-time key (or list of keys) reduced modulo ``2**modulus_exp``.

    For the 64-bit modulus the entries are stored as native ``uint64``;
    for any other exponent Python big ints are kept via ``object`` dtype.
    """
    def __init__(self, key: Union[list[np.ndarray], np.ndarray], modulus_exp: int = 64):
        wrap = 1 << modulus_exp
        target_dtype = np.uint64 if modulus_exp == 64 else object

        def _reduce(part):
            # Wrap every entry into [0, 2**modulus_exp) and fix the dtype.
            return np.array(np.mod(part, wrap)).astype(target_dtype)

        if isinstance(key, list):
            self.value = [_reduce(part) for part in key]
        else:
            self.value = _reduce(key)
    def __len__(self):
        # Number of keys when a list was given, otherwise the key length.
        return len(self.value)
class OneTimePadCiphertext(object):
    def __init__(self,
                 data: Union[list, np.ndarray, bytes],
                 context_: OneTimePadContext):
        """A one-time-pad ciphertext: integers modulo ``context_.modulus``.

        Supports homomorphic addition/subtraction of ciphertexts that share
        the same context.

        Args:
            data (Union[list, np.ndarray, bytes]): list or np.ndarray consists of integers, or pickled object of them.
            context_ (OneTimePadContext): see OneTimePadContext.
        Raises:
            TypeError: if the type of data is not bytes, list or np.ndarray.
            TypeError: if the type of context_ is not OneTimePadContext or bytes.
        """
        if isinstance(context_, OneTimePadContext):
            self.__context = context_
        elif isinstance(context_, bytes):
            # A pickled context received from a peer.
            self.__context = pickle.loads(context_)
        else:
            raise TypeError(f"Got context type {type(context_)}, supported types are 'OneTimePadContext', 'bytes'")
        # uint64 for the 64-bit modulus (hardware wrap-around), Python big
        # ints via object dtype for the 128-bit modulus.
        dtype = np.uint64 if self.__context.modulus_exp == 64 else object
        if isinstance(data, bytes):
            self.__data = np.array(OneTimePadContext.deserialize(data), dtype=dtype)
        elif isinstance(data, list):
            self.__data = np.array(data, dtype=dtype)
        elif isinstance(data, (np.ndarray, np.float64, np.uint64)):  # , float, int)):
            self.__data = data.astype(dtype)
        else:
            raise TypeError(f"Got data type {type(data)}, supported types are 'list', 'np.ndarray', 'bytes'")
    def __str__(self):
        out = ', '.join([f"data: {self.__data}", "context: " + str(self.__context)])
        return out
    def __add__(self, other: object):
        """Elementwise modular addition of two same-context ciphertexts."""
        if self.__context != other.__context:
            raise ValueError(f"Adding ciphertext with different context, {self.__context} vs {other.__context}")
        if self.__context.modulus_exp == 64:
            # uint64 overflow performs the reduction mod 2**64 implicitly.
            out = self.__data + other.__data
        else:
            out = np.array(np.mod(self.__data + other.__data, self.__context.modulus), dtype=object)
        out = OneTimePadCiphertext(out, self.__context)
        return out
    def __sub__(self, other: object):
        """Elementwise modular subtraction of two same-context ciphertexts."""
        if self.__context != other.__context:
            raise ValueError(f"Subtracting ciphertext with different context, {self.__context} vs {other.__context}")
        if self.__context.modulus_exp == 64:
            # uint64 underflow wraps, matching subtraction mod 2**64.
            out = self.__data - other.__data
        else:
            out = np.array(np.mod(self.__data - other.__data, self.__context.modulus), dtype=object)
        out = OneTimePadCiphertext(out, self.__context)
        return out
    @property
    def data(self):
        # Raw integer array backing the ciphertext.
        return self.__data
    @property
    def context_(self):
        # The OneTimePadContext this ciphertext was created under.
        return self.__context
    def serialize(self) -> bytes:
        """Pickle __data for transmission
        """
        return OneTimePadContext.serialize(self.__data)
    def decode(self):
        """Decode to plaintext when all the keys in the ciphertext are cancelled
        """
        # 0-d data is lifted to 1-d for the vectorised ops, restored below.
        if self.__data.shape == ():
            zero_shape = True
            data = np.array([self.__data], dtype=object)
        else:
            zero_shape = False
            data = self.__data.astype(object)
        # Representatives above modulus/2 encode negative values.
        idx = np.where(data > self.__context.modulus // 2)
        out = deepcopy(data)
        if len(idx[0]) != 0:
            out[idx] -= self.__context.modulus
        # Undo the fixed-point scaling applied at encryption time.
        out /= self.__context.scalar
        if self.__context.data_type == np.ndarray:
            out = out.astype(np.float32)
        else:
            # Imported lazily, mirroring OneTimePadContext.
            import torch
            out = torch.from_numpy(out.astype(np.float32))
        if zero_shape:
            return out[0]
        else:
            return out
| 7,684 | 33.931818 | 125 | py |
XFL | XFL-master/python/common/dataset/azpro_data.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from typing import Any
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np
import pandas as pd
import torch
from common.utils.data_utils import download_url, pd_train_test_split
class Azpro(torch.utils.data.Dataset):
    """Azpro hospital-stay regression dataset.

    Downloads the raw CSV, separates label (first column) from features and
    can re-split the table into per-party CSV files for vertical or
    horizontal federated-learning experiments.
    """
    url = "https://r-data.pmagunia.com/system/files/datasets/dataset-15359.csv"
    md5 = None
    # NOTE: evaluated at class-creation time; requires PROJECT_HOME to be set.
    dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
    datapath = os.path.join(dirpath, "azpro")
    datadir = "azpro_data"
    def __init__(
        self,
        redownload: bool = False,
        norm: Any = None,
    ) -> None:
        """Download (if needed) and load the dataset.

        Args:
            redownload: remove any previously generated split directory and
                fetch the raw file again.
            norm: min-max normalize all columns (label included, matching
                the historical behavior). ``None`` falls back to the legacy
                module-global ``reallocate_dict["norm"]`` when present.
        """
        super().__init__()
        if not os.path.exists(self.dirpath):
            os.mkdir(self.dirpath)
        self._download(redownload)
        raw_df = pd.read_csv(self.datapath, index_col=None)
        # "hospital" is an identifier, not a feature.
        raw_df = raw_df.drop(columns=["hospital"])
        self.feature = raw_df.iloc[:, 1:]
        self.feature_cols = self.feature.columns
        self.label = pd.DataFrame(raw_df.iloc[:, 0])
        self.label.columns = ["y"]
        self.id = np.arange(len(self.label))
        self.data = self.label.join(self.feature)
        self.data = self.data.reset_index()
        self.data.columns = ["id"] + list(self.data.columns[1:])
        # BUG FIX: the original read the module-level global
        # ``reallocate_dict["norm"]`` here, which raised NameError whenever
        # the class was imported (it only worked when this module itself was
        # run as a script). ``norm`` is now a constructor parameter; for
        # backward compatibility, norm=None still consults the legacy global
        # if the caller defined it.
        if norm is None:
            norm = bool(globals().get("reallocate_dict", {}).get("norm", False))
        if norm:
            # NOTE(review): iloc[:, 1:] includes the "y" column, so the
            # label is min-max scaled together with the features --
            # preserved from the original; confirm this is intended.
            feature = self.data.iloc[:, 1:]
            scaler = MinMaxScaler()
            data_norm = pd.DataFrame(scaler.fit_transform(feature), columns=feature.columns)
            self.data = self.data.iloc[:, :1].join(data_norm)
    def __getitem__(self, index: int) -> Any:
        # BUG FIX: ``DataFrame[index]`` selects a *column* by label and
        # raised KeyError for a positional row index; ``.iloc`` selects rows.
        return self.feature.iloc[index], self.label.iloc[index]
    def __len__(self) -> int:
        """Number of samples (added for consistency with the other loaders)."""
        return len(self.data)
    def _download(self, redownload):
        """Fetch the raw CSV; drop the derived splits when redownloading."""
        if redownload:
            shutil.rmtree(os.path.join(self.dirpath, self.datadir))
        download_url(
            self.url, self.datapath, self.md5)
    def reallocate(self, reallocate_dict):
        """Split the dataset into per-party train/test CSV files.

        Args:
            reallocate_dict: dict with keys "mode" ("vertical" or
                "horizontal"), "splits" (number of parties), "test_ratio",
                "random_seed" and "parties" (party names; in vertical mode a
                party whose name contains "labeled" also receives "y").

        Side effects:
            Writes CSVs under ``dataset/azpro_data_<mode>/<splits>party/``
            and deletes the downloaded raw file.
        """
        mode = reallocate_dict['mode']
        self.datadir = f"{self.datadir}_{mode}"
        splits = reallocate_dict['splits']
        reallocate_folder = f'{splits}party'
        test_ratio = reallocate_dict['test_ratio']
        random_state = reallocate_dict["random_seed"]
        parties = reallocate_dict["parties"]
        np.random.seed(random_state)
        final_dir_path = os.path.join(
            self.dirpath, self.datadir, reallocate_folder)
        if not os.path.exists(final_dir_path):
            os.makedirs(final_dir_path)
        if mode == "vertical":
            # Each party receives a disjoint slice of the feature columns.
            cols = self.feature_cols
            split_cols = np.array_split(cols, splits)
            for i, span in enumerate(split_cols):
                if "labeled" in parties[i]:
                    train_data, test_data = pd_train_test_split(
                        self.data[["id", "y"] + list(span)], test_ratio=test_ratio, random_state=random_state)
                else:
                    train_data, test_data = pd_train_test_split(
                        self.data[["id"] + list(span)], test_ratio=test_ratio, random_state=random_state)
                train_csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{parties[i]}_train.csv')
                test_csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{parties[i]}_test.csv')
                train_data.to_csv(train_csv_path, index=False)
                test_data.to_csv(test_csv_path, index=False)
        elif mode == "horizontal":
            # Shared test set; the training rows are shuffled and split
            # evenly across parties.
            train_data, test_data = pd_train_test_split(
                self.data, test_ratio=test_ratio, random_state=random_state)
            test_csv_path = os.path.join(
                final_dir_path, f'{self.datadir}_test.csv')
            test_data.to_csv(test_csv_path, index=False)
            rand_idx = np.random.permutation(range(len(train_data)))
            indices = np.array_split(rand_idx, splits)
            for i, party in enumerate(parties):
                csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{party}.csv')
                data = train_data.loc[indices[i]]
                data.to_csv(csv_path, index=False)
        os.remove(self.datapath)
if __name__ == "__main__":
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("--mode", type=str, default="vertical",
                     help="vertical or horizontal task")
    cli.add_argument("--splits", type=int, default=2,
                     help="number of parties")
    cli.add_argument("--test_ratio", type=float,
                     default=0.3, help="ratio of test data")
    cli.add_argument("--random_seed", type=int,
                     default=0, help="random seed")
    cli.add_argument("--party", nargs="+", help="involved parties")
    # NOTE: argparse's type=bool treats any non-empty string as True.
    cli.add_argument("--norm", type=bool,
                     default=False, help="normalization")
    parsed = cli.parse_args()
    # Built before instantiation: Azpro historically reads the "norm" entry
    # of this module-level dict while loading the raw CSV.
    reallocate_dict = {
        "mode": parsed.mode,
        "splits": parsed.splits,
        "test_ratio": parsed.test_ratio,
        "random_seed": parsed.random_seed,
        "parties": parsed.party,
        "norm": parsed.norm,
    }
    dataset = Azpro()
    dataset.reallocate(reallocate_dict)
| 5,809 | 38.52381 | 110 | py |
XFL | XFL-master/python/common/dataset/hiv.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from typing import Any
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np
import pandas as pd
import torch
from common.utils.data_utils import download_url, pd_train_test_split
import zipfile
class HIV(torch.utils.data.Dataset):
    """MoleculeNet HIV dataset loader for federated-learning experiments.

    Downloads and unzips the archive, keeps the first CSV column as the
    single feature and the third as the binary label, and can re-split the
    table into per-party CSV files.
    """
    url = "https://data.dgl.ai/dataset/hiv.zip"
    md5 = None
    # NOTE: evaluated at class-creation time; requires PROJECT_HOME to be set.
    dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
    datapath = os.path.join(dirpath, "hiv")
    datadir = "hiv"
    def __init__(
        self,
        redownload: bool = False,
    ) -> None:
        """Download (if needed), extract and load the dataset.

        Args:
            redownload: remove any previously generated split directory and
                fetch the archive again.
        """
        super().__init__()
        if not os.path.exists(self.dirpath):
            os.mkdir(self.dirpath)
        self._download(redownload)
        raw_df = pd.read_csv(self.dirpath + "/HIV.csv")
        # Single feature column (column 0), kept as an (n, 1) array.
        self.feature = raw_df.iloc[:, 0].values.reshape(-1, 1)
        self.label = raw_df.iloc[:, 2].values
        self.feature_cols = [f'x{i}' for i in range(self.feature.shape[1])]
        self.id = np.arange(len(self.label))
        # Assemble an [id, y, features...] table for reallocation.
        self.reconstruct_df = np.hstack(
            [self.id.reshape(-1, 1), self.label.reshape(-1, 1), self.feature])
        self.data = pd.DataFrame(data=self.reconstruct_df,
                                 columns=["id", "y"] + list(self.feature_cols)
                                 )
    def __getitem__(self, index: int) -> Any:
        # Returns (feature row, label) for one sample.
        return self.feature[index], self.label[index]
    def __len__(self) -> int:
        # Number of samples.
        return len(self.data.values)
    def _download(self, redownload):
        """Fetch the zip archive and extract it into ``dirpath``."""
        if redownload:
            shutil.rmtree(os.path.join(self.dirpath, self.datadir))
        download_url(
            self.url, self.datapath, self.md5)
        self._extract_archive(self.datapath, self.dirpath)
    def _extract_archive(self, datapath, dir_path):
        # Unzip everything from datapath into dir_path.
        with zipfile.ZipFile(datapath, "r") as archive:
            archive.extractall(path=dir_path)
    def reallocate(self, reallocate_dict):
        """Split the dataset into per-party train/test CSV files.

        Args:
            reallocate_dict: dict with keys "mode" ("vertical" or
                "horizontal"), "splits", "test_ratio", "random_seed" and
                "parties" (in vertical mode a party whose name contains
                "labeled" also receives the "y" column).

        Side effects:
            Writes CSVs under ``dataset/hiv_<mode>/<splits>party/`` and
            deletes the downloaded archive.
        """
        mode = reallocate_dict['mode']
        self.datadir = f"{self.datadir}_{mode}"
        splits = reallocate_dict['splits']
        reallocate_folder = f'{splits}party'
        test_ratio = reallocate_dict['test_ratio']
        random_state = reallocate_dict["random_seed"]
        parties = reallocate_dict["parties"]
        np.random.seed(random_state)
        final_dir_path = os.path.join(
            self.dirpath, self.datadir, reallocate_folder)
        if not os.path.exists(final_dir_path):
            os.makedirs(final_dir_path)
        if mode == "vertical":
            # Each party receives a disjoint slice of the feature columns.
            cols = self.feature_cols
            split_cols = np.array_split(cols, splits)
            for i, span in enumerate(split_cols):
                if "labeled" in parties[i]:
                    train_data, test_data = pd_train_test_split(
                        self.data[["id", "y"] + list(span)], test_ratio=test_ratio, random_state=random_state)
                else:
                    train_data, test_data = pd_train_test_split(
                        self.data[["id"] + list(span)], test_ratio=test_ratio, random_state=random_state)
                train_csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{parties[i]}_train.csv')
                test_csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{parties[i]}_test.csv')
                train_data.to_csv(train_csv_path, index=False)
                test_data.to_csv(test_csv_path, index=False)
        elif mode == "horizontal":
            # Shared test set; shuffled training rows split across parties.
            train_data, test_data = pd_train_test_split(
                self.data, test_ratio=test_ratio, random_state=random_state)
            test_csv_path = os.path.join(
                final_dir_path, f'{self.datadir}_test.csv')
            test_data.to_csv(test_csv_path, index=False)
            rand_idx = np.random.permutation(range(len(train_data)))
            indices = np.array_split(rand_idx, splits)
            for i, party in enumerate(parties):
                csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{party}.csv')
                data = train_data.loc[indices[i]]
                data.to_csv(csv_path, index=False)
        os.remove(self.datapath)
if __name__ == "__main__":
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("--mode", type=str, default="vertical",
                     help="vertical or horizontal task")
    cli.add_argument("--splits", type=int, default=2,
                     help="number of parties")
    cli.add_argument("--test_ratio", type=float,
                     default=0.3, help="ratio of test data")
    cli.add_argument("--random_seed", type=int,
                     default=0, help="random seed")
    cli.add_argument("--party", nargs="+", help="involved parties")
    parsed = cli.parse_args()
    reallocate_dict = {
        "mode": parsed.mode,
        "splits": parsed.splits,
        "test_ratio": parsed.test_ratio,
        "random_seed": parsed.random_seed,
        "parties": parsed.party,
    }
    dataset = HIV()
    dataset.reallocate(reallocate_dict)
| 5,661 | 37.256757 | 110 | py |
XFL | XFL-master/python/common/dataset/sst2.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from typing import Any
import numpy as np
import pandas as pd
import torch
from common.utils.data_utils import download_and_extract_data, pd_train_test_split
class SST2(torch.utils.data.Dataset):
    """GLUE SST-2 sentiment dataset loader for federated learning.

    Downloads and extracts the SST-2 archive and exposes one of the three
    TSV splits (train/dev/test) as the active data.
    """
    url = "https://dl.fbaipublicfiles.com/glue/data/SST-2.zip"
    md5 = "9f81648d4199384278b86e315dac217c"
    # NOTE: evaluated at class-creation time; requires PROJECT_HOME to be set.
    dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
    datapath = os.path.join(dirpath, "SST-2.zip")
    data_folder = "SST-2"
    raw_data_folder = os.path.join(dirpath, data_folder, "original")
    def __init__(
        self,
        redownload: bool = False,
        mode: str = "train"
    ) -> None:
        """Download (if needed), extract and load the three TSV splits.

        Args:
            redownload: remove the extracted folder and fetch again.
            mode: which split backs ``self.data`` -- "train", "val" or
                "test" (resolved via ``getattr`` below).
        """
        super().__init__()
        if not os.path.exists(self.dirpath):
            os.mkdir(self.dirpath)
        self._download_and_extract(redownload)
        self.mode = mode
        self.train = pd.read_csv(os.path.join(self.dirpath, self.data_folder, "train.tsv"),sep='\t')
        self.val = pd.read_csv(os.path.join(self.dirpath, self.data_folder, "dev.tsv"), sep='\t')
        self.test = pd.read_csv(os.path.join(self.dirpath, self.data_folder, "test.tsv"), sep='\t')
        # The active split; mode must be one of the attribute names above.
        self.data = getattr(self, mode)
    def __getitem__(self, index: int) -> Any:
        # NOTE(review): presumably test.tsv has no "label" column (GLUE
        # ships unlabeled test data), so mode="test" would fail here --
        # confirm before relying on it.
        return self.data["sentence"].values[index], self.data["label"].values[index]
    def __len__(self) -> int:
        # Number of sentences in the active split.
        return len(self.data["sentence"].values)
    def get_data(self):
        # Returns (sentences, labels) of the active split as arrays.
        return self.data["sentence"].values, self.data["label"].values
    def _download_and_extract(self, redownload):
        """Fetch the SST-2 archive and unpack it under ``dirpath``."""
        if redownload:
            shutil.rmtree(os.path.join(self.dirpath, self.data_folder))
        download_and_extract_data(
            self.url, self.md5, self.datapath, data_folder=self.data_folder)
    def reallocate(self, reallocate_dict):
        """Split the training data into per-party TSV files (horizontal only).

        Args:
            reallocate_dict: dict with keys "mode", "splits", "random_seed"
                and "parties".

        Raises:
            NotImplementedError: for mode == "vertical".
        """
        mode = reallocate_dict['mode']
        splits = reallocate_dict['splits']
        reallocate_folder = f'{splits}party'
        random_state = reallocate_dict["random_seed"]
        parties = reallocate_dict["parties"]
        np.random.seed(random_state)
        final_dir_path = os.path.join(
            self.dirpath, self.data_folder, reallocate_folder)
        if not os.path.exists(final_dir_path):
            os.makedirs(final_dir_path)
        if mode == "vertical":
            raise NotImplementedError("SST-2 task currently doesn't support vertical federated learning")
        elif mode == "horizontal":
            # Shared validation file; shuffled train rows split per party.
            val_path = os.path.join(
                final_dir_path, f'{self.data_folder}_val.tsv')
            self.val.to_csv(val_path, index=False, sep="\t")
            rand_idx = np.random.permutation(range(len(self.train)))
            indices = np.array_split(rand_idx, splits)
            for i, party in enumerate(parties):
                tsv_path = os.path.join(
                    final_dir_path, f'{self.data_folder}_{party}.tsv')
                data = self.train.loc[indices[i]]
                data.to_csv(tsv_path, index=False, sep='\t')
        # Drop the untokenized originals shipped inside the archive.
        shutil.rmtree(self.raw_data_folder)
if __name__ == "__main__":
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("--mode", type=str, default="horizontal",
                     help="vertical or horizontal task")
    cli.add_argument("--splits", type=int, default=2,
                     help="number of parties")
    cli.add_argument("--random_seed", type=int,
                     default=0, help="random seed")
    cli.add_argument("--party", nargs="+", help="involved parties")
    parsed = cli.parse_args()
    reallocate_dict = {
        "mode": parsed.mode,
        "splits": parsed.splits,
        "random_seed": parsed.random_seed,
        "parties": parsed.party,
    }
    dataset = SST2()
    dataset.reallocate(reallocate_dict)
XFL | XFL-master/python/common/dataset/breast_cancer_wisconsin.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from typing import Any
import numpy as np
import pandas as pd
import torch
from common.utils.data_utils import download_url, pd_train_test_split
class WDBC(torch.utils.data.Dataset):
    """Breast Cancer Wisconsin (Diagnostic) dataset.

    Downloads the raw UCI ``wdbc.data`` file, z-score standardizes the 30
    features, maps the diagnosis to a binary label (M -> 1, B -> 0) and can
    re-split the table into per-party CSV files for vertical, horizontal or
    transfer federated-learning experiments.
    """
    url = "http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data"
    md5 = None
    # NOTE: evaluated at class-creation time; requires PROJECT_HOME to be set.
    dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
    datapath = os.path.join(dirpath, "wdbc.data")
    datadir = "breast_cancer_wisconsin"
    def __init__(
        self,
        redownload: bool = False,
    ) -> None:
        """Download (if needed) and load the dataset.

        Args:
            redownload: remove any previously generated split directory and
                fetch the raw file again.
        """
        super().__init__()
        if not os.path.exists(self.dirpath):
            os.mkdir(self.dirpath)
        self._download(redownload)
        self.data = pd.read_csv(self.datapath, names=[
            "id", "y"] + [f"x{i:0>2d}" for i in range(30)])
        # Diagnosis: malignant -> 1, benign -> 0.
        self.data["y"] = self.data["y"].map({"M": 1, "B": 0})
        # Standardize each feature column to zero mean / unit variance.
        self.data[[f"x{i:0>2d}" for i in range(30)]] = self.data[[
            f"x{i:0>2d}" for i in range(30)]].apply(lambda x: (x-x.mean())/x.std())
    def __getitem__(self, index: int) -> Any:
        # Returns (30-dim feature vector, label) for one sample.
        return self.data[[f"x{i:0>2d}" for i in range(30)]].values[index], self.data["y"].values[index]
    def __len__(self) -> int:
        """Number of samples."""
        return len(self.data.values)
    def _download(self, redownload):
        """Fetch the raw CSV; drop the derived splits when redownloading."""
        if redownload:
            shutil.rmtree(os.path.join(self.dirpath, self.datadir))
        download_url(
            self.url, self.datapath, self.md5)
    def reallocate(self, reallocate_dict):
        """Write per-party train/test CSVs according to ``reallocate_dict``.

        Args:
            reallocate_dict: dict with keys "mode" ("vertical",
                "horizontal" or "transfer"), "splits", "test_ratio",
                "drop_ratio" (transfer mode only), "random_seed" and
                "parties" (a vertical/transfer party named/containing
                "labeled" also receives the "y" column).

        Side effects:
            Writes CSVs under
            ``dataset/breast_cancer_wisconsin_<mode>/<splits>party/``
            (plus ``overlap_index.npy`` in transfer mode) and deletes the
            downloaded raw file.
        """
        mode = reallocate_dict['mode']
        self.datadir = f"{self.datadir}_{mode}"
        splits = reallocate_dict['splits']
        reallocate_folder = f'{splits}party'
        test_ratio = reallocate_dict['test_ratio']
        drop_ratio = reallocate_dict['drop_ratio']
        random_state = reallocate_dict["random_seed"]
        parties = reallocate_dict["parties"]
        np.random.seed(random_state)
        final_dir_path = os.path.join(
            self.dirpath, self.datadir, reallocate_folder)
        if not os.path.exists(final_dir_path):
            os.makedirs(final_dir_path)
        if mode == "vertical":
            # Each party receives a disjoint slice of the feature columns.
            cols = [f"x{i:0>2d}" for i in range(30)]
            split_cols = np.array_split(cols, splits)
            for i, span in enumerate(split_cols):
                if "labeled" in parties[i]:
                    train_data, test_data = pd_train_test_split(
                        self.data[["id", "y"]+list(span)], test_ratio=test_ratio, random_state=random_state)
                else:
                    train_data, test_data = pd_train_test_split(
                        self.data[["id"]+list(span)], test_ratio=test_ratio, random_state=random_state)
                train_csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{parties[i]}_train.csv')
                test_csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{parties[i]}_test.csv')
                train_data.to_csv(train_csv_path, index=False)
                test_data.to_csv(test_csv_path, index=False)
        elif mode == "horizontal":
            train_data, test_data = pd_train_test_split(
                self.data, test_ratio=test_ratio, random_state=random_state)
            test_csv_path = os.path.join(
                final_dir_path, f'{self.datadir}_test.csv')
            test_data.to_csv(test_csv_path, index=False)
            # Training rows are split in their original order (shuffling was
            # deliberately disabled here).
            indices = np.array_split(range(len(train_data)), splits)
            for i, party in enumerate(parties):
                csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{party}.csv')
                data = train_data.loc[indices[i]]
                data.to_csv(csv_path, index=False)
        elif mode == "transfer":
            all_train_data, all_test_data = pd_train_test_split(
                self.data, test_ratio=test_ratio, random_state=random_state)
            cols = [f"x{i:0>2d}" for i in range(30)]
            split_cols = np.array_split(cols, splits)
            index_dict = {}
            for i, span in enumerate(split_cols):
                # Every party independently drops a fraction of the train
                # rows (different seeds), simulating partial sample overlap.
                if parties[i] == "labeled":
                    train_data, _ = pd_train_test_split(
                        all_train_data[["id", "y"]+list(span)], test_ratio=drop_ratio,
                        shuffle=True, random_state=random_state+100
                    )
                    test_data = all_test_data[["id", "y"]+list(span)]
                else:
                    train_data, _ = pd_train_test_split(
                        all_train_data[["id"]+list(span)], test_ratio=drop_ratio,
                        shuffle=True, random_state=random_state+200
                    )
                    test_data = all_test_data[["id"]+list(span)]
                index_dict[i] = train_data["id"].to_list()
                train_csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{parties[i]}_train.csv')
                test_csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{parties[i]}_test.csv')
                train_data.to_csv(train_csv_path, index=False)
                test_data.to_csv(test_csv_path, index=False)
            # Ids surviving every party's drop, saved for sample alignment.
            overlap_index = index_dict[0]
            for i in range(1, len(index_dict)):
                overlap_index = self.intersection_list(overlap_index, index_dict[i])
            np.save(os.path.join(final_dir_path, "overlap_index.npy"), np.array(overlap_index))
        os.remove(self.datapath)
    @staticmethod
    def intersection_list(list1, list2):
        """Return the items of ``list1`` that also occur in ``list2``,
        preserving ``list1``'s order and multiplicity.
        """
        # PERF FIX: membership against a set is O(1); the original probed a
        # list (O(len(list2)) per item), making the whole call O(n*m).
        lookup = set(list2)
        return [value for value in list1 if value in lookup]
if __name__ == "__main__":
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("--mode", type=str, default="vertical",
                     help="vertical, horizontal or transfer task")
    cli.add_argument("--splits", type=int, default=2,
                     help="number of parties")
    cli.add_argument("--test_ratio", type=float,
                     default=0.3, help="ratio of test data")
    cli.add_argument("--drop_ratio", type=float,
                     default=0.3, help="ratio of drop data")
    cli.add_argument("--random_seed", type=int,
                     default=0, help="random seed")
    cli.add_argument("--party", nargs="+", help="involved parties")
    parsed = cli.parse_args()
    reallocate_dict = {
        "mode": parsed.mode,
        "splits": parsed.splits,
        "test_ratio": parsed.test_ratio,
        "drop_ratio": parsed.drop_ratio,
        "random_seed": parsed.random_seed,
        "parties": parsed.party,
    }
    dataset = WDBC()
    dataset.reallocate(reallocate_dict)
| 7,461 | 40.921348 | 108 | py |
XFL | XFL-master/python/common/dataset/cifar.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import pickle
import shutil
from typing import Any, Callable, Optional, Tuple
import numpy as np
import torch
from common.utils.data_utils import check_integrity, download_and_extract_data
from PIL import Image
import torchvision.transforms as transforms
class CIFAR10(torch.utils.data.Dataset):
    """CIFAR-10 dataset with support for re-splitting data across federated parties.

    Downloads and extracts the official archive, verifies batch checksums,
    loads images/labels into memory, and can write per-party ``.npz`` splits
    via :meth:`reallocate` using random, biased or Dirichlet sampling.
    """

    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    md5 = "c58f30108f718f92721af3b95e74349a"
    dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
    datapath = os.path.join(dirpath, "cifar-10-python.tar.gz")
    data_folder = "cifar-10-batches-py"
    data_folder_renamed = "cifar10"
    # md5 checksums of the extracted batch files, keyed by file name.
    train_dict = {
        "data_batch_1": "c99cafc152244af753f735de768cd75f",
        "data_batch_2": "d4bba439e000b95fd0a9bffe97cbabec",
        "data_batch_3": "54ebc095f3ab1f0389bbae665268c751",
        "data_batch_4": "634d18415352ddfa80567beed471001a",
        "data_batch_5": "482c414d41f54cd18b22e5b47cb7c3cb",
    }
    test_dict = {
        "test_batch": "40351d587109b95175f43aff81a1287e",
    }
    metadata = {
        "filename": "batches.meta",
        "key": "label_names",
        "md5": "5ff9c542aee3614f3951f8cda6e48888",
    }

    def __init__(
        self,
        train: bool = True,
        redownload: bool = False,
        transform: Optional[Callable] = None
    ) -> None:
        """Load the train or test batches into memory.

        Args:
            train: load the training batches if True, the test batch otherwise.
            redownload: delete the extracted data and download again.
            transform: optional PIL-image transform applied in ``__getitem__``.

        Raises:
            RuntimeError: if a batch file is missing or fails its md5 check.
        """
        super().__init__()
        if not os.path.exists(self.dirpath):
            os.mkdir(self.dirpath)
        self.train = train
        self._download_and_extract(redownload)
        self._load_metadata()
        self.data_dict = self.train_dict if self.train else self.test_dict
        self.transform = transform
        self.data = []
        self.labels = []
        for file_name, md5 in self.data_dict.items():
            file_path = os.path.join(self.dirpath, self.data_folder, file_name)
            if not check_integrity(file_path, md5):
                self.integrity = False  # fixed attribute typo ("intergrity")
                raise RuntimeError(
                    f'{file_path} has been corrupted or lost. Please redownload the data by setting redownload=True')
            with open(file_path, "rb") as f:
                entry = pickle.load(f, encoding="latin1")
                self.data.append(entry["data"])
                # CIFAR-10 batches store "labels"; CIFAR-100 stores "fine_labels".
                if "labels" in entry:
                    self.labels.extend(entry["labels"])
                else:
                    self.labels.extend(entry["fine_labels"])
        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32).transpose((0, 2, 3, 1))  # HWC format
        self.labels = np.array(self.labels)

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Return the (optionally transformed) PIL image and its label."""
        data, label = self.data[index], self.labels[index]
        data = Image.fromarray(data)
        if self.transform is not None:
            data = self.transform(data)
        return data, label

    def __len__(self) -> int:
        return len(self.data)

    def _download_and_extract(self, redownload):
        """Download/extract the archive; wipe the previous extraction first if asked."""
        if redownload:
            shutil.rmtree(os.path.join(self.dirpath, self.data_folder))
        download_and_extract_data(
            self.url, self.md5, self.datapath, data_folder=self.data_folder)

    def _load_metadata(self) -> None:
        """Load class names and build the name -> index mapping."""
        metapath = os.path.join(
            self.dirpath, self.data_folder, self.metadata["filename"])
        if not check_integrity(metapath, self.metadata["md5"]):
            raise RuntimeError(
                "Dataset metadata has not been found or is corrupted. Please redownload the data by setting redownload=True")
        with open(metapath, "rb") as f:
            data = pickle.load(f, encoding="latin1")
            self.classes = data[self.metadata["key"]]
        self.class_to_idx = {c: i for i, c in enumerate(self.classes)}

    def reallocate(self, reallocate_dict):
        """Write per-party ``.npz`` splits of the loaded data.

        ``reallocate_dict`` keys: ``sampling`` ("random" | "biased" |
        "dirichlet"), ``splits`` (int party count, or a list of fractions for
        "random"), ``seed``, ``party`` (list of party names), plus
        ``group_fractions`` for "biased" and ``beta`` for "dirichlet".

        Fixes vs. the previous version:
          * biased mode used ``self.label`` (undefined), split the wrong index
            array (``rand_idx``) and shuffled the raw ``np.where`` tuple;
          * fraction lists were multiplied by an int (list repetition) instead
            of element-wise scaling;
          * the test set now really splits into ``len(party)`` parts (the old
            ``splits`` override was dead code);
          * all three modes now share one save helper with consistent naming.
        """
        splits = reallocate_dict['splits']
        # Folder name always reflects the configured number of training splits.
        reallocate_folder = f'{splits}party'
        if not self.train:
            # The test set is divided among the listed parties (usually one).
            splits = len(reallocate_dict["party"])
        np.random.seed(reallocate_dict["seed"])
        if reallocate_dict["sampling"] == "random":
            rand_idx = np.random.permutation(len(self.data))
            if isinstance(splits, int):
                # Split into (nearly) equal chunks.
                indices = np.array_split(rand_idx, splits)
            else:
                # ``splits`` is a list of fractions summing to 1.
                assert sum(splits) == 1, "the sum of fractions must be 1"
                sections = np.cumsum(
                    np.floor(np.asarray(splits) * len(self.data)).astype(int))[:-1]
                indices = np.split(rand_idx, sections)
            self._save_party_files(indices, reallocate_dict["party"], reallocate_folder)
        elif reallocate_dict["sampling"] == "biased":
            indices_group = [[] for _ in range(splits)]
            for group_label, fractions in reallocate_dict["group_fractions"].items():
                group_index = np.random.permutation(
                    np.where(self.labels == group_label)[0])
                sections = np.cumsum(
                    np.floor(np.asarray(fractions) * len(group_index)).astype(int))[:-1]
                for i, indice in enumerate(np.split(group_index, sections)):
                    indices_group[i].extend(indice)
            self._save_party_files(indices_group, reallocate_dict["party"], reallocate_folder)
        elif reallocate_dict["sampling"] == "dirichlet":
            # Resample until the smallest party holds at least ~95% of an even share.
            party_size = 0
            min_size = len(self.data) / splits * 0.95
            while party_size < min_size:
                indices_group = [[] for _ in range(splits)]
                for group_label in range(len(set(self.labels))):
                    group_index = np.random.permutation(
                        np.where(self.labels == group_label)[0])
                    fractions = np.random.dirichlet(
                        np.repeat(reallocate_dict["beta"], splits))
                    sections = (np.cumsum(fractions) * len(group_index)).astype(int)[:-1]
                    for i, indice in enumerate(np.split(group_index, sections)):
                        indices_group[i].extend(indice)
                party_size = min(len(ind) for ind in indices_group)
            self._save_party_files(indices_group, reallocate_dict["party"], reallocate_folder)

    def _save_party_files(self, indices, parties, reallocate_folder):
        """Save each party's slice of data/labels as ``<dataset>_<party>.npz``."""
        final_dir_path = os.path.join(
            self.dirpath, self.data_folder_renamed, reallocate_folder)
        if not os.path.exists(final_dir_path):
            os.makedirs(final_dir_path)
        for i, party in enumerate(parties):
            npy_path = os.path.join(
                final_dir_path, f"{self.data_folder_renamed}_{party}.npz")
            idx = np.asarray(indices[i], dtype=int)
            np.savez(npy_path, data=self.data[idx], labels=self.labels[idx])
class CIFAR100(CIFAR10):
    """CIFAR-100 variant: same loading/reallocation logic, different archive.

    Only the class-level constants differ from :class:`CIFAR10`.
    """

    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    md5 = "eb9058c3a382ffc7106e4002c42a8d85"
    dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
    datapath = os.path.join(dirpath, "cifar-100-python.tar.gz")
    data_folder = "cifar-100-python"
    data_folder_renamed = "cifar-100"
    train_dict = {
        "train": "16019d7e3df5f24257cddd939b257f8d",
    }
    # Fixed: this was accidentally written as a set ({"test", "md5"}), which
    # has no .items() and broke loading the test batch in CIFAR10.__init__.
    test_dict = {
        "test": "f0ef6b0ae62326f3e7ffdfab6717acfc",
    }
    metadata = {
        "filename": "meta",
        "key": "fine_label_names",
        "md5": "7973b15100ade9c7d40fb424638fde48",
    }
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--splits", type=int, default=2,
                        help="number of parties")
    # choices= rejects unknown modes up front; previously an invalid value
    # left train_reallocate_dict undefined and crashed later with NameError.
    parser.add_argument("--sampling", type=str, default="random",
                        choices=["random", "biased", "dirichlet"],
                        help="mode to split the dataset, random, biased or dirichlet")
    parser.add_argument("--beta", type=float, default=1,
                        help="dirichlet parameter, smaller means more non-iid")
    parser.add_argument("--party", nargs="+", help="involved parties")
    parser.add_argument("--keep_raw_data",
                        action='store_true', help="keep raw data file")
    args = parser.parse_args()

    # The test set is always split randomly regardless of the training mode.
    test_reallocate_dict = {
        "sampling": "random",
        "splits": args.splits,
        "seed": 0,
        "party": ["test"]
    }
    if args.sampling == "random":
        train_reallocate_dict = {
            "sampling": "random",
            "splits": args.splits,
            "seed": 0,
            "party": args.party
        }
    elif args.sampling == "biased":
        # Per-class fractions: classes 1-5 go mostly to party 0, 6-9 and 0
        # mostly to party 1.
        train_reallocate_dict = {
            "sampling": "biased",
            "splits": args.splits,
            "seed": 0,
            "group_fractions": {1: [0.8, 0.2], 2: [0.8, 0.2], 3: [0.8, 0.2], 4: [0.8, 0.2], 5: [0.8, 0.2], 6: [0.2, 0.8], 7: [0.2, 0.8], 8: [0.2, 0.8], 9: [0.2, 0.8], 0: [0.2, 0.8]},
            "party": args.party
        }
    else:  # dirichlet (guaranteed by choices=)
        train_reallocate_dict = {
            "sampling": "dirichlet",
            "splits": args.splits,
            "seed": 0,
            "beta": args.beta,
            "party": args.party
        }

    cifar10_train = CIFAR10(train=True)
    cifar10_train.reallocate(train_reallocate_dict)
    cifar10_test = CIFAR10(train=False)
    cifar10_test.reallocate(test_reallocate_dict)
    if not args.keep_raw_data:
        shutil.rmtree(os.path.join(
            cifar10_train.dirpath, cifar10_train.data_folder))
XFL | XFL-master/python/common/dataset/boston_housing_price.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from typing import Any
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
import torch
from common.utils.data_utils import download_url, pd_train_test_split
class Boston(torch.utils.data.Dataset):
    """Boston housing-price dataset with federated reallocation support.

    Downloads the raw CMU file, parses it into a 13-column feature matrix
    plus the target, and can split the resulting table into per-party CSV
    files for vertical or horizontal federated learning.
    """

    url = "http://lib.stat.cmu.edu/datasets/boston"
    md5 = None  # upstream provides no checksum
    dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
    datapath = os.path.join(dirpath, "boston")
    datadir = "boston_housing_price"

    def __init__(
        self,
        redownload: bool = False,
    ) -> None:
        """Download (if needed) and parse the raw dataset.

        Args:
            redownload: delete the previously extracted data and fetch again.
        """
        super().__init__()
        if not os.path.exists(self.dirpath):
            os.mkdir(self.dirpath)
        self._download(redownload)
        # The raw file stores each record on two physical lines after 22
        # header lines; stitch the pairs back together (11 + 2 features,
        # third value of the second line is the target).
        raw_df = pd.read_csv(self.datapath, sep=r"\s+", skiprows=22, header=None)
        self.feature = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
        self.label = raw_df.values[1::2, 2]
        self.feature_cols = [f'x{i}' for i in range(self.feature.shape[1])]
        self.id = np.arange(len(self.label))
        self.reconstruct_df = np.hstack(
            [self.id.reshape(-1, 1), self.label.reshape(-1, 1), self.feature])
        self.data = pd.DataFrame(data=self.reconstruct_df,
                                 columns=["id", "y"] + list(self.feature_cols)
                                 )
        # NOTE: normalization used to be attempted here with an undefined
        # ``reallocate_dict`` (NameError on every construction); it now runs
        # inside reallocate() when reallocate_dict["norm"] is truthy.

    def __getitem__(self, index: int) -> Any:
        return self.feature[index], self.label[index]

    def __len__(self) -> int:
        return len(self.data.values)

    def _download(self, redownload):
        """Fetch the raw file, optionally wiping a previous copy first."""
        if redownload:
            shutil.rmtree(os.path.join(self.dirpath, self.datadir))
        download_url(
            self.url, self.datapath, self.md5)

    def _normalize(self):
        """Standardize the feature columns in place (``id`` and ``y`` untouched)."""
        feature = self.data.iloc[:, 2:]
        scaler = StandardScaler()
        data_norm = pd.DataFrame(scaler.fit_transform(feature), columns=feature.columns)
        self.data = self.data.iloc[:, :2].join(data_norm)

    def reallocate(self, reallocate_dict):
        """Split the dataset into per-party train/test CSV files.

        ``reallocate_dict`` keys: ``mode`` ("vertical"/"horizontal"),
        ``splits``, ``test_ratio``, ``random_seed``, ``parties`` and the
        optional ``norm`` flag (standardize features before splitting).
        """
        if reallocate_dict.get("norm"):
            self._normalize()
        mode = reallocate_dict['mode']
        self.datadir = f"{self.datadir}_{mode}"
        splits = reallocate_dict['splits']
        reallocate_folder = f'{splits}party'
        test_ratio = reallocate_dict['test_ratio']
        random_state = reallocate_dict["random_seed"]
        parties = reallocate_dict["parties"]
        np.random.seed(random_state)
        final_dir_path = os.path.join(
            self.dirpath, self.datadir, reallocate_folder)
        if not os.path.exists(final_dir_path):
            os.makedirs(final_dir_path)
        if mode == "vertical":
            # Feature columns are divided between parties; only parties whose
            # name contains "labeled" also receive the label column.
            split_cols = np.array_split(self.feature_cols, splits)
            for i, span in enumerate(split_cols):
                if "labeled" in parties[i]:
                    cols = ["id", "y"] + list(span)
                else:
                    cols = ["id"] + list(span)
                train_data, test_data = pd_train_test_split(
                    self.data[cols], test_ratio=test_ratio, random_state=random_state)
                train_csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{parties[i]}_train.csv')
                test_csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{parties[i]}_test.csv')
                train_data.to_csv(train_csv_path, index=False)
                test_data.to_csv(test_csv_path, index=False)
        elif mode == "horizontal":
            # Rows are divided between parties; the test split is shared.
            train_data, test_data = pd_train_test_split(
                self.data, test_ratio=test_ratio, random_state=random_state)
            test_csv_path = os.path.join(
                final_dir_path, f'{self.datadir}_test.csv')
            test_data.to_csv(test_csv_path, index=False)
            rand_idx = np.random.permutation(range(len(train_data)))
            indices = np.array_split(rand_idx, splits)
            for i, party in enumerate(parties):
                csv_path = os.path.join(
                    final_dir_path, f'{self.datadir}_{party}.csv')
                data = train_data.loc[indices[i]]
                data.to_csv(csv_path, index=False)
        os.remove(self.datapath)
if __name__ == "__main__":
    import argparse

    def str2bool(value: str) -> bool:
        """Parse a CLI boolean. argparse's type=bool treats any non-empty
        string (including "False") as True, so convert explicitly."""
        return str(value).lower() in ("true", "1", "yes", "y")

    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", type=str, default="vertical",
                        help="vertical or horizontal task")
    parser.add_argument("--splits", type=int, default=2,
                        help="number of parties")
    parser.add_argument("--test_ratio", type=float,
                        default=0.3, help="ratio of test data")
    parser.add_argument("--random_seed", type=int,
                        default=0, help="random seed")
    parser.add_argument("--party", nargs="+", help="involved parties")
    # Fixed: was type=bool, which made "--norm False" evaluate to True.
    parser.add_argument("--norm", type=str2bool,
                        default=False, help="normalization")
    args = parser.parse_args()
    reallocate_dict = {
        "mode": args.mode,
        "splits": args.splits,
        "test_ratio": args.test_ratio,
        "random_seed": args.random_seed,
        "parties": args.party,
        "norm": args.norm
    }
    boston = Boston()
    boston.reallocate(reallocate_dict)
| 5,887 | 39.054422 | 110 | py |
XFL | XFL-master/python/common/utils/model_io.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import json
from typing import Optional
from pathlib import Path
import torch
from common.utils.logger import logger
class ModelIO:
    """Static helpers for saving/loading torch, ONNX and JSON model artifacts."""

    @staticmethod
    def _gen_model_path(
        save_dir: str,
        model_name: str,
        epoch: Optional[int] = None,
    ) -> Path:
        """Build the target path, inserting ``_epoch_{epoch}`` before the extension.

        Creates ``save_dir`` if it does not exist.  Names without an extension
        are handled correctly (the previous split-on-dot logic produced paths
        like ``_epoch_3.model`` for the input ``"model"``).
        """
        if '.' in model_name:
            stem, ext = model_name.rsplit('.', 1)
            suffix = '.' + ext
        else:
            stem, suffix = model_name, ''
        if epoch is not None:
            stem = f'{stem}_epoch_{epoch}'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        return Path(save_dir, stem + suffix)

    @staticmethod
    def save_torch_model(state_dict,
                         save_dir: str,
                         model_name: str,
                         meta_dict: dict = {},
                         epoch: Optional[int] = None,
                         version: str = '1.4.0'):
        """Persist a state dict plus metadata with ``torch.save``.

        Fix: ``meta_dict`` used to be silently discarded because the dict was
        rebuilt after the update; metadata is now kept, with ``state_dict``
        and ``version`` taking precedence on key collisions.
        """
        model_dict = dict(meta_dict)
        model_dict.update({"state_dict": state_dict, "version": version})
        model_path = ModelIO._gen_model_path(save_dir, model_name, epoch)
        torch.save(model_dict, model_path)
        logger.info("Model saved as: {}".format(model_path))

    @staticmethod
    def copy_best_model(
        save_dir: str,
        model_name: str,
        epoch: Optional[int] = None
    ):
        """Copy the epoch-tagged checkpoint over the untagged "best" file."""
        model_path = ModelIO._gen_model_path(save_dir, model_name, epoch)
        best_model_path = ModelIO._gen_model_path(save_dir, model_name)
        shutil.copy(model_path, best_model_path)
        logger.info("Best model saved as: {}".format(best_model_path))

    @staticmethod
    def load_torch_model(model_path: str, device: str = "cpu"):
        """Load a checkpoint onto CPU or the first CUDA device.

        Raises:
            ValueError: for any device string other than "cpu" or "cuda*".
        """
        if device == "cpu":
            model_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
        elif "cuda" in device:
            # NOTE(review): always maps to cuda:0 regardless of the device
            # index in ``device`` — confirm this is intended.
            model_dict = torch.load(model_path, map_location=lambda storage, loc: storage.cuda(0))
        else:
            raise ValueError(f"Device {device} not support.")
        logger.info("Pretrain model loaded from: {}".format(model_path))
        return model_dict

    @staticmethod
    def save_torch_onnx(model, input_dim: tuple, save_dir: str, model_name: str, epoch: Optional[int] = None):
        """Export ``model`` to ONNX with a dynamic batch dimension."""
        dummy_input = torch.randn(1, *input_dim)
        model_path = ModelIO._gen_model_path(save_dir, model_name, epoch)
        torch.onnx.export(model,
                          dummy_input,
                          model_path,
                          verbose=False,
                          input_names=['input'],
                          output_names=['output'],
                          dynamic_axes={'input': {0: 'batch_size'},
                                        'output': {0: 'batch_size'}})
        logger.info("Model saved as: {}".format(model_path))

    @staticmethod
    def save_json_model(model_dict: dict,
                        save_dir: str,
                        model_name: str,
                        meta_dict: dict = {},
                        epoch: Optional[int] = None,
                        version: str = '1.4.0'):
        """Write a JSON model file: meta first, model keys win, version last."""
        new_model_dict = {}
        new_model_dict.update(meta_dict)
        new_model_dict.update(model_dict)
        new_model_dict["version"] = version
        model_path = ModelIO._gen_model_path(save_dir, model_name, epoch)
        # ``with`` guarantees the handle is closed even if json.dump raises.
        with open(model_path, 'w') as fp:
            json.dump(new_model_dict, fp)
        logger.info("Model saved as: {}".format(model_path))

    @staticmethod
    def load_json_model(model_path: str):
        """Read and return a JSON model file as a dict."""
        with open(model_path, 'r') as fp:
            model_dict = json.load(fp)
        logger.info("Model loaded from: {}".format(model_path))
        return model_dict

    @staticmethod
    def save_json_proto(model_dict: dict,
                        save_dir: str,
                        model_name: str,
                        meta_dict: dict = {},
                        epoch: Optional[int] = None,
                        version: str = '1.4.0'):
        # Placeholder: proto serialization not implemented yet.
        pass

    @staticmethod
    def load_json_proto(model_path: str):
        # Placeholder: proto deserialization not implemented yet.
        pass
| 4,851 | 35.757576 | 110 | py |
XFL | XFL-master/python/common/utils/algo_utils.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import numpy as np
import torch
from numpy.core.records import ndarray
from sklearn.metrics import auc, roc_curve
from torch.nn import Module
from common.utils.logger import logger
class MapeLoss(Module):
    """Mean absolute percentage error; entries with a zero label are ignored."""

    def __init__(self):
        super(MapeLoss, self).__init__()

    def forward(self, preds: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
        """Compute MAPE over the entries where ``labels`` is non-zero.

        Args:
            preds: predicted values.
            labels: ground-truth values; zeros are masked out to avoid
                division by zero.
        Returns:
            Scalar tensor with the mean relative error.
        """
        nonzero = labels != 0
        relative_error = (preds - labels).abs() / labels.abs()
        return relative_error[nonzero].mean()
class BiClsAccuracy(Module):
    """Binary-classification accuracy computed from a 2x2 confusion matrix."""

    def __init__(self):
        super(BiClsAccuracy, self).__init__()

    def forward(self, confusion_matrix: np.array) -> torch.Tensor:
        """Return (TN + TP) / total.

        Args:
            confusion_matrix: 2x2 array laid out as [[tn, fp], [fn, tp]].
        """
        tn, fp, fn, tp = confusion_matrix.ravel()
        correct = tn + tp
        total = tn + fp + fn + tp
        return correct / total
class BiClsPrecision(Module):
    """Binary-classification precision computed from a 2x2 confusion matrix."""

    def __init__(self):
        super(BiClsPrecision, self).__init__()

    def forward(self, confusion_matrix: np.array) -> torch.Tensor:
        """Return TP / (TP + FP); a zero-element tensor when nothing was predicted positive.

        Args:
            confusion_matrix: 2x2 array laid out as [[tn, fp], [fn, tp]].
        """
        tn, fp, fn, tp = confusion_matrix.ravel()
        predicted_positive = fp + tp
        if predicted_positive > 0:
            return tp / predicted_positive
        return torch.Tensor([0.0])
class BiClsRecall(Module):
    """Binary-classification recall computed from a 2x2 confusion matrix."""

    def __init__(self):
        super(BiClsRecall, self).__init__()

    def forward(self, confusion_matrix: np.array) -> torch.Tensor:
        """Return TP / (TP + FN); a zero-element tensor when no positives exist.

        Args:
            confusion_matrix: 2x2 array laid out as [[tn, fp], [fn, tp]].
        """
        tn, fp, fn, tp = confusion_matrix.ravel()
        actual_positive = fn + tp
        if actual_positive > 0:
            return tp / actual_positive
        return torch.Tensor([0.0])
class BiClsF1(Module):
    """Binary-classification F1 score computed from a 2x2 confusion matrix."""

    def __init__(self):
        super(BiClsF1, self).__init__()

    def forward(self, confusion_matrix: np.array) -> torch.Tensor:
        """Return the harmonic mean of precision and recall.

        Falls back to a zero-element tensor when either precision or recall
        is undefined (no predicted positives or no actual positives).
        """
        tn, fp, fn, tp = confusion_matrix.ravel()
        if fp + tp > 0 and fn + tp > 0:
            precision = tp / (fp + tp)
            recall = tp / (fn + tp)
            return 2 * precision * recall / (precision + recall)
        return torch.Tensor([0.0])
class BiClsAuc(Module):
    """Area under the ROC curve from precomputed tpr/fpr arrays."""

    def __init__(self):
        super(BiClsAuc, self).__init__()

    def forward(self, tpr: np.array, fpr: np.array) -> float:
        """Return AUC.

        Args:
            tpr: TP / (TP + FN) per threshold.
            fpr: FP / (FP + TN) per threshold.
        Note sklearn's ``auc`` expects (x=fpr, y=tpr).
        """
        return auc(fpr, tpr)
class BiClsKS(Module):
    """Kolmogorov-Smirnov statistic from precomputed tpr/fpr arrays."""

    def __init__(self):
        super(BiClsKS, self).__init__()

    def forward(self, tpr: np.array, fpr: np.array) -> float:
        """Return the KS statistic: the largest tpr - fpr gap, floored at 0.

        Args:
            tpr: TP / (TP + FN) per threshold.
            fpr: FP / (FP + TN) per threshold.
        """
        return max(np.max(tpr - fpr), 0)
class aucScore(Module):
    """Compute AUC and the KS statistic directly from scores and labels."""

    def __init__(self):
        super(aucScore, self).__init__()

    def forward(self, pred: np.array, label: np.array) -> Tuple[float, Union[ndarray, int, float, complex]]:
        """Return the pair (auc, ks) for the given scores and true labels.

        Args:
            pred: predicted scores.
            label: binary ground-truth labels.
        """
        fpr, tpr, _ = roc_curve(label, pred)
        return auc(fpr, tpr), max(np.max(tpr - fpr), 0)
class earlyStopping:
    """Stops training when the monitored metric has not improved for ``patience`` calls."""

    def __init__(self, key: str, patience: int = 10, delta: float = 0):
        """
        Args:
            key (str): metric name to monitor in the metric dict.
            patience (int): number of consecutive non-improving calls tolerated.
                Default: 10
            delta (float): minimum increase over the best score that counts as
                an improvement. Default: 0
        """
        self.key = key
        self.patience = patience
        self.delta = delta
        self.counter = 0
        self.best_score = None
        self.early_stop = False

    def __call__(self, metric) -> Tuple[bool, bool]:
        """Update with the latest metrics; return (early_stop, save_flag).

        Raises:
            KeyError: if ``self.key`` is missing from ``metric``.
        """
        if self.key not in metric:
            raise KeyError("Key {} cannot found in metrics.".format(self.key))
        val_score = metric[self.key]
        save_flag = False
        if self.best_score is None:
            # First observation becomes the baseline.
            self.best_score = val_score
            save_flag = True
        elif val_score < self.best_score + self.delta:
            # No improvement: count towards the patience budget.
            self.counter += 1
            logger.info(
                f'EarlyStopping counter: {self.counter} out of {self.patience}. Epoch score {val_score}, '
                f'best score {self.best_score}.')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # Improvement: record it and reset the counter.
            self.best_score = val_score
            save_flag = True
            self.counter = 0
        return self.early_stop, save_flag
class _earlyStopping:
"""Early stops the training if validation metric doesn't increase or decrease after a given patience."""
def __init__(self, key: str, patience: int = 10, delta: float = 0, maxmize: bool = True):
"""
Args:
key (str): The key of metric to monitor.
patience (int): How long to wait after last time validation loss improved.
Default: 10
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
maxmize (bool): If True, we try to maxmize the metric. Otherwise, we try to minimize the metric.
"""
self.patience = patience
self.key = key
self.counter = 0
self.best_score = None
self.best_epoch = None
self.early_stop = False
self.maxmize = 1 if maxmize else -1
self.delta = delta * maxmize
def __call__(self, metric: dict, epoch: int) -> bool:
'''
Args:
metric (dict): The metric dict.
epoch (int): The current epoch.
'''
if self.key not in metric:
raise KeyError("Key {} cannot found in metrics.".format(self.key))
val_score = metric[self.key]
if self.best_score is None:
# update best score and best epoch
self.best_score = val_score
self.best_epoch = epoch
elif (val_score * self.maxmize) < ((self.best_score + self.delta) * self.maxmize):
self.counter += 1
logger.info(
f'EarlyStopping counter: {self.counter} out of {self.patience}. '
f'Epoch {epoch} score {val_score}, '
f'best epoch {self.best_epoch} best score {self.best_score}.')
if (val_score * self.maxmize) < (self.best_score * self.maxmize):
# update best score and best epoch
self.best_score = val_score
self.best_epoch = epoch
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = val_score
self.best_epoch = epoch
self.counter = 0
return self.early_stop
class earlyStoppingH(_earlyStopping):
    """Early stops the training if validation metric doesn't increase after a given patience."""

    # Metric names grouped by optimization direction.
    _HIGHER_IS_BETTER = ("acc", "precision", "recall", "f1_score", "auc", "ks")
    _LOWER_IS_BETTER = ("mae", "mse", "mape", "rmse")

    def __init__(self, key: str, patience: int = 10, delta: float = 0):
        """
        Args:
            key (str): The key of metric to monitor; its optimization
                direction is inferred from the known metric names.
            patience (int): How long to wait after the last improvement.
                Default: 10
            delta (float): Minimum change that qualifies as an improvement.
                Default: 0
        Raises:
            ValueError: if ``key`` is not a recognized metric name.
        """
        if key in self._HIGHER_IS_BETTER:
            maxmize = True
        elif key in self._LOWER_IS_BETTER:
            maxmize = False
        else:
            raise ValueError("Key {} cannot be monitored.".format(key))
        super().__init__(key, patience, delta, maxmize=maxmize)
XFL | XFL-master/python/common/utils/config_checker.py | import os
import importlib
import traceback
from collections import Counter
from common.checker.compare import compare
from common.utils.logger import logger
from common.utils.config_parser import replace_variable
def find_rule_class(fed_type, operator_name, role, inference):
    """Locate the config-checking rule object for an operator.

    Builds the module path ``algorithm.config_descriptor.<fed>_<op>.<role>``
    (appending ``_infer`` for inference jobs) and fetches the rule attribute
    named after the federation type.  Returns None when the module, the
    attribute, or the federation type is unknown.
    """
    if inference:
        operator_name += '_infer'
    module_path = '.'.join(
        ['algorithm.config_descriptor', fed_type + '_' + operator_name, role])
    try:
        module = importlib.import_module(module_path)
    except Exception:
        logger.warning(traceback.format_exc())
        return None
    if fed_type == 'local':
        rule_name = fed_type + '_' + operator_name + '_rule'
    elif fed_type == 'vertical':
        rule_name = fed_type + '_' + operator_name + '_' + role + '_rule'
    else:
        return None
    try:
        return getattr(module, rule_name)
    except Exception:
        return None
def check_stage_train_conf(conf):
    """Validate a single stage's train config against its operator rule.

    Returns a report dict with keys ``result``, ``itemized_result``,
    ``summary`` and ``message``.  When no model name is present the values
    are empty lists; when the rule cannot be found (or checking fails) the
    summary is ``(0, 0)``.

    Fix: the "rule not found"/error report now always carries the
    ``itemized_result`` key, which check_multi_stage_train_conf indexes
    unconditionally (previously a KeyError).
    """
    role = conf.get("identity")
    name = conf.get('model_info', {}).get('name')
    if not name:
        return {
            "result": [],
            "itemized_result": [],
            "summary": [],
            "message": []
        }
    # Operator names look like "<fed_type>_<operator>", e.g. "vertical_xgb".
    fed_type = name.split('_')[0]
    operator_name = '_'.join(name.split('_')[1:])
    inference = True if conf.get('inference') else False
    # Default report returned on every failure path below.
    res = {
        "result": {},
        "itemized_result": {},
        "summary": (0, 0),
        "message": 'Rule not found.'
    }
    if not role:
        res["message"] = f"Role {role} or Name {name} not valid."
        return res
    rule = find_rule_class(fed_type, operator_name, role, inference)
    if not rule:
        return res
    try:
        result, itemized_result, rule_passed, rule_checked = compare(conf, rule)
    except Exception:
        logger.warning(traceback.format_exc())
        logger.info("Error when checking train_config.")
        return res
    return {
        "result": result,
        "itemized_result": itemized_result,
        "summary": (rule_passed, rule_checked),
        "message": 'Config checked.'
    }
def check_multi_stage_train_conf(conf: list):
    """Validate a multi-stage train config, stage by stage.

    Returns a dict of per-stage lists (``result``, ``itemized_result``,
    ``summary``, ``message``).  NOTE: for backward compatibility a non-list
    input returns the tuple ``([], [(0, 1)], "Not a list")``.

    Fix: a non-dict stage used to leave ``stage_itemized_result`` and
    ``stage_summary`` undefined, raising NameError on the first stage (or
    silently reusing the previous stage's values).
    """
    if not isinstance(conf, list):
        return [], [(0, 1)], "Not a list"
    res = {
        "result": [],
        "itemized_result": [],
        "summary": [],
        "message": []
    }
    for stage_conf in conf:
        if not isinstance(stage_conf, dict):
            stage_result = {"rule_passed": 0, "rule_checked": 1}
            stage_itemized_result = []
            stage_summary = (0, 1)
            stage_message = "Not a dict."
        else:
            report = check_stage_train_conf(stage_conf)
            stage_result = report["result"]
            stage_itemized_result = report["itemized_result"]
            stage_summary = report["summary"]
            stage_message = report["message"]
        res["result"].append(stage_result)
        res["itemized_result"].append(stage_itemized_result)
        res["summary"].append(stage_summary)
        res["message"].append(stage_message)
    return res
def check_cross_stage_input_output(conf: list, ignore_list: list = []):
    """Cross-check dataset/model paths across the stages of a multi-stage job.

    Collects every input and output path declared by each stage (after
    substituting job/node/stage variables) and reports:
      * "duplicated": the same output path written by more than one stage,
      * "blank": empty input/output paths,
      * "nonexistent": an input path that no earlier stage outputs and that
        is not whitelisted in ``ignore_list``.

    NOTE(review): the mutable default ``ignore_list=[]`` is never mutated
    here, so it is harmless, but a ``None`` default would be conventional.
    ``input`` below shadows the builtin of the same name.
    """
    input_dict = {}
    output_dict = {}
    # The bare string below documents the shape of input_dict / output_dict:
    # stage_id -> list of {"key_chain": [...], "value": <joined path>}.
    """
    {
        0: [
            {
                "key_chain": ["input", "trainset"],
                "value": "/opt/dataset/a.csv"
            }
        ]
    }
    """
    for stage_id, stage_conf in enumerate(conf):
        # --- collect input paths for this stage ---
        input = stage_conf.get("input", {})
        path = input.get("path", "")
        input_path = []
        for key in input:
            # Each entry may be a list of items or a single dict; a per-item
            # "path" overrides the stage-level default, and "name" may be a
            # list of file names.
            if isinstance(input[key], list):
                for item in input[key]:
                    local_path = item.get("path", "") or path
                    local_name = item.get("name", "")
                    if isinstance(local_name, list):
                        for name in local_name:
                            input_path.append(
                                {
                                    "key_chain": ["input", key],
                                    "value": os.path.join(local_path, name)
                                }
                            )
                    else:
                        input_path.append(
                            {
                                "key_chain": ["input", key],
                                "value": os.path.join(local_path, local_name)
                            }
                        )
            elif isinstance(input[key], dict):
                item = input[key]
                local_path = item.get("path", "") or path
                local_name = item.get("name", "")
                if isinstance(local_name, list):
                    for name in local_name:
                        input_path.append(
                            {
                                "key_chain": ["input", key],
                                "value": os.path.join(local_path, name)
                            }
                        )
                else:
                    input_path.append(
                        {
                            "key_chain": ["input", key],
                            "value": os.path.join(local_path, local_name)
                        }
                    )
        input_dict[stage_id] = input_path
        # --- collect output paths for this stage (dict entries only) ---
        output = stage_conf.get("output", {})
        path = output.get("path", "")
        output_path = []
        for key in output:
            if isinstance(output[key], dict):
                local_path = output[key].get("path") or path
                local_name = output[key].get("name", "")
                output_path.append(
                    {
                        "key_chain": ["output", key],
                        "value": os.path.join(local_path, local_name)
                    }
                )
        output_dict[stage_id] = output_path
    # Substitute placeholders such as [JOB_ID]/[NODE_ID] so paths compare equal.
    input_dict_a = {k: replace_variable(v, stage_id=k, job_id='JOB_ID', node_id='NODE_ID') for k, v in input_dict.items()}
    output_dict_a = {k: replace_variable(v, stage_id=k, job_id='JOB_ID', node_id='NODE_ID') for k, v in output_dict.items()}

    def find_duplicated_and_blank(in_dict, duplicated=True):
        # Flatten in_dict, then report values occurring more than once
        # (optional) and values that are blank after stripping whitespace.
        result = {
            "duplicated": [],
            "blank": [],
            "nonexistent": []
        }
        stage_id_list = []
        key_chain_list = []
        value_list = []
        for stage_id in in_dict:
            for path_dict in in_dict[stage_id]:
                stage_id_list.append(stage_id)
                key_chain_list.append(path_dict['key_chain'])
                value_list.append(path_dict['value'])
        if duplicated:
            count_result = dict(Counter(value_list))
            for k in count_result:
                # find duplicated
                if count_result[k] > 1:
                    index = [i for i, v in enumerate(value_list) if v == k]
                    if index:
                        result['duplicated'].append(
                            {
                                "value": k,
                                "position": [
                                    {
                                        "stage": stage_id_list[i],
                                        "key_chain": key_chain_list[i],
                                    } for i in index
                                ]
                            }
                        )
        # find blank
        index = [i for i, v in enumerate(value_list) if v.strip() == '']
        if index:
            result['blank'].append(
                {
                    "value": '',
                    "position": [
                        {
                            "stage": stage_id_list[i],
                            "key_chain": key_chain_list[i],
                        } for i in index
                    ]
                }
            )
        return result

    def find_nonexistent(input_dict, output_dict, ignore_list):
        # Flag inputs that no *earlier* stage produces as an output and that
        # are not whitelisted in ignore_list.
        result = {
            "duplicated": [],
            "blank": [],
            "nonexistent": []
        }
        stage_id_list = []
        key_chain_list = []
        value_list = []
        for stage_id in input_dict:
            for path_dict in input_dict[stage_id]:
                stage_id_list.append(stage_id)
                key_chain_list.append(path_dict['key_chain'])
                value_list.append(path_dict['value'])
        output_stage_id_list = []
        output_key_chain_list = []
        output_value_list = []
        for stage_id in output_dict:
            for path_dict in output_dict[stage_id]:
                output_stage_id_list.append(stage_id)
                output_key_chain_list.append(path_dict['key_chain'])
                output_value_list.append(path_dict['value'])
        for i, stage_id in enumerate(stage_id_list):
            # Only outputs of strictly earlier stages can satisfy an input.
            ids = [j for j, stage in enumerate(output_stage_id_list) if stage < stage_id]
            if value_list[i] not in [output_value_list[j] for j in ids] and value_list[i] not in ignore_list:
                result['nonexistent'].append(
                    {
                        "value": value_list[i],
                        "position": [
                            {
                                "stage": stage_id_list[i],
                                "key_chain": key_chain_list[i],
                            }
                        ]
                    }
                )
        return result

    # Merge the three partial reports: blanks come from both inputs and
    # outputs, duplicates only from outputs, nonexistent only from inputs.
    result = {
        "duplicated": [],
        "blank": [],
        "nonexistent": []
    }
    r1 = find_duplicated_and_blank(input_dict_a, duplicated=False)
    r2 = find_duplicated_and_blank(output_dict_a)
    r3 = find_nonexistent(input_dict_a, output_dict_a, ignore_list)
    result["duplicated"] += r1["duplicated"]
    result["duplicated"] += r2["duplicated"]
    result["blank"] += r1["blank"]
    result["blank"] += r2["blank"]
    result["nonexistent"] += r3["nonexistent"]
    return result
if __name__ == "__main__":
# path = '/mnt/c/Documents and Settings/wanghong/workspace/federated-learning/demo/vertical/xgboost/2party_env/config/trainer_config_node-1.json'
# import json
# conf = json.load(open(path, 'r'))
conf = \
[
{
"identity": "label_trainer",
"model_info": {
"name": "vertical_binning_woe_iv_fintech"
},
"input": {
"trainset": [
{
"type": "csv",
"path": "/opt/dataset/testing/fintech",
"name": "banking_guest_train_v01_20220216_TL.csv",
"has_id": True,
"has_label": True,
"nan_list": [
]
}
]
},
"output": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"model": {
"name": "vertical_binning_woe_iv_[STAGE_ID].json"
},
"iv": {
"name": "woe_iv_result_[STAGE_ID].json"
},
"split_points": {
"name": "binning_split_points_[STAGE_ID].json"
},
"trainset": {
"name": "fintech_woe_map_train_[STAGE_ID].csv"
}
},
"train_info": {
"interaction_params": {
"save_model": True
},
"train_params": {
"encryption": {
"paillier": {
"key_bit_size": 2048,
"precision": 7,
"djn_on": True,
"parallelize_on": True
}
},
"binning": {
"method": "equal_width",
"bins": 5
}
}
}
},
{
"identity": "label_trainer",
"model_info": {
"name": "vertical_feature_selection"
},
"input": {
"iv_result": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "woe_iv_result_[STAGE_ID-1].json"
},
"trainset": [
{
"type": "csv",
"path": "/opt/dataset/testing/fintech",
"name": "banking_guest_train_v01_20220216_TL.csv",
"has_id": True,
"has_label": True
}
],
"valset": [
{
"type": "csv",
"path": "/opt/dataset/testing/fintech",
"name": "banking_guest_train_v01_20220216_TL.csv",
"has_id": True,
"has_label": True
}
]
},
"output": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"model": {
"name": "feature_selection_[STAGE_ID].pkl"
},
"trainset": {
"name": "selected_train_[STAGE_ID].csv"
},
"valset": {
"name": "selected_val_[STAGE_ID].csv"
}
},
"train_info": {
"train_params": {
"filter": {
"common": {
"metrics": "iv",
"filter_method": "threshold",
"threshold": 0.01
}
}
}
}
},
{
"identity": "label_trainer",
"model_info": {
"name": "vertical_pearson"
},
"input": {
"trainset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "selected_train_[STAGE_ID-1].csv",
"has_id": True,
"has_label": True
}
]
},
"output": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"corr": {
"name": "vertical_pearson_[STAGE_ID].pkl"
}
},
"train_info": {
"train_params": {
"col_index": -1,
"col_names": "",
"encryption": {
"paillier": {
"key_bit_size": 2048,
"precision": 6,
"djn_on": True,
"parallelize_on": True
}
},
"max_num_cores": 999,
"sample_size": 9999
}
}
},
{
"identity": "label_trainer",
"model_info": {
"name": "vertical_feature_selection"
},
"input": {
"corr_result": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "vertical_pearson_[STAGE_ID-1].pkl"
},
"iv_result": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "woe_iv_result_[STAGE_ID-3].json"
},
"trainset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "selected_train_[STAGE_ID-2].csv",
"has_id": True,
"has_label": True
}
],
"valset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "selected_val_[STAGE_ID-2].csv",
"has_id": True,
"has_label": True
}
]
},
"output": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"model": {
"name": "feature_selection_[STAGE_ID].pkl"
},
"trainset": {
"name": "selected_train_[STAGE_ID].csv"
},
"valset": {
"name": "selected_val_[STAGE_ID].csv"
}
},
"train_info": {
"train_params": {
"filter": {
"common": {
"metrics": "iv",
"filter_method": "threshold",
"threshold": 0.01
},
"correlation": {
"sort_metric": "iv",
"correlation_threshold": 0.7
}
}
}
}
},
{
"identity": "label_trainer",
"model_info": {
"name": "local_normalization"
},
"input": {
"trainset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "selected_train_[STAGE_ID-1].csv",
"has_id": True,
"has_label": True
}
],
"valset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "selected_val_[STAGE_ID-1].csv",
"has_id": True,
"has_label": True
}
]
},
"output": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"model": {
"name": "local_normalization_[STAGE_ID].pt"
},
"trainset": {
"name": "normalized_train_[STAGE_ID].csv"
},
"valset": {
"name": "normalized_val_[STAGE_ID].csv"
}
},
"train_info": {
"train_params": {
"norm": "max",
"axis": 0
}
}
},
{
"identity": "label_trainer",
"model_info": {
"name": "vertical_logistic_regression"
},
"input": {
"trainset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "normalized_train_[STAGE_ID-1].csv",
"has_id": True,
"has_label": True
}
],
"valset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "normalized_val_[STAGE_ID-1].csv",
"has_id": True,
"has_label": True
}
],
"pretrained_model": {
"path": "",
"name": ""
}
},
"output": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"model": {
"name": "vertical_logitstic_regression_[STAGE_ID].pt"
},
"metric_train": {
"name": "lr_metric_train_[STAGE_ID].csv"
},
"metric_val": {
"name": "lr_metric_val_[STAGE_ID].csv"
},
"prediction_train": {
"name": "lr_prediction_train_[STAGE_ID].csv"
},
"prediction_val": {
"name": "lr_prediction_val_[STAGE_ID].csv"
},
"ks_plot_train": {
"name": "lr_ks_plot_train_[STAGE_ID].csv"
},
"ks_plot_val": {
"name": "lr_ks_plot_val_[STAGE_ID].csv"
},
"decision_table_train": {
"name": "lr_decision_table_train_[STAGE_ID].csv"
},
"decision_table_val": {
"name": "lr_decision_table_val_[STAGE_ID].csv"
},
"feature_importance": {
"name": "lr_feature_importance_[STAGE_ID].csv"
}
},
"train_info": {
"interaction_params": {
"save_frequency": -1,
"write_training_prediction": True,
"write_validation_prediction": True,
"echo_training_metrics": True
},
"train_params": {
"global_epoch": 2,
"batch_size": 512,
"encryption": {
"ckks": {
"poly_modulus_degree": 8192,
"coeff_mod_bit_sizes": [
60,
40,
40,
60
],
"global_scale_bit_size": 40
}
},
"optimizer": {
"lr": 0.01,
"p": 2,
"alpha": 1e-4
},
"metric": {
"decision_table": {
"method": "equal_frequency",
"bins": 10
},
"acc": {},
"precision": {},
"recall": {},
"f1_score": {},
"auc": {},
"ks": {}
},
"early_stopping": {
"key": "acc",
"patience": 10,
"delta": 0
},
"random_seed": 50
}
}
}
]
result = check_multi_stage_train_conf(conf)
print(result)
result = check_cross_stage_input_output(conf)
print(result)
conf = [
{
"identity": "label_trainer",
"model_info": {
"name": "vertical_xgboost"
},
"inference": True
}
]
result = check_multi_stage_train_conf(conf)
print(result)
| 25,081 | 35.037356 | 149 | py |
XFL | XFL-master/python/common/utils/model_preserver.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import OrderedDict
import torch
from common.utils.logger import logger
# TODO: being phased out gradually; will be removed later.
class ModelPreserver(object):
    """Thin helper around ``torch.save``/``torch.load`` for checkpoints."""

    @staticmethod
    def save(save_dir: str,
             model_name: str,
             state_dict: OrderedDict,
             epoch: int = None,
             final: bool = False,
             suggest_threshold: float = None
             ):
        """Save a model state dict (plus optional threshold) to disk.

        Args:
            save_dir: directory to write into; created if missing.
            model_name: base file name, e.g. ``"model.pt"``.
            state_dict: model weights to persist under ``"state_dict"``.
            epoch: current epoch; when given and ``final`` is False the file
                name gets an ``_epoch_<n>`` suffix before the extension.
                (Fix: ``epoch=0`` is now honoured; previously the truthiness
                test silently dropped the suffix for epoch 0.)
            final: when True, always use the plain ``model_name``.
            suggest_threshold: stored alongside the weights when truthy.
        """
        # exist_ok avoids the check-then-create race of the old
        # ``os.path.exists`` guard.
        os.makedirs(save_dir, exist_ok=True)
        model_info = {"state_dict": state_dict}
        if suggest_threshold:
            model_info["suggest_threshold"] = suggest_threshold
        # splitext keeps extension-less names intact (the old manual split
        # turned "model" into the hidden file ".model").
        name_prefix, name_postfix = os.path.splitext(model_name)
        if not final and epoch is not None:
            model_name = "{}_epoch_{}{}".format(name_prefix, epoch, name_postfix)
        model_path = os.path.join(save_dir, model_name)
        torch.save(model_info, model_path)
        logger.info("model saved as: {}.".format(model_path))

    @staticmethod
    def load(model_path: str):
        """Load and return a checkpoint previously written by ``save``."""
        return torch.load(model_path)
| 1,794 | 31.053571 | 87 | py |
XFL | XFL-master/python/common/utils/auto_descriptor/torch/torch_descriptor.py |
import inspect
import math
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.nn as nn
import sklearn.metrics as sklearn_metrics
import algorithm.core.metrics as custom_metrics
from algorithm.core.metrics import metric_dict
# from common.checker.qualifiers import (OneOf, Optional, RepeatableSomeOf,
# Required, SomeOf)
# from common.checker.x_types import All, Any, Bool, Float, Integer, String
def gen_torch_optim_dict(out_path: str):
    """Generate a checker-rule module describing every torch optimizer.

    Introspects ``torch.optim`` and writes a python file defining a single
    ``optimizer`` dict: one entry per optimizer class, mapping each
    constructor argument to a checker type built from its default value,
    plus a ``__rule__`` list marking required/optional arguments.

    Args:
        out_path: path of the python file to (over)write.
    """
    # Every optimizer class, excluding the abstract base ``Optimizer``.
    methods = [getattr(optim, name) for name in dir(optim) if isinstance(getattr(optim, name), type) and name not in ['Optimizer']]
    blank = ''
    with open(out_path, 'w') as f:
        f.write('from common.checker.x_types import String, Bool, Integer, Float, Any, All\n')
        f.write('from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional\n')
        f.write('\n\n')
        f.write('optimizer = {')
        # ``blank`` tracks the current indentation of the generated file
        # (pushed/popped in 4-space steps around each nested dict).
        blank += '    '
        for i, method in enumerate(methods):
            # print(method.__name__)
            # print(inspect.getfullargspec(method))
            # print(inspect.signature(method).parameters)
            mark0 = ',' if i > 0 else ''
            f.write(f'{mark0}\n' + blank + f'"{method.__name__}": ' + '{')
            blank += '    '
            params = list(inspect.signature(method).parameters.values())
            required_params = []
            whole_params = []
            for j, param in enumerate(params):
                name = param.name
                default = param.default
                # j == 0 is the skipped ``params`` argument, so the first
                # emitted entry is j == 1 and must not get a leading comma.
                mark1 = ',' if j > 1 else ''
                # Don't support params
                if name == 'params':
                    continue
                # No default lr value for SGD
                # if name == 'lr' and not isinstance(name, (int, float)):
                #     default = 0.001
                if isinstance(default, bool):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Bool({default})')
                elif isinstance(default, int):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Integer({default})')
                elif isinstance(default, float):
                    # NaN has no literal representation; emit None instead.
                    default = None if math.isnan(default) else default
                    f.write(f'{mark1}\n' + blank + f'"{name}": Float({default})')
                elif isinstance(default, str):
                    f.write(f'{mark1}\n' + blank + f'"{name}": String("{default}")')
                elif isinstance(default, (list, tuple)):
                    f.write(f'{mark1}\n' + blank + f'"{name}": [')
                    for k, item in enumerate(default):
                        mark2 = ',' if k != 0 else ''
                        if isinstance(item, bool):
                            v = f'Bool({item})'
                        elif isinstance(item, int):
                            v = f'Integer({item})'
                        elif isinstance(item, float):
                            item = None if math.isnan(item) else item
                            v = f'Float({item})'
                        elif isinstance(item, str):
                            v = f'String("{item}")'
                        else:
                            v = f'Any({item})'
                        f.write(f'{mark2}\n' + blank + '    ' + v)
                    f.write(f'{mark1}\n' + blank + '    ' + ']')
                elif default is None:
                    f.write(f'{mark1}\n' + blank + f'"{name}": All(None)')
                else:
                    # ``inspect.Parameter.empty``: no default, so the
                    # argument is recorded as required.
                    f.write(f'{mark1}\n' + blank + f'"{name}": ' + 'All("No default value")')
                    required_params.append(name)
                    print(f"{name}, {default}")
                    pass
                whole_params.append(name)
            if len(whole_params) != 0:
                mark2 = ',' if len(whole_params) > 0 else ''
                f.write(f'{mark2}\n' + blank + '"__rule__": [')
                if len(required_params) > 0:
                    f.write("Required(")
                    for j, name in enumerate(required_params):
                        mark3 = ', ' if j > 0 else ''
                        f.write(f'{mark3}"{name}"')
                    f.write(")")
                optional_params = list(set(whole_params) - set(required_params))
                for j, name in enumerate(optional_params):
                    mark3 = ', ' if len(required_params) > 0 or j > 0 else ''
                    f.write(f'{mark3}Optional("{name}")')
                f.write(']')
            blank = blank[:-4]
            f.write('\n' + blank + '}')
        f.write('\n}\n')
def gen_torch_lr_scheduler_dict(out_path: str):
    """Generate a checker-rule module describing torch LR schedulers.

    Same output format as ``gen_torch_optim_dict`` but introspects
    ``torch.optim.lr_scheduler`` and writes an ``lr_scheduler`` dict;
    the ``optimizer`` constructor argument is skipped.

    Args:
        out_path: path of the python file to (over)write.
    """
    # Public scheduler classes only; private names and non-scheduler
    # helpers (Optimizer, ChainedScheduler, Counter) are excluded.
    methods = [getattr(lr_scheduler, name) for name in dir(lr_scheduler) if isinstance(getattr(lr_scheduler, name), type) and '_' not in name and name not in ['Optimizer', 'ChainedScheduler', 'Counter']]
    blank = ''
    with open(out_path, 'w') as f:
        f.write('from common.checker.x_types import String, Bool, Integer, Float, Any, All\n')
        f.write('from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional\n')
        f.write('\n\n')
        f.write('lr_scheduler = {')
        # ``blank`` tracks the current indentation of the generated file.
        blank += '    '
        for i, method in enumerate(methods):
            # print(method.__name__)
            # print(inspect.getfullargspec(method))
            # print(inspect.signature(method).parameters)
            mark0 = ',' if i > 0 else ''
            f.write(f'{mark0}\n' + blank + f'"{method.__name__}": ' + '{')
            blank += '    '
            params = list(inspect.signature(method).parameters.values())
            required_params = []
            whole_params = []
            for j, param in enumerate(params):
                name = param.name
                default = param.default
                # j == 0 is the skipped ``optimizer`` argument, so the first
                # emitted entry is j == 1 and must not get a leading comma.
                mark1 = ',' if j > 1 else ''
                # Don't support optimizer
                if name == 'optimizer':
                    continue
                if isinstance(default, bool):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Bool({default})')
                elif isinstance(default, int):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Integer({default})')
                elif isinstance(default, float):
                    # NaN has no literal representation; emit None instead.
                    default = None if math.isnan(default) else default
                    f.write(f'{mark1}\n' + blank + f'"{name}": Float({default})')
                elif isinstance(default, str):
                    f.write(f'{mark1}\n' + blank + f'"{name}": String("{default}")')
                elif isinstance(default, (list, tuple)):
                    f.write(f'{mark1}\n' + blank + f'"{name}": [')
                    for k, item in enumerate(default):
                        mark2 = ',' if k != 0 else ''
                        if isinstance(item, bool):
                            v = f'Bool({item})'
                        elif isinstance(item, int):
                            v = f'Integer({item})'
                        elif isinstance(item, float):
                            item = None if math.isnan(item) else item
                            v = f'Float({item})'
                        elif isinstance(item, str):
                            v = f'String("{item}")'
                        else:
                            v = f'Any({item})'
                        f.write(f'{mark2}\n' + blank + '    ' + v)
                    f.write(f'{mark1}\n' + blank + '    ' + ']')
                elif default is None:
                    f.write(f'{mark1}\n' + blank + f'"{name}": All(None)')
                else:
                    # ``inspect.Parameter.empty``: no default => required.
                    f.write(f'{mark1}\n' + blank + f'"{name}": ' + 'All("No default value")')
                    required_params.append(name)
                    print(f"{name}, {default}")
                    pass
                whole_params.append(name)
            if len(whole_params) != 0:
                mark2 = ',' if len(whole_params) > 0 else ''
                f.write(f'{mark2}\n' + blank + '"__rule__": [')
                if len(required_params) > 0:
                    f.write("Required(")
                    for j, name in enumerate(required_params):
                        mark3 = ', ' if j > 0 else ''
                        f.write(f'{mark3}"{name}"')
                    f.write(")")
                optional_params = list(set(whole_params) - set(required_params))
                for j, name in enumerate(optional_params):
                    mark3 = ', ' if len(required_params) > 0 or j > 0 else ''
                    f.write(f'{mark3}Optional("{name}")')
                f.write(']')
            blank = blank[:-4]
            f.write('\n' + blank + '}')
        f.write('\n}\n')
def gen_torch_lossfunc_dict(out_path: str):
    """Generate a checker-rule module describing torch loss functions.

    Same output format as ``gen_torch_optim_dict`` but introspects
    ``torch.nn`` classes whose name contains ``Loss`` and writes a
    ``lossfunc`` dict; no constructor argument is skipped.

    Args:
        out_path: path of the python file to (over)write.
    """
    methods = [getattr(nn, name) for name in dir(nn) if isinstance(getattr(nn, name), type) and 'Loss' in name]
    blank = ''
    with open(out_path, 'w') as f:
        f.write('from common.checker.x_types import String, Bool, Integer, Float, Any, All\n')
        f.write('from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional\n')
        f.write('\n\n')
        f.write('lossfunc = {')
        # ``blank`` tracks the current indentation of the generated file.
        blank += '    '
        for i, method in enumerate(methods):
            # print(method.__name__)
            # print(inspect.getfullargspec(method))
            # print(inspect.signature(method).parameters)
            mark0 = ',' if i > 0 else ''
            f.write(f'{mark0}\n' + blank + f'"{method.__name__}": ' + '{')
            blank += '    '
            params = list(inspect.signature(method).parameters.values())
            required_params = []
            whole_params = []
            for j, param in enumerate(params):
                name = param.name
                default = param.default
                # Unlike the optimizer/scheduler generators no argument is
                # skipped, so the very first entry (j == 0) has no comma.
                mark1 = ',' if j > 0 else ''
                # Don't support params
                # if name == 'optimizer':
                #     continue
                if isinstance(default, bool):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Bool({default})')
                elif isinstance(default, int):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Integer({default})')
                elif isinstance(default, float):
                    # NaN has no literal representation; emit None instead.
                    default = None if math.isnan(default) else default
                    f.write(f'{mark1}\n' + blank + f'"{name}": Float({default})')
                elif isinstance(default, str):
                    f.write(f'{mark1}\n' + blank + f'"{name}": String("{default}")')
                elif isinstance(default, (list, tuple)):
                    f.write(f'{mark1}\n' + blank + f'"{name}": [')
                    for k, item in enumerate(default):
                        mark2 = ',' if k != 0 else ''
                        if isinstance(item, bool):
                            v = f'Bool({item})'
                        elif isinstance(item, int):
                            v = f'Integer({item})'
                        elif isinstance(item, float):
                            item = None if math.isnan(item) else item
                            v = f'Float({item})'
                        elif isinstance(item, str):
                            v = f'String("{item}")'
                        else:
                            v = f'Any({item})'
                        f.write(f'{mark2}\n' + blank + '    ' + v)
                    f.write(f'{mark1}\n' + blank + '    ' + ']')
                elif default is None:
                    f.write(f'{mark1}\n' + blank + f'"{name}": All(None)')
                else:
                    # ``inspect.Parameter.empty``: no default => required.
                    f.write(f'{mark1}\n' + blank + f'"{name}": ' + 'All("No default value")')
                    required_params.append(name)
                    print(f"{name}, {default}")
                    pass
                whole_params.append(name)
            if len(whole_params) != 0:
                mark2 = ',' if len(whole_params) > 0 else ''
                f.write(f'{mark2}\n' + blank + '"__rule__": [')
                if len(required_params) > 0:
                    f.write("Required(")
                    for j, name in enumerate(required_params):
                        mark3 = ', ' if j > 0 else ''
                        f.write(f'{mark3}"{name}"')
                    f.write(")")
                optional_params = list(set(whole_params) - set(required_params))
                for j, name in enumerate(optional_params):
                    mark3 = ', ' if len(required_params) > 0 or j > 0 else ''
                    f.write(f'{mark3}Optional("{name}")')
                f.write(']')
            blank = blank[:-4]
            f.write('\n' + blank + '}')
        f.write('\n}\n')
def gen_metric_dict(out_path: str):
    """Generate a checker-rule module describing usable metric functions.

    Collects sklearn metric functions whose first two parameters match one
    of the accepted (truth, prediction) signatures, plus the project's
    custom metrics, and writes a ``metrics`` dict in the same format as
    ``gen_torch_optim_dict`` (the two positional data arguments are not
    described). Keys are remapped through ``metric_dict`` aliases when one
    exists.

    Fix: the NaN-to-None guard was previously on the ``int`` item branch,
    where ``math.isnan`` can never be True; it now sits on the ``float``
    branch (matching the three sibling generators), so a NaN float default
    no longer produces the invalid literal ``Float(nan)``.

    Args:
        out_path: path of the python file to (over)write.
    """
    candidate_methods_name = dir(sklearn_metrics)  # [getattr(sklearn_metrics, name) for name in dir(sklearn_metrics)]
    # Accepted (first, second) parameter-name pairs of a metric signature.
    valid_combination = [('y_true', 'y_pred'), ('X', 'Y'), ('y_true', 'y_score'), ('X', 'labels'), ('labels_true', 'labels_pred'), ('x', 'y'), ('y_true', 'y_prob'), ('a', 'b')]
    methods = []
    for name in candidate_methods_name:
        method = getattr(sklearn_metrics, name)
        if inspect.isfunction(method):
            params = list(inspect.signature(method).parameters.keys())
            if len(params) >= 2:
                if (params[0], params[1]) in valid_combination:
                    methods.append(name)
                    # print(params, name)
    methods = [getattr(sklearn_metrics, name) for name in methods]
    custom_methods = []
    for name in dir(custom_metrics):
        method = getattr(custom_metrics, name)
        if inspect.isfunction(method):
            if name not in ["get_metric"]:
                custom_methods.append(name)
    custom_methods = [getattr(custom_metrics, name) for name in custom_methods]
    # Reverse alias map: function name -> public metric key.
    names_map = {v: k for k, v in metric_dict.items()}
    blank = ''
    with open(out_path, 'w') as f:
        f.write('from common.checker.x_types import String, Bool, Integer, Float, Any, All\n')
        f.write('from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional\n')
        f.write('\n\n')
        f.write('metrics = {')
        # ``blank`` tracks the current indentation of the generated file.
        blank += '    '
        for i, method in enumerate(methods + custom_methods):
            mark0 = ',' if i > 0 else ''
            if method.__name__ in names_map:
                f.write(f'{mark0}\n' + blank + f'"{names_map[method.__name__]}": ' + '{')
            else:
                f.write(f'{mark0}\n' + blank + f'"{method.__name__}": ' + '{')
            blank += '    '
            # Skip the two positional data arguments (e.g. y_true, y_pred).
            params = list(inspect.signature(method).parameters.values())[2:]
            required_params = []
            whole_params = []
            # Parameters may be skipped, so track the first *emitted* entry
            # explicitly instead of relying on the loop index.
            is_first = True
            for j, param in enumerate(params):
                name = param.name
                default = param.default
                if name == 'kwds':
                    continue
                mark1 = ',' if is_first is False else ''
                is_first = False
                if isinstance(default, bool):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Bool({default})')
                elif isinstance(default, int):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Integer({default})')
                elif isinstance(default, float):
                    # NaN has no literal representation; emit None instead.
                    default = None if math.isnan(default) else default
                    f.write(f'{mark1}\n' + blank + f'"{name}": Float({default})')
                elif isinstance(default, str):
                    f.write(f'{mark1}\n' + blank + f'"{name}": String("{default}")')
                elif isinstance(default, (list, tuple)):
                    f.write(f'{mark1}\n' + blank + f'"{name}": [')
                    for k, item in enumerate(default):
                        mark2 = ',' if k != 0 else ''
                        if isinstance(item, bool):
                            v = f'Bool({item})'
                        elif isinstance(item, int):
                            v = f'Integer({item})'
                        elif isinstance(item, float):
                            # Bug fix: NaN guard moved here from the int
                            # branch, where it could never trigger.
                            item = None if math.isnan(item) else item
                            v = f'Float({item})'
                        elif isinstance(item, str):
                            v = f'String("{item}")'
                        else:
                            v = f'Any({item})'
                        f.write(f'{mark2}\n' + blank + '    ' + v)
                    f.write(f'{mark1}\n' + blank + '    ' + ']')
                elif default is None:
                    f.write(f'{mark1}\n' + blank + f'"{name}": All(None)')
                else:
                    # ``inspect.Parameter.empty``: no default => required.
                    f.write(f'{mark1}\n' + blank + f'"{name}": ' + 'All("No default value")')
                    required_params.append(name)
                    print(f"{name}, {default}")
                    pass
                whole_params.append(name)
            if len(whole_params) != 0:
                mark2 = ',' if len(whole_params) > 0 else ''
                f.write(f'{mark2}\n' + blank + '"__rule__": [')
                if len(required_params) > 0:
                    f.write("Required(")
                    for j, name in enumerate(required_params):
                        mark3 = ', ' if j > 0 else ''
                        f.write(f'{mark3}"{name}"')
                    f.write(")")
                optional_params = list(set(whole_params) - set(required_params))
                for j, name in enumerate(optional_params):
                    mark3 = ', ' if len(required_params) > 0 or j > 0 else ''
                    f.write(f'{mark3}Optional("{name}")')
                f.write(']')
            blank = blank[:-4]
            f.write('\n' + blank + '}')
        f.write('\n}\n')
if __name__ == "__main__":
    # Regenerate all four descriptor modules next to this script.
    from pathlib import Path
    out_path = Path(__file__).parent / 'optimizer.py'
    gen_torch_optim_dict(out_path)
    out_path = Path(__file__).parent / 'lr_scheduler.py'
    gen_torch_lr_scheduler_dict(out_path)
    out_path = Path(__file__).parent / 'lossfunc.py'
    gen_torch_lossfunc_dict(out_path)
    out_path = Path(__file__).parent / 'metrics.py'
    gen_metric_dict(out_path)
| 20,063 | 41.780384 | 203 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_kmeans/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Config-schema rule for the vertical k-means "label_trainer" role.
# Leaf values are common.checker types/qualifiers describing the accepted
# type and default of each job-config field; "__rule__" entries constrain
# which sibling keys may/must appear.
vertical_kmeans_label_trainer_rule = {
    "identity": "label_trainer",
    "model_info": {
        "name": "vertical_kmeans"
    },
    "input": {
        "trainset": [
            OneOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(True)
                }
            ).set_default_index(0)
        ]
    },
    "output": {
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "model": {
            "name": String("vertical_kmeans_[STAGE_ID].pkl")
        },
        "result": {
            "name": String("cluster_result_[STAGE_ID].csv")
        },
        "summary": {
            "name": String("cluster_summary_[STAGE_ID].csv")
        }
    },
    "train_info": {
        # train_params may be omitted entirely.
        "__rule__": Optional("train_params"),
        "train_params": {
            "init": OneOf("random", "kmeans++").set_default("random"),
            "encryption": {
                "__rule__": OneOf("otp", "plain").set_default("otp"),
                "otp": {
                    "key_bitlength": OneOf(64, 128).set_default(64),
                    "data_type": "torch.Tensor",
                    "key_exchange": {
                        "key_bitlength": OneOf(3072, 4096, 6144, 8192),
                        "optimized": Bool(True)
                    },
                    "csprng": {
                        "name": OneOf("hmac_drbg").set_default("hmac_drbg"),
                        "method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
                    }
                },
                "plain": {}
            },
            "k": Integer(5),
            "max_iter": Integer(50),
            "tol": Float(1e-6),
            # NOTE(review): seed declared as Float here, not Integer —
            # confirm the checker/trainer tolerates this.
            "random_seed": Float(50)
        }
    }
}
| 2,041 | 31.935484 | 109 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_kmeans/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from common.utils.auto_descriptor.torch.optimizer import optimizer
from common.utils.auto_descriptor.torch.lr_scheduler import lr_scheduler
from common.utils.auto_descriptor.torch.lossfunc import lossfunc
from common.utils.auto_descriptor.torch.metrics import metrics
from common.utils.utils import update_dict
from algorithm.core.metrics import metric_dict
# Config-schema rule for the horizontal k-means "assist_trainer" role.
# Leaf values are common.checker types/qualifiers describing the accepted
# type and default of each job-config field.
horizontal_kmeans_assist_trainer_rule = {
    "identity": "assist_trainer",
    "model_info": {
        "name": "horizontal_kmeans",
        "config": {
            "input_dim": Integer(),
            "num_clusters": Integer(3)
        }
    },
    "input": {
        # valset is mandatory; a pretrained model is optional.
        "__rule__": [Optional("pretrain_model"), Required("valset")],
        "valset": [
            OneOf(
                {
                    "type": "csv",
                    "path": String(),
                    "name": String(),
                    "has_label": Bool(True),
                    # NOTE(review): has_id defaults to False here, unlike
                    # most sibling rules — confirm intentional.
                    "has_id": Bool(False)
                }
            ).set_default_index(0)
        ],
        "pretrain_model": {}
    },
    "output": {
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "model": {
            "name": String("horizontal_kmeans_[STAGE_ID].model")
        },
        "metric_val": {
            "name": String("kmeans_metric_val_[STAGE_ID].csv")
        }
    },
    "train_info": {
        "train_params": {
            "global_epoch": Integer(20),
            "aggregation": {
                "method": {
                    "__rule__": OneOf("fedavg", "fedprox", "scaffold").set_default_index(0),
                    "fedavg": {},
                    "fedprox": {
                        "mu": Float(0.1)
                    },
                    "scaffold": {}
                }
            },
            "encryption": {
                "__rule__": OneOf("otp", "plain").set_default("otp"),
                "otp": {
                    "key_bitlength": OneOf(64, 128).set_default(64),
                    "data_type": "torch.Tensor",
                    "key_exchange": {
                        "key_bitlength": OneOf(3072, 4096, 6144, 8192),
                        "optimized": Bool(True)
                    },
                    "csprng": {
                        "name": OneOf("hmac_drbg").set_default("hmac_drbg"),
                        "method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
                    }
                },
                "plain": {}
            }
        }
    }
}
| 2,660 | 34.013158 | 109 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_linear_regression/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from common.utils.auto_descriptor.torch.optimizer import optimizer
from common.utils.auto_descriptor.torch.lr_scheduler import lr_scheduler
from common.utils.auto_descriptor.torch.lossfunc import lossfunc
from common.utils.auto_descriptor.torch.metrics import metrics
from common.utils.utils import update_dict
from algorithm.core.metrics import metric_dict
# Config-schema rule for the horizontal linear-regression "assist_trainer"
# role. Leaf values are common.checker types/qualifiers; the optimizer and
# lr_scheduler sub-rules are filled in below from the auto-generated torch
# descriptors via update_dict.
horizontal_linear_regression_assist_trainer_rule = {
    "identity": "assist_trainer",
    "model_info": {
        "name": "horizontal_linear_regression",
        "config": {
            "input_dim": Integer(),
            "bias": Bool(True)
        }
    },
    "input": {
        "__rule__": [Optional("pretrain_model"), Required("valset")],
        "valset": [
            OneOf(
                {
                    "type": "csv",
                    "path": String(),
                    "name": String(),
                    "has_label": Bool(True),
                    "has_id": Bool(True)
                }
            ).set_default_index(0)
        ],
        "pretrain_model": {}
    },
    "output": {
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "model": {
            "name": String("horizontal_linear_regression_[STAGE_ID].model")
        },
        "metric_val": {
            "name": String("lr_metric_val_[STAGE_ID].csv")
        }
    },
    "train_info": {
        "device": OneOf("cpu", "cuda:0"),
        "interaction_params": {
            "__rule__": [Optional("save_frequency")],
            "save_frequency": Integer(1),
        },
        "train_params": {
            "global_epoch": Integer(20),
            "val_batch_size": Integer(128),
            "aggregation": {
                "method": {
                    "__rule__": OneOf("fedavg", "fedprox", "scaffold").set_default_index(0),
                    "fedavg": {},
                    "fedprox": {
                        "mu": Float(0.1)
                    },
                    "scaffold": {}
                }
            },
            "encryption": {
                "__rule__": OneOf("otp", "plain").set_default("otp"),
                "otp": {
                    "key_bitlength": OneOf(64, 128).set_default(64),
                    "data_type": "torch.Tensor",
                    "key_exchange": {
                        "key_bitlength": OneOf(3072, 4096, 6144, 8192),
                        "optimized": Bool(True)
                    },
                    "csprng": {
                        "name": OneOf("hmac_drbg").set_default("hmac_drbg"),
                        "method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
                    }
                },
                "plain": {}
            },
            # Per-optimizer / per-scheduler parameter rules are merged in
            # below by update_dict.
            "optimizer": {
                "__rule__": OneOf(*list(optimizer.keys())).set_default("Adam"),
            },
            "lr_scheduler": {
                "__rule__": OneOf(*list(lr_scheduler.keys())).set_default("StepLR")
            },
            "lossfunc": {
                "L1Loss": lossfunc["L1Loss"]
            },
            "metric": {
                "mae": metrics["mae"],
                "mse": metrics["mse"],
                "mape": metrics["mape"]
            },
            "early_stopping": {
            },
        }
    }
}
update_dict(horizontal_linear_regression_assist_trainer_rule["train_info"]["train_params"]["optimizer"], optimizer)
update_dict(horizontal_linear_regression_assist_trainer_rule["train_info"]["train_params"]["lr_scheduler"], lr_scheduler)
| 3,686 | 35.50495 | 121 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_logistic_regression/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from common.utils.auto_descriptor.torch.optimizer import optimizer
from common.utils.auto_descriptor.torch.lr_scheduler import lr_scheduler
from common.utils.auto_descriptor.torch.lossfunc import lossfunc
from common.utils.auto_descriptor.torch.metrics import metrics
from common.utils.utils import update_dict
from algorithm.core.metrics import metric_dict
# Config-schema rule for the horizontal logistic-regression "assist_trainer"
# role. Leaf values are common.checker types/qualifiers; the optimizer and
# lr_scheduler sub-rules are filled in below from the auto-generated torch
# descriptors via update_dict.
horizontal_logistic_regression_assist_trainer_rule = {
    "identity": "assist_trainer",
    "model_info": {
        "name": "horizontal_logistic_regression",
        "config": {
            "input_dim": Integer(),
            "bias": Bool(True)
        }
    },
    "input": {
        "__rule__": [Optional("pretrain_model"), Required("valset")],
        "valset": [
            OneOf(
                {
                    "type": "csv",
                    "path": String(),
                    "name": String(),
                    "has_label": Bool(True),
                    "has_id": Bool(True)
                }
            ).set_default_index(0)
        ],
        "pretrain_model": {}
    },
    "output": {
        "__rule__": [Optional("model"), Optional("onnx_model")],
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "model": {
            # NOTE(review): "logitstic" typo is preserved — it matches the
            # artifact names used elsewhere in the project; renaming would
            # break existing jobs.
            "name": String("horizontal_logitstic_regression_[STAGE_ID].model")
        },
        "onnx_model": {
            "name": String("horizontal_logitstic_regression_[STAGE_ID].onnx")
        },
        "metric_val": {
            "name": String("lr_metric_val_[STAGE_ID].csv")
        }
    },
    "train_info": {
        "device": OneOf("cpu", "cuda:0"),
        "interaction_params": {
            "__rule__": [Optional("save_frequency")],
            "save_frequency": Integer(1),
        },
        "train_params": {
            "global_epoch": Integer(20),
            "val_batch_size": Integer(128),
            "aggregation": {
                "method": {
                    "__rule__": OneOf("fedavg", "fedprox", "scaffold").set_default_index(0),
                    "fedavg": {},
                    "fedprox": {
                        "mu": Float(0.1)
                    },
                    "scaffold": {}
                }
            },
            "encryption": {
                "__rule__": OneOf("otp", "plain").set_default("otp"),
                "otp": {
                    "key_bitlength": OneOf(64, 128).set_default(64),
                    "data_type": "torch.Tensor",
                    "key_exchange": {
                        "key_bitlength": OneOf(3072, 4096, 6144, 8192),
                        "optimized": Bool(True)
                    },
                    "csprng": {
                        "name": OneOf("hmac_drbg").set_default("hmac_drbg"),
                        "method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
                    }
                },
                "plain": {}
            },
            # Per-optimizer / per-scheduler parameter rules are merged in
            # below by update_dict.
            "optimizer": {
                "__rule__": OneOf(*list(optimizer.keys())).set_default("Adam"),
            },
            "lr_scheduler": {
                "__rule__": OneOf(*list(lr_scheduler.keys())).set_default("StepLR")
            },
            "lossfunc": {
                "BCELoss": lossfunc["BCELoss"]
            },
            "metric": {
                "acc": metrics["acc"],
                "precision": metrics["precision"],
                "recall": metrics["recall"],
                "f1_score": metrics["f1_score"],
                "auc": metrics["auc"],
                "ks": metrics["ks"]
            },
            "early_stopping": {
                # The monitored key must be one of the configured metrics.
                "key": OneOf("acc", "precision", "recall", "f1_score", "auc", "ks").set_default_index(-1).add_rule(lambda x, y: x in y["train_info"]["train_params"]["metric"].keys(), "should in metric"),
                "patience": Integer(10).ge(-1),
                "delta": Float(0.001).gt(0)
            },
        }
    }
}
update_dict(horizontal_logistic_regression_assist_trainer_rule["train_info"]["train_params"]["optimizer"], optimizer)
update_dict(horizontal_logistic_regression_assist_trainer_rule["train_info"]["train_params"]["lr_scheduler"], lr_scheduler)
| 4,321 | 36.912281 | 203 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_xgboost_infer/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_xgboost_infer_label_trainer_rule = {
"identity": "label_trainer",
"model_info": {
"name": "vertical_xgboost"
},
"inference": True,
"input": {
"testset": [
Optional(
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_index(0)
)
],
"pretrained_model": {
"path": String(""),
"name": String("")
}
},
"output": {
"__rule__": [Optional("path"), Optional("testset")],
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"testset": {
"name": String("xgb_prediction_test_[STAGE_ID].csv")
}
},
"train_info": {
"train_params": {
"batch_size_val": Integer(40960)
}
}
}
| 1,197 | 26.860465 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_xgboost_infer/sync.py | from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_xgboost_infer_sync_rule = {
"train_info": {
"train_params": {
"batch_size_val": All()
}
}
}
| 303 | 24.333333 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_xgboost_infer/trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_xgboost_infer_trainer_rule = {
"identity": "trainer",
"model_info": {
"name": "vertical_xgboost"
},
"inference": True,
"input": {
"testset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(False)
}
).set_default_index(0)
],
"pretrained_model": {
"path": String(""),
"name": String("")
}
},
"output": {
"__rule__": [Optional("path"), Optional("testset")],
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"testset": {
"name": String("xgb_prediction_test_[STAGE_ID].csv")
}
},
"train_info": {
}
}
| 1,034 | 25.538462 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_poisson_regression/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from common.utils.auto_descriptor.torch.optimizer import optimizer
from common.utils.auto_descriptor.torch.lr_scheduler import lr_scheduler
from common.utils.auto_descriptor.torch.lossfunc import lossfunc
from common.utils.auto_descriptor.torch.metrics import metrics
from common.utils.utils import update_dict
from algorithm.core.metrics import metric_dict
horizontal_poisson_regression_assist_trainer_rule = {
"identity": "assist_trainer",
"model_info": {
"name": "horizontal_poisson_regression",
"config": {
"input_dim": Integer(),
"bias": Bool(True)
}
},
"input": {
"__rule__": [Optional("pretrain_model"), Required("valset")],
"valset": [
OneOf(
{
"type": "csv",
"path": String(),
"name": String(),
"has_label": Bool(True),
"has_id": Bool(True)
}
).set_default_index(0)
],
"pretrain_model": {}
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("horizontal_poisson_regression_[STAGE_ID].model")
},
"metric_val": {
"name": String("pr_metric_val_[STAGE_ID].csv")
}
},
"train_info": {
"device": OneOf("cpu", "cuda:0"),
"interaction_params": {},
"train_params": {
"global_epoch": Integer(20),
"val_batch_size": Integer(128),
"aggregation": {
"method": {
"__rule__": OneOf("fedavg", "fedprox", "scaffold").set_default_index(0),
"fedavg": {},
"fedprox": {
"mu": Float(0.1)
},
"scaffold": {}
}
},
"encryption": {
"__rule__": OneOf("otp", "plain").set_default("otp"),
"otp": {
"key_bitlength": OneOf(64, 128).set_default(64),
"data_type": "torch.Tensor",
"key_exchange": {
"key_bitlength": OneOf(3072, 4096, 6144, 8192),
"optimized": Bool(True)
},
"csprng": {
"name": OneOf("hmac_drbg").set_default("hmac_drbg"),
"method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
}
},
"plain": {}
},
"optimizer": {
"__rule__": OneOf(*list(optimizer.keys())).set_default("Adam"),
},
"lr_scheduler": {
"__rule__": OneOf(*list(lr_scheduler.keys())).set_default("StepLR")
},
"lossfunc": {
"PoissonNLLLoss": lossfunc["PoissonNLLLoss"]
},
"metric": {
"mean_poisson_deviance": metrics["mean_poisson_deviance"]
},
"early_stopping": {}
}
}
}
update_dict(horizontal_poisson_regression_assist_trainer_rule["train_info"]["train_params"]["optimizer"], optimizer)
update_dict(horizontal_poisson_regression_assist_trainer_rule["train_info"]["train_params"]["lr_scheduler"], lr_scheduler)
| 3,544 | 36.315789 | 122 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.