repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/summary_providers/primitive_summary_provider.py | src/providers/summary_providers/primitive_summary_provider.py | import numpy as np
import yaml
from utils.infer_higher_is_better import higher_is_better_from_metric_key, is_neutral_key
from .base.summary_provider_base import SummaryProviderBase
from ..path_provider import PathProvider
class PrimitiveSummaryProvider(SummaryProviderBase):
    """Summary provider that keeps an in-memory dict and persists it as primitive YAML via a PathProvider."""

    def __init__(self, path_provider: PathProvider):
        super().__init__()
        self.path_provider = path_provider
        # in-memory summary; persisted in bulk via flush()
        self.summary = {}

    def update(self, *args, **kwargs):
        self.summary.update(*args, **kwargs)

    def __setitem__(self, key, value):
        self.summary[key] = value

    def __getitem__(self, key):
        return self.summary[key]

    def __contains__(self, key):
        return key in self.summary

    def keys(self):
        return self.summary.keys()

    def get_summary_of_previous_stage(self, stage_name, stage_id):
        """Load the persisted summary of a previous stage, or None if no summary file exists."""
        summary_uri = self.path_provider.get_primitive_summary_uri(stage_name=stage_name, stage_id=stage_id)
        if not summary_uri.exists():
            return None
        with open(summary_uri) as f:
            return yaml.safe_load(f)

    def flush(self):
        """ summary is potentially often updated -> flush in bulks """
        with open(self.path_provider.primitive_summary_uri, "w") as f:
            yaml.safe_dump(self.summary, f)

    def summarize_logvalues(self):
        """Derive summary entries (min/max, last10/last50 running averages, last value) from logged entries.

        Returns:
            Dict of the newly derived summary entries, or None if no entries file/content exists.
        """
        entries_uri = self.path_provider.primitive_entries_uri
        if not entries_uri.exists():
            return None
        with open(entries_uri) as f:
            entries = yaml.safe_load(f)
        if entries is None:
            return None
        summary = {}
        for key, update_to_value in entries.items():
            # some wandb system metrics (e.g. "_runtime") start with _
            # TODO not sure why they are in the primitive summary
            # startswith is safe for empty keys, key[0] would raise IndexError
            if key.startswith("_"):
                continue
            # logvalues are stored as {"key": {<update0>: <value0>, <update1>: <value1>}}
            # sort once by update counter so "last" and running averages are chronological
            # even if the loaded dict is not in insertion order (max() was used before for
            # the same reason, but values[-1] below silently assumed sortedness)
            values = [value for _, value in sorted(update_to_value.items())]
            # value at the highest update counter
            self[key] = values[-1]
            if key in ["epoch", "update", "sample"]:
                continue
            # exclude neutral keys (e.g. lr, profiling, ...) for min/max summarizing
            if is_neutral_key(key):
                continue
            # min/max
            higher_is_better = higher_is_better_from_metric_key(key)
            if higher_is_better:
                minmax_key = f"{key}/max"
                minmax_value = max(values)
            else:
                minmax_key = f"{key}/min"
                minmax_value = min(values)
            self[minmax_key] = minmax_value
            summary[minmax_key] = minmax_value
            self.logger.info(f"{minmax_key}: {minmax_value}")
            # last10/last50 running averages over the most recent values
            for running_avg_count in [10, 50]:
                running_avg = float(np.mean(values[-running_avg_count:]))
                running_avg_key = f"{key}/last{running_avg_count}"
                self[running_avg_key] = running_avg
                summary[running_avg_key] = running_avg
            # add last
            # wandb adds it automatically, but with the postfix /last it is easier to distinguish in SummarySummarizers
            last_key = f"{key}/last"
            last_value = values[-1]
            self[last_key] = last_value
            summary[last_key] = last_value
        return summary
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/summary_providers/noop_summary_provider.py | src/providers/summary_providers/noop_summary_provider.py | from .base.summary_provider_base import SummaryProviderBase
class NoopSummaryProvider(SummaryProviderBase):
    """Summary provider that collects values in memory but never persists or summarizes anything."""

    def __init__(self):
        super().__init__()
        # in-memory only; flush() is a no-op
        self.summary = {}

    def update(self, *args, **kwargs):
        self.summary.update(*args, **kwargs)

    def __setitem__(self, key, value):
        self.summary[key] = value

    def __getitem__(self, key):
        return self.summary[key]

    def __contains__(self, key):
        return key in self.summary

    def keys(self):
        return self.summary.keys()

    def get_summary_of_previous_stage(self, stage_name, stage_id):
        # nothing is persisted, so previous stages have an empty summary
        return {}

    def flush(self):
        # intentionally a no-op
        pass

    def summarize_logvalues(self):
        # intentionally a no-op
        pass
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/summary_providers/__init__.py | src/providers/summary_providers/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/summary_providers/base/summary_provider_base.py | src/providers/summary_providers/base/summary_provider_base.py | import logging
class SummaryProviderBase:
    """Abstract interface for summary providers; every method must be overridden by subclasses."""

    def __init__(self):
        # one logger per concrete subclass, named after that subclass
        self.logger = logging.getLogger(type(self).__name__)

    def update(self, *args, **kwargs):
        raise NotImplementedError

    def __setitem__(self, key, value):
        raise NotImplementedError

    def __getitem__(self, key):
        raise NotImplementedError

    def __contains__(self, key):
        raise NotImplementedError

    def keys(self):
        raise NotImplementedError

    def get_summary_of_previous_stage(self, stage_name, stage_id):
        raise NotImplementedError

    def flush(self):
        raise NotImplementedError

    def summarize_logvalues(self):
        raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/providers/summary_providers/base/__init__.py | src/providers/summary_providers/base/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/__init__.py | src/modules/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/pdearena/conditional_twod_resnet.py | src/modules/pdearena/conditional_twod_resnet.py | # Adapted from https://github.com/microsoft/pdearena/blob/main/pdearena/modules/conditioned/twod_resnet.py
# Adaptions:
# - conditional embedding is passed to the model instead of creating a sincos timestep embedding within the model
# - remove last projection (handled by decoder)
# - removed explicit fp32 conversions
# - removed unneded things
# - removed option to expand channels within blocks
# - input is expected to be 4D instead of 5D (4D + time dimension)
import torch
import torch.nn.functional as F
from torch import nn
class FreqLinear(nn.Module):
    """Linear map from a conditioning vector to per-mode complex modulation factors.

    Maps (B, in_channel) -> complex tensor of shape (B, modes1, modes2, 2),
    where the trailing dimension of the real weights is interpreted as (real, imag).
    """

    def __init__(self, in_channel, modes1, modes2):
        super().__init__()
        self.modes1 = modes1
        self.modes2 = modes2
        out_features = 4 * modes1 * modes2
        # keep initial outputs small relative to fan-in + fan-out
        scale = 1 / (in_channel + out_features)
        self.weights = nn.Parameter(scale * torch.randn(in_channel, out_features, dtype=torch.float32))
        self.bias = nn.Parameter(torch.zeros(1, out_features, dtype=torch.float32))

    def forward(self, x):
        batch_size = x.shape[0]
        projected = torch.einsum("tc,cm->tm", x, self.weights) + self.bias
        projected = projected.reshape(batch_size, self.modes1, self.modes2, 2, 2)
        # reinterpret the trailing (real, imag) pair as a complex tensor
        return torch.view_as_complex(projected)
def batchmul2d(x, weights, emb):
    """Modulate x per batch element with emb, then contract the input-channel dimension with weights.

    x: (batch, in, m1, m2); weights: (in, out, m1, m2); emb: (batch, m1, m2)
    returns: (batch, out, m1, m2)
    """
    modulated = x * emb.unsqueeze(1)
    return torch.einsum("bixy,ioxy->boxy", modulated, weights)
class SpectralConv2d(nn.Module):
    """2D Fourier layer: FFT -> conditioned per-mode linear transform -> inverse FFT.

    Based on the FNO reference implementation by Zongyi Li
    (https://arxiv.org/pdf/2010.08895.pdf), with conditioning via FreqLinear.
    """

    def __init__(self, in_channels, out_channels, cond_channels, modes1, modes2):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # number of Fourier modes to multiply, at most floor(N/2) + 1
        self.modes1 = modes1
        self.modes2 = modes2
        self.scale = 1 / (in_channels * out_channels)
        # weights stored as real tensors with a trailing (real, imag) dimension
        self.weights1 = nn.Parameter(
            self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, 2, dtype=torch.float32)
        )
        self.weights2 = nn.Parameter(
            self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, 2, dtype=torch.float32)
        )
        self.cond_emb = FreqLinear(cond_channels, self.modes1, self.modes2)

    def forward(self, x, emb):
        # per-mode complex modulation factors derived from the conditioning vector
        cond = self.cond_emb(emb)
        cond_low = cond[..., 0]
        cond_high = cond[..., 1]
        batch_size = x.shape[0]
        # compute Fourier coefficients up to a constant factor
        x_ft = torch.fft.rfft2(x)
        # only the selected low/high frequency modes are transformed, the rest stays zero
        out_ft = torch.zeros(
            batch_size,
            self.out_channels,
            x.size(-2),
            x.size(-1) // 2 + 1,
            dtype=torch.cfloat,
            device=x.device,
        )
        out_ft[:, :, : self.modes1, : self.modes2] = batchmul2d(
            x_ft[:, :, : self.modes1, : self.modes2], torch.view_as_complex(self.weights1), cond_low
        )
        out_ft[:, :, -self.modes1:, : self.modes2] = batchmul2d(
            x_ft[:, :, -self.modes1:, : self.modes2], torch.view_as_complex(self.weights2), cond_high
        )
        # return to physical space
        return torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))
class EmbedSequential(nn.Sequential):
    """Sequential container whose children additionally receive a conditioning embedding."""

    # noinspection PyMethodOverriding
    def forward(self, x, emb):
        out = x
        for module in self:
            out = module(out, emb)
        return out
class FourierBasicBlock(nn.Module):
    """FNO block: two stages of (spectral conv + 1x1 conv), conditioned on an embedding."""

    expansion: int = 1

    def __init__(
        self,
        in_planes: int,
        planes: int,
        cond_channels: int,
        modes1: int = 16,
        modes2: int = 16,
        norm: bool = False,
    ) -> None:
        super().__init__()
        self.modes1 = modes1
        self.modes2 = modes2
        self.activation = nn.GELU()
        # normalization is not supported in this adaption
        assert not norm
        self.fourier1 = SpectralConv2d(in_planes, planes, cond_channels, modes1=self.modes1, modes2=self.modes2)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, padding=0, padding_mode="zeros", bias=True)
        self.cond_emb = nn.Linear(cond_channels, planes)
        self.fourier2 = SpectralConv2d(planes, planes, cond_channels, modes1=self.modes1, modes2=self.modes2)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=1, padding=0, padding_mode="zeros", bias=True)

    def forward(self, x, emb):
        spectral = self.fourier1(x, emb)
        pointwise = self.conv1(x)
        cond = self.cond_emb(emb)
        # broadcast the conditioning embedding over the spatial dimensions
        while cond.ndim < pointwise.ndim:
            cond = cond[..., None]
        hidden = self.activation(spectral + pointwise + cond)
        out = self.fourier2(hidden, emb) + self.conv2(hidden)
        return self.activation(out)
class ResNet(nn.Module):
    """Conditioned ResNet over 4D inputs (batch, channels, height, width).

    Two 1x1 input convs, a stack of conditioned block layers, then two 1x1 output convs.
    """

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        cond_dim: int,
        block: nn.Module,
        num_blocks: list,
        norm: bool = False,
    ):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.cond_dim = cond_dim
        self.activation = nn.GELU()
        # 1x1 convs act as pointwise linear projections in/out of the hidden width
        self.conv_in1 = nn.Conv2d(input_dim, hidden_dim, kernel_size=1, bias=True)
        self.conv_in2 = nn.Conv2d(hidden_dim, hidden_dim, kernel_size=1, bias=True)
        self.conv_out1 = nn.Conv2d(hidden_dim, hidden_dim, kernel_size=1, bias=True)
        self.conv_out2 = nn.Conv2d(hidden_dim, hidden_dim, kernel_size=1, bias=True)
        self.layers = nn.ModuleList([
            self._make_layer(
                block=block,
                planes=hidden_dim,
                cond_channels=cond_dim,
                num_blocks=blocks_in_layer,
                stride=1,
                norm=norm,
            )
            for blocks_in_layer in num_blocks
        ])

    @staticmethod
    def _make_layer(
        block: nn.Module,
        planes: int,
        cond_channels: int,
        num_blocks: int,
        stride: int,
        norm: bool = True,
    ):
        # stride is kept for interface compatibility; all blocks share the same width
        blocks = [
            block(planes, planes, cond_channels=cond_channels, norm=norm)
            for _ in range(num_blocks)
        ]
        return EmbedSequential(*blocks)

    def __repr__(self):
        return "ResNet"

    def forward(self, x, cond):
        assert x.dim() == 4
        x = self.activation(self.conv_in1(x))
        x = self.activation(self.conv_in2(x))
        for layer in self.layers:
            x = layer(x, cond)
        x = self.activation(self.conv_out1(x))
        return self.conv_out2(x)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/pdearena/__init__.py | src/modules/pdearena/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/graph/gnn_layer.py | src/modules/graph/gnn_layer.py | import torch
from torch import nn
from torch_geometric.nn.conv import MessagePassing
class GNNLayer(MessagePassing):
    """Message-passing layer whose messages depend on node features and pairwise euclidean distance."""

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        # +1 input feature for the distance between the edge endpoints
        self.message_net = nn.Sequential(
            nn.Linear(2 * input_dim + 1, hidden_dim),
            nn.SiLU(),
        )
        self.update_net = nn.Sequential(
            nn.Linear(input_dim + hidden_dim, hidden_dim),
            nn.SiLU(),
        )

    def forward(self, x, pos, edge_index):
        """ Propagate messages along edges """
        return self.propagate(edge_index, x=x, pos=pos)

    # noinspection PyMethodOverriding
    def message(self, x_i, x_j, pos_i, pos_j):
        """ Message update """
        # euclidean distance between the endpoint positions, one scalar per edge
        distance = torch.sqrt(torch.sum((pos_i - pos_j) ** 2, dim=1)).unsqueeze(dim=1)
        return self.message_net(torch.cat((x_i, x_j, distance), dim=-1))

    # noinspection PyMethodOverriding
    def update(self, message, x, pos):
        """ Node update """
        return x + self.update_net(torch.cat((x, message), dim=-1))

    def message_and_aggregate(self, adj_t):
        raise NotImplementedError

    def edge_update(self):
        raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/graph/__init__.py | src/modules/graph/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/graph/topk.py | src/modules/graph/topk.py | from typing import Callable, Optional, Union
import torch
from torch import Tensor
from torch_geometric.nn.inits import uniform
from torch_geometric.nn.pool.select import Select, SelectOutput
from torch_geometric.nn.resolver import activation_resolver
from torch_geometric.utils import cumsum, scatter
def topk(
    x: Tensor,
    num_output_nodes: int,
    batch: Tensor,
) -> Tensor:
    """Select the indices of the num_output_nodes highest-scoring nodes of every graph.

    Args:
        x: per-node scores (flattened to 1D internally).
        num_output_nodes: fixed number of nodes to keep per graph.
        batch: batch vector assigning each node to its graph.

    Returns:
        Indices into x of the selected nodes, grouped per graph.
    """
    num_nodes = scatter(batch.new_ones(x.size(0)), batch, reduce='sum')
    k = torch.full(size=(len(num_nodes),), fill_value=num_output_nodes, device=x.device, dtype=torch.long)
    # fixed garbled assertion message ("has to be at >= num_outputs_nodes")
    assert torch.all(num_nodes >= k), "num_nodes has to be >= num_output_nodes"
    # sort scores descending, then stable-sort by graph so nodes stay grouped per graph
    x, x_perm = torch.sort(x.view(-1), descending=True)
    batch = batch[x_perm]
    batch, batch_perm = torch.sort(batch, descending=False, stable=True)
    arange = torch.arange(x.size(0), dtype=torch.long, device=x.device)
    ptr = cumsum(num_nodes)
    # rank of each node within its own graph; keep only the first k per graph
    batched_arange = arange - ptr[batch]
    mask = batched_arange < k[batch]
    return x_perm[batch_perm[mask]]
class SelectTopK(Select):
    """
    torch_geometrics.nn.pool.select.topk with a dynamic ratio such that the number of output nodes is constant
    also removed the parameter "weight" that allowed to learn a weighted sum
    """

    def __init__(
        self,
        in_channels: int,
        num_output_nodes: int = 5,
        act: Union[str, Callable] = "tanh",
    ):
        super().__init__()
        self.in_channels = in_channels
        self.num_output_nodes = num_output_nodes
        self.act = activation_resolver(act)

    def forward(
        self,
        x: Tensor,
        batch: Optional[Tensor] = None,
    ) -> SelectOutput:
        # default: treat all nodes as one single graph
        if batch is None:
            batch = x.new_zeros(x.size(0), dtype=torch.long)
        x = x.view(-1, 1) if x.dim() == 1 else x
        # per-node score: activation over channel sum
        score = self.act(x.sum(dim=-1))
        node_index = topk(score, self.num_output_nodes, batch)
        return SelectOutput(
            node_index=node_index,
            num_nodes=x.size(0),
            cluster_index=torch.arange(node_index.size(0), device=x.device),
            num_clusters=node_index.size(0),
            weight=score[node_index],
        )

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.in_channels}, num_output_nodes={self.num_output_nodes})"
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/graph/sag_pool.py | src/modules/graph/sag_pool.py | from typing import Callable, Optional, Tuple, Union
import torch
from torch import Tensor
from torch_geometric.nn import GraphConv
from torch_geometric.nn.pool.connect import FilterEdges
from .topk import SelectTopK
from torch_geometric.typing import OptTensor
class SAGPoolingFixedNumNodes(torch.nn.Module):
    """ torch_geometric.nn.pool.SAGPooling with a dynamic ratio such that the number of output nodes is constant """

    def __init__(
        self,
        in_channels: int,
        num_output_nodes: int = 5,
        GNN: torch.nn.Module = GraphConv,
        multiplier: float = 1.0,
        nonlinearity: Union[str, Callable] = 'tanh',
        aggr: str = "sum",
    ):
        super().__init__()
        self.in_channels = in_channels
        self.num_output_nodes = num_output_nodes
        self.multiplier = multiplier
        self.aggr = aggr
        # GNN producing a single attention score per node
        self.gnn = GNN(in_channels, 1, aggr=aggr)
        self.select = SelectTopK(1, num_output_nodes, nonlinearity)
        self.connect = FilterEdges()
        self.reset_parameters()

    def reset_parameters(self):
        r"""Resets all learnable parameters of the module."""
        self.gnn.reset_parameters()
        self.select.reset_parameters()

    def forward(
        self,
        x: Tensor,
        edge_index: Tensor,
        edge_attr: OptTensor = None,
        batch: OptTensor = None,
        attn: OptTensor = None,
    ) -> Tuple[Tensor, Tensor, OptTensor, OptTensor, Tensor, Tensor]:
        r"""Pool the graph down to a fixed number of nodes.

        Args:
            x (torch.Tensor): The node feature matrix.
            edge_index (torch.Tensor): The edge indices.
            edge_attr (torch.Tensor, optional): The edge features. (default: :obj:`None`)
            batch (torch.Tensor, optional): The batch vector assigning each node to a
                specific example. (default: :obj:`None`)
            attn (torch.Tensor, optional): Optional node-level matrix used for computing
                attention scores instead of the node feature matrix :obj:`x`.
                (default: :obj:`None`)
        """
        if batch is None:
            batch = edge_index.new_zeros(x.size(0))
        # compute per-node scores from attn (or x) via the score GNN
        score_input = x if attn is None else attn
        score_input = score_input.view(-1, 1) if score_input.dim() == 1 else score_input
        score_input = self.gnn(score_input, edge_index)
        select_out = self.select(score_input, batch)
        perm = select_out.node_index
        score = select_out.weight
        assert score is not None
        # gate the surviving node features with their scores
        x = x[perm] * score.view(-1, 1)
        if self.multiplier != 1:
            x = self.multiplier * x
        connect_out = self.connect(select_out, edge_index, edge_attr, batch)
        return (
            x,
            connect_out.edge_index,
            connect_out.edge_attr,
            connect_out.batch,
            perm,
            score,
        )

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}("
            f"gnn={self.gnn.__class__.__name__}, "
            f"in_channels={self.in_channels}, "
            f"num_output_nodes={self.num_output_nodes}, "
            f"multiplier={self.multiplier}"
            f")"
        )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/cfd_gnn_pool.py | src/modules/gno/cfd_gnn_pool.py | from functools import partial
import torch
from kappamodules.init.functional import init_xavier_uniform_zero_bias, init_truncnormal_zero_bias
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_geometric.nn.conv import MessagePassing
from modules.graph.sag_pool import SAGPoolingFixedNumNodes
class CfdGnnPool(nn.Module):
    """GNN encoder: embeds mesh node features + positions, runs message passing, then pools to a fixed node count."""

    def __init__(
        self,
        input_dim,
        hidden_dim,
        num_output_nodes,
        depth=1,
        ndim=2,
        norm="none",
        init_weights="xavier_uniform",
    ):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.num_output_nodes = num_output_nodes
        self.depth = depth
        self.norm = norm
        self.ndim = ndim
        self.init_weights = init_weights
        self.proj = nn.Linear(input_dim, hidden_dim)
        self.pos_embed = ContinuousSincosEmbed(dim=hidden_dim, ndim=ndim)
        # only "none" normalization is currently supported
        if norm == "none":
            norm_ctor = nn.Identity
        else:
            raise NotImplementedError
        # depth message-passing layers, each with its own message/update MLP
        self.gnn_layers = nn.ModuleList([
            self.CfdGnnPoolMessagePassing(
                message_net=nn.Sequential(
                    nn.Linear(2 * hidden_dim, hidden_dim),
                    norm_ctor(hidden_dim),
                    nn.SiLU(),
                    nn.Linear(hidden_dim, hidden_dim),
                ),
                update_net=nn.Sequential(
                    nn.Linear(2 * hidden_dim, hidden_dim),
                    norm_ctor(hidden_dim),
                    nn.SiLU(),
                    nn.Linear(hidden_dim, hidden_dim),
                ),
            )
            for _ in range(depth)
        ])
        self.pool = SAGPoolingFixedNumNodes(hidden_dim, num_output_nodes=num_output_nodes, aggr="mean")
        self.reset_parameters()

    def reset_parameters(self):
        # initialize all weights with the configured scheme (biases zeroed)
        if self.init_weights == "xavier_uniform":
            self.apply(init_xavier_uniform_zero_bias)
        elif self.init_weights == "truncnormal":
            self.apply(init_truncnormal_zero_bias)
        else:
            raise NotImplementedError

    def forward(self, x, mesh_pos, mesh_edges, batch_idx):
        """Embed, message-pass, and pool; returns (pooled features, pooled batch vector)."""
        # embed + GNN
        x = self.proj(x)
        x = x + self.pos_embed(mesh_pos)
        for gnn_layer in self.gnn_layers:
            # NOTE(review): mesh_edges appears to be (num_edges, 2) and is transposed for propagate — confirm
            x = gnn_layer(mesh_edges=mesh_edges.T, x=x, pos=mesh_pos)
        # pool
        pool_result = self.pool(x, mesh_edges.T, batch=batch_idx)
        # x_pool, edge_index_pool, edge_attr_pool, batch_pool, perm, score = pool_result
        x_pool, _, _, batch_pool, _, _ = pool_result
        return x_pool, batch_pool

    class CfdGnnPoolMessagePassing(MessagePassing):
        """Mean-aggregating message passing used inside CfdGnnPool."""

        def __init__(self, message_net, update_net):
            super().__init__(aggr="mean")
            self.message_net = message_net
            self.update_net = update_net

        def forward(self, mesh_edges, x, pos):
            return self.propagate(edge_index=mesh_edges, x=x, pos=pos)

        # noinspection PyMethodOverriding
        def message(self, x_i, x_j, pos_i, pos_j):
            # messages depend only on the two endpoint features (pos is unused here)
            msg_input = torch.cat([x_i, x_j], dim=1)
            message = self.message_net(msg_input)
            return message

        # noinspection PyMethodOverriding
        def update(self, message, x, pos):
            # residual node update
            x = x + self.update_net(torch.cat([x, message], dim=1))
            return x

        def message_and_aggregate(self, adj_t):
            raise NotImplementedError

        def edge_update(self):
            raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/cfd_gino_mesh_to_grid.py | src/modules/gno/cfd_gino_mesh_to_grid.py | import einops
import numpy as np
import torch
from kappamodules.layers import ContinuousSincosEmbed, LinearProjection, Residual
from torch import nn
from torch_scatter import segment_csr
from kappamodules.init import init_xavier_uniform_zero_bias, init_truncnormal_zero_bias
class CfdGinoMeshToGrid(nn.Module):
    """Projects mesh node features onto a regular grid by mean-aggregating messages along mesh-to-grid edges."""

    def __init__(self, input_dim, hidden_dim, resolution, init_weights="xavier_uniform"):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        # spatial resolution of the target grid, e.g. (64, 64)
        self.resolution = resolution
        self.init_weights = init_weights
        self.num_grid_points = int(np.prod(resolution))
        # MLP embedding of the raw mesh features
        self.input_proj = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim),
        )
        self.pos_embed = ContinuousSincosEmbed(dim=hidden_dim, ndim=len(resolution))
        # edge MLP over concatenated (mesh feature, grid position embedding) pairs
        self.message = nn.Sequential(
            nn.Linear(hidden_dim * 2, hidden_dim * 2),
            nn.GELU(),
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim),
        )
        self.output_dim = hidden_dim
        self.reset_parameters()

    def reset_parameters(self):
        # initialize all weights with the configured scheme (biases zeroed)
        if self.init_weights == "xavier_uniform":
            self.apply(init_xavier_uniform_zero_bias)
        elif self.init_weights == "truncnormal":
            self.apply(init_truncnormal_zero_bias)
        else:
            raise NotImplementedError

    def forward(self, x, mesh_pos, grid_pos, mesh_to_grid_edges):
        """Aggregate mesh features into a dense grid.

        Returns a tensor of shape (batch_size, num_grid_points, hidden_dim).
        """
        assert x.ndim == 2
        assert mesh_pos.ndim == 2
        assert grid_pos.ndim == 2
        assert mesh_to_grid_edges.ndim == 2
        assert len(grid_pos) % self.num_grid_points == 0
        # embed mesh
        x = self.input_proj(x) + self.pos_embed(mesh_pos)
        # embed grid
        grid_pos = self.pos_embed(grid_pos)
        # create message input; edges are (grid index, mesh index) pairs
        grid_idx, mesh_idx = mesh_to_grid_edges.unbind(1)
        x = torch.concat([x[mesh_idx], grid_pos[grid_idx]], dim=1)
        x = self.message(x)
        # accumulate messages
        # indptr is a tensor of indices between which to aggregate
        # i.e. a tensor of [0, 2, 5] would result in [src[0] + src[1], src[2] + src[3] + src[4]]
        dst_indices, counts = grid_idx.unique(return_counts=True)
        # first index has to be 0 + add padding for target indices that don't occur
        padded_counts = torch.zeros(len(grid_pos) + 1, device=counts.device, dtype=counts.dtype)
        padded_counts[dst_indices + 1] = counts
        indptr = padded_counts.cumsum(dim=0)
        x = segment_csr(src=x, indptr=indptr, reduce="mean")
        # convert to dense tensor (dim last)
        x = x.reshape(-1, *self.resolution, self.output_dim)
        x = einops.rearrange(x, "batch_size ... dim -> batch_size (...) dim")
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/gino_grid_to_mesh.py | src/modules/gno/gino_grid_to_mesh.py | import einops
import torch
from kappamodules.init.functional import init_xavier_uniform_zero_bias
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_scatter import segment_csr
class GinoGridToMesh(nn.Module):
    """Scatters grid features back onto mesh query points and predicts per-point outputs.

    Supports a rectangular architecture (hidden_dim is an int) or a custom per-layer
    shape (hidden_dim is a list; the original is 448 -> 512 -> 256 -> 86).
    """

    def __init__(
        self,
        input_dim,
        hidden_dim,
        output_dim,
        ndim,
        bottleneck_dim=None,
        embed_dim=None,
        pred_hidden_dim=None,
    ):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.ndim = ndim
        if isinstance(hidden_dim, int):
            # rectangular architecture
            # prep shapes
            self.bottleneck_dim = bottleneck_dim or hidden_dim
            self.embed_dim = embed_dim or hidden_dim
            self.pred_hidden_dim = pred_hidden_dim or hidden_dim
            # create layers
            self.proj = nn.Linear(input_dim, self.embed_dim)
            self.pos_embed = ContinuousSincosEmbed(dim=self.embed_dim, ndim=ndim)
            self.message = nn.Sequential(
                nn.Linear(2 * self.embed_dim, 2 * hidden_dim),
                nn.GELU(),
                nn.Linear(2 * hidden_dim, 2 * hidden_dim),
                nn.GELU(),
                nn.Linear(2 * hidden_dim, self.bottleneck_dim),
            )
        else:
            # custom shape (original is 448 -> 512 -> 256 -> 86)
            # prep shapes
            assert bottleneck_dim is None
            assert embed_dim is None
            self.bottleneck_dim = hidden_dim[-1]
            self.embed_dim = hidden_dim[0]
            # half of embed_dim comes from proj, the other half from pos_embed
            assert self.embed_dim % 2 == 0
            self.pred_hidden_dim = pred_hidden_dim or hidden_dim[-1]
            # create layers
            self.proj = nn.Linear(input_dim, hidden_dim[0] // 2)
            self.pos_embed = ContinuousSincosEmbed(dim=hidden_dim[0] // 2, ndim=ndim)
            layers = []
            for i in range(len(hidden_dim) - 1):
                layers.append(nn.Linear(hidden_dim[i], hidden_dim[i + 1]))
                # no activation after the last linear layer
                if i < len(hidden_dim) - 2:
                    layers.append(nn.GELU())
            self.message = nn.Sequential(*layers)
        self.pred = nn.Sequential(
            nn.Linear(self.bottleneck_dim, self.pred_hidden_dim),
            nn.GELU(),
            nn.Linear(self.pred_hidden_dim, output_dim),
        )

    def forward(self, x, query_pos, grid_to_query_edges):
        """Aggregate grid features per query point and predict per-point outputs.

        Args:
            x: dense grid features of shape (batch_size, seqlen, input_dim).
            query_pos: query point positions, shape (num_queries, ndim).
            grid_to_query_edges: (num_edges, 2) pairs of (query index, grid index).
        """
        assert query_pos.ndim == 2
        assert grid_to_query_edges.ndim == 2
        # convert to sparse tensor
        x = einops.rearrange(x, "batch_size seqlen dim -> (batch_size seqlen) dim")
        x = self.proj(x)
        # embed mesh
        query_pos = self.pos_embed(query_pos)
        # create message input
        query_idx, grid_idx = grid_to_query_edges.unbind(1)
        x = torch.concat([x[grid_idx], query_pos[query_idx]], dim=1)
        x = self.message(x)
        # accumulate messages
        # indptr is a tensor of indices between which to aggregate
        # i.e. a tensor of [0, 2, 5] would result in [src[0] + src[1], src[2] + src[3] + src[4]]
        dst_indices, counts = query_idx.unique(return_counts=True)
        # first index has to be 0 + add padding for target indices that don't occur
        padded_counts = torch.zeros(len(query_pos) + 1, device=counts.device, dtype=counts.dtype)
        padded_counts[dst_indices + 1] = counts
        indptr = padded_counts.cumsum(dim=0)
        x = segment_csr(src=x, indptr=indptr, reduce="mean")
        # prediction head
        x = self.pred(x)
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/rans_gino_mesh_to_grid_sdf_og.py | src/modules/gno/rans_gino_mesh_to_grid_sdf_og.py | import einops
import numpy as np
import torch
from kappamodules.init.functional import init_xavier_uniform_zero_bias
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_scatter import segment_csr
class RansGinoMeshToGridSdfOg(nn.Module):
    """Original-GINO-style mesh-to-grid encoder that additionally embeds the signed distance function (SDF)."""

    def __init__(self, hidden_dim, output_dim, resolution):
        super().__init__()
        # original parameters:
        # dim=64
        # resolution=64
        # output_dim=86
        self.hidden_dim = hidden_dim
        # GINO concats the raw SDF and the raw grid position before the FNO
        self.output_dim = output_dim + 4
        self.resolution = resolution
        self.num_grid_points = int(np.prod(resolution))
        # "df_embed"
        self.sdf_embed = nn.Sequential(
            nn.Linear(1, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim * 3),
        )
        # GINO concats a constant 1 as 4th dimension for some reason
        self.mesh_pos_embed = ContinuousSincosEmbed(dim=hidden_dim * 4, ndim=len(resolution) + 1)
        self.pos_embed = ContinuousSincosEmbed(dim=hidden_dim * 3, ndim=len(resolution))
        # "gno1"
        # input: mesh pos embed (4 * hidden_dim) + grid pos embed (3 * hidden_dim) + sdf embed (3 * hidden_dim)
        self.message = nn.Sequential(
            nn.Linear(hidden_dim * 10, 512),
            nn.GELU(),
            nn.Linear(512, 256),
            nn.GELU(),
            nn.Linear(256, output_dim),
        )

    def forward(self, mesh_pos, sdf, grid_pos, mesh_to_grid_edges):
        """Aggregate mesh positional embeddings onto a dense grid enriched with SDF features.

        Returns a tensor of shape (batch_size, num_grid_points, output_dim + 4).
        """
        assert mesh_pos.ndim == 2
        assert grid_pos.ndim == 2
        assert mesh_to_grid_edges.ndim == 2
        assert len(grid_pos) % self.num_grid_points == 0
        # NOTE: we rescale all positions to [0, 200] instead of [-1, 1] -> revert
        mesh_pos = mesh_pos / 100 - 1
        grid_pos = grid_pos / 100 - 1
        # embed mesh
        # original implementation adds a 4th dimension with constant 1 during training
        ones = torch.ones(size=(len(mesh_pos),), dtype=mesh_pos.dtype, device=mesh_pos.device).unsqueeze(1)
        mesh_pos = torch.concat([mesh_pos, ones], dim=1)
        mesh_pos = self.mesh_pos_embed(mesh_pos)
        # embed grid
        grid_pos_embed = self.pos_embed(grid_pos)
        # flatten sdf -> embed SDF
        sdf = sdf.view(-1, 1)
        sdf_embed = self.sdf_embed(sdf)
        # create grid embedding (positional embedding of grid positions + SDF embedding)
        grid_embed = torch.concat([grid_pos_embed, sdf_embed], dim=1)
        # create message input; edges are (grid index, mesh index) pairs
        grid_idx, mesh_idx = mesh_to_grid_edges.unbind(1)
        x = torch.concat([mesh_pos[mesh_idx], grid_embed[grid_idx]], dim=1)
        x = self.message(x)
        # accumulate messages
        # indptr is a tensor of indices between which to aggregate
        # i.e. a tensor of [0, 2, 5] would result in [src[0] + src[1], src[2] + src[3] + src[4]]
        dst_indices, counts = grid_idx.unique(return_counts=True)
        # first index has to be 0 + add padding for target indices that don't occur
        padded_counts = torch.zeros(len(grid_embed) + 1, device=counts.device, dtype=counts.dtype)
        padded_counts[dst_indices + 1] = counts
        indptr = padded_counts.cumsum(dim=0)
        x = segment_csr(src=x, indptr=indptr, reduce="mean")
        # GINO concats grid positions (without sincos embedding) and the raw sdf (i.e. without sdf_embed) before FNO
        x = torch.concat([grid_pos, sdf, x], dim=1)
        # convert to dense tensor (dim last)
        x = x.reshape(-1, *self.resolution, self.output_dim)
        x = einops.rearrange(x, "batch_size ... dim -> batch_size (...) dim")
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/rans_posembed_message.py | src/modules/gno/rans_posembed_message.py | import torch
from kappamodules.init.functional import init_xavier_uniform_zero_bias
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_geometric.nn.conv import MessagePassing
class RansPosembedMessage(MessagePassing):
    """Mean-aggregated message passing over sincos-embedded mesh positions."""

    def __init__(self, dim, ndim):
        super().__init__(aggr="mean")
        self.dim = dim
        self.pos_embed = ContinuousSincosEmbed(dim=dim, ndim=ndim)
        self.message_net = nn.Sequential(
            nn.Linear(2 * dim, dim),
            nn.SiLU(),
            nn.Linear(dim, dim),
        )
        self.update_net = nn.Sequential(
            nn.Linear(2 * dim, dim),
            nn.SiLU(),
            nn.Linear(dim, dim),
        )

    def forward(self, mesh_pos, mesh_edges):
        embedded = self.pos_embed(mesh_pos)
        return self.propagate(x=embedded, pos=mesh_pos, edge_index=mesh_edges.T)

    # noinspection PyMethodOverriding
    def message(self, x_i, x_j, pos_i, pos_j):
        # messages depend only on the embedded endpoint features
        return self.message_net(torch.cat([x_i, x_j], dim=-1))

    # noinspection PyMethodOverriding
    def update(self, message, x):
        # residual node update
        return x + self.update_net(torch.cat([x, message], dim=-1))

    def message_and_aggregate(self, adj_t):
        raise NotImplementedError

    def edge_update(self):
        raise NotImplementedError
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/cfd_gino_grid_to_mesh.py | src/modules/gno/cfd_gino_grid_to_mesh.py | import einops
import torch
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_scatter import segment_csr
class CfdGinoGridToMesh(nn.Module):
    """GINO-style decoder: read out a dense latent grid at arbitrary query points.

    Every (grid point -> query point) edge produces a message from the projected
    grid feature and the query's positional embedding; messages are mean-pooled
    per query point and projected to output_dim.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, ndim):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.ndim = ndim
        # create layers
        self.proj = nn.Linear(input_dim, hidden_dim)
        self.pos_embed = ContinuousSincosEmbed(dim=hidden_dim, ndim=ndim)
        self.message = nn.Sequential(
            nn.Linear(2 * hidden_dim, 2 * hidden_dim),
            nn.GELU(),
            nn.Linear(2 * hidden_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim),
        )
        self.pred = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, output_dim),
        )

    def forward(self, x, query_pos, grid_to_query_edges):
        """
        Args:
            x: dense grid latent, shape (batch_size, seqlen, input_dim).
            query_pos: query coordinates, shape (num_queries, ndim).
            grid_to_query_edges: (num_edges, 2) pairs of (query index, grid index).
        Returns:
            predictions of shape (num_queries, output_dim).
        """
        assert query_pos.ndim == 2
        assert grid_to_query_edges.ndim == 2
        # flatten the dense grid latent into a sparse (num_points, dim) tensor
        grid_feat = einops.rearrange(x, "batch_size seqlen dim -> (batch_size seqlen) dim")
        grid_feat = self.proj(grid_feat)
        # embed the continuous query coordinates
        query_embed = self.pos_embed(query_pos)
        # one message per edge: concat(source grid feature, target query embedding)
        query_idx, grid_idx = grid_to_query_edges.unbind(1)
        messages = self.message(torch.concat([grid_feat[grid_idx], query_embed[query_idx]], dim=1))
        # mean-aggregate messages per query via CSR segments:
        # indptr holds boundaries, e.g. [0, 2, 5] -> [mean(src[0:2]), mean(src[2:5])]
        occupied, occupied_counts = query_idx.unique(return_counts=True)
        # leading 0 boundary + zero-length segments for queries without any edge
        seglens = torch.zeros(len(query_embed) + 1, device=occupied_counts.device, dtype=occupied_counts.dtype)
        seglens[occupied + 1] = occupied_counts
        aggregated = segment_csr(src=messages, indptr=seglens.cumsum(dim=0), reduce="mean")
        # project aggregated features to the output dimension
        return self.pred(aggregated)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/rans_gino_grid_to_mesh.py | src/modules/gno/rans_gino_grid_to_mesh.py | import einops
import torch
from kappamodules.init.functional import init_xavier_uniform_zero_bias
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_scatter import segment_csr
class RansGinoGridToMesh(nn.Module):
    """GINO-style decoder: map dense grid latents to predictions at query points.

    Each (grid point -> query point) edge produces a message from the projected
    grid feature and the query's positional embedding; messages are mean-pooled
    per query and projected to output_dim.
    """
    def __init__(self, input_dim, hidden_dim, output_dim, ndim):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.ndim = ndim
        self.proj = nn.Linear(input_dim, hidden_dim)
        self.pos_embed = ContinuousSincosEmbed(dim=hidden_dim, ndim=ndim)
        self.message = nn.Sequential(
            nn.Linear(2 * hidden_dim, 2 * hidden_dim),
            nn.GELU(),
            nn.Linear(2 * hidden_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim),
        )
        self.pred = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, output_dim),
        )
    def forward(self, x, query_pos, grid_to_query_edges):
        """
        Args:
            x: dense grid latent, shape (batch_size, seqlen, input_dim).
            query_pos: query coordinates, shape (num_queries, ndim).
            grid_to_query_edges: (num_edges, 2) pairs of (query index, grid index).
        Returns:
            predictions of shape (num_queries, output_dim).
        """
        assert query_pos.ndim == 2
        assert grid_to_query_edges.ndim == 2
        # convert to sparse tensor
        x = einops.rearrange(x, "batch_size seqlen dim -> (batch_size seqlen) dim")
        x = self.proj(x)
        # embed query positions
        query_pos = self.pos_embed(query_pos)
        # create message input: concat(source grid feature, target query embedding)
        query_idx, grid_idx = grid_to_query_edges.unbind(1)
        x = torch.concat([x[grid_idx], query_pos[query_idx]], dim=1)
        x = self.message(x)
        # accumulate messages
        # indptr is a tensor of indices between which to aggregate
        # i.e. a tensor of [0, 2, 5] would result in [src[0] + src[1], src[2] + src[3] + src[4]]
        dst_indices, counts = query_idx.unique(return_counts=True)
        # first index has to be 0 + add padding for target indices that don't occur
        padded_counts = torch.zeros(len(query_pos) + 1, device=counts.device, dtype=counts.dtype)
        padded_counts[dst_indices + 1] = counts
        indptr = padded_counts.cumsum(dim=0)
        x = segment_csr(src=x, indptr=indptr, reduce="mean")
        # project aggregated features to the output dimension
        x = self.pred(x)
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/rans_gino_grid_to_mesh_og.py | src/modules/gno/rans_gino_grid_to_mesh_og.py | import einops
import torch
from kappamodules.init.functional import init_xavier_uniform_zero_bias
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_scatter import segment_csr
class RansGinoGridToMeshOg(nn.Module):
    """Port of the original ("og") GINO grid-to-mesh decoder for RANS.

    Follows the reference implementation's quirks: a constant 1 is appended as a
    4th coordinate before the sin/cos embedding, and the message MLP uses the
    reference's hard-coded 512/256 widths.
    """
    def __init__(
        self,
        input_dim,
        hidden_dim,
        bottleneck_dim,
        output_dim,
    ):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.bottleneck_dim = bottleneck_dim
        self.output_dim = output_dim
        # GINO concats a constant 1 as 4th dimension for some reason
        self.pos_embed = ContinuousSincosEmbed(dim=hidden_dim * 4, ndim=4)
        # "gno1"
        self.message = nn.Sequential(
            nn.Linear(input_dim + 4 * hidden_dim, 512),
            nn.GELU(),
            nn.Linear(512, 256),
            nn.GELU(),
            nn.Linear(256, bottleneck_dim),
        )
        self.pred = nn.Sequential(
            nn.Linear(bottleneck_dim, 256),
            nn.GELU(),
            nn.Linear(256, output_dim),
        )
    def forward(self, x, query_pos, grid_to_query_edges):
        """
        Args:
            x: dense grid latent, shape (batch_size, seqlen, input_dim).
            query_pos: query coordinates, shape (num_queries, ndim), scaled to [0, 200].
            grid_to_query_edges: (num_edges, 2) pairs of (query index, grid index).
        Returns:
            predictions of shape (num_queries, output_dim).
        """
        assert query_pos.ndim == 2
        assert grid_to_query_edges.ndim == 2
        # NOTE: we rescale all positions to [0, 200] instead of [-1, 1] -> revert
        query_pos = query_pos / 100 - 1
        # convert to sparse tensor
        x = einops.rearrange(x, "batch_size seqlen dim -> (batch_size seqlen) dim")
        # embed query positions
        # original implementation adds a 4th dimension with constant 1 during training
        ones = torch.ones(size=(len(query_pos),), dtype=query_pos.dtype, device=query_pos.device).unsqueeze(1)
        query_pos = torch.concat([query_pos, ones], dim=1)
        query_pos = self.pos_embed(query_pos)
        # create message input: concat(source grid feature, target query embedding)
        query_idx, grid_idx = grid_to_query_edges.unbind(1)
        x = torch.concat([x[grid_idx], query_pos[query_idx]], dim=1)
        x = self.message(x)
        # accumulate messages
        # indptr is a tensor of indices between which to aggregate
        # i.e. a tensor of [0, 2, 5] would result in [src[0] + src[1], src[2] + src[3] + src[4]]
        dst_indices, counts = query_idx.unique(return_counts=True)
        # first index has to be 0 + add padding for target indices that don't occur
        padded_counts = torch.zeros(len(query_pos) + 1, device=counts.device, dtype=counts.dtype)
        padded_counts[dst_indices + 1] = counts
        indptr = padded_counts.cumsum(dim=0)
        x = segment_csr(src=x, indptr=indptr, reduce="mean")
        # project aggregated features to the output dimension
        x = self.pred(x)
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/rans_gino_latent_to_mesh.py | src/modules/gno/rans_gino_latent_to_mesh.py | import einops
import torch
from torch_scatter import segment_csr
from .gino_grid_to_mesh import GinoGridToMesh
class RansGinoLatentToMesh(GinoGridToMesh):
    """Decode a per-sample latent sequence to per-query predictions.

    Every query point receives one message from every latent token of its own
    sample (a dense bipartite graph); messages are mean-aggregated per query and
    projected to the output dimension. The layers used here (proj, pos_embed,
    message, pred) are provided by the GinoGridToMesh base class.
    """

    # noinspection PyMethodOverriding
    def forward(self, x, query_pos):
        """
        Args:
            x: latent tokens, shape (batch_size, seqlen, dim).
            query_pos: query coordinates, shape (batch_size, num_query_points, ndim).
        Returns:
            predictions of shape (batch_size * num_query_points, output_dim).
        """
        assert query_pos.ndim == 3
        _, seqlen, _ = x.shape
        num_query_points = query_pos.size(1)
        # convert to sparse tensor
        x = einops.rearrange(x, "batch_size seqlen dim -> (batch_size seqlen) dim")
        x = self.proj(x)
        # convert to sparse tensor
        query_pos = einops.rearrange(
            query_pos,
            "batch_size num_query_points ndim -> (batch_size num_query_points) ndim",
        )
        query_pos = self.pos_embed(query_pos)
        # create message input: each query connects to all seqlen latents of ITS OWN sample
        # BUGFIX: previously latent_idx was torch.arange(seqlen).repeat(...), which always
        # indexed the latents of the FIRST sample; with the per-sample offset below,
        # behavior is unchanged for batch_size == 1 and correct for batch_size > 1
        query_idx = torch.arange(len(query_pos), device=x.device).repeat_interleave(seqlen)
        sample_offset = (query_idx // num_query_points) * seqlen
        latent_idx = torch.arange(seqlen, device=x.device).repeat(len(query_pos)) + sample_offset
        x = torch.concat([x[latent_idx], query_pos[query_idx]], dim=1)
        x = self.message(x)
        # accumulate messages
        # indptr is a tensor of indices between which to aggregate
        # i.e. a tensor of [0, 2, 5] would result in [src[0] + src[1], src[2] + src[3] + src[4]]
        dst_indices, counts = query_idx.unique(return_counts=True)
        # first index has to be 0 + add padding for target indices that don't occur
        padded_counts = torch.zeros(len(query_pos) + 1, device=counts.device, dtype=counts.dtype)
        padded_counts[dst_indices + 1] = counts
        indptr = padded_counts.cumsum(dim=0)
        x = segment_csr(src=x, indptr=indptr, reduce="mean")
        # project aggregated features to the output dimension
        x = self.pred(x)
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/rans_gino_mesh_to_grid_og.py | src/modules/gno/rans_gino_mesh_to_grid_og.py | import einops
import numpy as np
import torch
from kappamodules.init.functional import init_xavier_uniform_zero_bias
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_scatter import segment_csr
class RansGinoMeshToGridOg(nn.Module):
    """Port of the original ("og") GINO mesh-to-grid encoder for RANS.

    Mesh node positions are pushed onto a regular latent grid: each
    (mesh node -> grid point) edge produces a message from the two positional
    embeddings, and messages are mean-pooled per grid point.
    """
    def __init__(self, dim, resolution):
        super().__init__()
        self.dim = dim
        self.resolution = resolution
        self.num_grid_points = int(np.prod(resolution))
        if isinstance(dim, int):
            # rectangular shape
            self.pos_embed = ContinuousSincosEmbed(dim=dim, ndim=len(resolution))
            self.message = nn.Sequential(
                nn.Linear(dim * 2, dim * 2),
                nn.GELU(),
                nn.Linear(dim * 2, dim * 2),
                nn.GELU(),
                nn.Linear(dim * 2, dim),
            )
            self.output_dim = dim
        else:
            # custom shape (original is 640 -> 512 -> 256 -> 86)
            # message input is concat of two pos embeddings of dim[0] // 2 each -> dim[0] total
            assert dim[0] % 2 == 0
            self.pos_embed = ContinuousSincosEmbed(dim=dim[0] // 2, ndim=len(resolution))
            layers = []
            for i in range(len(dim) - 1):
                layers.append(nn.Linear(dim[i], dim[i + 1]))
                if i < len(dim) - 2:
                    layers.append(nn.GELU())
            self.message = nn.Sequential(*layers)
            self.output_dim = dim[-1]
    def forward(self, mesh_pos, grid_pos, mesh_to_grid_edges):
        """
        Args:
            mesh_pos: mesh node coordinates, shape (num_mesh_points, ndim).
            grid_pos: flattened grid coordinates, shape (batch_size * num_grid_points, ndim).
            mesh_to_grid_edges: (num_edges, 2) pairs of (grid index, mesh index).
        Returns:
            dense latent grid of shape (batch_size, num_grid_points, output_dim).
        """
        assert mesh_pos.ndim == 2
        assert grid_pos.ndim == 2
        assert mesh_to_grid_edges.ndim == 2
        assert len(grid_pos) % self.num_grid_points == 0
        # embed mesh
        mesh_pos = self.pos_embed(mesh_pos)
        # embed grid
        grid_pos = self.pos_embed(grid_pos)
        # create message input: concat(source mesh embedding, target grid embedding)
        grid_idx, mesh_idx = mesh_to_grid_edges.unbind(1)
        x = torch.concat([mesh_pos[mesh_idx], grid_pos[grid_idx]], dim=1)
        x = self.message(x)
        # accumulate messages
        # indptr is a tensor of indices between which to aggregate
        # i.e. a tensor of [0, 2, 5] would result in [src[0] + src[1], src[2] + src[3] + src[4]]
        dst_indices, counts = grid_idx.unique(return_counts=True)
        # first index has to be 0 + add padding for target indices that don't occur
        padded_counts = torch.zeros(len(grid_pos) + 1, device=counts.device, dtype=counts.dtype)
        padded_counts[dst_indices + 1] = counts
        indptr = padded_counts.cumsum(dim=0)
        x = segment_csr(src=x, indptr=indptr, reduce="mean")
        # convert to dense tensor (dim last)
        x = x.reshape(-1, *self.resolution, self.output_dim)
        x = einops.rearrange(x, "batch_size ... dim -> batch_size (...) dim")
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/cfd_pool_gaussian_sincos_pos.py | src/modules/gno/cfd_pool_gaussian_sincos_pos.py | import einops
import numpy as np
import torch
from kappamodules.layers import ContinuousSincosEmbed, LinearProjection, Residual
from kappamodules.init import init_xavier_uniform_zero_bias, init_truncnormal_zero_bias
from torch import nn
from torch_scatter import segment_csr
class CfdPoolGaussianSincosPos(nn.Module):
    """Pool mesh node features onto supernodes using Gaussian Fourier positional features.

    Like CfdPool, but positions are embedded with random Fourier features
    (https://arxiv.org/pdf/2006.10739.pdf) drawn from N(0, positional_std)
    with a fixed seed, instead of a deterministic sin/cos embedding.
    """
    def __init__(self, input_dim, hidden_dim, positional_std, ndim=2, init_weights="xavier_uniform"):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.ndim = ndim
        self.init_weights = init_weights
        self.input_proj = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim),
        )
        # Try with other method from https://arxiv.org/pdf/2006.10739.pdf
        # fixed seed so the random frequency matrix is reproducible across runs
        generator = torch.Generator().manual_seed(42)
        self.register_buffer(
            "b",
            torch.normal(mean=torch.zeros(hidden_dim // 2, ndim), std=positional_std, generator=generator)
        )
        self.message = nn.Sequential(
            nn.Linear(hidden_dim * 2, hidden_dim * 2),
            nn.GELU(),
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim),
        )
        self.output_dim = hidden_dim
        self.reset_parameters()
    def reset_parameters(self):
        # initialize all linear layers according to the configured scheme
        if self.init_weights == "xavier_uniform":
            self.apply(init_xavier_uniform_zero_bias)
        elif self.init_weights == "truncnormal":
            self.apply(init_truncnormal_zero_bias)
        else:
            raise NotImplementedError
    def forward(self, x, mesh_pos, mesh_edges, batch_idx):
        """
        Args:
            x: node features, shape (num_nodes, input_dim).
            mesh_pos: node coordinates, shape (num_nodes, ndim).
            mesh_edges: (num_edges, 2) pairs of (destination/supernode index, source index).
            batch_idx: per-node sample index, shape (num_nodes,).
        Returns:
            dense tensor of shape (batch_size, num_supernodes, hidden_dim).
        """
        assert x.ndim == 2
        assert mesh_pos.ndim == 2
        assert mesh_edges.ndim == 2
        # embed mesh
        x = self.input_proj(x) + self.pos_embed(mesh_pos)
        # create message input
        dst_idx, src_idx = mesh_edges.unbind(1)
        x = torch.concat([x[src_idx], x[dst_idx]], dim=1)
        x = self.message(x)
        # accumulate messages
        # indptr is a tensor of indices between which to aggregate
        # i.e. a tensor of [0, 2, 5] would result in [src[0] + src[1], src[2] + src[3] + src[4]]
        dst_indices, counts = dst_idx.unique(return_counts=True)
        # first index has to be 0
        # NOTE: padding for target indices that don't occur is not needed as self-loop is always present
        padded_counts = torch.zeros(len(counts) + 1, device=counts.device, dtype=counts.dtype)
        padded_counts[1:] = counts
        indptr = padded_counts.cumsum(dim=0)
        x = segment_csr(src=x, indptr=indptr, reduce="mean")
        # sanity check: dst_indices has len of batch_size * num_supernodes and has to be divisible by batch_size
        # if num_supernodes is not set in dataset this assertion fails
        batch_size = batch_idx.max() + 1
        assert dst_indices.numel() % batch_size == 0
        # convert to dense tensor (dim last)
        x = einops.rearrange(
            x,
            "(batch_size num_supernodes) dim -> batch_size num_supernodes dim",
            batch_size=batch_size,
        )
        return x
    def pos_embed(self, pos):
        # random Fourier features: concat[cos(2*pi*pos@B^T), sin(2*pi*pos@B^T)] -> hidden_dim features
        return torch.concat([torch.cos(2.0 * torch.pi * pos @ self.b.T),
                             torch.sin(2.0 * torch.pi * pos @ self.b.T)], dim=-1)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/rans_gino_mesh_to_grid.py | src/modules/gno/rans_gino_mesh_to_grid.py | import einops
import numpy as np
import torch
from kappamodules.init.functional import init_xavier_uniform_zero_bias
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_scatter import segment_csr
class RansGinoMeshToGrid(nn.Module):
    """GINO-style encoder that pushes mesh node positions onto a regular latent grid.

    Each (mesh node -> grid point) edge produces a message from the two
    positional embeddings; messages are mean-pooled per grid point and returned
    as a dense (batch_size, prod(resolution), dim) tensor.
    """

    def __init__(self, dim, resolution):
        super().__init__()
        self.dim = dim
        self.resolution = resolution
        self.num_grid_points = int(np.prod(resolution))
        self.pos_embed = ContinuousSincosEmbed(dim=dim, ndim=len(resolution))
        self.message = nn.Sequential(
            nn.Linear(dim * 2, dim * 2),
            nn.GELU(),
            nn.Linear(dim * 2, dim),
            nn.GELU(),
            nn.Linear(dim, dim),
        )
        self.output_dim = dim

    def forward(self, mesh_pos, grid_pos, mesh_to_grid_edges):
        """
        Args:
            mesh_pos: mesh node coordinates, shape (num_mesh_points, ndim).
            grid_pos: flattened grid coordinates, shape (batch_size * num_grid_points, ndim).
            mesh_to_grid_edges: (num_edges, 2) pairs of (grid index, mesh index).
        Returns:
            dense latent grid of shape (batch_size, num_grid_points, output_dim).
        """
        assert mesh_pos.ndim == 2
        assert grid_pos.ndim == 2
        assert mesh_to_grid_edges.ndim == 2
        assert len(grid_pos) % self.num_grid_points == 0
        # embed both point sets with the shared sin/cos embedding
        mesh_embed = self.pos_embed(mesh_pos)
        grid_embed = self.pos_embed(grid_pos)
        # one message per edge: concat(source mesh embedding, target grid embedding)
        grid_idx, mesh_idx = mesh_to_grid_edges.unbind(1)
        edge_feat = self.message(torch.concat([mesh_embed[mesh_idx], grid_embed[grid_idx]], dim=1))
        # mean-aggregate incoming messages per grid point via CSR segments:
        # indptr holds boundaries, e.g. [0, 2, 5] -> [mean(src[0:2]), mean(src[2:5])]
        occupied, occupied_counts = grid_idx.unique(return_counts=True)
        # leading 0 boundary + zero-length segments for grid points without any edge
        seglens = torch.zeros(len(grid_embed) + 1, device=occupied_counts.device, dtype=occupied_counts.dtype)
        seglens[occupied + 1] = occupied_counts
        pooled = segment_csr(src=edge_feat, indptr=seglens.cumsum(dim=0), reduce="mean")
        # dense tensor with flattened spatial axes, dim last
        return pooled.reshape(-1, self.num_grid_points, self.output_dim)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/cfd_gino_mesh_to_grid_old.py | src/modules/gno/cfd_gino_mesh_to_grid_old.py | import einops
import numpy as np
import torch
from kappamodules.layers import ContinuousSincosEmbed, LinearProjection, Residual
from torch import nn
from torch_scatter import segment_csr
class CfdGinoMeshToGridOld(nn.Module):
    """Older GINO mesh-to-grid encoder variant that also consumes node features.

    Node features are projected and concatenated with the node's positional
    embedding; each (mesh node -> grid point) edge produces a message which is
    mean-pooled per grid point into a dense latent grid.
    """
    def __init__(self, input_dim, hidden_dim, resolution):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.resolution = resolution
        self.num_grid_points = int(np.prod(resolution))
        if isinstance(hidden_dim, int):
            # rectangular shape
            self.input_proj = nn.Sequential(
                nn.Linear(input_dim, hidden_dim),
                nn.GELU(),
                nn.Linear(hidden_dim, hidden_dim),
                nn.GELU(),
                nn.Linear(hidden_dim, hidden_dim),
            )
            # message input: node feature (h) + node pos (h/2) + grid pos (h/2) = 2h
            self.pos_embed = ContinuousSincosEmbed(dim=hidden_dim // 2, ndim=len(resolution))
            self.message = nn.Sequential(
                nn.Linear(hidden_dim * 2, hidden_dim * 2),
                nn.GELU(),
                nn.Linear(hidden_dim * 2, hidden_dim * 2),
                nn.GELU(),
                nn.Linear(hidden_dim * 2, hidden_dim),
            )
            self.output_dim = hidden_dim
        else:
            # custom shape (original is 640 -> 512 -> 256 -> 86)
            assert hidden_dim[0] % 4 == 0
            self.input_proj = nn.Sequential(
                nn.Linear(input_dim, hidden_dim[0]),
                nn.GELU(),
                nn.Linear(hidden_dim[0], hidden_dim[0]),
                nn.GELU(),
                nn.Linear(hidden_dim[0], hidden_dim[0] // 2),
            )
            # message input: node feature (h/2) + node pos (h/4) + grid pos (h/4) = hidden_dim[0]
            self.pos_embed = ContinuousSincosEmbed(dim=hidden_dim[0] // 4, ndim=len(resolution))
            layers = []
            for i in range(len(hidden_dim) - 1):
                layers.append(nn.Linear(hidden_dim[i], hidden_dim[i + 1]))
                if i < len(hidden_dim) - 2:
                    layers.append(nn.GELU())
            self.message = nn.Sequential(*layers)
            self.output_dim = hidden_dim[-1]
    def forward(self, x, mesh_pos, grid_pos, mesh_to_grid_edges):
        """
        Args:
            x: node features, shape (num_mesh_points, input_dim).
            mesh_pos: mesh node coordinates, shape (num_mesh_points, ndim).
            grid_pos: flattened grid coordinates, shape (batch_size * num_grid_points, ndim).
            mesh_to_grid_edges: (num_edges, 2) pairs of (grid index, mesh index).
        Returns:
            dense latent grid of shape (batch_size, num_grid_points, output_dim).
        """
        assert x.ndim == 2
        assert mesh_pos.ndim == 2
        assert grid_pos.ndim == 2
        assert mesh_to_grid_edges.ndim == 2
        assert len(grid_pos) % self.num_grid_points == 0
        # embed mesh: projected node features concatenated with positional embedding
        x = torch.concat([self.input_proj(x), self.pos_embed(mesh_pos)], dim=1)
        # embed grid
        grid_pos = self.pos_embed(grid_pos)
        # create message input: concat(source mesh embedding, target grid embedding)
        grid_idx, mesh_idx = mesh_to_grid_edges.unbind(1)
        x = torch.concat([x[mesh_idx], grid_pos[grid_idx]], dim=1)
        x = self.message(x)
        # accumulate messages
        # indptr is a tensor of indices between which to aggregate
        # i.e. a tensor of [0, 2, 5] would result in [src[0] + src[1], src[2] + src[3] + src[4]]
        dst_indices, counts = grid_idx.unique(return_counts=True)
        # first index has to be 0 + add padding for target indices that don't occur
        padded_counts = torch.zeros(len(grid_pos) + 1, device=counts.device, dtype=counts.dtype)
        padded_counts[dst_indices + 1] = counts
        indptr = padded_counts.cumsum(dim=0)
        x = segment_csr(src=x, indptr=indptr, reduce="mean")
        # convert to dense tensor (dim last)
        x = x.reshape(-1, *self.resolution, self.output_dim)
        x = einops.rearrange(x, "batch_size ... dim -> batch_size (...) dim")
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/rans_gino_mesh_to_grid_sdf.py | src/modules/gno/rans_gino_mesh_to_grid_sdf.py | import einops
import numpy as np
import torch
from kappamodules.init.functional import init_xavier_uniform_zero_bias
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_scatter import segment_csr
class RansGinoMeshToGridSdf(nn.Module):
    """GINO mesh-to-grid encoder whose grid embedding also encodes a per-grid-point SDF value.

    Each grid point carries a scalar sdf value that is embedded with a small MLP
    and added to the grid's positional embedding before messages are passed from
    mesh nodes to grid points.
    """
    def __init__(self, dim, resolution):
        super().__init__()
        self.dim = dim
        self.resolution = resolution
        self.num_grid_points = int(np.prod(resolution))
        # embeds the scalar sdf value into dim features
        self.sdf_embed = nn.Sequential(
            nn.Linear(1, dim),
            nn.GELU(),
            nn.Linear(dim, dim),
        )
        self.pos_embed = ContinuousSincosEmbed(dim=dim, ndim=len(resolution))
        self.message = nn.Sequential(
            nn.Linear(2 * dim, 2 * dim),
            nn.GELU(),
            nn.Linear(2 * dim, dim),
            nn.GELU(),
            nn.Linear(dim, dim),
        )
    def forward(self, mesh_pos, sdf, grid_pos, mesh_to_grid_edges):
        """
        Args:
            mesh_pos: mesh node coordinates, shape (num_mesh_points, ndim).
            sdf: per-grid-point scalar values, flattened to (batch_size * num_grid_points, 1).
            grid_pos: flattened grid coordinates, shape (batch_size * num_grid_points, ndim).
            mesh_to_grid_edges: (num_edges, 2) pairs of (grid index, mesh index).
        Returns:
            dense latent grid of shape (batch_size, num_grid_points, dim).
        """
        assert mesh_pos.ndim == 2
        assert grid_pos.ndim == 2
        assert mesh_to_grid_edges.ndim == 2
        assert len(grid_pos) % self.num_grid_points == 0
        # embed mesh
        mesh_pos = self.pos_embed(mesh_pos)
        # embed grid
        grid_pos_embed = self.pos_embed(grid_pos)
        # flatten sdf -> embed SDF
        sdf = sdf.view(-1, 1)
        sdf_embed = self.sdf_embed(sdf)
        # create grid embedding (positional embedding of grid positions + SDF embedding)
        grid_embed = grid_pos_embed + sdf_embed
        # create message input: concat(source mesh embedding, target grid embedding)
        grid_idx, mesh_idx = mesh_to_grid_edges.unbind(1)
        x = torch.concat([mesh_pos[mesh_idx], grid_embed[grid_idx]], dim=1)
        x = self.message(x)
        # accumulate messages
        # indptr is a tensor of indices between which to aggregate
        # i.e. a tensor of [0, 2, 5] would result in [src[0] + src[1], src[2] + src[3] + src[4]]
        dst_indices, counts = grid_idx.unique(return_counts=True)
        # first index has to be 0 + add padding for target indices that don't occur
        padded_counts = torch.zeros(len(grid_embed) + 1, device=counts.device, dtype=counts.dtype)
        padded_counts[dst_indices + 1] = counts
        indptr = padded_counts.cumsum(dim=0)
        x = segment_csr(src=x, indptr=indptr, reduce="mean")
        # convert to dense tensor (dim last)
        x = x.reshape(-1, *self.resolution, self.dim)
        x = einops.rearrange(x, "batch_size ... dim -> batch_size (...) dim")
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/cfd_pool.py | src/modules/gno/cfd_pool.py | import einops
import numpy as np
import torch
from kappamodules.layers import ContinuousSincosEmbed, LinearProjection, Residual
from kappamodules.init import init_xavier_uniform_zero_bias, init_truncnormal_zero_bias
from torch import nn
from torch_scatter import segment_csr
class CfdPool(nn.Module):
    """Pool mesh node features onto a fixed number of supernodes per sample.

    Node features plus a sin/cos positional embedding are message-passed along
    the provided (supernode <- node) edges and mean-aggregated per supernode,
    yielding a dense (batch_size, num_supernodes, hidden_dim) tensor.
    """
    def __init__(self, input_dim, hidden_dim, ndim=2, init_weights="xavier_uniform"):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.ndim = ndim
        self.init_weights = init_weights
        self.input_proj = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim),
        )
        self.pos_embed = ContinuousSincosEmbed(dim=hidden_dim, ndim=ndim)
        self.message = nn.Sequential(
            nn.Linear(hidden_dim * 2, hidden_dim * 2),
            nn.GELU(),
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim),
        )
        self.output_dim = hidden_dim
        self.reset_parameters()
    def reset_parameters(self):
        # initialize all linear layers according to the configured scheme
        if self.init_weights == "xavier_uniform":
            self.apply(init_xavier_uniform_zero_bias)
        elif self.init_weights == "truncnormal":
            self.apply(init_truncnormal_zero_bias)
        else:
            raise NotImplementedError
    def forward(self, x, mesh_pos, mesh_edges, batch_idx):
        """
        Args:
            x: node features, shape (num_nodes, input_dim).
            mesh_pos: node coordinates, shape (num_nodes, ndim).
            mesh_edges: (num_edges, 2) pairs of (destination/supernode index, source index).
            batch_idx: per-node sample index, shape (num_nodes,).
        Returns:
            dense tensor of shape (batch_size, num_supernodes, hidden_dim).
        """
        assert x.ndim == 2
        assert mesh_pos.ndim == 2
        assert mesh_edges.ndim == 2
        # embed mesh
        x = self.input_proj(x) + self.pos_embed(mesh_pos)
        # create message input
        dst_idx, src_idx = mesh_edges.unbind(1)
        x = torch.concat([x[src_idx], x[dst_idx]], dim=1)
        x = self.message(x)
        # accumulate messages
        # indptr is a tensor of indices between which to aggregate
        # i.e. a tensor of [0, 2, 5] would result in [src[0] + src[1], src[2] + src[3] + src[4]]
        dst_indices, counts = dst_idx.unique(return_counts=True)
        # first index has to be 0
        # NOTE: padding for target indices that don't occur is not needed as self-loop is always present
        padded_counts = torch.zeros(len(counts) + 1, device=counts.device, dtype=counts.dtype)
        padded_counts[1:] = counts
        indptr = padded_counts.cumsum(dim=0)
        x = segment_csr(src=x, indptr=indptr, reduce="mean")
        # sanity check: dst_indices has len of batch_size * num_supernodes and has to be divisible by batch_size
        # if num_supernodes is not set in dataset this assertion fails
        # NOTE(review): batch_size is a 0-dim tensor here; einops/modulo rely on implicit int conversion -- confirm
        batch_size = batch_idx.max() + 1
        assert dst_indices.numel() % batch_size == 0
        # convert to dense tensor (dim last)
        x = einops.rearrange(
            x,
            "(batch_size num_supernodes) dim -> batch_size num_supernodes dim",
            batch_size=batch_size,
        )
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/__init__.py | src/modules/gno/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/cfd_interpolate_grid_to_mesh.py | src/modules/gno/cfd_interpolate_grid_to_mesh.py | import einops
import torch
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_scatter import segment_csr
import torch.nn.functional as F
class CfdInterpolateGridToMesh(nn.Module):
    """Sample a dense latent grid at continuous query positions via grid_sample."""

    @staticmethod
    def forward(x, query_pos):
        # positions must already be normalized to grid_sample's [-1, 1] range
        assert torch.all(query_pos.abs() <= 1)
        if query_pos.ndim != 3:
            raise NotImplementedError
        # query_pos: (batch_size, num_query_pos, ndim)
        # grid_sample needs a dense 4d sampling grid -> add a singleton width axis
        grid = query_pos.unsqueeze(2)
        # grid_sample indexes coordinates in x-first order -> reverse the coordinate axis
        grid = grid.flip(-1)
        sampled = F.grid_sample(input=x, grid=grid, align_corners=False)
        # (batch_size, dim, num_query_pos, 1) -> (batch_size * num_query_pos, dim)
        batch_size, dim, num_query_pos, _ = sampled.shape
        return sampled.squeeze(-1).transpose(1, 2).reshape(batch_size * num_query_pos, dim)
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/rans_pool.py | src/modules/gno/rans_pool.py | import einops
import numpy as np
import torch
from kappamodules.layers import ContinuousSincosEmbed, LinearProjection, Residual
from kappamodules.init import init_xavier_uniform_zero_bias, init_truncnormal_zero_bias
from torch import nn
from torch_scatter import segment_csr
class RansPool(nn.Module):
    """Pool mesh geometry onto a fixed number of supernodes per sample.

    Like CfdPool but without input features: nodes are represented purely by a
    sin/cos embedding of their positions, message-passed along the provided
    (supernode <- node) edges and mean-aggregated per supernode.
    """
    def __init__(self, hidden_dim, ndim, init_weights="xavier_uniform"):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.ndim = ndim
        self.init_weights = init_weights
        self.pos_embed = ContinuousSincosEmbed(dim=hidden_dim, ndim=ndim)
        self.message = nn.Sequential(
            nn.Linear(hidden_dim * 2, hidden_dim * 2),
            nn.GELU(),
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim),
        )
        self.output_dim = hidden_dim
        self.reset_parameters()
    def reset_parameters(self):
        # initialize all linear layers according to the configured scheme
        if self.init_weights == "xavier_uniform":
            self.apply(init_xavier_uniform_zero_bias)
        elif self.init_weights == "truncnormal":
            self.apply(init_truncnormal_zero_bias)
        else:
            raise NotImplementedError
    def forward(self, mesh_pos, mesh_edges, batch_idx):
        """
        Args:
            mesh_pos: node coordinates, shape (num_nodes, ndim).
            mesh_edges: (num_edges, 2) pairs of (destination/supernode index, source index).
            batch_idx: per-node sample index, shape (num_nodes,).
        Returns:
            dense tensor of shape (batch_size, num_supernodes, hidden_dim).
        """
        assert mesh_pos.ndim == 2
        assert mesh_edges.ndim == 2
        # embed mesh
        x = self.pos_embed(mesh_pos)
        # create message input
        dst_idx, src_idx = mesh_edges.unbind(1)
        x = torch.concat([x[src_idx], x[dst_idx]], dim=1)
        x = self.message(x)
        # accumulate messages
        # indptr is a tensor of indices between which to aggregate
        # i.e. a tensor of [0, 2, 5] would result in [src[0] + src[1], src[2] + src[3] + src[4]]
        dst_indices, counts = dst_idx.unique(return_counts=True)
        # first index has to be 0
        # NOTE: padding for target indices that don't occur is not needed as self-loop is always present
        padded_counts = torch.zeros(len(counts) + 1, device=counts.device, dtype=counts.dtype)
        padded_counts[1:] = counts
        indptr = padded_counts.cumsum(dim=0)
        x = segment_csr(src=x, indptr=indptr, reduce="mean")
        # sanity check: dst_indices has len of batch_size * num_supernodes and has to be divisible by batch_size
        # if num_supernodes is not set in dataset this assertion fails
        batch_size = batch_idx.max() + 1
        assert dst_indices.numel() % batch_size == 0
        # convert to dense tensor (dim last)
        x = einops.rearrange(
            x,
            "(batch_size num_supernodes) dim -> batch_size num_supernodes dim",
            batch_size=batch_size,
        )
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/gno/cfd_interpolate_mesh_to_grid.py | src/modules/gno/cfd_interpolate_mesh_to_grid.py | import einops
import numpy as np
import torch
from kappamodules.layers import ContinuousSincosEmbed, LinearProjection, Residual
from torch import nn
from torch_scatter import segment_csr
from kappamodules.init import init_xavier_uniform_zero_bias, init_truncnormal_zero_bias
from torch_geometric.nn.unpool.knn_interpolate import knn_interpolate
class CfdInterpolateMeshToGrid(nn.Module):
    """kNN-interpolate sparse mesh node features onto a regular grid."""

    @staticmethod
    def forward(x, mesh_pos, grid_pos, batch_idx):
        assert x.ndim == 2
        assert mesh_pos.ndim == 2
        assert grid_pos.ndim == 2
        num_samples = batch_idx.max() + 1
        # every sample shares the same regular grid -> equal number of grid points each
        assert len(grid_pos) % num_samples == 0
        points_per_sample = len(grid_pos) // num_samples
        # batch assignment for the flattened grid positions
        grid_batch = torch.arange(num_samples, device=x.device).repeat_interleave(points_per_sample)
        return knn_interpolate(
            x=x,
            pos_x=mesh_pos,
            pos_y=grid_pos,
            batch_x=batch_idx,
            batch_y=grid_batch,
        )
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/mesh_embed/baseline_mesh_embed.py | src/modules/mesh_embed/baseline_mesh_embed.py | import einops
import torch
from kappamodules.init import init_xavier_uniform_zero_bias
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from modules.graph.gnn_layer import GNNLayer
class BaselineMeshEmbed(nn.Module):
    """Embed a mesh graph and read out features at a fixed regular grid.

    Each sample's node list is expected to start with its ``resolution[0] * resolution[1]``
    grid nodes. Grid nodes are initialized from an MLP of their positional embedding,
    message passing mixes mesh information into them, and only the grid nodes are
    returned as a dense (batch_size, num_grid_points, dim) tensor.
    """

    def __init__(self, dim, depth, resolution, input_dim):
        super().__init__()
        self.dim = dim
        assert depth >= 1
        self.depth = depth
        assert len(resolution) == 2
        self.resolution = resolution
        self.num_grid_points = self.resolution[0] * self.resolution[1]
        # kept for backward compatibility (non-persistent); indices are now built in _grid_node_indices
        self.register_buffer("grid_points_arange", torch.arange(self.num_grid_points), persistent=False)
        self.proj = nn.Linear(input_dim, dim)
        self.pos_embed = ContinuousSincosEmbed(dim=dim, ndim=len(resolution))
        self.pos_mlp = nn.Sequential(
            nn.Linear(dim, dim),
            nn.SiLU(),
            nn.Linear(dim, dim),
        )
        self.gnn_layers = nn.ModuleList([
            GNNLayer(input_dim=dim, hidden_dim=dim)
            for _ in range(depth)
        ])
        self.reset_parameters()

    def reset_parameters(self):
        init_xavier_uniform_zero_bias(self.proj)
        self.pos_mlp.apply(init_xavier_uniform_zero_bias)

    @staticmethod
    def _grid_node_indices(counts, num_grid_points):
        """Indices of the grid nodes within the flattened batch.

        ``counts[i]`` is the total number of nodes of sample ``i``; the first
        ``num_grid_points`` nodes of each sample are its grid nodes.
        BUGFIX: the start offset of sample ``i`` is the exclusive cumulative sum
        ``cumsum(counts) - counts``; the previous ``cumsum(counts) - counts[0]``
        was only correct when every sample has the same node count.
        """
        starts = (counts.cumsum(dim=0) - counts).repeat_interleave(num_grid_points)
        return torch.arange(num_grid_points, device=counts.device).repeat(len(counts)) + starts

    def forward(self, x, pos, batch_idx, edge_index):
        """
        Args:
            x: node features, shape (num_nodes, input_dim).
            pos: node coordinates, shape (num_nodes, 2).
            batch_idx: per-node sample index, shape (num_nodes,).
            edge_index: (num_edges, 2) edge list, transposed for the GNN layers.
        Returns:
            grid node features, shape (batch_size, num_grid_points, dim).
        """
        # get indices of grid nodes (per-sample start offset + local grid index)
        _, counts = batch_idx.unique(return_counts=True)
        grid_pos_idx = self._grid_node_indices(counts, self.num_grid_points)
        # project input to dim
        x = self.proj(x)
        # add pos embedding
        pos_embed = self.pos_embed(pos)
        # initialize grid nodes with MLP(pos_embed)
        x[grid_pos_idx] = self.pos_mlp(pos_embed[grid_pos_idx])
        # add pos_embed
        x = x + pos_embed
        # message passing
        for gnn_layer in self.gnn_layers:
            x = gnn_layer(x, pos, edge_index.T)
        # select grid nodes
        x = x[grid_pos_idx]
        # convert to dense tensor
        x = einops.rearrange(
            x,
            "(batch_size num_grid_points) dim -> batch_size num_grid_points dim",
            num_grid_points=self.num_grid_points,
        )
        return x
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/mesh_embed/gnn_mesh_embed_v2.py | src/modules/mesh_embed/gnn_mesh_embed_v2.py | import torch
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_geometric.data import Data
from modules.graph.sag_pool import SAGPoolingFixedNumNodes
from modules.graph.gnn_layer import GNNLayer
class GNNMeshEmbedV2(torch.nn.Module):
    """Embed a mesh graph and reduce it to a fixed number of nodes via SAG pooling."""
    def __init__(self, input_dim, gnn_dim, pool_dim, output_dim, num_output_nodes):
        super().__init__()
        self.input_dim = input_dim
        self.gnn_dim = gnn_dim
        self.pool_dim = pool_dim
        # falls back to pool_dim when output_dim is None (or otherwise falsy)
        self.output_dim = output_dim or pool_dim
        self.num_output_nodes = num_output_nodes
        self.gnn_proj = nn.Linear(input_dim, gnn_dim)
        # sin/cos embedding of 2d node coordinates
        self.pos_embed = ContinuousSincosEmbed(dim=gnn_dim, ndim=2)
        self.gnn_layer = GNNLayer(input_dim=gnn_dim, hidden_dim=gnn_dim)
        self.pool_proj = nn.Linear(gnn_dim, pool_dim)
        self.pool = SAGPoolingFixedNumNodes(pool_dim, num_output_nodes=num_output_nodes)
        self.out_proj = nn.Linear(pool_dim, self.output_dim)
    def forward(self, x, pos, edge_index, batch_idx):
        """Returns (pooled node features, pooled batch assignment)."""
        # embed + GNN
        x = self.gnn_proj(x)
        x = x + self.pos_embed(pos)
        x = self.gnn_layer(x, pos, edge_index.T)
        # pool down to num_output_nodes per sample
        x = self.pool_proj(x)
        pool_result = self.pool(x, edge_index.T, batch=batch_idx)
        # x_pool, edge_index_pool, edge_attr_pool, batch_pool, perm, score = pool_result
        x_pool, _, _, batch_pool, _, _ = pool_result
        x_pool = self.out_proj(x_pool)
        return x_pool, batch_pool
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/mesh_embed/gnn_mesh_embed.py | src/modules/mesh_embed/gnn_mesh_embed.py | import torch
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_geometric.data import Data
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.pool import SAGPooling
from modules.graph.sag_pool import SAGPoolingFixedNumNodes
class GNNLayer(MessagePassing):
    """Single message-passing layer: messages built from both endpoint features plus their
    euclidean distance, followed by a residual node update."""

    def __init__(self, in_features, hidden_features):
        super().__init__()
        # message MLP input: features of both endpoints + 1 scalar distance
        self.message_net = nn.Sequential(
            nn.Linear(2 * in_features + 1, hidden_features),
            nn.SiLU(),
        )
        # update MLP input: current node features + aggregated message
        self.update_net = nn.Sequential(
            nn.Linear(in_features + hidden_features, hidden_features),
            nn.SiLU(),
        )

    def forward(self, x, pos, edge_index):
        """ Propagate messages along edges """
        x = self.propagate(edge_index, x=x, pos=pos)
        return x

    # noinspection PyMethodOverriding
    def message(self, x_i, x_j, pos_i, pos_j):
        """ Message update """
        # concat endpoint features with the euclidean distance between their positions
        msg_input = torch.cat((x_i, x_j, torch.sqrt(torch.sum((pos_i - pos_j) ** 2, dim=1)).unsqueeze(dim=1)), dim=-1)
        message = self.message_net(msg_input)
        return message

    # noinspection PyMethodOverriding
    def update(self, message, x, pos):
        """ Node update (residual) """
        x = x + self.update_net(torch.cat((x, message), dim=-1))
        return x

    def message_and_aggregate(self, adj_t):
        # sparse-tensor fast path deliberately unsupported
        raise NotImplementedError

    def edge_update(self):
        raise NotImplementedError
class GNNMeshEmbed(torch.nn.Module):
    """Embed a mesh graph via (optional) message passing followed by SAG pooling.

    Exactly one of pool_ratio / num_output_nodes has to be given: the former pools to a
    fraction of the input nodes, the latter to a fixed node count.
    """

    def __init__(
            self,
            in_features=3,
            out_features=None,
            hidden_features=32,
            use_gnn=True,
            pool_ratio=None,
            num_output_nodes=None,
    ):
        super().__init__()
        assert (pool_ratio is None) ^ (num_output_nodes is None),\
            "GnnMeshEmbed requires pool_ratio or num_output_nodes"
        self.in_features = in_features
        # default output size to the hidden size
        self.out_features = out_features if out_features else hidden_features
        self.hidden_features = hidden_features
        self.use_gnn = use_gnn
        self.pool_ratio = pool_ratio
        self.num_output_nodes = num_output_nodes
        if use_gnn:
            self.gnn_layer = GNNLayer(in_features=self.hidden_features, hidden_features=self.hidden_features)
        else:
            self.gnn_layer = None
        self.pos_embed = ContinuousSincosEmbed(dim=self.hidden_features, ndim=2)
        self.embedding_proj = nn.Linear(self.in_features, self.hidden_features)
        self.output_proj = nn.Linear(self.hidden_features, self.out_features)
        # fixed-size pooling when num_output_nodes is given, ratio-based pooling otherwise
        if num_output_nodes is None:
            self.pool = SAGPooling(self.hidden_features, ratio=pool_ratio)
        else:
            self.pool = SAGPoolingFixedNumNodes(self.hidden_features, num_output_nodes=self.num_output_nodes)

    def forward(self, x, pos, edge_index, batch_idx):
        """Return (pooled_features, pooled_batch_idx)."""
        # project node features (e.g. v_x, v_y, p) into hidden space and add positional embedding
        hidden = self.embedding_proj(x) + self.pos_embed(pos)
        if self.gnn_layer is not None:
            hidden = self.gnn_layer(hidden, pos, edge_index.T)
        # pool to the target node count, then project to the output size
        # pool returns (x_pool, edge_index_pool, edge_attr_pool, batch_pool, perm, score)
        x_pool, _, _, batch_pool, _, _ = self.pool(hidden, edge_index.T, batch=batch_idx)
        return self.output_proj(x_pool), batch_pool
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/mesh_embed/__init__.py | src/modules/mesh_embed/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/modules/mesh_embed/dummy_mesh_embed.py | src/modules/mesh_embed/dummy_mesh_embed.py | import math
import einops
import torch.nn.functional as F
from kappamodules.init import init_xavier_uniform_zero_bias
from kappamodules.layers import ContinuousSincosEmbed
from torch import nn
from torch_geometric.nn.pool import SAGPooling
import torch
class DummyMeshEmbed(nn.Module):
    """Minimal mesh embed baseline: a linear projection followed by ratio-based SAG pooling."""

    def __init__(self, in_features, hidden_features, pool_ratio):
        super().__init__()
        self.in_features = in_features
        self.hidden_features = hidden_features
        self.pool_ratio = pool_ratio
        self.proj = nn.Linear(in_features, hidden_features)
        self.pool = SAGPooling(hidden_features, ratio=pool_ratio)
        self.reset_parameters()

    def reset_parameters(self):
        """(Re)initialize the projection with xavier-uniform weights and zero bias."""
        init_xavier_uniform_zero_bias(self.proj)

    def forward(self, x, pos, edge_index, batch_idx):
        """Project node features and pool; returns (pooled_features, pooled_batch_idx)."""
        projected = self.proj(x)
        pool_result = self.pool(projected, edge_index.T, batch=batch_idx)
        x_pool, _, _, batch_pool, _, _ = pool_result
        return x_pool, batch_pool
class WandbConfig:
    """Validated configuration for Weights & Biases logging."""

    # supported logging modes
    MODES = ["disabled", "online", "offline"]

    def __init__(self, mode: str, host: str = None, entity: str = None, project: str = None):
        """mode must be one of MODES; host/entity/project are required unless mode == "disabled"."""
        assert mode in self.MODES
        self.mode = mode
        if not self.is_disabled:
            assert host is not None and isinstance(host, str), f"wandb host is required (got '{host}')"
            # bugfix: the entity error message previously interpolated 'project' instead of 'entity'
            assert entity is not None and isinstance(entity, str), f"wandb entity is required (got '{entity}')"
            assert project is not None and isinstance(project, str), f"wandb project is required (got '{project}')"
        self.host = host
        self.entity = entity
        self.project = project

    @property
    def is_disabled(self) -> bool:
        return self.mode == "disabled"

    @property
    def is_offline(self) -> bool:
        return self.mode == "offline"

    @property
    def is_online(self) -> bool:
        return self.mode == "online"
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/configs/static_config.py | src/configs/static_config.py | import logging
import os
from pathlib import Path
from typing import Optional
import kappaconfig as kc
from distributed.config import is_distributed, is_managed, get_world_size, get_local_rank
from .wandb_config import WandbConfig
class StaticConfig:
    """Machine/deployment specific settings resolved from a static_config.yaml file.

    Wraps a kappaconfig-resolved dict and exposes validated accessors for paths,
    wandb defaults, cudnn/profiling defaults and distributed settings.
    """

    def __init__(self, uri: str, datasets_were_preloaded: bool = False):
        self._uri = Path(uri).expanduser()
        self._config = kc.DefaultResolver(template_path=".").resolve(kc.from_file_uri(self._uri))
        # version without kappaconfig
        # with open(self._uri) as f:
        #     self._config = yaml.safe_load(f)
        self.datasets_were_preloaded = datasets_were_preloaded

    # region param checking
    def __check_bool(self, key):
        # shared validator: fetch key and assert that the value is a bool
        value = self._config[key]
        assert isinstance(value, bool), f"{key} {value} is not a bool"
        return value
    # endregion

    @property
    def account_name(self) -> str:
        return self._config["account_name"]

    @property
    def output_path(self) -> Path:
        # required key; directory must already exist
        assert "output_path" in self._config, f"output_path is not in static_config.yaml"
        path = Path(self._config["output_path"]).expanduser()
        assert path.exists(), f"output_path '{path}' doesn't exist"
        return path

    @property
    def model_path(self) -> Optional[Path]:
        # optional key; None when not configured
        if "model_path" not in self._config:
            return None
        path = Path(self._config["model_path"]).expanduser()
        assert path.exists(), f"model_path '{path}' doesn't exist"
        return path

    # region dataset
    def get_global_dataset_paths(self) -> dict:
        return self._config["global_dataset_paths"]

    def get_local_dataset_path(self) -> Path:
        # optional fast local storage (e.g. node-local SSD); None when not configured
        if "local_dataset_path" not in self._config:
            return None
        path = Path(self._config["local_dataset_path"]).expanduser()
        path.mkdir(exist_ok=True)
        # if for some reason local storage is read only -> use global storage by default
        # try:
        #     path.mkdir(exist_ok=True)
        # except PermissionError:
        #     logging.error(f"failed to create local_dataset_path directory")
        #     return None
        # change permissions on local
        if os.name == "posix" and "local_dataset_path_group" in self._config:
            group = self._config["local_dataset_path_group"]
            os.system(f"chgrp -R {group} {path}")
            os.system(f"chmod g+rwxs {path}")
        # managed runs (SLURM/PBS) have non-persistent storage
        # - all processes work with the same data -> wait for local rank0 to copy data via barrier
        # - processes are independent -> copy dataset for each process to a seperate local path
        #   - no guarantee that the processes use the same dataset
        #   - avoid race conditions
        # datasets can be preloaded before starting the run (useful when a node is split into multiple single-GPU runs)
        if not self.datasets_were_preloaded and is_managed() and get_world_size() == 1:
            path = path / f"localrank{get_local_rank()}"
            path.mkdir(exist_ok=True)
        return path

    def get_data_source_modes(self) -> dict:
        # per-dataset override whether to load from "global" or "local" storage
        if "data_source_modes" not in self._config:
            return {}
        data_source_modes = self._config["data_source_modes"]
        assert all(data_source_mode in ["global", "local"] for data_source_mode in data_source_modes.values())
        return data_source_modes
    # endregion

    @property
    def temp_path(self):
        local_dataset_path = self.get_local_dataset_path()
        if local_dataset_path is None:
            # temp folder in execution directory
            return Path("temp")
        # temp folder on SSD
        return local_dataset_path / f"REMOVE_ME"

    @property
    def mig_config(self):
        if "mig" not in self._config:
            return {}
        mig = self._config["mig"]
        # mig is mapping from hostnames to devices to MIG-IDS
        # badger:
        #   0: MIG-abcdef-ghi...
        assert isinstance(mig, dict), f"mig {mig} is not dict"
        for hostname, device_to_migid in mig.items():
            assert isinstance(hostname, str), f"hostnames should be strings (got {hostname})"
            assert isinstance(device_to_migid, dict), f"devices_to_migid should be dict (got {device_to_migid})"
            for device_idx, mig_id in device_to_migid.items():
                assert isinstance(device_idx, int), f"devices_to_migid keys should be int (got {device_idx})"
                assert isinstance(mig_id, str), f"devices_to_migid values should be str (got {mig_id})"
        return mig

    @property
    def default_wandb_mode(self) -> str:
        mode = self._config["default_wandb_mode"]
        assert mode in WandbConfig.MODES, f"default_wandb_mode '{mode}' not in {WandbConfig.MODES}"
        return mode

    # region deterministic/profiling
    @property
    def default_cudnn_benchmark(self) -> bool:
        return self.__check_bool("default_cudnn_benchmark")

    @property
    def default_cudnn_deterministic(self) -> bool:
        return self.__check_bool("default_cudnn_deterministic")

    @property
    def default_cuda_profiling(self) -> bool:
        return self.__check_bool("default_cuda_profiling")
    # endregion

    # region distributed
    @property
    def default_sync_batchnorm(self) -> bool:
        return self.__check_bool("default_sync_batchnorm")

    @property
    def master_port(self) -> int:
        master_port = self._config["master_port"]
        assert isinstance(master_port, int), f"master_port {master_port} is not an int"
        return master_port
    # endregion

    def log(self, verbose=False):
        """Log the resolved configuration; verbose additionally logs dataset paths and defaults."""
        logging.info("------------------")
        logging.info("STATIC CONFIG")
        logging.info(f"account_name: {self.account_name}")
        logging.info(f"output_path: {self.output_path}")
        # datasets
        if verbose:
            logging.info(f"global_dataset_paths:")
            for key, dataset_path in self._config["global_dataset_paths"].items():
                logging.info(f"  {key}: {Path(dataset_path).expanduser()}")
        if "local_dataset_path" in self._config:
            logging.info(f"local_dataset_path: {self._config['local_dataset_path']}")
            if os.name == "posix":
                # log available space on local disk
                for line in os.popen(f"df -h {self._config['local_dataset_path']}").read().strip().split("\n"):
                    logging.info(line)
        if "data_source_modes" in self._config:
            logging.info(f"data_source_modes:")
            for key, source_mode in self._config["data_source_modes"].items():
                logging.info(f"  {key}: {source_mode}")
        # other
        if verbose:
            logging.info(f"default_wandb_mode: {self.default_wandb_mode}")
            logging.info(f"default_cudnn_benchmark: {self.default_cudnn_benchmark}")
            logging.info(f"default_cudnn_deterministic: {self.default_cudnn_deterministic}")
            logging.info(f"default_cuda_profiling: {self.default_cuda_profiling}")
        # distributed
        if is_distributed():
            logging.info(f"master_port: {self.master_port}")
            logging.info(f"default_sync_batchnorm: {self.default_sync_batchnorm}")
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/configs/__init__.py | src/configs/__init__.py | python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false | |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/src/configs/cli_args.py | src/configs/cli_args.py | import logging
from argparse import ArgumentParser
from dataclasses import dataclass
from pathlib import Path
from .wandb_config import WandbConfig
@dataclass
class CliArgs:
    """Parsed command-line arguments of a run (produced by parse_run_cli_args)."""
    hp: str
    accelerator: str
    devices: str
    num_workers: int
    pin_memory: bool
    wandb_mode: str
    wandb_config: str
    cudnn_benchmark: bool
    cuda_profiling: bool
    testrun: bool
    minmodelrun: bool
    mindatarun: bool
    mindurationrun: bool
    name: str
    master_port: int
    sync_batchnorm: bool
    datasets_were_preloaded: bool
    resume_stage_id: str
    resume_checkpoint: str

    def log(self):
        """Log every field that was explicitly set (i.e. is not None)."""
        logging.info("------------------")
        logging.info(f"CLI ARGS")
        for key in vars(self):
            value = getattr(self, key)
            if value is None:
                continue
            logging.info(f"{key}: {value}")
def _hp(hp):
assert isinstance(hp, str)
path = Path(hp).expanduser().with_suffix(".yaml")
assert path.exists(), f"hp file '{hp}' doesn't exist"
return hp
def _devices(devices):
assert isinstance(devices, str)
if not devices.isdigit():
assert all(d.isdigit() for d in devices.split(",")), f"specify multiple devices as 0,1,2,3 (not {devices})"
return devices
def _wandb_config(wandb_config):
if wandb_config is not None:
assert isinstance(wandb_config, str)
path = (Path("wandb_configs").expanduser() / wandb_config).with_suffix(".yaml")
assert path.exists(), f"wandb_config file '{path}' doesn't exist"
return wandb_config
def parse_run_cli_args() -> CliArgs:
    """Parse known CLI arguments of a training run into a CliArgs instance.

    Uses parse_known_args, so unrecognized arguments are ignored. Boolean switches with a
    --no_* counterpart are tri-state: True / False / None (unset, defer to static config).
    """
    parser = ArgumentParser()
    parser.add_argument("--hp", type=_hp, required=True)
    parser.add_argument("--accelerator", type=str, default="gpu", choices=["cpu", "gpu"])
    parser.add_argument("--devices", type=_devices)
    parser.add_argument("--name", type=str)
    # dataloading
    parser.add_argument("--num_workers", type=int)
    pin_memory_group = parser.add_mutually_exclusive_group()
    pin_memory_group.add_argument("--pin_memory", action="store_true")
    pin_memory_group.add_argument("--no_pin_memory", action="store_false", dest="pin_memory")
    pin_memory_group.set_defaults(pin_memory=None)
    # wandb
    parser.add_argument("--wandb_mode", type=str, choices=WandbConfig.MODES)
    parser.add_argument("--wandb_config", type=_wandb_config)
    # cudnn benchmark
    cudnn_benchmark_group = parser.add_mutually_exclusive_group()
    cudnn_benchmark_group.add_argument("--cudnn_benchmark", action="store_true")
    cudnn_benchmark_group.add_argument("--no_cudnn_benchmark", action="store_false", dest="cudnn_benchmark")
    cudnn_benchmark_group.set_defaults(cudnn_benchmark=None)
    # cuda profiling
    cuda_profiling_group = parser.add_mutually_exclusive_group()
    cuda_profiling_group.add_argument("--cuda_profiling", action="store_true")
    cuda_profiling_group.add_argument("--no_cuda_profiling", action="store_false", dest="cuda_profiling")
    cuda_profiling_group.set_defaults(cuda_profiling=None)
    # testrun (the four variants are mutually exclusive)
    testrun_group = parser.add_mutually_exclusive_group()
    testrun_group.add_argument("--testrun", action="store_true")
    testrun_group.add_argument("--minmodelrun", action="store_true")
    testrun_group.add_argument("--mindatarun", action="store_true")
    testrun_group.add_argument("--mindurationrun", action="store_true")
    # distributed
    parser.add_argument("--master_port", type=int)
    # distributed - syncbatchnorm
    sync_batchnorm_group = parser.add_mutually_exclusive_group()
    sync_batchnorm_group.add_argument("--sync_batchnorm", action="store_true")
    sync_batchnorm_group.add_argument("--no_sync_batchnorm", action="store_false", dest="sync_batchnorm")
    sync_batchnorm_group.set_defaults(sync_batchnorm=None)
    # slurm
    parser.add_argument("--datasets_were_preloaded", action="store_true")
    # resume
    parser.add_argument("--resume_stage_id", type=str)
    parser.add_argument("--resume_checkpoint", type=str)
    return CliArgs(**vars(parser.parse_known_args()[0]))
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/data/shapenetcar/preprocess.py | data/shapenetcar/preprocess.py | # conda create --name open3d python=3.9
# pip install open3d
# pip install meshio
# pip install torch
# pip install tempfile
import os
import tempfile
from argparse import ArgumentParser
from pathlib import Path
import meshio
import numpy as np
import open3d as o3d
import torch
from tqdm import tqdm
def parse_args():
    """Return {"src": ..., "dst": ...} parsed from the command line (both required)."""
    parser = ArgumentParser()
    for arg_name, example in (("src", "/data/shapenet_car/training_data"),
                              ("dst", "/data/shapenet_car/preprocessed")):
        parser.add_argument(f"--{arg_name}", type=str, required=True, help=f"e.g. {example}")
    return vars(parser.parse_args())
def sdf(mesh, resolution):
    """Compute a signed distance field of a quad mesh on a regular grid.

    The quad cells are split into triangles, round-tripped through a temporary .ply file
    into open3d, and the SDF is evaluated on a resolution^3 grid spanning a fixed domain.
    Returns a float32 torch tensor of shape (resolution, resolution, resolution).
    """
    quads = mesh.cells_dict["quad"]
    # quads whose last vertex index is 0 produce a degenerate second triangle -> mask it out below
    idx = np.flatnonzero(quads[:, -1] == 0)
    # split every quad (a, b, c, d) into two triangles (a, b, c) and (a, c, d)
    out0 = np.empty((quads.shape[0], 2, 3), dtype=quads.dtype)
    out0[:, 0, 1:] = quads[:, 1:-1]
    out0[:, 1, 1:] = quads[:, 2:]
    out0[..., 0] = quads[:, 0, None]
    out0.shape = (-1, 3)
    mask = np.ones(out0.shape[0], dtype=bool)
    mask[idx * 2 + 1] = 0
    quad_to_tri = out0[mask]
    cells = [("triangle", quad_to_tri)]
    new_mesh = meshio.Mesh(mesh.points, cells)
    # round-trip through a temporary ply file because open3d reads from disk
    with tempfile.NamedTemporaryFile(delete=True, suffix=".ply") as tf:
        new_mesh.write(tf, file_format="ply")
        open3d_mesh = o3d.io.read_triangle_mesh(tf.name)
    open3d_mesh = o3d.t.geometry.TriangleMesh.from_legacy(open3d_mesh)
    scene = o3d.t.geometry.RaycastingScene()
    _ = scene.add_triangles(open3d_mesh)
    # fixed domain bounds (dataset specific)
    domain_min = torch.tensor([-2.0, -1.0, -4.5])
    domain_max = torch.tensor([2.0, 4.5, 6.0])
    tx = np.linspace(domain_min[0], domain_max[0], resolution)
    ty = np.linspace(domain_min[1], domain_max[1], resolution)
    tz = np.linspace(domain_min[2], domain_max[2], resolution)
    grid = np.stack(np.meshgrid(tx, ty, tz, indexing="ij"), axis=-1).astype(np.float32)
    return torch.from_numpy(scene.compute_signed_distance(grid).numpy()).float()
def main(src, dst):
    """Preprocess the ShapeNet-Car dataset: filter mesh points, save pressures and SDFs.

    Expects *src* to be the raw "training_data" folder (param0..param8 subfolders);
    writes torch tensors mirroring the source layout into *dst*.
    """
    src = Path(src).expanduser()
    assert src.exists(), f"'{src.as_posix()}' doesnt exist"
    assert src.name == "training_data"
    dst = Path(dst).expanduser()
    # assert not dst.exists(), f"'{dst.as_posix()}' exist"
    print(f"src: {src.as_posix()}")
    print(f"dst: {dst.as_posix()}")
    # collect uris for samples
    uris = []
    for i in range(9):
        param_uri = src / f"param{i}"
        for name in sorted(os.listdir(param_uri)):
            # param folders contain .npy/.py/txt files -> skip everything with an extension
            if "." in name:
                continue
            potential_uri = param_uri / name
            assert potential_uri.is_dir()
            uris.append(potential_uri)
    print(f"found {len(uris)} samples")
    # .vtk files contains points that dont belong to the mesh -> filter them out
    mesh_point_counts = []
    for uri in tqdm(uris):
        reluri = uri.relative_to(src)
        out = dst / reluri
        out.mkdir(exist_ok=True, parents=True)
        # keep only points referenced by at least one quad cell
        mesh = meshio.read(uri / "quadpress_smpl.vtk")
        assert len(mesh.cells) == 1
        cell_block = mesh.cells[0]
        assert cell_block.type == "quad"
        unique = np.unique(cell_block.data)
        mesh_point_counts.append(len(unique))
        mesh_points = torch.from_numpy(mesh.points[unique]).float()
        pressure = torch.from_numpy(np.load(uri / "press.npy")[unique]).float()
        torch.save(mesh_points, out / "mesh_points.th")
        torch.save(pressure, out / "pressure.th")
        # generate sdf at multiple resolutions
        for resolution in [32, 40, 48, 64, 80]:
            torch.save(sdf(mesh, resolution=resolution), out / f"sdf_res{resolution}.th")
    print("fin")


if __name__ == "__main__":
    main(**parse_args())
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/data/transientflow/MeshGenerator.py | data/transientflow/MeshGenerator.py | import gmsh
import math
import numpy as np
import random
import meshio
import argparse
def compute_centroid(points):
    """Return the [x, y] arithmetic mean of a sequence of 2d points."""
    count = len(points)
    x_sum = sum(x for x, _ in points)
    y_sum = sum(y for _, y in points)
    return [x_sum / count, y_sum / count]
def polar_angle(point, centroid):
    """Angle of *point* around *centroid* in radians (atan2 convention, range (-pi, pi])."""
    dx = point[0] - centroid[0]
    dy = point[1] - centroid[1]
    return math.atan2(dy, dx)
def pair_neighboring_elements(numbers):
    """Pair each element with its successor, wrapping the last element back to the first."""
    count = len(numbers)
    return [[numbers[i], numbers[(i + 1) % count]] for i in range(count)]
def sort_points(points):
    """Sort 2d points by descending polar angle around their centroid (clockwise order)."""
    # inline centroid + polar angle computation
    cx = sum(x for x, _ in points) / len(points)
    cy = sum(y for _, y in points) / len(points)
    keys = [-math.atan2(y - cy, x - cx) for x, y in points]
    order = np.argsort(keys)
    return [points[i] for i in order]
def rand_points(num_points, max_bound):
    """Sample *num_points* random points in polar coordinates within radius *max_bound*.

    Note: sampling r uniformly concentrates points near the center (not uniform by area).
    """
    # keep the random draw order (r, then theta, per point) for reproducibility under seeding
    polar_coords = [(random.uniform(0, max_bound), random.uniform(0, 2 * math.pi))
                    for _ in range(num_points)]
    return [(r * math.cos(theta), r * math.sin(theta)) for r, theta in polar_coords]
def compute_distance(p1, p2):
    """Squared euclidean distance between two 2d points (no square root is taken)."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return dx ** 2 + dy ** 2
def polygon_length(points):
    """Squared edge lengths of the polygon formed by sorting *points* clockwise.

    The last entry closes the polygon (last sorted point back to the first).
    """
    ordered = sort_points(points)
    closed_pairs = zip(ordered, ordered[1:] + ordered[:1])
    return [compute_distance(a, b) for a, b in closed_pairs]
def pair_neighboring_elements_internal(line_type):
    """Build gmsh point-tag groups for a chain of internal lines.

    Tags start at 5. A type-1 entry consumes one new tag (straight-line pair),
    a type-2 entry consumes two (three-point arc). The last entry wraps back to tag 5.
    """
    groups = []
    tag = 5
    for kind in line_type[:-1]:
        if kind == 1:
            groups.append([tag, tag + 1])
            tag += 1
        elif kind == 2:
            groups.append([tag, tag + 1, tag + 2])
            tag += 2
    last_kind = line_type[-1]
    if last_kind == 1:
        groups.append([tag, 5])
    elif last_kind == 2:
        groups.append([tag, tag + 1, 5])
    return groups
def calculate_points(total_length, distance_between_points):
    """Number of points obtained by placing one every *distance_between_points* along *total_length*.

    Raises ValueError for non-positive inputs.
    """
    if distance_between_points <= 0:
        raise ValueError("Distance between points must be greater than 0")
    if total_length <= 0:
        raise ValueError("Total length must be greater than 0")
    # truncate to full intervals, plus the starting point
    return int(total_length / distance_between_points) + 1
def generate_circle_points(n, radius, center=(0, 0)):
    """Return *n* points evenly spaced on a circle as an (n, 2) array, starting at angle 0."""
    theta = np.linspace(0, 2 * np.pi, n, endpoint=False)
    xs = center[0] + radius * np.cos(theta)
    ys = center[1] + radius * np.sin(theta)
    return np.stack((xs, ys), axis=-1)
def add_circle(x_center, y_center, n, radius):
    """Add a full circle (as *n* circle arcs) to the current gmsh model; returns the curve loop tag."""
    cp = generate_circle_points(n, radius, (x_center, y_center))
    center_point = gmsh.model.geo.addPoint(x_center, y_center, 0)
    cp_tags = []
    for p in cp:
        x, y = p
        cp_tags.append(gmsh.model.geo.addPoint(x, y, 0))
    line_tags = []
    # connect consecutive points with arcs around the center, wrapping back to the first point
    for i in range(len(cp_tags)):
        line_tags.append(gmsh.model.geo.addCircleArc(cp_tags[i % len(cp_tags)], center_point, cp_tags[(i + 1) % len(cp_tags)]))
    curve_loop_tag = gmsh.model.geo.add_curve_loop(line_tags)
    return curve_loop_tag
def add_rand_partial_circle(x_center, y_center, n, radius):
    """Add a random circular section (arcs closed by a straight chord) to the gmsh model.

    Returns the curve loop tag.
    NOTE(review): requires n > 20 so that randint(20, len(cp) - 1) has a valid range -- confirm callers.
    """
    cp = generate_circle_points(n, radius, (x_center, y_center))
    center_point = gmsh.model.geo.addPoint(x_center, y_center, 0)
    cp_tags = []
    print(len(cp))
    # pick a random contiguous section of the circle (at least 20 points, possibly wrapping around)
    section_length = random.randint(20, len(cp) - 1)
    section_start = random.randint(0, len(cp))
    cp = circular_slice(cp, section_start, section_length)
    for p in cp:
        x, y = p
        cp_tags.append(gmsh.model.geo.addPoint(x, y, 0))
    line_tags = []
    for i in range(len(cp_tags) - 1):
        line_tags.append(gmsh.model.geo.addCircleArc(cp_tags[i], center_point, cp_tags[(i + 1)]))
    # close the open section with a straight chord
    line_tags.append(gmsh.model.geo.addLine(cp_tags[-1], cp_tags[0]))
    curve_loop_tag = gmsh.model.geo.add_curve_loop(line_tags)
    return curve_loop_tag
def circular_slice(lst, start, length):
    """Take *length* elements from *lst* starting at *start*, wrapping around; returns an ndarray."""
    size = len(lst)
    stop = (start + length) % size
    if stop >= start:
        # contiguous slice, no wrap-around
        return np.array(lst[start:stop])
    # wrap-around: tail of the list followed by its head
    return np.concatenate((lst[start:], lst[:stop]))
def circles_overlap(circle1, circle2):
    """True if two circles given as (x, y, r) overlap, with a 0.01 safety margin."""
    x1, y1, r1 = circle1
    x2, y2, r2 = circle2
    center_gap = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
    return center_gap <= (r1 + r2 + 0.01)
def triangle_area(x1, y1, x2, y2, x3, y3):
    """Triangle area from vertex coordinates via the shoelace formula (works on scalars or arrays)."""
    twice_signed = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)
    return 0.5 * np.abs(twice_signed)
def generate_mesh(output_filename, n_points):
    """Generate a 2d channel mesh (extruded one cell in z) with *n_points* random circular obstacles.

    Writes the mesh to *output_filename* via gmsh and prints a simple area-ratio quality metric.
    """
    gmsh.initialize()
    gmsh.clear()
    gmsh.model.add('model')
    # rectangular channel domain corners
    points = [(-0.5, -0.5), (-0.5, 0.5), (1.0, -0.5), (1.0, 0.5)]
    curve_loops = []
    points = sort_points(points)
    print(points)
    bounding_box_tags = []
    for i, p in enumerate(points):
        x = p[0]
        y = p[1]
        bounding_box_tags.append(gmsh.model.geo.add_point(x, y, 0, tag=i + 1))
    line_pairs = pair_neighboring_elements([i + 1 for i in range(len(points))])
    lines = []
    # add lines
    for i, l in enumerate(line_pairs):
        lines.append(gmsh.model.geo.addLine(l[0], l[1]))
    curve_loops.append(gmsh.model.geo.add_curve_loop(lines))
    # rejection-sample non-overlapping circular obstacles inside the domain
    circles = []
    circle_curve_loops = []
    while len(circles) < n_points:
        current_circle = (random.uniform(-0.4, 0.4), random.uniform(-0.4, 0.4), random.uniform(0.02, 0.1))
        n = 3
        circle_overlap_flag = False
        for c in circles:
            if circles_overlap(c, current_circle):
                circle_overlap_flag = True
                break
        if circle_overlap_flag:
            continue
        circle_curve_loop = add_circle(current_circle[0], current_circle[1], n, current_circle[2])
        curve_loops.append(circle_curve_loop)
        circle_curve_loops.append(circle_curve_loop)
        circles.append(current_circle)
        print(len(circles), n_points)
    surface = []
    surface.append(gmsh.model.geo.addPlaneSurface(curve_loops))
    gmsh.model.geo.synchronize()
    # extrude a single layer in z to obtain a pseudo-2d mesh
    s = gmsh.model.geo.extrude([(2, 1)], 0, 0, 0.1, [1], [1], recombine=True)
    # name the boundary patches for the CFD solver
    gmsh.model.addPhysicalGroup(2, [s[0][1], 1], name="FrontBackPlane")
    gmsh.model.addPhysicalGroup(2, [s[3][1]], name="outflow")
    gmsh.model.addPhysicalGroup(2, [s[5][1]], name="inflow")
    gmsh.model.addPhysicalGroup(2, [s[2][1], s[4][1]], name="sidewalls")
    gmsh.model.addPhysicalGroup(2, [s[i][1] for i in range(6, len(s))], name="wall")
    gmsh.model.addPhysicalGroup(3, [1], name="internal")
    # refine near the obstacle walls via a distance-threshold size field
    gmsh.model.mesh.field.add("Distance", 1)
    gmsh.model.mesh.field.setNumbers(1, "SurfacesList", [s[i][1] for i in range(6, len(s))])
    gmsh.model.mesh.field.setNumber(1, "Sampling", 100)
    # res_min = 0.003
    res_min = random.uniform(0.003, 0.004)
    gmsh.model.mesh.field.add("Threshold", 2)
    gmsh.model.mesh.field.setNumber(2, "InField", 1)
    gmsh.model.mesh.field.setNumber(2, "SizeMin", res_min)
    gmsh.model.mesh.field.setNumber(2, "SizeMax", res_min * 2.9)
    gmsh.model.mesh.field.setNumber(2, "DistMin", 0)
    gmsh.model.mesh.field.setNumber(2, "DistMax", 0.1)
    gmsh.option.setNumber("Mesh.MeshSizeExtendFromBoundary", 0)
    gmsh.option.setNumber("Mesh.MeshSizeFromPoints", 0)
    gmsh.option.setNumber("Mesh.MeshSizeFromCurvature", 0)
    gmsh.model.mesh.field.setAsBackgroundMesh(2)
    gmsh.model.geo.synchronize()
    gmsh.model.mesh.generate(3)
    gmsh.write(output_filename)
    # quality report: max/min triangle area ratio of the z=0 plane
    msh = meshio.read(output_filename)
    triangles = msh.cells_dict['triangle'][(msh.points[msh.cells_dict['triangle']][:, :, -1] == 0)[:, 0]]
    mesh_points = msh.points
    t = mesh_points[triangles]
    x1 = t[:, 0, 0]
    y1 = t[:, 0, 1]
    x2 = t[:, 1, 0]
    y2 = t[:, 1, 1]
    x3 = t[:, 2, 0]
    y3 = t[:, 2, 1]
    area = triangle_area(x1, y1, x2, y2, x3, y3)
    print(f"mesh area ratio: {area.max()/area.min()}")
    print(mesh_points.shape)
if __name__ == "__main__":
    # CLI entry point: MeshGenerator.py <output_filename> <n_objects>
    parser = argparse.ArgumentParser()
    parser.add_argument('output_filename', type=str, help='output filename of mesh')
    parser.add_argument('n_objects', type=int, help='number of objects in case')
    args = parser.parse_args()
    output_filename = args.output_filename
    n_points = args.n_objects
    generate_mesh(output_filename, n_points)
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/data/transientflow/generateCase.py | data/transientflow/generateCase.py | import os
import argparse
import shutil
import random
import time
from tqdm import tqdm
import pickle
from fluidfoam import readscalar
from fluidfoam import readmesh
from fluidfoam import readvector
from sklearn.cluster import DBSCAN
import meshio
from shapely.geometry import Point,Polygon
import torch
import numpy as np
import multiprocessing
import matplotlib.pyplot as plt
import io
from PIL import Image
from multiprocessing import Process
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
from PyFoam.RunDictionary.ParsedParameterFile import ParsedBoundaryDict
from PyFoam.Execution.BasicRunner import BasicRunner
from PyFoam.Execution.ParallelExecution import LAMMachine
from MeshGenerator import generate_mesh,sort_points
def generate_object_mask(sol_dir, x_res, y_res):
    """Rasterize the obstacle geometry of a case's mesh onto a regular (y_res, x_res) grid.

    Clusters the z=0 wall boundary points with DBSCAN, builds one polygon per cluster and
    marks every grid cell inside a polygon. Returns (mask_tensor, num_clusters), or None if
    no clusters are found.
    NOTE(review): callers that tuple-unpack the result must handle the None case.
    """
    msh = meshio.read(sol_dir + "/mesh.msh")
    eps = 0.0075
    min_samples = 2
    dbscan = DBSCAN(eps=eps, min_samples=min_samples)
    ngridx = x_res
    ngridy = y_res
    # NOTE(review): result of this expression is unused
    msh.points[:, 1].max()
    xinterpmin = msh.points[:, 0].min()
    xinterpmax = msh.points[:, 0].max()
    yinterpmin = msh.points[:, 1].min()
    yinterpmax = msh.points[:, 1].max()
    xi = np.linspace(xinterpmin, xinterpmax, ngridx)
    yi = np.linspace(yinterpmin, yinterpmax, ngridy)
    xinterp, yinterp = np.meshgrid(xi, yi)
    # wall boundary points at z == 0, subsampled every other point
    wall_points = msh.points[msh.cells_dict['quad'][msh.cell_sets_dict['wall']['quad']]]
    wp_corrected = wall_points[(wall_points[:, :, 2] == 0)][:, :2][1:-1:2]
    wp_corrected = [(p[0], p[1]) for p in wp_corrected]
    wp_corrected = torch.tensor(wp_corrected)
    # one DBSCAN cluster per obstacle
    clusters = dbscan.fit_predict(wp_corrected)
    if len(set(clusters)) < 1:
        return None
    p_clusters = []
    for cluster_id in set(clusters):
        # close the ring by appending the first point of the cluster
        cluster_points = torch.cat([wp_corrected[clusters == cluster_id], wp_corrected[clusters == cluster_id][0].view(1, 2)])
        p_clusters.append(cluster_points)
    polygon_list = [Polygon(sort_points([(p[0].item(), p[1].item()) for p in p_clusters[i]])) for i in range(len(p_clusters))]
    interp = torch.tensor(np.stack((xinterp, yinterp), axis=2)).flatten(0, 1)
    # point-in-polygon test for every grid location
    object_mask = []
    for p in tqdm(interp):
        mask_value = 0
        for polygon in polygon_list:
            mask_value += polygon.contains(Point(p))
        object_mask.append(mask_value)
    object_mask = torch.tensor(object_mask).view(ngridy, ngridx).flip(0)
    return object_mask, len(set(clusters))
def readU(arg):
    """Read the velocity vector field of timestep *i* from the OpenFOAM case at *dest*.

    *arg* is a (i, dest) tuple so the function can be used with multiprocessing map.
    """
    i, dest = arg
    return torch.tensor(readvector(dest, str(i), 'U'))


def readp(arg):
    """Read the pressure scalar field of timestep *i*; *arg* is a (i, dest) tuple."""
    i, dest = arg
    return torch.tensor(readscalar(dest, str(i), 'p'))


def readPhi(arg):
    """Read the face-flux (phi) scalar field of timestep *i*; *arg* is a (i, dest) tuple."""
    i, dest = arg
    return torch.tensor(readscalar(dest, str(i), 'phi'))


# figure height in inches used by scatter_plot
plot_height = 5.0
def scatter_plot(arg):
    """Render timestep *i* of field *v* on the triangulated mesh and return it as a PIL image.

    *arg* is a (i, x, y, v, triangles, mesh_points) tuple so the function can be used
    with multiprocessing map. NOTE(review): x and y appear unused -- positions come from
    mesh_points; confirm before removing them from the tuple.
    """
    i, x, y, v, triangles, mesh_points = arg
    fig = plt.figure(figsize=(plot_height * 2.8, plot_height), dpi=100)
    plt.tripcolor(mesh_points[:, 0], mesh_points[:, 1], triangles, v[i], alpha=1.0, shading='flat', antialiased=True, linewidth=0.72, edgecolors='face')
    # render into an in-memory buffer instead of a file
    img_buf = io.BytesIO()
    fig.savefig(img_buf, format='png')
    plt.close(fig)
    # print(i)
    return Image.open(img_buf)
def prepareCase(src, dest, n_points, velocity, n_cores, x_res, y_res):
    """Set up an OpenFOAM case: copy the template, generate a mesh, patch dicts, decompose.

    Retries mesh generation until gmsh exits cleanly and the number of detected obstacles
    matches *n_points*. Returns the object mask produced by generate_object_mask.
    """
    # start from a fresh copy of the template case
    if os.path.exists(dest) and os.path.isdir(dest):
        shutil.rmtree(dest)
    destination = shutil.copytree(src, dest)
    # run gmsh in a subprocess (it can crash/hang); retry until mesh + detected objects are valid
    while True:
        process = Process(target=generate_mesh, args=(dest + "mesh.msh", n_points))
        try:
            process.start()
            process.join()
        except Exception as e:
            print("retry mesh generation")
        finally:
            process.terminate()
        # NOTE(review): generate_object_mask can return None (not a tuple) -- this unpacking
        # would raise in that case; confirm intended
        object_mask, n_detected_objects = generate_object_mask(dest, x_res, y_res)
        if process.exitcode == 0 and object_mask is not None and n_points == n_detected_objects:
            print("Mesh generated")
            break
        else:
            print("retry mesh generation")
            print("objects (expected vs detected):", n_points, n_detected_objects)
            time.sleep(3)
    # convert the gmsh mesh into OpenFOAM format
    runner = BasicRunner(argv=["gmshToFoam", "-case", dest, dest + "mesh.msh"], logname="logifle", noLog=True)
    runner.start()
    # the front/back plane of the extruded 2d mesh is an "empty" boundary
    f = ParsedBoundaryDict(dest + "constant/polyMesh/boundary")
    f['FrontBackPlane']['type'] = 'empty'
    f.writeFile()
    # set the initial uniform inflow velocity
    f = ParsedParameterFile(dest + "0/U")
    f['internalField'] = 'uniform (' + str(velocity) + ' 0 0)'
    f.writeFile()
    # configure and run domain decomposition for the parallel solver
    f = ParsedParameterFile(dest + "system/decomposeParDict")
    f['numberOfSubdomains'] = n_cores
    f.writeFile()
    runner = BasicRunner(argv=["decomposePar", "-case", dest], logname="logifle", noLog=True)
    runner.start()
    return object_mask
def get_current_case(dest):
    """Return 1 + the highest existing case_<n> directory number in *dest* (0 if none).

    NOTE(review): shadowed by a later redefinition of get_current_case in this module.
    """
    highest = -1
    for entry in os.listdir(dest):
        # print(entry)
        parts = entry.split('_')
        if parts[0] == 'case' and int(parts[1]) > highest:
            highest = int(parts[1])
    return highest + 1
def get_current_case_old(dest):
    """Deprecated variant of get_current_case(); kept for reference.

    Returns one past the highest case number among entries named
    'case_<n>' inside `dest` (0 when none exist).
    """
    # fix: removed the unused local `iterator = 1` left over from an old draft
    current_case_number = -1
    for d in os.listdir(dest):
        d = d.split('_')
        if d[0] == 'case':
            if int(d[1]) > current_case_number:
                current_case_number = int(d[1])
    return current_case_number+1
def get_current_case(parent_directory):
    """Return the lowest unused case number (searching from 1) in `parent_directory`.

    Scans sub-directories named 'case_<n>' and returns the smallest positive
    integer n that is not taken; returns 0 when no case directories exist yet.

    Note: this definition shadows the earlier `get_current_case(dest)` above,
    so it is the one actually used at runtime.
    """
    # only consider directories, not files
    directories = [
        d for d in os.listdir(parent_directory)
        if os.path.isdir(os.path.join(parent_directory, d))
    ]
    # extract the numerical suffix of every 'case_<n>' directory;
    # a set makes the membership test below O(1)
    existing_numbers = {
        int(d.split('_')[1])
        for d in directories
        if d.startswith("case_") and d[5:].isdigit()
    }
    # bug fix: previously `max(existing_numbers)` raised ValueError when the
    # parent directory contained entries but none of them were case folders
    if not existing_numbers:
        return 0
    # find the lowest missing case number, starting at 1
    for i in range(1, max(existing_numbers) + 2):
        if i not in existing_numbers:
            return i
def find_first_available_line(file_path):
    """Return the 1-based index of the first blank line in `file_path`.

    If the file has no blank line, a newline is appended to the file and
    the index of that freshly created line is returned.
    """
    with open(file_path, 'r') as fh:
        content = fh.readlines()
    # scan for the first line that is empty after stripping whitespace
    for position, text in enumerate(content, start=1):
        if not text.strip():
            print(f"Found empty line at line {position}")
            return position
    # no blank line present: create one at the end and persist the change
    position = len(content) + 1
    content.append('\n')
    with open(file_path, 'w') as fh:
        fh.writelines(content)
    print(f"Created empty line at line {position}")
    return position
def write_status_report(file_path, line_number, new_content):
    """Overwrite a single (1-based) line of `file_path` with `new_content`.

    Out-of-range line numbers are reported and leave the file untouched.
    """
    with open(file_path, 'r') as fh:
        rows = fh.readlines()
    # guard clause: reject line numbers outside the current file
    if line_number < 1 or line_number > len(rows):
        print(f"Invalid line number: {line_number}")
        return
    rows[line_number - 1] = new_content + '\n'  # keep proper line endings
    with open(file_path, 'w') as fh:
        fh.writelines(rows)
    print(f"Content written to line {line_number} in {file_path}")
def main():
    """Generate OpenFOAM flow simulations until `n_cases` cases exist in `dest`.

    Each iteration builds a random case (random object count and inflow
    velocity), runs pisoFoam with an adaptively reduced time step until it
    finishes, and exports the result (raw tensors, mesh archive, object mask,
    metadata pickle and a preview GIF) into the next free 'case_<n>' folder.
    Multiple instances of this script may run concurrently against the same
    `dest`; a case folder is claimed atomically via os.mkdir.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('n_objects', type=int, help='maximum number of circles/partial circles the case should have (min is 1)')
    parser.add_argument('n_cases', type=int, help='number of cases to be run')
    parser.add_argument('n_cores',type = int, help='number of CPU-cores to use for computation')
    parser.add_argument('empty_case',type=str, help='the empty openfoam case directory')
    parser.add_argument('dest', type=str, help='target directory for the OpenFOAM Cases')
    parser.add_argument('working_dir',type=str, help='working directory for OpenFoam Simulation')
    args = parser.parse_args()
    num_points = args.n_objects
    assert num_points>0, "n_objects < 1"
    # fixed output raster resolution for the object mask
    x_res = 384
    y_res = 256
    n_cores = args.n_cores
    assert n_cores>1, "at least two core should be used"
    #save_raw = args.save_raw
    #assert save_raw == 0 or save_raw == 1 , "save_raw is either 0 or 1"
    save_raw = 1
    mpiInformation = LAMMachine(nr=n_cores)
    n_cases = args.n_cases
    src = args.empty_case
    dest = args.dest
    work_dir = args.working_dir
    # ensure trailing path separator, paths below use plain concatenation
    dest = os.path.join(dest, '')
    work_dir = os.path.join(work_dir, '')
    current_case_number = get_current_case(dest)
    # candidate solver time steps; after each solver crash the next (smaller)
    # deltaT is tried
    delta_t = [0.05,0.025,0.01,0.005,0.0025,0.001,0.0005,0.00025,0.0001,0.00005,0.000025,0.00001]
    delta_t_index = 0
    print("Working directory is: " + work_dir)
    print("Cases are written to: " + dest)
    if not os.path.exists(work_dir):
        os.makedirs(work_dir)
    # keep producing cases until the target count is reached
    while current_case_number <= n_cases:
        print("current case: ",str(current_case_number))
        n_points = random.randint(1,num_points)
        velocity = random.uniform(0.01,0.06)
        nr_time_steps = 0
        crash_counter = 0
        object_mask = prepareCase(src,work_dir,n_points,velocity,n_cores,x_res,y_res)
        msh = meshio.read(work_dir+"/mesh.msh")
        # keep only the triangles that lie in the z == 0 plane (2D slice)
        triangles = msh.cells_dict['triangle'][(msh.points[msh.cells_dict['triangle']][:,:,-1] == 0)[:,0]]
        mesh_points = msh.points
        time.sleep(5)
        delta_t_index = 0
        f = ParsedParameterFile(work_dir+"system/controlDict")
        max_time_steps = f['endTime']
        f['deltaT'] = delta_t[delta_t_index]
        f.writeFile()
        # run the solver, shrinking deltaT after every crash; when even the
        # smallest deltaT fails, the IndexError restarts the outer loop with
        # a completely new random case
        try:
            while nr_time_steps < max_time_steps:
                delta_t_index += 1
                if crash_counter > 0:
                    f = ParsedParameterFile(work_dir+"system/controlDict")
                    f['deltaT'] = delta_t[delta_t_index]
                    f.writeFile()
                runner = BasicRunner(argv=["pisoFoam","-case",work_dir],logname="logifle",noLog=True,lam=mpiInformation)
                run_information = runner.start()
                # presumably the last simulated time reported by PyFoam
                # -- TODO confirm against BasicRunner's return value
                nr_time_steps = run_information['time']
                crash_counter += 1
        except IndexError:
            print("List out of bound, restarting outer loop")
            continue
        # merge the per-processor results back into a single case
        runner = BasicRunner(argv=["redistributePar","-reconstruct","-case",work_dir],logname="logifle",noLog=True,lam=mpiInformation)
        runner.start()
        current_case_number = get_current_case(dest)
        solution_dir = dest+"/case_"+str(current_case_number)+"/"
        # claim the next free case folder; os.mkdir is atomic, so concurrent
        # generator processes cannot claim the same folder
        for tries in range(10):
            try:
                os.mkdir(solution_dir)
            except OSError as error:
                print("case already claimed by other process, retry")
                current_case_number = get_current_case(dest)
                solution_dir = dest+"/case_"+str(current_case_number)+"/"
            else:
                print("case available, claiming...")
                break
        # drop PyFoam bookkeeping files before exporting
        os.remove(work_dir+"PyFoamState.CurrentTime")
        os.remove(work_dir+"PyFoamState.LastOutputSeen")
        os.remove(work_dir+"PyFoamState.StartedAt")
        os.remove(work_dir+"PyFoamState.TheState")
        #os.remove(work_dir+"WorkingDirectory.foam")
        for item in os.listdir(work_dir):
            if item.endswith(".foam"):
                os.remove(os.path.join(work_dir,item))
        # convert OpenFOAM to interpolated image
        #os.system(f"python openfoam_to_image.py --src {work_dir} --dst {solution_dir} --grid_height {y_res} --grid_width {x_res}")
        #object_mask = generate_object_mask(work_dir,x_res,y_res)
        torch.save(object_mask,solution_dir+"object_mask.th")
        # recover the inflow velocity from the 'uniform (vx 0 0)' string
        with ParsedParameterFile(work_dir+"0/U") as f:
            initial_velocity = float(str(f['internalField']).split('(')[1].split(" ")[0])
        simulation_description = {"initial_velocity": initial_velocity, "n_objects": n_points}
        with open(solution_dir+"simulation_description.pkl",'wb') as handle:
            pickle.dump(simulation_description, handle)
        #shutil.make_archive(solution_dir+"mesh",dest+"mesh.msh",format='bztar')
        shutil.make_archive(base_name=solution_dir+"mesh",
                            format='bztar',
                            root_dir=work_dir,
                            base_dir="mesh.msh")
        x,y,z = readmesh(work_dir)
        # read all simulated time steps in parallel
        pool_obj = multiprocessing.Pool()
        U = pool_obj.map(readU,[(i,work_dir) for i in range(1,max_time_steps+1)])
        p = pool_obj.map(readp,[(i,work_dir) for i in range(1,max_time_steps+1)])
        pool_obj.close()
        U_stacked = torch.stack(U)
        x = torch.tensor(x)
        y = torch.tensor(y)
        # velocity magnitude per time step and mesh point
        v = torch.sqrt(U_stacked[:,0,:]**2+U_stacked[:,1,:]**2+U_stacked[:,2,:]**2)
        if save_raw == 1:
            # one '<8-digit index>_mesh.th' tensor per time step:
            # rows are (u, v, p) over the mesh points
            for i in range(len(U)):
                local_U = U[i].view(3,-1)[:2]
                local_p = p[i].view(1,-1)
                torch.save(torch.cat([local_U,local_p],dim=0),solution_dir+('{:0>8}'.format(str(i)))+"_mesh.th")
            torch.save(x,solution_dir+"x.th")
            torch.save(y,solution_dir+"y.th")
        shutil.rmtree(work_dir)
        # render a preview GIF of the velocity magnitude in parallel
        pool_obj = multiprocessing.Pool()
        img_list = pool_obj.map(scatter_plot,[(i,x,y,v,triangles,mesh_points) for i in range(U_stacked.shape[0])])
        pool_obj.close()
        # NOTE(review): these private PIL attributes appear to force each PNG
        # to be treated as a single-frame image before the GIF is assembled
        # -- confirm this workaround is still required with current Pillow
        for i in range(len(img_list)):
            img_list[i]._min_frame = 0
            img_list[i].n_frames = 1
            img_list[i]._PngImageFile__frame = 0
        img_list[0].save(solution_dir+'U.gif',format='GIF',append_images=img_list[1:],save_all=True,duration=50,loop=0)
        print("solution directory: ", solution_dir)
        current_case_number = get_current_case(dest)
        time.sleep(5)
if __name__=="__main__":
    main()
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/data/transientflow/cfddataset_from_openfoam.py | data/transientflow/cfddataset_from_openfoam.py | import os
import pickle
import shutil
from argparse import ArgumentParser
from pathlib import Path
import torch
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
def parse_args():
    """Parse the command line into a plain dict of options (all required)."""
    cli = ArgumentParser()
    cli.add_argument("--src", type=str, required=True, help="/OpenFOAM/")
    cli.add_argument("--dst", type=str, required=True, help="/publicdata/")
    cli.add_argument("--num_workers", type=int, required=True)
    return vars(cli.parse_args())
class CopyDataset(Dataset):
    """torch Dataset that copies one simulation case per index.

    Each __getitem__ call converts the heavy per-timestep tensors of a case
    to float16 and copies them (plus metadata files) from `src` to `dst`.
    The int return value is a status flag (0 = copied, 1 = skipped or files
    missing), not training data: the Dataset interface is used only so a
    DataLoader can parallelize the copying across worker processes.
    """
    def __init__(self, src, dst, case_names):
        super().__init__()
        # src/dst are pathlib.Path roots; case_names the folders to copy
        self.src = src
        self.dst = dst
        self.case_names = case_names
    def __len__(self):
        return len(self.case_names)
    def __getitem__(self, idx):
        """Copy case `idx`; returns 0 on success, 1 if skipped/incomplete."""
        case_name = self.case_names[idx]
        src = self.src / case_name
        dst = self.dst / case_name
        # case was already (at least partially) processed by an earlier run
        if dst.exists():
            return 1
        else:
            dst.mkdir()
        # (convert_to_fp16, filename) pairs: coordinates and the 120 expected
        # timestep tensors are stored half-precision, the mask is copied as-is
        fnames = [(True, "x.th"), (True, "y.th"), (False, "object_mask.th")]
        fnames += [(True, f"{i:08d}_mesh.th") for i in range(120)]
        for to_fp16, fname in fnames:
            if not (src / fname).exists():
                print(f"file not found: {(src / fname).as_posix()}")
                return 1
            if to_fp16:
                data = torch.load(src / fname).half()
                torch.save(data, dst / fname)
            else:
                shutil.copyfile(src / fname, dst / fname)
        # unpack the pickled metadata into individual tensor files
        with open(src / f"simulation_description.pkl", "rb") as f:
            desc = pickle.load(f)
        uinit = desc["initial_velocity"]
        num_objects = desc["n_objects"]
        uinit_uri = dst / f"U_init.th"
        torch.save(torch.tensor(uinit), uinit_uri)
        num_objects_uri = dst / f"num_objects.th"
        torch.save(torch.tensor(num_objects), num_objects_uri)
        return 0
def main(src, dst, num_workers):
    """Copy all 'case_*' simulation folders from `src` to `dst`.

    The actual per-case conversion happens in CopyDataset.__getitem__; the
    DataLoader with `num_workers` workers is used purely to parallelize the
    copying, its outputs are ignored.
    """
    src = Path(src).expanduser()
    dst = Path(dst).expanduser()
    dst.mkdir(exist_ok=True)
    print(f"src: {src.as_posix()}")
    print(f"dst: {dst.as_posix()}")
    case_names = list(sorted([case_name for case_name in os.listdir(src) if "case_" in case_name]))
    print(f"found {len(case_names)} case_names")
    dataset = CopyDataset(src=src, dst=dst, case_names=case_names)
    # iterate only for the side effect of copying; results are discarded
    for _ in tqdm(DataLoader(dataset, batch_size=1, num_workers=num_workers)):
        pass
    print("fin")
if __name__ == "__main__":
    main(**parse_args())
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
ml-jku/UPT | https://github.com/ml-jku/UPT/blob/f148ef187973ef4958e8a5324c6692dd2582ad97/data/transientflow/cfddataset_norm.py | data/transientflow/cfddataset_norm.py | import os
from argparse import ArgumentParser
from pathlib import Path
import einops
import torch
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
def parse_args():
    """Parse CLI flags into a dict; only --root is mandatory."""
    cli = ArgumentParser()
    cli.add_argument("--root", type=str, required=True, help="e.g. /system/user/publicdata/CVSim/mesh_dataset/v1")
    cli.add_argument("--q", type=float, default=0)
    cli.add_argument("--num_workers", type=int, default=0)
    cli.add_argument("--exclude_last", type=int, default=0)
    return vars(cli.parse_args())
def get_torch_files(root):
    """Recursively collect per-timestep data files ('<8 digits>_mesh.th' style).

    Walks `root` (a pathlib.Path) and returns every '.th' file that is not one
    of the known metadata tensors, in os.listdir order, recursing into
    subdirectories as they are encountered.

    :param root: pathlib.Path directory to scan
    :return: list of pathlib.Path objects
    :raises ValueError: if a non-metadata .th file does not start with an
        8-digit timestep index
    """
    # every .th file that does NOT start with one of these prefixes is
    # expected to be a timestep data file
    excluded_prefixes = (
        "coordinates",
        "geometry2d",
        "object_mask",
        "U_init",
        "num_objects",
        "x",
        "y",
        "edge_index",
        "movement_per_position",
        "sampling_weights",
    )
    result = []
    for fname in os.listdir(root):
        uri = root / fname
        if uri.is_dir():
            result += get_torch_files(uri)
        elif uri.name.endswith(".th") and not uri.name.startswith(excluded_prefixes):
            try:
                # data files are named '<8-digit timestep>...' -> validate
                _ = int(uri.name[:len("00000000")])
            except ValueError:
                # bug fix: was a bare `except:`, which also intercepted
                # KeyboardInterrupt/SystemExit before re-raising
                print(f"{uri.name} is not a data file")
                raise
            result.append(uri)
    return result
class MeanVarDataset(Dataset):
    """torch Dataset that yields per-case channel statistics instead of samples.

    For each 'case_*' directory, loads all 120 timestep tensors and computes
    mean/var/min/max plus the fraction of values within 1/2/3 standard
    deviations, per channel (the 3 rows of each tensor). Optionally discards
    values outside an approximated [q, 1-q] quantile range first. The Dataset
    interface is used so a DataLoader can parallelize the computation.
    """
    def __init__(self, case_uris, q):
        super().__init__()
        # case_uris: pathlib.Path case directories; q: quantile cutoff (0 = off)
        self.case_uris = case_uris
        self.q = q
    def __len__(self):
        return len(self.case_uris)
    def __getitem__(self, idx):
        """Return (mean, var, min, max, within1std, within2std, within3std),
        each a tensor of shape (3,) with one entry per channel."""
        case_uri = self.case_uris[idx]
        assert case_uri.name.startswith("case_")
        uris = get_torch_files(case_uri)
        # every case must contain exactly the 120 expected timesteps
        if len(uris) != 120:
            #print(f"invalid number of uris for case '{case_uri.as_posix()}' len={len(uris)}")
            raise RuntimeError(f"invalid number of uris for case '{case_uri.as_posix()}' len={len(uris)}")
        data = torch.stack([torch.load(uri) for uri in uris])
        mean = torch.zeros(3)
        var = torch.zeros(3)
        mmin = torch.zeros(3)
        mmax = torch.zeros(3)
        within1std = torch.zeros(3)
        within2std = torch.zeros(3)
        within3std = torch.zeros(3)
        # statistics are computed independently per channel
        for i in range(3):
            cur_data = data[:, i]
            if self.q > 0:
                # quantile is not supported for large dimensions
                # qmin = torch.quantile(cur_data, q=self.q)
                # qmax = torch.quantile(cur_data, q=1 - self.q)
                # approximate quantile by assuming a normal distribution
                cur_mean = cur_data.mean()
                cur_std = cur_data.std()
                dist = torch.distributions.Normal(loc=0, scale=1)
                qmin = cur_mean + cur_std * dist.icdf(torch.tensor(self.q))
                qmax = cur_mean + cur_std * dist.icdf(torch.tensor(1 - self.q))
                is_valid = torch.logical_and(qmin < cur_data, cur_data < qmax)
                valid_data = cur_data[is_valid]
            else:
                valid_data = cur_data
            mean[i] = valid_data.mean()
            var[i] = valid_data.var()
            mmin[i] = valid_data.min()
            mmax[i] = valid_data.max()
            # coverage within k standard deviations (sanity check on
            # how gaussian the channel distribution is)
            cur_std = valid_data.std()
            is_within1std = torch.logical_and(mean[i] - 1 * cur_std < valid_data, valid_data < mean[i] + 1 * cur_std)
            within1std[i] = is_within1std.sum() / is_within1std.numel()
            is_within2std = torch.logical_and(mean[i] - 2 * cur_std < valid_data, valid_data < mean[i] + 2 * cur_std)
            within2std[i] = is_within2std.sum() / is_within2std.numel()
            is_within3std = torch.logical_and(mean[i] - 3 * cur_std < valid_data, valid_data < mean[i] + 3 * cur_std)
            within3std[i] = is_within3std.sum() / is_within3std.numel()
        # old impl
        # mean = torch.mean(data, dim=[0, 2])
        # var = torch.var(data, dim=[0, 2])
        # mmin = data.min(dim=2).values.min(dim=0).values
        # mmax = data.max(dim=2).values.max(dim=0).values
        return mean, var, mmin, mmax, within1std, within2std, within3std
def main(root, num_workers, exclude_last, q):
    """Compute per-channel mean/std/min/max statistics over all cases in `root`.

    Statistics are computed per simulation (optionally trimmed to an
    approximate [q, 1-q] quantile range) via MeanVarDataset and then averaged
    across simulations; the results are printed, not returned.
    """
    root = Path(root).expanduser()
    assert root.exists() and root.is_dir()
    print(f"root: {root.as_posix()}")
    print(f"num_workers: {num_workers}")
    print(f"exclude_last: {exclude_last}")
    assert q < 0.5
    print(f"q (exclude values below/above quantile): {q}")
    # get all case uris
    case_uris = [root / fname for fname in os.listdir(root)]
    # sort by case index
    case_uris = list(sorted(case_uris, key=lambda cu: int(cu.name.replace("case_", ""))))
    # exclude last
    if exclude_last > 0:
        case_uris = case_uris[:-exclude_last]
    print(f"using {len(case_uris)} uris")
    print(f"last used case_uri: {case_uris[-1].as_posix()}")
    # setup dataset
    dataset = MeanVarDataset(case_uris=case_uris, q=q)
    # calculate mean/var per simulation and then average over them
    sum_of_means = 0.
    sum_of_vars = 0.
    min_of_mins = torch.full(size=(3,), fill_value=torch.inf)
    max_of_maxs = torch.full(size=(3,), fill_value=-torch.inf)
    within1std_sum = torch.zeros(3)
    within2std_sum = torch.zeros(3)
    within3std_sum = torch.zeros(3)
    # DataLoader workers parallelize the per-case statistics computation
    for data in tqdm(DataLoader(dataset, batch_size=1, num_workers=num_workers)):
        mean, var, mmin, mmax, within1std, within2std, within3std = data
        sum_of_means += mean.squeeze(0)
        sum_of_vars += var.squeeze(0)
        min_of_mins = torch.minimum(min_of_mins, mmin.squeeze(0))
        max_of_maxs = torch.maximum(max_of_maxs, mmax.squeeze(0))
        within1std_sum += within1std.squeeze(0)
        within2std_sum += within2std.squeeze(0)
        within3std_sum += within3std.squeeze(0)
    # average
    mean = sum_of_means / len(dataset)
    std = torch.sqrt(sum_of_vars / len(dataset))
    within1std_mean = within1std_sum / len(dataset)
    within2std_mean = within2std_sum / len(dataset)
    within3std_mean = within3std_sum / len(dataset)
    #
    print(f"data_mean: {mean.tolist()}")
    print(f"data_std: {std.tolist()}")
    print(f"data_min: {min_of_mins.tolist()}")
    print(f"data_max: {max_of_maxs.tolist()}")
    print(f"within1std: {within1std_mean.tolist()}")
    print(f"within2std: {within2std_mean.tolist()}")
    print(f"within3std: {within3std_mean.tolist()}")
if __name__ == "__main__":
    main(**parse_args())
| python | MIT | f148ef187973ef4958e8a5324c6692dd2582ad97 | 2026-01-05T07:12:15.158856Z | false |
dhingratul/Stock-Price-Prediction | https://github.com/dhingratul/Stock-Price-Prediction/blob/940b95271f0befe13752dca7d82aa1f84ebf7137/src/helper.py | src/helper.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 12:45:51 2017
@author: dhingratul
"""
import numpy as np
import matplotlib.pyplot as plt
def normalize_windows(win_data):
    """Normalize each window relative to its first element.

    Each value p is mapped to (p / p_0) - 1, where p_0 is the first entry of
    the window; denormalize via p = p_0 * (n + 1).

    Input: iterable of windows (sequences of numbers or numeric strings)
    Output: list of normalized windows (lists of floats)
    Note: Run from load_data()
    """
    return [
        [float(value) / float(window[0]) - 1 for value in window]
        for window in win_data
    ]
def load_data(filename, seq_len, norm_win):
    """
    Loads the data from a csv file into arrays
    Input: Filename, sequence length, normalization window (True, False)
    Output: X_tr, Y_tr, X_te, Y_te as a list
    Note: Normalization data using n_i = (p_i / p_0) - 1,
    denormalization using p_i = p_0(n_i + 1)
    Note: Run from timeSeriesPredict.py
    """
    # bug fix: the file handle was previously opened without being closed
    with open(filename, 'r') as fid:
        data = fid.read().split('\n')
    # each sample is a window of seq_len inputs plus one target value
    sequence_length = seq_len + 1
    out = []
    for i in range(len(data) - sequence_length):
        out.append(data[i: i + sequence_length])
    if norm_win:
        out = normalize_windows(out)
    out = np.array(out)
    # 90/10 chronological train/test split; only the training rows are shuffled
    split_ratio = 0.9
    split = round(split_ratio * out.shape[0])
    train = out[:int(split), :]
    np.random.shuffle(train)
    X_tr = train[:, :-1]
    Y_tr = train[:, -1]
    X_te = out[int(split):, :-1]
    Y_te = out[int(split):, -1]
    # add a trailing feature dimension of size 1 (expected by the LSTM input)
    X_tr = np.reshape(X_tr, (X_tr.shape[0], X_tr.shape[1], 1))
    X_te = np.reshape(X_te, (X_te.shape[0], X_te.shape[1], 1))
    return [X_tr, Y_tr, X_te, Y_te]
def predict_seq_mul(model, data, win_size, pred_len):
    """Predict several independent sequences of length `pred_len`.

    For every pred_len-th window in `data`, the model's own previous
    prediction is fed back to roll the window forward, producing one
    predicted sequence per starting window.

    Input: keras-style model, test windows of shape (n, win_size, 1),
           window size, prediction length
    Output: list of predicted sequences (lists of scalars)
    Note: Run from timeSeriesPredict.py
    """
    sequences = []
    n_sequences = len(data) // pred_len
    for seq_idx in range(n_sequences):
        window = data[seq_idx * pred_len]
        steps = []
        for _ in range(pred_len):
            next_value = model.predict(window[None, :, :])[0, 0]
            steps.append(next_value)
            # drop the oldest entry and append the fresh prediction
            window = np.insert(window[1:], [win_size - 1], next_value, axis=0)
        sequences.append(steps)
    return sequences
def predict_pt_pt(model, data):
    """Predict a single timestep ahead for every window in `data`.

    Input: keras-style model, test windows
    Output: 1-D numpy array with one prediction per window
    Note: Run from timeSeriesPredict.py
    """
    raw = model.predict(data)
    return np.reshape(raw, (raw.size, ))
def plot_mul(Y_hat, Y, pred_len):
    """
    Plots the predicted data versus the true data
    Input: Predicted data (list of sequences), True Data, Length of prediction
    Output: shows the plot (returns None)
    Note: Run from timeSeriesPredict.py
    """
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(Y, label='Y')
    # Print the predictions in its respective series-length:
    # each predicted sequence is left-padded with None so it lines up with
    # the position of its source window on the time axis
    for i, j in enumerate(Y_hat):
        shift = [None for p in range(i * pred_len)]
        plt.plot(shift + j, label='Y_hat')
    plt.legend()
    plt.show()
| python | MIT | 940b95271f0befe13752dca7d82aa1f84ebf7137 | 2026-01-05T07:12:22.817686Z | false |
dhingratul/Stock-Price-Prediction | https://github.com/dhingratul/Stock-Price-Prediction/blob/940b95271f0befe13752dca7d82aa1f84ebf7137/src/timeSeriesPredict.py | src/timeSeriesPredict.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 13:54:11 2017
@author: dhingratul
Predicts the next day (closing) stock prices for S&P 500 data using LSTM,
and 1D conv layer
"""
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
import helper
import time
from sklearn.metrics import mean_squared_error
import numpy as np
# Load Data
seq_len = 50
norm_win = True
filename = '../data/sp500.csv'
X_tr, Y_tr, X_te, Y_te = helper.load_data(filename, seq_len, norm_win)
# Model Build: LSTM -> 1D conv + pooling -> LSTM -> linear readout
# NOTE(review): `input_dim`/`output_dim`/`nb_epoch` are Keras 1 argument
# names -- confirm the pinned keras version still accepts them
model = Sequential()
model.add(LSTM(input_dim=1,
               output_dim=seq_len,
               return_sequences=True))
model.add(Dropout(0.2))
model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(LSTM(100,
               return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(output_dim=1)) # Linear dense layer to aggregate into 1 val
model.add(Activation('linear'))
timer_start = time.time()
model.compile(loss='mse', optimizer='rmsprop')
print('Model built in: ', time.time()-timer_start)
# Training model
model.fit(X_tr,
          Y_tr,
          batch_size=512,
          nb_epoch=200,
          validation_split=0.05
          )
# Predictions
win_size = seq_len
pred_len = seq_len
plot = False
# NOTE(review): with plot=True, `mse_model` is never assigned and the
# comparison section below would raise NameError -- intended for plot=False
if plot:
    pred = helper.predict_seq_mul(model, X_te, win_size, pred_len)
    helper.plot_mul(pred, Y_te, pred_len)
else:
    pred = helper.predict_pt_pt(model, X_te)
    mse_model = mean_squared_error(Y_te, pred)
    print("MSE of DL model ", mse_model)
# Stupid Model: predict the mean of each input window
y_bar = np.mean(X_te, axis=1)
y_bar = np.reshape(y_bar, (y_bar.shape[0]))
mse_base = mean_squared_error(Y_te, y_bar)
print("MSE of y_bar Model", mse_base)
# t-1 Model: predict the last observed value of each window
y_t_1 = X_te[:, -1]
y_t_1 = np.reshape(y_t_1, (y_t_1.shape[0]))
mse_t_1 = mean_squared_error(Y_te, y_t_1)
print("MSE of t-1 Model", mse_t_1)
# Comparisons: relative MSE improvement (negative = DL model is better)
improv = (mse_model - mse_base)/mse_base
improv_t_1 = (mse_model - mse_t_1)/mse_t_1
print("%ge improvement over naive model", improv)
print("%ge improvement over t-1 model", improv_t_1)
corr_model = np.corrcoef(Y_te, pred)
corr_base = np.corrcoef(Y_te, y_bar)
corr_t_1 = np.corrcoef(Y_te, y_t_1)
print("Correlation of y_bar \n ", corr_base, "\n t-1 model \n", corr_t_1,
      "\n DL model\n", corr_model)
| python | MIT | 940b95271f0befe13752dca7d82aa1f84ebf7137 | 2026-01-05T07:12:22.817686Z | false |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/train.py | train.py | from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, LearningRateScheduler
from keras.optimizers import SGD, Adam
from keras.losses import squared_hinge
import os
import argparse
import keras.backend as K
from models.model_factory import build_model
from utils.config_utils import Config
from utils.load_data import load_dataset
# parse arguments
parser = argparse.ArgumentParser(description='Model training')
parser.add_argument('-c', '--config_path', type=str,
                    default=None, help='Configuration file')
parser.add_argument('-o' ,'--override',action='store',nargs='*',default=[])
arguments = parser.parse_args()
# turn the "key=value" override strings into a dict; values containing '='
# are kept intact by re-joining everything after the first '='
override_dir = {}
for s in arguments.override:
    s_s = s.split("=")
    k = s_s[0].strip()
    v = "=".join(s_s[1:]).strip()
    override_dir[k]=v
arguments.override = override_dir
cfg = arguments.config_path
cf = Config(cfg, cmd_args = arguments.override)
# if necessary, only use the CPU for debugging
if cf.cpu:
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
# ## Construct the network
print('Construct the Network\n')
# In[4]:
model = build_model(cf)
print('setting up the network and creating callbacks\n')
early_stop = EarlyStopping(monitor='loss', min_delta=0.001, patience=10, mode='min', verbose=1)
checkpoint = ModelCheckpoint(cf.out_wght_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max', period=1)
tensorboard = TensorBoard(log_dir='./logs/' + str(cf.tensorboard_name), histogram_freq=0, write_graph=True, write_images=False)
print('loading data\n')
train_data, val_data, test_data = load_dataset(cf.dataset)
# learning rate schedule
def scheduler(epoch):
    """Keras LearningRateScheduler callback: at every epoch listed in
    cf.decay_at_epoch, rescale the (decayed) learning rate by the matching
    factor from cf.factor_at_epoch; otherwise keep the current rate."""
    if epoch in cf.decay_at_epoch:
        index = cf.decay_at_epoch.index(epoch)
        factor = cf.factor_at_epoch[index]
        lr = K.get_value(model.optimizer.lr)
        # iterations per epoch, used to replay Adam's built-in time decay
        IT = train_data.X.shape[0]/cf.batch_size
        current_lr = lr * (1./(1.+cf.decay*epoch*IT))
        K.set_value(model.optimizer.lr,current_lr*factor)
        print('\nEpoch {} updates LR: LR = LR * {} = {}\n'.format(epoch+1,factor, K.get_value(model.optimizer.lr)))
    return K.get_value(model.optimizer.lr)
lr_decay = LearningRateScheduler(scheduler)
#sgd = SGD(lr=cf.lr, decay=cf.decay, momentum=0.9, nesterov=True)
adam= Adam(lr=cf.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=cf.decay)
print('compiling the network\n')
# squared hinge loss matches the +-1 one-hot targets produced by load_dataset
model.compile(loss=squared_hinge, optimizer=adam, metrics=['accuracy'])
if cf.finetune:
    print('Load previous weights\n')
    model.load_weights(cf.out_wght_path)
else:
    print('No weights preloaded, training from scratch\n')
print('(re)training the network\n')
model.fit(train_data.X,train_data.y,
          batch_size = cf.batch_size,
          epochs = cf.epochs,
          verbose = cf.progress_logging,
          callbacks = [checkpoint, tensorboard,lr_decay],
          validation_data = (val_data.X,val_data.y))
print('Done\n')
| python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/personal_config/__init__.py | personal_config/__init__.py | python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false | |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/models/__init__.py | models/__init__.py | python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false | |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/models/model_factory.py | models/model_factory.py | from keras.models import Sequential, Model
from keras import regularizers
from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, concatenate
from keras.layers.advanced_activations import LeakyReLU
from keras.regularizers import l2
import numpy as np
from layers.quantized_layers import QuantizedConv2D,QuantizedDense
from layers.quantized_ops import quantized_relu as quantize_op
from layers.binary_layers import BinaryConv2D
from layers.binary_ops import binary_tanh as binary_tanh_op
def build_model(cf):
    """Build the (optionally quantized/binarized) CNN described by config `cf`.

    The network has three convolutional blocks (A/B/C with cf.nla/nlb/nlc
    layers of cf.nfa/nfb/nfc filters each), each followed by 2x2 max pooling,
    and a final dense classification layer with batch normalization.

    :param cf: config object; cf.network_type selects the precision variant
        ('float', 'qnn', 'full-qnn', 'bnn', 'full-bnn') -- 'full-*' variants
        also quantize/binarize the activations, not just the weights.
    :return: an uncompiled keras Sequential model
    :raises ValueError: if cf.network_type is not one of the supported types
    """
    def quantized_relu(x):
        # activation quantized to cf.abits bits
        return quantize_op(x,nb=cf.abits)
    def binary_tanh(x):
        # binarized (+-1) activation
        return binary_tanh_op(x)
    # Conv_ builds the first conv layer (with input_shape), Conv the later
    # ones, Act the activation; which factory is used depends on network_type.
    # (note: a stray unused `H = 1.` assignment was removed; the quantized/
    # binary layer factories pass H=1 literally)
    if cf.network_type =='float':
        Conv_ = lambda s, f, i, c: Conv2D(kernel_size=(s, s), filters=f, strides=(1, 1), padding='same', activation='linear',
                                          kernel_regularizer=l2(cf.kernel_regularizer),input_shape = (i,i,c))
        Conv = lambda s, f: Conv2D(kernel_size=(s, s), filters=f, strides=(1, 1), padding='same', activation='linear',
                                   kernel_regularizer=l2(cf.kernel_regularizer))
        Act = lambda: LeakyReLU()
    elif cf.network_type=='qnn':
        # quantized weights, float activations
        Conv_ = lambda s, f, i, c: QuantizedConv2D(kernel_size=(s, s), H=1, nb=cf.wbits, filters=f, strides=(1, 1),
                                                   padding='same', activation='linear',
                                                   kernel_regularizer=l2(cf.kernel_regularizer),
                                                   kernel_lr_multiplier=cf.kernel_lr_multiplier,input_shape = (i,i,c))
        Conv = lambda s, f: QuantizedConv2D(kernel_size=(s, s), H=1, nb=cf.wbits, filters=f, strides=(1, 1),
                                            padding='same', activation='linear',
                                            kernel_regularizer=l2(cf.kernel_regularizer),
                                            kernel_lr_multiplier=cf.kernel_lr_multiplier)
        Act = lambda: LeakyReLU()
    elif cf.network_type=='full-qnn':
        # quantized weights AND quantized activations
        Conv_ = lambda s, f, i,c: QuantizedConv2D(kernel_size=(s, s), H=1, nb=cf.wbits, filters=f, strides=(1, 1),
                                                  padding='same', activation='linear',
                                                  kernel_regularizer=l2(cf.kernel_regularizer),
                                                  kernel_lr_multiplier=cf.kernel_lr_multiplier,input_shape = (i,i,c))
        Conv = lambda s, f: QuantizedConv2D(kernel_size=(s, s), H=1, nb=cf.wbits, filters=f, strides=(1, 1),
                                            padding='same', activation='linear',
                                            kernel_regularizer=l2(cf.kernel_regularizer),
                                            kernel_lr_multiplier=cf.kernel_lr_multiplier)
        Act = lambda: Activation(quantized_relu)
    elif cf.network_type=='bnn':
        # binary weights, float activations
        Conv_ = lambda s, f,i,c: BinaryConv2D(kernel_size=(s, s), H=1, filters=f, strides=(1, 1), padding='same',
                                              activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
                                              kernel_lr_multiplier=cf.kernel_lr_multiplier,input_shape = (i,i,c))
        Conv = lambda s, f: BinaryConv2D(kernel_size=(s, s), H=1, filters=f, strides=(1, 1), padding='same',
                                         activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
                                         kernel_lr_multiplier=cf.kernel_lr_multiplier)
        Act = lambda: LeakyReLU()
    elif cf.network_type=='full-bnn':
        # binary weights AND binary activations
        Conv_ = lambda s, f,i,c: BinaryConv2D(kernel_size=(s, s), H=1, filters=f, strides=(1, 1), padding='same',
                                              activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
                                              kernel_lr_multiplier=cf.kernel_lr_multiplier,input_shape = (i,i,c))
        Conv = lambda s, f: BinaryConv2D(kernel_size=(s, s), H=1, filters=f, strides=(1, 1), padding='same',
                                         activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
                                         kernel_lr_multiplier=cf.kernel_lr_multiplier)
        Act = lambda: Activation(binary_tanh)
    else:
        # bug fix: this used to print a warning and fall through, crashing
        # later with a NameError on the undefined layer factories
        raise ValueError('wrong network type, the supported network types in this repo are float, qnn, full-qnn, bnn and full-bnn')
    model = Sequential()
    # stem: first conv layer defines the (dim, dim, channels) input shape
    model.add(Conv_(3, cf.nfa,cf.dim,cf.channels))
    model.add(BatchNormalization(momentum=0.1,epsilon=0.0001))
    model.add(Act())
    # block A
    for i in range(0,cf.nla-1):
        model.add(Conv(3, cf.nfa))
        model.add(BatchNormalization(momentum=0.1, epsilon=0.0001))
        model.add(Act())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # block B
    for i in range(0,cf.nlb):
        model.add(Conv(3, cf.nfb))
        model.add(BatchNormalization(momentum=0.1, epsilon=0.0001))
        model.add(Act())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # block C
    for i in range(0,cf.nlc):
        model.add(Conv(3, cf.nfc))
        model.add(BatchNormalization(momentum=0.1, epsilon=0.0001))
        model.add(Act())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Dense classification head (bias-free, followed by batch norm)
    model.add(Flatten())
    model.add(Dense(cf.classes,use_bias=False))
    model.add(BatchNormalization(momentum=0.1,epsilon=0.0001))
    model.summary()
    return model
def load_weights(model, weight_reader):
    """Load sequentially stored binary weights into `model`'s conv layers.

    Iterates the layers in order; for every conv layer it first consumes the
    parameters of the following BatchNormalization layer (read in the order
    beta, gamma, mean, var), then the conv bias (only if the layer has one)
    and the conv kernel. Kernels are stored flattened in (out, in, h, w)
    layout and are permuted to keras' (h, w, in, out) layout.

    NOTE(review): the exact byte layout depends on `weight_reader`, which is
    defined elsewhere (darknet-style reader, presumably) -- confirm the
    beta/gamma/mean/var read order against its implementation.
    """
    weight_reader.reset()
    for i in range(len(model.layers)):
        if 'conv' in model.layers[i].name:
            # conv followed by batch norm: consume the BN parameters first
            if 'batch' in model.layers[i + 1].name:
                norm_layer = model.layers[i + 1]
                size = np.prod(norm_layer.get_weights()[0].shape)
                beta = weight_reader.read_bytes(size)
                gamma = weight_reader.read_bytes(size)
                mean = weight_reader.read_bytes(size)
                var = weight_reader.read_bytes(size)
                # NOTE(review): set_weights returns None, so this assignment
                # is dead; kept for byte-compatibility with the original
                weights = norm_layer.set_weights([gamma, beta, mean, var])
            conv_layer = model.layers[i]
            # layers with a bias store it before the kernel
            if len(conv_layer.get_weights()) > 1:
                bias = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[1].shape))
                kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
                kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
                kernel = kernel.transpose([2, 3, 1, 0])
                conv_layer.set_weights([kernel, bias])
            else:
                kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
                kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
                kernel = kernel.transpose([2, 3, 1, 0])
                conv_layer.set_weights([kernel])
    return model
| python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/utils/load_data.py | utils/load_data.py | # Copyright 2017 Bert Moons
# This file is part of QNN.
# QNN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# QNN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# The code for QNN is based on BinaryNet: https://github.com/MatthieuCourbariaux/BinaryNet
# You should have received a copy of the GNU General Public License
# along with QNN. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from pylearn2.datasets.cifar10 import CIFAR10
from pylearn2.datasets.mnist import MNIST
def load_dataset(dataset):
    """Load and preprocess a dataset for QNN training.

    Parameters
    ----------
    dataset : str
        Either "CIFAR-10" or "MNIST".

    Returns
    -------
    (train_set, valid_set, test_set)
        pylearn2 dataset objects with `X` rescaled to [-1, 1] and laid out
        NHWC, and `y` one-hot encoded into {-1, +1} (hinge-loss targets).
        The training set is doubled by horizontal mirroring.

    Raises
    ------
    ValueError
        If `dataset` is not one of the supported names.  (The original fell
        through with only a print and then crashed with UnboundLocalError.)
    """
    if dataset == "CIFAR-10":
        print('Loading CIFAR-10 dataset...')
        train_set_size = 45000
        train_set = CIFAR10(which_set="train", start=0, stop=train_set_size)
        valid_set = CIFAR10(which_set="train", start=train_set_size, stop=50000)
        test_set = CIFAR10(which_set="test")
        shape = (-1, 3, 32, 32)
    elif dataset == "MNIST":
        print('Loading MNIST dataset...')
        train_set_size = 50000
        train_set = MNIST(which_set="train", start=0, stop=train_set_size)
        valid_set = MNIST(which_set="train", start=train_set_size, stop=60000)
        test_set = MNIST(which_set="test")
        shape = (-1, 1, 28, 28)
    else:
        raise ValueError("wrong dataset given: {!r}".format(dataset))
    for subset in (train_set, valid_set, test_set):
        # Rescale pixels from [0, 255] to [-1, 1] and convert NCHW -> NHWC.
        subset.X = np.transpose(
            np.reshape(np.subtract(np.multiply(2. / 255., subset.X), 1.), shape),
            (0, 2, 3, 1))
        # Flatten, then one-hot encode labels mapped to {-1, +1} for hinge loss.
        subset.y = 2 * np.float32(np.eye(10)[np.hstack(subset.y)]) - 1.
    # Enlarge the train data set by mirroring (flip the width axis of NHWC).
    x_train_flip = train_set.X[:, :, ::-1, :]
    train_set.X = np.concatenate((train_set.X, x_train_flip), axis=0)
    train_set.y = np.concatenate((train_set.y, train_set.y), axis=0)
    return train_set, valid_set, test_set
| python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/utils/__init__.py | utils/__init__.py | python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false | |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/utils/config_utils.py | utils/config_utils.py |
import warnings
def import_from(mdl, name):
    """Import dotted module path `mdl` and return its attribute `name`."""
    module = __import__(mdl, fromlist=[name])
    return getattr(module, name)
# Specification of every recognised config parameter:
#   name: [required, default (if not required), type, subtype*]
#   (*subtype applies when type is list or tuple)
parameter_specs = {
    'cpu'                   :[True, None, bool],
    # NOTE(review): 'epochs' is declared str although configs assign an int;
    # kept as-is for backward compatibility -- confirm intended type.
    'epochs'                :[True, None, str],
    'network_type'          :[True, None, str],
    'finetune'              :[True, None, bool],
    'out_wght_path'         :[True, None, str],
    'decay'                 :[True, None, float],
    'lr'                    :[True, None, float],
    'decay_at_epoch'        :[True, None, list, int],
    'factor_at_epoch'       :[True, None, list, float],
    'progress_logging'      :[True, None, bool],
    'batch_size'            :[True, None, int],
    'kernel_lr_multiplier'  :[True, None, float],
    'tensorboard_name'      :[True, None, str],
    'kernel_regularizer'    :[True, None, float],
    'activity_regularizer'  :[True, None, float],
    'bits'                  :[False, None, int],
    'wbits'                 :[False, None, int],
    'abits'                 :[False, None, int],
    'nla'                   :[False, None, int],
    'nfa'                   :[False, None, int],
    'nlb'                   :[False, None, int],
    'nfb'                   :[False, None, int],
    'nlc'                   :[False, None, int],
    'nfc'                   :[False, None, int],
    # Fix: was declared int, but dataset names are strings ('CIFAR-10'),
    # so passing --dataset on the command line always crashed in int().
    'dataset'               :[False, None, str],
    'dim'                   :[False, None, int],
    'channels'              :[False, None, int],
    'classes'               :[False, None, int],
}

_TRUE_STRINGS = frozenset(('true', '1', 'yes', 'on'))
_FALSE_STRINGS = frozenset(('false', '0', 'no', 'off'))


def parse_param(param, value):
    """Coerce `value` to the type declared for `param` in parameter_specs.

    Values already of the right type pass through unchanged.  Optional
    parameters accept 'None'/'none'/'' and yield None.  Boolean strings
    are parsed explicitly because bool('False') is True in Python.

    Raises ValueError for unparseable boolean strings.
    """
    # todo: support complex types ( (nested) lists/tuples...)
    expected = parameter_specs[param][2]
    if isinstance(value, expected):
        return value
    if not parameter_specs[param][0]:  # if not required, check if None
        if value in ['None', 'none', '']:
            return None
    if expected is bool and isinstance(value, str):
        # Fix: the original used bool(value), so 'False' parsed as True.
        lowered = value.strip().lower()
        if lowered in _TRUE_STRINGS:
            return True
        if lowered in _FALSE_STRINGS:
            return False
        raise ValueError('cannot parse boolean value {!r} for {}'.format(value, param))
    return expected(value)
class Config:
    """Container resolving all training parameters from three sources.

    Resolution priority per parameter (see parameter_specs):
      1. command-line overrides in `cmd_args`,
      2. the python module `config.<cfg>`,
      3. the declared default (optional parameters only).
    """

    def __init__(self, cfg, cmd_args=None):
        # Fix: the original used a mutable default argument (cmd_args={}).
        cmd_args = {} if cmd_args is None else cmd_args
        try:
            for k in parameter_specs:
                self.proces_param(k, cfg, cmd_args)
        except ImportError:
            print('The configfile you provided ({}) cannot be imported, please verify.'.format(cfg))
            exit(1)
        self.postprocess()

    def proces_param(self, param, cfg, cmd_args):
        """Resolve one parameter and store it as a lowercase attribute."""
        if param in cmd_args:
            setattr(self, param.lower(), parse_param(param, cmd_args[param]))
        elif param.lower() in cmd_args:
            setattr(self, param.lower(), parse_param(param, cmd_args[param.lower()]))
        else:
            try:
                setattr(self, param.lower(), import_from('config.{}'.format(cfg), param))
            except AttributeError:
                if parameter_specs[param][0]:  # required parameter: missing is fatal
                    raise
                else:
                    setattr(self, param.lower(), parameter_specs[param][1])

    def postprocess(self):
        """Expand shorthands and derive output paths from resolved values."""
        # 'bits' is a shorthand that expands to both abits and wbits.
        if hasattr(self, 'bits') and self.bits is not None:
            if self.abits is None:
                self.abits = self.bits
                warnings.warn('specialized bits to abits')
            if self.wbits is None:
                self.wbits = self.bits
                warnings.warn('specialized bits to wbits')
            del self.bits  # to make sure it is not further used
        if hasattr(self, 'class'):
            # NOTE(review): 'class' is never set via parameter_specs, so this
            # branch is normally dead; kept for backward compatibility.
            self.clss = getattr(self, 'class')
        self.out_wght_path = './weights/{}_{}_{}b_{}b_{}_{}_{}_{}_{}_{}.hdf5'.format(
            self.dataset, self.network_type, self.abits, self.wbits, self.nla,
            self.nfa, self.nlb, self.nfb, self.nlc, self.nfc)
        self.tensorboard_name = '{}_{}_{}b_{}b_{}_{}_{}_{}_{}_{}.hdf5'.format(
            self.dataset, self.network_type, self.abits, self.wbits, self.nla,
            self.nfa, self.nlb, self.nfb, self.nlc, self.nfc)
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/layers/binary_layers.py | layers/binary_layers.py | # -*- coding: utf-8 -*-
import numpy as np
from keras import backend as K
from keras.layers import InputSpec, Layer, Dense, Conv2D
from keras import constraints
from keras import initializers
from binary_ops import binarize
class Clip(constraints.Constraint):
    """Weight constraint clipping values into [min_value, max_value].

    If max_value is omitted (or falsy), the range is symmetric around zero;
    the bounds are swapped if given in the wrong order.
    """

    def __init__(self, min_value, max_value=None):
        self.min_value = min_value
        self.max_value = max_value if max_value else -min_value
        if self.max_value < self.min_value:
            self.min_value, self.max_value = self.max_value, self.min_value

    def __call__(self, p):
        return K.clip(p, self.min_value, self.max_value)

    def get_config(self):
        return {"name": self.__call__.__name__,
                "min_value": self.min_value,
                "max_value": self.max_value}
class BinaryDense(Dense):
    ''' Binarized Dense layer: forward pass uses weights binarized to {-H, +H}.

    References:
    "BinaryNet: Training Deep Neural Networks with Weights and Activations
    Constrained to +1 or -1" [http://arxiv.org/abs/1602.02830]
    '''

    def __init__(self, units, H=1., kernel_lr_multiplier='Glorot', bias_lr_multiplier=None, **kwargs):
        super(BinaryDense, self).__init__(units, **kwargs)
        self.H = H  # clipping range for the latent full-precision weights
        self.kernel_lr_multiplier = kernel_lr_multiplier
        self.bias_lr_multiplier = bias_lr_multiplier
        # Fix: the original called Dense.__init__ a second time here; the
        # redundant duplicate call has been removed.

    def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[1]
        if self.H == 'Glorot':
            # Glorot-style scale derived from fan-in + fan-out.
            self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
        if self.kernel_lr_multiplier == 'Glorot':
            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
        self.kernel_constraint = Clip(-self.H, self.H)
        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
            # Fix: Keras 2 Dense has no `output_dim` attribute, so the original
            # shape=(self.output_dim,) raised AttributeError; use self.units
            # (consistent with QuantizedDense).
            self.bias = self.add_weight(shape=(self.units,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True

    def call(self, inputs):
        # Binarize on the fly; gradients flow to the latent kernel.
        binary_kernel = binarize(self.kernel, H=self.H)
        output = K.dot(inputs, binary_kernel)
        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)
        return output

    def get_config(self):
        config = {'H': self.H,
                  'kernel_lr_multiplier': self.kernel_lr_multiplier,
                  'bias_lr_multiplier': self.bias_lr_multiplier}
        base_config = super(BinaryDense, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class BinaryConv2D(Conv2D):
    '''Binarized Convolution2D layer: forward pass uses weights in {-H, +H}.

    References:
    "BinaryNet: Training Deep Neural Networks with Weights and Activations
    Constrained to +1 or -1" [http://arxiv.org/abs/1602.02830]
    '''

    def __init__(self, filters, kernel_regularizer=None, activity_regularizer=None,
                 kernel_lr_multiplier='Glorot', bias_lr_multiplier=None, H=1., **kwargs):
        super(BinaryConv2D, self).__init__(filters, **kwargs)
        self.H = H  # clipping range for the latent full-precision weights
        self.kernel_lr_multiplier = kernel_lr_multiplier
        self.bias_lr_multiplier = bias_lr_multiplier
        self.activity_regularizer = activity_regularizer
        self.kernel_regularizer = kernel_regularizer

    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)
        base = self.kernel_size[0] * self.kernel_size[1]
        if self.H == 'Glorot':
            # Glorot-style scale from the receptive-field fan-in/fan-out.
            nb_input = int(input_dim * base)
            nb_output = int(self.filters * base)
            self.H = np.float32(np.sqrt(1.5 / (nb_input + nb_output)))
            #print('Glorot H: {}'.format(self.H))
        if self.kernel_lr_multiplier == 'Glorot':
            nb_input = int(input_dim * base)
            nb_output = int(self.filters * base)
            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (nb_input + nb_output)))
            #print('Glorot learning rate multiplier: {}'.format(self.lr_multiplier))
        self.kernel_constraint = Clip(-self.H, self.H)
        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
        #self.bias_initializer = initializers.RandomUniform(-self.H, self.H)
        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
            self.bias = self.add_weight((self.filters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
        self.built = True

    def call(self, inputs):
        binary_kernel = binarize(self.kernel, H=self.H)
        inverse_kernel_lr_multiplier = 1. / self.kernel_lr_multiplier
        # The two stop_gradient wrappers below are identities in the forward
        # pass but rescale gradients: input gradients by 1/kernel_lr_multiplier
        # here, output gradients by kernel_lr_multiplier afterwards -- i.e. a
        # per-layer learning-rate multiplier for the kernel.
        inputs_bnn_gradient = (inputs - (1. - 1. / inverse_kernel_lr_multiplier) * K.stop_gradient(inputs)) \
            * inverse_kernel_lr_multiplier
        outputs_bnn_gradient = K.conv2d(
            inputs_bnn_gradient,
            binary_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        outputs = (outputs_bnn_gradient - (1. - 1. / self.kernel_lr_multiplier) * K.stop_gradient(outputs_bnn_gradient)) \
            * self.kernel_lr_multiplier
        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def get_config(self):
        config = {'H': self.H,
                  'kernel_lr_multiplier': self.kernel_lr_multiplier,
                  'bias_lr_multiplier': self.bias_lr_multiplier}
        base_config = super(BinaryConv2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


# Aliases
BinaryConvolution2D = BinaryConv2D
| python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/layers/quantized_ops.py | layers/quantized_ops.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import keras.backend as K
import tensorflow as tf
import numpy as np
def round_through(x):
    '''Round element-wise to the closest integer with a straight-through
    gradient (forward: K.round, backward: identity).

    A trick from [Sergey Ioffe](http://stackoverflow.com/a/36480182)
    '''
    return x + K.stop_gradient(K.round(x) - x)
def clip_through(x, min_val, max_val):
    '''Element-wise clipping with a straight-through gradient.

    Forward pass clips to [min_val, max_val]; the backward pass behaves
    like the identity (analogue of round_through).

    NOTE(review): the file originally defined clip_through twice with
    identical semantics (the second shadowed the first and named its
    parameters after the builtins min/max); the duplicate was removed.
    '''
    clipped = K.clip(x, min_val, max_val)
    return x + K.stop_gradient(clipped - x)
def _hard_sigmoid(x):
    '''Piecewise-linear sigmoid clip((x+1)/2, 0, 1); differs from the more
    conventional K.hard_sigmoid.

    # Reference:
    - [QuantizedNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830}
    '''
    shifted = (x + 1) / 2
    return K.clip(shifted, 0, 1)
def quantize(W, nb=16, clip_through=False):
    '''Quantize weights to `nb` bits (one sign bit), values in [-1, 1).

    Parameters
    ----------
    W : tensor of weights
    nb : int, total bit width including the sign bit
    clip_through : bool, if True the clip uses a straight-through gradient
        (backward pass is the identity) instead of a hard clip.

    Fix: the `clip_through` parameter shadows the module-level function of
    the same name, so the original call `clip_through(...)` raised
    TypeError ('bool' object is not callable) whenever clip_through=True.
    The straight-through clip is now inlined instead.

    # Reference:
    - [QuantizedNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830}
    '''
    non_sign_bits = nb - 1
    m = pow(2, non_sign_bits)
    Wr = round_through(W * m)
    if clip_through:
        # straight-through clip: forward clips, gradient passes unclipped
        Wq = (Wr + K.stop_gradient(K.clip(Wr, -m, m - 1) - Wr)) / m
    else:
        Wq = K.clip(Wr, -m, m - 1) / m
    return Wq
def quantized_relu(W, nb=16):
    '''Quantized ReLU activation with 2**nb levels on the hard-sigmoid range,
    clipped to [0, 1 - 2**-(nb-1)] (straight-through rounding).

    # Reference:
    - [QuantizedNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830}
    '''
    scale = pow(2, nb)
    act = 2. * (round_through(_hard_sigmoid(W) * scale) / scale) - 1.
    return K.clip(act, 0, 1 - 1.0 / pow(2, nb - 1))
def quantized_tanh(W, nb=16):
    '''Symmetric `nb`-bit quantization (one sign bit) into [-1, 1) with
    straight-through rounding.

    # Reference:
    - [QuantizedNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830}
    '''
    m = pow(2, nb - 1)
    return K.clip(round_through(W * m), -m, m - 1) / m
def quantized_leakyrelu(W, nb=16, alpha=0.1):
    '''Leaky-ReLU followed by symmetric `nb`-bit quantization with
    straight-through rounding and clipping.

    # Reference:
    - [QuantizedNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830}
    '''
    if alpha != 0.:
        negative_part = tf.nn.relu(-W)
    W = tf.nn.relu(W)
    if alpha != 0.:
        alpha_t = tf.cast(tf.convert_to_tensor(alpha), W.dtype.base_dtype)
        W = W - alpha_t * negative_part
    m = pow(2, nb - 1)
    return clip_through(round_through(W * m), -m, m - 1) / m
def quantized_maxrelu(W, nb=16):
    '''Quantized ReLU with a dynamic, power-of-two maximum: the positive range
    [0, nextpow2(max(W))) is quantized into 2**(nb-1) levels with
    straight-through rounding/clipping.

    # Reference:
    - [QuantizedNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830}
    '''
    non_sign_bits = nb-1
    max_ = tf.reduce_max((W))
    #max_ = tf.Print(max_,[max_])
    # round the observed maximum up to the next power of two
    max__ = tf.pow(2.0,tf.ceil(tf.log(max_)/tf.log(tf.cast(tf.convert_to_tensor(2.0), W.dtype.base_dtype))))
    #max__ = tf.Print(max__,[max__])
    m = pow(2,non_sign_bits)
    #W = tf.Print(W,[W],summarize=20)
    # scale into [0, m), round/clip with straight-through gradients, rescale back
    Wq = max__*clip_through(round_through(W/max__*(m)),0,m-1)/(m)
    #Wq = tf.Print(Wq,[Wq],summarize=20)
    return Wq
def quantized_leakymaxrelu(W, nb=16, alpha=0.1):
    '''Leaky-ReLU followed by quantization with a dynamic power-of-two maximum
    (symmetric `nb`-bit grid, straight-through rounding/clipping).

    # Reference:
    - [QuantizedNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830}
    '''
    # leaky-ReLU: keep positives, scale negatives by alpha
    if alpha != 0.:
        negative_part = tf.nn.relu(-W)
    W = tf.nn.relu(W)
    if alpha != 0.:
        alpha = tf.cast(tf.convert_to_tensor(alpha), W.dtype.base_dtype)
        W -= alpha * negative_part
    max_ = tf.reduce_max((W))
    #max_ = tf.Print(max_,[max_])
    # round the observed maximum up to the next power of two
    max__ = tf.pow(2.0,tf.ceil(tf.log(max_)/tf.log(tf.cast(tf.convert_to_tensor(2.0), W.dtype.base_dtype))))
    #max__ = tf.Print(max__,[max__])
    non_sign_bits = nb-1
    m = pow(2,non_sign_bits)
    #W = tf.Print(W,[W],summarize=20)
    # scale by the dynamic maximum, quantize, rescale back
    Wq = max__* clip_through(round_through(W/max__*m),-m,m-1)/m
    #Wq = tf.Print(Wq,[Wq],summarize=20)
    return Wq
def _mean_abs(x, axis=None, keepdims=False):
    '''Gradient-detached mean of absolute values along `axis`.'''
    magnitude = K.abs(x)
    mean_mag = K.mean(magnitude, axis=axis, keepdims=keepdims)
    return K.stop_gradient(mean_mag)
def xnorize(W, H=1., axis=None, keepdims=False):
    '''XNOR-Net style factorization: (mean |W| scale, quantized weights).

    NOTE(review): `H` is passed positionally into quantize(W, nb=...), so
    the default H=1. is interpreted as a 1-bit width here.  This looks
    copied from the binary version (whose quantizer takes H as second
    argument) -- confirm the intended bit width.
    '''
    Wb = quantize(W, H)
    Wa = _mean_abs(W, axis, keepdims)
    return Wa, Wb
| python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/layers/quantized_layers.py | layers/quantized_layers.py | # -*- coding: utf-8 -*-
import numpy as np
from keras import backend as K
from keras.layers import InputSpec, Layer, Dense, Conv2D
from keras import constraints
from keras import initializers
from quantized_ops import quantize, clip_through
class Clip(constraints.Constraint):
    """Weight constraint that clips values into [min_value, max_value].

    A missing (or falsy) max_value makes the range symmetric around zero;
    bounds given in the wrong order are swapped.
    """

    def __init__(self, min_value, max_value=None):
        self.min_value = min_value
        self.max_value = max_value or -min_value
        if self.max_value < self.min_value:
            self.min_value, self.max_value = self.max_value, self.min_value

    def __call__(self, p):
        # todo: switch for clip through?
        return K.clip(p, self.min_value, self.max_value)

    def get_config(self):
        return {"name": self.__call__.__name__,
                "min_value": self.min_value,
                "max_value": self.max_value}
class QuantizedDense(Dense):
    ''' Quantized Dense layer: forward pass uses weights quantized to `nb` bits.

    References:
    "QuantizedNet: Training Deep Neural Networks with Weights and Activations
    Constrained to +1 or -1" [http://arxiv.org/abs/1602.02830]
    '''

    def __init__(self, units, H=1., nb=16, kernel_lr_multiplier='Glorot', bias_lr_multiplier=None, **kwargs):
        super(QuantizedDense, self).__init__(units, **kwargs)
        self.H = H  # clipping range for the latent full-precision weights
        self.nb = nb  # quantization bit width (including the sign bit)
        self.kernel_lr_multiplier = kernel_lr_multiplier
        self.bias_lr_multiplier = bias_lr_multiplier
        # Fix: the original called Dense.__init__ a second time here; the
        # redundant duplicate call has been removed.

    def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[1]
        if self.H == 'Glorot':
            # Glorot-style scale derived from fan-in + fan-out.
            self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
        if self.kernel_lr_multiplier == 'Glorot':
            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
        self.kernel_constraint = Clip(-self.H, self.H)
        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
            self.bias = self.add_weight(shape=(self.units,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True

    def call(self, inputs):
        # Quantize on the fly; gradients flow to the latent kernel.
        quantized_kernel = quantize(self.kernel, nb=self.nb)
        output = K.dot(inputs, quantized_kernel)
        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)
        return output

    def get_config(self):
        # Fix: 'nb' is now serialized too; the original dropped it, so a
        # model reloaded from config silently reverted to nb=16.
        config = {'H': self.H,
                  'nb': self.nb,
                  'kernel_lr_multiplier': self.kernel_lr_multiplier,
                  'bias_lr_multiplier': self.bias_lr_multiplier}
        base_config = super(QuantizedDense, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class QuantizedConv2D(Conv2D):
    '''Quantized Convolution2D layer: forward pass uses `nb`-bit weights.

    References:
    "QuantizedNet: Training Deep Neural Networks with Weights and Activations
    Constrained to +1 or -1" [http://arxiv.org/abs/1602.02830]
    '''

    def __init__(self, filters, kernel_regularizer=None, activity_regularizer=None,
                 kernel_lr_multiplier='Glorot', bias_lr_multiplier=None, H=1., nb=16, **kwargs):
        super(QuantizedConv2D, self).__init__(filters, **kwargs)
        self.H = H  # clipping range for the latent full-precision weights
        self.nb = nb  # quantization bit width (including the sign bit)
        self.kernel_lr_multiplier = kernel_lr_multiplier
        self.bias_lr_multiplier = bias_lr_multiplier
        self.activity_regularizer = activity_regularizer
        self.kernel_regularizer = kernel_regularizer

    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)
        base = self.kernel_size[0] * self.kernel_size[1]
        if self.H == 'Glorot':
            # Glorot-style scale from the receptive-field fan-in/fan-out.
            nb_input = int(input_dim * base)
            nb_output = int(self.filters * base)
            self.H = np.float32(np.sqrt(1.5 / (nb_input + nb_output)))
            #print('Glorot H: {}'.format(self.H))
        if self.kernel_lr_multiplier == 'Glorot':
            nb_input = int(input_dim * base)
            nb_output = int(self.filters * base)
            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (nb_input + nb_output)))
            #print('Glorot learning rate multiplier: {}'.format(self.lr_multiplier))
        self.kernel_constraint = Clip(-self.H, self.H)
        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
        #self.bias_initializer = initializers.RandomUniform(-self.H, self.H)
        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
            self.bias = self.add_weight((self.filters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
        self.built = True

    def call(self, inputs):
        quantized_kernel = quantize(self.kernel, nb=self.nb)
        inverse_kernel_lr_multiplier = 1. / self.kernel_lr_multiplier
        # The two stop_gradient wrappers below are identities in the forward
        # pass but rescale gradients: input gradients by 1/kernel_lr_multiplier
        # here, output gradients by kernel_lr_multiplier afterwards -- i.e. a
        # per-layer learning-rate multiplier for the kernel.
        inputs_qnn_gradient = (inputs - (1. - 1. / inverse_kernel_lr_multiplier) * K.stop_gradient(inputs)) \
            * inverse_kernel_lr_multiplier
        outputs_qnn_gradient = K.conv2d(
            inputs_qnn_gradient,
            quantized_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        outputs = (outputs_qnn_gradient - (1. - 1. / self.kernel_lr_multiplier) * K.stop_gradient(outputs_qnn_gradient)) \
            * self.kernel_lr_multiplier
        #outputs = outputs*K.mean(K.abs(self.kernel))
        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def get_config(self):
        # NOTE(review): 'nb' is not serialized here, so a model reloaded from
        # config silently falls back to the default nb=16 -- confirm intent.
        config = {'H': self.H,
                  'kernel_lr_multiplier': self.kernel_lr_multiplier,
                  'bias_lr_multiplier': self.bias_lr_multiplier}
        base_config = super(QuantizedConv2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


# Aliases
QuantizedConvolution2D = QuantizedConv2D
| python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/layers/binary_ops.py | layers/binary_ops.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import keras.backend as K
import tensorflow as tf
def round_through(x):
    '''Round element-wise to the closest integer with a straight-through
    gradient (forward: K.round, backward: identity).

    A trick from [Sergey Ioffe](http://stackoverflow.com/a/36480182)
    '''
    return x + K.stop_gradient(K.round(x) - x)
def _hard_sigmoid(x):
    '''Piecewise-linear sigmoid clip(0.5*x + 0.5, 0, 1); differs from the
    more conventional K.hard_sigmoid.

    # Reference:
    - [BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830}
    '''
    shifted = (0.5 * x) + 0.5
    return K.clip(shifted, 0, 1)
def binary_sigmoid(x):
    '''Binarize activations to {0, 1}: hard sigmoid followed by
    straight-through rounding.

    # Reference:
    - [BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830}
    '''
    saturated = _hard_sigmoid(x)
    return round_through(saturated)
def binary_tanh(x):
    '''Binarize activations to {-1, +1}.

    Forward pass: sign-like step 2*round(_hard_sigmoid(x)) - 1.
    Backward pass: hard tanh 2*_hard_sigmoid(x) - 1, i.e. the gradient is
    passed through for |x| <= 1 and cleared for |x| > 1.

    # Reference:
    - [BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830}
    '''
    return 2 * round_through(_hard_sigmoid(x)) - 1
def binarize(W, H=1):
    '''Map weights in [-H, H] to {-H, +H} with straight-through gradients.

    # Reference:
    - [BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830}
    '''
    # normalize to [-1, 1], take the binary sign, scale back to +/-H
    return H * binary_tanh(W / H)
def _mean_abs(x, axis=None, keepdims=False):
    '''Gradient-detached mean of absolute values along `axis`.'''
    magnitude = K.abs(x)
    mean_mag = K.mean(magnitude, axis=axis, keepdims=keepdims)
    return K.stop_gradient(mean_mag)
def xnorize(W, H=1., axis=None, keepdims=False):
    '''XNOR-Net style factorization of W.

    Returns the pair (scale, binarized): the gradient-detached mean of |W|
    along `axis`, and W binarized to {-H, +H}.
    '''
    scale = _mean_abs(W, axis, keepdims)
    binarized = binarize(W, H)
    return scale, binarized
| python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/layers/__init__.py | layers/__init__.py | python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false | |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/config/config_MNIST.py | config/config_MNIST.py | # test using cpu only
# test using cpu only
cpu = False
# type of network to be trained, can be bnn, full-bnn, qnn, full-qnn, tnn, full-tnn
network_type = 'full-qnn'
# 'bits' is a shorthand that expands to both wbits and abits (see
# Config.postprocess); can be None, 2, 4, 8, whatever
bits=None
wbits = 4
abits = 4
# finetune can be False or True
finetune = False
# dataset geometry: 28x28 single-channel images, 10 classes
dataset='MNIST'
dim=28
channels=1
classes=10
# regularization strengths (0. disables)
kernel_regularizer=0.
activity_regularizer=0.
# width and depth of the three network stages a, b, c
# (nl* / nf* presumably layers / filters per stage -- confirm against the model builder)
nla=2
nfa=64
nlb=3
nfb=128
nlc=3
nfc=128
# learning rate decay: at each epoch in decay_at_epoch, LR *= matching factor
decay_at_epoch = [0, 25, 80 ]
factor_at_epoch = [1, .1, 1]
kernel_lr_multiplier = 10
# debug and logging
progress_logging = 1 # can be 0 = no std logging, 1 = progress bar logging, 2 = one log line per epoch
epochs = 100
batch_size = 64
lr = 0.001
decay = 0.000025
# important paths (derived from the settings above)
out_wght_path = './weights/{}_{}_{}b_{}b_{}_{}_{}_{}_{}_{}.hdf5'.format(dataset,network_type,abits,wbits,nla,nfa,nlb,nfb,nlc,nfc)
tensorboard_name = '{}_{}_{}b_{}b_{}_{}_{}_{}_{}_{}.hdf5'.format(dataset,network_type,abits,wbits,nla,nfa,nlb,nfb,nlc,nfc)
| python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/config/config_CIFAR-10.py | config/config_CIFAR-10.py | # test using cpu only
# test using cpu only
cpu = False
# type of network to be trained, can be bnn, full-bnn, qnn, full-qnn, tnn, full-tnn
network_type = 'full-qnn'
# 'bits' is a shorthand that expands to both wbits and abits (see
# Config.postprocess); can be None, 2, 4, 8, whatever
bits=None
wbits = 4
abits = 4
# finetune can be False or True
finetune = False
# dataset geometry: 32x32 RGB images, 10 classes
dataset='CIFAR-10'
dim=32
channels=3
classes=10
# regularization strengths (0. disables)
kernel_regularizer=0.
activity_regularizer=0.
# width and depth of the three network stages a, b, c
# (nl* / nf* presumably layers / filters per stage -- confirm against the model builder)
nla=1
nfa=64
nlb=1
nfb=64
nlc=1
nfc=64
# learning rate decay: at each epoch in decay_at_epoch, LR *= matching factor
decay_at_epoch = [0, 25, 80 ]
factor_at_epoch = [1, .1, 1]
kernel_lr_multiplier = 10
# debug and logging
progress_logging = 1 # can be 0 = no std logging, 1 = progress bar logging, 2 = one log line per epoch
epochs = 100
batch_size = 64
lr = 0.001
decay = 0.000025
# important paths (derived from the settings above)
out_wght_path = './weights/{}_{}_{}b_{}b_{}_{}_{}_{}_{}_{}.hdf5'.format(dataset,network_type,abits,wbits,nla,nfa,nlb,nfb,nlc,nfc)
tensorboard_name = '{}_{}_{}b_{}b_{}_{}_{}_{}_{}_{}.hdf5'.format(dataset,network_type,abits,wbits,nla,nfa,nlb,nfb,nlc,nfc)
| python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/config/config.py | config/config.py | # test using cpu only
# test using cpu only
cpu = False
# type of network to be trained, can be bnn, full-bnn, qnn, full-qnn, tnn, full-tnn
network_type = 'full-qnn'
# 'bits' is a shorthand that expands to both wbits and abits (see
# Config.postprocess); can be None, 2, 4, 8, whatever
bits=None
wbits = 4
abits = 4
# finetune can be False or True
finetune = False
# dataset geometry: 32x32 RGB images, 10 classes
dataset='CIFAR-10'
dim=32
channels=3
classes=10
# regularization strengths (0. disables)
kernel_regularizer=0.
activity_regularizer=0.
# width and depth of the three network stages a, b, c
# (nl* / nf* presumably layers / filters per stage -- confirm against the model builder)
nla=1
nfa=64
nlb=1
nfb=64
nlc=1
nfc=64
# learning rate decay: at each epoch in decay_at_epoch, LR *= matching factor
decay_at_epoch = [0, 25, 80 ]
factor_at_epoch = [1, .1, 1]
kernel_lr_multiplier = 10
# debug and logging
progress_logging = 1 # can be 0 = no std logging, 1 = progress bar logging, 2 = one log line per epoch
epochs = 100
batch_size = 64
lr = 0.001
decay = 0.000025
# important paths (derived from the settings above)
out_wght_path = './weights/{}_{}_{}b_{}b_{}_{}_{}_{}_{}_{}.hdf5'.format(dataset,network_type,abits,wbits,nla,nfa,nlb,nfb,nlc,nfc)
tensorboard_name = '{}_{}_{}b_{}b_{}_{}_{}_{}_{}_{}.hdf5'.format(dataset,network_type,abits,wbits,nla,nfa,nlb,nfb,nlc,nfc)
| python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false |
BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow | https://github.com/BertMoons/QuantizedNeuralNetworks-Keras-Tensorflow/blob/2eb9f8fbd4ce3c0160be95580c9bb452d7055538/config/__init__.py | config/__init__.py | python | BSD-3-Clause | 2eb9f8fbd4ce3c0160be95580c9bb452d7055538 | 2026-01-05T07:10:58.474336Z | false | |
Kozea/Flask-WeasyPrint | https://github.com/Kozea/Flask-WeasyPrint/blob/580e83d1a4d3ca2e51af417c5479acf9d7b80a60/tests/test_flask_weasyprint.py | tests/test_flask_weasyprint.py | """Tests for Flask-WeasyPrint."""
import pytest
from flask import Flask, json, jsonify, redirect, request
from flask_weasyprint import CSS, HTML, make_url_fetcher, render_pdf
from weasyprint import __version__ as weasyprint_version
from werkzeug.test import ClientRedirectError
from . import app, document_html
def test_url_fetcher():
    """Fetcher creation needs a request context; custom dispatchers work too."""
    # Building the fetcher requires an active request context...
    with pytest.raises(RuntimeError):
        make_url_fetcher()
    # ...which is only needed at creation time, not per fetch.
    with app.test_request_context(base_url='http://example.org/bar/'):
        fetch = make_url_fetcher()
        page = fetch('http://example.org/bar/')
        assert page['string'].strip().startswith(b'<html>')
        assert page['mime_type'] == 'text/html'
        assert page['encoding'] == 'utf-8'
        assert page['redirected_url'] == 'http://example.org/bar/foo/'
        svg = fetch('http://example.org/bar/foo/graph?data=1&labels=A')
        assert svg['string'].strip().startswith(b'<svg xmlns=')
        assert svg['mime_type'] == 'image/svg+xml'

    # A custom dispatcher can reroute arbitrary URL schemes into the app.
    def route_everything(url_string):
        return app, 'http://example.org/bar/', '/foo/graph?data=1&labels=A'

    with app.test_request_context(base_url='http://example.org/bar/'):
        fetch = make_url_fetcher(dispatcher=route_everything)
        svg = fetch('test://')
        assert svg['string'].strip().startswith(b'<svg xmlns=')
        assert svg['mime_type'] == 'image/svg+xml'
def test_wrappers():
    """The HTML/CSS wrappers accept named parameters and render together."""
    with app.test_request_context(base_url='http://example.org/bar/'):
        page = HTML(url='http://example.org/bar/foo/')
        stylesheet = CSS(url='http://example.org/bar/static/style.css')
        pdf_bytes = page.write_pdf(stylesheets=[stylesheet])
        assert pdf_bytes.startswith(b'%PDF')
@pytest.mark.parametrize('url, filename, automatic, cookie', (
    ('/foo.pdf', None, None, None),
    ('/foo.pdf', None, None, 'cookie value'),
    ('/foo/', None, True, None),
    ('/foo/', 'bar.pdf', True, None),
    ('/foo/', 'bar.pdf', False, None),
))
def test_pdf(url, filename, automatic, cookie):
    """Render a PDF via the view or render_pdf() and check headers/content.

    ``filename`` and ``automatic`` drive the Content-Disposition header;
    ``cookie`` must be forwarded to the sub-request and show up in the PDF.
    """
    if url.endswith('.pdf'):
        # Exercise the real /foo.pdf view through a test client.
        client = app.test_client()
        if cookie:
            client.set_cookie('cookie', cookie)
        response = client.get('/foo.pdf')
    else:
        # Call render_pdf() directly inside a request context.
        with app.test_request_context('/foo/'):
            options = {
                'download_filename': filename,
                'automatic_download': automatic,
            }
            if int(weasyprint_version.split('.')[0]) >= 59:
                # Keep the PDF uncompressed so the URI string stays greppable.
                options['uncompressed_pdf'] = True
            response = render_pdf(HTML(string=document_html()), **options)
    assert response.status_code == 200
    assert response.mimetype == 'application/pdf'
    data = b''.join(response.iter_encoded())
    assert data.startswith(b'%PDF')
    if cookie:
        assert cookie.encode() in data
    assert b'/URI (https://courtbouillon.org/)' in data
    disposition = response.headers.get('Content-Disposition')
    if filename:
        position = 'attachment' if automatic else 'inline'
        # Flask's send_file() emits "<disposition>; filename=<download_name>";
        # compare against the parametrized filename (the literal here had been
        # corrupted to "(unknown)" by a templating artifact).
        assert disposition == f'{position}; filename={filename}'
    else:
        assert disposition is None
def test_redirects():
    """The fetcher follows redirect chains and raises on loops and 404s."""
    app = Flask(__name__)

    def register_redirect(source, target):
        app.add_url_rule(
            source, f'redirect_{source}', lambda: redirect(target))

    # /a -> /b -> /c -> /d terminates on a real page.
    register_redirect('/a', '/b')
    register_redirect('/b', '/c')
    register_redirect('/c', '/d')
    app.add_url_rule('/d', 'd', lambda: 'Ok')
    # /1 -> /2 -> /3 -> /1 never terminates.
    register_redirect('/1', '/2')
    register_redirect('/2', '/3')
    register_redirect('/3', '/1')

    with app.test_request_context():
        fetcher = make_url_fetcher()
        result = fetcher('http://localhost/a')
        assert result['string'] == b'Ok'
        assert result['redirected_url'] == 'http://localhost/d'
        with pytest.raises(ClientRedirectError):
            fetcher('http://localhost/1')
        with pytest.raises(ValueError):
            fetcher('http://localhost/nonexistent')
def test_dispatcher():
    """Exercise the default dispatcher's URL-acceptance rules.

    Covers: implicit vs explicit default ports, non-default ports,
    SERVER_NAME with subdomain matching, and SERVER_NAME with a port.
    """
    app = Flask(__name__, subdomain_matching=True)
    app.config['PROPAGATE_EXCEPTIONS'] = True

    @app.route('/')
    @app.route('/', subdomain='<subdomain>')
    @app.route('/<path:path>')
    @app.route('/<path:path>', subdomain='<subdomain>')
    def catchall(subdomain='', path=None):
        # Echo back how the request was routed so the helpers can inspect it.
        query_string = request.query_string.decode()
        app = [subdomain, request.script_root, request.path, query_string]
        return jsonify(app=app)

    def dummy_fetcher(url):
        # Stand-in for the network fetcher: marks URLs that were NOT dispatched.
        return {'string': 'dummy ' + url}

    def assert_app(url, host, script_root, path, query_string=''):
        """The URL was dispatched to the app with these parameters."""
        assert json.loads(dispatcher(url)['string']) == {
            'app': [host, script_root, path, query_string]}

    def assert_dummy(url):
        """The URL was not dispatched, the default fetcher was used."""
        assert dispatcher(url)['string'] == 'dummy ' + url

    # No SERVER_NAME config, default port
    with app.test_request_context(base_url='http://a.net/b/'):
        dispatcher = make_url_fetcher(next_fetcher=dummy_fetcher)
    assert_app('http://a.net/b', '', '/b', '/')
    assert_app('http://a.net/b/', '', '/b', '/')
    assert_app('http://a.net/b/', '', '/b', '/')
    assert_app('http://a.net/b/c/d?e', '', '/b', '/c/d', 'e')
    # An explicit :80 is equivalent to no port for http.
    assert_app('http://a.net:80/b/c/d?e', '', '/b', '/c/d', 'e')
    assert_dummy('http://a.net/other/prefix')
    assert_dummy('http://subdomain.a.net/b/')
    assert_dummy('http://other.net/b/')
    assert_dummy('http://a.net:8888/b/')
    assert_dummy('https://a.net/b/')

    # No SERVER_NAME config, explicit default port
    with app.test_request_context(base_url='http://a.net:80/b/'):
        dispatcher = make_url_fetcher(next_fetcher=dummy_fetcher)
    assert_app('http://a.net/b', '', '/b', '/')
    assert_app('http://a.net/b/', '', '/b', '/')
    assert_app('http://a.net/b/c/d?e', '', '/b', '/c/d', 'e')
    assert_app('http://a.net:80/b/c/d?e', '', '/b', '/c/d', 'e')
    assert_dummy('http://a.net/other/prefix')
    assert_dummy('http://subdomain.a.net/b/')
    assert_dummy('http://other.net/b/')
    assert_dummy('http://a.net:8888/b/')
    assert_dummy('https://a.net/b/')

    # Change the context’s port number: only that exact port matches.
    with app.test_request_context(base_url='http://a.net:8888/b/'):
        dispatcher = make_url_fetcher(next_fetcher=dummy_fetcher)
    assert_app('http://a.net:8888/b', '', '/b', '/')
    assert_app('http://a.net:8888/b/', '', '/b', '/')
    assert_app('http://a.net:8888/b/cd?e', '', '/b', '/cd', 'e')
    assert_dummy('http://subdomain.a.net:8888/b/')
    assert_dummy('http://a.net:8888/other/prefix')
    assert_dummy('http://a.net/b/')
    assert_dummy('http://a.net:80/b/')
    assert_dummy('https://a.net/b/')
    assert_dummy('https://a.net:443/b/')
    assert_dummy('https://a.net:8888/b/')

    # Add a SERVER_NAME config: any scheme and subdomains are accepted.
    app.config['SERVER_NAME'] = 'a.net'
    with app.test_request_context():
        dispatcher = make_url_fetcher(next_fetcher=dummy_fetcher)
    assert_app('http://a.net', '', '', '/')
    assert_app('http://a.net/', '', '', '/')
    assert_app('http://a.net/b/c/d?e', '', '', '/b/c/d', 'e')
    assert_app('http://a.net:80/b/c/d?e', '', '', '/b/c/d', 'e')
    assert_app('https://a.net/b/c/d?e', '', '', '/b/c/d', 'e')
    assert_app('https://a.net:443/b/c/d?e', '', '', '/b/c/d', 'e')
    assert_app('http://subdomain.a.net/b/', 'subdomain', '', '/b/')
    assert_dummy('http://other.net/b/')
    assert_dummy('http://a.net:8888/b/')

    # SERVER_NAME with a port number: only that port matches.
    app.config['SERVER_NAME'] = 'a.net:8888'
    with app.test_request_context():
        dispatcher = make_url_fetcher(next_fetcher=dummy_fetcher)
    assert_app('http://a.net:8888', '', '', '/')
    assert_app('http://a.net:8888/', '', '', '/')
    assert_app('http://a.net:8888/b/c/d?e', '', '', '/b/c/d', 'e')
    assert_app('https://a.net:8888/b/c/d?e', '', '', '/b/c/d', 'e')
    assert_app('http://subdomain.a.net:8888/b/', 'subdomain', '', '/b/')
    assert_dummy('http://other.net:8888/b/')
    assert_dummy('http://a.net:5555/b/')
    assert_dummy('http://a.net/b/')
@pytest.mark.parametrize('url', (
    # \xa0 (no-break space) and the accented path segment, as str and bytes...
    'http://example.net/Un\xef\u0109od\xe9/pass\xa0!',
    'http://example.net/Un\xef\u0109od\xe9/pass\xa0!'.encode(),
    # ...and their percent-encoded equivalents (%C2%A0 is UTF-8 for \xa0).
    'http://example.net/foo%20bar/p%61ss%C2%A0!',
    b'http://example.net/foo%20bar/p%61ss%C2%A0!',
))
def test_funky_urls(url):
    """Non-ASCII and percent-encoded URLs, str or bytes, reach the app."""
    with app.test_request_context(base_url='http://example.net/'):
        fetch = make_url_fetcher()
        assert fetch(url)['string'] == 'pass\xa0!'.encode()
| python | BSD-3-Clause | 580e83d1a4d3ca2e51af417c5479acf9d7b80a60 | 2026-01-05T07:12:27.148520Z | false |
Kozea/Flask-WeasyPrint | https://github.com/Kozea/Flask-WeasyPrint/blob/580e83d1a4d3ca2e51af417c5479acf9d7b80a60/tests/__init__.py | tests/__init__.py | """Demonstration and testing application for Flask-WeasyPrint."""
from flask import Flask, abort, redirect, render_template, request, url_for
from weasyprint import __version__ as weasyprint_version
# Disable the Flask’s default static file handling. (See below.)
app = Flask(__name__, static_folder=None)
# This is a pretty standard Flask app with a dynamic SVG graph. Of course the
# data here is always the same, but in a real app it could come from a database
# or be computed on the fly.
class Config:
    """Demo configuration: bar colors used by the /foo/graph view."""
    GRAPH_COLORS = ['#0C3795', '#752641', '#E47F00']


# Load the configuration with a plain call: using ``from_object`` as a
# decorator rebinds ``Config`` to ``None`` (it returns None), which is
# surprising to anyone who later tries to reference the class.
app.config.from_object(Config)
@app.route('/')
def index():
    """Redirect the site root to the HTML version of the document."""
    document_url = url_for('document_html')
    return redirect(document_url)
@app.route('/foo/')
def document_html():
    """Render the demo document with fixed graph data."""
    context = {
        'data': [42, 27.3, 63],
        'labels': ['Lorem', 'ipsum', 'sit'],
        'cookie': request.cookies.get('cookie'),
    }
    return render_template('document.html', **context)
@app.route('/foo/graph')
def graph():
    """Build a dynamic SVG bar graph from the ?data= and ?labels= args."""
    labels = request.args['labels'].split(',')
    values = [float(value) for value in request.args['data'].split(',')]
    colors = app.config['GRAPH_COLORS']
    # ?data=3,2,1&labels=A,B,C becomes
    # [(0, ('A', 3.0, color0)), (1, ('B', 2.0, color1)), (2, ('C', 1.0, color2))]
    svg = render_template('graph.svg', series=enumerate(zip(labels, values, colors)))
    return svg, 200, {'Content-Type': 'image/svg+xml'}
# The code specific to Flask-WeasyPrint follows. Pretty simple, eh?
from flask_weasyprint import render_pdf # noqa
@app.route('/foo.pdf')
def document_pdf():
    """Serve the same document rendered as a PDF."""
    weasyprint_major = int(weasyprint_version.split('.')[0])
    if weasyprint_major >= 59:
        # The uncompressed_pdf option only exists since WeasyPrint 59.
        return render_pdf(url_for('index'), uncompressed_pdf=True)
    return render_pdf(url_for('index'))
# End of code specific to Flask-WeasyPrint.
# The templates and static files are inlined here and served from memory. This
# is a bit unusual but allows us to keep this app in a single file. We could
# just as well use normal templates and static files.
from jinja2 import DictLoader # noqa
app.jinja_env.loader = DictLoader({
'document.html': '''<html>
{% set data = data | join(',') %}
{% set labels = labels | join(',') %}
<title>Test document{% if cookie %}{{ cookie }}{% endif %}</title>
<link rel=stylesheet
href="{{ url_for('static', filename='style.css') }}" />
<section>
<h1><a href="https://courtbouillon.org/">Flask-WeasyPrint</a></h1>
<nav>Get this document <a href="/foo.pdf">as PDF</a>.</nav>
<p>This vector graph was generated dynamically:</p>
<img src="graph?data={{ data }}&labels={{ labels }}">
</section>
''',
'graph.svg': '''
<svg xmlns="http://www.w3.org/2000/svg"
width="1600" height="1000" viewBox="0 0 160 100">
{% for i, (label, value, color) in series %}
<rect x="{{ 10 + i * 50 }}" y="{{ 75 - value }}"
width="40" height="{{ value }}"
fill="{{ color }}" stroke="#333" rx="5" ry="5" />
<text x="{{ 30 + i * 50 }}" y="90"
text-anchor="middle" font-size="10px">{{ label }}</text>
{% endfor %}
</svg>
''',
})
STATIC_FILES = {'style.css': ('text/css', '''
html { font-family: sans-serif }
section { width: 80%; margin: 2em auto }
a { color: inherit }
img { width: 100%; max-width: 600px; box-sizing: border-box;
border: 1px solid #888; }
/* Print-specific styles, ignored when rendering to screen: */
@page { size: A5; margin: 1cm }
@media print { nav { display: none } }
''')}
@app.route('/static/<path:filename>')
def static(filename):
    """Serve one of the in-memory static files, or 404."""
    if filename not in STATIC_FILES:  # pragma: no cover
        abort(404)
    content_type, body = STATIC_FILES[filename]
    return body, 200, {'Content-Type': content_type}
@app.route(u'/Unïĉodé/<stuff>')
@app.route(u'/foo bar/<stuff>')
def funky_urls(stuff):
    """Echo the path segment back; targets for the funky-URL fetcher tests."""
    return stuff
if __name__ == '__main__': # pragma: no cover
app.run(debug=True)
| python | BSD-3-Clause | 580e83d1a4d3ca2e51af417c5479acf9d7b80a60 | 2026-01-05T07:12:27.148520Z | false |
Kozea/Flask-WeasyPrint | https://github.com/Kozea/Flask-WeasyPrint/blob/580e83d1a4d3ca2e51af417c5479acf9d7b80a60/docs/conf.py | docs/conf.py | # Flask-WeasyPrint documentation build configuration file.
import flask_weasyprint

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
    'sphinx.ext.autosectionlabel']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'Flask-WeasyPrint'
copyright = 'Simon Sapin and contributors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags, taken from the package
# itself so docs and code cannot drift apart.
release = flask_weasyprint.__version__
# The short X.Y version (kept identical to the full release here).
version = release

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai'

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
    'collapse_navigation': False,
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []

# These paths are either relative to html_static_path
# or fully qualified paths (eg. https://...)
html_css_files = [
    'https://www.courtbouillon.org/static/docs.css',
]

# Output file base name for HTML help builder.
htmlhelp_basename = 'flaskweasyprintdoc'

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'flask_weasyprint', 'Flask-WeasyPrint Documentation',
     ['Simon Sapin and contributors'], 1)
]

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [(
    'index', 'Flask-WeasyPrint', 'Flask-WeasyPrint Documentation',
    'Simon Sapin', 'Flask-WeasyPrint',
    'Generate PDF files out of your Flask website', 'Miscellaneous'),
]

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'flask': ('https://flask.palletsprojects.com/en/2.1.x/', None),
    'weasyprint': ('https://doc.courtbouillon.org/weasyprint/stable/', None),
}
| python | BSD-3-Clause | 580e83d1a4d3ca2e51af417c5479acf9d7b80a60 | 2026-01-05T07:12:27.148520Z | false |
Kozea/Flask-WeasyPrint | https://github.com/Kozea/Flask-WeasyPrint/blob/580e83d1a4d3ca2e51af417c5479acf9d7b80a60/flask_weasyprint/__init__.py | flask_weasyprint/__init__.py | """Make PDF in your Flask app with WeasyPrint."""
from io import BytesIO
from urllib.parse import urljoin, urlsplit
from flask import current_app, has_request_context, request, send_file
from werkzeug.test import Client, ClientRedirectError, EnvironBuilder
from werkzeug.wrappers import Response
VERSION = __version__ = '1.1.0'
DEFAULT_PORTS = (('http', 80), ('https', 443))
def make_flask_url_dispatcher():
    """Return a URL dispatcher based on the current request context.

    You generally don’t need to call this directly.

    The :doc:`request context <flask:reqcontext>` is used when the dispatcher
    is first created but not afterwards. It is not required after this function
    has returned.

    Dispatch to the context’s app URLs below the context’s root URL. If the
    application has a ``SERVER_NAME`` :doc:`configuration <flask:config>` and
    ``subdomain_matching`` is set, also accept URLs that have that domain name
    or a subdomain thereof.
    """
    def parse_netloc(netloc):
        """Return (hostname, port)."""
        parsed = urlsplit(f'http://{netloc}')
        return parsed.hostname, parsed.port

    # Capture the concrete app object (not the context-local proxy) and the
    # root path NOW, while the request context still exists.
    app = current_app._get_current_object()
    root_path = request.script_root

    server_name = app.config.get('SERVER_NAME')
    if server_name and app.subdomain_matching:
        hostname, port = parse_netloc(server_name)

        def accept(url):
            """Accept any URL scheme; also accept subdomains."""
            return url.hostname is not None and (
                url.hostname == hostname or
                url.hostname.endswith(f'.{hostname}'))
    else:
        scheme = request.scheme
        hostname, port = parse_netloc(request.host)

        def accept(url):
            """Do not accept subdomains."""
            return (url.scheme, url.hostname) == (scheme, hostname)

    def dispatch(url_string):
        # Returns (app, base_url, path) for URLs this app should serve,
        # or None (implicitly) to let the next fetcher handle the URL.
        if isinstance(url_string, bytes):
            url_string = url_string.decode()
        url = urlsplit(url_string)
        url_port = url.port
        # An explicit default port (http:80 / https:443) counts as no port.
        if (url.scheme, url_port) in DEFAULT_PORTS:
            url_port = None
        if accept(url) and url_port == port and url.path.startswith(root_path):
            netloc = url.netloc
            if url.port and not url_port:
                netloc = netloc.rsplit(':', 1)[0]  # remove default port
            base_url = f'{url.scheme}://{netloc}{root_path}'
            path = url.path[len(root_path):]
            if url.query:
                path = f'{path}?{url.query}'
            # Ignore url.fragment
            return app, base_url, path

    return dispatch
def make_url_fetcher(dispatcher=None, next_fetcher=True):
    """Return an function suitable as a ``url_fetcher`` in WeasyPrint.

    You generally don’t need to call this directly.

    If ``dispatcher`` is not provided, :func:`make_flask_url_dispatcher` is
    called to get one. This requires a :doc:`request context
    <flask:reqcontext>`.

    Otherwise, it must be a callable that takes a URL and returns either
    :obj:`None` or a ``(wsgi_callable, base_url, path)`` tuple. For
    :obj:`None`, ``next_fetcher`` is used. (By default, fetch normally over the
    network.) For a tuple the request is made at the WSGI level.
    ``wsgi_callable`` must be a Flask application or another WSGI callable.
    ``base_url`` is the root URL for the application while ``path`` is the path
    within the application.

    Typically ``base_url + path`` is equivalent to the passed URL.
    """
    if next_fetcher is True:
        from weasyprint import default_url_fetcher  # lazy loading
        next_fetcher = default_url_fetcher
    if dispatcher is None:
        dispatcher = make_flask_url_dispatcher()

    def flask_url_fetcher(url):
        # URLs already visited in this redirect chain, for loop detection.
        redirect_chain = set()
        while True:
            result = dispatcher(url)
            if result is None:
                # Not handled by the app: fall back (network by default).
                return next_fetcher(url)
            app, base_url, path = result
            client = Client(app, response_wrapper=Response)
            # Forward the current request's cookies to the sub-request so
            # that e.g. authenticated pages render the same in the PDF.
            if has_request_context() and request.cookies:
                server_name = EnvironBuilder(
                    path, base_url=base_url).server_name
                for cookie_key, cookie_value in request.cookies.items():
                    client.set_cookie(
                        cookie_key, cookie_value, domain=server_name)
            response = client.get(path, base_url=base_url)
            if response.status_code == 200:
                return {
                    'string': response.data, 'mime_type': response.mimetype,
                    'encoding': 'utf-8', 'redirected_url': url}
            # The test client can follow redirects, but do it ourselves
            # to get access to the redirected URL.
            elif response.status_code in (301, 302, 303, 305, 307, 308):
                redirect_chain.add(url)
                url = urljoin(url, response.location)
                if url in redirect_chain:
                    raise ClientRedirectError('loop detected')
            else:
                raise ValueError(
                    'Flask-WeasyPrint got HTTP status '
                    f'{response.status} for {urljoin(base_url, path)}')

    return flask_url_fetcher
def _wrapper(class_, *args, **kwargs):
    """Shared constructor logic behind the HTML() and CSS() helpers.

    Resolves a URL ``guess`` against the current request, defaults
    ``base_url`` for string input, and injects the Flask url_fetcher.
    """
    if args:
        guess = args[0]
        args = args[1:]
    else:
        guess = kwargs.pop('guess', None)
    is_file_like = hasattr(guess, 'read')
    if guess is not None and not is_file_like:
        # Not a file object: treat it as a URL, possibly relative to the
        # current request's URL.
        guess = urljoin(request.url, guess)
    if 'string' in kwargs and 'base_url' not in kwargs:
        # Raw strings have no intrinsic base URL; borrow the request's.
        kwargs['base_url'] = request.url
    kwargs['url_fetcher'] = make_url_fetcher()
    return class_(guess, *args, **kwargs)
def HTML(*args, **kwargs):
    """Like :class:`weasyprint.HTML` but:

    * :func:`make_url_fetcher` is used to create an ``url_fetcher``
    * If ``guess`` is not a file object, it is a URL relative to the current
      request context. This means that you can just pass a result from
      :func:`flask.url_for`.
    * If ``string`` is passed, ``base_url`` defaults to the current
      request’s URL.

    This requires a Flask :doc:`request context <flask:reqcontext>`.
    """
    from weasyprint import HTML  # lazy loading
    return _wrapper(HTML, *args, **kwargs)
def CSS(*args, **kwargs):
    # Same wrapping behavior as HTML(); the docstring is copied just below,
    # so adding one here would be overwritten.
    from weasyprint import CSS  # lazy loading
    return _wrapper(CSS, *args, **kwargs)


# Reuse HTML()'s docstring with the class name swapped.
CSS.__doc__ = HTML.__doc__.replace('HTML', 'CSS')
def render_pdf(html, stylesheets=None, download_filename=None,
               automatic_download=True, **options):
    """Render ``html`` as a PDF wrapped in a Flask response.

    :param html:
        A :class:`weasyprint.HTML` instance, or anything accepted by
        :func:`flask_weasyprint.HTML` (the latter requires a request
        context).
    :param list stylesheets:
        User stylesheets forwarded to :meth:`weasyprint.HTML.write_pdf`.
    :param str download_filename:
        When set, a ``Content-Disposition`` header is emitted so that most
        browsers offer this name in the "Save as…" dialog.
    :param bool automatic_download:
        When :obj:`True` (and a filename is set), the browser downloads the
        file instead of displaying it inline.
    :param **options:
        Extra named options for :meth:`weasyprint.HTML.write_pdf`.
    :returns: a :class:`flask.Response` with ``application/pdf`` content.
    """
    if not hasattr(html, 'write_pdf'):
        html = HTML(html)
    pdf_bytes = html.write_pdf(stylesheets=stylesheets, **options)
    if download_filename:
        as_attachment = automatic_download
    else:
        as_attachment = False
    return send_file(
        BytesIO(pdf_bytes), mimetype="application/pdf",
        as_attachment=as_attachment, download_name=download_filename)
| python | BSD-3-Clause | 580e83d1a4d3ca2e51af417c5479acf9d7b80a60 | 2026-01-05T07:12:27.148520Z | false |
nvuillam/github-dependents-info | https://github.com/nvuillam/github-dependents-info/blob/59a406df22d1f42803182c6f4da71e3efeeeb161/tests/test_cli.py | tests/test_cli.py | """Tests for CLI argument parsing"""
from github_dependents_info.__main__ import app
from typer.testing import CliRunner
runner = CliRunner()
def test_cli_no_duplicate_param_warnings():
    """--help must not warn about -d or -c being registered twice."""
    outcome = runner.invoke(app, ["--help"])
    assert outcome.exit_code == 0
    for short_flag in ("-d", "-c"):
        assert f"parameter {short_flag} is used more than once" not in outcome.stderr
def test_cli_args_without_equals():
    """Test CLI accepts arguments without equals sign"""
    cli_args = [
        "--repo", "test/repo",
        "--markdownfile", "./test.md",
        "--sort", "stars",
        "--verbose",
    ]
    outcome = runner.invoke(app, cli_args)
    assert "unexpected extra arguments" not in outcome.stderr
    assert "does not take a value" not in outcome.stderr
def test_cli_args_with_equals():
    """Test CLI accepts arguments with equals sign"""
    cli_args = [
        "--repo=test/repo",
        "--markdownfile=./test.md",
        "--sort=stars",
        "--verbose",
    ]
    outcome = runner.invoke(app, cli_args)
    assert "unexpected extra arguments" not in outcome.stderr
    assert "does not take a value" not in outcome.stderr
| python | MIT | 59a406df22d1f42803182c6f4da71e3efeeeb161 | 2026-01-05T07:12:28.185048Z | false |
nvuillam/github-dependents-info | https://github.com/nvuillam/github-dependents-info/blob/59a406df22d1f42803182c6f4da71e3efeeeb161/tests/__init__.py | tests/__init__.py | python | MIT | 59a406df22d1f42803182c6f4da71e3efeeeb161 | 2026-01-05T07:12:28.185048Z | false | |
nvuillam/github-dependents-info | https://github.com/nvuillam/github-dependents-info/blob/59a406df22d1f42803182c6f4da71e3efeeeb161/tests/test_gh_dependents_info/test_gh_dependents_info.py | tests/test_gh_dependents_info/test_gh_dependents_info.py | """Tests for gh_dependents_info"""
import os
import tempfile
import uuid
from github_dependents_info import GithubDependentsInfo
# Repo exposing a single package on GitHub's dependents page.
SINGLE_PACKAGE_REPO = "nvuillam/npm-groovy-lint"
# Documentation URL injected into the README badge test below.
SINGLE_PACKAGE_TOTAL_DOC_URL = "https://nvuillam/npm-groovy-lint"
# Lower bound of public dependents expected for the single-package repo.
SINGLE_PACKAGE_REPO_PUBLIC_DEPENDENTS_MIN = 10
# Repo exposing several packages (exercises the multi-package path).
MULTI_PACKAGE_REPO = "nvuillam/github-dependents-info"
# Lower bound of public dependents expected for the multi-package repo.
MULTI_PACKAGE_REPO_PUBLIC_DEPENDENTS_MIN = 10
def test_collect_stats_single_package():
    """End-to-end: collect stats, write markdown, and update a README badge."""
    # Check generate single package stats file
    repo = SINGLE_PACKAGE_REPO
    tmp_md_file = tempfile.gettempdir() + os.path.sep + str(uuid.uuid4()) + "-test-single.md"
    gh_deps_info = GithubDependentsInfo(
        repo, debug=True, sort_key="stars", badge_color="pink", markdown_file=tmp_md_file
    )
    repo_stats = gh_deps_info.collect()
    assert repo_stats["public_dependents_number"] > SINGLE_PACKAGE_REPO_PUBLIC_DEPENDENTS_MIN
    md = gh_deps_info.build_markdown(file=tmp_md_file)
    assert md.count("\n") > SINGLE_PACKAGE_REPO_PUBLIC_DEPENDENTS_MIN
    # The custom badge color must appear in the generated markdown.
    assert "pink" in md
    with open(tmp_md_file, encoding="utf-8") as file:
        md_content = file.read()
        assert md_content.count("\n") > SINGLE_PACKAGE_REPO_PUBLIC_DEPENDENTS_MIN
    # Check Update README file: the marker-delimited section is replaced.
    tmp_readme_file = tempfile.gettempdir() + os.path.sep + str(uuid.uuid4()) + "-test-single-readme.md"
    with open(tmp_readme_file, "w", encoding="utf-8") as file:
        file.write(
            "<!-- gh-dependents-info-used-by-start -->" + "shouldBeReplaced" + "<!-- gh-dependents-info-used-by-end -->"
        )
    gh_deps_info.badges["total_doc_url"] = SINGLE_PACKAGE_TOTAL_DOC_URL
    gh_deps_info.write_badge(tmp_readme_file, "total_doc_url")
    with open(tmp_readme_file, encoding="utf-8") as file:
        readme_content = file.read()
    assert "shouldBeReplaced" not in readme_content
    assert SINGLE_PACKAGE_REPO in readme_content
def test_collect_stats_multi_package():
    """Multi-package repos aggregate dependents across all their packages."""
    info = GithubDependentsInfo(MULTI_PACKAGE_REPO, debug=True, sort_key="stars")
    stats = info.collect()
    assert stats["public_dependents_number"] > MULTI_PACKAGE_REPO_PUBLIC_DEPENDENTS_MIN
    md_path = os.path.join(tempfile.gettempdir(), f"{uuid.uuid4()}-test-multiple.md")
    markdown = info.build_markdown(file=md_path)
    assert markdown.count("\n") > MULTI_PACKAGE_REPO_PUBLIC_DEPENDENTS_MIN
    with open(md_path, encoding="utf-8") as handle:
        assert handle.read().count("\n") > MULTI_PACKAGE_REPO_PUBLIC_DEPENDENTS_MIN
def test_collect_stats_min_stars():
    """min_stars=10 filters out low-star dependents but keeps some results."""
    info = GithubDependentsInfo(SINGLE_PACKAGE_REPO, debug=True, sort_key="stars", min_stars=10)
    stats = info.collect()
    assert 1 < stats["public_dependents_number"] < 10
def test_collect_csv():
    """CSV export writes one packages file and one dependents file."""
    with tempfile.TemporaryDirectory() as csv_directory:
        info = GithubDependentsInfo(
            SINGLE_PACKAGE_REPO, debug=True, sort_key="stars", min_stars=10,
            csv_directory=csv_directory,
        )
        info.collect()
        packages_csv = os.path.join(
            csv_directory, f"packages_{SINGLE_PACKAGE_REPO.replace('/', '-')}.csv")
        assert os.path.isfile(packages_csv)
        first_package = info.packages[0]["name"].replace("/", "-")
        assert os.path.isfile(
            os.path.join(csv_directory, f"dependents_{first_package}.csv"))
def test_collect_csv_multi_package():
    """Every package with public dependents gets its own dependents CSV."""
    with tempfile.TemporaryDirectory() as csv_directory:
        info = GithubDependentsInfo(
            MULTI_PACKAGE_REPO, debug=True, sort_key="stars", min_stars=0,
            csv_directory=csv_directory,
        )
        info.collect()
        assert len(info.packages) >= 2
        assert os.path.isfile(os.path.join(
            csv_directory, f"packages_{MULTI_PACKAGE_REPO.replace('/', '-')}.csv"))
        packages_with_entries = 0
        for package in info.packages:
            if package["public_dependents_number"] <= 0:
                continue
            packages_with_entries += 1
            dependents_csv = os.path.join(
                csv_directory, f"dependents_{package['name'].replace('/', '-')}.csv")
            assert os.path.isfile(dependents_csv)
        assert packages_with_entries >= 2
def test_collect_stats_owner():
    """Filtering by owner restricts dependents to that owner's repos."""
    info = GithubDependentsInfo("nvuillam/npm-groovy-lint", debug=True, owner="nvuillam")
    stats = info.collect()
    assert stats["public_dependents_number"] < 10
def test_collect_stats_max_scraped_pages():
    """max_scraped_pages=1 caps results to one scraped page (~30 items)."""
    info = GithubDependentsInfo(
        SINGLE_PACKAGE_REPO, debug=True, sort_key="stars", max_scraped_pages=1)
    stats = info.collect()
    assert 0 < stats["public_dependents_number"] <= 30
def test_pagination_enabled_by_default():
    """Pagination defaults to on with a 500-entry page size."""
    info = GithubDependentsInfo(SINGLE_PACKAGE_REPO, debug=True)
    assert info.pagination is True
    assert info.page_size == 500
def test_pagination_disabled():
    """The pagination=False option is honoured."""
    info = GithubDependentsInfo(SINGLE_PACKAGE_REPO, debug=True, pagination=False)
    assert info.pagination is False
def test_custom_page_size():
    """A custom page_size is honoured."""
    info = GithubDependentsInfo(SINGLE_PACKAGE_REPO, debug=True, page_size=100)
    assert info.page_size == 100
def test_pagination_single_page():
    """No extra pages or navigation links when everything fits on one page."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        md_path = os.path.join(tmp_dir, "test-single-page.md")
        info = GithubDependentsInfo(
            SINGLE_PACKAGE_REPO, debug=True, sort_key="stars",
            max_scraped_pages=1, pagination=True, page_size=100,
        )
        info.collect()
        info.build_markdown(file=md_path)
        # Everything fits on one page: no page 2 and no navigation header.
        assert os.path.isfile(md_path)
        assert not os.path.isfile(os.path.join(tmp_dir, "test-single-page-2.md"))
        with open(md_path, encoding="utf-8") as handle:
            assert "Page 1 of" not in handle.read()
def test_pagination_multiple_pages():
    """A tiny page size splits results into cross-linked markdown pages."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        md_path = os.path.join(tmp_dir, "test-multi-page.md")
        # page_size=5 forces several output pages for any real repo.
        info = GithubDependentsInfo(
            SINGLE_PACKAGE_REPO, debug=True, sort_key="stars",
            max_scraped_pages=2, pagination=True, page_size=5,
        )
        info.collect()
        if info.result["public_dependents_number"] > 5:
            info.build_markdown(file=md_path)
            second_page = os.path.join(tmp_dir, "test-multi-page-2.md")
            assert os.path.isfile(md_path)
            assert os.path.isfile(second_page)
            # First page links forward to page 2.
            with open(md_path, encoding="utf-8") as handle:
                first_content = handle.read()
            assert "Page 1 of" in first_content
            assert "Next ➡️" in first_content
            assert "test-multi-page-2.md" in first_content
            # Second page links back to page 1.
            with open(second_page, encoding="utf-8") as handle:
                second_content = handle.read()
            assert "Page 2 of" in second_content
            assert "Previous" in second_content
            assert "test-multi-page.md" in second_content
def test_pagination_disabled_no_split():
    """With pagination off, everything lands in a single file."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        md_path = os.path.join(tmp_dir, "test-no-pagination.md")
        info = GithubDependentsInfo(
            SINGLE_PACKAGE_REPO, debug=True, sort_key="stars",
            max_scraped_pages=2, pagination=False, page_size=5,
        )
        info.collect()
        info.build_markdown(file=md_path)
        # One file only, even if results exceed the configured page size.
        assert os.path.isfile(md_path)
        assert not os.path.isfile(os.path.join(tmp_dir, "test-no-pagination-2.md"))
        with open(md_path, encoding="utf-8") as handle:
            assert "Page 1 of" not in handle.read()
| python | MIT | 59a406df22d1f42803182c6f4da71e3efeeeb161 | 2026-01-05T07:12:28.185048Z | false |
nvuillam/github-dependents-info | https://github.com/nvuillam/github-dependents-info/blob/59a406df22d1f42803182c6f4da71e3efeeeb161/tests/test_gh_dependents_info/__init__.py | tests/test_gh_dependents_info/__init__.py | python | MIT | 59a406df22d1f42803182c6f4da71e3efeeeb161 | 2026-01-05T07:12:28.185048Z | false | |
nvuillam/github-dependents-info | https://github.com/nvuillam/github-dependents-info/blob/59a406df22d1f42803182c6f4da71e3efeeeb161/github_dependents_info/gh_dependents_info.py | github_dependents_info/gh_dependents_info.py | import asyncio
import inspect
import json
import logging
import math
import os
import re
from collections import Counter
from pathlib import Path
import httpx
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
class GithubDependentsInfo:
    """Scraper for GitHub "Used by" dependents of a repository.

    Crawls https://github.com/<repo>/network/dependents, aggregates dependent
    repositories and stars per package, and renders JSON / markdown / badge /
    CSV outputs. Crawl progress can be persisted to CSV files and resumed.
    """

    def __init__(self, repo, **options) -> None:
        """Store crawl/output options for *repo* ("owner/name").

        All options are keyword-only and optional; see the CLI definition in
        __main__.py for their meaning and user-facing defaults.
        """
        self.repo = repo
        # Repo used for generated links; fall back to the crawled repo when
        # unset, empty, or implausibly short.
        self.outputrepo = self.repo if "outputrepo" not in options else options["outputrepo"]
        if self.outputrepo is None or self.outputrepo == "" or len(self.outputrepo) < 4:
            self.outputrepo = self.repo
        # Entry page of the dependents graph, and the href prefix that
        # identifies per-package links on that page.
        self.url_init = f"https://github.com/{self.repo}/network/dependents"
        self.url_starts_with = f"/{self.repo}/network/dependents" + "?package_id="
        self.sort_key = "name" if "sort_key" not in options else options["sort_key"]
        self.min_stars = None if "min_stars" not in options else options["min_stars"]
        self.json_output = True if "json_output" in options and options["json_output"] is True else False
        self.merge_packages = True if "merge_packages" in options and options["merge_packages"] is True else False
        self.doc_url = options["doc_url"] if "doc_url" in options else None
        self.markdown_file = options["markdown_file"] if "markdown_file" in options else None
        self.badge_color = options["badge_color"] if "badge_color" in options else "informational"
        self.debug = True if "debug" in options and options["debug"] is True else False
        # When True, ignore previously saved CSV progress and recrawl from scratch.
        self.overwrite_progress = (
            True if "overwrite_progress" in options and options["overwrite_progress"] is True else False
        )
        # Directory for CSV progress/caching; None disables persistence.
        self.csv_directory = (
            Path(options["csv_directory"])
            if ("csv_directory" in options and options["csv_directory"] is not None)
            else None
        )
        self.owner = options["owner"] if "owner" in options else None
        # 0 means "no limit" on scraped pages per package.
        self.max_scraped_pages = options["max_scraped_pages"] if "max_scraped_pages" in options else 0
        self.max_concurrent_requests = options.get("max_concurrent_requests", 10)
        self.pagination = True if "pagination" not in options else options["pagination"]
        self.page_size = options.get("page_size", 500)
        # Aggregated counters and collections, filled during collect().
        self.total_sum = 0
        self.total_public_sum = 0
        self.total_private_sum = 0
        self.total_stars_sum = 0
        self.packages = []
        self.all_public_dependent_repos = []
        self.badges = {}
        self.result = {}
        # Politeness delay between HTTP requests, plus retry/backoff tuning.
        self.time_delay = options["time_delay"] if "time_delay" in options else 0.1
        self.http_retry_attempts = options.get("http_retry_attempts", 5)
        self.http_retry_initial_delay = options.get("http_retry_initial_delay", max(self.time_delay, 1.0))
        self.http_retry_backoff = options.get("http_retry_backoff", 2.0)
        self.http_retry_max_delay = options.get("http_retry_max_delay", 60.0)
        # LLM summary options (used only when an API key is present)
        llm_summary_env = os.getenv("GITHUB_DEPENDENTS_INFO_LLM_SUMMARY")
        llm_summary_default = True
        if llm_summary_env is not None:
            llm_summary_default = llm_summary_env.strip().lower() not in {"0", "false", "no", "off"}
        self.llm_summary_enabled = options.get("llm_summary", llm_summary_default)
        self.llm_model = (
            options.get("llm_model") or os.getenv("GITHUB_DEPENDENTS_INFO_LLM_MODEL") or os.getenv("LITELLM_MODEL")
        )
        self.llm_max_repos = int(options.get("llm_max_repos", os.getenv("GITHUB_DEPENDENTS_INFO_LLM_MAX_REPOS", 500)))
        self.llm_max_words = int(options.get("llm_max_words", os.getenv("GITHUB_DEPENDENTS_INFO_LLM_MAX_WORDS", 300)))
        self.llm_timeout = float(options.get("llm_timeout", os.getenv("GITHUB_DEPENDENTS_INFO_LLM_TIMEOUT", 120)))
        # Filled by maybe_generate_llm_summary(); error kept for diagnostics.
        self.llm_model_used: str | None = None
        self.llm_summary: str | None = None
        self.llm_summary_error: str | None = None
def collect(self):
"""Main entry point - synchronous wrapper for async collection."""
return asyncio.run(self.collect_async())
    async def collect_async(self):
        """Scrape all dependents data for the repo and return the result dict.

        Resumes from CSV progress files when available (unless overwrite is
        requested), fetches every package's dependents pages, aggregates
        per-package and global stats/badges, and optionally generates an LLM
        summary before building the final result.
        """
        if self.overwrite_progress or not self.load_progress():
            await self.compute_packages_async()
        self.save_progress_packages_list()  # only saves if csv_directory is provided
        async with self.get_http_client() as client:
            # Process packages sequentially to avoid overwhelming GitHub
            for package in self.packages:
                # check if we have already computed this package on previous crawl
                if "public_dependents" in package:
                    continue
                # Build start page url
                if package["id"] is not None:
                    url = self.url_init + "?package_id=" + package["id"]
                    if self.owner:
                        url += "&owner=" + self.owner
                    if self.debug is True:
                        logging.info("Package " + package["name"] + ": browsing " + url + " ...")
                else:
                    # Single-package repos have no package_id query parameter.
                    url = self.url_init
                    if self.owner:
                        url += "?owner=" + self.owner
                    if self.debug is True:
                        logging.info("Package " + self.repo + ": browsing " + url + " ...")
                package["url"] = url
                package["public_dependent_stars"] = 0
                # Fetch all pages for this package in parallel
                result, total_dependents, total_public_stars = await self.fetch_all_package_pages(client, package)
                # Manage results for package
                if self.sort_key == "stars":
                    result = sorted(result, key=lambda d: d[self.sort_key], reverse=True)
                else:
                    result = sorted(result, key=lambda d: d[self.sort_key])
                if self.debug is True:
                    for r in result:
                        logging.info(r)
                # Build package stats
                total_public_dependents = len(result)
                package["public_dependents"] = result
                package["public_dependents_number"] = total_public_dependents
                package["public_dependent_stars"] = total_public_stars
                package["private_dependents_number"] = total_dependents - total_public_dependents
                # GitHub sometimes reports 0 as the grand total; fall back to
                # the number of public dependents actually scraped.
                package["total_dependents_number"] = (
                    total_dependents if total_dependents > 0 else total_public_dependents
                )
                # Build package badges
                package["badges"] = {}
                package["badges"]["total"] = self.build_badge(
                    "Used%20by", package["total_dependents_number"], url=package["url"]
                )
                package["badges"]["public"] = self.build_badge(
                    "Used%20by%20(public)", package["public_dependents_number"], url=package["url"]
                )
                package["badges"]["private"] = self.build_badge(
                    "Used%20by%20(private)", package["private_dependents_number"], url=package["url"]
                )
                package["badges"]["stars"] = self.build_badge(
                    "Used%20by%20(stars)", package["public_dependent_stars"], url=package["url"]
                )
                # Build total stats
                self.all_public_dependent_repos += result
                self.total_sum += package["total_dependents_number"]
                self.total_public_sum += package["public_dependents_number"]
                self.total_private_sum += package["private_dependents_number"]
                self.total_stars_sum += package["public_dependent_stars"]
                # Output
                if self.debug is True:
                    logging.info("Total for package: " + str(total_public_dependents))
                    logging.info("")
                # Save crawl progress
                self.save_progress(package)  # only saves if csv_directory is provided
        # make all_dependent_repos unique (last occurrence wins per repo name)
        self.all_public_dependent_repos = list({v["name"]: v for v in self.all_public_dependent_repos}.values())
        # Sort packages and dependent repos
        if self.sort_key == "stars":
            self.packages = sorted(self.packages, key=lambda d: d["public_dependent_stars"], reverse=True)
            self.all_public_dependent_repos = sorted(
                self.all_public_dependent_repos, key=lambda d: d["stars"], reverse=True
            )
        else:
            self.packages = sorted(self.packages, key=lambda d: d["name"])
            self.all_public_dependent_repos = sorted(self.all_public_dependent_repos, key=lambda d: d["name"])
        # Build total badges; badge link prefers --docurl, then the markdown file
        doc_url_to_use = "https://github.com/nvuillam/github-dependents-info"
        if self.doc_url is not None:
            doc_url_to_use = self.doc_url
        elif self.markdown_file is not None:
            repo_url_part = self.outputrepo if "/" in self.outputrepo else self.repo
            doc_url_to_use = f"https://github.com/{repo_url_part}/blob/main/{self.markdown_file}"
        self.badges["total_doc_url"] = self.build_badge("Used%20by", self.total_sum, url=doc_url_to_use)
        self.badges["total"] = self.build_badge("Used%20by", self.total_sum)
        self.badges["public"] = self.build_badge("Used%20by%20(public)", self.total_public_sum)
        self.badges["private"] = self.build_badge("Used%20by%20(private)", self.total_private_sum)
        self.badges["stars"] = self.build_badge("Used%20by%20(stars)", self.total_stars_sum)
        # Optional: generate an LLM summary if an API key is present (and reuse cached summary when available)
        await self.maybe_generate_llm_summary()
        # Build final result
        return self.build_result()
def _detect_llm_provider(self) -> dict | None:
"""Detect which provider API key is present and propose a lightweight default model."""
candidates: list[tuple[str, str, str]] = [
("openai", "OPENAI_API_KEY", "gpt-5-mini"),
("azure_openai", "AZURE_OPENAI_API_KEY", "gpt-5-mini"),
("anthropic", "ANTHROPIC_API_KEY", "claude-3-5-haiku-latest"),
("gemini", "GEMINI_API_KEY", "gemini-3-flash-preview"),
("google", "GOOGLE_API_KEY", "gemini-3-flash-preview"),
("mistral", "MISTRAL_API_KEY", "mistral-small-latest"),
("cohere", "COHERE_API_KEY", "command-r"),
("groq", "GROQ_API_KEY", "groq/llama-3.1-8b-instant"),
]
for provider, env_var, default_model in candidates:
if os.getenv(env_var):
return {"provider": provider, "env_var": env_var, "default_model": default_model}
return None
def _llm_api_key_present(self) -> bool:
return self._detect_llm_provider() is not None
def _llm_summary_cache_path(self) -> Path | None:
if self.csv_directory is None:
return None
return self.csv_directory / f"llm_summary_{self.repo}.json".replace("/", "-")
def load_llm_summary(self) -> bool:
"""Load cached LLM summary from progress directory if present."""
cache_path = self._llm_summary_cache_path()
if cache_path is None or not cache_path.exists():
return False
try:
with open(cache_path, encoding="utf-8") as f:
payload = json.load(f)
summary = (payload.get("summary") or "").strip()
if summary:
self.llm_summary = summary
return True
except Exception as exc:
logging.warning("Failed to load cached LLM summary: %s", exc)
return False
def save_llm_summary(self) -> None:
"""Persist LLM summary into progress directory if enabled."""
cache_path = self._llm_summary_cache_path()
if cache_path is None:
return
if not self.llm_summary:
return
try:
self.csv_directory.mkdir(parents=False, exist_ok=True)
payload = {
"repo": self.repo,
"model": self.llm_model_used or self.llm_model,
"summary": self.llm_summary,
}
with open(cache_path, "w", encoding="utf-8") as f:
json.dump(payload, f, indent=2)
except Exception as exc:
if self.debug:
logging.warning("Failed to save cached LLM summary: %s", exc)
def _prepare_llm_summary_payload(self) -> dict:
"""Prepare a compact data payload for the LLM prompt."""
repos_sorted = sorted(self.all_public_dependent_repos, key=lambda r: r.get("stars", 0), reverse=True)
repos_top = repos_sorted[: max(0, self.llm_max_repos)]
owners = [r.get("owner") for r in self.all_public_dependent_repos if r.get("owner")]
owners_counter = Counter(owners)
owners_top = owners_counter.most_common(25)
owner_stars: dict[str, int] = {}
for r in self.all_public_dependent_repos:
owner = r.get("owner")
if not owner:
continue
owner_stars[owner] = owner_stars.get(owner, 0) + int(r.get("stars", 0) or 0)
owners_top_by_stars = sorted(owner_stars.items(), key=lambda kv: kv[1], reverse=True)[:25]
return {
"source_repo": self.repo,
"packages": [p.get("name") for p in self.packages] if self.packages else [self.repo],
"totals": {
"dependents_total": self.total_sum,
"dependents_public": self.total_public_sum,
"dependents_private": self.total_private_sum,
"public_dependents_total_stars": self.total_stars_sum,
},
"top_dependents_by_stars": [
{"name": r.get("name"), "stars": int(r.get("stars", 0) or 0)} for r in repos_top
],
"top_owners_by_dependent_count": [{"owner": o, "count": c} for (o, c) in owners_top],
"top_owners_by_total_stars": [{"owner": o, "stars": s} for (o, s) in owners_top_by_stars],
}
    async def maybe_generate_llm_summary(self) -> None:
        """Generate an LLM-based summary if possible; otherwise do nothing.

        Skips silently when summaries are disabled, one is already present,
        a cached one can be reloaded, or no provider API key is configured.
        Any failure is recorded in ``self.llm_summary_error`` instead of
        raising.
        """
        if not self.llm_summary_enabled:
            return
        if self.llm_summary:
            return
        # Reuse cached summary when resuming from CSV progress
        if self.load_llm_summary():
            return
        provider_info = self._detect_llm_provider()
        if provider_info is None:
            return
        # Default model if none was provided
        model = self.llm_model or provider_info.get("default_model") or "gpt-4o-mini"
        self.llm_model_used = model
        # Add provider prefix if missing (for LiteLLM compatibility)
        if "/" not in model:
            model = f"{provider_info['provider']}/{model}"
        payload = self._prepare_llm_summary_payload()
        system_prompt = (
            "You summarize GitHub 'Used by' dependents for a package. "
            "Write concise, factual Markdown. Do not invent data. "
            "Use only the provided JSON data. "
            "Do not include headings (no H1/H2/H3/H4). "
            "Add blank lines before bullet points for readability. "
            "Avoid any mention of pagination/navigation words like 'Page', 'Next', or 'Previous'."
        )
        user_prompt = (
            "Given this JSON data, write a short summary that highlights: "
            "(1) popular companies/organizations using the package, "
            "(2) popular tools/ecosystems (infer from repo names), "
            "(3) notable high-star dependent repositories. "
            "Do not repeat data between sections (1), (2), and (3). "
            "Format as Markdown with short sentences and bullet points. "
            "Write in bold the names of companies/organizations and tools/ecosystems. "
            f"Add blank lines before bullet points for readability. Keep under {self.llm_max_words} words.\n\n"
            + json.dumps(payload, ensure_ascii=False)
        )
        try:
            # Imported lazily so litellm remains an optional dependency.
            from litellm import acompletion  # type: ignore

            response = await acompletion(
                model=model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt},
                ],
                temperature=0.2,
                timeout=self.llm_timeout,
            )
            content = None
            if hasattr(response, "choices") and response.choices:
                message = response.choices[0].message
                content = getattr(message, "content", None)
            if content:
                self.llm_summary = str(content).strip()
                self.save_llm_summary()
        except Exception as exc:
            self.llm_summary_error = str(exc)
            logging.warning("Failed to generate LLM summary: %s", exc)
        finally:
            # LiteLLM can keep async clients around. Ensure they're closed before
            # asyncio.run() tears down the event loop to avoid:
            # RuntimeWarning: coroutine 'close_litellm_async_clients' was never awaited
            await self._maybe_close_litellm_async_clients()
    async def _maybe_close_litellm_async_clients(self) -> None:
        """Best-effort cleanup for LiteLLM async clients.

        Looks up litellm's close helper (at top level or under
        ``litellm.utils``, depending on version) and awaits it whether it is a
        coroutine function or merely returns an awaitable. All failures are
        swallowed; they are logged only in debug mode.
        """
        try:
            import litellm  # type: ignore

            close_fn = getattr(litellm, "close_litellm_async_clients", None)
            if close_fn is None:
                # Older/newer litellm versions expose the helper under utils.
                utils = getattr(litellm, "utils", None)
                close_fn = getattr(utils, "close_litellm_async_clients", None) if utils else None
            if close_fn is None:
                return
            if inspect.iscoroutinefunction(close_fn):
                await close_fn()
                return
            result = close_fn()
            if inspect.isawaitable(result):
                await result
        except Exception as exc:
            if self.debug:
                logging.debug("LiteLLM async client cleanup skipped: %s", exc)
    def _extract_owner_repo(self, dependent_row):
        """Extract (owner, repo) from one dependent row of the scraped HTML.

        Tries the repository hovercard anchor first, then any "/owner/repo"
        shaped link; the owner is derived from the href path, the owner
        hovercard anchor, or a combined "owner/repo" link text, in that order.
        Returns an (owner, repo) tuple, or None when the row cannot be parsed.
        """
        repo_anchor = dependent_row.find("a", {"data-hovercard-type": "repository"})
        if repo_anchor is None:
            # Fallback: any anchor whose href looks like "/owner/repo".
            repo_anchor = dependent_row.find("a", href=re.compile(r"/[^/]+/[^/]+"))
        if repo_anchor is None:
            return None
        repo_name = (repo_anchor.text or "").strip()
        owner_name = ""
        # Derive owner (and possibly repo) from the href path segments,
        # ignoring any query string.
        href_value = (repo_anchor.get("href") or "").split("?")[0].strip("/")
        path_parts = [part for part in href_value.split("/") if part]
        if len(path_parts) >= 2:
            owner_name = path_parts[-2]
            if not repo_name:
                repo_name = path_parts[-1]
        elif len(path_parts) == 1 and not repo_name:
            repo_name = path_parts[-1]
        if not owner_name:
            # Fallback: dedicated user/organization hovercard anchor.
            owner_anchor = dependent_row.find("a", {"data-hovercard-type": re.compile("(user|organization)")})
            if owner_anchor is not None and owner_anchor.text:
                owner_name = owner_anchor.text.strip()
        if not owner_name and repo_name and "/" in repo_name:
            # Last resort: the link text itself is "owner/repo".
            splits = repo_name.split("/", 1)
            owner_name, repo_name = splits[0], splits[1]
        owner_name = owner_name.strip()
        repo_name = repo_name.strip()
        if owner_name and repo_name:
            return owner_name, repo_name
        return None
    # Get first url to see if there are multiple packages
    async def compute_packages_async(self):
        """Discover the repo's packages from the first dependents page.

        Populates ``self.packages`` with {"id", "name"} entries parsed from
        per-package links; falls back to a single pseudo-package named after
        the repo when no package links are found.
        """
        async with self.get_http_client() as client:
            semaphore = asyncio.Semaphore(self.max_concurrent_requests)
            content = await self.fetch_page(client, self.url_init, semaphore)
            soup = BeautifulSoup(content, "html.parser")
            for a in soup.find_all("a", href=True):
                if a["href"].startswith(self.url_starts_with):
                    # The package id is the value of the package_id parameter.
                    package_id = a["href"].rsplit("=", 1)[1]
                    package_name = a.find("span").text.strip()
                    # Skip unrendered client-side templates.
                    if "{{" in package_name:
                        continue
                    if self.debug is True:
                        logging.info(package_name)
                    self.packages += [{"id": package_id, "name": package_name}]
            if len(self.packages) == 0:
                self.packages = [{"id": None, "name": self.repo}]
    # Save progress during the crawl
    def save_progress(self, package):
        """Persist one crawled package (and its dependents) to CSV files.

        No-op when CSV persistence is disabled. The package row is inserted
        or updated in packages_<repo>.csv; the dependents list is written to
        dependents_<package>.csv unless it already exists (or overwrite is on).
        """
        if self.csv_directory is None:
            return
        self.csv_directory.mkdir(parents=False, exist_ok=True)
        file_path_sources = self.csv_directory / f"packages_{self.repo}.csv".replace("/", "-")
        file_path_dependents = self.csv_directory / f"dependents_{package['name']}.csv".replace("/", "-")
        # The dependents list is stored in its own CSV, not in the package row.
        keys_skip = ["public_dependents"]
        source_info = {k: v for (k, v) in package.items() if k not in keys_skip}
        dependents_info = package["public_dependents"]
        if not file_path_sources.exists():
            pd.json_normalize(source_info).to_csv(file_path_sources, mode="w", header=True)
        else:
            sources_all_df = pd.read_csv(file_path_sources, index_col=0)
            if package["name"] in sources_all_df["name"].values:
                # update the row with the new information
                sources_all_df.set_index("name", inplace=True)
                source_df = pd.json_normalize(source_info).set_index("name", drop=True)
                for column in source_df.columns:
                    # Coerce to object dtype first so mixed-type updates don't warn/fail.
                    if source_df[column].dtype == object and column in sources_all_df.columns:
                        sources_all_df[column] = sources_all_df[column].astype("object")
                    sources_all_df.loc[source_df.index, column] = source_df[column]
                sources_all_df.reset_index(inplace=True, drop=False)
                sources_all_df.to_csv(file_path_sources, mode="w", header=True)
            else:
                # New package: append without re-writing the header.
                pd.json_normalize(source_info).to_csv(file_path_sources, mode="a", header=False)
        if (not file_path_dependents.exists() or self.overwrite_progress) and len(dependents_info) > 0:
            pd.DataFrame(dependents_info).to_csv(file_path_dependents, mode="w", header=True)
def save_progress_packages_list(self):
if self.csv_directory is None:
return
self.csv_directory.mkdir(parents=False, exist_ok=True)
columns = [
"id",
"name",
"url",
"total_dependents_number",
"public_dependents_number",
"private_dependents_number",
"public_dependent_stars",
"badges.total",
"badges.public",
"badges.private",
"badges.stars",
]
file_path_sources = self.csv_directory / f"packages_{self.repo}.csv".replace("/", "-")
if not file_path_sources.exists() or self.overwrite_progress:
pd.DataFrame(self.packages, columns=columns).to_csv(file_path_sources, mode="w", header=True)
    # Load progress from previous crawl with the same repo
    def load_progress(self):
        """Resume state from CSV files written by a previous crawl.

        Returns True when a packages CSV existed and was loaded, False
        otherwise (including when CSV persistence is disabled). Totals are
        re-accumulated only for packages whose dependents CSV is present, so
        packages recrawled later are not double counted.
        """
        if self.csv_directory is None:
            return False
        file_path_sources = self.csv_directory / f"packages_{self.repo}.csv".replace("/", "-")
        if file_path_sources.exists():
            # NaN -> None so resumed rows behave like freshly scraped dicts.
            self.packages = pd.read_csv(file_path_sources, index_col=0).replace({np.nan: None}).to_dict("records")
            for i, package in enumerate(self.packages):
                file_path_dependents = self.csv_directory / f"dependents_{package['name']}.csv".replace("/", "-")
                if file_path_dependents.exists():
                    self.packages[i]["public_dependents"] = (
                        pd.read_csv(file_path_dependents, index_col=0).replace({np.nan: None}).to_dict("records")
                    )
                    self.all_public_dependent_repos += self.packages[i]["public_dependents"]
                    self.total_sum += package["total_dependents_number"] if package["total_dependents_number"] else 0
                    self.total_public_sum += (
                        package["public_dependents_number"] if package["public_dependents_number"] else 0
                    )
                    self.total_private_sum += (
                        package["private_dependents_number"] if package["private_dependents_number"] else 0
                    )
                    self.total_stars_sum += (
                        package["public_dependent_stars"] if package["public_dependent_stars"] else 0
                    )
            # Load cached summary if present
            self.load_llm_summary()
        return len(self.packages) > 0
# Build result
def build_result(self):
self.result = {
"all_public_dependent_repos": self.all_public_dependent_repos,
"total_dependents_number": self.total_sum,
"public_dependents_number": self.total_public_sum,
"private_dependents_number": self.total_private_sum,
"public_dependents_stars": self.total_stars_sum,
"badges": self.badges,
"llm_summary": self.llm_summary,
}
if self.merge_packages is False:
self.result["packages"] = (self.packages,)
return self.result
# Print output
def print_result(self):
if self.json_output is True:
print(json.dumps(self.result, indent=4))
else:
if self.llm_summary_enabled and self.llm_summary:
print("LLM Summary:\n")
print(self.llm_summary)
print("\n")
print("Total: " + str(self.total_sum))
print("Public: " + str(self.total_public_sum) + " (" + str(self.total_stars_sum) + " stars)")
print("Private: " + str(self.total_private_sum))
def build_markdown(self, **options) -> str:
# Determine if pagination should be applied
use_pagination = self.pagination
# Calculate total number of repos to potentially paginate
total_repos = 0
if self.merge_packages is True:
total_repos = len(self.all_public_dependent_repos)
else:
# For per-package display, count total across all packages
for package in self.packages:
total_repos += len(package.get("public_dependents", []))
# Determine if we need multiple pages
needs_pagination = use_pagination and total_repos > self.page_size
if needs_pagination:
# Generate paginated markdown files
return self._build_paginated_markdown(**options)
else:
# Generate a single markdown file
return self._build_single_markdown(**options)
def _build_footer(self) -> list:
"""Build the standard footer for markdown files."""
return [
"",
"_Generated using [github-dependents-info]"
"(https://github.com/nvuillam/github-dependents-info), "
"by [Nicolas Vuillamy](https://github.com/nvuillam)_",
]
def _append_summary_table(self, md_lines: list) -> None:
"""Append the multi-package summary table when applicable."""
if len(self.packages) <= 1 or self.merge_packages:
return
md_lines += [
self.badges["total"],
self.badges["public"],
self.badges["private"],
self.badges["stars"],
"",
]
md_lines += [
"| Package | Total | Public | Private | Stars |",
"| :-------- | -----: | -----: | -----: | ----: |",
]
for package in self.packages:
name = "[" + package["name"] + "](#package-" + package["name"].replace("/", "").replace("@", "") + ")"
badge_1 = package["badges"]["total"]
badge_2 = package["badges"]["public"]
badge_3 = package["badges"]["private"]
badge_4 = package["badges"]["stars"]
md_lines += [f"| {name} | {badge_1} | {badge_2} | {badge_3} | {badge_4} |"]
md_lines += [""]
def _build_single_markdown(self, **options) -> str:
"""Build a single markdown file without pagination."""
md_lines = [f"# Dependents stats for {self.repo}", ""]
# Summary table
self._append_summary_table(md_lines)
# Optional LLM summary
if self.llm_summary:
md_lines += ["## Summary", "", self.llm_summary.strip(), ""]
# Single dependents list
if self.merge_packages is True:
md_lines += [
self.badges["total"],
self.badges["public"],
self.badges["private"],
self.badges["stars"],
"",
]
md_lines += ["| Repository | Stars |", "| :-------- | -----: |"]
for repo1 in self.all_public_dependent_repos:
self.build_repo_md_line(md_lines, repo1)
# Dependents by package
else:
for package in self.packages:
md_lines += ["## Package " + package["name"], ""]
if len(package["public_dependents"]) == 0:
md_lines += ["No dependent repositories"]
else:
md_lines += [
package["badges"]["total"],
package["badges"]["public"],
package["badges"]["private"],
package["badges"]["stars"],
"",
]
md_lines += ["| Repository | Stars |", "| :-------- | -----: |"]
for repo1 in package["public_dependents"]:
self.build_repo_md_line(md_lines, repo1)
md_lines += [""]
# footer
md_lines += self._build_footer()
md_lines_str = "\n".join(md_lines)
# Write in file if requested
if "file" in options:
os.makedirs(os.path.dirname(options["file"]), exist_ok=True)
with open(options["file"], "w", encoding="utf-8") as f:
f.write(md_lines_str)
if self.json_output is False:
print("Wrote markdown file " + options["file"])
return md_lines_str
    def _build_paginated_markdown(self, **options) -> str:
        """Render dependents markdown split across several page files.

        When a ``file`` option is given, page 1 is written to that path and
        pages 2..N to "<stem>-<n><ext>" siblings; page 1's content is always
        returned as a string.
        """
        # Calculate number of pages needed
        if self.merge_packages is True:
            total_repos = len(self.all_public_dependent_repos)
        else:
            # For per-package display, we'll paginate the entire content
            total_repos = sum(len(package.get("public_dependents", [])) for package in self.packages)
        total_pages = max(1, math.ceil(total_repos / self.page_size))
        if "file" not in options:
            # If no file is specified, just return the first page as a string
            return self._build_markdown_page(1, total_pages, **options)
        base_file = options["file"]
        # Split the file path to add page suffixes
        file_path = Path(base_file)
        base_name = file_path.stem
        extension = file_path.suffix
        parent_dir = file_path.parent
        # Generate each page
        os.makedirs(parent_dir, exist_ok=True)
        for page_num in range(1, total_pages + 1):
            # Page 1 keeps the requested name; others get a "-<n>" suffix.
            if page_num == 1:
                page_file = base_file
            else:
                page_file = str(parent_dir / f"{base_name}-{page_num}{extension}")
            md_content = self._build_markdown_page(page_num, total_pages, file_path=file_path)
            with open(page_file, "w", encoding="utf-8") as f:
                f.write(md_content)
            if self.json_output is False:
                print(f"Wrote markdown file {page_file}")
        # Return the first page content
        return self._build_markdown_page(1, total_pages, file_path=file_path)
def _build_markdown_page(self, page_num: int, total_pages: int, **options) -> str:
"""Build a single page of paginated markdown."""
md_lines = [f"# Dependents stats for {self.repo}", ""]
# Summary table (only on first page)
if page_num == 1:
self._append_summary_table(md_lines)
# Optional LLM summary (only on first page)
if self.llm_summary:
md_lines += ["## Summary", "", self.llm_summary.strip(), ""]
# Calculate start and end indices for this page
start_idx = (page_num - 1) * self.page_size
| python | MIT | 59a406df22d1f42803182c6f4da71e3efeeeb161 | 2026-01-05T07:12:28.185048Z | true |
nvuillam/github-dependents-info | https://github.com/nvuillam/github-dependents-info/blob/59a406df22d1f42803182c6f4da71e3efeeeb161/github_dependents_info/__main__.py | github_dependents_info/__main__.py | import logging
from typing import Annotated
import typer
from github_dependents_info import version
from github_dependents_info.gh_dependents_info import GithubDependentsInfo
from rich.console import Console
app = typer.Typer(
name="github-dependents-info",
help="""Collect information about dependencies between a github repo and other repositories.
Results available in JSON, markdown and badges.""",
add_completion=False,
)
console = Console()
def version_callback(print_version: bool) -> None:
    """Print the version of the package."""
    if not print_version:
        return
    console.print(f"[yellow]github-dependents-info[/] version: [bold blue]{version}[/]")
    raise typer.Exit()
@app.command(name="")
def main(
repo: str = typer.Option(None, "-r", "--repo", help="Repository (ex: oxsecurity/megalinter)"),
outputrepo: str = typer.Option(None, "-z", "--outputrepo", help="Output repository (ex: oxsecurity/megalinter)"),
markdown_file: str = typer.Option(None, "-m", "--markdownfile", help="Output Markdown file path"),
badge_markdown_file: str = typer.Option(
None,
"-b",
"--badgemarkdownfile",
help="""Path to markdown file to insert/update Used By badge between tags
<!-- gh-dependents-info-used-by-start --><!-- gh-dependents-info-used-by-end -->""",
),
doc_url: str = typer.Option(
None, "-d", "--docurl", help="Hyperlink to use when clicking on badge markdown file badge"
),
badge_color: str = typer.Option("informational", "-c", "--markdownbadgecolor", help="Markdown badge color"),
sort_key: str = typer.Option(None, "-s", "--sort", help="Sort of name(default) or stars"),
min_stars: int = typer.Option(None, "-x", "--minstars", help="Filter dependents with less than X stars"),
time_delay: float = typer.Option(0.1, "-t", "--timedelay", help="seconds delayed between two requests to github"),
json_output: bool = typer.Option(
False,
"-j",
"--json",
help="Output in JSON format",
),
csv_directory: str = typer.Option(
None,
"--csvdirectory",
help="Path to directory for CSV files",
),
merge_packages: bool = typer.Option(
False,
"-p",
"--mergepackages",
help="In case of multiple packages, merges results into a single list",
),
verbose: bool = typer.Option(
False,
"--verbose",
help="Prints the version of github-dependents-info package",
),
overwrite: bool = typer.Option(
False,
"-o",
"--overwrite",
help="Overwrite existing CSV files in provided csv_directory. Default is to resume from existing progress.",
),
print_version: bool = typer.Option(
None,
"-v",
"--version",
callback=version_callback,
is_eager=True,
help="Prints the version of the github-dependents-info package.",
),
owner: str = typer.Option(
None, "-u", "--owner", help="Filter dependent repositories with a specific owner (ex: oxsecurity)"
),
max_scraped_pages: int = typer.Option(
0, "-n", "--max-scraped-pages", help="Maximum number of pages to scrape per package (0 means no limit)"
),
pagination: bool = typer.Option(
True, "--pagination/--no-pagination", help="Enable pagination to split results into multiple files"
),
page_size: int = typer.Option(500, "--page-size", help="Number of results per page when pagination is enabled"),
llm_summary: Annotated[
bool | None,
typer.Option(
"--llm-summary/--no-llm-summary",
help=(
"Generate an AI usage summary in the markdown output when an LLM API key is present "
"(default: enabled)."
),
),
] = None,
llm_model: str = typer.Option(
None,
"--llm-model",
help=(
"LiteLLM model to use for summary generation. If not set, a lightweight model is selected "
"based on the API key provider."
),
),
llm_max_repos: int = typer.Option(
None,
"--llm-max-repos",
help="Max dependent repos to include in the LLM prompt payload (default: 80).",
),
llm_max_words: int = typer.Option(
None,
"--llm-max-words",
help="Max words for the generated summary (default: 300).",
),
llm_timeout: float = typer.Option(
None,
"--llm-timeout",
help="Timeout (seconds) for the LLM call (default: 120).",
),
) -> None:
# Init logger
if verbose is True:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
# Check minimum arguments
if repo is None:
raise ValueError("--repo argument is mandatory")
else:
# Manage default values :)
if outputrepo is None:
outputrepo = repo
if sort_key is None:
sort_key = "name"
if min_stars is None:
min_stars = 0
# Create GithubDependentsInfo instance
gh_options = {
"outputrepo": outputrepo,
"debug": verbose,
"overwrite_progress": overwrite,
"sort_key": sort_key,
"min_stars": min_stars,
"json_output": json_output,
"csv_directory": csv_directory,
"badge_markdown_file": badge_markdown_file,
"doc_url": doc_url,
"markdown_file": markdown_file,
"badge_color": badge_color,
"merge_packages": merge_packages,
"owner": owner,
"time_delay": time_delay,
"max_scraped_pages": max_scraped_pages,
"pagination": pagination,
"page_size": page_size,
}
# Only pass LLM options if explicitly provided, to keep env-based defaults working
if llm_summary is not None:
gh_options["llm_summary"] = llm_summary
if llm_model is not None:
gh_options["llm_model"] = llm_model
if llm_max_repos is not None:
gh_options["llm_max_repos"] = llm_max_repos
if llm_max_words is not None:
gh_options["llm_max_words"] = llm_max_words
if llm_timeout is not None:
gh_options["llm_timeout"] = llm_timeout
gh_deps_info = GithubDependentsInfo(repo, **gh_options)
# Collect data
gh_deps_info.collect()
# Write output markdown
if markdown_file is not None:
gh_deps_info.build_markdown(file=markdown_file)
# Update existing markdown to add badge
if badge_markdown_file is not None:
gh_deps_info.write_badge(badge_markdown_file, "total_doc_url")
# Print text or json result
gh_deps_info.print_result()
if __name__ == "__main__":
app()
| python | MIT | 59a406df22d1f42803182c6f4da71e3efeeeb161 | 2026-01-05T07:12:28.185048Z | false |
nvuillam/github-dependents-info | https://github.com/nvuillam/github-dependents-info/blob/59a406df22d1f42803182c6f4da71e3efeeeb161/github_dependents_info/__init__.py | github_dependents_info/__init__.py | """
Collect information about dependencies between a github repo and other repositories.
Results available in JSON, markdown and badges.
"""
from importlib import metadata as importlib_metadata
from .gh_dependents_info import GithubDependentsInfo # noqa
def get_version() -> str:
    """Return the installed distribution version, or "unknown" when absent."""
    try:
        detected = importlib_metadata.version(__name__)
    except importlib_metadata.PackageNotFoundError:  # pragma: no cover
        detected = "unknown"
    return detected


version: str = get_version()
| python | MIT | 59a406df22d1f42803182c6f4da71e3efeeeb161 | 2026-01-05T07:12:28.185048Z | false |
rodlaf/KalshiMarketMaker | https://github.com/rodlaf/KalshiMarketMaker/blob/8cd2cb26b6c4eaa3a4ab3ffeeb9683951439a42f/mm.py | mm.py | import abc
import time
from typing import Dict, List, Tuple
import requests
import logging
import uuid
import math
class AbstractTradingAPI(abc.ABC):
    """Minimal trading-venue interface consumed by the market maker.

    Implementations (e.g. KalshiTradingAPI below) provide price discovery,
    order placement/cancellation, position and open-order queries.
    """
    @abc.abstractmethod
    def get_price(self) -> float:
        """Return the current mid-market price.

        NOTE(review): the concrete KalshiTradingAPI returns a Dict[str, float]
        keyed by side ("yes"/"no"), not a bare float — confirm intended contract.
        """
        pass
    @abc.abstractmethod
    def place_order(self, action: str, side: str, price: float, quantity: int, expiration_ts: int = None) -> str:
        """Submit a limit order; return the venue-assigned order id."""
        pass
    @abc.abstractmethod
    def cancel_order(self, order_id: str) -> bool:
        """Cancel an order by id; return True when something was reduced."""
        pass
    @abc.abstractmethod
    def get_position(self) -> int:
        """Return the current signed position size for the traded market."""
        pass
    @abc.abstractmethod
    def get_orders(self) -> List[Dict]:
        """Return the list of resting (open) orders for the traded market."""
        pass
class KalshiTradingAPI(AbstractTradingAPI):
    """Thin REST client for the Kalshi exchange, scoped to one market ticker.

    Logs in on construction and keeps a bearer token for subsequent calls.
    All network errors surface as requests.exceptions.RequestException.
    """
    def __init__(
        self,
        email: str,
        password: str,
        market_ticker: str,
        base_url: str,
        logger: logging.Logger,
    ):
        self.email = email
        self.password = password
        self.market_ticker = market_ticker
        # Set by login(); cleared by logout().
        self.token = None
        self.member_id = None
        self.logger = logger
        self.base_url = base_url
        # Authenticate eagerly so a bad credential fails fast at construction.
        self.login()
    def login(self):
        """POST credentials and cache the session bearer token."""
        url = f"{self.base_url}/login"
        data = {"email": self.email, "password": self.password}
        response = requests.post(url, json=data)
        response.raise_for_status()
        result = response.json()
        self.token = result["token"]
        self.member_id = result.get("member_id")
        self.logger.info("Successfully logged in")
    def logout(self):
        """Invalidate the session token server-side; no-op when not logged in."""
        if self.token:
            url = f"{self.base_url}/logout"
            headers = self.get_headers()
            response = requests.post(url, headers=headers)
            response.raise_for_status()
            self.token = None
            self.member_id = None
            self.logger.info("Successfully logged out")
    def get_headers(self):
        """Return auth + content-type headers for an authenticated request."""
        return {
            "Authorization": f"Bearer {self.token}",
            "Content-Type": "application/json",
        }
    def make_request(
        self, method: str, path: str, params: Dict = None, data: Dict = None
    ):
        """Issue one authenticated request; log request/response at DEBUG.

        Raises the underlying RequestException after logging the error body.
        """
        url = f"{self.base_url}{path}"
        headers = self.get_headers()
        try:
            response = requests.request(
                method, url, headers=headers, params=params, json=data
            )
            self.logger.debug(f"Request URL: {response.url}")
            self.logger.debug(f"Request headers: {response.request.headers}")
            self.logger.debug(f"Request params: {params}")
            self.logger.debug(f"Request data: {data}")
            self.logger.debug(f"Response status code: {response.status_code}")
            self.logger.debug(f"Response content: {response.text}")
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            self.logger.error(f"Request failed: {e}")
            if hasattr(e, "response") and e.response is not None:
                self.logger.error(f"Response content: {e.response.text}")
            raise
    def get_position(self) -> int:
        """Sum unsettled position entries for this ticker (signed contracts)."""
        self.logger.info("Retrieving position...")
        path = "/portfolio/positions"
        params = {"ticker": self.market_ticker, "settlement_status": "unsettled"}
        response = self.make_request("GET", path, params=params)
        positions = response.get("market_positions", [])
        total_position = 0
        for position in positions:
            if position["ticker"] == self.market_ticker:
                total_position += position["position"]
        self.logger.info(f"Current position: {total_position}")
        return total_position
    def get_price(self) -> Dict[str, float]:
        """Return mid prices in dollars for both sides: {"yes": x, "no": y}.

        NOTE(review): widens the abstract `-> float` contract — callers here
        always index by side, so this works, but the ABC should match.
        """
        self.logger.info("Retrieving market data...")
        path = f"/markets/{self.market_ticker}"
        data = self.make_request("GET", path)
        # API quotes are integer cents; convert to dollars.
        yes_bid = float(data["market"]["yes_bid"]) / 100
        yes_ask = float(data["market"]["yes_ask"]) / 100
        no_bid = float(data["market"]["no_bid"]) / 100
        no_ask = float(data["market"]["no_ask"]) / 100
        yes_mid_price = round((yes_bid + yes_ask) / 2, 2)
        no_mid_price = round((no_bid + no_ask) / 2, 2)
        self.logger.info(f"Current yes mid-market price: ${yes_mid_price:.2f}")
        self.logger.info(f"Current no mid-market price: ${no_mid_price:.2f}")
        return {"yes": yes_mid_price, "no": no_mid_price}
    def place_order(self, action: str, side: str, price: float, quantity: int, expiration_ts: int = None) -> str:
        """Place a limit order; returns the order id as a string.

        :param action: 'buy' or 'sell'
        :param side: 'yes' or 'no' (selects which price field is sent)
        :param price: limit price in dollars (converted to integer cents)
        :param expiration_ts: optional unix timestamp for order auto-expiry
        """
        self.logger.info(f"Placing {action} order for {side} side at price ${price:.2f} with quantity {quantity}...")
        path = "/portfolio/orders"
        data = {
            "ticker": self.market_ticker,
            "action": action.lower(),  # 'buy' or 'sell'
            "type": "limit",
            "side": side,  # 'yes' or 'no'
            "count": quantity,
            # Client-side idempotency key.
            "client_order_id": str(uuid.uuid4()),
        }
        price_to_send = int(price * 100)  # Convert dollars to cents
        if side == "yes":
            data["yes_price"] = price_to_send
        else:
            data["no_price"] = price_to_send
        if expiration_ts is not None:
            data["expiration_ts"] = expiration_ts
        try:
            response = self.make_request("POST", path, data=data)
            order_id = response["order"]["order_id"]
            self.logger.info(f"Placed {action} order for {side} side at price ${price:.2f} with quantity {quantity}, order ID: {order_id}")
            return str(order_id)
        except requests.exceptions.RequestException as e:
            self.logger.error(f"Failed to place order: {e}")
            if hasattr(e, 'response') and e.response is not None:
                self.logger.error(f"Response content: {e.response.text}")
            self.logger.error(f"Request data: {data}")
            raise
    def cancel_order(self, order_id: int) -> bool:
        """Cancel a resting order; True when the venue reduced any quantity.

        NOTE(review): annotated `order_id: int` but the ABC and place_order
        use str ids; callers pass whatever the API returned — confirm type.
        """
        self.logger.info(f"Canceling order with ID {order_id}...")
        path = f"/portfolio/orders/{order_id}"
        response = self.make_request("DELETE", path)
        success = response["reduced_by"] > 0
        self.logger.info(f"Canceled order with ID {order_id}, success: {success}")
        return success
    def get_orders(self) -> List[Dict]:
        """Return resting orders for this ticker as raw API dicts."""
        self.logger.info("Retrieving orders...")
        path = "/portfolio/orders"
        params = {"ticker": self.market_ticker, "status": "resting"}
        response = self.make_request("GET", path, params=params)
        orders = response.get("orders", [])
        self.logger.info(f"Retrieved {len(orders)} orders")
        return orders
class AvellanedaMarketMaker:
    """Avellaneda–Stoikov style market maker with inventory-based skewing.

    Quotes an asymmetric bid/ask around a reservation price that drifts away
    from the mid as inventory accumulates, so the book pushes the position
    back toward flat. Prices are in dollars on [0, 1] (binary contracts).
    """
    def __init__(
        self,
        logger: logging.Logger,
        api: AbstractTradingAPI,
        gamma: float,
        k: float,
        sigma: float,
        T: float,
        max_position: int,
        order_expiration: int,
        min_spread: float = 0.01,
        position_limit_buffer: float = 0.1,
        inventory_skew_factor: float = 0.01,
        trade_side: str = "yes"
    ):
        # gamma: risk aversion; k: order-book liquidity; sigma: volatility;
        # T: total run time in seconds; these feed the A-S formulas below.
        self.api = api
        self.logger = logger
        self.base_gamma = gamma
        self.k = k
        self.sigma = sigma
        self.T = T
        self.max_position = max_position
        # Seconds until a placed order auto-expires.
        self.order_expiration = order_expiration
        self.min_spread = min_spread
        self.position_limit_buffer = position_limit_buffer
        self.inventory_skew_factor = inventory_skew_factor
        # Which contract side ("yes"/"no") this instance quotes.
        self.trade_side = trade_side
    def run(self, dt: float):
        """Main loop: requote every *dt* seconds until T elapses."""
        start_time = time.time()
        while time.time() - start_time < self.T:
            current_time = time.time() - start_time
            self.logger.info(f"Running Avellaneda market maker at {current_time:.2f}")
            mid_prices = self.api.get_price()
            mid_price = mid_prices[self.trade_side]
            inventory = self.api.get_position()
            self.logger.info(f"Current mid price for {self.trade_side}: {mid_price:.4f}, Inventory: {inventory}")
            reservation_price = self.calculate_reservation_price(mid_price, inventory, current_time)
            bid_price, ask_price = self.calculate_asymmetric_quotes(mid_price, inventory, current_time)
            buy_size, sell_size = self.calculate_order_sizes(inventory)
            self.logger.info(f"Reservation price: {reservation_price:.4f}")
            self.logger.info(f"Computed desired bid: {bid_price:.4f}, ask: {ask_price:.4f}")
            self.manage_orders(bid_price, ask_price, buy_size, sell_size)
            time.sleep(dt)
        self.logger.info("Avellaneda market maker finished running")
    def calculate_asymmetric_quotes(self, mid_price: float, inventory: int, t: float) -> Tuple[float, float]:
        """Return (bid, ask) skewed so the inventory-reducing side is tighter."""
        reservation_price = self.calculate_reservation_price(mid_price, inventory, t)
        base_spread = self.calculate_optimal_spread(t, inventory)
        position_ratio = inventory / self.max_position
        # Widen/tighten aggressively with inventory (factor 3 is a tuning knob).
        spread_adjustment = base_spread * abs(position_ratio) * 3
        if inventory > 0:
            # Long: back off the bid, tighten the ask to encourage selling.
            bid_spread = base_spread / 2 + spread_adjustment
            ask_spread = max(base_spread / 2 - spread_adjustment, self.min_spread / 2)
        else:
            # Short (or flat): tighten the bid, back off the ask.
            bid_spread = max(base_spread / 2 - spread_adjustment, self.min_spread / 2)
            ask_spread = base_spread / 2 + spread_adjustment
        # Clamp to [0, 1] and never cross the mid.
        bid_price = max(0, min(mid_price, reservation_price - bid_spread))
        ask_price = min(1, max(mid_price, reservation_price + ask_spread))
        return bid_price, ask_price
    def calculate_reservation_price(self, mid_price: float, inventory: int, t: float) -> float:
        """A-S reservation price: mid shifted by inventory skew and risk term."""
        dynamic_gamma = self.calculate_dynamic_gamma(inventory)
        inventory_skew = inventory * self.inventory_skew_factor * mid_price
        # NOTE(review): skew term is ADDED while the risk term is subtracted;
        # for long inventory these push in opposite directions — confirm intent.
        return mid_price + inventory_skew - inventory * dynamic_gamma * (self.sigma**2) * (1 - t/self.T)
    def calculate_optimal_spread(self, t: float, inventory: int) -> float:
        """A-S optimal total spread, shrunk as inventory nears the limit."""
        dynamic_gamma = self.calculate_dynamic_gamma(inventory)
        base_spread = (dynamic_gamma * (self.sigma**2) * (1 - t/self.T) +
                       (2 / dynamic_gamma) * math.log(1 + (dynamic_gamma / self.k)))
        position_ratio = abs(inventory) / self.max_position
        spread_adjustment = 1 - (position_ratio ** 2)
        # The 0.01 scales the theoretical spread into price units; floor at min_spread.
        return max(base_spread * spread_adjustment * 0.01, self.min_spread)
    def calculate_dynamic_gamma(self, inventory: int) -> float:
        """Risk aversion decays exponentially as |inventory| grows."""
        position_ratio = inventory / self.max_position
        return self.base_gamma * math.exp(-abs(position_ratio))
    def calculate_order_sizes(self, inventory: int) -> Tuple[int, int]:
        """Return (buy_size, sell_size): throttle the side that adds inventory."""
        remaining_capacity = self.max_position - abs(inventory)
        buffer_size = int(self.max_position * self.position_limit_buffer)
        if inventory > 0:
            buy_size = max(1, min(buffer_size, remaining_capacity))
            # NOTE(review): sell side sized at full max_position regardless of
            # current inventory — looks aggressive; confirm this is intended.
            sell_size = max(1, self.max_position)
        else:
            buy_size = max(1, self.max_position)
            sell_size = max(1, min(buffer_size, remaining_capacity))
        return buy_size, sell_size
    def manage_orders(self, bid_price: float, ask_price: float, buy_size: int, sell_size: int):
        """Reconcile resting orders on both sides against the desired quotes."""
        current_orders = self.api.get_orders()
        self.logger.info(f"Retrieved {len(current_orders)} total orders")
        buy_orders = []
        sell_orders = []
        # Only manage orders on the side this instance trades.
        for order in current_orders:
            if order['side'] == self.trade_side:
                if order['action'] == 'buy':
                    buy_orders.append(order)
                elif order['action'] == 'sell':
                    sell_orders.append(order)
        self.logger.info(f"Current buy orders: {len(buy_orders)}")
        self.logger.info(f"Current sell orders: {len(sell_orders)}")
        # Handle buy orders
        self.handle_order_side('buy', buy_orders, bid_price, buy_size)
        # Handle sell orders
        self.handle_order_side('sell', sell_orders, ask_price, sell_size)
    def handle_order_side(self, action: str, orders: List[Dict], desired_price: float, desired_size: int):
        """Keep at most one matching resting order; cancel the rest; place if needed.

        A resting order is "matching" when its price is within $0.01 of the
        desired price and its remaining size equals the desired size.
        """
        keep_order = None
        for order in orders:
            # Resting order prices come back in integer cents.
            current_price = float(order['yes_price']) / 100 if self.trade_side == 'yes' else float(order['no_price']) / 100
            if keep_order is None and abs(current_price - desired_price) < 0.01 and order['remaining_count'] == desired_size:
                keep_order = order
                self.logger.info(f"Keeping existing {action} order. ID: {order['order_id']}, Price: {current_price:.4f}")
            else:
                self.logger.info(f"Cancelling extraneous {action} order. ID: {order['order_id']}, Price: {current_price:.4f}")
                self.api.cancel_order(order['order_id'])
        # Re-fetch the mid so the improve-on-market check uses fresh data.
        current_price = self.api.get_price()[self.trade_side]
        if keep_order is None:
            # Only quote passively: bids below mid, asks above mid.
            if (action == 'buy' and desired_price < current_price) or (action == 'sell' and desired_price > current_price):
                try:
                    order_id = self.api.place_order(action, self.trade_side, desired_price, desired_size, int(time.time()) + self.order_expiration)
                    self.logger.info(f"Placed new {action} order. ID: {order_id}, Price: {desired_price:.4f}, Size: {desired_size}")
                except Exception as e:
                    self.logger.error(f"Failed to place {action} order: {str(e)}")
            else:
                self.logger.info(f"Skipped placing {action} order. Desired price {desired_price:.4f} does not improve on current price {current_price:.4f}")
| python | MIT | 8cd2cb26b6c4eaa3a4ab3ffeeb9683951439a42f | 2026-01-05T07:12:29.367532Z | false |
rodlaf/KalshiMarketMaker | https://github.com/rodlaf/KalshiMarketMaker/blob/8cd2cb26b6c4eaa3a4ab3ffeeb9683951439a42f/runner.py | runner.py | import argparse
import logging
from concurrent.futures import ThreadPoolExecutor
import yaml
from dotenv import load_dotenv
import os
from typing import Dict
from mm import KalshiTradingAPI, AvellanedaMarketMaker
def load_config(config_file):
    """Parse the YAML strategy-configuration file and return its contents."""
    with open(config_file) as handle:
        return yaml.safe_load(handle)
def create_api(api_config, logger):
    """Build a KalshiTradingAPI for one strategy.

    Credentials and base URL come from environment variables; the market
    ticker comes from the per-strategy config section.
    """
    env_settings = {
        "email": os.getenv("KALSHI_EMAIL"),
        "password": os.getenv("KALSHI_PASSWORD"),
        "base_url": os.getenv("KALSHI_BASE_URL"),
    }
    return KalshiTradingAPI(
        market_ticker=api_config["market_ticker"],
        logger=logger,
        **env_settings,
    )
def create_market_maker(mm_config, api, logger):
    """Instantiate an AvellanedaMarketMaker, filling unset options with defaults."""
    defaults = {
        "gamma": 0.1,
        "k": 1.5,
        "sigma": 0.5,
        "T": 3600,
        "max_position": 100,
        "order_expiration": 300,
        "min_spread": 0.01,
        "position_limit_buffer": 0.1,
        "inventory_skew_factor": 0.01,
        "trade_side": "yes",
    }
    options = {key: mm_config.get(key, fallback) for key, fallback in defaults.items()}
    return AvellanedaMarketMaker(logger=logger, api=api, **options)
def run_strategy(config_name: str, config: Dict):
    """Run one configured market-making strategy to completion.

    Sets up a dedicated logger (file + console), builds the API client and
    market maker from *config*, runs the quoting loop, and always logs out.
    Intended to be submitted to a thread pool (one thread per strategy).
    """
    # Create a logger for this specific strategy
    logger = logging.getLogger(f"Strategy_{config_name}")
    logger.setLevel(config.get('log_level', 'INFO'))
    # Create file handler
    fh = logging.FileHandler(f"{config_name}.log")
    fh.setLevel(config.get('log_level', 'INFO'))
    # Create console handler
    ch = logging.StreamHandler()
    ch.setLevel(config.get('log_level', 'INFO'))
    # Create formatter
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # Add handlers to logger
    # NOTE(review): calling this twice with the same config_name would attach
    # duplicate handlers to the same named logger (doubled log lines).
    logger.addHandler(fh)
    logger.addHandler(ch)
    logger.info(f"Starting strategy: {config_name}")
    # Create API (logs in on construction)
    api = create_api(config['api'], logger)
    # Create market maker
    market_maker = create_market_maker(config['market_maker'], api, logger)
    try:
        # Run market maker; dt is the requote interval in seconds
        market_maker.run(config.get('dt', 1.0))
    except KeyboardInterrupt:
        logger.info("Market maker stopped by user")
    except Exception as e:
        logger.error(f"An error occurred: {str(e)}")
    finally:
        # Ensure logout happens even if an exception occurs
        api.logout()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Kalshi Market Making Algorithm")
    parser.add_argument("--config", type=str, default="config.yaml", help="Path to config file")
    args = parser.parse_args()
    # Load all configurations (top-level YAML keys = strategy names)
    configs = load_config(args.config)
    # Load environment variables (KALSHI_EMAIL / KALSHI_PASSWORD / KALSHI_BASE_URL)
    load_dotenv()
    # Print the name of every strategy being run
    print("Starting the following strategies:")
    for config_name in configs:
        print(f"- {config_name}")
    # Run all strategies in parallel using ThreadPoolExecutor;
    # the with-block waits for every strategy to finish before exiting.
    with ThreadPoolExecutor(max_workers=len(configs)) as executor:
        for config_name, config in configs.items():
            executor.submit(run_strategy, config_name, config)
SmartLi8/stella | https://github.com/SmartLi8/stella/blob/8023dfa91ee2a74511e091902ceba5a3911cc610/run_train.py | run_train.py | # coding=utf8
import os
import logging
import yaml
import torch
from torch.utils.data import DataLoader
from os.path import join
from copy import deepcopy
from transformers import AutoTokenizer, AutoModel, BertModel
from transformers import TrainingArguments, Trainer
import shutil
os.environ["WANDB_DISABLED"] = "true"
transformers_logger = logging.getLogger("transformers")
transformers_logger.setLevel(logging.WARNING)
import torch.nn.functional as F
from loguru import logger
from src import (
InBatchDataSet,
in_batch_collate_fn,
PairDataSet,
pair_collate_fn,
VecDataSet,
get_mean_params, SaveModelCallBack, cosent_loss
)
class MyTrainer(Trainer):
    """HF Trainer with a custom contrastive loss for embedding training.

    Batches arrive pre-collated as [name, tokenized_chunk(s)..., q_num]
    (see in_batch_collate_fn / pair_collate_fn). The *name* prefix selects
    the loss: in-batch InfoNCE or pair-wise CoSENT. An EWC-style L2 anchor
    to the original weights is always computed and optionally added.

    NOTE(review): relies on module-level globals set in __main__:
    model_name, in_batch_ratio, cosent_ratio, ewc_ratio, train_method,
    original_weight — this class is not importable standalone.
    """
    def compute_loss(self, model, inputs, return_outputs=False):
        def get_vecs_e5(ipt):
            # Mean-pool the last hidden state over non-padding tokens, then L2-normalize.
            attention_mask = ipt["attention_mask"]
            model_output = model(**ipt)
            last_hidden = model_output.last_hidden_state.masked_fill(~attention_mask[..., None].bool(), 0.0)
            vectors = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
            vectors = F.normalize(vectors, 2.0, dim=1)
            return vectors
        def get_vecs_bge(ipt):
            # CLS-token embedding, L2-normalized.
            # NOTE(review): uses self.model while get_vecs_e5 uses the *model*
            # argument — under DDP these may differ (wrapped vs unwrapped).
            token_embeddings = self.model(**ipt)[0]
            vectors = token_embeddings[:, 0, :].squeeze(1)  # bsz*h
            vectors = F.normalize(vectors, 2.0, dim=1)
            return vectors
        # Unpack the collate output: [name, chunk..., q_num]
        q_num = inputs[-1]
        name = inputs[0]
        inputs = inputs[1:-1]
        in_batch_loss, pair_loss = torch.tensor(0.0), torch.tensor(0.0)
        # Step 1: in-batch InfoNCE loss (queries vs passages + hard negatives)
        if "in_batch" in name:
            if model_name in ["e5", "piccolo"]:
                vectors = [get_vecs_e5(ipt) for ipt in inputs]
            elif model_name in ["bge", "simbert", "simbert_hp"]:
                vectors = [get_vecs_bge(ipt) for ipt in inputs]
            else:
                raise NotImplementedError()
            vectors = torch.cat(vectors, dim=0)
            # First q_num rows are queries; the rest are passages (+ hard negs).
            vecs1, vecs2 = vectors[:q_num, :], vectors[q_num:, :]
            logits = torch.mm(vecs1, vecs2.t())
            print("logits.shape", logits.shape)
            # Diagonal pairing: query i matches passage i.
            LABEL = torch.LongTensor(list(range(q_num))).to(vectors.device)
            in_batch_loss = F.cross_entropy(logits * in_batch_ratio, LABEL)
        # Step 2: pair-wise CoSENT loss on labeled pairs
        elif "pair" in name:
            neg_pos_idxs = inputs[-1]
            inputs = inputs[:-1]
            if model_name in ["e5", "piccolo"]:
                vectors = [get_vecs_e5(ipt) for ipt in inputs]
            elif model_name in ["bge", "simbert", "simbert_hp"]:
                vectors = [get_vecs_bge(ipt) for ipt in inputs]
            else:
                raise NotImplementedError()
            vectors = torch.cat(vectors, dim=0)
            vecs1, vecs2 = vectors[:q_num, :], vectors[q_num:, :]
            pred_sims = F.cosine_similarity(vecs1, vecs2)
            pair_loss = cosent_loss(
                neg_pos_idxs=neg_pos_idxs,
                pred_sims=pred_sims,
                cosent_ratio=cosent_ratio,
                zero_data=torch.tensor([0.0]).to(vectors.device)
            )
        # Step 3: EWC-style anchor — squared drift from the pretrained weights
        losses = []
        for n, p in model.named_parameters():
            # Strip the DDP "module." prefix to match the snapshot's keys.
            mean = original_weight[n.replace("module.", "")]
            if "position_embeddings.weight" in n:
                # Position table may have been extended; only anchor the
                # original first 512 rows.
                print(p.shape, mean.shape)
                losses.append(
                    ((p - mean)[:512, :] ** 2).sum()
                )
            else:
                losses.append(
                    ((p - mean) ** 2).sum()
                )
        ewc_loss = sum(losses)
        final_loss = in_batch_loss + pair_loss
        if "ewc" in train_method:
            final_loss += (ewc_loss * ewc_ratio)
        if "in_batch" in name:
            logger.info(
                f"step-{self.state.global_step}, {name}-loss:{in_batch_loss.item()}, ewc_loss:{ewc_loss.item()}"
            )
        else:
            logger.info(
                f"step-{self.state.global_step}, {name}-loss:{pair_loss.item()}, ewc_loss:{ewc_loss.item()}"
            )
        return (final_loss, None) if return_outputs else final_loss
if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Maps raw pair labels (binary or NLI) to the integer codes used by
    # pair_collate_fn when building the CoSENT ranking matrix.
    pair_label_map = {
        "0": 0,
        "1": 1,
        "contradiction": 2,
        "neutral": 3,
        "entailment": 4,
    }
    # model_name -> [model class, tokenizer class, collate fn]
    MODEL_NAME_INFO = {
        "e5": [AutoModel, AutoTokenizer, in_batch_collate_fn],
        "bge": [AutoModel, AutoTokenizer, in_batch_collate_fn],
        "piccolo": [BertModel, AutoTokenizer, in_batch_collate_fn],
        "simbert_hp": [BertModel, AutoTokenizer, in_batch_collate_fn],
    }
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    # Load the training configuration and bind its values.
    config_path = 'conf.yml'
    with open(config_path, "r", encoding="utf8") as fr:
        conf = yaml.safe_load(fr)
    # args of hf trainer
    hf_args = deepcopy(conf["train_args"])
    in_batch_bsz = conf["in_batch_bsz"]
    pair_bsz = conf["pair_bsz"]
    # Keep the deepspeed sub-config in sync with the HF trainer settings,
    # or drop it entirely when deepspeed is disabled.
    if hf_args.get("deepspeed") and conf["use_deepspeed"]:
        hf_args["deepspeed"]["gradient_accumulation_steps"] = hf_args["gradient_accumulation_steps"]
        hf_args["deepspeed"]["train_micro_batch_size_per_gpu"] = hf_args["per_device_train_batch_size"]
        hf_args["deepspeed"]["optimizer"]["params"]["lr"] = hf_args["learning_rate"]
    else:
        hf_args.pop("deepspeed", None)
    model_name = conf["model_name"]
    grad_checkpoint = hf_args["gradient_checkpointing"]
    task_name = conf["task_name"]
    in_batch_train_paths = conf["in_batch_train_paths"]
    pair_train_paths = conf["pair_train_paths"]
    loader_idxs = conf["loader_idxs"]
    max_length = conf["max_length"]
    model_dir = conf["model_dir"]
    train_method = conf["train_method"]
    ewc_ratio = conf["ewc_ratio"]
    cosent_ratio = conf["cosent_ratio"]
    in_batch_ratio = conf["in_batch_ratio"]
    hard_neg_ratio = conf["hard_neg_ratio"]
    output_dir = hf_args["output_dir"]
    # Build a fresh, versioned output directory (single-process runs only).
    # NOTE: "auto_ouput_dir" spelling matches the key in conf.yml.
    if world_size == 1 and conf["auto_ouput_dir"]:
        version = 1
        save_dir = join(output_dir,
                        f"{model_name}_{task_name}_bsz{in_batch_bsz}_len{max_length}_{train_method}_v{version}")
        while os.path.exists(save_dir):
            version += 1
            save_dir = join(output_dir,
                            f"{model_name}_{task_name}_bsz{in_batch_bsz}_len{max_length}_{train_method}_v{version}")
        output_dir = save_dir
        hf_args["output_dir"] = output_dir
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
    else:
        if not os.path.exists(hf_args["output_dir"]):
            os.makedirs(hf_args["output_dir"], exist_ok=True)
    # Copy the config into the run directory (rank 0 only) for reproducibility.
    if local_rank == 0:
        shutil.copy('conf.yml', os.path.join(output_dir, "train_config.yml"))
    # Route loguru output into the run directory.
    logger.add(join(output_dir, "train_log.txt"), level="INFO", compression="zip", rotation="500 MB",
               format="{message}")
    # In-batch negative-sampling datasets.
    # NOTE: the collate lambdas close over `tokenizer`, which is assigned
    # below — late binding makes this work because loaders are only iterated
    # during training, after the tokenizer exists.
    in_batch_data_loaders = []
    if in_batch_train_paths:
        for data_name, data_paths in in_batch_train_paths.items():
            logger.info(f"添加数据迭代器,data_name:{data_name}, data_paths:{data_paths}")
            in_batch_data_loaders.append(
                DataLoader(
                    dataset=InBatchDataSet(data_paths=data_paths, data_name=data_name, model_name=model_name),
                    shuffle=True,
                    collate_fn=lambda x: in_batch_collate_fn(x, tokenizer, max_length),
                    drop_last=True,
                    batch_size=in_batch_bsz,
                    num_workers=2
                )
            )
    # Labeled-pair (CoSENT) datasets.
    pair_data_loaders = []
    if pair_train_paths:
        for data_name, data_paths in pair_train_paths.items():
            logger.info(f"添加数据迭代器,data_name:{data_name}, data_paths:{data_paths}")
            pair_data_loaders.append(
                DataLoader(
                    dataset=PairDataSet(data_paths=data_paths, data_name=data_name, model_name=model_name,
                                        pair_label_map=pair_label_map),
                    shuffle=True,
                    collate_fn=lambda x: pair_collate_fn(x, tokenizer, max_length),
                    drop_last=True,
                    batch_size=pair_bsz,
                    num_workers=2
                )
            )
    # Load model and tokenizer; fp16 weights to save memory.
    model = MODEL_NAME_INFO[model_name][0].from_pretrained(
        model_dir,
        trust_remote_code=True,
        torch_dtype=torch.float16
    )
    model.to(device)
    # Snapshot pretrained weights as the EWC anchor before any updates.
    original_weight = get_mean_params(model)
    tokenizer = MODEL_NAME_INFO[model_name][1].from_pretrained(model_dir, trust_remote_code=True)
    if grad_checkpoint:
        try:
            model.gradient_checkpointing_enable()
        except Exception:
            # Fixed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            logger.error("gradient_checkpointing failed")
    model.train()
    # Build the HF trainer and start training.
    args = TrainingArguments(
        **hf_args,
        torch_compile=torch.__version__.startswith("2"),
        prediction_loss_only=True
    )
    if hf_args["gradient_checkpointing"]:
        args.ddp_find_unused_parameters = False
    # save model by call back, do not need official save function
    args.save_strategy = "no"
    trainer = MyTrainer(
        model=model,
        args=args,
        # Batches are already fully collated by the DataLoaders; unwrap.
        data_collator=lambda x: x[0],
        train_dataset=VecDataSet(in_batch_data_loaders + pair_data_loaders, loader_idxs),
        tokenizer=tokenizer,
        callbacks=[SaveModelCallBack(output_dir=output_dir, save_steps=conf["save_steps"], local_rank=local_rank)]
    )
    trainer.train()
| python | Apache-2.0 | 8023dfa91ee2a74511e091902ceba5a3911cc610 | 2026-01-05T07:12:29.713333Z | false |
SmartLi8/stella | https://github.com/SmartLi8/stella/blob/8023dfa91ee2a74511e091902ceba5a3911cc610/src/run_eval_stella.py | src/run_eval_stella.py | """ eval for stella model """
import numpy as np
import torch
import random
import argparse
import functools
from typing import List, Dict
from sentence_transformers import SentenceTransformer
from mteb import MTEB, DRESModel
from C_MTEB.tasks import *
# CLI: model to evaluate, where to write results, and max sequence length.
parser = argparse.ArgumentParser(description='evaluation for CMTEB')
parser.add_argument('--model_name', default='bert-base-uncased',
                    type=str, help='which model to use')
parser.add_argument('--output_dir', default='zh_results/',
                    type=str, help='output directory')
parser.add_argument('--max_len', default=512, type=int, help='max length')
args = parser.parse_args()
class RetrievalModel(DRESModel):
    """DRES wrapper that adds stella's retrieval prompts before encoding.

    Queries are prefixed with "查询: " (query) and corpus passages with
    "结果: " (result), the instruction format stella was trained with.
    """
    def __init__(self, encoder, **kwargs):
        # NOTE(review): does not call super().__init__ and ignores **kwargs —
        # confirm DRESModel needs no base initialization here.
        self.encoder = encoder
    def encode_queries(self, queries: List[str], **kwargs) -> np.ndarray:
        """Encode search queries with the query prompt prepended."""
        input_texts = ['查询: {}'.format(q) for q in queries]
        return self._do_encode(input_texts)
    def encode_corpus(self, corpus: List[Dict[str, str]], **kwargs) -> np.ndarray:
        """Encode corpus docs (optional title + text) with the result prompt."""
        input_texts = ['{} {}'.format(doc.get('title', ''), doc['text']).strip() for doc in corpus]
        input_texts = ['结果: {}'.format(t) for t in input_texts]
        return self._do_encode(input_texts)
    @torch.no_grad()
    def _do_encode(self, input_texts: List[str]) -> np.ndarray:
        """Batch-encode texts into L2-normalized numpy embeddings."""
        return self.encoder.encode(
            sentences=input_texts,
            batch_size=256,
            normalize_embeddings=True,
            convert_to_numpy=True
        )
# Retrieval tasks that need the prompt-adding RetrievalModel wrapper.
TASKS_WITH_PROMPTS = ["T2Retrieval", "MMarcoRetrieval", "DuRetrieval", "CovidRetrieval", "CmedqaRetrieval",
                      "EcomRetrieval", "MedicalRetrieval", "VideoRetrieval"]
# Usage: python run_eval_stella.py --model_name stella-base-zh --output_dir ./zh_results/stella-base
if __name__ == '__main__':
    # Load the model; half precision for faster inference.
    encoder = SentenceTransformer(args.model_name).half()
    encoder.encode = functools.partial(encoder.encode, normalize_embeddings=True)
    encoder.max_seq_length = int(args.max_len)
    # Collect all Chinese CMTEB task names; shuffle so parallel runs
    # (sharing output_dir with overwrite_results=False) spread the work.
    task_names = [t.description["name"] for t in MTEB(task_langs=['zh', 'zh-CN']).tasks]
    random.shuffle(task_names)
    print("task数量", len(task_names))
    print("task_names", task_names)
    for task in task_names:
        evaluation = MTEB(tasks=[task], task_langs=['zh', 'zh-CN'])
        if task in TASKS_WITH_PROMPTS:
            evaluation.run(RetrievalModel(encoder), output_folder=args.output_dir, overwrite_results=False)
        else:
            evaluation.run(encoder, output_folder=args.output_dir, overwrite_results=False)
| python | Apache-2.0 | 8023dfa91ee2a74511e091902ceba5a3911cc610 | 2026-01-05T07:12:29.713333Z | false |
SmartLi8/stella | https://github.com/SmartLi8/stella/blob/8023dfa91ee2a74511e091902ceba5a3911cc610/src/utils.py | src/utils.py | # coding=utf8
from transformers import TrainingArguments
from transformers import TrainerCallback, TrainerControl, TrainerState
import torch
from os.path import join
def get_mean_params(model):
    """Snapshot every parameter of *model*.

    Returns a dict mapping parameter name to a detached clone of its current
    weights, later used as the anchor point for the EWC penalty.
    """
    return {name: param.data.clone() for name, param in model.named_parameters()}
def cosent_loss(neg_pos_idxs, pred_sims, cosent_ratio, zero_data):
    """CoSENT ranking loss.

    For every (i, j) where neg_pos_idxs[i][j] == 1 (pair i should score lower
    than pair j), penalize sim_i exceeding sim_j via
    log(1 + sum(exp(ratio * (sim_i - sim_j)))). Disallowed cells are pushed to
    -1e12 so they vanish under exp; *zero_data* contributes the "+1" term.
    """
    scaled = pred_sims * cosent_ratio
    # All pairwise differences scaled_i - scaled_j.
    diffs = scaled[:, None] - scaled[None, :]
    # Mask out cells that are not (negative, positive) orderings.
    masked = diffs - (1 - neg_pos_idxs) * 1e12
    stacked = torch.cat((zero_data, masked.view(-1)), dim=0)
    return torch.logsumexp(stacked, dim=0)
class SaveModelCallBack(TrainerCallback):
    """Trainer callback that checkpoints model + tokenizer every N steps.

    Only rank 0 writes; checkpoints land in
    <output_dir>/epoch-<e>_globalStep-<s>. Used because the trainer's own
    save strategy is disabled ("no") in run_train.
    """
    def __init__(self, output_dir, save_steps, local_rank):
        self.customized_save_steps = save_steps
        self.customized_output_dir = output_dir
        self.local_rank = local_rank
    def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """Write a checkpoint whenever the configured save interval is hit."""
        if self.local_rank != 0 or self.customized_save_steps <= 0:
            return
        step = state.global_step
        if step <= 0 or step % self.customized_save_steps != 0:
            return
        epoch = int(state.epoch)
        save_dir = join(self.customized_output_dir, f"epoch-{epoch}_globalStep-{step}")
        # Huge max_shard_size keeps the checkpoint in a single file.
        kwargs["model"].save_pretrained(save_dir, max_shard_size="900000MB")
        kwargs["tokenizer"].save_pretrained(save_dir)
        kwargs["tokenizer"].save_vocabulary(save_dir)
| python | Apache-2.0 | 8023dfa91ee2a74511e091902ceba5a3911cc610 | 2026-01-05T07:12:29.713333Z | false |
SmartLi8/stella | https://github.com/SmartLi8/stella/blob/8023dfa91ee2a74511e091902ceba5a3911cc610/src/__init__.py | src/__init__.py | from .data_collator import (
InBatchDataSet,
in_batch_collate_fn,
PairDataSet,
pair_collate_fn,
comb_data_loader,
VecDataSet
)
from .utils import cosent_loss, get_mean_params, SaveModelCallBack
| python | Apache-2.0 | 8023dfa91ee2a74511e091902ceba5a3911cc610 | 2026-01-05T07:12:29.713333Z | false |
SmartLi8/stella | https://github.com/SmartLi8/stella/blob/8023dfa91ee2a74511e091902ceba5a3911cc610/src/data_collator.py | src/data_collator.py | # coding=utf8
import collections
import random
import torch
from torch.utils.data import Dataset
import json
from loguru import logger
class InBatchDataSet(Dataset):
    """JSONL dataset of (txt1, txt2, hard_negs) triples for in-batch training.

    Each line must contain "txt1" and "txt2" and may contain "hard_negs".
    Samples are stored as [data_name, txt1, txt2, hard_negs]; for bge/piccolo
    models, short-query/long-passage pairs get the model's retrieval
    instruction prepended.
    """
    def __init__(self, data_paths: str, data_name: str, model_name: str):
        # NOTE(review): data_paths is annotated str but iterated as a list of
        # paths — callers pass a list; annotation should be List[str].
        self.data = []
        self.data_paths = data_paths
        self.data_name = data_name
        self.model_name = model_name
        self.load_data()
    def load_data(self):
        """Read all JSONL files and normalize rows into sample lists."""
        for data_path in self.data_paths:
            with open(data_path, "r", encoding="utf8") as fr:
                single_data = [json.loads(line) for line in fr][:]
            self.data.extend(single_data)
        self.data = [[item["txt1"], item["txt2"], item.get("hard_negs", [])] for item in self.data]
        if self.model_name in ["bge", "piccolo"]:
            # These models expect a retrieval instruction on q-p style data.
            logger.info(f"检测到是{self.model_name}模型,对于q-p数据前面添加特定的instruction")
            num_added = 0
            # Heuristic: short txt1 + long txt2 looks like a query/passage pair.
            for item in self.data:
                txt1, txt2 = item[:2]
                if len(txt1) < 32 and len(txt2) > 64:
                    num_added += 1
                    if self.model_name == "piccolo":
                        item[0] = f"查询: {txt1}"
                        item[1] = f"结果: {txt2}"
                    else:
                        item[0] = f"为这个句子生成表示以用于检索相关文章:{txt1}"
            logger.info(f"数据总量:{len(self.data)},添加特定指示的数据量:{num_added}")
        # Prepend the dataset name so the collate fn can tag the batch.
        self.data = [[self.data_name] + i for i in self.data]
    def __len__(self):
        return len(self.data)
    def __getitem__(self, item):
        """Return sample *item*: [data_name, txt1, txt2, hard_negs]."""
        return self.data[item]
def in_batch_collate_fn(batch, tokenizer, max_length, hard_neg_ratio=0.2):
    """Collate function for in-batch negative sampling.

    Deduplicates query/passage pairs, optionally samples hard negatives, and
    tokenizes everything in chunks of 32 texts.

    :param batch: list of [data_name, txt1, txt2, hard_negs] samples
    :param tokenizer: tokenizer providing batch_encode_plus
    :param max_length: maximum sequence length for truncation
    :param hard_neg_ratio: probability of sampling one hard negative per
        example. Previously hard-coded to 0.2 even though conf.yml exposes
        "hard_neg_ratio"; now a backward-compatible parameter (default 0.2).
    :return: ["in_batch-<data_name>", tokenized_chunk..., num_query_pairs]
    """
    data_name = batch[0][0]
    batch = [item[1:] for item in batch]
    # Sample at most one hard negative per example, with probability hard_neg_ratio.
    hard_negs = [random.choice(negs) for _, _, negs in batch if negs and random.random() < hard_neg_ratio]
    batch = [[t1, t2] for t1, t2, _ in batch]
    # Deduplicate queries (dict keyed on q), then passages (dict keyed on p).
    batch = list(dict(batch).items())
    batch = [item[::-1] for item in batch]
    batch = list(dict(batch).items())
    batch = [item[::-1] for item in batch]
    # Drop pairs whose query also appears as some passage (or vice versa) so
    # in-batch negatives are never accidental positives. Pairs with q == p are
    # kept unchanged (preserves the original behavior, which the author had
    # flagged with "???").
    new_batch = []
    q_set = collections.Counter([i[0] for i in batch])
    p_set = collections.Counter([i[1] for i in batch])
    for q, p in batch:
        if q != p:
            if q not in p_set and p not in q_set:
                new_batch.append([q, p])
        else:
            new_batch.append([q, p])
    batch = new_batch
    # Hard negatives must not collide with any positive text.
    pos_texts = set([j for item in batch for j in item])
    hard_negs = [i for i in hard_negs if i not in pos_texts]
    all_texts = [item[0] for item in batch] + [item[1] for item in batch] + hard_negs
    # Tokenize in chunks of 32 to bound per-chunk padding length/memory.
    ipts = []
    for start in range(0, len(all_texts), 32):
        ipt = tokenizer.batch_encode_plus(
            all_texts[start:start + 32], padding="longest", truncation=True, max_length=max_length, return_tensors="pt")
        ipts.append(ipt)
    # Trailing element tells the loss how many query rows there are.
    ipts.append(len(batch))
    return [f"in_batch-{data_name}"] + ipts
class PairDataSet(Dataset):
    """JSONL dataset of labeled text pairs for CoSENT training.

    Each line must contain "txt1", "txt2" and "label"; labels are remapped to
    integer codes through *pair_label_map*, and rows with unknown labels are
    dropped. Samples are stored as [data_name, txt1, txt2, label_code].
    """
    def __init__(self, data_paths: str, data_name: str, model_name: str, pair_label_map: dict):
        # NOTE(review): data_paths is annotated str but iterated as a list of
        # paths — annotation should be List[str].
        self.data = []
        self.data_paths = data_paths
        self.data_name = data_name
        self.model_name = model_name
        self.pair_label_map = pair_label_map
        self.load_data()
    def load_data(self):
        """Read all JSONL files, remap labels, and drop unmapped rows."""
        for data_path in self.data_paths:
            with open(data_path, "r", encoding="utf8") as fr:
                single_data = [json.loads(line) for line in fr]
            self.data.extend(single_data)
        self.data = [
            [item["txt1"], item["txt2"], self.pair_label_map[str(item["label"])]]
            for item in self.data if str(item["label"]) in self.pair_label_map
        ]
        # Prepend the dataset name so the collate fn can tag the batch.
        self.data = [[self.data_name] + i for i in self.data]
        logger.info(f"PairDataSet数据总量:{len(self.data)}")
    def __len__(self):
        return len(self.data)
    def __getitem__(self, item):
        """Return sample *item*: [data_name, txt1, txt2, label_code]."""
        return self.data[item]
def pair_collate_fn(batch, tokenizer, max_length):
    """Collate function for labeled pair data (CoSENT-style).

    Tokenizes all first texts followed by all second texts (in chunks of 32)
    and builds an (n x n) 0/1 matrix where entry [i][j] == 1 means pair i
    should be ranked LOWER (less similar) than pair j.

    :return: ["pair-<data_name>", tokenized_chunk..., neg_pos_matrix, n]
    """
    data_name = batch[0][0]
    samples = [item[1:] for item in batch]
    first_texts = [s[0] for s in samples]
    second_texts = [s[1] for s in samples]
    all_texts = first_texts + second_texts
    # Tokenize in chunks of 32 to bound per-chunk padding length/memory.
    ipts = []
    for start in range(0, len(all_texts), 32):
        encoded = tokenizer.batch_encode_plus(
            all_texts[start:start + 32], padding="longest", truncation=True, max_length=max_length, return_tensors="pt")
        ipts.append(encoded)
    labels = [s[2] for s in samples]
    n = len(samples)
    # Label codes: 1/entailment(4) are "similar"; 0/contradiction(2)/neutral(3)
    # are "not similar".
    dissimilar = (0, 2, 3)
    similar = (1, 4)
    neg_pos_idxs = [[0.0] * n for _ in range(n)]
    for i in range(n):
        for j in range(n):
            if labels[i] in dissimilar and labels[j] in similar:
                neg_pos_idxs[i][j] = 1.0
            elif labels[i] == 2 and labels[j] == 3:
                # A contradiction pair should also rank below a neutral pair.
                neg_pos_idxs[i][j] = 1.0
    return [f"pair-{data_name}"] + ipts + [torch.tensor(neg_pos_idxs), n]
def comb_data_loader(loaders, idx_list=None):
    """Endlessly interleave several iterables/DataLoaders.

    Visits *loaders* in the order given by *idx_list* (default: every loader
    once per pass), restarting any loader that runs out. After each full pass
    the visiting order is reshuffled in place.
    """
    if idx_list is None:
        idx_list = list(range(len(loaders)))
    iterators = [iter(loader) for loader in loaders]
    cursor = 0
    while True:
        which = idx_list[cursor]
        try:
            item = next(iterators[which])
        except StopIteration:
            # Exhausted: restart this loader and take its first batch.
            iterators[which] = iter(loaders[which])
            item = next(iterators[which])
        yield item
        cursor += 1
        if cursor == len(idx_list):
            # Finished one pass over every index: reshuffle the order.
            random.shuffle(idx_list)
            cursor = 0
class VecDataSet(Dataset):
""" pair 对数据集 """
def __init__(self, data_loaders, loader_idxs):
self.lens = sum([len(i) for i in data_loaders])
self.data = comb_data_loader(data_loaders, idx_list=loader_idxs)
def __len__(self):
return self.lens
def __getitem__(self, item):
"""
item 为数据索引,迭代取第item条数据
"""
return next(self.data)
| python | Apache-2.0 | 8023dfa91ee2a74511e091902ceba5a3911cc610 | 2026-01-05T07:12:29.713333Z | false |
SmartLi8/stella | https://github.com/SmartLi8/stella/blob/8023dfa91ee2a74511e091902ceba5a3911cc610/src/add_new_pos_embed.py | src/add_new_pos_embed.py | """
扩展当前BERT的长度,新扩展的emebdding用层次分解的位置编码进行初始化
"""
import torch
import json
import os
import shutil
if __name__ == "__main__":
read_dir = r"E:\PublicModels\piccolo-base-zh"
save_dir = r"E:\PublicModels\piccolo-base-zh-1024"
ori_pos = 512
new_pos = 1024
hp_alpha = 0.2
# 创建目录
os.makedirs(save_dir, exist_ok=True)
# 先拷贝无关文件
for name in os.listdir(read_dir):
if name not in ["pytorch_mdoel.bin", "config.json", "tokenizer_config.json"]:
shutil.copy(os.path.join(read_dir, name), os.path.join(save_dir, name))
# config.json
with open(os.path.join(read_dir, "config.json"), "r", encoding="utf8") as fr:
data = json.load(fr)
data["max_position_embeddings"] = new_pos
with open(os.path.join(save_dir, "config.json"), "w", encoding="utf8") as fw:
data = json.dump(data, fw, ensure_ascii=False, indent=1)
# tokenizer_config.json
with open(os.path.join(read_dir, "tokenizer_config.json"), "r", encoding="utf8") as fr:
data = json.load(fr)
data["model_max_length"] = new_pos
with open(os.path.join(save_dir, "tokenizer_config.json"), "w", encoding="utf8") as fw:
data = json.dump(data, fw, ensure_ascii=False, indent=1)
# 处理pytorch
ori_dict = torch.load(os.path.join(read_dir, "pytorch_model.bin"))
ori_dict["embeddings.position_ids"] = torch.LongTensor([list(range(new_pos))])
ori_embed = ori_dict["embeddings.position_embeddings.weight"] # shape [512, 768]
position_ids = torch.LongTensor(list(range(new_pos))) # [0,1,2,3,....1023]
i = position_ids // 512
j = position_ids % 512
base_embedding = (ori_embed - ori_embed[0:1] * hp_alpha) / (1 - hp_alpha)
position_embeddings = hp_alpha * base_embedding[i] + (1 - hp_alpha) * base_embedding[j]
print("position_embeddings.shape", position_embeddings.shape)
ori_dict["embeddings.position_embeddings.weight"] = position_embeddings
torch.save(ori_dict, os.path.join(save_dir, "pytorch_model.bin"))
| python | Apache-2.0 | 8023dfa91ee2a74511e091902ceba5a3911cc610 | 2026-01-05T07:12:29.713333Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/manage.py | abstract-base-user-example/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello_django.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/users/views.py | abstract-base-user-example/users/views.py | from django.urls import reverse_lazy
from django.views import generic
from .forms import CustomUserCreationForm
class SignUp(generic.CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy("login")
template_name = "signup.html"
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/users/admin.py | abstract-base-user-example/users/admin.py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import CustomUser
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
form = CustomUserChangeForm
model = CustomUser
list_display = ("email", "is_staff", "is_active",)
list_filter = ("email", "is_staff", "is_active",)
fieldsets = (
(None, {"fields": ("email", "password")}),
("Permissions", {"fields": ("is_staff", "is_active", "groups", "user_permissions")}),
)
add_fieldsets = (
(None, {
"classes": ("wide",),
"fields": (
"email", "password1", "password2", "is_staff",
"is_active", "groups", "user_permissions"
)}
),
)
search_fields = ("email",)
ordering = ("email",)
admin.site.register(CustomUser, CustomUserAdmin)
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/users/models.py | abstract-base-user-example/users/models.py | from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from .managers import CustomUserManager
class CustomUser(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(_("email address"), unique=True)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
date_joined = models.DateTimeField(default=timezone.now)
USERNAME_FIELD = "email"
REQUIRED_FIELDS = []
objects = CustomUserManager()
def __str__(self):
return self.email
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/users/managers.py | abstract-base-user-example/users/managers.py | from django.contrib.auth.base_user import BaseUserManager
from django.utils.translation import gettext_lazy as _
class CustomUserManager(BaseUserManager):
"""
Custom user model manager where email is the unique identifiers
for authentication instead of usernames.
"""
def create_user(self, email, password, **extra_fields):
"""
Create and save a user with the given email and password.
"""
if not email:
raise ValueError(_("The Email must be set"))
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password, **extra_fields):
"""
Create and save a SuperUser with the given email and password.
"""
extra_fields.setdefault("is_staff", True)
extra_fields.setdefault("is_superuser", True)
extra_fields.setdefault("is_active", True)
if extra_fields.get("is_staff") is not True:
raise ValueError(_("Superuser must have is_staff=True."))
if extra_fields.get("is_superuser") is not True:
raise ValueError(_("Superuser must have is_superuser=True."))
return self.create_user(email, password, **extra_fields)
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/users/__init__.py | abstract-base-user-example/users/__init__.py | python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false | |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/users/tests.py | abstract-base-user-example/users/tests.py | from django.contrib.auth import get_user_model
from django.test import TestCase
class UsersManagersTests(TestCase):
def test_create_user(self):
User = get_user_model()
user = User.objects.create_user(email="normal@user.com", password="foo")
self.assertEqual(user.email, "normal@user.com")
self.assertTrue(user.is_active)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
try:
# username is None for the AbstractUser option
# username does not exist for the AbstractBaseUser option
self.assertIsNone(user.username)
except AttributeError:
pass
with self.assertRaises(TypeError):
User.objects.create_user()
with self.assertRaises(TypeError):
User.objects.create_user(email="")
with self.assertRaises(ValueError):
User.objects.create_user(email="", password="foo")
def test_create_superuser(self):
User = get_user_model()
admin_user = User.objects.create_superuser(email="super@user.com", password="foo")
self.assertEqual(admin_user.email, "super@user.com")
self.assertTrue(admin_user.is_active)
self.assertTrue(admin_user.is_staff)
self.assertTrue(admin_user.is_superuser)
try:
# username is None for the AbstractUser option
# username does not exist for the AbstractBaseUser option
self.assertIsNone(admin_user.username)
except AttributeError:
pass
with self.assertRaises(ValueError):
User.objects.create_superuser(
email="super@user.com", password="foo", is_superuser=False)
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/users/apps.py | abstract-base-user-example/users/apps.py | from django.apps import AppConfig
class UsersConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "users"
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/users/forms.py | abstract-base-user-example/users/forms.py | from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser
class CustomUserCreationForm(UserCreationForm):
class Meta:
model = CustomUser
fields = ("email",)
class CustomUserChangeForm(UserChangeForm):
class Meta:
model = CustomUser
fields = ("email",)
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/users/urls.py | abstract-base-user-example/users/urls.py | from django.urls import path
from . import views
urlpatterns = [path("signup/", views.SignUp.as_view(), name="signup"), ]
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/users/migrations/0001_initial.py | abstract-base-user-example/users/migrations/0001_initial.py | # Generated by Django 4.1.5 on 2023-01-21 21:10
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
]
operations = [
migrations.CreateModel(
name="CustomUser",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("password", models.CharField(max_length=128, verbose_name="password")),
(
"last_login",
models.DateTimeField(
blank=True, null=True, verbose_name="last login"
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"email",
models.EmailField(
max_length=254, unique=True, verbose_name="email address"
),
),
("is_staff", models.BooleanField(default=False)),
("is_active", models.BooleanField(default=True)),
(
"date_joined",
models.DateTimeField(default=django.utils.timezone.now),
),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.group",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.permission",
verbose_name="user permissions",
),
),
],
options={
"abstract": False,
},
),
]
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/users/migrations/__init__.py | abstract-base-user-example/users/migrations/__init__.py | python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false | |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/hello_django/asgi.py | abstract-base-user-example/hello_django/asgi.py | """
ASGI config for hello_django project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello_django.settings")
application = get_asgi_application()
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/hello_django/settings.py | abstract-base-user-example/hello_django/settings.py | """
Django settings for hello_django project.
Generated by 'django-admin startproject' using Django 4.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/4.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "django-insecure-#h8#5yy)jsc@sa+r(1t@f^$)(fs(36&q@8l^+&6=c^0r)jk&)4"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"users", # new
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "hello_django.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
BASE_DIR / "templates",
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "hello_django.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/4.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.1/howto/static-files/
STATIC_URL = "static/"
# Default primary key field type
# https://docs.djangoproject.com/en/4.1/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
AUTH_USER_MODEL = "users.CustomUser"
LOGIN_REDIRECT_URL = "home"
LOGOUT_REDIRECT_URL = "home"
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/hello_django/__init__.py | abstract-base-user-example/hello_django/__init__.py | python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false | |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/hello_django/wsgi.py | abstract-base-user-example/hello_django/wsgi.py | """
WSGI config for hello_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello_django.settings")
application = get_wsgi_application()
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-base-user-example/hello_django/urls.py | abstract-base-user-example/hello_django/urls.py | from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import TemplateView
urlpatterns = [
path("", TemplateView.as_view(template_name="home.html"), name="home"),
path("admin/", admin.site.urls),
path("users/", include("users.urls")),
path("users/", include("django.contrib.auth.urls")),
]
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/manage.py | abstract-user-example/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello_django.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.