repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/trba/transformation.py | strhub/models/trba/transformation.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class TPS_SpatialTransformerNetwork(nn.Module):
    """ Rectification Network of RARE, namely TPS based STN """

    def __init__(self, F, I_size, I_r_size, I_channel_num=1):
        """ Based on RARE TPS
        input:
            batch_I: Batch Input Image [batch_size x I_channel_num x I_height x I_width]
            I_size : (height, width) of the input image I
            I_r_size : (height, width) of the rectified image I_r
            I_channel_num : the number of channels of the input image I
        output:
            batch_I_r: rectified image [batch_size x I_channel_num x I_r_height x I_r_width]
        """
        super().__init__()
        self.F = F  # number of fiducial control points
        self.I_size = I_size
        self.I_r_size = I_r_size  # = (I_r_height, I_r_width)
        self.I_channel_num = I_channel_num
        self.LocalizationNetwork = LocalizationNetwork(self.F, self.I_channel_num)
        self.GridGenerator = GridGenerator(self.F, self.I_r_size)

    def forward(self, batch_I):
        batch_C_prime = self.LocalizationNetwork(batch_I)  # batch_size x K x 2
        # batch_size x n (= I_r_width x I_r_height) x 2
        build_P_prime = self.GridGenerator.build_P_prime(batch_C_prime)
        build_P_prime_reshape = build_P_prime.reshape([build_P_prime.size(0), self.I_r_size[0], self.I_r_size[1], 2])
        # BUG FIX: the original compared version *strings* lexicographically
        # (torch.__version__ > "1.2.0"), so e.g. "1.10.0" < "1.2.0" and every
        # torch >= 1.10 silently lost align_corners=True. Compare numeric
        # (major, minor) components instead; the '+cuXXX' suffix is stripped.
        version = tuple(int(p) for p in torch.__version__.split('+')[0].split('.')[:2] if p.isdigit())
        if version > (1, 2):
            batch_I_r = F.grid_sample(batch_I, build_P_prime_reshape, padding_mode='border', align_corners=True)
        else:
            batch_I_r = F.grid_sample(batch_I, build_P_prime_reshape, padding_mode='border')
        return batch_I_r
class LocalizationNetwork(nn.Module):
    """ Localization Network of RARE, which predicts C' (K x 2) from I (I_width x I_height) """

    def __init__(self, F, I_channel_num):
        super().__init__()
        self.F = F
        self.I_channel_num = I_channel_num
        # Four conv stages, each followed by BN+ReLU; the first three halve the
        # spatial size via max-pooling and the last global-average-pools to 1x1.
        # Built positionally so state_dict keys match the original Sequential.
        stages = [
            nn.Conv2d(in_channels=self.I_channel_num, out_channels=64, kernel_size=3, stride=1, padding=1,
                      bias=False),
            nn.BatchNorm2d(64), nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # batch_size x 64 x I_height/2 x I_width/2
            nn.Conv2d(64, 128, 3, 1, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # batch_size x 128 x I_height/4 x I_width/4
            nn.Conv2d(128, 256, 3, 1, 1, bias=False), nn.BatchNorm2d(256), nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # batch_size x 256 x I_height/8 x I_width/8
            nn.Conv2d(256, 512, 3, 1, 1, bias=False), nn.BatchNorm2d(512), nn.ReLU(True),
            nn.AdaptiveAvgPool2d(1),  # batch_size x 512 x 1 x 1
        ]
        self.conv = nn.Sequential(*stages)
        self.localization_fc1 = nn.Sequential(nn.Linear(512, 256), nn.ReLU(True))
        self.localization_fc2 = nn.Linear(256, self.F * 2)

        # Initialize fc2 so that, before any training, the predicted fiducial
        # points are exactly the canonical layout of RARE paper Fig. 6 (a):
        # zero weights plus a bias that holds the control-point coordinates.
        self.localization_fc2.weight.data.fill_(0)
        half = int(F / 2)
        xs = np.linspace(-1.0, 1.0, half)
        top_row = np.stack([xs, np.linspace(0.0, -1.0, num=half)], axis=1)
        bottom_row = np.stack([xs, np.linspace(1.0, 0.0, num=half)], axis=1)
        bias_init = np.concatenate([top_row, bottom_row], axis=0)
        self.localization_fc2.bias.data = torch.from_numpy(bias_init).float().view(-1)

    def forward(self, batch_I):
        """
        input: batch_I : Batch Input Image [batch_size x I_channel_num x I_height x I_width]
        output: batch_C_prime : Predicted coordinates of fiducial points for input batch [batch_size x F x 2]
        """
        n = batch_I.size(0)
        feats = self.conv(batch_I).view(n, -1)
        return self.localization_fc2(self.localization_fc1(feats)).view(n, self.F, 2)
class GridGenerator(nn.Module):
    """ Grid Generator of RARE, which produces P_prime by multipling T with P """

    def __init__(self, F, I_r_size):
        """ Generate P_hat and inv_delta_C for later """
        super().__init__()
        self.eps = 1e-6  # guards log(0) in the TPS radial basis function
        self.I_r_height, self.I_r_width = I_r_size
        self.F = F  # number of fiducial control points
        self.C = self._build_C(self.F)  # F x 2
        self.P = self._build_P(self.I_r_width, self.I_r_height)
        # num_gpu = torch.cuda.device_count()
        # if num_gpu > 1:
        # for multi-gpu, you may need register buffer
        self.register_buffer("inv_delta_C", torch.tensor(
            self._build_inv_delta_C(self.F, self.C)).float())  # F+3 x F+3
        self.register_buffer("P_hat", torch.tensor(self._build_P_hat(self.F, self.C, self.P)).float())  # n x F+3
        # else:
        # # for fine-tuning with different image width, you may use below instead of self.register_buffer
        # self.inv_delta_C = torch.tensor(self._build_inv_delta_C(self.F, self.C)).float()  # F+3 x F+3
        # self.P_hat = torch.tensor(self._build_P_hat(self.F, self.C, self.P)).float()  # n x F+3

    def _build_C(self, F):
        """ Return coordinates of fiducial points in I_r; C """
        # F/2 points along the top edge (y = -1) and F/2 along the bottom (y = +1),
        # evenly spaced in x over [-1, 1].
        ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))
        ctrl_pts_y_top = -1 * np.ones(int(F / 2))
        ctrl_pts_y_bottom = np.ones(int(F / 2))
        ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
        ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
        C = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)
        return C  # F x 2

    def _build_inv_delta_C(self, F, C):
        """ Return inv_delta_C which is needed to calculate T """
        # hat_C[i, j] = r^2 * log(r) with r = ||C_i - C_j|| (TPS kernel);
        # the diagonal is set to 1 first so log() never sees 0.
        hat_C = np.zeros((F, F), dtype=float)  # F x F
        for i in range(0, F):
            for j in range(i, F):
                r = np.linalg.norm(C[i] - C[j])
                hat_C[i, j] = r
                hat_C[j, i] = r
        np.fill_diagonal(hat_C, 1)
        hat_C = (hat_C ** 2) * np.log(hat_C)
        # print(C.shape, hat_C.shape)
        # Standard TPS system matrix: [[1, C, K], [0, C^T], [0, 1^T]].
        delta_C = np.concatenate(  # F+3 x F+3
            [
                np.concatenate([np.ones((F, 1)), C, hat_C], axis=1),  # F x F+3
                np.concatenate([np.zeros((2, 3)), np.transpose(C)], axis=1),  # 2 x F+3
                np.concatenate([np.zeros((1, 3)), np.ones((1, F))], axis=1)  # 1 x F+3
            ],
            axis=0
        )
        inv_delta_C = np.linalg.inv(delta_C)
        return inv_delta_C  # F+3 x F+3

    def _build_P(self, I_r_width, I_r_height):
        """ Return the n x 2 grid of pixel-center coordinates of I_r, normalized to [-1, 1] """
        I_r_grid_x = (np.arange(-I_r_width, I_r_width, 2) + 1.0) / I_r_width  # self.I_r_width
        I_r_grid_y = (np.arange(-I_r_height, I_r_height, 2) + 1.0) / I_r_height  # self.I_r_height
        P = np.stack(  # self.I_r_width x self.I_r_height x 2
            np.meshgrid(I_r_grid_x, I_r_grid_y),
            axis=2
        )
        return P.reshape([-1, 2])  # n (= self.I_r_width x self.I_r_height) x 2

    def _build_P_hat(self, F, C, P):
        """ Return P_hat (n x F+3): rows [1, x, y, rbf_1..rbf_F] evaluating the TPS basis at each grid point """
        n = P.shape[0]  # n (= self.I_r_width x self.I_r_height)
        P_tile = np.tile(np.expand_dims(P, axis=1), (1, F, 1))  # n x 2 -> n x 1 x 2 -> n x F x 2
        C_tile = np.expand_dims(C, axis=0)  # 1 x F x 2
        P_diff = P_tile - C_tile  # n x F x 2
        rbf_norm = np.linalg.norm(P_diff, ord=2, axis=2, keepdims=False)  # n x F
        rbf = np.multiply(np.square(rbf_norm), np.log(rbf_norm + self.eps))  # n x F
        P_hat = np.concatenate([np.ones((n, 1)), P, rbf], axis=1)
        return P_hat  # n x F+3

    def build_P_prime(self, batch_C_prime):
        """ Generate Grid from batch_C_prime [batch_size x F x 2] """
        batch_size = batch_C_prime.size(0)
        batch_inv_delta_C = self.inv_delta_C.repeat(batch_size, 1, 1)
        batch_P_hat = self.P_hat.repeat(batch_size, 1, 1)
        # Pad C' with three zero rows: the TPS solve also yields the affine part.
        batch_C_prime_with_zeros = torch.cat((batch_C_prime, batch_C_prime.new_zeros(
            (batch_size, 3, 2), dtype=torch.float)), dim=1)  # batch_size x F+3 x 2
        batch_T = torch.bmm(batch_inv_delta_C, batch_C_prime_with_zeros)  # batch_size x F+3 x 2
        batch_P_prime = torch.bmm(batch_P_hat, batch_T)  # batch_size x n x 2
        return batch_P_prime  # batch_size x n x 2
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/data/module.py | strhub/data/module.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from pathlib import PurePath
from torch.utils.data import DataLoader
from torchvision import transforms as T
from typing import Optional, Callable, Sequence, Tuple
from pytorch_lightning.utilities import rank_zero_info
from .dataset import build_tree_dataset, LmdbDataset
class SceneTextDataModule(pl.LightningDataModule):
    """LightningDataModule wiring LMDB train/val/test scene-text datasets to DataLoaders."""

    # TEST_BENCHMARK_SUB = ('IIIT5k', 'SVT', 'IC13_857', 'IC15_1811', 'SVTP', 'CUTE80')
    TEST_BENCHMARK_SUB = ('IIIT5k', 'SVT', 'IC13_1015', 'IC15_1811', 'IC15_2077', 'SVTP', 'CUTE80', 'HOST', 'WOST')
    TEST_BENCHMARK = ('IIIT5k', 'SVT', 'IC13_1015', 'IC15_1811', 'IC15_2077', 'SVTP', 'CUTE80', 'HOST', 'WOST')
    # TEST_BENCHMARK_SUB = ('HOST',)
    # TEST_BENCHMARK = ('HOST',)
    TEST_NEW = ('ArT', 'COCOv1.4', 'Uber')
    # NOTE(review): set() deduplicates but makes the resulting tuple order arbitrary.
    TEST_ALL = tuple(set(TEST_BENCHMARK_SUB + TEST_BENCHMARK + TEST_NEW))

    def __init__(self, root_dir: str, train_dir: str, img_size: Sequence[int], max_label_length: int,
                 charset_train: str, charset_test: str, batch_size: int, num_workers: int, augment: bool,
                 remove_whitespace: bool = True, normalize_unicode: bool = True,
                 min_image_dim: int = 0, rotation: int = 0, collate_fn: Optional[Callable] = None,
                 output_url: Optional[str] = None, openai_meanstd: bool = True,):
        # NOTE(review): `output_url` is accepted but never stored or used in this
        # class — presumably consumed elsewhere via CLI config; confirm.
        super().__init__()
        self.root_dir = root_dir
        self.train_dir = train_dir
        self.img_size = tuple(img_size)
        self.max_label_length = max_label_length
        self.charset_train = charset_train
        self.charset_test = charset_test
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.augment = augment
        self.remove_whitespace = remove_whitespace
        self.normalize_unicode = normalize_unicode
        self.min_image_dim = min_image_dim
        self.rotation = rotation
        self.collate_fn = collate_fn
        # Datasets are built lazily by the properties below.
        self._train_dataset = None
        self._val_dataset = None
        # https://github.com/mlfoundations/open_clip/blob/b4cf9269b0b11c0eea47cb16039369a46bd67449/src/open_clip/constants.py
        # OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
        # OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
        self.mean = (0.48145466, 0.4578275, 0.40821073) if openai_meanstd else 0.5
        self.std = (0.26862954, 0.26130258, 0.27577711) if openai_meanstd else 0.5
        rank_zero_info("[dataset] mean {}, std {}".format(self.mean, self.std))

    @staticmethod
    def get_transform(img_size: Tuple[int], augment: bool = False, rotation: int = 0, mean=0.5, std=0.5):
        """Compose the image pipeline: optional RandAugment, optional rotation, resize, tensor, normalize."""
        transforms = []
        if augment:
            from .augment import rand_augment_transform
            transforms.append(rand_augment_transform())
        if rotation:
            transforms.append(lambda img: img.rotate(rotation, expand=True))
        transforms.extend([
            T.Resize(img_size, T.InterpolationMode.BICUBIC),
            T.ToTensor(),
            T.Normalize(mean, std)
        ])
        return T.Compose(transforms)

    @property
    def train_dataset(self):
        # Lazily build the concatenated training set under <root>/train/<train_dir>.
        if self._train_dataset is None:
            transform = self.get_transform(self.img_size, self.augment, mean=self.mean, std=self.std)
            root = PurePath(self.root_dir, 'train', self.train_dir)
            self._train_dataset = build_tree_dataset(root, self.charset_train, self.max_label_length,
                                                     self.min_image_dim, self.remove_whitespace, self.normalize_unicode,
                                                     transform=transform)
            rank_zero_info('\tlmdb: The number of training samples is {}'.format(len(self._train_dataset)))
        return self._train_dataset

    @property
    def val_dataset(self):
        # Lazily build the validation set (no augmentation) under <root>/val.
        if self._val_dataset is None:
            transform = self.get_transform(self.img_size, mean=self.mean, std=self.std)
            root = PurePath(self.root_dir, 'val')
            self._val_dataset = build_tree_dataset(root, self.charset_test, self.max_label_length,
                                                   self.min_image_dim, self.remove_whitespace, self.normalize_unicode,
                                                   transform=transform)
            rank_zero_info('\tlmdb: The number of validation samples is {}'.format(len(self._val_dataset)))
        return self._val_dataset

    def train_dataloader(self):
        return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True,
                          num_workers=self.num_workers, persistent_workers=self.num_workers > 0,
                          pin_memory=True, collate_fn=self.collate_fn)

    def val_dataloader(self):
        return DataLoader(self.val_dataset, batch_size=self.batch_size,
                          num_workers=self.num_workers, persistent_workers=self.num_workers > 0,
                          pin_memory=True, collate_fn=self.collate_fn)

    def test_dataloaders(self, subset):
        """Return one DataLoader per benchmark name in `subset`, keyed by name."""
        # NOTE(review): the test transform uses the default mean/std (0.5), not
        # self.mean/self.std as train/val do — confirm this is intentional.
        transform = self.get_transform(self.img_size, rotation=self.rotation)
        root = PurePath(self.root_dir, 'test')
        datasets = {s: LmdbDataset(str(root / s), self.charset_test, self.max_label_length,
                                   self.min_image_dim, self.remove_whitespace, self.normalize_unicode,
                                   transform=transform) for s in subset}
        return {k: DataLoader(v, batch_size=self.batch_size, num_workers=self.num_workers,
                              pin_memory=True, collate_fn=self.collate_fn)
                for k, v in datasets.items()}
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/data/dataset.py | strhub/data/dataset.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import glob
import lmdb
import unicodedata
from PIL import Image
from pathlib import Path, PurePath
from typing import Callable, Optional, Union
from torch.utils.data import Dataset, ConcatDataset
from pytorch_lightning.utilities import rank_zero_info
from strhub.data.utils import CharsetAdapter
def build_tree_dataset(root: Union[PurePath, str], *args, **kwargs):
    """Recursively collect every LMDB database under `root` into one ConcatDataset.

    Every directory containing a `data.mdb` file becomes one LmdbDataset; the
    extra positional/keyword arguments are forwarded to the LmdbDataset
    constructor. A stray 'root' kwarg is dropped since root is passed explicitly.
    """
    # Idiom fix: pop with a default instead of try/except KeyError.
    kwargs.pop('root', None)  # prevent 'root' from being passed via kwargs
    root = Path(root).absolute()
    rank_zero_info(f'dataset root:\t{root}')
    datasets = []
    for mdb in glob.glob(str(root / '**/data.mdb'), recursive=True):
        mdb = Path(mdb)
        ds_name = str(mdb.parent.relative_to(root))
        ds_root = str(mdb.parent.absolute())
        dataset = LmdbDataset(ds_root, *args, **kwargs)
        rank_zero_info(f'\tlmdb:\t{ds_name}\tnum samples: {len(dataset)}')
        datasets.append(dataset)
    return ConcatDataset(datasets)
class LmdbDataset(Dataset):
    """Dataset interface to an LMDB database.

    It supports both labelled and unlabelled datasets. For unlabelled datasets, the image index itself is returned
    as the label. Unicode characters are normalized by default. Case-sensitivity is inferred from the charset.
    Labels are transformed according to the charset.
    """

    def __init__(self, root: str, charset: str, max_label_len: int, min_image_dim: int = 0,
                 remove_whitespace: bool = True, normalize_unicode: bool = True,
                 unlabelled: bool = False, transform: Optional[Callable] = None):
        self._env = None  # lazily-opened LMDB environment (see `env` property)
        self.root = root
        self.unlabelled = unlabelled
        self.transform = transform
        self.labels = []
        self.filtered_index_list = []
        self.num_samples = self._preprocess_labels(charset, remove_whitespace, normalize_unicode,
                                                   max_label_len, min_image_dim)

    def __del__(self):
        # Close the environment if it was ever opened.
        if self._env is not None:
            self._env.close()
            self._env = None

    def _create_env(self):
        return lmdb.open(self.root, max_readers=1, readonly=True, create=False,
                         readahead=False, meminit=False, lock=False)

    @property
    def env(self):
        # Open-on-first-use so the dataset pickles cleanly into worker processes.
        if self._env is None:
            self._env = self._create_env()
        return self._env

    def _preprocess_labels(self, charset, remove_whitespace, normalize_unicode, max_label_len, min_image_dim):
        """Scan all labels once, filter them, and return the usable sample count."""
        charset_adapter = CharsetAdapter(charset)
        with self._create_env() as env, env.begin() as txn:
            num_samples = int(txn.get('num-samples'.encode()))
            if self.unlabelled:
                return num_samples
            for index in range(num_samples):
                index += 1  # lmdb starts with 1
                label_key = f'label-{index:09d}'.encode()
                label = txn.get(label_key).decode()
                # Normally, whitespace is removed from the labels.
                if remove_whitespace:
                    label = ''.join(label.split())
                # Normalize unicode composites (if any) and convert to compatible ASCII characters
                if normalize_unicode:
                    label = unicodedata.normalize('NFKD', label).encode('ascii', 'ignore').decode()
                # Filter by length before removing unsupported characters. The original label might be too long.
                if len(label) > max_label_len:
                    continue
                label = charset_adapter(label)
                # We filter out samples which don't contain any supported characters
                if not label:
                    continue
                # Filter images that are too small.
                if min_image_dim > 0:
                    img_key = f'image-{index:09d}'.encode()
                    buf = io.BytesIO(txn.get(img_key))
                    w, h = Image.open(buf).size
                    # BUG FIX: the original read `self.min_image_dim`, an attribute
                    # that is never assigned anywhere, so any min_image_dim > 0
                    # raised AttributeError. Use the local parameter instead.
                    if w < min_image_dim or h < min_image_dim:
                        continue
                self.labels.append(label)
                self.filtered_index_list.append(index)
            return len(self.labels)

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        if self.unlabelled:
            label = index
        else:
            label = self.labels[index]
            index = self.filtered_index_list[index]
        img_key = f'image-{index:09d}'.encode()
        with self.env.begin() as txn:
            imgbuf = txn.get(img_key)
        buf = io.BytesIO(imgbuf)
        img = Image.open(buf).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, label
if __name__ == "__main__":
    # Quick manual smoke test: load two samples from the CUTE80 test set
    # and dump them to disk for visual inspection.
    import os
    output_path = "."
    # NOTE(review): LmdbDataset opens an LMDB *environment* path; pointing at
    # data.mdb itself (instead of its parent directory) looks wrong — confirm.
    root = "str_dataset/test/CUTE80/data.mdb"
    charset = "0123456789abcdefghijklmnopqrstuvwxyz"
    max_label_len = 25
    dataset = LmdbDataset(root, charset, max_label_len)
    # 512 * 3 = 1536
    id1, id2 = 1539, 1568
    id1, id2 = 1576, 1706
    # image1, label1 = dataset[1539]
    # image2, label2 = dataset[1568]
    image1, label1 = dataset[id1]
    image2, label2 = dataset[id2]
    print(label1, label2)
    # BUG FIX: output filenames were misspelled "imiage_<id>.jpg".
    image1.save(os.path.join(output_path, "image_{}.jpg".format(id1)))
    image2.save(os.path.join(output_path, "image_{}.jpg".format(id2)))
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/data/utils.py | strhub/data/utils.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import re
from abc import ABC, abstractmethod
from itertools import groupby
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from torch.nn.utils.rnn import pad_sequence
class CharsetAdapter:
    """Transforms labels according to the target charset."""

    def __init__(self, target_charset) -> None:
        super().__init__()
        # If the charset contains no uppercase (resp. no lowercase) letters,
        # incoming labels are case-folded to match before filtering.
        self.lowercase_only = target_charset == target_charset.lower()
        self.uppercase_only = target_charset == target_charset.upper()
        self.unsupported = f'[^{re.escape(target_charset)}]'

    def __call__(self, label):
        if self.lowercase_only:
            folded = label.lower()
        elif self.uppercase_only:
            folded = label.upper()
        else:
            folded = label
        # Drop every character that is not part of the target charset.
        return re.sub(self.unsupported, '', folded)
class BaseTokenizer(ABC):
    """Common id<->token bookkeeping shared by all tokenizers.

    The vocabulary is laid out as `specials_first + charset + specials_last`;
    subclasses decide how labels are encoded and how raw id sequences are
    filtered during decoding.
    """

    def __init__(self, charset: str, specials_first: tuple = (), specials_last: tuple = ()) -> None:
        self._itos = specials_first + tuple(charset) + specials_last
        self._stoi = {tok: idx for idx, tok in enumerate(self._itos)}

    def __len__(self):
        return len(self._itos)

    def _tok2ids(self, tokens: str) -> List[int]:
        """Map each token/character to its integer id."""
        return [self._stoi[tok] for tok in tokens]

    def _ids2tok(self, token_ids: List[int], join: bool = True) -> str:
        """Map ids back to tokens, optionally joined into one string."""
        decoded = [self._itos[idx] for idx in token_ids]
        if join:
            return ''.join(decoded)
        return decoded

    @abstractmethod
    def encode(self, labels: List[str], device: Optional[torch.device] = None) -> Tensor:
        """Encode a batch of labels to a representation suitable for the model.

        Args:
            labels: List of labels. Each can be of arbitrary length.
            device: Create tensor on this device.

        Returns:
            Batched tensor representation padded to the max label length. Shape: N, L
        """
        raise NotImplementedError

    @abstractmethod
    def _filter(self, probs: Tensor, ids: Tensor) -> Tuple[Tensor, List[int]]:
        """Internal method which performs the necessary filtering prior to decoding."""
        raise NotImplementedError

    def decode(self, token_dists: Tensor, raw: bool = False) -> Tuple[List[str], List[Tensor]]:
        """Decode a batch of token distributions.

        Args:
            token_dists: softmax probabilities over the token distribution. Shape: N, L, C
            raw: return unprocessed labels (will return list of list of strings)

        Returns:
            list of string labels (arbitrary length) and
            their corresponding sequence probabilities as a list of Tensors
        """
        all_tokens = []
        all_probs = []
        for dist in token_dists:
            step_probs, step_ids = dist.max(-1)  # greedy selection
            # step_probs, step_ids = beam_search_decoder(dist, 5)
            if not raw:
                step_probs, step_ids = self._filter(step_probs, step_ids)
            all_tokens.append(self._ids2tok(step_ids, not raw))
            all_probs.append(step_probs)
        return all_tokens, all_probs

    def decode_for_refine(self, token_dists: Tensor, raw: bool = False) -> Tuple[List[str], List[Tensor]]:
        raise NotImplementedError
class Tokenizer(BaseTokenizer):
    """Autoregressive tokenizer with [B]eginning-of-sequence, [E]nd-of-sequence
    and [P]adding specials. Vocabulary layout: [E] has id 0, the charset
    follows, and [B]/[P] come last."""
    BOS = '[B]'
    EOS = '[E]'
    PAD = '[P]'

    def __init__(self, charset: str) -> None:
        specials_first = (self.EOS,)  # EOS deliberately placed at index 0
        specials_last = (self.BOS, self.PAD)
        super().__init__(charset, specials_first, specials_last)
        self.eos_id, self.bos_id, self.pad_id = [self._stoi[s] for s in specials_first + specials_last]

    def encode(self, labels: List[str], device: Optional[torch.device] = None) -> Tensor:
        """Encode each label as [BOS] + char ids + [EOS], right-padded with [PAD]. Shape: N, L."""
        batch = [torch.as_tensor([self.bos_id] + self._tok2ids(y) + [self.eos_id], dtype=torch.long, device=device)
                 for y in labels]
        return pad_sequence(batch, batch_first=True, padding_value=self.pad_id)

    def _filter(self, probs: Tensor, ids: Tensor) -> Tuple[Tensor, List[int]]:
        """Truncate ids at the first EOS (EOS excluded); keep the EOS probability."""
        ids = ids.tolist()
        try:
            eos_idx = ids.index(self.eos_id)
        except ValueError:
            eos_idx = len(ids)  # Nothing to truncate.
        # Truncate after EOS
        ids = ids[:eos_idx]
        probs = probs[:eos_idx + 1]  # but include prob. for EOS (if it exists)
        return probs, ids

    def decode_fast(self, token_dists: Tensor, raw: bool = False, charset_adapter = None) -> Tuple[List[str], List[Tensor]]:
        """Fast version of decode: one batched argmax, then per-sample EOS truncation
        via string partition instead of list search."""
        bs_tokens = []
        bs_probs = []
        batch_probs, batch_ids = token_dists.max(-1)
        batch_ids = batch_ids.tolist()
        for i, ids in enumerate(batch_ids):
            tokens = self._ids2tok(ids, not raw)
            # Truncate after EOS
            tokens = tokens.partition(self.EOS)[0]
            # but include prob. for EOS (if it exists)
            bs_probs.append(batch_probs[i, :len(tokens) + 1])
            if charset_adapter is not None:
                tokens = charset_adapter(tokens)
            bs_tokens.append(tokens)
        return bs_tokens, bs_probs

    def decode_for_refine(self, token_dists: Tensor, raw: bool = False) -> Tuple[List[str], List[Tensor]]:
        """Decode a batch of token distributions.

        Args:
            token_dists: softmax probabilities over the token distribution. Shape: N, L, C
            raw: return unprocessed labels (will return list of list of strings)

        Returns:
            # list of string labels (arbitrary length) and
            # their corresponding sequence probabilities as a list of Tensors
            list of sequence probability distributions (EOS column zeroed)
        """
        batch_dist = []
        for dist in token_dists:
            # greedy selection
            max_probs, ids = dist.max(-1)
            if not raw:
                ids = ids.tolist()
                try:
                    eos_idx = ids.index(self.eos_id)
                except ValueError:
                    eos_idx = len(ids)
                # truncate after EOS
                ids = ids[:eos_idx]
                # do not include EOS
                trunc_dist = dist[:eos_idx, ...]
                # set probability of [EOS] to zero
                # NOTE(review): trunc_dist is a *view* of token_dists, so this
                # assignment mutates the caller's tensor in place — confirm intended.
                trunc_dist[:, self.eos_id] = torch.zeros_like(trunc_dist[:, self.eos_id])
                batch_dist.append(trunc_dist)
            else:
                batch_dist.append(dist)
        return batch_dist
class CTCTokenizer(BaseTokenizer):
    """CTC tokenizer: a single [B]LANK special at index 0, no BOS/EOS/PAD."""
    BLANK = '[B]'

    def __init__(self, charset: str) -> None:
        # BLANK uses index == 0 by default
        super().__init__(charset, specials_first=(self.BLANK,))
        self.blank_id = self._stoi[self.BLANK]

    def encode(self, labels: List[str], device: Optional[torch.device] = None) -> Tensor:
        # We use a padded representation since we don't want to use CUDNN's CTC implementation
        batch = [torch.as_tensor(self._tok2ids(y), dtype=torch.long, device=device) for y in labels]
        return pad_sequence(batch, batch_first=True, padding_value=self.blank_id)

    def _filter(self, probs: Tensor, ids: Tensor) -> Tuple[Tensor, List[int]]:
        """Best-path (greedy) CTC decoding: collapse repeats, then drop blanks."""
        # BUG FIX: the original `list(zip(*groupby(ids.tolist())))[0]` raised
        # IndexError for an empty id sequence and returned a tuple despite the
        # declared List[int] return type. Collect the group keys directly.
        collapsed = [key for key, _ in groupby(ids.tolist())]  # remove duplicate tokens
        ids = [x for x in collapsed if x != self.blank_id]  # remove BLANKs
        # `probs` is just pass-through since all positions are considered part of the path
        return probs, ids
def beam_search_decoder(data, k):
    """A simple beam search over per-step token probabilities.

    data: Tensor of shape (T, C) of probabilities; k: beam width.
    Returns (probs, ids) for the best sequence, scored by summed
    negative log-probabilities (lower is better).
    """
    device = data.device
    rows = data.cpu().numpy()
    beams = [(list(), 0.0)]  # (partial sequence, accumulated -log p)
    for row in rows:
        expanded = []
        for seq, score in beams:
            for token, p in enumerate(row):
                expanded.append((seq + [token], score - math.log(p)))
        # Keep only the k lowest-cost candidates (stable sort preserves tie order).
        beams = sorted(expanded, key=lambda entry: entry[1])[:k]
    best_seq = beams[0][0]
    ids = torch.tensor(best_seq, dtype=torch.long, device=device)
    probs = data[torch.arange(len(ids), dtype=torch.long, device=device), ids]
    return probs, ids
if __name__ == "__main__":
    # Smoke-test the 94-character tokenizer: print the id of every charset
    # symbol, the two mapping tables, and the three special-token ids
    # (expected: eos_id == 0, bos_id == 95, pad_id == 96).
    _char_94_full = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
    tokenizer = Tokenizer(_char_94_full)
    for ch in _char_94_full:
        print(tokenizer._tok2ids(ch))
    print(tokenizer._itos)
    print(tokenizer._stoi)
    print(tokenizer.bos_id)
    print(tokenizer.eos_id)
    print(tokenizer.pad_id)
# pad_id 96 | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/data/__init__.py | strhub/data/__init__.py | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false | |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/data/aa_overrides.py | strhub/data/aa_overrides.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extends default ops to accept optional parameters."""
from functools import partial
from timm.data.auto_augment import _LEVEL_DENOM, _randomly_negate, LEVEL_TO_ARG, NAME_TO_OP, rotate
def rotate_expand(img, degrees, **kwargs):
    """Rotate operation with expand=True to avoid cutting off the characters"""
    # Force expand=True without mutating the incoming kwargs mapping.
    return rotate(img, degrees, **dict(kwargs, expand=True))
def _level_to_arg(level, hparams, key, default):
    """Scale `level` by an hparam-configurable magnitude and randomly negate it."""
    magnitude = hparams.get(key, default)
    scaled = _randomly_negate(level / _LEVEL_DENOM * magnitude)
    return (scaled,)
def apply():
    """Install the STR-specific overrides into timm's auto-augment op tables."""
    # Replace the stock Rotate with the expand=True variant.
    NAME_TO_OP['Rotate'] = rotate_expand
    # Make each geometric op's magnitude configurable through hparams.
    LEVEL_TO_ARG.update({
        'Rotate': partial(_level_to_arg, key='rotate_deg', default=30.),
        'ShearX': partial(_level_to_arg, key='shear_x_pct', default=0.3),
        'ShearY': partial(_level_to_arg, key='shear_y_pct', default=0.3),
        'TranslateXRel': partial(_level_to_arg, key='translate_x_pct', default=0.45),
        'TranslateYRel': partial(_level_to_arg, key='translate_y_pct', default=0.45),
    })
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/data/augment.py | strhub/data/augment.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import imgaug.augmenters as iaa
import numpy as np
from PIL import ImageFilter, Image
from timm.data import auto_augment
from strhub.data import aa_overrides
aa_overrides.apply()
_OP_CACHE = {}
def _get_op(key, factory):
try:
op = _OP_CACHE[key]
except KeyError:
op = factory()
_OP_CACHE[key] = op
return op
def _get_param(level, img, max_dim_factor, min_level=1):
max_level = max(min_level, max_dim_factor * max(img.size))
return round(min(level, max_level))
def gaussian_blur(img, radius, **__):
    """Apply a PIL Gaussian blur with a radius clamped to the image size."""
    effective = _get_param(radius, img, 0.02)
    blur = _get_op('gaussian_blur_' + str(effective), lambda: ImageFilter.GaussianBlur(effective))
    return img.filter(blur)
def motion_blur(img, k, **__):
    """Apply imgaug motion blur; the kernel size is image-scaled and forced odd."""
    kernel = _get_param(k, img, 0.08, 3) | 1  # bin to odd values
    blur = _get_op('motion_blur_' + str(kernel), lambda: iaa.MotionBlur(kernel))
    return Image.fromarray(blur(image=np.asarray(img)))
def gaussian_noise(img, scale, **_):
    """Add imgaug additive Gaussian noise; scale is image-dependent and odd."""
    level = _get_param(scale, img, 0.25) | 1  # bin to odd values
    noise = _get_op('gaussian_noise_' + str(level), lambda: iaa.AdditiveGaussianNoise(scale=level))
    return Image.fromarray(noise(image=np.asarray(img)))
def poisson_noise(img, lam, **_):
    """Add imgaug additive Poisson noise; lambda is image-dependent and odd."""
    rate = _get_param(lam, img, 0.2) | 1  # bin to odd values
    noise = _get_op('poisson_noise_' + str(rate), lambda: iaa.AdditivePoissonNoise(rate))
    return Image.fromarray(noise(image=np.asarray(img)))
def _level_to_arg(level, _hparams, max):
    """Map a 0.._LEVEL_DENOM level linearly onto [0, max].

    NOTE: the parameter is named `max` (shadowing the builtin) because the
    registration below binds it by keyword via partial(..., max=...).
    """
    return (max * level / auto_augment._LEVEL_DENOM,)
# Build the transform list: timm's "increasing" RandAugment set, minus
# sharpness (it fights the blur ops), plus the custom blur/noise ops below.
_RAND_TRANSFORMS = auto_augment._RAND_INCREASING_TRANSFORMS.copy()
_RAND_TRANSFORMS.remove('SharpnessIncreasing')  # remove, interferes with *blur ops
_RAND_TRANSFORMS.extend([
    'GaussianBlur',
    # 'MotionBlur',
    # 'GaussianNoise',
    'PoissonNoise'
])
# Register magnitude -> op-argument mappings for the custom ops.
auto_augment.LEVEL_TO_ARG.update({
    'GaussianBlur': partial(_level_to_arg, max=4),
    'MotionBlur': partial(_level_to_arg, max=20),
    'GaussianNoise': partial(_level_to_arg, max=0.1 * 255),
    'PoissonNoise': partial(_level_to_arg, max=40)
})
# Register the op implementations themselves by name.
auto_augment.NAME_TO_OP.update({
    'GaussianBlur': gaussian_blur,
    'MotionBlur': motion_blur,
    'GaussianNoise': gaussian_noise,
    'PoissonNoise': poisson_noise
})
def rand_augment_transform(magnitude=5, num_layers=3):
# These are tuned for magnitude=5, which means that effective magnitudes are half of these values.
hparams = {
'rotate_deg': 30,
'shear_x_pct': 0.9,
'shear_y_pct': 0.2,
'translate_x_pct': 0.10,
'translate_y_pct': 0.30
}
ra_ops = auto_augment.rand_augment_ops(magnitude, hparams, transforms=_RAND_TRANSFORMS)
# Supply weights to disable replacement in random selection (i.e. avoid applying the same op twice)
choice_weights = [1. / len(ra_ops) for _ in range(len(ra_ops))]
return auto_augment.RandAugment(ra_ops, num_layers, choice_weights)
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/tools/mlt19_converter.py | tools/mlt19_converter.py | #!/usr/bin/env python3
import sys
root = sys.argv[1]
with open(root + '/gt.txt', 'r') as f:
d = f.readlines()
with open(root + '/lmdb.txt', 'w') as f:
for line in d:
img, script, label = line.split(',', maxsplit=2)
label = label.strip()
if label and script in ['Latin', 'Symbols']:
f.write('\t'.join([img, label]) + '\n')
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/tools/create_lmdb_dataset.py | tools/create_lmdb_dataset.py | #!/usr/bin/env python3
""" a modified version of CRNN torch repository https://github.com/bgshih/crnn/blob/master/tool/create_dataset.py """
import io
import os
import fire
import lmdb
import numpy as np
from PIL import Image
def checkImageIsValid(imageBin):
if imageBin is None:
return False
img = Image.open(io.BytesIO(imageBin)).convert('RGB')
return np.prod(img.size) > 0
def writeCache(env, cache):
with env.begin(write=True) as txn:
for k, v in cache.items():
txn.put(k, v)
def createDataset(inputPath, gtFile, outputPath, checkValid=True):
"""
Create LMDB dataset for training and evaluation.
ARGS:
inputPath : input folder path where starts imagePath
outputPath : LMDB output path
gtFile : list of image path and label
checkValid : if true, check the validity of every image
"""
os.makedirs(outputPath, exist_ok=True)
env = lmdb.open(outputPath, map_size=1099511627776)
cache = {}
cnt = 1
with open(gtFile, 'r', encoding='utf-8') as f:
data = f.readlines()
nSamples = len(data)
for i, line in enumerate(data):
imagePath, label = line.strip().split(maxsplit=1)
imagePath = os.path.join(inputPath, imagePath)
with open(imagePath, 'rb') as f:
imageBin = f.read()
if checkValid:
try:
img = Image.open(io.BytesIO(imageBin)).convert('RGB')
except IOError as e:
with open(outputPath + '/error_image_log.txt', 'a') as log:
log.write('{}-th image data occured error: {}, {}\n'.format(i, imagePath, e))
continue
if np.prod(img.size) == 0:
print('%s is not a valid image' % imagePath)
continue
imageKey = 'image-%09d'.encode() % cnt
labelKey = 'label-%09d'.encode() % cnt
cache[imageKey] = imageBin
cache[labelKey] = label.encode()
if cnt % 1000 == 0:
writeCache(env, cache)
cache = {}
print('Written %d / %d' % (cnt, nSamples))
cnt += 1
nSamples = cnt - 1
cache['num-samples'.encode()] = str(nSamples).encode()
writeCache(env, cache)
env.close()
print('Created dataset with %d samples' % nSamples)
if __name__ == '__main__':
fire.Fire(createDataset)
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/tools/case_sensitive_str_datasets_converter.py | tools/case_sensitive_str_datasets_converter.py | #!/usr/bin/env python3
import os.path
import sys
from pathlib import Path
d = sys.argv[1]
p = Path(d)
gt = []
num_samples = len(list(p.glob('label/*.txt')))
ext = 'jpg' if p.joinpath('IMG', '1.jpg').is_file() else 'png'
for i in range(1, num_samples + 1):
img = p.joinpath('IMG', f'{i}.{ext}')
name = os.path.splitext(img.name)[0]
with open(p.joinpath('label', f'{i}.txt'), 'r') as f:
label = f.readline()
gt.append((os.path.join('IMG', img.name), label))
with open(d + '/lmdb.txt', 'w', encoding='utf-8') as f:
for line in gt:
fname, label = line
fname = fname.strip()
label = label.strip()
f.write('\t'.join([fname, label]) + '\n')
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/tools/lsvt_converter.py | tools/lsvt_converter.py | #!/usr/bin/env python3
import argparse
import os
import os.path as osp
import re
from functools import partial
import mmcv
import numpy as np
from PIL import Image
from mmocr.utils.fileio import list_to_file
def parse_args():
parser = argparse.ArgumentParser(
description='Generate training set of LSVT '
'by cropping box image.')
parser.add_argument('root_path', help='Root dir path of LSVT')
parser.add_argument(
'n_proc', default=1, type=int, help='Number of processes to run')
args = parser.parse_args()
return args
def process_img(args, src_image_root, dst_image_root):
# Dirty hack for multiprocessing
img_idx, img_info, anns = args
try:
src_img = Image.open(osp.join(src_image_root, 'train_full_images_0/{}.jpg'.format(img_info)))
except IOError:
src_img = Image.open(osp.join(src_image_root, 'train_full_images_1/{}.jpg'.format(img_info)))
blacklist = ['LOFTINESS*']
whitelist = ['#Find YOUR Fun#', 'Story #', '*0#']
labels = []
for ann_idx, ann in enumerate(anns):
text_label = ann['transcription']
# Ignore illegible or words with non-Latin characters
if ann['illegibility'] or re.findall(r'[\u4e00-\u9fff]+', text_label) or text_label in blacklist or \
('#' in text_label and text_label not in whitelist):
continue
points = np.asarray(ann['points'])
x1, y1 = points.min(axis=0)
x2, y2 = points.max(axis=0)
dst_img = src_img.crop((x1, y1, x2, y2))
dst_img_name = f'img_{img_idx}_{ann_idx}.jpg'
dst_img_path = osp.join(dst_image_root, dst_img_name)
# Preserve JPEG quality
dst_img.save(dst_img_path, qtables=src_img.quantization)
labels.append(f'{osp.basename(dst_image_root)}/{dst_img_name}'
f' {text_label}')
src_img.close()
return labels
def convert_lsvt(root_path,
dst_image_path,
dst_label_filename,
annotation_filename,
img_start_idx=0,
nproc=1):
annotation_path = osp.join(root_path, annotation_filename)
if not osp.exists(annotation_path):
raise Exception(
f'{annotation_path} not exists, please check and try again.')
src_image_root = root_path
# outputs
dst_label_file = osp.join(root_path, dst_label_filename)
dst_image_root = osp.join(root_path, dst_image_path)
os.makedirs(dst_image_root, exist_ok=True)
annotation = mmcv.load(annotation_path)
process_img_with_path = partial(
process_img,
src_image_root=src_image_root,
dst_image_root=dst_image_root)
tasks = []
for img_idx, (img_info, anns) in enumerate(annotation.items()):
tasks.append((img_idx + img_start_idx, img_info, anns))
labels_list = mmcv.track_parallel_progress(
process_img_with_path, tasks, keep_order=True, nproc=nproc)
final_labels = []
for label_list in labels_list:
final_labels += label_list
list_to_file(dst_label_file, final_labels)
return len(annotation)
def main():
args = parse_args()
root_path = args.root_path
print('Processing training set...')
convert_lsvt(
root_path=root_path,
dst_image_path='image_train',
dst_label_filename='train_label.txt',
annotation_filename='train_full_labels.json',
nproc=args.n_proc)
print('Finish')
if __name__ == '__main__':
main()
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/tools/coco_2_converter.py | tools/coco_2_converter.py | #!/usr/bin/env python3
import argparse
import html
import math
import os
import os.path as osp
from functools import partial
import mmcv
from PIL import Image
from mmocr.utils.fileio import list_to_file
def parse_args():
parser = argparse.ArgumentParser(
description='Generate training and validation set of TextOCR '
'by cropping box image.')
parser.add_argument('root_path', help='Root dir path of TextOCR')
parser.add_argument(
'n_proc', default=1, type=int, help='Number of processes to run')
args = parser.parse_args()
return args
def process_img(args, src_image_root, dst_image_root):
# Dirty hack for multiprocessing
img_idx, img_info, anns = args
src_img = Image.open(osp.join(src_image_root, 'train2014', img_info['file_name']))
src_w, src_h = src_img.size
labels = []
for ann_idx, ann in enumerate(anns):
text_label = html.unescape(ann['utf8_string'].strip())
# Ignore empty labels
if not text_label or ann['class'] != 'machine printed' or ann['language'] != 'english' or \
ann['legibility'] != 'legible':
continue
# Some labels and images with '#' in the middle are actually good, but some aren't, so we just filter them all.
if text_label != '#' and '#' in text_label:
continue
# Some labels use '*' to denote unreadable characters
if text_label.startswith('*') or text_label.endswith('*'):
continue
pad = 2
x, y, w, h = ann['bbox']
x, y = max(0, math.floor(x) - pad), max(0, math.floor(y) - pad)
w, h = math.ceil(w), math.ceil(h)
x2, y2 = min(src_w, x + w + 2 * pad), min(src_h, y + h + 2 * pad)
dst_img = src_img.crop((x, y, x2, y2))
dst_img_name = f'img_{img_idx}_{ann_idx}.jpg'
dst_img_path = osp.join(dst_image_root, dst_img_name)
# Preserve JPEG quality
dst_img.save(dst_img_path, qtables=src_img.quantization)
labels.append(f'{osp.basename(dst_image_root)}/{dst_img_name}'
f' {text_label}')
src_img.close()
return labels
def convert_textocr(root_path,
dst_image_path,
dst_label_filename,
annotation_filename,
img_start_idx=0,
nproc=1):
annotation_path = osp.join(root_path, annotation_filename)
if not osp.exists(annotation_path):
raise Exception(
f'{annotation_path} not exists, please check and try again.')
src_image_root = root_path
# outputs
dst_label_file = osp.join(root_path, dst_label_filename)
dst_image_root = osp.join(root_path, dst_image_path)
os.makedirs(dst_image_root, exist_ok=True)
annotation = mmcv.load(annotation_path)
split = 'train' if 'train' in dst_label_filename else 'val'
process_img_with_path = partial(
process_img,
src_image_root=src_image_root,
dst_image_root=dst_image_root)
tasks = []
for img_idx, img_info in enumerate(annotation['imgs'].values()):
if img_info['set'] != split:
continue
ann_ids = annotation['imgToAnns'][str(img_info['id'])]
anns = [annotation['anns'][str(ann_id)] for ann_id in ann_ids]
tasks.append((img_idx + img_start_idx, img_info, anns))
labels_list = mmcv.track_parallel_progress(
process_img_with_path, tasks, keep_order=True, nproc=nproc)
final_labels = []
for label_list in labels_list:
final_labels += label_list
list_to_file(dst_label_file, final_labels)
return len(annotation['imgs'])
def main():
args = parse_args()
root_path = args.root_path
print('Processing training set...')
num_train_imgs = convert_textocr(
root_path=root_path,
dst_image_path='image',
dst_label_filename='train_label.txt',
annotation_filename='cocotext.v2.json',
nproc=args.n_proc)
print('Processing validation set...')
convert_textocr(
root_path=root_path,
dst_image_path='image_val',
dst_label_filename='val_label.txt',
annotation_filename='cocotext.v2.json',
img_start_idx=num_train_imgs,
nproc=args.n_proc)
print('Finish')
if __name__ == '__main__':
main()
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/tools/test_abinet_lm_acc.py | tools/test_abinet_lm_acc.py | #!/usr/bin/env python3
import argparse
import string
import sys
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn.utils.rnn import pad_sequence
from tqdm import tqdm
from strhub.data.module import SceneTextDataModule
from strhub.models.abinet.system import ABINet
sys.path.insert(0, '.')
from hubconf import _get_config
from test import Result, print_results_table
class ABINetLM(ABINet):
def _encode(self, labels):
targets = [torch.arange(self.max_label_length + 1)] # dummy target. used to set pad_sequence() length
lengths = []
for label in labels:
targets.append(torch.as_tensor([self.tokenizer._stoi[c] for c in label]))
lengths.append(len(label) + 1)
targets = pad_sequence(targets, batch_first=True, padding_value=0)[1:] # exclude dummy target
lengths = torch.as_tensor(lengths, device=self.device)
targets = F.one_hot(targets, len(self.tokenizer._stoi))[..., :len(self.tokenizer._stoi) - 2].float().to(self.device)
return targets, lengths
def forward(self, labels: Tensor, max_length: int = None) -> Tensor:
targets, lengths = self._encode(labels)
return self.model.language(targets, lengths)['logits']
def main():
parser = argparse.ArgumentParser(description='Measure the word accuracy of ABINet LM using the ground truth as input')
parser.add_argument('checkpoint', help='Official pretrained weights for ABINet-LV (best-train-abinet.pth)')
parser.add_argument('--data_root', default='data')
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--new', action='store_true', default=False, help='Evaluate on new benchmark datasets')
parser.add_argument('--device', default='cuda')
args = parser.parse_args()
# charset used by original ABINet
charset = string.ascii_lowercase + '1234567890'
ckpt = torch.load(args.checkpoint)
config = _get_config('abinet', charset_train=charset, charset_test=charset)
model = ABINetLM(**config)
model.model.load_state_dict(ckpt['model'])
model = model.eval().to(args.device)
model.freeze() # disable autograd
hp = model.hparams
datamodule = SceneTextDataModule(args.data_root, '_unused_', hp.img_size, hp.max_label_length, hp.charset_train,
hp.charset_test, args.batch_size, args.num_workers, False)
test_set = SceneTextDataModule.TEST_BENCHMARK
if args.new:
test_set += SceneTextDataModule.TEST_NEW
test_set = sorted(set(test_set))
results = {}
max_width = max(map(len, test_set))
for name, dataloader in datamodule.test_dataloaders(test_set).items():
total = 0
correct = 0
ned = 0
confidence = 0
label_length = 0
for _, labels in tqdm(iter(dataloader), desc=f'{name:>{max_width}}'):
res = model.test_step((labels, labels), -1)['output']
total += res.num_samples
correct += res.correct
ned += res.ned
confidence += res.confidence
label_length += res.label_length
accuracy = 100 * correct / total
mean_ned = 100 * (1 - ned / total)
mean_conf = 100 * confidence / total
mean_label_length = label_length / total
results[name] = Result(name, total, accuracy, mean_ned, mean_conf, mean_label_length)
result_groups = {
'Benchmark': SceneTextDataModule.TEST_BENCHMARK
}
if args.new:
result_groups.update({'New': SceneTextDataModule.TEST_NEW})
for group, subset in result_groups.items():
print(f'{group} set:')
print_results_table([results[s] for s in subset])
print('\n')
if __name__ == '__main__':
main()
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/tools/textocr_converter.py | tools/textocr_converter.py | #!/usr/bin/env python3
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import math
import os
import os.path as osp
from functools import partial
import mmcv
import numpy as np
from PIL import Image
from mmocr.utils.fileio import list_to_file
def parse_args():
parser = argparse.ArgumentParser(
description='Generate training and validation set of TextOCR '
'by cropping box image.')
parser.add_argument('root_path', help='Root dir path of TextOCR')
parser.add_argument(
'n_proc', default=1, type=int, help='Number of processes to run')
parser.add_argument('--rectify_pose', action='store_true',
help='Fix pose of rotated text to make them horizontal')
args = parser.parse_args()
return args
def rectify_image_pose(image, top_left, points):
# Points-based heuristics for determining text orientation w.r.t. bounding box
points = np.asarray(points).reshape(-1, 2)
dist = ((points - np.asarray(top_left)) ** 2).sum(axis=1)
left_midpoint = (points[0] + points[-1]) / 2
right_corner_points = ((points - left_midpoint) ** 2).sum(axis=1).argsort()[-2:]
right_midpoint = points[right_corner_points].sum(axis=0) / 2
d_x, d_y = abs(right_midpoint - left_midpoint)
if dist[0] + dist[-1] <= dist[right_corner_points].sum():
if d_x >= d_y:
rot = 0
else:
rot = 90
else:
if d_x >= d_y:
rot = 180
else:
rot = -90
if rot:
image = image.rotate(rot, expand=True)
return image
def process_img(args, src_image_root, dst_image_root):
# Dirty hack for multiprocessing
img_idx, img_info, anns, rectify_pose = args
src_img = Image.open(osp.join(src_image_root, img_info['file_name']))
labels = []
for ann_idx, ann in enumerate(anns):
text_label = ann['utf8_string']
# Ignore illegible or non-English words
if text_label == '.':
continue
x, y, w, h = ann['bbox']
x, y = max(0, math.floor(x)), max(0, math.floor(y))
w, h = math.ceil(w), math.ceil(h)
dst_img = src_img.crop((x, y, x + w, y + h))
if rectify_pose:
dst_img = rectify_image_pose(dst_img, (x, y), ann['points'])
dst_img_name = f'img_{img_idx}_{ann_idx}.jpg'
dst_img_path = osp.join(dst_image_root, dst_img_name)
# Preserve JPEG quality
dst_img.save(dst_img_path, qtables=src_img.quantization)
labels.append(f'{osp.basename(dst_image_root)}/{dst_img_name}'
f' {text_label}')
src_img.close()
return labels
def convert_textocr(root_path,
dst_image_path,
dst_label_filename,
annotation_filename,
img_start_idx=0,
nproc=1,
rectify_pose=False):
annotation_path = osp.join(root_path, annotation_filename)
if not osp.exists(annotation_path):
raise Exception(
f'{annotation_path} not exists, please check and try again.')
src_image_root = root_path
# outputs
dst_label_file = osp.join(root_path, dst_label_filename)
dst_image_root = osp.join(root_path, dst_image_path)
os.makedirs(dst_image_root, exist_ok=True)
annotation = mmcv.load(annotation_path)
process_img_with_path = partial(
process_img,
src_image_root=src_image_root,
dst_image_root=dst_image_root)
tasks = []
for img_idx, img_info in enumerate(annotation['imgs'].values()):
ann_ids = annotation['imgToAnns'][img_info['id']]
anns = [annotation['anns'][ann_id] for ann_id in ann_ids]
tasks.append((img_idx + img_start_idx, img_info, anns, rectify_pose))
labels_list = mmcv.track_parallel_progress(
process_img_with_path, tasks, keep_order=True, nproc=nproc)
final_labels = []
for label_list in labels_list:
final_labels += label_list
list_to_file(dst_label_file, final_labels)
return len(annotation['imgs'])
def main():
args = parse_args()
root_path = args.root_path
print('Processing training set...')
num_train_imgs = convert_textocr(
root_path=root_path,
dst_image_path='image',
dst_label_filename='train_label.txt',
annotation_filename='TextOCR_0.1_train.json',
nproc=args.n_proc,
rectify_pose=args.rectify_pose)
print('Processing validation set...')
convert_textocr(
root_path=root_path,
dst_image_path='image',
dst_label_filename='val_label.txt',
annotation_filename='TextOCR_0.1_val.json',
img_start_idx=num_train_imgs,
nproc=args.n_proc,
rectify_pose=args.rectify_pose)
print('Finish')
if __name__ == '__main__':
main()
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/tools/filter_lmdb.py | tools/filter_lmdb.py | #!/usr/bin/env python3
import io
import os
from argparse import ArgumentParser
import numpy as np
import lmdb
from PIL import Image
def main():
parser = ArgumentParser()
parser.add_argument('inputs', nargs='+', help='Path to input LMDBs')
parser.add_argument('--output', help='Path to output LMDB')
parser.add_argument('--min_image_dim', type=int, default=8)
args = parser.parse_args()
os.makedirs(args.output, exist_ok=True)
with lmdb.open(args.output, map_size=1099511627776) as env_out:
in_samples = 0
out_samples = 0
samples_per_chunk = 1000
for lmdb_in in args.inputs:
with lmdb.open(lmdb_in, readonly=True, max_readers=1, lock=False) as env_in:
with env_in.begin() as txn:
num_samples = int(txn.get('num-samples'.encode()))
in_samples += num_samples
chunks = np.array_split(range(num_samples), num_samples // samples_per_chunk)
for chunk in chunks:
cache = {}
with env_in.begin() as txn:
for index in chunk:
index += 1 # lmdb starts at 1
image_key = f'image-{index:09d}'.encode()
image_bin = txn.get(image_key)
img = Image.open(io.BytesIO(image_bin))
w, h = img.size
if w < args.min_image_dim or h < args.min_image_dim:
print(f'Skipping: {index}, w = {w}, h = {h}')
continue
out_samples += 1 # increment. start at 1
label_key = f'label-{index:09d}'.encode()
out_label_key = f'label-{out_samples:09d}'.encode()
out_image_key = f'image-{out_samples:09d}'.encode()
cache[out_label_key] = txn.get(label_key)
cache[out_image_key] = image_bin
with env_out.begin(write=True) as txn:
for k, v in cache.items():
txn.put(k, v)
print(f'Written samples from {chunk[0]} to {chunk[-1]}')
with env_out.begin(write=True) as txn:
txn.put('num-samples'.encode(), str(out_samples).encode())
print(f'Written {out_samples} samples to {args.output} out of {in_samples} input samples.')
if __name__ == '__main__':
main()
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/tools/openvino_converter.py | tools/openvino_converter.py | #!/usr/bin/env python3
import math
import os
import os.path as osp
from argparse import ArgumentParser
from functools import partial
import mmcv
from PIL import Image
from mmocr.utils.fileio import list_to_file
def parse_args():
parser = ArgumentParser(description='Generate training and validation set '
'of OpenVINO annotations for Open '
'Images by cropping box image.')
parser.add_argument(
'root_path', help='Root dir containing images and annotations')
parser.add_argument(
'n_proc', default=1, type=int, help='Number of processes to run')
args = parser.parse_args()
return args
def process_img(args, src_image_root, dst_image_root):
# Dirty hack for multiprocessing
img_idx, img_info, anns = args
src_img = Image.open(osp.join(src_image_root, img_info['file_name']))
labels = []
for ann_idx, ann in enumerate(anns):
attrs = ann['attributes']
text_label = attrs['transcription']
# Ignore illegible or non-English words
if not attrs['legible'] or attrs['language'] != 'english':
continue
x, y, w, h = ann['bbox']
x, y = max(0, math.floor(x)), max(0, math.floor(y))
w, h = math.ceil(w), math.ceil(h)
dst_img = src_img.crop((x, y, x + w, y + h))
dst_img_name = f'img_{img_idx}_{ann_idx}.jpg'
dst_img_path = osp.join(dst_image_root, dst_img_name)
# Preserve JPEG quality
dst_img.save(dst_img_path, qtables=src_img.quantization)
labels.append(f'{osp.basename(dst_image_root)}/{dst_img_name}'
f' {text_label}')
src_img.close()
return labels
def convert_openimages(root_path,
dst_image_path,
dst_label_filename,
annotation_filename,
img_start_idx=0,
nproc=1):
annotation_path = osp.join(root_path, annotation_filename)
if not osp.exists(annotation_path):
raise Exception(
f'{annotation_path} not exists, please check and try again.')
src_image_root = root_path
# outputs
dst_label_file = osp.join(root_path, dst_label_filename)
dst_image_root = osp.join(root_path, dst_image_path)
os.makedirs(dst_image_root, exist_ok=True)
annotation = mmcv.load(annotation_path)
process_img_with_path = partial(
process_img,
src_image_root=src_image_root,
dst_image_root=dst_image_root)
tasks = []
anns = {}
for ann in annotation['annotations']:
anns.setdefault(ann['image_id'], []).append(ann)
for img_idx, img_info in enumerate(annotation['images']):
tasks.append((img_idx + img_start_idx, img_info, anns[img_info['id']]))
labels_list = mmcv.track_parallel_progress(
process_img_with_path, tasks, keep_order=True, nproc=nproc)
final_labels = []
for label_list in labels_list:
final_labels += label_list
list_to_file(dst_label_file, final_labels)
return len(annotation['images'])
def main():
args = parse_args()
root_path = args.root_path
print('Processing training set...')
num_train_imgs = 0
for s in '125f':
num_train_imgs = convert_openimages(
root_path=root_path,
dst_image_path=f'image_{s}',
dst_label_filename=f'train_{s}_label.txt',
annotation_filename=f'text_spotting_openimages_v5_train_{s}.json',
img_start_idx=num_train_imgs,
nproc=args.n_proc)
print('Processing validation set...')
convert_openimages(
root_path=root_path,
dst_image_path='image_val',
dst_label_filename='val_label.txt',
annotation_filename='text_spotting_openimages_v5_validation.json',
img_start_idx=num_train_imgs,
nproc=args.n_proc)
print('Finish')
if __name__ == '__main__':
main()
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/tools/coco_text_converter.py | tools/coco_text_converter.py | #!/usr/bin/env python3
for s in ['train', 'val']:
with open('{}_words_gt.txt'.format(s), 'r', encoding='utf8') as f:
d = f.readlines()
with open('{}_lmdb.txt'.format(s), 'w', encoding='utf8') as f:
for line in d:
try:
fname, label = line.split(',', maxsplit=1)
except ValueError:
continue
fname = '{}_words/{}.jpg'.format(s, fname.strip())
label = label.strip().strip('|')
f.write('\t'.join([fname, label]) + '\n')
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/tools/art_converter.py | tools/art_converter.py | #!/usr/bin/env python3
import json
with open('train_task2_labels.json', 'r', encoding='utf8') as f:
d = json.load(f)
with open('gt.txt', 'w', encoding='utf8') as f:
for k, v in d.items():
if len(v) != 1:
print('error', v)
v = v[0]
if v['language'].lower() != 'latin':
# print('Skipping non-Latin:', v)
continue
if v['illegibility']:
# print('Skipping unreadable:', v)
continue
label = v['transcription'].strip()
if not label:
# print('Skipping blank label')
continue
if '#' in label and label != 'LocaL#3':
# print('Skipping corrupted label')
continue
f.write('\t'.join(['train_task2_images/' + k + '.jpg', label]) + '\n')
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
bharadwaj-raju/cligenerator | https://github.com/bharadwaj-raju/cligenerator/blob/9b61832311dab3fcd0146195a063667a4440578b/setup.py | setup.py | from distutils.core import setup
import subprocess as sp
version = '1.0.2'
setup(
name='cligenerator',
packages=['cligenerator'],
version=version,
description='Generate CLI tools from Python modules and functions',
author='Bharadwaj Raju',
author_email='bharadwaj.raju777@gmail.com',
url='https://github.com/bharadwaj-raju/cligenerator',
download_url='https://github.com/bharadwaj-raju/cligenerator/tarball/1.0.1',
keywords = ['cli', 'argparse', 'interface', 'module', 'function', 'generate', 'command-line'],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
| python | MIT | 9b61832311dab3fcd0146195a063667a4440578b | 2026-01-05T07:12:53.345874Z | false |
bharadwaj-raju/cligenerator | https://github.com/bharadwaj-raju/cligenerator/blob/9b61832311dab3fcd0146195a063667a4440578b/cligenerator/cligenerator.py | cligenerator/cligenerator.py | # coding: utf-8
# Licensed under the MIT license (see the LICENSE file or the text below)
# This file is part of cligenerator — generate CLI tools from Python libraries
# — also known as "CLI Generator" and "CLIGenerator" and "cli-generator"
# Copyright © 2016 Bharadwaj Raju
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import inspect
from textwrap import dedent
class CLIGenerator(object):
def __init__(self, module_or_function, name='',
description='', library_name='',
help_strings={}, option_types={},
ignore_modules=None, ignore_functions=None,
recurse_modules=False,
additional_imports=None):
self.help_strings = help_strings
self.option_types = option_types
self.additional_imports = additional_imports or []
self.module_or_function = module_or_function
self.recurse_modules = recurse_modules
self.name = name or self.module_or_function.__name__
self.library_name = library_name or self.name
self.description = description or 'A CLI tool for {}'.format(
self.library_name)
self.ignore_modules = ignore_modules or []
self.ignore_functions = ignore_functions or []
self.usage = '{name} [command] [options]'.format(name=name)
if inspect.ismodule(module_or_function):
self.mode = 'module'
elif hasattr(module_or_function, '__call__'):
self.mode = 'function'
else:
raise TypeError(
'module_or_function must be a valid module object or a function object!')
# Base tool code
# Code is built out of these by the generate* functions
self.base_code = \
dedent('''\
import argparse
import sys
{additional_imports}
class {class_name}(object):
def __init__(self):
parser = argparse.ArgumentParser(
description='{description}',
formatter_class=argparse.RawTextHelpFormatter,
usage='%(prog)s command options',
allow_abbrev=False)
parser.add_argument('command', help='Command to run.')
args = parser.parse_args(sys.argv[1:2]) # Ignore options
self._one_func_mode = False
if not hasattr(self, args.command.replace('.', '_')):
print('Unrecognized command!')
sys.exit(1)
getattr(self, args.command.replace('.', '_'))()
''').format(description=self.description,
usage=self.usage,
additional_imports='ADDITIONAL_IMPORTS/:',
# Don't replace now, since it'll be used again later
class_name=self.name.capitalize()\
.replace('-', '_').replace('.', '_') + 'CLI')
def _object_tree(self, obj):
tree = {}
for i in dir(obj):
obj_i = getattr(obj, i)
func_test = hasattr(obj_i, '__call__') and\
obj_i.__name__ not in self.ignore_functions
mod_test = inspect.ismodule(obj_i) and\
obj_i.__name__.startswith(
self.module_or_function.__name__) and\
obj_i.__name__ not in self.ignore_modules
if not i.startswith('_'):
if func_test:
tree[i] = obj_i
elif mod_test:
if self.recurse_modules:
tree[i] = (obj_i, self.object_tree(obj_i))
else:
tree[i] = (obj_i, obj_i)
return tree
def _get_arguments(self, func):
argc = func.__code__.co_argcount
argv = func.__code__.co_varnames[:argc]
argspec = inspect.getargspec(func)
try:
defaults = dict(zip(argspec.args[-len(argspec.defaults):],
argspec.defaults))
except TypeError:
defaults = {}
return (argv, defaults)
def _get_option_type(self, func, option):
def _type_to_str(type_):
return repr(type_).replace('<class ', '').replace(
'>', '').replace("'", '').replace('Type', '').replace('<type ',
'')
try:
return _type_to_str(self.option_types[func.__module__][
func.__name__][option])
except KeyError:
try:
return _type_to_str(self.option_types[func.__name__][option])
except KeyError:
try:
return _type_to_str(self.option_types[option])
except KeyError:
if option in self._get_arguments(func)[1]:
return _type_to_str(
type(self._get_arguments(func)[1][option]))
else:
return None
def _get_function_description(self, func):
try:
return self.help_strings[func.__module__][func.__name__]
except KeyError:
try:
return self.help_strings[func.__name__]
except KeyError:
if func.__doc__:
return func.__doc__.splitlines(
)[0] or func.__doc__.splitlines[1]
else:
return ''
def _get_option_help(self, func, option):
try:
return self.help_strings[func.__module__][func.__name__][option]
except KeyError:
try:
return self.help_strings[func.__name__][option]
except KeyError:
try:
return self.help_strings[option]
except KeyError:
return ''
def _generate_function(self, func):
func_name = '{}{}'.format(
func.__module__ + '.' if func.__module__ != '__main__' else '',
func.__name__
)
self._func_name = func_name
template = '''\
def {name}(self):
parser = argparse.ArgumentParser(description='{description}')
{arg_defs}
if self._one_func_mode:
args = parser.parse_args(sys.argv[1:])
else:
args = parser.parse_args(sys.argv[2:])
{function_call}
'''
arg_template = '''parser.add_argument('{arg_name}'{additional_opts})'''
arg_defs = []
func_args, func_defaults = self._get_arguments(func)
for i in func_args:
required = bool(i not in func_defaults)
option_type = self._get_option_type(func, i)
additional_opts = ''
if option_type is not None and option_type != 'bool':
additional_opts += ', type={}'.format(
option_type if option_type != 'dict' else 'json.loads')
elif option_type == 'bool':
additional_opts += ', action=\'store_true\''
if option_type == 'list':
additional_opts += ', nargs=\'*\''
# nargs=* is zero or more values
if option_type == 'dict':
self.additional_imports.append('json')
if not required:
additional_opts += ', default={}'.format(
"'{}'".format(func_defaults[i]) if isinstance(func_defaults[i], str) else func_defaults[i])
if i.endswith('_') and not required:
additional_opts += ', dest=\'{}\''.format(i[:])
i = i[:-1]
if self._get_option_help(func, i):
additional_opts += ', help=\'{}\''.format(
self._get_option_help(func, i))
arg_defs.append(arg_template.format(
arg_name=i if required else '--{}'.format(i.replace('_', '-')),
additional_opts=additional_opts))
if self.mode == 'function':
function_call = 'REPL_W/:' # On purpose, to be replaced later
else:
function_call = 'print({}(**vars(args)))'.format(func_name)
fmt_func_name = ''.join(func_name.split('.', 1)[1:]) or func_name
self._name = fmt_func_name.replace('.', '_')[:]
return dedent(template.format(
name=fmt_func_name.replace('.', '_'),
description=self._get_function_description(func),
arg_defs='\n\t\t\t\t'.join(arg_defs) or '',
function_call=function_call))
def generate(self):
global code
code = self.base_code[:]
if self.mode == 'module':
self.additional_imports.append(
self.module_or_function.__name__.split('.')[0])
for i in self.additional_imports:
self.additional_imports[self.additional_imports.index(
i)] = 'import {}'.format(i) if not i.startswith('import ') else i
code = code.replace(
'ADDITIONAL_IMPORTS/:',
'\n'.join(
self.additional_imports))
module_tree = self._object_tree(self.module_or_function)
def _recurse_code_update(tree):
global code
for i in tree:
if isinstance(tree[i], tuple):
# Module in module
if self.recurse_modules:
_recurse_code_update(tree[i][1])
else:
function_code = self._generate_function(
tree[i]).splitlines()
function_code[0] = '\t' + function_code[0]
code += '\n\t'.join(function_code)
code += '\n\n'
_recurse_code_update(module_tree)
call_obj = self.name.capitalize().replace('-', '_').replace('.', '_') + 'CLI'
else:
if hasattr(sys, 'ps1'):
# Running interactively
try:
# dill works in interactive mode, inspect.getsource()
# doesn't
import dill
func_code = dill.source.getsource(self.module_or_function)
except ImportError:
try:
func_code = inspect.getsource(self.module_or_function)
except OSError:
func_code = ''
else:
func_code = inspect.getsource(self.module_or_function)
code = code.replace(
'ADDITIONAL_IMPORTS/:',
'\n' +
'\n'.join(
self.additional_imports) +
'\n' +
func_code +
'\n')
function_code = dedent(
self._generate_function(
self.module_or_function))
function_code = function_code.replace(
'def {}(self)'.format(
self._name), 'def {}()'.format(
'__' + self._name + 'CLI'))
function_code = function_code.replace('argparse.ArgumentParser(description=\'\')',
'argparse.ArgumentParser(description=\'{}\')'.format(self.description)) # Won't do anything if specified already
code = code.split(
'class {}(object)'.format(
self.name.capitalize().replace(
'-',
'_').replace(
'.',
'_') + 'CLI'),
1)[0]
to_remove = '''\tif self._one_func_mode:\n\t\targs = parser.parse_args(sys.argv[1:])\n\n\telse:\n\t\targs = parser.parse_args(sys.argv[2:])'''
code += function_code
code = code.replace(to_remove, '\n\targs = parser.parse_args()')
code = code.replace('REPL_W/:',
'try:\n\t\tprint({}(**vars(args)))\n\texcept:\n\t\tprint({}(**vars(args)))'
.format(self._func_name, self._func_name.split('.')[-1]))
code += '\n\n'
call_obj = '__' + self._name + 'CLI'
code += dedent('''
if __name__ == '__main__':
{}()
'''.format(call_obj))
return code
| python | MIT | 9b61832311dab3fcd0146195a063667a4440578b | 2026-01-05T07:12:53.345874Z | false |
bharadwaj-raju/cligenerator | https://github.com/bharadwaj-raju/cligenerator/blob/9b61832311dab3fcd0146195a063667a4440578b/cligenerator/__init__.py | cligenerator/__init__.py | from .cligenerator import CLIGenerator
| python | MIT | 9b61832311dab3fcd0146195a063667a4440578b | 2026-01-05T07:12:53.345874Z | false |
bharadwaj-raju/cligenerator | https://github.com/bharadwaj-raju/cligenerator/blob/9b61832311dab3fcd0146195a063667a4440578b/tests/mymodule.py | tests/mymodule.py | # A simple module
def greet(hello='Hello', world='World!'):
return hello + ', ' + world
def tab(text):
return '\t' + text
def untab(text):
return text.lstrip('\\t').lstrip()
| python | MIT | 9b61832311dab3fcd0146195a063667a4440578b | 2026-01-05T07:12:53.345874Z | false |
bharadwaj-raju/cligenerator | https://github.com/bharadwaj-raju/cligenerator/blob/9b61832311dab3fcd0146195a063667a4440578b/tests/cli_func.py | tests/cli_func.py | import argparse
import sys
def my_function(a, b=3):
return int(a) + int(b)
def __my_functionCLI():
parser = argparse.ArgumentParser(description='A CLI tool for my_function')
parser.add_argument('a')
parser.add_argument('--b', type=int, default=3)
args = parser.parse_args()
try:
print(test_function.my_function(**vars(args)))
except:
print(my_function(**vars(args)))
if __name__ == '__main__':
__my_functionCLI()
| python | MIT | 9b61832311dab3fcd0146195a063667a4440578b | 2026-01-05T07:12:53.345874Z | false |
bharadwaj-raju/cligenerator | https://github.com/bharadwaj-raju/cligenerator/blob/9b61832311dab3fcd0146195a063667a4440578b/tests/context.py | tests/context.py | import sys
sys.path.insert(0, '..')
import cligenerator
| python | MIT | 9b61832311dab3fcd0146195a063667a4440578b | 2026-01-05T07:12:53.345874Z | false |
bharadwaj-raju/cligenerator | https://github.com/bharadwaj-raju/cligenerator/blob/9b61832311dab3fcd0146195a063667a4440578b/tests/test_function.py | tests/test_function.py | from context import cligenerator
import subprocess as sp
import mymodule
import sys
import inspect
def my_function(a, b=3):
return int(a) + int(b)
def test_create_cli():
print('\nCreating CLI for my_function.')
print(inspect.getsource(my_function))
cligen = cligenerator.CLIGenerator(my_function)
with open('cli_func.py', 'w') as f:
f.write(cligen.generate())
def test_cli():
actual = sp.check_output([sys.executable, 'cli_func.py', '3'], universal_newlines=True)
assert actual.rstrip() == '6'
actual = sp.check_output([sys.executable, 'cli_func.py', '3', '--b', '5'], universal_newlines=True)
assert actual.rstrip() == '8'
| python | MIT | 9b61832311dab3fcd0146195a063667a4440578b | 2026-01-05T07:12:53.345874Z | false |
bharadwaj-raju/cligenerator | https://github.com/bharadwaj-raju/cligenerator/blob/9b61832311dab3fcd0146195a063667a4440578b/tests/test_module.py | tests/test_module.py | from context import cligenerator
import subprocess as sp
import mymodule
import sys
def test_create_cli():
print('\nCreating CLI for mymodule ({}).'.format(mymodule.__file__))
cligen = cligenerator.CLIGenerator(mymodule)
with open('cli_module.py', 'w') as f:
f.write(cligen.generate())
def test_cli_greet():
actual = sp.check_output([sys.executable, 'cli_module.py', 'greet'], universal_newlines=True)
assert actual.rstrip() == 'Hello, World!'
actual = sp.check_output([sys.executable, 'cli_module.py', 'greet', '--hello', 'Bye'], universal_newlines=True)
assert actual.rstrip() == 'Bye, World!'
actual = sp.check_output([sys.executable, 'cli_module.py', 'greet', '--hello', 'Bye', '--world', 'everyone!'],
universal_newlines=True)
assert actual.rstrip() == 'Bye, everyone!'
def test_cli_tab():
actual = sp.check_output([sys.executable, 'cli_module.py', 'tab', 'Hello'], universal_newlines=True)
assert actual.rstrip() == '\tHello'
def test_cli_untab():
actual = sp.check_output([sys.executable, 'cli_module.py', 'untab', ' Hello'], universal_newlines=True)
assert actual.rstrip() == 'Hello'
| python | MIT | 9b61832311dab3fcd0146195a063667a4440578b | 2026-01-05T07:12:53.345874Z | false |
bharadwaj-raju/cligenerator | https://github.com/bharadwaj-raju/cligenerator/blob/9b61832311dab3fcd0146195a063667a4440578b/tests/cli_module.py | tests/cli_module.py | import argparse
import sys
import mymodule
class MymoduleCLI(object):
def __init__(self):
parser = argparse.ArgumentParser(
description='A CLI tool for mymodule',
formatter_class=argparse.RawTextHelpFormatter,
usage='%(prog)s command options',
allow_abbrev=False)
parser.add_argument('command', help='Command to run.')
args = parser.parse_args(sys.argv[1:2]) # Ignore options
self._one_func_mode = False
if not hasattr(self, args.command.replace('.', '_')):
print('Unrecognized command!')
sys.exit(1)
getattr(self, args.command.replace('.', '_'))()
def untab(self):
parser = argparse.ArgumentParser(description='')
parser.add_argument('text')
if self._one_func_mode:
args = parser.parse_args(sys.argv[1:])
else:
args = parser.parse_args(sys.argv[2:])
print(mymodule.untab(**vars(args)))
def tab(self):
parser = argparse.ArgumentParser(description='')
parser.add_argument('text')
if self._one_func_mode:
args = parser.parse_args(sys.argv[1:])
else:
args = parser.parse_args(sys.argv[2:])
print(mymodule.tab(**vars(args)))
def greet(self):
parser = argparse.ArgumentParser(description='')
parser.add_argument('--hello', type=str, default='Hello')
parser.add_argument('--world', type=str, default='World!')
if self._one_func_mode:
args = parser.parse_args(sys.argv[1:])
else:
args = parser.parse_args(sys.argv[2:])
print(mymodule.greet(**vars(args)))
if __name__ == '__main__':
MymoduleCLI()
| python | MIT | 9b61832311dab3fcd0146195a063667a4440578b | 2026-01-05T07:12:53.345874Z | false |
Akascape/Ascify-Art | https://github.com/Akascape/Ascify-Art/blob/202a6d894bcce7cc54ace8ae987e0663f743675d/ascify art/Ascify-Art.py | ascify art/Ascify-Art.py |
"""
╔═══╗ ╔═╗ ╔═══╗ ╔╗
║╔═╗║ ║╔╝ ║╔═╗║ ╔╝╚╗
║║ ║║╔══╗╔══╗╔╗╔╝╚╗╔╗ ╔╗ ║║ ║║╔═╗╚╗╔╝
║╚═╝║║══╣║╔═╝╠╣╚╗╔╝║║ ║║ ║╚═╝║║╔╝ ║║
║╔═╗║╠══║║╚═╗║║ ║║ ║╚═╝║ ║╔═╗║║║ ║╚╗
╚╝ ╚╝╚══╝╚══╝╚╝ ╚╝ ╚═╗╔╝ ╚╝ ╚╝╚╝ ╚═╝
╔═╝║
╚══╝
Version: 0.9
Developer: Akash Bora (Akascape)
License: MIT
More info: https://github.com/Akascape/Ascify-Art
"""
import customtkinter
import sys
import tkinter as tk
from tkinter import ttk, font, filedialog
import threading
import os
from PIL import Image, ImageDraw, ImageTk, ImageFont, ImageEnhance, UnidentifiedImageError
import CTkColorPicker
import math
import glob
import matplotlib.font_manager
import webbrowser
import random
from tkinterdnd2 import TkinterDnD, DND_ALL
customtkinter.set_appearance_mode("Dark")
class CTk(customtkinter.CTk, TkinterDnD.DnDWrapper):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.TkdndVersion = TkinterDnD._require(self)
root = CTk()
root.geometry("1100x600")
root.minsize(800,430)
root.title("Ascify Art")
root.configure(fg_color="#1e2434")
root.columnconfigure((0,1), weight=1)
root.rowconfigure(1, weight=1)
root.bind("<1>", lambda event: event.widget.focus_set())
root.wm_iconbitmap()
def get_path(event):
dropped_file = event.data.replace("{","").replace("}", "")
openfile(dropped_file)
root.drop_target_register(DND_ALL)
root.dnd_bind("<<Drop>>", get_path)
if sys.platform.startswith("win"):
# Apply the mica theme for windows if possible (works with windows 11)
try:
from ctypes import windll, byref, sizeof, c_int
HWND = windll.user32.GetParent(root.winfo_id())
windll.dwmapi.DwmSetWindowAttribute(HWND, 35, byref(c_int(0x34241e)), sizeof(c_int))
except:
pass
def exit_program():
x = tk.messagebox.askquestion("Exit?", "Do you want to close this program?")
if x=="yes":
root.destroy()
else:
return
def resource(relative_path):
# resource finder via pyinstaller
base_path = getattr(
sys,
'_MEIPASS',
os.path.dirname(os.path.abspath(__file__)))
return os.path.join(base_path, relative_path)
icopath = ImageTk.PhotoImage(file=resource("icon.png"))
root.iconphoto(False, icopath)
root.protocol("WM_DELETE_WINDOW", exit_program)
app_color = random.choice(["#487a7d", "#e49c04", "#84b701", "#e52aff", "#5591c8"])
frame_image = customtkinter.CTkFrame(root, width=350, fg_color="#1b202c", corner_radius=20)
frame_image.grid(column=1, row=0, rowspan=10, sticky="nsew", padx=20, pady=20)
frame_image.rowconfigure(0, weight=1)
frame_image.columnconfigure(0, weight=1)
label_image = customtkinter.CTkLabel(frame_image, width=350, fg_color="#1b202c", corner_radius=0, text="")
label_image.grid(sticky="nsew", padx=5, pady=5)
title = customtkinter.CTkImage(Image.open(resource("title.png")), size=(250,40))
label_1 = customtkinter.CTkLabel(root, text="", image=title)
label_1.grid(row=0, column=0, sticky="wen", pady=20)
file = ""
previous = ""
background = "black"
frame_no = 0
ascii_string = ""
sequence = False
def operation():
# main function that will do the magic
global image, outputImage, ascii_string
if not file:
return
def getChar(inputInt):
return charArray[math.floor(inputInt*interval)]
chars = textbox.get('1.0', tk.END)[::-1]
charArray = list(chars)
charLength = len(charArray)
interval = charLength/256
scaleFactor = round(slider_scale.get(), 3)
oneCharWidth = int(slider_width.get())
oneCharHeight = int(slider_height.get())
s = int(slider_size.get())
im = Image.open(file).convert('RGB')
if selected_font:
fnt = ImageFont.truetype(selected_font, s)
else:
fnt = ImageFont.load_default()
width, height = im.size
im = im.resize((int(scaleFactor*width), int(scaleFactor*height*(oneCharWidth/oneCharHeight))), Image.Resampling.NEAREST)
width, height = im.size
pix = im.load()
outputImage = Image.new('RGB', (oneCharWidth * width, oneCharHeight * height), color = background)
d = ImageDraw.Draw(outputImage)
new_r = int(slider_r.get())
new_g = int(slider_g.get())
new_b = int(slider_b.get())
auto = automatic.get()
ascii_string = ""
# replace the pixels with text
for i in range(height):
for j in range(width):
r, g, b = pix[j, i]
if auto==0:
if r>=new_r: r = new_r
if g>=new_g: g = new_g
if b>=new_b: b = new_b
h = int(r/3 + g/3 + b/3)
pix[j, i] = (h, h, h)
d.text((j*oneCharWidth, i*oneCharHeight), getChar(h), font = fnt, fill = (r, g, b))
ascii_string += str(getChar(h))
ascii_string += "\n"
# some other enhancements like saturation and brightness
outputImage = ImageEnhance.Color(outputImage).enhance(slider_sat.get())
outputImage = ImageEnhance.Brightness(outputImage).enhance(slider_br.get())
# update the label
image = customtkinter.CTkImage(outputImage, size=(frame_image.winfo_height(),frame_image.winfo_height()*img.size[1]/img.size[0]))
label_image.configure(image=image)
def openfile(prefile=None):
# opening and loading the file
global file, image, img, dir_, sequence, previous
if not prefile:
file = filedialog.askopenfilename(filetypes=[('Images', ['*.png', '*.jpg', '*.jpeg', '*.bmp', '*webp']),
('All Files', '*.*')])
else:
file = prefile
if os.path.exists(file):
# check if imported image is valid or not
try:
Image.open(file)
except UnidentifiedImageError:
tk.messagebox.showerror("Oops!", "Not a valid image file!")
file = previous
return
sequence = False
previous = file
if len(os.path.basename(file))>=50:
open_button.configure(
text=f"{os.path.basename(file)[:40]}...{os.path.basename(file)[-3:]}"
)
else:
open_button.configure(text=os.path.basename(file))
img = Image.open(file)
image = customtkinter.CTkImage(img)
label_image.configure(image=image)
image.configure(size=(frame_image.winfo_height(),frame_image.winfo_height()*img.size[1]/img.size[0]))
dir_ = os.path.dirname(file)
if len(dir_)>=30:
entry_location.configure(text=f"{dir_[:25]}...{dir_[-5:]}")
else:
entry_location.configure(text=dir_)
var.set(f"{os.path.basename(file)[:-4]}_Ascified")
try:
root.unbind("<KeyRelease-Left>")
root.unbind("<KeyRelease-Right>")
RightClickMenu.delete("Next Frame")
RightClickMenu.delete("Previous Frame")
except:
None
elif previous!="":
file = previous
def frame_next():
# load next frame of sequence
global frame_no, file
if frame_no<len(allitems)-1:
file = allitems[frame_no+1]
frame_no+=1
operation()
def frame_previous():
# load previous frame of sequence
global frame_no, file
if frame_no>0:
file = allitems[frame_no-1]
frame_no-=1
operation()
def open_sequence():
# opening a sequence of images (folder)
global dir_, file, img, image, allitems, sequence
if dir_img := filedialog.askdirectory():
allitems = glob.glob(os.path.join(dir_img, '*.png'))
allitems.extend(glob.glob(os.path.join(dir_img, '*.jpeg')))
allitems.extend(glob.glob(os.path.join(dir_img, '*.jpg')))
allitems.extend(glob.glob(os.path.join(dir_img, '*.bmp')))
allitems.extend(glob.glob(os.path.join(dir_img, '*.webp')))
if len(allitems) == 0:
tk.messagebox.showinfo("Oops!", "No valid image files present in this folder!")
return
sequence = True
if len(dir_img)>=50:
open_button.configure(text=f"{dir_img[:40]}...{dir_img[-3:]}")
else:
open_button.configure(text=dir_img)
dir_ = os.path.dirname(dir_img)
if len(dir_)>=30:
entry_location.configure(text=f"{dir_[:25]}...{dir_[-5:]}")
else:
entry_location.configure(text=dir_)
try:
RightClickMenu.delete("Next Frame")
RightClickMenu.delete("Previous Frame")
except:
None
var.set(f"{os.path.basename(dir_img)}_Ascified")
RightClickMenu.add_command(label="Next Frame", command=lambda: frame_next())
RightClickMenu.add_command(label="Previous Frame", command=lambda: frame_previous())
file = allitems[0]
img = Image.open(file)
image = customtkinter.CTkImage(img)
label_image.configure(image=image)
image.configure(size=(frame_image.winfo_height(),frame_image.winfo_height()*img.size[1]/img.size[0]))
root.bind("<KeyRelease-Left>", lambda e: frame_previous())
root.bind("<KeyRelease-Right>", lambda e: frame_next())
def resize_event(event):
# dynamic resize of the image with UI
global image
if image!="":
image.configure(size=(event.height,event.height*img.size[1]/img.size[0]))
open_button = customtkinter.CTkButton(root, text="OPEN", fg_color=app_color, command=openfile)
open_button.grid(row=1, column=0, sticky="wen", pady=20, padx=(20,0))
root.bind("<Control-o>", lambda e: openfile())
image = ""
# TABS
frame_image.bind("<Configure>", resize_event)
tabview = customtkinter.CTkTabview(root, fg_color="#1b202c",
segmented_button_fg_color="#0e1321",
segmented_button_selected_color=app_color,
segmented_button_unselected_color="#0e1321",
segmented_button_selected_hover_color=app_color)
tabview.grid(row=1, column=0, padx=(20, 0), pady=(80, 20), sticky="nsew")
tabview.add("Characters")
tabview.add("Size")
tabview.add("Colors")
tabview.add("Font")
tabview.add("Export")
def show_original():
# show original image (right click on the image)
global image
if file:
image = customtkinter.CTkImage(img)
image.configure(size=(frame_image.winfo_height(),frame_image.winfo_height()*img.size[1]/img.size[0]))
label_image.configure(image=image)
def copy_ascii():
operation()
root.clipboard_append(ascii_string)
RightClickMenu = tk.Menu(frame_image, tearoff=False, background='#343e5a', fg='white', borderwidth=0, bd=0, activebackground=app_color)
RightClickMenu.add_command(label="Show Original", command=lambda: show_original())
RightClickMenu.add_command(label="Show Ascified", command=lambda: operation())
RightClickMenu.add_command(label="Copy Ascii Text", command=lambda: copy_ascii())
root.bind("<Return>", lambda e: operation())
def do_popup(event, frame):
try: frame.tk_popup(event.x_root, event.y_root)
finally: frame.grab_release()
label_image.bind("<Button-3>", lambda event: do_popup(event, frame=RightClickMenu))
RightClickMenu2 = tk.Menu(frame_image, tearoff=False, background='#343e5a', fg='white', borderwidth=0, bd=0, activebackground=app_color)
RightClickMenu2.add_command(label="Open Image Sequence", command=lambda: open_sequence())
open_button.bind("<Button-3>", lambda event: do_popup(event, frame=RightClickMenu2))
# TAB 1 (Characters)
tabview.tab("Characters").rowconfigure((0,1), weight=1)
tabview.tab("Characters").columnconfigure(0, weight=1)
label_2 = customtkinter.CTkLabel(tabview.tab("Characters"), text="Enter characters:")
label_2.grid(row=0, column=0, sticky="wn", pady=10, padx=20)
text_live = customtkinter.CTkCheckBox(tabview.tab("Characters"), fg_color=app_color, text="Live Preview",
hover=False, command=lambda: operation() if text_live.get()==1 else None)
text_live.grid(row=0, column=0, padx=20, pady=10, sticky="ne")
text_live.select()
textbox = customtkinter.CTkTextbox(tabview.tab("Characters"), fg_color="#282c35", undo=True)
textbox._textbox.configure(selectbackground=app_color)
textbox.grid(row=0, column=0, columnspan=2, padx=20, pady=(50,20), sticky="nsew")
textbox.insert(tk.END, "$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\"^`'. ")
textbox.bind("<KeyRelease>", lambda event: operation() if text_live.get()==1 else None)
def cut_text():
""" cut text operation """
copy_text()
try: textbox.delete(tk.SEL_FIRST, tk.SEL_LAST)
except: pass
def copy_text():
""" copy text operation """
try:
root.clipboard_clear()
root.clipboard_append(textbox.get(tk.SEL_FIRST, tk.SEL_LAST))
except: pass
def paste_text():
""" paste text operation """
try: textbox.insert(textbox.index('insert'), root.clipboard_get())
except: pass
def clear_all_text():
""" clears sll the text """
try:
textbox.delete(0.0,"end")
except: pass
copy_menu = tk.Menu(textbox, tearoff=False, background='#343e5a', fg='white', borderwidth=0, bd=0, activebackground=app_color)
copy_menu.add_command(label="Cut", command=cut_text)
copy_menu.add_command(label="Copy", command=copy_text)
copy_menu.add_command(label="Paste", command=paste_text)
copy_menu.add_command(label="Clear", command=clear_all_text)
textbox.bind("<Button-3>", lambda event: do_popup(event, frame=copy_menu))
# TAB 2 (Size)
tabview.tab("Size").columnconfigure(0, weight=1)
label_4 = customtkinter.CTkLabel(tabview.tab("Size"), text="Character size: 15")
label_4.grid(row=0, column=0, sticky="wn", pady=10, padx=20)
slider_size = customtkinter.CTkSlider(
tabview.tab("Size"),
hover=False,
height=20,
button_color="white",
from_=1,
to=100,
command=lambda e: label_4.configure(text=f"Character size: {int(e)}"),
)
slider_size.bind("<ButtonRelease-1>", lambda event: operation())
slider_size.grid(row=1, column=0, sticky="we", pady=0, padx=20)
slider_size.set(15)
label_14 = customtkinter.CTkLabel(tabview.tab("Size"), text="Scale factor: 0.09")
label_14.grid(row=2, column=0, sticky="wn", pady=10, padx=20)
slider_scale = customtkinter.CTkSlider(
tabview.tab("Size"),
hover=False,
height=20,
button_color="white",
from_=0.01,
to=0.2,
command=lambda e: label_14.configure(
text=f"Scale factor: {str(round(e, 3))}"
),
)
slider_scale.set(0.09)
slider_scale.bind("<ButtonRelease-1>", lambda event: operation())
slider_scale.grid(row=3, column=0, sticky="we", pady=0, padx=20)
label_12 = customtkinter.CTkLabel(tabview.tab("Size"), text="Scale width: 10")
label_12.grid(row=4, column=0, sticky="wn", pady=10, padx=20)
slider_width = customtkinter.CTkSlider(
tabview.tab("Size"),
hover=False,
height=20,
button_color="white",
from_=1,
to=30,
command=lambda e: label_12.configure(text=f"Scale width: {int(e)}"),
)
slider_width.set(10)
slider_width.bind("<ButtonRelease-1>", lambda event: operation())
slider_width.grid(row=5, column=0, sticky="we", pady=0, padx=20)
label_13 = customtkinter.CTkLabel(tabview.tab("Size"), text="Scale height: 18")
label_13.grid(row=6, column=0, sticky="wn", pady=10, padx=20)
slider_height = customtkinter.CTkSlider(
tabview.tab("Size"),
hover=False,
height=20,
button_color="white",
from_=1,
to=30,
command=lambda e: label_13.configure(text=f"Scale height: {int(e)}"),
)
slider_height.set(18)
slider_height.bind("<ButtonRelease-1>", lambda event: operation())
slider_height.grid(row=7, column=0, sticky="we", pady=0, padx=20)
# TAB 3 (Colors)
tabview.tab("Colors").columnconfigure(0, weight=1)
def toggle_rgb():
# Turn off/on automatic colors
if automatic.get()==1:
slider_r.configure(state="disabled", button_color="grey")
slider_g.configure(state="disabled", button_color="grey")
slider_b.configure(state="disabled", button_color="grey")
label_3.configure(state="disabled")
else:
slider_r.configure(state="normal", button_color="white")
slider_g.configure(state="normal", button_color="white")
slider_b.configure(state="normal", button_color="white")
label_3.configure(state="normal")
operation()
automatic = customtkinter.CTkSwitch(tabview.tab("Colors"), text="Automatic Colors",
progress_color=app_color, command=toggle_rgb)
automatic.grid(row=0, column=0, sticky="wn", pady=10, padx=20)
# IMAGE ENHANCEMENTS
label_3 = customtkinter.CTkLabel(tabview.tab("Colors"), text="RGB Space:")
label_3.grid(row=1, column=0, sticky="wn", pady=10, padx=20)
slider_r = customtkinter.CTkSlider(tabview.tab("Colors"), height=20, progress_color="red",
hover=False, button_color="white", from_=0, to=255)
slider_r.bind("<ButtonRelease-1>", lambda event: operation())
slider_r.set(255)
slider_r.grid(row=2, column=0, sticky="wen", pady=10, padx=20)
slider_b = customtkinter.CTkSlider(tabview.tab("Colors"), height=20, progress_color="blue",
hover=False, button_color="white", from_=0, to=255)
slider_b.bind("<ButtonRelease-1>", lambda event: operation())
slider_b.grid(row=3, column=0, sticky="wen", pady=10, padx=20)
slider_b.set(255)
slider_g = customtkinter.CTkSlider(tabview.tab("Colors"), height=20, progress_color="green",
hover=False, button_color="white", from_=0, to=255)
slider_g.bind("<ButtonRelease-1>", lambda event: operation())
slider_g.grid(row=4, column=0, sticky="wen", pady=10, padx=20)
slider_g.set(255)
label_6 = customtkinter.CTkLabel(tabview.tab("Colors"), text="Background:")
label_6.grid(row=5, column=0, sticky="wn", pady=10, padx=20)
def change_bg():
# open my special color picker
global background
pick_color = CTkColorPicker.AskColor(fg_color="#1e2434", bg_color="#1b202c", button_color=app_color)
pick_color.wm_iconbitmap()
root.after(200, lambda: pick_color.iconphoto(False, icopath))
color = pick_color.get()
if color is None:
return
bg_color.configure(fg_color=color)
background = color
operation()
bg_color = customtkinter.CTkButton(tabview.tab("Colors"), corner_radius=20, fg_color="black", border_width=2,
text="", hover=False, command=change_bg)
bg_color.grid(row=5, column=0, sticky="wn", padx=(100,20), pady=10)
label_7 = customtkinter.CTkLabel(tabview.tab("Colors"), text="Saturation:")
label_7.grid(row=6, column=0, sticky="wn", pady=10, padx=20)
slider_sat = customtkinter.CTkSlider(tabview.tab("Colors"), height=20, width=10, hover=False, button_color="white", from_=0, to=10)
slider_sat.set(1)
slider_sat.bind("<ButtonRelease-1>", lambda event: operation())
slider_sat.grid(row=6, column=0, sticky="wen", pady=15, padx=(100,20))
label_8 = customtkinter.CTkLabel(tabview.tab("Colors"), text="Brightness:")
label_8.grid(row=7, column=0, sticky="wn", pady=10, padx=20)
slider_br = customtkinter.CTkSlider(tabview.tab("Colors"), height=20, width=10, hover=False, button_color="white", from_=0, to=15)
slider_br.set(1)
slider_br.bind("<ButtonRelease-1>", lambda event: operation())
slider_br.grid(row=7, column=0, sticky="wen", pady=15, padx=(100,20))
automatic.toggle()
# TAB-3 (Fonts)
selected_font = ""
def change_font(font):
# change selected font
global selected_font
selected_font = font
operation()
def populate(frame):
# load system fonts (not all at once because it will lag)
global loaded_fonts, all_fonts, l
all_fonts = matplotlib.font_manager.findSystemFonts()
l = len(all_fonts) if len(all_fonts)<100 else 50
for filename in sorted(all_fonts)[:l]:
if "Emoji" not in filename and "18030" not in filename:
font_load = ImageFont.FreeTypeFont(filename)
if (font_load.getname()[1]).lower()!="regular":
name = " ".join(font_load.getname())
else:
name = font_load.getname()[0]
try:
label = customtkinter.CTkButton(frame, text=name, font=(name, 16), fg_color="#1b202c", text_color="white",
anchor="w", command=lambda event=filename: change_font(event)).grid(sticky="w")
loaded_fonts.append(name)
except: pass
tabview.tab("Font").rowconfigure(0, weight=1)
tabview.tab("Font").columnconfigure(0, weight=1)
loaded_fonts = []
frame = customtkinter.CTkScrollableFrame(tabview.tab("Font"), fg_color="transparent")
frame.grid(padx=2, pady=2, sticky="news")
threading.Thread(target= lambda: populate(frame)).start()
tabview.tab("Export").rowconfigure(6, weight=1)
tabview.tab("Export").columnconfigure(0, weight=1)
def add_more_fonts():
# load 50 more system fonts, not all at once because it will lag
global loaded_fonts, l
for filename in sorted(all_fonts)[l:l+50]:
if "Emoji" not in filename and "18030" not in filename:
font_load = ImageFont.FreeTypeFont(filename)
if (font_load.getname()[1]).lower()!="regular":
name = " ".join(font_load.getname())
else:
name = font_load.getname()[0]
label = customtkinter.CTkButton(frame, text=name, font=(name, 16), fg_color="#1b202c", text_color="white",
anchor="w", command=lambda event=filename: change_font(event)).grid(sticky="w")
loaded_fonts.append(name)
l+=50
if l>len(all_fonts)-50:
l = len(all_fonts)
def loadcustom():
# load custom font file
global selected_font
if font_open := filedialog.askopenfilename(
filetypes=[('Font Files', ['*.ttf', '*.otf', '*.TTF'])]
):
selected_font = font_open
operation()
load_more = customtkinter.CTkButton(tabview.tab("Font"), fg_color="#1b2f30", text="Load More", hover=False, command=add_more_fonts)
load_more.grid(row=1, sticky="sew", columnspan=2)
RightClickMenu3 = tk.Menu(frame_image, tearoff=False, background='#343e5a', fg='white', borderwidth=0, bd=0, activebackground=app_color)
RightClickMenu3.add_command(label="Load Font File", command=lambda: loadcustom())
load_more.bind("<Button-3>", lambda event: do_popup(event, frame=RightClickMenu3))
# TAB-4 (Export)
label_5 = customtkinter.CTkLabel(tabview.tab("Export"), text="Export As")
label_5.grid(row=0, column=0, sticky="wn", pady=10, padx=20)
var = tk.StringVar()
entry_save = customtkinter.CTkEntry(tabview.tab("Export"), textvariable=var, corner_radius=20, width=10)
entry_save.grid(row=1, column=0, sticky="ew", padx=(20, 100))
entry_save._entry.configure(selectbackground=app_color)
format_ = customtkinter.CTkComboBox(tabview.tab("Export"), values=["png", "jpg", "bmp"], width=75, corner_radius=20,
state="readonly")
format_.grid(row=1, column=0, sticky="e", padx=20)
format_.set("png")
def changedir():
global dir_
dir_ = filedialog.askdirectory()
if not dir_:
return
if len(dir_)>=30:
entry_location.configure(text=f"{dir_[:25]}...{dir_[-5:]}")
else:
entry_location.configure(text=dir_)
def export():
# Saving rendered images
global file, convert_seq
if not file:
tk.messagebox.showinfo("Uh!","Please import an image!")
return
save.configure(state=tk.DISABLED, fg_color="grey30")
open_button.configure(state=tk.DISABLED)
if sequence is False:
# single image save
operation()
exported_file = os.path.join(dir_, f"{var.get()}.{format_.get()}")
if os.path.exists(exported_file):
res1 = tk.messagebox.askquestion("Warning!","Do you want to replace the old file with the new one? \n(Process not reversible!)")
if res1=='yes':
outputImage.save(exported_file)
elif res1=='no':
save.configure(state="normal", fg_color=app_color)
open_button.configure(state="normal")
return
else:
outputImage.save(exported_file)
tk.messagebox.showinfo("Exported", "Image successfully saved")
else:
new_dir = os.path.join(dir_, var.get())
if os.path.exists(new_dir):
tk.messagebox.showinfo("Warning!", "A folder with this name already exists, please try a new name!")
else:
label_11.grid(row=4, column=0, sticky="w", padx=25, pady=0)
progress_bar.grid(row=5, column=0, sticky="we", padx=20, pady=(0,20))
cancel_button.grid(row=4, column=0, sticky="ne", padx=(0,20))
os.mkdir(new_dir)
# image sequence
count = 1
for i in allitems:
if convert_seq==False:
break
progress_bar.set(count/len(allitems))
label_11.configure(text=f"Frame: {str(count)}")
file = i
operation()
exported_file = os.path.join(
new_dir,
f"{os.path.basename(file)[:-4]}_ascified.{format_.get()}",
)
outputImage.save(exported_file)
count+=1
tk.messagebox.showinfo("Exported", "Images successfully saved")
convert_seq = True
label_11.grid_forget()
progress_bar.grid_forget()
cancel_button.grid_forget()
save.configure(state="normal", fg_color=app_color)
open_button.configure(state="normal")
def new_window():
# About window
info.unbind("<Button-1>")
def exit_top_level():
top_level.destroy()
info.bind("<Button-1>", lambda event: new_window())
def web(link):
webbrowser.open_new_tab(link)
top_level = customtkinter.CTkToplevel(root)
top_level.config(background="#1b202c")
top_level.protocol("WM_DELETE_WINDOW", exit_top_level)
top_level.minsize(400,200)
top_level.title("About")
top_level.transient(root)
top_level.resizable(width=False, height=False)
top_level.wm_iconbitmap()
root.after(200, lambda: top_level.iconphoto(False, icopath))
label_top = customtkinter.CTkLabel(top_level, fg_color="#1b202c", text="Ascify-Art v0.9", font=("Roboto",15))
label_top.grid(padx=20, pady=20, sticky="w")
desc = "Developed by Akash Bora (Akascape) \n \nLicense: MIT \nCopyright 2024"
label_disc = customtkinter.CTkLabel(top_level, fg_color="#1b202c", text=desc, justify="left", font=("Roboto",12))
label_disc.grid(padx=20, pady=0, sticky="w")
label_logo = customtkinter.CTkLabel(top_level, text="", image=logo, bg_color="#1b202c")
label_logo.place(x=230,y=30)
link = customtkinter.CTkLabel(top_level, fg_color="#1b202c", text="Official Page", justify="left", font=("",13), text_color="light blue")
link.grid(padx=20, pady=0, sticky="w")
link.bind("<Button-1>", lambda event: web("https://github.com/Akascape/Ascify-Art"))
link.bind("<Enter>", lambda event: link.configure(font=("", 13, "underline"), cursor="hand2"))
link.bind("<Leave>", lambda event: link.configure(font=("", 13), cursor="arrow"))
link2 = customtkinter.CTkLabel(top_level, fg_color="#1b202c", text="Documentation", justify="left", anchor="n", font=("",13), text_color="light blue")
link2.grid(padx=20, pady=0, sticky="nw")
link2.bind("<Button-1>", lambda event: web("https://github.com/Akascape/Ascify-Art/wiki"))
link2.bind("<Enter>", lambda event: link2.configure(font=("", 13, "underline"), cursor="hand2"))
link2.bind("<Leave>", lambda event: link2.configure(font=("", 13), cursor="arrow"))
logo = customtkinter.CTkImage(Image.open(resource("icon.png")), size=(150,150))
entry_location = customtkinter.CTkButton(tabview.tab("Export"), corner_radius=20, width=10, fg_color="#343638", border_width=2,
text="Browse Location", hover=False, command=changedir)
entry_location.grid(row=2, column=0, sticky="ew", padx=20, pady=20)
label_11 = customtkinter.CTkLabel(tabview.tab("Export"), text="Frame: 1")
progress_bar = customtkinter.CTkProgressBar(tabview.tab("Export"), height=20, width=10, progress_color=app_color)
progress_bar.set(0)
save = customtkinter.CTkButton(tabview.tab("Export"), corner_radius=20, width=10, fg_color=app_color,
text="SAVE", hover=False, command=lambda: threading.Thread(target=export).start())
save.grid(row=3, column=0, sticky="ew", padx=20, pady=20)
root.bind("<Control-s>", lambda e: threading.Thread(target=export).start())
def stop_sequence():
# stop sequence conversion
global convert_seq
convert_seq = False
convert_seq = True
cancel_button = customtkinter.CTkButton(tabview.tab("Export"), text="x", width=10, height=20, corner_radius=10,
fg_color="grey30", hover_color=app_color, command=stop_sequence)
info = customtkinter.CTkLabel(tabview.tab("Export"), text="About", font=("",13))
info.bind("<Button-1>", lambda event: new_window())
info.bind("<Enter>", lambda event: info.configure(font=("", 13, "underline"), cursor="hand2"))
info.bind("<Leave>", lambda event: info.configure(font=("", 13), cursor="arrow"))
info.grid(row=6, column=0, sticky="sw", padx=20, pady=20)
root.mainloop()
#------------------------------------------------------------------------------------------------#
| python | MIT | 202a6d894bcce7cc54ace8ae987e0663f743675d | 2026-01-05T07:12:54.801288Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/setup.py | setup.py | from setuptools import setup, find_packages
version = '1.2'
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='qtwidgets',
version=version,
author='Martin Fitzpatrick',
author_email='martin.fitzpatrick@gmail.com',
description='Custom widget library for PyQt6, PyQt5, PySide6 and PySide2 (Qt for Python). Free to use in your own applications.',
long_description=long_description,
long_description_content_type="text/markdown",
url='http://github.com/learnpyqt/python-qtwidgets',
license='MIT',
packages=find_packages(),
install_requires=[
'markdown',
],
include_package_data=True,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Desktop Environment',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Widget Sets',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4'
]
)
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/__init__.py | qtwidgets/__init__.py | from .colorbutton import ColorButton
# from color_duo
# from equalizer
from .equalizer_bar import EqualizerBar
# from filebrowser
from .gradient import Gradient
from .paint import Paint
from .passwordedit import PasswordEdit
from .power_bar import PowerBar
from .palette import PaletteGrid, PaletteHorizontal, PaletteVertical
from .rangeslider import RangeSlider
# from scrubber
# from stopwatch
from .toggle import Toggle, AnimatedToggle | python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/equalizer_bar/equalizer_bar.py | qtwidgets/equalizer_bar/equalizer_bar.py | import sys
from qtpy import QtCore, QtGui, QtWidgets
from qtpy.QtCore import Qt
from qtpy.QtCore import Signal
class EqualizerBar(QtWidgets.QWidget):
def __init__(self, bars, steps, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setSizePolicy(
QtWidgets.QSizePolicy.MinimumExpanding,
QtWidgets.QSizePolicy.MinimumExpanding
)
if isinstance(steps, list):
# list of colours.
self.n_steps = len(steps)
self.steps = steps
elif isinstance(steps, int):
# int number of bars, defaults to red.
self.n_steps = steps
self.steps = ['red'] * steps
else:
raise TypeError('steps must be a list or int')
# Bar appearance.
self.n_bars = bars
self._x_solid_percent = 0.8
self._y_solid_percent = 0.8
self._background_color = QtGui.QColor('black')
self._padding = 25 # n-pixel gap around edge.
# Bar behaviour
self._timer = None
self.setDecayFrequencyMs(100)
self._decay = 10
# Ranges
self._vmin = 0
self._vmax = 100
# Current values are stored in a list.
self._values = [0.0] * bars
def paintEvent(self, e):
painter = QtGui.QPainter(self)
brush = QtGui.QBrush()
brush.setColor(self._background_color)
brush.setStyle(Qt.SolidPattern)
rect = QtCore.QRect(0, 0, painter.device().width(), painter.device().height())
painter.fillRect(rect, brush)
# Define our canvas.
d_height = painter.device().height() - (self._padding * 2)
d_width = painter.device().width() - (self._padding * 2)
# Draw the bars.
step_y = d_height / self.n_steps
bar_height = step_y * self._y_solid_percent
bar_height_space = step_y * (1 - self._x_solid_percent) / 2
step_x = d_width / self.n_bars
bar_width = step_x * self._x_solid_percent
bar_width_space = step_x * (1 - self._y_solid_percent) / 2
for b in range(self.n_bars):
# Calculate the y-stop position for this bar, from the value in range.
pc = (self._values[b] - self._vmin) / (self._vmax - self._vmin)
n_steps_to_draw = int(pc * self.n_steps)
for n in range(n_steps_to_draw):
brush.setColor(QtGui.QColor(self.steps[n]))
rect = QtCore.QRect(
self._padding + (step_x * b) + bar_width_space,
self._padding + d_height - ((1 + n) * step_y) + bar_height_space,
bar_width,
bar_height
)
painter.fillRect(rect, brush)
painter.end()
def sizeHint(self):
return QtCore.QSize(20, 120)
def _trigger_refresh(self):
self.update()
def setDecay(self, f):
self._decay = float(f)
def setDecayFrequencyMs(self, ms):
if self._timer:
self._timer.stop()
if ms:
self._timer = QtCore.QTimer()
self._timer.setInterval(ms)
self._timer.timeout.connect(self._decay_beat)
self._timer.start()
def _decay_beat(self):
self._values = [
max(0, v - self._decay)
for v in self._values
]
self.update() # Redraw new position.
def setValues(self, v):
self._values = v
self.update()
def values(self):
return self._values
def setRange(self, vmin, vmax):
assert float(vmin) < float(vmax)
self._vmin, self._vmax = float(vmin), float(vmax)
def setColor(self, color):
self.steps = [color] * self._bar.n_steps
self.update()
def setColors(self, colors):
self.n_steps = len(colors)
self.steps = colors
self.update()
def setBarPadding(self, i):
self._padding = int(i)
self.update()
def setBarSolidXPercent(self, f):
self._x_solid_percent = float(f)
self.update()
def setBarSolidYPercent(self, f):
self._y_solid_percent = float(f)
self.update()
def setBackgroundColor(self, color):
self._background_color = QtGui.QColor(color)
self.update()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/equalizer_bar/demo.py | qtwidgets/equalizer_bar/demo.py | from qtpy import QtCore, QtWidgets
from qtwidgets import EqualizerBar
import random
class Window(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.equalizer = EqualizerBar(5, ['#0C0786', '#40039C', '#6A00A7', '#8F0DA3', '#B02A8F', '#CA4678', '#E06461',
'#F1824C', '#FCA635', '#FCCC25', '#EFF821'])
self.equalizer.setBarSolidYPercent(0.4)
#self.equalizer.setBarSolidXPercent(0.4)
self.setCentralWidget(self.equalizer)
self._timer = QtCore.QTimer()
self._timer.setInterval(100)
self._timer.timeout.connect(self.update_values)
self._timer.start()
def update_values(self):
self.equalizer.setValues([
min(100, v+random.randint(0, 50) if random.randint(0, 5) > 2 else v)
for v in self.equalizer.values()
])
app = QtWidgets.QApplication([])
w = Window()
w.show()
app.exec_()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/equalizer_bar/__init__.py | qtwidgets/equalizer_bar/__init__.py | from .equalizer_bar import EqualizerBar | python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/palette/palette.py | qtwidgets/palette/palette.py | import sys
from qtpy import QtCore, QtWidgets
from qtpy.QtCore import Signal
PALETTES = {
# bokeh paired 12
'paired12':['#000000', '#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928', '#ffffff'],
# d3 category 10
'category10':['#000000', '#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf', '#ffffff'],
# 17 undertones https://lospec.com/palette-list/17undertones
'17undertones': ['#000000', '#141923', '#414168', '#3a7fa7', '#35e3e3', '#8fd970', '#5ebb49', '#458352','#dcd37b', '#fffee5', '#ffd035', '#cc9245', '#a15c3e', '#a42f3b', '#f45b7a', '#c24998', '#81588d', '#bcb0c2', '#ffffff']
}
class _PaletteButton(QtWidgets.QPushButton):
def __init__(self, color):
super().__init__()
self.setFixedSize(QtCore.QSize(24, 24))
self.color = color
self.setStyleSheet("background-color: %s;" % color)
class _PaletteBase(QtWidgets.QWidget):
selected = Signal(object)
def _emit_color(self, color):
self.selected.emit(color)
class _PaletteLinearBase(_PaletteBase):
def __init__(self, colors, *args, **kwargs):
super().__init__(*args, **kwargs)
if isinstance(colors, str):
if colors in PALETTES:
colors = PALETTES[colors]
palette = self.layoutvh()
for c in colors:
b = _PaletteButton(c)
b.pressed.connect(
lambda c=c: self._emit_color(c)
)
palette.addWidget(b)
self.setLayout(palette)
class PaletteHorizontal(_PaletteLinearBase):
layoutvh = QtWidgets.QHBoxLayout
class PaletteVertical(_PaletteLinearBase):
layoutvh = QtWidgets.QVBoxLayout
class PaletteGrid(_PaletteBase):
def __init__(self, colors, n_columns=5, *args, **kwargs):
super().__init__(*args, **kwargs)
if isinstance(colors, str):
if colors in PALETTES:
colors = PALETTES[colors]
palette = QtWidgets.QGridLayout()
row, col = 0, 0
for c in colors:
b = _PaletteButton(c)
b.pressed.connect(
lambda c=c: self._emit_color(c)
)
palette.addWidget(b, row, col)
col += 1
if col == n_columns:
col = 0
row += 1
self.setLayout(palette)
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/palette/demo.py | qtwidgets/palette/demo.py | from qtpy import QtCore, QtGui, QtWidgets
from qtwidgets import PaletteGrid, PaletteHorizontal, PaletteVertical
class Window(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
palette = PaletteGrid('17undertones') # or PaletteHorizontal, or PaletteVertical
palette.selected.connect(self.show_selected_color)
self.setCentralWidget(palette)
def show_selected_color(self, c):
print("Selected: {}".format(c))
app = QtWidgets.QApplication([])
w = Window()
w.show()
app.exec_()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/palette/__init__.py | qtwidgets/palette/__init__.py | from .palette import PaletteGrid, PaletteHorizontal, PaletteVertical | python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/paint/paint.py | qtwidgets/paint/paint.py | import sys
from qtpy import QtCore, QtGui, QtWidgets
from qtpy.QtCore import Qt
class Paint(QtWidgets.QLabel):
def __init__(self, width, height, background='white', *args, **kwargs):
super().__init__(*args, **kwargs)
pixmap = QtGui.QPixmap(width, height)
self.setPixmap(pixmap)
# Fill the canvas with the initial color.
painter = QtGui.QPainter(self.pixmap())
brush = QtGui.QBrush()
brush.setColor(QtGui.QColor(background))
brush.setStyle(Qt.SolidPattern)
painter.fillRect(0, 0, pixmap.width(), pixmap.height(), brush)
painter.end()
self.last_x, self.last_y = None, None
self._pen_color = QtGui.QColor('#000000')
self._pen_width = 4
def setPenColor(self, c):
self._pen_color = QtGui.QColor(c)
def setPenWidth(self, w):
self._pen_width = int(w)
def mouseMoveEvent(self, e):
if self.last_x is None: # First event.
self.last_x = e.x()
self.last_y = e.y()
return # Ignore the first time.
painter = QtGui.QPainter(self.pixmap())
p = painter.pen()
p.setWidth(self._pen_width)
p.setColor(self._pen_color)
painter.setPen(p)
painter.drawLine(self.last_x, self.last_y, e.x(), e.y())
painter.end()
self.update()
# Update the origin for next time.
self.last_x = e.x()
self.last_y = e.y()
def mousePressEvent(self, e):
if e.button() == Qt.RightButton:
self._flood_fill_from_event(e)
def mouseReleaseEvent(self, e):
self.last_x = None
self.last_y = None
def _flood_fill_from_event(self, e):
image = self.pixmap().toImage()
w, h = image.width(), image.height()
x, y = e.x(), e.y()
# Get our target color from origin.
target_color = image.pixel(x, y)
have_seen = set()
queue = [(x, y)]
def get_cardinal_points(have_seen, center_pos):
points = []
cx, cy = center_pos
for x, y in [(1, 0), (0, 1), (-1, 0), (0, -1)]:
xx, yy = cx + x, cy + y
if (xx >= 0 and xx < w and
yy >= 0 and yy < h and
(xx, yy) not in have_seen):
points.append((xx, yy))
have_seen.add((xx, yy))
return points
# Now perform the search and fill.
p = QtGui.QPainter(self.pixmap())
p.setPen(QtGui.QPen(self._pen_color))
while queue:
x, y = queue.pop()
if image.pixel(x, y) == target_color:
p.drawPoint(QtCore.QPoint(x, y))
queue.extend(get_cardinal_points(have_seen, (x, y)))
self.update()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/paint/demo.py | qtwidgets/paint/demo.py | from qtpy import QtCore, QtGui, QtWidgets
from qtwidgets import Paint
class Window(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
paint = Paint(300, 300)
paint.setPenWidth(5)
paint.setPenColor('#EB5160')
self.setCentralWidget(paint)
app = QtWidgets.QApplication([])
w = Window()
w.show()
app.exec_()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/paint/__init__.py | qtwidgets/paint/__init__.py | from .paint import Paint | python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/colorbutton/colorbutton.py | qtwidgets/colorbutton/colorbutton.py | import sys
if 'PyQt5' in sys.modules:
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt, pyqtSignal as Signal
else:
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import Qt, Signal
class ColorButton(QtWidgets.QPushButton):
'''
Custom Qt Widget to show a chosen color.
Left-clicking the button shows the color-chooser, while
right-clicking resets the color to the default color (None by default).
'''
colorChanged = Signal(object)
def __init__(self, *args, color=None, **kwargs):
super(ColorButton, self).__init__(*args, **kwargs)
self._color = None
self._default = color
self.pressed.connect(self.onColorPicker)
# Set the initial/default state.
self.setColor(self._default)
def setColor(self, color):
if color != self._color:
self._color = color
self.colorChanged.emit(color)
if self._color:
self.setStyleSheet("background-color: %s;" % self._color)
else:
self.setStyleSheet("")
def color(self):
return self._color
def onColorPicker(self):
'''
Show color-picker dialog to select color.
Qt will use the native dialog by default.
'''
dlg = QtWidgets.QColorDialog(self)
if self._color:
dlg.setCurrentColor(QtGui.QColor(self._color))
if dlg.exec_():
self.setColor(dlg.currentColor().name())
def mousePressEvent(self, e):
if e.button() == Qt.RightButton:
self.setColor(self._default)
return super(ColorButton, self).mousePressEvent(e)
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/colorbutton/demo.py | qtwidgets/colorbutton/demo.py | from qtpy import QtWidgets
from qtwidgets import ColorButton
class Window(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
palette = ColorButton(color='red')
palette.colorChanged.connect(self.show_selected_color)
self.setCentralWidget(palette)
def show_selected_color(self, c):
print("Selected: {}".format(c))
app = QtWidgets.QApplication([])
w = Window()
w.show()
app.exec_()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/colorbutton/__init__.py | qtwidgets/colorbutton/__init__.py | from .colorbutton import ColorButton | python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/rangeslider/demo_pyside2.py | qtwidgets/rangeslider/demo_pyside2.py | from PySide2 import QtCore, QtGui, QtWidgets
from qtwidgets import RangeSlider
app = QtWidgets.QApplication([])
slider = RangeSlider()
slider.show()
app.exec_()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/rangeslider/demo_pyqt5.py | qtwidgets/rangeslider/demo_pyqt5.py | from PyQt5 import QtCore, QtGui, QtWidgets
from qtwidgets import RangeSlider
app = QtWidgets.QApplication([])
slider = RangeSlider()
slider.show()
app.exec_()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/rangeslider/__init__.py | qtwidgets/rangeslider/__init__.py | from .rangeslider import RangeSlider
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/rangeslider/rangeslider.py | qtwidgets/rangeslider/rangeslider.py | import sys
if "PyQt5" in sys.modules:
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import Qt, pyqtSignal as Signal
else:
from PySide2 import QtCore, QtWidgets, QtGui
from PySide2.QtCore import Signal, Qt
class RangeSlider(QtWidgets.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.first_position = 1
self.second_position = 8
self.opt = QtWidgets.QStyleOptionSlider()
self.opt.minimum = 0
self.opt.maximum = 10
self.setTickPosition(QtWidgets.QSlider.TicksAbove)
self.setTickInterval(1)
self.setSizePolicy(
QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Slider,
)
)
def setRangeLimit(self, minimum: int, maximum: int):
self.opt.minimum = minimum
self.opt.maximum = maximum
def setRange(self, start: int, end: int):
self.first_position = start
self.second_position = end
def getRange(self):
return (self.first_position, self.second_position)
def setTickPosition(self, position: QtWidgets.QSlider.TickPosition):
self.opt.tickPosition = position
def setTickInterval(self, ti: int):
self.opt.tickInterval = ti
def paintEvent(self, event: QtGui.QPaintEvent):
painter = QtGui.QPainter(self)
# Draw rule
self.opt.initFrom(self)
self.opt.rect = self.rect()
self.opt.sliderPosition = 0
self.opt.subControls = (
QtWidgets.QStyle.SC_SliderGroove | QtWidgets.QStyle.SC_SliderTickmarks
)
# Draw GROOVE
self.style().drawComplexControl(QtWidgets.QStyle.CC_Slider, self.opt, painter)
# Draw INTERVAL
color = self.palette().color(QtGui.QPalette.Highlight)
color.setAlpha(160)
painter.setBrush(QtGui.QBrush(color))
painter.setPen(Qt.NoPen)
self.opt.sliderPosition = self.first_position
x_left_handle = (
self.style()
.subControlRect(
QtWidgets.QStyle.CC_Slider, self.opt, QtWidgets.QStyle.SC_SliderHandle
)
.right()
)
self.opt.sliderPosition = self.second_position
x_right_handle = (
self.style()
.subControlRect(
QtWidgets.QStyle.CC_Slider, self.opt, QtWidgets.QStyle.SC_SliderHandle
)
.left()
)
groove_rect = self.style().subControlRect(
QtWidgets.QStyle.CC_Slider, self.opt, QtWidgets.QStyle.SC_SliderGroove
)
selection = QtCore.QRect(
x_left_handle,
groove_rect.y(),
x_right_handle - x_left_handle,
groove_rect.height(),
).adjusted(-1, 1, 1, -1)
painter.drawRect(selection)
# Draw first handle
self.opt.subControls = QtWidgets.QStyle.SC_SliderHandle
self.opt.sliderPosition = self.first_position
self.style().drawComplexControl(QtWidgets.QStyle.CC_Slider, self.opt, painter)
# Draw second handle
self.opt.sliderPosition = self.second_position
self.style().drawComplexControl(QtWidgets.QStyle.CC_Slider, self.opt, painter)
def mousePressEvent(self, event: QtGui.QMouseEvent):
self.opt.sliderPosition = self.first_position
self._first_sc = self.style().hitTestComplexControl(
QtWidgets.QStyle.CC_Slider, self.opt, event.pos(), self
)
self.opt.sliderPosition = self.second_position
self._second_sc = self.style().hitTestComplexControl(
QtWidgets.QStyle.CC_Slider, self.opt, event.pos(), self
)
def mouseMoveEvent(self, event: QtGui.QMouseEvent):
distance = self.opt.maximum - self.opt.minimum
pos = self.style().sliderValueFromPosition(
0, distance, event.pos().x(), self.rect().width()
)
if self._first_sc == QtWidgets.QStyle.SC_SliderHandle:
if pos <= self.second_position:
self.first_position = pos
self.update()
return
if self._second_sc == QtWidgets.QStyle.SC_SliderHandle:
if pos >= self.first_position:
self.second_position = pos
self.update()
def sizeHint(self):
""" override """
SliderLength = 84
TickSpace = 5
w = SliderLength
h = self.style().pixelMetric(
QtWidgets.QStyle.PM_SliderThickness, self.opt, self
)
if (
self.opt.tickPosition & QtWidgets.QSlider.TicksAbove
or self.opt.tickPosition & QtWidgets.QSlider.TicksBelow
):
h += TickSpace
return (
self.style()
.sizeFromContents(
QtWidgets.QStyle.CT_Slider, self.opt, QtCore.QSize(w, h), self
)
.expandedTo(QtWidgets.QApplication.globalStrut())
)
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/toggle/toggle.py | qtwidgets/toggle/toggle.py | import sys
from qtpy.QtCore import (
Qt, QSize, QPoint, QPointF, QRectF,
QEasingCurve, QPropertyAnimation, QSequentialAnimationGroup,
Slot, Property)
from qtpy.QtWidgets import QCheckBox
from qtpy.QtGui import QColor, QBrush, QPaintEvent, QPen, QPainter
class Toggle(QCheckBox):
_transparent_pen = QPen(Qt.transparent)
_light_grey_pen = QPen(Qt.lightGray)
def __init__(self,
parent=None,
bar_color=Qt.gray,
checked_color="#00B0FF",
handle_color=Qt.white,
):
super().__init__(parent)
# Save our properties on the object via self, so we can access them later
# in the paintEvent.
self._bar_brush = QBrush(bar_color)
self._bar_checked_brush = QBrush(QColor(checked_color).lighter())
self._handle_brush = QBrush(handle_color)
self._handle_checked_brush = QBrush(QColor(checked_color))
# Setup the rest of the widget.
self.setContentsMargins(8, 0, 8, 0)
self._handle_position = 0
self.stateChanged.connect(self.handle_state_change)
def sizeHint(self):
return QSize(58, 45)
def hitButton(self, pos: QPoint):
return self.contentsRect().contains(pos)
def paintEvent(self, e: QPaintEvent):
contRect = self.contentsRect()
handleRadius = round(0.24 * contRect.height())
p = QPainter(self)
p.setRenderHint(QPainter.Antialiasing)
p.setPen(self._transparent_pen)
barRect = QRectF(
0, 0,
contRect.width() - handleRadius, 0.40 * contRect.height()
)
barRect.moveCenter(contRect.center())
rounding = barRect.height() / 2
# the handle will move along this line
trailLength = contRect.width() - 2 * handleRadius
xPos = contRect.x() + handleRadius + trailLength * self._handle_position
if self.isChecked():
p.setBrush(self._bar_checked_brush)
p.drawRoundedRect(barRect, rounding, rounding)
p.setBrush(self._handle_checked_brush)
else:
p.setBrush(self._bar_brush)
p.drawRoundedRect(barRect, rounding, rounding)
p.setPen(self._light_grey_pen)
p.setBrush(self._handle_brush)
p.drawEllipse(
QPointF(xPos, barRect.center().y()),
handleRadius, handleRadius)
p.end()
@Slot(int)
def handle_state_change(self, value):
self._handle_position = 1 if value else 0
@Property(float)
def handle_position(self):
return self._handle_position
@handle_position.setter
def handle_position(self, pos):
"""change the property
we need to trigger QWidget.update() method, either by:
1- calling it here [ what we're doing ].
2- connecting the QPropertyAnimation.valueChanged() signal to it.
"""
self._handle_position = pos
self.update()
@Property(float)
def pulse_radius(self):
return self._pulse_radius
@pulse_radius.setter
def pulse_radius(self, pos):
self._pulse_radius = pos
self.update()
class AnimatedToggle(Toggle):
_transparent_pen = QPen(Qt.transparent)
_light_grey_pen = QPen(Qt.lightGray)
def __init__(self, *args, pulse_unchecked_color="#44999999",
pulse_checked_color="#4400B0EE", **kwargs):
self._pulse_radius = 0
super().__init__(*args, **kwargs)
self.animation = QPropertyAnimation(self, b"handle_position", self)
self.animation.setEasingCurve(QEasingCurve.InOutCubic)
self.animation.setDuration(200) # time in ms
self.pulse_anim = QPropertyAnimation(self, b"pulse_radius", self)
self.pulse_anim.setDuration(350) # time in ms
self.pulse_anim.setStartValue(10)
self.pulse_anim.setEndValue(20)
self.animations_group = QSequentialAnimationGroup()
self.animations_group.addAnimation(self.animation)
self.animations_group.addAnimation(self.pulse_anim)
self._pulse_unchecked_animation = QBrush(QColor(pulse_unchecked_color))
self._pulse_checked_animation = QBrush(QColor(pulse_checked_color))
@Slot(int)
def handle_state_change(self, value):
self.animations_group.stop()
if value:
self.animation.setEndValue(1)
else:
self.animation.setEndValue(0)
self.animations_group.start()
def paintEvent(self, e: QPaintEvent):
contRect = self.contentsRect()
handleRadius = round(0.24 * contRect.height())
p = QPainter(self)
p.setRenderHint(QPainter.Antialiasing)
p.setPen(self._transparent_pen)
barRect = QRectF(
0, 0,
contRect.width() - handleRadius, 0.40 * contRect.height()
)
barRect.moveCenter(contRect.center())
rounding = barRect.height() / 2
# the handle will move along this line
trailLength = contRect.width() - 2 * handleRadius
xPos = contRect.x() + handleRadius + trailLength * self._handle_position
if self.pulse_anim.state() == QPropertyAnimation.Running:
p.setBrush(
self._pulse_checked_animation if
self.isChecked() else self._pulse_unchecked_animation)
p.drawEllipse(QPointF(xPos, barRect.center().y()),
self._pulse_radius, self._pulse_radius)
if self.isChecked():
p.setBrush(self._bar_checked_brush)
p.drawRoundedRect(barRect, rounding, rounding)
p.setBrush(self._handle_checked_brush)
else:
p.setBrush(self._bar_brush)
p.drawRoundedRect(barRect, rounding, rounding)
p.setPen(self._light_grey_pen)
p.setBrush(self._handle_brush)
p.drawEllipse(
QPointF(xPos, barRect.center().y()),
handleRadius, handleRadius)
p.end()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/toggle/demo.py | qtwidgets/toggle/demo.py | from qtpy import QtCore, QtGui, QtWidgets
from toggle import Toggle, AnimatedToggle
class Window(QtWidgets.QMainWindow):
    """Demo window stacking a plain Toggle above an AnimatedToggle."""

    def __init__(self):
        super().__init__()

        plain = Toggle()
        animated = AnimatedToggle(
            checked_color="#FFB000",
            pulse_checked_color="#44FFB000"
        )

        layout = QtWidgets.QVBoxLayout()
        for widget in (plain, animated):
            layout.addWidget(widget)

        container = QtWidgets.QWidget()
        container.setLayout(layout)
        self.setCentralWidget(container)
app = QtWidgets.QApplication([])
w = Window()
w.show()
app.exec_()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/toggle/__init__.py | qtwidgets/toggle/__init__.py | from .toggle import Toggle, AnimatedToggle
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/passwordedit/password.py | qtwidgets/passwordedit/password.py | import os
import sys
from qtpy import QtCore, QtGui, QtWidgets
from qtpy.QtCore import Qt, Signal
folder = os.path.dirname(__file__)
class PasswordEdit(QtWidgets.QLineEdit):
    """
    Password LineEdit with icons to show/hide password entries.

    Based on this example https://kushaldas.in/posts/creating-password-input-widget-in-pyqt.html by Kushal Das.

    Args:
        show_visibility: when True, append a trailing action that toggles
            between masked (Password echo) and plain (Normal echo) display.
        visible_icon / hidden_icon: optional QIcon overrides; otherwise the
            bundled SVG icons next to this module are used.
        icons_from_theme: when True, take icons from the desktop icon theme
            ("view-visible" / "view-hidden") instead.
    """

    def __init__(
        self,
        show_visibility=True,
        visible_icon=None,
        hidden_icon=None,
        icons_from_theme=False,
        *args,
        **kwargs
    ):
        super().__init__(*args, **kwargs)

        if icons_from_theme:
            self.visibleIcon = QtGui.QIcon.fromTheme("view-visible")
            self.hiddenIcon = QtGui.QIcon.fromTheme("view-hidden")
        else:
            if visible_icon:
                self.visibleIcon = visible_icon
            else:
                # Fall back to the SVGs shipped alongside this module.
                self.visibleIcon = QtGui.QIcon(os.path.join(folder, "eye.svg"))
            if hidden_icon:
                self.hiddenIcon = hidden_icon
            else:
                self.hiddenIcon = QtGui.QIcon(
                    os.path.join(folder, "hidden.svg")
                )

        # Start masked.
        self.setEchoMode(QtWidgets.QLineEdit.Password)

        if show_visibility:
            # Add the password hide/shown toggle at the end of the edit box.
            self.togglepasswordAction = self.addAction(
                self.visibleIcon, QtWidgets.QLineEdit.TrailingPosition
            )
            self.togglepasswordAction.triggered.connect(
                self.on_toggle_password_Action
            )

        # Tracks the current echo state; flipped by the toggle action.
        self.password_shown = False

    def on_toggle_password_Action(self):
        """Flip between plain-text and masked display, updating the icon."""
        if not self.password_shown:
            self.setEchoMode(QtWidgets.QLineEdit.Normal)
            self.password_shown = True
            self.togglepasswordAction.setIcon(self.hiddenIcon)
        else:
            self.setEchoMode(QtWidgets.QLineEdit.Password)
            self.password_shown = False
            self.togglepasswordAction.setIcon(self.visibleIcon)
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/passwordedit/demo.py | qtwidgets/passwordedit/demo.py | from qtpy import QtCore, QtGui, QtWidgets
from qtwidgets import PasswordEdit
class Window(QtWidgets.QMainWindow):
    """Minimal demo window hosting a single PasswordEdit."""

    def __init__(self):
        super().__init__()
        self.setCentralWidget(PasswordEdit())
app = QtWidgets.QApplication([])
w = Window()
w.show()
app.exec_()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/passwordedit/__init__.py | qtwidgets/passwordedit/__init__.py | from .password import PasswordEdit | python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/power_bar/power_bar.py | qtwidgets/power_bar/power_bar.py | import sys
from qtpy import QtCore, QtGui, QtWidgets
from qtpy.QtCore import Qt
from qtpy.QtCore import Signal
class _Bar(QtWidgets.QWidget):
    """Vertical segmented meter drawn from the parent widget's dial state.

    Private companion of ``PowerBar``: it reads ``minimum()``, ``maximum()``
    and ``value()`` from its parent and emits ``clickedValue`` on click/drag
    so the parent can update its dial.
    """

    # Emitted with the value corresponding to a click/drag y-position.
    clickedValue = Signal(int)

    def __init__(self, steps, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.setSizePolicy(
            QtWidgets.QSizePolicy.MinimumExpanding,
            QtWidgets.QSizePolicy.MinimumExpanding
        )

        if isinstance(steps, list):
            # list of colours.
            self.n_steps = len(steps)
            self.steps = steps
        elif isinstance(steps, int):
            # int number of bars, defaults to red.
            self.n_steps = steps
            self.steps = ['red'] * steps
        else:
            raise TypeError('steps must be a list or int')

        # Fraction (0..1) of each step drawn solid; the rest is spacing.
        self._bar_solid_percent = 0.8
        self._background_color = QtGui.QColor('black')
        self._padding = 4.0  # n-pixel gap around edge.

    def paintEvent(self, e):
        """Paint the background and the lit segments up to the current value."""
        painter = QtGui.QPainter(self)
        brush = QtGui.QBrush()
        brush.setColor(self._background_color)
        brush.setStyle(Qt.SolidPattern)
        rect = QtCore.QRect(0, 0, painter.device().width(), painter.device().height())
        painter.fillRect(rect, brush)

        # Get current state.
        parent = self.parent()
        vmin, vmax = parent.minimum(), parent.maximum()
        value = parent.value()

        # Define our canvas.
        d_height = painter.device().height() - (self._padding * 2)
        d_width = painter.device().width() - (self._padding * 2)

        # Draw the bars.
        step_size = d_height / self.n_steps
        bar_height = step_size * self._bar_solid_percent
        bar_spacer = step_size * (1 - self._bar_solid_percent) / 2

        # Calculate the y-stop position, from the value in range.
        pc = (value - vmin) / (vmax - vmin)
        n_steps_to_draw = int(pc * self.n_steps)
        for n in range(n_steps_to_draw):
            brush.setColor(QtGui.QColor(self.steps[n]))
            # NOTE(review): these QRect arguments are floats and rely on the
            # binding's implicit conversion — confirm on the Qt version in use.
            rect = QtCore.QRect(
                self._padding,
                self._padding + d_height - ((1 + n) * step_size) + bar_spacer,
                d_width,
                bar_height
            )
            painter.fillRect(rect, brush)
        painter.end()

    def sizeHint(self):
        """Preferred size: a tall, narrow meter."""
        return QtCore.QSize(40, 120)

    def _trigger_refresh(self):
        # Repaint when the parent dial's value changes.
        self.update()

    def _calculate_clicked_value(self, e):
        """Map the event's y position into the parent's value range and emit it."""
        parent = self.parent()
        vmin, vmax = parent.minimum(), parent.maximum()
        d_height = self.size().height() + (self._padding * 2)
        step_size = d_height / self.n_steps
        click_y = e.y() - self._padding - step_size / 2
        # Invert: clicks near the top map to larger values.
        pc = (d_height - click_y) / d_height
        value = vmin + pc * (vmax - vmin)
        self.clickedValue.emit(value)

    def mouseMoveEvent(self, e):
        self._calculate_clicked_value(e)

    def mousePressEvent(self, e):
        self._calculate_clicked_value(e)
class PowerBar(QtWidgets.QWidget):
    """
    Custom Qt Widget to show a power bar and dial.
    Demonstrating compound and custom-drawn widget.

    Left-clicking the button shows the color-chooser, while
    right-clicking resets the color to None (no-color).

    Unknown attribute access (e.g. ``setMinimum``, ``value``) is delegated
    to the internal QDial so the compound widget mirrors the dial's API.
    """

    def __init__(self, steps=5, *args, **kwargs):
        super().__init__(*args, **kwargs)

        layout = QtWidgets.QVBoxLayout()
        self._bar = _Bar(steps)
        layout.addWidget(self._bar)

        # Create the QDial widget and set up defaults.
        # - we provide accessors on this class to override.
        self._dial = QtWidgets.QDial()
        self._dial.setNotchesVisible(True)
        self._dial.setWrapping(False)
        self._dial.valueChanged.connect(self._bar._trigger_refresh)

        # Take feedback from click events on the meter.
        self._bar.clickedValue.connect(self._dial.setValue)

        layout.addWidget(self._dial)
        self.setLayout(layout)

    def __getattr__(self, name):
        # __getattr__ is only invoked after normal lookup fails, so fall
        # through to the wrapped QDial for the rest of its API.
        # Bug fix: the original returned ``self[name]`` here, which raises
        # TypeError because QWidget is not subscriptable; look the name up
        # in the instance __dict__ instead.
        if name in self.__dict__:
            return self.__dict__[name]
        try:
            return getattr(self._dial, name)
        except AttributeError:
            raise AttributeError(
                "'{}' object has no attribute '{}'".format(self.__class__.__name__, name)
            )

    def setColor(self, color):
        """Use a single color for every segment of the meter."""
        self._bar.steps = [color] * self._bar.n_steps
        self._bar.update()

    def setColors(self, colors):
        """Use one color per segment; the list length sets the segment count."""
        self._bar.n_steps = len(colors)
        self._bar.steps = colors
        self._bar.update()

    def setBarPadding(self, i):
        """Set the pixel gap around the meter edge."""
        self._bar._padding = int(i)
        self._bar.update()

    def setBarSolidPercent(self, f):
        """Set the solid fraction (0..1) of each segment."""
        self._bar._bar_solid_percent = float(f)
        self._bar.update()

    def setBackgroundColor(self, color):
        """Set the meter's background color."""
        self._bar._background_color = QtGui.QColor(color)
        self._bar.update()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/power_bar/demo.py | qtwidgets/power_bar/demo.py | from qtpy import QtCore, QtGui, QtWidgets
from qtwidgets import PowerBar
app = QtWidgets.QApplication([])
volume = PowerBar(["#053061", "#2166ac", "#4393c3", "#92c5de", "#d1e5f0", "#f7f7f7", "#fddbc7", "#f4a582", "#d6604d", "#b2182b", "#67001f"])
volume.setBarSolidPercent(0.8)
volume.setBarPadding(5)
volume.show()
app.exec_()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/power_bar/__init__.py | qtwidgets/power_bar/__init__.py | from .power_bar import PowerBar | python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/gradient/gradient.py | qtwidgets/gradient/gradient.py | import sys
from qtpy import QtCore, QtGui, QtWidgets
from qtpy.QtCore import Qt
from qtpy.QtCore import Signal
class Gradient(QtWidgets.QWidget):
    """Interactive linear-gradient editor.

    The gradient is a list of ``(stop, color)`` tuples with stop in 0..1.
    Handles can be dragged (left button), re-colored (right button) and
    added/removed (double click).  ``gradientChanged`` fires on mutation.
    """

    # Emitted whenever stops are added, removed, moved or re-colored.
    gradientChanged = Signal()

    def __init__(self, gradient=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.setSizePolicy(
            QtWidgets.QSizePolicy.MinimumExpanding,
            QtWidgets.QSizePolicy.MinimumExpanding
        )

        if gradient:
            self._gradient = gradient
        else:
            # Default: black to white.
            self._gradient = [
                (0.0, '#000000'),
                (1.0, '#ffffff'),
            ]

        # Stop point handle sizes.
        self._handle_w = 10
        self._handle_h = 10

        # Index of the stop currently being dragged, or None.
        self._drag_position = None

    def paintEvent(self, e):
        """Paint the gradient strip plus a red/white handle per stop."""
        painter = QtGui.QPainter(self)
        width = painter.device().width()
        height = painter.device().height()

        # Draw the linear horizontal gradient.
        gradient = QtGui.QLinearGradient(0, 0, width, 0)
        for stop, color in self._gradient:
            gradient.setColorAt(stop, QtGui.QColor(color))
        rect = QtCore.QRect(0, 0, width, height)
        painter.fillRect(rect, gradient)

        pen = QtGui.QPen()
        y = painter.device().height() / 2

        # Draw the stop handles.
        for stop, _ in self._gradient:
            pen.setColor(QtGui.QColor('white'))
            painter.setPen(pen)
            painter.drawLine(stop * width, y - self._handle_h, stop * width, y + self._handle_h)

            pen.setColor(QtGui.QColor('red'))
            painter.setPen(pen)
            rect = QtCore.QRect(
                stop * width - self._handle_w/2,
                y - self._handle_h/2,
                self._handle_w,
                self._handle_h
            )
            painter.drawRect(rect)
        painter.end()

    def sizeHint(self):
        return QtCore.QSize(200, 50)

    def _sort_gradient(self):
        # Keep stops ordered by position so insertion/painting stay correct.
        self._gradient = sorted(self._gradient, key=lambda g:g[0])

    def _constrain_gradient(self):
        self._gradient = [
            # Ensure values within valid range.
            (max(0.0, min(1.0, stop)), color)
            for stop, color in self._gradient
        ]

    def setGradient(self, gradient):
        """Replace the gradient with ``[(stop, color), ...]`` (stops in 0..1)."""
        assert all([0.0 <= stop <= 1.0 for stop, _ in gradient])
        self._gradient = gradient
        self._constrain_gradient()
        self._sort_gradient()
        self.gradientChanged.emit()

    def gradient(self):
        """Return the current list of ``(stop, color)`` tuples."""
        return self._gradient

    @property
    def _end_stops(self):
        # Indices of the two fixed end handles (never dragged or removed).
        return [0, len(self._gradient)-1]

    def addStop(self, stop, color=None):
        # Stop is a value 0...1, find the point to insert this stop
        # in the list.
        assert 0.0 <= stop <= 1.0
        for n, g in enumerate(self._gradient):
            if g[0] > stop:
                # Insert before this entry, with specified or next color.
                self._gradient.insert(n, (stop, color or g[1]))
                break
        self._constrain_gradient()
        self.gradientChanged.emit()
        self.update()

    def removeStopAtPosition(self, n):
        """Delete stop index ``n`` unless it is one of the end stops."""
        if n not in self._end_stops:
            del self._gradient[n]
            self.gradientChanged.emit()
            self.update()

    def setColorAtPosition(self, n, color):
        """Change the color of stop index ``n`` (position kept)."""
        if n < len(self._gradient):
            stop, _ = self._gradient[n]
            self._gradient[n] = stop, color
            self.gradientChanged.emit()
            self.update()

    def chooseColorAtPosition(self, n, current_color=None):
        """Open a color dialog and apply the chosen color to stop ``n``."""
        dlg = QtWidgets.QColorDialog(self)
        if current_color:
            dlg.setCurrentColor(QtGui.QColor(current_color))
        if dlg.exec_():
            self.setColorAtPosition(n, dlg.currentColor().name())

    def _find_stop_handle_for_event(self, e, to_exclude=None):
        """Return the index of the stop handle under the event, or None."""
        width = self.width()
        height = self.height()
        midpoint = height / 2

        # Are we inside a stop point? First check y.
        if (
            e.y() >= midpoint - self._handle_h and
            e.y() <= midpoint + self._handle_h
        ):
            for n, (stop, color) in enumerate(self._gradient):
                if to_exclude and n in to_exclude:
                    # Allow us to skip the extreme ends of the gradient.
                    continue
                if (
                    e.x() >= stop * width - self._handle_w and
                    e.x() <= stop * width + self._handle_w
                ):
                    return n

    def mousePressEvent(self, e):
        # We're in this stop point.
        if e.button() == Qt.RightButton:
            # Right button: recolor the handle under the cursor.
            n = self._find_stop_handle_for_event(e)
            if n is not None:
                _, color = self._gradient[n]
                self.chooseColorAtPosition(n, color)

        elif e.button() == Qt.LeftButton:
            # Left button: begin dragging (end stops are excluded).
            n = self._find_stop_handle_for_event(e, to_exclude=self._end_stops)
            if n is not None:
                # Activate drag mode.
                self._drag_position = n

    def mouseReleaseEvent(self, e):
        self._drag_position = None
        self._sort_gradient()

    def mouseMoveEvent(self, e):
        # If drag active, move the stop.
        if self._drag_position:
            stop = e.x() / self.width()
            _, color = self._gradient[self._drag_position]
            self._gradient[self._drag_position] = stop, color
            self._constrain_gradient()
            self.update()

    def mouseDoubleClickEvent(self, e):
        # Calculate the position of the click relative 0..1 to the width.
        n = self._find_stop_handle_for_event(e)

        if n:
            self._sort_gradient()  # Ensure ordered.
            # Delete existing, if not at the ends.
            if n > 0 and n < len(self._gradient) - 1:
                self.removeStopAtPosition(n)
        else:
            # Empty area: add a new stop at the clicked fraction.
            stop = e.x() / self.width()
            self.addStop(stop)
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/gradient/demo.py | qtwidgets/gradient/demo.py | from qtpy import QtCore, QtGui, QtWidgets
from gradient import Gradient
class Window(QtWidgets.QMainWindow):
    """Demo window showing a Gradient editor preset with three stops."""

    def __init__(self):
        super().__init__()

        editor = Gradient()
        stops = [(0, 'black'), (1, 'green'), (0.5, 'red')]
        editor.setGradient(stops)

        self.setCentralWidget(editor)
app = QtWidgets.QApplication([])
w = Window()
w.show()
app.exec_()
| python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
pythonguis/python-qtwidgets | https://github.com/pythonguis/python-qtwidgets/blob/89bd9dc53990f04a82bb27cb056f0c513e40e8b0/qtwidgets/gradient/__init__.py | qtwidgets/gradient/__init__.py | from .gradient import Gradient | python | MIT | 89bd9dc53990f04a82bb27cb056f0c513e40e8b0 | 2026-01-05T07:12:55.329843Z | false |
yeyupiaoling/PunctuationModel | https://github.com/yeyupiaoling/PunctuationModel/blob/b16b89d906792c394f59b1f921dd561b86f81664/train.py | train.py | import argparse
import functools
import os
import time
from datetime import timedelta
from paddle.distributed import fleet
import paddle
from paddle import nn
from paddle.io import DataLoader
from paddle.optimizer import Adam
from paddle.optimizer.lr import CosineAnnealingDecay
from sklearn.metrics import f1_score
from visualdl import LogWriter
from utils.reader import PuncDatasetFromErnieTokenizer, collate_fn
from utils.model import ErnieLinear
from utils.sampler import CustomBatchSampler, CustomDistributedBatchSampler
from utils.utils import add_arguments, print_arguments
from utils.logger import setup_logger
logger = setup_logger(__name__)
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('batch_size', int, 32, '训练的批量大小')
add_arg('max_seq_len', int, 200, '训练数据的最大长度')
add_arg('num_workers', int, 8, '读取数据的线程数量')
add_arg('num_epoch', int, 30, '训练的轮数')
add_arg('learning_rate', float, 1.0e-5, '初始学习率的大小')
add_arg('train_data_path', str, 'dataset/train.txt', '训练数据的数据文件路径')
add_arg('dev_data_path', str, 'dataset/dev.txt', '测试数据的数据文件路径')
add_arg('punc_path', str, 'dataset/punc_vocab', '标点符号字典路径')
add_arg('model_path', str, 'models/', '保存检查点的目录')
add_arg('resume_model', str, None, '恢复训练模型文件夹')
add_arg('pretrained_token', str, 'ernie-3.0-medium-zh',
'使用的ERNIE模型权重,具体查看:https://paddlenlp.readthedocs.io/zh/latest/model_zoo/transformers/ERNIE/contents.html#ernie')
args = parser.parse_args()
print_arguments(args)
def train():
    """Train the ERNIE-based punctuation model, with optional multi-GPU support.

    Hyper-parameters come from the module-level ``args``.  Saves the best
    checkpoint (lowest dev loss) and the latest checkpoint under
    ``args.model_path``; logs scalars to VisualDL from rank 0 only.
    """
    paddle.set_device("gpu")
    # Number of GPUs participating in training and this process's rank.
    nranks = paddle.distributed.get_world_size()
    local_rank = paddle.distributed.get_rank()
    writer = None
    if local_rank == 0:
        # VisualDL writer (only the main process logs scalars).
        writer = LogWriter(logdir='log')
    # Multi-GPU setup.
    if nranks > 1:
        # Configure the collective distributed strategy.
        strategy = fleet.DistributedStrategy()
        fleet.init(is_collective=True, strategy=strategy)
    train_dataset = PuncDatasetFromErnieTokenizer(data_path=args.train_data_path,
                                                  punc_path=args.punc_path,
                                                  pretrained_token=args.pretrained_token,
                                                  max_seq_len=args.max_seq_len)
    dev_dataset = PuncDatasetFromErnieTokenizer(data_path=args.dev_data_path,
                                                punc_path=args.punc_path,
                                                pretrained_token=args.pretrained_token,
                                                max_seq_len=args.max_seq_len)
    # Pick a distributed sampler when training on multiple GPUs.
    if nranks > 1:
        train_batch_sampler = CustomDistributedBatchSampler(train_dataset,
                                                            batch_size=args.batch_size,
                                                            drop_last=True,
                                                            shuffle=True)
    else:
        train_batch_sampler = CustomBatchSampler(train_dataset,
                                                 batch_size=args.batch_size,
                                                 drop_last=True,
                                                 shuffle=True)
    train_loader = DataLoader(train_dataset,
                              collate_fn=collate_fn,
                              batch_sampler=train_batch_sampler,
                              num_workers=args.num_workers)
    dev_loader = DataLoader(dev_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            collate_fn=collate_fn,
                            drop_last=False,
                            num_workers=args.num_workers)
    logger.info('预处理数据集完成!')
    # num_classes is the size of the punctuation label set.
    model = ErnieLinear(pretrained_token=args.pretrained_token, num_classes=len(train_dataset.punc2id))
    criterion = nn.CrossEntropyLoss()
    # Wrap model for multi-GPU training.
    if nranks > 1:
        model = fleet.distributed_model(model)
    scheduler = CosineAnnealingDecay(learning_rate=args.learning_rate, T_max=args.num_epoch)
    optimizer = Adam(learning_rate=scheduler,
                     parameters=model.parameters(),
                     weight_decay=paddle.regularizer.L2Decay(1.0e-5))
    # Wrap optimizer for multi-GPU training.
    if nranks > 1:
        optimizer = fleet.distributed_optimizer(optimizer)
    # Optionally resume from a previous checkpoint directory.
    last_epoch = 0
    if args.resume_model:
        assert os.path.exists(os.path.join(args.resume_model, 'model.pdparams')), "模型参数文件不存在!"
        assert os.path.exists(os.path.join(args.resume_model, 'optimizer.pdopt')), "优化方法参数文件不存在!"
        model.set_state_dict(paddle.load(os.path.join(args.resume_model, 'model.pdparams')))
        opt_state = paddle.load(os.path.join(args.resume_model, 'optimizer.pdopt'))
        last_epoch = opt_state['LR_Scheduler']['last_epoch']
        optimizer.set_state_dict(opt_state)
    best_loss = 1e3
    train_step, test_step = 0, 0
    train_times = []
    sum_batch = len(train_loader) * args.num_epoch
    for epoch in range(last_epoch, args.num_epoch):
        epoch += 1
        start = time.time()
        for batch_id, (inputs, labels) in enumerate(train_loader()):
            # Flatten per-token labels to match the flattened logits.
            labels = paddle.reshape(labels, shape=[-1])
            y, logit = model(inputs)
            pred = paddle.argmax(logit, axis=1)
            loss = criterion(y, labels)
            optimizer.clear_grad()
            loss.backward()
            optimizer.step()
            F1_score = f1_score(labels.numpy().tolist(), pred.numpy().tolist(), average="macro")
            train_times.append((time.time() - start) * 1000)
            # Log every 100 batches; scalars only from the main process.
            if batch_id % 100 == 0:
                # ETA from the mean batch time over remaining batches.
                eta_sec = (sum(train_times) / len(train_times)) * (sum_batch - (epoch - 1) * len(train_loader) - batch_id)
                eta_str = str(timedelta(seconds=int(eta_sec / 1000)))
                logger.info(
                    'Train epoch: [{}/{}], batch: [{}/{}], loss: {:.5f}, f1_score: {:.5f}, learning rate: {:>.8f}, eta: {}'.format(
                        epoch, args.num_epoch, batch_id, len(train_loader), float(loss), F1_score, scheduler.get_lr(), eta_str))
                if local_rank == 0:
                    writer.add_scalar('Train/Loss', float(loss), train_step)
                    writer.add_scalar('Train/F1_Score', F1_score, train_step)
                train_step += 1
            start = time.time()
        if local_rank == 0:
            writer.add_scalar('Train/LearnRate', scheduler.get_lr(), epoch)
        scheduler.step()
        # ---- end-of-epoch evaluation on the dev set ----
        model.eval()
        eval_loss = []
        eval_f1_score = []
        for batch_id, (inputs, labels) in enumerate(dev_loader()):
            labels = paddle.reshape(labels, shape=[-1])
            y, logit = model(inputs)
            pred = paddle.argmax(logit, axis=1)
            loss = criterion(y, labels)
            eval_loss.append(float(loss))
            F1_score = f1_score(labels.numpy().tolist(), pred.numpy().tolist(), average="macro")
            eval_f1_score.append(F1_score)
            if batch_id % 100 == 0:
                logger.info('Batch: [{}/{}], loss: {:.5f}, f1_score: {:.5f}'.format(
                    batch_id, len(dev_loader), float(loss), F1_score))
        eval_loss1 = sum(eval_loss) / len(eval_loss)
        eval_f1_score1 = sum(eval_f1_score) / len(eval_f1_score)
        if eval_loss1 < best_loss:
            best_loss = eval_loss1
            # Save the best model so far (main process only).
            if local_rank == 0:
                save_dir = os.path.join(args.model_path, "best_checkpoint")
                os.makedirs(save_dir, exist_ok=True)
                paddle.save(model.state_dict(), os.path.join(save_dir, 'model.pdparams'))
                paddle.save(optimizer.state_dict(), os.path.join(save_dir, 'optimizer.pdopt'))
                logger.info(f'模型保存在:{save_dir}')
        logger.info('Avg eval, loss: {:.5f}, f1_score: {:.5f} best loss: {:.5f}'.
                    format(eval_loss1, eval_f1_score1, best_loss))
        model.train()
        if local_rank == 0:
            writer.add_scalar('Test/Loss', eval_loss1, test_step)
            writer.add_scalar('Test/F1_Score', eval_f1_score1, test_step)
            # Always save the latest checkpoint as well.
            save_dir = os.path.join(args.model_path, "checkpoint")
            os.makedirs(save_dir, exist_ok=True)
            paddle.save(model.state_dict(), os.path.join(save_dir, 'model.pdparams'))
            paddle.save(optimizer.state_dict(), os.path.join(save_dir, 'optimizer.pdopt'))
            logger.info(f'模型保存在:{save_dir}')
        test_step += 1
if __name__ == "__main__":
train()
| python | Apache-2.0 | b16b89d906792c394f59b1f921dd561b86f81664 | 2026-01-05T07:12:58.138495Z | false |
yeyupiaoling/PunctuationModel | https://github.com/yeyupiaoling/PunctuationModel/blob/b16b89d906792c394f59b1f921dd561b86f81664/infer.py | infer.py | import argparse
import functools
from utils.predictor import PunctuationExecutor
from utils.utils import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('text', str, '近几年不但我用书给女儿压岁也劝说亲朋不要给女儿压岁钱而改送压岁书', '需要加标点符号的文本')
add_arg('infer_model_path', str, 'models/pun_models', '预测的目录')
args = parser.parse_args()
print_arguments(args)
if __name__ == '__main__':
pun_executor = PunctuationExecutor(model_dir=args.infer_model_path, use_gpu=True)
result = pun_executor(args.text)
print(result)
| python | Apache-2.0 | b16b89d906792c394f59b1f921dd561b86f81664 | 2026-01-05T07:12:58.138495Z | false |
yeyupiaoling/PunctuationModel | https://github.com/yeyupiaoling/PunctuationModel/blob/b16b89d906792c394f59b1f921dd561b86f81664/eval.py | eval.py | import argparse
import functools
import os
import paddle
from paddle import nn
from paddle.io import DataLoader
from sklearn.metrics import f1_score
from utils.logger import setup_logger
from utils.model import ErnieLinear
from utils.reader import PuncDatasetFromErnieTokenizer, collate_fn
from utils.utils import add_arguments, print_arguments
logger = setup_logger(__name__)
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('batch_size', int, 32, '评估的批量大小')
add_arg('max_seq_len', int, 200, '评估数据的最大长度')
add_arg('num_workers', int, 8, '读取数据的线程数量')
add_arg('test_data_path', str, 'dataset/test.txt', '测试数据的数据文件路径')
add_arg('punc_path', str, 'dataset/punc_vocab', '标点符号字典路径')
add_arg('model_path', str, 'models/best_checkpoint', '加载检查点的目录')
add_arg('pretrained_token', str, 'ernie-3.0-medium-zh', '使用的ERNIE模型权重')
args = parser.parse_args()
print_arguments(args)
def evaluate():
    """Evaluate the trained model on the test set; logs loss and macro-F1."""
    logger.info('正在预处理数据集,时间比较长,请耐心等待...')
    test_dataset = PuncDatasetFromErnieTokenizer(data_path=args.test_data_path,
                                                 punc_path=args.punc_path,
                                                 pretrained_token=args.pretrained_token,
                                                 max_seq_len=args.max_seq_len)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             drop_last=False,
                             collate_fn=collate_fn,
                             num_workers=args.num_workers)
    logger.info('预处理数据集完成!')
    # num_classes is the size of the punctuation label set.
    model = ErnieLinear(pretrained_token=args.pretrained_token, num_classes=len(test_dataset.punc2id))
    criterion = nn.CrossEntropyLoss()
    # Load the checkpoint weights into the model.
    model_dict = paddle.load(os.path.join(args.model_path, 'model.pdparams'))
    model.set_state_dict(model_dict)
    model.eval()
    eval_loss = []
    eval_f1_score = []
    for batch_id, (inputs, labels) in enumerate(test_loader()):
        # Flatten per-token labels to match the flattened logits.
        labels = paddle.reshape(labels, shape=[-1])
        y, logit = model(inputs)
        pred = paddle.argmax(logit, axis=1)
        loss = criterion(y, labels)
        eval_loss.append(float(loss))
        F1_score = f1_score(labels.numpy().tolist(), pred.numpy().tolist(), average="macro")
        eval_f1_score.append(F1_score)
        if batch_id % 100 == 0:
            logger.info('Batch: [{}/{}], loss: {:.5f}, f1_score: {:.5f}'.format(
                batch_id, len(test_loader), float(loss), F1_score))
    logger.info('Avg eval, loss: {:.5f}, f1_score: {:.5f}'.format(
        sum(eval_loss) / len(eval_loss), sum(eval_f1_score) / len(eval_f1_score)))
if __name__ == "__main__":
evaluate()
| python | Apache-2.0 | b16b89d906792c394f59b1f921dd561b86f81664 | 2026-01-05T07:12:58.138495Z | false |
yeyupiaoling/PunctuationModel | https://github.com/yeyupiaoling/PunctuationModel/blob/b16b89d906792c394f59b1f921dd561b86f81664/export_model.py | export_model.py | import argparse
import functools
import os
import shutil
import paddle
from paddle.static import InputSpec
from utils.logger import setup_logger
from utils.model import ErnieLinearExport
from utils.utils import add_arguments, print_arguments
logger = setup_logger(__name__)
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('punc_path', str, 'dataset/punc_vocab', '标点符号字典路径')
add_arg('model_path', str, 'models/best_checkpoint', '加载检查点的目录')
add_arg('infer_model_path', str, 'models/pun_models', '保存的预测的目录')
add_arg('pretrained_token', str, 'ernie-3.0-medium-zh', '使用的ERNIE模型权重')
args = parser.parse_args()
print_arguments(args)
def main():
    """Export the trained checkpoint as a static inference model.

    Writes ``vocab.txt`` (blank label first, then the punctuation vocab),
    the jit-saved model files, and an ``info.json`` recording which
    pretrained tokenizer the model was trained with.
    """
    os.makedirs(args.infer_model_path, exist_ok=True)
    with open(args.punc_path, 'r', encoding='utf-8') as f1, \
            open(os.path.join(args.infer_model_path, 'vocab.txt'), 'w', encoding='utf-8') as f2:
        lines = f1.readlines()
        lines = [line.replace('\n', '') for line in lines]
        # num_classes = punctuation count + 1: index 0 is the blank (space) label.
        num_classes = len(lines) + 1
        f2.write(' \n')
        for line in lines:
            f2.write(f'{line}\n')
    model = ErnieLinearExport(pretrained_token=args.pretrained_token, num_classes=num_classes)
    model_dict = paddle.load(os.path.join(args.model_path, 'model.pdparams'))
    model.set_state_dict(model_dict)
    # Two int64 inputs with dynamic batch/sequence dims: input_ids, token_type_ids.
    input_spec = [InputSpec(shape=(-1, -1), dtype=paddle.int64), InputSpec(shape=(-1, -1), dtype=paddle.int64)]
    paddle.jit.save(layer=model, path=os.path.join(args.infer_model_path, 'model'), input_spec=input_spec)
    # Record the tokenizer name as JSON (single-quote repr converted to JSON quotes).
    with open(os.path.join(args.infer_model_path, 'info.json'), 'w', encoding='utf-8') as f:
        f.write(str({'pretrained_token': args.pretrained_token}).replace("'", '"'))
    logger.info(f'模型导出成功,保存在:{args.infer_model_path}')
if __name__ == "__main__":
main()
| python | Apache-2.0 | b16b89d906792c394f59b1f921dd561b86f81664 | 2026-01-05T07:12:58.138495Z | false |
yeyupiaoling/PunctuationModel | https://github.com/yeyupiaoling/PunctuationModel/blob/b16b89d906792c394f59b1f921dd561b86f81664/clear_data.py | clear_data.py | import os
import random
def is_chinese(text):
    """Return True if *text* contains at least one CJK unified ideograph.

    Checks whether any character falls in the basic CJK block
    U+4E00..U+9FA5; an empty string yields False.  Uses ``any`` with a
    generator instead of the original manual loop — same result, idiomatic
    and short-circuiting.
    """
    return any(u'\u4e00' <= c <= u'\u9fa5' for c in text)
def clear_text(src_dir, clear_file):
    """Clean every raw text file in ``src_dir`` into one sentence-per-line file.

    Normalizes ASCII punctuation to full-width Chinese punctuation, strips
    quotes/ellipses/site junk, drops headings and non-Chinese lines, then
    writes all surviving lines to ``clear_file`` sorted by length.
    """
    os.makedirs(os.path.dirname(clear_file), exist_ok=True)
    results = []
    for f in os.listdir(src_dir):
        with open(os.path.join(src_dir, f), 'r', encoding='utf-8') as fp:
            lines = fp.readlines()
            for line in lines:
                # --- character-level normalization (replacement order matters) ---
                line = line.replace('\n', '')
                # NOTE(review): this deletes every literal '1' from the text —
                # presumably stray serial/OCR digits; confirm it is intended.
                line = line.replace('1', '')
                line = line.replace(' ', '')
                line = line.replace('!', '!')
                line = line.replace('.', '。')
                line = line.replace('?', '?')
                line = line.replace('“”', '')
                line = line.replace('’’', '')
                line = line.replace('-->>', '')
                line = line.replace('。。。', '。')
                line = line.replace('啊……', '啊!')
                line = line.replace('?!', '?')
                line = line.replace('!!!', '!')
                line = line.strip()
                # Drop forum-post header lines like "201x...发表".
                if line.startswith('201') and line.endswith('发表'):
                    continue
                # Unwrap fully parenthesized / fully quoted lines.
                if line.startswith('(') and line.endswith(')'):
                    line = line[1:-1]
                if line.startswith('“') and line.endswith('”') and line.count("“") == 1:
                    line = line[1:-1]
                # Remove unbalanced quotation marks.
                if line.count("“") == 1:
                    line = line.replace('“', '')
                if line.count("”") == 1:
                    line = line.replace('”', '')
                if line.count("”") == 1:
                    line = line.replace('”', '')
                # Convert trailing ellipses into a full stop.
                if line.endswith('……'):
                    line = line.replace('……', '')
                    line = line + '。'
                if line.endswith('....'):
                    line = line.replace('....', '')
                    line = line + '。'
                if line.endswith('...'):
                    line = line.replace('...', '。')
                if line.endswith('…'):
                    line = line.replace('…', '')
                    line = line + '。'
                # Strip the novel-site "to be continued" trailer.
                if line.endswith('(未完待续。)'):
                    line = line.replace('(未完待续。)', '')
                # Discard too-short lines, chapter headings and non-Chinese lines.
                if len(line) < 3: continue
                if "兄弟(下)" in line: continue
                if "兄弟(上)" in line: continue
                if line[0] == "第" and "章" in line: continue
                if len(line) == 0 or not is_chinese(line): continue
                results.append(f'{line}')
    # Write all cleaned lines into a single file, shortest first.
    with open(clear_file, 'w', encoding='utf-8') as fp1:
        results = sorted(results, key=lambda x: len(x))
        for line in results:
            fp1.write(f'{line}\n')
def create_list(clear_file, save_dir, num_test=10000):
with open(clear_file, 'r', encoding='utf-8') as fp:
lines = fp.readlines()
random.shuffle(lines)
dev_text = lines[:num_test]
test_text = lines[num_test:num_test + num_test]
train_text = lines[num_test + num_test:]
with open(os.path.join(save_dir, 'dev.txt'), 'w', encoding='utf-8') as fp1:
for line in dev_text:
line = line.replace('\n', '')
fp1.write(f'{" ".join(line)} \n')
with open(os.path.join(save_dir, 'test.txt'), 'w', encoding='utf-8') as fp1:
for line in test_text:
line = line.replace('\n', '')
fp1.write(f'{" ".join(line)} \n')
with open(os.path.join(save_dir, 'train.txt'), 'w', encoding='utf-8') as fp1:
for line in train_text:
line = line.replace('\n', '')
fp1.write(f'{" ".join(line)} \n')
if __name__ == '__main__':
clear_text(src_dir="dataset/files", clear_file='dataset/data.txt')
create_list(clear_file='dataset/data.txt', save_dir='dataset')
| python | Apache-2.0 | b16b89d906792c394f59b1f921dd561b86f81664 | 2026-01-05T07:12:58.138495Z | false |
yeyupiaoling/PunctuationModel | https://github.com/yeyupiaoling/PunctuationModel/blob/b16b89d906792c394f59b1f921dd561b86f81664/utils/predictor.py | utils/predictor.py | import json
import os
import re
import numpy as np
import paddle.inference as paddle_infer
from paddlenlp.transformers import ErnieTokenizer
from utils.logger import setup_logger
logger = setup_logger(__name__)
__all__ = ['PunctuationExecutor']
class PunctuationExecutor:
    """Inference wrapper that restores punctuation in plain Chinese text.

    Loads an exported Paddle inference model (``model.pdmodel`` /
    ``model.pdiparams`` plus ``vocab.txt``) from ``model_dir`` and, when
    called with a raw string, returns the same text with punctuation
    re-inserted after the appropriate characters.
    """

    def __init__(self, model_dir, use_gpu=True, gpu_mem=500, num_threads=4):
        # Build the inference config from the exported model files.
        model_path = os.path.join(model_dir, 'model.pdmodel')
        params_path = os.path.join(model_dir, 'model.pdiparams')
        if not os.path.exists(model_path) or not os.path.exists(params_path):
            raise Exception("标点符号模型文件不存在,请检查{}和{}是否存在!".format(model_path, params_path))
        self.config = paddle_infer.Config(model_path, params_path)
        # Recover which pretrained ERNIE tokenizer the model was exported with.
        pretrained_token = 'ernie-1.0'
        if os.path.exists(os.path.join(model_dir, 'info.json')):
            with open(os.path.join(model_dir, 'info.json'), 'r', encoding='utf-8') as f:
                data = json.load(f)
                pretrained_token = data['pretrained_token']

        if use_gpu:
            self.config.enable_use_gpu(gpu_mem, 0)
        else:
            self.config.disable_gpu()
            self.config.set_cpu_math_library_num_threads(num_threads)
        # enable memory optim
        self.config.enable_memory_optim()
        self.config.disable_glog_info()

        # Create the predictor from the config.
        self.predictor = paddle_infer.create_predictor(self.config)

        # Input tensor handles.
        self.input_ids_handle = self.predictor.get_input_handle('input_ids')
        self.token_type_ids_handle = self.predictor.get_input_handle('token_type_ids')

        # Output tensor names.
        self.output_names = self.predictor.get_output_names()

        # Index -> punctuation table; row 0 is the blank (no punctuation) label.
        self._punc_list = []
        # Bug fix: the original tested ``os.path.join(...)`` (a non-empty
        # string, always truthy) instead of checking that the vocab file
        # actually exists, so a missing vocab was never reported here.
        if not os.path.exists(os.path.join(model_dir, 'vocab.txt')):
            raise Exception("字典文件不存在,请检查{}是否存在!".format(os.path.join(model_dir, 'vocab.txt')))
        with open(os.path.join(model_dir, 'vocab.txt'), 'r', encoding='utf-8') as f:
            for line in f:
                self._punc_list.append(line.strip())

        self.tokenizer = ErnieTokenizer.from_pretrained(pretrained_token)
        # Warm-up call so the first real request is fast.
        self('近几年不但我用书给女儿儿压岁也劝说亲朋不要给女儿压岁钱而改送压岁书')
        logger.info('标点符号模型加载成功。')

    def _clean_text(self, text):
        """Lower-case *text* and strip everything but letters, digits and CJK."""
        text = text.lower()
        text = re.sub('[^A-Za-z0-9\u4e00-\u9fa5]', '', text)
        # Also drop any characters from the punctuation vocabulary itself
        # (index 0 is the blank label, so it is skipped).
        text = re.sub(f'[{"".join(self._punc_list[1:])}]', '', text)
        return text

    # Preprocess the input text.
    def preprocess(self, text: str):
        """Clean and tokenize *text*; return (input_ids, token_type_ids, seq_len)."""
        clean_text = self._clean_text(text)
        if len(clean_text) == 0: return None
        # Per-character tokenization to keep a 1:1 token/label alignment.
        tokenized_input = self.tokenizer(list(clean_text), return_length=True, is_split_into_words=True)
        input_ids = tokenized_input['input_ids']
        seg_ids = tokenized_input['token_type_ids']
        seq_len = tokenized_input['seq_len']
        return input_ids, seg_ids, seq_len

    def infer(self, input_ids: list, seg_ids: list):
        """Run one forward pass (batch size 1); return the raw prediction array."""
        # Feed the inputs.
        self.input_ids_handle.reshape([1, len(input_ids)])
        self.token_type_ids_handle.reshape([1, len(seg_ids)])
        self.input_ids_handle.copy_from_cpu(np.array([input_ids]).astype('int64'))
        self.token_type_ids_handle.copy_from_cpu(np.array([seg_ids]).astype('int64'))
        # Execute the predictor.
        self.predictor.run()
        # Fetch the first output tensor.
        output_handle = self.predictor.get_output_handle(self.output_names[0])
        output_data = output_handle.copy_to_cpu()
        return output_data

    # Post-process the predictions back into text.
    def postprocess(self, input_ids, seq_len, preds):
        """Re-attach predicted punctuation after each token ([CLS]/[SEP] stripped)."""
        tokens = self.tokenizer.convert_ids_to_tokens(input_ids[1:seq_len - 1])
        labels = preds[1:seq_len - 1].tolist()
        assert len(tokens) == len(labels)
        text = ''
        for t, l in zip(tokens, labels):
            text += t
            # Label 0 means "no punctuation after this token".
            if l != 0:
                text += self._punc_list[l]
        return text

    def __call__(self, text: str) -> str:
        """Return *text* with punctuation restored."""
        input_ids, seg_ids, seq_len = self.preprocess(text)
        preds = self.infer(input_ids=input_ids, seg_ids=seg_ids)
        if len(preds.shape) == 2:
            preds = preds[0]
        text = self.postprocess(input_ids, seq_len, preds)
        return text
| python | Apache-2.0 | b16b89d906792c394f59b1f921dd561b86f81664 | 2026-01-05T07:12:58.138495Z | false |
yeyupiaoling/PunctuationModel | https://github.com/yeyupiaoling/PunctuationModel/blob/b16b89d906792c394f59b1f921dd561b86f81664/utils/sampler.py | utils/sampler.py | import math
import numpy as np
from paddle.io import BatchSampler, DistributedBatchSampler
__all__ = ["CustomBatchSampler", "CustomDistributedBatchSampler"]
def _batch_shuffle(indices, batch_size, epoch):
"""将大小相似的实例放入小批量中可以提高效率,并进行批量打乱
1. 按持续时间对音频剪辑进行排序
2. 生成一个随机数k, k的范围[0,batch_size)
3. 随机移动k实例,为不同的epoch训练创建不同的批次
4. 打乱minibatches.
:param batch_size: 批量大小。这个大小还用于为批量洗牌生成一个随机数。
:type batch_size: int
:param epoch: 当前的轮数。
:type epoch: int
:return: Batch shuffled indices.
:rtype: list
"""
rng = np.random.RandomState(epoch)
shift_len = rng.randint(0, batch_size - 1)
batch_indices = list(zip(*[iter(indices[shift_len:])] * batch_size))
rng.shuffle(batch_indices)
batch_indices = [item for batch in batch_indices for item in batch]
res_len = len(indices) - shift_len - len(batch_indices)
if res_len != 0:
batch_indices.extend(indices[-res_len:])
batch_indices.extend(indices[0:shift_len])
return batch_indices
class CustomBatchSampler(BatchSampler):
    def __init__(self, dataset, batch_size, shuffle=False, drop_last=False):
        """Single-GPU batch sampler with per-epoch batch-wise shuffling.

        Args:
            dataset (paddle.io.Dataset): dataset to sample from.
            batch_size (int): number of samples per batch.
            shuffle (bool, optional): shuffle batches each epoch. Defaults to False.
            drop_last (bool, optional): drop the final partial batch. Defaults to False.
        """
        # NOTE(review): super().__init__() is intentionally not called here,
        # matching the original implementation.
        self.dataset = dataset
        assert isinstance(batch_size, int) and batch_size > 0, "batch_size should be a positive integer"
        self.batch_size = batch_size
        assert isinstance(shuffle, bool), "shuffle should be a boolean value"
        self.shuffle = shuffle
        assert isinstance(drop_last, bool), "drop_last should be a boolean number"
        self.epoch = 0
        self.drop_last = drop_last
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0))
        self.total_size = self.num_samples

    def __iter__(self):
        count = len(self.dataset)
        indices = np.arange(count).tolist()
        # Pad by wrapping around so exactly total_size indices are produced.
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size
        # Batch-wise shuffle (indices are expected pre-sorted by duration).
        if self.shuffle:
            indices = _batch_shuffle(indices, self.batch_size, self.epoch)
            assert len(indices) == self.total_size, f"batch shuffle examples error: {len(indices)} : {self.total_size}"
        assert len(indices) == self.num_samples
        # Emit consecutive slices of batch_size; keep the final partial slice
        # unless drop_last is set.
        step = self.batch_size
        for start in range(0, len(indices), step):
            chunk = indices[start:start + step]
            if len(chunk) == step or not self.drop_last:
                yield chunk
        self.epoch += 1

    def __len__(self):
        # Partial batches count toward the length only when kept.
        padded = self.num_samples + int(not self.drop_last) * (self.batch_size - 1)
        return padded // self.batch_size
class CustomDistributedBatchSampler(DistributedBatchSampler):
    def __init__(self,
                 dataset,
                 batch_size,
                 num_replicas=None,
                 rank=None,
                 shuffle=False,
                 drop_last=False):
        """Multi-GPU batch sampler with batch-wise shuffling (SortaGrad-style).
        Args:
            dataset (paddle.io.Dataset):
            batch_size (int): batch size for one gpu
            num_replicas (int, optional): world size or numbers of gpus. Defaults to None.
            rank (int, optional): rank id. Defaults to None.
            shuffle (bool, optional): True for do shuffle, or else. Defaults to False.
            drop_last (bool, optional): whether drop last batch which is less than batch size. Defaults to False.
        """
        # self.nranks / self.local_rank / self.total_size / self.num_samples
        # used below are set by the parent DistributedBatchSampler.
        super().__init__(dataset=dataset, batch_size=batch_size, num_replicas=num_replicas, rank=rank, shuffle=shuffle,
                         drop_last=drop_last)

    def __iter__(self):
        num_samples = len(self.dataset)
        indices = np.arange(num_samples).tolist()
        # Pad by wrapping around so all replicas see the same total count.
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size
        # Shuffle whole (batch_size * nranks)-sized groups so every rank keeps
        # samples of similar size together within one global step.
        if self.shuffle:
            indices = _batch_shuffle(indices, self.batch_size * self.nranks, self.epoch)
            assert len(indices) == self.total_size, f"batch shuffle examples error: {len(indices)} : {self.total_size}"

        # slice `self.batch_size` examples by rank id
        def _get_indices_by_batch_size(indices):
            # Take this rank's batch_size-sized stripe out of every group of
            # (batch_size * nranks) indices; the trailing (smaller) group is
            # split evenly across ranks afterwards.
            subsampled_indices = []
            last_batch_size = self.total_size % (self.batch_size * self.nranks)
            assert last_batch_size % self.nranks == 0
            last_local_batch_size = last_batch_size // self.nranks
            for i in range(self.local_rank * self.batch_size, len(indices) - last_batch_size,
                           self.batch_size * self.nranks):
                subsampled_indices.extend(indices[i:i + self.batch_size])
            indices = indices[len(indices) - last_batch_size:]
            subsampled_indices.extend(
                indices[self.local_rank * last_local_batch_size:(self.local_rank + 1) * last_local_batch_size])
            return subsampled_indices

        if self.nranks > 1:
            indices = _get_indices_by_batch_size(indices)
        assert len(indices) == self.num_samples
        # Emit consecutive batches; keep the final partial one unless drop_last.
        _sample_iter = iter(indices)
        batch_indices = []
        for idx in _sample_iter:
            batch_indices.append(idx)
            if len(batch_indices) == self.batch_size:
                yield batch_indices
                batch_indices = []
        if not self.drop_last and len(batch_indices) > 0:
            yield batch_indices
        self.epoch += 1

    def __len__(self):
        # Partial batches count toward the length only when kept.
        num_samples = self.num_samples
        num_samples += int(not self.drop_last) * (self.batch_size - 1)
        return num_samples // self.batch_size
| python | Apache-2.0 | b16b89d906792c394f59b1f921dd561b86f81664 | 2026-01-05T07:12:58.138495Z | false |
yeyupiaoling/PunctuationModel | https://github.com/yeyupiaoling/PunctuationModel/blob/b16b89d906792c394f59b1f921dd561b86f81664/utils/reader.py | utils/reader.py | import json
import os
import random
import numpy as np
from paddle.io import Dataset
from paddlenlp.transformers import ErnieTokenizer
from tqdm import tqdm
from utils.logger import setup_logger
logger = setup_logger(__name__)
__all__ = ["PuncDatasetFromErnieTokenizer", "collate_fn"]
class PuncDatasetFromErnieTokenizer(Dataset):
    """Punctuation-restoration dataset: ERNIE token ids as inputs, punctuation
    label ids as targets, built from a whitespace-tokenized corpus file."""

    def __init__(self, data_path, punc_path, pretrained_token='ernie-3.0-medium-zh', max_seq_len=100):
        """Load (or build and cache) the tokenized corpus.

        Args:
            data_path: corpus file, one whitespace-tokenized sentence per line.
            punc_path: punctuation vocab file, one symbol per line.
            pretrained_token: name of the pretrained ERNIE tokenizer.
            max_seq_len: truncation length for both inputs and labels.
        """
        super().__init__()
        self.inputs_data = []
        self.labels = []
        # Preprocessing is slow, so its result is cached next to the data file.
        self.cache_data_path = os.path.join(os.path.dirname(data_path), f'{os.path.basename(data_path)}.cache')
        self.tokenizer = ErnieTokenizer.from_pretrained(pretrained_token)
        self.paddingID = self.tokenizer.pad_token_id
        self.max_seq_len = max_seq_len
        # Punctuation vocab; " " is prepended as the "no punctuation" label 0.
        self.punc2id = self.load_vocab(punc_path, extra_word_list=[" "])
        self.id2punc = {k: v for (v, k) in self.punc2id.items()}
        # Build or load the preprocessed data.
        self.preprocess(data_path)

    def __len__(self):
        return len(self.inputs_data)

    def __getitem__(self, index):
        # Truncate both sequences to max_seq_len.
        inputs_data = np.array(self.inputs_data[index][:self.max_seq_len], dtype='int64')
        labels = np.array(self.labels[index][:self.max_seq_len], dtype='int64')
        return inputs_data, labels

    @staticmethod
    def load_vocab(vocab_path, extra_word_list=None):
        """Map one token per line to an id; *extra_word_list* takes the first ids.

        Bug fix: the mutable default argument ``extra_word_list=[]`` is
        replaced with None so the list can never be shared between calls.
        """
        extra_word_list = [] if extra_word_list is None else extra_word_list
        n = len(extra_word_list)
        with open(vocab_path, encoding='utf-8') as vf:
            vocab = {word.strip(): i + n for i, word in enumerate(vf)}
        for i, word in enumerate(extra_word_list):
            vocab[word] = i
        return vocab

    def preprocess(self, data_path: str):
        """Tokenize the corpus into (token ids, punctuation labels) pairs,
        reading from / writing to the JSON cache file when possible."""
        if not os.path.exists(self.cache_data_path):
            logger.info(f'{self.cache_data_path}不存在,正在重新生成,时间比较长,请耐心等待...')
            # Bug fix: close the corpus file deterministically (it used to be
            # opened without ever being closed).
            with open(data_path, encoding='utf-8') as f:
                txt_seqs = f.readlines()
            # Sort short-to-long so similarly sized samples end up adjacent.
            txt_seqs = sorted(txt_seqs, key=lambda k: len(k))
            for text in tqdm(txt_seqs):
                txt = text.replace('\n', '').split()
                # Robustness fix: skip blank lines, which previously raised
                # IndexError on txt[-1] below.
                if not txt:
                    continue
                if txt[-1] not in self.punc2id.keys(): txt += ' '
                label, input_data = [], []
                for i in range(len(txt) - 1):
                    # Input side: skip punctuation tokens themselves.
                    word = txt[i]
                    if word in self.punc2id.keys():
                        continue
                    token = self.tokenizer(word)
                    x = token["input_ids"][1:-1]
                    input_data.extend(x)
                    # Label side: the punctuation (if any) following the word;
                    # sub-word pieces before the last one get the blank label.
                    punc = txt[i + 1]
                    for _ in range(len(x) - 1):
                        label.append(self.punc2id[" "])
                    if punc not in self.punc2id:
                        label.append(self.punc2id[" "])
                    else:
                        label.append(self.punc2id[punc])
                if len(input_data) != len(label):
                    continue
                self.inputs_data.append(input_data)
                self.labels.append(label)
            data = {'inputs_data': self.inputs_data, 'labels': self.labels}
            with open(self.cache_data_path, 'w', encoding='utf-8') as f:
                json.dump(data, f)
        else:
            logger.info(f'正在加载:{self.cache_data_path}')
            # Delete the cache files manually when switching to another corpus.
            with open(self.cache_data_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            self.inputs_data = data['inputs_data']
            self.labels = data['labels']
        if len(self.inputs_data) != len(self.labels):
            # Bug fix: the original `assert '<string literal>'` was always
            # true and could never fire; raise explicitly instead.
            raise AssertionError('error: length input_data != label')
# 对一个batch的数据处理
# Collate a batch of (input_ids, labels) samples into padded arrays.
def collate_fn(batch):
    """Pad a batch of (inputs, labels) samples to the longest sequence.

    Args:
        batch: list of (inputs, labels) tuples; within each sample both are
            1-D int64 arrays of equal length.

    Returns:
        Tuple of two int64 arrays of shape (batch_size, max_len); positions
        past each sample's own length are zero (padding).
    """
    # Sort samples longest-first so batch[0] defines the padded width.
    batch = sorted(batch, key=lambda s: s[0].shape[0], reverse=True)
    max_data_length = batch[0][0].shape[0]
    batch_size = len(batch)
    # Zero tensors at the maximum length; copying into them implements padding.
    inputs = np.zeros((batch_size, max_data_length), dtype='int64')
    labels = np.zeros((batch_size, max_data_length), dtype='int64')
    # Bug fix: the original shuffled the row indices before this loop, but
    # each sample is always written to its own row, so the shuffle had no
    # effect on the output and only consumed global `random` state — removed.
    for row, (tensor, target) in enumerate(batch):
        seq_length = tensor.shape[0]
        label_length = target.shape[0]
        # Inputs and labels must be aligned token-for-token.
        assert seq_length == label_length
        inputs[row, :seq_length] = tensor[:]
        labels[row, :label_length] = target[:]
    return inputs, labels
| python | Apache-2.0 | b16b89d906792c394f59b1f921dd561b86f81664 | 2026-01-05T07:12:58.138495Z | false |
yeyupiaoling/PunctuationModel | https://github.com/yeyupiaoling/PunctuationModel/blob/b16b89d906792c394f59b1f921dd561b86f81664/utils/model.py | utils/model.py | import paddle
import paddle.nn as nn
from paddlenlp.transformers import ErnieForTokenClassification
class ErnieLinear(nn.Layer):
    """ERNIE token-classification head that also exposes softmax probabilities.

    ``forward`` returns ``(raw, probs)``: the reshaped raw output for loss
    computation and its softmax for metrics/decoding.
    """

    def __init__(self,
                 num_classes,
                 pretrained_token='ernie-3.0-medium-zh',
                 **kwargs):
        super(ErnieLinear, self).__init__()
        self.ernie = ErnieForTokenClassification.from_pretrained(
            pretrained_token, num_classes=num_classes, **kwargs)
        self.num_classes = self.ernie.num_classes
        self.softmax = nn.Softmax()

    def forward(self, input_ids, token_type_ids=None):
        raw = self.ernie(input_ids, token_type_ids=token_type_ids)
        # Flatten (batch, seq) into rows so every token is one classification.
        raw = paddle.reshape(raw, shape=[-1, self.num_classes])
        probs = self.softmax(raw)
        return raw, probs
class ErnieLinearExport(nn.Layer):
    """Inference-only variant of ErnieLinear: returns argmax label ids directly,
    which makes the exported graph self-contained for deployment."""

    def __init__(self,
                 num_classes,
                 pretrained_token='ernie-3.0-medium-zh',
                 **kwargs):
        super(ErnieLinearExport, self).__init__()
        self.ernie = ErnieForTokenClassification.from_pretrained(
            pretrained_token, num_classes=num_classes, **kwargs)
        self.num_classes = self.ernie.num_classes
        self.softmax = nn.Softmax()

    def forward(self, input_ids, token_type_ids=None):
        hidden = self.ernie(input_ids, token_type_ids=token_type_ids)
        # One row per token, then pick the most likely punctuation label.
        hidden = paddle.reshape(hidden, shape=[-1, self.num_classes])
        return paddle.argmax(self.softmax(hidden), axis=-1)
| python | Apache-2.0 | b16b89d906792c394f59b1f921dd561b86f81664 | 2026-01-05T07:12:58.138495Z | false |
yeyupiaoling/PunctuationModel | https://github.com/yeyupiaoling/PunctuationModel/blob/b16b89d906792c394f59b1f921dd561b86f81664/utils/logger.py | utils/logger.py | import datetime
import logging
import os
import sys
import termcolor
__all__ = ['setup_logger']
logger_initialized = []
def setup_logger(name, output=None):
    """
    Initialize logger and set its verbosity level to INFO.
    Args:
        output (str): a file name or a directory to save log. If None, will not save log file.
            If ends with ".txt" or ".log", assumed to be a file name.
            Otherwise, logs will be saved to `output/log.txt`.
        name (str): the root module name of this logger
    Returns:
        logging.Logger: a logger
    """
    logger = logging.getLogger(name)
    # Only configure each named logger once; later calls reuse it as-is.
    if name in logger_initialized:
        return logger
    logger.setLevel(logging.INFO)
    logger.propagate = False
    # The *2 fields are synthesized (and colorized) by ColoredFormatter.
    formatter = ("[%(asctime2)s %(levelname2)s] %(module2)s:%(funcName2)s:%(lineno2)s - %(message2)s")
    color_formatter = ColoredFormatter(formatter, datefmt="%m/%d %H:%M:%S")
    ch = logging.StreamHandler(stream=sys.stdout)
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(color_formatter)
    logger.addHandler(ch)
    # file logging: all workers
    if output is not None:
        if output.endswith(".txt") or output.endswith(".log"):
            filename = output
        else:
            filename = os.path.join(output, "log.txt")
        # Bug fix: os.makedirs() used to raise FileExistsError when the
        # directory was already there, and OSError/FileNotFoundError when
        # the filename had no directory component at all.
        log_dir = os.path.dirname(filename)
        if log_dir:
            os.makedirs(log_dir, exist_ok=True)
        fh = logging.FileHandler(filename, mode='a')
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(logging.Formatter())
        logger.addHandler(fh)
    logger_initialized.append(name)
    return logger
# Mapping from logging level name to the termcolor color name used by
# ColoredFormatter below; levels missing from this dict are left uncolored.
COLORS = {
    "WARNING": "yellow",
    "INFO": "white",
    "DEBUG": "blue",
    "CRITICAL": "red",
    "ERROR": "red",
}
class ColoredFormatter(logging.Formatter):
    """Formatter that fills the %(...2)s placeholders used by setup_logger,
    colorizing them with termcolor when *use_color* is enabled."""

    def __init__(self, fmt, datefmt, use_color=True):
        logging.Formatter.__init__(self, fmt, datefmt=datefmt)
        self.use_color = use_color

    def format(self, record):
        levelname = record.levelname
        if self.use_color and levelname in COLORS:
            def colored(text):
                return termcolor.colored(
                    text,
                    color=COLORS[levelname],
                    attrs={"bold": True},
                )

            record.levelname2 = colored("{:<7}".format(record.levelname))
            record.message2 = colored(record.msg)
            asctime2 = datetime.datetime.fromtimestamp(record.created)
            record.asctime2 = termcolor.colored(asctime2, color="green")
            record.module2 = termcolor.colored(record.module, color="cyan")
            record.funcName2 = termcolor.colored(record.funcName, color="cyan")
            record.lineno2 = termcolor.colored(record.lineno, color="cyan")
        else:
            # Bug fix: with use_color=False (or an unknown level name) the
            # *2 fields were never set and format() raised on the first log
            # call; populate plain, uncolored equivalents instead.
            record.levelname2 = "{:<7}".format(record.levelname)
            record.message2 = record.msg
            record.asctime2 = str(datetime.datetime.fromtimestamp(record.created))
            record.module2 = record.module
            record.funcName2 = record.funcName
            record.lineno2 = record.lineno
        return logging.Formatter.format(self, record)
| python | Apache-2.0 | b16b89d906792c394f59b1f921dd561b86f81664 | 2026-01-05T07:12:58.138495Z | false |
yeyupiaoling/PunctuationModel | https://github.com/yeyupiaoling/PunctuationModel/blob/b16b89d906792c394f59b1f921dd561b86f81664/utils/utils.py | utils/utils.py | import distutils.util
def print_arguments(args):
    """Print every parsed CLI argument as `name: value`, sorted by name."""
    print("----------- Configuration Arguments -----------")
    for name, value in sorted(vars(args).items()):
        print(f"{name}: {value}")
    print("------------------------------------------------")
def _strtobool(value):
    """Return 1/0 for a truthy/falsy string, like distutils.util.strtobool.

    Inlined because distutils was removed from the standard library in
    Python 3.12 (PEP 632); semantics match the original exactly.
    """
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if value in ("n", "no", "f", "false", "off", "0"):
        return 0
    raise ValueError("invalid truth value %r" % (value,))


def add_arguments(argname, type, default, help, argparser, **kwargs):
    """Register `--<argname>` on *argparser*, appending the default to the help.

    bool-typed options accept textual truth values ("true", "0", "yes", ...)
    and parse to 1/0, mirroring the old distutils.util.strtobool behavior.
    """
    type = _strtobool if type == bool else type
    argparser.add_argument("--" + argname,
                           default=default,
                           type=type,
                           help=help + ' 默认: %(default)s.',
                           **kwargs)
| python | Apache-2.0 | b16b89d906792c394f59b1f921dd561b86f81664 | 2026-01-05T07:12:58.138495Z | false |
yeyupiaoling/PunctuationModel | https://github.com/yeyupiaoling/PunctuationModel/blob/b16b89d906792c394f59b1f921dd561b86f81664/utils/__init__.py | utils/__init__.py | python | Apache-2.0 | b16b89d906792c394f59b1f921dd561b86f81664 | 2026-01-05T07:12:58.138495Z | false | |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/manage.py | manage.py | #!/usr/bin/env python
import inspect
import os
import sys
import dotenv
def if_exists_load_env(name: str) -> None:
    """Load env vars from the file *name* located next to this script, if any.

    Silently does nothing when the current frame is unavailable or the file
    does not exist; existing variables are overridden when it does.
    """
    frame = inspect.currentframe()
    if frame is None:
        return
    script_dir = os.path.dirname(os.path.abspath(inspect.getfile(frame)))
    env_file = "{env_path}/{name}".format(env_path=script_dir, name=name)
    if os.path.exists(env_file):
        dotenv.load_dotenv(env_file, override=True)
if __name__ == "__main__":
    # Default to the example site's dev settings unless already set by the caller.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.examplesite.settings.dev")
    # Local overrides (API keys etc.) are read from .env.local when present.
    if_exists_load_env(".env.local")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/setup.py | setup.py | #!/usr/bin/env python
import io
import re
from pathlib import Path
from setuptools import find_packages, setup
# PyPI long description comes straight from the README.
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
# Single-source the version from wagtailgeowidget/__init__.py.
# NOTE(review): re.search() returns None if __version__ is missing, so
# .group(1) would raise AttributeError — acceptable failure for a build script.
version = ""
with io.open("wagtailgeowidget/__init__.py", "r", encoding="utf8") as fd:
    version = re.search(
        r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE
    ).group(1)
# Extras installed via `pip install wagtailgeowidget[test]`.
test_extras = [
    "pytest",
    "pytest-django",
    "factory-boy",
]
setup(
    name="wagtailgeowidget",
    version=version,
    description=(
        "Wagtail-Geo-Widget is the complete map solution for your Wagtail site."
    ),  # NOQA
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Fröjd",
    author_email="martin@marteinn.se",
    url="https://github.com/frojd/wagtail-geo-widget",
    packages=find_packages(exclude=("tests*", "tests", "example")),
    include_package_data=True,
    license="MIT",
    zip_safe=False,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Framework :: Django",
        "Framework :: Django :: 4.2",
        "Framework :: Django :: 5.1",
        "Framework :: Django :: 5.2",
        "Framework :: Wagtail",
        "Framework :: Wagtail :: 6",
        "Framework :: Wagtail :: 7",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "Programming Language :: Python :: 3.13",
        "Programming Language :: Python :: 3.14",
        "Topic :: Utilities",
    ],
    extras_require={"test": test_extras},
    install_requires=[
        "Wagtail>=6.3",
    ],
    project_urls={
        "Source": "https://github.com/Frojd/wagtail-geo-widget/",
        "Changelog": "https://github.com/Frojd/wagtail-geo-widget/blob/main/CHANGELOG.md",
    },
)
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/test_latlng_parse.py | tests/test_latlng_parse.py | from typing import Dict, cast
from django.test import SimpleTestCase
from wagtailgeowidget.helpers import geosgeometry_str_to_struct
class LatLngParseTestCase(SimpleTestCase):
    """Tests for geosgeometry_str_to_struct, the EWKT point-string parser.

    The parser keeps srid/x/y as strings; None signals an unparsable input.
    """

    def test_that_basic_parsing_works(self):
        struct = geosgeometry_str_to_struct("SRID=5432;POINT(12.0 13.0)")
        struct = cast(Dict, struct)
        self.assertEqual(struct["srid"], "5432")
        self.assertEqual(struct["x"], "12.0")
        self.assertEqual(struct["y"], "13.0")

    def test_none_is_returned_on_invalid_struct(self):
        struct = geosgeometry_str_to_struct("S=5432_P(12.0 13.0)")
        self.assertEqual(struct, None)

    def test_that_optional_space_between_point_and_data_is_accepted(self):
        # "POINT (x y)" with a space is valid EWKT as well.
        struct = geosgeometry_str_to_struct("SRID=5432;POINT (12.0 13.0)")
        struct = cast(Dict, struct)
        self.assertEqual(struct["srid"], "5432")
        self.assertEqual(struct["x"], "12.0")
        self.assertEqual(struct["y"], "13.0")

    def test_negative_coords(self):
        struct = geosgeometry_str_to_struct("SRID=5432;POINT(12.0 -13.0)")
        struct = cast(Dict, struct)
        self.assertEqual(struct["srid"], "5432")
        self.assertEqual(struct["x"], "12.0")
        self.assertEqual(struct["y"], "-13.0")
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/test_widgets.py | tests/test_widgets.py | import unittest
from django.test import TestCase
from wagtail import VERSION as WAGTAIL_VERSION
from wagtailgeowidget import app_settings, geocoders
from wagtailgeowidget.widgets import (
GeocoderField,
GoogleMapsField,
GoogleMapsFieldAdapter,
LeafletField,
LeafletFieldAdapter,
)
class GoogleMapsFieldTestCase(TestCase):
    """Rendering and Telepath-adapter tests for the GoogleMapsField widget.

    FieldPanel widgets (id_prefix='id_') carry Stimulus data attributes in
    their rendered HTML; StreamField widgets (id_prefix='') must not, since
    Telepath initializes them instead.
    """

    def test_google_maps_field_contains_construct(self):
        widget = GoogleMapsField()
        html = widget.render(
            "field",
            "",
            {
                "id": "X",
            },
        )
        self.assertIn(
            '<input type="hidden" name="field" id="X" data-controller="google-maps-field"',
            html,
        )

    def test_streamfield_widget_uses_empty_id_prefix(self):
        """Test that StreamField widgets use empty id_prefix."""
        widget = GoogleMapsField(srid=4326, id_prefix="")
        self.assertEqual(widget.id_prefix, "")

    def test_fieldpanel_widget_includes_stimulus_attributes(self):
        """Test that FieldPanel widgets (id_prefix='id_') include Stimulus controller attributes."""
        widget = GoogleMapsField(srid=4326, id_prefix="id_")
        html = widget.render(
            "field",
            "SRID=4326;POINT(18.0686 59.3293)",
            {
                "id": "test-field",
            },
        )
        self.assertIn('data-controller="google-maps-field"', html)
        self.assertIn("data-google-maps-field-options-value=", html)

    def test_streamfield_widget_excludes_stimulus_attributes(self):
        """Test that StreamField widgets (id_prefix='') exclude Stimulus controller attributes."""
        widget = GoogleMapsField(srid=4326, id_prefix="")
        html = widget.render(
            "field",
            "SRID=4326;POINT(18.0686 59.3293)",
            {
                "id": "test-field",
            },
        )
        self.assertNotIn('data-controller="google-maps-field"', html)
        self.assertNotIn("data-google-maps-field-options-value=", html)

    @unittest.skipIf(WAGTAIL_VERSION < (7, 1), "Test only applicable for Wagtail 7.1+")
    def test_telepath_adapter_js_args_structure(self):
        """Test that the adapter returns correct js_args structure for Telepath."""
        widget = GoogleMapsField(
            srid=4326,
            address_field="address",
            zoom_field="zoom",
        )
        adapter = GoogleMapsFieldAdapter()
        result = adapter.js_args(widget)
        # js_args is (rendered HTML, options dict).
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[0], str)
        self.assertIn('<input type="hidden"', result[0])
        self.assertIsInstance(result[1], dict)
        options = result[1]
        self.assertIn("srid", options)
        self.assertIn("addressField", options)
        self.assertIn("zoomField", options)
        self.assertIn("defaultLocation", options)
        self.assertIn("zoom", options)
        self.assertIn("mapId", options)
        self.assertEqual(options["srid"], 4326)
        self.assertEqual(options["addressField"], "address")
        self.assertEqual(options["zoomField"], "zoom")

    def test_telepath_adapter_streamfield_excludes_stimulus_attributes(self):
        """Test that HTML by adapter for StreamField widget has no Stimulus attributes."""
        widget = GoogleMapsField(srid=4326, id_prefix="")
        adapter = GoogleMapsFieldAdapter()
        result = adapter.js_args(widget)
        html = result[0]
        self.assertNotIn("data-controller=", html)
        self.assertNotIn("data-google-maps-field-options-value=", html)
class LeafletFieldTestCase(TestCase):
    """Rendering and Telepath-adapter tests for the LeafletField widget;
    mirrors GoogleMapsFieldTestCase for the Leaflet backend."""

    def test_leaflet_field_contains_construct(self):
        widget = LeafletField()
        html = widget.render(
            "field",
            "",
            {
                "id": "X",
            },
        )
        self.assertIn(
            '<input type="hidden" name="field" id="X" data-controller="leaflet-field"',
            html,
        )

    def test_value_are_parsed_properly(self):
        widget = LeafletField()
        from html import escape

        html = widget.render(
            "field",
            "SRID=5432;POINT(12.0 13.0)",
            {
                "id": "X",
            },
        )
        # The options JSON is HTML-escaped inside the data attribute.
        self.assertIn(escape('"lat": "13.0"'), html)
        self.assertIn(escape('"lng": "12.0"'), html)

    def test_streamfield_widget_uses_empty_id_prefix(self):
        """Test that StreamField widgets use empty id_prefix."""
        widget = LeafletField(srid=4326, id_prefix="")
        self.assertEqual(widget.id_prefix, "")

    def test_fieldpanel_widget_includes_stimulus_attributes(self):
        """Test that FieldPanel widgets (id_prefix='id_') include Stimulus controller attributes."""
        widget = LeafletField(srid=4326, id_prefix="id_")
        html = widget.render(
            "field",
            "SRID=4326;POINT(18.0686 59.3293)",
            {
                "id": "test-field",
            },
        )
        self.assertIn('data-controller="leaflet-field"', html)
        self.assertIn("data-leaflet-field-options-value=", html)

    def test_streamfield_widget_excludes_stimulus_attributes(self):
        """Test that StreamField widgets (id_prefix='') exclude Stimulus controller attributes."""
        widget = LeafletField(srid=4326, id_prefix="")
        html = widget.render(
            "field",
            "SRID=4326;POINT(18.0686 59.3293)",
            {
                "id": "test-field",
            },
        )
        self.assertNotIn('data-controller="leaflet-field"', html)
        self.assertNotIn("data-leaflet-field-options-value=", html)

    @unittest.skipIf(WAGTAIL_VERSION < (7, 1), "Test only applicable for Wagtail 7.1+")
    def test_telepath_adapter_js_args_structure(self):
        """Test that the adapter returns correct js_args structure for Telepath."""
        widget = LeafletField(
            srid=4326,
            address_field="address",
            zoom_field="zoom",
        )
        adapter = LeafletFieldAdapter()
        result = adapter.js_args(widget)
        # js_args is (rendered HTML, options dict).
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[0], str)
        self.assertIn('<input type="hidden"', result[0])
        self.assertIsInstance(result[1], dict)
        options = result[1]
        self.assertIn("srid", options)
        self.assertIn("addressField", options)
        self.assertIn("zoomField", options)
        self.assertIn("defaultLocation", options)
        self.assertIn("zoom", options)
        self.assertEqual(options["srid"], 4326)
        self.assertEqual(options["addressField"], "address")
        self.assertEqual(options["zoomField"], "zoom")

    def test_telepath_adapter_streamfield_excludes_stimulus_attributes(self):
        """Test that HTML by adapter for StreamField widget has no Stimulus attributes."""
        widget = LeafletField(srid=4326, id_prefix="")
        adapter = LeafletFieldAdapter()
        result = adapter.js_args(widget)
        html = result[0]
        self.assertNotIn("data-controller=", html)
        self.assertNotIn("data-leaflet-field-options-value=", html)
class GeocoderFieldTestCase(TestCase):
    """Tests that GeocoderField renders the right Stimulus geocoder value and
    backend-specific options for each supported geocoder."""

    def setUp(self):
        # Reset module-level app settings so tests don't leak into each other.
        app_settings.MAPBOX_ACCESS_TOKEN = None
        app_settings.MAPBOX_LANGUAGE = "en"

    def test_geocoder_field_contains_construct(self):
        # Nominatim is the default geocoder.
        widget = GeocoderField()
        html = widget.render(
            "field",
            "",
            {
                "id": "X",
            },
        )
        self.assertIn(
            '<input type="text" name="field" id="X" data-controller="geocoder-field" data-geocoder-field-geocoder-value="nominatim"',
            html,
        )

    def test_googlemaps_geocoder_returns_googlemaps_field(self):
        widget = GeocoderField(geocoder=geocoders.GOOGLE_MAPS)
        html = widget.render(
            "field",
            "",
            {
                "id": "X",
            },
        )
        self.assertIn(
            '<input type="text" name="field" id="X" data-controller="geocoder-field" data-geocoder-field-geocoder-value="google_maps"',
            html,
        )

    def test_googlemaps_places_geocoder_returns_googlemaps_field(self):
        widget = GeocoderField(geocoder=geocoders.GOOGLE_MAPS_PLACES)
        html = widget.render(
            "field",
            "",
            {
                "id": "X",
            },
        )
        self.assertIn(
            '<input type="text" name="field" id="X" data-controller="geocoder-field" data-geocoder-field-geocoder-value="google_maps_places"',
            html,
        )

    def test_googlemaps_places_new_geocoder_returns_googlemaps_field(self):
        widget = GeocoderField(geocoder=geocoders.GOOGLE_MAPS_PLACES_NEW)
        html = widget.render(
            "field",
            "",
            {
                "id": "X",
            },
        )
        self.assertIn(
            '<input type="text" name="field" id="X" data-controller="geocoder-field" data-geocoder-field-geocoder-value="google_maps_places_new"',
            html,
        )

    def test_mapbox_geocoder_returns_googlemaps_field(self):
        widget = GeocoderField(geocoder=geocoders.MAPBOX)
        from html import escape

        html = widget.render(
            "field",
            "",
            {
                "id": "X",
            },
        )
        self.assertIn(
            '<input type="text" name="field" id="X" data-controller="geocoder-field" data-geocoder-field-geocoder-value="mapbox"',
            html,
        )
        # No token configured -> serialized as JSON null in the options.
        self.assertIn(escape('accessToken": null'), html)

    def test_mapbox_access_token_gets_outputted(self):
        app_settings.MAPBOX_ACCESS_TOKEN = "<MAPBOX ACCESS TOKEN>"
        widget = GeocoderField(geocoder=geocoders.MAPBOX)
        from html import escape

        html = widget.render(
            "field",
            "",
            {
                "id": "X",
            },
        )
        self.assertIn(
            '<input type="text" name="field" id="X" data-controller="geocoder-field" data-geocoder-field-geocoder-value="mapbox"',
            html,
        )
        self.assertIn(escape('accessToken": "<MAPBOX ACCESS TOKEN>'), html)
        app_settings.MAPBOX_ACCESS_TOKEN = None

    def test_mapbox_language_parameter_gets_outputted(self):
        widget = GeocoderField(geocoder=geocoders.MAPBOX)
        from html import escape

        html = widget.render(
            "field",
            "",
            {
                "id": "X",
            },
        )
        self.assertIn(
            '<input type="text" name="field" id="X" data-controller="geocoder-field" data-geocoder-field-geocoder-value="mapbox"',
            html,
        )
        self.assertIn(escape('language": "en'), html)
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/__init__.py | tests/__init__.py | python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false | |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/geopage_nospatial/views.py | tests/geopage_nospatial/views.py | python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false | |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/geopage_nospatial/admin.py | tests/geopage_nospatial/admin.py | python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false | |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/geopage_nospatial/models.py | tests/geopage_nospatial/models.py | from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import gettext as _
from wagtail import blocks
from wagtail.admin.panels import FieldPanel, MultiFieldPanel
from wagtail.fields import StreamField
from wagtail.models import Page
from wagtailgeowidget import geocoders
from wagtailgeowidget.blocks import (
GeoAddressBlock,
GeoZoomBlock,
GoogleMapsBlock,
LeafletBlock,
)
from wagtailgeowidget.panels import GeoAddressPanel, GoogleMapsPanel, LeafletPanel
class StandardPage(Page):
    """Example page: GoogleMapsPanel backed by plain CharFields (no GeoDjango)."""

    page_description = "Google maps with google maps geocoder"
    address = models.CharField(max_length=250, blank=True, null=True)
    # Location is stored as an "SRID=...;POINT(x y)" string, not a PointField.
    location = models.CharField(max_length=250, blank=True, null=True)
    content_panels = Page.content_panels + [
        MultiFieldPanel(
            [
                GeoAddressPanel("address", geocoder=geocoders.GOOGLE_MAPS),
                GoogleMapsPanel("location", address_field="address"),
            ],
            _("Geo details"),
        ),
    ]

    def get_context(self, request):
        # No-op override, kept for parity with the other example pages.
        data = super().get_context(request)
        return data

    @cached_property
    def point(self):
        """Parse ``location`` into a dict with 'srid', 'x', 'y' keys.

        NOTE(review): returns None when ``location`` is empty/unparsable,
        which makes ``lat``/``lng`` below raise — confirm templates guard.
        """
        from wagtailgeowidget.helpers import geosgeometry_str_to_struct
        return geosgeometry_str_to_struct(self.location)

    @property
    def lat(self):
        return self.point["y"]

    @property
    def lng(self):
        return self.point["x"]
class StandardPageWithLeaflet(Page):
    """Example page: LeafletPanel with the Nominatim geocoder (no GeoDjango)."""

    page_description = "Leaflet with nominatim geocoder"
    address = models.CharField(
        max_length=250,
        help_text=_("Search powered by Nominatim"),
        blank=True,
        null=True,
    )
    # Location is stored as an "SRID=...;POINT(x y)" string, not a PointField.
    location = models.CharField(max_length=250, blank=True, null=True)
    content_panels = Page.content_panels + [
        MultiFieldPanel(
            [
                GeoAddressPanel("address", geocoder=geocoders.NOMINATIM),
                LeafletPanel("location", address_field="address"),
            ],
            _("Geo details"),
        ),
    ]

    def get_context(self, request):
        # No-op override, kept for parity with the other example pages.
        data = super().get_context(request)
        return data

    @cached_property
    def point(self):
        """Parse ``location`` into a dict with 'srid', 'x', 'y' keys.

        NOTE(review): returns None when ``location`` is empty/unparsable,
        which makes ``lat``/``lng`` below raise — confirm templates guard.
        """
        from wagtailgeowidget.helpers import geosgeometry_str_to_struct
        return geosgeometry_str_to_struct(self.location)

    @property
    def lat(self):
        return self.point["y"]

    @property
    def lng(self):
        return self.point["x"]
class StandardPageWithZoom(Page):
    """Example page: GoogleMapsPanel with a separate stored zoom level."""

    page_description = "Google maps with google maps geocoder"
    address = models.CharField(max_length=250, blank=True, null=True)
    # Location is stored as an "SRID=...;POINT(x y)" string, not a PointField.
    location = models.CharField(max_length=250, blank=True, null=True)
    zoom = models.SmallIntegerField(blank=True, null=True)
    content_panels = Page.content_panels + [
        MultiFieldPanel(
            [
                GeoAddressPanel("address", geocoder=geocoders.GOOGLE_MAPS),
                FieldPanel("zoom"),
                GoogleMapsPanel("location", address_field="address", zoom_field="zoom"),
            ],
            _("Geo details"),
        ),
    ]

    def get_context(self, request):
        # No-op override, kept for parity with the other example pages.
        data = super().get_context(request)
        return data

    @cached_property
    def point(self):
        """Parse ``location`` into a dict with 'srid', 'x', 'y' keys.

        NOTE(review): returns None when ``location`` is empty/unparsable,
        which makes ``lat``/``lng`` below raise — confirm templates guard.
        """
        from wagtailgeowidget.helpers import geosgeometry_str_to_struct
        return geosgeometry_str_to_struct(self.location)

    @property
    def lat(self):
        return self.point["y"]

    @property
    def lng(self):
        return self.point["x"]
class StandardPageWithLeafletAndZoom(Page):
page_description = "Leaflet with nominatim geocoder"
address = models.CharField(
max_length=250,
help_text=_("Search powered by Nominatim"),
blank=True,
null=True,
)
location = models.CharField(max_length=250, blank=True, null=True)
zoom = models.SmallIntegerField(blank=True, null=True)
content_panels = Page.content_panels + [
MultiFieldPanel(
[
GeoAddressPanel("address", geocoder=geocoders.NOMINATIM),
FieldPanel("zoom"),
LeafletPanel("location", address_field="address", zoom_field="zoom"),
],
_("Geo details"),
),
]
def get_context(self, request):
data = super().get_context(request)
return data
@cached_property
def point(self):
from wagtailgeowidget.helpers import geosgeometry_str_to_struct
return geosgeometry_str_to_struct(self.location)
@property
def lat(self):
return self.point["y"]
@property
def lng(self):
return self.point["x"]
class StreamPage(Page):
page_description = "All map blocks"
streamfield_params = {}
body = StreamField(
[
("map", GoogleMapsBlock()),
("map_leaflet", LeafletBlock()),
(
"map_struct",
blocks.StructBlock(
[
("address", GeoAddressBlock(required=True)),
("map", GoogleMapsBlock(address_field="address")),
],
icon="user",
),
),
(
"map_struct_leaflet",
blocks.StructBlock(
[
(
"address",
GeoAddressBlock(required=True, geocoder=geocoders.MAPBOX),
),
("map", LeafletBlock(address_field="address")),
],
icon="user",
),
),
(
"map_struct_with_zoom",
blocks.StructBlock(
[
("address", GeoAddressBlock(required=True)),
("zoom", GeoZoomBlock(required=False)),
(
"map",
GoogleMapsBlock(address_field="address", zoom_field="zoom"),
),
],
icon="user",
),
),
(
"map_struct_leaflet_with_zoom",
blocks.StructBlock(
[
("address", GeoAddressBlock(required=True)),
("zoom", GeoZoomBlock(required=False)),
(
"map",
LeafletBlock(address_field="address", zoom_field="zoom"),
),
],
icon="user",
),
),
],
**streamfield_params,
)
content_panels = Page.content_panels + [
FieldPanel("body"),
]
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/geopage_nospatial/__init__.py | tests/geopage_nospatial/__init__.py | python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false | |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/geopage_nospatial/tests.py | tests/geopage_nospatial/tests.py | python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false | |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/geopage_nospatial/apps.py | tests/geopage_nospatial/apps.py | from django.apps import AppConfig
class GeopageNospatialConfig(AppConfig):
name = "tests.geopage_nospatial"
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/geopage_nospatial/migrations/0001_initial.py | tests/geopage_nospatial/migrations/0001_initial.py | # Generated by Django 4.2.23 on 2025-08-14 04:41
from django.db import migrations, models
import django.db.models.deletion
import wagtail.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
("wagtailcore", "0094_alter_page_locale"),
]
operations = [
migrations.CreateModel(
name="StandardPage",
fields=[
(
"page_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="wagtailcore.page",
),
),
("address", models.CharField(blank=True, max_length=250, null=True)),
("location", models.CharField(blank=True, max_length=250, null=True)),
],
options={
"abstract": False,
},
bases=("wagtailcore.page",),
),
migrations.CreateModel(
name="StandardPageWithLeaflet",
fields=[
(
"page_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="wagtailcore.page",
),
),
(
"address",
models.CharField(
blank=True,
help_text="Search powered by Nominatim",
max_length=250,
null=True,
),
),
("location", models.CharField(blank=True, max_length=250, null=True)),
],
options={
"abstract": False,
},
bases=("wagtailcore.page",),
),
migrations.CreateModel(
name="StandardPageWithLeafletAndZoom",
fields=[
(
"page_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="wagtailcore.page",
),
),
(
"address",
models.CharField(
blank=True,
help_text="Search powered by Nominatim",
max_length=250,
null=True,
),
),
("location", models.CharField(blank=True, max_length=250, null=True)),
("zoom", models.SmallIntegerField(blank=True, null=True)),
],
options={
"abstract": False,
},
bases=("wagtailcore.page",),
),
migrations.CreateModel(
name="StandardPageWithZoom",
fields=[
(
"page_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="wagtailcore.page",
),
),
("address", models.CharField(blank=True, max_length=250, null=True)),
("location", models.CharField(blank=True, max_length=250, null=True)),
("zoom", models.SmallIntegerField(blank=True, null=True)),
],
options={
"abstract": False,
},
bases=("wagtailcore.page",),
),
migrations.CreateModel(
name="StreamPage",
fields=[
(
"page_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="wagtailcore.page",
),
),
(
"body",
wagtail.fields.StreamField(
[
("map", 0),
("map_leaflet", 1),
("map_struct", 4),
("map_struct_leaflet", 7),
("map_struct_with_zoom", 10),
("map_struct_leaflet_with_zoom", 12),
],
block_lookup={
0: ("wagtailgeowidget.blocks.GoogleMapsBlock", (), {}),
1: ("wagtailgeowidget.blocks.LeafletBlock", (), {}),
2: (
"wagtailgeowidget.blocks.GeoAddressBlock",
(),
{"required": True},
),
3: (
"wagtailgeowidget.blocks.GoogleMapsBlock",
(),
{"address_field": "address"},
),
4: (
"wagtail.blocks.StructBlock",
[[("address", 2), ("map", 3)]],
{"icon": "user"},
),
5: (
"wagtailgeowidget.blocks.GeoAddressBlock",
(),
{"geocoder": "mapbox", "required": True},
),
6: (
"wagtailgeowidget.blocks.LeafletBlock",
(),
{"address_field": "address"},
),
7: (
"wagtail.blocks.StructBlock",
[[("address", 5), ("map", 6)]],
{"icon": "user"},
),
8: (
"wagtailgeowidget.blocks.GeoZoomBlock",
(),
{"required": False},
),
9: (
"wagtailgeowidget.blocks.GoogleMapsBlock",
(),
{"address_field": "address", "zoom_field": "zoom"},
),
10: (
"wagtail.blocks.StructBlock",
[[("address", 2), ("zoom", 8), ("map", 9)]],
{"icon": "user"},
),
11: (
"wagtailgeowidget.blocks.LeafletBlock",
(),
{"address_field": "address", "zoom_field": "zoom"},
),
12: (
"wagtail.blocks.StructBlock",
[[("address", 2), ("zoom", 8), ("map", 11)]],
{"icon": "user"},
),
},
),
),
],
options={
"abstract": False,
},
bases=("wagtailcore.page",),
),
]
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/geopage_nospatial/migrations/__init__.py | tests/geopage_nospatial/migrations/__init__.py | python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false | |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/examplesite/__init__.py | tests/examplesite/__init__.py | python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false | |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/examplesite/wsgi.py | tests/examplesite/wsgi.py | """
WSGI config for examplesite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.examplesite.settings.dev")
application = get_wsgi_application()
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/examplesite/urls.py | tests/examplesite/urls.py | from django.conf import settings
from django.contrib import admin
from django.urls import include, re_path
from wagtail import urls as wagtail_urls
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.documents import urls as wagtaildocs_urls
from tests.search import views as search_views
urlpatterns = [
re_path(r"^django-admin/", admin.site.urls),
re_path(r"^admin/", include(wagtailadmin_urls)),
re_path(r"^documents/", include(wagtaildocs_urls)),
re_path(r"^search/$", search_views.search, name="search"),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's page serving mechanism. This should be the last pattern in
# the list:
re_path(r"", include(wagtail_urls)),
# Alternatively, if you want Wagtail pages to be served from a subpath
# of your site, rather than the site root:
# re_path(r'^pages/', include(wagtail_urls)),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/examplesite/management/__init__.py | tests/examplesite/management/__init__.py | python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false | |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/examplesite/management/commands/create_superuser_if_none_exists.py | tests/examplesite/management/commands/create_superuser_if_none_exists.py | from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""
Create superuser if none exist
Example:
manage.py create_superuser_if_none_exists --user=admin --password=123
"""
def add_arguments(self, parser):
parser.add_argument("--user", required=True)
parser.add_argument("--password", required=True)
parser.add_argument("--email", default="admin@example.com")
def handle(self, *args, **options):
User = get_user_model()
if User.objects.exists():
return
username = options["user"]
password = options["password"]
email = options["email"]
User.objects.create_superuser(username=username, password=password, email=email)
self.stdout.write('Local user "{}" was created'.format(username))
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/examplesite/management/commands/__init__.py | tests/examplesite/management/commands/__init__.py | python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false | |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/examplesite/settings/dev.py | tests/examplesite/settings/dev.py | from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "RANDOM"
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
try:
from .local import *
except ImportError:
pass
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/examplesite/settings/base_nospatial.py | tests/examplesite/settings/base_nospatial.py | from .base import *
INSTALLED_APPS.remove("django.contrib.gis")
INSTALLED_APPS.remove("geopage")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "SECRET"
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
DATABASES["default"]["ENGINE"] = "django.db.backends.postgresql"
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/examplesite/settings/production.py | tests/examplesite/settings/production.py | from .base import *
DEBUG = False
try:
from .local import *
except ImportError:
pass
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/examplesite/settings/__init__.py | tests/examplesite/settings/__init__.py | import os
from django.core.exceptions import ImproperlyConfigured
def get_env(name, default=None):
"""Get the environment variable or return exception"""
if name in os.environ:
return os.environ[name]
if default is not None:
return default
error_msg = "Set the {} env variable".format(name)
raise ImproperlyConfigured(error_msg)
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/examplesite/settings/base.py | tests/examplesite/settings/base.py | """
Django settings for examplesite project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from . import get_env
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
WAGTAIL_APPS = [
"wagtail.contrib.forms",
"wagtail.contrib.redirects",
"wagtail.contrib.styleguide",
"wagtail.embeds",
"wagtail.sites",
"wagtail.users",
"wagtail.snippets",
"wagtail.documents",
"wagtail.images",
"wagtail.search",
"wagtail.contrib.search_promotions",
"wagtail.admin",
"wagtail",
]
# Application definition
INSTALLED_APPS = WAGTAIL_APPS + [
"modelcluster",
"taggit",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.gis",
"wagtailgeowidget",
"tests.examplesite",
"tests.home",
"tests.search",
"tests.geopage",
"tests.geopage_nospatial",
]
MIDDLEWARE = [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.security.SecurityMiddleware",
]
MIDDLEWARE += [
"wagtail.contrib.redirects.middleware.RedirectMiddleware",
]
ROOT_URLCONF = "tests.examplesite.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PROJECT_DIR, "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "tests.examplesite.wsgi.application"
# Database
# Using PostgreSQL
DATABASES = {
"default": {
# 'ENGINE': 'django.contrib.gis.db.backends.postgis',
"ENGINE": "django.contrib.gis.db.backends.postgis",
"NAME": get_env("DATABASE_NAME"),
"USER": get_env("DATABASE_USER"),
"PASSWORD": get_env("DATABASE_PASSWORD"),
"HOST": get_env("DATABASE_HOST"),
"PORT": get_env("DATABASE_PORT"),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, "static"),
]
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATIC_URL = "/static/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
# Wagtail settings
WAGTAIL_SITE_NAME = "examplesite"
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
WAGTAILADMIN_BASE_URL = "http://example.com"
# Wagtail-geo-widget
GOOGLE_MAPS_V3_APIKEY = get_env("GOOGLE_MAPS_V3_APIKEY")
GEO_WIDGET_ZOOM = 15
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/examplesite/settings/test.py | tests/examplesite/settings/test.py | from .base import *
PASSWORD_HASHERS = ("django.contrib.auth.hashers.MD5PasswordHasher",)
INSTALLED_APPS = [
"django.contrib.contenttypes",
"django.contrib.auth",
"django.contrib.sites",
"wagtail",
"wagtail.admin",
"wagtail.sites",
"wagtail.users",
"wagtail.images",
"taggit",
"wagtailgeowidget",
"tests.examplesite",
"tests.home",
"tests.search",
"tests.geopage",
"tests.geopage_nospatial",
"tests",
]
MIDDLEWARE_CLASSES = []
EMAIL_BACKEND = "django.core.mail.backends.dummy.EmailBackend"
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "unique-snowflake",
}
}
SECRET_KEY = "RANDOM"
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/home/models.py | tests/home/models.py | from wagtail.models import Page
class HomePage(Page):
pass
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/home/__init__.py | tests/home/__init__.py | python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false | |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/home/migrations/0001_initial.py | tests/home/migrations/0001_initial.py | # Generated by Django 4.2.23 on 2025-08-14 04:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
("wagtailcore", "0094_alter_page_locale"),
]
operations = [
migrations.CreateModel(
name="HomePage",
fields=[
(
"page_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="wagtailcore.page",
),
),
],
options={
"abstract": False,
},
bases=("wagtailcore.page",),
),
]
| python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/home/migrations/__init__.py | tests/home/migrations/__init__.py | python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false | |
Frojd/wagtail-geo-widget | https://github.com/Frojd/wagtail-geo-widget/blob/ec36d7bfcd551fc415f50e8b27bd8d68a013c445/tests/geopage/views.py | tests/geopage/views.py | python | MIT | ec36d7bfcd551fc415f50e8b27bd8d68a013c445 | 2026-01-05T07:13:00.652315Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.