id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
183,633 | import torchvision.transforms.functional as F
import warnings
import math
import random
import numpy as np
from PIL import Image
import torch
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import ResizeTransform, TransformList
def clamp(num, min_value, max_value):
    """Clamp *num* into the closed interval [min_value, max_value]."""
    bounded_above = min(num, max_value)
    return max(bounded_above, min_value)
def get_bb(bb, page_size):
    """Convert a polygon (x0, y0, x1, y1, ...) into an axis-aligned bounding
    box normalized to 0-1000 page-relative coordinates.

    Even-indexed entries are x coordinates, odd-indexed entries are y
    coordinates; each extreme is clipped to the page before scaling.
    """
    values = [float(v) for v in bb]
    xs = values[0::2]
    ys = values[1::2]
    width, height = page_size

    def _clip(v, lo, hi):
        # same clamping rule as the module-level clamp() helper
        return max(min(v, hi), lo)

    left = _clip(min(xs), 0, width - 1)
    top = _clip(min(ys), 0, height - 1)
    right = _clip(max(xs), 0, width - 1)
    bottom = _clip(max(ys), 0, height - 1)
    return [
        int(1000 * left / width),
        int(1000 * top / height),
        int(1000 * right / width),
        int(1000 * bottom / height),
    ]
183,634 | import torchvision.transforms.functional as F
import warnings
import math
import random
import numpy as np
from PIL import Image
import torch
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import ResizeTransform, TransformList
def _pil_interp(method):
    """Map an interpolation-method name to a torchvision InterpolationMode.

    Unknown names fall back to bilinear.
    """
    named_modes = {
        'bicubic': F.InterpolationMode.BICUBIC,
        'lanczos': F.InterpolationMode.LANCZOS,
        'hamming': F.InterpolationMode.HAMMING,
    }
    # default bilinear, do we want to allow nearest?
    return named_modes.get(method, F.InterpolationMode.BILINEAR)
183,635 | import torchvision.transforms.functional as F
import warnings
import math
import random
import numpy as np
from PIL import Image
import torch
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import ResizeTransform, TransformList
def pil_loader(path: str) -> Image.Image:
    """Load the image at *path* and return it converted to RGB.

    An explicit file handle is used to avoid a ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835); convert() forces
    the lazy decode while the handle is still open.
    """
    with open(path, 'rb') as handle:
        image = Image.open(handle)
        return image.convert('RGB')
183,636 | import torch
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from transformers import BatchEncoding, PreTrainedTokenizerBase
from transformers.data.data_collator import (
DataCollatorMixin,
_torch_collate_batch,
)
from transformers.file_utils import PaddingStrategy
from typing import NewType
def pre_calc_rel_mat(segment_ids):
    """Compute a boolean same-segment mask.

    Args:
        segment_ids: (batch, seq_len) integer tensor of segment labels.

    Returns:
        (batch, seq_len, seq_len) bool tensor on the same device, where
        [b, i, j] is True iff tokens i and j of batch b share a segment id.

    The original nested Python loops were O(batch * seq_len) tensor
    assignments; a single broadcast comparison produces the identical
    result (element equality is symmetric) entirely in C.
    """
    return segment_ids.unsqueeze(2) == segment_ids.unsqueeze(1)
183,637 | import os
import json
import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from PIL import Image
from layoutlmft.data.image_utils import Compose, RandomResizedCropAndInterpolationWithTwoPic
def pil_loader(path: str) -> Image.Image:
    """Read the file at *path* and decode it as an RGB PIL image.

    Opening via an explicit handle avoids a ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835).
    """
    with open(path, 'rb') as fp:
        return Image.open(fp).convert('RGB')
183,638 | import os
import logging
from transformers.trainer_callback import TrainerCallback
logger = _setup_logger()
def _setup_logger():
log_format = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_format)
data_dir = './data/'
os.makedirs(data_dir, exist_ok=True)
file_handler = logging.FileHandler('{}/log.txt'.format(data_dir))
file_handler.setFormatter(log_format)
logger.handlers = [console_handler, file_handler]
return logger | null |
183,639 | import logging
import torch
from typing import Dict
from functools import partial
from transformers.utils.logging import enable_explicit_format
from transformers.trainer_callback import PrinterCallback
from transformers import (
AutoTokenizer,
HfArgumentParser,
EvalPrediction,
Trainer,
set_seed,
PreTrainedTokenizerFast
)
from logger_config import logger, LoggerCallback
from config import Arguments
from trainers import BiencoderTrainer
from loaders import RetrievalDataLoader
from collators import BiencoderCollator
from metrics import accuracy, batch_mrr
from models import BiencoderModel
logger = _setup_logger()
class Arguments(TrainingArguments):
def __post_init__(self):
def _common_setup(args: Arguments):
    """Per-process setup: quiet non-main ranks, set HF log format, seed RNGs."""
    if args.process_index > 0:
        # Only the main process keeps INFO-level logging; other ranks
        # log warnings and above.
        logger.setLevel(logging.WARNING)
    enable_explicit_format()
    set_seed(args.seed)
183,640 | import logging
import torch
from typing import Dict
from functools import partial
from transformers.utils.logging import enable_explicit_format
from transformers.trainer_callback import PrinterCallback
from transformers import (
AutoTokenizer,
HfArgumentParser,
EvalPrediction,
Trainer,
set_seed,
PreTrainedTokenizerFast
)
from logger_config import logger, LoggerCallback
from config import Arguments
from trainers import BiencoderTrainer
from loaders import RetrievalDataLoader
from collators import BiencoderCollator
from metrics import accuracy, batch_mrr
from models import BiencoderModel
class Arguments(TrainingArguments):
    """Extended HuggingFace TrainingArguments covering every stage of the
    pipeline: biencoder training, corpus encoding, index search, reranking,
    and knowledge distillation (teacher scoring + student training).

    NOTE(review): relies on TrainingArguments, dataclasses.field,
    typing.Optional and os being importable at module level — those imports
    are not visible in this view; confirm.
    """
    # --- model / data locations ---
    model_name_or_path: str = field(
        default='bert-base-uncased',
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    data_dir: str = field(
        default=None, metadata={"help": "Path to train directory"}
    )
    task_type: str = field(
        default='ir', metadata={"help": "task type: ir / qa"}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a jsonlines file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={
            "help": "An optional input evaluation data file to evaluate the metrics on (a jsonlines file)."
        },
    )
    # --- biencoder training ---
    train_n_passages: int = field(
        default=8,
        metadata={"help": "number of passages for each example (including both positive and negative passages)"}
    )
    share_encoder: bool = field(
        default=True,
        metadata={"help": "no weight sharing between qry passage encoders"}
    )
    use_first_positive: bool = field(
        default=False,
        metadata={"help": "Always use the first positive passage"}
    )
    use_scaled_loss: bool = field(
        default=True,
        metadata={"help": "Use scaled loss or not"}
    )
    loss_scale: float = field(
        default=-1.,
        metadata={"help": "loss scale, -1 will use world_size"}
    )
    add_pooler: bool = field(default=False)
    out_dimension: int = field(
        default=768,
        metadata={"help": "output dimension for pooler"}
    )
    t: float = field(default=0.05, metadata={"help": "temperature of biencoder training"})
    l2_normalize: bool = field(default=True, metadata={"help": "L2 normalize embeddings or not"})
    t_warmup: bool = field(default=False, metadata={"help": "warmup temperature"})
    full_contrastive_loss: bool = field(default=True, metadata={"help": "use full contrastive loss or not"})
    # following arguments are used for encoding documents
    do_encode: bool = field(default=False, metadata={"help": "run the encoding loop"})
    encode_in_path: str = field(default=None, metadata={"help": "Path to data to encode"})
    encode_save_dir: str = field(default=None, metadata={"help": "where to save the encode"})
    encode_shard_size: int = field(default=int(2 * 10**6))
    encode_batch_size: int = field(default=256)
    # used for index search
    do_search: bool = field(default=False, metadata={"help": "run the index search loop"})
    search_split: str = field(default='dev', metadata={"help": "which split to search"})
    search_batch_size: int = field(default=128, metadata={"help": "query batch size for index search"})
    search_topk: int = field(default=200, metadata={"help": "return topk search results"})
    search_out_dir: str = field(default='', metadata={"help": "output directory for writing search results"})
    # used for reranking
    do_rerank: bool = field(default=False, metadata={"help": "run the reranking loop"})
    rerank_max_length: int = field(default=256, metadata={"help": "max length for rerank inputs"})
    rerank_in_path: str = field(default='', metadata={"help": "Path to predictions for rerank"})
    rerank_out_path: str = field(default='', metadata={"help": "Path to write rerank results"})
    rerank_split: str = field(default='dev', metadata={"help": "which split to rerank"})
    rerank_batch_size: int = field(default=128, metadata={"help": "rerank batch size"})
    rerank_depth: int = field(default=1000, metadata={"help": "rerank depth, useful for debugging purpose"})
    rerank_forward_factor: int = field(
        default=1,
        metadata={"help": "forward n passages, then select top n/factor passages for backward"}
    )
    rerank_use_rdrop: bool = field(default=False, metadata={"help": "use R-Drop regularization for re-ranker"})
    # used for knowledge distillation
    do_kd_gen_score: bool = field(default=False, metadata={"help": "run the score generation for distillation"})
    kd_gen_score_split: str = field(default='dev', metadata={
        "help": "Which split to use for generation of teacher score"
    })
    kd_gen_score_batch_size: int = field(default=128, metadata={"help": "batch size for teacher score generation"})
    kd_gen_score_n_neg: int = field(default=30, metadata={"help": "number of negatives to compute teacher scores"})
    do_kd_biencoder: bool = field(default=False, metadata={"help": "knowledge distillation to biencoder"})
    kd_mask_hn: bool = field(default=True, metadata={"help": "mask out hard negatives for distillation"})
    kd_cont_loss_weight: float = field(default=1.0, metadata={"help": "weight for contrastive loss"})
    # --- replaced-LM (MatchLM) pre-training ---
    rlm_generator_model_name: Optional[str] = field(
        default='google/electra-base-generator',
        metadata={"help": "generator for replace LM pre-training"}
    )
    rlm_freeze_generator: Optional[bool] = field(
        default=True,
        metadata={'help': 'freeze generator params or not'}
    )
    rlm_generator_mlm_weight: Optional[float] = field(
        default=0.2,
        metadata={'help': 'weight for generator MLM loss'}
    )
    all_use_mask_token: Optional[bool] = field(
        default=False,
        metadata={'help': 'Do not use 80:10:10 mask, use [MASK] for all places'}
    )
    rlm_num_eval_samples: Optional[int] = field(
        default=4096,
        metadata={"help": "number of evaluation samples pre-training"}
    )
    rlm_max_length: Optional[int] = field(
        default=144,
        metadata={"help": "max length for MatchLM pre-training"}
    )
    rlm_decoder_layers: Optional[int] = field(
        default=2,
        metadata={"help": "number of transformer layers for MatchLM decoder part"}
    )
    rlm_encoder_mask_prob: Optional[float] = field(
        default=0.3,
        metadata={'help': 'mask rate for encoder'}
    )
    rlm_decoder_mask_prob: Optional[float] = field(
        default=0.5,
        metadata={'help': 'mask rate for decoder'}
    )
    # --- tokenization lengths ---
    q_max_len: int = field(
        default=32,
        metadata={
            "help": "The maximum total input sequence length after tokenization for query."
        },
    )
    p_max_len: int = field(
        default=144,
        metadata={
            "help": "The maximum total input sequence length after tokenization for passage."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
                    "value if set."
        },
    )
    dry_run: Optional[bool] = field(
        default=False,
        metadata={'help': 'Set dry_run to True for debugging purpose'}
    )

    def __post_init__(self):
        """Validate flag combinations and apply dry-run / per-stage overrides."""
        assert os.path.exists(self.data_dir)
        assert torch.cuda.is_available(), 'Only support running on GPUs'
        assert self.task_type in ['ir', 'qa']
        if self.dry_run:
            # Shrink every knob so a full pipeline pass completes in minutes.
            self.logging_steps = 1
            self.max_train_samples = self.max_train_samples or 128
            self.num_train_epochs = 1
            self.per_device_train_batch_size = min(2, self.per_device_train_batch_size)
            self.train_n_passages = min(4, self.train_n_passages)
            self.rerank_forward_factor = 1
            self.gradient_accumulation_steps = 1
            self.rlm_num_eval_samples = min(256, self.rlm_num_eval_samples)
            self.max_steps = 30
            self.save_steps = self.eval_steps = 30
            logger.warning('Dry run: set logging_steps=1')
        if self.do_encode:
            assert self.encode_save_dir
            os.makedirs(self.encode_save_dir, exist_ok=True)
            assert os.path.exists(self.encode_in_path)
        if self.do_search:
            assert os.path.exists(self.encode_save_dir)
            assert self.search_out_dir
            os.makedirs(self.search_out_dir, exist_ok=True)
        if self.do_rerank:
            assert os.path.exists(self.rerank_in_path)
            logger.info('Rerank result will be written to {}'.format(self.rerank_out_path))
            assert self.train_n_passages > 1, 'Having positive passages only does not make sense for training re-ranker'
            # forward batches must split evenly into backward sub-batches
            assert self.train_n_passages % self.rerank_forward_factor == 0
        if self.do_kd_gen_score:
            assert os.path.exists('{}/{}.jsonl'.format(self.data_dir, self.kd_gen_score_split))
        if self.do_kd_biencoder:
            if self.use_scaled_loss:
                assert not self.kd_mask_hn, 'Use scaled loss only works with not masking out hard negatives'
        if torch.cuda.device_count() <= 1:
            # single-GPU runs log more frequently
            self.logging_steps = min(10, self.logging_steps)
        super(Arguments, self).__post_init__()
        if self.output_dir:
            os.makedirs(self.output_dir, exist_ok=True)
        self.label_names = ['labels']
def accuracy(output: torch.tensor, target: torch.tensor, topk=(1,)) -> List[float]:
    """Top-k accuracy, in percent, for each k in *topk*.

    Args:
        output: (batch, num_classes) score matrix.
        target: (batch,) ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        One percentage per k, in the order given by *topk*.
    """
    largest_k = max(topk)
    num_samples = target.size(0)
    # (largest_k, batch): row r holds each sample's rank-r prediction.
    top_indices = output.topk(largest_k, 1, True, True)[1].t()
    hits = top_indices.eq(target.view(1, -1).expand_as(top_indices))
    percentages = []
    for k in topk:
        hit_count = hits[:k].contiguous().view(-1).float().sum(0, keepdim=True)
        percentages.append(hit_count.mul_(100.0 / num_samples).item())
    return percentages
def batch_mrr(output: torch.tensor, target: torch.tensor) -> float:
    """Mean reciprocal rank (scaled by 100) of each row's target.

    Args:
        output: (batch, num_candidates) score matrix.
        target: (batch,) index of the relevant candidate per row.
    """
    assert output.dim() == 2
    assert target.dim() == 1
    _, order = torch.sort(output, dim=-1, descending=True)
    # position of each target within its row's descending sort
    _, hit_positions = torch.nonzero(order.eq(target.unsqueeze(-1)).long(), as_tuple=True)
    assert hit_positions.shape[0] == output.shape[0]
    ranks = hit_positions + 1
    mean_rr = torch.sum(100 / ranks.float()) / ranks.shape[0]
    return mean_rr.item()
def _compute_metrics(args: Arguments, eval_pred: EvalPrediction) -> Dict[str, float]:
    """In-batch retrieval metrics (MRR, acc@1, acc@3) from eval predictions."""
    # field consistent with BiencoderOutput
    scores = torch.tensor(eval_pred.predictions[-1]).float()
    num_queries, num_candidates = scores.shape[0], scores.shape[1]
    # each query's positive sits train_n_passages apart; wrap into this shard
    labels = torch.arange(0, num_queries, dtype=torch.long) * args.train_n_passages
    labels = labels % num_candidates
    acc1, acc3 = accuracy(output=scores, target=labels, topk=(1, 3))
    mrr = batch_mrr(output=scores, target=labels)
    return {'mrr': mrr, 'acc1': acc1, 'acc3': acc3}
183,641 | import json
import torch
import torch.distributed as dist
from typing import List, Union, Optional, Tuple, Mapping, Dict
def dist_gather_tensor(t: Optional[torch.Tensor]) -> Optional[torch.Tensor]:
    """All-gather *t* across ranks and concatenate along dim 0.

    Returns None when given None. The local rank's slot is overwritten with
    the original tensor so it stays connected to this rank's autograd graph
    (all_gather outputs do not carry gradients).
    """
    if t is None:
        return None
    local = t.contiguous()
    gathered = [torch.empty_like(local) for _ in range(dist.get_world_size())]
    dist.all_gather(gathered, local)
    gathered[dist.get_rank()] = local
    return torch.cat(gathered, dim=0)
183,642 | import json
import torch
import torch.distributed as dist
from typing import List, Union, Optional, Tuple, Mapping, Dict
def select_grouped_indices(scores: torch.Tensor,
                           group_size: int,
                           start: int = 0) -> torch.Tensor:
    """Build a (batch, group_size) index matrix where row i selects the
    contiguous columns [start + i*group_size, start + (i+1)*group_size).

    Returned on the same device as *scores*.
    """
    assert len(scores.shape) == 2
    num_rows = scores.shape[0]
    assert num_rows * group_size <= scores.shape[1]
    within_group = torch.arange(0, group_size, dtype=torch.long).repeat(num_rows, 1)
    row_offsets = torch.arange(0, num_rows, dtype=torch.long).unsqueeze(-1) * group_size
    grouped = within_group + row_offsets + start
    return grouped.to(scores.device)
183,643 | import json
import torch
import torch.distributed as dist
from typing import List, Union, Optional, Tuple, Mapping, Dict
def full_contrastive_scores_and_labels(
        query: torch.Tensor,
        key: torch.Tensor,
        use_all_pairs: bool = True) -> Tuple[torch.Tensor, torch.Tensor]:
    """Similarity scores and labels for in-batch contrastive training.

    *key* holds train_n_passages rows per query; each query's positive key is
    the first of its group, so label i is i * train_n_passages. When
    use_all_pairs is set, key-query, query-query and key-key similarities
    (diagonals masked with -inf to drop self pairs) are appended as extra
    negatives.
    """
    assert key.shape[0] % query.shape[0] == 0, '{} % {} > 0'.format(key.shape[0], query.shape[0])
    train_n_passages = key.shape[0] // query.shape[0]
    labels = torch.arange(0, query.shape[0], dtype=torch.long, device=query.device) * train_n_passages
    # batch_size x (batch_size x n_psg)
    qk = torch.mm(query, key.t())
    if not use_all_pairs:
        return qk, labels
    # batch_size x dim: only each query's positive key
    positive_keys = key.index_select(dim=0, index=labels)
    assert query.shape[0] == positive_keys.shape[0]

    def _masked_sim(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        # batch_size x batch_size, self-similarity removed
        sim = torch.mm(a, b.t())
        sim.fill_diagonal_(float('-inf'))
        return sim

    scores = torch.cat([qk,
                        _masked_sim(positive_keys, query),
                        _masked_sim(query, query),
                        _masked_sim(positive_keys, positive_keys)], dim=-1)
    return scores, labels
183,644 | import json
import torch
import torch.distributed as dist
from typing import List, Union, Optional, Tuple, Mapping, Dict
def slice_batch_dict(batch_dict: Dict[str, torch.Tensor], prefix: str) -> dict:
    """Select entries whose key starts with *prefix*, stripping the prefix."""
    selected = {}
    cut = len(prefix)
    for key, value in batch_dict.items():
        if key.startswith(prefix):
            selected[key[cut:]] = value
    return selected
183,645 | import os
import tqdm
import torch
from contextlib import nullcontext
from torch.utils.data import DataLoader
from functools import partial
from datasets import load_dataset
from typing import Dict, List
from transformers.file_utils import PaddingStrategy
from transformers import (
AutoTokenizer,
PreTrainedTokenizerFast,
DataCollatorWithPadding,
HfArgumentParser,
BatchEncoding
)
from config import Arguments
from logger_config import logger
from utils import move_to_cuda
from models import BiencoderModelForInference, BiencoderOutput
args: Arguments = parser.parse_args_into_dataclasses()[0]
def _worker_encode_passages(gpu_idx: int):
    """Encode one GPU's shard of the passage corpus and save embeddings to disk.

    Runs inside torch.multiprocessing.spawn: *gpu_idx* selects both the CUDA
    device and the dataset shard this worker handles.
    """
    def _get_out_path(shard_idx: int = 0) -> str:
        # One embedding file per (worker, shard) pair.
        return '{}/shard_{}_{}'.format(args.encode_save_dir, gpu_idx, shard_idx)
    if os.path.exists(_get_out_path(0)):
        logger.error('{} already exists, will skip encoding'.format(_get_out_path(0)))
        return
    dataset = load_dataset('json', data_files=args.encode_in_path)['train']
    if args.dry_run:
        dataset = dataset.select(range(4096))
    # contiguous=True keeps rows in original order, so global passage indices
    # can be reconstructed from per-worker offsets at search time.
    dataset = dataset.shard(num_shards=torch.cuda.device_count(),
                            index=gpu_idx,
                            contiguous=True)
    logger.info('GPU {} needs to process {} examples'.format(gpu_idx, len(dataset)))
    torch.cuda.set_device(gpu_idx)
    tokenizer: PreTrainedTokenizerFast = AutoTokenizer.from_pretrained(args.model_name_or_path)
    model: BiencoderModelForInference = BiencoderModelForInference.build(args)
    model.eval()
    model.cuda()
    # NOTE(review): _psg_transform_func is not defined in this view —
    # presumably tokenizes passages on the fly; confirm its signature.
    dataset.set_transform(partial(_psg_transform_func, tokenizer))
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if args.fp16 else None)
    data_loader = DataLoader(
        dataset,
        batch_size=args.encode_batch_size,
        shuffle=False,
        drop_last=False,
        num_workers=args.dataloader_num_workers,
        collate_fn=data_collator,
        pin_memory=True)
    num_encoded_docs, encoded_embeds, cur_shard_idx = 0, [], 0
    for batch_dict in tqdm.tqdm(data_loader, desc='passage encoding', mininterval=8):
        batch_dict = move_to_cuda(batch_dict)
        with torch.cuda.amp.autocast() if args.fp16 else nullcontext():
            outputs: BiencoderOutput = model(query=None, passage=batch_dict)
        encoded_embeds.append(outputs.p_reps.cpu())
        num_encoded_docs += outputs.p_reps.shape[0]
        # Flush a shard file once enough embeddings are buffered, bounding
        # host memory usage.
        if num_encoded_docs >= args.encode_shard_size:
            out_path = _get_out_path(cur_shard_idx)
            concat_embeds = torch.cat(encoded_embeds, dim=0)
            logger.info('GPU {} save {} embeds to {}'.format(gpu_idx, concat_embeds.shape[0], out_path))
            torch.save(concat_embeds, out_path)
            cur_shard_idx += 1
            num_encoded_docs = 0
            encoded_embeds.clear()
    # Flush any remainder smaller than encode_shard_size.
    if num_encoded_docs > 0:
        out_path = _get_out_path(cur_shard_idx)
        concat_embeds = torch.cat(encoded_embeds, dim=0)
        logger.info('GPU {} save {} embeds to {}'.format(gpu_idx, concat_embeds.shape[0], out_path))
        torch.save(concat_embeds, out_path)
    logger.info('Done computing score for worker {}'.format(gpu_idx))
logger = _setup_logger()
def _batch_encode_passages():
    """Fan passage encoding out to every visible GPU via multiprocessing spawn."""
    logger.info('Args={}'.format(str(args)))
    gpu_count = torch.cuda.device_count()
    if gpu_count == 0:
        logger.error('No gpu available')
        return
    logger.info('Use {} gpus'.format(gpu_count))
    # Each spawned worker receives its process index as gpu_idx.
    torch.multiprocessing.spawn(_worker_encode_passages, args=(), nprocs=gpu_count)
    logger.info('Done batch encode passages')
183,646 | import json
import os
import glob
import tqdm
import torch
from contextlib import nullcontext
from torch.utils.data import DataLoader
from functools import partial
from collections import defaultdict
from datasets import Dataset
from typing import Dict, List, Tuple
from transformers.file_utils import PaddingStrategy
from transformers import (
AutoTokenizer,
PreTrainedTokenizerFast,
DataCollatorWithPadding,
HfArgumentParser,
BatchEncoding
)
from config import Arguments
from logger_config import logger
from utils import move_to_cuda, save_json_to_file
from metrics import compute_mrr, trec_eval, ScoredDoc
from data_utils import load_queries, load_qrels, load_msmarco_predictions, save_preds_to_msmarco_format
from models import BiencoderModelForInference, BiencoderOutput
args: Arguments = parser.parse_args_into_dataclasses()[0]
def _worker_batch_search(gpu_idx: int):
    """Brute-force search one GPU's query shard against all passage shards.

    Maintains a running top-k per query while streaming over passage embedding
    shards, then writes tab-separated (query_id, doc_id, rank, score) lines to
    this worker's output file.

    NOTE(review): _get_all_shards_path, _worker_encode_queries and
    _get_topk_result_save_path are defined elsewhere in this module — confirm.
    """
    embeds_path_list = _get_all_shards_path()
    query_embeds, query_ids, query_id_to_text = _worker_encode_queries(gpu_idx)
    assert query_embeds.shape[0] == len(query_ids), '{} != {}'.format(query_embeds.shape[0], len(query_ids))
    query_id_to_topk = defaultdict(list)
    # Global passage index of the first row of the current shard.
    psg_idx_offset = 0
    for shard_idx, shard_path in enumerate(embeds_path_list):
        shard_psg_embed = torch.load(shard_path, map_location=lambda storage, loc: storage).to(query_embeds.device)
        logger.info('Load {} passage embeddings from {}'.format(shard_psg_embed.shape[0], shard_path))
        for start in tqdm.tqdm(range(0, len(query_ids), args.search_batch_size),
                               desc="search shard {}".format(shard_idx),
                               mininterval=5):
            batch_query_embed = query_embeds[start:(start + args.search_batch_size)]
            batch_query_ids = query_ids[start:(start + args.search_batch_size)]
            batch_score = torch.mm(batch_query_embed, shard_psg_embed.t())
            batch_sorted_score, batch_sorted_indices = torch.topk(batch_score, k=args.search_topk, dim=-1, largest=True)
            for batch_idx, query_id in enumerate(batch_query_ids):
                cur_scores = batch_sorted_score[batch_idx].cpu().tolist()
                # Shift shard-local indices to global passage ids.
                cur_indices = [idx + psg_idx_offset for idx in batch_sorted_indices[batch_idx].cpu().tolist()]
                query_id_to_topk[query_id] += list(zip(cur_scores, cur_indices))
                # Merge with earlier shards: highest score first, ties broken
                # by smaller passage index; keep only search_topk entries.
                query_id_to_topk[query_id] = sorted(query_id_to_topk[query_id], key=lambda t: (-t[0], t[1]))
                query_id_to_topk[query_id] = query_id_to_topk[query_id][:args.search_topk]
        psg_idx_offset += shard_psg_embed.shape[0]
    out_path = _get_topk_result_save_path(worker_idx=gpu_idx)
    with open(out_path, 'w', encoding='utf-8') as writer:
        for query_id in query_id_to_text:
            for rank, (score, doc_id) in enumerate(query_id_to_topk[query_id]):
                writer.write('{}\t{}\t{}\t{}\n'.format(query_id, doc_id, rank + 1, round(score, 4)))
    logger.info('Write scores to {} done'.format(out_path))
def _compute_and_save_metrics(worker_cnt: int):
    """Merge per-worker top-k files into one MSMARCO-format prediction file,
    compute trec metrics when qrels for the split exist, then delete shards.
    """
    preds: Dict[str, List[ScoredDoc]] = {}
    for worker_idx in range(worker_cnt):
        path = _get_topk_result_save_path(worker_idx)
        preds.update(load_msmarco_predictions(path))
    out_path = os.path.join(args.search_out_dir, '{}.msmarco.txt'.format(args.search_split))
    save_preds_to_msmarco_format(preds, out_path)
    logger.info('Merge done: save {} predictions to {}'.format(len(preds), out_path))
    path_qrels = os.path.join(args.data_dir, '{}_qrels.txt'.format(args.search_split))
    if os.path.exists(path_qrels):
        qrels = load_qrels(path=path_qrels)
        all_metrics = trec_eval(qrels=qrels, predictions=preds)
        all_metrics['mrr'] = compute_mrr(qrels=qrels, predictions=preds)
        logger.info('{} trec metrics = {}'.format(args.search_split, json.dumps(all_metrics, ensure_ascii=False, indent=4)))
        save_json_to_file(all_metrics, os.path.join(args.search_out_dir, 'metrics_{}.json'.format(args.search_split)))
    else:
        logger.warning('No qrels found for {}'.format(args.search_split))
    # do some cleanup
    for worker_idx in range(worker_cnt):
        path = _get_topk_result_save_path(worker_idx)
        os.remove(path)
logger = _setup_logger()
def _batch_search_queries():
    """Spawn one search worker per GPU, then merge results and compute metrics."""
    logger.info('Args={}'.format(str(args)))
    gpu_count = torch.cuda.device_count()
    if gpu_count == 0:
        logger.error('No gpu available')
        return
    logger.info('Use {} gpus'.format(gpu_count))
    torch.multiprocessing.spawn(_worker_batch_search, args=(), nprocs=gpu_count)
    logger.info('Done batch search queries')
    _compute_and_save_metrics(gpu_count)
183,647 | import os
import tqdm
import torch
from contextlib import nullcontext
from torch.utils.data import DataLoader
from functools import partial
from datasets import Dataset
from typing import Dict, List
from transformers.file_utils import PaddingStrategy
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers import (
AutoTokenizer,
PreTrainedTokenizerFast,
DataCollatorWithPadding,
HfArgumentParser,
BatchEncoding
)
from config import Arguments
from logger_config import logger
from utils import move_to_cuda
from models import RerankerForInference
from data_utils import load_msmarco_predictions, load_corpus, load_queries, \
merge_rerank_predictions, get_rerank_shard_path
args: Arguments = parser.parse_args_into_dataclasses()[0]
def _worker_compute_reranker_score(gpu_idx: int):
    """Re-score one GPU's shard of retrieval predictions with the cross-encoder.

    Worker for torch.multiprocessing.spawn: keeps the top rerank_depth docs
    per query from the first-stage predictions, shards the (qid, pid) pairs by
    gpu_idx, and writes tab-separated qid/pid/-1/score lines to this worker's
    shard file.
    """
    preds = load_msmarco_predictions(args.rerank_in_path)
    query_ids = sorted(list(preds.keys()))
    qid_pid = []
    for query_id in tqdm.tqdm(query_ids, desc='load qid-pid', mininterval=2):
        # Only the top rerank_depth candidates per query get reranked.
        qid_pid += [(scored_doc.qid, scored_doc.pid) for scored_doc in preds[query_id]
                    if scored_doc.rank <= args.rerank_depth]
    dataset = Dataset.from_dict({'query_id': [t[0] for t in qid_pid],
                                 'doc_id': [t[1] for t in qid_pid]})
    dataset = dataset.shard(num_shards=torch.cuda.device_count(),
                            index=gpu_idx,
                            contiguous=True)
    logger.info('GPU {} needs to process {} examples'.format(gpu_idx, len(dataset)))
    torch.cuda.set_device(gpu_idx)
    query_ids, doc_ids = dataset['query_id'], dataset['doc_id']
    assert len(dataset) == len(query_ids)
    tokenizer: PreTrainedTokenizerFast = AutoTokenizer.from_pretrained(args.model_name_or_path)
    model: RerankerForInference = RerankerForInference.from_pretrained(args.model_name_or_path)
    model.eval()
    model.cuda()
    corpus: Dataset = load_corpus(path=os.path.join(args.data_dir, 'passages.jsonl.gz'))
    queries = load_queries(path='{}/{}_queries.tsv'.format(args.data_dir, args.rerank_split),
                           task_type=args.task_type)
    # NOTE(review): _rerank_transform_func is not defined in this view —
    # presumably builds (query, passage) inputs for the tokenizer; confirm.
    dataset.set_transform(partial(_rerank_transform_func, tokenizer, corpus, queries))
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if args.fp16 else None)
    data_loader = DataLoader(
        dataset,
        batch_size=args.rerank_batch_size,
        shuffle=False,
        drop_last=False,
        num_workers=args.dataloader_num_workers,
        collate_fn=data_collator,
        pin_memory=True)
    scores = []
    for batch_dict in tqdm.tqdm(data_loader, desc='passage rerank', mininterval=5):
        batch_dict = move_to_cuda(batch_dict)
        with torch.cuda.amp.autocast() if args.fp16 else nullcontext():
            outputs: SequenceClassifierOutput = model(batch_dict)
        scores.append(outputs.logits.squeeze(dim=-1).cpu())
        assert len(scores[-1].shape) == 1
    all_scores = torch.cat(scores, dim=-1)
    assert all_scores.shape[0] == len(query_ids), '{} != {}'.format(all_scores.shape[0], len(query_ids))
    all_scores = all_scores.tolist()
    with open(get_rerank_shard_path(args, gpu_idx), 'w', encoding='utf-8') as writer:
        for idx in range(len(query_ids)):
            # dummy rank, since a query may be split across different workers
            writer.write('{}\t{}\t{}\t{}\n'.format(query_ids[idx], doc_ids[idx], -1, round(all_scores[idx], 5)))
    logger.info('Done computing rerank score for worker {}'.format(gpu_idx))
logger = _setup_logger()
def merge_rerank_predictions(args: Arguments, gpu_count: int):
    """Merge per-worker rerank shards, restore the un-reranked tail, and save
    final MSMARCO-format predictions (plus trec metrics when qrels exist).

    NOTE(review): this definition shadows the merge_rerank_predictions
    imported from data_utils above, and references ScoredDoc, json,
    save_preds_to_msmarco_format, load_qrels and save_json_to_file that are
    not imported in this view — presumably resolved at module level; confirm.
    """
    from metrics import trec_eval, compute_mrr
    qid_to_scored_doc: Dict[str, List[ScoredDoc]] = {}
    for worker_idx in range(gpu_count):
        path = get_rerank_shard_path(args, worker_idx)
        for line in tqdm.tqdm(open(path, 'r', encoding='utf-8'), 'merge results', mininterval=3):
            fs = line.strip().split('\t')
            qid, pid, _, score = fs
            score = float(score)
            if qid not in qid_to_scored_doc:
                qid_to_scored_doc[qid] = []
            scored_doc = ScoredDoc(qid=qid, pid=pid, rank=-1, score=score)
            qid_to_scored_doc[qid].append(scored_doc)
    # Sort each query's reranked docs by cross-encoder score, best first.
    qid_to_scored_doc = {k: sorted(v, key=lambda sd: sd.score, reverse=True) for k, v in qid_to_scored_doc.items()}
    ori_preds = load_msmarco_predictions(path=args.rerank_in_path)
    for query_id in list(qid_to_scored_doc.keys()):
        # Docs beyond rerank_depth keep their first-stage order, appended with
        # strictly decreasing synthetic scores below the reranked minimum.
        remain_scored_docs = ori_preds[query_id][args.rerank_depth:]
        for idx, sd in enumerate(remain_scored_docs):
            # make sure the order is not broken
            sd.score = qid_to_scored_doc[query_id][-1].score - idx - 1
        qid_to_scored_doc[query_id] += remain_scored_docs
        # No passage may appear twice for a query.
        assert len(set([sd.pid for sd in qid_to_scored_doc[query_id]])) == len(qid_to_scored_doc[query_id])
    save_preds_to_msmarco_format(qid_to_scored_doc, out_path=args.rerank_out_path)
    path_qrels = '{}/{}_qrels.txt'.format(args.data_dir, args.rerank_split)
    if os.path.exists(path_qrels):
        qrels = load_qrels(path=path_qrels)
        all_metrics = trec_eval(qrels=qrels, predictions=qid_to_scored_doc)
        all_metrics['mrr'] = compute_mrr(qrels=qrels, predictions=qid_to_scored_doc)
        logger.info('{} trec metrics = {}'.format(args.rerank_split, json.dumps(all_metrics, ensure_ascii=False, indent=4)))
        metrics_out_path = '{}/metrics_rerank_{}.json'.format(os.path.dirname(args.rerank_out_path), args.rerank_split)
        save_json_to_file(all_metrics, metrics_out_path)
    else:
        logger.warning('No qrels found for {}'.format(args.rerank_split))
    # cleanup some intermediate results
    for worker_idx in range(gpu_count):
        path = get_rerank_shard_path(args, worker_idx)
        os.remove(path)
def _batch_compute_reranker_score():
    """Spawn one rerank-scoring worker per GPU, then merge their shard outputs."""
    logger.info('Args={}'.format(str(args)))
    gpu_count = torch.cuda.device_count()
    if gpu_count == 0:
        logger.error('No gpu available')
        return
    logger.info('Use {} gpus'.format(gpu_count))
    torch.multiprocessing.spawn(_worker_compute_reranker_score, args=(), nprocs=gpu_count)
    logger.info('Done batch compute rerank score')
    merge_rerank_predictions(args, gpu_count)
    logger.info('Done merge results')
183,648 | import os
import tqdm
import torch
from contextlib import nullcontext
from torch.utils.data import DataLoader
from functools import partial
from datasets import Dataset, load_dataset
from typing import Dict, List
from transformers.file_utils import PaddingStrategy
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers import (
AutoTokenizer,
PreTrainedTokenizerFast,
DataCollatorWithPadding,
HfArgumentParser,
BatchEncoding
)
from config import Arguments
from logger_config import logger
from utils import move_to_cuda
from models import RerankerForInference
from data_utils import load_corpus, load_queries, save_to_readable_format
args: Arguments = parser.parse_args_into_dataclasses()[0]
kd_gen_score_out_path = os.path.join(args.data_dir, 'kd_{}.jsonl'.format(args.kd_gen_score_split))
def _worker_gen_teacher_score(gpu_idx: int):
    """Score (query, passage) pairs with the cross-encoder teacher on one GPU.

    Worker for torch.multiprocessing.spawn: shards the KD input data by
    gpu_idx, scores every positive and up to kd_gen_score_n_neg negatives per
    query, and writes tab-separated qid/pid/score lines to this worker's
    shard file.

    NOTE(review): kd_gen_score_in_path, _kd_gen_score_transform_func and
    _get_shard_path are defined elsewhere in this module — confirm.
    """
    dataset = load_dataset('json', data_files=kd_gen_score_in_path)['train']
    if args.dry_run:
        dataset = dataset.select(range(100))
    dataset = dataset.shard(num_shards=torch.cuda.device_count(),
                            index=gpu_idx,
                            contiguous=True)
    # Flatten each example into (query_id, doc_id) pairs: all positives plus
    # the first kd_gen_score_n_neg negatives.
    qid_pids = []
    for ex in tqdm.tqdm(dataset, desc='get qid-pid pairs', mininterval=3):
        for pos_doc_id in ex['positives']['doc_id']:
            qid_pids.append((ex['query_id'], pos_doc_id))
        for neg_doc_id in ex['negatives']['doc_id'][:args.kd_gen_score_n_neg]:
            qid_pids.append((ex['query_id'], neg_doc_id))
    dataset = Dataset.from_dict({'query_id': [t[0] for t in qid_pids],
                                 'doc_id': [t[1] for t in qid_pids]})
    query_ids, doc_ids = dataset['query_id'], dataset['doc_id']
    logger.info('GPU {} needs to process {} examples'.format(gpu_idx, len(dataset)))
    torch.cuda.set_device(gpu_idx)
    tokenizer: PreTrainedTokenizerFast = AutoTokenizer.from_pretrained(args.model_name_or_path)
    model: RerankerForInference = RerankerForInference.from_pretrained(args.model_name_or_path)
    model.eval()
    model.cuda()
    corpus: Dataset = load_corpus(path=os.path.join(args.data_dir, 'passages.jsonl.gz'))
    queries = load_queries(path='{}/{}_queries.tsv'.format(args.data_dir, args.kd_gen_score_split),
                           task_type=args.task_type)
    dataset.set_transform(partial(_kd_gen_score_transform_func, tokenizer, corpus, queries))
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if args.fp16 else None)
    data_loader = DataLoader(
        dataset,
        batch_size=args.kd_gen_score_batch_size,
        shuffle=False,
        drop_last=False,
        num_workers=args.dataloader_num_workers,
        collate_fn=data_collator,
        pin_memory=True)
    scores = []
    for batch_dict in tqdm.tqdm(data_loader, desc='generate teacher score', mininterval=5):
        batch_dict = move_to_cuda(batch_dict)
        with torch.cuda.amp.autocast() if args.fp16 else nullcontext():
            outputs: SequenceClassifierOutput = model(batch_dict)
        scores.append(outputs.logits.squeeze(dim=-1).cpu())
        assert len(scores[-1].shape) == 1
    all_scores = torch.cat(scores, dim=-1)
    # Bug fix: the assertion message was the literal '{} != {}' (never
    # formatted), so a failure hid the mismatching sizes; format it.
    assert all_scores.shape[0] == len(dataset), '{} != {}'.format(all_scores.shape[0], len(dataset))
    all_scores = all_scores.tolist()
    with open(_get_shard_path(gpu_idx), 'w', encoding='utf-8') as writer:
        for idx in range(len(query_ids)):
            writer.write('{}\t{}\t{}\n'.format(query_ids[idx], doc_ids[idx], round(all_scores[idx], 5)))
    logger.info('Done computing teacher score for worker {}'.format(gpu_idx))
def _merge_teacher_scores(worker_cnt: int):
    """Merge per-worker teacher-score shards and write them into the training file.

    Each worker wrote a TSV shard ("query_id\tdoc_id\tscore" per line). All
    shards are folded into a qid -> {pid: score} dict, the shard files are
    deleted, and the dataset at kd_gen_score_in_path is rewritten to
    kd_gen_score_out_path with teacher scores attached.

    Args:
        worker_cnt: number of shard files (one per GPU worker) to merge.
    """
    qid_to_pid_to_score = {}
    for worker_idx in range(worker_cnt):
        shard_path = _get_shard_path(worker_idx)
        # Use a context manager so the shard file is closed deterministically
        # before os.remove() (the original left the handle to the GC).
        with open(shard_path, 'r', encoding='utf-8') as reader:
            for line in tqdm.tqdm(reader,
                                  desc='Load shard {} score'.format(worker_idx), mininterval=3):
                fs = line.strip().split('\t')
                assert len(fs) == 3
                qid, pid, score = fs
                qid_to_pid_to_score.setdefault(qid, {})[pid] = float(score)
        os.remove(shard_path)
    dataset = load_dataset('json', data_files=kd_gen_score_in_path)['train']
    if args.dry_run:
        dataset = dataset.select(range(100))
    def _update_score(ex: Dict) -> Dict:
        # Scores exist only for the top kd_gen_score_n_neg negatives; drop
        # unscored negatives, then attach scores to positives and negatives.
        query_id = ex['query_id']
        pid_to_score = qid_to_pid_to_score[query_id]
        ex['negatives']['doc_id'] = [neg_doc_id for neg_doc_id in ex['negatives']['doc_id']
                                     if neg_doc_id in pid_to_score]
        ex['positives']['score'] = [pid_to_score[pos_doc_id] for pos_doc_id in ex['positives']['doc_id']]
        ex['negatives']['score'] = [pid_to_score[neg_doc_id] for neg_doc_id in ex['negatives']['doc_id']]
        return ex
    dataset = dataset.map(_update_score, num_proc=4)
    logger.info('Writing teacher score to {}'.format(kd_gen_score_out_path))
    dataset.to_json(kd_gen_score_out_path, force_ascii=False, lines=True)
logger = _setup_logger()
def load_corpus(path: str) -> Dataset:
    """Load a jsonl / gzipped-jsonl corpus (fields: id, contents) as a HF Dataset."""
    assert path.endswith(('.jsonl', '.jsonl.gz'))
    loaded = load_dataset('json', data_files=path)['train']
    logger.info('Load {} documents from {} with columns {}'.format(
        len(loaded), path, loaded.column_names))
    logger.info('A random document: {}'.format(random.choice(loaded)))
    return loaded
def save_to_readable_format(in_path: str, corpus: Dataset):
    """Write a human-readable (indented json, document text inlined) copy of a data file."""
    out_path = '{}/readable_{}'.format(os.path.dirname(in_path), os.path.basename(in_path))
    dataset: Dataset = load_dataset('json', data_files=in_path)['train']
    max_to_keep = 5
    def _create_readable_field(samples: Dict[str, List]) -> List:
        # Inline title / contents for at most max_to_keep documents per example.
        readable_ex = []
        keep = min(len(samples['doc_id']), max_to_keep)
        for idx in range(keep):
            doc_id = samples['doc_id'][idx]
            doc = corpus[int(doc_id)]
            readable_ex.append({'doc_id': doc_id,
                                'title': doc.get('title', ''),
                                'contents': doc['contents'],
                                'score': samples['score'][idx]})
        return readable_ex
    def _mp_func(ex: Dict) -> Dict:
        ex['positives'] = _create_readable_field(ex['positives'])
        ex['negatives'] = _create_readable_field(ex['negatives'])
        return ex
    dataset = dataset.map(_mp_func, num_proc=8)
    dataset.to_json(out_path, force_ascii=False, lines=False, indent=4)
    logger.info('Done convert {} to readable format in {}'.format(in_path, out_path))
def _batch_compute_teacher_score():
    """Spawn one scoring worker per GPU, merge their shards, and dump a readable copy."""
    logger.info('Args={}'.format(str(args)))
    n_gpus = torch.cuda.device_count()
    if n_gpus == 0:
        logger.error('No gpu available')
        return
    logger.info('Use {} gpus'.format(n_gpus))
    torch.multiprocessing.spawn(_worker_gen_teacher_score, args=(), nprocs=n_gpus)
    logger.info('Done batch generate teacher score')
    _merge_teacher_scores(n_gpus)
    logger.info('Done merge results')
    corpus = load_corpus(path=os.path.join(args.data_dir, 'passages.jsonl.gz'))
    save_to_readable_format(in_path=kd_gen_score_out_path, corpus=corpus)
corpus = load_corpus(path=os.path.join(args.data_dir, 'passages.jsonl.gz'))
save_to_readable_format(in_path=kd_gen_score_out_path, corpus=corpus) | null |
183,649 | import random
from typing import Tuple
from transformers import PreTrainedTokenizerFast
from datasets import Dataset, load_dataset
from config import Arguments
from logger_config import logger
logger = _setup_logger()
def split_dataset(dataset: Dataset,
                  num_eval_examples: int,
                  max_train_samples: int = None) -> Tuple[Dataset, Dataset]:
    """Deterministically split off an eval set; the remainder becomes train.

    Args:
        dataset: full dataset to split.
        num_eval_examples: number of examples reserved for evaluation.
        max_train_samples: optional cap on the training split size.

    Returns:
        (train_dataset, eval_dataset) tuple.
    """
    indices = list(range(len(dataset)))
    # Fixed seed so train/eval membership is stable across runs.
    random.Random(123).shuffle(indices)
    eval_dataset = dataset.select(indices[:num_eval_examples])
    train_dataset = dataset.select(indices[num_eval_examples:])
    if max_train_samples is not None:
        # Clamp to the available size: Dataset.select raises on out-of-range indices.
        train_dataset = train_dataset.select(range(min(max_train_samples, len(train_dataset))))

    # Log a few random samples from the training set (guard tiny train sets:
    # random.sample raises when the population is smaller than k).
    for index in random.sample(range(len(train_dataset)), k=min(3, len(train_dataset))):
        logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    return train_dataset, eval_dataset
183,650 | from typing import List, Dict
def _slice_with_mod(elements: List, offset: int, cnt: int) -> List:
    """Take ``cnt`` items starting at ``offset``, wrapping around the list."""
    n = len(elements)
    return [elements[i % n] for i in range(offset, offset + cnt)]
def group_doc_ids(examples: Dict[str, List],
                  negative_size: int,
                  offset: int,
                  use_first_positive: bool = False) -> List[int]:
    """Flatten each example into [positive, neg_1, ..., neg_k] integer doc ids.

    For every query, one positive is picked (rotating through the candidates
    with ``offset``) followed by ``negative_size`` hard negatives (rotating
    with ``offset * negative_size``).
    """
    def _wrap_take(items: List, start: int, cnt: int) -> List:
        # circular slice: cnt items beginning at start, modulo len(items)
        return [items[(start + i) % len(items)] for i in range(cnt)]

    pos_doc_ids: List[int] = []
    for idx, ex_pos in enumerate(examples['positives']):
        candidates = ex_pos['doc_id']
        if use_first_positive:
            # keep positives that score at least as high as the first one,
            # or strictly higher than every negative
            candidates = [doc_id for p_idx, doc_id in enumerate(candidates)
                          if p_idx == 0 or ex_pos['score'][p_idx] >= ex_pos['score'][0]
                          or ex_pos['score'][p_idx] > max(examples['negatives'][idx]['score'])]
        pos_doc_ids.append(int(_wrap_take(candidates, offset, 1)[0]))

    neg_doc_ids: List[List[int]] = []
    for ex_neg in examples['negatives']:
        picked = _wrap_take(ex_neg['doc_id'], offset * negative_size, negative_size)
        neg_doc_ids.append([int(doc_id) for doc_id in picked])

    assert len(pos_doc_ids) == len(neg_doc_ids), '{} != {}'.format(len(pos_doc_ids), len(neg_doc_ids))
    assert all(len(doc_ids) == negative_size for doc_ids in neg_doc_ids)

    input_doc_ids: List[int] = []
    for pos_doc_id, neg_ids in zip(pos_doc_ids, neg_doc_ids):
        input_doc_ids.append(pos_doc_id)
        input_doc_ids += neg_ids
    return input_doc_ids
183,651 | import logging
import torch
from typing import Dict
from transformers.utils.logging import enable_explicit_format
from transformers.trainer_callback import PrinterCallback
from transformers import (
AutoTokenizer,
HfArgumentParser,
EvalPrediction,
Trainer,
set_seed,
PreTrainedTokenizerFast
)
from logger_config import logger, LoggerCallback
from config import Arguments
from trainers.reranker_trainer import RerankerTrainer
from loaders import CrossEncoderDataLoader
from collators import CrossEncoderCollator
from metrics import accuracy
from models import Reranker
logger = _setup_logger()
class Arguments(TrainingArguments):
def __post_init__(self):
def _common_setup(args: Arguments):
    """Shared run setup: quiet logging on non-main ranks, then seed everything."""
    # Only rank 0 keeps INFO-level logging to avoid duplicated log lines
    # across distributed workers.
    if args.process_index > 0:
        logger.setLevel(logging.WARNING)
    enable_explicit_format()
    set_seed(args.seed)
183,652 | import logging
import torch
from typing import Dict
from transformers.utils.logging import enable_explicit_format
from transformers.trainer_callback import PrinterCallback
from transformers import (
AutoTokenizer,
HfArgumentParser,
EvalPrediction,
Trainer,
set_seed,
PreTrainedTokenizerFast
)
from logger_config import logger, LoggerCallback
from config import Arguments
from trainers.reranker_trainer import RerankerTrainer
from loaders import CrossEncoderDataLoader
from collators import CrossEncoderCollator
from metrics import accuracy
from models import Reranker
def accuracy(output: torch.tensor, target: torch.tensor, topk=(1,)) -> List[float]:
def _compute_metrics(eval_pred: EvalPrediction) -> Dict:
    """Compute top-1 accuracy from a HF Trainer EvalPrediction."""
    raw_preds = eval_pred.predictions
    # Some models emit a tuple of outputs; the logits come last.
    if isinstance(raw_preds, tuple):
        raw_preds = raw_preds[-1]
    logits = torch.tensor(raw_preds).float()
    labels = torch.tensor(eval_pred.label_ids).long()
    return {'acc': accuracy(output=logits, target=labels)[0]}
183,653 | import os
import torch
from typing import Optional, Dict, Tuple
from transformers.trainer import Trainer
from logger_config import logger
from metrics import accuracy, batch_mrr
from models import BiencoderOutput, BiencoderModel
from utils import AverageMeter
def _unpack_qp(inputs: Dict[str, torch.Tensor]) -> Tuple:
    """Split a merged batch into (query_batch, doc_batch) by key prefix.

    Keys starting with 'q_' / 'd_' go to the query / doc batch (prefix
    stripped). A 'kd_labels' entry rides along with the query batch, which
    must then be non-empty. Empty batches are returned as None.
    """
    query_batch_dict = {}
    doc_batch_dict = {}
    for key, value in inputs.items():
        if key.startswith('q_'):
            query_batch_dict[key[2:]] = value
        elif key.startswith('d_'):
            doc_batch_dict[key[2:]] = value
    if 'kd_labels' in inputs:
        assert len(query_batch_dict) > 0
        query_batch_dict['kd_labels'] = inputs['kd_labels']
    return query_batch_dict or None, doc_batch_dict or None
183,654 | import torch
from dataclasses import dataclass
from typing import List, Dict, Any
from transformers import DataCollatorWithPadding, BatchEncoding
def _unpack_doc_values(features: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Flatten dict-of-lists examples into a list of per-document dicts.

    Each feature maps key -> list of per-document values; position i across
    all lists becomes output dict i.
    """
    flattened: List[Dict[str, Any]] = []
    for feature in features:
        keys = list(feature.keys())
        num_docs = len(feature[keys[0]])
        flattened.extend({key: feature[key][i] for key in keys}
                         for i in range(num_docs))
    return flattened
183,655 | import torch
import random
import warnings
from transformers import BertTokenizer, BertTokenizerFast, BatchEncoding
from typing import List, Union, Tuple, Any, Dict
The provided code snippet includes necessary dependencies for implementing the `whole_word_mask` function. Write a Python function `def whole_word_mask(tokenizer: Union[BertTokenizer, BertTokenizerFast], input_tokens: List[str], mlm_prob: float, max_predictions=512) -> List[int]` to solve the following problem:
Get 0/1 labels for masked tokens with whole word mask proxy
Here is the function:
def whole_word_mask(tokenizer: Union[BertTokenizer, BertTokenizerFast],
                    input_tokens: List[str],
                    mlm_prob: float,
                    max_predictions=512) -> List[int]:
    """
    Get 0/1 labels for masked tokens with whole word mask proxy
    """
    if not isinstance(tokenizer, (BertTokenizer, BertTokenizerFast)):
        warnings.warn(
            "DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
            "Please refer to the documentation for more information."
        )

    # Group WordPiece continuations ('##'-prefixed) with their leading
    # sub-token so that a whole word is always masked together.
    cand_indexes: List[List[int]] = []
    for i, token in enumerate(input_tokens):
        if token in ("[CLS]", "[SEP]"):
            continue
        if cand_indexes and token.startswith("##"):
            cand_indexes[-1].append(i)
        else:
            cand_indexes.append([i])

    random.shuffle(cand_indexes)
    num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * mlm_prob))))

    masked_lms: List[int] = []
    covered_indexes = set()
    for index_set in cand_indexes:
        if len(masked_lms) >= num_to_predict:
            break
        # Masking this whole word would overshoot the budget: skip it.
        if len(masked_lms) + len(index_set) > num_to_predict:
            continue
        # Skip words overlapping an already-covered index.
        if any(index in covered_indexes for index in index_set):
            continue
        covered_indexes.update(index_set)
        masked_lms.extend(index_set)

    if len(covered_indexes) != len(masked_lms):
        raise ValueError("Length of covered_indexes is not equal to length of masked_lms.")
    return [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
183,656 | import torch
import random
import warnings
from transformers import BertTokenizer, BertTokenizerFast, BatchEncoding
from typing import List, Union, Tuple, Any, Dict
The provided code snippet includes necessary dependencies for implementing the `torch_mask_tokens` function. Write a Python function `def torch_mask_tokens(tokenizer: Union[BertTokenizer, BertTokenizerFast], inputs: torch.Tensor, mask_labels: torch.Tensor, all_use_mask_token: bool = False) -> Tuple[Any, Any]` to solve the following problem:
Prepare masked-token inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Supplying 'mask_labels' means whole word masking (wwm) is used: token indices are masked directly according to that reference mask.
Here is the function:
def torch_mask_tokens(tokenizer: Union[BertTokenizer, BertTokenizerFast],
                      inputs: torch.Tensor,
                      mask_labels: torch.Tensor,
                      all_use_mask_token: bool = False) -> Tuple[Any, Any]:
    """
    Prepare masked-token inputs/labels for masked language modeling:
    80% [MASK], 10% random token, 10% left unchanged.

    'mask_labels' is a precomputed 0/1 whole-word-mask (wwm) matrix (e.g.
    from whole_word_mask): positions are masked directly according to it
    instead of being sampled here. When all_use_mask_token is True, every
    masked position becomes [MASK] (no 80/10/10 split).

    Returns:
        (masked_inputs, labels) — labels is -100 everywhere except masked
        positions, so loss is only computed on masked tokens.
    """
    if tokenizer.mask_token is None:
        raise ValueError(
            "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
        )
    labels = inputs.clone()
    masked_inputs = inputs.clone()
    # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
    probability_matrix = mask_labels.clone()
    # Never mask special tokens ([CLS], [SEP], ...) or padding.
    special_tokens_mask = [
        tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
    ]
    probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
    # NOTE(review): _pad_token is a private tokenizer attribute; newer
    # transformers exposes tokenizer.pad_token — confirm before upgrading.
    if tokenizer._pad_token is not None:
        padding_mask = labels.eq(tokenizer.pad_token_id)
        probability_matrix.masked_fill_(padding_mask, value=0.0)
    masked_indices = probability_matrix.bool()
    labels[~masked_indices] = -100  # We only compute loss on masked tokens
    if all_use_mask_token:
        masked_inputs[masked_indices] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
        return masked_inputs, labels
    # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
    masked_inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    # 10% of the time, we replace masked input tokens with random word
    # (0.5 of the remaining 20% == 10% overall)
    indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
    random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
    masked_inputs[indices_random] = random_words[indices_random]
    # The rest of the time (10% of the time) we keep the masked input tokens unchanged
    return masked_inputs, labels
183,657 | import torch
import random
import warnings
from transformers import BertTokenizer, BertTokenizerFast, BatchEncoding
from typing import List, Union, Tuple, Any, Dict
def merge_batch_dict(src_batch_dict: Union[Dict, BatchEncoding],
                     tgt_batch_dict: Union[Dict, BatchEncoding],
                     prefix: str = None):
    """Copy every tensor from src into tgt under an optionally prefixed key."""
    key_prefix = prefix or ''
    for key in src_batch_dict:
        tgt_batch_dict[key_prefix + key] = src_batch_dict[key].clone()
183,658 | import logging
import numpy as np
from typing import Dict
from transformers.utils.logging import enable_explicit_format
from transformers.trainer_callback import PrinterCallback
from transformers import (
AutoTokenizer,
HfArgumentParser,
set_seed,
PreTrainedTokenizerFast,
EvalPrediction,
)
from logger_config import logger, LoggerCallback
from config import Arguments
from loaders import ReplaceLMDataloader
from collators import DataCollatorForReplaceLM
from trainers import ReplaceLMTrainer
from models import ReplaceLM
logger = _setup_logger()
class Arguments(TrainingArguments):
    """Single flat configuration for every pipeline stage — biencoder/reranker
    training, corpus encoding, index search, reranking, teacher-score
    generation and knowledge distillation — extending HF TrainingArguments.

    NOTE(review): relies on field/Optional/TrainingArguments/os/torch/logger
    being imported at module level — confirm against the full file.
    """
    model_name_or_path: str = field(
        default='bert-base-uncased',
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    data_dir: str = field(
        default=None, metadata={"help": "Path to train directory"}
    )
    task_type: str = field(
        default='ir', metadata={"help": "task type: ir / qa"}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a jsonlines file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={
            "help": "An optional input evaluation data file to evaluate the metrics on (a jsonlines file)."
        },
    )
    train_n_passages: int = field(
        default=8,
        metadata={"help": "number of passages for each example (including both positive and negative passages)"}
    )
    share_encoder: bool = field(
        default=True,
        metadata={"help": "no weight sharing between qry passage encoders"}
    )
    use_first_positive: bool = field(
        default=False,
        metadata={"help": "Always use the first positive passage"}
    )
    use_scaled_loss: bool = field(
        default=True,
        metadata={"help": "Use scaled loss or not"}
    )
    loss_scale: float = field(
        default=-1.,
        metadata={"help": "loss scale, -1 will use world_size"}
    )
    add_pooler: bool = field(default=False)
    out_dimension: int = field(
        default=768,
        metadata={"help": "output dimension for pooler"}
    )
    t: float = field(default=0.05, metadata={"help": "temperature of biencoder training"})
    l2_normalize: bool = field(default=True, metadata={"help": "L2 normalize embeddings or not"})
    t_warmup: bool = field(default=False, metadata={"help": "warmup temperature"})
    full_contrastive_loss: bool = field(default=True, metadata={"help": "use full contrastive loss or not"})
    # following arguments are used for encoding documents
    do_encode: bool = field(default=False, metadata={"help": "run the encoding loop"})
    encode_in_path: str = field(default=None, metadata={"help": "Path to data to encode"})
    encode_save_dir: str = field(default=None, metadata={"help": "where to save the encode"})
    encode_shard_size: int = field(default=int(2 * 10**6))
    encode_batch_size: int = field(default=256)
    # used for index search
    do_search: bool = field(default=False, metadata={"help": "run the index search loop"})
    search_split: str = field(default='dev', metadata={"help": "which split to search"})
    search_batch_size: int = field(default=128, metadata={"help": "query batch size for index search"})
    search_topk: int = field(default=200, metadata={"help": "return topk search results"})
    search_out_dir: str = field(default='', metadata={"help": "output directory for writing search results"})
    # used for reranking
    do_rerank: bool = field(default=False, metadata={"help": "run the reranking loop"})
    rerank_max_length: int = field(default=256, metadata={"help": "max length for rerank inputs"})
    rerank_in_path: str = field(default='', metadata={"help": "Path to predictions for rerank"})
    rerank_out_path: str = field(default='', metadata={"help": "Path to write rerank results"})
    rerank_split: str = field(default='dev', metadata={"help": "which split to rerank"})
    rerank_batch_size: int = field(default=128, metadata={"help": "rerank batch size"})
    rerank_depth: int = field(default=1000, metadata={"help": "rerank depth, useful for debugging purpose"})
    rerank_forward_factor: int = field(
        default=1,
        metadata={"help": "forward n passages, then select top n/factor passages for backward"}
    )
    rerank_use_rdrop: bool = field(default=False, metadata={"help": "use R-Drop regularization for re-ranker"})
    # used for knowledge distillation
    do_kd_gen_score: bool = field(default=False, metadata={"help": "run the score generation for distillation"})
    kd_gen_score_split: str = field(default='dev', metadata={
        "help": "Which split to use for generation of teacher score"
    })
    kd_gen_score_batch_size: int = field(default=128, metadata={"help": "batch size for teacher score generation"})
    kd_gen_score_n_neg: int = field(default=30, metadata={"help": "number of negatives to compute teacher scores"})
    do_kd_biencoder: bool = field(default=False, metadata={"help": "knowledge distillation to biencoder"})
    kd_mask_hn: bool = field(default=True, metadata={"help": "mask out hard negatives for distillation"})
    kd_cont_loss_weight: float = field(default=1.0, metadata={"help": "weight for contrastive loss"})
    # replace-LM (MatchLM) pre-training options
    rlm_generator_model_name: Optional[str] = field(
        default='google/electra-base-generator',
        metadata={"help": "generator for replace LM pre-training"}
    )
    rlm_freeze_generator: Optional[bool] = field(
        default=True,
        metadata={'help': 'freeze generator params or not'}
    )
    rlm_generator_mlm_weight: Optional[float] = field(
        default=0.2,
        metadata={'help': 'weight for generator MLM loss'}
    )
    all_use_mask_token: Optional[bool] = field(
        default=False,
        metadata={'help': 'Do not use 80:10:10 mask, use [MASK] for all places'}
    )
    rlm_num_eval_samples: Optional[int] = field(
        default=4096,
        metadata={"help": "number of evaluation samples pre-training"}
    )
    rlm_max_length: Optional[int] = field(
        default=144,
        metadata={"help": "max length for MatchLM pre-training"}
    )
    rlm_decoder_layers: Optional[int] = field(
        default=2,
        metadata={"help": "number of transformer layers for MatchLM decoder part"}
    )
    rlm_encoder_mask_prob: Optional[float] = field(
        default=0.3,
        metadata={'help': 'mask rate for encoder'}
    )
    rlm_decoder_mask_prob: Optional[float] = field(
        default=0.5,
        metadata={'help': 'mask rate for decoder'}
    )
    q_max_len: int = field(
        default=32,
        metadata={
            "help": "The maximum total input sequence length after tokenization for query."
        },
    )
    p_max_len: int = field(
        default=144,
        metadata={
            "help": "The maximum total input sequence length after tokenization for passage."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
                    "value if set."
        },
    )
    dry_run: Optional[bool] = field(
        default=False,
        metadata={'help': 'Set dry_run to True for debugging purpose'}
    )

    def __post_init__(self):
        """Validate paths/flags, apply dry-run shrinking, then defer to the parent."""
        assert os.path.exists(self.data_dir)
        assert torch.cuda.is_available(), 'Only support running on GPUs'
        assert self.task_type in ['ir', 'qa']
        # Dry run: shrink every knob so a complete pass finishes quickly.
        if self.dry_run:
            self.logging_steps = 1
            self.max_train_samples = self.max_train_samples or 128
            self.num_train_epochs = 1
            self.per_device_train_batch_size = min(2, self.per_device_train_batch_size)
            self.train_n_passages = min(4, self.train_n_passages)
            self.rerank_forward_factor = 1
            self.gradient_accumulation_steps = 1
            self.rlm_num_eval_samples = min(256, self.rlm_num_eval_samples)
            self.max_steps = 30
            self.save_steps = self.eval_steps = 30
            logger.warning('Dry run: set logging_steps=1')
        if self.do_encode:
            assert self.encode_save_dir
            os.makedirs(self.encode_save_dir, exist_ok=True)
            assert os.path.exists(self.encode_in_path)
        if self.do_search:
            assert os.path.exists(self.encode_save_dir)
            assert self.search_out_dir
            os.makedirs(self.search_out_dir, exist_ok=True)
        if self.do_rerank:
            assert os.path.exists(self.rerank_in_path)
            logger.info('Rerank result will be written to {}'.format(self.rerank_out_path))
            assert self.train_n_passages > 1, 'Having positive passages only does not make sense for training re-ranker'
            assert self.train_n_passages % self.rerank_forward_factor == 0
        if self.do_kd_gen_score:
            assert os.path.exists('{}/{}.jsonl'.format(self.data_dir, self.kd_gen_score_split))
        if self.do_kd_biencoder:
            if self.use_scaled_loss:
                assert not self.kd_mask_hn, 'Use scaled loss only works with not masking out hard negatives'
        # Single-GPU runs log more frequently.
        if torch.cuda.device_count() <= 1:
            self.logging_steps = min(10, self.logging_steps)
        super(Arguments, self).__post_init__()
        if self.output_dir:
            os.makedirs(self.output_dir, exist_ok=True)
        self.label_names = ['labels']
def _common_setup(args: Arguments):
    """Shared run setup: quiet logging on non-main ranks, then seed everything."""
    # Only rank 0 keeps INFO-level logging to avoid duplicated log lines
    # across distributed workers.
    if args.process_index > 0:
        logger.setLevel(logging.WARNING)
    enable_explicit_format()
    set_seed(args.seed)
183,659 | import logging
import numpy as np
from typing import Dict
from transformers.utils.logging import enable_explicit_format
from transformers.trainer_callback import PrinterCallback
from transformers import (
AutoTokenizer,
HfArgumentParser,
set_seed,
PreTrainedTokenizerFast,
EvalPrediction,
)
from logger_config import logger, LoggerCallback
from config import Arguments
from loaders import ReplaceLMDataloader
from collators import DataCollatorForReplaceLM
from trainers import ReplaceLMTrainer
from models import ReplaceLM
def _compute_metrics(eval_pred: EvalPrediction) -> Dict[str, float]:
    """Average the per-batch losses / replace ratio accumulated during evaluation."""
    preds = eval_pred.predictions
    # predictions arrive in this fixed order: encoder MLM loss, decoder MLM
    # loss, generator MLM loss, replace ratio.
    names = ('avg_enc_mlm_loss', 'avg_dec_mlm_loss', 'avg_g_mlm_loss', 'avg_replace_ratio')
    return {name: round(float(np.mean(preds[i])), 4) for i, name in enumerate(names)}
183,660 | import os
import io
import gzip
import json
import random
import argparse
import ir_datasets
import numpy as np
import sys
from tqdm import tqdm
from typing import Dict, List
from datasets import Dataset
from logger_config import logger
from utils import save_json_to_file
from data_utils import load_msmarco_predictions, load_queries, load_qrels, load_corpus, \
ScoredDoc, save_to_readable_format
args = parser.parse_args()
os.makedirs(args.out_dir, exist_ok=True)
logger.info('Args: {}'.format(json.dumps(args.__dict__, ensure_ascii=False, indent=4)))
logger = _setup_logger()
def _write_corpus_to_disk():
    """Dump the MS MARCO passage corpus to <out_dir>/passages.jsonl.gz.

    Each output line is {'id', 'contents'}, plus 'title' when a title file
    (tab-separated, aligned with corpus order) is available.
    """
    dataset = ir_datasets.load('msmarco-passage/train')
    titles = []
    if os.path.exists(args.title_path):
        # Close the title file deterministically instead of leaking the
        # handle from open(...).readlines().
        with open(args.title_path) as f:
            titles = [line.strip().split('\t')[1] for line in tqdm(f.readlines(), desc='load title')]
        logger.info('Load {} titles from {}'.format(len(titles), args.title_path))
    else:
        logger.warning('No title data found: {}'.format(args.title_path))
    title_idx = 0
    out_path = os.path.join(args.out_dir, 'passages.jsonl.gz')
    with gzip.open(out_path, 'wb') as output:
        with io.TextIOWrapper(output, encoding='utf-8') as writer:
            for doc in tqdm(dataset.docs_iter()):
                ex = {'id': doc.doc_id, 'contents': doc.text}
                if titles:
                    # titles are consumed sequentially, assumed aligned with docs_iter() order
                    ex['title'] = titles[title_idx]
                    title_idx += 1
                writer.write(json.dumps(ex, ensure_ascii=False, separators=(',', ':')))
                writer.write('\n')
    if titles:
        assert title_idx == len(titles), '{} != {}'.format(title_idx, len(titles))
183,661 | import os
import io
import gzip
import json
import random
import argparse
import ir_datasets
import numpy as np
import sys
from tqdm import tqdm
from typing import Dict, List
from datasets import Dataset
from logger_config import logger
from utils import save_json_to_file
from data_utils import load_msmarco_predictions, load_queries, load_qrels, load_corpus, \
ScoredDoc, save_to_readable_format
logger.info('Args: {}'.format(json.dumps(args.__dict__, ensure_ascii=False, indent=4)))
logger = _setup_logger()
def _write_queries_to_disk(split: str, out_path: str):
    """Write one '<query_id>\\t<text>' line per query of an MS MARCO split."""
    split_data = ir_datasets.load("msmarco-passage/{}".format(split))
    with open(out_path, 'w', encoding='utf-8') as writer:
        writer.writelines('{}\t{}\n'.format(q.query_id, q.text)
                          for q in split_data.queries_iter())
    logger.info('Write {} queries to {}'.format(split, out_path))
183,662 | import os
import io
import gzip
import json
import random
import argparse
import ir_datasets
import numpy as np
import sys
from tqdm import tqdm
from typing import Dict, List
from datasets import Dataset
from logger_config import logger
from utils import save_json_to_file
from data_utils import load_msmarco_predictions, load_queries, load_qrels, load_corpus, \
ScoredDoc, save_to_readable_format
logger.info('Args: {}'.format(json.dumps(args.__dict__, ensure_ascii=False, indent=4)))
logger = _setup_logger()
def _write_qrels_to_disk(split: str, out_path: str):
    """Dump qrels as 'query_id\\titeration\\tdoc_id\\trelevance' TSV lines."""
    split_data = ir_datasets.load("msmarco-passage/{}".format(split))
    with open(out_path, 'w', encoding='utf-8') as writer:
        for qrel in split_data.qrels_iter():
            line = '{}\t{}\t{}\t{}\n'.format(qrel.query_id, qrel.iteration, qrel.doc_id, qrel.relevance)
            writer.write(line)
    logger.info('Write {} qrels to {}'.format(split, out_path))
183,663 | import os
import io
import gzip
import json
import random
import argparse
import ir_datasets
import numpy as np
import sys
from tqdm import tqdm
from typing import Dict, List
from datasets import Dataset
from logger_config import logger
from utils import save_json_to_file
from data_utils import load_msmarco_predictions, load_queries, load_qrels, load_corpus, \
ScoredDoc, save_to_readable_format
args = parser.parse_args()
logger.info('Args: {}'.format(json.dumps(args.__dict__, ensure_ascii=False, indent=4)))
logger = _setup_logger()
class ScoredDoc:
    """A single (query, passage) retrieval hit.

    NOTE(review): uses dataclasses.field, so this is presumably decorated
    with @dataclass at the definition site — confirm in the original source.
    """
    qid: str  # query id
    pid: str  # passage/doc id
    rank: int  # rank in the retrieved list
    score: float = field(default=-1)  # retrieval score; -1 when unknown
def _write_prepared_data_to_disk(out_path: str,
                                 corpus: Dataset,
                                 queries: Dict[str, str],
                                 qrels: Dict[str, Dict[str, int]],
                                 preds: Dict[str, List[ScoredDoc]],
                                 is_train: bool = False):
    """Write one jsonl training/eval example per judged query.

    Each example carries the judged positives and a sampled set of scored
    hard negatives, padded with random corpus docs when retrieval yields
    too few.

    Args:
        out_path: destination jsonl file.
        corpus: passage corpus, used to draw random negative doc ids.
        queries: qid -> query text.
        qrels: qid -> {doc_id: relevance} judged positives.
        preds: qid -> ranked list of retrieved ScoredDoc.
        is_train: enables noisy-positive filtering (args.filter_noisy_positives).
    """
    cnt_noisy_positive = 0
    cnt_output = 0
    with open(out_path, 'w', encoding='utf-8') as writer:
        for query_id in tqdm(qrels, mininterval=2):
            positive_doc_ids: Dict = qrels.get(query_id)
            if not positive_doc_ids:
                logger.warning('No positive found for query_id={}'.format(query_id))
                continue
            # A "noisy" positive is one the retriever never surfaces at all.
            if is_train and args.filter_noisy_positives \
                    and all(sd.pid not in positive_doc_ids for sd in preds.get(query_id, [])):
                cnt_noisy_positive += 1
                continue
            # For official triples, only use those with negative doc ids
            if not preds.get(query_id, []):
                continue
            doc_id_to_score = {scored_doc.pid: scored_doc.score for scored_doc in preds.get(query_id, [])}
            negative_scored_docs = [scored_doc for scored_doc in preds.get(query_id, [])
                                    if scored_doc.pid not in positive_doc_ids][:args.depth]
            np.random.shuffle(negative_scored_docs)
            # Reserve args.num_random_neg slots for random negatives added below.
            negative_scored_docs = negative_scored_docs[:(args.num_negatives - args.num_random_neg)]
            if len(negative_scored_docs) < args.num_negatives:
                if not negative_scored_docs:
                    # Bug fix: the format arguments used to be (len, query_text,
                    # query_id), so the log printed "query_id=0 (<text>)".
                    logger.warning('No negatives found for query_id={} ({}), will use random negatives'
                                   .format(query_id, queries[query_id]))
                # NOTE(review): loops until enough unseen random ids are drawn;
                # assumes the corpus is much larger than num_negatives.
                while len(negative_scored_docs) < args.num_negatives:
                    sd = ScoredDoc(qid=query_id, pid=str(random.randint(0, len(corpus) - 1)), rank=args.depth)
                    if sd.pid not in positive_doc_ids and sd.pid not in doc_id_to_score:
                        negative_scored_docs.append(sd)
                np.random.shuffle(negative_scored_docs)
            example = {'query_id': query_id,
                       'query': queries[query_id],
                       'positives': {'doc_id': list(positive_doc_ids),
                                     'score': [doc_id_to_score.get(doc_id, -1.) for doc_id in positive_doc_ids]
                                     },
                       'negatives': {'doc_id': [scored_doc.pid for scored_doc in negative_scored_docs],
                                     'score': [scored_doc.score for scored_doc in negative_scored_docs]
                                     },
                       }
            writer.write(json.dumps(example, ensure_ascii=False, separators=(',', ':')))
            writer.write('\n')
            cnt_output += 1
    if is_train and args.filter_noisy_positives:
        logger.info('Filter {} noisy positives'.format(cnt_noisy_positive))
    logger.info('Write {} examples to {}'.format(cnt_output, out_path))
183,664 | import argparse
import copy
import json
import logging
import re
import unicodedata
from tqdm import tqdm
import numpy as np
import regex
class SimpleTokenizer(Tokenizer):
    """Regex tokenizer: alphanumeric runs (incl. combining marks) or single
    non-whitespace characters. Tokenizes only — no annotations."""
    ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'  # letters/digits/marks (Unicode categories)
    NON_WS = r'[^\p{Z}\p{C}]'          # any single non-space, non-control char
    def __init__(self, **kwargs):
        """
        Args:
            annotators: None or empty set (only tokenizes).
        """
        self._regexp = regex.compile(
            '(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
            flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
        )
        if len(kwargs.get('annotators', {})) > 0:
            logger.warning('%s only tokenizes! Skipping annotators: %s' %
                           (type(self).__name__, kwargs.get('annotators')))
        self.annotators = set()
    def tokenize(self, text):
        """Tokenize text into (token, token+trailing_ws, span) triples wrapped in Tokens."""
        data = []
        matches = [m for m in self._regexp.finditer(text)]
        for i in range(len(matches)):
            # The matched token itself
            token = matches[i].group()
            # Token text plus trailing whitespace: runs up to the start of the
            # next match (or the end of this match for the last token).
            span = matches[i].span()
            start_ws = span[0]
            if i + 1 < len(matches):
                end_ws = matches[i + 1].span()[0]
            else:
                end_ws = span[1]
            # (token, token_with_ws, (start, end)) — the layout Tokens expects
            data.append((
                token,
                text[start_ws: end_ws],
                span,
            ))
        return Tokens(data, self.annotators)
def has_answers(text, answers, tokenizer, regex=False):
    """Return True iff any answer occurs in text.

    With regex=True, answers are treated as regex patterns against the
    normalized text; otherwise a token-level (uncased) subsequence match
    is performed.
    """
    text = _normalize(text)
    if regex:
        return any(regex_match(text, _normalize(ans)) for ans in answers)
    text_words = tokenizer.tokenize(text).words(uncased=True)
    for ans in answers:
        ans_words = tokenizer.tokenize(_normalize(ans)).words(uncased=True)
        width = len(ans_words)
        for start in range(0, len(text_words) - width + 1):
            if text_words[start: start + width] == ans_words:
                return True
    return False
def evaluate_retrieval(retrieval_file, topk, regex=False) -> dict:
    """Compute Acc@k (an answer appears in the top-k contexts) per cutoff.

    Args:
        retrieval_file: json file mapping qid -> {'answers': [...], 'contexts': [...]}.
        topk: iterable of cutoffs k to report Acc@k for.
        regex: treat answers as regex patterns when matching.

    Returns:
        dict mapping 'Acc<k>' -> mean accuracy over all queries.
    """
    tokenizer = SimpleTokenizer()
    # Close the file handle deterministically (was json.load(open(...))).
    with open(retrieval_file) as f:
        retrieval = json.load(f)
    accuracy = {k: [] for k in topk}
    max_k = max(topk)
    for qid in tqdm(list(retrieval.keys())):
        answers = retrieval[qid]['answers']
        contexts = retrieval[qid]['contexts']
        has_ans_idx = max_k  # first index in contexts that has answers
        for idx, ctx in enumerate(contexts):
            if idx >= max_k:
                break
            # Prefer a precomputed flag; fall back to text matching.
            if 'has_answer' in ctx:
                if ctx['has_answer']:
                    has_ans_idx = idx
                    break
            else:
                text = ctx['text'].split('\n')[1]  # [0] is title, [1] is text
                if has_answers(text, answers, tokenizer, regex):
                    has_ans_idx = idx
                    break
        for k in topk:
            accuracy[k].append(0 if has_ans_idx >= k else 1)
    metrics = {}
    for k in topk:
        metrics['Acc{}'.format(k)] = np.mean(accuracy[k])
    return metrics
183,665 | import os
import json
import argparse
import sys
import numpy as np
from tqdm import tqdm
from typing import Dict, Any
from logger_config import logger
from data_utils import load_query_answers, load_corpus, save_to_readable_format
# Parse CLI arguments (`parser` is constructed earlier in this file).
args = parser.parse_args()
logger.info('Args: {}'.format(json.dumps(args.__dict__, ensure_ascii=False, indent=4)))
# Only Natural Questions ('nq') and TriviaQA ('tq') are supported downstream.
assert args.task in ['nq', 'tq']
def _load_qid_to_positive(path: str) -> Dict[str, str]:
    """Load the question -> manually-labeled positive passage id mapping.

    Only NQ ships manual positives; for other tasks an empty dict is returned.

    Args:
        path: path to a DPR-style biencoder JSON file.

    Returns:
        dict mapping the question string to its first positive passage id.
    """
    if args.task != 'nq':
        logger.warning('Only NQ has manually labeled positives')
        return {}
    # Fix: close the file handle (the original `json.load(open(...))` leaked it).
    with open(path, 'r', encoding='utf-8') as f:
        examples = json.load(f)
    qid_to_pos_id = {}
    for ex in examples:
        positive_ctxs = ex['positive_ctxs']
        if len(positive_ctxs) > 0:
            # DPR passage ids are 1-based; the local corpus ids are 0-based.
            qid_to_pos_id[ex['question']] = str(int(positive_ctxs[0]['passage_id']) - 1)
    logger.info('Get {} manually labeled positives from {}'.format(len(qid_to_pos_id), path))
    return qid_to_pos_id
# Rebind `logger` to a locally configured instance. NOTE(review): this shadows
# the `logger` imported from logger_config above — presumably intentional; confirm.
logger = _setup_logger()
def _write_prepared_data_to_disk(out_path: str,
                                 split: str,
                                 queries: Dict[str, Dict[str, Any]],
                                 preds_path: str):
    """Write one JSONL training example per query with positive/negative doc ids.

    Args:
        out_path: destination JSONL file.
        split: dataset split name (used to locate the manual-positives file).
        queries: qid -> {'query', 'answers', ...}.
        preds_path: JSON file of retrieval predictions per query.
    """
    qid_to_pos_id = _load_qid_to_positive(path='{}/biencoder-nq-{}.json'.format(args.out_dir, split))
    cnt_filtered = 0
    # Fix: close the predictions file (the original `json.load(open(...))` leaked it).
    with open(preds_path, 'r', encoding='utf-8') as f:
        preds = json.load(f)
    with open(out_path, 'w', encoding='utf-8') as writer:
        for query_id in tqdm(queries, mininterval=1, desc='prepare {} data'.format(split)):
            cur_pred: dict = preds[query_id] if query_id in preds else preds[query_id.strip()]
            positive_ids, negative_ids = [], []
            manual_positive_id = qid_to_pos_id.get(query_id, None)
            if manual_positive_id:
                positive_ids.append(manual_positive_id)
            # Split the top-`depth` retrieved contexts into positives and negatives.
            for ctx in cur_pred['contexts'][:args.depth]:
                doc_id = str(ctx['docid'])
                if doc_id == manual_positive_id:
                    continue
                elif ctx['has_answer']:
                    positive_ids.append(doc_id)
                else:
                    negative_ids.append(doc_id)
            # Skip queries that cannot form a (positive, negative) pair.
            if not positive_ids or not negative_ids:
                cnt_filtered += 1
                continue
            np.random.shuffle(negative_ids)
            negative_ids = negative_ids[:args.num_negatives]
            doc_id_to_score = {str(ctx['docid']): float(ctx['score']) for ctx in cur_pred['contexts']}
            if manual_positive_id:
                # Pin the manual positive to a very high score.
                # Fix: the original assigned unconditionally, inserting a bogus
                # `None` key whenever no manual positive existed.
                doc_id_to_score[manual_positive_id] = 1000.
            example = {
                'query_id': query_id,
                'query': queries[query_id]['query'],
                'answers': queries[query_id]['answers'],
                'positives': {'doc_id': positive_ids,
                              'score': [doc_id_to_score.get(doc_id, -1.) for doc_id in positive_ids]
                              },
                'negatives': {'doc_id': negative_ids,
                              'score': [doc_id_to_score.get(doc_id, -1.) for doc_id in negative_ids]
                              },
            }
            writer.write(json.dumps(example, ensure_ascii=False, separators=(',', ':')))
            writer.write('\n')
    if cnt_filtered > 0:
        logger.info('{} questions are filtered out'.format(cnt_filtered))
    logger.info('Done write {} data to {}'.format(split, out_path))
183,666 | import os
import argparse
import json
import sys
from tqdm import tqdm
from typing import Dict, Any
from datasets import Dataset
from evaluate_dpr_retrieval import has_answers, SimpleTokenizer, evaluate_retrieval
from data_utils import load_query_answers, load_corpus
from utils import save_json_to_file
from logger_config import logger
def has_answers(text, answers, tokenizer, regex=False):
    """Return True if any answer occurs in `text` (regex match or exact
    case-insensitive token-subsequence match)."""
    text = _normalize(text)
    if regex:
        for ans in answers:
            if regex_match(text, _normalize(ans)):
                return True
        return False
    tokens = tokenizer.tokenize(text).words(uncased=True)
    for ans in answers:
        target = tokenizer.tokenize(_normalize(ans)).words(uncased=True)
        span = len(target)
        # Compare the answer tokens against every window of the same length.
        for start in range(len(tokens) - span + 1):
            if tokens[start: start + span] == target:
                return True
    return False
def _map_func(example: Dict[str, Any]) -> dict:
    """Enrich one retrieval example in place with question, answers, docid and
    a `has_answer` flag (and optionally the raw passage text).

    Relies on module-level `qas`, `corpus`, `tokenizer` and `args`.
    """
    question_id = example['question_id']
    doc_idx = example['doc_idx']
    qa = qas[question_id]
    doc = corpus[doc_idx]
    # Answer matching is performed on the passage body only, not the title.
    example['question'] = qa['query']
    example['answers'] = qa['answers']
    example['docid'] = doc_idx
    example['has_answer'] = has_answers(doc['contents'], qa['answers'], tokenizer, args.regex)
    if args.store_raw:
        example['text'] = '{}\n{}'.format(doc['title'], doc['contents'])
    return example
183,667 | import os
import cv2
import math
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from typing import Optional
from packaging import version
from collections import OrderedDict
from PIL import Image, ImageDraw, ImageFont
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import datasets
from datasets import load_dataset
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torchvision import transforms
import transformers
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
from termcolor import colored
def parse_args():
    """Parse command-line arguments for the text-to-image training script.

    Returns:
        argparse.Namespace with all training options; `non_ema_revision`
        defaults to `revision` and `local_rank` is reconciled with the
        LOCAL_RANK environment variable for distributed training.
    """
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default='runwayml/stable-diffusion-v1-5',
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--max_train_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        ),
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="sd-model-finetuned",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=None,
        help="A seed for reproducible training."
    )
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--character_aware_loss_lambda",
        type=float,
        default=0.01,
        help="Lambda for the character-aware loss",
    )
    parser.add_argument(
        "--character_aware_loss_ckpt",
        type=str,
        default='ckpt/character_aware_loss_unet.pth',
        # typo fix: "charactere-aware" -> "character-aware"
        help="The checkpoint for unet providing the character-aware loss."
    )
    parser.add_argument(
        "--train_batch_size",
        type=int,
        default=16,
        help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument(
        "--num_train_epochs",
        type=int,
        default=2
    )
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--no_pos_con",
        action="store_true",
        default=False,
        # typo fix: "avaible" -> "available"
        help="If it is activated, the position and the content of character are not available during training.",
    )
    parser.add_argument(
        "--no_con",
        action="store_true",
        default=False,
        # typo fix: "avaible" -> "available"
        help="If it is activated, the content of character is not available during training.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps",
        type=int,
        default=0,
        help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--use_8bit_adam",
        action="store_true",
        help="Whether or not to use 8-bit Adam from bitsandbytes."
    )
    parser.add_argument(
        "--drop_caption",
        action="store_true",
        help="Whether or not to drop captions during training."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default='MARIO-10M',
        help=(
            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument(
        "--use_ema",
        action="store_true",
        help="Whether to use EMA model."
    )
    parser.add_argument(
        "--segmentation_mask_aug",
        action="store_true",
        help="Whether to augment the segmentation masks (inspired by https://arxiv.org/abs/2211.13227)."
    )
    parser.add_argument(
        "--non_ema_revision",
        type=str,
        default=None,
        required=False,
        help=(
            "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or"
            " remote repository specified with --pretrained_model_name_or_path."
        ),
    )
    parser.add_argument(
        "--image_column",
        type=str,
        default="image",
        help="The column of the dataset containing an image."
    )
    parser.add_argument(
        "--caption_column",
        type=str,
        default="text",
        help="The column of the dataset containing a caption or a list of captions.",
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
        ),
    )
    parser.add_argument(
        "--mask_all_ratio",
        type=float,
        default=0.5,
        help="The training ratio of two branches."
    )
    parser.add_argument(
        "--adam_beta1",
        type=float,
        default=0.9,
        help="The beta1 parameter for the Adam optimizer."
    )
    parser.add_argument(
        "--adam_beta2",
        type=float,
        default=0.999,
        help="The beta2 parameter for the Adam optimizer."
    )
    parser.add_argument(
        "--adam_weight_decay",
        type=float,
        default=1e-2,
        help="Weight decay to use."
    )
    parser.add_argument(
        "--adam_epsilon",
        type=float,
        default=1e-08,
        help="Epsilon value for the Adam optimizer"
    )
    parser.add_argument(
        "--max_grad_norm",
        default=1.0,
        type=float,
        help="Max gradient norm."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the model to the Hub."
    )
    parser.add_argument(
        "--hub_token",
        type=str,
        default=None,
        help="The token to use to push to the Model Hub."
    )
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument(
        "--local_rank",
        type=int,
        default=-1,
        help="For distributed training: local_rank"
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=5,
        help=(
            "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
            " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
            " for more docs"
        ),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention",
        action="store_true",
        help="Whether or not to use xformers."
    )
    parser.add_argument(
        "--noise_offset",
        type=float,
        default=0,
        help="The scale of noise offset."
    )
    parser.add_argument(
        "--dataset_path",
        type=str,
        default='/home/cjy/cjy/TextDiffusion/data/laion-ocr-unzip',
        help="The path of dataset."
    )
    parser.add_argument(
        "--train_dataset_index_file",
        type=str,
        default='/home/jingyechen/jingyechen/amlt_test/diffusers_combine/examples/text_to_image/train_dataset_index.txt',
        help="The txt file that provides the index of training samples. The format of each line should be XXXXX_XXXXXXXXX."
    )
    parser.add_argument(
        "--vis_num",
        type=int,
        default=16,
        help="The number of images to be visualized during training."
    )
    parser.add_argument(
        "--vis_interval",
        type=int,
        default=500,
        help="The interval for visualization."
    )
    args = parser.parse_args()
    print('***************')
    print(args)
    print('***************')

    # The launcher's LOCAL_RANK environment variable wins over the CLI flag.
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    # default to using the same revision for the non-ema model if not specified
    if args.non_ema_revision is None:
        args.non_ema_revision = args.revision
    return args
183,668 | import os
import cv2
import math
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from typing import Optional
from packaging import version
from collections import OrderedDict
from PIL import Image, ImageDraw, ImageFont
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import datasets
from datasets import load_dataset
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torchvision import transforms
import transformers
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
from termcolor import colored
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """Build the fully-qualified Hub repo name, `owner/model_id`.

    The owner is `organization` when given, otherwise the user name resolved
    from `token` (falling back to the locally stored Hub token).
    """
    if token is None:
        token = HfFolder.get_token()
    owner = whoami(token)["name"] if organization is None else organization
    return f"{owner}/{model_id}"
183,669 | import os
import re
import cv2
import math
import shutil
import string
import textwrap
import numpy as np
from PIL import Image, ImageFont, ImageDraw, ImageOps
from typing import *
The provided code snippet includes necessary dependencies for implementing the `transform_mask` function. Write a Python function `def transform_mask(mask_root: str)` to solve the following problem:
This function extracts the mask area and text area from the images. Args: mask_root (str): The path of the mask image. * The white area is the unmasked area * The gray area is the masked area * The white area is the text area (NOTE: "white area" appears twice with different meanings — one of these bullets is likely a typo; verify against the mask format)
Here is the function:
def transform_mask(mask_root: str):
    """
    This function extracts the mask area and text area from the images.

    Args:
        mask_root (str): The path of mask image.
        * The white area is the unmasked area
        * The gray area is the masked area
        * The white area is the text area
    """
    image = cv2.imread(mask_root)
    resized = cv2.resize(image, (512, 512), interpolation=cv2.INTER_NEAREST)
    grayscale = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    # Pixels above 250 become 255, everything else 0.
    _, thresholded = cv2.threshold(grayscale, 250, 255, cv2.THRESH_BINARY)
    # Invert and rescale so near-white pixels map to 0 and the rest to 1.
    return 1 - (thresholded.astype(np.float32) / 255)
183,670 | import os
import re
import cv2
import math
import shutil
import string
import textwrap
import numpy as np
from PIL import Image, ImageFont, ImageDraw, ImageOps
from typing import *
# Map each character of `alphabet` (defined earlier in this file) to a 1-based
# index; index 0 is reserved for "non-character" in the segmentation labels.
for index, c in enumerate(alphabet):
    alphabet_dic[c] = index + 1  # the index 0 stands for non-character
The provided code snippet includes necessary dependencies for implementing the `combine_image` function. Write a Python function `def combine_image(args, sub_output_dir: str, pred_image_list: List, image_pil: Image, character_mask_pil: Image, character_mask_highlight_pil: Image, caption_pil_list: List)` to solve the following problem:
This function combines all the outputs and useful inputs together. Args: args (argparse.ArgumentParser): The arguments. pred_image_list (List): List of predicted images. image_pil (Image): The original image. character_mask_pil (Image): The character-level segmentation mask. character_mask_highlight_pil (Image): The character-level segmentation mask highlighting character regions with green color. caption_pil_list (List): List of captions.
Here is the function:
def combine_image(args, sub_output_dir: str, pred_image_list: List, image_pil: Image, character_mask_pil: Image, character_mask_highlight_pil: Image, caption_pil_list: List):
    """
    This function combines all the outputs and useful inputs together.

    Args:
        args (argparse.ArgumentParser): The arguments.
        pred_image_list (List): List of predicted images.
        image_pil (Image): The original image.
        character_mask_pil (Image): The character-level segmentation mask.
        character_mask_highlight_pil (Image): The character-level segmentation mask highlighting character regions with green color.
        caption_pil_list (List): List of captions.

    Returns:
        Image: the composed RGB grid image (also written to ``combine.jpg``).
    """
    # # create a "latest" folder to store the results
    # if os.path.exists(f'{args.output_dir}/latest'):
    #     shutil.rmtree(f'{args.output_dir}/latest')
    # os.mkdir(f'{args.output_dir}/latest')

    # save each predicted image
    # os.makedirs(f'{args.output_dir}/{sub_output_dir}', exist_ok=True)
    for index, img in enumerate(pred_image_list):
        img.save(f'{args.output_dir}/{sub_output_dir}/{index}.jpg')
        # img.save(f'{args.output_dir}/latest/{index}.jpg')

    # Grid layout: 3 predictions per row; each tile is 512x512 with a 48px
    # caption strip directly below it.
    length = len(pred_image_list)
    lines = math.ceil(length / 3)
    # The first row holds: original image | character mask | highlighted mask.
    blank = Image.new('RGB', (512*3, 512*(lines+1)+48*lines), (0,0,0))
    blank.paste(image_pil,(0,0))
    blank.paste(character_mask_pil,(512,0))
    blank.paste(character_mask_highlight_pil,(512*2,0))
    for i in range(length):
        row, col = i // 3, i % 3
        # Prediction tile, then its caption strip beneath it.
        blank.paste(pred_image_list[i],(512*col,512*(row+1)+48*row))
        blank.paste(caption_pil_list[i],(512*col,512*(row+1)+48*row+512))
    blank.save(f'{args.output_dir}/{sub_output_dir}/combine.jpg')
    # blank.save(f'{args.output_dir}/latest/combine.jpg')
    return blank.convert('RGB')
183,671 | import os
import re
import cv2
import math
import shutil
import string
import textwrap
import numpy as np
from PIL import Image, ImageFont, ImageDraw, ImageOps
from typing import *
The provided code snippet includes necessary dependencies for implementing the `inpainting_merge_image` function. Write a Python function `def inpainting_merge_image(original_image, mask_image, inpainting_image)` to solve the following problem:
This function merges the original image, mask image and inpainting image. Args: original_image (PIL.Image): The original image. mask_image (PIL.Image): The mask images. inpainting_image (PIL.Image): The inpainting images.
Here is the function:
def inpainting_merge_image(original_image, mask_image, inpainting_image):
    """
    This function merges the original image, mask image and inpainting image.

    Args:
        original_image (PIL.Image): The original image.
        mask_image (PIL.Image): The mask images.
        inpainting_image (PIL.Image): The inpainting images.

    Returns:
        PIL.Image: the original image with the masked (dark) regions replaced
        by the corresponding regions of the inpainting image.
    """
    original_image = original_image.resize((512, 512))
    mask_image = mask_image.resize((512, 512))
    inpainting_image = inpainting_image.resize((512, 512))
    # Fix: Image.convert returns a NEW image (it is not in-place); the original
    # code discarded the result, so the mask was never actually grayscale.
    mask_image = mask_image.convert('L')
    threshold = 250
    # Pixels darker than the threshold map to 1 (take the inpainted pixel);
    # near-white pixels map to 0 (keep the original pixel).
    table = [1 if i < threshold else 0 for i in range(256)]
    mask_image = mask_image.point(table, "1")
    merged_image = Image.composite(inpainting_image, original_image, mask_image)
    return merged_image
183,672 | import os
import cv2
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
from typing import Optional
from packaging import version
from termcolor import colored
from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import datasets
from datasets import load_dataset
from datasets import disable_caching
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torchvision import transforms
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
from util import segmentation_mask_visualization, make_caption_pil, combine_image, transform_mask, filter_segmentation_mask, inpainting_merge_image
from model.layout_generator import get_layout_from_prompt
from model.text_segmenter.unet import UNet
import torchsnooper
def parse_args():
    """Parse command-line arguments for the TextDiffuser inference script.

    Returns:
        argparse.Namespace with all inference options; `local_rank` is
        reconciled with the LOCAL_RANK environment variable.
    """
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default='runwayml/stable-diffusion-v1-5', # no need to modify this
        help="Path to pretrained model or model identifier from huggingface.co/models. Please do not modify this.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--mode",
        type=str,
        default=None,
        required=True,
        choices=["text-to-image", "text-to-image-with-template", "text-inpainting"],
        help="Three modes can be used.",
    )
    parser.add_argument(
        "--prompt",
        type=str,
        default="",
        required=True,
        help="The text prompts provided by users.",
    )
    parser.add_argument(
        "--template_image",
        type=str,
        default="",
        help="The template image should be given when using 【text-to-image-with-template】 mode.",
    )
    parser.add_argument(
        "--original_image",
        type=str,
        default="",
        help="The original image should be given when using 【text-inpainting】 mode.",
    )
    parser.add_argument(
        "--text_mask",
        type=str,
        default="",
        help="The text mask should be given when using 【text-inpainting】 mode.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=None,
        help="A seed for reproducible training."
    )
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--classifier_free_scale",
        type=float,
        default=7.5, # following stable diffusion (https://github.com/CompVis/stable-diffusion)
        help="Classifier free scale following https://arxiv.org/abs/2207.12598.",
    )
    parser.add_argument(
        "--drop_caption",
        action="store_true",
        # typo fix: trailing ".." -> "."
        help="Whether to drop captions during training following https://arxiv.org/abs/2207.12598."
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help="Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the model to the Hub."
    )
    parser.add_argument(
        "--hub_token",
        type=str,
        default=None,
        help="The token to use to push to the Model Hub."
    )
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default='fp16',
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument(
        "--local_rank",
        type=int,
        default=-1,
        help="For distributed training: local_rank"
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=5,
        help=(
            "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
            " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
            " for more docs"
        ),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None, # should be specified during inference
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention",
        action="store_true",
        help="Whether or not to use xformers."
    )
    parser.add_argument(
        "--font_path",
        type=str,
        default='assets/font/Arial.ttf',
        help="The path of font for visualization."
    )
    parser.add_argument(
        "--sample_steps",
        type=int,
        default=50, # following stable diffusion (https://github.com/CompVis/stable-diffusion)
        help="Diffusion steps for sampling."
    )
    parser.add_argument(
        "--vis_num",
        type=int,
        default=9, # please decreases the number if out-of-memory error occurs
        # typo fix: "to be sample" -> "to be sampled"
        help="Number of images to be sampled. Please decrease it when encountering out of memory error."
    )
    parser.add_argument(
        "--binarization",
        action="store_true",
        help="Whether to binarize the template image."
    )
    parser.add_argument(
        "--use_pillow_segmentation_mask",
        # NOTE(review): argparse `type=bool` treats any non-empty string (even
        # "False") as True — consider `action="store_true"` in a future change.
        type=bool,
        default=True,
        help="In the 【text-to-image】 mode, please specify whether to use the segmentation masks provided by PILLOW"
    )
    parser.add_argument(
        "--character_segmenter_path",
        type=str,
        default='textdiffuser-ckpt/text_segmenter.pth',
        help="checkpoint of character-level segmenter"
    )
    args = parser.parse_args()
    print(f'{colored("[√]", "green")} Arguments are loaded.')
    print(args)

    # The launcher's LOCAL_RANK environment variable wins over the CLI flag.
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    return args
183,673 | import os
import cv2
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
from typing import Optional
from packaging import version
from termcolor import colored
from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import datasets
from datasets import load_dataset
from datasets import disable_caching
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torchvision import transforms
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
from util import segmentation_mask_visualization, make_caption_pil, combine_image, transform_mask, filter_segmentation_mask, inpainting_merge_image
from model.layout_generator import get_layout_from_prompt
from model.text_segmenter.unet import UNet
import torchsnooper
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """Return `owner/model_id` for the Hugging Face Hub, where the owner is the
    given organization or, failing that, the user associated with `token`."""
    # Resolve the token first (falls back to the locally stored Hub token),
    # matching the original call order.
    resolved = HfFolder.get_token() if token is None else token
    if organization is None:
        return f"{whoami(resolved)['name']}/{model_id}"
    return f"{organization}/{model_id}"
183,674 | import json
import os
import traceback
from tqdm import tqdm
from multiprocessing import Pool
# Destination directory for extracted archives ('XXX' is a placeholder — set before use).
ROOT_TO = 'XXX'
# Number of worker processes used by multiprocess_unzip_file.
MULTIPROCESSING_NUM = 64
def unzip_file(idx):
    """Extract `{ROOT_FROM}/{idx}.zip` into ROOT_TO, skipping archives that are
    missing or already extracted. (`ROOT_FROM` is expected to be defined at
    module level alongside ROOT_TO.)"""
    if not os.path.exists(f'{ROOT_FROM}/{idx}.zip') or os.path.exists(f'{ROOT_TO}/{idx}'):
        return
    # NOTE(review): paths are interpolated into a shell command unquoted — fine
    # for trusted internal ids, but breaks on ids with spaces/shell metacharacters.
    cmd = f'unzip -q {ROOT_FROM}/{idx}.zip -d {ROOT_TO}'
    os.system(cmd)
def multiprocess_unzip_file(idxs):
    """Extract every archive named in `idxs` in parallel using
    MULTIPROCESSING_NUM worker processes, with a progress bar."""
    os.makedirs(ROOT_TO, exist_ok=True)
    with Pool(processes=MULTIPROCESSING_NUM) as p:
        with tqdm(total=len(idxs), desc='total') as pbar:
            # imap_unordered yields as workers finish; the results are unused —
            # the loop only drives the progress bar.
            for i, _ in enumerate(p.imap_unordered(unzip_file, idxs)):
                pbar.update()
    print("multiprocess_unzip_file done!")
183,675 | import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
The provided code snippet includes necessary dependencies for implementing the `betas_for_alpha_bar` function. Write a Python function `def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999)` to solve the following problem:
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. Returns: betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
Here is the function:
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
Returns:
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
"""
def alpha_bar(time_step):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return torch.tensor(betas, dtype=torch.float32) | Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs |
183,676 | import inspect
import os
import warnings
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError
from packaging import version
from requests import HTTPError
from torch import Tensor, device
from .. import __version__
from ..utils import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
FLAX_WEIGHTS_NAME,
HF_HUB_OFFLINE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
is_accelerate_available,
is_safetensors_available,
is_torch_version,
logging,
)
def get_parameter_device(parameter: torch.nn.Module):
    """Return the torch.device that *parameter*'s first parameter lives on.

    Falls back to scanning tensors stored directly in the module's __dict__,
    which keeps torch.nn.DataParallel (PyTorch 1.5) working even though it
    exposes no parameters of its own.
    """
    params = parameter.parameters()
    try:
        first = next(params)
    except StopIteration:
        # No registered parameters: look at raw tensor attributes instead.
        def tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]:
            return [(name, value) for name, value in module.__dict__.items() if torch.is_tensor(value)]

        members = parameter._named_members(get_members_fn=tensor_attributes)
        _, tensor = next(members)
        return tensor.device
    return first.device
183,677 | import inspect
import os
import warnings
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError
from packaging import version
from requests import HTTPError
from torch import Tensor, device
from .. import __version__
from ..utils import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
FLAX_WEIGHTS_NAME,
HF_HUB_OFFLINE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
is_accelerate_available,
is_safetensors_available,
is_torch_version,
logging,
)
def get_parameter_dtype(parameter: torch.nn.Module):
    """Return the dtype of *parameter*'s first parameter.

    Falls back to scanning tensors stored directly in the module's __dict__,
    which keeps torch.nn.DataParallel (PyTorch 1.5) working even though it
    exposes no parameters of its own.
    """
    params = parameter.parameters()
    try:
        first = next(params)
    except StopIteration:
        # No registered parameters: look at raw tensor attributes instead.
        def tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]:
            return [(name, value) for name, value in module.__dict__.items() if torch.is_tensor(value)]

        members = parameter._named_members(get_members_fn=tensor_attributes)
        _, tensor = next(members)
        return tensor.dtype
    return first.dtype
183,678 | import inspect
import os
import warnings
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError
from packaging import version
from requests import HTTPError
from torch import Tensor, device
from .. import __version__
from ..utils import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
FLAX_WEIGHTS_NAME,
HF_HUB_OFFLINE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
is_accelerate_available,
is_safetensors_available,
is_torch_version,
logging,
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
if variant is not None:
splits = weights_name.split(".")
splits = splits[:-1] + [variant] + splits[-1:]
weights_name = ".".join(splits)
return weights_name
The provided code snippet includes necessary dependencies for implementing the `load_state_dict` function. Write a Python function `def load_state_dict(checkpoint_file: Union[str, os.PathLike], variant: Optional[str] = None)` to solve the following problem:
Reads a checkpoint file, returning properly formatted errors if they arise.
Here is the function:
def load_state_dict(checkpoint_file: Union[str, os.PathLike], variant: Optional[str] = None):
    """
    Reads a checkpoint file, returning properly formatted errors if they arise.

    The loader is chosen from the file name: the canonical PyTorch weights
    name (with the optional *variant* infix) goes through ``torch.load``;
    any other name is treated as a safetensors file.
    """
    try:
        # NOTE(review): `safetensors` is not imported in this chunk's visible
        # import block — presumably imported at module level; confirm.
        if os.path.basename(checkpoint_file) == _add_variant(WEIGHTS_NAME, variant):
            return torch.load(checkpoint_file, map_location="cpu")
        else:
            return safetensors.torch.load_file(checkpoint_file, device="cpu")
    except Exception as e:
        try:
            # Distinguish "file is a git-lfs pointer stub" from "file is
            # corrupt/missing" by sniffing the text content.
            with open(checkpoint_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please install "
                        "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
                        "you cloned."
                    )
                else:
                    raise ValueError(
                        f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained "
                        "model. Make sure you have saved the model properly."
                    ) from e
        except (UnicodeDecodeError, ValueError):
            # Binary content (UnicodeDecodeError) or the ValueError raised
            # above: surface one uniform OSError for unreadable checkpoints.
            raise OSError(
                f"Unable to load weights from checkpoint file for '{checkpoint_file}' "
                f"at '{checkpoint_file}'. "
                "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True."
            )
183,679 | import inspect
import os
import warnings
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError
from packaging import version
from requests import HTTPError
from torch import Tensor, device
from .. import __version__
from ..utils import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
FLAX_WEIGHTS_NAME,
HF_HUB_OFFLINE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
is_accelerate_available,
is_safetensors_available,
is_torch_version,
logging,
)
def _load_state_dict_into_model(model_to_load, state_dict):
# Convert old format to new format if needed from a PyTorch state_dict
# copy state_dict so _load_from_state_dict can modify it
state_dict = state_dict.copy()
error_msgs = []
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: torch.nn.Module, prefix=""):
args = (state_dict, prefix, {}, True, [], [], error_msgs)
module._load_from_state_dict(*args)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
load(model_to_load)
return error_msgs | null |
183,680 | import inspect
import os
import warnings
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError
from packaging import version
from requests import HTTPError
from torch import Tensor, device
from .. import __version__
from ..utils import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
FLAX_WEIGHTS_NAME,
HF_HUB_OFFLINE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
is_accelerate_available,
is_safetensors_available,
is_torch_version,
logging,
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """Insert *variant* before the final extension of *weights_name*.

    e.g. ("model.bin", "fp16") -> "model.fp16.bin"; returns the name
    unchanged when *variant* is None.
    """
    if variant is not None:
        splits = weights_name.split(".")
        # Splice the variant in front of the extension (the last component).
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    """
    Resolve *weights_name* to a concrete file path.

    Resolution order: a direct file path, then a local directory (optionally
    with *subfolder*), then a Hub download. A deprecated calling convention
    where the variant was passed as *revision* is still honored with a
    FutureWarning. Raises EnvironmentError with a user-facing explanation for
    every failure mode.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.17.0")
        ):
            try:
                # Try the variant file ("...{revision}.bin") on the main branch.
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                # Variant file missing on main: warn, then fall through to a
                # normal download against the requested revision.
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
        # Each hub failure mode is re-raised as an EnvironmentError with an
        # actionable message.
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
183,681 | import os
import re
import zipfile
if not os.path.exists('textdiffuser-ckpt'):
os.system('wget https://huggingface.co/datasets/JingyeChen22/TextDiffuser/resolve/main/textdiffuser-ckpt-new.zip')
with zipfile.ZipFile('textdiffuser-ckpt-new.zip', 'r') as zip_ref:
zip_ref.extractall('.')
if not os.path.exists('images'):
os.system('wget https://huggingface.co/datasets/JingyeChen22/TextDiffuser/resolve/main/images.zip')
with zipfile.ZipFile('images.zip', 'r') as zip_ref:
zip_ref.extractall('.')
import cv2
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
from typing import Optional
from packaging import version
from termcolor import colored
from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import datasets
from datasets import load_dataset
from datasets import disable_caching
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
from util import segmentation_mask_visualization, make_caption_pil, combine_image, combine_image_gradio, transform_mask, transform_mask_pil, filter_segmentation_mask, inpainting_merge_image
from model.layout_generator import get_layout_from_prompt
from model.text_segmenter.unet import UNet
args = parse_args()
print(f'{colored("[√]", "green")} Logging dir is set to {logging_dir}.')
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse("0.0.16"):
logger.warn(
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
)
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
print(f'{colored("[√]", "green")} Text segmenter is successfully loaded.')
import gradio as gr
def parse_args():
    """Parse the TextDiffuser inference command line into an argparse Namespace.

    Also mirrors the `LOCAL_RANK` environment variable into `args.local_rank`
    for distributed launchers that only communicate the rank via the
    environment, and prints the parsed arguments for logging.
    """
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default='runwayml/stable-diffusion-v1-5',  # no need to modify this
        help="Path to pretrained model or model identifier from huggingface.co/models. Please do not modify this.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--mode",
        type=str,
        default="text-to-image",
        # required=True,
        choices=["text-to-image", "text-to-image-with-template", "text-inpainting"],
        help="Three modes can be used.",
    )
    parser.add_argument(
        "--prompt",
        type=str,
        default="",
        # required=True,
        help="The text prompts provided by users.",
    )
    parser.add_argument(
        "--template_image",
        type=str,
        default="",
        help="The template image should be given when using 【text-to-image-with-template】 mode.",
    )
    parser.add_argument(
        "--original_image",
        type=str,
        default="",
        help="The original image should be given when using 【text-inpainting】 mode.",
    )
    parser.add_argument(
        "--text_mask",
        type=str,
        default="",
        help="The text mask should be given when using 【text-inpainting】 mode.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=None,
        help="A seed for reproducible training."
    )
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--classifier_free_scale",
        type=float,
        default=7.5,  # following stable diffusion (https://github.com/CompVis/stable-diffusion)
        help="Classifier free scale following https://arxiv.org/abs/2207.12598.",
    )
    parser.add_argument(
        "--drop_caption",
        action="store_true",
        help="Whether to drop captions during training following https://arxiv.org/abs/2207.12598.."
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help="Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the model to the Hub."
    )
    parser.add_argument(
        "--hub_token",
        type=str,
        default=None,
        help="The token to use to push to the Model Hub."
    )
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default='fp16',
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument(
        "--local_rank",
        type=int,
        default=-1,
        help="For distributed training: local_rank"
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=5,
        help=(
            "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
            " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
            " for more docs"
        ),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default='textdiffuser-ckpt/diffusion_backbone',  # should be specified during inference
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention",
        action="store_true",
        help="Whether or not to use xformers."
    )
    parser.add_argument(
        "--font_path",
        type=str,
        default='Arial.ttf',
        help="The path of font for visualization."
    )
    parser.add_argument(
        "--sample_steps",
        type=int,
        default=50,  # following stable diffusion (https://github.com/CompVis/stable-diffusion)
        help="Diffusion steps for sampling."
    )
    parser.add_argument(
        "--vis_num",
        type=int,
        default=4,  # please decreases the number if out-of-memory error occurs
        help="Number of images to be sample. Please decrease it when encountering out of memory error."
    )
    parser.add_argument(
        "--binarization",
        action="store_true",
        help="Whether to binarize the template image."
    )
    parser.add_argument(
        "--use_pillow_segmentation_mask",
        # BUGFIX: `type=bool` is an argparse trap — bool("False") is True, so
        # any value given on the command line (including "False") enabled the
        # flag. Parse common false-y spellings explicitly; the default stays True.
        type=lambda s: str(s).lower() not in ("false", "0", "no", "n", "f"),
        default=True,
        help="In the 【text-to-image】 mode, please specify whether to use the segmentation masks provided by PILLOW"
    )
    parser.add_argument(
        "--character_segmenter_path",
        type=str,
        default='textdiffuser-ckpt/text_segmenter.pth',
        help="checkpoint of character-level segmenter"
    )
    args = parser.parse_args()

    print(f'{colored("[√]", "green")} Arguments are loaded.')
    print(args)

    # Distributed launchers communicate the local rank via the environment;
    # prefer it over the CLI flag when they disagree.
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank
    return args
183,682 | import os
import re
import zipfile
import cv2
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
from typing import Optional
from packaging import version
from termcolor import colored
from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import datasets
from datasets import load_dataset
from datasets import disable_caching
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
from util import segmentation_mask_visualization, make_caption_pil, combine_image, combine_image_gradio, transform_mask, transform_mask_pil, filter_segmentation_mask, inpainting_merge_image
from model.layout_generator import get_layout_from_prompt
from model.text_segmenter.unet import UNet
import gradio as gr
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """Return the fully-qualified Hub repo name ("owner/model_id").

    The owner is *organization* when given, otherwise the username of the
    (possibly locally stored) token's holder.
    """
    # Token resolution happens unconditionally, matching the original call order.
    auth = token if token is not None else HfFolder.get_token()
    if organization is not None:
        owner = organization
    else:
        owner = whoami(auth)["name"]
    return f"{owner}/{model_id}"
183,683 | import os
import re
import zipfile
if not os.path.exists('textdiffuser-ckpt'):
os.system('wget https://huggingface.co/datasets/JingyeChen22/TextDiffuser/resolve/main/textdiffuser-ckpt-new.zip')
with zipfile.ZipFile('textdiffuser-ckpt-new.zip', 'r') as zip_ref:
zip_ref.extractall('.')
if not os.path.exists('images'):
os.system('wget https://huggingface.co/datasets/JingyeChen22/TextDiffuser/resolve/main/images.zip')
with zipfile.ZipFile('images.zip', 'r') as zip_ref:
zip_ref.extractall('.')
import cv2
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
from typing import Optional
from packaging import version
from termcolor import colored
from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import datasets
from datasets import load_dataset
from datasets import disable_caching
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
from util import segmentation_mask_visualization, make_caption_pil, combine_image, combine_image_gradio, transform_mask, transform_mask_pil, filter_segmentation_mask, inpainting_merge_image
from model.layout_generator import get_layout_from_prompt
from model.text_segmenter.unet import UNet
import gradio as gr
def save_model_hook(models, weights, output_dir):
    """Accelerate save-state hook: persist each model in diffusers format.

    Each model is written to `<output_dir>/unet`; the matching entry is
    popped from *weights* so accelerate does not serialize it a second time.
    """
    unet_dir = os.path.join(output_dir, "unet")
    for model in models:
        model.save_pretrained(unet_dir)
        # Pop the weight so the corresponding model is not saved again.
        weights.pop()
183,684 | import os
import re
import zipfile
import cv2
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
from typing import Optional
from packaging import version
from termcolor import colored
from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import datasets
from datasets import load_dataset
from datasets import disable_caching
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
from util import segmentation_mask_visualization, make_caption_pil, combine_image, combine_image_gradio, transform_mask, transform_mask_pil, filter_segmentation_mask, inpainting_merge_image
from model.layout_generator import get_layout_from_prompt
from model.text_segmenter.unet import UNet
import gradio as gr
def load_model_hook(models, input_dir):
    """Accelerate load-state hook: restore each model from `<input_dir>/unet`.

    Models are popped from the list so accelerate does not attempt its own
    default loading pass on them afterwards.
    """
    while models:
        model = models.pop()
        # Load the diffusers-format checkpoint and transplant config + weights.
        loaded = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
        model.register_to_config(**loaded.config)
        model.load_state_dict(loaded.state_dict())
        del loaded
183,685 | import os
import re
import zipfile
import cv2
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
from typing import Optional
from packaging import version
from termcolor import colored
from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import datasets
from datasets import load_dataset
from datasets import disable_caching
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
from util import segmentation_mask_visualization, make_caption_pil, combine_image, combine_image_gradio, transform_mask, transform_mask_pil, filter_segmentation_mask, inpainting_merge_image
from model.layout_generator import get_layout_from_prompt
from model.text_segmenter.unet import UNet
args = parse_args()
print(f'{colored("[√]", "green")} Logging dir is set to {logging_dir}.')
tokenizer = CLIPTokenizer.from_pretrained(
args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
)
text_encoder = CLIPTextModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
)
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision).cuda()
unet = UNet2DConditionModel.from_pretrained(
args.resume_from_checkpoint, subfolder="unet", revision=None
).cuda()
vae.requires_grad_(False)
text_encoder.requires_grad_(False)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse("0.0.16"):
logger.warn(
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
)
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
segmenter = UNet(3, 96, True).cuda()
segmenter = torch.nn.DataParallel(segmenter)
segmenter.load_state_dict(torch.load(args.character_segmenter_path))
segmenter.eval()
print(f'{colored("[√]", "green")} Text segmenter is successfully loaded.')
import gradio as gr
def segmentation_mask_visualization(font_path: str, segmentation_mask: np.ndarray):
    """
    Visualize a character-level segmentation mask by drawing the decoded
    characters in green on a black 512x512 canvas.

    Args:
        font_path (str): The path of the font. We recommend Arial.ttf.
        segmentation_mask (np.ndarray): The character-level segmentation mask.

    Returns:
        Image: the rendered canvas.
    """
    # Normalize to a 64x64 label grid; nearest-neighbour keeps labels discrete.
    segmentation_mask = cv2.resize(segmentation_mask, (64, 64), interpolation=cv2.INTER_NEAREST)
    font = ImageFont.truetype(font_path, 8)
    blank = Image.new('RGB', (512,512), (0,0,0))
    d = ImageDraw.Draw(blank)
    for i in range(64):
        for j in range(64):
            # Label 0 is background; other labels are 1-based indices into
            # `alphabet`. NOTE(review): `alphabet` is a module-level global
            # not visible in this chunk — confirm it is defined at import time.
            if int(segmentation_mask[i][j]) == 0 or int(segmentation_mask[i][j])-1 >= len(alphabet):
                continue
            else:
                d.text((j*8, i*8), alphabet[int(segmentation_mask[i][j])-1], font=font, fill=(0, 255, 0))
    return blank
def make_caption_pil(font_path: str, captions: List[str]):
    """
    Render each caption string onto a small bordered PIL image.

    Args:
        font_path (str): The path of the font. We recommend Arial.ttf.
        captions (List[str]): List of captions.

    Returns:
        List[Image]: one 512x48 image (including the 2px grey border) per caption.
    """
    caption_pil_list = []
    font = ImageFont.truetype(font_path, 18)
    for caption in captions:
        border_size = 2
        img = Image.new('RGB', (512-4,48-4), (255,255,255))
        img = ImageOps.expand(img, border=(border_size, border_size, border_size, border_size), fill=(127, 127, 127))
        draw = ImageDraw.Draw(img)
        border_size = 2
        text = caption
        # NOTE(review): `textwrap` is not imported in this chunk's visible
        # import block — confirm it is imported at module level.
        lines = textwrap.wrap(text, width=40)
        x, y = 4, 4
        # NOTE(review): FreeTypeFont.getsize is deprecated and removed in
        # Pillow 10 — migrate to font.getbbox when upgrading Pillow.
        line_height = font.getsize('A')[1] + 4
        start = 0  # vestigial: never changes; `y` alone advances the baseline
        for line in lines:
            draw.text((x, y+start), line, font=font, fill=(200, 127, 0))
            y += line_height
        caption_pil_list.append(img)
    return caption_pil_list
def filter_segmentation_mask(segmentation_mask: np.array):
    """
    Suppress noisy predictions in a character-level segmentation mask.

    Cells predicted as '-' or ' ' are remapped to background (label 0),
    since those classes are frequent false positives. The mask is modified
    in place and also returned for chaining.

    Args:
        segmentation_mask (np.array): The character-level segmentation mask.
    """
    for noisy_char in ('-', ' '):
        segmentation_mask[segmentation_mask == alphabet_dic[noisy_char]] = 0
    return segmentation_mask
def combine_image_gradio(args, sub_output_dir: str, pred_image_list: List, image_pil: Image, character_mask_pil: Image, character_mask_highlight_pil: Image, caption_pil_list: List):
    """
    Combine the predicted sample images into a single canvas.

    Layout matches the original behavior for 1-4 images (1: returned as-is,
    2: 2x1, 3: 3x1, 4: 2x2). For any other non-zero count the images are
    tiled row-wise, two per row. Each tile is assumed to be 512x512.

    Args:
        args (argparse.ArgumentParser): The arguments (unused; kept for interface stability).
        sub_output_dir (str): Output sub-directory (unused; kept for interface stability).
        pred_image_list (List): List of predicted images.
        image_pil (Image): The original image (unused here).
        character_mask_pil (Image): The character-level segmentation mask (unused here).
        character_mask_highlight_pil (Image): Mask highlighted with characters (unused here).
        caption_pil_list (List): List of captions (unused here).

    Returns:
        Image: the combined canvas, or None if pred_image_list is empty.
    """
    size = len(pred_image_list)
    # Bug fix: the original left `blank` unbound for size == 0 or size > 4,
    # raising UnboundLocalError at the final return.
    if size == 0:
        return None
    if size == 1:
        return pred_image_list[0]
    if size == 2:
        blank = Image.new('RGB', (512*2, 512), (0,0,0))
        blank.paste(pred_image_list[0],(0,0))
        blank.paste(pred_image_list[1],(512,0))
    elif size == 3:
        blank = Image.new('RGB', (512*3, 512), (0,0,0))
        blank.paste(pred_image_list[0],(0,0))
        blank.paste(pred_image_list[1],(512,0))
        blank.paste(pred_image_list[2],(1024,0))
    else:
        # size == 4 keeps the original 2x2 grid; size > 4 extends it row-wise.
        cols = 2
        rows = (size + cols - 1) // cols
        blank = Image.new('RGB', (512*cols, 512*rows), (0,0,0))
        for idx, img in enumerate(pred_image_list):
            blank.paste(img, (512 * (idx % cols), 512 * (idx // cols)))
    return blank
def get_layout_from_prompt(args):
    """
    Predict keyword bounding boxes from the prompt and paint the layout.

    Keywords are extracted from args.prompt, their boxes are predicted
    autoregressively by the layout model, and the result is rendered both
    as an RGB preview image and as a character-level segmentation mask
    (pixel value = character index in `alphabet_dic`).

    Returns:
        tuple: (render_image, segmentation_mask) — both 512x512 PIL images.
    """
    # prompt = args.prompt
    font_path = args.font_path
    keywords = get_key_words(args.prompt)
    print(f'{colored("[!]", "red")} Detected keywords: {keywords} from prompt {args.prompt}')
    text_embedding, mask = text_encoder(args.prompt) # (1, 77 768) / (1, 77)
    # process all relevant info
    caption, length_list, width_list, target, words, state_list, word_match_list, boxes, boxes_length = process_caption(font_path, args.prompt, keywords)
    target = target.cuda().unsqueeze(0) # (77, 5)
    width_list = width_list.cuda().unsqueeze(0) # (77, )
    length_list = length_list.cuda().unsqueeze(0) # (77, )
    state_list = state_list.cuda().unsqueeze(0) # (77, )
    word_match_list = word_match_list.cuda().unsqueeze(0) # (77, )
    # Teacher-forcing style input: boxes shifted right by one position so the
    # model predicts box i from boxes 0..i-1.
    padding = torch.zeros(1, 1, 4).cuda()
    boxes = boxes.unsqueeze(0).cuda()
    right_shifted_boxes = torch.cat([padding, boxes[:,0:-1,:]],1) # (1, 8, 4)
    # inference: autoregressive box-by-box decoding
    return_boxes = []
    with torch.no_grad():
        for box_index in range(boxes_length):
            if box_index == 0:
                encoder_embedding = None
            output, encoder_embedding = model(text_embedding, length_list, width_list, mask, state_list, word_match_list, target, right_shifted_boxes, train=False, encoder_embedding=encoder_embedding)
            # Boxes are normalized coordinates in [0, 1].
            output = torch.clamp(output, min=0, max=1) # (1, 8, 4)
            # add overlap detection
            output = adjust_overlap_box(output, box_index) # (1, 8, 4)
            # Feed the prediction back as input for the next step.
            right_shifted_boxes[:,box_index+1,:] = output[:,box_index,:]
            xmin, ymin, xmax, ymax = output[0, box_index, :].tolist()
            return_boxes.append([xmin, ymin, xmax, ymax])
    # print the location of keywords (scaled to 512x512 pixel coordinates)
    print(f'index\tkeyword\tx_min\ty_min\tx_max\ty_max')
    for index, keyword in enumerate(keywords):
        x_min = int(return_boxes[index][0] * 512)
        y_min = int(return_boxes[index][1] * 512)
        x_max = int(return_boxes[index][2] * 512)
        y_max = int(return_boxes[index][3] * 512)
        print(f'{index}\t{keyword}\t{x_min}\t{y_min}\t{x_max}\t{y_max}')
    # paint the layout: preview image plus per-character label mask
    render_image = Image.new('RGB', (512, 512), (255, 255, 255))
    draw = ImageDraw.Draw(render_image)
    segmentation_mask = Image.new("L", (512,512), 0)
    segmentation_mask_draw = ImageDraw.Draw(segmentation_mask)
    for index, box in enumerate(return_boxes):
        box = [int(i*512) for i in box]
        xmin, ymin, xmax, ymax = box
        width = xmax - xmin
        height = ymax - ymin
        text = keywords[index]
        # Pick the largest font size that fits the predicted box.
        font_size = adjust_font_size(args, width, height, draw, text)
        font = ImageFont.truetype(args.font_path, font_size)
        # draw.rectangle([xmin, ymin, xmax,ymax], outline=(255,0,0))
        draw.text((xmin, ymin), text, font=font, fill=(0, 0, 0))
        boxes = []
        for i, char in enumerate(text):
            # paint character-level segmentation masks
            # https://github.com/python-pillow/Pillow/issues/3921
            bottom_1 = font.getsize(text[i])[1]
            right, bottom_2 = font.getsize(text[:i+1])
            bottom = bottom_1 if bottom_1 < bottom_2 else bottom_2
            width, height = font.getmask(char).size
            right += xmin
            bottom += ymin
            top = bottom - height
            left = right - width
            char_box = (left, top, right, bottom)
            boxes.append(char_box)
            # Fill the (slightly shrunk) character box with its alphabet index.
            char_index = alphabet_dic[char]
            segmentation_mask_draw.rectangle(shrink_box(char_box, scale_factor = 0.9), fill=char_index)
    print(f'{colored("[√]", "green")} Layout is successfully generated')
    return render_image, segmentation_mask
class UNet(nn.Module):
    """Standard U-Net used as the character-level text segmenter."""

    def __init__(self, n_channels, n_classes, bilinear=True):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        # With bilinear upsampling the deepest channel counts are halved so
        # the concatenated skip-connection widths stay consistent.
        factor = 2 if bilinear else 1
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 1024 // factor)
        self.up1 = Up(1024, 512 // factor, bilinear)
        self.up2 = Up(512, 256 // factor, bilinear)
        self.up3 = Up(256, 128 // factor, bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)

    def forward(self, x):
        """Encoder-decoder pass with skip connections; returns raw logits."""
        skip1 = self.inc(x)
        skip2 = self.down1(skip1)
        skip3 = self.down2(skip2)
        skip4 = self.down3(skip3)
        bottleneck = self.down4(skip4)
        out = self.up1(bottleneck, skip4)
        out = self.up2(out, skip3)
        out = self.up3(out, skip2)
        out = self.up4(out, skip1)
        # No sigmoid here: downstream code takes an argmax over the logits.
        # logits = torch.sigmoid(logits)
        return self.outc(out)
def text_to_image(prompt,slider_step,slider_guidance,slider_batch):
    """
    Gradio entry point: generate images containing rendered text from a prompt.

    The keyword layout is predicted from the prompt, converted to a
    character-level segmentation mask, and used to condition the diffusion
    UNet with classifier-free guidance.

    Args:
        prompt (str): Text prompt; quotes normalized, other symbols stripped.
        slider_step (int): Number of denoising steps (capped at 100).
        slider_guidance (float): Classifier-free guidance scale.
        slider_batch (int): Number of samples to generate.

    Returns:
        tuple: (combined prediction image, intermediate visualization image).
    """
    prompt = prompt.replace('"', "'")
    prompt = re.sub(r"[^a-zA-Z0-9'\" ]+", "", prompt)
    if slider_step>=100:
        slider_step = 100
    args.prompt = prompt
    sample_num = slider_batch
    # Fresh random seed per call so repeated clicks give different samples.
    seed = random.randint(0, 10000000)
    set_seed(seed)
    scheduler.set_timesteps(slider_step)
    noise = torch.randn((sample_num, 4, 64, 64)).to("cuda") # (b, 4, 64, 64)
    input = noise # (b, 4, 64, 64)
    captions = [args.prompt] * sample_num
    captions_nocond = [""] * sample_num
    print(f'{colored("[√]", "green")} Prompt is loaded: {args.prompt}.')
    # encode text prompts (conditional and unconditional branches for CFG)
    inputs = tokenizer(
        captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
    ).input_ids # (b, 77)
    encoder_hidden_states = text_encoder(inputs)[0].cuda() # (b, 77, 768)
    print(f'{colored("[√]", "green")} encoder_hidden_states: {encoder_hidden_states.shape}.')
    inputs_nocond = tokenizer(
        captions_nocond, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
    ).input_ids # (b, 77)
    encoder_hidden_states_nocond = text_encoder(inputs_nocond)[0].cuda() # (b, 77, 768)
    print(f'{colored("[√]", "green")} encoder_hidden_states_nocond: {encoder_hidden_states_nocond.shape}.')
    # load character-level segmenter
    # NOTE(review): this reloads the segmenter on every call even though a
    # module-level `segmenter` already exists — consider reusing that one.
    segmenter = UNet(3, 96, True).cuda()
    segmenter = torch.nn.DataParallel(segmenter)
    segmenter.load_state_dict(torch.load(args.character_segmenter_path))
    segmenter.eval()
    print(f'{colored("[√]", "green")} Text segmenter is successfully loaded.')
    #### text-to-image ####
    render_image, segmentation_mask_from_pillow = get_layout_from_prompt(args)
    segmentation_mask = torch.Tensor(np.array(segmentation_mask_from_pillow)).cuda() # (512, 512)
    segmentation_mask = filter_segmentation_mask(segmentation_mask)
    segmentation_mask = torch.nn.functional.interpolate(segmentation_mask.unsqueeze(0).unsqueeze(0).float(), size=(256, 256), mode='nearest')
    segmentation_mask = segmentation_mask.squeeze(1).repeat(sample_num, 1, 1).long().to('cuda') # (1, 1, 256, 256)
    print(f'{colored("[√]", "green")} character-level segmentation_mask: {segmentation_mask.shape}.')
    # Full feature mask + all-zero masked image: pure generation, nothing kept.
    feature_mask = torch.ones(sample_num, 1, 64, 64).to('cuda') # (b, 1, 64, 64)
    masked_image = torch.zeros(sample_num, 3, 512, 512).to('cuda') # (b, 3, 512, 512)
    masked_feature = vae.encode(masked_image).latent_dist.sample() # (b, 4, 64, 64)
    masked_feature = masked_feature * vae.config.scaling_factor
    print(f'{colored("[√]", "green")} feature_mask: {feature_mask.shape}.')
    print(f'{colored("[√]", "green")} masked_feature: {masked_feature.shape}.')
    # diffusion process with classifier-free guidance
    intermediate_images = []
    for t in tqdm(scheduler.timesteps):
        with torch.no_grad():
            noise_pred_cond = unet(sample=input, timestep=t, encoder_hidden_states=encoder_hidden_states, segmentation_mask=segmentation_mask, feature_mask=feature_mask, masked_feature=masked_feature).sample # b, 4, 64, 64
            noise_pred_uncond = unet(sample=input, timestep=t, encoder_hidden_states=encoder_hidden_states_nocond, segmentation_mask=segmentation_mask, feature_mask=feature_mask, masked_feature=masked_feature).sample # b, 4, 64, 64
            noisy_residual = noise_pred_uncond + slider_guidance * (noise_pred_cond - noise_pred_uncond) # b, 4, 64, 64
            prev_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
            input = prev_noisy_sample
            intermediate_images.append(prev_noisy_sample)
    # decode and visualization
    input = 1 / vae.config.scaling_factor * input
    sample_images = vae.decode(input.float(), return_dict=False)[0] # (b, 3, 512, 512)
    image_pil = render_image.resize((512,512))
    segmentation_mask = segmentation_mask[0].squeeze().cpu().numpy()
    character_mask_pil = Image.fromarray(((segmentation_mask!=0)*255).astype('uint8')).resize((512,512))
    character_mask_highlight_pil = segmentation_mask_visualization(args.font_path,segmentation_mask)
    caption_pil = make_caption_pil(args.font_path, captions)
    # save pred_img: map VAE output from [-1, 1] to uint8 RGB
    pred_image_list = []
    for image in sample_images.float():
        image = (image / 2 + 0.5).clamp(0, 1).unsqueeze(0)
        image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
        image = Image.fromarray((image * 255).round().astype("uint8")).convert('RGB')
        pred_image_list.append(image)
    blank_pil = combine_image_gradio(args, None, pred_image_list, image_pil, character_mask_pil, character_mask_highlight_pil, caption_pil)
    # Side-by-side: layout preview | binary char mask | char-annotated mask.
    intermediate_result = Image.new('RGB', (512*3, 512))
    intermediate_result.paste(image_pil, (0, 0))
    intermediate_result.paste(character_mask_pil, (512, 0))
    intermediate_result.paste(character_mask_highlight_pil, (512*2, 0))
    return blank_pil, intermediate_result
183,686 | import os
import re
import zipfile
import cv2
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
from typing import Optional
from packaging import version
from termcolor import colored
from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import datasets
from datasets import load_dataset
from datasets import disable_caching
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
from util import segmentation_mask_visualization, make_caption_pil, combine_image, combine_image_gradio, transform_mask, transform_mask_pil, filter_segmentation_mask, inpainting_merge_image
from model.layout_generator import get_layout_from_prompt
from model.text_segmenter.unet import UNet
args = parse_args()
# NOTE(review): `logging_dir` is not defined in this chunk — presumably set
# alongside parse_args(); confirm before running.
print(f'{colored("[√]", "green")} Logging dir is set to {logging_dir}.')
tokenizer = CLIPTokenizer.from_pretrained(
    args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
)
text_encoder = CLIPTextModel.from_pretrained(
    args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
)
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision).cuda()
# The diffusion UNet is restored from the fine-tuned checkpoint, not the base model.
unet = UNet2DConditionModel.from_pretrained(
    args.resume_from_checkpoint, subfolder="unet", revision=None
).cuda()
# Inference only: VAE and text encoder stay frozen.
vae.requires_grad_(False)
text_encoder.requires_grad_(False)
if args.enable_xformers_memory_efficient_attention:
    if is_xformers_available():
        import xformers
        xformers_version = version.parse(xformers.__version__)
        if xformers_version == version.parse("0.0.16"):
            logger.warn(
                "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
            )
        unet.enable_xformers_memory_efficient_attention()
    else:
        raise ValueError("xformers is not available. Make sure it is installed correctly")
scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
def to_tensor(image):
    """
    Convert a PIL image or uint8 numpy array to a float CHW torch tensor in [0, 1].

    Args:
        image (PIL.Image.Image | np.ndarray): Input image. Assumed to have a
            channel axis, i.e. shape (H, W, C) — TODO confirm callers never
            pass grayscale 2-D arrays.

    Returns:
        torch.Tensor: float32 tensor of shape (C, H, W) scaled to [0, 1].

    Raises:
        TypeError: if `image` is neither a PIL image nor a numpy array.
    """
    if isinstance(image, Image.Image):
        image = np.array(image)
    elif not isinstance(image, np.ndarray):
        # Bug fix: the original raised TypeError("Error"), which gave the
        # caller no information about what went wrong.
        raise TypeError(f"to_tensor expects a PIL Image or numpy array, got {type(image).__name__}")
    image = image.astype(np.float32) / 255.0
    image = np.transpose(image, (2, 0, 1))  # HWC -> CHW
    return torch.from_numpy(image)
# Character-level text segmenter: 3 input channels (RGB), 96 output classes
# (presumably the alphabet size plus background — TODO confirm against `alphabet`).
segmenter = UNet(3, 96, True).cuda()
# DataParallel wrapper so checkpoints saved with a "module." key prefix load cleanly.
segmenter = torch.nn.DataParallel(segmenter)
segmenter.load_state_dict(torch.load(args.character_segmenter_path))
segmenter.eval()
print(f'{colored("[√]", "green")} Text segmenter is successfully loaded.')
import gradio as gr
def segmentation_mask_visualization(font_path: str, segmentation_mask: np.array):
    """
    Render a character-level segmentation mask as an inspectable image.

    The mask is downsampled to 64x64; every cell holding a valid character
    index is drawn as that character, in green, on a black 512x512 canvas
    (each cell maps to an 8x8 pixel region).

    Args:
        font_path (str): Path of the font to render with (e.g. Arial.ttf).
        segmentation_mask (np.array): The character-level segmentation mask.
    """
    segmentation_mask = cv2.resize(segmentation_mask, (64, 64), interpolation=cv2.INTER_NEAREST)
    font = ImageFont.truetype(font_path, 8)
    canvas = Image.new('RGB', (512, 512), (0, 0, 0))
    painter = ImageDraw.Draw(canvas)
    for row in range(64):
        for col in range(64):
            label = int(segmentation_mask[row][col])
            # Label 0 is background; labels past the alphabet are invalid noise.
            if label == 0 or label - 1 >= len(alphabet):
                continue
            painter.text((col * 8, row * 8), alphabet[label - 1], font=font, fill=(0, 255, 0))
    return canvas
def make_caption_pil(font_path: str, captions: List[str]):
    """
    Convert each caption string into a small PIL image strip.

    Captions are word-wrapped at 40 characters per line and drawn in orange
    on a white 512x48 strip with a 2px gray border.

    Args:
        font_path (str): Path of the font to render with (e.g. Arial.ttf).
        captions (List[str]): List of captions.
    """
    rendered = []
    font = ImageFont.truetype(font_path, 18)
    for caption in captions:
        border = 2
        strip = Image.new('RGB', (512 - 4, 48 - 4), (255, 255, 255))
        strip = ImageOps.expand(strip, border=(border, border, border, border), fill=(127, 127, 127))
        painter = ImageDraw.Draw(strip)
        # NOTE(review): font.getsize is deprecated in recent Pillow versions;
        # behavior intentionally kept as-is.
        line_height = font.getsize('A')[1] + 4
        x, y = 4, 4
        for line in textwrap.wrap(caption, width=40):
            painter.text((x, y), line, font=font, fill=(200, 127, 0))
            y += line_height
        rendered.append(strip)
    return rendered
def filter_segmentation_mask(segmentation_mask: np.array):
    """
    Suppress noisy predictions in a character-level segmentation mask.

    Cells predicted as '-' or ' ' are remapped to background (label 0),
    since those classes are frequent false positives. The mask is modified
    in place and also returned for chaining.

    Args:
        segmentation_mask (np.array): The character-level segmentation mask.
    """
    for noisy_char in ('-', ' '):
        segmentation_mask[segmentation_mask == alphabet_dic[noisy_char]] = 0
    return segmentation_mask
def combine_image_gradio(args, sub_output_dir: str, pred_image_list: List, image_pil: Image, character_mask_pil: Image, character_mask_highlight_pil: Image, caption_pil_list: List):
    """
    Combine the predicted sample images into a single canvas.

    Layout matches the original behavior for 1-4 images (1: returned as-is,
    2: 2x1, 3: 3x1, 4: 2x2). For any other non-zero count the images are
    tiled row-wise, two per row. Each tile is assumed to be 512x512.

    Args:
        args (argparse.ArgumentParser): The arguments (unused; kept for interface stability).
        sub_output_dir (str): Output sub-directory (unused; kept for interface stability).
        pred_image_list (List): List of predicted images.
        image_pil (Image): The original image (unused here).
        character_mask_pil (Image): The character-level segmentation mask (unused here).
        character_mask_highlight_pil (Image): Mask highlighted with characters (unused here).
        caption_pil_list (List): List of captions (unused here).

    Returns:
        Image: the combined canvas, or None if pred_image_list is empty.
    """
    size = len(pred_image_list)
    # Bug fix: the original left `blank` unbound for size == 0 or size > 4,
    # raising UnboundLocalError at the final return.
    if size == 0:
        return None
    if size == 1:
        return pred_image_list[0]
    if size == 2:
        blank = Image.new('RGB', (512*2, 512), (0,0,0))
        blank.paste(pred_image_list[0],(0,0))
        blank.paste(pred_image_list[1],(512,0))
    elif size == 3:
        blank = Image.new('RGB', (512*3, 512), (0,0,0))
        blank.paste(pred_image_list[0],(0,0))
        blank.paste(pred_image_list[1],(512,0))
        blank.paste(pred_image_list[2],(1024,0))
    else:
        # size == 4 keeps the original 2x2 grid; size > 4 extends it row-wise.
        cols = 2
        rows = (size + cols - 1) // cols
        blank = Image.new('RGB', (512*cols, 512*rows), (0,0,0))
        for idx, img in enumerate(pred_image_list):
            blank.paste(img, (512 * (idx % cols), 512 * (idx // cols)))
    return blank
def text_to_image_with_template(prompt,template_image,slider_step,slider_guidance,slider_batch, binary):
    """
    Gradio entry point: generate images whose text layout follows a template.

    Character positions are extracted from `template_image` by the pretrained
    segmenter (optionally after binarization) and used to condition the
    diffusion UNet with classifier-free guidance.

    Args:
        prompt (str): The text prompt.
        template_image (Image): Template image providing the text layout.
        slider_step (int): Number of denoising steps (capped at 100).
        slider_guidance (float): Classifier-free guidance scale.
        slider_batch (int): Number of samples to generate.
        binary (bool): Whether to binarize the template first (useful for
            handwritten templates).

    Returns:
        tuple: (combined prediction image, intermediate visualization image).
    """
    if slider_step>=100:
        slider_step = 100
    orig_template_image = template_image.resize((512,512)).convert('RGB')
    args.prompt = prompt
    sample_num = slider_batch
    # If passed along, set the training seed now.
    # seed = slider_seed
    seed = random.randint(0, 10000000)
    set_seed(seed)
    scheduler.set_timesteps(slider_step)
    noise = torch.randn((sample_num, 4, 64, 64)).to("cuda") # (b, 4, 64, 64)
    input = noise # (b, 4, 64, 64)
    captions = [args.prompt] * sample_num
    captions_nocond = [""] * sample_num
    print(f'{colored("[√]", "green")} Prompt is loaded: {args.prompt}.')
    # encode text prompts (conditional and unconditional branches for CFG)
    inputs = tokenizer(
        captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
    ).input_ids # (b, 77)
    encoder_hidden_states = text_encoder(inputs)[0].cuda() # (b, 77, 768)
    print(f'{colored("[√]", "green")} encoder_hidden_states: {encoder_hidden_states.shape}.')
    inputs_nocond = tokenizer(
        captions_nocond, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
    ).input_ids # (b, 77)
    encoder_hidden_states_nocond = text_encoder(inputs_nocond)[0].cuda() # (b, 77, 768)
    print(f'{colored("[√]", "green")} encoder_hidden_states_nocond: {encoder_hidden_states_nocond.shape}.')
    #### text-to-image-with-template ####
    template_image = template_image.resize((256,256)).convert('RGB')
    # whether binarization is needed
    print(f'{colored("[Warning]", "red")} args.binarization is set to {binary}. You may need it when using handwritten images as templates.')
    if binary:
        gray = ImageOps.grayscale(template_image)
        # Threshold at 96: pixels brighter than 96 become white, others black.
        binary = gray.point(lambda x: 255 if x > 96 else 0, '1')
        template_image = binary.convert('RGB')
    # to_tensor = transforms.ToTensor()
    image_tensor = to_tensor(template_image).unsqueeze(0).cuda().sub_(0.5).div_(0.5) # (b, 3, 256, 256)
    with torch.no_grad():
        segmentation_mask = segmenter(image_tensor) # (b, 96, 256, 256)
    # Argmax over class logits gives a per-pixel character label map.
    segmentation_mask = segmentation_mask.max(1)[1].squeeze(0) # (256, 256)
    segmentation_mask = filter_segmentation_mask(segmentation_mask) # (256, 256)
    segmentation_mask = torch.nn.functional.interpolate(segmentation_mask.unsqueeze(0).unsqueeze(0).float(), size=(256, 256), mode='nearest') # (b, 1, 256, 256)
    segmentation_mask = segmentation_mask.squeeze(1).repeat(sample_num, 1, 1).long().to('cuda') # (b, 1, 256, 256)
    print(f'{colored("[√]", "green")} Character-level segmentation_mask: {segmentation_mask.shape}.')
    # Full feature mask + all-zero masked image: pure generation, nothing kept.
    feature_mask = torch.ones(sample_num, 1, 64, 64).to('cuda') # (b, 1, 64, 64)
    masked_image = torch.zeros(sample_num, 3, 512, 512).to('cuda') # (b, 3, 512, 512)
    masked_feature = vae.encode(masked_image).latent_dist.sample() # (b, 4, 64, 64)
    masked_feature = masked_feature * vae.config.scaling_factor # (b, 4, 64, 64)
    # diffusion process with classifier-free guidance
    intermediate_images = []
    for t in tqdm(scheduler.timesteps):
        with torch.no_grad():
            noise_pred_cond = unet(sample=input, timestep=t, encoder_hidden_states=encoder_hidden_states, segmentation_mask=segmentation_mask, feature_mask=feature_mask, masked_feature=masked_feature).sample # b, 4, 64, 64
            noise_pred_uncond = unet(sample=input, timestep=t, encoder_hidden_states=encoder_hidden_states_nocond, segmentation_mask=segmentation_mask, feature_mask=feature_mask, masked_feature=masked_feature).sample # b, 4, 64, 64
            noisy_residual = noise_pred_uncond + slider_guidance * (noise_pred_cond - noise_pred_uncond) # b, 4, 64, 64
            prev_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
            input = prev_noisy_sample
            intermediate_images.append(prev_noisy_sample)
    # decode and visualization
    input = 1 / vae.config.scaling_factor * input
    sample_images = vae.decode(input.float(), return_dict=False)[0] # (b, 3, 512, 512)
    image_pil = None
    segmentation_mask = segmentation_mask[0].squeeze().cpu().numpy()
    character_mask_pil = Image.fromarray(((segmentation_mask!=0)*255).astype('uint8')).resize((512,512))
    character_mask_highlight_pil = segmentation_mask_visualization(args.font_path,segmentation_mask)
    caption_pil = make_caption_pil(args.font_path, captions)
    # save pred_img: map VAE output from [-1, 1] to uint8 RGB
    pred_image_list = []
    for image in sample_images.float():
        image = (image / 2 + 0.5).clamp(0, 1).unsqueeze(0)
        image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
        image = Image.fromarray((image * 255).round().astype("uint8")).convert('RGB')
        pred_image_list.append(image)
    blank_pil = combine_image_gradio(args, None, pred_image_list, image_pil, character_mask_pil, character_mask_highlight_pil, caption_pil)
    # Side-by-side: template | binary char mask | char-annotated mask.
    intermediate_result = Image.new('RGB', (512*3, 512))
    intermediate_result.paste(orig_template_image, (0, 0))
    intermediate_result.paste(character_mask_pil, (512, 0))
    intermediate_result.paste(character_mask_highlight_pil, (512*2, 0))
    return blank_pil, intermediate_result
183,687 | import os
import re
import zipfile
import cv2
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
from typing import Optional
from packaging import version
from termcolor import colored
from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import datasets
from datasets import load_dataset
from datasets import disable_caching
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
from util import segmentation_mask_visualization, make_caption_pil, combine_image, combine_image_gradio, transform_mask, transform_mask_pil, filter_segmentation_mask, inpainting_merge_image
from model.layout_generator import get_layout_from_prompt
from model.text_segmenter.unet import UNet
args = parse_args()
# NOTE(review): `logging_dir` is not defined in this chunk — presumably set
# alongside parse_args(); confirm before running.
print(f'{colored("[√]", "green")} Logging dir is set to {logging_dir}.')
tokenizer = CLIPTokenizer.from_pretrained(
    args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
)
text_encoder = CLIPTextModel.from_pretrained(
    args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
)
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision).cuda()
# The diffusion UNet is restored from the fine-tuned checkpoint, not the base model.
unet = UNet2DConditionModel.from_pretrained(
    args.resume_from_checkpoint, subfolder="unet", revision=None
).cuda()
# Inference only: VAE and text encoder stay frozen.
vae.requires_grad_(False)
text_encoder.requires_grad_(False)
if args.enable_xformers_memory_efficient_attention:
    if is_xformers_available():
        import xformers
        xformers_version = version.parse(xformers.__version__)
        if xformers_version == version.parse("0.0.16"):
            logger.warn(
                "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
            )
        unet.enable_xformers_memory_efficient_attention()
    else:
        raise ValueError("xformers is not available. Make sure it is installed correctly")
scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
def to_tensor(image):
    """
    Convert a PIL image or uint8 numpy array to a float CHW torch tensor in [0, 1].

    Args:
        image (PIL.Image.Image | np.ndarray): Input image. Assumed to have a
            channel axis, i.e. shape (H, W, C) — TODO confirm callers never
            pass grayscale 2-D arrays.

    Returns:
        torch.Tensor: float32 tensor of shape (C, H, W) scaled to [0, 1].

    Raises:
        TypeError: if `image` is neither a PIL image nor a numpy array.
    """
    if isinstance(image, Image.Image):
        image = np.array(image)
    elif not isinstance(image, np.ndarray):
        # Bug fix: the original raised TypeError("Error"), which gave the
        # caller no information about what went wrong.
        raise TypeError(f"to_tensor expects a PIL Image or numpy array, got {type(image).__name__}")
    image = image.astype(np.float32) / 255.0
    image = np.transpose(image, (2, 0, 1))  # HWC -> CHW
    return torch.from_numpy(image)
# Character-level text segmenter: 3 input channels (RGB), 96 output classes
# (presumably the alphabet size plus background — TODO confirm against `alphabet`).
segmenter = UNet(3, 96, True).cuda()
# DataParallel wrapper so checkpoints saved with a "module." key prefix load cleanly.
segmenter = torch.nn.DataParallel(segmenter)
segmenter.load_state_dict(torch.load(args.character_segmenter_path))
segmenter.eval()
print(f'{colored("[√]", "green")} Text segmenter is successfully loaded.')
import gradio as gr
def transform_mask_pil(mask_root):
    """
    Build a binary inpainting mask from a user-supplied mask image.

    The input is resized to 512x512, converted to grayscale, and thresholded
    at 250: near-white pixels map to 0 (keep) and everything else to 1
    (masked region to be repainted).

    Args:
        mask_root: The mask image (PIL image or array-like).
            * The white area is the unmasked area
            * The gray area is the masked area

    Returns:
        np.ndarray: float32 array of shape (512, 512) with values in {0, 1},
        where 1 marks the masked region.
    """
    arr = np.array(mask_root)
    arr = cv2.resize(arr, (512, 512), interpolation=cv2.INTER_NEAREST)
    gray = cv2.cvtColor(arr, cv2.COLOR_BGR2GRAY)
    # Pixels above 250 (near-white) map to 255, everything else to 0.
    _, binary = cv2.threshold(gray, 250, 255, cv2.THRESH_BINARY)
    # Invert and normalize: masked (non-white) region becomes 1.
    return 1 - (binary.astype(np.float32) / 255)
def segmentation_mask_visualization(font_path: str, segmentation_mask: np.array):
    """
    Render a character-level segmentation mask as an inspectable image.

    The mask is downsampled to 64x64; every cell holding a valid character
    index is drawn as that character, in green, on a black 512x512 canvas
    (each cell maps to an 8x8 pixel region).

    Args:
        font_path (str): Path of the font to render with (e.g. Arial.ttf).
        segmentation_mask (np.array): The character-level segmentation mask.
    """
    segmentation_mask = cv2.resize(segmentation_mask, (64, 64), interpolation=cv2.INTER_NEAREST)
    font = ImageFont.truetype(font_path, 8)
    canvas = Image.new('RGB', (512, 512), (0, 0, 0))
    painter = ImageDraw.Draw(canvas)
    for row in range(64):
        for col in range(64):
            label = int(segmentation_mask[row][col])
            # Label 0 is background; labels past the alphabet are invalid noise.
            if label == 0 or label - 1 >= len(alphabet):
                continue
            painter.text((col * 8, row * 8), alphabet[label - 1], font=font, fill=(0, 255, 0))
    return canvas
def make_caption_pil(font_path: str, captions: List[str]):
    """
    Convert each caption string into a small PIL image strip.

    Captions are word-wrapped at 40 characters per line and drawn in orange
    on a white 512x48 strip with a 2px gray border.

    Args:
        font_path (str): Path of the font to render with (e.g. Arial.ttf).
        captions (List[str]): List of captions.
    """
    rendered = []
    font = ImageFont.truetype(font_path, 18)
    for caption in captions:
        border = 2
        strip = Image.new('RGB', (512 - 4, 48 - 4), (255, 255, 255))
        strip = ImageOps.expand(strip, border=(border, border, border, border), fill=(127, 127, 127))
        painter = ImageDraw.Draw(strip)
        # NOTE(review): font.getsize is deprecated in recent Pillow versions;
        # behavior intentionally kept as-is.
        line_height = font.getsize('A')[1] + 4
        x, y = 4, 4
        for line in textwrap.wrap(caption, width=40):
            painter.text((x, y), line, font=font, fill=(200, 127, 0))
            y += line_height
        rendered.append(strip)
    return rendered
def filter_segmentation_mask(segmentation_mask: np.array):
    """
    Suppress noisy predictions in a character-level segmentation mask.

    Cells predicted as '-' or ' ' are remapped to background (label 0),
    since those classes are frequent false positives. The mask is modified
    in place and also returned for chaining.

    Args:
        segmentation_mask (np.array): The character-level segmentation mask.
    """
    for noisy_char in ('-', ' '):
        segmentation_mask[segmentation_mask == alphabet_dic[noisy_char]] = 0
    return segmentation_mask
def combine_image_gradio(args, sub_output_dir: str, pred_image_list: List, image_pil: Image, character_mask_pil: Image, character_mask_highlight_pil: Image, caption_pil_list: List):
    """
    Combine the predicted sample images into a single canvas.

    Layout matches the original behavior for 1-4 images (1: returned as-is,
    2: 2x1, 3: 3x1, 4: 2x2). For any other non-zero count the images are
    tiled row-wise, two per row. Each tile is assumed to be 512x512.

    Args:
        args (argparse.ArgumentParser): The arguments (unused; kept for interface stability).
        sub_output_dir (str): Output sub-directory (unused; kept for interface stability).
        pred_image_list (List): List of predicted images.
        image_pil (Image): The original image (unused here).
        character_mask_pil (Image): The character-level segmentation mask (unused here).
        character_mask_highlight_pil (Image): Mask highlighted with characters (unused here).
        caption_pil_list (List): List of captions (unused here).

    Returns:
        Image: the combined canvas, or None if pred_image_list is empty.
    """
    size = len(pred_image_list)
    # Bug fix: the original left `blank` unbound for size == 0 or size > 4,
    # raising UnboundLocalError at the final return.
    if size == 0:
        return None
    if size == 1:
        return pred_image_list[0]
    if size == 2:
        blank = Image.new('RGB', (512*2, 512), (0,0,0))
        blank.paste(pred_image_list[0],(0,0))
        blank.paste(pred_image_list[1],(512,0))
    elif size == 3:
        blank = Image.new('RGB', (512*3, 512), (0,0,0))
        blank.paste(pred_image_list[0],(0,0))
        blank.paste(pred_image_list[1],(512,0))
        blank.paste(pred_image_list[2],(1024,0))
    else:
        # size == 4 keeps the original 2x2 grid; size > 4 extends it row-wise.
        cols = 2
        rows = (size + cols - 1) // cols
        blank = Image.new('RGB', (512*cols, 512*rows), (0,0,0))
        for idx, img in enumerate(pred_image_list):
            blank.paste(img, (512 * (idx % cols), 512 * (idx // cols)))
    return blank
def text_inpainting(prompt,orig_image,mask_image,slider_step,slider_guidance,slider_batch):
    """
    Gradio entry point: repaint a masked region of an image with new text.

    The user-drawn mask is split into (a) a character-layout mask, produced
    by running the segmenter over a binarized version of the mask image, and
    (b) an image mask deciding which latent regions are kept from the
    original image. Diffusion runs with classifier-free guidance.

    Args:
        prompt (str): The text prompt.
        orig_image (Image): The image to inpaint.
        mask_image: The user-supplied mask (array-like; resized by cv2).
        slider_step (int): Number of denoising steps (capped at 100).
        slider_guidance (float): Classifier-free guidance scale.
        slider_batch (int): Number of samples to generate.

    Returns:
        tuple: (combined prediction image, intermediate visualization image).
    """
    if slider_step>=100:
        slider_step = 100
    args.prompt = prompt
    sample_num = slider_batch
    # If passed along, set the training seed now.
    # seed = slider_seed
    seed = random.randint(0, 10000000)
    set_seed(seed)
    scheduler.set_timesteps(slider_step)
    noise = torch.randn((sample_num, 4, 64, 64)).to("cuda") # (b, 4, 64, 64)
    input = noise # (b, 4, 64, 64)
    captions = [args.prompt] * sample_num
    captions_nocond = [""] * sample_num
    print(f'{colored("[√]", "green")} Prompt is loaded: {args.prompt}.')
    # encode text prompts (conditional and unconditional branches for CFG)
    inputs = tokenizer(
        captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
    ).input_ids # (b, 77)
    encoder_hidden_states = text_encoder(inputs)[0].cuda() # (b, 77, 768)
    print(f'{colored("[√]", "green")} encoder_hidden_states: {encoder_hidden_states.shape}.')
    inputs_nocond = tokenizer(
        captions_nocond, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
    ).input_ids # (b, 77)
    encoder_hidden_states_nocond = text_encoder(inputs_nocond)[0].cuda() # (b, 77, 768)
    print(f'{colored("[√]", "green")} encoder_hidden_states_nocond: {encoder_hidden_states_nocond.shape}.')
    # Binarize the user mask and extract the character layout from it.
    mask_image = cv2.resize(mask_image, (512,512))
    # mask_image = mask_image.resize((512,512)).convert('RGB')
    text_mask = np.array(mask_image)
    threshold = 128
    _, text_mask = cv2.threshold(text_mask, threshold, 255, cv2.THRESH_BINARY)
    text_mask = Image.fromarray(text_mask).convert('RGB').resize((256,256))
    # Debug artifact written to the working directory.
    text_mask.save('text_mask.png')
    text_mask_tensor = to_tensor(text_mask).unsqueeze(0).cuda().sub_(0.5).div_(0.5)
    with torch.no_grad():
        segmentation_mask = segmenter(text_mask_tensor)
    # Argmax over class logits gives a per-pixel character label map.
    segmentation_mask = segmentation_mask.max(1)[1].squeeze(0)
    segmentation_mask = filter_segmentation_mask(segmentation_mask)
    segmentation_mask = torch.nn.functional.interpolate(segmentation_mask.unsqueeze(0).unsqueeze(0).float(), size=(256, 256), mode='nearest')
    # Image mask: 1 marks the region to repaint, 0 the region to keep.
    image_mask = transform_mask_pil(mask_image)
    image_mask = torch.from_numpy(image_mask).cuda().unsqueeze(0).unsqueeze(0)
    orig_image = orig_image.convert('RGB').resize((512,512))
    image = orig_image
    image_tensor = to_tensor(image).unsqueeze(0).cuda().sub_(0.5).div_(0.5)
    # Zero out the masked region, then encode the rest as the kept latent.
    masked_image = image_tensor * (1-image_mask)
    masked_feature = vae.encode(masked_image).latent_dist.sample().repeat(sample_num, 1, 1, 1)
    masked_feature = masked_feature * vae.config.scaling_factor
    image_mask = torch.nn.functional.interpolate(image_mask, size=(256, 256), mode='nearest').repeat(sample_num, 1, 1, 1)
    # Restrict the character layout to the masked region only.
    segmentation_mask = segmentation_mask * image_mask
    feature_mask = torch.nn.functional.interpolate(image_mask, size=(64, 64), mode='nearest')
    # diffusion process with classifier-free guidance
    intermediate_images = []
    for t in tqdm(scheduler.timesteps):
        with torch.no_grad():
            noise_pred_cond = unet(sample=input, timestep=t, encoder_hidden_states=encoder_hidden_states, segmentation_mask=segmentation_mask, feature_mask=feature_mask, masked_feature=masked_feature).sample # b, 4, 64, 64
            noise_pred_uncond = unet(sample=input, timestep=t, encoder_hidden_states=encoder_hidden_states_nocond, segmentation_mask=segmentation_mask, feature_mask=feature_mask, masked_feature=masked_feature).sample # b, 4, 64, 64
            noisy_residual = noise_pred_uncond + slider_guidance * (noise_pred_cond - noise_pred_uncond) # b, 4, 64, 64
            prev_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
            input = prev_noisy_sample
            intermediate_images.append(prev_noisy_sample)
    # decode and visualization
    input = 1 / vae.config.scaling_factor * input
    sample_images = vae.decode(input.float(), return_dict=False)[0] # (b, 3, 512, 512)
    image_pil = None
    segmentation_mask = segmentation_mask[0].squeeze().cpu().numpy()
    character_mask_pil = Image.fromarray(((segmentation_mask!=0)*255).astype('uint8')).resize((512,512))
    character_mask_highlight_pil = segmentation_mask_visualization(args.font_path,segmentation_mask)
    caption_pil = make_caption_pil(args.font_path, captions)
    # save pred_img: map VAE output from [-1, 1] to uint8 RGB
    pred_image_list = []
    for image in sample_images.float():
        image = (image / 2 + 0.5).clamp(0, 1).unsqueeze(0)
        image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
        image = Image.fromarray((image * 255).round().astype("uint8")).convert('RGB')
        # need to merge
        # image = inpainting_merge_image(orig_image, Image.fromarray(mask_image).convert('L'), image)
        pred_image_list.append(image)
    # Debug artifacts written to the working directory.
    character_mask_pil.save('character_mask_pil.png')
    character_mask_highlight_pil.save('character_mask_highlight_pil.png')
    blank_pil = combine_image_gradio(args, None, pred_image_list, image_pil, character_mask_pil, character_mask_highlight_pil, caption_pil)
    # Visualization: dim the original image and overlay the drawn mask.
    background = orig_image.resize((512, 512))
    alpha = Image.new('L', background.size, int(255 * 0.2))
    background.putalpha(alpha)
    # foreground
    foreground = Image.fromarray(mask_image).convert('L').resize((512, 512))
    threshold = 200
    alpha = foreground.point(lambda x: 0 if x > threshold else 255, '1')
    foreground.putalpha(alpha)
    merge_image = Image.alpha_composite(foreground.convert('RGBA'), background.convert('RGBA')).convert('RGB')
    # Side-by-side: masked original | binary char mask | char-annotated mask.
    intermediate_result = Image.new('RGB', (512*3, 512))
    intermediate_result.paste(merge_image, (0, 0))
    intermediate_result.paste(character_mask_pil, (512, 0))
    intermediate_result.paste(character_mask_highlight_pil, (512*2, 0))
    return blank_pil, intermediate_result
183,688 | import os
import cv2
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
from typing import Optional
from packaging import version
from termcolor import colored
from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import datasets
from datasets import load_dataset
from datasets import disable_caching
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torchvision import transforms
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
from util import segmentation_mask_visualization, make_caption_pil, combine_image, transform_mask, filter_segmentation_mask, inpainting_merge_image
from model.layout_generator import get_layout_from_prompt
from model.text_segmenter.unet import UNet
import torchsnooper
def parse_args():
    """Parse command-line arguments for TextDiffuser inference.

    Covers the pretrained-model location, the generation mode and its
    mode-specific inputs (prompt, template image, original image, text
    mask), diffusion sampling hyper-parameters, and output / logging /
    checkpoint options.

    Returns:
        argparse.Namespace: parsed arguments; ``local_rank`` is overridden
        by the ``LOCAL_RANK`` environment variable when that is set.
    """
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    # --- model / checkpoint locations ---
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default='runwayml/stable-diffusion-v1-5', # no need to modify this
        help="Path to pretrained model or model identifier from huggingface.co/models. Please do not modify this.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    # --- generation mode and its mode-specific inputs ---
    parser.add_argument(
        "--mode",
        type=str,
        default=None,
        required=True,
        choices=["text-to-image", "text-to-image-with-template", "text-inpainting"],
        help="Three modes can be used.",
    )
    parser.add_argument(
        "--prompt",
        type=str,
        default="",
        required=False,
        help="The text prompts provided by users.",
    )
    parser.add_argument(
        "--prompt_list",
        type=str,
        default="",
        required=True,
        help="The list of prompts.",
    )
    parser.add_argument(
        "--template_image",
        type=str,
        default="",
        help="The template image should be given when using 【text-to-image-with-template】 mode.",
    )
    parser.add_argument(
        "--original_image",
        type=str,
        default="",
        help="The original image should be given when using 【text-inpainting】 mode.",
    )
    parser.add_argument(
        "--text_mask",
        type=str,
        default="",
        help="The text mask should be given when using 【text-inpainting】 mode.",
    )
    # --- output / caching ---
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="The path of the generation directory.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=0, # set to 0 during evaluation
        help="A seed for reproducible training."
    )
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    # --- diffusion sampling hyper-parameters ---
    parser.add_argument(
        "--classifier_free_scale",
        type=float,
        default=7.5, # following stable diffusion (https://github.com/CompVis/stable-diffusion)
        help="Classifier free scale following https://arxiv.org/abs/2207.12598.",
    )
    parser.add_argument(
        "--drop_caption",
        action="store_true",
        help="Whether to drop captions during training following https://arxiv.org/abs/2207.12598.."
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help="Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
    )
    # --- Hugging Face Hub options (unused during plain local inference) ---
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the model to the Hub."
    )
    parser.add_argument(
        "--hub_token",
        type=str,
        default=None,
        help="The token to use to push to the Model Hub."
    )
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default='fp16',
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument(
        "--local_rank",
        type=int,
        default=-1,
        help="For distributed training: local_rank"
    )
    # --- checkpointing ---
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=5,
        help=(
            "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
            " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
            " for more docs"
        ),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None, # should be specified during inference
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention",
        action="store_true",
        help="Whether or not to use xformers."
    )
    # --- visualization / sampling ---
    parser.add_argument(
        "--font_path",
        type=str,
        default='Arial.ttf',
        help="The path of font for visualization."
    )
    parser.add_argument(
        "--sample_steps",
        type=int,
        default=50, # following stable diffusion (https://github.com/CompVis/stable-diffusion)
        help="Diffusion steps for sampling."
    )
    parser.add_argument(
        "--vis_num",
        type=int,
        default=9, # please decreases the number if out-of-memory error occurs
        help="Number of images to be sample. Please decrease it when encountering out of memory error."
    )
    parser.add_argument(
        "--binarization",
        action="store_true",
        help="Whether to binarize the template image."
    )
    parser.add_argument(
        "--use_pillow_segmentation_mask",
        type=bool,
        default=True,
        help="In the 【text-to-image】 mode, please specify whether to use the segmentation masks provided by PILLOW"
    )
    parser.add_argument(
        "--character_segmenter_path",
        type=str,
        default='textdiffuser-ckpt/text_segmenter.pth',
        help="checkpoint of character-level segmenter"
    )
    args = parser.parse_args()
    print(f'{colored("[√]", "green")} Arguments are loaded.')
    print(args)
    # Torch distributed launchers export LOCAL_RANK; it wins over the flag.
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank
    return args
183,690 | import os
from PIL import Image
import numpy as np
import torch
from tqdm import tqdm
import argparse
import cv2
import torchvision.transforms as transforms
def load_stablediffusion():
def test_stablediffusion(prompt, save_path, num_images_per_prompt=4,
pipe=None, generator=None):
def load_deepfloyd_if():
def test_deepfloyd_if(stage_1, stage_2, stage_3, prompt, save_path, num_images_per_prompt=4, generator=None):
def load_controlnet_cannyedge():
def test_controlnet_cannyedge(prompt, save_path, canny_path, num_images_per_prompt=4,
pipe=None, generator=None, low_threshold=100, high_threshold=200):
def MARIOEval_generate_results(root, dataset, method='controlnet', num_images_per_prompt=4, split=0, total_split=1):
    """Generate images for every prompt of one MARIOEval dataset.

    Args:
        root: evaluation root containing ``MARIOEval/`` and ``generation/``.
        dataset: dataset name under ``MARIOEval`` (e.g. ``LAIONEval4000``).
        method: one of ``controlnet`` / ``stablediffusion`` / ``deepfloyd``.
        num_images_per_prompt: images sampled per prompt.
        split, total_split: shard index / shard count so several processes
            can each handle a disjoint slice of the prompt list.
    """
    root_eval = os.path.join(root, "MARIOEval")
    render_path = os.path.join(root_eval, dataset, 'render')  # canny-edge condition images
    root_res = os.path.join(root, "generation", method)
    for idx in range(num_images_per_prompt):
        os.makedirs(os.path.join(root_res, dataset, 'images_' + str(idx)), exist_ok=True)
    # Fixed seed so every shard/method samples reproducibly.
    generator = torch.Generator(device="cuda").manual_seed(0)
    if method == 'controlnet':
        pipe = load_controlnet_cannyedge()
    elif method == 'stablediffusion':
        pipe = load_stablediffusion()
    elif method == 'deepfloyd':
        stage_1, stage_2, stage_3 = load_deepfloyd_if()
    with open(os.path.join(root_eval, dataset, dataset + '.txt'), 'r') as fr:
        prompts = fr.readlines()
        prompts = [_.strip() for _ in prompts]
    for idx, prompt in tqdm(enumerate(prompts)):
        # Shard the prompt list: this process handles [split/total, (split+1)/total).
        # Fix: the upper bound must be exclusive (>=); the original `>` made the
        # prompt sitting exactly on a shard boundary be generated by two shards.
        if idx < split * len(prompts) / total_split or idx >= (split + 1) * len(prompts) / total_split:
            continue
        # NOTE(review): outputs are written under 'images/' while 'images_{i}/'
        # directories were created above — presumably the test_* helpers split
        # save_path per seed; confirm against their implementation.
        if method == 'controlnet':
            test_controlnet_cannyedge(prompt=prompt, num_images_per_prompt=num_images_per_prompt,
                                      save_path=os.path.join(root_res, dataset, 'images', str(idx) + '.jpg'),
                                      canny_path=os.path.join(render_path, str(idx) + '.png'),
                                      pipe=pipe, generator=generator)
        elif method == 'stablediffusion':
            test_stablediffusion(prompt=prompt, num_images_per_prompt=num_images_per_prompt,
                                 save_path=os.path.join(root_res, dataset, 'images', str(idx) + '.jpg'),
                                 pipe=pipe, generator=generator)
        elif method == 'deepfloyd':
            test_deepfloyd_if(stage_1, stage_2, stage_3, num_images_per_prompt=num_images_per_prompt,
                              save_path=os.path.join(root_res, dataset, 'images', str(idx) + '.jpg'),
                              prompt=prompt, generator=generator)
183,691 | import os
from PIL import Image
import numpy as np
import torch
from tqdm import tqdm
import argparse
import cv2
import torchvision.transforms as transforms
def parse_args():
    """Parse command-line arguments for MARIOEval generation.

    Selects the benchmark dataset, the generation method, the GPU, and the
    shard (``split`` of ``total_split``) of the prompt list to process.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--dataset",
        type=str,
        default='TMDBEval500',
        required=False,
        choices=['TMDBEval500', 'OpenLibraryEval500', 'LAIONEval4000',
                 'ChineseDrawText', 'DrawBenchText', 'DrawTextCreative']
    )
    parser.add_argument(
        "--root",
        type=str,
        default="/path/to/eval",
        required=True,
    )
    parser.add_argument(
        "--method",
        type=str,
        default='controlnet',
        required=False,
        choices=['controlnet', 'deepfloyd', 'stablediffusion', 'textdiffuser']
    )
    parser.add_argument(
        "--gpu",
        type=int,
        default=0,
        required=False,
    )
    # split/total_split shard the prompt list across parallel processes.
    parser.add_argument(
        "--split",
        type=int,
        default=0,
        required=False,
    )
    parser.add_argument(
        "--total_split",
        type=int,
        default=1,
        required=False,
    )
    args = parser.parse_args()
    return args
183,692 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'
def _inception_v3(*args, **kwargs):
    """Wraps `torchvision.models.inception_v3`, smoothing over API changes
    between torchvision releases.

    On >= 0.6 the default weight initialization is skipped (see
    https://github.com/mseitzer/pytorch-fid/issues/28); on < 0.13 the modern
    ``weights`` keyword is translated back to the legacy ``pretrained`` flag.
    """
    # Parse "MAJOR.MINOR" out of the version string; fall back to (0,) when
    # the string is something exotic (e.g. a dev build).
    try:
        major_minor = tuple(int(part) for part in torchvision.__version__.split('.')[:2])
    except ValueError:
        major_minor = (0,)

    if major_minor >= (0, 6):
        kwargs['init_weights'] = False

    if major_minor < (0, 13) and 'weights' in kwargs:
        requested = kwargs.pop('weights')
        if requested == 'DEFAULT':
            kwargs['pretrained'] = True
        elif requested is None:
            kwargs['pretrained'] = False
        else:
            raise ValueError(
                'weights=={} not supported in torchvision {}'.format(
                    requested, torchvision.__version__
                )
            )

    return torchvision.models.inception_v3(*args, **kwargs)
class FIDInceptionA(torchvision.models.inception.InceptionA):
    """InceptionA block patched for FID computation.

    Identical to torchvision's InceptionA except that the average pooling
    excludes the zero padding from the mean, matching the original
    TensorFlow implementation the FID weights were trained with.
    """
    def __init__(self, in_channels, pool_features):
        super(FIDInceptionA, self).__init__(in_channels, pool_features)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        # Patch: Tensorflow's average pool does not use the padded zero's in
        # its average calculation
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                                   count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
class FIDInceptionC(torchvision.models.inception.InceptionC):
    """InceptionC block patched for FID computation.

    Identical to torchvision's InceptionC except that the average pooling
    excludes the zero padding from the mean (TensorFlow semantics).
    """
    def __init__(self, in_channels, channels_7x7):
        super(FIDInceptionC, self).__init__(in_channels, channels_7x7)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch7x7 = self.branch7x7_1(x)
        branch7x7 = self.branch7x7_2(branch7x7)
        branch7x7 = self.branch7x7_3(branch7x7)

        branch7x7dbl = self.branch7x7dbl_1(x)
        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)

        # Patch: Tensorflow's average pool does not use the padded zero's in
        # its average calculation
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                                   count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return torch.cat(outputs, 1)
class FIDInceptionE_1(torchvision.models.inception.InceptionE):
    """First InceptionE block patched for FID computation.

    Identical to torchvision's InceptionE except that the average pooling
    excludes the zero padding from the mean (TensorFlow semantics).
    """
    def __init__(self, in_channels):
        super(FIDInceptionE_1, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        # Patch: Tensorflow's average pool does not use the padded zero's in
        # its average calculation
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                                   count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
class FIDInceptionE_2(torchvision.models.inception.InceptionE):
    """Second InceptionE block patched for FID computation.

    Identical to torchvision's InceptionE except that the pooling branch
    uses max pooling, reproducing a quirk of the original FID Inception
    graph (see the inline note below).
    """
    def __init__(self, in_channels):
        super(FIDInceptionE_2, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        # Patch: The FID Inception model uses max pooling instead of average
        # pooling. This is likely an error in this specific Inception
        # implementation, as other Inception models use average pooling here
        # (which matches the description in the paper).
        branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
The provided code snippet includes necessary dependencies for implementing the `fid_inception_v3` function. Write a Python function `def fid_inception_v3()` to solve the following problem:
Build a pretrained Inception model for FID computation. The Inception model for FID computation uses a different set of weights and has a slightly different structure than torchvision's Inception. This method first constructs torchvision's Inception and then patches the necessary parts that are different in the FID Inception model.
Here is the function:
def fid_inception_v3():
    """Build pretrained Inception model for FID computation

    The Inception model for FID computation uses a different set of weights
    and has a slightly different structure than torchvision's Inception.

    This method first constructs torchvision's Inception and then patches the
    necessary parts that are different in the FID Inception model.

    Returns:
        torch.nn.Module: Inception-v3 with the FID-specific blocks swapped
        in and the pt_inception-2015-12-05 weights loaded.
    """
    inception = _inception_v3(num_classes=1008,
                              aux_logits=False,
                              weights=None)
    # Swap in the patched blocks before loading weights so the state dict
    # keys match the patched module structure.
    inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
    inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
    inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
    inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
    inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
    inception.Mixed_7b = FIDInceptionE_1(1280)
    inception.Mixed_7c = FIDInceptionE_2(2048)
    # Fix: the original called the bare name `load_state_dict_from_url`,
    # which is never imported in this module (NameError at runtime). The
    # canonical entry point is torch.hub.load_state_dict_from_url.
    state_dict = torch.hub.load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
    inception.load_state_dict(state_dict)
    return inception
183,693 | import json
import os
import numpy as np
import argparse
from clipscore import cal_clipscore
from fid_score import calculate_fid_given_paths
def eval_clipscore(root_eval, root_res, dataset, device="cuda:0", num_images_per_prompt=4):
    """Compute the mean CLIPScore of generated images against their prompts.

    Reads the prompt list ``<root_eval>/<dataset>/<dataset>.txt`` and, for
    each seed, scores the images stored under
    ``<root_res>/<dataset>/images_<seed>/``.

    Returns:
        tuple: (mean CLIPScore over all seeds, list of per-seed score dicts).
    """
    with open(os.path.join(root_eval, dataset, dataset + '.txt'), 'r') as fr:
        text_list = [line.strip() for line in fr.readlines()]

    # Stable Diffusion results were exported as PNG, everything else as JPG.
    if 'stablediffusion' in root_res:
        format = '.png'
    else:
        format = '.jpg'

    clip_scores = []
    scores = []
    for seed in range(num_images_per_prompt):
        image_ids = ['{}_{}{}'.format(idx, seed, format) for idx in range(len(text_list))]
        image_list = [os.path.join(root_res, dataset, 'images_' + str(seed), name)
                      for name in image_ids]
        score = cal_clipscore(image_ids=image_ids, image_paths=image_list,
                              text_list=text_list, device=device)
        clip_scores.append(np.mean([entry['CLIPScore'] for entry in score.values()]))
        scores.append(score)

    print("clip_score:", np.mean(clip_scores), clip_scores)
    return np.mean(clip_scores), scores
def calculate_fid_given_paths(paths, batch_size=50, device="cuda:0", dims=2048, num_workers=1):
    """Calculates the FID of two paths (each a directory, a precomputed
    .npz statistics file, or a list of directories)."""
    # Validate every path up front; an entry may itself be a list of paths.
    for entry in paths:
        candidates = entry if type(entry) is list else [entry]
        for candidate in candidates:
            if not os.path.exists(candidate):
                raise RuntimeError('Invalid path: %s' % candidate)

    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
    model = InceptionV3([block_idx]).to(device)

    m1, s1 = compute_statistics_of_path(paths[0], model, batch_size,
                                        dims, device, num_workers)
    m2, s2 = compute_statistics_of_path(paths[1], model, batch_size,
                                        dims, device, num_workers)
    return calculate_frechet_distance(m1, s1, m2, s2)
def MARIOEval_evaluate_results(root, datasets_with_images, datasets, methods, gpu,
                               eval_clipscore_flag=True, eval_fid_flag=True, num_images_per_prompt=4):
    """Evaluate generated images (CLIPScore and FID) for each method.

    Each process only evaluates the method whose index equals ``gpu`` so
    several methods can be evaluated in parallel on different GPUs.
    Per-dataset results and method-level aggregates are written to
    ``<root>/generation/<method>/eval.json`` and the collection of all
    processed methods to ``<root>/generation/eval.json``.

    Args:
        root: evaluation root containing ``MARIOEval/`` and ``generation/``.
        datasets_with_images: datasets that have ground-truth images (FID
            can only be computed for those).
        datasets: datasets to evaluate.
        methods: generation methods; index selects the GPU/process.
        gpu: GPU id; doubles as the method index this process handles.
        eval_clipscore_flag / eval_fid_flag: which metrics to compute.
        num_images_per_prompt: images sampled per prompt (one seed each).
    """
    root_eval = os.path.join(root, "MARIOEval")
    method_res = {}
    device = "cuda:" + str(gpu)
    for method_idx, method in enumerate(methods):
        if method_idx != gpu: # running in different gpus simultaneously to save time
            continue
        print("\nmethod:", method)
        dataset_res = {}
        root_res = os.path.join(root, 'generation', method)
        # --- per-dataset metrics ---
        for dataset in datasets:
            print("dataset:", dataset)
            dataset_res[dataset] = {}
            if eval_clipscore_flag:
                dataset_res[dataset]['clipscore'], dataset_res[dataset]['scores'] =\
                    eval_clipscore(root_eval, root_res, dataset, device, num_images_per_prompt)
            if eval_fid_flag and dataset in datasets_with_images:
                gt_path = os.path.join(root_eval, dataset, 'images')
                fids = []
                for idx in range(num_images_per_prompt):
                    gen_path = os.path.join(root_res, dataset, 'images_' + str(idx))
                    fids.append(calculate_fid_given_paths(paths=[gt_path, gen_path]))
                print("fid:", np.mean(fids), fids)
                dataset_res[dataset]['fid'] = np.mean(fids)
        # --- method-level aggregate CLIPScore (mean over all images of all datasets) ---
        if eval_clipscore_flag:
            method_clipscores = []
            for seed in range(num_images_per_prompt):
                clipscore_list = []
                for dataset in dataset_res.keys():
                    clipscore_list += [_['CLIPScore'] for _ in dataset_res[dataset]['scores'][seed].values()]
                method_clipscores.append(np.mean(clipscore_list))
            method_clipscore = np.mean(method_clipscores)
            dataset_res['clipscore'] = method_clipscore
        # --- method-level aggregate FID (pooled over datasets with GT images) ---
        # NOTE: by now dataset_res may also contain the 'clipscore' aggregate
        # key; the `dataset in datasets_with_images` check filters it out.
        if eval_fid_flag:
            method_fids = []
            for idx in range(num_images_per_prompt):
                gt_paths = []
                gen_paths = []
                for dataset in dataset_res.keys():
                    if dataset in datasets_with_images:
                        gt_paths.append(os.path.join(root_eval, dataset, 'images'))
                        gen_paths.append(os.path.join(root_res, dataset, 'images_' + str(idx)))
                if len(gt_paths):
                    method_fids.append(calculate_fid_given_paths(paths=[gt_paths, gen_paths]))
            print("fid:", np.mean(method_fids), method_fids)
            method_fid = np.mean(method_fids)
            dataset_res['fid'] = method_fid
        method_res[method] = dataset_res
        with open(os.path.join(root_res, 'eval.json'), 'w') as fw:
            json.dump(dataset_res, fw)
    print(method_res)
    with open(os.path.join(root, 'generation', 'eval.json'), 'w') as fw:
        json.dump(method_res, fw)
183,694 | import json
import os
import numpy as np
import argparse
from clipscore import cal_clipscore
from fid_score import calculate_fid_given_paths
def merge_eval_results(root, methods):
    """Merge per-method ``eval.json`` files under ``<root>/generation/``
    into one combined ``<root>/generation/eval.json``.

    The bulky per-image ``scores`` entries are stripped from each dataset
    result before merging to keep the combined file small.

    Args:
        root: evaluation root containing the ``generation`` directory.
        methods: method names whose ``eval.json`` files are merged.
    """
    method_res = {}
    for method in methods:
        root_res = os.path.join(root, 'generation', method)
        with open(os.path.join(root_res, 'eval.json'), 'r') as fr:
            dataset_res = json.load(fr)
        for value in dataset_res.values():
            if type(value) is dict:
                # pop() instead of del: a dataset evaluated without
                # clipscore has no 'scores' key and del would raise KeyError.
                value.pop('scores', None) # too long
        method_res[method] = dataset_res
    with open(os.path.join(root, 'generation', 'eval.json'), 'w') as fw:
        json.dump(method_res, fw)
183,695 | import json
import os
import numpy as np
import argparse
from clipscore import cal_clipscore
from fid_score import calculate_fid_given_paths
def parse_args():
    """Parse command-line arguments for MARIOEval metric evaluation.

    Selects the benchmark dataset, evaluation root, generation method, GPU,
    and the shard (``split`` of ``total_split``) to evaluate.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--dataset",
        type=str,
        default='TMDBEval500',
        required=False,
        choices=['TMDBEval500', 'OpenLibraryEval500', 'LAIONEval4000',
                 'ChineseDrawText', 'DrawBenchText', 'DrawTextCreative']
    )
    parser.add_argument(
        "--root",
        type=str,
        default="/path/to/data/TextDiffuser/evaluation/",
        required=True,
    )
    parser.add_argument(
        "--method",
        type=str,
        default='controlnet',
        required=False,
        choices=['controlnet', 'deepfloyd', 'stablediffusion', 'textdiffuser']
    )
    parser.add_argument(
        "--gpu",
        type=int,
        default=0,
        required=False,
    )
    # split/total_split shard the work across parallel processes.
    parser.add_argument(
        "--split",
        type=int,
        default=0,
        required=False,
    )
    parser.add_argument(
        "--total_split",
        type=int,
        default=1,
        required=False,
    )
    args = parser.parse_args()
    return args
183,696 | import os
import re
import copy
def get_key_words(text: str):
    """Return the whitespace-split words found inside single quotes in *text*.

    E.g. ``"a 'hello world' sign"`` -> ``['hello', 'world']``; an input
    without any quoted span yields an empty list.
    """
    quoted_spans = re.findall(r"'(.*?)'", text)  # non-greedy: each '...' pair
    return [word for span in quoted_spans for word in span.split()]
183,697 | import os
import re
import copy
def get_p_r_acc(method, pred, gt):
    """Word-level precision / recall / exact-match accuracy between a
    predicted word list and the ground truth.

    Matching is case-insensitive and multiset-based (each ground-truth word
    can be matched at most once); accuracy is 1 only when both sides contain
    exactly the same words, order ignored.  ``method`` is accepted for
    interface compatibility but not used.

    Returns:
        tuple: (precision, recall, accuracy) with accuracy in {0, 1}.
    """
    normalized_pred = [word.strip().lower() for word in pred]
    normalized_gt = [word.strip().lower() for word in gt]

    # Greedy multiset matching: every predicted word consumes at most one
    # ground-truth occurrence.
    unmatched_pred = copy.deepcopy(normalized_pred)
    unmatched_gt = copy.deepcopy(normalized_gt)
    for word in normalized_pred:
        if word in unmatched_gt:
            unmatched_pred.remove(word)
            unmatched_gt.remove(word)

    # 1e-8 in the denominator guards against empty pred/gt lists.
    matched = len(normalized_pred) - len(unmatched_pred)
    precision = matched / (len(normalized_pred) + 1e-8)
    recall = (len(normalized_gt) - len(unmatched_gt)) / (len(normalized_gt) + 1e-8)

    accuracy = 1 if ''.join(sorted(normalized_pred)) == ''.join(sorted(normalized_gt)) else 0
    return precision, recall, accuracy
183,698 | import os
import pathlib
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import numpy as np
import torch
import torchvision.transforms as TF
from PIL import Image
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
from pytorch_fid.inception import InceptionV3
def compute_statistics_of_path(path, model, batch_size, dims, device,
                               num_workers=1):
    """Return (mu, sigma) activation statistics for *path*.

    *path* may be a precomputed ``.npz`` statistics file (loaded directly),
    a single image directory, or a list of image directories whose images
    are pooled together.
    """
    if type(path) is not list and path.endswith('.npz'):
        # Precomputed statistics: just load them.
        with np.load(path) as stats:
            return stats['mu'][:], stats['sigma'][:]

    def _images_in(directory):
        # All files in `directory` carrying a known image extension.
        folder = pathlib.Path(directory)
        return sorted(f for ext in IMAGE_EXTENSIONS for f in folder.glob('*.{}'.format(ext)))

    if type(path) is list:
        files = []
        for directory in path:
            files += _images_in(directory)
        files = sorted(files)
    else:
        files = _images_in(path)

    return calculate_activation_statistics(files, model, batch_size, dims,
                                           device, num_workers)
The provided code snippet includes necessary dependencies for implementing the `save_fid_stats` function. Write a Python function `def save_fid_stats(paths, batch_size, device, dims, num_workers=1)` to solve the following problem:
Compute the Inception activation statistics (mu, sigma) of the images under paths[0] and save them as a compressed .npz file at paths[1].
Here is the function:
def save_fid_stats(paths, batch_size, device, dims, num_workers=1):
    """Compute Inception activation statistics for paths[0] and save them
    as a compressed .npz file (keys ``mu`` and ``sigma``) at paths[1].

    Note: despite the surrounding text, no FID is calculated here — only
    the (mu, sigma) statistics later consumed by the FID computation.
    """
    if not os.path.exists(paths[0]):
        raise RuntimeError('Invalid path: %s' % paths[0])

    # Refuse to clobber an existing statistics file.
    if os.path.exists(paths[1]):
        raise RuntimeError('Existing output file: %s' % paths[1])

    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]

    model = InceptionV3([block_idx]).to(device)

    print(f"Saving statistics for {paths[0]}")

    m1, s1 = compute_statistics_of_path(paths[0], model, batch_size,
                                        dims, device, num_workers)

    np.savez_compressed(paths[1], mu=m1, sigma=s1)
183,699 | import argparse
import clip
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
import torch
import tqdm
import numpy as np
import sklearn.preprocessing
import collections
import os
import pathlib
import json
import warnings
from packaging import version
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
from pycocoevalcap.meteor.meteor import Meteor
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.spice.spice import Spice
def parse_args():
    """Parse command-line arguments for standalone CLIPScore evaluation.

    Exits early with a message when ``--save_per_instance`` is given but
    does not end in ``.json``.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'candidates_json',
        type=str,
        help='Candidates json mapping from image_id --> candidate.')

    parser.add_argument(
        'image_dir',
        type=str,
        help='Directory of images, with the filenames as image ids.')

    parser.add_argument(
        '--references_json',
        default=None,
        help='Optional references json mapping from image_id --> [list of references]')

    parser.add_argument(
        '--compute_other_ref_metrics',
        default=1,
        type=int,
        help='If references is specified, should we compute standard reference-based metrics?')

    parser.add_argument(
        '--save_per_instance',
        default=None,
        help='if set, we will save per instance clipscores to this file')

    args = parser.parse_args()

    # Per-instance output must be a .json path; bail out otherwise.
    if isinstance(args.save_per_instance, str) and not args.save_per_instance.endswith('.json'):
        print('if you\'re saving per-instance, please make sure the filepath ends in json.')
        quit()
    return args
183,700 | import copy
import json
import logging
import os
import re
from multiprocessing import Pool
import torch
from lxml import html
from torch.utils.data import TensorDataset
from tqdm import tqdm
from transformers import DataProcessor
def get_text(node):
    """Return all text beneath *node* (an hOCR/lxml element), with every
    whitespace run collapsed to a single space and the ends stripped."""
    fragments = node.xpath(".//text()")
    joined = "".join(fragments)
    return re.sub(r"\s+", " ", joined).strip()
183,701 | import copy
import json
import logging
import os
import re
from multiprocessing import Pool
import torch
from lxml import html
from torch.utils.data import TensorDataset
from tqdm import tqdm
from transformers import DataProcessor
def get_prop(node, name):
    """Look up property *name* inside the hOCR ``title`` attribute of *node*.

    hOCR packs properties as ``title='key1 args1; key2 args2; ...'``; the
    argument string of the first matching key is returned with surrounding
    double quotes stripped, or None when the key is absent.
    """
    title = node.get("title")
    for prop in title.split(";"):
        key, args = prop.split(None, 1)
        if key == name:
            return args.strip('"')
    return None
183,702 | import copy
import json
import logging
import os
import re
from multiprocessing import Pool
import torch
from lxml import html
from torch.utils.data import TensorDataset
from tqdm import tqdm
from transformers import DataProcessor
logger = logging.getLogger(__name__)
class CdipProcessor(DataProcessor):
    """Processor for the CDIP data set.

    Reads ``labels/<mode>.txt`` ("<file> <label>" per line), extracts OCR
    tokens and normalized word bounding boxes from the per-document hOCR XML
    under ``images/``, and wraps everything as DocExample instances.
    """

    def worker(self, line):
        # One "<file> <label>" line -> [ocr tokens, token bboxes, label].
        # Relies on self.data_dir having been set by get_examples().
        file, label = line.split()
        text, bbox = self.read_hocr_file(self.data_dir, file)
        return [text, bbox, label]

    def get_examples(self, data_dir, mode):
        """Parallel (24-process) variant of example loading for *mode*."""
        self.data_dir = data_dir
        with open(os.path.join(data_dir, "labels", "{}.txt".format(mode))) as f:
            lines = f.readlines()
        examples = []
        with tqdm(lines, desc="Gettting {} examples".format(mode)) as t, Pool(24) as p:
            for example in p.imap(self.worker, lines):
                examples.append(example)
                t.update()
        return self._create_examples(examples, mode)

    def _get_examples(self, data_dir, mode):
        """Sequential fallback equivalent of get_examples()."""
        with open(os.path.join(data_dir, "labels", "{}.txt".format(mode))) as f:
            lines = []
            for line in tqdm(f.readlines(), desc="Gettting {} examples".format(mode)):
                file, label = line.split()
                text, bbox = self.read_hocr_file(data_dir, file)
                lines.append([text, bbox, label])
        return self._create_examples(lines, mode)

    def read_hocr_file(self, data_dir, file):
        """Parse one hOCR XML file into ([word strings], [word bboxes]).

        Bboxes are normalized to the page size and scaled to 0-1000.
        Returns two empty lists when the file cannot be parsed.
        """
        hocr_file = os.path.join(data_dir, "images", file[:-4] + ".xml")
        text_buffer = []
        bbox_buffer = []
        try:
            doc = html.parse(hocr_file)
        except AssertionError:
            logger.warning(
                "%s is empty or its format is unacceptable. Skipped.", hocr_file
            )
            return [], []
        for page in doc.xpath("//*[@class='ocr_page']"):
            page_bbox = [int(x) for x in get_prop(page, "bbox").split()]
            width, height = page_bbox[2], page_bbox[3]
            for word in doc.xpath("//*[@class='ocrx_word']"):
                textnodes = word.xpath(".//text()")
                s = "".join([text for text in textnodes])
                text = re.sub(r"\s+", " ", s).strip()
                if text:
                    text_buffer.append(text)
                    bbox = [int(x) for x in get_prop(word, "bbox").split()]
                    # Normalize to page size, then scale to the 0-1000 grid
                    # LayoutLM expects.
                    bbox = [
                        bbox[0] / width,
                        bbox[1] / height,
                        bbox[2] / width,
                        bbox[3] / height,
                    ]
                    bbox = [int(x * 1000) for x in bbox]
                    bbox_buffer.append(bbox)
        return text_buffer, bbox_buffer

    def get_labels(self):
        # RVL-CDIP has 16 document classes, labelled "0".."15".
        return list(map(str, list(range(16))))

    def _create_examples(self, lines, mode):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (mode, i)
            text = line[0]
            bbox = line[1]
            label = line[2]
            examples.append(
                DocExample(guid=guid, text_a=text, text_b=None, bbox=bbox, label=label)
            )
        return examples
def convert_examples_to_features(
    examples,
    tokenizer,
    max_length=512,
    label_list=None,
    pad_on_left=False,
    pad_token="[PAD]",
    pad_token_id=0,
    pad_token_segment_id=0,
    mask_padding_with_zero=True,
):
    """Convert DocExamples into padded LayoutLM DocFeatures.

    Each word is tokenized into sub-tokens that all inherit the word's
    bounding box; sequences are truncated to ``max_length - 2``, wrapped in
    [CLS]/[SEP] (with bboxes [0,0,0,0] / [1000,1000,1000,1000]) and padded
    to ``max_length`` on the side selected by ``pad_on_left``.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(tqdm(examples)):
        tokens = []
        bboxes = []
        # Documents with no OCR text get a single pad token placeholder so
        # the sequence is never empty.
        if len(example.text_a) == 0:
            bboxes.append([0, 0, 0, 0])
            tokens.append(pad_token)
        for token, bbox in zip(example.text_a, example.bbox):
            sub_tokens = tokenizer.tokenize(token)
            # Every sub-token of a word shares that word's bounding box.
            for sub_token in sub_tokens:
                bboxes.append(bbox)
                tokens.append(sub_token)

        # Reserve two positions for the [CLS]/[SEP] specials added below.
        tokens = tokens[: max_length - 2]
        bboxes = bboxes[: max_length - 2]
        bboxes = [[0, 0, 0, 0]] + bboxes + [[1000, 1000, 1000, 1000]]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_ids = [tokenizer.cls_token_id] + input_ids + [tokenizer.sep_token_id]
        token_type_ids = [0] * len(input_ids)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token_id] * padding_length) + input_ids
            bboxes = ([[0, 0, 0, 0]] * padding_length) + bboxes
            attention_mask = (
                [0 if mask_padding_with_zero else 1] * padding_length
            ) + attention_mask
            token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
        else:
            input_ids = input_ids + ([pad_token_id] * padding_length)
            bboxes = bboxes + ([[0, 0, 0, 0]] * padding_length)
            attention_mask = attention_mask + (
                [0 if mask_padding_with_zero else 1] * padding_length
            )
            token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)

        assert len(input_ids) == max_length, "Error with input length {} vs {}".format(
            len(input_ids), max_length
        )
        assert len(bboxes) == max_length, "Error with input length {} vs {}".format(
            len(bboxes), max_length
        )
        assert (
            len(attention_mask) == max_length
        ), "Error with input length {} vs {}".format(len(attention_mask), max_length)
        assert (
            len(token_type_ids) == max_length
        ), "Error with input length {} vs {}".format(len(token_type_ids), max_length)

        label = label_map[example.label]

        # Log the first few examples for manual sanity checking.
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in bboxes]))
            logger.info(
                "attention_mask: %s" % " ".join([str(x) for x in attention_mask])
            )
            logger.info(
                "token_type_ids: %s" % " ".join([str(x) for x in token_type_ids])
            )
            logger.info("label: %s (id = %d)" % (example.label, label))

        features.append(
            DocFeature(
                input_ids=input_ids,
                bboxes=bboxes,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                label=label,
            )
        )
    return features
def load_and_cache_examples(args, tokenizer, mode="train"):
    """Return a TensorDataset for *mode*, building the feature cache on first use.

    In distributed training only the first process materializes the features;
    the remaining ranks wait at a barrier and then read the cached file.
    """
    if args.local_rank not in [-1, 0] and mode == "train":
        # Non-first ranks block here until rank 0 has written the cache.
        torch.distributed.barrier()

    processor = CdipProcessor()

    # The cache file name encodes the split, model name, and sequence length.
    cached_features_file = os.path.join(
        args.data_dir,
        "cached_{}_{}_{}".format(
            mode,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
        ),
    )
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        examples = processor.get_examples(args.data_dir, mode)
        features = convert_examples_to_features(
            examples,
            tokenizer,
            label_list=label_list,
            max_length=args.max_seq_length,
            pad_on_left=bool(args.model_type in ["xlnet"]),
            # pad on the left for xlnet
            pad_token=tokenizer.pad_token,
            pad_token_id=tokenizer.pad_token_id,
            pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)

    if args.local_rank == 0 and mode == "train":
        # Rank 0 releases the waiting ranks now that the cache exists.
        torch.distributed.barrier()

    def _as_long(rows):
        # All model inputs are integer tensors.
        return torch.tensor(rows, dtype=torch.long)

    # Field order matters: downstream code indexes batches positionally.
    return TensorDataset(
        _as_long([f.input_ids for f in features]),
        _as_long([f.attention_mask for f in features]),
        _as_long([f.token_type_ids for f in features]),
        _as_long([f.label for f in features]),
        _as_long([f.bboxes for f in features]),
    )
183,703 | import logging
import os
import torch
from torch.utils.data import Dataset
class InputExample(object):
    """A single training/test example for token classification."""

    def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size):
        """Constructs an InputExample.

        Args:
            guid: Unique id for the example.
            words: list. The words of the sequence.
            labels: (Optional) list. The labels for each word of the sequence. This should be
            specified for train and dev examples, but not for test examples.
            boxes: list. Per-word bounding boxes, normalized to a 0-1000 scale.
            actual_bboxes: list. Per-word bounding boxes in page pixel coordinates.
            file_name: Name of the source image file the words came from.
            page_size: (width, height) of the page image in pixels.
        """
        self.guid = guid
        self.words = words
        self.labels = labels
        self.boxes = boxes
        self.actual_bboxes = actual_bboxes
        self.file_name = file_name
        self.page_size = page_size
def read_examples_from_file(data_dir, mode):
    """Parse the aligned ``<mode>.txt`` / ``<mode>_box.txt`` / ``<mode>_image.txt``
    files into a list of InputExample objects, one per document.

    The three files are line-aligned: each non-blank line carries one token
    and, respectively, its label, its normalized box, and its pixel box plus
    page size and source file name. A blank (or ``-DOCSTART-``) line closes
    the current document.

    Args:
        data_dir: Directory containing the three aligned text files.
        mode: Split name ("train", "test", ...) used to locate the files
            and to build the example guids.

    Returns:
        list of InputExample.
    """
    file_path = os.path.join(data_dir, "{}.txt".format(mode))
    box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode))
    image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode))
    guid_index = 1
    examples = []
    with open(file_path, encoding="utf-8") as f, open(
        box_file_path, encoding="utf-8"
    ) as fb, open(image_file_path, encoding="utf-8") as fi:
        words = []
        boxes = []
        actual_bboxes = []
        file_name = None
        page_size = None
        labels = []
        for line, bline, iline in zip(f, fb, fi):
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                # Document boundary: flush the accumulated tokens as one example.
                if words:
                    examples.append(
                        InputExample(
                            guid="{}-{}".format(mode, guid_index),
                            words=words,
                            labels=labels,
                            boxes=boxes,
                            actual_bboxes=actual_bboxes,
                            file_name=file_name,
                            page_size=page_size,
                        )
                    )
                    guid_index += 1
                    words = []
                    boxes = []
                    actual_bboxes = []
                    file_name = None
                    page_size = None
                    labels = []
            else:
                splits = line.split("\t")
                bsplits = bline.split("\t")
                isplits = iline.split("\t")
                assert len(splits) == 2
                assert len(bsplits) == 2
                assert len(isplits) == 4
                assert splits[0] == bsplits[0]
                words.append(splits[0])
                if len(splits) > 1:
                    labels.append(splits[-1].replace("\n", ""))
                    box = bsplits[-1].replace("\n", "")
                    box = [int(b) for b in box.split()]
                    boxes.append(box)
                    actual_bbox = [int(b) for b in isplits[1].split()]
                    actual_bboxes.append(actual_bbox)
                    page_size = [int(i) for i in isplits[2].split()]
                    file_name = isplits[3].strip()
                else:
                    # Examples could have no label for mode = "test"
                    labels.append("O")
    if words:
        # Flush a trailing document when the file does not end with a blank line.
        examples.append(
            InputExample(
                # BUG FIX: was `"%s-%d".format(mode, guid_index)`, which leaves
                # the %-placeholders untouched and yields the literal guid
                # "%s-%d"; use str.format placeholders like the branch above.
                guid="{}-{}".format(mode, guid_index),
                words=words,
                labels=labels,
                boxes=boxes,
                actual_bboxes=actual_bboxes,
                file_name=file_name,
                page_size=page_size,
            )
        )
    return examples
183,704 | import logging
import os
import torch
from torch.utils.data import Dataset
logger = logging.getLogger(__name__)
class InputFeatures(object):
    """A single set of features of data for sequence labeling.

    Holds the model inputs for one example: token ids, attention mask,
    segment ids, per-token label ids, normalized (0-1000) boxes, pixel-space
    boxes, and the source image's file name and page size.
    """

    def __init__(
        self,
        input_ids,
        input_mask,
        segment_ids,
        label_ids,
        boxes,
        actual_bboxes,
        file_name,
        page_size,
    ):
        # BUG FIX: the original `assert 0 <= all(boxes) <= 1000` compared the
        # *boolean* all(boxes) against the range, so it could never fail.
        # Validate every coordinate of every box instead.
        assert all(
            0 <= coordinate <= 1000 for box in boxes for coordinate in box
        ), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format(
            boxes
        )
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_ids = label_ids
        self.boxes = boxes
        self.actual_bboxes = actual_bboxes
        self.file_name = file_name
        self.page_size = page_size
The provided code snippet includes necessary dependencies for implementing the `convert_examples_to_features` function. Write a Python function `def convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_segment_id=0, pad_token_label_id=-1, sequence_a_segment_id=0, mask_padding_with_zero=True, )` to solve the following problem:
Loads a data file into a list of `InputBatch`es. `cls_token_at_end` defines the location of the CLS token: False (default, BERT/XLM pattern) gives [CLS] + A + [SEP] + B + [SEP]; True (XLNet/GPT pattern) gives A + [SEP] + B + [SEP] + [CLS]. `cls_token_segment_id` defines the segment id associated with the CLS token (0 for BERT, 2 for XLNet).
Here is the function:
def convert_examples_to_features(
    examples,
    label_list,
    max_seq_length,
    tokenizer,
    cls_token_at_end=False,
    cls_token="[CLS]",
    cls_token_segment_id=1,
    sep_token="[SEP]",
    sep_token_extra=False,
    pad_on_left=False,
    pad_token=0,
    cls_token_box=[0, 0, 0, 0],
    sep_token_box=[1000, 1000, 1000, 1000],
    pad_token_box=[0, 0, 0, 0],
    pad_token_segment_id=0,
    pad_token_label_id=-1,
    sequence_a_segment_id=0,
    mask_padding_with_zero=True,
):
    """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` define the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)

        Each word is split into word pieces; the word's normalized and pixel
        boxes are repeated for every piece, while only the first piece keeps
        the real label id (the rest get `pad_token_label_id` so they are
        ignored by the loss and the metrics).  Sequences are truncated to
        leave room for the special tokens and padded to `max_seq_length`.
        Returns a list of InputFeatures, one per example.
    """
    # Label ids are positions in label_list.
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):
        file_name = example.file_name
        page_size = example.page_size
        width, height = page_size
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d", ex_index, len(examples))

        tokens = []
        token_boxes = []
        actual_bboxes = []
        label_ids = []
        for word, label, box, actual_bbox in zip(
            example.words, example.labels, example.boxes, example.actual_bboxes
        ):
            word_tokens = tokenizer.tokenize(word)
            tokens.extend(word_tokens)
            # Repeat the word-level boxes for every word piece.
            token_boxes.extend([box] * len(word_tokens))
            actual_bboxes.extend([actual_bbox] * len(word_tokens))
            # Use the real label id for the first token of the word, and padding ids for the remaining tokens
            label_ids.extend(
                [label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1)
            )

        # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
        special_tokens_count = 3 if sep_token_extra else 2
        if len(tokens) > max_seq_length - special_tokens_count:
            tokens = tokens[: (max_seq_length - special_tokens_count)]
            token_boxes = token_boxes[: (max_seq_length - special_tokens_count)]
            actual_bboxes = actual_bboxes[: (max_seq_length - special_tokens_count)]
            label_ids = label_ids[: (max_seq_length - special_tokens_count)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0   0   0   0  0     0   0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens += [sep_token]
        token_boxes += [sep_token_box]
        # Special tokens carry the whole-page pixel box.
        actual_bboxes += [[0, 0, width, height]]
        label_ids += [pad_token_label_id]
        if sep_token_extra:
            # roberta uses an extra separator b/w pairs of sentences
            tokens += [sep_token]
            token_boxes += [sep_token_box]
            actual_bboxes += [[0, 0, width, height]]
            label_ids += [pad_token_label_id]
        segment_ids = [sequence_a_segment_id] * len(tokens)

        if cls_token_at_end:
            tokens += [cls_token]
            token_boxes += [cls_token_box]
            actual_bboxes += [[0, 0, width, height]]
            label_ids += [pad_token_label_id]
            segment_ids += [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            token_boxes = [cls_token_box] + token_boxes
            actual_bboxes = [[0, 0, width, height]] + actual_bboxes
            label_ids = [pad_token_label_id] + label_ids
            segment_ids = [cls_token_segment_id] + segment_ids

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = (
                [0 if mask_padding_with_zero else 1] * padding_length
            ) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
            label_ids = ([pad_token_label_id] * padding_length) + label_ids
            token_boxes = ([pad_token_box] * padding_length) + token_boxes
        else:
            input_ids += [pad_token] * padding_length
            input_mask += [0 if mask_padding_with_zero else 1] * padding_length
            segment_ids += [pad_token_segment_id] * padding_length
            label_ids += [pad_token_label_id] * padding_length
            token_boxes += [pad_token_box] * padding_length

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length
        assert len(token_boxes) == max_seq_length

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s", example.guid)
            logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
            logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            logger.info("boxes: %s", " ".join([str(x) for x in token_boxes]))
            logger.info("actual_bboxes: %s", " ".join([str(x) for x in actual_bboxes]))

        features.append(
            InputFeatures(
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                label_ids=label_ids,
                boxes=token_boxes,
                actual_bboxes=actual_bboxes,
                file_name=file_name,
                page_size=page_size,
            )
        )
    return features
183,705 | import argparse
import json
import os
from PIL import Image
from transformers import AutoTokenizer
def bbox_string(box, width, length):
def actual_bbox_string(box, width, length):
def _write_word(fw, fbw, fiw, word, tag, width, length, file_name):
    """Emit one word to the three aligned output streams: token+tag,
    token+normalized box, and token+pixel box / page size / file name."""
    fw.write(word["text"] + "\t" + tag + "\n")
    fbw.write(
        word["text"] + "\t" + bbox_string(word["box"], width, length) + "\n"
    )
    fiw.write(
        word["text"]
        + "\t"
        + actual_bbox_string(word["box"], width, length)
        + "\t"
        + file_name
        + "\n"
    )


def convert(args):
    """Convert FUNSD-style JSON annotations under ``args.data_dir`` into the
    three aligned sequence-labeling files ``<split>.txt.tmp``,
    ``<split>_box.txt.tmp`` and ``<split>_image.txt.tmp`` in
    ``args.output_dir``.

    Entities are tagged BIOES-style (S- for single-word entities, B-/I-/E-
    otherwise, "O" for label "other"); a blank line separates documents.
    The repeated triple-write per word is factored into ``_write_word``.
    """
    with open(
        os.path.join(args.output_dir, args.data_split + ".txt.tmp"),
        "w",
        encoding="utf8",
    ) as fw, open(
        os.path.join(args.output_dir, args.data_split + "_box.txt.tmp"),
        "w",
        encoding="utf8",
    ) as fbw, open(
        os.path.join(args.output_dir, args.data_split + "_image.txt.tmp"),
        "w",
        encoding="utf8",
    ) as fiw:
        for file in os.listdir(args.data_dir):
            file_path = os.path.join(args.data_dir, file)
            with open(file_path, "r", encoding="utf8") as f:
                data = json.load(f)
            # NOTE(review): str.replace swaps *every* occurrence, so this
            # assumes "annotations"/"json" appear only where intended in the
            # path -- confirm for unusual directory names.
            image_path = file_path.replace("annotations", "images")
            image_path = image_path.replace("json", "png")
            file_name = os.path.basename(image_path)
            image = Image.open(image_path)
            width, length = image.size
            for item in data["form"]:
                words, label = item["words"], item["label"]
                # Drop words with empty text; skip entities left empty.
                words = [w for w in words if w["text"].strip() != ""]
                if len(words) == 0:
                    continue
                if label == "other":
                    for w in words:
                        _write_word(fw, fbw, fiw, w, "O", width, length, file_name)
                elif len(words) == 1:
                    # Single-word entity.
                    _write_word(
                        fw, fbw, fiw, words[0], "S-" + label.upper(),
                        width, length, file_name,
                    )
                else:
                    _write_word(
                        fw, fbw, fiw, words[0], "B-" + label.upper(),
                        width, length, file_name,
                    )
                    for w in words[1:-1]:
                        _write_word(
                            fw, fbw, fiw, w, "I-" + label.upper(),
                            width, length, file_name,
                        )
                    _write_word(
                        fw, fbw, fiw, words[-1], "E-" + label.upper(),
                        width, length, file_name,
                    )
            # Blank line marks the end of this document in all three files.
            fw.write("\n")
            fbw.write("\n")
            fiw.write("\n")
183,706 | import argparse
import json
import os
from PIL import Image
from transformers import AutoTokenizer
def seg_file(file_path, tokenizer, max_len):
    """Re-segment a ``*.tmp`` sequence file so no block exceeds ``max_len``
    subword tokens, writing the result to the same path minus the ``.tmp``
    suffix.  Blank input lines (document boundaries) reset the count."""
    out_path = file_path[:-4]  # strip the ".tmp" suffix
    running_subwords = 0
    with open(file_path, "r", encoding="utf8") as src, open(
        out_path, "w", encoding="utf8"
    ) as dst:
        for raw_line in src:
            stripped = raw_line.rstrip()
            if not stripped:
                # Document boundary: keep the blank line, restart the count.
                dst.write("\n")
                running_subwords = 0
                continue
            word = stripped.split("\t")[0]
            n_subwords = len(tokenizer.tokenize(word))
            # Token contains strange control characters like \x96 or \x95
            # that tokenize to nothing -- drop the whole line in that case.
            if n_subwords == 0:
                continue
            if running_subwords + n_subwords > max_len:
                # Would overflow the block: start a fresh one before this line.
                dst.write("\n" + stripped + "\n")
                running_subwords = n_subwords
            else:
                running_subwords += n_subwords
                dst.write(stripped + "\n")
def seg(args):
    """Re-segment the three aligned ``.tmp`` files produced by ``convert``
    (text, box, image) with one shared tokenizer, keeping them line-aligned."""
    tokenizer = AutoTokenizer.from_pretrained(
        args.model_name_or_path, do_lower_case=True
    )
    for suffix in (".txt.tmp", "_box.txt.tmp", "_image.txt.tmp"):
        seg_file(
            os.path.join(args.output_dir, args.data_split + suffix),
            tokenizer,
            args.max_len,
        )
183,707 | from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import shutil
import numpy as np
import torch
from seqeval.metrics import (
classification_report,
f1_score,
precision_score,
recall_score,
)
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForTokenClassification,
BertTokenizer,
RobertaConfig,
RobertaForTokenClassification,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from layoutlm import FunsdDataset, LayoutlmConfig, LayoutlmForTokenClassification
def get_labels(path):
    """Load the label set from *path* (one label per line), ensuring the
    outside tag "O" is always present (prepended when missing)."""
    with open(path, "r") as label_file:
        labels = label_file.read().splitlines()
    return labels if "O" in labels else ["O"] + labels
183,708 | from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import shutil
import numpy as np
import torch
from seqeval.metrics import (
classification_report,
f1_score,
precision_score,
recall_score,
)
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForTokenClassification,
BertTokenizer,
RobertaConfig,
RobertaForTokenClassification,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from layoutlm import FunsdDataset, LayoutlmConfig, LayoutlmForTokenClassification
logger = logging.getLogger(__name__)
def set_seed(args):
    """Seed every RNG (python, numpy, torch, and CUDA when GPUs are in use)
    from ``args.seed`` so runs are reproducible."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        # Only touch CUDA when GPUs are actually used.
        torch.cuda.manual_seed_all(seed)
def collate_fn(data):
    """Batch a list of per-example tuples: ``torch.stack`` every field
    except the final two, which stay as plain tuples (non-tensor metadata)."""
    fields = list(zip(*data))
    n_stacked = len(fields) - 2
    merged = [
        torch.stack(field, 0) if idx < n_stacked else field
        for idx, field in enumerate(fields)
    ]
    return tuple(merged)
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
    """Evaluate a token-classification model on one FUNSD split.

    Args:
        args: Parsed command-line namespace (device, batch sizes, model_type, ...).
        model: Model to evaluate (may be DataParallel-wrapped).
        tokenizer: Tokenizer matching the model checkpoint.
        labels: Ordered list of label strings; positions define the label ids.
        pad_token_label_id: Label id marking padding / continuation-subword
            positions; such positions are excluded from the metrics.
        mode: Dataset split to load (e.g. "dev" or "test").
        prefix: Tag included in the log output (e.g. a checkpoint name).

    Returns:
        ``(results, preds_list)``: a metrics dict (loss/precision/recall/f1)
        and the predicted label sequence for each example.
    """
    eval_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode=mode)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(
        eval_dataset,
        sampler=eval_sampler,
        batch_size=args.eval_batch_size,
        collate_fn=None,
    )

    # Eval!
    logger.info("***** Running evaluation %s *****", prefix)
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0].to(args.device),
                "attention_mask": batch[1].to(args.device),
                "labels": batch[3].to(args.device),
            }
            if args.model_type in ["layoutlm"]:
                inputs["bbox"] = batch[4].to(args.device)
            inputs["token_type_ids"] = (
                batch[2].to(args.device)
                if args.model_type in ["bert", "layoutlm"]
                else None
            )  # RoBERTa doesn't use segment_ids
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]

            if args.n_gpu > 1:
                tmp_eval_loss = (
                    tmp_eval_loss.mean()
                )  # mean() to average on multi-gpu parallel evaluating

            eval_loss += tmp_eval_loss.item()
        nb_eval_steps += 1
        # Accumulate logits and gold labels across batches on the CPU.
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(
                out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0
            )

    eval_loss = eval_loss / nb_eval_steps
    # Highest-scoring label id per token position.
    preds = np.argmax(preds, axis=2)

    label_map = {i: label for i, label in enumerate(labels)}

    out_label_list = [[] for _ in range(out_label_ids.shape[0])]
    preds_list = [[] for _ in range(out_label_ids.shape[0])]

    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            # Skip padding / continuation-subword positions.
            if out_label_ids[i, j] != pad_token_label_id:
                out_label_list[i].append(label_map[out_label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])

    results = {
        "loss": eval_loss,
        "precision": precision_score(out_label_list, preds_list),
        "recall": recall_score(out_label_list, preds_list),
        "f1": f1_score(out_label_list, preds_list),
    }

    report = classification_report(out_label_list, preds_list)
    logger.info("\n" + report)

    logger.info("***** Eval results %s *****", prefix)
    for key in sorted(results.keys()):
        logger.info(" %s = %s", key, str(results[key]))

    return results, preds_list
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train( # noqa C901 args, train_dataset, model, tokenizer, labels, pad_token_label_id )` to solve the following problem:
Train the model
Here is the function:
def train(  # noqa C901
    args, train_dataset, model, tokenizer, labels, pad_token_label_id
):
    """ Train the model.

    Runs the fine-tuning loop: AdamW with linear warmup/decay, optional fp16
    via apex, DataParallel / DistributedDataParallel, gradient accumulation,
    periodic evaluation + TensorBoard logging, and periodic checkpointing.

    Returns:
        (global_step, average training loss over the run).
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter(logdir="runs/" + os.path.basename(args.output_dir))

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = (
        RandomSampler(train_dataset)
        if args.local_rank == -1
        else DistributedSampler(train_dataset)
    )
    train_dataloader = DataLoader(
        train_dataset,
        sampler=train_sampler,
        batch_size=args.train_batch_size,
        collate_fn=None,
    )

    # If max_steps is set it wins; derive the epoch count from it instead.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = (
            args.max_steps
            // (len(train_dataloader) // args.gradient_accumulation_steps)
            + 1
        )
    else:
        t_total = (
            len(train_dataloader)
            // args.gradient_accumulation_steps
            * args.num_train_epochs
        )

    # Prepare optimizer and schedule (linear warmup and decay)
    # Biases and LayerNorm weights are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(
        optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
    )
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )
        model, optimizer = amp.initialize(
            model, optimizer, opt_level=args.fp16_opt_level
        )

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )

    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(
        " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
    )
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)

    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    set_seed(args)  # Added here for reproductibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(
            train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]
        )
        for step, batch in enumerate(epoch_iterator):
            model.train()
            inputs = {
                "input_ids": batch[0].to(args.device),
                "attention_mask": batch[1].to(args.device),
                "labels": batch[3].to(args.device),
            }
            if args.model_type in ["layoutlm"]:
                inputs["bbox"] = batch[4].to(args.device)
            inputs["token_type_ids"] = (
                batch[2].to(args.device) if args.model_type in ["bert", "layoutlm"] else None
            )  # RoBERTa doesn't use segment_ids

            outputs = model(**inputs)
            # model outputs are always tuple in pytorch-transformers (see doc)
            loss = outputs[0]

            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            tr_loss += loss.item()
            # Only step the optimizer every gradient_accumulation_steps batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(
                        amp.master_params(optimizer), args.max_grad_norm
                    )
                else:
                    torch.nn.utils.clip_grad_norm_(
                        model.parameters(), args.max_grad_norm
                    )

                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1

                if (
                    args.local_rank in [-1, 0]
                    and args.logging_steps > 0
                    and global_step % args.logging_steps == 0
                ):
                    # Log metrics
                    if (
                        args.local_rank in [-1, 0] and args.evaluate_during_training
                    ):  # Only evaluate when single GPU otherwise metrics may not average well
                        results, _ = evaluate(
                            args,
                            model,
                            tokenizer,
                            labels,
                            pad_token_label_id,
                            mode="dev",
                        )
                        for key, value in results.items():
                            tb_writer.add_scalar(
                                "eval_{}".format(key), value, global_step
                            )
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar(
                        "loss",
                        (tr_loss - logging_loss) / args.logging_steps,
                        global_step,
                    )
                    logging_loss = tr_loss

                if (
                    args.local_rank in [-1, 0]
                    and args.save_steps > 0
                    and global_step % args.save_steps == 0
                ):
                    # Save model checkpoint
                    output_dir = os.path.join(
                        args.output_dir, "checkpoint-{}".format(global_step)
                    )
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step
183,709 | from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForSequenceClassification,
BertTokenizerFast,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from layoutlm import LayoutlmConfig, LayoutlmForSequenceClassification
from layoutlm.data.rvl_cdip import CdipProcessor, load_and_cache_examples
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
def set_seed(args):
    """Make runs reproducible by seeding all random number generators
    (python, numpy, torch, plus CUDA when GPUs are in use)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def evaluate(args, model, tokenizer, mode, prefix=""):
    """Evaluate a document-classification model on one RVL-CDIP split.

    Computes accuracy over the split, writes ``<mode>_results.txt`` (metrics)
    and ``<mode>_compare.txt`` (prediction vs gold per example) under
    ``args.output_dir/prefix``, and returns the metrics dict.

    Args:
        args: Parsed command-line namespace (device, batch sizes, model_type, ...).
        model: Model to evaluate.
        tokenizer: Tokenizer matching the model checkpoint.
        mode: Dataset split to load (e.g. "val" or "test").
        prefix: Subdirectory / tag for the output files (e.g. checkpoint name).
    """
    results = {}
    eval_dataset = load_and_cache_examples(args, tokenizer, mode=mode)

    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(
        eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size
    )

    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        model.eval()
        if args.model_type != "layoutlm":
            # Non-LayoutLM models take no bbox field; drop it from the batch.
            batch = batch[:4]
        batch = tuple(t.to(args.device) for t in batch)

        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "labels": batch[3],
            }
            if args.model_type == "layoutlm":
                inputs["bbox"] = batch[4]
            inputs["token_type_ids"] = (
                batch[2] if args.model_type in ["bert", "layoutlm"] else None
            )  # RoBERTa doesn't use segment_ids
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]

            eval_loss += tmp_eval_loss.mean().item()
        nb_eval_steps += 1
        # Accumulate logits and gold labels across batches on the CPU.
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(
                out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0
            )

    eval_loss = eval_loss / nb_eval_steps
    # Highest-scoring class per document.
    preds = np.argmax(preds, axis=1)

    result = {"acc": simple_accuracy(preds=preds, labels=out_label_ids)}
    results.update(result)

    output_eval_file = os.path.join(
        args.output_dir, prefix, "{}_results.txt".format(mode)
    )
    with open(output_eval_file, "w") as writer:
        logger.info("***** {} results {} *****".format(mode, prefix))
        for key in sorted(result.keys()):
            logger.info(" %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))
    output_eval_file = os.path.join(
        args.output_dir, prefix, "{}_compare.txt".format(mode)
    )
    with open(output_eval_file, "w") as writer:
        for p, l in zip(preds, out_label_ids):
            writer.write("%s %s\n" % (p, l))

    return results
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, train_dataset, model, tokenizer)` to solve the following problem:
Train the model
Here is the function:
def train(args, train_dataset, model, tokenizer): # noqa C901
    """Train ``model`` on ``train_dataset``.

    Supports single-GPU, ``DataParallel`` multi-GPU, ``DistributedDataParallel``
    and apex fp16 training, with periodic TensorBoard logging, optional
    in-training evaluation, and checkpointing every ``args.save_steps``
    optimizer updates.

    Returns:
        tuple: ``(global_step, average_training_loss)``.
    """
    # Only the main process writes TensorBoard logs.
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter(comment="_" + os.path.basename(args.output_dir))
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = (
        RandomSampler(train_dataset)
        if args.local_rank == -1
        else DistributedSampler(train_dataset)
    )
    train_dataloader = DataLoader(
        train_dataset, sampler=train_sampler, batch_size=args.train_batch_size
    )
    # Derive the total number of optimizer updates; --max_steps overrides the
    # epoch count when positive.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = (
            args.max_steps
            // (len(train_dataloader) // args.gradient_accumulation_steps)
            + 1
        )
    else:
        t_total = (
            len(train_dataloader)
            // args.gradient_accumulation_steps
            * args.num_train_epochs
        )
    # Prepare optimizer and schedule (linear warmup and decay).
    # Biases and LayerNorm weights are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(
        optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
    )
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    if args.fp16:
        # apex is imported lazily so the dependency stays optional.
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )
        model, optimizer = amp.initialize(
            model, optimizer, opt_level=args.fp16_opt_level
        )
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(
        " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
    )
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    set_seed(args) # Added here for reproducibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(
            train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]
        )
        for step, batch in enumerate(epoch_iterator):
            model.train()
            # Non-layoutlm models only consume the first four tensors
            # (input_ids, attention_mask, token_type_ids, labels).
            if args.model_type != "layoutlm":
                batch = batch[:4]
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "labels": batch[3],
            }
            if args.model_type == "layoutlm":
                inputs["bbox"] = batch[4]
            inputs["token_type_ids"] = (
                batch[2] if args.model_type in ["bert", "layoutlm"] else None
            ) # RoBERTa does not use segment_ids
            outputs = model(**inputs)
            loss = outputs[
                0
            ] # model outputs are always tuple in transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(
                    amp.master_params(optimizer), args.max_grad_norm
                )
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            tr_loss += loss.item()
            # Step the optimizer only on accumulation boundaries.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()
                scheduler.step() # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if (
                    args.local_rank in [-1, 0]
                    and args.logging_steps > 0
                    and global_step % args.logging_steps == 0
                ):
                    # Log metrics
                    if (
                        args.local_rank == -1 and args.evaluate_during_training
                    ): # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer, "val")
                        for key, value in results.items():
                            tb_writer.add_scalar(
                                "eval_{}".format(key), value, global_step
                            )
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar(
                        "loss",
                        (tr_loss - logging_loss) / args.logging_steps,
                        global_step,
                    )
                    logging_loss = tr_loss
                if (
                    args.local_rank in [-1, 0]
                    and args.save_steps > 0
                    and global_step % args.save_steps == 0
                ):
                    # Save model checkpoint
                    output_dir = os.path.join(
                        args.output_dir, "checkpoint-{}".format(global_step)
                    )
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    ) # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    tokenizer.save_pretrained(output_dir)
                    logger.info("Saving model checkpoint to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    # NOTE(review): raises ZeroDivisionError if no optimizer step ever ran
    # (global_step == 0, e.g. dataloader shorter than accumulation steps).
    return global_step, tr_loss / global_step
183,710 | import json
from PIL import Image
import random
import string
from tqdm import tqdm
import string
import argparse
import logging
import math
import os
import random
from pathlib import Path
from PIL import Image
import accelerate
import datasets
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.state import AcceleratorState
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from transformers.utils import ContextManagers
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments for the text-to-image training script.

    After parsing, mirrors the ``LOCAL_RANK`` environment variable into
    ``args.local_rank`` (for ``torch.distributed.launch`` compatibility) and
    defaults ``non_ema_revision`` to ``revision`` when unset.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--input_pertubation", type=float, default=0, help="The scale of input pretubation. Recommended 0.1."
    )
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--vis_num",
        type=int,
        default=16,
        help="The number of images to be visualized during training."
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    # --- Dataset / data-loading options -------------------------------------
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help=(
            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument(
        "--train_data_dir",
        type=str,
        default=None,
        help=(
            "A folder containing the training data. Folder contents must follow the structure described in"
            " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
            " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
        ),
    )
    parser.add_argument(
        "--image_column", type=str, default="image", help="The column of the dataset containing an image."
    )
    parser.add_argument(
        "--caption_column",
        type=str,
        default="text",
        help="The column of the dataset containing a caption or a list of captions.",
    )
    parser.add_argument(
        "--max_train_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        ),
    )
    parser.add_argument(
        "--validation_prompts",
        type=str,
        default=None,
        nargs="+",
        help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."),
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="sd-model-finetuned",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    # --- Optimization options -----------------------------------------------
    parser.add_argument(
        "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=100)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform.  If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--snr_gamma",
        type=float,
        default=None,
        help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
        "More details here: https://arxiv.org/abs/2303.09556.",
    )
    parser.add_argument(
        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument(
        "--index_file_path",
        type=str,
        default='/home/jingyechen/jingyechen/amlt_test/diffusers_combine/examples/text_to_image/train_dataset_index.txt',
        help="The txt file that provides the index of training samples. The format of each line should be XXXXX_XXXXXXXXX."
    )
    parser.add_argument(
        "--dataset_path",
        type=str,
        default='/path/to/laion-ocr-select',
        required=True,
        help="the root of the dataset, please follow the code in textdiffuser-1"
    )
    parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
    parser.add_argument(
        "--non_ema_revision",
        type=str,
        default=None,
        required=False,
        help=(
            "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or"
            " remote repository specified with --pretrained_model_name_or_path."
        ),
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
        ),
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    # --- Hub / logging / checkpointing options ------------------------------
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10.and an Nvidia Ampere GPU.  Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=10,
        help=(
            "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
            " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
            " for more docs"
        ),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
    )
    parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
    parser.add_argument(
        "--validation_epochs",
        type=int,
        default=5,
        help="Run validation every X epochs.",
    )
    parser.add_argument(
        "--tracker_project_name",
        type=str,
        default="text2image-fine-tune",
        help=(
            "The `project_name` argument passed to Accelerator.init_trackers for"
            " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
        ),
    )
    # --- TextDiffuser-2 specific options ------------------------------------
    parser.add_argument(
        "--max_length",
        default=77,
        type=int,
        help="Maximum length of the prompt. Can enlarge this value to adapt longer coord representation."
    )
    parser.add_argument(
        "--granularity",
        type=int,
        default=128,
        help="The granularity of coordinates, ranging from 1~512."
    )
    parser.add_argument(
        "--coord_mode",
        type=str,
        default='lt',
        choices=['lt', 'center', 'ltrb'],
        help="The way to represent coordinates. Can use one point or two points"
    )
    parser.add_argument(
        "--vis_interval",
        type=int,
        default=1000,
        help="The interval for visualization."
    )
    args = parser.parse_args()
    # torch.distributed.launch exports LOCAL_RANK; it takes precedence over
    # the --local_rank flag.
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank
    # default to using the same revision for the non-ema model if not specified
    if args.non_ema_revision is None:
        args.non_ema_revision = args.revision
    return args
183,711 | import json
from PIL import Image
import random
import string
from tqdm import tqdm
import string
import argparse
import logging
import math
import os
import random
from pathlib import Path
from PIL import Image
import accelerate
import datasets
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.state import AcceleratorState
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from transformers.utils import ContextManagers
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
def check_merge(box1, box2):
x_center1, y_center1, x_min1, y_min1, x_max1, y_max1, pred1 = box1
x_center2, y_center2, x_min2, y_min2, x_max2, y_max2, pred2 = box2
if y_center1 >= y_min2 and y_center1 <= y_max2:
if y_center2 >= y_min1 and y_center2 <= y_max1:
pass
else:
return False
else:
return False
distance1 = x_max2 - x_min1
distance2 = (x_max2 - x_min2) + (x_max1 - x_min1)
if distance2 / distance1 >= 0.8:
if x_min1 < x_min2:
pred = pred1 + ' ' + pred2
else:
pred = pred2 + ' ' + pred1
x_min = min(x_min1, x_min2)
y_min = min(y_min1, y_min2)
x_max = max(x_max1, x_max2)
y_max = max(y_max1, y_max2)
x_center = (x_min + x_max) // 2
y_center = (y_min + y_max) // 2
return [x_center, y_center, x_min, y_min, x_max, y_max, pred]
else:
return False
def merge_boxes(boxes):
    """Repeatedly merge same-line word boxes until no pair can be merged.

    Consumes (mutates) the input list and returns a new list of the
    surviving, fully merged boxes.
    """
    merged = []
    while boxes:
        current = boxes.pop(0)
        partner, combined = None, None
        for candidate in boxes:
            combined = check_merge(current, candidate)
            if combined:
                partner = candidate
                break
        if partner is None:
            # Nothing merges with `current`; it is final.
            merged.append(current)
        else:
            # Replace the pair with their union and re-sort left-to-right,
            # then restart the scan from the leftmost remaining box.
            boxes.remove(partner)
            boxes.append(combined)
            boxes = sorted(boxes, key=lambda box: box[0])
    return merged
183,712 | import os
import cv2
import random
import logging
import argparse
import numpy as np
import time
from pathlib import Path
from tqdm.auto import tqdm
from typing import Optional
from packaging import version
from PIL import Image
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import string
import datasets
from datasets import disable_caching
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torchvision import transforms
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments for the inference / sampling script.

    After parsing, prints the arguments and mirrors the ``LOCAL_RANK``
    environment variable into ``args.local_rank`` for
    ``torch.distributed.launch`` compatibility.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default='runwayml/stable-diffusion-v1-5', # no need to modify this
        help="Path to pretrained model or model identifier from huggingface.co/models. Please do not modify this.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=None,
        help="A seed for reproducible training."
    )
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--drop_caption",
        action="store_true",
        help="Whether to drop captions during training following https://arxiv.org/abs/2207.12598.."
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help="Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the model to the Hub."
    )
    parser.add_argument(
        "--hub_token",
        type=str,
        default=None,
        help="The token to use to push to the Model Hub."
    )
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default='fp16',
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10.and an Nvidia Ampere GPU.  Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument(
        "--local_rank",
        type=int,
        default=-1,
        help="For distributed training: local_rank"
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=5,
        help=(
            "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
            " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
            " for more docs"
        ),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None, # should be specified during inference
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention",
        action="store_true",
        help="Whether or not to use xformers."
    )
    #### newly added parameters
    parser.add_argument(
        "--granularity",
        type=int,
        default=128,
        help="The granularity of coordinates, ranging from 1~512."
    )
    parser.add_argument(
        "--coord_mode",
        type=str,
        default='lt',
        choices=['lt', 'center', 'ltrb'],
        help="The way to represent coordinates."
    )
    parser.add_argument(
        "--max_length",
        default=77,
        type=int,
        help="Maximum length of the composed prompt."
    )
    parser.add_argument(
        "--cfg",
        default=7,
        type=float,
        help="classifier free guidance."
    )
    parser.add_argument(
        "--sample_steps",
        default=50,
        type=int,
        help="steps for sampling for diffusion models."
    )
    parser.add_argument(
        "--input_format",
        required=True,
        type=str,
        help="specify the input format",
        choices=['prompt', 'prompts_txt_file', 'prompt_layout_txt_file']
    )
    parser.add_argument(
        "--input_prompt",
        type=str,
    )
    parser.add_argument(
        "--input_file",
        type=str,
    )
    parser.add_argument(
        "--prompts_txt_file",
        type=str,
    )
    parser.add_argument(
        "--m1_model_path",
        type=str,
        help="the checkpoint of layout planner"
    )
    parser.add_argument(
        "--vis_num",
        type=int,
        default=16,
        help=("The number of images to be visualized."),
    )
    args = parser.parse_args()
    # NOTE(review): args are printed before the LOCAL_RANK override below, so
    # the printed local_rank may differ from the effective one.
    print(args)
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank
    return args
183,713 | import os
import cv2
import random
import logging
import argparse
import numpy as np
import time
from pathlib import Path
from tqdm.auto import tqdm
from typing import Optional
from packaging import version
from PIL import Image
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import string
import datasets
from datasets import disable_caching
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torchvision import transforms
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """Build the fully-qualified Hub repository name ``namespace/model_id``.

    Falls back to the locally cached Hub token when none is given, and to
    the token owner's username when no organization is specified.
    """
    resolved_token = HfFolder.get_token() if token is None else token
    if organization is not None:
        return f"{organization}/{model_id}"
    username = whoami(resolved_token)["name"]
    return f"{username}/{model_id}"
183,714 | import argparse
import logging
import math
import os
import random
import shutil
from pathlib import Path
import glob
import json
import datasets
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset, Dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from PIL import Image
import string
def save_model_card(repo_id: str, images=None, base_model: str = "", dataset_name: str = "", repo_folder=None):
    """Write a LoRA model-card ``README.md`` (plus sample images) into *repo_folder*.

    Args:
        repo_id: Hub repository id shown in the card title.
        images: optional list of PIL images; each is saved as ``image_{i}.png``.
        base_model: id of the base checkpoint the LoRA weights were tuned from.
            (Previously declared as ``base_model=str`` — the ``str`` *type* as a
            default value — which rendered as ``<class 'str'>`` in the card.)
        dataset_name: name of the fine-tuning dataset (same defect, now fixed).
        repo_folder: destination directory; must already exist.
    """
    img_str = ""
    # `images=None` previously crashed in enumerate(); treat None as "no images".
    for i, image in enumerate(images or []):
        image.save(os.path.join(repo_folder, f"image_{i}.png"))
        # NOTE(review): only a newline is appended here — the markdown image
        # link (e.g. ) appears to have been lost; confirm intent.
        img_str += f"\n"
    yaml = f"""
---
license: creativeml-openrail-m
base_model: {base_model}
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- lora
inference: true
---
"""
    model_card = f"""
# LoRA text2image fine-tuning - {repo_id}
These are LoRA adaption weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images in the following. \n
{img_str}
"""
    with open(os.path.join(repo_folder, "README.md"), "w") as f:
        f.write(yaml + model_card)
183,715 | import argparse
import logging
import math
import os
import random
import shutil
from pathlib import Path
import glob
import json
import datasets
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset, Dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from PIL import Image
import string
def check_merge(box1, box2):
    """Try to merge two OCR word boxes that lie on the same text line.

    Each box is ``[x_center, y_center, x_min, y_min, x_max, y_max, pred]``
    where ``pred`` is the recognized text.  Two boxes merge when each box's
    vertical center falls inside the other's ``[y_min, y_max]`` range (same
    line) and the horizontal gap between them is small relative to their
    combined widths.

    Returns:
        list: the merged 7-element box (texts joined left-to-right), or
        ``False`` when the boxes should not be merged.
    """
    x_center1, y_center1, x_min1, y_min1, x_max1, y_max1, pred1 = box1
    x_center2, y_center2, x_min2, y_min2, x_max2, y_max2, pred2 = box2
    # Require mutual vertical-center containment (same text line).
    if not (y_min2 <= y_center1 <= y_max2 and y_min1 <= y_center2 <= y_max1):
        return False
    # distance1: horizontal span from box1's left edge to box2's right edge.
    # distance2: sum of the two widths.  A ratio >= 0.8 means the boxes cover
    # most of the span, i.e. the gap between them is small.
    distance1 = x_max2 - x_min1
    distance2 = (x_max2 - x_min2) + (x_max1 - x_min1)
    # distance1 <= 0 means box2 ends at or before box1's left edge; the
    # unguarded division previously raised ZeroDivisionError when the span
    # was exactly zero.
    if distance1 <= 0 or distance2 / distance1 < 0.8:
        return False
    # Join texts in left-to-right reading order.
    pred = pred1 + ' ' + pred2 if x_min1 < x_min2 else pred2 + ' ' + pred1
    x_min = min(x_min1, x_min2)
    y_min = min(y_min1, y_min2)
    x_max = max(x_max1, x_max2)
    y_max = max(y_max1, y_max2)
    # Integer centers via floor division, matching the original convention.
    x_center = (x_min + x_max) // 2
    y_center = (y_min + y_max) // 2
    return [x_center, y_center, x_min, y_min, x_max, y_max, pred]
def merge_boxes(boxes):
    """Repeatedly merge same-line word boxes until no pair can be merged.

    Consumes (mutates) the input list and returns a new list of the
    surviving, fully merged boxes.
    """
    merged = []
    while boxes:
        current = boxes.pop(0)
        partner, combined = None, None
        for candidate in boxes:
            combined = check_merge(current, candidate)
            if combined:
                partner = candidate
                break
        if partner is None:
            # Nothing merges with `current`; it is final.
            merged.append(current)
        else:
            # Replace the pair with their union and re-sort left-to-right,
            # then restart the scan from the leftmost remaining box.
            boxes.remove(partner)
            boxes.append(combined)
            boxes = sorted(boxes, key=lambda box: box[0])
    return merged
183,716 | import argparse
import logging
import math
import os
import random
import shutil
from pathlib import Path
import glob
import json
import datasets
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset, Dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from PIL import Image
import string
def parse_args():
    """Parse command-line arguments for LoRA fine-tuning of TextDiffuser-2.

    Returns the populated ``argparse.Namespace``. A ``LOCAL_RANK``
    environment variable (set by distributed launchers) overrides
    ``--local_rank``, and a sanity check ensures that either a dataset
    name or a training folder was supplied.
    """
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default='lambdalabs/pokemon-blip-captions',
        help=(
            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The config of the Dataset, leave as None if there's only one config.",
    )
    parser.add_argument(
        "--train_data_dir",
        type=str,
        default=None,
        help=(
            "A folder containing the training data. Folder contents must follow the structure described in"
            " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
            " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
        ),
    )
    parser.add_argument(
        "--image_column", type=str, default="image", help="The column of the dataset containing an image."
    )
    parser.add_argument(
        "--caption_column",
        type=str,
        default="text",
        help="The column of the dataset containing a caption or a list of captions.",
    )
    parser.add_argument(
        "--max_train_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        ),
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="sd-model-finetuned-lora",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--center_crop",
        default=False,
        action="store_true",
        help=(
            "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
            " cropped. The images will be resized to the resolution first before cropping."
        ),
    )
    parser.add_argument(
        "--random_flip",
        action="store_true",
        help="whether to randomly flip images horizontally",
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=100)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4, #### lora is trained with higher lr
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--text_encoder_learning_rate",
        type=float,
        default=1e-5, #### the text encoder is trained with lower lr to avoid the forgetting
        help="Initial learning rate for the text encoder (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--snr_gamma",
        type=float,
        default=None,
        help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
        "More details here: https://arxiv.org/abs/2303.09556.",
    )
    parser.add_argument(
        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
        ),
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--prediction_type",
        type=str,
        default=None,
        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.",
    )
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=2500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=10, # should be decreased for saving space
        help=("Max number of checkpoints to store."),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
    )
    parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
    parser.add_argument(
        "--rank",
        type=int,
        default=4,
        help=("The dimension of the LoRA update matrices."),
    )
    parser.add_argument(
        "--vis_num",
        type=int,
        default=16,
        # fixed: help text was copy-pasted from --rank ("The dimension of the
        # LoRA update matrices"); matches the sibling training script's wording.
        help=("The number of images to be visualized during training."),
    )
    parser.add_argument(
        "--vis_interval",
        type=int,
        default=1000,
        help="The interval for visualization."
    )
    #### newly added parameters
    parser.add_argument(
        "--granularity",
        type=int,
        default=128, #### limit the coord range to 0~128 will make the feature space compact
        help="The granularity of coordinates, ranging from 1~512."
    )
    parser.add_argument(
        "--coord_mode",
        type=str,
        default='lt',
        choices=['lt', 'center', 'ltrb'], #### l, t, r, b stand for left, top, right, bottom
        help="The way to represent coordinates"
    )
    parser.add_argument(
        "--drop_coord", #### not used in the experiment. model is hard to train without the coord guidance
        action='store_true',
        help="Whether to drop coord during training. Add more diversity."
    )
    parser.add_argument(
        "--max_length",
        default=77, #### enlarge the context length of text encoder. empirically, enlarging the context length can proceed longer sequence. However, we observe that it will be hard to render general objects
        type=int,
        help="Maximum length of the composed prompt"
    )
    parser.add_argument(
        "--index_file_path",
        type=str,
        default='/path/to/train_dataset_index.txt',
        required=True,
        help="The path of data index file, each line should follow the format 00123_0012300567 ...."
    )
    parser.add_argument(
        "--dataset_path",
        type=str,
        default='/path/to/laion-ocr-select',
        required=True,
        help="the root of the dataset, please follow the code in textdiffuser-1"
    )
    ######################################################################
    args = parser.parse_args()
    # Distributed launchers export LOCAL_RANK; it takes precedence over the flag.
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank
    # Sanity checks
    if args.dataset_name is None and args.train_data_dir is None:
        raise ValueError("Need either a dataset name or a training folder.")
    return args
183,717 | import os
import cv2
import math
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from typing import Optional
from packaging import version
from collections import OrderedDict
from PIL import Image, ImageDraw, ImageFont
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torchvision import transforms
import datasets
import transformers
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
from termcolor import colored
import string
def parse_args():
    """Parse command-line arguments for full fine-tuning of TextDiffuser-2.

    Returns the populated ``argparse.Namespace``. A ``LOCAL_RANK``
    environment variable (set by distributed launchers) overrides
    ``--local_rank``, and ``--non_ema_revision`` falls back to
    ``--revision`` when not given explicitly.
    """
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default='runwayml/stable-diffusion-v1-5',
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--max_train_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        ),
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="sd-model-finetuned",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=None,
        help="A seed for reproducible training."
    )
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--train_batch_size",
        type=int,
        default=16,
        help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument(
        "--num_train_epochs",
        type=int,
        default=2
    )
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps",
        type=int,
        default=0,
        help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--use_8bit_adam",
        action="store_true",
        help="Whether or not to use 8-bit Adam from bitsandbytes."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default='MARIO-10M',
        help=(
            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument(
        "--use_ema",
        action="store_true",
        help="Whether to use EMA model."
    )
    parser.add_argument(
        "--segmentation_mask_aug",
        action="store_true",
        help="Whether to augment the segmentation masks (inspired by https://arxiv.org/abs/2211.13227)."
    )
    parser.add_argument(
        "--non_ema_revision",
        type=str,
        default=None,
        required=False,
        help=(
            "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or"
            " remote repository specified with --pretrained_model_name_or_path."
        ),
    )
    parser.add_argument(
        "--image_column",
        type=str,
        default="image",
        help="The column of the dataset containing an image."
    )
    parser.add_argument(
        "--caption_column",
        type=str,
        default="text",
        help="The column of the dataset containing a caption or a list of captions.",
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
        ),
    )
    parser.add_argument(
        "--adam_beta1",
        type=float,
        default=0.9,
        help="The beta1 parameter for the Adam optimizer."
    )
    parser.add_argument(
        "--adam_beta2",
        type=float,
        default=0.999,
        help="The beta2 parameter for the Adam optimizer."
    )
    parser.add_argument(
        "--adam_weight_decay",
        type=float,
        default=1e-2,
        help="Weight decay to use."
    )
    parser.add_argument(
        "--adam_epsilon",
        type=float,
        default=1e-08,
        help="Epsilon value for the Adam optimizer"
    )
    parser.add_argument(
        "--max_grad_norm",
        default=1.0,
        type=float,
        help="Max gradient norm."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the model to the Hub."
    )
    parser.add_argument(
        "--hub_token",
        type=str,
        default=None,
        help="The token to use to push to the Model Hub."
    )
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument(
        "--local_rank",
        type=int,
        default=-1,
        help="For distributed training: local_rank"
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=2500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=10,
        help=(
            "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
            " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
            " for more docs"
        ),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention",
        action="store_true",
        help="Whether or not to use xformers."
    )
    parser.add_argument(
        "--noise_offset",
        type=float,
        default=0,
        help="The scale of noise offset."
    )
    parser.add_argument(
        "--dataset_path",
        type=str,
        default='/path/to/laion-ocr-select',
        help="The path of dataset."
    )
    parser.add_argument(
        "--train_dataset_index_file",
        type=str,
        default='/path/to/train_dataset_index.txt',
        help="The txt file that provides the index of training samples. The format of each line should be XXXXX_XXXXXXXXX."
    )
    parser.add_argument(
        "--vis_num",
        type=int,
        default=4,
        help="The number of images to be visualized during training."
    )
    parser.add_argument(
        "--vis_interval",
        type=int,
        default=500,
        help="The interval for visualization."
    )
    parser.add_argument(
        "--max_length",
        default=77,
        type=int,
        help="Maximum length of the prompt. Can enlarge this value to adapt longer coord representation."
    )
    args = parser.parse_args()
    # Echo the full configuration so it appears in the training log.
    print('***************')
    print(args)
    print('***************')
    # Distributed launchers export LOCAL_RANK; it takes precedence over the flag.
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank
    # default to using the same revision for the non-ema model if not specified
    if args.non_ema_revision is None:
        args.non_ema_revision = args.revision
    return args
183,718 | import os
import cv2
import math
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from typing import Optional
from packaging import version
from collections import OrderedDict
from PIL import Image, ImageDraw, ImageFont
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torchvision import transforms
import datasets
import transformers
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
from termcolor import colored
import string
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """Return the fully-qualified Hub repository name ``owner/model_id``.

    When no organization is given, the owner is resolved from the account
    behind *token* (falling back to the locally cached Hub token).
    """
    if token is None:
        token = HfFolder.get_token()
    owner = whoami(token)["name"] if organization is None else organization
    return f"{owner}/{model_id}"
183,719 | import os
import cv2
import math
import random
import logging
import argparse
import numpy as np
from pathlib import Path
from typing import Optional
from packaging import version
from collections import OrderedDict
from PIL import Image, ImageDraw, ImageFont
from huggingface_hub import HfFolder, Repository, create_repo, whoami
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torchvision import transforms
import datasets
import transformers
import accelerate
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, deprecate
from diffusers.utils.import_utils import is_xformers_available
from termcolor import colored
import string
def check_merge(box1, box2):
    """Try to merge two OCR word boxes that sit on the same text line.

    Each box is ``[x_center, y_center, x_min, y_min, x_max, y_max, text]``.
    The boxes are merged when (a) each box's vertical center falls inside
    the other's vertical span (they share a text line) and (b) they are
    close enough horizontally: the sum of the two widths covers at least
    80% of the span from box1's left edge to box2's right edge.

    Returns the merged box (texts joined in left-to-right order with a
    space), or False when the boxes should not be merged.
    """
    x_center1, y_center1, x_min1, y_min1, x_max1, y_max1, pred1 = box1
    x_center2, y_center2, x_min2, y_min2, x_max2, y_max2, pred2 = box2
    # Both vertical centers must lie within the other box's vertical extent,
    # otherwise the boxes belong to different text lines.
    if not (y_min2 <= y_center1 <= y_max2):
        return False
    if not (y_min1 <= y_center2 <= y_max1):
        return False
    distance1 = x_max2 - x_min1                         # overall horizontal span
    distance2 = (x_max2 - x_min2) + (x_max1 - x_min1)   # sum of the two widths
    # Fix: a zero span (x_max2 == x_min1, touching edges) previously raised
    # ZeroDivisionError; a non-positive span can never satisfy the 80% rule,
    # so reject it outright.
    if distance1 <= 0:
        return False
    if distance2 / distance1 >= 0.8:
        # Join texts in left-to-right reading order.
        if x_min1 < x_min2:
            pred = pred1 + ' ' + pred2
        else:
            pred = pred2 + ' ' + pred1
        x_min = min(x_min1, x_min2)
        y_min = min(y_min1, y_min2)
        x_max = max(x_max1, x_max2)
        y_max = max(y_max1, y_max2)
        x_center = (x_min + x_max) // 2
        y_center = (y_min + y_max) // 2
        return [x_center, y_center, x_min, y_min, x_max, y_max, pred]
    else:
        return False
def merge_boxes(boxes):
    """Repeatedly fuse word-level OCR boxes into line-level boxes.

    Pops the left-most box and scans the rest for a mergeable partner
    (per ``check_merge``). A successful merge replaces both boxes with
    the combined one and the working list is re-sorted by x-center; a
    box with no partner is final. The caller's list may be partially
    consumed as a side effect.
    """
    output = []
    while boxes:
        head = boxes.pop(0)
        merged = None
        for other in boxes:
            merged = check_merge(head, other)
            if merged:
                boxes.remove(other)
                boxes.append(merged)
                # Keep the working list ordered by x-center.
                boxes = sorted(boxes, key=lambda b: b[0])
                break
            merged = None
        if merged is None:
            output.append(head)
    return output
183,720 | import os
import re
import zipfile
import torch
import gradio as gr
import numpy as np
import time
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel, DiffusionPipeline
from tqdm import tqdm
from PIL import Image
from PIL import Image, ImageDraw, ImageFont
import random
import copy
import string
# Per-guest session registry keyed by guest id.
global_dict = {}

def skip_fun(i, t, guest_id):
    """Reset the drawing state machine for *guest_id*'s session to idle (0)."""
    session = global_dict[guest_id]
    session['state'] = 0
183,721 | import os
import re
import zipfile
import torch
import gradio as gr
import numpy as np
import time
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel, DiffusionPipeline
from tqdm import tqdm
from PIL import Image
from PIL import Image, ImageDraw, ImageFont
import random
import copy
import string
# Per-guest session registry keyed by guest id.
global_dict = {}

def exe_undo(i, orig_i, t, guest_id):
    """Clear every queued annotation for this guest and restore the canvas.

    Empties the guest's keyword stack, resets the state machine to idle,
    and returns a deep copy of the original (unannotated) image so the UI
    shows a clean canvas again.
    """
    session = global_dict[guest_id]
    session['stack'] = []
    session['state'] = 0
    return copy.deepcopy(orig_i)
183,722 | import os
import re
import zipfile
import torch
import gradio as gr
print('hello', gr.__version__)
import numpy as np
import time
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel, DiffusionPipeline
from tqdm import tqdm
from PIL import Image
from PIL import Image, ImageDraw, ImageFont
import random
import copy
import string
print('***************')
print(len(tokenizer))
print(len(tokenizer))
print('***************')
global_dict = {}
font = ImageFont.truetype("./Arial.ttf", 20)
def exe_redo(i, orig_i, t, guest_id):
    """Cancel the guest's most recent annotation and redraw the canvas.

    Pops the last entry from the guest's annotation stack, then re-renders
    every remaining entry (red point marker + keyword text; four-point
    entries additionally get their bounding rectangle) onto a fresh copy
    of the original image. Returns the redrawn image.
    """
    print('redo ',orig_i)
    # orig_i may arrive as a file path (gradio type='filepath') — load it.
    if type(orig_i) == str:
        orig_i = Image.open(orig_i)
    # global state
    # state = 0
    # Reset the point-collection state machine for this guest.
    global_dict[guest_id]['state'] = 0
    # Drop the most recent annotation, if any.
    if len(global_dict[guest_id]['stack']) > 0:
        global_dict[guest_id]['stack'].pop()
    # Redraw all remaining annotations on a clean copy of the original.
    image = copy.deepcopy(orig_i)
    draw = ImageDraw.Draw(image)
    for items in global_dict[guest_id]['stack']:
        # print('now', items)
        text_position, t = items
        if len(text_position) == 2:
            # Two-point entry: a single anchor point with its keyword.
            x, y = text_position
            text_color = (255, 0, 0)
            draw.text((x+2, y), t, font=font, fill=text_color)
            r = 4
            leftUpPoint = (x-r, y-r)
            rightDownPoint = (x+r, y+r)
            draw.ellipse((leftUpPoint,rightDownPoint), fill='red')
        elif len(text_position) == 4:
            # Four-point entry: top-left anchor plus a bounding rectangle.
            x0, y0, x1, y1 = text_position
            text_color = (255, 0, 0)
            draw.text((x0+2, y0), t, font=font, fill=text_color)
            r = 4
            leftUpPoint = (x0-r, y0-r)
            rightDownPoint = (x0+r, y0+r)
            draw.ellipse((leftUpPoint,rightDownPoint), fill='red')
            draw.rectangle((x0,y0,x1,y1), outline=(255, 0, 0) )
    print('stack', global_dict[guest_id]['stack'])
    return image
183,723 | import os
import re
import zipfile
import torch
import gradio as gr
print('hello', gr.__version__)
import numpy as np
import time
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel, DiffusionPipeline
from tqdm import tqdm
from PIL import Image
from PIL import Image, ImageDraw, ImageFont
import random
import copy
import string
print('***************')
print(len(tokenizer))
print(len(tokenizer))
print('***************')
global_dict = {}
font = ImageFont.truetype("./Arial.ttf", 20)
with gr.Blocks() as demo:
gr.HTML(
"""
<div style="text-align: center; max-width: 1600px; margin: 20px auto;">
<h2 style="font-weight: 900; font-size: 2.3rem; margin: 0rem">
TextDiffuser-2: Unleashing the Power of Language Models for Text Rendering
</h2>
<h2 style="font-weight: 900; font-size: 1.3rem; margin: 0rem">
(Demo for <b>Text Inpainting</b> 🖼️🖌️)
</h2>
<h2 style="font-weight: 460; font-size: 1.1rem; margin: 0rem">
<a href="https://jingyechen.github.io/">Jingye Chen</a>, <a href="https://hypjudy.github.io/website/">Yupan Huang</a>, <a href="https://scholar.google.com/citations?user=0LTZGhUAAAAJ&hl=en">Tengchao Lv</a>, <a href="https://www.microsoft.com/en-us/research/people/lecu/">Lei Cui</a>, <a href="https://cqf.io/">Qifeng Chen</a>, <a href="https://thegenerality.com/">Furu Wei</a>
</h2>
<h2 style="font-weight: 460; font-size: 1.1rem; margin: 0rem">
HKUST, Sun Yat-sen University, Microsoft Research
</h2>
<h3 style="font-weight: 450; font-size: 1rem; margin: 0rem">
[<a href="https://arxiv.org/abs/2311.16465" style="color:blue;">arXiv</a>]
[<a href="https://github.com/microsoft/unilm/tree/master/textdiffuser-2" style="color:blue;">Code</a>]
[<a href="https://jingyechen.github.io/textdiffuser2/" style="color:blue;">Project Page</a>]
[<a href="https://discord.gg/q7eHPupu" style="color:purple;">Discord</a>]
</h3>
<h2 style="text-align: left; font-weight: 450; font-size: 1rem; margin-top: 0.5rem; margin-bottom: 0.5rem">
TextDiffuser-2 leverages language models to enhance text rendering, achieving greater flexibility. Different from text editing, the text inpainting task aims to add or modify text guided by users, ensuring that the inpainted text has a reasonable style (i.e., no need to match the style of the original text during modification exactly) and is coherent with backgrounds. TextDiffuser-2 offers an <b>improved user experience</b>. Specifically, users only need to type the text they wish to inpaint into the provided input box and then select key points on the Canvas.
</h2>
<h2 style="text-align: left; font-weight: 450; font-size: 1rem; margin-top: 0.5rem; margin-bottom: 0.5rem">
👀 <b>Tips for using this demo</b>: <b>(1)</b> Please carefully read the disclaimer in the below. Current verison can only support English. <b>(2)</b> The <b>prompt is optional</b>. If provided, the generated image may be more accurate. <b>(3)</b> Redo is used to cancel the last keyword, and undo is used to clear all keywords. <b>(4)</b> Current version only supports input image with resolution 512x512. <b>(5)</b> You can use either two points or four points to specify the text box. Using four points can better represent the perspective boxes. <b>(6)</b> Leave "Text to be inpaintd" empty can function as the text removal task. <b>(7)</b> Classifier-free guidance is set to a small value (e.g. 1) in default. It is noticed that a larger cfg may result in chromatic aberration against the background. <b>(8)</b> You can inpaint many text regions at one time. <b>(9)</b> Thanks for reading these tips, shall we start now?
</h2>
<img src="https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/inpainting_blank.jpg" alt="textdiffuser-2">
</div>
""")
with gr.Tab("Text Inpainting"):
with gr.Row():
with gr.Column():
keywords = gr.Textbox(label="(Optional) Keywords. Should be seperated by / (e.g., keyword1/keyword2/...)", placeholder="keyword1/keyword2", visible=False)
positive_prompt = gr.Textbox(label="(Optional) Positive prompt", value="", visible=False)
i = gr.Image(label="Image", type='filepath', value='https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/example11.jpg')
orig_i = gr.Image(label="Placeholder", type='filepath', height=512, width=512, visible=False)
radio = gr.Radio(["Two Points", "Four Points"], label="Number of points to represent the text box.", value="Two Points", visible=True)
with gr.Row():
t = gr.Textbox(label="Text to be inpainted", value='Test')
prompt = gr.Textbox(label="(Optional) Prompt.")
with gr.Row():
redo = gr.Button(value='Redo - Cancel the last keyword')
undo = gr.Button(value='Undo - Clear the canvas')
# skip_button = gr.Button(value='Skip - Operate the next keyword')
slider_natural = gr.Checkbox(label="Natural image generation", value=False, info="The text position and content info will not be incorporated.", visible=False)
slider_step = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Sampling step", info="The sampling step for TextDiffuser-2.")
slider_guidance = gr.Slider(minimum=1, maximum=13, value=1, step=0.5, label="Scale of classifier-free guidance", info="The scale of cfg and is set to 1 in default. Smaller cfg produce stable results.")
slider_batch = gr.Slider(minimum=1, maximum=6, value=4, step=1, label="Batch size", info="The number of images to be sampled.")
slider_temperature = gr.Slider(minimum=0.1, maximum=2, value=1.4, step=0.1, label="Temperature", info="Control the diversity of layout planner. Higher value indicates more diversity.", visible=False)
# slider_seed = gr.Slider(minimum=1, maximum=10000, label="Seed", randomize=True)
button = gr.Button("Generate")
guest_id_box = gr.Textbox(label="guest_id", value=f"-1", visible=False)
i.select(get_pixels,[i,orig_i,radio,t,guest_id_box],[i,orig_i,guest_id_box])
redo.click(exe_redo, [i,orig_i,t,guest_id_box],[i])
undo.click(exe_undo, [i,orig_i,t,guest_id_box],[i])
# skip_button.click(skip_fun, [i,t,guest_id_box])
with gr.Column():
output = gr.Gallery(label='Generated image', rows=2, height=768)
with gr.Accordion("Intermediate results", open=False, visible=False):
gr.Markdown("Composed prompt")
composed_prompt = gr.Textbox(label='')
# gr.Markdown("Layout visualization")
# layout = gr.Image(height=256, width=256)
button.click(text_to_image, inputs=[guest_id_box, i, orig_i, prompt,keywords,positive_prompt, radio,slider_step,slider_guidance,slider_batch,slider_temperature,slider_natural], outputs=[output, composed_prompt])
gr.Markdown("## Image Examples")
template = None
gr.Examples(
[
["https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/example1.jpg"],
["https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/example2.jpg"],
["https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/example3.jpg"],
["https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/example4.jpg"],
["https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/example5.jpg"],
["https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/example7.jpg"],
["https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/example8.jpg"],
["https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/example11.jpg"],
["https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/example12.jpg"],
["https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/example13.jpg"],
["https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/example14.jpg"],
["https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/example15.jpg"],
],
[
i
],
examples_per_page=25,
)
gr.HTML(
"""
<div style="text-align: justify; max-width: 1100px; margin: 20px auto;">
<h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem">
<b>Version</b>: 1.0
</h3>
<h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem">
<b>Contact</b>:
For help or issues using TextDiffuser-2, please email Jingye Chen <a href="mailto:qwerty.chen@connect.ust.hk">(qwerty.chen@connect.ust.hk)</a>, Yupan Huang <a href="mailto:huangyp28@mail2.sysu.edu.cn">(huangyp28@mail2.sysu.edu.cn)</a> or submit a GitHub issue. For other communications related to TextDiffuser-2, please contact Lei Cui <a href="mailto:lecu@microsoft.com">(lecu@microsoft.com)</a> or Furu Wei <a href="mailto:fuwei@microsoft.com">(fuwei@microsoft.com)</a>.
</h3>
<h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem">
<b>Disclaimer</b>:
Please note that the demo is intended for academic and research purposes <b>ONLY</b>. Any use of the demo for generating inappropriate content is strictly prohibited. The responsibility for any misuse or inappropriate use of the demo lies solely with the users who generated such content, and this demo shall not be held liable for any such use.
</h3>
</div>
"""
)
def get_pixels(i, orig_i, radio, t, guest_id, evt: gr.SelectData):
    """Handle one click on the canvas and redraw the keyword overlay.

    Bound to gradio's ``Image.select`` event. Click positions accumulate per
    guest in ``global_dict[guest_id]['stack']`` as ``(position, text)`` pairs;
    ``position`` grows from a 2-tuple (one click) to a 4-tuple ('Two Points'
    box) or up to an 8-tuple ('Four Points' polygon) as clicks arrive.

    Returns (annotated preview image, clean base image, guest id).
    NOTE(review): if *radio* is neither 'Two Points' nor 'Four Points',
    ``image`` is never assigned and the final lines raise — confirm the UI
    only ever passes these two values.
    """
    print('hi1 ', i)
    print('hi2 ', orig_i)
    # Dimensions of the originally uploaded image; used below to rescale
    # click coordinates onto the 512x512 working canvas.
    width, height = Image.open(i).size
    # register
    if guest_id == '-1': # register for the first time
        # First interaction: mint a session id from the wall clock and
        # remember the raw pixels so later calls can detect an image swap.
        seed = str(int(time.time()))
        global_dict[str(seed)] = {
            'state': 0,
            'stack': [],
            'image_id': [list(Image.open(i).resize((512,512)).getdata())] # an image has been recorded
        }
        guest_id = str(seed)
    else:
        seed = guest_id
        if type(i) == str:
            i = Image.open(i)
            i = i.resize((512,512))
        # Compare raw pixels against every image recorded this session to
        # decide whether the user replaced the picture.
        images = global_dict[str(seed)]['image_id']
        flag = False
        for image in images:
            if image == list(i.getdata()):
                print('find it')
                flag = True
                break
        if not flag:
            # New image uploaded: restart the annotation session from scratch.
            global_dict[str(seed)]['image_id'] = [list(i.getdata())]
            global_dict[str(seed)]['stack'] = []
            global_dict[str(seed)]['state'] = 0
            orig_i = i
        else:
            if orig_i is not None:
                orig_i = Image.open(orig_i)
                orig_i = orig_i.resize((512,512))
            else:
                # No clean base image recorded: fall back to the current one
                # and reset the stack.
                orig_i = i
                global_dict[guest_id]['stack'] = []
                global_dict[guest_id]['state'] = 0
    # Pixel coordinates of the click, supplied by gradio.
    text_position = evt.index
    print('hello ', text_position)
    if radio == 'Two Points':
        # State machine: 0 = expecting first corner, 1 = expecting second.
        if global_dict[guest_id]['state'] == 0:
            global_dict[guest_id]['stack'].append(
                (text_position, t)
            )
            print(text_position, global_dict[guest_id]['stack'])
            global_dict[guest_id]['state'] = 1
        else:
            # Second click: merge with the stored first corner into a box.
            (_, t) = global_dict[guest_id]['stack'].pop()
            x, y = _
            global_dict[guest_id]['stack'].append(
                ((x,y,text_position[0],text_position[1]), t)
            )
            global_dict[guest_id]['state'] = 0
        # Redraw every recorded annotation on a fresh copy of the base image.
        image = copy.deepcopy(orig_i)
        draw = ImageDraw.Draw(image)
        for items in global_dict[guest_id]['stack']:
            text_position, t = items
            if len(text_position) == 2:
                # One point so far: caption plus a small red dot.
                x, y = text_position
                # Rescale from original image coordinates to the 512 canvas.
                x = int(512 * x / width)
                y = int(512 * y / height)
                text_color = (255, 0, 0)
                draw.text((x+2, y), t, font=font, fill=text_color)
                r = 4
                leftUpPoint = (x-r, y-r)
                rightDownPoint = (x+r, y+r)
                draw.ellipse((leftUpPoint,rightDownPoint), fill='red')
            elif len(text_position) == 4:
                # Completed box: caption, anchor dot, and the rectangle.
                x0, y0, x1, y1 = text_position
                x0 = int(512 * x0 / width)
                x1 = int(512 * x1 / width)
                y0 = int(512 * y0 / height)
                y1 = int(512 * y1 / height)
                text_color = (255, 0, 0)
                draw.text((x0+2, y0), t, font=font, fill=text_color)
                r = 4
                leftUpPoint = (x0-r, y0-r)
                rightDownPoint = (x0+r, y0+r)
                draw.ellipse((leftUpPoint,rightDownPoint), fill='red')
                draw.rectangle((x0,y0,x1,y1), outline=(255, 0, 0) )
    elif radio == 'Four Points':
        # State machine 0..3: each click extends the stored tuple by one
        # (x, y) pair until the 8-tuple polygon is complete.
        if global_dict[guest_id]['state'] == 0:
            global_dict[guest_id]['stack'].append(
                (text_position, t)
            )
            print(text_position, global_dict[guest_id]['stack'])
            global_dict[guest_id]['state'] = 1
        elif global_dict[guest_id]['state'] == 1:
            (_, t) = global_dict[guest_id]['stack'].pop()
            x, y = _
            global_dict[guest_id]['stack'].append(
                ((x,y,text_position[0],text_position[1]), t)
            )
            global_dict[guest_id]['state'] = 2
        elif global_dict[guest_id]['state'] == 2:
            (_, t) = global_dict[guest_id]['stack'].pop()
            x0, y0, x1, y1 = _
            global_dict[guest_id]['stack'].append(
                ((x0, y0, x1, y1,text_position[0],text_position[1]), t)
            )
            global_dict[guest_id]['state'] = 3
        elif global_dict[guest_id]['state'] == 3:
            (_, t) = global_dict[guest_id]['stack'].pop()
            x0, y0, x1, y1, x2, y2 = _
            global_dict[guest_id]['stack'].append(
                ((x0, y0, x1, y1, x2, y2,text_position[0],text_position[1]), t)
            )
            global_dict[guest_id]['state'] = 0
        # Redraw all annotations; partially-entered polygons are shown as
        # connected line segments.
        image = copy.deepcopy(orig_i)
        draw = ImageDraw.Draw(image)
        for items in global_dict[guest_id]['stack']:
            text_position, t = items
            if len(text_position) == 2:
                x, y = text_position
                # NOTE(review): only this 2-point case rescales by
                # width/height; the 4/6/8-point cases below draw raw
                # coordinates — confirm whether that asymmetry is intended.
                x = int(512 * x / width)
                y = int(512 * y / height)
                text_color = (255, 0, 0)
                draw.text((x+2, y), t, font=font, fill=text_color)
                r = 4
                leftUpPoint = (x-r, y-r)
                rightDownPoint = (x+r, y+r)
                draw.ellipse((leftUpPoint,rightDownPoint), fill='red')
            elif len(text_position) == 4:
                x0, y0, x1, y1 = text_position
                text_color = (255, 0, 0)
                draw.text((x0+2, y0), t, font=font, fill=text_color)
                r = 4
                leftUpPoint = (x0-r, y0-r)
                rightDownPoint = (x0+r, y0+r)
                draw.ellipse((leftUpPoint,rightDownPoint), fill='red')
                draw.line(((x0,y0),(x1,y1)), fill=(255, 0, 0) )
            elif len(text_position) == 6:
                x0, y0, x1, y1, x2, y2 = text_position
                text_color = (255, 0, 0)
                draw.text((x0+2, y0), t, font=font, fill=text_color)
                r = 4
                leftUpPoint = (x0-r, y0-r)
                rightDownPoint = (x0+r, y0+r)
                draw.ellipse((leftUpPoint,rightDownPoint), fill='red')
                draw.line(((x0,y0),(x1,y1)), fill=(255, 0, 0) )
                draw.line(((x1,y1),(x2,y2)), fill=(255, 0, 0) )
            elif len(text_position) == 8:
                x0, y0, x1, y1, x2, y2, x3, y3 = text_position
                text_color = (255, 0, 0)
                draw.text((x0+2, y0), t, font=font, fill=text_color)
                r = 4
                leftUpPoint = (x0-r, y0-r)
                rightDownPoint = (x0+r, y0+r)
                draw.ellipse((leftUpPoint,rightDownPoint), fill='red')
                draw.line(((x0,y0),(x1,y1)), fill=(255, 0, 0) )
                draw.line(((x1,y1),(x2,y2)), fill=(255, 0, 0) )
                draw.line(((x2,y2),(x3,y3)), fill=(255, 0, 0) )
                draw.line(((x3,y3),(x0,y0)), fill=(255, 0, 0) )
    print('stack', global_dict[guest_id]['stack'])
    # Record the annotated preview so the next click recognizes it as
    # belonging to this session rather than a newly uploaded image.
    global_dict[str(seed)]['image_id'].append(list(image.getdata()))
    return image, orig_i, seed
183,724 | import os
import re
import zipfile
import torch
import gradio as gr
print('hello', gr.__version__)
import numpy as np
import time
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel, DiffusionPipeline
from tqdm import tqdm
from PIL import Image
from PIL import Image, ImageDraw, ImageFont
import random
import copy
import string
# Debug banner around the tokenizer vocabulary size.
# NOTE(review): `tokenizer` is not defined in this chunk — these prints rely
# on a tokenizer created elsewhere in the file; confirm it exists at import
# time, otherwise this raises NameError on module load.
print('***************')
print(len(tokenizer))
print(len(tokenizer))
print('***************')
def test_fn(x,y):
print('hello') | null |
183,725 | import os
import re
import zipfile
import torch
import gradio as gr
print('hello', gr.__version__)
import numpy as np
import time
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel, DiffusionPipeline
from tqdm import tqdm
from PIL import Image
from PIL import Image, ImageDraw, ImageFont
import random
import copy
import string
# --- Module-level model loading (runs at import time, requires a GPU) ---
# Text encoder fine-tuned for TextDiffuser-2 inpainting, held in fp16 on GPU.
text_encoder = CLIPTextModel.from_pretrained(
    'JingyeChen22/textdiffuser2-full-ft-inpainting', subfolder="text_encoder"
).cuda().half()
# Base Stable Diffusion 1.5 tokenizer; extended below with per-character tokens.
tokenizer = CLIPTokenizer.from_pretrained(
    'runwayml/stable-diffusion-v1-5', subfolder="tokenizer"
)
print('***************')
print(len(tokenizer))
# Add one special token per character, e.g. '[a]', so the text to render can
# be spelled out character-by-character inside the prompt.
# NOTE(review): `alphabet` is defined elsewhere in this file — confirm it is
# in scope before this line runs.
for c in alphabet:
    tokenizer.add_tokens([f'[{c}]'])
print(len(tokenizer))
print('***************')
# Frozen SD 1.5 VAE for encoding masked images / decoding latents.
vae = AutoencoderKL.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder="vae").half().cuda()
# UNet fine-tuned for inpainting with layout conditioning.
unet = UNet2DConditionModel.from_pretrained(
    'JingyeChen22/textdiffuser2-full-ft-inpainting', subfolder="unet"
).half().cuda()
# Grow the embedding table to cover the newly added character tokens.
text_encoder.resize_token_embeddings(len(tokenizer))
# Per-guest UI session state: {guest_id: {'state': int, 'stack': [...], 'image_id': [...]}}
global_dict = {}
def get_layout_image(ocrs):
    """Render the planner's predicted keyword boxes onto a blank 256x256 canvas.

    Each non-empty line of *ocrs* has the form ``keyword l,t,r,b`` with
    coordinates on a 128x128 grid; they are doubled to fit the 256 canvas.
    Rendering stops at the first blank line.
    """
    canvas = Image.new('RGB', (256,256), (0,0,0))
    painter = ImageDraw.ImageDraw(canvas)
    for raw_line in ocrs.split('\n'):
        raw_line = raw_line.strip()
        if not raw_line:
            break  # planner output ends at the first blank line
        tokens = raw_line.split()
        keyword = ' '.join(tokens[:-1])
        coords = tokens[-1]
        # the size of canvas is 256x256
        left, top, right, bottom = (int(v) * 2 for v in coords.split(','))
        painter.rectangle([(left, top), (right, bottom)], outline="red")
        painter.text((left, top), keyword, font=font_layout)
    return canvas
def to_tensor(image):
    """Convert a PIL image or HWC uint8 array to a CHW float32 tensor in [0, 1].

    Args:
        image: ``PIL.Image.Image`` or ``numpy.ndarray`` of shape (H, W, C).

    Returns:
        ``torch.Tensor`` of shape (C, H, W), dtype float32, scaled by 1/255.

    Raises:
        TypeError: if *image* is neither a PIL image nor a numpy array.
    """
    # Check ndarray first so the common array path never touches PIL.
    if not isinstance(image, np.ndarray):
        if isinstance(image, Image.Image):
            image = np.array(image)
        else:
            # Previously raised the unhelpful TypeError("Error").
            raise TypeError(
                f"to_tensor expects a PIL.Image or numpy.ndarray, "
                f"got {type(image).__name__}"
            )
    image = image.astype(np.float32) / 255.0
    # HWC -> CHW, the layout expected by torch models.
    image = np.transpose(image, (2, 0, 1))
    tensor = torch.from_numpy(image)
    return tensor
def text_to_image(guest_id, i, orig_i, prompt,keywords,positive_prompt,radio,slider_step,slider_guidance,slider_batch,slider_temperature,slider_natural):
    """Run the TextDiffuser-2 inpainting pipeline for one UI request.

    Composes a prompt (optionally via the language-model layout planner or
    the user's clicked layout), builds an inpainting mask from the clicked
    regions, then runs classifier-free-guided DDPM sampling and decodes the
    latents into images.

    Returns a tuple of generated PIL images and the composed prompt string.
    Relies on module-level globals: tokenizer, text_encoder, vae, unet,
    global_dict, and (in the planner branch) m1_model / m1_tokenizer /
    m1_model_path.
    """
    # print(type(i))
    # exit(0)
    print(f'[info] Prompt: {prompt} | Keywords: {keywords} | Radio: {radio} | Steps: {slider_step} | Guidance: {slider_guidance} | Natural: {slider_natural}')
    # global stack
    # global state
    if len(positive_prompt.strip()) != 0:
        prompt += positive_prompt
    with torch.no_grad():
        time1 = time.time()
        user_prompt = prompt
        if slider_natural:
            # Natural-image mode: no layout tokens, just the raw caption.
            user_prompt = f'{user_prompt}'
            composed_prompt = user_prompt
            prompt = tokenizer.encode(user_prompt)
            layout_image = None
        else:
            if guest_id not in global_dict or len(global_dict[guest_id]['stack']) == 0:
                # No user-drawn layout: ask the LM layout planner (M1) to
                # place the keywords on a 128x128 grid.
                if len(keywords.strip()) == 0:
                    template = f'Given a prompt that will be used to generate an image, plan the layout of visual text for the image. The size of the image is 128x128. Therefore, all properties of the positions should not exceed 128, including the coordinates of top, left, right, and bottom. All keywords are included in the caption. You dont need to specify the details of font styles. At each line, the format should be keyword left, top, right, bottom. So let us begin. Prompt: {user_prompt}'
                else:
                    keywords = keywords.split('/')
                    keywords = [i.strip() for i in keywords]
                    template = f'Given a prompt that will be used to generate an image, plan the layout of visual text for the image. The size of the image is 128x128. Therefore, all properties of the positions should not exceed 128, including the coordinates of top, left, right, and bottom. In addition, we also provide all keywords at random order for reference. You dont need to specify the details of font styles. At each line, the format should be keyword left, top, right, bottom. So let us begin. Prompt: {prompt}. Keywords: {str(keywords)}'
                msg = template
                conv = get_conversation_template(m1_model_path)
                conv.append_message(conv.roles[0], msg)
                conv.append_message(conv.roles[1], None)
                prompt = conv.get_prompt()
                inputs = m1_tokenizer([prompt], return_token_type_ids=False)
                inputs = {k: torch.tensor(v).to('cuda') for k, v in inputs.items()}
                output_ids = m1_model.generate(
                    **inputs,
                    do_sample=True,
                    temperature=slider_temperature,
                    repetition_penalty=1.0,
                    max_new_tokens=512,
                )
                # Decoder-only models echo the input; strip the prompt tokens.
                if m1_model.config.is_encoder_decoder:
                    output_ids = output_ids[0]
                else:
                    output_ids = output_ids[0][len(inputs["input_ids"][0]) :]
                outputs = m1_tokenizer.decode(
                    output_ids, skip_special_tokens=True, spaces_between_special_tokens=False
                )
                print(f"[{conv.roles[0]}]\n{msg}")
                print(f"[{conv.roles[1]}]\n{outputs}")
                layout_image = get_layout_image(outputs)
                ocrs = outputs.split('\n')
                time2 = time.time()
                print(time2-time1)
                # user_prompt = prompt
                current_ocr = ocrs
                ocr_ids = []
                print('user_prompt', user_prompt)
                print('current_ocr', current_ocr)
                # Turn each planned line "keyword l,t,r,b" into coordinate
                # tokens (l.. t.. r.. b..) plus per-character tokens like [a].
                for ocr in current_ocr:
                    ocr = ocr.strip()
                    if len(ocr) == 0 or '###' in ocr or '.com' in ocr:
                        continue
                    items = ocr.split()
                    pred = ' '.join(items[:-1])
                    box = items[-1]
                    l,t,r,b = box.split(',')
                    l,t,r,b = int(l), int(t), int(r), int(b)
                    ocr_ids.extend(['l'+str(l), 't'+str(t), 'r'+str(r), 'b'+str(b)])
                    char_list = list(pred)
                    char_list = [f'[{i}]' for i in char_list]
                    ocr_ids.extend(char_list)
                    ocr_ids.append(tokenizer.eos_token_id)
                caption_ids = tokenizer(
                    user_prompt, truncation=True, return_tensors="pt"
                ).input_ids[0].tolist()
                # NOTE(review): bare except swallows any malformed planner
                # output and silently drops the layout tokens — confirm this
                # best-effort fallback is intended.
                try:
                    ocr_ids = tokenizer.encode(ocr_ids)
                    prompt = caption_ids + ocr_ids
                except:
                    prompt = caption_ids
                user_prompt = tokenizer.decode(prompt)
                composed_prompt = tokenizer.decode(prompt)
            else:
                # User-drawn layout: serialize each clicked region into
                # coordinate + character tokens (coordinates are divided by 4
                # to map the 512 canvas onto the 128 grid) and paint the
                # inpainting mask.
                user_prompt += ' <|endoftext|><|startoftext|>'
                layout_image = None
                image_mask = Image.new('L', (512,512), 0)
                draw = ImageDraw.Draw(image_mask)
                for items in global_dict[guest_id]['stack']:
                    position, text = items
                    # feature_mask
                    # masked_feature
                    if len(position) == 2:
                        # Single point: only left/top are specified.
                        x, y = position
                        x = x // 4
                        y = y // 4
                        text_str = ' '.join([f'[{c}]' for c in list(text)])
                        user_prompt += f' l{x} t{y} {text_str} <|endoftext|>'
                    elif len(position) == 4:
                        # Axis-aligned box: mask the rectangle.
                        x0, y0, x1, y1 = position
                        x0 = x0 // 4
                        y0 = y0 // 4
                        x1 = x1 // 4
                        y1 = y1 // 4
                        text_str = ' '.join([f'[{c}]' for c in list(text)])
                        user_prompt += f' l{x0} t{y0} r{x1} b{y1} {text_str} <|endoftext|>'
                        draw.rectangle((x0*4, y0*4, x1*4, y1*4), fill=1)
                        print('prompt ', user_prompt)
                    elif len(position) == 8: # four points
                        # Free quadrilateral: mask the polygon, but tokenize
                        # its axis-aligned bounding box.
                        x0, y0, x1, y1, x2, y2, x3, y3 = position
                        draw.polygon([(x0, y0), (x1, y1), (x2, y2), (x3, y3)], fill=1)
                        x0 = x0 // 4
                        y0 = y0 // 4
                        x1 = x1 // 4
                        y1 = y1 // 4
                        x2 = x2 // 4
                        y2 = y2 // 4
                        x3 = x3 // 4
                        y3 = y3 // 4
                        xmin = min(x0, x1, x2, x3)
                        ymin = min(y0, y1, y2, y3)
                        xmax = max(x0, x1, x2, x3)
                        ymax = max(y0, y1, y2, y3)
                        text_str = ' '.join([f'[{c}]' for c in list(text)])
                        user_prompt += f' l{xmin} t{ymin} r{xmax} b{ymax} {text_str} <|endoftext|>'
                        print('prompt ', user_prompt)
                prompt = tokenizer.encode(user_prompt)
                composed_prompt = tokenizer.decode(prompt)
        # Pad / truncate the token sequence to CLIP's 77-token context.
        prompt = prompt[:77]
        while len(prompt) < 77:
            prompt.append(tokenizer.pad_token_id)
        prompts_cond = prompt
        prompts_nocond = [tokenizer.pad_token_id]*77
        prompts_cond = [prompts_cond] * slider_batch
        prompts_nocond = [prompts_nocond] * slider_batch
        prompts_cond = torch.Tensor(prompts_cond).long().cuda()
        prompts_nocond = torch.Tensor(prompts_nocond).long().cuda()
        scheduler = DDPMScheduler.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder="scheduler")
        scheduler.set_timesteps(slider_step)
        noise = torch.randn((slider_batch, 4, 64, 64)).to("cuda").half()
        input = noise
        encoder_hidden_states_cond = text_encoder(prompts_cond)[0].half()
        encoder_hidden_states_nocond = text_encoder(prompts_nocond)[0].half()
        # NOTE(review): image_mask is only assigned in the user-layout branch
        # above; the natural-image and planner branches reach this line with
        # image_mask undefined (NameError) — confirm those paths are disabled
        # in this inpainting demo.
        image_mask = torch.Tensor(np.array(image_mask)).float().half().cuda()
        image_mask = image_mask.unsqueeze(0).unsqueeze(0).repeat(slider_batch, 1, 1, 1)
        image = Image.open(orig_i).resize((512,512))
        # Normalize to [-1, 1], the range the VAE expects.
        image_tensor = to_tensor(image).unsqueeze(0).cuda().sub_(0.5).div_(0.5)
        print(f'image_tensor.shape {image_tensor.shape}')
        # Zero out the regions to be repainted, then encode to latents.
        masked_image = image_tensor * (1-image_mask)
        masked_feature = vae.encode(masked_image.half()).latent_dist.sample()
        masked_feature = masked_feature * vae.config.scaling_factor
        masked_feature = masked_feature.half()
        print(f'masked_feature.shape {masked_feature.shape}')
        # Downsample the pixel mask to the 64x64 latent resolution.
        feature_mask = torch.nn.functional.interpolate(image_mask, size=(64,64), mode='nearest').cuda()
        for t in tqdm(scheduler.timesteps):
            with torch.no_grad(): # classifier free guidance
                noise_pred_cond = unet(sample=input, timestep=t, encoder_hidden_states=encoder_hidden_states_cond[:slider_batch],feature_mask=feature_mask, masked_feature=masked_feature).sample # b, 4, 64, 64
                noise_pred_uncond = unet(sample=input, timestep=t, encoder_hidden_states=encoder_hidden_states_nocond[:slider_batch],feature_mask=feature_mask, masked_feature=masked_feature).sample # b, 4, 64, 64
                noisy_residual = noise_pred_uncond + slider_guidance * (noise_pred_cond - noise_pred_uncond) # b, 4, 64, 64
                input = scheduler.step(noisy_residual, t, input).prev_sample
                del noise_pred_cond
                del noise_pred_uncond
                torch.cuda.empty_cache()
        # decode
        input = 1 / vae.config.scaling_factor * input
        images = vae.decode(input, return_dict=False)[0]
        width, height = 512, 512
        results = []
        # Also paste the batch into a 2x2 contact sheet (currently unused).
        new_image = Image.new('RGB', (2*width, 2*height))
        for index, image in enumerate(images.cpu().float()):
            # [-1, 1] -> [0, 1] -> uint8 RGB.
            image = (image / 2 + 0.5).clamp(0, 1).unsqueeze(0)
            image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
            image = Image.fromarray((image * 255).round().astype("uint8")).convert('RGB')
            results.append(image)
            row = index // 2
            col = index % 2
            new_image.paste(image, (col*width, row*height))
        # os.system('nvidia-smi')
        torch.cuda.empty_cache()
        # os.system('nvidia-smi')
        return tuple(results), composed_prompt
183,726 | import argparse
import logging
import math
import os
import random
import shutil
from pathlib import Path
import glob
import time
import datasets
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset, Dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from PIL import Image
import string
def save_model_card(repo_id: str, images=None, base_model="", dataset_name="", repo_folder=None):
    """Write a model-card README (with YAML front matter) into *repo_folder*.

    Args:
        repo_id: Hub repository id used in the card title.
        images: Optional list of PIL images; each is saved as image_{i}.png
            next to the README. ``None`` is treated as "no images".
        base_model: Identifier of the base checkpoint the LoRA was tuned from.
        dataset_name: Name of the dataset mentioned in the card.
        repo_folder: Destination directory (must already exist).
    """
    # Fix: defaults were `base_model=str` / `dataset_name=str`, which rendered
    # "<class 'str'>" into the card text; and iterating the `images=None`
    # default raised TypeError.
    img_str = ""
    for i, image in enumerate(images or []):
        image.save(os.path.join(repo_folder, f"image_{i}.png"))
        # NOTE(review): only a newline is appended — the saved file is never
        # referenced in the card text; possibly a lost markdown image link.
        img_str += f"\n"
    yaml = f"""
---
license: creativeml-openrail-m
base_model: {base_model}
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- lora
inference: true
---
"""
    model_card = f"""
# LoRA text2image fine-tuning - {repo_id}
These are LoRA adaption weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images in the following. \n
{img_str}
"""
    with open(os.path.join(repo_folder, "README.md"), "w") as f:
        f.write(yaml + model_card)
183,727 | import argparse
import logging
import math
import os
import random
import shutil
from pathlib import Path
import glob
import time
import datasets
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset, Dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from PIL import Image
import string
def parse_args():
    """Build and parse the command-line arguments for the sampling script.

    Returns:
        argparse.Namespace with training/sampling options. If the LOCAL_RANK
        environment variable is set (torch.distributed launch), it overrides
        the --local_rank flag.
    """
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    # --- model / data locations ---
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default='lambdalabs/pokemon-blip-captions',
        help=(
            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="sd-model-finetuned-lora",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    # --- training/runtime knobs ---
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            # typo fix: "1.10.and" -> "1.10 and"
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
    )
    parser.add_argument(
        "--rank",
        type=int,
        default=4,
        help=("The dimension of the LoRA update matrices."),
    )
    parser.add_argument(
        "--vis_num",
        type=int,
        default=16,
        help=("The number of images to be visualized."),
    )
    #### newly added parameters
    parser.add_argument(
        "--granularity",
        type=int,
        default=128,
        help="The granularity of coordinates, ranging from 1~512."
    )
    parser.add_argument(
        "--coord_mode",
        type=str,
        default='lt',
        choices=['lt', 'center', 'ltrb'],
        help="The way to represent coordinates."
    )
    parser.add_argument(
        "--max_length",
        default=77,
        type=int,
        help="Maximum length of the composed prompt."
    )
    parser.add_argument(
        "--cfg",
        default=7,
        type=float,
        help="classifier free guidance."
    )
    parser.add_argument(
        "--sample_steps",
        default=50,
        type=int,
        help="steps for sampling for diffusion models."
    )
    parser.add_argument(
        "--input_format",
        required=True,
        type=str,
        help="specify the input format",
        choices=['prompt', 'prompts_txt_file', 'prompt_layout_txt_file']
    )
    parser.add_argument(
        "--input_prompt",
        type=str,
    )
    parser.add_argument(
        "--input_file",
        type=str,
    )
    parser.add_argument(
        "--prompts_txt_file",
        type=str,
    )
    parser.add_argument(
        "--m1_model_path",
        type=str,
        help="the checkpoint of layout planner"
    )
    args = parser.parse_args()
    # Sanity check: torch.distributed launchers export LOCAL_RANK; prefer it
    # over the CLI flag when the two disagree.
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank
    return args
183,728 | import os
import re
import zipfile
import torch
import gradio as gr
import time
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel, DiffusionPipeline, LCMScheduler
from tqdm import tqdm
from PIL import Image
from PIL import Image, ImageDraw, ImageFont
import random
import copy
import string
from fastchat.model import load_model, get_conversation_template
from transformers import AutoTokenizer, AutoModelForCausalLM
# Per-guest UI session state shared by the canvas callbacks.
global_dict = {}


def skip_fun(i, t, guest_id):
    """Reset the click-state for *guest_id* so the next keyword starts fresh.

    Only the per-guest state flag is cleared; the stack of already-placed
    keywords is left untouched.
    """
    global_dict[guest_id]['state'] = 0
183,729 | import os
import re
import zipfile
import torch
import gradio as gr
import time
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel, DiffusionPipeline, LCMScheduler
from tqdm import tqdm
from PIL import Image
from PIL import Image, ImageDraw, ImageFont
import random
import copy
import string
from fastchat.model import load_model, get_conversation_template
from transformers import AutoTokenizer, AutoModelForCausalLM
# Per-guest UI session state shared by the canvas callbacks.
global_dict = {}


def exe_undo(i, t, guest_id):
    """Discard every keyword placed by *guest_id* and return a blank canvas."""
    session = global_dict[guest_id]
    session['stack'] = []
    session['state'] = 0
    # Hand back the neutral gray background as the new preview.
    image = Image.open(f'./gray256.jpg')
    return image
183,730 | import os
import re
import zipfile
import torch
import gradio as gr
print('hello', gr.__version__)
import time
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel, DiffusionPipeline, LCMScheduler
from tqdm import tqdm
from PIL import Image
from PIL import Image, ImageDraw, ImageFont
import random
import copy
import string
from fastchat.model import load_model, get_conversation_template
from transformers import AutoTokenizer, AutoModelForCausalLM
# Debug banner around the tokenizer vocabulary size.
# NOTE(review): `tokenizer` is not defined in this chunk — these prints rely
# on a tokenizer created elsewhere in the file; confirm it exists at import
# time, otherwise this raises NameError on module load.
print('***************')
print(len(tokenizer))
print(len(tokenizer))
print('***************')
# Per-guest UI session state shared by the canvas callbacks.
global_dict = {}
# Font used to caption clicked keywords on the preview canvas.
font = ImageFont.truetype("./Arial.ttf", 32)
def exe_redo(i, t, guest_id):
    """Drop the most recently placed keyword for *guest_id* and redraw.

    Pops the top of the guest's annotation stack (if any), resets the click
    state, and repaints the remaining annotations on a fresh gray canvas.
    """
    global_dict[guest_id]['state'] = 0
    stack = global_dict[guest_id]['stack']
    if stack:
        stack.pop()

    canvas = Image.open(f'./gray256.jpg')
    painter = ImageDraw.Draw(canvas)
    marker_radius = 4
    red = (255, 0, 0)
    for position, caption in stack:
        if len(position) == 2:
            # Single click: caption anchored at the point plus a red dot.
            px, py = position
            painter.text((px + 2, py), caption, font=font, fill=red)
            painter.ellipse(((px - marker_radius, py - marker_radius),
                             (px + marker_radius, py + marker_radius)), fill='red')
        elif len(position) == 4:
            # Completed box: caption, anchor dot, and the rectangle outline.
            bx0, by0, bx1, by1 = position
            painter.text((bx0 + 2, by0), caption, font=font, fill=red)
            painter.ellipse(((bx0 - marker_radius, by0 - marker_radius),
                             (bx0 + marker_radius, by0 + marker_radius)), fill='red')
            painter.rectangle((bx0, by0, bx1, by1), outline=red)
    print('stack', global_dict[guest_id]['stack'])
    return canvas
183,731 | import os
import re
import zipfile
import torch
import gradio as gr
print('hello', gr.__version__)
import time
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel, DiffusionPipeline, LCMScheduler
from tqdm import tqdm
from PIL import Image
from PIL import Image, ImageDraw, ImageFont
import random
import copy
import string
from fastchat.model import load_model, get_conversation_template
from transformers import AutoTokenizer, AutoModelForCausalLM
# Vocabulary-size sanity check printed at startup.
# NOTE(review): `tokenizer` is not defined in this chunk — it is presumably
# created earlier in the full file; confirm before running this fragment alone.
print('***************')
print(len(tokenizer))
print(len(tokenizer))
print('***************')

# Per-guest UI session state: guest_id -> {'state': 0|1, 'stack': [(position, keyword), ...]}.
global_dict = {}
# Font used to render keywords onto the 256x256 template canvas.
font = ImageFont.truetype("./Arial.ttf", 32)
# Gradio UI definition for the TextDiffuser-2 demo.
# NOTE(review): the handlers wired below (get_pixels, exe_redo, exe_undo,
# skip_fun, text_to_image) must be defined before this block runs — in this
# chunk some appear after it; confirm ordering against the full file.
with gr.Blocks() as demo:
    # guest_id = random.randint(0,100000000)
    # register
    # Page header: title, authors, paper/code links, usage tips, architecture figure.
    gr.HTML(
        """
        <div style="text-align: center; max-width: 1600px; margin: 20px auto;">
        <h2 style="font-weight: 900; font-size: 2.3rem; margin: 0rem">
            TextDiffuser-2: Unleashing the Power of Language Models for Text Rendering
        </h2>
        <h2 style="font-weight: 460; font-size: 1.1rem; margin: 0rem">
            <a href="https://jingyechen.github.io/">Jingye Chen</a>, <a href="https://hypjudy.github.io/website/">Yupan Huang</a>, <a href="https://scholar.google.com/citations?user=0LTZGhUAAAAJ&hl=en">Tengchao Lv</a>, <a href="https://www.microsoft.com/en-us/research/people/lecu/">Lei Cui</a>, <a href="https://cqf.io/">Qifeng Chen</a>, <a href="https://thegenerality.com/">Furu Wei</a>
        </h2>
        <h2 style="font-weight: 460; font-size: 1.1rem; margin: 0rem">
            HKUST, Sun Yat-sen University, Microsoft Research
        </h2>
        <h3 style="font-weight: 450; font-size: 1rem; margin: 0rem">
        [<a href="https://arxiv.org/abs/2311.16465" style="color:blue;">arXiv</a>]
        [<a href="https://github.com/microsoft/unilm/tree/master/textdiffuser-2" style="color:blue;">Code</a>]
        [<a href="https://jingyechen.github.io/textdiffuser2/" style="color:blue;">Project Page</a>]
        [<a href="https://discord.gg/q7eHPupu" style="color:purple;">Discord</a>]
        </h3>
        <h2 style="text-align: left; font-weight: 450; font-size: 1rem; margin-top: 0.5rem; margin-bottom: 0.5rem">
        We propose <b>TextDiffuser-2</b>, aiming at unleashing the power of language models for text rendering. Specifically, we <b>tame a language model into a layout planner</b> to transform user prompt into a layout using the caption-OCR pairs. The language model demonstrates flexibility and automation by inferring keywords from user prompts or incorporating user-specified keywords to determine their positions. Secondly, we <b>leverage the language model in the diffusion model as the layout encoder</b> to represent the position and content of text at the line level. This approach enables diffusion models to generate text images with broader diversity.
        </h2>
        <h2 style="text-align: left; font-weight: 450; font-size: 1rem; margin-top: 0.5rem; margin-bottom: 0.5rem">
        👀 <b>Tips for using this demo</b>: <b>(1)</b> Please carefully read the disclaimer in the below. Current verison can only support English. <b>(2)</b> The specification of keywords is optional. If provided, the language model will do its best to plan layouts using the given keywords. <b>(3)</b> If a template is given, the layout planner (M1) is not used. <b>(4)</b> Three operations, including redo, undo, and skip are provided. When using skip, only the left-top point of a keyword will be recorded, resulting in more diversity but sometimes decreasing the accuracy. <b>(5)</b> The layout planner can produce different layouts. You can increase the temperature to enhance the diversity. ✨ <b>(6)</b> We also provide the experimental demo combining <b>TextDiffuser-2</b> and <b>LCM</b>. The inference is fast using less sampling steps, although the precision in text rendering might decrease.
        </h2>
        <img src="https://raw.githubusercontent.com/JingyeChen/jingyechen.github.io/master/textdiffuser2/static/images/architecture_blank.jpg" alt="textdiffuser-2">
        </div>
        """)

    with gr.Tab("Text-to-Image"):
        with gr.Row():
            with gr.Column(scale=1):
                # Main generation inputs.
                prompt = gr.Textbox(label="Prompt. You can let language model automatically identify keywords, or provide them below", placeholder="A beautiful city skyline stamp of Shanghai")
                keywords = gr.Textbox(label="(Optional) Keywords. Should be seperated by / (e.g., keyword1/keyword2/...)", placeholder="keyword1/keyword2")
                positive_prompt = gr.Textbox(label="(Optional) Positive prompt", value=", digital art, very detailed, fantasy, high definition, cinematic light, dnd, trending on artstation")

                # many encounter concurrent problem
                # Optional clickable template canvas: lets the user place
                # keywords manually instead of using the layout planner.
                with gr.Accordion("(Optional) Template - Click to paint", open=False):
                    with gr.Row():
                        with gr.Column(scale=1):
                            i = gr.Image(label="Canvas", type='filepath', value=f'./gray256.jpg', height=256, width=256)
                        with gr.Column(scale=1):
                            t = gr.Textbox(label="Keyword", value='input_keyword')
                            redo = gr.Button(value='Redo - Cancel the last keyword')
                            undo = gr.Button(value='Undo - Clear the canvas')
                            skip_button = gr.Button(value='Skip - Operate the next keyword')

                # Sampling controls.
                radio = gr.Radio(["TextDiffuser-2", "TextDiffuser-2-LCM"], label="Choice of models", value="TextDiffuser-2")
                slider_natural = gr.Checkbox(label="Natural image generation", value=False, info="The text position and content info will not be incorporated.")
                slider_step = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Sampling step", info="The sampling step for TextDiffuser-2. You may decease the step to 4 when using LCM.")
                slider_guidance = gr.Slider(minimum=1, maximum=13, value=7.5, step=0.5, label="Scale of classifier-free guidance", info="The scale of cfg and is set to 7.5 in default. When using LCM, cfg is set to 1.")
                slider_batch = gr.Slider(minimum=1, maximum=6, value=4, step=1, label="Batch size", info="The number of images to be sampled.")
                slider_temperature = gr.Slider(minimum=0.1, maximum=2, value=1.4, step=0.1, label="Temperature", info="Control the diversity of layout planner. Higher value indicates more diversity.")
                # slider_seed = gr.Slider(minimum=1, maximum=10000, label="Seed", randomize=True)
                button = gr.Button("Generate")
                # Hidden session id; "-1" means "not registered yet" (see get_pixels).
                guest_id_box = gr.Textbox(label="guest_id", value=f"-1")

                # Canvas event wiring.
                i.select(get_pixels,[i,t,guest_id_box],[i,guest_id_box])
                redo.click(exe_redo, [i,t,guest_id_box],[i])
                undo.click(exe_undo, [i,t,guest_id_box],[i])
                skip_button.click(skip_fun, [i,t,guest_id_box])

            with gr.Column(scale=1):
                # Generation outputs plus intermediate artifacts for debugging.
                output = gr.Gallery(label='Generated image')

                with gr.Accordion("Intermediate results", open=False):
                    gr.Markdown("Composed prompt")
                    composed_prompt = gr.Textbox(label='')
                    gr.Markdown("Layout visualization")
                    layout = gr.Image(height=256, width=256)

        button.click(text_to_image, inputs=[guest_id_box, prompt,keywords,positive_prompt, radio,slider_step,slider_guidance,slider_batch,slider_temperature,slider_natural], outputs=[output, composed_prompt, layout])

        gr.Markdown("## Prompt Examples")
        gr.Examples(
            [
                ["A beautiful city skyline stamp of Shanghai", "", False],
                ["The words 'KFC VIVO50' are inscribed upon the wall in a neon light effect", "KFC/VIVO50", False],
                ["A logo of superman", "", False],
                ["A pencil sketch of a tree with the title nothing to tree here", "", False],
                ["handwritten signature of peter", "", False],
                ["Delicate greeting card of happy birthday to xyz", "", False],
                ["Book cover of good morning baby ", "", False],
                ["The handwritten words Hello World displayed on a wall in a neon light effect", "", False],
                ["Logo of winter in artistic font, made by snowflake", "", False],
                ["A book cover named summer vibe", "", False],
                ["Newspaper with the title Love Story", "", False],
                ["A logo for the company EcoGrow, where the letters look like plants", "EcoGrow", False],
                ["A poster titled 'Quails of North America', showing different kinds of quails.", "Quails/of/North/America", False],
                ["A detailed portrait of a fox guardian with a shield with Kung Fu written on it, by victo ngai and justin gerard, digital art, realistic painting", "kung/fu", False],
                ["A stamp of breath of the wild", "breath/of/the/wild", False],
                ["Poster of the incoming movie Transformers", "Transformers", False],
                ["Some apples are on a table", "", True],
                ["a hotdog with mustard and other toppings on it", "", True],
                ["a bathroom that has a slanted ceiling and a large bath tub", "", True],
                ["a man holding a tennis racquet on a tennis court", "", True],
                ["hamburger with bacon, lettuce, tomato and cheese| promotional image| hyperquality| products shot| full - color| extreme render| mouthwatering", "", True],
            ],
            [
                prompt,
                keywords,
                slider_natural
            ],
            examples_per_page=25
        )

    # Footer: version, contacts, disclaimer.
    gr.HTML(
        """
        <div style="text-align: justify; max-width: 1100px; margin: 20px auto;">
        <h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem">
        <b>Version</b>: 1.0
        </h3>
        <h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem">
        <b>Contact</b>:
        For help or issues using TextDiffuser-2, please email Jingye Chen <a href="mailto:qwerty.chen@connect.ust.hk">(qwerty.chen@connect.ust.hk)</a>, Yupan Huang <a href="mailto:huangyp28@mail2.sysu.edu.cn">(huangyp28@mail2.sysu.edu.cn)</a> or submit a GitHub issue. For other communications related to TextDiffuser-2, please contact Lei Cui <a href="mailto:lecu@microsoft.com">(lecu@microsoft.com)</a> or Furu Wei <a href="mailto:fuwei@microsoft.com">(fuwei@microsoft.com)</a>.
        </h3>
        <h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem">
        <b>Disclaimer</b>:
        Please note that the demo is intended for academic and research purposes <b>ONLY</b>. Any use of the demo for generating inappropriate content is strictly prohibited. The responsibility for any misuse or inappropriate use of the demo lies solely with the users who generated such content, and this demo shall not be held liable for any such use.
        </h3>
        </div>
        """
    )
def get_pixels(i, t, guest_id, evt: gr.SelectData):
    """Record a canvas click for the current guest and redraw the template.

    The first click of a pair stores the keyword's top-left anchor; the
    second click extends it to a full (x0, y0, x1, y1) bounding box. Guests
    arriving with id '-1' are registered lazily under a timestamp-derived id.
    Returns the redrawn canvas and the (possibly new) guest id.
    """
    # Lazily register a session for first-time guests.
    if guest_id == '-1':
        seed = str(int(time.time()))
        global_dict[str(seed)] = {'state': 0, 'stack': []}
        guest_id = str(seed)
    else:
        seed = guest_id

    click = evt.index
    session = global_dict[guest_id]

    if session['state'] == 0:
        # First click: remember the anchor point together with the keyword.
        session['stack'].append((click, t))
        print(click, session['stack'])
        session['state'] = 1
    else:
        # Second click: upgrade the stored anchor to a full bounding box.
        anchor, keyword = session['stack'].pop()
        ax, ay = anchor
        session['stack'].append(((ax, ay, click[0], click[1]), keyword))
        session['state'] = 0

    canvas = Image.open(f'./gray256.jpg')
    painter = ImageDraw.Draw(canvas)
    red = (255, 0, 0)
    radius = 4

    for position, keyword in session['stack']:
        if len(position) == 2:
            # Anchor only: text plus a dot marker.
            px, py = position
            painter.text((px+2, py), keyword, font=font, fill=red)
            painter.ellipse(((px-radius, py-radius), (px+radius, py+radius)), fill='red')
        elif len(position) == 4:
            # Full box: text, anchor dot, and outline.
            px0, py0, px1, py1 = position
            painter.text((px0+2, py0), keyword, font=font, fill=red)
            painter.ellipse(((px0-radius, py0-radius), (px0+radius, py0+radius)), fill='red')
            painter.rectangle((px0, py0, px1, py1), outline=red)

    print('stack', session['stack'])
    return canvas, seed
import os
import re
import zipfile
import torch
import gradio as gr
print('hello', gr.__version__)
import time
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel, DiffusionPipeline, LCMScheduler
from tqdm import tqdm
from PIL import Image
from PIL import Image, ImageDraw, ImageFont
import random
import copy
import string
from fastchat.model import load_model, get_conversation_template
from transformers import AutoTokenizer, AutoModelForCausalLM
# Layout planner (M1): a language model fine-tuned to turn a caption into
# per-keyword bounding boxes.
m1_model_path = 'JingyeChen22/textdiffuser2_layout_planner'
m1_tokenizer = AutoTokenizer.from_pretrained(m1_model_path, use_fast=False)
m1_model = AutoModelForCausalLM.from_pretrained(
    m1_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
).cuda()

# Diffusion-side CLIP text encoder and tokenizer.
text_encoder = CLIPTextModel.from_pretrained(
    'JingyeChen22/textdiffuser2-full-ft', subfolder="text_encoder"
).cuda().half()
tokenizer = CLIPTokenizer.from_pretrained(
    'runwayml/stable-diffusion-v1-5', subfolder="tokenizer"
)

print('***************')
print(len(tokenizer))
# Extend the CLIP vocabulary with coordinate tokens l0..l519 / t / r / b
# (used by the layout encoding, cf. the "l{x} t{y} r{x} b{y}" prompt format).
for i in range(520):
    tokenizer.add_tokens(['l' + str(i) ])  # left
    tokenizer.add_tokens(['t' + str(i) ])  # top
    tokenizer.add_tokens(['r' + str(i) ])  # right (original comment said "width")
    tokenizer.add_tokens(['b' + str(i) ])  # bottom (original comment said "height")
# Per-character tokens like "[a]".
# NOTE(review): `alphabet` is not defined in this chunk — presumably defined
# earlier in the full file; confirm.
for c in alphabet:
    tokenizer.add_tokens([f'[{c}]'])
print(len(tokenizer))
print('***************')

vae = AutoencoderKL.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder="vae").half().cuda()
unet = UNet2DConditionModel.from_pretrained(
    'JingyeChen22/textdiffuser2-full-ft', subfolder="unet"
).half().cuda()
# Grow the text-encoder embedding matrix to cover the tokens added above.
text_encoder.resize_token_embeddings(len(tokenizer))

# LCM pipeline variant (fast sampling).
# NOTE(review): `model_id` and `lcm_lora_id` are not defined in this chunk —
# confirm their values in the full file.
pipe = DiffusionPipeline.from_pretrained(model_id, unet=copy.deepcopy(unet), tokenizer=tokenizer, text_encoder=copy.deepcopy(text_encoder), torch_dtype=torch.float16)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(lcm_lora_id)
pipe.to(device="cuda")

# Per-guest UI session state (see get_pixels / text_to_image).
global_dict = {}
def get_layout_image(ocrs):
    """Render the layout planner's output as red boxes on a black canvas.

    Each line of `ocrs` has the form '<keyword ...> l,t,r,b' with coordinates
    on a 128x128 grid; they are doubled to fit the 256x256 canvas. Parsing
    stops at the first empty line.
    """
    canvas = Image.new('RGB', (256,256), (0,0,0))
    painter = ImageDraw.ImageDraw(canvas)
    for raw in ocrs.split('\n'):
        raw = raw.strip()
        if not raw:
            break
        tokens = raw.split()
        keyword = ' '.join(tokens[:-1])
        # the size of canvas is 256x256, so scale 128-grid coords by 2
        left, top, right, bottom = [int(v)*2 for v in tokens[-1].split(',')]
        painter.rectangle([(left, top), (right, bottom)], outline ="red")
        painter.text((left, top), keyword, font=font_layout)
    return canvas
def text_to_image(guest_id, prompt,keywords,positive_prompt,radio,slider_step,slider_guidance,slider_batch,slider_temperature,slider_natural):
    """Generate images for `prompt`, optionally with planned/placed text.

    Three prompt-construction paths:
      1. `slider_natural` True: plain prompt, no layout information.
      2. No manual template for this guest: run the layout planner (M1) to
         produce keyword boxes, then encode them as coordinate + character
         tokens appended to the caption.
      3. Manual template present in `global_dict[guest_id]['stack']`: encode
         the user-placed positions directly.
    Then samples either with the hand-rolled DDPM loop ('TextDiffuser-2') or
    the LCM pipeline ('TextDiffuser-2-LCM').

    Returns (images tuple, composed prompt string, layout visualization or None).
    NOTE(review): relies on module-level globals (tokenizer, text_encoder,
    unet, vae, pipe, m1_model, global_dict) and CUDA.
    """
    print(f'[info] Prompt: {prompt} | Keywords: {keywords} | Radio: {radio} | Steps: {slider_step} | Guidance: {slider_guidance} | Natural: {slider_natural}')

    # global stack
    # global state

    if len(positive_prompt.strip()) != 0:
        prompt += positive_prompt

    with torch.no_grad():
        time1 = time.time()
        user_prompt = prompt

        if slider_natural:
            # Path 1: natural image generation — no layout tokens at all.
            user_prompt = f'{user_prompt}'
            composed_prompt = user_prompt
            prompt = tokenizer.encode(user_prompt)
            layout_image = None
        else:
            if guest_id not in global_dict or len(global_dict[guest_id]['stack']) == 0:
                # Path 2: no manual template — ask the layout planner (M1).
                if len(keywords.strip()) == 0:
                    template = f'Given a prompt that will be used to generate an image, plan the layout of visual text for the image. The size of the image is 128x128. Therefore, all properties of the positions should not exceed 128, including the coordinates of top, left, right, and bottom. All keywords are included in the caption. You dont need to specify the details of font styles. At each line, the format should be keyword left, top, right, bottom. So let us begin. Prompt: {user_prompt}'
                else:
                    keywords = keywords.split('/')
                    keywords = [i.strip() for i in keywords]
                    template = f'Given a prompt that will be used to generate an image, plan the layout of visual text for the image. The size of the image is 128x128. Therefore, all properties of the positions should not exceed 128, including the coordinates of top, left, right, and bottom. In addition, we also provide all keywords at random order for reference. You dont need to specify the details of font styles. At each line, the format should be keyword left, top, right, bottom. So let us begin. Prompt: {prompt}. Keywords: {str(keywords)}'

                msg = template
                conv = get_conversation_template(m1_model_path)
                conv.append_message(conv.roles[0], msg)
                conv.append_message(conv.roles[1], None)
                prompt = conv.get_prompt()
                inputs = m1_tokenizer([prompt], return_token_type_ids=False)
                inputs = {k: torch.tensor(v).to('cuda') for k, v in inputs.items()}
                output_ids = m1_model.generate(
                    **inputs,
                    do_sample=True,
                    temperature=slider_temperature,
                    repetition_penalty=1.0,
                    max_new_tokens=512,
                )

                # Strip the echoed input for decoder-only models.
                if m1_model.config.is_encoder_decoder:
                    output_ids = output_ids[0]
                else:
                    output_ids = output_ids[0][len(inputs["input_ids"][0]) :]
                outputs = m1_tokenizer.decode(
                    output_ids, skip_special_tokens=True, spaces_between_special_tokens=False
                )
                print(f"[{conv.roles[0]}]\n{msg}")
                print(f"[{conv.roles[1]}]\n{outputs}")
                layout_image = get_layout_image(outputs)

                ocrs = outputs.split('\n')
                time2 = time.time()
                print(time2-time1)
                # user_prompt = prompt
                current_ocr = ocrs
                ocr_ids = []
                print('user_prompt', user_prompt)
                print('current_ocr', current_ocr)
                # Encode each planned line as l/t/r/b coordinate tokens plus
                # per-character "[c]" tokens, separated by EOS.
                for ocr in current_ocr:
                    ocr = ocr.strip()
                    if len(ocr) == 0 or '###' in ocr or '.com' in ocr:
                        continue
                    items = ocr.split()
                    pred = ' '.join(items[:-1])
                    box = items[-1]
                    l,t,r,b = box.split(',')
                    l,t,r,b = int(l), int(t), int(r), int(b)
                    ocr_ids.extend(['l'+str(l), 't'+str(t), 'r'+str(r), 'b'+str(b)])
                    char_list = list(pred)
                    char_list = [f'[{i}]' for i in char_list]
                    ocr_ids.extend(char_list)
                    ocr_ids.append(tokenizer.eos_token_id)
                caption_ids = tokenizer(
                    user_prompt, truncation=True, return_tensors="pt"
                ).input_ids[0].tolist()
                # Fall back to the caption alone if the layout tokens fail to encode.
                try:
                    ocr_ids = tokenizer.encode(ocr_ids)
                    prompt = caption_ids + ocr_ids
                except:
                    prompt = caption_ids
                user_prompt = tokenizer.decode(prompt)
                composed_prompt = tokenizer.decode(prompt)
            else:
                # Path 3: user placed keywords on the template canvas —
                # encode their positions directly (planner not used).
                user_prompt += ' <|endoftext|><|startoftext|>'
                layout_image = None

                for items in global_dict[guest_id]['stack']:
                    position, text = items
                    if len(position) == 2:
                        # Anchor only ("skip" flow): canvas is 256px, token
                        # grid is 64, hence the // 4.
                        x, y = position
                        x = x // 4
                        y = y // 4
                        text_str = ' '.join([f'[{c}]' for c in list(text)])
                        user_prompt += f' l{x} t{y} {text_str} <|endoftext|>'
                    elif len(position) == 4:
                        x0, y0, x1, y1 = position
                        x0 = x0 // 4
                        y0 = y0 // 4
                        x1 = x1 // 4
                        y1 = y1 // 4
                        text_str = ' '.join([f'[{c}]' for c in list(text)])
                        user_prompt += f' l{x0} t{y0} r{x1} b{y1} {text_str} <|endoftext|>'

                # composed_prompt = user_prompt
                prompt = tokenizer.encode(user_prompt)
                composed_prompt = tokenizer.decode(prompt)

        # Pad/truncate to CLIP's 77-token context.
        prompt = prompt[:77]
        while len(prompt) < 77:
            prompt.append(tokenizer.pad_token_id)

        if radio == 'TextDiffuser-2':
            # Manual DDPM sampling loop with classifier-free guidance.
            prompts_cond = prompt
            prompts_nocond = [tokenizer.pad_token_id]*77

            prompts_cond = [prompts_cond] * slider_batch
            prompts_nocond = [prompts_nocond] * slider_batch

            prompts_cond = torch.Tensor(prompts_cond).long().cuda()
            prompts_nocond = torch.Tensor(prompts_nocond).long().cuda()

            scheduler = DDPMScheduler.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder="scheduler")
            scheduler.set_timesteps(slider_step)
            noise = torch.randn((slider_batch, 4, 64, 64)).to("cuda").half()
            input = noise

            encoder_hidden_states_cond = text_encoder(prompts_cond)[0].half()
            encoder_hidden_states_nocond = text_encoder(prompts_nocond)[0].half()

            for t in tqdm(scheduler.timesteps):
                with torch.no_grad():  # classifier free guidance
                    noise_pred_cond = unet(sample=input, timestep=t, encoder_hidden_states=encoder_hidden_states_cond[:slider_batch]).sample # b, 4, 64, 64
                    noise_pred_uncond = unet(sample=input, timestep=t, encoder_hidden_states=encoder_hidden_states_nocond[:slider_batch]).sample # b, 4, 64, 64
                    noisy_residual = noise_pred_uncond + slider_guidance * (noise_pred_cond - noise_pred_uncond) # b, 4, 64, 64
                    input = scheduler.step(noisy_residual, t, input).prev_sample
                    # Free per-step tensors to cap GPU memory.
                    del noise_pred_cond
                    del noise_pred_uncond
                    torch.cuda.empty_cache()

            # decode
            input = 1 / vae.config.scaling_factor * input
            images = vae.decode(input, return_dict=False)[0]

            # Assemble individual results and a 2x2 contact sheet.
            width, height = 512, 512
            results = []
            new_image = Image.new('RGB', (2*width, 2*height))
            for index, image in enumerate(images.cpu().float()):
                image = (image / 2 + 0.5).clamp(0, 1).unsqueeze(0)
                image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
                image = Image.fromarray((image * 255).round().astype("uint8")).convert('RGB')
                results.append(image)
                row = index // 2
                col = index % 2
                new_image.paste(image, (col*width, row*height))
            # os.system('nvidia-smi')
            torch.cuda.empty_cache()
            # os.system('nvidia-smi')
            return tuple(results), composed_prompt, layout_image

        elif radio == 'TextDiffuser-2-LCM':
            # LCM fast path: delegate sampling to the diffusers pipeline.
            generator = torch.Generator(device=pipe.device).manual_seed(random.randint(0,1000))
            image = pipe(
                prompt=user_prompt,
                generator=generator,
                # negative_prompt=negative_prompt,
                num_inference_steps=slider_step,
                guidance_scale=1,
                # num_images_per_prompt=slider_batch,
            ).images
            # os.system('nvidia-smi')
            torch.cuda.empty_cache()
            # os.system('nvidia-smi')
            return tuple(image), composed_prompt, layout_image
import os
import subprocess
import sys
from setuptools import setup, find_packages, Extension
from setuptools import Extension, find_packages, setup
# Compute the version string and materialize fairseq/version.py.
# NOTE(review): `write_version_py` is defined below in this chunk; in the
# full setup.py the definition must precede this call — confirm ordering.
version = write_version_py()

# Long description for the package metadata.
with open("readme.md") as f:
    readme = f.read()

if "READTHEDOCS" in os.environ:
    # don't build extensions when generating docs
    extensions = []
    # NOTE(review): `cmdclass` is not defined in this chunk — defined
    # elsewhere in the full file.
    if "build_ext" in cmdclass:
        del cmdclass["build_ext"]

    # use CPU build of PyTorch
    dependency_links = [
        "https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
    ]
else:
    dependency_links = []

# Ship the vendored megatron mpu sources only when present (git submodule).
# NOTE(review): `extra_packages` is not defined in this chunk — defined
# elsewhere in the full file.
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
    extra_packages.append("fairseq.model_parallel.megatron.mpu")
def write_version_py():
    """Compute the fairseq version string and write fairseq/version.py.

    Reads the base version from fairseq/version.txt, appends the short git
    commit hash when one can be obtained, writes the result into
    fairseq/version.py, and returns the version string.
    """
    with open(os.path.join("fairseq", "version.txt")) as fh:
        base_version = fh.read().strip()

    # Best effort: tag the build with the current commit; ignore failures
    # (e.g. building outside a git checkout).
    try:
        commit = (
            subprocess.check_output(["git", "rev-parse", "HEAD"])
            .decode("ascii")
            .strip()
        )
        base_version += "+" + commit[:7]
    except Exception:
        pass

    # write version info to fairseq/version.py
    with open(os.path.join("fairseq", "version.py"), "w") as fh:
        fh.write('__version__ = "{}"\n'.format(base_version))
    return base_version
# NOTE: trailing dataset-viewer page text ("Subsets and Splits ...") was an
# extraction artifact, not source code; removed so the file stays parseable.